aboutsummaryrefslogtreecommitdiffstats
path: root/recipes/linux
diff options
context:
space:
mode:
Diffstat (limited to 'recipes/linux')
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/defconfig2237
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/inconsistent-mmc-fix-2.6.35.patch94
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-Bluetooth-Support-for-n900-bluetooth-hardware.patch2279
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-Earpiece-and-headset-support-for-N900.patch577
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-Hacks-for-Nokia-N900.patch651
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-SGX-PVR-driver-for-N900.patch45789
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch759
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD759
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD_0759
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-n900-modem-support.patch7773
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-omap3isp-rx51.patch33781
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-bt-autosuspend.patch13
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-musb-ignore-spurious-SESSREQ-interrupts.patch42
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-uvc-autosuspend.patch19
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.29-dont-wait-for-mouse.patch47
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-ahci-alpm-accounting.patch300
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-vfs-tracepoints.patch116
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround-wifi.patch66
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround.patch39
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-2010-08-24.patch301256
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-pending.patch26
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-revert-mmc-hacks.patch403
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-dont-skew-the-tick.patch33
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-make-gma600-work-on-IA.patch148
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-mrst-rtc.patch28
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-rc4-annotate-device-pm.patch224
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-slab-timer.patch38
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-stable-cherry-picks.patch4289
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-FM-TX-headphone-TV-out-and-basic-jack-detection-supp.patch810
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-Introduce-and-enable-tsc2005-driver.patch912
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery.patch87
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery2.patch33
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-fix-unprotected-acess-to-task-credentials.patch98
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-lis3lv02d-acceleromet.patch140
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-tsl2563-ALS.patch52
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-powertop-timer-tracing.patch64
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-tidspbridge.patch51086
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-Use-MODULE_ALIAS-macro-at-correct-postion-for.patch43
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-fix-trigger-scan-timeout-usage.patch37
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-EEM-support-for-g_nokia.patch86
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap-rx51-add-support-for-USB-chargers.patch34
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap3-rx51-Platform-support-for-lp5523-led-chip.patch127
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-isp1704-charger-detection-driver.patch413
-rw-r--r--recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-types-for-USB-chargers.patch47
-rw-r--r--recipes/linux/linux_2.6.35.bb91
45 files changed, 456705 insertions, 0 deletions
diff --git a/recipes/linux/linux-2.6.35/nokia900/defconfig b/recipes/linux/linux-2.6.35/nokia900/defconfig
new file mode 100644
index 0000000000..2f1903d283
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/defconfig
@@ -0,0 +1,2237 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.35.3
+# Sat Oct 2 13:00:43 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_PLAT_SPEAR is not set
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
+CONFIG_OMAP_MCBSP=y
+CONFIG_OMAP_MBOX_FWK=m
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
+CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CBB=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_DEVKIT8000 is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_OMAP3_PANDORA is not set
+# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+CONFIG_MACH_NOKIA_RX51=y
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_IGEP0020 is not set
+# CONFIG_MACH_SBC3530 is not set
+# CONFIG_MACH_OMAP_3630SDP is not set
+# CONFIG_OMAP3_EMU is not set
+# CONFIG_OMAP3_SDRC_AC_TIMING is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/mmcblk0p1 rootwait console=tty0 console=ttyS2,115200n8 omapfb.vram=0:2M,1:2M,2:2M mtdoops.mtddev=2 nosplash"
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_SLEEP=y
+# CONFIG_PM_SLEEP_ADVANCED_DEBUG is not set
+CONFIG_SUSPEND_NVS=y
+CONFIG_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_MIGRATE=y
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+CONFIG_PHONET=y
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_L2CAP_EXT_FEATURES=y
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+# CONFIG_BT_HCIUART is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+CONFIG_BT_HCIH4P=m
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_PID=y
+# CONFIG_MAC80211_RC_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+CONFIG_SM_FTL=y
+CONFIG_MTD_OOPS=y
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND_ECC=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND is not set
+CONFIG_MTD_ONENAND=y
+# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
+# CONFIG_MTD_ONENAND_GENERIC is not set
+CONFIG_MTD_ONENAND_OMAP2=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_ISL29015 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_BH1770GLC is not set
+# CONFIG_C2PORT is not set
+CONFIG_CMT=m
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=m
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+CONFIG_MACVLAN=m
+# CONFIG_MACVTAP is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+# CONFIG_AX88796 is not set
+CONFIG_SMC91X=m
+# CONFIG_TI_DAVINCI_EMAC is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+CONFIG_WL12XX=m
+CONFIG_WL1251=m
+CONFIG_WL1251_SPI=m
+# CONFIG_WL1251_SDIO is not set
+# CONFIG_WL1271 is not set
+# CONFIG_ZD1211RW is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_HSO=m
+# CONFIG_USB_CDC_PHONET is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_POLLDEV=m
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_TC35894XBG is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_TOUCHSCREEN_TSC2005=y
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=m
+CONFIG_INPUT_TWL4030_VIBRA=m
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_MRST is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_HSI=y
+
+#
+# HSI controllers
+#
+CONFIG_OMAP_SSI=m
+CONFIG_OMAP_SSI_CONFIG=y
+
+#
+# HSI clients
+#
+CONFIG_SSI_PROTOCOL=m
+CONFIG_HSI_CHAR=m
+CONFIG_HSI_CMT_SPEECH=m
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=m
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_DS2782 is not set
+CONFIG_BATTERY_BQ27x00=y
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_CHARGER_ISP1704=m
+CONFIG_HWMON=m
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+CONFIG_SENSORS_LIS3_I2C=m
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_TWL4030_WATCHDOG=y
+# CONFIG_MAX63XX_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_MFD_SUPPORT=y
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+CONFIG_TWL4030_CODEC=y
+# CONFIG_MFD_TC35892 is not set
+CONFIG_TWL4030_MADC=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB8500_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_MEDIA_SUPPORT=m
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+# CONFIG_VIDEO_V4L1_COMPAT is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_IR_CORE=m
+CONFIG_VIDEO_IR=m
+# CONFIG_RC_MAP is not set
+# CONFIG_IR_NEC_DECODER is not set
+# CONFIG_IR_RC5_DECODER is not set
+# CONFIG_IR_RC6_DECODER is not set
+# CONFIG_IR_JVC_DECODER is not set
+# CONFIG_IR_SONY_DECODER is not set
+# CONFIG_IR_IMON is not set
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+# CONFIG_VIDEO_IR_I2C is not set
+
+#
+# Encoders/decoders and other helper chips
+#
+
+#
+# Audio decoders
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TDA9875 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_M52790 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_TCM825X is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# MPEG video encoders
+#
+# CONFIG_VIDEO_CX2341X is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+CONFIG_VIDEO_OMAP2_VOUT=m
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_V4L_USB_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+CONFIG_PVR=m
+CONFIG_PVR_RELEASE_N900=y
+# CONFIG_PVR_DEBUG is not set
+# CONFIG_PVR_TIMING is not set
+# CONFIG_PVR_DEBUG_PDUMP is not set
+# CONFIG_PVR_EDM_DEBUG is not set
+# CONFIG_PVR_NO_HARDWARE is not set
+# CONFIG_PVR_FORCE_CLOCKS_ON is not set
+# CONFIG_PVR_EXAMPLES is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_FB_OMAP_BOOTLOADER_INIT=y
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=2
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_DPI is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_SDI=y
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+# CONFIG_FB_OMAP2_DEBUG_SUPPORT is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_PANEL_ACX565AKM=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_ADP8860 is not set
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_RX51=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_TLV320AIC3X=y
+CONFIG_SND_SOC_TPA6130A2=y
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_HID=m
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_WHITELIST=y
+CONFIG_USB_OTG_BLACKLIST_HUB=y
+CONFIG_USB_MON=m
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_USB_G_NOKIA=m
+CONFIG_USB_G_NOKIA_EEM=y
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_WEBCAM is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_USB_ULPI is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+CONFIG_LEDS_LP5523=m
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
+
+#
+# LED Triggers
+#
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+CONFIG_LEDS_TRIGGER_GPIO=m
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=m
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_VIDEO_TM6000 is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_OTUS is not set
+# CONFIG_RT2870 is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_SPECTRA is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_UDL is not set
+
+#
+# RAR Register Driver
+#
+CONFIG_IIO=m
+# CONFIG_IIO_RING_BUFFER is not set
+# CONFIG_IIO_TRIGGER is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_LIS3L02DQ is not set
+
+#
+# Analog to digital convertors
+#
+# CONFIG_MAX1363 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16260 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16300 is not set
+# CONFIG_ADIS16350 is not set
+# CONFIG_ADIS16400 is not set
+
+#
+# Light sensors
+#
+CONFIG_SENSORS_TSL2563=m
+
+#
+# Triggers - standalone
+#
+# CONFIG_RAMZSWAP is not set
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_FB_SM7XX is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_ST_BT is not set
+# CONFIG_ADIS16255 is not set
+# CONFIG_MFLD_SENSORS is not set
+
+#
+# Intel CE Media Processor
+#
+# CONFIG_X86_INTEL_CE is not set
+# CONFIG_VIDEO_MRSTCI is not set
+# CONFIG_N_IFX_MUX is not set
+CONFIG_TIDSPBRIDGE=m
+CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE=0x600000
+# CONFIG_TIDSPBRIDGE_DEBUG is not set
+CONFIG_TIDSPBRIDGE_RECOVERY=y
+# CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK is not set
+CONFIG_TIDSPBRIDGE_WDT3=y
+CONFIG_TIDSPBRIDGE_WDT_TIMEOUT=5
+# CONFIG_TIDSPBRIDGE_NTFY_PWRERR is not set
+CONFIG_TIDSPBRIDGE_BACKTRACE=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT23=y
+# CONFIG_EXT4_FS_XATTR is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_UBIFS_FS=m
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_DETECT_SOFTLOCKUP is not set
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_TESTS=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_CTR=m
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_GHASH=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_OMAP_SHAM=m
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/recipes/linux/linux-2.6.35/nokia900/inconsistent-mmc-fix-2.6.35.patch b/recipes/linux/linux-2.6.35/nokia900/inconsistent-mmc-fix-2.6.35.patch
new file mode 100644
index 0000000000..12ade34e7e
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/inconsistent-mmc-fix-2.6.35.patch
@@ -0,0 +1,94 @@
+Index: linux-2.6.35/arch/arm/mach-omap2/board-rx51-peripherals.c
+===================================================================
+--- linux-2.6.35.orig/arch/arm/mach-omap2/board-rx51-peripherals.c 2010-09-19 11:01:36.480370002 +0200
++++ linux-2.6.35/arch/arm/mach-omap2/board-rx51-peripherals.c 2010-09-19 11:02:56.730370000 +0200
+@@ -475,6 +475,7 @@
+ static struct omap2_hsmmc_info mmc[] __initdata = {
+ {
+ .name = "external",
++ .mmcblk_devidx = 1,
+ .mmc = 1,
+ .wires = 4,
+ .cover_only = true,
+@@ -484,6 +485,7 @@
+ },
+ {
+ .name = "internal",
++ .mmcblk_devidx = 0,
+ .mmc = 2,
+ .wires = 8, /* See also rx51_mmc2_remux */
+ .gpio_cd = -EINVAL,
+Index: linux-2.6.35/arch/arm/mach-omap2/hsmmc.c
+===================================================================
+--- linux-2.6.35.orig/arch/arm/mach-omap2/hsmmc.c 2010-09-19 11:01:13.140370002 +0200
++++ linux-2.6.35/arch/arm/mach-omap2/hsmmc.c 2010-09-19 11:03:48.490370007 +0200
+@@ -257,6 +257,7 @@
+ snprintf(hc->name, ARRAY_SIZE(hc->name),
+ "mmc%islot%i", c->mmc, 1);
+ mmc->slots[0].name = hc->name;
++ mmc->slots[0].mmcblk_devidx = c->mmcblk_devidx;
+ mmc->nr_slots = 1;
+ mmc->slots[0].wires = c->wires;
+ mmc->slots[0].internal_clock = !c->ext_clock;
+Index: linux-2.6.35/arch/arm/mach-omap2/hsmmc.h
+===================================================================
+--- linux-2.6.35.orig/arch/arm/mach-omap2/hsmmc.h 2010-09-19 11:01:13.150370002 +0200
++++ linux-2.6.35/arch/arm/mach-omap2/hsmmc.h 2010-09-19 11:04:09.070370002 +0200
+@@ -21,6 +21,7 @@
+ char *name; /* or NULL for default */
+ struct device *dev; /* returned: pointer to mmc adapter */
+ int ocr_mask; /* temporary HACK */
++ int mmcblk_devidx; /* preferred mmcblkX device index */
+ /* Remux (pad configuation) when powering on/off */
+ void (*remux)(struct device *dev, int slot, int power_on);
+ };
+Index: linux-2.6.35/arch/arm/plat-omap/include/plat/mmc.h
+===================================================================
+--- linux-2.6.35.orig/arch/arm/plat-omap/include/plat/mmc.h 2010-09-19 11:04:43.480369999 +0200
++++ linux-2.6.35/arch/arm/plat-omap/include/plat/mmc.h 2010-09-19 11:05:06.150370000 +0200
+@@ -132,6 +132,7 @@
+ int (*get_cover_state)(struct device *dev, int slot);
+
+ const char *name;
++ int mmcblk_devidx; /* preferred mmcblkX index for this slot */
+ u32 ocr_mask;
+
+ /* Card detection IRQs */
+Index: linux-2.6.35/drivers/mmc/card/block.c
+===================================================================
+--- linux-2.6.35.orig/drivers/mmc/card/block.c 2010-09-19 11:05:25.060370002 +0200
++++ linux-2.6.35/drivers/mmc/card/block.c 2010-09-19 11:07:06.980369995 +0200
+@@ -482,7 +482,7 @@
+ struct mmc_blk_data *md;
+ int devidx, ret;
+
+- devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
++ devidx = find_next_zero_bit(dev_use, MMC_NUM_MINORS, card->host->mmcblk_devidx);
+ if (devidx >= MMC_NUM_MINORS)
+ return ERR_PTR(-ENOSPC);
+ __set_bit(devidx, dev_use);
+Index: linux-2.6.35/drivers/mmc/host/omap_hsmmc.c
+===================================================================
+--- linux-2.6.35.orig/drivers/mmc/host/omap_hsmmc.c 2010-09-19 11:07:22.040370005 +0200
++++ linux-2.6.35/drivers/mmc/host/omap_hsmmc.c 2010-09-19 11:08:05.310369999 +0200
+@@ -2157,6 +2157,7 @@
+ }
+
+ mmc->ocr_avail = mmc_slot(host).ocr_mask;
++ mmc->mmcblk_devidx = mmc_slot(host).mmcblk_devidx;
+
+ /* Request IRQ for card detect */
+ if ((mmc_slot(host).card_detect_irq)) {
+Index: linux-2.6.35/include/linux/mmc/host.h
+===================================================================
+--- linux-2.6.35.orig/include/linux/mmc/host.h 2010-09-19 11:09:55.410370002 +0200
++++ linux-2.6.35/include/linux/mmc/host.h 2010-09-19 11:10:17.200370002 +0200
+@@ -210,7 +210,7 @@
+ #endif
+
+ struct dentry *debugfs_root;
+-
++ unsigned int mmcblk_devidx; /* preferred mmc block device index (mmcblkX) */
+ struct mutex *port_mutex;
+
+ unsigned long private[0] ____cacheline_aligned;
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Bluetooth-Support-for-n900-bluetooth-hardware.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Bluetooth-Support-for-n900-bluetooth-hardware.patch
new file mode 100644
index 0000000000..ceccd11117
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Bluetooth-Support-for-n900-bluetooth-hardware.patch
@@ -0,0 +1,2279 @@
+From cd320710da1ab5b340bff78311fa492f9ff9b9c6 Mon Sep 17 00:00:00 2001
+From: Ville Tervo <ville.tervo@nokia.com>
+Date: Fri, 9 Apr 2010 11:56:16 +0300
+Subject: [PATCH 04/11] Bluetooth: Support for n900 bluetooth hardware
+
+Adds support for N900 bluetooth hardware
+
+Signed-off-by: Ville Tervo <ville.tervo@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 73 +
+ drivers/bluetooth/Kconfig | 11
+ drivers/bluetooth/Makefile | 2
+ drivers/bluetooth/hci_h4p/Makefile | 7
+ drivers/bluetooth/hci_h4p/core.c | 1038 +++++++++++++++++++++++++++
+ drivers/bluetooth/hci_h4p/fw-bcm.c | 144 +++
+ drivers/bluetooth/hci_h4p/fw-csr.c | 141 +++
+ drivers/bluetooth/hci_h4p/fw-ti1273.c | 113 ++
+ drivers/bluetooth/hci_h4p/fw.c | 166 ++++
+ drivers/bluetooth/hci_h4p/hci_h4p.h | 231 ++++++
+ drivers/bluetooth/hci_h4p/uart.c | 203 +++++
+ include/linux/bluetooth/hci_h4p.h | 41 +
+ 12 files changed, 2170 insertions(+)
+ create mode 100644 drivers/bluetooth/hci_h4p/Makefile
+ create mode 100644 drivers/bluetooth/hci_h4p/core.c
+ create mode 100644 drivers/bluetooth/hci_h4p/fw-bcm.c
+ create mode 100644 drivers/bluetooth/hci_h4p/fw-csr.c
+ create mode 100644 drivers/bluetooth/hci_h4p/fw-ti1273.c
+ create mode 100644 drivers/bluetooth/hci_h4p/fw.c
+ create mode 100644 drivers/bluetooth/hci_h4p/hci_h4p.h
+ create mode 100644 drivers/bluetooth/hci_h4p/uart.c
+ create mode 100644 include/linux/bluetooth/hci_h4p.h
+
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -24,6 +24,7 @@
+ #include <linux/gpio.h>
+ #include <linux/gpio_keys.h>
+ #include <linux/mmc/host.h>
++#include <linux/bluetooth/hci_h4p.h>
+
+ #include <plat/mcspi.h>
+ #include <plat/mux.h>
+@@ -33,6 +34,7 @@
+ #include <plat/gpmc.h>
+ #include <plat/onenand.h>
+ #include <plat/gpmc-smc91x.h>
++#include <plat/serial.h>
+
+ #include "mux.h"
+ #include "hsmmc.h"
+@@ -46,6 +48,10 @@
+ #define RX51_TSC2005_RESET_GPIO 104
+ #define RX51_TSC2005_IRQ_GPIO 100
+
++#define RX51_HCI_H4P_RESET_GPIO 91
++#define RX51_HCI_H4P_HOSTWU_GPIO 101
++#define RX51_HCI_H4P_BTWU_GPIO 37
++
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
+@@ -928,6 +934,72 @@
+ */
+ }
+
++static void rx51_hci_h4p_set_power(bool enable)
++{
++ gpio_set_value(RX51_HCI_H4P_RESET_GPIO, enable);
++}
++
++static void rx51_hci_h4p_set_bt_wu(bool enable)
++{
++ gpio_set_value(RX51_HCI_H4P_BTWU_GPIO, enable);
++}
++
++static bool rx51_hci_h4p_get_host_wu(void)
++{
++ return gpio_get_value(RX51_HCI_H4P_HOSTWU_GPIO);
++}
++
++struct hci_h4p_platform_data bt_plat_data = {
++ .uart_irq = INT_24XX_UART2_IRQ,
++ .host_wu = rx51_hci_h4p_get_host_wu,
++ .bt_wu = rx51_hci_h4p_set_bt_wu,
++ .reset = rx51_hci_h4p_set_power,
++ .host_wu_gpio = RX51_HCI_H4P_HOSTWU_GPIO,
++};
++
++static struct platform_device rx51_bt_device = {
++ .name = "hci_h4p",
++ .id = -1,
++ .num_resources = 0,
++ .dev = {
++ .platform_data = (void *)&bt_plat_data,
++ }
++};
++
++void __init rx51_bt_init(void)
++{
++ int err;
++
++ err = gpio_request(RX51_HCI_H4P_RESET_GPIO, "bt_reset");
++ if (err < 0)
++ return;
++
++ err = gpio_request(RX51_HCI_H4P_BTWU_GPIO, "bt_wakeup");
++ if (err < 0)
++ goto fail;
++
++ err = gpio_request(RX51_HCI_H4P_HOSTWU_GPIO, "host_wakeup");
++ if (err < 0)
++ goto fail2;
++
++ gpio_direction_output(RX51_HCI_H4P_RESET_GPIO, 0);
++ gpio_direction_output(RX51_HCI_H4P_BTWU_GPIO, 0);
++ gpio_direction_input(RX51_HCI_H4P_HOSTWU_GPIO);
++
++ bt_plat_data.uart_base = ioremap(OMAP3_UART2_BASE, SZ_2K);
++
++ err = platform_device_register(&rx51_bt_device);
++ if (!err)
++ return;
++
++ gpio_free(RX51_HCI_H4P_HOSTWU_GPIO);
++fail2:
++ gpio_free(RX51_HCI_H4P_BTWU_GPIO);
++fail:
++ gpio_free(RX51_HCI_H4P_RESET_GPIO);
++ printk(KERN_ERR "Bluetooth device registration failed\n");
++}
++
+ void __init rx51_peripherals_init(void)
+ {
+ rx51_i2c_init();
+@@ -936,6 +1008,7 @@
+ rx51_add_gpio_keys();
+ rx51_init_wl1251();
+ rx51_init_tsc2005();
++ rx51_bt_init();
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ omap2_hsmmc_init(mmc);
+--- a/drivers/bluetooth/Kconfig
++++ b/drivers/bluetooth/Kconfig
+@@ -207,4 +207,15 @@
+ Say Y here to compile support for "Atheros firmware download driver"
+ into the kernel or say M to compile it as module (ath3k).
+
++config BT_HCIH4P
++ tristate "HCI driver with Nokia H4 extensions"
++ depends on BT
++ select FW_LOADER
++ help
++ Bluetooth HCI driver with H4 extensions. This driver provides
++ support for H4+ Bluetooth chip with vendor-specific H4 extensions.
++
++ Say Y here to compile support for h4 extended devices into the kernel
++ or say M to compile it as module (hci_h4p).
++
+ endmenu
+--- a/drivers/bluetooth/Makefile
++++ b/drivers/bluetooth/Makefile
+@@ -27,3 +27,5 @@
+ hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
+ hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
+ hci_uart-objs := $(hci_uart-y)
++
++obj-$(CONFIG_BT_HCIH4P) += hci_h4p/
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile for the Linux Bluetooth HCI device drivers.
++#
++
++obj-$(CONFIG_BT_HCIH4P) += hci_h4p.o
++
++hci_h4p-objs := core.o fw.o uart.o fw-csr.o fw-bcm.o fw-ti1273.o
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/core.c
+@@ -0,0 +1,1038 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2005-2010 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/serial_reg.h>
++#include <linux/skbuff.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/interrupt.h>
++#include <linux/gpio.h>
++#include <linux/timer.h>
++#include <linux/bluetooth/hci_h4p.h>
++
++#include <mach/hardware.h>
++#include <mach/irqs.h>
++
++#include <net/bluetooth/bluetooth.h>
++#include <net/bluetooth/hci_core.h>
++#include <net/bluetooth/hci.h>
++
++#include "hci_h4p.h"
++
++static void hci_h4p_set_clk(struct hci_h4p_info *info, int *clock, int enable)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->clocks_lock, flags);
++ if (enable && !*clock) {
++ NBT_DBG_POWER("Enabling %p\n", clock);
++ clk_enable(info->uart_fclk);
++ clk_enable(info->uart_iclk);
++ if (atomic_read(&info->clk_users) == 0)
++ hci_h4p_restore_regs(info);
++ atomic_inc(&info->clk_users);
++ }
++
++ if (!enable && *clock) {
++ NBT_DBG_POWER("Disabling %p\n", clock);
++ if (atomic_dec_and_test(&info->clk_users))
++ hci_h4p_store_regs(info);
++ clk_disable(info->uart_fclk);
++ clk_disable(info->uart_iclk);
++ }
++
++ *clock = enable;
++ spin_unlock_irqrestore(&info->clocks_lock, flags);
++}
++
++/* Power management functions */
++void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable)
++{
++ u8 v;
++
++ return;
++
++ v = hci_h4p_inb(info, UART_OMAP_SYSC);
++ v &= ~(UART_OMAP_SYSC_IDLEMASK);
++
++ if (enable)
++ v |= UART_OMAP_SYSC_SMART_IDLE;
++ else
++ v |= UART_OMAP_SYSC_NO_IDLE;
++
++ hci_h4p_outb(info, UART_OMAP_SYSC, v);
++}
++
++static void hci_h4p_disable_tx(struct hci_h4p_info *info)
++{
++ NBT_DBG_POWER("\n");
++
++ if (!info->pm_enabled)
++ return;
++
++ hci_h4p_smart_idle(info, 1);
++
++ info->bt_wakeup(0);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
++ info->tx_enabled = 0;
++}
++
++void hci_h4p_enable_tx(struct hci_h4p_info *info)
++{
++ NBT_DBG_POWER("\n");
++
++ if (!info->pm_enabled)
++ return;
++
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
++ info->tx_enabled = 1;
++ hci_h4p_smart_idle(info, 0);
++ info->bt_wakeup(1);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++}
++
++static void hci_h4p_disable_rx(struct hci_h4p_info *info)
++{
++ if (!info->pm_enabled)
++ return;
++
++ info->rx_enabled = 0;
++
++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) {
++ NBT_DBG("data ready postpone autorts");
++ return;
++ }
++
++ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) {
++ NBT_DBG("trasmitter not empty postpone autorts");
++ return;
++ }
++
++ hci_h4p_set_rts(info, info->rx_enabled);
++ __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
++ info->autorts = 0;
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
++}
++
++static void hci_h4p_enable_rx(struct hci_h4p_info *info)
++{
++ if (!info->pm_enabled)
++ return;
++
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
++ info->rx_enabled = 1;
++
++ hci_h4p_set_rts(info, 1);
++
++ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) {
++ NBT_DBG("trasmitter not empty postpone autorts");
++ return;
++ }
++
++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) {
++ NBT_DBG("data ready postpone autorts");
++ return;
++ }
++
++ __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
++ info->autorts = 1;
++}
++
++/* Negotiation functions */
++int hci_h4p_send_alive_packet(struct hci_h4p_info *info)
++{
++ struct hci_h4p_alive_hdr *alive_hdr;
++ struct hci_h4p_alive_msg *alive_cmd;
++ struct sk_buff *skb;
++ unsigned long flags;
++
++ NBT_DBG("Sending alive packet\n");
++
++ skb = bt_skb_alloc(HCI_H4P_ALIVE_HDR_SIZE + HCI_H4P_ALIVE_MSG_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return -ENOMEM;
++
++ alive_hdr = (void *) skb_put(skb, HCI_H4P_ALIVE_HDR_SIZE);
++ alive_hdr->dlen = HCI_H4P_ALIVE_MSG_SIZE;
++ alive_cmd = (void *) skb_put(skb, HCI_H4P_ALIVE_MSG_SIZE);
++ alive_cmd->message_id = HCI_H4P_ALIVE_IND_REQ;
++ alive_cmd->unused = 0x00;
++ *skb_push(skb, 1) = H4_ALIVE_PKT;
++
++ skb_queue_tail(&info->txq, skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ NBT_DBG("Alive packet sent\n");
++
++ return 0;
++}
++
++static void hci_h4p_alive_packet(struct hci_h4p_info *info, struct sk_buff *skb)
++{
++ struct hci_h4p_alive_hdr *alive_hdr = (void *) skb->data;
++ struct hci_h4p_alive_msg *alive_evt;
++
++ if (alive_hdr->dlen > skb->len) {
++ info->init_error = -EPROTO;
++ complete(&info->init_completion);
++ return;
++ }
++
++ alive_evt = (void *) skb_pull(skb, HCI_H4P_ALIVE_HDR_SIZE);
++
++ NBT_DBG("Received alive packet\n");
++ if (alive_evt->message_id != HCI_H4P_ALIVE_IND_RESP) {
++ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
++ info->init_error = -EINVAL;
++ }
++
++ complete(&info->init_completion);
++ kfree_skb(skb);
++}
++
++static int hci_h4p_send_negotiation(struct hci_h4p_info *info)
++{
++ struct hci_h4p_init_cmd *init_cmd;
++ struct hci_h4p_init_hdr *init_hdr;
++ struct sk_buff *skb;
++ unsigned long flags;
++ int err;
++
++ NBT_DBG("Sending negotiation..\n");
++
++ skb = bt_skb_alloc(HCI_H4P_INIT_HDR_SIZE + HCI_H4P_INIT_CMD_SIZE, GFP_KERNEL);
++ if (!skb)
++ return -ENOMEM;
++
++ init_hdr = (void *)skb_put(skb, HCI_H4P_INIT_HDR_SIZE);
++ init_hdr->dlen = HCI_H4P_INIT_CMD_SIZE;
++ init_cmd = (void *)skb_put(skb, HCI_H4P_INIT_CMD_SIZE);
++ init_cmd->ack = 0x00;
++ init_cmd->baudrate = cpu_to_le16(0x01a1);
++ init_cmd->unused = cpu_to_le16(0x0000);
++ init_cmd->mode = HCI_H4P_MODE;
++ init_cmd->sys_clk = cpu_to_le16(0x9600);
++ init_cmd->unused2 = cpu_to_le16(0x0000);
++ *skb_push(skb, 1) = H4_NEG_PKT;
++
++ hci_h4p_change_speed(info, INIT_SPEED);
++
++ hci_h4p_set_rts(info, 1);
++ info->init_error = 0;
++ init_completion(&info->init_completion);
++ skb_queue_tail(&info->txq, skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
++ msecs_to_jiffies(1000)))
++ return -ETIMEDOUT;
++
++ if (info->init_error < 0)
++ return info->init_error;
++
++ /* Change to operational settings */
++ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
++ hci_h4p_set_rts(info, 0);
++ hci_h4p_change_speed(info, MAX_BAUD_RATE);
++
++ err = hci_h4p_wait_for_cts(info, 1, 100);
++ if (err < 0)
++ return err;
++
++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
++ init_completion(&info->init_completion);
++ err = hci_h4p_send_alive_packet(info);
++
++ if (err < 0)
++ return err;
++
++ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
++ msecs_to_jiffies(1000)))
++ return -ETIMEDOUT;
++
++ if (info->init_error < 0)
++ return info->init_error;
++
++ NBT_DBG("Negotiation succesful\n");
++ return 0;
++}
++
++static void hci_h4p_negotiation_packet(struct hci_h4p_info *info,
++ struct sk_buff *skb)
++{
++ struct hci_h4p_init_hdr *init_hdr = (void *) skb->data;
++ struct hci_h4p_init_evt *init_evt;
++
++ if (init_hdr->dlen > skb->len) {
++ kfree_skb(skb);
++ info->init_error = -EPROTO;
++ complete(&info->init_completion);
++ return;
++ }
++
++ init_evt = (void *)skb_pull(skb, HCI_H4P_INIT_HDR_SIZE);
++
++ if (init_evt->ack != HCI_H4P_ACK) {
++ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
++ info->init_error = -EINVAL;
++ }
++
++ info->man_id = init_evt->man_id;
++ info->ver_id = init_evt->ver_id;
++
++ complete(&info->init_completion);
++ kfree_skb(skb);
++}
++
++/* H4 packet handling functions */
++static int hci_h4p_get_hdr_len(struct hci_h4p_info *info, u8 pkt_type)
++{
++ long retval;
++
++ switch (pkt_type) {
++ case H4_EVT_PKT:
++ retval = HCI_EVENT_HDR_SIZE;
++ break;
++ case H4_ACL_PKT:
++ retval = HCI_ACL_HDR_SIZE;
++ break;
++ case H4_SCO_PKT:
++ retval = HCI_SCO_HDR_SIZE;
++ break;
++ case H4_NEG_PKT:
++ retval = HCI_H4P_INIT_HDR_SIZE;
++ break;
++ case H4_ALIVE_PKT:
++ retval = HCI_H4P_ALIVE_HDR_SIZE;
++ break;
++ case H4_RADIO_PKT:
++ retval = H4_RADIO_HDR_SIZE;
++ break;
++ default:
++ dev_err(info->dev, "Unknown H4 packet type 0x%.2x\n", pkt_type);
++ retval = -1;
++ break;
++ }
++
++ return retval;
++}
++
++static unsigned int hci_h4p_get_data_len(struct hci_h4p_info *info,
++ struct sk_buff *skb)
++{
++ long retval = -1;
++ struct hci_event_hdr *evt_hdr;
++ struct hci_acl_hdr *acl_hdr;
++ struct hci_sco_hdr *sco_hdr;
++ struct hci_h4p_radio_hdr *radio_hdr;
++ struct hci_h4p_init_hdr *init_hdr;
++ struct hci_h4p_alive_hdr *alive_hdr;
++
++ switch (bt_cb(skb)->pkt_type) {
++ case H4_EVT_PKT:
++ evt_hdr = (struct hci_event_hdr *)skb->data;
++ retval = evt_hdr->plen;
++ break;
++ case H4_ACL_PKT:
++ acl_hdr = (struct hci_acl_hdr *)skb->data;
++ retval = le16_to_cpu(acl_hdr->dlen);
++ break;
++ case H4_SCO_PKT:
++ sco_hdr = (struct hci_sco_hdr *)skb->data;
++ retval = sco_hdr->dlen;
++ break;
++ case H4_RADIO_PKT:
++ radio_hdr = (struct hci_h4p_radio_hdr *)skb->data;
++ retval = radio_hdr->dlen;
++ break;
++ case H4_NEG_PKT:
++ init_hdr = (struct hci_h4p_init_hdr *)skb->data;
++ retval = init_hdr->dlen;
++ break;
++ case H4_ALIVE_PKT:
++ alive_hdr = (struct hci_h4p_alive_hdr *)skb->data;
++ retval = alive_hdr->dlen;
++ break;
++ }
++
++ return retval;
++}
++
++static inline void hci_h4p_recv_frame(struct hci_h4p_info *info,
++ struct sk_buff *skb)
++{
++
++ if (unlikely(!test_bit(HCI_RUNNING, &info->hdev->flags))) {
++ NBT_DBG("fw_event\n");
++ if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
++ hci_h4p_negotiation_packet(info, info->rx_skb);
++ return;
++ }
++ if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
++ hci_h4p_alive_packet(info, info->rx_skb);
++ return;
++ }
++ hci_h4p_parse_fw_event(info, skb);
++ } else {
++ hci_recv_frame(skb);
++ NBT_DBG("Frame sent to upper layer\n");
++ }
++}
++
++static inline void hci_h4p_handle_byte(struct hci_h4p_info *info, u8 byte)
++{
++ switch (info->rx_state) {
++ case WAIT_FOR_PKT_TYPE:
++ bt_cb(info->rx_skb)->pkt_type = byte;
++ info->rx_count = hci_h4p_get_hdr_len(info, byte);
++ if (info->rx_count < 0) {
++ info->hdev->stat.err_rx++;
++ kfree_skb(info->rx_skb);
++ info->rx_skb = NULL;
++ } else {
++ info->rx_state = WAIT_FOR_HEADER;
++ }
++ break;
++ case WAIT_FOR_HEADER:
++ info->rx_count--;
++ *skb_put(info->rx_skb, 1) = byte;
++ if (info->rx_count != 0)
++ break;
++
++ info->rx_count = hci_h4p_get_data_len(info,
++ info->rx_skb);
++ if (info->rx_count > skb_tailroom(info->rx_skb)) {
++ dev_err(info->dev, "Too long frame.\n");
++ info->garbage_bytes = info->rx_count -
++ skb_tailroom(info->rx_skb);
++ kfree_skb(info->rx_skb);
++ info->rx_skb = NULL;
++ break;
++ }
++ info->rx_state = WAIT_FOR_DATA;
++ break;
++ case WAIT_FOR_DATA:
++ info->rx_count--;
++ *skb_put(info->rx_skb, 1) = byte;
++ break;
++ default:
++ WARN_ON(1);
++ break;
++ }
++
++ if (info->rx_count == 0) {
++ /* H4+ devices should allways send word aligned
++ * packets */
++ if (!(info->rx_skb->len % 2))
++ info->garbage_bytes++;
++ hci_h4p_recv_frame(info, info->rx_skb);
++ info->rx_skb = NULL;
++ }
++}
++
++static void hci_h4p_rx(unsigned long data)
++{
++ u8 byte;
++ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
++
++ NBT_DBG("rx woke up\n");
++
++ while (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) {
++ byte = hci_h4p_inb(info, UART_RX);
++ if (info->garbage_bytes) {
++ info->garbage_bytes--;
++ continue;
++ }
++ if (info->rx_skb == NULL) {
++ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE,
++ GFP_ATOMIC);
++ if (!info->rx_skb) {
++ dev_err(info->dev,
++ "No memory for new packet\n");
++ return;
++ }
++ info->rx_state = WAIT_FOR_PKT_TYPE;
++ info->rx_skb->dev = (void *)info->hdev;
++ }
++ info->hdev->stat.byte_rx++;
++ NBT_DBG_TRANSFER_NF("0x%.2x ", byte);
++ hci_h4p_handle_byte(info, byte);
++ }
++
++ if (info->rx_enabled == info->autorts)
++ return;
++
++ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT))
++ return;
++
++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR)
++ return;
++
++ hci_h4p_set_rts(info, info->rx_enabled);
++ __hci_h4p_set_auto_ctsrts(info, info->rx_enabled, UART_EFR_RTS);
++ info->autorts = info->rx_enabled;
++
++ /* Flush posted write to avoid spurious interrupts */
++ hci_h4p_inb(info, UART_OMAP_SCR);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
++}
++
++static void hci_h4p_tx(unsigned long data)
++{
++ unsigned int sent = 0;
++ struct sk_buff *skb;
++ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
++
++ NBT_DBG("tx woke up\n");
++ NBT_DBG_TRANSFER("data ");
++
++ if (info->autorts != info->rx_enabled) {
++ NBT_DBG("rts unbalanced.. autorts %d rx_enabled %d", info->autorts, info->rx_enabled);
++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT &&
++ !(hci_h4p_inb(info, UART_LSR) & UART_LSR_DR)) {
++ __hci_h4p_set_auto_ctsrts(info, info->rx_enabled,
++ UART_EFR_RTS);
++ info->autorts = info->rx_enabled;
++ hci_h4p_set_rts(info, info->rx_enabled);
++ hci_h4p_set_clk(info, &info->rx_clocks_en,
++ info->rx_enabled);
++ NBT_DBG("transmitter empty. setinng into balance\n");
++ } else {
++ hci_h4p_outb(info, UART_OMAP_SCR,
++ hci_h4p_inb(info, UART_OMAP_SCR) |
++ UART_OMAP_SCR_EMPTY_THR);
++ NBT_DBG("transmitter/receiver was not empty waiting for next irq\n");
++ hci_h4p_set_rts(info, 1);
++ goto finish_tx;
++ }
++ }
++
++ skb = skb_dequeue(&info->txq);
++ if (!skb) {
++ /* No data in buffer */
++ NBT_DBG("skb ready\n");
++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) {
++ hci_h4p_outb(info, UART_IER,
++ hci_h4p_inb(info, UART_IER) &
++ ~UART_IER_THRI);
++ hci_h4p_inb(info, UART_OMAP_SCR);
++ hci_h4p_disable_tx(info);
++ NBT_DBG("transmitter was empty. cleaning up\n");
++ return;
++ }
++ hci_h4p_outb(info, UART_OMAP_SCR,
++ hci_h4p_inb(info, UART_OMAP_SCR) |
++ UART_OMAP_SCR_EMPTY_THR);
++ NBT_DBG("transmitter was not empty waiting for next irq\n");
++ goto finish_tx;
++ }
++
++ /* Copy data to tx fifo */
++ while (!(hci_h4p_inb(info, UART_OMAP_SSR) & UART_OMAP_SSR_TXFULL) &&
++ (sent < skb->len)) {
++ NBT_DBG_TRANSFER_NF("0x%.2x ", skb->data[sent]);
++ hci_h4p_outb(info, UART_TX, skb->data[sent]);
++ sent++;
++ }
++
++ info->hdev->stat.byte_tx += sent;
++ if (skb->len == sent) {
++ kfree_skb(skb);
++ } else {
++ skb_pull(skb, sent);
++ skb_queue_head(&info->txq, skb);
++ }
++
++ hci_h4p_outb(info, UART_OMAP_SCR, hci_h4p_inb(info, UART_OMAP_SCR) &
++ ~UART_OMAP_SCR_EMPTY_THR);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++
++finish_tx:
++ /* Flush posted write to avoid spurious interrupts */
++ hci_h4p_inb(info, UART_OMAP_SCR);
++
++}
++
++static irqreturn_t hci_h4p_interrupt(int irq, void *data)
++{
++ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
++ u8 iir, msr;
++ int ret;
++
++ ret = IRQ_NONE;
++
++ iir = hci_h4p_inb(info, UART_IIR);
++ if (iir & UART_IIR_NO_INT)
++ return IRQ_HANDLED;
++
++ NBT_DBG("In interrupt handler iir 0x%.2x\n", iir);
++
++ iir &= UART_IIR_ID;
++
++ if (iir == UART_IIR_MSI) {
++ msr = hci_h4p_inb(info, UART_MSR);
++ ret = IRQ_HANDLED;
++ }
++ if (iir == UART_IIR_RLSI) {
++ hci_h4p_inb(info, UART_RX);
++ hci_h4p_inb(info, UART_LSR);
++ ret = IRQ_HANDLED;
++ }
++
++ if (iir == UART_IIR_RDI) {
++ hci_h4p_rx((unsigned long)data);
++ ret = IRQ_HANDLED;
++ }
++
++ if (iir == UART_IIR_THRI) {
++ hci_h4p_tx((unsigned long)data);
++ ret = IRQ_HANDLED;
++ }
++
++ return ret;
++}
++
++static irqreturn_t hci_h4p_wakeup_interrupt(int irq, void *dev_inst)
++{
++ struct hci_h4p_info *info = dev_inst;
++ int should_wakeup;
++ struct hci_dev *hdev;
++
++ if (!info->hdev)
++ return IRQ_HANDLED;
++
++ hdev = info->hdev;
++
++ if (!test_bit(HCI_RUNNING, &hdev->flags))
++ return IRQ_HANDLED;
++
++ should_wakeup = info->host_wakeup();
++ NBT_DBG_POWER("gpio interrupt %d\n", should_wakeup);
++
++ /* Check if wee have missed some interrupts */
++ if (info->rx_enabled == should_wakeup)
++ return IRQ_HANDLED;
++
++ if (should_wakeup)
++ hci_h4p_enable_rx(info);
++ else
++ hci_h4p_disable_rx(info);
++
++ return IRQ_HANDLED;
++}
++
++static int hci_h4p_reset(struct hci_h4p_info *info)
++{
++ int err;
++
++ err = hci_h4p_reset_uart(info);
++ if (err < 0) {
++ dev_err(info->dev, "Uart reset failed\n");
++ return err;
++ }
++ hci_h4p_init_uart(info);
++ hci_h4p_set_rts(info, 0);
++
++ info->reset(0);
++ info->bt_wakeup(1);
++ msleep(10);
++ info->reset(1);
++
++ err = hci_h4p_wait_for_cts(info, 1, 100);
++ if (err < 0) {
++ dev_err(info->dev, "No cts from bt chip\n");
++ return err;
++ }
++
++ hci_h4p_set_rts(info, 1);
++
++ return 0;
++}
++
++/* hci callback functions */
++static int hci_h4p_hci_flush(struct hci_dev *hdev)
++{
++ struct hci_h4p_info *info;
++ info = hdev->driver_data;
++
++ skb_queue_purge(&info->txq);
++
++ return 0;
++}
++
++static int hci_h4p_hci_open(struct hci_dev *hdev)
++{
++ struct hci_h4p_info *info;
++ int err;
++ struct sk_buff_head fw_queue;
++ unsigned long flags;
++
++ info = hdev->driver_data;
++
++ if (test_bit(HCI_RUNNING, &hdev->flags))
++ return 0;
++
++ info->rx_enabled = 1;
++ info->rx_state = WAIT_FOR_PKT_TYPE;
++ info->rx_count = 0;
++ info->garbage_bytes = 0;
++ info->rx_skb = NULL;
++ info->pm_enabled = 0;
++ init_completion(&info->fw_completion);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
++ skb_queue_head_init(&fw_queue);
++
++ err = hci_h4p_reset(info);
++ if (err < 0)
++ goto err_clean;
++
++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS);
++ info->autorts = 1;
++ err = hci_h4p_send_negotiation(info);
++ if (err < 0)
++ goto err_clean;
++
++ skb_queue_head_init(&fw_queue);
++ err = hci_h4p_read_fw(info, &fw_queue);
++ if (err < 0) {
++ dev_err(info->dev, "Cannot read firmware\n");
++ return err;
++ }
++
++ /* FW image contains also unneeded negoation and alive msgs */
++ skb_dequeue(&fw_queue);
++ skb_dequeue(&fw_queue);
++
++ err = hci_h4p_send_fw(info, &fw_queue);
++ if (err < 0) {
++ dev_err(info->dev, "Sending firmware failed.\n");
++ goto err_clean;
++ }
++
++ info->pm_enabled = 1;
++
++ spin_lock_irqsave(&info->lock, flags);
++ info->rx_enabled = info->host_wakeup();
++ hci_h4p_set_clk(info, &info->rx_clocks_en, info->rx_enabled);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
++
++ set_bit(HCI_RUNNING, &hdev->flags);
++
++ NBT_DBG("hci up and running\n");
++ return 0;
++
++err_clean:
++ hci_h4p_hci_flush(hdev);
++ hci_h4p_reset_uart(info);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
++ info->reset(0);
++ info->bt_wakeup(0);
++ skb_queue_purge(&fw_queue);
++ kfree_skb(info->rx_skb);
++
++ return err;
++}
++
++static int hci_h4p_hci_close(struct hci_dev *hdev)
++{
++ struct hci_h4p_info *info = hdev->driver_data;
++
++ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
++ return 0;
++
++ hci_h4p_hci_flush(hdev);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
++ hci_h4p_reset_uart(info);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
++ info->reset(0);
++ info->bt_wakeup(0);
++ kfree_skb(info->rx_skb);
++
++ return 0;
++}
++
++static void hci_h4p_hci_destruct(struct hci_dev *hdev)
++{
++}
++
++static int hci_h4p_hci_send_frame(struct sk_buff *skb)
++{
++ struct hci_h4p_info *info;
++ struct hci_dev *hdev = (struct hci_dev *)skb->dev;
++ int err = 0;
++ unsigned long flags;
++
++ if (!hdev) {
++ printk(KERN_WARNING "hci_h4p: Frame for unknown device\n");
++ return -ENODEV;
++ }
++
++ NBT_DBG("dev %p, skb %p\n", hdev, skb);
++
++ info = hdev->driver_data;
++
++ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
++ dev_warn(info->dev, "Frame for non-running device\n");
++ return -EIO;
++ }
++
++ switch (bt_cb(skb)->pkt_type) {
++ case HCI_COMMAND_PKT:
++ hdev->stat.cmd_tx++;
++ break;
++ case HCI_ACLDATA_PKT:
++ hdev->stat.acl_tx++;
++ break;
++ case HCI_SCODATA_PKT:
++ hdev->stat.sco_tx++;
++ break;
++ }
++
++ /* Push frame type to skb */
++ *skb_push(skb, 1) = (bt_cb(skb)->pkt_type);
++ /* We should allways send word aligned data to h4+ devices */
++ if (skb->len % 2) {
++ err = skb_pad(skb, 1);
++ if (!err)
++ *skb_put(skb, 1) = 0x00;
++ }
++ if (err)
++ return err;
++
++ spin_lock_irqsave(&info->lock, flags);
++ skb_queue_tail(&info->txq, skb);
++ hci_h4p_enable_tx(info);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ return 0;
++}
++
++static int hci_h4p_hci_ioctl(struct hci_dev *hdev, unsigned int cmd,
++ unsigned long arg)
++{
++ return -ENOIOCTLCMD;
++}
++
++static int hci_h4p_register_hdev(struct hci_h4p_info *info)
++{
++ struct hci_dev *hdev;
++
++ /* Initialize and register HCI device */
++
++ hdev = hci_alloc_dev();
++ if (!hdev) {
++ dev_err(info->dev, "Can't allocate memory for device\n");
++ return -ENOMEM;
++ }
++ info->hdev = hdev;
++
++ hdev->bus = HCI_UART;
++ hdev->driver_data = info;
++
++ hdev->open = hci_h4p_hci_open;
++ hdev->close = hci_h4p_hci_close;
++ hdev->flush = hci_h4p_hci_flush;
++ hdev->send = hci_h4p_hci_send_frame;
++ hdev->destruct = hci_h4p_hci_destruct;
++ hdev->ioctl = hci_h4p_hci_ioctl;
++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
++
++ hdev->owner = THIS_MODULE;
++
++ if (hci_register_dev(hdev) < 0) {
++ dev_err(info->dev, "hci_register failed %s.\n", hdev->name);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static int hci_h4p_probe(struct platform_device *pdev)
++{
++ struct hci_h4p_platform_data *bt_plat_data;
++ struct hci_h4p_info *info;
++ int err;
++
++ dev_info(&pdev->dev, "Registering HCI H4P device\n");
++ info = kzalloc(sizeof(struct hci_h4p_info), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ info->dev = &pdev->dev;
++ info->pm_enabled = 0;
++ info->tx_enabled = 1;
++ info->rx_enabled = 1;
++ info->garbage_bytes = 0;
++ info->tx_clocks_en = 0;
++ info->rx_clocks_en = 0;
++ spin_lock_init(&info->lock);
++ spin_lock_init(&info->clocks_lock);
++ skb_queue_head_init(&info->txq);
++
++ if (pdev->dev.platform_data == NULL) {
++ dev_err(&pdev->dev, "Could not get Bluetooth config data\n");
++ kfree(info);
++ return -ENODATA;
++ }
++
++ bt_plat_data = pdev->dev.platform_data;
++ info->chip_type = 3;
++ info->bt_wakeup = bt_plat_data->bt_wu;
++ info->host_wakeup = bt_plat_data->host_wu;
++ info->reset = bt_plat_data->reset;
++ info->uart_base = bt_plat_data->uart_base;
++ info->host_wakeup_gpio = bt_plat_data->host_wu_gpio;
++
++ NBT_DBG("RESET gpio: %p\n", info->reset);
++ NBT_DBG("BTWU gpio: %p\n", info->bt_wakeup);
++ NBT_DBG("HOSTWU gpio: %p\n", info->host_wakeup);
++
++ info->irq = bt_plat_data->uart_irq;
++ err = request_irq(info->irq, hci_h4p_interrupt, IRQF_DISABLED | IRQF_SHARED,
++ "hci_h4p", info);
++ if (err < 0) {
++ dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n", info->irq);
++ goto cleanup;
++ }
++
++ err = request_irq(gpio_to_irq(info->host_wakeup_gpio),
++ hci_h4p_wakeup_interrupt, IRQF_TRIGGER_FALLING |
++ IRQF_TRIGGER_RISING | IRQF_DISABLED,
++ "hci_h4p_wkup", info);
++ if (err < 0) {
++ dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n",
++ gpio_to_irq(info->host_wakeup_gpio));
++ free_irq(info->irq, info);
++ goto cleanup;
++ }
++
++ err = set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1);
++ if (err < 0) {
++ dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n",
++ gpio_to_irq(info->host_wakeup_gpio));
++ free_irq(info->irq, info);
++ free_irq(gpio_to_irq(info->host_wakeup_gpio), info);
++ goto cleanup;
++ }
++
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
++ err = hci_h4p_reset_uart(info);
++ if (err < 0)
++ goto cleanup_irq;
++ hci_h4p_init_uart(info);
++ hci_h4p_set_rts(info, 0);
++ err = hci_h4p_reset(info);
++ hci_h4p_reset_uart(info);
++ if (err < 0)
++ goto cleanup_irq;
++ info->reset(0);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
++
++ platform_set_drvdata(pdev, info);
++
++ if (hci_h4p_register_hdev(info) < 0) {
++ dev_err(info->dev, "failed to register hci_h4p hci device\n");
++ goto cleanup_irq;
++ }
++
++ return 0;
++
++cleanup_irq:
++ free_irq(info->irq, (void *)info);
++ free_irq(gpio_to_irq(info->host_wakeup_gpio), info);
++cleanup:
++ info->reset(0);
++ kfree(info);
++ return err;
++
++}
++
++static int hci_h4p_remove(struct platform_device *pdev)
++{
++ struct hci_h4p_info *info;
++
++ info = platform_get_drvdata(pdev);
++
++ hci_h4p_hci_close(info->hdev);
++ free_irq(gpio_to_irq(info->host_wakeup_gpio), info);
++ hci_unregister_dev(info->hdev);
++ hci_free_dev(info->hdev);
++ free_irq(info->irq, (void *) info);
++ kfree(info);
++
++ return 0;
++}
++
++static struct platform_driver hci_h4p_driver = {
++ .probe = hci_h4p_probe,
++ .remove = hci_h4p_remove,
++ .driver = {
++ .name = "hci_h4p",
++ },
++};
++
++static int __init hci_h4p_init(void)
++{
++ int err = 0;
++
++ /* Register the driver with LDM */
++ err = platform_driver_register(&hci_h4p_driver);
++ if (err < 0)
++ printk(KERN_WARNING "failed to register hci_h4p driver\n");
++
++ return err;
++}
++
++static void __exit hci_h4p_exit(void)
++{
++ platform_driver_unregister(&hci_h4p_driver);
++}
++
++module_init(hci_h4p_init);
++module_exit(hci_h4p_exit);
++
++MODULE_ALIAS("platform:hci_h4p");
++MODULE_DESCRIPTION("h4 driver with nokia extensions");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Ville Tervo");
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/fw-bcm.c
+@@ -0,0 +1,144 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2005-2008 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/skbuff.h>
++#include <linux/delay.h>
++#include <linux/serial_reg.h>
++
++#include "hci_h4p.h"
++
++static struct sk_buff_head *fw_q;
++
++static int inject_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb)
++{
++ unsigned int offset;
++
++ if (skb->len < 10) {
++ dev_info(info->dev, "Valid bluetooth address not found.\n");
++ return -ENODATA;
++ }
++
++ offset = 4;
++ skb->data[offset + 5] = 0x00;
++ skb->data[offset + 4] = 0x11;
++ skb->data[offset + 3] = 0x22;
++ skb->data[offset + 2] = 0x33;
++ skb->data[offset + 1] = 0x44;
++ skb->data[offset + 0] = 0x55;
++
++ return 0;
++}
++
++void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
++{
++ struct sk_buff *fw_skb;
++ int err;
++ unsigned long flags;
++
++ if (skb->data[5] != 0x00) {
++ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n",
++ skb->data[5]);
++ info->fw_error = -EPROTO;
++ }
++
++ kfree_skb(skb);
++
++ fw_skb = skb_dequeue(fw_q);
++ if (fw_skb == NULL || info->fw_error) {
++ complete(&info->fw_completion);
++ return;
++ }
++
++ if (fw_skb->data[1] == 0x01 && fw_skb->data[2] == 0xfc) {
++ NBT_DBG_FW("Injecting bluetooth address\n");
++ err = inject_bdaddr(info, fw_skb);
++ if (err < 0) {
++ kfree_skb(fw_skb);
++ info->fw_error = err;
++ complete(&info->fw_completion);
++ return;
++ }
++ }
++
++ skb_queue_tail(&info->txq, fw_skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++}
++
++
++int hci_h4p_bcm_send_fw(struct hci_h4p_info *info,
++ struct sk_buff_head *fw_queue)
++{
++ struct sk_buff *skb;
++ unsigned long flags, time;
++
++ info->fw_error = 0;
++
++ NBT_DBG_FW("Sending firmware\n");
++
++ time = jiffies;
++
++ fw_q = fw_queue;
++ skb = skb_dequeue(fw_queue);
++ if (!skb)
++ return -ENODATA;
++
++ NBT_DBG_FW("Sending commands\n");
++
++ /*
++ * Disable smart-idle as UART TX interrupts
++ * are not wake-up capable
++ */
++ hci_h4p_smart_idle(info, 0);
++
++ /* Check if this is bd_address packet */
++ init_completion(&info->fw_completion);
++ skb_queue_tail(&info->txq, skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ if (!wait_for_completion_timeout(&info->fw_completion,
++ msecs_to_jiffies(2000))) {
++ dev_err(info->dev, "No reply to fw command\n");
++ return -ETIMEDOUT;
++ }
++
++ if (info->fw_error) {
++ dev_err(info->dev, "FW error\n");
++ return -EPROTO;
++ }
++
++ NBT_DBG_FW("Firmware sent in %d msecs\n",
++ jiffies_to_msecs(jiffies-time));
++
++ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
++ hci_h4p_set_rts(info, 0);
++ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/fw-csr.c
+@@ -0,0 +1,141 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2005-2008 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/skbuff.h>
++#include <linux/delay.h>
++#include <linux/serial_reg.h>
++
++#include "hci_h4p.h"
++
++void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
++{
++ /* Check if this is fw packet */
++ if (skb->data[0] != 0xff) {
++ hci_recv_frame(skb);
++ return;
++ }
++
++ if (skb->data[11] || skb->data[12]) {
++ dev_err(info->dev, "Firmware sending command failed\n");
++ info->fw_error = -EPROTO;
++ }
++
++ kfree_skb(skb);
++ complete(&info->fw_completion);
++}
++
++int hci_h4p_bc4_send_fw(struct hci_h4p_info *info,
++ struct sk_buff_head *fw_queue)
++{
++ struct sk_buff *skb;
++ unsigned int offset;
++ int retries, count;
++ unsigned long flags;
++ struct hci_h4p_bluetooth_config *config;
++
++ info->fw_error = 0;
++
++ NBT_DBG_FW("Sending firmware\n");
++ skb = skb_dequeue(fw_queue);
++
++ if (!skb)
++ return -ENOMSG;
++
++ config = info->dev->platform_data;
++ if (!config) {
++ kfree_skb(skb);
++ return -ENODEV;
++ }
++
++ /* Check if this is bd_address packet */
++ if (skb->data[15] == 0x01 && skb->data[16] == 0x00) {
++ offset = 21;
++ skb->data[offset + 1] = 0x00;
++ skb->data[offset + 5] = 0x00;
++ skb->data[offset + 7] = 0x00;
++ skb->data[offset + 6] = 0x01;
++ skb->data[offset + 4] = 0x22;
++ skb->data[offset + 0] = 0x66;
++ skb->data[offset + 3] = 0x77;
++ skb->data[offset + 2] = 0x33;
++ }
++
++ for (count = 1; ; count++) {
++ NBT_DBG_FW("Sending firmware command %d\n", count);
++ init_completion(&info->fw_completion);
++ skb_queue_tail(&info->txq, skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ skb = skb_dequeue(fw_queue);
++ if (!skb)
++ break;
++
++ if (!wait_for_completion_timeout(&info->fw_completion,
++ msecs_to_jiffies(1000))) {
++ dev_err(info->dev, "No reply to fw command\n");
++ return -ETIMEDOUT;
++ }
++
++ if (info->fw_error) {
++ dev_err(info->dev, "FW error\n");
++ return -EPROTO;
++ }
++ };
++
++ /* Wait for chip warm reset */
++ retries = 100;
++ while ((!skb_queue_empty(&info->txq) ||
++ !(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) &&
++ retries--) {
++ msleep(10);
++ }
++ if (!retries) {
++ dev_err(info->dev, "Transmitter not empty\n");
++ return -ETIMEDOUT;
++ }
++
++ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
++
++ if (hci_h4p_wait_for_cts(info, 1, 100)) {
++ dev_err(info->dev, "cts didn't deassert after final speed\n");
++ return -ETIMEDOUT;
++ }
++
++ retries = 100;
++ do {
++ init_completion(&info->init_completion);
++ hci_h4p_send_alive_packet(info);
++ retries--;
++ } while (!wait_for_completion_timeout(&info->init_completion, 100) &&
++ retries > 0);
++
++ if (!retries) {
++ dev_err(info->dev, "No alive reply after speed change\n");
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/fw-ti1273.c
+@@ -0,0 +1,113 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2009 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/skbuff.h>
++#include <linux/delay.h>
++#include <linux/serial_reg.h>
++
++#include "hci_h4p.h"
++
++static struct sk_buff_head *fw_q;
++
++void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info,
++ struct sk_buff *skb)
++{
++ struct sk_buff *fw_skb;
++ unsigned long flags;
++
++ if (skb->data[5] != 0x00) {
++ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n",
++ skb->data[5]);
++ info->fw_error = -EPROTO;
++ }
++
++ kfree_skb(skb);
++
++ fw_skb = skb_dequeue(fw_q);
++ if (fw_skb == NULL || info->fw_error) {
++ complete(&info->fw_completion);
++ return;
++ }
++
++ hci_h4p_enable_tx(info);
++ skb_queue_tail(&info->txq, fw_skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++}
++
++
++int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info,
++ struct sk_buff_head *fw_queue)
++{
++ struct sk_buff *skb;
++ unsigned long flags, time;
++
++ info->fw_error = 0;
++
++ NBT_DBG_FW("Sending firmware\n");
++
++ time = jiffies;
++
++ fw_q = fw_queue;
++ skb = skb_dequeue(fw_queue);
++ if (!skb)
++ return -ENODATA;
++
++ NBT_DBG_FW("Sending commands\n");
++ /* Check if this is bd_address packet */
++ init_completion(&info->fw_completion);
++ hci_h4p_enable_tx(info);
++ skb_queue_tail(&info->txq, skb);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ if (!wait_for_completion_timeout(&info->fw_completion,
++ msecs_to_jiffies(40000))) {
++ dev_err(info->dev, "No reply to fw command\n");
++ return -ETIMEDOUT;
++ }
++
++ if (info->fw_error) {
++ dev_err(info->dev, "FW error\n");
++ return -EPROTO;
++ }
++
++ NBT_DBG_FW("Firmware sent in %d msecs\n",
++ jiffies_to_msecs(jiffies-time));
++
++ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
++ hci_h4p_set_rts(info, 0);
++ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
++ if (hci_h4p_wait_for_cts(info, 1, 100)) {
++ dev_err(info->dev,
++ "cts didn't go down after final speed change\n");
++ return -ETIMEDOUT;
++ }
++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/fw.c
+@@ -0,0 +1,166 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2005, 2006 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/skbuff.h>
++#include <linux/firmware.h>
++#include <linux/clk.h>
++
++#include <net/bluetooth/bluetooth.h>
++
++#include "hci_h4p.h"
++
++static int fw_pos;
++
++/* Firmware handling */
++static int hci_h4p_open_firmware(struct hci_h4p_info *info,
++ const struct firmware **fw_entry)
++{
++ int err;
++
++ fw_pos = 0;
++ NBT_DBG_FW("Opening %d/%d firmware\n", info->man_id, info->ver_id);
++ switch (info->man_id) {
++ case BT_CHIP_TI:
++ err = request_firmware(fw_entry, "ti1273.bin", info->dev);
++ break;
++ case BT_CHIP_CSR:
++ err = request_firmware(fw_entry, "bc4fw.bin", info->dev);
++ break;
++ case BT_CHIP_BCM:
++ err = request_firmware(fw_entry, "bcmfw.bin", info->dev);
++ break;
++ default:
++ dev_err(info->dev, "Invalid chip type %d\n", info->man_id);
++ *fw_entry = NULL;
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
++static void hci_h4p_close_firmware(const struct firmware *fw_entry)
++{
++ release_firmware(fw_entry);
++}
++
++/* Read fw. Return length of the command. If no more commands in
++ * fw 0 is returned. In error case return value is negative.
++ */
++static int hci_h4p_read_fw_cmd(struct hci_h4p_info *info, struct sk_buff **skb,
++ const struct firmware *fw_entry, gfp_t how)
++{
++ unsigned int cmd_len;
++
++ if (fw_pos >= fw_entry->size)
++ return 0;
++
++ if (fw_pos + 2 > fw_entry->size) {
++ dev_err(info->dev, "Corrupted firmware image 1\n");
++ return -EMSGSIZE;
++ }
++
++ cmd_len = fw_entry->data[fw_pos++];
++ cmd_len += fw_entry->data[fw_pos++] << 8;
++ if (cmd_len == 0)
++ return 0;
++
++ if (fw_pos + cmd_len > fw_entry->size) {
++ dev_err(info->dev, "Corrupted firmware image 2\n");
++ return -EMSGSIZE;
++ }
++
++ *skb = bt_skb_alloc(cmd_len, how);
++ if (!*skb) {
++ dev_err(info->dev, "Cannot reserve memory for buffer\n");
++ return -ENOMEM;
++ }
++ memcpy(skb_put(*skb, cmd_len), &fw_entry->data[fw_pos], cmd_len);
++
++ fw_pos += cmd_len;
++
++ return (*skb)->len;
++}
++
++int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue)
++{
++ const struct firmware *fw_entry = NULL;
++ struct sk_buff *skb = NULL;
++ int err;
++
++ err = hci_h4p_open_firmware(info, &fw_entry);
++ if (err < 0 || !fw_entry)
++ goto err_clean;
++
++ while ((err = hci_h4p_read_fw_cmd(info, &skb, fw_entry, GFP_KERNEL))) {
++ if (err < 0 || !skb)
++ goto err_clean;
++
++ skb_queue_tail(fw_queue, skb);
++ }
++
++err_clean:
++ hci_h4p_close_firmware(fw_entry);
++ return err;
++}
++
++int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue)
++{
++ int err;
++
++ switch (info->man_id) {
++ case BT_CHIP_CSR:
++ err = hci_h4p_bc4_send_fw(info, fw_queue);
++ break;
++ case BT_CHIP_TI:
++ err = hci_h4p_ti1273_send_fw(info, fw_queue);
++ break;
++ case BT_CHIP_BCM:
++ err = hci_h4p_bcm_send_fw(info, fw_queue);
++ break;
++ default:
++ dev_err(info->dev, "Don't know how to send firmware\n");
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
++void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
++{
++ switch (info->man_id) {
++ case BT_CHIP_CSR:
++ hci_h4p_bc4_parse_fw_event(info, skb);
++ break;
++ case BT_CHIP_TI:
++ hci_h4p_ti1273_parse_fw_event(info, skb);
++ break;
++ case BT_CHIP_BCM:
++ hci_h4p_bcm_parse_fw_event(info, skb);
++ break;
++ default:
++ dev_err(info->dev, "Don't know how to parse fw event\n");
++ info->fw_error = -EINVAL;
++ }
++
++ return;
++}
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/hci_h4p.h
+@@ -0,0 +1,231 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2005-2010 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <net/bluetooth/bluetooth.h>
++#include <net/bluetooth/hci_core.h>
++#include <net/bluetooth/hci.h>
++
++#ifndef __DRIVERS_BLUETOOTH_HCI_H4P_H
++#define __DRIVERS_BLUETOOTH_HCI_H4P_H
++
++#define BT_CHIP_CSR 0x02
++#define BT_CHIP_TI 0x30
++#define BT_CHIP_BCM 0x04
++
++#define UART_SYSC_OMAP_RESET 0x03
++#define UART_SYSS_RESETDONE 0x01
++#define UART_OMAP_SCR_EMPTY_THR 0x08
++#define UART_OMAP_SCR_WAKEUP 0x10
++#define UART_OMAP_SSR_WAKEUP 0x02
++#define UART_OMAP_SSR_TXFULL 0x01
++
++#define UART_OMAP_SYSC_IDLEMODE 0x03
++#define UART_OMAP_SYSC_IDLEMASK (3 << UART_OMAP_SYSC_IDLEMODE)
++
++#define UART_OMAP_SYSC_FORCE_IDLE (0 << UART_OMAP_SYSC_IDLEMODE)
++#define UART_OMAP_SYSC_NO_IDLE (1 << UART_OMAP_SYSC_IDLEMODE)
++#define UART_OMAP_SYSC_SMART_IDLE (2 << UART_OMAP_SYSC_IDLEMODE)
++
++#define NBT_DBG(fmt, arg...) \
++ pr_debug("%s: " fmt "" , __func__ , ## arg)
++
++#define NBT_DBG_FW(fmt, arg...) \
++ pr_debug("%s: " fmt "" , __func__ , ## arg)
++
++#define NBT_DBG_POWER(fmt, arg...) \
++ pr_debug("%s: " fmt "" , __func__ , ## arg)
++
++#define NBT_DBG_TRANSFER(fmt, arg...) \
++ pr_debug("%s: " fmt "" , __func__ , ## arg)
++
++#define NBT_DBG_TRANSFER_NF(fmt, arg...) \
++ pr_debug(fmt "" , ## arg)
++
++#define NBT_DBG_DMA(fmt, arg...) \
++ pr_debug("%s: " fmt "" , __func__ , ## arg)
++
++struct hci_h4p_info {
++ struct hci_dev *hdev;
++ spinlock_t lock;
++
++ void __iomem *uart_base;
++ unsigned long uart_phys_base;
++ int irq;
++ struct device *dev;
++ u8 chip_type;
++ void (*bt_wakeup)(bool enable);
++ bool (*host_wakeup)(void);
++ void (*reset)(bool enable);
++ int host_wakeup_gpio;
++ int man_id;
++ int ver_id;
++
++ struct sk_buff_head fw_queue;
++ struct completion init_completion;
++ struct completion fw_completion;
++ int fw_error;
++ int init_error;
++
++ struct sk_buff_head txq;
++
++ struct sk_buff *rx_skb;
++ long rx_count;
++ unsigned long rx_state;
++ unsigned long garbage_bytes;
++
++ int pm_enabled;
++ int tx_enabled;
++ int autorts;
++ int rx_enabled;
++
++ int tx_clocks_en;
++ int rx_clocks_en;
++ spinlock_t clocks_lock;
++ struct clk *uart_iclk;
++ struct clk *uart_fclk;
++ atomic_t clk_users;
++ u16 dll;
++ u16 dlh;
++ u16 ier;
++ u16 mdr1;
++ u16 efr;
++};
++
++struct hci_h4p_radio_hdr {
++ __u8 evt;
++ __u8 dlen;
++} __attribute__ ((packed));
++
++
++struct hci_h4p_init_hdr {
++ __u8 dlen;
++} __attribute__ ((packed));
++#define HCI_H4P_INIT_HDR_SIZE 1
++
++struct hci_h4p_init_cmd {
++ __u8 ack;
++ __u16 baudrate;
++ __u16 unused;
++ __u8 mode;
++ __u16 sys_clk;
++ __u16 unused2;
++} __attribute__ ((packed));
++#define HCI_H4P_INIT_CMD_SIZE 10
++
++struct hci_h4p_init_evt {
++ __u8 ack;
++ __u16 baudrate;
++ __u16 unused;
++ __u8 mode;
++ __u16 sys_clk;
++ __u16 unused2;
++ __u8 man_id;
++ __u8 ver_id;
++} __attribute__ ((packed));
++#define HCI_H4P_INIT_EVT_SIZE 12
++
++struct hci_h4p_alive_hdr {
++ __u8 dlen;
++} __attribute__ ((packed));
++#define HCI_H4P_ALIVE_HDR_SIZE 1
++
++struct hci_h4p_alive_msg {
++ __u8 message_id;
++ __u8 unused;
++} __attribute__ ((packed));
++#define HCI_H4P_ALIVE_MSG_SIZE 2
++
++#define MAX_BAUD_RATE 921600
++#define BC4_MAX_BAUD_RATE 3692300
++#define UART_CLOCK 48000000
++#define BT_INIT_DIVIDER 320
++#define BT_BAUDRATE_DIVIDER 384000000
++#define BT_SYSCLK_DIV 1000
++#define INIT_SPEED 120000
++
++#define HCI_H4P_MODE 0x4c
++
++#define HCI_H4P_ACK 0x20
++#define HCI_H4P_NACK 0x40
++#define HCI_H4P_ALIVE_IND_REQ 0x55
++#define HCI_H4P_ALIVE_IND_RESP 0xCC
++
++#define H4_TYPE_SIZE 1
++#define H4_RADIO_HDR_SIZE 2
++
++/* H4+ packet types */
++#define H4_CMD_PKT 0x01
++#define H4_ACL_PKT 0x02
++#define H4_SCO_PKT 0x03
++#define H4_EVT_PKT 0x04
++#define H4_NEG_PKT 0x06
++#define H4_ALIVE_PKT 0x07
++#define H4_RADIO_PKT 0x08
++
++/* TX states */
++#define WAIT_FOR_PKT_TYPE 1
++#define WAIT_FOR_HEADER 2
++#define WAIT_FOR_DATA 3
++
++struct hci_fw_event {
++ struct hci_event_hdr hev;
++ struct hci_ev_cmd_complete cmd;
++ u8 status;
++} __attribute__ ((packed));
++
++int hci_h4p_send_alive_packet(struct hci_h4p_info *info);
++
++void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info,
++ struct sk_buff *skb);
++int hci_h4p_bcm_send_fw(struct hci_h4p_info *info,
++ struct sk_buff_head *fw_queue);
++
++void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info,
++ struct sk_buff *skb);
++int hci_h4p_bc4_send_fw(struct hci_h4p_info *info,
++ struct sk_buff_head *fw_queue);
++
++void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info,
++ struct sk_buff *skb);
++int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info,
++ struct sk_buff_head *fw_queue);
++
++int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue);
++int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue);
++void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb);
++
++void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val);
++u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset);
++void hci_h4p_set_rts(struct hci_h4p_info *info, int active);
++int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms);
++void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
++void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
++void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed);
++int hci_h4p_reset_uart(struct hci_h4p_info *info);
++void hci_h4p_init_uart(struct hci_h4p_info *info);
++void hci_h4p_enable_tx(struct hci_h4p_info *info);
++void hci_h4p_store_regs(struct hci_h4p_info *info);
++void hci_h4p_restore_regs(struct hci_h4p_info *info);
++void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable);
++
++#endif /* __DRIVERS_BLUETOOTH_HCI_H4P_H */
+--- /dev/null
++++ b/drivers/bluetooth/hci_h4p/uart.c
+@@ -0,0 +1,203 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2005, 2006 Nokia Corporation.
++ *
++ * Contact: Ville Tervo <ville.tervo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/serial_reg.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++
++#include <linux/io.h>
++
++#include "hci_h4p.h"
++
++inline void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val)
++{
++ __raw_writeb(val, info->uart_base + (offset << 2));
++}
++
++inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset)
++{
++ return __raw_readb(info->uart_base + (offset << 2));
++}
++
++void hci_h4p_set_rts(struct hci_h4p_info *info, int active)
++{
++ u8 b;
++
++ b = hci_h4p_inb(info, UART_MCR);
++ if (active)
++ b |= UART_MCR_RTS;
++ else
++ b &= ~UART_MCR_RTS;
++ hci_h4p_outb(info, UART_MCR, b);
++}
++
++int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active,
++ int timeout_ms)
++{
++ unsigned long timeout;
++ int state;
++
++ timeout = jiffies + msecs_to_jiffies(timeout_ms);
++ for (;;) {
++ state = hci_h4p_inb(info, UART_MSR) & UART_MSR_CTS;
++ if (active) {
++ if (state)
++ return 0;
++ } else {
++ if (!state)
++ return 0;
++ }
++ if (time_after(jiffies, timeout))
++ return -ETIMEDOUT;
++ msleep(1);
++ }
++}
++
++void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which)
++{
++ u8 lcr, b;
++
++ lcr = hci_h4p_inb(info, UART_LCR);
++ hci_h4p_outb(info, UART_LCR, 0xbf);
++ b = hci_h4p_inb(info, UART_EFR);
++ if (on)
++ b |= which;
++ else
++ b &= ~which;
++ hci_h4p_outb(info, UART_EFR, b);
++ hci_h4p_outb(info, UART_LCR, lcr);
++}
++
++void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->lock, flags);
++ __hci_h4p_set_auto_ctsrts(info, on, which);
++ spin_unlock_irqrestore(&info->lock, flags);
++}
++
++void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed)
++{
++ unsigned int divisor;
++ u8 lcr, mdr1;
++
++ NBT_DBG("Setting speed %lu\n", speed);
++
++ if (speed >= 460800) {
++ divisor = UART_CLOCK / 13 / speed;
++ mdr1 = 3;
++ } else {
++ divisor = UART_CLOCK / 16 / speed;
++ mdr1 = 0;
++ }
++
++ /* Make sure UART mode is disabled */
++ hci_h4p_outb(info, UART_OMAP_MDR1, 7);
++
++ lcr = hci_h4p_inb(info, UART_LCR);
++ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); /* Set DLAB */
++ hci_h4p_outb(info, UART_DLL, divisor & 0xff); /* Set speed */
++ hci_h4p_outb(info, UART_DLM, divisor >> 8);
++ hci_h4p_outb(info, UART_LCR, lcr);
++
++ /* Make sure UART mode is enabled */
++ hci_h4p_outb(info, UART_OMAP_MDR1, mdr1);
++}
++
++int hci_h4p_reset_uart(struct hci_h4p_info *info)
++{
++ int count = 0;
++
++ /* Reset the UART */
++ hci_h4p_outb(info, UART_OMAP_SYSC, UART_SYSC_OMAP_RESET);
++ while (!(hci_h4p_inb(info, UART_OMAP_SYSS) & UART_SYSS_RESETDONE)) {
++ if (count++ > 100) {
++ dev_err(info->dev, "hci_h4p: UART reset timeout\n");
++ return -ENODEV;
++ }
++ udelay(1);
++ }
++
++ return 0;
++}
++
++
++void hci_h4p_store_regs(struct hci_h4p_info *info)
++{
++ u16 lcr = 0;
++
++ lcr = hci_h4p_inb(info, UART_LCR);
++ hci_h4p_outb(info, UART_LCR, 0xBF);
++ info->dll = hci_h4p_inb(info, UART_DLL);
++ info->dlh = hci_h4p_inb(info, UART_DLM);
++ info->efr = hci_h4p_inb(info, UART_EFR);
++ hci_h4p_outb(info, UART_LCR, lcr);
++ info->mdr1 = hci_h4p_inb(info, UART_OMAP_MDR1);
++ info->ier = hci_h4p_inb(info, UART_IER);
++}
++
++void hci_h4p_restore_regs(struct hci_h4p_info *info)
++{
++ u16 lcr = 0;
++
++ hci_h4p_init_uart(info);
++
++ hci_h4p_outb(info, UART_OMAP_MDR1, 7);
++ lcr = hci_h4p_inb(info, UART_LCR);
++ hci_h4p_outb(info, UART_LCR, 0xBF);
++ hci_h4p_outb(info, UART_DLL, info->dll); /* Set speed */
++ hci_h4p_outb(info, UART_DLM, info->dlh);
++ hci_h4p_outb(info, UART_EFR, info->efr);
++ hci_h4p_outb(info, UART_LCR, lcr);
++ hci_h4p_outb(info, UART_OMAP_MDR1, info->mdr1);
++ hci_h4p_outb(info, UART_IER, info->ier);
++}
++
++void hci_h4p_init_uart(struct hci_h4p_info *info)
++{
++ u8 mcr, efr;
++
++ /* Enable and setup FIFO */
++ hci_h4p_outb(info, UART_OMAP_MDR1, 0x00);
++
++ hci_h4p_outb(info, UART_LCR, 0xbf);
++ efr = hci_h4p_inb(info, UART_EFR);
++ hci_h4p_outb(info, UART_EFR, UART_EFR_ECB);
++ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB);
++ mcr = hci_h4p_inb(info, UART_MCR);
++ hci_h4p_outb(info, UART_MCR, UART_MCR_TCRTLR);
++ hci_h4p_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO |
++ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
++ (3 << 6) | (0 << 4));
++ hci_h4p_outb(info, UART_LCR, 0xbf);
++ hci_h4p_outb(info, UART_TI752_TLR, 0xed);
++ hci_h4p_outb(info, UART_TI752_TCR, 0xef);
++ hci_h4p_outb(info, UART_EFR, efr);
++ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB);
++ hci_h4p_outb(info, UART_MCR, 0x00);
++ hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8);
++ hci_h4p_outb(info, UART_IER, UART_IER_RDI | UART_IER_RLSI);
++ hci_h4p_outb(info, UART_OMAP_WER, 0xff);
++ hci_h4p_outb(info, UART_OMAP_SYSC, (0 << 0) | (1 << 2) | (1 << 3));
++}
+--- /dev/null
++++ b/include/linux/bluetooth/hci_h4p.h
+@@ -0,0 +1,41 @@
++/*
++ * This file is part of hci_h4p bluetooth driver
++ *
++ * Copyright (C) 2010 Nokia Corporation.
++ *
++ * Contact: Roger Quadros <roger.quadros@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++
++/**
++ * struct hci_h4p_platform_data - hci_h4p platform data structure
++ * @uart_base: UART base address
++ * @uart_irq: UART Interrupt number
++ * @host_wu: Function hook to determine whether the host should wake up
++ * @bt_wu: Function hook to enable/disable Bluetooth transmission
++ * @reset: Function hook to set/clear reset condition
++ * @host_wu_gpio: GPIO used to wake up the host
++ */
++struct hci_h4p_platform_data {
++ void *uart_base;
++ unsigned int uart_irq;
++ bool (*host_wu)(void);
++ void (*bt_wu)(bool);
++ void (*reset)(bool);
++ unsigned int host_wu_gpio;
++};
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Earpiece-and-headset-support-for-N900.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Earpiece-and-headset-support-for-N900.patch
new file mode 100644
index 0000000000..6603785746
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Earpiece-and-headset-support-for-N900.patch
@@ -0,0 +1,577 @@
+From 3a00b962f5d60f67b0c447a47ed173d8826ffa0b Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jhnikula@gmail.com>
+Date: Tue, 29 Jun 2010 14:44:50 +0300
+Subject: [PATCH 09/11] Earpiece and headset support for N900
+
+This patch is combination of following patches:
+
+1. ASoC: RX-51: Add aic34b_dummy driver only for the RX-51
+
+Only purpose of this dummy driver is to support a few audio connections
+on Nokia RX-51/N900 HW. Currently ASoC framework supports only single audio
+codec per card. The RX-51 has two of them since the TLV320AIC34 audio codec
+used in RX-51 integrates basically two TLV320AIC33 codecs into a same chip.
+These instances are called as AIC34A and AIC34B. The AIC34A is used as an
+audio codec and the AIC34B is used kind of audio amplifier and mic bias
+driver.
+
+Ideally features of this driver are supported by the tlv320aic3x codec
+driver but as only single instance of it can exist currently, this driver
+registers needed features as additional controls and widgets for the card.
+
+This driver should vanish as soon as the ASoC multi-component framework
+maturises and gets integrated into mainline.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+2. omap: rx51: Add second instance of the TLV320AIC34 codec
+
+TLV320AIC34 integrates basically two TLV320AIC33 audio codecs. The AIC34A
+is found from I2C address 0x18 and AIC34B from 0x19.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+3. ASoC: RX-51: Add support for earpiece
+
+Earpiece is connected to HPL pins of the second TLV320AIC34 codec instance.
+Audio routings between AIC34A, AIC34B and earpiece is following
+
+ AIC34A MONO_LOUT -> AIC34B LINE2R -> AIC34B HPL -> earpiece
+
+Add support for this routing by using the aic34b_dummy driver.
+
+As the aic34b_dummy should vanish after the ASoC multi-component is ready,
+also this patch must be edited by modifying the audio_map accordingly.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+4. ASoC: RX-51: Add mic input to AV jack
+
+Mic input is connected to LINE1L of the AIC34A via ECI_SW2 (name took from
+the Maemo 2.6.28 kernel sources) switch. This patch adds the headset feature
+to AV jack. Headset is combination of headphone stereo output with mic
+input.
+
+This patch doesn't drive the mic bias so signal will be too weak to be
+usable. The mic bias feature will be introduced by another patch.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+5. SoC: RX-51: Add route for AIC34B generated mic bias
+
+This patch adds support for headset mic bias by using a feature provided
+by the aic34b_dummy driver.
+
+As the aic34b_dummy should vanish after the ASoC multi-component is ready,
+also this patch must be edited by modifying the audio_map accordingly.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 3 +
+ sound/soc/omap/Makefile | 2 +-
+ sound/soc/omap/aic34b_dummy.c | 260 ++++++++++++++++++++++++++
+ sound/soc/omap/aic34b_dummy.h | 29 +++
+ sound/soc/omap/rx51.c | 62 ++++++-
+ 5 files changed, 353 insertions(+), 3 deletions(-)
+ create mode 100644 sound/soc/omap/aic34b_dummy.c
+ create mode 100644 sound/soc/omap/aic34b_dummy.h
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 0aab1a0..0d98752 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -853,6 +853,9 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ .platform_data = &rx51_aic3x_data,
+ },
++ {
++ I2C_BOARD_INFO("aic34b_dummy", 0x19),
++ },
+ #if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
+ {
+ I2C_BOARD_INFO("tsl2563", 0x29),
+diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
+index ba9fc65..0a7374f 100644
+--- a/sound/soc/omap/Makefile
++++ b/sound/soc/omap/Makefile
+@@ -24,7 +24,7 @@ snd-soc-zoom2-objs := zoom2.o
+ snd-soc-igep0020-objs := igep0020.o
+
+ obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
+-obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
++obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o aic34b_dummy.o
+ obj-$(CONFIG_SND_OMAP_SOC_AMS_DELTA) += snd-soc-ams-delta.o
+ obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
+ obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o
+diff --git a/sound/soc/omap/aic34b_dummy.c b/sound/soc/omap/aic34b_dummy.c
+new file mode 100644
+index 0000000..bace5fc
+--- /dev/null
++++ b/sound/soc/omap/aic34b_dummy.c
+@@ -0,0 +1,260 @@
++/*
++ * aic34b_dummy.c -- Dummy driver for AIC34 block B parts used in Nokia RX51
++ *
++ * Purpose for this driver is to cover few audio connections on Nokia RX51 HW
++ * which are connected into block B of TLV320AIC34 dual codec.
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ * TODO:
++ * - Get rid of this driver, at least when ASoC multi-component is merged into
++ * mainline.
++ * This driver is hacked only for Nokia RX51 HW.
++ */
++
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++#include <sound/tlv.h>
++
++#include "../codecs/tlv320aic3x.h"
++
++struct i2c_client *aic34b_client;
++
++static int aic34b_read(struct i2c_client *client, unsigned int reg)
++{
++ int val;
++
++ val = i2c_smbus_read_byte_data(client, reg);
++
++ return val;
++}
++
++static int aic34b_write(struct i2c_client *client, unsigned int reg,
++ u8 value)
++{
++ u8 data[2];
++
++ data[0] = reg & 0xff;
++ data[1] = value & 0xff;
++
++ return (i2c_master_send(client, data, 2) == 2) ? 0 : -EIO;
++}
++
++static int aic34b_get_volsw(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++ unsigned int reg = mc->reg;
++ unsigned int shift = mc->shift;
++ int max = mc->max;
++ unsigned int mask = (1 << fls(max)) - 1;
++ unsigned int invert = mc->invert;
++ int val;
++
++ if (aic34b_client == NULL)
++ return 0;
++
++ val = (aic34b_read(aic34b_client, reg) >> shift) & mask;
++ if (invert)
++ val = max - val;
++ ucontrol->value.integer.value[0] = val;
++
++ return 0;
++}
++
++static int aic34b_put_volsw(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++ unsigned int reg = mc->reg;
++ unsigned int shift = mc->shift;
++ int max = mc->max;
++ unsigned int mask = (1 << fls(max)) - 1;
++ unsigned int invert = mc->invert;
++ unsigned int val = (ucontrol->value.integer.value[0] & mask);
++ int val_reg;
++
++ if (aic34b_client == NULL)
++ return 0;
++
++ if (invert)
++ val = max - val;
++
++ val_reg = aic34b_read(aic34b_client, reg);
++ if (((val_reg >> shift) & mask) == val) {
++ return 0;
++ }
++
++ val_reg &= ~(mask << shift);
++ val_reg |= val << shift;
++ aic34b_write(aic34b_client, reg, val_reg);
++
++ return 1;
++}
++
++static int aic34b_bypass_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ int val;
++
++ if (aic34b_client == NULL)
++ return 0;
++
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMU:
++ /* Connect LINE2R to RADC in differential mode and 0 dB gain */
++ aic34b_write(aic34b_client, LINE2R_2_RADC_CTRL, 0x80);
++ /* Unmute Right ADC-PGA */
++ aic34b_write(aic34b_client, RADC_VOL, 0x00);
++ /* Right PGA -> HPLOUT */
++ val = aic34b_read(aic34b_client, PGAR_2_HPLOUT_VOL);
++ aic34b_write(aic34b_client, PGAR_2_HPLOUT_VOL, val | 0x80);
++ /* Unmute HPLOUT with 1 dB gain */
++ aic34b_write(aic34b_client, HPLOUT_CTRL, 0x19);
++ /* Unmute HPLCOM with 1 dB gain */
++ aic34b_write(aic34b_client, HPLCOM_CTRL, 0x19);
++ break;
++ case SND_SOC_DAPM_POST_PMD:
++ /* Disconnect LINE2R from RADC */
++ aic34b_write(aic34b_client, LINE2R_2_RADC_CTRL, 0xF8);
++ /* Mute Right ADC-PGA */
++ aic34b_write(aic34b_client, RADC_VOL, 0x80);
++ /* Detach Right PGA from HPLOUT */
++ val = aic34b_read(aic34b_client, PGAR_2_HPLOUT_VOL);
++ aic34b_write(aic34b_client, PGAR_2_HPLOUT_VOL, val & 0x7f);
++ /* Power down HPLOUT */
++ aic34b_write(aic34b_client, HPLOUT_CTRL, 0x06);
++ /* Power down HPLCOM */
++ aic34b_write(aic34b_client, HPLCOM_CTRL, 0x06);
++ break;
++ }
++
++ return 0;
++}
++
++static int aic34b_mic_bias_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ if (aic34b_client == NULL)
++ return 0;
++
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMU:
++ aic34b_write(aic34b_client, MICBIAS_CTRL, 2 << 6);
++ break;
++ case SND_SOC_DAPM_POST_PMD:
++ aic34b_write(aic34b_client, MICBIAS_CTRL, 0);
++ break;
++ }
++
++ return 0;
++}
++
++static DECLARE_TLV_DB_SCALE(output_stage_tlv, -5900, 50, 1);
++
++static const struct snd_kcontrol_new aic34b_snd_controls[] = {
++ SOC_SINGLE_EXT_TLV("34B HPL PGAR Bypass Playback Volume",
++ PGAR_2_HPLOUT_VOL, 0, 118, 1,
++ aic34b_get_volsw, aic34b_put_volsw,
++ output_stage_tlv),
++};
++
++static const struct snd_soc_dapm_widget aic34b_dapm_widgets[] = {
++ SND_SOC_DAPM_PGA_E("34B LINE2R HPL Bypass", SND_SOC_NOPM,
++ 0, 0, NULL, 0, aic34b_bypass_event,
++ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
++ SND_SOC_DAPM_MICBIAS_E("34B Mic Bias 2.5V", SND_SOC_NOPM,
++ 0, 0, aic34b_mic_bias_event,
++ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
++
++ SND_SOC_DAPM_OUTPUT("34B_HPLOUT"),
++ SND_SOC_DAPM_OUTPUT("34B_HPLCOM"),
++ SND_SOC_DAPM_INPUT("34B_LINE2R"),
++};
++
++static const struct snd_soc_dapm_route audio_map[] = {
++ {"34B LINE2R HPL Bypass", NULL, "34B_LINE2R"},
++ {"34B_HPLOUT", NULL, "34B LINE2R HPL Bypass"},
++ {"34B_HPLCOM", NULL, "34B LINE2R HPL Bypass"},
++};
++
++int aic34b_add_controls(struct snd_soc_codec *codec)
++{
++ snd_soc_dapm_new_controls(codec, aic34b_dapm_widgets,
++ ARRAY_SIZE(aic34b_dapm_widgets));
++ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
++ return snd_soc_add_controls(codec, aic34b_snd_controls,
++ ARRAY_SIZE(aic34b_snd_controls));
++}
++EXPORT_SYMBOL_GPL(aic34b_add_controls);
++
++static int aic34b_dummy_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ if (aic34b_read(client, AIC3X_PLL_PROGA_REG) != 0x10) {
++ /* Chip not present */
++ return -ENODEV;
++ }
++ aic34b_client = client;
++
++ return 0;
++}
++
++static int aic34b_dummy_remove(struct i2c_client *client)
++{
++ aic34b_client = NULL;
++
++ return 0;
++}
++
++static const struct i2c_device_id aic34b_dummy_id[] = {
++ { "aic34b_dummy", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, aic34b_dummy_id);
++
++static struct i2c_driver aic34b_dummy_driver = {
++ .driver = {
++ .name = "aic34b_dummy"
++ },
++ .probe = aic34b_dummy_probe,
++ .remove = aic34b_dummy_remove,
++ .id_table = aic34b_dummy_id,
++};
++
++static int __init aic34b_dummy_init(void)
++{
++ return i2c_add_driver(&aic34b_dummy_driver);
++}
++
++static void __exit aic34b_dummy_exit(void)
++{
++ i2c_del_driver(&aic34b_dummy_driver);
++}
++
++MODULE_AUTHOR();
++MODULE_DESCRIPTION("Dummy driver for AIC34 block B parts used on Nokia RX51");
++MODULE_LICENSE("GPL");
++
++module_init(aic34b_dummy_init);
++module_exit(aic34b_dummy_exit);
+diff --git a/sound/soc/omap/aic34b_dummy.h b/sound/soc/omap/aic34b_dummy.h
+new file mode 100644
+index 0000000..d37588a
+--- /dev/null
++++ b/sound/soc/omap/aic34b_dummy.h
+@@ -0,0 +1,29 @@
++/*
++ * aic34b_dummy.h
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Jarkko Nikula <jarkko.nikula@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef __AIC34B_DUMMY__
++#define __AIC34B_DUMMY__
++
++int aic34b_add_controls(struct snd_soc_codec *codec);
++
++#endif
+diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
+index 9b536da..5171428 100644
+--- a/sound/soc/omap/rx51.c
++++ b/sound/soc/omap/rx51.c
+@@ -38,9 +38,11 @@
+ #include "omap-pcm.h"
+ #include "../codecs/tlv320aic3x.h"
+ #include "../codecs/tpa6130a2.h"
++#include "aic34b_dummy.h"
+
+ #define RX51_TVOUT_SEL_GPIO 40
+ #define RX51_JACK_DETECT_GPIO 177
++#define RX51_ECI_SW2_GPIO 182
+ /*
+ * REVISIT: TWL4030 GPIO base in RX-51. Now statically defined to 192. This
+ * gpio is reserved in arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -51,18 +53,22 @@ enum {
+ RX51_JACK_DISABLED,
+ RX51_JACK_TVOUT, /* tv-out with stereo audio */
+ RX51_JACK_HP, /* stereo output, no mic */
++ RX51_JACK_HS, /* stereo output with mic input */
+ };
+
+ static int rx51_spk_func;
+ static int rx51_dmic_func;
+ static int rx51_jack_func;
+ static int rx51_fmtx_func;
++static int rx51_ear_func;
+
+ static void rx51_ext_control(struct snd_soc_codec *codec)
+ {
+- int hp = 0;
++ int hp = 0, mic = 0;
+
+ switch (rx51_jack_func) {
++ case RX51_JACK_HS:
++ mic = 1;
+ case RX51_JACK_TVOUT:
+ case RX51_JACK_HP:
+ hp = 1;
+@@ -85,6 +91,14 @@ static void rx51_ext_control(struct snd_soc_codec *codec)
+ snd_soc_dapm_enable_pin(codec, "FM Transmitter");
+ else
+ snd_soc_dapm_disable_pin(codec, "FM Transmitter");
++ if (rx51_ear_func)
++ snd_soc_dapm_enable_pin(codec, "Earpiece");
++ else
++ snd_soc_dapm_disable_pin(codec, "Earpiece");
++ if (mic)
++ snd_soc_dapm_enable_pin(codec, "Mic Jack");
++ else
++ snd_soc_dapm_disable_pin(codec, "Mic Jack");
+
+ gpio_set_value(RX51_TVOUT_SEL_GPIO,
+ rx51_jack_func == RX51_JACK_TVOUT);
+@@ -238,6 +252,28 @@ static int rx51_set_fmtx(struct snd_kcontrol *kcontrol,
+ return 1;
+ }
+
++static int rx51_get_ear(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ ucontrol->value.integer.value[0] = rx51_ear_func;
++
++ return 0;
++}
++
++static int rx51_set_ear(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++
++ if (rx51_ear_func == ucontrol->value.integer.value[0])
++ return 0;
++
++ rx51_ear_func = ucontrol->value.integer.value[0];
++ rx51_ext_control(codec);
++
++ return 1;
++}
++
+ static struct snd_soc_jack rx51_av_jack;
+
+ static struct snd_soc_jack_gpio rx51_av_jack_gpios[] = {
+@@ -255,6 +291,8 @@ static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("DMic", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_LINE("FM Transmitter", NULL),
++ SND_SOC_DAPM_SPK("Earpiece", NULL),
++ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ };
+
+ static const struct snd_soc_dapm_route audio_map[] = {
+@@ -271,18 +309,27 @@ static const struct snd_soc_dapm_route audio_map[] = {
+
+ {"FM Transmitter", NULL, "LLOUT"},
+ {"FM Transmitter", NULL, "RLOUT"},
++
++ {"34B_LINE2R", NULL, "MONO_LOUT"},
++ {"Earpiece", NULL, "34B_HPLOUT"},
++ {"Earpiece", NULL, "34B_HPLCOM"},
++
++ {"LINE1L", NULL, "34B Mic Bias 2.5V"},
++ {"34B Mic Bias 2.5V", NULL, "Mic Jack"}
+ };
+
+ static const char *spk_function[] = {"Off", "On"};
+ static const char *input_function[] = {"ADC", "Digital Mic"};
+-static const char *jack_function[] = {"Off", "TV-OUT", "Headphone"};
++static const char *jack_function[] = {"Off", "TV-OUT", "Headphone", "Headset"};
+ static const char *fmtx_function[] = {"Off", "On"};
++static const char *ear_function[] = {"Off", "On"};
+
+ static const struct soc_enum rx51_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(jack_function), jack_function),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(fmtx_function), fmtx_function),
++ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ear_function), ear_function),
+ };
+
+ static const struct snd_kcontrol_new aic34_rx51_controls[] = {
+@@ -294,6 +341,8 @@ static const struct snd_kcontrol_new aic34_rx51_controls[] = {
+ rx51_get_jack, rx51_set_jack),
+ SOC_ENUM_EXT("FMTX Function", rx51_enum[3],
+ rx51_get_fmtx, rx51_set_fmtx),
++ SOC_ENUM_EXT("Earpiece Function", rx51_enum[4],
++ rx51_get_ear, rx51_set_ear),
+ };
+
+ static int rx51_aic34_init(struct snd_soc_codec *codec)
+@@ -319,6 +368,8 @@ static int rx51_aic34_init(struct snd_soc_codec *codec)
+ tpa6130a2_add_controls(codec);
+ snd_soc_limit_volume(codec, "TPA6130A2 Headphone Playback Volume", 42);
+
++ aic34b_add_controls(codec);
++
+ /* Set up RX-51 specific audio path audio_map */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+@@ -382,6 +433,10 @@ static int __init rx51_soc_init(void)
+ if (err)
+ goto err_gpio_tvout_sel;
+ gpio_direction_output(RX51_TVOUT_SEL_GPIO, 0);
++ err = gpio_request(RX51_ECI_SW2_GPIO, "eci_sw2");
++ if (err)
++ goto err_gpio_eci_sw2;
++ gpio_direction_output(RX51_ECI_SW2_GPIO, 1);
+
+ rx51_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!rx51_snd_device) {
+@@ -401,6 +456,8 @@ static int __init rx51_soc_init(void)
+ err2:
+ platform_device_put(rx51_snd_device);
+ err1:
++ gpio_free(RX51_ECI_SW2_GPIO);
++err_gpio_eci_sw2:
+ gpio_free(RX51_TVOUT_SEL_GPIO);
+ err_gpio_tvout_sel:
+
+@@ -413,6 +470,7 @@ static void __exit rx51_soc_exit(void)
+ rx51_av_jack_gpios);
+
+ platform_device_unregister(rx51_snd_device);
++ gpio_free(RX51_ECI_SW2_GPIO);
+ gpio_free(RX51_TVOUT_SEL_GPIO);
+ }
+
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Hacks-for-Nokia-N900.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Hacks-for-Nokia-N900.patch
new file mode 100644
index 0000000000..d23b486a67
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-Hacks-for-Nokia-N900.patch
@@ -0,0 +1,651 @@
+From df0b24d1e5fcb0a409debf3f9f1fcfd32f6cb2e8 Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <felipe.balbi@nokia.com>
+Date: Mon, 4 Jan 2010 15:05:37 +0200
+Subject: [PATCH 01/11] Hacks for Nokia N900
+
+This patch is combination of following hacks:
+
+1. usb: musb: add mA and charger sysfs entries
+
+ This is the same mA entry and charger entry used in Fremantle program.
+ We are adding it as __deprecated because it will change and this is
+ done just for us to be able to test USB charging funtionality.
+
+ One difference from Fremantle is that we now issue
+ sysfs_notify() when that value changes.
+
+ Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+ Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+
+2. usb: musb: add suspend sysfs entry
+
+ This patch is combination of following patches:
+ 1. usb: musb: suspend notification only when needed
+ 2. usb: musb: Notify sysfs on suspend
+ 3. usb: musb: fix build with MUSB Host only mode
+
+ Signed-off-by: Niilo Minkkinen <ext-niilo.1.minkkinen@nokia.com>
+ Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+ Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+ Signed-off-by: Roger Quadros <ext-roger.quadros@nokia.com>
+ Signed-off-by: Jouni Hogander <jouni.hogander@nokia.com>
+ Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+
+3. usb: otg: add detect_charger field to otg_transceiver
+
+ Then we can allow musb to kick charger detection on transceiver.
+ This is added as __deprecated because it only exists to mimic
+ the behavior we had in Fremantle.
+
+ Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+
+4. OMAP2/3 clock: implement clock rate/parent change notifiers
+
+ Patch-Mainline: Not sure
+
+ This patch allows core code and driver code to register for
+ notification when a clock's rate or parent changes. These are useful
+ because drivers don't have exclusive control over a clock's rate:
+ power management code (e.g., CPUFreq) may cause rate changes across
+ large parts of the clock tree.
+
+ There are three notifier messages:
+
+ 1. a pre-change notifier, called before the change;
+
+ 2. a post-change notifier, called after the change; and
+
+ 3. an abort notifier, called if the change fails for any reason after
+ the pre-change notifier callbacks have run.
+
+ Since the implementation uses a blocking notifier, notifier code may
+ block waiting for devices to quiesce; but long delays here will reduce
+ the effectiveness of DVFS. Since notifier callbacks are called with
+ clocks_mutex held, callback code must not re-enter the clock framework.
+
+ Pre-change notifiers are passed the current clock rate and the
+ post-change notifiers the new clock rate. The notifiers are called even
+ if the clock rate is the same before and after the change. This is
+ because reprogramming a clock's parent or rate may briefly disrupt the
+ clock.
+
+ There are likely to be few users of these notifiers, compared to the
+ total number of clocks. So, rather than storing one notifier per
+ struct clk, notifiers are stored in a separate, dynamically allocated
+ list, effectively trading execution speed (in terms of a sequential
+ scan of the notifier list) for memory savings. The implementation is
+ completely hidden from the callbacks and can be easily changed.
+
+ Until prototypes for these functions are made available in
+ include/linux/clk.h, drivers should pass function pointers to
+ clk_notifier_register() and clk_notifier_unregister() via their
+ platform_data struct.
+
+ This patch is a collaboration between Tero Kristo
+ <tero.kristo@nokia.com> and Paul Walmsley <paul@pwsan.com> and several
+ others. Hiroshi Doyu <Hiroshi.DOYU@nokia.com> tracked down and fixed a
+ bug where blocking_notifier_chain_*() were called while interrupts
+ were disabled. Nishanth Menon <nm@ti.com> found and fixed a bug in
+ the clk_notifier_unregister() path, where a list_del() was missing.
+ And thanks to Jouni Hogander <jouni.hogander@nokia.com> for comments
+ and review during the evolution of these patches.
+
+ Signed-off-by: Paul Walmsley <paul@pwsan.com>
+ Signed-off-by: Tero Kristo <tero.kristo@nokia.com>
+
+5. PM: export missing symbols
+
+ Patch-Mainline: never as this is a temporary hack
+
+ Needed by the PVR driver.
+
+ This patch is combined version of the following patches
+
+ 1. PM: export clk_notifier_{register/unregister}
+ 2. PM: export omap_pm_set_min_bus_tput
+
+ Signed-off-by: Imre Deak <imre.deak@nokia.com>
+ Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+---
+ arch/arm/plat-omap/clock.c | 208 +++++++++++++++++++++++++++++++
+ arch/arm/plat-omap/include/plat/clock.h | 63 +++++++++-
+ arch/arm/plat-omap/omap-pm-noop.c | 1 +
+ drivers/usb/musb/musb_core.c | 47 +++++++-
+ drivers/usb/musb/musb_core.h | 2 +
+ drivers/usb/musb/musb_gadget.c | 9 ++
+ include/linux/usb/otg.h | 11 ++
+ 7 files changed, 339 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
+index 7190cbd..f17e433 100644
+--- a/arch/arm/plat-omap/clock.c
++++ b/arch/arm/plat-omap/clock.c
+@@ -21,6 +21,7 @@
+ #include <linux/cpufreq.h>
+ #include <linux/debugfs.h>
+ #include <linux/io.h>
++#include <linux/slab.h>
+
+ #include <plat/clock.h>
+
+@@ -29,6 +30,78 @@ static DEFINE_MUTEX(clocks_mutex);
+ static DEFINE_SPINLOCK(clockfw_lock);
+
+ static struct clk_functions *arch_clock;
++static LIST_HEAD(clk_notifier_list);
++
++/*
++ * _clk_free_notifier_chain - safely remove struct clk_notifier
++ * @cn: struct clk_notifier *
++ *
++ * Removes the struct clk_notifier @cn from the clk_notifier_list and
++ * frees it.
++ */
++static void _clk_free_notifier_chain(struct clk_notifier *cn)
++{
++ list_del(&cn->node);
++ kfree(cn);
++}
++
++/*
++ * omap_clk_notify - call clk notifier chain
++ * @clk: struct clk * that is changing rate
++ * @msg: clk notifier type (i.e., CLK_POST_RATE_CHANGE; see mach/clock.h)
++ * @old_rate: old rate
++ * @new_rate: new rate
++ *
++ * Triggers a notifier call chain on the post-clk-rate-change notifier
++ * for clock 'clk'. Passes a pointer to the struct clk and the
++ * previous and current rates to the notifier callback. Intended to be
++ * called by internal clock code only. No return value.
++ */
++static void omap_clk_notify(struct clk *clk, unsigned long msg)
++{
++ struct clk_notifier *cn;
++ struct clk_notifier_data cnd;
++
++ cnd.clk = clk;
++ cnd.rate = clk->rate;
++
++ list_for_each_entry(cn, &clk_notifier_list, node) {
++ if (cn->clk == clk) {
++ blocking_notifier_call_chain(&cn->notifier_head, msg,
++ &cnd);
++ break;
++ }
++ }
++}
++
++/*
++ * omap_clk_notify_downstream - trigger clock change notifications
++ * @clk: struct clk * to start the notifications with
++ * @msg: notifier msg - see "Clk notifier callback types" in mach/clock.h
++ *
++ * Call clock change notifiers on clocks starting with @clk and including
++ * all of @clk's downstream children clocks. Returns NOTIFY_DONE.
++ */
++static int omap_clk_notify_downstream(struct clk *clk, unsigned long msg)
++{
++ struct clk *child;
++ int ret;
++
++ if (!clk->notifier_count)
++ return NOTIFY_DONE;
++
++ omap_clk_notify(clk, msg);
++
++ if (list_empty(&clk->children))
++ return NOTIFY_DONE;
++
++ list_for_each_entry(child, &clk->children, sibling) {
++ ret = omap_clk_notify_downstream(child, msg);
++ if (ret)
++ break;
++ }
++ return ret;
++}
+
+ /*
+ * Standard clock functions defined in include/linux/clk.h
+@@ -115,10 +188,16 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
+ {
+ unsigned long flags;
+ int ret = -EINVAL;
++ int msg;
+
+ if (clk == NULL || IS_ERR(clk))
+ return ret;
+
++ mutex_lock(&clocks_mutex);
++
++ if (clk->notifier_count)
++ omap_clk_notify_downstream(clk, CLK_PRE_RATE_CHANGE);
++
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_set_rate)
+ ret = arch_clock->clk_set_rate(clk, rate);
+@@ -129,6 +208,12 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
+ }
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
++ msg = (ret) ? CLK_ABORT_RATE_CHANGE : CLK_POST_RATE_CHANGE;
++
++ omap_clk_notify_downstream(clk, msg);
++
++ mutex_unlock(&clocks_mutex);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(clk_set_rate);
+@@ -137,10 +222,16 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
+ {
+ unsigned long flags;
+ int ret = -EINVAL;
++ int msg;
+
+ if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
+ return ret;
+
++ mutex_lock(&clocks_mutex);
++
++ if (clk->notifier_count)
++ omap_clk_notify_downstream(clk, CLK_PRE_RATE_CHANGE);
++
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (clk->usecount == 0) {
+ if (arch_clock->clk_set_parent)
+@@ -154,6 +245,12 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
++ msg = (ret) ? CLK_ABORT_RATE_CHANGE : CLK_POST_RATE_CHANGE;
++
++ omap_clk_notify_downstream(clk, msg);
++
++ mutex_unlock(&clocks_mutex);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(clk_set_parent);
+@@ -384,9 +481,120 @@ void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
+ }
+ #endif
+
++/* Clk notifier implementation */
++
++/*
++ * clk_notifier_register - add a clock parameter change notifier
++ * @clk: struct clk * to watch
++ * @nb: struct notifier_block * with callback info
++ *
++ * Request notification for changes to the clock 'clk'. This uses a
++ * blocking notifier. Callback code must not call into the clock
++ * framework, as clocks_mutex is held. Pre-notifier callbacks will be
++ * passed the previous and new rate of the clock.
++ *
++ * clk_notifier_register() must be called from process
++ * context. Returns -EINVAL if called with null arguments, -ENOMEM
++ * upon allocation failure; otherwise, passes along the return value
++ * of blocking_notifier_chain_register().
++ */
++int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
++{
++ struct clk_notifier *cn = NULL, *cn_new = NULL;
++ int r;
++ struct clk *clkp;
++
++ if (!clk || !nb)
++ return -EINVAL;
++
++ mutex_lock(&clocks_mutex);
++
++ list_for_each_entry(cn, &clk_notifier_list, node)
++ if (cn->clk == clk)
++ break;
++
++ if (cn->clk != clk) {
++ cn_new = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
++ if (!cn_new) {
++ r = -ENOMEM;
++ goto cnr_out;
++ };
++
++ cn_new->clk = clk;
++ BLOCKING_INIT_NOTIFIER_HEAD(&cn_new->notifier_head);
++
++ list_add(&cn_new->node, &clk_notifier_list);
++ cn = cn_new;
++ }
++
++ r = blocking_notifier_chain_register(&cn->notifier_head, nb);
++ if (!IS_ERR_VALUE(r)) {
++ clkp = clk;
++ do {
++ clkp->notifier_count++;
++ } while ((clkp = clkp->parent));
++ } else {
++ if (cn_new)
++ _clk_free_notifier_chain(cn);
++ }
++
++cnr_out:
++ mutex_unlock(&clocks_mutex);
++
++ return r;
++}
++EXPORT_SYMBOL(clk_notifier_register);
++
+ /*
++ * clk_notifier_unregister - remove a clock change notifier
++ * @clk: struct clk *
++ * @nb: struct notifier_block * with callback info
+ *
++ * Request no further notification for changes to clock 'clk'.
++ * Returns -EINVAL if called with null arguments; otherwise, passes
++ * along the return value of blocking_notifier_chain_unregister().
+ */
++int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
++{
++ struct clk_notifier *cn = NULL;
++ struct clk *clkp;
++ int r = -EINVAL;
++
++ if (!clk || !nb)
++ return -EINVAL;
++
++ mutex_lock(&clocks_mutex);
++
++ list_for_each_entry(cn, &clk_notifier_list, node)
++ if (cn->clk == clk)
++ break;
++
++ if (cn->clk != clk) {
++ r = -ENOENT;
++ goto cnu_out;
++ };
++
++ r = blocking_notifier_chain_unregister(&cn->notifier_head, nb);
++ if (!IS_ERR_VALUE(r)) {
++ clkp = clk;
++ do {
++ clkp->notifier_count--;
++ } while ((clkp = clkp->parent));
++ }
++
++ /*
++ * XXX ugh, layering violation. There should be some
++ * support in the notifier code for this.
++ */
++ if (!cn->notifier_head.head)
++ _clk_free_notifier_chain(cn);
++
++cnu_out:
++ mutex_unlock(&clocks_mutex);
++
++ return r;
++}
++EXPORT_SYMBOL(clk_notifier_unregister);
+
+ #ifdef CONFIG_OMAP_RESET_CLOCKS
+ /*
+diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
+index dfc472c..2f584c8 100644
+--- a/arch/arm/plat-omap/include/plat/clock.h
++++ b/arch/arm/plat-omap/include/plat/clock.h
+@@ -15,6 +15,8 @@
+
+ #include <linux/list.h>
+
++#include <linux/notifier.h>
++
+ struct module;
+ struct clk;
+ struct clockdomain;
+@@ -116,6 +118,37 @@ struct dpll_data {
+
+ #endif
+
++/*
++ * struct clk_notifier - associate a clk with a notifier
++ * @clk: struct clk * to associate the notifier with
++ * @notifier_head: a blocking_notifier_head for this clk
++ * @node: linked list pointers
++ *
++ * A list of struct clk_notifier is maintained by the notifier code.
++ * An entry is created whenever code registers the first notifier on a
++ * particular @clk. Future notifiers on that @clk are added to the
++ * @notifier_head.
++ */
++struct clk_notifier {
++ struct clk *clk;
++ struct blocking_notifier_head notifier_head;
++ struct list_head node;
++};
++
++/*
++ * struct clk_notifier_data - rate data to pass to the notifier callback
++ * @clk: struct clk * being changed
++ * @rate: current rate of this clock
++ *
++ * This struct is passed as parameter to the clock notifier callbacks when
++ * a clock is changed. Current rate of the clock is passed along with the
++ * call in pre-notifier, and the new rate in post-notifier.
++ */
++struct clk_notifier_data {
++ struct clk *clk;
++ unsigned long rate;
++};
++
+ struct clk {
+ struct list_head node;
+ const struct clkops *ops;
+@@ -129,6 +162,7 @@ struct clk {
+ int (*set_rate)(struct clk *, unsigned long);
+ long (*round_rate)(struct clk *, unsigned long);
+ void (*init)(struct clk *);
++ u16 notifier_count;
+ __u8 enable_bit;
+ __s8 usecount;
+ u8 fixed_div;
+@@ -178,6 +212,8 @@ extern void recalculate_root_clocks(void);
+ extern unsigned long followparent_recalc(struct clk *clk);
+ extern void clk_enable_init_clocks(void);
+ unsigned long omap_fixed_divisor_recalc(struct clk *clk);
++extern int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
++extern int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
+ #ifdef CONFIG_CPU_FREQ
+ extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
+ extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
+@@ -204,7 +240,32 @@ extern struct clk dummy_ck;
+ #define RATE_IN_4430 (1 << 5)
+
+ #define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
+-
+ #define RATE_IN_3430ES2PLUS (RATE_IN_3430ES2 | RATE_IN_36XX)
+
++/*
++ * Clk notifier callback types
++ *
++ * Since the notifier is called with interrupts disabled, any actions
++ * taken by callbacks must be extremely fast and lightweight.
++ *
++ * CLK_PRE_RATE_CHANGE - called immediately before the clock rate is
++ * changed. Drivers must immediately terminate any operations that
++ * will be affected by the rate change. Callbacks must always
++ * return NOTIFY_DONE.
++ *
++ * CLK_ABORT_RATE_CHANGE: called if the rate change failed for some
++ * reason after CLK_PRE_RATE_CHANGE. In this case, all registered
++ * notifiers on the clock will be called with
++ * CLK_ABORT_RATE_CHANGE. Callbacks must always return
++ * NOTIFY_DONE.
++ *
++ * CLK_POST_RATE_CHANGE - called after the clock rate change has
++ * successfully completed. Callbacks must always return
++ * NOTIFY_DONE.
++ *
++ */
++#define CLK_PRE_RATE_CHANGE 1
++#define CLK_ABORT_RATE_CHANGE 2
++#define CLK_POST_RATE_CHANGE 3
++
+ #endif
+diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c
+index 186bca8..9418f56 100644
+--- a/arch/arm/plat-omap/omap-pm-noop.c
++++ b/arch/arm/plat-omap/omap-pm-noop.c
+@@ -84,6 +84,7 @@ void omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r)
+ * TI CDP code can call constraint_set here on the VDD2 OPP.
+ */
+ }
++EXPORT_SYMBOL(omap_pm_set_min_bus_tput);
+
+ void omap_pm_set_max_dev_wakeup_lat(struct device *dev, long t)
+ {
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 3b795c5..9504484 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1789,8 +1789,38 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+ }
+ static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+
++static ssize_t
++musb_ma_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct musb *musb = dev_to_musb(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", musb->power_draw);
++}
++static DEVICE_ATTR(mA, 0444, musb_ma_show, NULL);
++
++static ssize_t
++musb_charger_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct musb *musb = dev_to_musb(dev);
++ int charger;
++
++ charger = otg_detect_charger(musb->xceiv);
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", charger);
++}
++static DEVICE_ATTR(charger, 0444, musb_charger_show, NULL);
++
+ #ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
++static ssize_t
++musb_suspend_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct musb *musb = dev_to_musb(dev);
++
++ return sprintf(buf, "%d\n", musb->is_suspended);
++}
++static DEVICE_ATTR(suspend, 0444, musb_suspend_show, NULL);
++
+ /* Gadget drivers can't know that a host is connected so they might want
+ * to start SRP, but users can. This allows userspace to trigger SRP.
+ */
+@@ -1819,7 +1849,10 @@ static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
+ static struct attribute *musb_attributes[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_vbus.attr,
++ &dev_attr_mA.attr,
++ &dev_attr_charger.attr,
+ #ifdef CONFIG_USB_GADGET_MUSB_HDRC
++ &dev_attr_suspend.attr,
+ &dev_attr_srp.attr,
+ #endif
+ NULL
+@@ -1835,12 +1868,24 @@ static const struct attribute_group musb_attr_group = {
+ static void musb_irq_work(struct work_struct *data)
+ {
+ struct musb *musb = container_of(data, struct musb, irq_work);
+- static int old_state;
++ static int old_state, old_suspend;
++ static int old_power_draw;
+
+ if (musb->xceiv->state != old_state) {
+ old_state = musb->xceiv->state;
+ sysfs_notify(&musb->controller->kobj, NULL, "mode");
+ }
++
++ if (musb->power_draw != old_power_draw) {
++ old_power_draw = musb->power_draw;
++ sysfs_notify(&musb->controller->kobj, NULL, "mA");
++ }
++#ifdef CONFIG_USB_GADGET_MUSB_HDRC
++ if (old_suspend != musb->is_suspended) {
++ old_suspend = musb->is_suspended;
++ sysfs_notify(&musb->controller->kobj, NULL, "suspend");
++ }
++#endif
+ }
+
+ /* --------------------------------------------------------------------------
+diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
+index 91d6779..52e4b6a 100644
+--- a/drivers/usb/musb/musb_core.h
++++ b/drivers/usb/musb/musb_core.h
+@@ -401,6 +401,8 @@ struct musb {
+
+ u8 min_power; /* vbus for periph, in mA/2 */
+
++ unsigned power_draw __deprecated; /* current power drawn, gadget only */
++
+ bool is_host;
+
+ int a_wait_bcon; /* VBUS timeout in msecs */
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index 6fca870..ca10aef 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1521,8 +1521,17 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+ {
+ struct musb *musb = gadget_to_musb(gadget);
+
++ /* REVISIT we shouldn't need to be passing
++ * this kind of value to userland. We have
++ * now the blocking notifier for transceivers
++ * which could very well handle this
++ */
++ musb->power_draw = mA;
++ schedule_work(&musb->irq_work);
++
+ if (!musb->xceiv->set_power)
+ return -EOPNOTSUPP;
++
+ return otg_set_power(musb->xceiv, mA);
+ }
+
+diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
+index f8302d0..685943f 100644
+--- a/include/linux/usb/otg.h
++++ b/include/linux/usb/otg.h
+@@ -117,6 +117,8 @@ struct otg_transceiver {
+ /* start or continue HNP role switch */
+ int (*start_hnp)(struct otg_transceiver *otg);
+
++ /* detect a charger */
++ int (*detect_charger)(struct otg_transceiver *otg) __deprecated;
+ };
+
+
+@@ -226,6 +228,15 @@ otg_start_srp(struct otg_transceiver *otg)
+ return otg->start_srp(otg);
+ }
+
++static inline int
++otg_detect_charger(struct otg_transceiver *otg)
++{
++ if (otg->detect_charger)
++ return otg->detect_charger(otg);
++
++ return 0;
++}
++
+ /* notifiers */
+ static inline int
+ otg_register_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-SGX-PVR-driver-for-N900.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-SGX-PVR-driver-for-N900.patch
new file mode 100644
index 0000000000..599fa02c3b
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-SGX-PVR-driver-for-N900.patch
@@ -0,0 +1,45789 @@
+From 63e4a840772ffd345209792ff8e742218111d2c8 Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Tue, 6 Apr 2010 16:35:41 +0300
+Subject: [PATCH 03/11] SGX PVR driver for N900
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch is combination of following patches:
+
+1. gpu: pvr: Add PVR GPU driver
+
+Patch-Mainline: not sure
+Add the SGX PVR driver.
+
+Signed-off-by: Imre Deak <imre.deak@nokia.com>
+Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+Signed-off-by: Topi Pohjolainen <topi.pohjolainen@nokia.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@nokia.com>
+Signed-off-by: Mark Underwood <mark.underwood@imgtec.com>
+Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+Signed-off-by: Sami Kyöstilä <sami.kyostila@nokia.com>
+Signed-off-by: Mark Riding <mark.riding@imgtec.com>
+Signed-off-by: Janusz Sobczak <janusz.sobczak@imgtec.com>
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+
+2. gpu: pvr: compilation fixes for kernel > 2.6.33
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ drivers/gpu/Makefile | 2 +-
+ drivers/gpu/pvr/COPYING | 351 +++
+ drivers/gpu/pvr/Kconfig | 46 +
+ drivers/gpu/pvr/Makefile | 43 +
+ drivers/gpu/pvr/README | 27 +
+ drivers/gpu/pvr/bridged_pvr_bridge.c | 3341 +++++++++++++++++++++++++
+ drivers/gpu/pvr/bridged_pvr_bridge.h | 157 ++
+ drivers/gpu/pvr/bridged_sgx_bridge.c | 1813 ++++++++++++++
+ drivers/gpu/pvr/bridged_sgx_bridge.h | 167 ++
+ drivers/gpu/pvr/bridged_support.c | 77 +
+ drivers/gpu/pvr/bridged_support.h | 35 +
+ drivers/gpu/pvr/buffer_manager.c | 1486 +++++++++++
+ drivers/gpu/pvr/buffer_manager.h | 169 ++
+ drivers/gpu/pvr/bufferclass_example.c | 266 ++
+ drivers/gpu/pvr/bufferclass_example.h | 104 +
+ drivers/gpu/pvr/bufferclass_example_linux.c | 202 ++
+ drivers/gpu/pvr/bufferclass_example_linux.h | 46 +
+ drivers/gpu/pvr/bufferclass_example_private.c | 194 ++
+ drivers/gpu/pvr/bufferclass_example_private.h | 33 +
+ drivers/gpu/pvr/dbgdrvif.h | 318 +++
+ drivers/gpu/pvr/device.h | 186 ++
+ drivers/gpu/pvr/deviceclass.c | 1522 +++++++++++
+ drivers/gpu/pvr/devicemem.c | 1150 +++++++++
+ drivers/gpu/pvr/env_data.h | 57 +
+ drivers/gpu/pvr/env_perproc.h | 51 +
+ drivers/gpu/pvr/event.c | 270 ++
+ drivers/gpu/pvr/event.h | 35 +
+ drivers/gpu/pvr/handle.c | 1443 +++++++++++
+ drivers/gpu/pvr/handle.h | 150 ++
+ drivers/gpu/pvr/hash.c | 382 +++
+ drivers/gpu/pvr/hash.h | 51 +
+ drivers/gpu/pvr/img_defs.h | 46 +
+ drivers/gpu/pvr/img_types.h | 69 +
+ drivers/gpu/pvr/ioctldef.h | 93 +
+ drivers/gpu/pvr/kernelbuffer.h | 55 +
+ drivers/gpu/pvr/kerneldisplay.h | 104 +
+ drivers/gpu/pvr/lock.h | 31 +
+ drivers/gpu/pvr/mem.c | 130 +
+ drivers/gpu/pvr/mm.c | 1501 +++++++++++
+ drivers/gpu/pvr/mm.h | 267 ++
+ drivers/gpu/pvr/mmap.c | 922 +++++++
+ drivers/gpu/pvr/mmap.h | 74 +
+ drivers/gpu/pvr/mmu.c | 1442 +++++++++++
+ drivers/gpu/pvr/mmu.h | 85 +
+ drivers/gpu/pvr/module.c | 304 +++
+ drivers/gpu/pvr/mutex.h | 36 +
+ drivers/gpu/pvr/mutils.h | 37 +
+ drivers/gpu/pvr/ocpdefs.h | 294 +++
+ drivers/gpu/pvr/oemfuncs.h | 41 +
+ drivers/gpu/pvr/omaplfb.h | 140 +
+ drivers/gpu/pvr/omaplfb_displayclass.c | 852 +++++++
+ drivers/gpu/pvr/omaplfb_linux.c | 168 ++
+ drivers/gpu/pvr/osfunc.c | 1585 ++++++++++++
+ drivers/gpu/pvr/osfunc.h | 232 ++
+ drivers/gpu/pvr/osperproc.c | 84 +
+ drivers/gpu/pvr/osperproc.h | 36 +
+ drivers/gpu/pvr/pb.c | 419 ++++
+ drivers/gpu/pvr/pdump.c | 1271 ++++++++++
+ drivers/gpu/pvr/pdump_common.c | 237 ++
+ drivers/gpu/pvr/pdump_km.h | 268 ++
+ drivers/gpu/pvr/pdumpdefs.h | 92 +
+ drivers/gpu/pvr/perproc.c | 266 ++
+ drivers/gpu/pvr/perproc.h | 79 +
+ drivers/gpu/pvr/power.c | 628 +++++
+ drivers/gpu/pvr/power.h | 104 +
+ drivers/gpu/pvr/private_data.h | 35 +
+ drivers/gpu/pvr/proc.c | 421 ++++
+ drivers/gpu/pvr/proc.h | 54 +
+ drivers/gpu/pvr/pvr_bridge.h | 1107 ++++++++
+ drivers/gpu/pvr/pvr_bridge_k.c | 191 ++
+ drivers/gpu/pvr/pvr_bridge_km.h | 190 ++
+ drivers/gpu/pvr/pvr_debug.c | 353 +++
+ drivers/gpu/pvr/pvr_debug.h | 110 +
+ drivers/gpu/pvr/pvrconfig.h | 36 +
+ drivers/gpu/pvr/pvrmmap.h | 36 +
+ drivers/gpu/pvr/pvrmodule.h | 31 +
+ drivers/gpu/pvr/pvrsrv.c | 906 +++++++
+ drivers/gpu/pvr/pvrversion.h | 37 +
+ drivers/gpu/pvr/queue.c | 828 ++++++
+ drivers/gpu/pvr/queue.h | 81 +
+ drivers/gpu/pvr/ra.c | 1163 +++++++++
+ drivers/gpu/pvr/ra.h | 107 +
+ drivers/gpu/pvr/resman.c | 540 ++++
+ drivers/gpu/pvr/resman.h | 92 +
+ drivers/gpu/pvr/services.h | 237 ++
+ drivers/gpu/pvr/services_headers.h | 42 +
+ drivers/gpu/pvr/servicesext.h | 435 ++++
+ drivers/gpu/pvr/servicesint.h | 173 ++
+ drivers/gpu/pvr/sgx530defs.h | 471 ++++
+ drivers/gpu/pvr/sgx_bridge.h | 388 +++
+ drivers/gpu/pvr/sgx_bridge_km.h | 109 +
+ drivers/gpu/pvr/sgx_options.h | 178 ++
+ drivers/gpu/pvr/sgxapi_km.h | 237 ++
+ drivers/gpu/pvr/sgxconfig.h | 75 +
+ drivers/gpu/pvr/sgxcoretypes.h | 41 +
+ drivers/gpu/pvr/sgxdefs.h | 38 +
+ drivers/gpu/pvr/sgxerrata.h | 34 +
+ drivers/gpu/pvr/sgxfeaturedefs.h | 40 +
+ drivers/gpu/pvr/sgxinfo.h | 338 +++
+ drivers/gpu/pvr/sgxinfokm.h | 262 ++
+ drivers/gpu/pvr/sgxinit.c | 1622 ++++++++++++
+ drivers/gpu/pvr/sgxkick.c | 504 ++++
+ drivers/gpu/pvr/sgxmmu.h | 57 +
+ drivers/gpu/pvr/sgxpower.c | 398 +++
+ drivers/gpu/pvr/sgxreset.c | 223 ++
+ drivers/gpu/pvr/sgxscript.h | 65 +
+ drivers/gpu/pvr/sgxtransfer.c | 290 +++
+ drivers/gpu/pvr/sgxutils.c | 750 ++++++
+ drivers/gpu/pvr/sgxutils.h | 77 +
+ drivers/gpu/pvr/srvkm.h | 50 +
+ drivers/gpu/pvr/syscommon.h | 179 ++
+ drivers/gpu/pvr/sysconfig.c | 818 ++++++
+ drivers/gpu/pvr/sysconfig.h | 53 +
+ drivers/gpu/pvr/sysinfo.h | 94 +
+ drivers/gpu/pvr/syslocal.h | 98 +
+ drivers/gpu/pvr/sysutils.c | 719 ++++++
+ drivers/gpu/pvr/tools/Makefile | 29 +
+ drivers/gpu/pvr/tools/dbgdriv.c | 1652 ++++++++++++
+ drivers/gpu/pvr/tools/dbgdriv.h | 183 ++
+ drivers/gpu/pvr/tools/hostfunc.c | 267 ++
+ drivers/gpu/pvr/tools/hostfunc.h | 58 +
+ drivers/gpu/pvr/tools/hotkey.c | 101 +
+ drivers/gpu/pvr/tools/hotkey.h | 60 +
+ drivers/gpu/pvr/tools/ioctl.c | 399 +++
+ drivers/gpu/pvr/tools/ioctl.h | 81 +
+ drivers/gpu/pvr/tools/linuxsrv.h | 47 +
+ drivers/gpu/pvr/tools/main.c | 197 ++
+ drivers/video/Kconfig | 2 +
+ include/video/sgx-util.h | 64 +
+ 129 files changed, 44718 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/gpu/pvr/COPYING
+ create mode 100644 drivers/gpu/pvr/Kconfig
+ create mode 100644 drivers/gpu/pvr/Makefile
+ create mode 100644 drivers/gpu/pvr/README
+ create mode 100644 drivers/gpu/pvr/bridged_pvr_bridge.c
+ create mode 100644 drivers/gpu/pvr/bridged_pvr_bridge.h
+ create mode 100644 drivers/gpu/pvr/bridged_sgx_bridge.c
+ create mode 100644 drivers/gpu/pvr/bridged_sgx_bridge.h
+ create mode 100644 drivers/gpu/pvr/bridged_support.c
+ create mode 100644 drivers/gpu/pvr/bridged_support.h
+ create mode 100644 drivers/gpu/pvr/buffer_manager.c
+ create mode 100644 drivers/gpu/pvr/buffer_manager.h
+ create mode 100644 drivers/gpu/pvr/bufferclass_example.c
+ create mode 100644 drivers/gpu/pvr/bufferclass_example.h
+ create mode 100644 drivers/gpu/pvr/bufferclass_example_linux.c
+ create mode 100644 drivers/gpu/pvr/bufferclass_example_linux.h
+ create mode 100644 drivers/gpu/pvr/bufferclass_example_private.c
+ create mode 100644 drivers/gpu/pvr/bufferclass_example_private.h
+ create mode 100644 drivers/gpu/pvr/dbgdrvif.h
+ create mode 100644 drivers/gpu/pvr/device.h
+ create mode 100644 drivers/gpu/pvr/deviceclass.c
+ create mode 100644 drivers/gpu/pvr/devicemem.c
+ create mode 100644 drivers/gpu/pvr/env_data.h
+ create mode 100644 drivers/gpu/pvr/env_perproc.h
+ create mode 100644 drivers/gpu/pvr/event.c
+ create mode 100644 drivers/gpu/pvr/event.h
+ create mode 100644 drivers/gpu/pvr/handle.c
+ create mode 100644 drivers/gpu/pvr/handle.h
+ create mode 100644 drivers/gpu/pvr/hash.c
+ create mode 100644 drivers/gpu/pvr/hash.h
+ create mode 100644 drivers/gpu/pvr/img_defs.h
+ create mode 100644 drivers/gpu/pvr/img_types.h
+ create mode 100644 drivers/gpu/pvr/ioctldef.h
+ create mode 100644 drivers/gpu/pvr/kernelbuffer.h
+ create mode 100644 drivers/gpu/pvr/kerneldisplay.h
+ create mode 100644 drivers/gpu/pvr/lock.h
+ create mode 100644 drivers/gpu/pvr/mem.c
+ create mode 100644 drivers/gpu/pvr/mm.c
+ create mode 100644 drivers/gpu/pvr/mm.h
+ create mode 100644 drivers/gpu/pvr/mmap.c
+ create mode 100644 drivers/gpu/pvr/mmap.h
+ create mode 100644 drivers/gpu/pvr/mmu.c
+ create mode 100644 drivers/gpu/pvr/mmu.h
+ create mode 100644 drivers/gpu/pvr/module.c
+ create mode 100644 drivers/gpu/pvr/mutex.h
+ create mode 100644 drivers/gpu/pvr/mutils.h
+ create mode 100644 drivers/gpu/pvr/ocpdefs.h
+ create mode 100644 drivers/gpu/pvr/oemfuncs.h
+ create mode 100644 drivers/gpu/pvr/omaplfb.h
+ create mode 100644 drivers/gpu/pvr/omaplfb_displayclass.c
+ create mode 100644 drivers/gpu/pvr/omaplfb_linux.c
+ create mode 100644 drivers/gpu/pvr/osfunc.c
+ create mode 100644 drivers/gpu/pvr/osfunc.h
+ create mode 100644 drivers/gpu/pvr/osperproc.c
+ create mode 100644 drivers/gpu/pvr/osperproc.h
+ create mode 100644 drivers/gpu/pvr/pb.c
+ create mode 100644 drivers/gpu/pvr/pdump.c
+ create mode 100644 drivers/gpu/pvr/pdump_common.c
+ create mode 100644 drivers/gpu/pvr/pdump_km.h
+ create mode 100644 drivers/gpu/pvr/pdumpdefs.h
+ create mode 100644 drivers/gpu/pvr/perproc.c
+ create mode 100644 drivers/gpu/pvr/perproc.h
+ create mode 100644 drivers/gpu/pvr/power.c
+ create mode 100644 drivers/gpu/pvr/power.h
+ create mode 100644 drivers/gpu/pvr/private_data.h
+ create mode 100644 drivers/gpu/pvr/proc.c
+ create mode 100644 drivers/gpu/pvr/proc.h
+ create mode 100644 drivers/gpu/pvr/pvr_bridge.h
+ create mode 100644 drivers/gpu/pvr/pvr_bridge_k.c
+ create mode 100644 drivers/gpu/pvr/pvr_bridge_km.h
+ create mode 100644 drivers/gpu/pvr/pvr_debug.c
+ create mode 100644 drivers/gpu/pvr/pvr_debug.h
+ create mode 100644 drivers/gpu/pvr/pvrconfig.h
+ create mode 100644 drivers/gpu/pvr/pvrmmap.h
+ create mode 100644 drivers/gpu/pvr/pvrmodule.h
+ create mode 100644 drivers/gpu/pvr/pvrsrv.c
+ create mode 100644 drivers/gpu/pvr/pvrversion.h
+ create mode 100644 drivers/gpu/pvr/queue.c
+ create mode 100644 drivers/gpu/pvr/queue.h
+ create mode 100644 drivers/gpu/pvr/ra.c
+ create mode 100644 drivers/gpu/pvr/ra.h
+ create mode 100644 drivers/gpu/pvr/resman.c
+ create mode 100644 drivers/gpu/pvr/resman.h
+ create mode 100644 drivers/gpu/pvr/services.h
+ create mode 100644 drivers/gpu/pvr/services_headers.h
+ create mode 100644 drivers/gpu/pvr/servicesext.h
+ create mode 100644 drivers/gpu/pvr/servicesint.h
+ create mode 100644 drivers/gpu/pvr/sgx530defs.h
+ create mode 100644 drivers/gpu/pvr/sgx_bridge.h
+ create mode 100644 drivers/gpu/pvr/sgx_bridge_km.h
+ create mode 100644 drivers/gpu/pvr/sgx_options.h
+ create mode 100644 drivers/gpu/pvr/sgxapi_km.h
+ create mode 100644 drivers/gpu/pvr/sgxconfig.h
+ create mode 100644 drivers/gpu/pvr/sgxcoretypes.h
+ create mode 100644 drivers/gpu/pvr/sgxdefs.h
+ create mode 100644 drivers/gpu/pvr/sgxerrata.h
+ create mode 100644 drivers/gpu/pvr/sgxfeaturedefs.h
+ create mode 100644 drivers/gpu/pvr/sgxinfo.h
+ create mode 100644 drivers/gpu/pvr/sgxinfokm.h
+ create mode 100644 drivers/gpu/pvr/sgxinit.c
+ create mode 100644 drivers/gpu/pvr/sgxkick.c
+ create mode 100644 drivers/gpu/pvr/sgxmmu.h
+ create mode 100644 drivers/gpu/pvr/sgxpower.c
+ create mode 100644 drivers/gpu/pvr/sgxreset.c
+ create mode 100644 drivers/gpu/pvr/sgxscript.h
+ create mode 100644 drivers/gpu/pvr/sgxtransfer.c
+ create mode 100644 drivers/gpu/pvr/sgxutils.c
+ create mode 100644 drivers/gpu/pvr/sgxutils.h
+ create mode 100644 drivers/gpu/pvr/srvkm.h
+ create mode 100644 drivers/gpu/pvr/syscommon.h
+ create mode 100644 drivers/gpu/pvr/sysconfig.c
+ create mode 100644 drivers/gpu/pvr/sysconfig.h
+ create mode 100644 drivers/gpu/pvr/sysinfo.h
+ create mode 100644 drivers/gpu/pvr/syslocal.h
+ create mode 100644 drivers/gpu/pvr/sysutils.c
+ create mode 100644 drivers/gpu/pvr/tools/Makefile
+ create mode 100644 drivers/gpu/pvr/tools/dbgdriv.c
+ create mode 100644 drivers/gpu/pvr/tools/dbgdriv.h
+ create mode 100644 drivers/gpu/pvr/tools/hostfunc.c
+ create mode 100644 drivers/gpu/pvr/tools/hostfunc.h
+ create mode 100644 drivers/gpu/pvr/tools/hotkey.c
+ create mode 100644 drivers/gpu/pvr/tools/hotkey.h
+ create mode 100644 drivers/gpu/pvr/tools/ioctl.c
+ create mode 100644 drivers/gpu/pvr/tools/ioctl.h
+ create mode 100644 drivers/gpu/pvr/tools/linuxsrv.h
+ create mode 100644 drivers/gpu/pvr/tools/main.c
+ create mode 100644 include/video/sgx-util.h
+
+diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
+index 30879df..21f750f 100644
+--- a/drivers/gpu/Makefile
++++ b/drivers/gpu/Makefile
+@@ -1 +1 @@
+-obj-y += drm/ vga/
++obj-y += drm/ vga/ pvr/
+diff --git a/drivers/gpu/pvr/COPYING b/drivers/gpu/pvr/COPYING
+new file mode 100644
+index 0000000..80dd76b
+--- /dev/null
++++ b/drivers/gpu/pvr/COPYING
+@@ -0,0 +1,351 @@
++
++This software is Copyright (C) 2008 Imagination Technologies Ltd.
++ All rights reserved.
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2, which is displayed below.
++
++-------------------------------------------------------------------------
++
++ GNU GENERAL PUBLIC LICENSE
++ Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++ Preamble
++
++ The licenses for most software are designed to take away your
++freedom to share and change it. By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users. This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it. (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.) You can apply it to
++your programs, too.
++
++ When we speak of free software, we are referring to freedom, not
++price. Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++ To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++ For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have. You must make sure that they, too, receive or can get the
++source code. And you must show them these terms so they know their
++rights.
++
++ We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++ Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software. If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++ Finally, any free program is threatened constantly by software
++patents. We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary. To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++ The precise terms and conditions for copying, distribution and
++modification follow.
++
++ GNU GENERAL PUBLIC LICENSE
++ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++ 0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License. The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language. (Hereinafter, translation is included without limitation in
++the term "modification".) Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope. The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++ 1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++ 2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++ a) You must cause the modified files to carry prominent notices
++ stating that you changed the files and the date of any change.
++
++ b) You must cause any work that you distribute or publish, that in
++ whole or in part contains or is derived from the Program or any
++ part thereof, to be licensed as a whole at no charge to all third
++ parties under the terms of this License.
++
++ c) If the modified program normally reads commands interactively
++ when run, you must cause it, when started running for such
++ interactive use in the most ordinary way, to print or display an
++ announcement including an appropriate copyright notice and a
++ notice that there is no warranty (or else, saying that you provide
++ a warranty) and that users may redistribute the program under
++ these conditions, and telling the user how to view a copy of this
++ License. (Exception: if the Program itself is interactive but
++ does not normally print such an announcement, your work based on
++ the Program is not required to print an announcement.)
++
++These requirements apply to the modified work as a whole. If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works. But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++ 3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++ a) Accompany it with the complete corresponding machine-readable
++ source code, which must be distributed under the terms of Sections
++ 1 and 2 above on a medium customarily used for software interchange; or,
++
++ b) Accompany it with a written offer, valid for at least three
++ years, to give any third party, for a charge no more than your
++ cost of physically performing source distribution, a complete
++ machine-readable copy of the corresponding source code, to be
++ distributed under the terms of Sections 1 and 2 above on a medium
++ customarily used for software interchange; or,
++
++ c) Accompany it with the information you received as to the offer
++ to distribute corresponding source code. (This alternative is
++ allowed only for noncommercial distribution and only if you
++ received the program in object code or executable form with such
++ an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it. For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable. However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++
++ 4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License. Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++ 5. You are not required to accept this License, since you have not
++signed it. However, nothing else grants you permission to modify or
++distribute the Program or its derivative works. These actions are
++prohibited by law if you do not accept this License. Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++ 6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions. You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++ 7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License. If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all. For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices. Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++
++ 8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded. In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++ 9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time. Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number. If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation. If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++ 10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission. For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this. Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++ NO WARRANTY
++
++ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++ END OF TERMS AND CONDITIONS
++
++ Appendix: How to Apply These Terms to Your New Programs
++
++ If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++ To do so, attach the following notices to the program. It is safest
++to attach them to the start of each source file to most effectively
++convey the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++ <one line to give the program's name and a brief idea of what it does.>
++ Copyright (C) 19yy <name of author>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++
++Also add information on how to contact you by electronic and paper mail.
++
++If the program is interactive, make it output a short notice like this
++when it starts in an interactive mode:
++
++ Gnomovision version 69, Copyright (C) 19yy name of author
++ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++ This is free software, and you are welcome to redistribute it
++ under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License. Of course, the commands you use may
++be called something other than `show w' and `show c'; they could even be
++mouse-clicks or menu items--whatever suits your program.
++
++You should also get your employer (if you work as a programmer) or your
++school, if any, to sign a "copyright disclaimer" for the program, if
++necessary. Here is a sample; alter the names:
++
++ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
++ `Gnomovision' (which makes passes at compilers) written by James Hacker.
++
++ <signature of Ty Coon>, 1 April 1989
++ Ty Coon, President of Vice
++
++This General Public License does not permit incorporating your program into
++proprietary programs. If your program is a subroutine library, you may
++consider it more useful to permit linking proprietary applications with the
++library. If this is what you want to do, use the GNU Library General
++Public License instead of this License.
++
++-------------------------------------------------------------------------
++
+diff --git a/drivers/gpu/pvr/Kconfig b/drivers/gpu/pvr/Kconfig
+new file mode 100644
+index 0000000..2021d31
+--- /dev/null
++++ b/drivers/gpu/pvr/Kconfig
+@@ -0,0 +1,46 @@
++menuconfig PVR
++ tristate "PowerVR Services"
++ depends on OMAP2_DSS
++
++if PVR
++
++choice
++ prompt "Build type"
++ default PVR_RELEASE_N900
++config PVR_RELEASE_N900
++ bool "Release"
++config PVR_DEBUG
++ bool "Debug"
++config PVR_TIMING
++ bool "Timing"
++endchoice
++
++config PVR_DEBUG_PDUMP
++ tristate "PDUMP debug support"
++ depends on PVR
++ default n
++
++config PVR_EDM_DEBUG
++ depends on PVR
++ bool "Enable EDM trace"
++ default n
++ help
++ EDM trace helps to track down some HW recovery events. You _must_
++ also enabled EDM (PVRSRV_USSE_EDM_STATUS_DEBUG) in the userland
++ libraries otherwise the drivers won't start
++
++config PVR_NO_HARDWARE
++ bool
++ default n
++
++config PVR_FORCE_CLOCKS_ON
++ bool "Force clocks on"
++ depends on !PVR_NO_HARDWARE
++ default n
++
++config PVR_EXAMPLES
++ tristate "Example code"
++ default n
++
++endif
++
+diff --git a/drivers/gpu/pvr/Makefile b/drivers/gpu/pvr/Makefile
+new file mode 100644
+index 0000000..81db9be
+--- /dev/null
++++ b/drivers/gpu/pvr/Makefile
+@@ -0,0 +1,43 @@
++obj-$(CONFIG_PVR) += omaplfb.o pvrsrvkm.o
++
++omaplfb-objs := omaplfb_displayclass.o omaplfb_linux.o
++
++pvrsrvkm-objs := osfunc.o mmap.o module.o pdump.o proc.o \
++ pvr_bridge_k.o mm.o event.o \
++ buffer_manager.o devicemem.o deviceclass.o \
++ handle.o hash.o pvrsrv.o queue.o ra.o \
++ resman.o power.o mem.o bridged_pvr_bridge.o \
++ sgxinit.o sgxreset.o sgxutils.o sgxkick.o \
++ sgxtransfer.o mmu.o pb.o perproc.o sysconfig.o \
++ sysutils.o osperproc.o bridged_support.o \
++ bridged_sgx_bridge.o sgxpower.o pdump_common.o
++
++pvrsrvkm-objs-$(CONFIG_PVR_DEBUG) += pvr_debug.o
++pvrsrvkm-objs-$(CONFIG_PVR_TIMING) += pvr_debug.o
++
++pvrsrvkm-objs += $(pvrsrvkm-objs-y) $(pvrsrvkm-objs-m)
++
++obj-$(CONFIG_PVR_EXAMPLES) += bc_example.o
++
++bc_example-objs := bufferclass_example.o bufferclass_example_linux.o \
++ bufferclass_example_private.o
++
++
++obj-$(CONFIG_PVR_DEBUG_PDUMP) += pvrdbg.o
++
++pvrdbg-objs := tools/main.o tools/dbgdriv.o tools/ioctl.o \
++ tools/hostfunc.o tools/hotkey.o
++
++
++DATE := $(shell date "+%a %B %d %Z %Y" )
++CBUILD := -O2 \
++ -DPVR_BUILD_DIR="\"$(PVR_BUILD_DIR)\"" \
++ -DPVR_BUILD_DATE="\"$(DATE)\""
++
++ccflags-y += $(CBUILD) -include $(srctree)/$(src)/pvrconfig.h
++
++ccflags-$(CONFIG_PVR_DEBUG_PDUMP) += -I $(srctree)/$(src)/tools \
++ -I $(srctree)/$(src)
++ccflags-y += $(ccflags-m)
++
++
+diff --git a/drivers/gpu/pvr/README b/drivers/gpu/pvr/README
+new file mode 100644
+index 0000000..7da0e62
+--- /dev/null
++++ b/drivers/gpu/pvr/README
+@@ -0,0 +1,27 @@
++
++SGX Embedded Systems DDK for Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++
++About
++-------------------------------------------
++
++This is the Imagination Technologies SGX DDK for the Linux kernel.
++
++
++License
++-------------------------------------------
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2.
++
++The full GNU General Public License version 2 is included in this
++distribution in the file called "COPYING".
++
++
++Contact information:
++-------------------------------------------
++
++Imagination Technologies Ltd. <gpl-support@imgtec.com>
++Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+diff --git a/drivers/gpu/pvr/bridged_pvr_bridge.c b/drivers/gpu/pvr/bridged_pvr_bridge.c
+new file mode 100644
+index 0000000..fa1a371
+--- /dev/null
++++ b/drivers/gpu/pvr/bridged_pvr_bridge.c
+@@ -0,0 +1,3341 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#include "pdump_km.h"
++#include "syscommon.h"
++
++#include "bridged_pvr_bridge.h"
++#include "bridged_sgx_bridge.h"
++#include "env_data.h"
++
++#include "mmap.h"
++
++#include <linux/kernel.h>
++#include <linux/pagemap.h> /* for cache flush */
++#include <linux/mm.h>
++#include <linux/sched.h>
++
++struct PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++ g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++struct PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap;
++
++#if defined(DEBUG_BRIDGE_KM)
++enum PVRSRV_ERROR
++CopyFromUserWrapper(struct PVRSRV_PER_PROCESS_DATA *pProcData,
++ u32 ui32BridgeID, void *pvDest, void __user *pvSrc,
++ u32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes +=
++ ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes += ui32Size;
++ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++
++enum PVRSRV_ERROR CopyToUserWrapper(struct PVRSRV_PER_PROCESS_DATA *pProcData,
++ u32 ui32BridgeID, void __user *pvDest, void *pvSrc,
++ u32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes +=
++ ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes += ui32Size;
++ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#endif
++
++static int PVRSRVEnumerateDevicesBW(u32 ui32BridgeID, void *psBridgeIn,
++ struct PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ psEnumDeviceOUT->eError =
++ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++ psEnumDeviceOUT->asDeviceIdentifier);
++
++ return 0;
++}
++
++static int PVRSRVAcquireDeviceDataBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
++ struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
++ psAcquireDevInfoIN->eDeviceType,
++ &hDevCookieInt);
++ if (psAcquireDevInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psAcquireDevInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psAcquireDevInfoOUT->hDevCookie,
++ hDevCookieInt,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++static int PVRSRVCreateDeviceMemContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
++ struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ u32 i;
++ IMG_BOOL bCreated;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc,
++ PVRSRV_MAX_CLIENT_HEAPS + 1);
++
++ psCreateDevMemContextOUT->eError = PVRSRVLookupHandle(
++ psPerProc->psHandleBase, &hDevCookieInt,
++ psCreateDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psCreateDevMemContextOUT->eError = PVRSRVCreateDeviceMemContextKM(
++ hDevCookieInt, psPerProc,
++ &hDevMemContextInt,
++ &psCreateDevMemContextOUT->ui32ClientHeapCount,
++ &psCreateDevMemContextOUT->sHeapInfo[0],
++ &bCreated, pbSharedDeviceMemHeap);
++
++ if (psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (bCreated) {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ } else {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if (psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ for (i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++) {
++ void *hDevMemHeapExt;
++
++ if (abSharedDeviceMemHeap[i]) {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psCreateDevMemContextOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ } else {
++ if (bCreated) {
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].
++ hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDevMemContextOUT->
++ hDevMemContext);
++ } else {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(
++ psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psCreateDevMemContextOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if (psCreateDevMemContextOUT->eError !=
++ PVRSRV_OK)
++ return 0;
++ }
++ }
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap =
++ hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int PVRSRVDestroyDeviceMemContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ IMG_BOOL bDestroyed;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psDestroyDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevMemContextInt,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVDestroyDeviceMemContextKM(hDevCookieInt,
++ hDevMemContextInt, &bDestroyed);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (bDestroyed)
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDevMemContextIN->
++ hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ return 0;
++}
++
++static int PVRSRVGetDeviceMemHeapInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc,
++ PVRSRV_MAX_CLIENT_HEAPS);
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetDevMemHeapInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetDevMemHeapInfoIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
++ hDevMemContextInt,
++ &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
++ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
++ pbSharedDeviceMemHeap);
++
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ for (i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++) {
++ void *hDevMemHeapExt;
++ if (abSharedDeviceMemHeap[i]) {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ } else {
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap =
++ hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int PVRSRVAllocDeviceMemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ void *hDevCookieInt;
++ void *hDevMemHeapInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2);
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psAllocDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++ psAllocDeviceMemIN->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++ if (psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVAllocDeviceMemKM(hDevCookieInt, psPerProc, hDevMemHeapInt,
++ psAllocDeviceMemIN->ui32Attribs,
++ psAllocDeviceMemIN->ui32Size,
++ psAllocDeviceMemIN->ui32Alignment,
++ &psMemInfo);
++
++ if (psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo, 0,
++ sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = NULL;
++ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize =
++ psMemInfo->ui32AllocSize;
++ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo =
++ psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) {
++ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo, 0,
++ sizeof(struct PVRSRV_CLIENT_SYNC_INFO));
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = NULL;
++ psAllocDeviceMemOUT->psKernelSyncInfo = NULL;
++ } else {
++
++ psAllocDeviceMemOUT->psKernelSyncInfo =
++ psMemInfo->psKernelSyncInfo;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.
++ hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psAllocDeviceMemOUT->sClientMemInfo.
++ hKernelMemInfo);
++
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psAllocDeviceMemOUT->sClientSyncInfo;
++
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static int PVRSRVFreeDeviceMemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *pvKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psFreeDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psKernelMemInfo = (struct PVRSRV_KERNEL_MEM_INFO *)pvKernelMemInfo;
++ if (psKernelMemInfo->ui32RefCount != 1) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVFreeDeviceMemBW: "
++ "mappings are open in other processes");
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ pvKernelMemInfo);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int PVRSRVExportDeviceMemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
++ struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EXPORT_DEVICEMEM);
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psExportDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVExportDeviceMemBW: can't find devcookie");
++ return 0;
++ }
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo,
++ psExportDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVExportDeviceMemBW: can't find kernel meminfo");
++ return 0;
++ }
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo, PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psExportDeviceMemOUT->eError == PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: "
++ "allocation is already exported");
++ return 0;
++ }
++
++ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: "
++ "failed to allocate handle from global handle list");
++ return 0;
++ }
++
++ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
++
++ return 0;
++}
++
++static int PVRSRVMapDeviceMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
++ struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = NULL;
++ struct PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = NULL;
++ void *hDstDevMemHeap = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEV_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2);
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (void **)&psSrcKernelMemInfo,
++ psMapDevMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psMapDevMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDstDevMemHeap,
++ psMapDevMemIN->hDstDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if (psMapDevMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
++ psSrcKernelMemInfo,
++ hDstDevMemHeap,
++ &psDstKernelMemInfo);
++ if (psMapDevMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo, 0,
++ sizeof(psMapDevMemOUT->sDstClientMemInfo));
++ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo, 0,
++ sizeof(psMapDevMemOUT->sDstClientSyncInfo));
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
++ psDstKernelMemInfo->pvLinAddrKM;
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = NULL;
++ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr =
++ psDstKernelMemInfo->sDevVAddr;
++ psMapDevMemOUT->sDstClientMemInfo.ui32Flags =
++ psDstKernelMemInfo->ui32Flags;
++ psMapDevMemOUT->sDstClientMemInfo.ui32AllocSize =
++ psDstKernelMemInfo->ui32AllocSize;
++ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo =
++ psDstKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
++ psDstKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = NULL;
++ psMapDevMemOUT->psDstKernelSyncInfo = NULL;
++
++ if (psDstKernelMemInfo->psKernelSyncInfo) {
++ psMapDevMemOUT->psDstKernelSyncInfo =
++ psDstKernelMemInfo->psKernelSyncInfo;
++
++ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->
++ sWriteOpsCompleteDevVAddr;
++ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->
++ sReadOpsCompleteDevVAddr;
++
++ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->
++ sMemBlk.hOSMemHandle;
++
++ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo =
++ &psMapDevMemOUT->sDstClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientSyncInfo.
++ hKernelSyncInfo,
++ psDstKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevMemOUT->sDstClientMemInfo.
++ hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int PVRSRVUnmapDeviceMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **) &psKernelMemInfo,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int FlushCacheDRI(u32 ui32Type, u32 ui32Virt, u32 ui32Length)
++{
++ switch (ui32Type) {
++ case DRM_PVR2D_CFLUSH_FROM_GPU:
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n",
++ ui32Virt, ui32Length);
++#ifdef CONFIG_ARM
++ dmac_map_area((const void *)ui32Virt, ui32Length, DMA_FROM_DEVICE);
++#endif
++ return 0;
++ case DRM_PVR2D_CFLUSH_TO_GPU:
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n",
++ ui32Virt, ui32Length);
++#ifdef CONFIG_ARM
++ dmac_map_area((const void *)ui32Virt, ui32Length, DMA_TO_DEVICE);
++#endif
++ return 0;
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "Invalid cflush type 0x%x\n",
++ ui32Type);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++ static int PVRSRVCacheFlushDRIBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER *psCacheFlushIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++ {
++ struct vm_area_struct *vma;
++ unsigned long start;
++ size_t len;
++ int type;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CACHE_FLUSH_DRM);
++
++ start = psCacheFlushIN->ui32Virt;
++ len = psCacheFlushIN->ui32Length;
++ type = psCacheFlushIN->ui32Type;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, start);
++ if (vma == NULL || vma->vm_start > start ||
++ vma->vm_end < start + len) {
++ up_read(&current->mm->mmap_sem);
++ pr_err("PVR: %s: invalid address %08lx %zu %c\n",
++ __func__, start, len,
++ type == DRM_PVR2D_CFLUSH_TO_GPU ? 'c' :
++ type == DRM_PVR2D_CFLUSH_FROM_GPU ? 'i' :
++ '?');
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError = FlushCacheDRI(type, start, len);
++ up_read(&current->mm->mmap_sem);
++ return 0;
++ }
++
++static int PVRSRVMapDeviceClassMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
++ struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ void *hOSMapInfo;
++ void *hDeviceClassBufferInt;
++ void *hDevMemContextInt;
++ enum PVRSRV_HANDLE_TYPE eHandleType;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2);
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ &hDeviceClassBufferInt, &eHandleType,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ if (psMapDevClassMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psMapDevClassMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psMapDevClassMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ switch (eHandleType) {
++ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++ break;
++ default:
++ psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVMapDeviceClassMemoryKM(psPerProc, hDevMemContextInt,
++ hDeviceClassBufferInt, &psMemInfo, &hOSMapInfo);
++
++ if (psMapDevClassMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo, 0,
++ sizeof(psMapDevClassMemOUT->sClientMemInfo));
++ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo, 0,
++ sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = NULL;
++ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize =
++ psMemInfo->ui32AllocSize;
++ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo =
++ psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = NULL;
++ psMapDevClassMemOUT->psKernelSyncInfo = NULL;
++
++ if (psMemInfo->psKernelSyncInfo) {
++ psMapDevClassMemOUT->psKernelSyncInfo =
++ psMemInfo->psKernelSyncInfo;
++
++ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->
++ psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psMapDevClassMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevClassMemOUT->sClientMemInfo.
++ hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int PVRSRVUnmapDeviceClassMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int PVRSRVWrapExtMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ u32 ui32PageTableSize = 0;
++ struct IMG_SYS_PHYADDR *psSysPAddr = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2);
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psWrapExtMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psWrapExtMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psWrapExtMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psWrapExtMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (psWrapExtMemIN->ui32NumPageTableEntries) {
++ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++ * sizeof(struct IMG_SYS_PHYADDR);
++
++ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (void **)&psSysPAddr, NULL));
++
++ if (CopyFromUserWrapper(psPerProc, ui32BridgeID, psSysPAddr,
++ psWrapExtMemIN->psSysPAddr,
++ ui32PageTableSize) != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize,
++ (void *) psSysPAddr, NULL);
++ return -EFAULT;
++ }
++ }
++
++ psWrapExtMemOUT->eError = PVRSRVWrapExtMemoryKM(hDevCookieInt,
++ psPerProc, hDevMemContextInt,
++ psWrapExtMemIN->ui32ByteSize,
++ psWrapExtMemIN->ui32PageOffset,
++ psWrapExtMemIN->bPhysContig,
++ psSysPAddr, psWrapExtMemIN->pvLinAddr,
++ &psMemInfo);
++ if (psWrapExtMemIN->ui32NumPageTableEntries)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize,
++ (void *)psSysPAddr, NULL);
++ if (psWrapExtMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = psMemInfo->pvLinAddrKM;
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = NULL;
++ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psWrapExtMemOUT->sClientMemInfo.ui32AllocSize =
++ psMemInfo->ui32AllocSize;
++ psWrapExtMemOUT->sClientMemInfo.hMappingInfo =
++ psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo, PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.
++ hOSMemHandle;
++
++ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psWrapExtMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ (void *)psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int PVRSRVUnwrapExtMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo, psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVUnwrapExtMemoryKM((struct PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int PVRSRVGetFreeDeviceMemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
++ struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psGetFreeDeviceMemOUT->eError =
++ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++ &psGetFreeDeviceMemOUT->ui32Total,
++ &psGetFreeDeviceMemOUT->ui32Free,
++ &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++ return 0;
++}
++
++static int PVRMMapOSMemHandleToMMapDataBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
++ struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
++
++ psMMapDataOUT->eError =
++ PVRMMapOSMemHandleToMMapData(psPerProc, psMMapDataIN->hMHandle,
++ &psMMapDataOUT->ui32MMapOffset,
++ &psMMapDataOUT->ui32ByteOffset,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++ return 0;
++}
++
++static int PVRMMapReleaseMMapDataBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
++ struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
++
++ psMMapDataOUT->eError = PVRMMapReleaseMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->bMUnmap,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++ return 0;
++}
++
++#ifdef PDUMP
++static int PDumpIsCaptureFrameBW(u32 ui32BridgeID, void *psBridgeIn,
++ struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int PDumpCommentBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++ psPDumpCommentIN->ui32Flags);
++ return 0;
++}
++
++static int PDumpSetFrameBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++ return 0;
++}
++
++static int PDumpRegWithFlagsBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpRegDumpIN->sHWReg.ui32RegVal,
++ psPDumpRegDumpIN->ui32Flags);
++
++ return 0;
++}
++
++static int PDumpRegPolBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,
++ psPDumpRegPolIN->sHWReg.ui32RegVal,
++ psPDumpRegPolIN->ui32Mask,
++ psPDumpRegPolIN->ui32Flags);
++
++ return 0;
++}
++
++static int PDumpMemPolBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemPolIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((struct PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
++ psPDumpMemPolIN->ui32Offset,
++ psPDumpMemPolIN->ui32Value,
++ psPDumpMemPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ psPDumpMemPolIN->bLastFrame,
++ psPDumpMemPolIN->bOverwrite,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static int PDumpMemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemDumpIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PDumpMemUM(psPerProc, psPDumpMemDumpIN->pvAltLinAddr,
++ psPDumpMemDumpIN->pvLinAddr,
++ pvMemInfo, psPDumpMemDumpIN->ui32Offset,
++ psPDumpMemDumpIN->ui32Bytes,
++ psPDumpMemDumpIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static int PDumpBitmapBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError = PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++ psPDumpBitmapIN->ui32FileOffset,
++ psPDumpBitmapIN->ui32Width,
++ psPDumpBitmapIN->ui32Height,
++ psPDumpBitmapIN->ui32StrideInBytes,
++ psPDumpBitmapIN->sDevBaseAddr,
++ psPDumpBitmapIN->ui32Size,
++ psPDumpBitmapIN->ePixelFormat,
++ psPDumpBitmapIN->eMemFormat,
++ psPDumpBitmapIN->ui32Flags);
++
++ return 0;
++}
++
++static int PDumpReadRegBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++ psPDumpReadRegIN->ui32FileOffset,
++ psPDumpReadRegIN->ui32Address,
++ psPDumpReadRegIN->ui32Size,
++ psPDumpReadRegIN->ui32Flags);
++
++ return 0;
++}
++
++static int PDumpDriverInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32PDumpFlags;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ ui32PDumpFlags = 0;
++ if (psPDumpDriverInfoIN->bContinuous)
++ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++ psRetOUT->eError = PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++ ui32PDumpFlags);
++
++ return 0;
++}
++
++static int PDumpSyncDumpBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++ void *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncDumpIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc, psPDumpSyncDumpIN->pvAltLinAddr, NULL,
++ ((struct PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->
++ psSyncDataMemInfoKM,
++ psPDumpSyncDumpIN->ui32Offset, ui32Bytes, 0,
++ MAKEUNIQUETAG(((struct PVRSRV_KERNEL_SYNC_INFO *)
++ pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static int PDumpSyncPolBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32Offset;
++ void *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncPolIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (psPDumpSyncPolIN->bIsRead)
++ ui32Offset = offsetof(struct PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete);
++ else
++ ui32Offset = offsetof(struct PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete);
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((struct PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->
++ psSyncDataMemInfoKM, ui32Offset,
++ psPDumpSyncPolIN->ui32Value,
++ psPDumpSyncPolIN->ui32Mask, PDUMP_POLL_OPERATOR_EQUAL,
++ IMG_FALSE, IMG_FALSE,
++ MAKEUNIQUETAG(((struct PVRSRV_KERNEL_SYNC_INFO *)
++ pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static int PDumpPDRegBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG *psPDumpPDRegDumpIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpPDRegDumpIN->sHWReg.ui32RegVal, PDUMP_PD_UNIQUETAG);
++
++ psRetOUT->eError = PVRSRV_OK;
++ return 0;
++}
++
++static int PDumpCycleCountRegReadBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ
++ *psPDumpCycleCountRegReadIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++ psPDumpCycleCountRegReadIN->bLastFrame);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int PDumpPDDevPAddrBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++ psPDumpPDDevPAddrIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PDumpPDDevPAddrKM((struct PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++ psPDumpPDDevPAddrIN->ui32Offset,
++ psPDumpPDDevPAddrIN->sPDDevPAddr,
++ MAKEUNIQUETAG(pvMemInfo), PDUMP_PD_UNIQUETAG);
++ return 0;
++}
++
++static int PDumpStartInitPhaseBW(u32 ui32BridgeID, void *psBridgeIn,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStartInitPhaseKM();
++
++ return 0;
++}
++
++static int PDumpStopInitPhaseBW(u32 ui32BridgeID, void *psBridgeIn,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStopInitPhaseKM();
++
++ return 0;
++}
++
++#endif
++
++static int PVRSRVGetMiscInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ enum PVRSRV_ERROR eError;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++
++ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo, &psGetMiscInfoIN->sMiscInfo,
++ sizeof(struct PVRSRV_MISC_INFO));
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest &
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest &
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0)) {
++
++ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest &
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest &
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0)) {
++
++ ASSIGN_AND_EXIT_ON_ERROR(
++ psGetMiscInfoOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (void **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++ NULL));
++
++ psGetMiscInfoOUT->eError =
++ PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++
++ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++ (void __force __user *)
++ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (void *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++ NULL);
++
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr =
++ psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoBW Error copy to user");
++ return -EFAULT;
++ }
++ } else {
++ psGetMiscInfoOUT->eError =
++ PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++ }
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest &
++ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) {
++ psGetMiscInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.
++ sGlobalEventObject.hOSEventKM,
++ psGetMiscInfoOUT->sMiscInfo.
++ sGlobalEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle) {
++ psGetMiscInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.
++ hSOCTimerRegisterOSMemHandle,
++ psGetMiscInfoOUT->sMiscInfo.
++ hSOCTimerRegisterOSMemHandle,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ return 0;
++}
++
++static int PVRSRVConnectBW(u32 ui32BridgeID, void *psBridgeIn,
++ struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++
++ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++ psConnectServicesOUT->eError = PVRSRV_OK;
++
++#if defined(PDUMP)
++
++ {
++ struct SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++ psSysData->bPowerUpPDumped = IMG_FALSE;
++ }
++#endif
++
++ return 0;
++}
++
++static int PVRSRVDisconnectBW(u32 ui32BridgeID, void *psBridgeIn,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int PVRSRVEnumerateDCBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
++ struct PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++ psEnumDispClassOUT->eError =
++ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++ &psEnumDispClassOUT->ui32NumDevices,
++ &psEnumDispClassOUT->ui32DevID[0]);
++
++ return 0;
++}
++
++static int PVRSRVOpenDCDeviceBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
++ struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc,
++ 1);
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenDispClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psOpenDispClassDeviceOUT->eError = PVRSRVOpenDCDeviceKM(psPerProc,
++ psOpenDispClassDeviceIN->ui32DeviceID,
++ hDevCookieInt, &hDispClassInfoInt);
++
++ if (psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ return 0;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenDispClassDeviceOUT->hDeviceKM,
++ hDispClassInfoInt,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int PVRSRVCloseDCDeviceBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ return 0;
++}
++
++static int PVRSRVEnumDCFormatsBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
++ struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassFormatsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++ &psEnumDispClassFormatsOUT->ui32Count,
++ psEnumDispClassFormatsOUT->asFormat);
++
++ return 0;
++}
++
++static int PVRSRVEnumDCDimsBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
++ struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassDimsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if (psEnumDispClassDimsOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
++ &psEnumDispClassDimsIN->sFormat,
++ &psEnumDispClassDimsOUT->ui32Count,
++ psEnumDispClassDimsOUT->asDim);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVGetDCSystemBufferKM.
++ * Looks up the display-class device handle, fetches the system (primary)
++ * buffer and publishes it to the caller as a shared, private sub-handle
++ * parented on hDeviceKM.  Always returns 0 (ioctl success); failures are
++ * reported via psGetDispClassSysBufferOUT->eError.
++ */
++static int PVRSRVGetDCSystemBufferBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
++ struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hBufferInt;
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++ /* Reserve space for the one sub-handle allocated below. */
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc,
++ 1);
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psGetDispClassSysBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, &hBufferInt);
++
++ if (psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ return 0;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetDispClassSysBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (enum PVRSRV_HANDLE_ALLOC_FLAG)
++ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassSysBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVGetDCInfoKM.
++ * Resolves the display-class handle and copies the display info into the
++ * OUT structure.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVGetDCInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psGetDispClassInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVGetDCInfoKM(pvDispClassInfo,
++ &psGetDispClassInfoOUT->sDisplayInfo);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVCreateDCSwapChainKM.
++ * Creates a display-class swap chain from the caller-supplied surface
++ * attributes / buffer count and returns it as a sub-handle parented on
++ * hDeviceKM, together with the new swap chain ID.  Always returns 0;
++ * errors go via ->eError.
++ */
++static int PVRSRVCreateDCSwapChainBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN
++ *psCreateDispClassSwapChainIN,
++ struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN
++ *psCreateDispClassSwapChainOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *hSwapChainInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++ /* Reserve space for the one sub-handle allocated below. */
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError,
++ psPerProc, 1);
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psCreateDispClassSwapChainIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if (psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
++ psCreateDispClassSwapChainIN->ui32Flags,
++ &psCreateDispClassSwapChainIN->sDstSurfAttrib,
++ &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
++ psCreateDispClassSwapChainIN->ui32BufferCount,
++ psCreateDispClassSwapChainIN->ui32OEMFlags,
++ &hSwapChainInt,
++ &psCreateDispClassSwapChainOUT->ui32SwapChainID);
++
++ if (psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ return 0;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psCreateDispClassSwapChainOUT->hSwapChain,
++ hSwapChainInt,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDispClassSwapChainIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVDestroyDCSwapChainKM.
++ * Destroys the swap chain and, only if destruction succeeded, releases
++ * the per-process handle that referenced it.  Always returns 0; errors
++ * go via psRetOUT->eError.
++ */
++static int PVRSRVDestroyDCSwapChainBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN
++ *psDestroyDispClassSwapChainIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVDestroyDCSwapChainKM(pvSwapChain);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVSetDCDstRectKM.
++ * Resolves the display-class and swap-chain handles, then sets the
++ * destination rectangle for the swap chain.  Always returns 0; errors
++ * go via psRetOUT->eError.
++ */
++static int PVRSRVSetDCDstRectBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassDstRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassDstRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstRectKM(pvDispClassInfo,
++ pvSwapChain, &psSetDispClassDstRectIN->sRect);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVSetDCSrcRectKM.
++ * Mirror of PVRSRVSetDCDstRectBW for the source rectangle.  Always
++ * returns 0; errors go via psRetOUT->eError.
++ */
++static int PVRSRVSetDCSrcRectBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassSrcRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassSrcRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++ pvSwapChain, &psSetDispClassSrcRectIN->sRect);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVSetDCDstColourKeyKM.
++ * Resolves the display-class and swap-chain handles, then applies the
++ * destination colour key.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVSetDCDstColourKeyBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVSetDCSrcColourKeyKM.
++ * Mirror of PVRSRVSetDCDstColourKeyBW for the source colour key.
++ * Always returns 0; errors go via psRetOUT->eError.
++ */
++static int PVRSRVSetDCSrcColourKeyBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVGetDCBuffersKM.
++ * Retrieves the kernel buffer handles of a swap chain and rewrites each
++ * entry of ahBuffer[] in place with a per-process sub-handle (shared,
++ * private) parented on hSwapChain, so userspace never sees raw kernel
++ * pointers.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVGetDCBuffersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
++ struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++ /* Worst case: one sub-handle per possible swap-chain buffer. */
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc,
++ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassBuffersIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psGetDispClassBuffersIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVGetDCBuffersKM(pvDispClassInfo,
++ pvSwapChain,
++ &psGetDispClassBuffersOUT->ui32BufferCount,
++ psGetDispClassBuffersOUT->ahBuffer);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ return 0;
++
++ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <=
++ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ for (i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++) {
++ void *hBufferExt;
++
++ /* NOTE(review): assumes the NR variant always writes hBufferExt
++ * (failure is latched into the handle batch) — confirm against
++ * PVRSRVAllocSubHandleNR's definition. */
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hBufferExt,
++ psGetDispClassBuffersOUT->ahBuffer[i],
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (enum PVRSRV_HANDLE_ALLOC_FLAG)
++ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassBuffersIN->hSwapChain);
++
++ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVSwapToDCBufferKM.
++ * Resolves the display-class handle and the buffer sub-handle (validated
++ * against its hDeviceKM parent), then queues the swap with the supplied
++ * interval, private tag and clip rectangles.  Always returns 0; errors
++ * go via psRetOUT->eError.
++ */
++static int PVRSRVSwapToDCBufferBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChainBuf;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChainBuf,
++ psSwapDispClassBufferIN->hBuffer,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ psSwapDispClassBufferIN->hDeviceKM);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
++ pvSwapChainBuf,
++ psSwapDispClassBufferIN->ui32SwapInterval,
++ psSwapDispClassBufferIN->hPrivateTag,
++ psSwapDispClassBufferIN->ui32ClipRectCount,
++ psSwapDispClassBufferIN->sClipRect);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVSwapToDCSystemKM.
++ * Resolves the display-class handle and the swap-chain sub-handle
++ * (validated against its hDeviceKM parent), then swaps the display back
++ * to the system buffer.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVSwapToDCSystemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassSystemIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSwapDispClassSystemIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ psSwapDispClassSystemIN->hDeviceKM);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ psRetOUT->eError = PVRSRVSwapToDCSystemKM(pvDispClassInfo, pvSwapChain);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVOpenBCDeviceKM.
++ * Opens the buffer-class device identified by ui32DeviceID under the
++ * given device node and returns the buffer-class info as a new top-level
++ * handle.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVOpenBCDeviceBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
++ struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++ /* Reserve space for the one handle allocated below. */
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc,
++ 1);
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenBufferClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVOpenBCDeviceKM(psPerProc,
++ psOpenBufferClassDeviceIN->ui32DeviceID,
++ hDevCookieInt, &hBufClassInfo);
++ if (psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ return 0;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenBufferClassDeviceOUT->hDeviceKM,
++ hBufClassInfo,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVCloseBCDeviceKM.
++ * Closes the buffer-class device (IMG_FALSE = not a forced/reset-context
++ * close — TODO confirm the flag's meaning against the KM prototype) and,
++ * only on success, releases the caller's handle.  Always returns 0;
++ * errors go via psRetOUT->eError.
++ */
++static int PVRSRVCloseBCDeviceBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseBufferClassDeviceIN->
++ hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVGetBCInfoKM.
++ * Resolves the buffer-class handle and copies its buffer info into the
++ * OUT structure.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVGetBCInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if (psGetBufferClassInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVGetBCInfoKM(pvBufClassInfo,
++ &psGetBufferClassInfoOUT->sBufferInfo);
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVGetBCBufferKM.
++ * Looks up the buffer-class device, fetches the buffer at
++ * ui32BufferIndex and returns it as a shared, private sub-handle
++ * parented on hDeviceKM.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVGetBCBufferBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
++ struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *pvBufClassInfo;
++ void *hBufferInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++ /* Reserve space for the one sub-handle allocated below. */
++ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc,
++ 1);
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if (psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVGetBCBufferKM(pvBufClassInfo,
++ psGetBufferClassBufferIN->ui32BufferIndex,
++ &hBufferInt);
++
++ if (psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ return 0;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetBufferClassBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ (enum PVRSRV_HANDLE_ALLOC_FLAG)
++ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetBufferClassBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVAllocSharedSysMemoryKM.
++ * Allocates shared system memory, builds a zeroed client-side mem-info
++ * view of the kernel mem-info (kernel linear address, flags, size, OS
++ * mapping handle; pvLinAddr is left NULL for userspace to map itself)
++ * and hands back a handle to the kernel mem-info.  Always returns 0;
++ * errors go via ->eError.
++ */
++static int PVRSRVAllocSharedSysMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
++ struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1);
++
++ psAllocSharedSysMemOUT->eError =
++ PVRSRVAllocSharedSysMemoryKM(psPerProc,
++ psAllocSharedSysMemIN->ui32Flags,
++ psAllocSharedSysMemIN->ui32Size,
++ &psKernelMemInfo);
++ if (psAllocSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
++ 0, sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = NULL;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo =
++ psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRVFreeSharedSysMemoryKM.
++ * Looks up the shared-sys-mem handle, frees the kernel memory and, only
++ * if the free succeeded, releases the handle.  Always returns 0; errors
++ * go via ->eError.
++ */
++static int PVRSRVFreeSharedSysMemoryBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
++ struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++ if (psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++ if (psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ return 0;
++}
++
++/*
++ * Bridge wrapper that maps an existing mem-info into the calling
++ * process.  Accepts a handle of any of the three mem-info handle types,
++ * duplicates it as a MEM_INFO_REF sub-handle under its parent (or under
++ * itself when it has no parent), fills in the client-side mem-info view
++ * and, when the allocation has a sync object, the client-side sync-info
++ * view plus a SYNC_INFO sub-handle.  Up to 2 handles are allocated,
++ * hence the batch size below.  Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVMapMemInfoMemBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
++ struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ enum PVRSRV_HANDLE_TYPE eHandleType;
++ void *hParent;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2);
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo, &eHandleType,
++ psMapMemInfoMemIN->hKernelMemInfo);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ /* Only mem-info-like handle types may be mapped. */
++ switch (eHandleType) {
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++ break;
++ default:
++ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVGetParentHandle(psPerProc->psHandleBase, &hParent,
++ psMapMemInfoMemIN->hKernelMemInfo,
++ eHandleType);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ return 0;
++ /* Top-level handles have no parent: parent the ref on the handle
++ * itself so its lifetime tracks the original. */
++ if (hParent == NULL)
++ hParent = psMapMemInfoMemIN->hKernelMemInfo;
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
++ 0, sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ /* Userspace linear address is established by a later mmap. */
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = NULL;
++ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++ psKernelMemInfo->sDevVAddr;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo =
++ psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, hParent);
++
++ if (psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) {
++ /* No sync object on this allocation: clear the client view. */
++ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo, 0,
++ sizeof(struct PVRSRV_CLIENT_SYNC_INFO));
++ psMapMemInfoMemOUT->psKernelSyncInfo = NULL;
++ } else {
++ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++ psKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->
++ sWriteOpsCompleteDevVAddr;
++ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
++ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->
++ sMemBlk.hOSMemHandle;
++
++ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psMapMemInfoMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper that atomically-in-sequence reads then bumps sync-op
++ * counters on a sync object.  The OUT structure receives the values as
++ * they were BEFORE any increment requested via ui32ModifyFlags is
++ * applied.  NOTE(review): no locking is visible here — presumably the
++ * bridge dispatcher serialises these calls; confirm before relying on it.
++ * Always returns 0; errors go via ->eError.
++ */
++static int PVRSRVModifySyncOpsBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_MODIFY_SYNC_OPS *psModifySyncOpsIN,
++ struct PVRSRV_BRIDGE_OUT_MODIFY_SYNC_OPS *psModifySyncOpsOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hKernelSyncInfo;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psKernelSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)hKernelSyncInfo;
++
++ /* We return PRE-INCREMENTED versions of all sync Op Values */
++
++ psModifySyncOpsOUT->ui32ReadOpsPending =
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psModifySyncOpsOUT->ui32WriteOpsPending =
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ psModifySyncOpsOUT->ui32ReadOpsComplete =
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete;
++
++ psModifySyncOpsOUT->ui32WriteOpsComplete =
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete;
++
++ /* Apply each requested increment after snapshotting the old values. */
++ if (psModifySyncOpsIN->ui32ModifyFlags &
++ PVRSRV_MODIFYSYNCOPS_FLAGS_WOP_INC)
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++ if (psModifySyncOpsIN->ui32ModifyFlags &
++ PVRSRV_MODIFYSYNCOPS_FLAGS_ROP_INC)
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ if (psModifySyncOpsIN->ui32ModifyFlags &
++ PVRSRV_MODIFYSYNCOPS_FLAGS_WOC_INC)
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
++
++ if (psModifySyncOpsIN->ui32ModifyFlags &
++ PVRSRV_MODIFYSYNCOPS_FLAGS_ROC_INC)
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper returning the device physical address of the MMU page
++ * directory backing a device memory context.  A zero address is treated
++ * as failure (PVRSRV_ERROR_GENERIC).  Always returns 0; errors go via
++ * ->eError.
++ */
++static int MMU_GetPDDevPAddrBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
++ struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevMemContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++ psGetMmuPDDevPAddrOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetMmuPDDevPAddrIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if (psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
++ return 0;
++
++ /* Ask the device node's MMU callback for the PD physical address. */
++ psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++ BM_GetDeviceNode(hDevMemContextInt)->
++ pfnMMUGetPDDevPAddr(BM_GetMMUContextFromMemContext
++ (hDevMemContextInt));
++ if (psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++ else
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++}
++
++/*
++ * Placeholder handler for bridge IDs with no real implementation.
++ * Logs an error identifying the offending bridge ID and fails the ioctl
++ * with -ENOTTY so userspace can tell the entry point is unsupported.
++ */
++int DummyBW(u32 ui32BridgeID, void *psBridgeIn, void *psBridgeOut,
++	    struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if !defined(DEBUG)
++	PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++#endif
++	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++	PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++	PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++#if defined(DEBUG_BRIDGE_KM)
++	/* %u, not %lu: ui32BridgeID is u32 (unsigned int), so %lu is a
++	 * format/argument mismatch. */
++	PVR_DPF(PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u (%s) mapped to "
++		"Dummy Wrapper (probably not what you want!)",
++		__func__, ui32BridgeID,
++		g_BridgeDispatchTable[ui32BridgeID].pszIOCName);
++#else
++	PVR_DPF(PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u mapped to "
++		"Dummy Wrapper (probably not what you want!)",
++		__func__, ui32BridgeID);
++#endif
++	return -ENOTTY;
++}
++
++/*
++ * Install pfFunction as the handler for bridge call ui32Index and, when
++ * DEBUG_BRIDGE_KM is set, record its name and zeroed call statistics.
++ * Warns when an entry is clobbered or when a gap is left relative to the
++ * previously installed index — both indicate the dispatch table and the
++ * bridge ID enum have drifted apart.
++ */
++void _SetDispatchTableEntry(u32 ui32Index, const char *pszIOCName,
++			    int (*pfFunction)(u32 ui32BridgeID,
++					      void *psBridgeIn,
++					      void *psBridgeOut,
++					      struct PVRSRV_PER_PROCESS_DATA
++					      *psPerProc),
++			    const char *pszFunctionName)
++{
++	/* ~0U, not ~0UL: the variable is u32, and on a 64-bit build
++	 * (ui32PrevIndex != ~0UL) would compare 0xFFFFFFFF against
++	 * 0xFFFFFFFFFFFFFFFF and therefore always be true. */
++	static u32 ui32PrevIndex = ~0U;
++#if !defined(DEBUG)
++	PVR_UNREFERENCED_PARAMETER(pszIOCName);
++#endif
++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
++	PVR_UNREFERENCED_PARAMETER(pszFunctionName);
++#endif
++
++
++	if (g_BridgeDispatchTable[ui32Index].pfFunction) {
++#if defined(DEBUG_BRIDGE_KM)
++		PVR_DPF(PVR_DBG_ERROR, "%s: BUG!: "
++			"Adding dispatch table entry for %s "
++			"clobbers an existing entry for %s",
++			__func__, pszIOCName,
++			g_BridgeDispatchTable[ui32Index].pszIOCName);
++#else
++		/* %u, not %lu: ui32Index is u32 (unsigned int). */
++		PVR_DPF(PVR_DBG_ERROR, "%s: BUG!: "
++			"Adding dispatch table entry for %s "
++			"clobbers an existing entry (index=%u)",
++			__func__, pszIOCName, ui32Index);
++#endif
++		/* "%s: " consumes the __func__ argument; the original
++		 * format had no conversion for it. */
++		PVR_DPF(PVR_DBG_ERROR,
++"%s: NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++			__func__);
++	}
++
++	if ((ui32PrevIndex != ~0U) &&
++	    ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
++	     (ui32Index <= ui32PrevIndex))) {
++#if defined(DEBUG_BRIDGE_KM)
++		/* %u, not %lu: the indices are u32 (unsigned int). */
++		PVR_DPF(PVR_DBG_WARNING,
++			"%s: There is a gap in the dispatch table "
++			"between indices %u (%s) and %u (%s)",
++			__func__, ui32PrevIndex,
++			g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++			ui32Index, pszIOCName);
++#else
++		PVR_DPF(PVR_DBG_WARNING,
++			"%s: There is a gap in the dispatch table "
++			"between indices %u and %u (%s)",
++			__func__, (unsigned)ui32PrevIndex, (unsigned)ui32Index,
++			pszIOCName);
++#endif
++		PVR_DPF(PVR_DBG_ERROR,
++			"%s: NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE "
++			"may help debug this issue.",
++			__func__);
++	}
++
++	g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++	g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++	g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++	g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++	g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++	ui32PrevIndex = ui32Index;
++}
++
++/*
++ * Bridge wrapper for the initialisation server's connect call.
++ * Only a privileged init process may connect, and only before the init
++ * server has run; on success the init-server state is set to RUNNING and
++ * the caller is marked as the init process.  Always returns 0; errors
++ * go via psRetOUT->eError.
++ */
++static int PVRSRVInitSrvConnectBW(u32 ui32BridgeID, void *psBridgeIn,
++				  struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++				  struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++	/* The original marked psBridgeIn unreferenced twice; once is
++	 * enough. */
++	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++
++	if (!OSProcHasPrivSrvInit() ||
++	    PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) ||
++	    PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) {
++		psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++		return 0;
++	}
++
++	PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
++	psPerProc->bInitProcess = IMG_TRUE;
++
++	psRetOUT->eError = PVRSRV_OK;
++
++	return 0;
++}
++
++/*
++ * Bridge wrapper for the initialisation server's disconnect call.
++ * Only the process that connected as the init process may call this.
++ * Transitions the init-server state RUNNING -> RAN, finalises the system
++ * with the caller-reported init result, and records SUCCESSFUL only when
++ * both finalisation and the reported init succeeded.  Always returns 0;
++ * errors go via psRetOUT->eError.
++ */
++static int PVRSRVInitSrvDisconnectBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++ if (!psPerProc->bInitProcess) {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psPerProc->bInitProcess = IMG_FALSE;
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
++
++ psRetOUT->eError =
++ PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL,
++ (IMG_BOOL)(((psRetOUT->eError == PVRSRV_OK) &&
++ (psInitSrvDisconnectIN->
++ bInitSuccesful))));
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for OSEventObjectWait.
++ * Resolves the caller's event-object-connection handle and blocks on the
++ * event.  Always returns 0; errors (including the wait's own result) go
++ * via psRetOUT->eError.
++ */
++static int PVRSRVEventObjectWaitBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectWaitIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for OSEventObjectOpen.
++ * Resolves the shared event object (note: the kernel pointer is written
++ * back into the IN structure's hOSEventKM field, replacing the handle in
++ * place), opens a per-process connection to it and returns that
++ * connection as a new handle in hOSEvent.  Always returns 0; errors go
++ * via ->eError.
++ */
++static int PVRSRVEventObjectOpenBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
++ struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1);
++
++ psEventObjectOpenOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectOpenIN->sEventObject.hOSEventKM,
++ psEventObjectOpenIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psEventObjectOpenOUT->eError =
++ OSEventObjectOpen(&psEventObjectOpenIN->sEventObject,
++ &psEventObjectOpenOUT->hOSEvent);
++
++ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
++ return 0;
++
++ /* Replace the raw OS event with a per-process handle, in place. */
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psEventObjectOpenOUT->hOSEvent,
++ psEventObjectOpenOUT->hOSEvent,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc);
++
++ return 0;
++}
++
++/*
++ * Bridge wrapper for OSEventObjectClose.
++ * Resolves the shared event object (the kernel pointer is written back
++ * into the IN structure's hOSEventKM field, as in the open path),
++ * releases the caller's connection handle, then closes the connection.
++ * Always returns 0; errors go via psRetOUT->eError.
++ */
++static int PVRSRVEventObjectCloseBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectCloseIN->sEventObject.hOSEventKM,
++ psEventObjectCloseIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectCloseIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++ return 0;
++}
++
++enum PVRSRV_ERROR CommonBridgeInit(void)
++{
++ u32 i;
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES,
++ PVRSRVEnumerateDevicesBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO,
++ PVRSRVAcquireDeviceDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT,
++ PVRSRVCreateDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT,
++ PVRSRVDestroyDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO,
++ PVRSRVGetDeviceMemHeapInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM,
++ PVRSRVAllocDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM,
++ PVRSRVFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM,
++ PVRSRVGetFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA,
++ PVRMMapOSMemHandleToMMapDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES,
++ PVRSRVDisconnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY,
++ PVRSRVMapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY,
++ PVRSRVUnmapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY,
++ PVRSRVMapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY,
++ PVRSRVUnmapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM,
++ PVRSRVExportDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA,
++ PVRMMapReleaseMMapDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE_FLUSH_DRM,
++ PVRSRVCacheFlushDRIBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
++
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING,
++ PDumpIsCaptureFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO,
++ PDumpDriverInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR,
++ PDumpPDDevPAddrBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ,
++ PDumpCycleCountRegReadBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE,
++ PDumpStartInitPhaseBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE,
++ PDumpStopInitPhaseBW);
++#endif
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE,
++ PVRSRVOpenDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE,
++ PVRSRVCloseDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS,
++ PVRSRVEnumDCFormatsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS,
++ PVRSRVEnumDCDimsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER,
++ PVRSRVGetDCSystemBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO,
++ PVRSRVGetDCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN,
++ PVRSRVCreateDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN,
++ PVRSRVDestroyDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT,
++ PVRSRVSetDCDstRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT,
++ PVRSRVSetDCSrcRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY,
++ PVRSRVSetDCDstColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY,
++ PVRSRVSetDCSrcColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS,
++ PVRSRVGetDCBuffersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER,
++ PVRSRVSwapToDCBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM,
++ PVRSRVSwapToDCSystemBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE,
++ PVRSRVOpenBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE,
++ PVRSRVCloseBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO,
++ PVRSRVGetBCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER,
++ PVRSRVGetBCBufferBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY,
++ PVRSRVWrapExtMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY,
++ PVRSRVUnwrapExtMemoryBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM,
++ PVRSRVAllocSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM,
++ PVRSRVFreeSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM,
++ PVRSRVMapMemInfoMemBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR,
++ MMU_GetPDDevPAddrBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT,
++ PVRSRVInitSrvConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT,
++ PVRSRVInitSrvDisconnectBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT,
++ PVRSRVEventObjectWaitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN,
++ PVRSRVEventObjectOpenBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE,
++ PVRSRVEventObjectCloseBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_SYNC_OPS,
++ PVRSRVModifySyncOpsBW);
++
++ SetSGXDispatchTableEntry();
++
++ for (i = 0; i < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; i++)
++ if (!g_BridgeDispatchTable[i].pfFunction) {
++ g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[i].pszIOCName =
++ "_PVRSRV_BRIDGE_DUMMY";
++ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++ g_BridgeDispatchTable[i].ui32CallCount = 0;
++ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++static int bridged_check_cmd(u32 cmd_id)
++{
++ if (PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) {
++ if (!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Initialisation failed. Driver unusable.",
++ __func__);
++ return 1;
++ }
++ } else {
++ if (PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Initialisation is in progress",
++ __func__);
++ return 1;
++ } else {
++ switch (cmd_id) {
++ case PVRSRV_GET_BRIDGE_ID(
++ PVRSRV_BRIDGE_CONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(
++ PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(
++ PVRSRV_BRIDGE_INITSRV_CONNECT):
++ case PVRSRV_GET_BRIDGE_ID(
++ PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Driver initialisation not completed yet.",
++ __func__);
++ return 1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++static int bridged_ioctl(u32 cmd, void *in, void *out,
++ struct PVRSRV_PER_PROCESS_DATA *per_proc)
++{
++ int err = -EFAULT;
++
++ switch (PVRSRV_IOWR(cmd)) {
++ case PVRSRV_BRIDGE_ENUM_DEVICES:
++ err = PVRSRVEnumerateDevicesBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO:
++ err = PVRSRVAcquireDeviceDataBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_RELEASE_DEVICEINFO:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT:
++ err = PVRSRVCreateDeviceMemContextBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT:
++ err = PVRSRVDestroyDeviceMemContextBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO:
++ err = PVRSRVGetDeviceMemHeapInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_ALLOC_DEVICEMEM:
++ err = PVRSRVAllocDeviceMemBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_FREE_DEVICEMEM:
++ err = PVRSRVFreeDeviceMemBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GETFREE_DEVICEMEM:
++ err = PVRSRVGetFreeDeviceMemBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_CREATE_COMMANDQUEUE:
++ case PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA:
++ err = PVRMMapOSMemHandleToMMapDataBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_CONNECT_SERVICES:
++ err = PVRSRVConnectBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_DISCONNECT_SERVICES:
++ err = PVRSRVDisconnectBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_WRAP_DEVICE_MEM:
++ case PVRSRV_BRIDGE_GET_DEVICEMEMINFO:
++ case PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM:
++ case PVRSRV_BRIDGE_FREE_DEV_VIRTMEM:
++ case PVRSRV_BRIDGE_MAP_EXT_MEMORY:
++ case PVRSRV_BRIDGE_UNMAP_EXT_MEMORY:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ err = PVRSRVMapDeviceMemoryBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_UNMAP_DEV_MEMORY:
++ err = PVRSRVUnmapDeviceMemoryBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ err = PVRSRVMapDeviceClassMemoryBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY:
++ err = PVRSRVUnmapDeviceClassMemoryBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER:
++ case PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ err = PVRSRVExportDeviceMemBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_RELEASE_MMAP_DATA:
++ err = PVRMMapReleaseMMapDataBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_CACHE_FLUSH_DRM:
++ err = PVRSRVCacheFlushDRIBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT:
++ case PVRSRV_BRIDGE_REGISTER_SIM_PROCESS:
++ case PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS:
++ case PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE:
++ case PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE:
++ case PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP:
++ case PVRSRV_BRIDGE_GET_FB_STATS:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_GET_MISC_INFO:
++ err = PVRSRVGetMiscInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_RELEASE_MISC_INFO:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++#if defined(PDUMP)
++ case PVRSRV_BRIDGE_PDUMP_INIT:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_MEMPOL:
++ err = PDumpMemPolBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_DUMPMEM:
++ err = PDumpMemBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_REG:
++ err = PDumpRegWithFlagsBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_REGPOL:
++ err = PDumpRegPolBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_COMMENT:
++ err = PDumpCommentBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_SETFRAME:
++ err = PDumpSetFrameBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_ISCAPTURING:
++ err = PDumpIsCaptureFrameBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_DUMPBITMAP:
++ err = PDumpBitmapBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_DUMPREADREG:
++ err = PDumpReadRegBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_SYNCPOL:
++ err = PDumpSyncPolBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_DUMPSYNC:
++ err = PDumpSyncDumpBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_DRIVERINFO:
++ err = PDumpDriverInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_PDREG:
++ err = PDumpPDRegBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR:
++ err = PDumpPDDevPAddrBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ:
++ err = PDumpCycleCountRegReadBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_STARTINITPHASE:
++ err = PDumpStartInitPhaseBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_PDUMP_STOPINITPHASE:
++ err = PDumpStopInitPhaseBW(cmd, in, out, per_proc);
++ break;
++#endif
++
++ case PVRSRV_BRIDGE_GET_OEMJTABLE:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_ENUM_CLASS:
++ err = PVRSRVEnumerateDCBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE:
++ err = PVRSRVOpenDCDeviceBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE:
++ err = PVRSRVCloseDCDeviceBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS:
++ err = PVRSRVEnumDCFormatsBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS:
++ err = PVRSRVEnumDCDimsBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER:
++ err = PVRSRVGetDCSystemBufferBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GET_DISPCLASS_INFO:
++ err = PVRSRVGetDCInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN:
++ err = PVRSRVCreateDCSwapChainBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN:
++ err = PVRSRVDestroyDCSwapChainBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT:
++ err = PVRSRVSetDCDstRectBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT:
++ err = PVRSRVSetDCSrcRectBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY:
++ err = PVRSRVSetDCDstColourKeyBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY:
++ err = PVRSRVSetDCSrcColourKeyBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS:
++ err = PVRSRVGetDCBuffersBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER:
++ err = PVRSRVSwapToDCBufferBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM:
++ err = PVRSRVSwapToDCSystemBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE:
++ err = PVRSRVOpenBCDeviceBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE:
++ err = PVRSRVCloseBCDeviceBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO:
++ err = PVRSRVGetBCInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER:
++ err = PVRSRVGetBCBufferBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_WRAP_EXT_MEMORY:
++ err = PVRSRVWrapExtMemoryBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY:
++ err = PVRSRVUnwrapExtMemoryBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM:
++ err = PVRSRVAllocSharedSysMemoryBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM:
++ err = PVRSRVFreeSharedSysMemoryBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_MAP_MEMINFO_MEM:
++ err = PVRSRVMapMemInfoMemBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR:
++ err = MMU_GetPDDevPAddrBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_INITSRV_CONNECT:
++ err = PVRSRVInitSrvConnectBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_INITSRV_DISCONNECT:
++ err = PVRSRVInitSrvDisconnectBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_EVENT_OBJECT_WAIT:
++ err = PVRSRVEventObjectWaitBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_EVENT_OBJECT_OPEN:
++ err = PVRSRVEventObjectOpenBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE:
++ err = PVRSRVEventObjectCloseBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_MODIFY_SYNC_OPS:
++ err = PVRSRVModifySyncOpsBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_GETCLIENTINFO:
++ err = SGXGetClientInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO:
++ err = SGXReleaseClientInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO:
++ err = SGXGetInternalDevInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_DOKICK:
++ err = SGXDoKickBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR:
++ case PVRSRV_BRIDGE_SGX_READREGISTRYDWORD:
++ case PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE:
++ err = SGX2DQueryBlitsCompleteBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_GETMMUPDADDR:
++ err = DummyBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_SUBMITTRANSFER:
++ err = SGXSubmitTransferBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_GETMISCINFO:
++ err = SGXGetMiscInfoBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT:
++ err = SGXGetInfoForSrvinitBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_DEVINITPART2:
++ err = SGXDevInitPart2BW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC:
++ err = SGXFindSharedPBDescBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC:
++ err = SGXUnrefSharedPBDescBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:
++ err = SGXAddSharedPBDescBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT:
++ err = SGXRegisterHWRenderContextBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET:
++ err = SGXFlushHWRenderTargetBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT:
++ err = SGXUnregisterHWRenderContextBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT:
++ err = SGXRegisterHWTransferContextBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT:
++ err = SGXUnregisterHWTransferContextBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS:
++ err = SGXReadDiffCountersBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_READ_HWPERF_CB:
++ err = SGXReadHWPerfCBBW(cmd, in, out, per_proc);
++ break;
++
++ case PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES:
++ err = SGXScheduleProcessQueuesBW(cmd, in, out, per_proc);
++ break;
++
++#if defined(PDUMP)
++ case PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY:
++ err = SGXPDumpBufferArrayBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS:
++ err = SGXPDump3DSignatureRegistersBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS:
++ err = SGXPDumpCounterRegistersBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS:
++ err = SGXPDumpTASignatureRegistersBW(cmd, in, out, per_proc);
++ break;
++ case PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB:
++ err = SGXPDumpHWPerfCBBW(cmd, in, out, per_proc);
++ break;
++#endif
++
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "%s: cmd = %d is out if range!",
++ __func__, cmd);
++ }
++
++ return err;
++}
++
++int BridgedDispatchKM(struct PVRSRV_PER_PROCESS_DATA *pd,
++ struct PVRSRV_BRIDGE_PACKAGE *pkg)
++{
++
++ void *in;
++ void *out;
++ u32 bid = pkg->ui32BridgeID;
++ int err = -EFAULT;
++ struct SYS_DATA *psSysData;
++
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[bid].ui32CallCount++;
++ g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++ if (!pd->bInitProcess && bridged_check_cmd(bid))
++ goto return_fault;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ goto return_fault;
++
++ in = ((struct ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
++ out = (void *)((u8 *)in + PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++ if (pkg->ui32InBufferSize > 0 &&
++ CopyFromUserWrapper(pd, bid, in, pkg->pvParamIn,
++ pkg->ui32InBufferSize) != PVRSRV_OK)
++ goto return_fault;
++
++ if (bid >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: ui32BridgeID = %d is out if range!", __func__,
++ bid);
++ goto return_fault;
++ }
++
++ err = bridged_ioctl(bid, in, out, pd);
++
++ if (err < 0)
++ goto return_fault;
++
++ if (CopyToUserWrapper(pd, bid, pkg->pvParamOut, out,
++ pkg->ui32OutBufferSize) != PVRSRV_OK)
++ goto return_fault;
++
++ err = 0;
++return_fault:
++ ReleaseHandleBatch(pd);
++ return err;
++}
+diff --git a/drivers/gpu/pvr/bridged_pvr_bridge.h b/drivers/gpu/pvr/bridged_pvr_bridge.h
+new file mode 100644
+index 0000000..7249802
+--- /dev/null
++++ b/drivers/gpu/pvr/bridged_pvr_bridge.h
+@@ -0,0 +1,157 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X)
++
++#if defined(DEBUG_BRIDGE_KM)
++enum PVRSRV_ERROR CopyFromUserWrapper(struct PVRSRV_PER_PROCESS_DATA *pProcData,
++ u32 ui32BridgeID, void *pvDest,
++ void __user *pvSrc, u32 ui32Size);
++enum PVRSRV_ERROR CopyToUserWrapper(struct PVRSRV_PER_PROCESS_DATA *pProcData,
++ u32 ui32BridgeID, void __user *pvDest,
++ void *pvSrc, u32 ui32Size);
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \
++ do { \
++ (error) = (src); \
++ if ((error) != PVRSRV_OK) \
++ return res; \
++ } while (error != PVRSRV_OK)
++
++#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \
++ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0)
++
++static inline enum PVRSRV_ERROR NewHandleBatch(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, u32 ui32BatchSize)
++{
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(!psPerProc->bHandlesBatched);
++
++ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize);
++
++ if (eError == PVRSRV_OK)
++ psPerProc->bHandlesBatched = IMG_TRUE;
++
++ return eError;
++}
++
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, \
++ ui32BatchSize))
++
++static inline enum PVRSRV_ERROR
++CommitHandleBatch(struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_ASSERT(psPerProc->bHandlesBatched);
++
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase);
++}
++
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc))
++
++static inline void ReleaseHandleBatch(struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->bHandlesBatched) {
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ PVRSRVReleaseHandleBatch(psPerProc->psHandleBase);
++ }
++}
++
++int DummyBW(u32 ui32BridgeID, void *psBridgeIn, void *psBridgeOut,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++struct PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY {
++ int (*pfFunction)(u32 ui32BridgeID, void *psBridgeIn, void *psBridgeOut,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++#if defined(DEBUG_BRIDGE_KM)
++ const char *pszIOCName;
++ const char *pszFunctionName;
++ u32 ui32CallCount;
++ u32 ui32CopyFromUserTotalBytes;
++ u32 ui32CopyToUserTotalBytes;
++#endif
++};
++
++#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++#define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD
++
++extern struct PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++ g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++void _SetDispatchTableEntry(u32 ui32Index,
++ const char *pszIOCName,
++ int (*pfFunction) (u32 ui32BridgeID,
++ void *psBridgeIn,
++ void *psBridgeOut,
++ struct PVRSRV_PER_PROCESS_DATA *
++ psPerProc),
++ const char *pszFunctionName);
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, \
++ (int (*)(u32 ui32BridgeID, void *psBridgeIn, void *psBridgeOut, \
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc))pfFunction, #pfFunction)
++
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++struct PVRSRV_BRIDGE_GLOBAL_STATS {
++ u32 ui32IOCTLCount;
++ u32 ui32TotalCopyFromUserBytes;
++ u32 ui32TotalCopyToUserBytes;
++};
++
++extern struct PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++enum PVRSRV_ERROR CommonBridgeInit(void);
++
++int BridgedDispatchKM(struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ struct PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM);
++
++#endif
+diff --git a/drivers/gpu/pvr/bridged_sgx_bridge.c b/drivers/gpu/pvr/bridged_sgx_bridge.c
+new file mode 100644
+index 0000000..adbd436
+--- /dev/null
++++ b/drivers/gpu/pvr/bridged_sgx_bridge.c
+@@ -0,0 +1,1813 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/errno.h>
++
++#include <stddef.h>
++
++#include "img_defs.h"
++
++#include "services.h"
++#include "pvr_debug.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "power.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++#include "bridged_pvr_bridge.h"
++#include "bridged_sgx_bridge.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++int SGXGetClientInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++ psGetClientInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psGetClientInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psGetClientInfoOUT->eError =
++ SGXGetClientInfoKM(hDevCookieInt, &psGetClientInfoOUT->sClientInfo);
++ return 0;
++}
++
++int SGXReleaseClientInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psReleaseClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)((struct PVRSRV_DEVICE_NODE *)
++ hDevCookieInt)->pvDevice;
++
++ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++ psDevInfo->ui32ClientRefCount--;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++int SGXGetInternalDevInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXGetInternalDevInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psSGXGetInternalDevInfoOUT->eError =
++ SGXGetInternalDevInfoKM(hDevCookieInt,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.
++ hHostCtlKernelMemInfoHandle,
++ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.
++ hHostCtlKernelMemInfoHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++int SGXDoKickBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psDoKickIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (psDoKickIN->sCCBKick.hTA3DSyncInfo != NULL) {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psDoKickIN->sCCBKick.hTASyncInfo != NULL) {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTASyncInfo,
++ psDoKickIN->sCCBKick.hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psDoKickIN->sCCBKick.h3DSyncInfo != NULL) {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.h3DSyncInfo,
++ psDoKickIN->sCCBKick.h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS) {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++) {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.
++ ahSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.
++ ahSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS) {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++) {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS) {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++) {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumDstSyncObjects > 0) {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.
++ hKernelHWSyncListMemInfo,
++ psDoKickIN->sCCBKick.
++ hKernelHWSyncListMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.sDstSyncHandle,
++ psDoKickIN->sCCBKick.sDstSyncHandle,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ psRetOUT->eError = SGXDoKickKM(hDevCookieInt, &psDoKickIN->sCCBKick);
++
++ return 0;
++}
++
++int SGXScheduleProcessQueuesBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psScheduleProcQIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);
++
++ return 0;
++}
++
++int SGXSubmitTransferBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ struct PVRSRV_TRANSFER_SGX_KICK *psKick;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psKick = &psSubmitTransferIN->sKick;
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmitTransferIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ if (psKick->hTASyncInfo != NULL) {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS) {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS) {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumDstSync; i++) {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahDstSyncInfo[i],
++ psKick->ahDstSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++
++int SGXGetMiscInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ struct SGX_MISC_INFO sMiscInfo;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXGetMiscInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psDeviceNode = hDevCookieInt;
++ PVR_ASSERT(psDeviceNode != NULL);
++ if (psDeviceNode == NULL)
++ return -EFAULT;
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++ psRetOUT->eError = CopyFromUserWrapper(psPerProc, ui32BridgeID,
++ &sMiscInfo,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ sizeof(struct SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ return -EFAULT;
++
++ if (sMiscInfo.eRequest == SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB) {
++ void *pAllocated;
++ void *hAllocatedHandle;
++ void __user *psTmpUserData;
++ u32 allocatedSize;
++
++ allocatedSize =
++ (u32) (sMiscInfo.uData.sRetrieveCB.ui32ArraySize *
++ sizeof(struct PVRSRV_SGX_HWPERF_CBDATA));
++
++ ASSIGN_AND_EXIT_ON_ERROR(psRetOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ &pAllocated,
++ &hAllocatedHandle));
++
++ psTmpUserData = (void __force __user *)
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData;
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = pAllocated;
++
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo,
++ &sMiscInfo, psDeviceNode);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize, pAllocated, hAllocatedHandle);
++ return 0;
++ }
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID, psTmpUserData,
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData,
++ allocatedSize);
++
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData =
++ (void __force *)psTmpUserData;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize, pAllocated, hAllocatedHandle);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return -EFAULT;
++ } else {
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo,
++ &sMiscInfo, psDeviceNode);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++ }
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ &sMiscInfo,
++ sizeof(struct SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ return -EFAULT;
++ return 0;
++}
++
++int SGXReadDiffCountersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS
++ *psSGXReadDiffCountersOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS);
++
++ psSGXReadDiffCountersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXReadDiffCountersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psSGXReadDiffCountersOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psSGXReadDiffCountersOUT->eError = SGXReadDiffCountersKM(
++ hDevCookieInt,
++ psSGXReadDiffCountersIN->ui32Reg,
++ &psSGXReadDiffCountersOUT->ui32Old,
++ psSGXReadDiffCountersIN->bNew,
++ psSGXReadDiffCountersIN->ui32New,
++ psSGXReadDiffCountersIN->ui32NewReset,
++ psSGXReadDiffCountersIN->ui32CountersReg,
++ &psSGXReadDiffCountersOUT->ui32Time,
++ &psSGXReadDiffCountersOUT->bActive,
++ &psSGXReadDiffCountersOUT->sDiffs);
++
++ return 0;
++}
++
++int SGXReadHWPerfCBBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ struct PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated;
++ void *hAllocatedHandle;
++ u32 ui32AllocatedSize;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);
++
++ psSGXReadHWPerfCBOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXReadHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
++ return 0;
++
++ ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
++ sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]);
++ ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ (void **)&psAllocated,
++ &hAllocatedHandle));
++
++ psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
++ psSGXReadHWPerfCBIN->ui32ArraySize,
++ psAllocated,
++ &psSGXReadHWPerfCBOUT->ui32DataCount,
++ &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
++ &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
++ if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
++ psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(
++ psPerProc, ui32BridgeID,
++ psSGXReadHWPerfCBIN->psHWPerfCBData,
++ psAllocated, ui32AllocatedSize);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize, psAllocated, hAllocatedHandle);
++
++ return 0;
++}
++
++int SGXDevInitPart2BW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ enum PVRSRV_ERROR eError;
++ IMG_BOOL bDissociateFailed = IMG_FALSE;
++ IMG_BOOL bLookupFailed = IMG_FALSE;
++ IMG_BOOL bReleaseFailed = IMG_FALSE;
++ void *hDummy;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++ if (!psPerProc->bInitProcess) {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXDevInitPart2IN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) {
++ void *hHandle =
++ psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == NULL)
++ continue;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, &hDummy,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++ }
++
++ if (bLookupFailed) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevInitSGXPart2BW: A handle lookup failed");
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBEventKickerMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXHostCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXTA3DCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXMiscMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelHWPerfCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.
++ hKernelEDMStatusBufferMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) {
++ void **phHandle =
++ &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (*phHandle == NULL)
++ continue;
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ phHandle, *phHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++ }
++
++ if (bReleaseFailed) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevInitSGXPart2BW: A handle release failed");
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBEventKickerMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXHostCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXTA3DCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXMiscMemInfo);
++ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelHWPerfCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelEDMStatusBufferMemInfo);
++ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) {
++ void *hHandle =
++ psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == NULL)
++ continue;
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK);
++ }
++
++ if (bDissociateFailed) {
++ PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelCCBCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXHostCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXTA3DCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ psSGXDevInitPart2IN->sInitInfo.
++ hKernelSGXMiscMemInfo);
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) {
++ void *hHandle =
++ psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == NULL)
++ continue;
++
++ PVRSRVFreeDeviceMemKM(hDevCookieInt,
++ (struct PVRSRV_KERNEL_MEM_INFO *)
++ hHandle);
++
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevInitSGXPart2BW: A dissociate failed");
++
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ psRetOUT->eError = DevInitSGXPart2KM(psPerProc, hDevCookieInt,
++ &psSGXDevInitPart2IN->sInitInfo);
++
++ return 0;
++}
++
++int SGXRegisterHWRenderContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT
++ *psSGXRegHWRenderContextIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT
++ *psSGXRegHWRenderContextOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc,
++ 1);
++
++ psSGXRegHWRenderContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXRegHWRenderContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++ return 0;
++
++ hHWRenderContextInt =
++ SGXRegisterHWRenderContextKM(hDevCookieInt,
++ &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr,
++ psPerProc);
++
++ if (hHWRenderContextInt == NULL) {
++ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWRenderContextOUT->hHWRenderContext,
++ hHWRenderContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++int SGXUnregisterHWRenderContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT
++ *psSGXUnregHWRenderContextIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWRenderContextInt,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++
++ return 0;
++}
++
++int SGXRegisterHWTransferContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT
++ *psSGXRegHWTransferContextIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT
++ *psSGXRegHWTransferContextOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError,
++ psPerProc, 1);
++
++ psSGXRegHWTransferContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXRegHWTransferContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++ return 0;
++
++ hHWTransferContextInt =
++ SGXRegisterHWTransferContextKM(hDevCookieInt,
++ &psSGXRegHWTransferContextIN->
++ sHWTransferContextDevVAddr,
++ psPerProc);
++
++ if (hHWTransferContextInt == NULL) {
++ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWTransferContextOUT->hHWTransferContext,
++ hHWTransferContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++int SGXUnregisterHWTransferContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT
++ *psSGXUnregHWTransferContextIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWTransferContextInt,
++ psSGXUnregHWTransferContextIN->
++ hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWTransferContextIN->
++ hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++
++ return 0;
++}
++
++int SGXFlushHWRenderTargetBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET
++ *psSGXFlushHWRenderTargetIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFlushHWRenderTargetIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ SGXFlushHWRenderTargetKM(hDevCookieInt,
++ psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++ return 0;
++}
++
++int SGX2DQueryBlitsCompleteBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ void *pvSyncInfo;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ ps2DQueryBltsCompleteIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ ps2DQueryBltsCompleteIN->hKernSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)((struct PVRSRV_DEVICE_NODE *)
++ hDevCookieInt)->pvDevice;
++
++ psRetOUT->eError =
++ SGX2DQueryBlitsCompleteKM(psDevInfo,
++ (struct PVRSRV_KERNEL_SYNC_INFO *)
++ pvSyncInfo,
++ ps2DQueryBltsCompleteIN->
++ bWaitForComplete);
++
++ return 0;
++}
++
++int SGXFindSharedPBDescBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++ struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ struct PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = NULL;
++ u32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++ u32 i;
++ void *hSharedPBDesc = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc,
++ PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS
++ + 4);
++
++ psSGXFindSharedPBDescOUT->hSharedPBDesc = NULL;
++
++ psSGXFindSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFindSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ psSGXFindSharedPBDescOUT->eError =
++ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSGXFindSharedPBDescIN->bLockOnFailure,
++ psSGXFindSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ &psSharedPBDescKernelMemInfo,
++ &psHWPBDescKernelMemInfo,
++ &psBlockKernelMemInfo,
++ &ppsSharedPBDescSubKernelMemInfos,
++ &ui32SharedPBDescSubKernelMemInfosCount);
++ if (psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount <=
++ PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++
++ if (hSharedPBDesc == NULL) {
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle =
++ NULL;
++
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->
++ hSharedPBDescKernelMemInfoHandle,
++ psSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->
++ hHWPBDescKernelMemInfoHandle,
++ psHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->
++ hBlockKernelMemInfoHandle,
++ psBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) {
++ struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC
++ *psSGXFindSharedPBDescOut = psSGXFindSharedPBDescOUT;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOut->
++ ahSharedPBDescSubKernelMemInfoHandles[i],
++ ppsSharedPBDescSubKernelMemInfos[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->
++ hSharedPBDescKernelMemInfoHandle);
++ }
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++ if (ppsSharedPBDescSubKernelMemInfos != NULL)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *) *
++ ui32SharedPBDescSubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos, NULL);
++
++ if (psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) {
++ if (hSharedPBDesc != NULL)
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++ } else
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++int SGXUnrefSharedPBDescBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++ struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC
++ *psSGXUnrefSharedPBDescOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hSharedPBDesc;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hSharedPBDesc,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++ if (psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++ if (psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++
++ return 0;
++}
++
++int SGXAddSharedPBDescBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++ struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ struct PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ u32 ui32KernelMemInfoHandlesCount =
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++ int ret = 0;
++ void **phKernelMemInfoHandles = NULL;
++ struct PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = NULL;
++ u32 i;
++ enum PVRSRV_ERROR eError;
++ void *hSharedPBDesc = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc,
++ 1);
++
++ psSGXAddSharedPBDescOUT->hSharedPBDesc = NULL;
++
++ PVR_ASSERT(ui32KernelMemInfoHandlesCount <=
++ PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXAddSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psSharedPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->
++ hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psHWPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->
++ hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ if (!OSAccessOK(PVR_VERIFY_READ,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(void *))) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++ " Invalid phKernelMemInfos pointer", __func__);
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(void *),
++ (void **)&phKernelMemInfoHandles, NULL);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ if (CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelMemInfoHandles,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(void *))
++ != PVRSRV_OK) {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount *
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *),
++ (void **)&ppsKernelMemInfos, NULL);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ for (i = 0; i < ui32KernelMemInfoHandlesCount; i++) {
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&ppsKernelMemInfos[i],
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->
++ hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->
++ hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->
++ hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ for (i = 0; i < ui32KernelMemInfoHandlesCount; i++) {
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++
++ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSharedPBDescKernelMemInfo,
++ psHWPBDescKernelMemInfo,
++ psBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ ppsKernelMemInfos,
++ ui32KernelMemInfoHandlesCount);
++
++ if (eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++ if (phKernelMemInfoHandles)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount
++ * sizeof(void *),
++ (void *)phKernelMemInfoHandles, NULL);
++ if (ppsKernelMemInfos)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount
++ * sizeof(struct PVRSRV_KERNEL_MEM_INFO *),
++ (void *)ppsKernelMemInfos, NULL);
++
++ if (ret == 0 && eError == PVRSRV_OK)
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError,
++ psPerProc);
++
++ psSGXAddSharedPBDescOUT->eError = eError;
++
++ return ret;
++}
++
++int SGXGetInfoForSrvinitBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++ struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void *hDevCookieInt;
++ u32 i;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc,
++ PVRSRV_MAX_CLIENT_HEAPS);
++
++ if (!psPerProc->bInitProcess) {
++ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXInfoForSrvinitIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psSGXInfoForSrvinitOUT->eError =
++ SGXGetInfoForSrvinitKM(hDevCookieInt,
++ &psSGXInfoForSrvinitOUT->sInitInfo);
++
++ if (psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ return 0;
++
++ for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ struct PVRSRV_HEAP_INFO *psHeapInfo;
++
++ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++ if (psHeapInfo->ui32HeapID != (u32)SGX_UNDEFINED_HEAP_ID) {
++ void *hDevMemHeapExt;
++
++ if (psHeapInfo->hDevMemHeap != NULL) {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psHeapInfo->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++ }
++ }
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#if defined(PDUMP)
++static void DumpBufferArray(struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ struct SGX_KICKTA_DUMP_BUFFER *psBufferArray,
++ u32 ui32BufferArrayLength, IMG_BOOL bDumpPolls)
++{
++ u32 i;
++
++ for (i = 0; i < ui32BufferArrayLength; i++) {
++ struct SGX_KICKTA_DUMP_BUFFER *psBuffer;
++ struct PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM;
++ char *pszName;
++ void *hUniqueTag;
++ u32 ui32Offset;
++
++ psBuffer = &psBufferArray[i];
++ pszName = psBuffer->pszName;
++ if (!pszName)
++ pszName = "Nameless buffer";
++
++ hUniqueTag =
++ MAKEUNIQUETAG((struct PVRSRV_KERNEL_MEM_INFO *)psBuffer->
++ hKernelMemInfo);
++
++ psCtrlMemInfoKM =
++ ((struct PVRSRV_KERNEL_MEM_INFO *)psBuffer->
++ hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM;
++ ui32Offset =
++ offsetof(struct PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++ if (psBuffer->ui32Start <= psBuffer->ui32End) {
++ if (bDumpPolls) {
++ PDUMPCOMMENTWITHFLAGS(0,
++ "Wait for %s space\r\n",
++ pszName);
++ PDUMPCBP(psCtrlMemInfoKM, ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32SpaceUsed,
++ psBuffer->ui32BufferSize, 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++
++ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ NULL, psBuffer->pvLinAddr,
++ (struct PVRSRV_KERNEL_MEM_INFO *)psBuffer->
++ hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32End - psBuffer->ui32Start, 0,
++ hUniqueTag);
++ } else {
++
++ if (bDumpPolls) {
++ PDUMPCOMMENTWITHFLAGS(0,
++ "Wait for %s space\r\n",
++ pszName);
++ PDUMPCBP(psCtrlMemInfoKM, ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ psBuffer->ui32BufferSize, 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ NULL, psBuffer->pvLinAddr,
++ (struct PVRSRV_KERNEL_MEM_INFO *)psBuffer->
++ hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength, 0, hUniqueTag);
++
++ if (bDumpPolls) {
++ PDUMPMEMPOL(psCtrlMemInfoKM, ui32Offset,
++ 0, 0xFFFFFFFF,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ IMG_FALSE, IMG_FALSE,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++
++ PDUMPCOMMENTWITHFLAGS(0,
++ "Wait for %s space\r\n",
++ pszName);
++ PDUMPCBP(psCtrlMemInfoKM, ui32Offset, 0,
++ psBuffer->ui32End,
++ psBuffer->ui32BufferSize, 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++ PDUMPMEMUM(psPerProc, NULL, psBuffer->pvLinAddr,
++ (struct PVRSRV_KERNEL_MEM_INFO *)psBuffer->
++ hKernelMemInfo,
++ 0, psBuffer->ui32End, 0, hUniqueTag);
++ }
++ }
++}
++
++int SGXPDumpBufferArrayBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 i;
++ struct SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++ u32 ui32BufferArrayLength = psPDumpBufferArrayIN->ui32BufferArrayLength;
++ u32 ui32BufferArraySize =
++ ui32BufferArrayLength * sizeof(struct SGX_KICKTA_DUMP_BUFFER);
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize,
++ (void **)&psKickTADumpBuffer, NULL) != PVRSRV_OK)
++ return -ENOMEM;
++
++ if (CopyFromUserWrapper(psPerProc, ui32BridgeID, psKickTADumpBuffer,
++ psPDumpBufferArrayIN->psBufferArray,
++ ui32BufferArraySize) != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize,
++ psKickTADumpBuffer, NULL);
++ return -EFAULT;
++ }
++
++ for (i = 0; i < ui32BufferArrayLength; i++) {
++ void *pvMemInfo;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].
++ hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError);
++ break;
++ }
++ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++
++ }
++
++ if (eError == PVRSRV_OK)
++ DumpBufferArray(psPerProc, psKickTADumpBuffer,
++ ui32BufferArrayLength,
++ psPDumpBufferArrayIN->bDumpPolls);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize,
++ psKickTADumpBuffer, NULL);
++
++ return 0;
++}
++
++int SGXPDump3DSignatureRegistersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS
++ *psPDump3DSignatureRegistersIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32RegisterArraySize =
++ psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(u32);
++ u32 *pui32Registers = NULL;
++ int ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ goto ExitNoError;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (void **)&pui32Registers, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDump3DSignatureRegistersBW: OSAllocMem failed");
++ goto Exit;
++ }
++
++ if (CopyFromUserWrapper(psPerProc, ui32BridgeID, pui32Registers,
++ psPDump3DSignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: "
++ "CopyFromUserWrapper failed");
++ goto Exit;
++ }
++
++ PDump3DSignatureRegisters(psPDump3DSignatureRegistersIN->
++ ui32DumpFrameNum,
++ psPDump3DSignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->
++ ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != NULL)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize,
++ pui32Registers, NULL);
++
++ return ret;
++}
++
++int SGXPDumpCounterRegistersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS
++ *psPDumpCounterRegistersIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32RegisterArraySize =
++ psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(u32);
++ u32 *pui32Registers = NULL;
++ int ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ goto ExitNoError;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize,
++ (void **)&pui32Registers, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpCounterRegistersBW: OSAllocMem failed");
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if (CopyFromUserWrapper(psPerProc, ui32BridgeID, pui32Registers,
++ psPDumpCounterRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpCounterRegistersBW: CopyFromUserWrapper failed");
++ goto Exit;
++ }
++
++ PDumpCounterRegisters(psPDumpCounterRegistersIN->ui32DumpFrameNum,
++ psPDumpCounterRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpCounterRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != NULL)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize,
++ pui32Registers, NULL);
++
++ return ret;
++}
++
++int SGXPDumpTASignatureRegistersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS
++ *psPDumpTASignatureRegistersIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32RegisterArraySize =
++ psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(u32);
++ u32 *pui32Registers = NULL;
++ int ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ goto ExitNoError;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (void **)&pui32Registers, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpTASignatureRegistersBW: OSAllocMem failed");
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if (CopyFromUserWrapper(psPerProc, ui32BridgeID, pui32Registers,
++ psPDumpTASignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: "
++ "CopyFromUserWrapper failed");
++ goto Exit;
++ }
++
++ PDumpTASignatureRegisters(psPDumpTASignatureRegistersIN->
++ ui32DumpFrameNum,
++ psPDumpTASignatureRegistersIN->
++ ui32TAKickCount,
++ psPDumpTASignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->
++ ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != NULL)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize,
++ pui32Registers, NULL);
++
++ return ret;
++}
++
++int SGXPDumpHWPerfCBBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psRetOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psDevInfo = ((struct PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PDumpHWPerfCBKM(&psPDumpHWPerfCBIN->szFileName[0],
++ psPDumpHWPerfCBIN->ui32FileOffset,
++ psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr,
++ psDevInfo->psKernelHWPerfCBMemInfo->ui32AllocSize,
++ psPDumpHWPerfCBIN->ui32PDumpFlags);
++
++ return 0;
++}
++
++#endif
++
++void SetSGXDispatchTableEntry(void)
++{
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO,
++ SGXGetClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO,
++ SGXReleaseClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO,
++ SGXGetInternalDevInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE,
++ SGX2DQueryBlitsCompleteBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER,
++ SGXSubmitTransferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT,
++ SGXGetInfoForSrvinitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2,
++ SGXDevInitPart2BW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC,
++ SGXFindSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC,
++ SGXUnrefSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC,
++ SGXAddSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT,
++ SGXRegisterHWRenderContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET,
++ SGXFlushHWRenderTargetBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT,
++ SGXUnregisterHWRenderContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT,
++ SGXRegisterHWTransferContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT,
++ SGXUnregisterHWTransferContextBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS,
++ SGXReadDiffCountersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB,
++ SGXReadHWPerfCBBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES,
++ SGXScheduleProcessQueuesBW);
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY,
++ SGXPDumpBufferArrayBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS,
++ SGXPDump3DSignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS,
++ SGXPDumpCounterRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS,
++ SGXPDumpTASignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB,
++ SGXPDumpHWPerfCBBW);
++#endif
++}
+diff --git a/drivers/gpu/pvr/bridged_sgx_bridge.h b/drivers/gpu/pvr/bridged_sgx_bridge.h
+new file mode 100644
+index 0000000..3867dc4
+--- /dev/null
++++ b/drivers/gpu/pvr/bridged_sgx_bridge.h
+@@ -0,0 +1,167 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SGX_BRIDGE_H__
++#define __BRIDGED_SGX_BRIDGE_H__
++
++void SetSGXDispatchTableEntry(void);
++
++int SGXGetClientInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXReleaseClientInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXGetInternalDevInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++ struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXDoKickBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXScheduleProcessQueuesBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXSubmitTransferBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXGetMiscInfoBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXReadDiffCountersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS
++ *psSGXReadDiffCountersOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXReadHWPerfCBBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXDevInitPart2BW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXRegisterHWRenderContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT
++ *psSGXRegHWRenderContextIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT
++ *psSGXRegHWRenderContextOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXUnregisterHWRenderContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT
++ *psSGXUnregHWRenderContextIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXRegisterHWTransferContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT
++ *psSGXRegHWTransferContextIN,
++ struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT
++ *psSGXRegHWTransferContextOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXUnregisterHWTransferContextBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT
++ *psSGXUnregHWTransferContextIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXFlushHWRenderTargetBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET
++ *psSGXFlushHWRenderTargetIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGX2DQueryBlitsCompleteBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXFindSharedPBDescBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++ struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXUnrefSharedPBDescBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++ struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC
++ *psSGXUnrefSharedPBDescOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXAddSharedPBDescBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++ struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXGetInfoForSrvinitBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++ struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++#if defined(PDUMP)
++int SGXPDumpBufferArrayBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXPDump3DSignatureRegistersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS
++ *psPDump3DSignatureRegistersIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXPDumpCounterRegistersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS
++ *psPDumpCounterRegistersIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXPDumpTASignatureRegistersBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS
++ *psPDumpTASignatureRegistersIN,
++ void *psBridgeOut, struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++int SGXPDumpHWPerfCBBW(u32 ui32BridgeID,
++ struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
++ struct PVRSRV_BRIDGE_RETURN *psRetOUT,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++#endif
++#endif
+diff --git a/drivers/gpu/pvr/bridged_support.c b/drivers/gpu/pvr/bridged_support.c
+new file mode 100644
+index 0000000..499a3df
+--- /dev/null
++++ b/drivers/gpu/pvr/bridged_support.c
+@@ -0,0 +1,77 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "servicesint.h"
++#include "bridged_support.h"
++
++enum PVRSRV_ERROR
++PVRSRVLookupOSMemHandle(struct PVRSRV_HANDLE_BASE *psHandleBase,
++ void **phOSMemHandle, void *hMHandle)
++{
++ void *hMHandleInt;
++ enum PVRSRV_HANDLE_TYPE eHandleType;
++ enum PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt,
++ &eHandleType, hMHandle);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ switch (eHandleType) {
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++ {
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)hMHandleInt;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SYNC_INFO:
++ {
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ psSyncInfo->psSyncDataMemInfoKM;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SOC_TIMER:
++ {
++ *phOSMemHandle = (void *)hMHandleInt;
++ break;
++ }
++ default:
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ return PVRSRV_OK;;
++}
+diff --git a/drivers/gpu/pvr/bridged_support.h b/drivers/gpu/pvr/bridged_support.h
+new file mode 100644
+index 0000000..96b8643
+--- /dev/null
++++ b/drivers/gpu/pvr/bridged_support.h
+@@ -0,0 +1,35 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SUPPORT_H__
++#define __BRIDGED_SUPPORT_H__
++
++#include "handle.h"
++
++enum PVRSRV_ERROR PVRSRVLookupOSMemHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phOSMemHandle, void *hMHandle);
++
++#endif
+diff --git a/drivers/gpu/pvr/buffer_manager.c b/drivers/gpu/pvr/buffer_manager.c
+new file mode 100644
+index 0000000..7d633dc
+--- /dev/null
++++ b/drivers/gpu/pvr/buffer_manager.c
+@@ -0,0 +1,1486 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a, b) (a > b ? b : a)
++
++static IMG_BOOL ZeroBuf(struct BM_BUF *pBuf, struct BM_MAPPING *pMapping,
++ u32 ui32Bytes, u32 ui32Flags);
++static void BM_FreeMemory(void *pH, u32 base, struct BM_MAPPING *psMapping);
++static IMG_BOOL BM_ImportMemory(void *pH, size_t uSize,
++ size_t *pActualSize, struct BM_MAPPING **ppsMapping, u32 uFlags,
++ u32 *pBase);
++
++static IMG_BOOL DevMemoryAlloc(struct BM_CONTEXT *pBMContext,
++ struct BM_MAPPING *pMapping, u32 uFlags,
++ u32 dev_vaddr_alignment, struct IMG_DEV_VIRTADDR *pDevVAddr);
++static void DevMemoryFree(struct BM_MAPPING *pMapping);
++
++static IMG_BOOL AllocMemory(struct BM_CONTEXT *pBMContext,
++ struct BM_HEAP *psBMHeap, struct IMG_DEV_VIRTADDR *psDevVAddr,
++ size_t uSize, u32 uFlags, u32 uDevVAddrAlignment,
++ struct BM_BUF *pBuf)
++{
++ struct BM_MAPPING *pMapping;
++ u32 uOffset;
++ struct RA_ARENA *pArena = NULL;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory "
++ "(pBMContext=%08X, uSize=0x%x, uFlags=0x%x, "
++ "align=0x%x, pBuf=%08X)",
++ pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf);
++
++ if (uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
++ if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
++ PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
++ "combination of DevVAddr management and "
++ "RAM backing mode unsupported");
++ return IMG_FALSE;
++ }
++
++ if (psBMHeap->ui32Attribs &
++ (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
++ PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++ pArena = psBMHeap->pImportArena;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
++ "backing store type doesn't match heap");
++ return IMG_FALSE;
++ }
++
++ if (!RA_Alloc(pArena, uSize, (void *)&pMapping, uFlags,
++ uDevVAddrAlignment,
++ (u32 *)&(pBuf->DevVAddr.uiAddr))) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "AllocMemory: RA_Alloc(0x%x) FAILED", uSize);
++ return IMG_FALSE;
++ }
++
++ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++ if (pMapping->CpuVAddr) {
++ pBuf->CpuVAddr =
++ (void *)((u32) pMapping->CpuVAddr + uOffset);
++ } else {
++ pBuf->CpuVAddr = NULL;
++ }
++
++ if (uSize == pMapping->uSize) {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ } else {
++ if (OSGetSubMemHandle(pMapping->hOSMemHandle, uOffset,
++ uSize, psBMHeap->ui32Attribs,
++ &pBuf->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
++ "OSGetSubMemHandle FAILED");
++ return IMG_FALSE;
++ }
++ }
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
++
++ if (uFlags & PVRSRV_MEM_ZERO)
++ if (!ZeroBuf(pBuf, pMapping, uSize,
++ psBMHeap->ui32Attribs | uFlags))
++ return IMG_FALSE;
++ } else {
++ if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
++ PVR_ASSERT(psDevVAddr != NULL);
++
++ if (psDevVAddr == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
++ "invalid parameter - psDevVAddr");
++ return IMG_FALSE;
++ }
++
++ pBMContext->psDeviceNode->pfnMMUAlloc(
++ psBMHeap->pMMUHeap, uSize,
++ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++ uDevVAddrAlignment, psDevVAddr);
++ pBuf->DevVAddr = *psDevVAddr;
++ } else {
++ pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap->
++ pMMUHeap, uSize, 0,
++ uDevVAddrAlignment,
++ &pBuf->DevVAddr);
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BM_MAPPING),
++ (void **)&pMapping, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "AllocMemory: OSAllocMem(0x%x) FAILED");
++ return IMG_FALSE;
++ }
++
++ pBuf->CpuVAddr = NULL;
++ pBuf->hOSMemHandle = NULL;
++ pBuf->CpuPAddr.uiAddr = 0;
++
++ pMapping->CpuVAddr = NULL;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->DevVAddr = pBuf->DevVAddr;
++ pMapping->psSysAddr = NULL;
++ pMapping->uSize = uSize;
++ pMapping->hOSMemHandle = NULL;
++ }
++
++ pMapping->pArena = pArena;
++
++ pMapping->pBMHeap = psBMHeap;
++ pBuf->pMapping = pMapping;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory: "
++ "pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
++ pMapping->CpuPAddr.uiAddr, pMapping->uSize);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory: "
++ "pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr, uSize);
++
++ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++ return IMG_TRUE;
++}
++
++static IMG_BOOL WrapMemory(struct BM_HEAP *psBMHeap,
++ size_t uSize, u32 ui32BaseOffset, IMG_BOOL bPhysContig,
++ struct IMG_SYS_PHYADDR *psAddr, void *pvCPUVAddr, u32 uFlags,
++ struct BM_BUF *pBuf)
++{
++ struct IMG_DEV_VIRTADDR DevVAddr = { 0 };
++ struct BM_MAPPING *pMapping;
++ IMG_BOOL bResult;
++ u32 const ui32PageSize = HOST_PAGESIZE();
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, "
++ "bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++ psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr,
++ uFlags, pBuf);
++
++ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++
++ PVR_ASSERT(((u32) pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++ uSize += ui32BaseOffset;
++ uSize = HOST_PAGEALIGN(uSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*pMapping),
++ (void **)&pMapping, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",
++ sizeof(*pMapping));
++ return IMG_FALSE;
++ }
++
++ OSMemSet(pMapping, 0, sizeof(*pMapping));
++
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = psBMHeap;
++
++ if (pvCPUVAddr) {
++ pMapping->CpuVAddr = pvCPUVAddr;
++
++ if (bPhysContig) {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if (OSRegisterMem(pMapping->CpuPAddr,
++ pMapping->CpuVAddr, pMapping->uSize,
++ uFlags, &pMapping->hOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
++ "OSRegisterMem Phys=0x%08X, "
++ "CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->CpuVAddr,
++ pMapping->uSize);
++ goto fail_cleanup;
++ }
++ } else {
++ pMapping->eCpuMemoryOrigin =
++ hm_wrapped_scatter_virtaddr;
++ pMapping->psSysAddr = psAddr;
++
++ if (OSRegisterDiscontigMem(pMapping->psSysAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
++ "OSRegisterDiscontigMem CpuVAddr = "
++ "0x%08X, Size=%d) failed",
++ pMapping->CpuVAddr, pMapping->uSize);
++ goto fail_cleanup;
++ }
++ }
++ } else {
++ if (bPhysContig) {
++ pMapping->eCpuMemoryOrigin = hm_wrapped;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if (OSReservePhys(pMapping->CpuPAddr, pMapping->uSize,
++ uFlags, &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
++ "OSReservePhys Phys=0x%08X, Size=%d) "
++ "failed",
++ pMapping->CpuPAddr, pMapping->uSize);
++ goto fail_cleanup;
++ }
++ } else {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++ pMapping->psSysAddr = psAddr;
++
++ if (OSReserveDiscontigPhys(pMapping->psSysAddr,
++ pMapping->uSize, uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
++ "OSReserveDiscontigPhys Size=%d) failed",
++ pMapping->uSize);
++ goto fail_cleanup;
++ }
++ }
++ }
++
++ bResult = DevMemoryAlloc(psBMHeap->pBMContext, pMapping,
++ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++ ui32PageSize, &DevVAddr);
++ if (!bResult) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "WrapMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize);
++ goto fail_cleanup;
++ }
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++ if (!ui32BaseOffset)
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ else
++ if (OSGetSubMemHandle(pMapping->hOSMemHandle,
++ ui32BaseOffset,
++ (pMapping->uSize - ui32BaseOffset),
++ uFlags,
++ &pBuf->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "WrapMemory: OSGetSubMemHandle failed");
++ goto fail_cleanup;
++ }
++ if (pMapping->CpuVAddr)
++ pBuf->CpuVAddr = (void *)((u32) pMapping->CpuVAddr +
++ ui32BaseOffset);
++ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + ui32BaseOffset;
++
++ if (uFlags & PVRSRV_MEM_ZERO)
++ if (!ZeroBuf(pBuf, pMapping, uSize, uFlags))
++ return IMG_FALSE;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr);
++ PVR_DPF(PVR_DBG_MESSAGE, "WrapMemory: pMapping=%08X: DevV=%08X "
++ "CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
++ pMapping->CpuPAddr.uiAddr, pMapping->uSize);
++ PVR_DPF(PVR_DBG_MESSAGE, "WrapMemory: pBuf=%08X: DevV=%08X "
++ "CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr, uSize);
++
++ pBuf->pMapping = pMapping;
++ return IMG_TRUE;
++
++fail_cleanup:
++ if (ui32BaseOffset && pBuf->hOSMemHandle)
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ switch (pMapping->eCpuMemoryOrigin) {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize,
++ uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize,
++ uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
++ pMapping->uSize, uFlags,
++ pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
++ pMapping->uSize, uFlags,
++ pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
++ NULL);
++
++ return IMG_FALSE;
++}
++
++static IMG_BOOL ZeroBuf(struct BM_BUF *pBuf, struct BM_MAPPING *pMapping,
++ u32 ui32Bytes, u32 ui32Flags)
++{
++ void *pvCpuVAddr;
++
++ if (pBuf->CpuVAddr) {
++ OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
++ } else if (pMapping->eCpuMemoryOrigin == hm_contiguous ||
++ pMapping->eCpuMemoryOrigin == hm_wrapped) {
++ pvCpuVAddr = (void __force *)OSMapPhysToLin(pBuf->CpuPAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY |
++ (ui32Flags &
++ PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++ if (!pvCpuVAddr) {
++ PVR_DPF(PVR_DBG_ERROR, "ZeroBuf: "
++ "OSMapPhysToLin for contiguous buffer failed");
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32Bytes);
++ OSUnMapPhysToLin((void __force __iomem *)pvCpuVAddr, ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY |
++ (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++ } else {
++ u32 ui32BytesRemaining = ui32Bytes;
++ u32 ui32CurrentOffset = 0;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++
++ PVR_ASSERT(pBuf->hOSMemHandle);
++
++ while (ui32BytesRemaining > 0) {
++ u32 ui32BlockBytes =
++ MIN(ui32BytesRemaining, HOST_PAGESIZE());
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(pBuf->hOSMemHandle,
++ ui32CurrentOffset);
++
++ if (CpuPAddr.uiAddr & (HOST_PAGESIZE() - 1))
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining,
++ HOST_PAGEALIGN(CpuPAddr.uiAddr) -
++ CpuPAddr.uiAddr);
++
++ pvCpuVAddr = (void __force *)OSMapPhysToLin(CpuPAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY |
++ (ui32Flags &
++ PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++ if (!pvCpuVAddr) {
++ PVR_DPF(PVR_DBG_ERROR, "ZeroBuf: "
++ "OSMapPhysToLin while "
++ "zeroing non-contiguous memory FAILED");
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
++ OSUnMapPhysToLin((void __force __iomem *)pvCpuVAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY |
++ (ui32Flags &
++ PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++
++ ui32BytesRemaining -= ui32BlockBytes;
++ ui32CurrentOffset += ui32BlockBytes;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static void FreeBuf(struct BM_BUF *pBuf, u32 ui32Flags)
++{
++ struct BM_MAPPING *pMapping;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr);
++
++ pMapping = pBuf->pMapping;
++
++ if (ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
++ if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ PVR_DPF(PVR_DBG_ERROR, "FreeBuf: "
++ "combination of DevVAddr management "
++ "and RAM backing mode unsupported");
++ else
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BM_MAPPING),
++ pMapping, NULL);
++ } else {
++ if (pBuf->hOSMemHandle != pMapping->hOSMemHandle)
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++ if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
++ RA_Free(pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr,
++ IMG_FALSE);
++ } else {
++ switch (pMapping->eCpuMemoryOrigin) {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr,
++ pMapping->uSize, ui32Flags,
++ pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr,
++ pMapping->uSize, ui32Flags,
++ pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
++ pMapping->uSize,
++ ui32Flags,
++ pMapping->
++ hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
++ pMapping->uSize,
++ ui32Flags,
++ pMapping->
++ hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ DevMemoryFree(pMapping);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BM_MAPPING), pMapping, NULL);
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf, NULL);
++}
++
++void BM_DestroyContext(void *hBMContext)
++{
++ struct BM_CONTEXT *pBMContext = (struct BM_CONTEXT *)hBMContext;
++ struct BM_HEAP *psBMHeap;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_DestroyContext");
++
++ for (psBMHeap = pBMContext->psBMHeap;
++ psBMHeap != NULL; psBMHeap = psBMHeap->psNext)
++ if (psBMHeap->ui32Attribs &
++ (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
++ PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ if (psBMHeap->pImportArena) {
++ IMG_BOOL bTestDelete =
++ RA_TestDelete(psBMHeap->pImportArena);
++ BUG_ON(!bTestDelete);
++ }
++
++ ResManFreeResByPtr(pBMContext->hResItem);
++}
++
++static enum PVRSRV_ERROR BM_DestroyContextCallBack(void *pvParam, u32 ui32Param)
++{
++ struct BM_CONTEXT *pBMContext = pvParam;
++ struct BM_CONTEXT **ppBMContext;
++ struct BM_HEAP *psBMHeap, *psTmpBMHeap;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ psBMHeap = pBMContext->psBMHeap;
++ while (psBMHeap) {
++ if (psBMHeap->ui32Attribs &
++ (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
++ PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++ if (psBMHeap->pImportArena)
++ RA_Delete(psBMHeap->pImportArena);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "BM_DestroyContext: "
++ "backing store type unsupported");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++ psTmpBMHeap = psBMHeap;
++
++ psBMHeap = psBMHeap->psNext;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_HEAP),
++ psTmpBMHeap, NULL);
++ }
++
++ if (pBMContext->psMMUContext)
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++
++ if (pBMContext->pBufferHash)
++ HASH_Delete(pBMContext->pBufferHash);
++
++ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext) {
++ psDeviceNode->sDevMemoryInfo.pBMKernelContext = NULL;
++ } else {
++ for (ppBMContext = &psDeviceNode->sDevMemoryInfo.pBMContext;
++ *ppBMContext; ppBMContext = &((*ppBMContext)->psNext))
++ if (*ppBMContext == pBMContext) {
++ *ppBMContext = pBMContext->psNext;
++ break;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_CONTEXT),
++ pBMContext, NULL);
++
++ return PVRSRV_OK;
++}
++
++void *BM_CreateContext(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct IMG_DEV_PHYADDR *psPDDevPAddr,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, IMG_BOOL *pbCreated)
++{
++ struct BM_CONTEXT *pBMContext;
++ struct BM_HEAP *psBMHeap;
++ struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_BOOL bKernelContext;
++ struct RESMAN_CONTEXT *hResManContext;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_CreateContext");
++
++ if (psPerProc == NULL) {
++ bKernelContext = IMG_TRUE;
++ hResManContext = psDeviceNode->hResManContext;
++ } else {
++ bKernelContext = IMG_FALSE;
++ hResManContext = psPerProc->hResManContext;
++ }
++
++ if (pbCreated != NULL)
++ *pbCreated = IMG_FALSE;
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ if (bKernelContext == IMG_FALSE)
++ for (pBMContext = psDevMemoryInfo->pBMContext;
++ pBMContext != NULL; pBMContext = pBMContext->psNext)
++ if (ResManFindResourceByPtr(hResManContext,
++ pBMContext->hResItem) ==
++ PVRSRV_OK) {
++ pBMContext->ui32RefCount++;
++ return (void *)pBMContext;
++ }
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_CONTEXT),
++ (void **)&pBMContext, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_CreateContext: Alloc failed");
++ return NULL;
++ }
++ OSMemSet(pBMContext, 0, sizeof(struct BM_CONTEXT));
++
++ pBMContext->psDeviceNode = psDeviceNode;
++
++ pBMContext->pBufferHash = HASH_Create(32);
++ if (pBMContext->pBufferHash == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_CreateContext: HASH_Create failed");
++ goto cleanup;
++ }
++
++ if (psDeviceNode->pfnMMUInitialise(psDeviceNode,
++ &pBMContext->psMMUContext,
++ psPDDevPAddr) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_CreateContext: MMUInitialise failed");
++ goto cleanup;
++ }
++
++ if (bKernelContext) {
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == NULL);
++ psDevMemoryInfo->pBMKernelContext = pBMContext;
++ } else {
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++
++ if (psDevMemoryInfo->pBMKernelContext == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_CreateContext: "
++ "psDevMemoryInfo->pBMKernelContext invalid");
++ goto cleanup;
++ }
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++ pBMContext->psBMSharedHeap =
++ psDevMemoryInfo->pBMKernelContext->psBMHeap;
++
++ psBMHeap = pBMContext->psBMSharedHeap;
++ while (psBMHeap) {
++ switch (psBMHeap->sDevArena.DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ psDeviceNode->
++ pfnMMUInsertHeap(pBMContext->
++ psMMUContext,
++ psBMHeap->
++ pMMUHeap);
++ break;
++ }
++ }
++ psBMHeap = psBMHeap->psNext;
++ }
++ pBMContext->psNext = psDevMemoryInfo->pBMContext;
++ psDevMemoryInfo->pBMContext = pBMContext;
++ }
++ pBMContext->ui32RefCount++;
++ pBMContext->hResItem = ResManRegisterRes(hResManContext,
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ pBMContext,
++ 0, BM_DestroyContextCallBack);
++ if (pBMContext->hResItem == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_CreateContext: ResManRegisterRes failed");
++ goto cleanup;
++ }
++
++ if (pbCreated != NULL)
++ *pbCreated = IMG_TRUE;
++ return (void *)pBMContext;
++
++cleanup:
++ BM_DestroyContextCallBack(pBMContext, 0);
++
++ return NULL;
++}
++
++void *BM_CreateHeap(void *hBMContext,
++ struct DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
++{
++ struct BM_CONTEXT *pBMContext = (struct BM_CONTEXT *)hBMContext;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = pBMContext->psDeviceNode;
++ struct BM_HEAP *psBMHeap;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_CreateHeap");
++
++ if (!pBMContext)
++ return NULL;
++
++ if (pBMContext->ui32RefCount > 0) {
++ psBMHeap = pBMContext->psBMHeap;
++
++ while (psBMHeap) {
++ if (psBMHeap->sDevArena.ui32HeapID ==
++ psDevMemHeapInfo->ui32HeapID)
++
++ return psBMHeap;
++ psBMHeap = psBMHeap->psNext;
++ }
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_HEAP),
++ (void **) &psBMHeap, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed");
++ return NULL;
++ }
++
++ OSMemSet(psBMHeap, 0, sizeof(struct BM_HEAP));
++
++ psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++ psBMHeap->sDevArena.ui32DataPageSize =
++ psDevMemHeapInfo->ui32DataPageSize;
++ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++ psBMHeap->pBMContext = pBMContext;
++
++ psBMHeap->pMMUHeap =
++ psDeviceNode->pfnMMUCreate(pBMContext->psMMUContext,
++ &psBMHeap->sDevArena,
++ &psBMHeap->pVMArena);
++ if (!psBMHeap->pMMUHeap) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed");
++ goto ErrorExit;
++ }
++
++ psBMHeap->pImportArena = RA_Create(psDevMemHeapInfo->pszBSName,
++ 0, 0, NULL,
++ psBMHeap->sDevArena.ui32DataPageSize,
++ BM_ImportMemory,
++ BM_FreeMemory, NULL, psBMHeap);
++ if (psBMHeap->pImportArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed");
++ goto ErrorExit;
++ }
++
++ if (psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
++
++ psBMHeap->pLocalDevMemArena =
++ psDevMemHeapInfo->psLocalDevMemArena;
++ if (psBMHeap->pLocalDevMemArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_CreateHeap: LocalDevMemArena null");
++ goto ErrorExit;
++ }
++ }
++
++ psBMHeap->psNext = pBMContext->psBMHeap;
++ pBMContext->psBMHeap = psBMHeap;
++
++ return (void *)psBMHeap;
++
++ErrorExit:
++
++ if (psBMHeap->pMMUHeap != NULL) {
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_HEAP),
++ psBMHeap, NULL);
++
++ return NULL;
++}
++
++void BM_DestroyHeap(void *hDevMemHeap)
++{
++ struct BM_HEAP *psBMHeap = (struct BM_HEAP *)hDevMemHeap;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode =
++ psBMHeap->pBMContext->psDeviceNode;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_DestroyHeap");
++
++ if (psBMHeap) {
++ struct BM_HEAP **ppsBMHeap;
++
++ if (psBMHeap->ui32Attribs &
++ (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
++ PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++ if (psBMHeap->pImportArena)
++ RA_Delete(psBMHeap->pImportArena);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_DestroyHeap: backing store type unsupported");
++ return;
++ }
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++ ppsBMHeap = &psBMHeap->pBMContext->psBMHeap;
++ while (*ppsBMHeap) {
++ if (*ppsBMHeap == psBMHeap) {
++ *ppsBMHeap = psBMHeap->psNext;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BM_HEAP), psBMHeap,
++ NULL);
++ break;
++ }
++ ppsBMHeap = &((*ppsBMHeap)->psNext);
++ }
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle");
++ }
++}
++
++IMG_BOOL BM_Reinitialise(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_Reinitialise");
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL BM_Alloc(void *hDevMemHeap, struct IMG_DEV_VIRTADDR *psDevVAddr,
++ size_t uSize, u32 *pui32Flags, u32 uDevVAddrAlignment,
++ void **phBuf)
++{
++ struct BM_BUF *pBuf;
++ struct BM_CONTEXT *pBMContext;
++ struct BM_HEAP *psBMHeap;
++ struct SYS_DATA *psSysData;
++ u32 uFlags;
++
++ if (pui32Flags == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_Alloc: invalid parameter");
++ PVR_DBG_BREAK;
++ return IMG_FALSE;
++ }
++
++ uFlags = *pui32Flags;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++ uSize, uFlags, uDevVAddrAlignment);
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return IMG_FALSE;
++
++ psBMHeap = (struct BM_HEAP *)hDevMemHeap;
++ pBMContext = psBMHeap->pBMContext;
++
++ if (uDevVAddrAlignment == 0)
++ uDevVAddrAlignment = 1;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF),
++ (void **)&pBuf, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED");
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof(struct BM_BUF));
++
++ if (AllocMemory(pBMContext, psBMHeap, psDevVAddr, uSize, uFlags,
++ uDevVAddrAlignment, pBuf) != IMG_TRUE) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf,
++ NULL);
++ PVR_DPF(PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED");
++ return IMG_FALSE;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++ uSize, uFlags, pBuf);
++
++ pBuf->ui32RefCount = 1;
++ pvr_get_ctx(pBMContext);
++ *phBuf = (void *) pBuf;
++ *pui32Flags = uFlags | psBMHeap->ui32Attribs;
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL BM_Wrap(void *hDevMemHeap, u32 ui32Size, u32 ui32Offset,
++ IMG_BOOL bPhysContig, struct IMG_SYS_PHYADDR *psSysAddr,
++ void *pvCPUVAddr, u32 *pui32Flags, void **phBuf)
++{
++ struct BM_BUF *pBuf;
++ struct BM_CONTEXT *psBMContext;
++ struct BM_HEAP *psBMHeap;
++ struct SYS_DATA *psSysData;
++ struct IMG_SYS_PHYADDR sHashAddress;
++ u32 uFlags;
++
++ psBMHeap = (struct BM_HEAP *)hDevMemHeap;
++ psBMContext = psBMHeap->pBMContext;
++
++ uFlags = psBMHeap->ui32Attribs &
++ (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++ if (pui32Flags)
++ uFlags |= *pui32Flags;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_Wrap (uSize=0x%x, uOffset=0x%x, "
++ "bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++ ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags);
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return IMG_FALSE;
++
++ sHashAddress = psSysAddr[0];
++
++ sHashAddress.uiAddr += ui32Offset;
++
++ pBuf = (struct BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash,
++ (u32) sHashAddress.uiAddr);
++
++ if (pBuf) {
++ u32 ui32MappingSize =
++ HOST_PAGEALIGN(ui32Size + ui32Offset);
++
++ if (pBuf->pMapping->uSize == ui32MappingSize &&
++ (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++ pBuf->pMapping->eCpuMemoryOrigin ==
++ hm_wrapped_virtaddr)) {
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_Wrap "
++ "(Matched previous Wrap! uSize=0x%x, "
++ "uOffset=0x%x, SysAddr=%08X)",
++ ui32Size, ui32Offset, sHashAddress.uiAddr);
++
++ pBuf->ui32RefCount++;
++ *phBuf = (void *)pBuf;
++ if (pui32Flags)
++ *pui32Flags = uFlags;
++
++ return IMG_TRUE;
++ }
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF),
++ (void **)&pBuf, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED");
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof(struct BM_BUF));
++
++ if (WrapMemory(psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr,
++ pvCPUVAddr, uFlags, pBuf) != IMG_TRUE) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED");
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf,
++ NULL);
++ return IMG_FALSE;
++ }
++
++ if (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++ pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr) {
++
++ PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr ==
++ pBuf->CpuPAddr.uiAddr);
++
++ if (!HASH_Insert(psBMContext->pBufferHash,
++ (u32)sHashAddress.uiAddr, (u32) pBuf)) {
++ FreeBuf(pBuf, uFlags);
++ PVR_DPF(PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED");
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++ ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr);
++
++ pBuf->ui32RefCount = 1;
++ pvr_get_ctx(psBMContext);
++ *phBuf = (void *) pBuf;
++ if (pui32Flags)
++ *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) |
++ PVRSRV_HAP_MULTI_PROCESS;
++
++ return IMG_TRUE;
++}
++
++void BM_Free(void *hBuf, u32 ui32Flags)
++{
++ struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
++ struct SYS_DATA *psSysData;
++ struct IMG_SYS_PHYADDR sHashAddr;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf);
++ PVR_ASSERT(pBuf != NULL);
++
++ if (pBuf == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_Free: invalid parameter");
++ return;
++ }
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return;
++
++ pBuf->ui32RefCount--;
++
++ if (pBuf->ui32RefCount == 0) {
++ struct BM_MAPPING *map = pBuf->pMapping;
++ struct BM_CONTEXT *ctx = map->pBMHeap->pBMContext;
++
++ if (map->eCpuMemoryOrigin == hm_wrapped ||
++ map->eCpuMemoryOrigin == hm_wrapped_virtaddr) {
++ sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++ HASH_Remove(ctx->pBufferHash, (u32)sHashAddr.uiAddr);
++ }
++ FreeBuf(pBuf, ui32Flags);
++ pvr_put_ctx(ctx);
++ }
++}
++
++void *BM_HandleToCpuVaddr(void *hBuf)
++{
++ struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++ if (pBuf == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_HandleToCpuVaddr: invalid parameter");
++ return NULL;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "BM_HandleToCpuVaddr(h=%08X)=%08X", hBuf, pBuf->CpuVAddr);
++ return pBuf->CpuVAddr;
++}
++
++struct IMG_DEV_VIRTADDR BM_HandleToDevVaddr(void *hBuf)
++{
++ struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++ if (pBuf == NULL) {
++ struct IMG_DEV_VIRTADDR DevVAddr = { 0 };
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_HandleToDevVaddr: invalid parameter");
++ return DevVAddr;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf,
++ pBuf->DevVAddr);
++ return pBuf->DevVAddr;
++}
++
++struct IMG_SYS_PHYADDR BM_HandleToSysPaddr(void *hBuf)
++{
++ struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++
++ if (pBuf == NULL) {
++ struct IMG_SYS_PHYADDR PhysAddr = { 0 };
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_HandleToSysPaddr: invalid parameter");
++ return PhysAddr;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf,
++ pBuf->CpuPAddr.uiAddr);
++ return SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++}
++
++void *BM_HandleToOSMemHandle(void *hBuf)
++{
++ struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++
++ if (pBuf == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_HandleToOSMemHandle: invalid parameter");
++ return NULL;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "BM_HandleToOSMemHandle(h=%08X)=%08X",
++ hBuf, pBuf->hOSMemHandle);
++ return pBuf->hOSMemHandle;
++}
++
++IMG_BOOL BM_ContiguousStatistics(u32 uFlags, u32 *pTotalBytes,
++ u32 *pAvailableBytes)
++{
++ if (pAvailableBytes || pTotalBytes || uFlags)
++ ;
++ return IMG_FALSE;
++}
++
++static IMG_BOOL DevMemoryAlloc(struct BM_CONTEXT *pBMContext,
++ struct BM_MAPPING *pMapping, u32 uFlags, u32 dev_vaddr_alignment,
++ struct IMG_DEV_VIRTADDR *pDevVAddr)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ u32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if (uFlags & PVRSRV_MEM_INTERLEAVED)
++
++ pMapping->uSize *= 2;
++#ifdef PDUMP
++ if (uFlags & PVRSRV_MEM_DUMMY)
++
++ ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++#endif
++
++ if (!psDeviceNode->pfnMMUAlloc(pMapping->pBMHeap->pMMUHeap,
++ pMapping->uSize, 0, dev_vaddr_alignment,
++ &(pMapping->DevVAddr))) {
++ PVR_DPF(PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc");
++ return IMG_FALSE;
++ }
++
++ PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType,
++ pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
++ pMapping->hOSMemHandle, ui32PDumpSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (void *)pMapping);
++
++ switch (pMapping->eCpuMemoryOrigin) {
++ case hm_wrapped:
++ case hm_wrapped_virtaddr:
++ case hm_contiguous:
++ {
++ psDeviceNode->pfnMMUMapPages(pMapping->pBMHeap->
++ pMMUHeap,
++ pMapping->DevVAddr,
++ SysCpuPAddrToSysPAddr
++ (pMapping->CpuPAddr),
++ pMapping->uSize, uFlags,
++ (void *)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ case hm_env:
++ {
++ psDeviceNode->pfnMMUMapShadow(pMapping->pBMHeap->
++ pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->uSize,
++ pMapping->CpuVAddr,
++ pMapping->hOSMemHandle,
++ pDevVAddr, uFlags,
++ (void *)pMapping);
++ break;
++ }
++ case hm_wrapped_scatter:
++ case hm_wrapped_scatter_virtaddr:
++ {
++ psDeviceNode->pfnMMUMapScatter(pMapping->pBMHeap->
++ pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->psSysAddr,
++ pMapping->uSize, uFlags,
++ (void *)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "Illegal value %d for pMapping->eCpuMemoryOrigin",
++ pMapping->eCpuMemoryOrigin);
++ return IMG_FALSE;
++ }
++
++
++ return IMG_TRUE;
++}
++
++static void DevMemoryFree(struct BM_MAPPING *pMapping)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ u32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ else
++ ui32PSize = pMapping->uSize;
++
++ PDUMPFREEPAGES(pMapping->pBMHeap, pMapping->DevVAddr, ui32PSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (void *)pMapping, (IMG_BOOL)(pMapping->
++ ui32Flags & PVRSRV_MEM_INTERLEAVED));
++#endif
++
++ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ psDeviceNode->pfnMMUFree(pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr, pMapping->uSize);
++}
++
++static IMG_BOOL BM_ImportMemory(void *pH, size_t uRequestSize,
++ size_t *pActualSize, struct BM_MAPPING **ppsMapping,
++ u32 uFlags, u32 *pBase)
++{
++ struct BM_MAPPING *pMapping;
++ struct BM_HEAP *pBMHeap = pH;
++ struct BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++ IMG_BOOL bResult;
++ size_t uSize;
++ size_t uPSize;
++ u32 uDevVAddrAlignment = 0;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, "
++ "uFlags=0x%x, uAlign=0x%x)",
++ pBMContext, uRequestSize, uFlags, uDevVAddrAlignment);
++
++ PVR_ASSERT(ppsMapping != NULL);
++ PVR_ASSERT(pBMContext != NULL);
++
++ if (ppsMapping == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter");
++ goto fail_exit;
++ }
++
++ uSize = HOST_PAGEALIGN(uRequestSize);
++ PVR_ASSERT(uSize >= uRequestSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
++ (void **)&pMapping, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_ImportMemory: failed struct BM_MAPPING alloc");
++ goto fail_exit;
++ }
++
++ pMapping->hOSMemHandle = NULL;
++ pMapping->CpuVAddr = NULL;
++ pMapping->DevVAddr.uiAddr = 0;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = pBMHeap;
++ pMapping->ui32Flags = uFlags;
++
++ if (pActualSize)
++ *pActualSize = uSize;
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ else
++ uPSize = pMapping->uSize;
++
++ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
++ if (OSAllocPages(pBMHeap->ui32Attribs, uPSize,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ (void **)&pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_ImportMemory: OSAllocPages(0x%x) failed",
++ uPSize);
++ goto fail_mapping_alloc;
++ }
++
++ pMapping->eCpuMemoryOrigin = hm_env;
++ } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++
++ PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);
++
++ if (!RA_Alloc(pBMHeap->pLocalDevMemArena, uPSize, NULL, 0,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ (u32 *)&sSysPAddr.uiAddr)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_ImportMemory: RA_Alloc(0x%x) FAILED",
++ uPSize);
++ goto fail_mapping_alloc;
++ }
++
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ if (OSReservePhys(pMapping->CpuPAddr, uPSize,
++ pBMHeap->ui32Attribs, &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_ImportMemory: OSReservePhys failed");
++ goto fail_dev_mem_alloc;
++ }
++
++ pMapping->eCpuMemoryOrigin = hm_contiguous;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_ImportMemory: Invalid backing store type");
++ goto fail_mapping_alloc;
++ }
++
++ bResult = DevMemoryAlloc(pBMContext, pMapping, uFlags,
++ uDevVAddrAlignment, &pMapping->DevVAddr);
++ if (!bResult) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize);
++ goto fail_dev_mem_alloc;
++ }
++
++ PVR_ASSERT(uDevVAddrAlignment > 1 ?
++ (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);
++
++ *pBase = pMapping->DevVAddr.uiAddr;
++ *ppsMapping = pMapping;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE");
++ return IMG_TRUE;
++
++fail_dev_mem_alloc:
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) {
++ if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++ pMapping->uSize /= 2;
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ else
++ uPSize = pMapping->uSize;
++
++ if (pBMHeap->ui32Attribs &
++ PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
++ OSFreePages(pBMHeap->ui32Attribs, uPSize,
++ (void *)pMapping->CpuVAddr,
++ pMapping->hOSMemHandle);
++ } else {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++
++ if (pMapping->CpuVAddr)
++ OSUnReservePhys(pMapping->CpuVAddr, uPSize,
++ pBMHeap->ui32Attribs,
++ pMapping->hOSMemHandle);
++ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
++ RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
++ IMG_FALSE);
++ }
++ }
++fail_mapping_alloc:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
++ NULL);
++fail_exit:
++ return IMG_FALSE;
++}
++
++static void BM_FreeMemory(void *h, u32 _base, struct BM_MAPPING *psMapping)
++{
++ struct BM_HEAP *pBMHeap = h;
++ size_t uPSize;
++
++ PVR_UNREFERENCED_PARAMETER(_base);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base,
++ psMapping);
++
++ PVR_ASSERT(psMapping != NULL);
++
++ if (psMapping == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter");
++ return;
++ }
++
++ DevMemoryFree(psMapping);
++
++ if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
++ psMapping->uSize /= 2;
++
++ if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ else
++ uPSize = psMapping->uSize;
++
++ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
++ OSFreePages(pBMHeap->ui32Attribs, uPSize,
++ (void *)psMapping->CpuVAddr,
++ psMapping->hOSMemHandle);
++ } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++
++ OSUnReservePhys(psMapping->CpuVAddr, uPSize,
++ pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++ RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
++ IMG_FALSE);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "BM_FreeMemory: Invalid backing store type");
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), psMapping,
++ NULL);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++ h, _base, psMapping);
++}
++
++enum PVRSRV_ERROR BM_GetPhysPageAddr(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ struct IMG_DEV_VIRTADDR sDevVPageAddr,
++ struct IMG_DEV_PHYADDR *psDevPAddr)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "BM_GetPhysPageAddr");
++
++ if (!psMemInfo || !psDevPAddr) {
++ PVR_DPF(PVR_DBG_ERROR, "BM_GetPhysPageAddr: Invalid params");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ psDeviceNode =
++ ((struct BM_BUF *)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->
++ pBMContext->psDeviceNode;
++
++ *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((struct BM_BUF *)
++ psMemInfo->sMemBlk.hBuffer)->
++ pMapping->pBMHeap->pMMUHeap, sDevVPageAddr);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR BM_GetHeapInfo(void *hDevMemHeap,
++ struct PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ struct BM_HEAP *psBMHeap = (struct BM_HEAP *)hDevMemHeap;
++
++ PVR_DPF(PVR_DBG_VERBOSE, "BM_GetHeapInfo");
++
++ psHeapInfo->hDevMemHeap = hDevMemHeap;
++ psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++ psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++ psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++ return PVRSRV_OK;
++}
++
++struct MMU_CONTEXT *BM_GetMMUContext(void *hDevMemHeap)
++{
++ struct BM_HEAP *pBMHeap = (struct BM_HEAP *)hDevMemHeap;
++
++ PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMMUContext");
++
++ return pBMHeap->pBMContext->psMMUContext;
++}
++
++struct MMU_CONTEXT *BM_GetMMUContextFromMemContext(void *hDevMemContext)
++{
++ struct BM_CONTEXT *pBMContext = (struct BM_CONTEXT *)hDevMemContext;
++
++ PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext");
++
++ return pBMContext->psMMUContext;
++}
++
++void *BM_GetMMUHeap(void *hDevMemHeap)
++{
++ PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMMUHeap");
++
++ return (void *)((struct BM_HEAP *)hDevMemHeap)->pMMUHeap;
++}
++
++struct PVRSRV_DEVICE_NODE *BM_GetDeviceNode(void *hDevMemContext)
++{
++ PVR_DPF(PVR_DBG_VERBOSE, "BM_GetDeviceNode");
++
++ return ((struct BM_CONTEXT *)hDevMemContext)->psDeviceNode;
++}
++
++void *BM_GetMappingHandle(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVR_DPF(PVR_DBG_VERBOSE, "BM_GetMappingHandle");
++
++ return ((struct BM_BUF *)
++ psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
+diff --git a/drivers/gpu/pvr/buffer_manager.h b/drivers/gpu/pvr/buffer_manager.h
+new file mode 100644
+index 0000000..45de4d0
+--- /dev/null
++++ b/drivers/gpu/pvr/buffer_manager.h
+@@ -0,0 +1,169 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++#include "perproc.h"
++
++
++struct BM_HEAP;
++
++struct BM_MAPPING {
++ enum {
++ hm_wrapped = 1,
++ hm_wrapped_scatter,
++ hm_wrapped_virtaddr,
++ hm_wrapped_scatter_virtaddr,
++ hm_env,
++ hm_contiguous
++ } eCpuMemoryOrigin;
++
++ struct BM_HEAP *pBMHeap;
++ struct RA_ARENA *pArena;
++
++ void *CpuVAddr;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ struct IMG_DEV_VIRTADDR DevVAddr;
++ struct IMG_SYS_PHYADDR *psSysAddr;
++ size_t uSize;
++ void *hOSMemHandle;
++ u32 ui32Flags;
++};
++
++struct BM_BUF {
++ void **CpuVAddr;
++ void *hOSMemHandle;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ struct IMG_DEV_VIRTADDR DevVAddr;
++
++ struct BM_MAPPING *pMapping;
++ u32 ui32RefCount;
++};
++
++struct BM_HEAP {
++ u32 ui32Attribs;
++ struct BM_CONTEXT *pBMContext;
++ struct RA_ARENA *pImportArena;
++ struct RA_ARENA *pLocalDevMemArena;
++ struct RA_ARENA *pVMArena;
++ struct DEV_ARENA_DESCRIPTOR sDevArena;
++ struct MMU_HEAP *pMMUHeap;
++
++ struct BM_HEAP *psNext;
++};
++
++struct BM_CONTEXT {
++ struct MMU_CONTEXT *psMMUContext;
++ struct BM_HEAP *psBMHeap;
++ struct BM_HEAP *psBMSharedHeap;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct HASH_TABLE *pBufferHash;
++ void *hResItem;
++ u32 ui32RefCount;
++ struct BM_CONTEXT *psNext;
++};
++
++#define BP_POOL_MASK 0x7
++
++#define BP_CONTIGUOUS (1 << 3)
++#define BP_PARAMBUFFER (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS 2
++
++void *BM_CreateContext(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct IMG_DEV_PHYADDR *psPDDevPAddr,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated);
++
++void BM_DestroyContext(void *hBMContext);
++
++static inline void pvr_get_ctx(struct BM_CONTEXT *ctx)
++{
++ WARN_ON(!ctx->ui32RefCount);
++ ctx->ui32RefCount++;
++}
++
++static inline bool pvr_put_ctx(struct BM_CONTEXT *ctx)
++{
++ BUG_ON(!ctx->ui32RefCount);
++ ctx->ui32RefCount--;
++ if (!ctx->ui32RefCount) {
++ BM_DestroyContext(ctx);
++
++ return true;
++ }
++
++ return false;
++}
++
++void *BM_CreateHeap(void *hBMContext,
++ struct DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++void BM_DestroyHeap(void *hDevMemHeap);
++IMG_BOOL BM_Reinitialise(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL BM_Alloc(void *hDevMemHeap,
++ struct IMG_DEV_VIRTADDR *psDevVAddr,
++ size_t uSize, u32 *pui32Flags, u32 uDevVAddrAlignment, void **phBuf);
++
++IMG_BOOL BM_Wrap(void *hDevMemHeap,
++ u32 ui32Size,
++ u32 ui32Offset,
++ IMG_BOOL bPhysContig,
++ struct IMG_SYS_PHYADDR *psSysAddr,
++ void *pvCPUVAddr, u32 *pui32Flags, void **phBuf);
++
++void BM_Free(void *hBuf, u32 ui32Flags);
++void *BM_HandleToCpuVaddr(void *hBuf);
++struct IMG_DEV_VIRTADDR BM_HandleToDevVaddr(void *hBuf);
++
++struct IMG_SYS_PHYADDR BM_HandleToSysPaddr(void *hBuf);
++
++void *BM_HandleToOSMemHandle(void *hBuf);
++
++IMG_BOOL BM_ContiguousStatistics(u32 uFlags, u32 *pTotalBytes,
++ u32 *pAvailableBytes);
++
++enum PVRSRV_ERROR BM_GetPhysPageAddr(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ struct IMG_DEV_VIRTADDR sDevVPageAddr,
++ struct IMG_DEV_PHYADDR *psDevPAddr);
++
++enum PVRSRV_ERROR BM_GetHeapInfo(void *hDevMemHeap,
++ struct PVRSRV_HEAP_INFO *psHeapInfo);
++
++struct MMU_CONTEXT *BM_GetMMUContext(void *hDevMemHeap);
++
++struct MMU_CONTEXT *BM_GetMMUContextFromMemContext(void *hDevMemContext);
++
++void *BM_GetMMUHeap(void *hDevMemHeap);
++
++struct PVRSRV_DEVICE_NODE *BM_GetDeviceNode(void *hDevMemContext);
++
++void *BM_GetMappingHandle(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++#endif
+diff --git a/drivers/gpu/pvr/bufferclass_example.c b/drivers/gpu/pvr/bufferclass_example.c
+new file mode 100644
+index 0000000..4f7a8ea
+--- /dev/null
++++ b/drivers/gpu/pvr/bufferclass_example.c
+@@ -0,0 +1,266 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "bufferclass_example.h"
++
++static void *gpvAnchor;
++static IMG_BOOL(*pfnGetPVRJTable)(struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *);
++
++struct BC_EXAMPLE_DEVINFO *GetAnchorPtr(void)
++{
++ return (struct BC_EXAMPLE_DEVINFO *)gpvAnchor;
++}
++
++static void SetAnchorPtr(struct BC_EXAMPLE_DEVINFO *psDevInfo)
++{
++ gpvAnchor = (void *) psDevInfo;
++}
++
++static enum PVRSRV_ERROR OpenBCDevice(void **phDevice)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo;
++
++ psDevInfo = GetAnchorPtr();
++
++ *phDevice = (void *) psDevInfo;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR CloseBCDevice(void *hDevice)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevice);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR GetBCBuffer(void *hDevice,
++ u32 ui32BufferNumber,
++ struct PVRSRV_SYNC_DATA *psSyncData,
++ void **phBuffer)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo;
++
++ if (!hDevice || !phBuffer)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct BC_EXAMPLE_DEVINFO *)hDevice;
++
++ if (ui32BufferNumber < psDevInfo->sBufferInfo.ui32BufferCount) {
++ psDevInfo->psSystemBuffer[ui32BufferNumber].psSyncData =
++ psSyncData;
++ *phBuffer =
++ (void *) &psDevInfo->psSystemBuffer[ui32BufferNumber];
++ } else {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR GetBCInfo(void *hDevice, struct BUFFER_INFO *psBCInfo)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo;
++
++ if (!hDevice || !psBCInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct BC_EXAMPLE_DEVINFO *)hDevice;
++
++ *psBCInfo = psDevInfo->sBufferInfo;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR GetBCBufferAddr(void *hDevice, void *hBuffer,
++ struct IMG_SYS_PHYADDR **ppsSysAddr,
++ u32 *pui32ByteSize, void __iomem **ppvCpuVAddr,
++ void **phOSMapInfo, IMG_BOOL *pbIsContiguous)
++{
++ struct BC_EXAMPLE_BUFFER *psBuffer;
++
++ if (!hDevice || !hBuffer || !ppsSysAddr || !pui32ByteSize)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psBuffer = (struct BC_EXAMPLE_BUFFER *)hBuffer;
++
++ *ppsSysAddr = &psBuffer->sPageAlignSysAddr;
++ *ppvCpuVAddr = psBuffer->sCPUVAddr;
++
++ *pui32ByteSize = psBuffer->ui32Size;
++
++ *phOSMapInfo = NULL;
++ *pbIsContiguous = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR BC_Example_Init(void)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo;
++ struct IMG_CPU_PHYADDR sSystemBufferCPUPAddr;
++ u32 i;
++
++ psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL) {
++
++ psDevInfo = (struct BC_EXAMPLE_DEVINFO *)
++ BCAllocKernelMem(sizeof(struct BC_EXAMPLE_DEVINFO));
++
++ if (!psDevInfo)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ SetAnchorPtr((void *) psDevInfo);
++
++ psDevInfo->ui32RefCount = 0;
++
++ if (BCOpenPVRServices(&psDevInfo->hPVRServices) != PVRSRV_OK)
++ return PVRSRV_ERROR_INIT_FAILURE;
++ if (BCGetLibFuncAddr
++ (psDevInfo->hPVRServices, "PVRGetBufferClassJTable",
++ &pfnGetPVRJTable) != PVRSRV_OK)
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++ if (!(*pfnGetPVRJTable) (&psDevInfo->sPVRJTable))
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++ psDevInfo->ui32NumBuffers = 0;
++
++ psDevInfo->psSystemBuffer =
++ BCAllocKernelMem(sizeof(struct BC_EXAMPLE_BUFFER) *
++ BC_EXAMPLE_NUM_BUFFERS);
++
++ if (!psDevInfo->psSystemBuffer)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
++ psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
++ psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
++ psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;
++ psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
++ psDevInfo->sBufferInfo.ui32Flags =
++ PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE |
++ PVRSRV_BC_FLAGS_YUVCSC_BT601;
++
++ for (i = 0; i < BC_EXAMPLE_NUM_BUFFERS; i++) {
++ u32 ui32Size =
++ BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++
++ if (psDevInfo->sBufferInfo.pixelformat ==
++ PVRSRV_PIXEL_FORMAT_NV12)
++
++ ui32Size +=
++ ((BC_EXAMPLE_STRIDE >> 1) *
++ (BC_EXAMPLE_HEIGHT >> 1) << 1);
++
++ if (BCAllocContigMemory(ui32Size,
++ &psDevInfo->psSystemBuffer[i].
++ hMemHandle,
++ &psDevInfo->psSystemBuffer[i].
++ sCPUVAddr,
++ &sSystemBufferCPUPAddr) !=
++ PVRSRV_OK)
++ break;
++
++ psDevInfo->ui32NumBuffers++;
++
++ psDevInfo->psSystemBuffer[i].ui32Size = ui32Size;
++ psDevInfo->psSystemBuffer[i].sSysAddr =
++ CpuPAddrToSysPAddrBC(sSystemBufferCPUPAddr);
++ psDevInfo->psSystemBuffer[i].sPageAlignSysAddr.uiAddr =
++ (psDevInfo->psSystemBuffer[i].sSysAddr.
++ uiAddr & 0xFFFFF000);
++ psDevInfo->psSystemBuffer[i].psSyncData = NULL;
++ }
++
++ psDevInfo->sBufferInfo.ui32BufferCount =
++ psDevInfo->ui32NumBuffers;
++
++ psDevInfo->sBCJTable.ui32TableSize =
++ sizeof(struct PVRSRV_BC_SRV2BUFFER_KMJTABLE);
++ psDevInfo->sBCJTable.pfnOpenBCDevice = OpenBCDevice;
++ psDevInfo->sBCJTable.pfnCloseBCDevice = CloseBCDevice;
++ psDevInfo->sBCJTable.pfnGetBCBuffer = GetBCBuffer;
++ psDevInfo->sBCJTable.pfnGetBCInfo = GetBCInfo;
++ psDevInfo->sBCJTable.pfnGetBufferAddr = GetBCBufferAddr;
++
++ if (psDevInfo->sPVRJTable.
++ pfnPVRSRVRegisterBCDevice(&psDevInfo->sBCJTable,
++ &psDevInfo->ui32DeviceID) !=
++ PVRSRV_OK)
++ return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++ }
++
++ psDevInfo->ui32RefCount++;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR BC_Example_Deinit(void)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo;
++ u32 i;
++ psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL)
++ return PVRSRV_ERROR_GENERIC;
++
++ psDevInfo->ui32RefCount--;
++
++ if (psDevInfo->ui32RefCount == 0) {
++
++ struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable =
++ &psDevInfo->sPVRJTable;
++
++ if (psJTable->
++ pfnPVRSRVRemoveBCDevice(psDevInfo->ui32DeviceID) !=
++ PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ if (BCClosePVRServices(psDevInfo->hPVRServices) != PVRSRV_OK) {
++ psDevInfo->hPVRServices = NULL;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ for (i = 0; i < psDevInfo->ui32NumBuffers; i++)
++ BCFreeContigMemory(psDevInfo->psSystemBuffer[i].
++ ui32Size,
++ psDevInfo->psSystemBuffer[i].
++ hMemHandle,
++ psDevInfo->psSystemBuffer[i].
++ sCPUVAddr,
++ SysPAddrToCpuPAddrBC(psDevInfo->
++ psSystemBuffer
++ [i].sSysAddr));
++
++ BCFreeKernelMem(psDevInfo);
++
++ SetAnchorPtr(NULL);
++ }
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/pvr/bufferclass_example.h b/drivers/gpu/pvr/bufferclass_example.h
+new file mode 100644
+index 0000000..c9dd094
+--- /dev/null
++++ b/drivers/gpu/pvr/bufferclass_example.h
+@@ -0,0 +1,104 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_H__
++#define __BC_EXAMPLE_H__
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kernelbuffer.h"
++
++extern IMG_BOOL PVRGetBufferClassJTable(
++ struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++#define BC_EXAMPLE_NUM_BUFFERS 3
++
++#define YUV420 1
++#ifdef YUV420
++
++#define BC_EXAMPLE_WIDTH 320
++#define BC_EXAMPLE_HEIGHT 160
++#define BC_EXAMPLE_STRIDE 320
++#define BC_EXAMPLE_PIXELFORMAT PVRSRV_PIXEL_FORMAT_NV12
++
++#else
++
++#define BC_EXAMPLE_WIDTH 320
++#define BC_EXAMPLE_HEIGHT 160
++#define BC_EXAMPLE_STRIDE (320 * 2)
++#define BC_EXAMPLE_PIXELFORMAT PVRSRV_PIXEL_FORMAT_RGB565
++
++#endif
++
++#define BC_EXAMPLE_DEVICEID 0
++
++struct BC_EXAMPLE_BUFFER {
++ u32 ui32Size;
++ void *hMemHandle;
++ struct IMG_SYS_PHYADDR sSysAddr;
++ struct IMG_SYS_PHYADDR sPageAlignSysAddr;
++ void __iomem *sCPUVAddr;
++ struct PVRSRV_SYNC_DATA *psSyncData;
++ struct BC_EXAMPLE_BUFFER *psNext;
++};
++
++struct BC_EXAMPLE_DEVINFO {
++ u32 ui32DeviceID;
++ struct BC_EXAMPLE_BUFFER *psSystemBuffer;
++ struct BUFFER_INFO sBufferInfo;
++ u32 ui32NumBuffers;
++ struct PVRSRV_BC_BUFFER2SRV_KMJTABLE sPVRJTable;
++ struct PVRSRV_BC_SRV2BUFFER_KMJTABLE sBCJTable;
++ void *hPVRServices;
++ u32 ui32RefCount;
++};
++
++enum PVRSRV_ERROR BC_Example_Init(void);
++enum PVRSRV_ERROR BC_Example_Deinit(void);
++
++enum PVRSRV_ERROR BCOpenPVRServices(void **phPVRServices);
++enum PVRSRV_ERROR BCClosePVRServices(void *hPVRServices);
++
++void *BCAllocKernelMem(u32 ui32Size);
++void BCFreeKernelMem(void *pvMem);
++
++enum PVRSRV_ERROR BCAllocContigMemory(u32 ui32Size, void **phMemHandle,
++ void __iomem **pLinAddr,
++ struct IMG_CPU_PHYADDR *pPhysAddr);
++void BCFreeContigMemory(u32 ui32Size, void *hMemHandle, void __iomem *LinAddr,
++ struct IMG_CPU_PHYADDR PhysAddr);
++
++struct IMG_SYS_PHYADDR CpuPAddrToSysPAddrBC(struct IMG_CPU_PHYADDR cpu_paddr);
++struct IMG_CPU_PHYADDR SysPAddrToCpuPAddrBC(struct IMG_SYS_PHYADDR sys_paddr);
++
++void *MapPhysAddr(struct IMG_SYS_PHYADDR sSysAddr, u32 ui32Size);
++void UnMapPhysAddr(void *pvAddr, u32 ui32Size);
++
++enum PVRSRV_ERROR BCGetLibFuncAddr(void *hExtDrv, char *szFunctionName,
++ IMG_BOOL (**ppfnFuncTable)(struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *));
++struct BC_EXAMPLE_DEVINFO *GetAnchorPtr(void);
++
++#endif
+diff --git a/drivers/gpu/pvr/bufferclass_example_linux.c b/drivers/gpu/pvr/bufferclass_example_linux.c
+new file mode 100644
+index 0000000..bfb6ab6
+--- /dev/null
++++ b/drivers/gpu/pvr/bufferclass_example_linux.c
+@@ -0,0 +1,202 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++
++#include <linux/dma-mapping.h>
++
++#include "kernelbuffer.h"
++#include "bufferclass_example.h"
++#include "bufferclass_example_linux.h"
++#include "bufferclass_example_private.h"
++#include "pvrmodule.h"
++
++#define DEVNAME "bc_example"
++#define DRVNAME DEVNAME
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++
++static int AssignedMajorNumber;
++
++#define unref__ __attribute__ ((unused))
++
++
++
++void *BCAllocKernelMem(u32 ui32Size)
++{
++ return kmalloc(ui32Size, GFP_KERNEL);
++}
++
++void BCFreeKernelMem(void *pvMem)
++{
++ kfree(pvMem);
++}
++
++enum PVRSRV_ERROR BCAllocContigMemory(u32 ui32Size, void *unref__ * phMemHandle,
++ void __iomem **pLinAddr,
++ struct IMG_CPU_PHYADDR *pPhysAddr)
++{
++ dma_addr_t dma;
++ void *pvLinAddr;
++
++ pvLinAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL);
++
++ if (pvLinAddr == NULL)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ pPhysAddr->uiAddr = dma;
++ *pLinAddr = (void __force __iomem *)pvLinAddr;
++
++ return PVRSRV_OK;
++}
++
++void BCFreeContigMemory(u32 ui32Size, void *unref__ hMemHandle,
++ void __iomem *LinAddr, struct IMG_CPU_PHYADDR PhysAddr)
++{
++ dma_free_coherent(NULL, ui32Size, (void __force *)LinAddr,
++ (dma_addr_t)PhysAddr.uiAddr);
++}
++
++struct IMG_SYS_PHYADDR CpuPAddrToSysPAddrBC(struct IMG_CPU_PHYADDR cpu_paddr)
++{
++ struct IMG_SYS_PHYADDR sys_paddr;
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++struct IMG_CPU_PHYADDR SysPAddrToCpuPAddrBC(struct IMG_SYS_PHYADDR sys_paddr)
++{
++
++ struct IMG_CPU_PHYADDR cpu_paddr;
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++enum PVRSRV_ERROR BCOpenPVRServices(void **phPVRServices)
++{
++
++ *phPVRServices = NULL;
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR BCClosePVRServices(void *unref__ hPVRServices)
++{
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR BCGetLibFuncAddr(void *unref__ hExtDrv, char *szFunctionName,
++ IMG_BOOL (**ppfnFuncTable)(struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *))
++{
++ if (strcmp("PVRGetBufferClassJTable", szFunctionName) != 0)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ *ppfnFuncTable = PVRGetBufferClassJTable;
++
++ return PVRSRV_OK;
++}
++
++static int BC_Example_Bridge(struct inode *inode, struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int err = -EFAULT;
++ int command = _IOC_NR(cmd);
++ struct BC_Example_ioctl_package __user *psBridge =
++ (struct BC_Example_ioctl_package __user *)arg;
++
++ if (!access_ok
++ (VERIFY_WRITE, psBridge, sizeof(struct BC_Example_ioctl_package)))
++ return err;
++
++ switch (command) {
++ case _IOC_NR(BC_Example_ioctl_fill_buffer):
++ if (FillBuffer(psBridge->inputparam) == -1)
++ return err;
++ break;
++ case _IOC_NR(BC_Example_ioctl_get_buffer_count):
++ if (GetBufferCount(&psBridge->outputparam) == -1)
++ return err;
++ break;
++ default:
++ return err;
++ }
++
++ return 0;
++}
++
++static const struct file_operations bufferclass_example_fops = {
++ .ioctl = BC_Example_Bridge,
++};
++
++static int __init BC_Example_ModInit(void)
++{
++ AssignedMajorNumber =
++ register_chrdev(0, DEVNAME, &bufferclass_example_fops);
++
++ if (AssignedMajorNumber <= 0) {
++ printk(KERN_ERR DRVNAME
++ ": BC_Example_ModInit: unable to get major number\n");
++
++ goto ExitDisable;
++ }
++#if defined(DEBUG)
++ printk(KERN_ERR DRVNAME ": BC_Example_ModInit: major device %d\n",
++ AssignedMajorNumber);
++#endif
++
++
++ if (BC_Example_Init() != PVRSRV_OK) {
++ printk(KERN_ERR DRVNAME
++ ": BC_Example_ModInit: can't init device\n");
++ goto ExitUnregister;
++ }
++
++ return 0;
++
++ExitUnregister:
++ unregister_chrdev(AssignedMajorNumber, DEVNAME);
++ExitDisable:
++ return -EBUSY;
++}
++
++static void __exit BC_Example_ModCleanup(void)
++{
++ unregister_chrdev(AssignedMajorNumber, DEVNAME);
++
++ if (BC_Example_Deinit() != PVRSRV_OK)
++ printk(KERN_ERR DRVNAME
++ ": BC_Example_ModCleanup: can't deinit device\n");
++
++}
++
++module_init(BC_Example_ModInit);
++module_exit(BC_Example_ModCleanup);
+diff --git a/drivers/gpu/pvr/bufferclass_example_linux.h b/drivers/gpu/pvr/bufferclass_example_linux.h
+new file mode 100644
+index 0000000..8e17fdf
+--- /dev/null
++++ b/drivers/gpu/pvr/bufferclass_example_linux.h
+@@ -0,0 +1,46 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_LINUX_H__
++#define __BC_EXAMPLE_LINUX_H__
++
++#include <linux/ioctl.h>
++
++struct BC_Example_ioctl_package {
++ int inputparam;
++ int outputparam;
++
++};
++
++#define BC_EXAMPLE_IOC_GID 'g'
++
++#define BC_EXAMPLE_IOWR(INDEX) \
++ _IOWR(BC_EXAMPLE_IOC_GID, INDEX, struct BC_Example_ioctl_package)
++
++#define BC_Example_ioctl_fill_buffer BC_EXAMPLE_IOWR(0)
++#define BC_Example_ioctl_get_buffer_count BC_EXAMPLE_IOWR(1)
++
++#endif
+diff --git a/drivers/gpu/pvr/bufferclass_example_private.c b/drivers/gpu/pvr/bufferclass_example_private.c
+new file mode 100644
+index 0000000..360524e
+--- /dev/null
++++ b/drivers/gpu/pvr/bufferclass_example_private.c
+@@ -0,0 +1,194 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "bufferclass_example.h"
++#include "bufferclass_example_private.h"
++
++#define MIN(a, b) ((a) < (b) ? (a) : (b))
++
++static void FillYUV420Image(void __iomem *pvDest, int width, int height,
++ int bytestride)
++{
++ static int iPhase;
++ int i, j;
++ unsigned char u, v, y;
++ unsigned char *pui8y = (unsigned char __force *)pvDest;
++ unsigned short *pui16uv;
++ unsigned int count = 0;
++
++ for (j = 0; j < height; j++)
++ for (i = 0; i < width; i++) {
++ y = (((i + iPhase) >> 6) % (2) == 0) ? 0x7f : 0x00;
++
++ pui8y[count++] = y;
++ }
++
++ pui16uv = (unsigned short *)
++ ((unsigned char __force *)pvDest + (width * height));
++ count = 0;
++
++ for (j = 0; j < height; j += 2)
++ for (i = 0; i < width; i += 2) {
++ u = (j < (height / 2)) ?
++ ((i < (width / 2)) ? 0xFF : 0x33) :
++ ((i < (width / 2)) ? 0x33 : 0xAA);
++ v = (j < (height / 2)) ?
++ ((i < (width / 2)) ? 0xAC : 0x0) :
++ ((i < (width / 2)) ? 0x03 : 0xEE);
++
++ pui16uv[count++] = (v << 8) | u;
++
++ }
++
++ iPhase++;
++}
++
++static void FillYUV422Image(void __iomem *pvDest, int width, int height,
++ int bytestride)
++{
++ static int iPhase;
++ int x, y;
++ unsigned char u, v, y0, y1;
++ unsigned int *pui32yuv = (unsigned int __force *)pvDest;
++ unsigned int count = 0;
++
++ for (y = 0; y < height; y++)
++ for (x = 0; x < width; x += 2) {
++ u = (y < (height / 2)) ?
++ ((x < (width / 2)) ? 0xFF : 0x33) :
++ ((x < (width / 2)) ? 0x33 : 0xAA);
++ v = (y < (height / 2)) ?
++ ((x < (width / 2)) ? 0xAA : 0x0) :
++ ((x < (width / 2)) ? 0x03 : 0xEE);
++
++ y0 = y1 =
++ (((x + iPhase) >> 6) % (2) == 0) ? 0x7f : 0x00;
++
++ pui32yuv[count++] =
++ (y1 << 24) | (v << 16) | (y0 << 8) | u;
++
++ }
++
++ iPhase++;
++}
++
++static void FillRGB565Image(void __iomem *pvDest, int width, int height,
++ int bytestride)
++{
++ int i, Count;
++ unsigned long *pui32Addr = (unsigned long __force *)pvDest;
++ unsigned short *pui16Addr = (unsigned short __force *)pvDest;
++ unsigned long Colour32;
++ unsigned short Colour16;
++ static unsigned char Colour8;
++
++ Colour16 = (Colour8 >> 3) | ((Colour8 >> 2) << 5) |
++ ((Colour8 >> 3) << 11);
++ Colour32 = Colour16 | Colour16 << 16;
++
++ Count = (height * bytestride) >> 2;
++
++ for (i = 0; i < Count; i++)
++ pui32Addr[i] = Colour32;
++
++ Count = height;
++
++ pui16Addr = (unsigned short *)
++ ((unsigned char __force *)pvDest + (2 * Colour8));
++
++ for (i = 0; i < Count; i++) {
++ *pui16Addr = 0xF800;
++
++ pui16Addr =
++ (unsigned short *)((unsigned char *)pui16Addr + bytestride);
++ }
++ Count = bytestride >> 2;
++
++ pui32Addr = (unsigned long *)((unsigned char __force *)pvDest +
++ (bytestride * (MIN(height - 1, 0xFF) - Colour8)));
++
++ for (i = 0; i < Count; i++)
++ pui32Addr[i] = 0x001F001F;
++
++ Colour8 = (Colour8 + 1) % MIN(height - 1, 0xFF);
++}
++
++int FillBuffer(unsigned int ui32BufferIndex)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo = GetAnchorPtr();
++ struct BC_EXAMPLE_BUFFER *psBuffer;
++ struct BUFFER_INFO *psBufferInfo;
++ struct PVRSRV_SYNC_DATA *psSyncData;
++
++ if (psDevInfo == NULL)
++ return -1;
++
++ psBuffer = &psDevInfo->psSystemBuffer[ui32BufferIndex];
++ psBufferInfo = &psDevInfo->sBufferInfo;
++
++ psSyncData = psBuffer->psSyncData;
++
++ if (psSyncData) {
++ if (psSyncData->ui32ReadOpsPending !=
++ psSyncData->ui32ReadOpsComplete)
++ return -1;
++
++ psSyncData->ui32WriteOpsPending++;
++ }
++
++ switch (psBufferInfo->pixelformat) {
++ case PVRSRV_PIXEL_FORMAT_RGB565:
++ default:
++ FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH,
++ BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++ break;
++ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY:
++ FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH,
++ BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++ break;
++ case PVRSRV_PIXEL_FORMAT_NV12:
++ FillYUV420Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH,
++ BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++ break;
++ }
++
++ if (psSyncData)
++ psSyncData->ui32WriteOpsComplete++;
++
++ return 0;
++}
++
++int GetBufferCount(unsigned int *pui32BufferCount)
++{
++ struct BC_EXAMPLE_DEVINFO *psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL)
++ return -1;
++
++ *pui32BufferCount = psDevInfo->sBufferInfo.ui32BufferCount;
++
++ return 0;
++}
+diff --git a/drivers/gpu/pvr/bufferclass_example_private.h b/drivers/gpu/pvr/bufferclass_example_private.h
+new file mode 100644
+index 0000000..527c782
+--- /dev/null
++++ b/drivers/gpu/pvr/bufferclass_example_private.h
+@@ -0,0 +1,33 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_PRIVATE_H__
++#define __BC_EXAMPLE_PRIVATE_H__
++
++int FillBuffer(unsigned int ui32BufferIndex);
++int GetBufferCount(unsigned int *pui32BufferCount);
++
++#endif
+diff --git a/drivers/gpu/pvr/dbgdrvif.h b/drivers/gpu/pvr/dbgdrvif.h
+new file mode 100644
+index 0000000..d8ace97
+--- /dev/null
++++ b/drivers/gpu/pvr/dbgdrvif.h
+@@ -0,0 +1,318 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED 0x00000001
++#define DEBUG_CAPMODE_CONTINUOUS 0x00000002
++#define DEBUG_CAPMODE_HOTKEY 0x00000004
++
++#define DEBUG_OUTMODE_STANDARDDBG 0x00000001
++#define DEBUG_OUTMODE_MONO 0x00000002
++#define DEBUG_OUTMODE_STREAMENABLE 0x00000004
++#define DEBUG_OUTMODE_ASYNC 0x00000008
++#define DEBUG_OUTMODE_SGXVGA 0x00000010
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002
++#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004
++
++#define DEBUG_FLAGS_TEXTSTREAM 0x80000000
++
++#define DEBUG_LEVEL_0 0x00000001
++#define DEBUG_LEVEL_1 0x00000003
++#define DEBUG_LEVEL_2 0x00000007
++#define DEBUG_LEVEL_3 0x0000000F
++#define DEBUG_LEVEL_4 0x0000001F
++#define DEBUG_LEVEL_5 0x0000003F
++#define DEBUG_LEVEL_6 0x0000007F
++#define DEBUG_LEVEL_7 0x000000FF
++#define DEBUG_LEVEL_8 0x000001FF
++#define DEBUG_LEVEL_9 0x000003FF
++#define DEBUG_LEVEL_10 0x000007FF
++#define DEBUG_LEVEL_11 0x00000FFF
++
++#define DEBUG_LEVEL_SEL0 0x00000001
++#define DEBUG_LEVEL_SEL1 0x00000002
++#define DEBUG_LEVEL_SEL2 0x00000004
++#define DEBUG_LEVEL_SEL3 0x00000008
++#define DEBUG_LEVEL_SEL4 0x00000010
++#define DEBUG_LEVEL_SEL5 0x00000020
++#define DEBUG_LEVEL_SEL6 0x00000040
++#define DEBUG_LEVEL_SEL7 0x00000080
++#define DEBUG_LEVEL_SEL8 0x00000100
++#define DEBUG_LEVEL_SEL9 0x00000200
++#define DEBUG_LEVEL_SEL10 0x00000400
++#define DEBUG_LEVEL_SEL11 0x00000800
++
++#define DEBUG_SERVICE_IOCTL_BASE 0x800
++#define DEBUG_SERVICE_CREATESTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2 \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WAITFOREVENT \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++enum DBG_EVENT {
++ DBG_EVENT_STREAM_DATA = 1
++};
++
++struct DBG_IN_CREATESTREAM {
++ u32 ui32Pages;
++ u32 ui32CapMode;
++ u32 ui32OutMode;
++ char *pszName;
++};
++
++struct DBG_IN_FINDSTREAM {
++ IMG_BOOL bResetStream;
++ char *pszName;
++};
++
++struct DBG_IN_WRITESTRING {
++ void *pvStream;
++ u32 ui32Level;
++ char *pszString;
++};
++
++struct DBG_IN_READSTRING {
++ void *pvStream;
++ u32 ui32StringLen;
++ char *pszString;
++};
++
++struct DBG_IN_SETDEBUGMODE {
++ void *pvStream;
++ u32 ui32Mode;
++ u32 ui32Start;
++ u32 ui32End;
++ u32 ui32SampleRate;
++};
++
++struct DBG_IN_SETDEBUGOUTMODE {
++ void *pvStream;
++ u32 ui32Mode;
++};
++
++struct DBG_IN_SETDEBUGLEVEL {
++ void *pvStream;
++ u32 ui32Level;
++};
++
++struct DBG_IN_SETFRAME {
++ void *pvStream;
++ u32 ui32Frame;
++};
++
++struct DBG_IN_WRITE {
++ void *pvStream;
++ u32 ui32Level;
++ u32 ui32TransferSize;
++ u8 *pui8InBuffer;
++};
++
++struct DBG_IN_READ {
++ void *pvStream;
++ IMG_BOOL bReadInitBuffer;
++ u32 ui32OutBufferSize;
++ u8 *pui8OutBuffer;
++};
++
++struct DBG_IN_OVERRIDEMODE {
++ void *pvStream;
++ u32 ui32Mode;
++};
++
++struct DBG_IN_ISCAPTUREFRAME {
++ void *pvStream;
++ IMG_BOOL bCheckPreviousFrame;
++};
++
++struct DBG_IN_SETMARKER {
++ void *pvStream;
++ u32 ui32Marker;
++};
++
++struct DBG_IN_WRITE_LF {
++ u32 ui32Flags;
++ void *pvStream;
++ u32 ui32Level;
++ u32 ui32BufferSize;
++ u8 *pui8InBuffer;
++};
++
++#define WRITELF_FLAGS_RESETBUF 0x00000001
++
++struct DBG_STREAM {
++ struct DBG_STREAM *psNext;
++ struct DBG_STREAM *psInitStream;
++ IMG_BOOL bInitPhaseComplete;
++ u32 ui32Flags;
++ u32 ui32Base;
++ u32 ui32Size;
++ u32 ui32RPtr;
++ u32 ui32WPtr;
++ u32 ui32DataWritten;
++ u32 ui32CapMode;
++ u32 ui32OutMode;
++ u32 ui32DebugLevel;
++ u32 ui32DefaultMode;
++ u32 ui32Start;
++ u32 ui32End;
++ u32 ui32Current;
++ u32 ui32Access;
++ u32 ui32SampleRate;
++ u32 ui32Reserved;
++ u32 ui32Timeout;
++ u32 ui32Marker;
++ char szName[30];
++};
++
++struct DBGKM_SERVICE_TABLE {
++ u32 ui32Size;
++ void *(*pfnCreateStream)(char *pszName, u32 ui32CapMode,
++ u32 ui32OutMode, u32 ui32Flags, u32 ui32Pages);
++ void (*pfnDestroyStream)(struct DBG_STREAM *psStream);
++ void *(*pfnFindStream)(char *pszName, IMG_BOOL bResetInitBuffer);
++ u32 (*pfnWriteString)(struct DBG_STREAM *psStream, char *pszString,
++ u32 ui32Level);
++ u32 (*pfnReadString)(struct DBG_STREAM *psStream, char *pszString,
++ u32 ui32Limit);
++ u32 (*pfnWriteBIN)(struct DBG_STREAM *psStream, u8 *pui8InBuf,
++ u32 ui32InBuffSize, u32 ui32Level);
++ u32 (*pfnReadBIN)(struct DBG_STREAM *psStream,
++ IMG_BOOL bReadInitBuffer, u32 ui32OutBufferSize,
++ u8 *pui8OutBuf);
++ void (*pfnSetCaptureMode)(struct DBG_STREAM *psStream,
++ u32 ui32CapMode, u32 ui32Start, u32 ui32Stop,
++ u32 ui32SampleRate);
++ void (*pfnSetOutputMode)(struct DBG_STREAM *psStream,
++ u32 ui32OutMode);
++ void (*pfnSetDebugLevel)(struct DBG_STREAM *psStream,
++ u32 ui32DebugLevel);
++ void (*pfnSetFrame)(struct DBG_STREAM *psStream,
++ u32 ui32Frame);
++ u32 (*pfnGetFrame)(struct DBG_STREAM *psStream);
++ void (*pfnOverrideMode)(struct DBG_STREAM *psStream,
++ u32 ui32Mode);
++ void (*pfnDefaultMode)(struct DBG_STREAM *psStream);
++ u32 (*pfnDBGDrivWrite2)(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf, u32 ui32InBuffSize, u32 ui32Level);
++ u32 (*pfnWriteStringCM)(struct DBG_STREAM *psStream, char *pszString,
++ u32 ui32Level);
++ u32 (*pfnWriteBINCM)(struct DBG_STREAM *psStream, u8 *pui8InBuf,
++ u32 ui32InBuffSize, u32 ui32Level);
++ void (*pfnSetMarker)(struct DBG_STREAM *psStream, u32 ui32Marker);
++ u32 (*pfnGetMarker)(struct DBG_STREAM *psStream);
++ void (*pfnStartInitPhase) (struct DBG_STREAM *psStream);
++ void (*pfnStopInitPhase) (struct DBG_STREAM *psStream);
++ u32 (*pfnIsCaptureFrame)(struct DBG_STREAM *psStream,
++ IMG_BOOL bCheckPreviousFrame);
++ u32 (*pfnWriteLF)(struct DBG_STREAM *psStream, u8 *pui8InBuf,
++ u32 ui32InBuffSize, u32 ui32Level, u32 ui32Flags);
++ u32 (*pfnReadLF)(struct DBG_STREAM *psStream, u32 ui32OutBuffSize,
++ u8 *pui8OutBuf);
++ u32 (*pfnGetStreamOffset)(struct DBG_STREAM *psStream);
++ void (*pfnSetStreamOffset)(struct DBG_STREAM *psStream,
++ u32 ui32StreamOffset);
++ u32 (*pfnIsLastCaptureFrame)(struct DBG_STREAM *psStream);
++ void (*pfnWaitForEvent) (enum DBG_EVENT eEvent);
++};
++
++extern struct DBGKM_SERVICE_TABLE g_sDBGKMServices;
++
++void DBGDrvGetServiceTable(void **fn_table);
++
++#endif
+diff --git a/drivers/gpu/pvr/device.h b/drivers/gpu/pvr/device.h
+new file mode 100644
+index 0000000..a5240c4
+--- /dev/null
++++ b/drivers/gpu/pvr/device.h
+@@ -0,0 +1,186 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#include "ra.h"
++#include "resman.h"
++
++struct BM_CONTEXT;
++
++struct MMU_HEAP;
++struct MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG \
++ (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG \
++ (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG \
++ (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG \
++ (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++#define DEVICE_MEMORY_HEAP_PERCONTEXT 0
++#define DEVICE_MEMORY_HEAP_KERNEL 1
++#define DEVICE_MEMORY_HEAP_SHARED 2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2
++
++struct DEVICE_MEMORY_HEAP_INFO {
++ u32 ui32HeapID;
++ char *pszName;
++ char *pszBSName;
++ struct IMG_DEV_VIRTADDR sDevVAddrBase;
++ u32 ui32HeapSize;
++ u32 ui32Attribs;
++ u32 DevMemHeapType;
++ void *hDevMemHeap;
++ struct RA_ARENA *psLocalDevMemArena;
++
++ u32 ui32DataPageSize;
++
++};
++
++struct DEVICE_MEMORY_INFO {
++ u32 ui32AddressSpaceSizeLog2;
++ u32 ui32Flags;
++ u32 ui32HeapCount;
++ u32 ui32SyncHeapID;
++ u32 ui32MappingHeapID;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ struct BM_CONTEXT *pBMKernelContext;
++ struct BM_CONTEXT *pBMContext;
++};
++
++struct DEV_ARENA_DESCRIPTOR {
++ u32 ui32HeapID;
++ char *pszName;
++ struct IMG_DEV_VIRTADDR BaseDevVAddr;
++ u32 ui32Size;
++ u32 DevMemHeapType;
++
++ u32 ui32DataPageSize;
++
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++};
++
++struct SYS_DATA;
++
++struct PVRSRV_DEVICE_NODE {
++ struct PVRSRV_DEVICE_IDENTIFIER sDevId;
++ u32 ui32RefCount;
++
++ enum PVRSRV_ERROR (*pfnInitDevice)(void *);
++ enum PVRSRV_ERROR (*pfnDeInitDevice)(void *);
++
++ enum PVRSRV_ERROR (*pfnInitDeviceCompatCheck)(
++ struct PVRSRV_DEVICE_NODE *);
++
++ enum PVRSRV_ERROR (*pfnMMUInitialise)(struct PVRSRV_DEVICE_NODE *,
++ struct MMU_CONTEXT **,
++ struct IMG_DEV_PHYADDR *);
++ void (*pfnMMUFinalise)(struct MMU_CONTEXT *);
++ void (*pfnMMUInsertHeap)(struct MMU_CONTEXT *, struct MMU_HEAP *);
++ struct MMU_HEAP *(*pfnMMUCreate)(struct MMU_CONTEXT *,
++ struct DEV_ARENA_DESCRIPTOR *, struct RA_ARENA **);
++ void (*pfnMMUDelete)(struct MMU_HEAP *);
++ IMG_BOOL (*pfnMMUAlloc)(struct MMU_HEAP *pMMU, size_t uSize, u32 uFlags,
++ u32 uDevVAddrAlignment,
++ struct IMG_DEV_VIRTADDR *pDevVAddr);
++ void (*pfnMMUFree)(struct MMU_HEAP *, struct IMG_DEV_VIRTADDR, u32);
++ void (*pfnMMUEnable)(struct MMU_HEAP *);
++ void (*pfnMMUDisable)(struct MMU_HEAP *);
++ void (*pfnMMUMapPages)(struct MMU_HEAP *pMMU,
++ struct IMG_DEV_VIRTADDR devVAddr,
++ struct IMG_SYS_PHYADDR SysPAddr,
++ size_t uSize, u32 ui32MemFlags, void *hUniqueTag);
++ void (*pfnMMUMapShadow)(struct MMU_HEAP *pMMU,
++ struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ size_t uSize, void *CpuVAddr, void *hOSMemHandle,
++ struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
++ void *hUniqueTag);
++ void (*pfnMMUUnmapPages)(struct MMU_HEAP *pMMU,
++ struct IMG_DEV_VIRTADDR dev_vaddr, u32 ui32PageCount,
++ void *hUniqueTag);
++
++ void (*pfnMMUMapScatter)(struct MMU_HEAP *pMMU,
++ struct IMG_DEV_VIRTADDR DevVAddr,
++ struct IMG_SYS_PHYADDR *psSysAddr,
++ size_t uSize, u32 ui32MemFlags, void *hUniqueTag);
++
++ struct IMG_DEV_PHYADDR(*pfnMMUGetPhysPageAddr)(
++ struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR sDevVPageAddr);
++ struct IMG_DEV_PHYADDR(*pfnMMUGetPDDevPAddr)(
++ struct MMU_CONTEXT *pMMUContext);
++
++ IMG_BOOL (*pfnDeviceISR)(void *);
++
++ void *pvISRData;
++ u32 ui32SOCInterruptBit;
++
++ void (*pfnDeviceMISR)(void *);
++ void (*pfnDeviceCommandComplete)(struct PVRSRV_DEVICE_NODE *
++ psDeviceNode);
++
++ IMG_BOOL bReProcessDeviceCommandComplete;
++ struct DEVICE_MEMORY_INFO sDevMemoryInfo;
++ void *pvDevice;
++ u32 ui32pvDeviceSize;
++
++ struct RESMAN_CONTEXT *hResManContext;
++ struct SYS_DATA *psSysData;
++ struct RA_ARENA *psLocalDevMemArena;
++ u32 ui32Flags;
++ struct PVRSRV_DEVICE_NODE *psNext;
++};
++
++enum PVRSRV_ERROR PVRSRVRegisterDevice(struct SYS_DATA *psSysData,
++ enum PVRSRV_ERROR (*pfnRegisterDevice)(struct PVRSRV_DEVICE_NODE *),
++ u32 ui32SOCInterruptBit, u32 *pui32DeviceIndex);
++
++enum PVRSRV_ERROR PVRSRVInitialiseDevice(u32 ui32DevIndex);
++enum PVRSRV_ERROR PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
++
++enum PVRSRV_ERROR PVRSRVDevInitCompatCheck(struct PVRSRV_DEVICE_NODE
++ *psDeviceNode);
++
++enum PVRSRV_ERROR PVRSRVDeinitialiseDevice(u32 ui32DevIndex);
++
++
++enum PVRSRV_ERROR PollForValueKM(u32 __iomem *pui32LinMemAddr,
++ u32 ui32Value, u32 ui32Mask, u32 ui32Waitus, u32 ui32Tries);
++
++enum PVRSRV_ERROR PVRSRVInit(struct SYS_DATA *psSysData);
++void PVRSRVDeInit(struct SYS_DATA *psSysData);
++IMG_BOOL PVRSRVDeviceLISR(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL PVRSRVSystemLISR(void *pvSysData);
++void PVRSRVMISR(void *pvSysData);
++
++#endif
+diff --git a/drivers/gpu/pvr/deviceclass.c b/drivers/gpu/pvr/deviceclass.c
+new file mode 100644
+index 0000000..6fde440
+--- /dev/null
++++ b/drivers/gpu/pvr/deviceclass.c
+@@ -0,0 +1,1522 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++#include <linux/module.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++struct PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++struct PVRSRV_DC_BUFFER {
++ struct PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++};
++
++struct PVRSRV_DC_SWAPCHAIN {
++ void *hExtSwapChain;
++ struct PVRSRV_QUEUE_INFO *psQueue;
++ struct PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ u32 ui32BufferCount;
++ struct PVRSRV_DC_BUFFER *psLastFlipBuffer;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ void *hResItem;
++};
++
++struct PVRSRV_DISPLAYCLASS_INFO {
++ u32 ui32RefCount;
++ u32 ui32DeviceID;
++ void *hExtDevice;
++ struct PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable;
++ void *hDevMemContext;
++ struct PVRSRV_DC_BUFFER sSystemBuffer;
++};
++
++struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO {
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct RESMAN_ITEM *hResItem;
++};
++
++struct PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++struct PVRSRV_BC_BUFFER {
++ struct PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++};
++
++struct PVRSRV_BUFFERCLASS_INFO {
++ u32 ui32RefCount;
++ u32 ui32DeviceID;
++ void *hExtDevice;
++ struct PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable;
++ void *hDevMemContext;
++
++ u32 ui32BufferCount;
++ struct PVRSRV_BC_BUFFER *psBuffer;
++
++};
++
++struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO {
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ void *hResItem;
++};
++
++static struct PVRSRV_DISPLAYCLASS_INFO *DCDeviceHandleToDCInfo(void *hDeviceKM)
++{
++ struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ psDCPerContextInfo = (struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)
++ hDeviceKM;
++
++ return psDCPerContextInfo->psDCInfo;
++}
++
++static struct PVRSRV_BUFFERCLASS_INFO *BCDeviceHandleToBCInfo(void *hDeviceKM)
++{
++ struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ psBCPerContextInfo = (struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)
++ hDeviceKM;
++
++ return psBCPerContextInfo->psBCInfo;
++}
++
++enum PVRSRV_ERROR PVRSRVEnumerateDCKM(enum PVRSRV_DEVICE_CLASS DeviceClass,
++ u32 *pui32DevCount, u32 *pui32DevID)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ unsigned ui32DevCount = 0;
++ struct SYS_DATA *psSysData;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVEnumerateDCKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ if ((psDeviceNode->sDevId.eDeviceClass == DeviceClass) &&
++ (psDeviceNode->sDevId.eDeviceType ==
++ PVRSRV_DEVICE_TYPE_EXT)) {
++ ui32DevCount++;
++ if (pui32DevID) {
++ *pui32DevID++ =
++ psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ if (pui32DevCount) {
++ *pui32DevCount = ui32DevCount;
++ } else if (pui32DevID == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVEnumerateDCKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR PVRSRVRegisterDCDeviceKM(
++ struct PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
++ u32 *pui32DeviceID)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo = NULL;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psDCInfo),
++ (void **) &psDCInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psDCInfo, 0, sizeof(*psDCInfo));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DC_SRV2DISP_KMJTABLE),
++ (void **)&psDCInfo->psFuncTable,
++ NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc");
++ goto ErrorExit;
++ }
++ OSMemSet(psDCInfo->psFuncTable, 0,
++ sizeof(struct PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++ *psDCInfo->psFuncTable = *psFuncTable;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE),
++ (void **) &psDeviceNode, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc");
++ goto ErrorExit;
++ }
++ OSMemSet(psDeviceNode, 0, sizeof(struct PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (void *) psDCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++ psDeviceNode->psSysData = psSysData;
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++ psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID)
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ SysRegisterExternalDevice(psDeviceNode);
++
++ psDeviceNode->psNext = psSysData->psDeviceNodeList;
++ psSysData->psDeviceNodeList = psDeviceNode;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psDCInfo->psFuncTable)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DC_SRV2DISP_KMJTABLE),
++ psDCInfo->psFuncTable, NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DISPLAYCLASS_INFO),
++ psDCInfo, NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++static enum PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(u32 ui32DevIndex)
++{
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_DEVICE_NODE **ppsDeviceNode, *psDeviceNode;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveDCDeviceKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ ppsDeviceNode = &psSysData->psDeviceNodeList;
++ while (*ppsDeviceNode) {
++ switch ((*ppsDeviceNode)->sDevId.eDeviceClass) {
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ if ((*ppsDeviceNode)->sDevId.ui32DeviceIndex ==
++ ui32DevIndex)
++ goto FoundDevice;
++ break;
++ }
++ default:
++ {
++ break;
++ }
++ }
++ ppsDeviceNode = &((*ppsDeviceNode)->psNext);
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveDCDeviceKM: requested device %d not present",
++ ui32DevIndex);
++
++ return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++ psDeviceNode = *ppsDeviceNode;
++
++ psDCInfo = (struct PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
++
++ if (psDCInfo->ui32RefCount == 0) {
++ *ppsDeviceNode = psDeviceNode->psNext;
++ SysRemoveExternalDevice(psDeviceNode);
++ PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++ FreeDeviceID(psSysData, ui32DevIndex);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DC_SRV2DISP_KMJTABLE),
++ psDCInfo->psFuncTable, NULL);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DISPLAYCLASS_INFO), psDCInfo,
++ NULL);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE), psDeviceNode,
++ NULL);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRemoveDCDeviceKM: "
++ "failed as %d Services DC API "
++ "connections are still open",
++ psDCInfo->ui32RefCount);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR PVRSRVRegisterBCDeviceKM(
++ struct PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
++ u32 *pui32DeviceID)
++{
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo = NULL;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCInfo),
++ (void **) &psBCInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psBCInfo, 0, sizeof(*psBCInfo));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ (void **) &psBCInfo->psFuncTable,
++ NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc");
++ goto ErrorExit;
++ }
++ OSMemSet(psBCInfo->psFuncTable, 0,
++ sizeof(struct PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++ *psBCInfo->psFuncTable = *psFuncTable;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE),
++ (void **) &psDeviceNode, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc");
++ goto ErrorExit;
++ }
++ OSMemSet(psDeviceNode, 0, sizeof(struct PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (void *) psBCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++ psDeviceNode->psSysData = psSysData;
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++ psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID)
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ psDeviceNode->psNext = psSysData->psDeviceNodeList;
++ psSysData->psDeviceNodeList = psDeviceNode;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psBCInfo->psFuncTable)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BC_SRV2BUFFER_KMJTABLE *),
++ psBCInfo->psFuncTable, NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BUFFERCLASS_INFO), psBCInfo, NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++static enum PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(u32 ui32DevIndex)
++{
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_DEVICE_NODE **ppsDevNode, *psDevNode;
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveBCDeviceKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ ppsDevNode = &psSysData->psDeviceNodeList;
++ while (*ppsDevNode) {
++ switch ((*ppsDevNode)->sDevId.eDeviceClass) {
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ if ((*ppsDevNode)->sDevId.ui32DeviceIndex ==
++ ui32DevIndex)
++ goto FoundDevice;
++ break;
++ }
++ default:
++ {
++ break;
++ }
++ }
++ ppsDevNode = &(*ppsDevNode)->psNext;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveBCDeviceKM: requested device %d not present",
++ ui32DevIndex);
++
++ return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++ psDevNode = *(ppsDevNode);
++
++ psBCInfo = (struct PVRSRV_BUFFERCLASS_INFO *)psDevNode->pvDevice;
++
++ if (psBCInfo->ui32RefCount == 0) {
++ *ppsDevNode = psDevNode->psNext;
++ FreeDeviceID(psSysData, ui32DevIndex);
++ psBCInfo =
++ (struct PVRSRV_BUFFERCLASS_INFO *)psDevNode->pvDevice;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ psBCInfo->psFuncTable, NULL);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BUFFERCLASS_INFO), psBCInfo,
++ NULL);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE), psDevNode, NULL);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRemoveBCDeviceKM: "
++ "failed as %d Services BC API "
++ "connections are still open",
++ psBCInfo->ui32RefCount);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVCloseDCDeviceKM(void *hDeviceKM,
++ IMG_BOOL bResManCallback)
++{
++ struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++ psDCPerContextInfo = (struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)
++ hDeviceKM;
++
++ ResManFreeResByPtr(psDCPerContextInfo->hResItem);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR CloseDCDeviceCallBack(void *pvParam, u32 ui32Param)
++{
++ struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psDCPerContextInfo = (struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)
++ pvParam;
++ psDCInfo = psDCPerContextInfo->psDCInfo;
++
++ psDCInfo->ui32RefCount--;
++ if (psDCInfo->ui32RefCount == 0) {
++ struct PVRSRV_DC_SRV2DISP_KMJTABLE *jtbl;
++
++ jtbl = psDCInfo->psFuncTable;
++
++ jtbl->pfnCloseDCDevice(psDCInfo->hExtDevice);
++
++ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.
++ psKernelSyncInfo);
++
++ psDCInfo->hDevMemContext = NULL;
++ psDCInfo->hExtDevice = NULL;
++
++ module_put(jtbl->owner);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO),
++ psDCPerContextInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVOpenDCDeviceKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32DeviceID, void *hDevCookie,
++ void **phDeviceKM)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++
++ if (!phDeviceKM || !hDevCookie) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenDCDeviceKM: Invalid params");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenDCDeviceKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ if ((psDeviceNode->sDevId.eDeviceClass ==
++ PVRSRV_DEVICE_CLASS_DISPLAY) &&
++ (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID)) {
++
++ psDCInfo = (struct PVRSRV_DISPLAYCLASS_INFO *)
++ psDeviceNode->pvDevice;
++ goto FoundDevice;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenDCDeviceKM: no devnode matching index %d",
++ ui32DeviceID);
++
++ return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo),
++ (void **)&psDCPerContextInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenDCDeviceKM: "
++ "Failed psDCPerContextInfo alloc");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++ if (psDCInfo->ui32RefCount++ == 0) {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DC_SRV2DISP_KMJTABLE *jtbl;
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
++
++ jtbl = psDCInfo->psFuncTable;
++ if (!try_module_get(jtbl->owner)) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: can't get DC module");
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++
++ psDCInfo->hDevMemContext =
++ (void *) psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++ eError = PVRSRVAllocSyncInfoKM(NULL,
++ (void *)psDeviceNode->
++ sDevMemoryInfo.pBMKernelContext,
++ &psDCInfo->sSystemBuffer.
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenDCDeviceKM: Failed sync info alloc");
++ psDCInfo->ui32RefCount--;
++ module_put(jtbl->owner);
++ return eError;
++ }
++
++ eError = jtbl->pfnOpenDCDevice(ui32DeviceID,
++ &psDCInfo->hExtDevice,
++ (struct PVRSRV_SYNC_DATA *)psDCInfo->sSystemBuffer.
++ sDeviceClassBuffer.psKernelSyncInfo->
++ psSyncDataMemInfoKM->pvLinAddrKM);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenDCDeviceKM: "
++ "Failed to open external DC device");
++ psDCInfo->ui32RefCount--;
++ module_put(jtbl->owner);
++ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.
++ sDeviceClassBuffer.psKernelSyncInfo);
++ return eError;
++ }
++ }
++
++ psDCPerContextInfo->psDCInfo = psDCInfo;
++ psDCPerContextInfo->hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++ psDCPerContextInfo, 0, CloseDCDeviceCallBack);
++
++ *phDeviceKM = (void *) psDCPerContextInfo;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVEnumDCFormatsKM(void *hDeviceKM,
++ u32 *pui32Count,
++ struct DISPLAY_FORMAT *psFormat)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ if (!hDeviceKM || !pui32Count || !psFormat) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVEnumDCFormatsKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++ return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice,
++ pui32Count, psFormat);
++}
++
++enum PVRSRV_ERROR PVRSRVEnumDCDimsKM(void *hDeviceKM,
++ struct DISPLAY_FORMAT *psFormat,
++ u32 *pui32Count, struct DISPLAY_DIMS *psDim)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ if (!hDeviceKM || !pui32Count || !psFormat) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVEnumDCDimsKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++ return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice,
++ psFormat, pui32Count, psDim);
++}
++
++enum PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(void *hDeviceKM, void **phBuffer)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ void *hExtBuffer;
++
++ if (!hDeviceKM || !phBuffer) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetDCSystemBufferKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++ eError =
++ psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice,
++ &hExtBuffer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVGetDCSystemBufferKM: "
++ "Failed to get valid buffer handle from external driver");
++ return eError;
++ }
++
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr =
++ psDCInfo->psFuncTable->pfnGetBufferAddr;
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext =
++ psDCInfo->hDevMemContext;
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice =
++ psDCInfo->hExtDevice;
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++ psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
++
++ *phBuffer = (void *) &(psDCInfo->sSystemBuffer);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVGetDCInfoKM(void *hDeviceKM,
++ struct DISPLAY_INFO *psDisplayInfo)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ enum PVRSRV_ERROR eError;
++
++ if (!hDeviceKM || !psDisplayInfo) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetDCInfoKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++ eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice,
++ psDisplayInfo);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ if (psDisplayInfo->ui32MaxSwapChainBuffers >
++ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) {
++ psDisplayInfo->ui32MaxSwapChainBuffers =
++ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(void *hSwapChain)
++{
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++ if (!hSwapChain) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDestroyDCSwapChainKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psSwapChain = hSwapChain;
++
++ ResManFreeResByPtr(psSwapChain->hResItem);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR DestroyDCSwapChainCallBack(void *pvParam,
++ u32 ui32Param)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain = pvParam;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo;
++ u32 i;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++
++ eError =
++ psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++ psSwapChain->
++ hExtSwapChain);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "DestroyDCSwapChainCallBack: "
++ "Failed to destroy DC swap chain");
++ return eError;
++ }
++
++ for (i = 0; i < psSwapChain->ui32BufferCount; i++)
++ if (psSwapChain->asBuffer[i].sDeviceClassBuffer.
++ psKernelSyncInfo)
++ PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct PVRSRV_DC_SWAPCHAIN),
++ psSwapChain, NULL);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDeviceKM, u32 ui32Flags,
++ struct DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ struct DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ u32 ui32BufferCount, u32 ui32OEMFlags,
++ void **phSwapChain, u32 *pui32SwapChainID)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain = NULL;
++ struct PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ struct PVRSRV_QUEUE_INFO *psQueue = NULL;
++ enum PVRSRV_ERROR eError;
++ u32 i;
++
++ if (!hDeviceKM || !psDstSurfAttrib || !psSrcSurfAttrib ||
++ !phSwapChain || !pui32SwapChainID) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDCSwapChainKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDCSwapChainKM: Too many buffers");
++ return PVRSRV_ERROR_TOOMANYBUFFERS;
++ }
++
++ if (ui32BufferCount < 2) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDCSwapChainKM: Too few buffers");
++ return PVRSRV_ERROR_TOO_FEW_BUFFERS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DC_SWAPCHAIN),
++ (void **) &psSwapChain, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc");
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++ OSMemSet(psSwapChain, 0, sizeof(struct PVRSRV_DC_SWAPCHAIN));
++
++ eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue");
++ goto ErrorExit;
++ }
++
++ psSwapChain->psQueue = psQueue;
++
++ for (i = 0; i < ui32BufferCount; i++) {
++ eError = PVRSRVAllocSyncInfoKM(NULL,
++ psDCInfo->hDevMemContext,
++ &psSwapChain->asBuffer[i].
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVCreateDCSwapChainKM: "
++ "Failed to alloc syninfo for psSwapChain");
++ goto ErrorExit;
++ }
++
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr =
++ psDCInfo->psFuncTable->pfnGetBufferAddr;
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext =
++ psDCInfo->hDevMemContext;
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice =
++ psDCInfo->hExtDevice;
++
++ psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++ psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++
++ apsSyncData[i] =
++ (struct PVRSRV_SYNC_DATA *)psSwapChain->asBuffer[i].
++ sDeviceClassBuffer.psKernelSyncInfo->
++ psSyncDataMemInfoKM->pvLinAddrKM;
++ }
++
++ psSwapChain->ui32BufferCount = ui32BufferCount;
++ psSwapChain->psDCInfo = psDCInfo;
++
++ eError =
++ psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++ ui32Flags,
++ psDstSurfAttrib,
++ psSrcSurfAttrib,
++ ui32BufferCount,
++ apsSyncData,
++ ui32OEMFlags,
++ &psSwapChain->hExtSwapChain,
++ pui32SwapChainID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVCreateDCSwapChainKM: "
++ "Failed to create 3rd party SwapChain");
++ goto ErrorExit;
++ }
++
++ *phSwapChain = (void *) psSwapChain;
++
++ psSwapChain->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN,
++ psSwapChain, 0,
++ DestroyDCSwapChainCallBack);
++
++ return eError;
++
++ErrorExit:
++
++ for (i = 0; i < ui32BufferCount; i++) {
++ if (psSwapChain->asBuffer[i].sDeviceClassBuffer.
++ psKernelSyncInfo) {
++ PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++ }
++ }
++
++ if (psQueue)
++ PVRSRVDestroyCommandQueueKM(psQueue);
++
++ if (psSwapChain) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DC_SWAPCHAIN), psSwapChain,
++ NULL);
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVSetDCDstRectKM(void *hDeviceKM, void *hSwapChain,
++ struct IMG_RECT *psRect)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++ if (!hDeviceKM || !hSwapChain) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSetDCDstRectKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain;
++
++ return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain, psRect);
++}
++
++enum PVRSRV_ERROR PVRSRVSetDCSrcRectKM(void *hDeviceKM, void *hSwapChain,
++ struct IMG_RECT *psRect)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++ if (!hDeviceKM || !hSwapChain) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSetDCSrcRectKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain;
++
++ return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain, psRect);
++}
++
++enum PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(void *hDeviceKM, void *hSwapChain,
++ u32 ui32CKColour)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++ if (!hDeviceKM || !hSwapChain) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSetDCDstColourKeyKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain;
++
++ return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain, ui32CKColour);
++}
++
++enum PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(void *hDeviceKM, void *hSwapChain,
++ u32 ui32CKColour)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++ if (!hDeviceKM || !hSwapChain) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSetDCSrcColourKeyKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain;
++
++ return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain, ui32CKColour);
++}
++
++enum PVRSRV_ERROR PVRSRVGetDCBuffersKM(void *hDeviceKM, void *hSwapChain,
++ u32 *pui32BufferCount, void **phBuffer)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ void *ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ enum PVRSRV_ERROR eError;
++ u32 i;
++
++ if (!hDeviceKM || !hSwapChain || !phBuffer) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetDCBuffersKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain;
++
++ eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ pui32BufferCount, ahExtBuffer);
++
++ PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ for (i = 0; i < *pui32BufferCount; i++) {
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer =
++ ahExtBuffer[i];
++ phBuffer[i] = (void *)&psSwapChain->asBuffer[i];
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVSwapToDCBufferKM(void *hDeviceKM, void *hBuffer,
++ u32 ui32SwapInterval, void *hPrivateTag,
++ u32 ui32ClipRectCount,
++ struct IMG_RECT *psClipRect)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_BUFFER *psBuffer;
++ struct PVRSRV_QUEUE_INFO *psQueue;
++ struct DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ u32 i;
++ u32 ui32NumSrcSyncs = 1;
++ struct PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++ struct PVRSRV_COMMAND *psCommand;
++
++ if (!hDeviceKM || !hBuffer || !psClipRect) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCBufferKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psBuffer = (struct PVRSRV_DC_BUFFER *)hBuffer;
++
++ psQueue = psBuffer->psSwapChain->psQueue;
++
++ apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++ if (psBuffer->psSwapChain->psLastFlipBuffer &&
++ psBuffer != psBuffer->psSwapChain->psLastFlipBuffer) {
++ apsSrcSync[1] =
++ psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
++ psKernelSyncInfo;
++ ui32NumSrcSyncs++;
++ }
++
++ eError = PVRSRVInsertCommandKM(psQueue, &psCommand,
++ psDCInfo->ui32DeviceID, DC_FLIP_COMMAND,
++ 0, NULL, ui32NumSrcSyncs, apsSrcSync,
++ sizeof(struct DISPLAYCLASS_FLIP_COMMAND) +
++ (sizeof(struct IMG_RECT) *
++ ui32ClipRectCount));
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCBufferKM: Failed to get space in queue");
++ goto Exit;
++ }
++
++ psFlipCmd = (struct DISPLAYCLASS_FLIP_COMMAND *)psCommand->pvData;
++ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++ psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++ psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++ psFlipCmd->hPrivateTag = hPrivateTag;
++ psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++ psFlipCmd->psClipRect =
++ (struct IMG_RECT *)((u8 *) psFlipCmd +
++ sizeof(struct DISPLAYCLASS_FLIP_COMMAND));
++
++ for (i = 0; i < ui32ClipRectCount; i++)
++ psFlipCmd->psClipRect[i] = psClipRect[i];
++
++ psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++ eError = PVRSRVSubmitCommandKM(psQueue, psCommand);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCBufferKM: Failed to submit command");
++ goto Exit;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) !=
++ PVRSRV_ERROR_PROCESSING_BLOCKED) {
++ goto ProcessedQueues;
++ }
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCBufferKM: Failed to process queues");
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++
++ProcessedQueues:
++
++ psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVSwapToDCSystemKM(void *hDeviceKM, void *hSwapChain)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_QUEUE_INFO *psQueue;
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ struct DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ u32 ui32NumSrcSyncs = 1;
++ struct PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++ struct PVRSRV_COMMAND *psCommand;
++
++ if (!hDeviceKM || !hSwapChain) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCSystemKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain;
++
++ psQueue = psSwapChain->psQueue;
++
++ apsSrcSync[0] =
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++ if (psSwapChain->psLastFlipBuffer) {
++ if (apsSrcSync[0] !=
++ psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
++ psKernelSyncInfo) {
++ apsSrcSync[1] =
++ psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
++ psKernelSyncInfo;
++ ui32NumSrcSyncs++;
++ }
++ }
++
++ eError = PVRSRVInsertCommandKM(psQueue, &psCommand,
++ psDCInfo->ui32DeviceID, DC_FLIP_COMMAND,
++ 0, NULL, ui32NumSrcSyncs, apsSrcSync,
++ sizeof(struct DISPLAYCLASS_FLIP_COMMAND));
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCSystemKM: Failed to get space in queue");
++ goto Exit;
++ }
++
++ psFlipCmd = (struct DISPLAYCLASS_FLIP_COMMAND *)psCommand->pvData;
++ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++ psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++ psFlipCmd->hExtBuffer =
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++ psFlipCmd->hPrivateTag = NULL;
++ psFlipCmd->ui32ClipRectCount = 0;
++ psFlipCmd->ui32SwapInterval = 1;
++
++ eError = PVRSRVSubmitCommandKM(psQueue, psCommand);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCSystemKM: Failed to submit command");
++ goto Exit;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) !=
++ PVRSRV_ERROR_PROCESSING_BLOCKED) {
++ goto ProcessedQueues;
++ }
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSwapToDCSystemKM: Failed to process queues");
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++
++ProcessedQueues:
++
++ psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++ eError = PVRSRV_OK;
++
++Exit:
++ return eError;
++}
++
++static enum PVRSRV_ERROR PVRSRVRegisterSystemISRHandler(
++ IMG_BOOL (*pfnISRHandler)(void *),
++ void *pvISRHandlerData,
++ u32 ui32ISRSourceMask,
++ u32 ui32DeviceID)
++{
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_DEVICE_NODE *psDevNode;
++
++ PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRegisterSystemISRHandler: "
++ "Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDevNode = psSysData->psDeviceNodeList;
++ while (psDevNode) {
++ if (psDevNode->sDevId.ui32DeviceIndex == ui32DeviceID)
++ break;
++ psDevNode = psDevNode->psNext;
++ }
++
++ if (psDevNode == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRegisterSystemISRHandler: "
++ "Failed to get psDevNode");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDevNode->pvISRData = (void *) pvISRHandlerData;
++
++ psDevNode->pfnDeviceISR = pfnISRHandler;
++
++ return PVRSRV_OK;
++}
++
++void PVRSRVSetDCState(u32 ui32State)
++{
++ struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSetDCState: Failed to get SysData");
++ return;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode != NULL) {
++ if (psDeviceNode->sDevId.eDeviceClass ==
++ PVRSRV_DEVICE_CLASS_DISPLAY) {
++ psDCInfo = (struct PVRSRV_DISPLAYCLASS_INFO *)
++ psDeviceNode->pvDevice;
++ if (psDCInfo->psFuncTable->pfnSetDCState &&
++ psDCInfo->hExtDevice)
++ psDCInfo->psFuncTable->pfnSetDCState(
++ psDCInfo->hExtDevice,
++ ui32State);
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++}
++
++IMG_BOOL PVRGetDisplayClassJTable(struct PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(struct PVRSRV_DC_DISP2SRV_KMJTABLE);
++ psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++ psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++ psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++ psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++ psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++ psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++ psJTable->pfnPVRSRVRegisterSystemISRHandler =
++ PVRSRVRegisterSystemISRHandler;
++ psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++
++ return IMG_TRUE;
++}
++EXPORT_SYMBOL(PVRGetDisplayClassJTable);
++
++enum PVRSRV_ERROR PVRSRVCloseBCDeviceKM(void *hDeviceKM,
++ IMG_BOOL bResManCallback)
++{
++ struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++ psBCPerContextInfo = (struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)
++ hDeviceKM;
++
++ ResManFreeResByPtr(psBCPerContextInfo->hResItem);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR CloseBCDeviceCallBack(void *pvParam, u32 ui32Param)
++{
++ struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psBCPerContextInfo = (struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)
++ pvParam;
++ psBCInfo = psBCPerContextInfo->psBCInfo;
++
++ psBCInfo->ui32RefCount--;
++ if (psBCInfo->ui32RefCount == 0) {
++ u32 i;
++
++ psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice);
++
++ for (i = 0; i < psBCInfo->ui32BufferCount; i++)
++ if (psBCInfo->psBuffer[i].sDeviceClassBuffer.
++ psKernelSyncInfo)
++ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++
++ if (psBCInfo->psBuffer)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BC_BUFFER),
++ psBCInfo->psBuffer, NULL);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO),
++ psBCPerContextInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVOpenBCDeviceKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32DeviceID, void *hDevCookie,
++ void **phDeviceKM)
++{
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++ u32 i;
++ enum PVRSRV_ERROR eError;
++
++ if (!phDeviceKM || !hDevCookie) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenBCDeviceKM: Invalid params");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenBCDeviceKM: Failed to get SysData");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ if ((psDeviceNode->sDevId.eDeviceClass ==
++ PVRSRV_DEVICE_CLASS_BUFFER) &&
++ (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID)) {
++
++ psBCInfo = (struct PVRSRV_BUFFERCLASS_INFO *)
++ psDeviceNode->pvDevice;
++ goto FoundDevice;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenBCDeviceKM: No devnode matching index %d",
++ ui32DeviceID);
++
++ return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCPerContextInfo),
++ (void **)&psBCPerContextInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenBCDeviceKM: "
++ "Failed psBCPerContextInfo alloc");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++ if (psBCInfo->ui32RefCount++ == 0) {
++ struct BUFFER_INFO sBufferInfo;
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
++
++ psBCInfo->hDevMemContext =
++ (void *) psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++ eError =
++ psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo->
++ hExtDevice);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenBCDeviceKM: "
++ "Failed to open external BC device");
++ return eError;
++ }
++
++ eError =
++ psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice,
++ &sBufferInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVOpenBCDeviceKM : Failed to get BC Info");
++ return eError;
++ }
++
++ psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BC_BUFFER) *
++ sBufferInfo.ui32BufferCount,
++ (void **) &psBCInfo->psBuffer,
++ NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenBCDeviceKM: "
++ "Failed to allocate BC buffers");
++ return eError;
++ }
++ OSMemSet(psBCInfo->psBuffer, 0,
++ sizeof(struct PVRSRV_BC_BUFFER) *
++ sBufferInfo.ui32BufferCount);
++
++ for (i = 0; i < psBCInfo->ui32BufferCount; i++) {
++
++ eError = PVRSRVAllocSyncInfoKM(NULL,
++ psBCInfo->hDevMemContext,
++ &psBCInfo->psBuffer[i].sDeviceClassBuffer.
++ psKernelSyncInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenBCDeviceKM: "
++ "Failed sync info alloc");
++ goto ErrorExit;
++ }
++
++ eError = psBCInfo->psFuncTable->pfnGetBCBuffer(
++ psBCInfo->hExtDevice, i,
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.
++ psKernelSyncInfo->
++ psSyncData,
++ &psBCInfo->psBuffer[i].sDeviceClassBuffer.
++ hExtBuffer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVOpenBCDeviceKM: "
++ "Failed to get BC buffers");
++ goto ErrorExit;
++ }
++
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.
++ pfnGetBufferAddr =
++ psBCInfo->psFuncTable->pfnGetBufferAddr;
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.
++ hDevMemContext = psBCInfo->hDevMemContext;
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice =
++ psBCInfo->hExtDevice;
++ }
++ }
++
++ psBCPerContextInfo->psBCInfo = psBCInfo;
++ psBCPerContextInfo->hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++ psBCPerContextInfo, 0, CloseBCDeviceCallBack);
++
++ *phDeviceKM = (void *)psBCPerContextInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ for (i = 0; i < psBCInfo->ui32BufferCount; i++) {
++ if (psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) {
++ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++ }
++ }
++
++ if (psBCInfo->psBuffer) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_BC_BUFFER), psBCInfo->psBuffer,
++ NULL);
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVGetBCInfoKM(void *hDeviceKM,
++ struct BUFFER_INFO *psBufferInfo)
++{
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ enum PVRSRV_ERROR eError;
++
++ if (!hDeviceKM || !psBufferInfo) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetBCInfoKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ eError =
++ psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice,
++ psBufferInfo);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetBCInfoKM : Failed to get BC Info");
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVGetBCBufferKM(void *hDeviceKM, u32 ui32BufferIndex,
++ void **phBuffer)
++{
++ struct PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ if (!hDeviceKM || !phBuffer) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetBCBufferKM: Invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ if (ui32BufferIndex < psBCInfo->ui32BufferCount) {
++ *phBuffer = (void *)&psBCInfo->psBuffer[ui32BufferIndex];
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVGetBCBufferKM: "
++ "Buffer index %d out of range (%d)",
++ ui32BufferIndex, psBCInfo->ui32BufferCount);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PVRGetBufferClassJTable(struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(struct PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++ psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++ psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++ return IMG_TRUE;
++}
++EXPORT_SYMBOL(PVRGetBufferClassJTable);
++
+diff --git a/drivers/gpu/pvr/devicemem.c b/drivers/gpu/pvr/devicemem.c
+new file mode 100644
+index 0000000..6cbcf2c
+--- /dev/null
++++ b/drivers/gpu/pvr/devicemem.c
+@@ -0,0 +1,1150 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "pvr_bridge_km.h"
++
++#include <linux/pagemap.h>
++
++static enum PVRSRV_ERROR AllocDeviceMem(void *hDevCookie, void *hDevMemHeap,
++ u32 ui32Flags, u32 ui32Size, u32 ui32Alignment,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++struct RESMAN_MAP_DEVICE_MEM_DATA {
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo;
++};
++
++static inline void get_page_details(u32 vaddr, size_t byte_size,
++ u32 *page_offset_out, int *page_count_out)
++{
++ size_t host_page_size;
++ u32 page_offset;
++ int page_count;
++
++ host_page_size = PAGE_SIZE;
++ page_offset = vaddr & (host_page_size - 1);
++ page_count = PAGE_ALIGN(byte_size + page_offset) / host_page_size;
++
++ *page_offset_out = page_offset;
++ *page_count_out = page_count;
++}
++
++static inline int get_page_count(u32 vaddr, size_t byte_size)
++{
++ u32 page_offset;
++ int page_count;
++
++ get_page_details(vaddr, byte_size, &page_offset, &page_count);
++
++ return page_count;
++}
++
++enum PVRSRV_ERROR PVRSRVGetDeviceMemHeapsKM(void *hDevCookie,
++ struct PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 ui32HeapCount;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ u32 i;
++
++ if (hDevCookie == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++ for (i = 0; i < ui32HeapCount; i++) {
++ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[i].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[i].ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++ }
++
++ for (; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++ psHeapInfo[i].ui32HeapID = (u32) PVRSRV_UNDEFINED_HEAP_ID;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVCreateDeviceMemContextKM(void *hDevCookie,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void **phDevMemContext,
++ u32 *pui32ClientHeapCount,
++ struct PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated, IMG_BOOL *pbShared)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 ui32HeapCount, ui32ClientHeapCount = 0;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ void *hDevMemContext;
++ void *hDevMemHeap;
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ u32 i;
++
++ if (hDevCookie == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++ hDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr, psPerProc, pbCreated);
++ if (hDevMemContext == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ for (i = 0; i < ui32HeapCount; i++) {
++ switch (psDeviceMemoryHeap[i].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++ ui32ClientHeapCount++;
++ break;
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++ *phDevMemContext = hDevMemContext;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVDestroyDeviceMemContextKM(void *hDevCookie,
++ void *hDevMemContext,
++ IMG_BOOL *pbDestroyed)
++{
++ int destroyed;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ destroyed = pvr_put_ctx(hDevMemContext);
++ if (pbDestroyed)
++ *pbDestroyed = destroyed ? IMG_TRUE : IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVGetDeviceMemHeapInfoKM(void *hDevCookie,
++ void *hDevMemContext,
++ u32 *pui32ClientHeapCount,
++ struct PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 ui32HeapCount, ui32ClientHeapCount = 0;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ void *hDevMemHeap;
++ u32 i;
++
++ if (hDevCookie == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++ for (i = 0; i < ui32HeapCount; i++) {
++ switch (psDeviceMemoryHeap[i].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++ ui32ClientHeapCount++;
++ break;
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR AllocDeviceMem(void *hDevCookie, void *hDevMemHeap,
++ u32 ui32Flags, u32 ui32Size,
++ u32 ui32Alignment,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ void *hBuffer;
++
++ struct PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ *ppsMemInfo = NULL;
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ (void **) &psMemInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "AllocDeviceMem: Failed to alloc memory for block");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++ bBMError = BM_Alloc(hDevMemHeap, NULL, ui32Size,
++ &psMemInfo->ui32Flags, ui32Alignment, &hBuffer);
++
++ if (!bBMError) {
++ PVR_DPF(PVR_DBG_ERROR, "AllocDeviceMem: BM_Alloc Failed");
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
++ NULL);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++ psMemBlock->hBuffer = (void *)hBuffer;
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32Size;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR FreeDeviceMem(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ void *hBuffer;
++
++ if (!psMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ hBuffer = psMemInfo->sMemBlk.hBuffer;
++ BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++ if (psMemInfo->pvSysBackupBuffer)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->ui32AllocSize,
++ psMemInfo->pvSysBackupBuffer, NULL);
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ psMemInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVAllocSyncInfoKM(void *hDevCookie, void *hDevMemContext,
++ struct PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo)
++{
++ void *hSyncDevMemHeap;
++ struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ struct BM_CONTEXT *pBMContext;
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ struct PVRSRV_SYNC_DATA *psSyncData;
++
++ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_SYNC_INFO),
++ (void **) &psKernelSyncInfo, NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVAllocSyncInfoKM: Failed to alloc memory");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ pBMContext = (struct BM_CONTEXT *)hDevMemContext;
++ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++ hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->
++ ui32SyncHeapID].hDevMemHeap;
++
++ eError = AllocDeviceMem(hDevCookie, hSyncDevMemHeap,
++ PVRSRV_MEM_CACHE_CONSISTENT,
++ sizeof(struct PVRSRV_SYNC_DATA), sizeof(u32),
++ &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVAllocSyncInfoKM: Failed to alloc memory");
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_SYNC_INFO),
++ psKernelSyncInfo, NULL);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelSyncInfo->psSyncData =
++ psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++ psSyncData = psKernelSyncInfo->psSyncData;
++
++ psSyncData->ui32WriteOpsPending = 0;
++ psSyncData->ui32WriteOpsComplete = 0;
++ psSyncData->ui32ReadOpsPending = 0;
++ psSyncData->ui32ReadOpsComplete = 0;
++ psSyncData->ui32LastOpDumpVal = 0;
++ psSyncData->ui32LastReadOpDumpVal = 0;
++
++#if defined(PDUMP)
++ PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM,
++ psKernelSyncInfo->psSyncDataMemInfoKM, 0,
++ psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize,
++ 0, MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
++#endif
++
++ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr =
++ psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr +
++ offsetof(struct PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr =
++ psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr +
++ offsetof(struct PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = NULL;
++
++ *ppsKernelSyncInfo = psKernelSyncInfo;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVFreeSyncInfoKM(
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo)
++{
++ FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo,
++ NULL);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR FreeDeviceMemCallBack(void *pvParam, u32 ui32Param)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psMemInfo->ui32RefCount--;
++
++ if (psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED) {
++ void *hMemInfo = NULL;
++
++ if (psMemInfo->ui32RefCount != 0) {
++ PVR_DPF(PVR_DBG_ERROR, "FreeDeviceMemCallBack: "
++ "mappings are open in other processes");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &hMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "FreeDeviceMemCallBack: "
++ "can't find exported meminfo in the "
++ "global handle list");
++ return eError;
++ }
++
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ hMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "FreeDeviceMemCallBack: "
++ "PVRSRVReleaseHandle failed for exported meminfo");
++ return eError;
++ }
++ }
++
++ PVR_ASSERT(psMemInfo->ui32RefCount == 0);
++
++ if (psMemInfo->psKernelSyncInfo)
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++
++ if (eError == PVRSRV_OK)
++ eError = FreeDeviceMem(psMemInfo);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVFreeDeviceMemKM(void *hDevCookie,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ if (psMemInfo->sMemBlk.hResItem != NULL)
++ ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++ else
++ FreeDeviceMemCallBack(psMemInfo, 0);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVAllocDeviceMemKM(void *hDevCookie,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevMemHeap, u32 ui32Flags,
++ u32 ui32Size, u32 ui32Alignment,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ enum PVRSRV_ERROR eError;
++ struct BM_HEAP *psBMHeap;
++ void *hDevMemContext;
++
++ if (!hDevMemHeap || (ui32Size == 0))
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ eError = AllocDeviceMem(hDevCookie, hDevMemHeap, ui32Flags, ui32Size,
++ ui32Alignment, &psMemInfo);
++
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) {
++ psMemInfo->psKernelSyncInfo = NULL;
++ } else {
++ psBMHeap = (struct BM_HEAP *)hDevMemHeap;
++ hDevMemContext = (void *) psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if (eError != PVRSRV_OK)
++ goto free_mainalloc;
++ }
++
++ *ppsMemInfo = psMemInfo;
++
++ if (ui32Flags & PVRSRV_MEM_NO_RESMAN) {
++ psMemInfo->sMemBlk.hResItem = NULL;
++ } else {
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ psMemInfo, 0, FreeDeviceMemCallBack);
++ if (psMemInfo->sMemBlk.hResItem == NULL) {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto free_mainalloc;
++ }
++ }
++
++ psMemInfo->ui32RefCount++;
++
++ return PVRSRV_OK;
++
++free_mainalloc:
++ FreeDeviceMem(psMemInfo);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVDissociateDeviceMemKM(void *hDevCookie,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem,
++ psDeviceNode->hResManContext);
++
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVGetFreeDeviceMemKM(u32 ui32Flags, u32 *pui32Total,
++ u32 *pui32Free, u32 *pui32LargestBlock)
++{
++
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(pui32Total);
++ PVR_UNREFERENCED_PARAMETER(pui32Free);
++ PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVUnwrapExtMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR UnwrapExtMemoryCallBack(void *pvParam, u32 ui32Param)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++ void *hOSWrapMem;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem;
++
++ if (psMemInfo->psKernelSyncInfo)
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++
++ if (psMemInfo->sMemBlk.psIntSysPAddr) {
++ int page_count;
++
++ page_count = get_page_count((u32)psMemInfo->pvLinAddrKM,
++ psMemInfo->ui32AllocSize);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ page_count * sizeof(struct IMG_SYS_PHYADDR),
++ psMemInfo->sMemBlk.psIntSysPAddr, NULL);
++ }
++
++ if (eError == PVRSRV_OK) {
++ psMemInfo->ui32RefCount--;
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ if (hOSWrapMem)
++ OSReleasePhysPageAddr(hOSWrapMem);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVWrapExtMemoryKM(void *hDevCookie,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevMemContext, u32 ui32ByteSize,
++ u32 ui32PageOffset, IMG_BOOL bPhysContig,
++ struct IMG_SYS_PHYADDR *psExtSysPAddr,
++ void *pvLinAddr,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = NULL;
++ struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ u32 ui32HostPageSize = HOST_PAGESIZE();
++ void *hDevMemHeap = NULL;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ void *hBuffer;
++ struct PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ struct BM_HEAP *psBMHeap;
++ enum PVRSRV_ERROR eError;
++ void *pvPageAlignedCPUVAddr;
++ struct IMG_SYS_PHYADDR *psIntSysPAddr = NULL;
++ void *hOSWrapMem = NULL;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ int page_count = 0;
++ u32 i;
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
++ PVR_ASSERT(psDeviceNode != NULL);
++
++ if (psDeviceNode == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: invalid parameter");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (pvLinAddr) {
++ get_page_details((u32)pvLinAddr, ui32ByteSize,
++ &ui32PageOffset, &page_count);
++ pvPageAlignedCPUVAddr = (void *)((u8 *) pvLinAddr -
++ ui32PageOffset);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ page_count * sizeof(struct IMG_SYS_PHYADDR),
++ (void **)&psIntSysPAddr, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: "
++ "Failed to alloc memory for block");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
++ page_count * ui32HostPageSize,
++ psIntSysPAddr, &hOSWrapMem);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM:"
++ " Failed to alloc memory for block");
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase1;
++ }
++ psExtSysPAddr = psIntSysPAddr;
++ bPhysContig = IMG_FALSE;
++ }
++
++ psDevMemoryInfo =
++ &((struct BM_CONTEXT *)hDevMemContext)->psDeviceNode->
++ sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ if (HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) ==
++ psDevMemoryInfo->ui32MappingHeapID) {
++ if (psDeviceMemoryHeap[i].DevMemHeapType ==
++ DEVICE_MEMORY_HEAP_PERCONTEXT) {
++ hDevMemHeap =
++ BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++ } else {
++ hDevMemHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap[i].
++ hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if (hDevMemHeap == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: unable to find mapping heap");
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ErrorExitPhase2;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ (void **) &psMemInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: "
++ "Failed to alloc memory for block");
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase2;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32PageOffset,
++ bPhysContig,
++ psExtSysPAddr,
++ NULL, &psMemInfo->ui32Flags, &hBuffer);
++ if (!bBMError) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: BM_Wrap Failed");
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExitPhase3;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++ psMemBlock->hOSWrapMem = hOSWrapMem;
++ psMemBlock->psIntSysPAddr = psIntSysPAddr;
++
++ psMemBlock->hBuffer = (void *) hBuffer;
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ psBMHeap = (struct BM_HEAP *)hDevMemHeap;
++ hDevMemContext = (void *) psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if (eError != PVRSRV_OK)
++ goto ErrorExitPhase4;
++
++ psMemInfo->ui32RefCount++;
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_WRAP, psMemInfo, 0,
++ UnwrapExtMemoryCallBack);
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++ErrorExitPhase4:
++ if (psMemInfo) {
++ FreeDeviceMem(psMemInfo);
++
++ psMemInfo = NULL;
++ }
++
++ErrorExitPhase3:
++ if (psMemInfo) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
++ NULL);
++ }
++
++ErrorExitPhase2:
++ if (psIntSysPAddr)
++ OSReleasePhysPageAddr(hOSWrapMem);
++
++ErrorExitPhase1:
++ if (psIntSysPAddr) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ page_count * sizeof(struct IMG_SYS_PHYADDR),
++ psIntSysPAddr, NULL);
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVUnmapDeviceMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR UnmapDeviceMemoryCallBack(void *pvParam, u32 ui32Param)
++{
++ enum PVRSRV_ERROR eError;
++ struct RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam;
++ int page_count;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ page_count = get_page_count((u32)psMapData->psMemInfo->pvLinAddrKM,
++ psMapData->psMemInfo->ui32AllocSize);
++
++ if (psMapData->psMemInfo->sMemBlk.psIntSysPAddr)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ page_count * sizeof(struct IMG_SYS_PHYADDR),
++ psMapData->psMemInfo->sMemBlk.psIntSysPAddr, NULL);
++
++ eError = FreeDeviceMem(psMapData->psMemInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "UnmapDeviceMemoryCallBack: "
++ "Failed to free DST meminfo");
++ return eError;
++ }
++
++ psMapData->psSrcMemInfo->ui32RefCount--;
++
++ PVR_ASSERT(psMapData->psSrcMemInfo->ui32RefCount != (u32) (-1));
++/*
++ * Don't free the source MemInfo as we didn't allocate it
++ * and it's not our job as the process the allocated
++ * should also free it when it's finished
++ */
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct RESMAN_MAP_DEVICE_MEM_DATA), psMapData, NULL);
++
++ return eError;
++}
++
++static inline int bm_is_continuous(const struct BM_BUF *buf)
++{
++ return buf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr;
++}
++
++enum PVRSRV_ERROR PVRSRVMapDeviceMemoryKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ struct PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ void *hDstDevMemHeap,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
++{
++ enum PVRSRV_ERROR eError;
++ u32 ui32PageOffset;
++ u32 ui32HostPageSize = HOST_PAGESIZE();
++ int page_count;
++ int i;
++ struct IMG_SYS_PHYADDR *psSysPAddr = NULL;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct BM_BUF *psBuf;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = NULL;
++ void *hBuffer;
++ struct PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ void *pvPageAlignedCPUVAddr;
++ struct RESMAN_MAP_DEVICE_MEM_DATA *psMapData = NULL;
++
++ if (!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ *ppsDstMemInfo = NULL;
++
++ get_page_details((u32)psSrcMemInfo->pvLinAddrKM,
++ psSrcMemInfo->ui32AllocSize,
++ &ui32PageOffset, &page_count);
++ pvPageAlignedCPUVAddr =
++ (void *) ((u8 *) psSrcMemInfo->pvLinAddrKM -
++ ui32PageOffset);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ page_count * sizeof(struct IMG_SYS_PHYADDR),
++ (void **) &psSysPAddr, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
++ "Failed to alloc memory for block");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - ui32PageOffset;
++ for (i = 0; i < page_count; i++) {
++ eError =
++ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
++ "Failed to retrieve page list from device");
++ goto ErrorExit;
++ }
++
++ psSysPAddr[i] =
++ SysDevPAddrToSysPAddr(psDeviceNode->sDevId.eDeviceType,
++ sDevPAddr);
++
++ sDevVAddr.uiAddr += ui32HostPageSize;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct RESMAN_MAP_DEVICE_MEM_DATA),
++ (void **)&psMapData, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
++ "Failed to alloc resman map data");
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psMemInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
++ "Failed to alloc memory for block");
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDstDevMemHeap, psSrcMemInfo->ui32AllocSize,
++ ui32PageOffset, bm_is_continuous(psBuf), psSysPAddr,
++ pvPageAlignedCPUVAddr, &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: BM_Wrap Failed");
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExit;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++ psMemBlock->hBuffer = (void *) hBuffer;
++
++ psMemBlock->psIntSysPAddr = psSysPAddr;
++
++ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ psSrcMemInfo->ui32RefCount++;
++
++ psMapData->psMemInfo = psMemInfo;
++ psMapData->psSrcMemInfo = psSrcMemInfo;
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_MAPPING, psMapData, 0,
++ UnmapDeviceMemoryCallBack);
++
++ *ppsDstMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psSysPAddr) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct IMG_SYS_PHYADDR), psSysPAddr, NULL);
++ }
++
++ if (psMemInfo) {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
++ NULL);
++ }
++
++ if (psMapData) {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct RESMAN_MAP_DEVICE_MEM_DATA), psMapData,
++ NULL);
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVUnmapDeviceClassMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(void *pvParam,
++ u32 ui32Param)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return FreeDeviceMem(psMemInfo);
++}
++
++enum PVRSRV_ERROR PVRSRVMapDeviceClassMemoryKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevMemContext, void *hDeviceClassBuffer,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ void **phOSMapInfo)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ struct PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++ struct IMG_SYS_PHYADDR *psSysPAddr;
++ void *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++ IMG_BOOL bPhysContig;
++ struct BM_CONTEXT *psBMContext;
++ struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ void *hDevMemHeap = NULL;
++ u32 ui32ByteSize;
++ u32 ui32Offset;
++ u32 ui32PageSize = HOST_PAGESIZE();
++ void *hBuffer;
++ struct PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ u32 i;
++
++ if (!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo ||
++ !hDevMemContext) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceClassBuffer = (struct PVRSRV_DEVICECLASS_BUFFER *)
++ hDeviceClassBuffer;
++
++ eError =
++ psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->
++ hExtDevice,
++ psDeviceClassBuffer->
++ hExtBuffer, &psSysPAddr,
++ &ui32ByteSize,
++ (void __iomem **)&pvCPUVAddr,
++ phOSMapInfo, &bPhysContig);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceClassMemoryKM: "
++ "unable to get buffer address");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psBMContext = (struct BM_CONTEXT *)psDeviceClassBuffer->hDevMemContext;
++ psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ if (HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) ==
++ psDevMemoryInfo->ui32MappingHeapID) {
++ if (psDeviceMemoryHeap[i].DevMemHeapType ==
++ DEVICE_MEMORY_HEAP_PERCONTEXT)
++ hDevMemHeap =
++ BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++ else
++ hDevMemHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap[i].
++ hDevMemHeap;
++ break;
++ }
++ }
++
++ if (hDevMemHeap == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceClassMemoryKM: "
++ "unable to find mapping heap");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ ui32Offset = ((u32)pvCPUVAddr) & (ui32PageSize - 1);
++ pvPageAlignedCPUVAddr = (void *)((u8 *)pvCPUVAddr - ui32Offset);
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psMemInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceClassMemoryKM: "
++ "Failed to alloc memory for block");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap, ui32ByteSize, ui32Offset, bPhysContig,
++ psSysPAddr, pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags, &hBuffer);
++
++ if (!bBMError) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed");
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
++ NULL);
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++ psMemBlock->hBuffer = (void *) hBuffer;
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING, psMemInfo, 0,
++ UnmapDeviceClassMemoryCallBack);
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/pvr/env_data.h b/drivers/gpu/pvr/env_data.h
+new file mode 100644
+index 0000000..7317a51
+--- /dev/null
++++ b/drivers/gpu/pvr/env_data.h
+@@ -0,0 +1,57 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/workqueue.h>
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
++
++struct PVR_PCI_DEV {
++ struct pci_dev *psPCIDev;
++ enum HOST_PCI_INIT_FLAGS ePCIFlags;
++ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++};
++
++struct ENV_DATA {
++ void *pvBridgeData;
++ struct pm_dev *psPowerDevice;
++ IMG_BOOL bLISRInstalled;
++ IMG_BOOL bMISRInstalled;
++ u32 ui32IRQ;
++ void *pvISRCookie;
++ struct workqueue_struct *psMISRWorkqueue;
++ struct work_struct sMISRWork;
++ void *pvSysData;
++ struct workqueue_struct *psPerfWorkqueue;
++ struct delayed_work sPerfWork;
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/env_perproc.h b/drivers/gpu/pvr/env_perproc.h
+new file mode 100644
+index 0000000..7f092f6
+--- /dev/null
++++ b/drivers/gpu/pvr/env_perproc.h
+@@ -0,0 +1,51 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __ENV_PERPROC_H__
++#define __ENV_PERPROC_H__
++
++#include "linux/list.h"
++#include "linux/proc_fs.h"
++
++#include "img_types.h"
++
++struct PVRSRV_ENV_PER_PROCESS_DATA {
++ void *hBlockAlloc;
++ struct proc_dir_entry *psProcDir;
++};
++
++void RemovePerProcessProcDir(struct PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++enum PVRSRV_ERROR LinuxMMapPerProcessConnect(
++ struct PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++void LinuxMMapPerProcessDisconnect(
++ struct PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++enum PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(
++ struct PVRSRV_HANDLE_BASE *psHandleBase);
++
++#endif
+diff --git a/drivers/gpu/pvr/event.c b/drivers/gpu/pvr/event.c
+new file mode 100644
+index 0000000..e538100
+--- /dev/null
++++ b/drivers/gpu/pvr/event.c
+@@ -0,0 +1,270 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/io.h>
++#include <asm/page.h>
++#include <asm/system.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/sched.h>
++#include <linux/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++#include "event.h"
++
++struct PVRSRV_LINUX_EVENT_OBJECT_LIST {
++ rwlock_t sLock;
++ struct list_head sList;
++
++};
++
++struct PVRSRV_LINUX_EVENT_OBJECT {
++ atomic_t sTimeStamp;
++ u32 ui32TimeStampPrevious;
++#if defined(DEBUG)
++ unsigned ui32Stats;
++#endif
++ wait_queue_head_t sWait;
++ struct list_head sList;
++ void *hResItem;
++ struct PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++};
++
++enum PVRSRV_ERROR LinuxEventObjectListCreate(void **phEventObjectList)
++{
++ struct PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++ if (OSAllocMem
++ (PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ (void **) &psEvenObjectList, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "LinuxEventObjectCreate: "
++ "failed to allocate memory for event list");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++ rwlock_init(&psEvenObjectList->sLock);
++
++ *phEventObjectList = (void **) psEvenObjectList;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR LinuxEventObjectListDestroy(void *hEventObjectList)
++{
++ struct PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList =
++ (struct PVRSRV_LINUX_EVENT_OBJECT_LIST *)hEventObjectList;
++
++ if (psEvenObjectList) {
++ if (!list_empty(&psEvenObjectList->sList)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "LinuxEventObjectListDestroy: Event List is not empty");
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ psEvenObjectList, NULL);
++ }
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR LinuxEventObjectDelete(void *hOSEventObjectList,
++ void *hOSEventObject)
++{
++ if (hOSEventObjectList)
++ if (hOSEventObject) {
++ struct PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
++ (struct PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++#if defined(DEBUG)
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "LinuxEventObjectListDelete: Event object waits: %lu",
++ psLinuxEventObject->ui32Stats);
++#endif
++ ResManFreeResByPtr(psLinuxEventObject->hResItem);
++
++ return PVRSRV_OK;
++ }
++ return PVRSRV_ERROR_GENERIC;
++
++}
++
++static enum PVRSRV_ERROR LinuxEventObjectDeleteCallback(void *pvParam,
++ u32 ui32Param)
++{
++ struct PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
++ struct PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
++ psLinuxEventObject->psLinuxEventObjectList;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_del(&psLinuxEventObject->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++#if defined(DEBUG)
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "LinuxEventObjectDeleteCallback: Event object waits: %lu",
++ psLinuxEventObject->ui32Stats);
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject,
++ NULL);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR LinuxEventObjectAdd(void *hOSEventObjectList,
++ void **phOSEventObject)
++{
++ struct PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ struct PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
++ (struct PVRSRV_LINUX_EVENT_OBJECT_LIST *)hOSEventObjectList;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "LinuxEventObjectAdd: Couldn't find per-process data");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_LINUX_EVENT_OBJECT),
++ (void **) &psLinuxEventObject, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "LinuxEventObjectAdd: failed to allocate memory ");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
++ psLinuxEventObject->ui32TimeStampPrevious = 0;
++
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats = 0;
++#endif
++ init_waitqueue_head(&psLinuxEventObject->sWait);
++
++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++ psLinuxEventObject->hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_EVENT_OBJECT, psLinuxEventObject, 0,
++ &LinuxEventObjectDeleteCallback);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++ *phOSEventObject = psLinuxEventObject;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR LinuxEventObjectSignal(void *hOSEventObjectList)
++{
++ struct PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ struct PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
++ (struct PVRSRV_LINUX_EVENT_OBJECT_LIST *)hOSEventObjectList;
++ struct list_head *psListEntry, *psListEntryTemp, *psList;
++ psList = &psLinuxEventObjectList->sList;
++
++ list_for_each_safe(psListEntry, psListEntryTemp, psList) {
++
++ psLinuxEventObject =
++ (struct PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry,
++ struct PVRSRV_LINUX_EVENT_OBJECT,
++ sList);
++
++ atomic_inc(&psLinuxEventObject->sTimeStamp);
++ wake_up_interruptible(&psLinuxEventObject->sWait);
++ }
++
++ return PVRSRV_OK;
++
++}
++
++enum PVRSRV_ERROR LinuxEventObjectWait(void *hOSEventObject, u32 ui32MSTimeout)
++{
++ u32 ui32TimeStamp;
++ DEFINE_WAIT(sWait);
++
++ struct PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
++ (struct PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++
++ u32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
++
++ do {
++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait,
++ TASK_INTERRUPTIBLE);
++ ui32TimeStamp = atomic_read(&psLinuxEventObject->sTimeStamp);
++
++ if (psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
++ break;
++
++ mutex_unlock(&gPVRSRVLock);
++
++ ui32TimeOutJiffies =
++ (u32) schedule_timeout((s32) ui32TimeOutJiffies);
++
++ mutex_lock(&gPVRSRVLock);
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats++;
++#endif
++
++ } while (ui32TimeOutJiffies);
++
++ finish_wait(&psLinuxEventObject->sWait, &sWait);
++
++ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
++
++ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
++
++}
+diff --git a/drivers/gpu/pvr/event.h b/drivers/gpu/pvr/event.h
+new file mode 100644
+index 0000000..54de808
+--- /dev/null
++++ b/drivers/gpu/pvr/event.h
+@@ -0,0 +1,35 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++enum PVRSRV_ERROR LinuxEventObjectListCreate(void **phEventObjectList);
++enum PVRSRV_ERROR LinuxEventObjectListDestroy(void *hEventObjectList);
++enum PVRSRV_ERROR LinuxEventObjectAdd(void *hOSEventObjectList,
++ void **phOSEventObject);
++enum PVRSRV_ERROR LinuxEventObjectDelete(void *hOSEventObjectList,
++ void *hOSEventObject);
++enum PVRSRV_ERROR LinuxEventObjectSignal(void *hOSEventObjectList);
++enum PVRSRV_ERROR LinuxEventObjectWait(void *hOSEventObject, u32 ui32MSTimeout);
++
+diff --git a/drivers/gpu/pvr/handle.c b/drivers/gpu/pvr/handle.c
+new file mode 100644
+index 0000000..77e28fc
+--- /dev/null
++++ b/drivers/gpu/pvr/handle.c
+@@ -0,0 +1,1443 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef DEBUG
++#define HANDLE_BLOCK_SIZE 1
++#else
++#define HANDLE_BLOCK_SIZE 256
++#endif
++
++#define HANDLE_HASH_TAB_INIT_SIZE 32
++
++#define DEFAULT_MAX_INDEX_PLUS_ONE 0xfffffffful
++#define DEFAULT_MAX_HANDLE DEFAULT_MAX_INDEX_PLUS_ONE
++
++#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define INDEX_TO_HANDLE(psBase, idx) ((void *)((idx) + 1))
++#define HANDLE_TO_INDEX(psBase, hand) ((u32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++
++#define HANDLE_TO_HANDLE_PTR(psBase, h) \
++ (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define HANDLE_PTR_TO_INDEX(psBase, psHandle) \
++ ((psHandle) - ((psBase)->psHandleArray))
++
++#define HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++ INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++#define ROUND_UP_TO_MULTIPLE(a, b) ((((a) + (b) - 1) / (b)) * (b))
++
++#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0)
++
++#define SET_FLAG(v, f) ((void)((v) |= (f)))
++#define CLEAR_FLAG(v, f) ((void)((v) &= ~(f)))
++#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
++
++#define TEST_ALLOC_FLAG(psHandle, f) \
++ TEST_FLAG((psHandle)->eFlag, f)
++
++#define SET_INTERNAL_FLAG(psHandle, f) \
++ SET_FLAG((psHandle)->eInternalFlag, f)
++#define CLEAR_INTERNAL_FLAG(psHandle, f) \
++ CLEAR_FLAG((psHandle)->eInternalFlag, f)
++#define TEST_INTERNAL_FLAG(psHandle, f) \
++ TEST_FLAG((psHandle)->eInternalFlag, f)
++
++#define BATCHED_HANDLE(psHandle) \
++ TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_BATCHED_HANDLE(psHandle) \
++ SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_UNBATCHED_HANDLE(psHandle) \
++ CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) \
++ TEST_INTERNAL_FLAG(psHandle, \
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) \
++ SET_INTERNAL_FLAG(psHandle, \
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define HANDLE_STRUCT_IS_FREE(psHandle) \
++ ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && \
++ (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE)
++
++#ifdef MIN
++#undef MIN
++#endif
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++
++struct sHandleList {
++ u32 ui32Prev;
++ u32 ui32Next;
++ void *hParent;
++};
++
++enum ePVRSRVInternalHandleFlag {
++ INTERNAL_HANDLE_FLAG_NONE = 0x00,
++ INTERNAL_HANDLE_FLAG_BATCHED = 0x01,
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02,
++};
++
++struct sHandle {
++ enum PVRSRV_HANDLE_TYPE eType;
++ void *pvData;
++ u32 ui32NextIndexPlusOne;
++ enum ePVRSRVInternalHandleFlag eInternalFlag;
++ enum PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++
++ u32 ui32Index;
++ struct sHandleList sChildren;
++ struct sHandleList sSiblings;
++};
++
++struct PVRSRV_HANDLE_BASE {
++ void *hBaseBlockAlloc;
++
++ void *hHandBlockAlloc;
++
++ struct sHandle *psHandleArray;
++ struct HASH_TABLE *psHashTab;
++ u32 ui32FreeHandCount;
++ u32 ui32FirstFreeIndex;
++
++ u32 ui32MaxIndexPlusOne;
++
++ u32 ui32TotalHandCount;
++ u32 ui32LastFreeIndexPlusOne;
++ u32 ui32HandBatchSize;
++ u32 ui32TotalHandCountPreBatch;
++ u32 ui32FirstBatchIndexPlusOne;
++ u32 ui32BatchHandAllocFailures;
++
++ IMG_BOOL bPurgingEnabled;
++};
++
++enum eHandKey {
++ HAND_KEY_DATA = 0,
++ HAND_KEY_TYPE,
++ HAND_KEY_PARENT,
++ HAND_KEY_LEN
++};
++
++struct PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++static inline void HandleListInit(u32 ui32Index, struct sHandleList *psList,
++ void *hParent)
++{
++ psList->ui32Next = ui32Index;
++ psList->ui32Prev = ui32Index;
++ psList->hParent = hParent;
++}
++
++static inline void InitParentList(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psHandle)
++{
++ u32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++ HandleListInit(ui32Parent, &psHandle->sChildren,
++ INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++static inline void InitChildEntry(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psHandle)
++{
++ HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle),
++ &psHandle->sSiblings, NULL);
++}
++
++static inline IMG_BOOL HandleListIsEmpty(u32 ui32Index,
++ struct sHandleList *psList)
++{
++ IMG_BOOL bIsEmpty;
++
++ bIsEmpty = (IMG_BOOL) (psList->ui32Next == ui32Index);
++
++#ifdef DEBUG
++ {
++ IMG_BOOL bIsEmpty2;
++
++ bIsEmpty2 = (IMG_BOOL) (psList->ui32Prev == ui32Index);
++ PVR_ASSERT(bIsEmpty == bIsEmpty2);
++ }
++#endif
++
++ return bIsEmpty;
++}
++
++#ifdef DEBUG
++static inline IMG_BOOL NoChildren(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psHandle)
++{
++ PVR_ASSERT(psHandle->sChildren.hParent ==
++ HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle),
++ &psHandle->sChildren);
++}
++
++static inline IMG_BOOL NoParent(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psHandle)
++{
++ if (HandleListIsEmpty
++ (HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings)) {
++ PVR_ASSERT(psHandle->sSiblings.hParent == NULL);
++
++ return IMG_TRUE;
++ } else {
++ PVR_ASSERT(psHandle->sSiblings.hParent != NULL);
++ }
++ return IMG_FALSE;
++}
++#endif
++static inline void *ParentHandle(struct sHandle *psHandle)
++{
++ return psHandle->sSiblings.hParent;
++}
++
++#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++ ((struct sHandleList *) \
++ ((char *)(INDEX_TO_HANDLE_PTR(psBase, i)) + \
++ (((i) == (p)) ? (po) : (eo))))
++
++static inline void HandleListInsertBefore(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32InsIndex, struct sHandleList *psIns,
++ size_t uiParentOffset, u32 ui32EntryIndex,
++ struct sHandleList *psEntry,
++ size_t uiEntryOffset, u32 ui32ParentIndex)
++{
++ struct sHandleList *psPrevIns =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev,
++ ui32ParentIndex, uiParentOffset,
++ uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent == NULL);
++ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET
++ (psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset,
++ uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase,
++ ui32ParentIndex));
++
++ psEntry->ui32Prev = psIns->ui32Prev;
++ psIns->ui32Prev = ui32EntryIndex;
++ psEntry->ui32Next = ui32InsIndex;
++ psPrevIns->ui32Next = ui32EntryIndex;
++
++ psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++static inline void AdoptChild(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psParent, struct sHandle *psChild)
++{
++ u32 ui32Parent =
++ HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++ PVR_ASSERT(ui32Parent ==
++ (u32) HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren,
++ offsetof(struct sHandle, sChildren),
++ HANDLE_PTR_TO_INDEX(psBase, psChild),
++ &psChild->sSiblings, offsetof(struct sHandle,
++ sSiblings),
++ ui32Parent);
++
++}
++
++static inline void HandleListRemove(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32EntryIndex, struct sHandleList *psEntry,
++ size_t uiEntryOffset, size_t uiParentOffset)
++{
++ if (!HandleListIsEmpty(ui32EntryIndex, psEntry)) {
++ struct sHandleList *psPrev =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev,
++ HANDLE_TO_INDEX(psBase,
++ psEntry->
++ hParent),
++ uiParentOffset,
++ uiEntryOffset);
++ struct sHandleList *psNext =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next,
++ HANDLE_TO_INDEX(psBase,
++ psEntry->
++ hParent),
++ uiParentOffset,
++ uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent != NULL);
++
++ psPrev->ui32Next = psEntry->ui32Next;
++ psNext->ui32Prev = psEntry->ui32Prev;
++
++ HandleListInit(ui32EntryIndex, psEntry, NULL);
++ }
++}
++
++static inline void UnlinkFromParent(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psHandle)
++{
++ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle),
++ &psHandle->sSiblings, offsetof(struct sHandle,
++ sSiblings),
++ offsetof(struct sHandle, sChildren));
++}
++
++static inline enum PVRSRV_ERROR HandleListIterate(
++ struct PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead,
++ size_t uiParentOffset, size_t uiEntryOffset,
++ enum PVRSRV_ERROR(*pfnIterFunc)(struct PVRSRV_HANDLE_BASE *,
++ struct sHandle *))
++{
++ u32 ui32Index;
++ u32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++ PVR_ASSERT(psHead->hParent != NULL);
++
++ for (ui32Index = psHead->ui32Next; ui32Index != ui32Parent;) {
++ struct sHandle *psHandle =
++ INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++ struct sHandleList *psEntry =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index,
++ ui32Parent, uiParentOffset,
++ uiEntryOffset);
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psEntry->hParent == psHead->hParent);
++
++ ui32Index = psEntry->ui32Next;
++
++ eError = (*pfnIterFunc)(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static inline enum PVRSRV_ERROR IterateOverChildren(
++ struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psParent,
++ enum PVRSRV_ERROR(*pfnIterFunc)
++ (struct PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ return HandleListIterate(psBase, &psParent->sChildren,
++ offsetof(struct sHandle, sChildren),
++ offsetof(struct sHandle, sSiblings),
++ pfnIterFunc);
++}
++
++static inline enum PVRSRV_ERROR GetHandleStructure(
++ struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle **ppsHandle, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType)
++{
++ u32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++ struct sHandle *psHandle;
++
++ if (!INDEX_IS_VALID(psBase, ui32Index)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "GetHandleStructure: Handle index out of range (%u >= %u)",
++ ui32Index, psBase->ui32TotalHandCount);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "GetHandleStructure: Handle not allocated (index: %u)",
++ ui32Index);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "GetHandleStructure: Handle type mismatch (%d != %d)",
++ eType, psHandle->eType);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *ppsHandle = psHandle;
++
++ return PVRSRV_OK;
++}
++
++static inline void *ParentIfPrivate(struct sHandle *psHandle)
++{
++ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ ParentHandle(psHandle) : NULL;
++}
++
++static inline void InitKey(u32 aKey[HAND_KEY_LEN],
++ struct PVRSRV_HANDLE_BASE *psBase,
++ void *pvData, enum PVRSRV_HANDLE_TYPE eType,
++ void *hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ aKey[HAND_KEY_DATA] = (u32) pvData;
++ aKey[HAND_KEY_TYPE] = (u32) eType;
++ aKey[HAND_KEY_PARENT] = (u32) hParent;
++}
++
++static void FreeHandleArray(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ if (psBase->psHandleArray != NULL) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psBase->ui32TotalHandCount *
++ sizeof(struct sHandle),
++ psBase->psHandleArray,
++ psBase->hHandBlockAlloc);
++ psBase->psHandleArray = NULL;
++ }
++}
++
++static enum PVRSRV_ERROR FreeHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ struct sHandle *psHandle)
++{
++ u32 aKey[HAND_KEY_LEN];
++ u32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++ enum PVRSRV_ERROR eError;
++
++ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType,
++ ParentIfPrivate(psHandle));
++
++ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) &&
++ !BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++ void *hHandle;
++ hHandle = (void *)HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++ PVR_ASSERT(hHandle != NULL);
++ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ }
++
++ UnlinkFromParent(psBase, psHandle);
++
++ eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "FreeHandle: Error whilst freeing subhandles (%d)",
++ eError);
++ return eError;
++ }
++
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++ if (BATCHED_HANDLE(psHandle) &&
++ !BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
++ return PVRSRV_OK;
++ }
++
++ if (!psBase->bPurgingEnabled) {
++ if (psBase->ui32FreeHandCount == 0) {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = ui32Index;
++ } else {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR
++ (psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne == 0);
++ INDEX_TO_HANDLE_PTR(psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne =
++ ui32Index + 1;
++ }
++ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++ }
++
++ psBase->ui32FreeHandCount++;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR FreeAllHandles(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ u32 i;
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ return eError;
++
++ for (i = 0; i < psBase->ui32TotalHandCount; i++) {
++ struct sHandle *psHandle;
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE) {
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "FreeAllHandles: FreeHandle failed (%d)",
++ eError);
++ break;
++ }
++
++ if (psBase->ui32FreeHandCount ==
++ psBase->ui32TotalHandCount)
++ break;
++ }
++ }
++
++ return eError;
++}
++
++static enum PVRSRV_ERROR FreeHandleBase(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF(PVR_DBG_WARNING,
++ "FreeHandleBase: Uncommitted/Unreleased handle batch");
++ PVRSRVReleaseHandleBatch(psBase);
++ }
++
++ eError = FreeAllHandles(psBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "FreeHandleBase: Couldn't free handles (%d)", eError);
++ return eError;
++ }
++
++ FreeHandleArray(psBase);
++
++ if (psBase->psHashTab != NULL)
++ HASH_Delete(psBase->psHashTab);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psBase), psBase,
++ psBase->hBaseBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++static inline void *FindHandle(struct PVRSRV_HANDLE_BASE *psBase, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType, void *hParent)
++{
++ u32 aKey[HAND_KEY_LEN];
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ return (void *)HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static enum PVRSRV_ERROR ReallocMem(void **ppvMem, void **phBlockAlloc,
++ u32 ui32NewSize, u32 ui32OldSize)
++{
++ void *pvOldMem = *ppvMem;
++ void *hOldBlockAlloc = *phBlockAlloc;
++ u32 ui32CopySize = MIN(ui32NewSize, ui32OldSize);
++ void *pvNewMem = NULL;
++ void *hNewBlockAlloc = NULL;
++ enum PVRSRV_ERROR eError;
++
++ if (ui32NewSize == ui32OldSize)
++ return PVRSRV_OK;
++
++ if (ui32NewSize != 0) {
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NewSize, &pvNewMem, &hNewBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "ReallocMem: Couldn't allocate new memory area (%d)",
++ eError);
++ return eError;
++ }
++ }
++
++ if (ui32CopySize != 0)
++ OSMemCopy(pvNewMem, pvOldMem, ui32CopySize);
++
++ if (ui32OldSize != 0)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32OldSize, pvOldMem,
++ hOldBlockAlloc);
++
++ *ppvMem = pvNewMem;
++ *phBlockAlloc = hNewBlockAlloc;
++
++ return PVRSRV_OK;
++}
++
++static inline enum PVRSRV_ERROR ReallocHandleArray(struct PVRSRV_HANDLE_BASE
++ *psBase, u32 ui32NewCount,
++ u32 ui32OldCount)
++{
++ return ReallocMem((void **)&psBase->psHandleArray,
++ &psBase->hHandBlockAlloc,
++ ui32NewCount * sizeof(struct sHandle),
++ ui32OldCount * sizeof(struct sHandle));
++}
++
++static enum PVRSRV_ERROR IncreaseHandleArraySize(struct PVRSRV_HANDLE_BASE
++ *psBase, u32 ui32Delta)
++{
++ enum PVRSRV_ERROR eError;
++ struct sHandle *psHandle;
++ u32 ui32DeltaAdjusted =
++ ROUND_UP_TO_MULTIPLE(ui32Delta, HANDLE_BLOCK_SIZE);
++ u32 ui32NewTotalHandCount =
++ psBase->ui32TotalHandCount + ui32DeltaAdjusted;
++
++ PVR_ASSERT(ui32Delta != 0);
++
++ if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne ||
++ ui32NewTotalHandCount <= psBase->ui32TotalHandCount) {
++ ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;
++
++ ui32DeltaAdjusted =
++ ui32NewTotalHandCount - psBase->ui32TotalHandCount;
++
++ if (ui32DeltaAdjusted < ui32Delta) {
++ PVR_DPF(PVR_DBG_ERROR, "IncreaseHandleArraySize: "
++ "Maximum handle limit reached (%d)",
++ psBase->ui32MaxIndexPlusOne);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++
++ PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta);
++
++ eError = ReallocHandleArray(psBase, ui32NewTotalHandCount,
++ psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "IncreaseHandleArraySize: "
++ "ReallocHandleArray failed (%d)",
++ eError);
++ return eError;
++ }
++
++ for (psHandle = psBase->psHandleArray + psBase->ui32TotalHandCount;
++ psHandle < psBase->psHandleArray + ui32NewTotalHandCount;
++ psHandle++) {
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++ psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psHandle->ui32NextIndexPlusOne = 0;
++ }
++
++ psBase->ui32FreeHandCount += ui32DeltaAdjusted;
++
++ if (psBase->ui32FirstFreeIndex == 0) {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++ } else {
++ if (!psBase->bPurgingEnabled) {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR
++ (psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne == 0);
++
++ INDEX_TO_HANDLE_PTR(psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne =
++ psBase->ui32TotalHandCount + 1;
++ }
++ }
++
++ if (!psBase->bPurgingEnabled)
++ psBase->ui32LastFreeIndexPlusOne = ui32NewTotalHandCount;
++
++ psBase->ui32TotalHandCount = ui32NewTotalHandCount;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR EnsureFreeHandles(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32Free)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (ui32Free > psBase->ui32FreeHandCount) {
++ u32 ui32FreeHandDelta =
++ ui32Free - psBase->ui32FreeHandCount;
++ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "EnsureFreeHandles: "
++ "Couldn't allocate %u handles to ensure %u "
++ "free handles (IncreaseHandleArraySize "
++ "failed with error %d)",
++ ui32FreeHandDelta, ui32Free, eError);
++
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR AllocHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType,
++ enum PVRSRV_HANDLE_ALLOC_FLAG eFlag,
++ void *hParent)
++{
++ u32 ui32NewIndex;
++ struct sHandle *psNewHandle = NULL;
++ void *hHandle;
++ u32 aKey[HAND_KEY_LEN];
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ PVR_ASSERT(psBase->psHashTab != NULL);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
++
++ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase))
++ PVR_DPF(PVR_DBG_WARNING, "AllocHandle: "
++ "Handle batch size (%u) was too small, "
++ "allocating additional space",
++ psBase->ui32HandBatchSize);
++
++ eError = EnsureFreeHandles(psBase, 1);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "AllocHandle: EnsureFreeHandles failed (%d)", eError);
++ return eError;
++ }
++ PVR_ASSERT(psBase->ui32FreeHandCount != 0);
++
++ if (!psBase->bPurgingEnabled) {
++ ui32NewIndex = psBase->ui32FirstFreeIndex;
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ } else {
++ for (ui32NewIndex = psBase->ui32FirstFreeIndex;
++ ui32NewIndex < psBase->ui32TotalHandCount;
++ ui32NewIndex++) {
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ if (HANDLE_STRUCT_IS_FREE(psNewHandle))
++ break;
++
++ }
++ psBase->ui32FirstFreeIndex = 0;
++ PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount);
++ }
++ PVR_ASSERT(psNewHandle != NULL);
++
++ hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ if (!HASH_Insert_Extended
++ (psBase->psHashTab, aKey, (u32) hHandle)) {
++ PVR_DPF(PVR_DBG_ERROR, "AllocHandle: "
++ "Couldn't add handle to hash table");
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ psBase->ui32FreeHandCount--;
++
++ if (!psBase->bPurgingEnabled) {
++ if (psBase->ui32FreeHandCount == 0) {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne ==
++ (ui32NewIndex + 1));
++
++ psBase->ui32LastFreeIndexPlusOne = 0;
++ psBase->ui32FirstFreeIndex = 0;
++ } else {
++ psBase->ui32FirstFreeIndex =
++ (psNewHandle->ui32NextIndexPlusOne ==
++ 0) ? ui32NewIndex +
++ 1 : psNewHandle->ui32NextIndexPlusOne - 1;
++ }
++ }
++
++ psNewHandle->eType = eType;
++ psNewHandle->pvData = pvData;
++ psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psNewHandle->eFlag = eFlag;
++ psNewHandle->ui32Index = ui32NewIndex;
++
++ InitParentList(psBase, psNewHandle);
++ PVR_ASSERT(NoChildren(psBase, psNewHandle));
++
++ InitChildEntry(psBase, psNewHandle);
++ PVR_ASSERT(NoParent(psBase, psNewHandle));
++
++ if (HANDLES_BATCHED(psBase)) {
++
++ psNewHandle->ui32NextIndexPlusOne =
++ psBase->ui32FirstBatchIndexPlusOne;
++
++ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1;
++
++ SET_BATCHED_HANDLE(psNewHandle);
++ } else {
++ psNewHandle->ui32NextIndexPlusOne = 0;
++ }
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVAllocHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType,
++ enum PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ void *hHandle;
++ enum PVRSRV_ERROR eError;
++
++ *phHandle = NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ psBase->ui32BatchHandAllocFailures++;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++ hHandle = FindHandle(psBase, pvData, eType, NULL);
++ if (hHandle != NULL) {
++ struct sHandle *psHandle;
++
++ eError =
++ GetHandleStructure(psBase, &psHandle, hHandle,
++ eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVAllocHandle: "
++ "Lookup of existing handle failed");
++ return eError;
++ }
++
++ if (TEST_FLAG(psHandle->eFlag & eFlag,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED)) {
++ *phHandle = hHandle;
++ eError = PVRSRV_OK;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL);
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK))
++ psBase->ui32BatchHandAllocFailures--;
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVAllocSubHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType,
++ enum PVRSRV_HANDLE_ALLOC_FLAG eFlag,
++ void *hParent)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ enum PVRSRV_ERROR eError;
++ void *hParentKey;
++ void *hHandle;
++
++ *phHandle = NULL;
++
++ if (HANDLES_BATCHED(psBase))
++
++ psBase->ui32BatchHandAllocFailures++;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ hParent : NULL;
++
++ eError = GetHandleStructure(psBase, &psPHand, hParent,
++ PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++
++ hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++ if (hHandle != NULL) {
++ struct sHandle *psCHandle;
++ enum PVRSRV_ERROR eErr;
++
++ eErr = GetHandleStructure(psBase, &psCHandle, hHandle,
++ eType);
++ if (eErr != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSubHandle: "
++ "Lookup of existing handle failed");
++ return eErr;
++ }
++
++ PVR_ASSERT(hParentKey != NULL &&
++ ParentHandle(HANDLE_TO_HANDLE_PTR
++ (psBase, hHandle)) == hParent);
++
++ if (TEST_FLAG(psCHandle->eFlag & eFlag,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED) &&
++ ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle))
++ == hParent) {
++ *phHandle = hHandle;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag,
++ hParentKey);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++ psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++ AdoptChild(psBase, psPHand, psCHand);
++
++ *phHandle = hHandle;
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase))
++ psBase->ui32BatchHandAllocFailures--;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVFindHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType)
++{
++ void *hHandle;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hHandle = (void *)FindHandle(psBase, pvData, eType, NULL);
++ if (hHandle == NULL)
++ return PVRSRV_ERROR_GENERIC;
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVLookupHandleAnyType(struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData,
++ enum PVRSRV_HANDLE_TYPE *peType,
++ void *hHandle)
++{
++ struct sHandle *psHandle;
++ enum PVRSRV_ERROR eError;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle,
++ PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: "
++ "Error looking up handle (%d)",
++ eError);
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++ *peType = psHandle->eType;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVLookupHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVLookupHandle: Error looking up handle (%d)",
++ eError);
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVLookupSubHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType,
++ void *hAncestor)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVLookupSubHandle: "
++ "Error looking up subhandle (%d)",
++ eError);
++ return eError;
++ }
++
++ for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor;) {
++ eError =
++ GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand),
++ PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVLookupSubHandle: "
++ "Subhandle doesn't belong to given ancestor");
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ *ppvData = psCHand->pvData;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVGetParentHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phParent, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVGetParentHandle: "
++ "Error looking up subhandle (%d)",
++ eError);
++ return eError;
++ }
++
++ *phParent = ParentHandle(psHandle);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(
++ struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: "
++ "Error looking up handle (%d)",
++ eError);
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVReleaseHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void *hHandle, enum PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVReleaseHandle: Error looking up handle (%d)",
++ eError);
++ return eError;
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVNewHandleBatch(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32BatchSize)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVNewHandleBatch: "
++ "There is a handle batch already in use (size %u)",
++ psBase->ui32HandBatchSize);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32BatchSize == 0) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVNewHandleBatch: Invalid batch size (%u)",
++ ui32BatchSize);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = EnsureFreeHandles(psBase, ui32BatchSize);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVNewHandleBatch: "
++ "EnsureFreeHandles failed (error %d)",
++ eError);
++ return eError;
++ }
++
++ psBase->ui32HandBatchSize = ui32BatchSize;
++ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0);
++ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0);
++ PVR_ASSERT(HANDLES_BATCHED(psBase));
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(
++ struct PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit)
++{
++ u32 ui32IndexPlusOne;
++ IMG_BOOL bCommitBatch = bCommit;
++
++ if (!HANDLES_BATCHED(psBase)) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: "
++ "There is no handle batch");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ }
++
++ if (psBase->ui32BatchHandAllocFailures != 0) {
++ if (bCommit)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVHandleBatchCommitOrRelease: "
++ "Attempting to commit batch with handle "
++ "allocation failures.");
++ bCommitBatch = IMG_FALSE;
++ }
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit);
++
++ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++ while (ui32IndexPlusOne != 0) {
++ struct sHandle *psHandle =
++ INDEX_TO_HANDLE_PTR(psBase, ui32IndexPlusOne - 1);
++ u32 ui32NextIndexPlusOne =
++ psHandle->ui32NextIndexPlusOne;
++ PVR_ASSERT(BATCHED_HANDLE(psHandle));
++
++ psHandle->ui32NextIndexPlusOne = 0;
++
++ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++ enum PVRSRV_ERROR eError;
++
++ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ SET_UNBATCHED_HANDLE(psHandle);
++
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVHandleBatchCommitOrRelease: "
++ "Error freeing handle (%d)",
++ eError);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ } else {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ ui32IndexPlusOne = ui32NextIndexPlusOne;
++ }
++
++#ifdef DEBUG
++ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount) {
++ u32 ui32Delta =
++ psBase->ui32TotalHandCount -
++ psBase->ui32TotalHandCountPreBatch;
++
++ PVR_ASSERT(psBase->ui32TotalHandCount >
++ psBase->ui32TotalHandCountPreBatch);
++
++ PVR_DPF(PVR_DBG_WARNING,
++ "PVRSRVHandleBatchCommitOrRelease: "
++ "The batch size was too small. "
++ "Batch size was %u, but needs to be %u",
++ psBase->ui32HandBatchSize,
++ psBase->ui32HandBatchSize + ui32Delta);
++
++ }
++#endif
++
++ psBase->ui32HandBatchSize = 0;
++ psBase->ui32FirstBatchIndexPlusOne = 0;
++ psBase->ui32TotalHandCountPreBatch = 0;
++ psBase->ui32BatchHandAllocFailures = 0;
++
++ if (psBase->ui32BatchHandAllocFailures != 0 && bCommit) {
++ PVR_ASSERT(!bCommitBatch);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVCommitHandleBatch(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE);
++}
++
++void PVRSRVReleaseHandleBatch(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ (void)PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE);
++}
++
++enum PVRSRV_ERROR PVRSRVSetMaxHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32MaxHandle)
++{
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSetMaxHandle: "
++ "Limit cannot be set whilst in batch mode");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (ui32MaxHandle == 0 || ui32MaxHandle > DEFAULT_MAX_HANDLE) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSetMaxHandle: "
++ "Limit must be between %u and %u, inclusive",
++				1, DEFAULT_MAX_HANDLE);
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psBase->ui32TotalHandCount != 0) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSetMaxHandle: "
++				"Limit cannot be set because handles "
++ "have already been allocated");
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->ui32MaxIndexPlusOne = ui32MaxHandle;
++
++ return PVRSRV_OK;
++}
++
++u32 PVRSRVGetMaxHandle(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ return psBase->ui32MaxIndexPlusOne;
++}
++
++enum PVRSRV_ERROR PVRSRVEnableHandlePurging(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ if (psBase->bPurgingEnabled) {
++ PVR_DPF(PVR_DBG_WARNING,
++ "PVRSRVEnableHandlePurging: Purging already enabled");
++ return PVRSRV_OK;
++ }
++
++ if (psBase->ui32TotalHandCount != 0) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: "
++ "Handles have already been allocated");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->bPurgingEnabled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVPurgeHandles(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ u32 ui32Handle;
++ u32 ui32NewHandCount;
++
++ if (!psBase->bPurgingEnabled) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPurgeHandles: "
++ "Purging not enabled for this handle base");
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++ }
++
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPurgeHandles: "
++ "Purging not allowed whilst in batch mode");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ for (ui32Handle = psBase->ui32TotalHandCount; ui32Handle != 0;
++ ui32Handle--) {
++ struct sHandle *psHandle =
++ HANDLE_TO_HANDLE_PTR(psBase, ui32Handle);
++ if (!HANDLE_STRUCT_IS_FREE(psHandle))
++ break;
++ }
++
++ ui32NewHandCount = ROUND_UP_TO_MULTIPLE(ui32Handle, HANDLE_BLOCK_SIZE);
++
++ if (ui32NewHandCount >= ui32Handle
++ && ui32NewHandCount <= (psBase->ui32TotalHandCount / 2)) {
++ u32 ui32Delta = psBase->ui32TotalHandCount - ui32NewHandCount;
++ enum PVRSRV_ERROR eError;
++
++ eError =
++ ReallocHandleArray(psBase, ui32NewHandCount,
++ psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psBase->ui32TotalHandCount = ui32NewHandCount;
++ psBase->ui32FreeHandCount -= ui32Delta;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVAllocHandleBase(struct PVRSRV_HANDLE_BASE **ppsBase)
++{
++ struct PVRSRV_HANDLE_BASE *psBase;
++ void *hBlockAlloc;
++ enum PVRSRV_ERROR eError;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase), (void **)&psBase, &hBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocHandleBase: "
++ "Couldn't allocate handle base (%d)",
++ eError);
++ return eError;
++ }
++ OSMemSet(psBase, 0, sizeof(*psBase));
++
++ psBase->psHashTab =
++ HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
++ HAND_KEY_LEN * sizeof(u32),
++ HASH_Func_Default, HASH_Key_Comp_Default);
++ if (psBase->psHashTab == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocHandleBase: "
++ "Couldn't create data pointer hash table\n");
++ goto failure;
++ }
++
++ psBase->hBaseBlockAlloc = hBlockAlloc;
++
++ psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE;
++
++ *ppsBase = psBase;
++
++ return PVRSRV_OK;
++failure:
++ (void)PVRSRVFreeHandleBase(psBase);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR PVRSRVFreeHandleBase(struct PVRSRV_HANDLE_BASE *psBase)
++{
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++ eError = FreeHandleBase(psBase);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)",
++ eError);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVHandleInit(void)
++{
++ enum PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsKernelHandleBase == NULL);
++
++ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)",
++ eError);
++ goto error;
++ }
++
++ eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVHandleInit: "
++ "PVRSRVEnableHandlePurging failed (%d)",
++ eError);
++ goto error;
++ }
++
++ return PVRSRV_OK;
++error:
++ (void)PVRSRVHandleDeInit();
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVHandleDeInit(void)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (gpsKernelHandleBase != NULL) {
++ eError = FreeHandleBase(gpsKernelHandleBase);
++ if (eError == PVRSRV_OK) {
++ gpsKernelHandleBase = NULL;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVHandleDeInit: "
++ "FreeHandleBase failed (%d)",
++ eError);
++ }
++ }
++
++ return eError;
++}
+diff --git a/drivers/gpu/pvr/handle.h b/drivers/gpu/pvr/handle.h
+new file mode 100644
+index 0000000..668e5d8
+--- /dev/null
++++ b/drivers/gpu/pvr/handle.h
+@@ -0,0 +1,150 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++enum PVRSRV_HANDLE_TYPE {
++ PVRSRV_HANDLE_TYPE_NONE = 0,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_TYPE_MMAP_INFO,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER
++};
++
++enum PVRSRV_HANDLE_ALLOC_FLAG {
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04
++};
++
++struct PVRSRV_HANDLE_BASE;
++struct PVRSRV_HANDLE_BASE;
++
++extern struct PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++enum PVRSRV_ERROR PVRSRVAllocHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType,
++ enum PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++enum PVRSRV_ERROR PVRSRVAllocSubHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle,
++ void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType,
++ enum PVRSRV_HANDLE_ALLOC_FLAG eFlag,
++ void *hParent);
++
++enum PVRSRV_ERROR PVRSRVFindHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phHandle, void *pvData,
++ enum PVRSRV_HANDLE_TYPE eType);
++
++enum PVRSRV_ERROR PVRSRVLookupHandleAnyType(struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData,
++ enum PVRSRV_HANDLE_TYPE *peType,
++ void *hHandle);
++
++enum PVRSRV_ERROR PVRSRVLookupHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType);
++
++enum PVRSRV_ERROR PVRSRVLookupSubHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType, void *hAncestor);
++
++enum PVRSRV_ERROR PVRSRVGetParentHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void **phParent, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType);
++
++enum PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(
++ struct PVRSRV_HANDLE_BASE *psBase,
++ void **ppvData, void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType);
++
++enum PVRSRV_ERROR PVRSRVReleaseHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ void *hHandle,
++ enum PVRSRV_HANDLE_TYPE eType);
++
++enum PVRSRV_ERROR PVRSRVNewHandleBatch(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32BatchSize);
++
++enum PVRSRV_ERROR PVRSRVCommitHandleBatch(struct PVRSRV_HANDLE_BASE *psBase);
++
++void PVRSRVReleaseHandleBatch(struct PVRSRV_HANDLE_BASE *psBase);
++
++enum PVRSRV_ERROR PVRSRVSetMaxHandle(struct PVRSRV_HANDLE_BASE *psBase,
++ u32 ui32MaxHandle);
++
++u32 PVRSRVGetMaxHandle(struct PVRSRV_HANDLE_BASE *psBase);
++
++enum PVRSRV_ERROR PVRSRVEnableHandlePurging(struct PVRSRV_HANDLE_BASE *psBase);
++
++enum PVRSRV_ERROR PVRSRVPurgeHandles(struct PVRSRV_HANDLE_BASE *psBase);
++
++enum PVRSRV_ERROR PVRSRVAllocHandleBase(struct PVRSRV_HANDLE_BASE **ppsBase);
++
++enum PVRSRV_ERROR PVRSRVFreeHandleBase(struct PVRSRV_HANDLE_BASE *psBase);
++
++enum PVRSRV_ERROR PVRSRVHandleInit(void);
++
++enum PVRSRV_ERROR PVRSRVHandleDeInit(void);
++
++
++#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \
++ (void)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag)
++
++#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent)\
++ (void)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, \
++ eFlag, hParent)
++
++#endif
+diff --git a/drivers/gpu/pvr/hash.c b/drivers/gpu/pvr/hash.c
+new file mode 100644
+index 0000000..318d9dd
+--- /dev/null
++++ b/drivers/gpu/pvr/hash.c
+@@ -0,0 +1,382 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a, b) ((a) > (b) ? (a) : (b))
++
++#define KEY_TO_INDEX(pHash, key, uSize) \
++ ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define KEY_COMPARE(pHash, pKey1, pKey2) \
++ ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct BUCKET {
++ struct BUCKET *pNext;
++ u32 v;
++ u32 k[];
++};
++
++
++struct HASH_TABLE {
++ struct BUCKET **ppBucketTable;
++ u32 uSize;
++ u32 uCount;
++ u32 uMinimumSize;
++ u32 uKeySize;
++ u32 (*pfnHashFunc)(size_t uKeySize, void *pkey, u32 uHashTabLen);
++ IMG_BOOL (*pfnKeyComp)(size_t uKeySize, void *pKey1, void *pkey2);
++};
++
++u32 HASH_Func_Default(size_t uKeySize, void *pKey, u32 uHashTabLen)
++{
++ u32 *p = (u32 *) pKey;
++ u32 uKeyLen = uKeySize / sizeof(u32);
++ u32 ui;
++ u32 uHashKey = 0;
++
++ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++ PVR_ASSERT((uKeySize % sizeof(u32)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++) {
++ u32 uHashPart = (u32) *p++;
++
++ uHashPart += (uHashPart << 12);
++ uHashPart ^= (uHashPart >> 22);
++ uHashPart += (uHashPart << 4);
++ uHashPart ^= (uHashPart >> 9);
++ uHashPart += (uHashPart << 10);
++ uHashPart ^= (uHashPart >> 2);
++ uHashPart += (uHashPart << 7);
++ uHashPart ^= (uHashPart >> 12);
++
++ uHashKey += uHashPart;
++ }
++
++ return uHashKey;
++}
++
++IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2)
++{
++ u32 *p1 = (u32 *) pKey1;
++ u32 *p2 = (u32 *) pKey2;
++ u32 uKeyLen = uKeySize / sizeof(u32);
++ u32 ui;
++
++ PVR_ASSERT((uKeySize % sizeof(u32)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ if (*p1++ != *p2++)
++ return IMG_FALSE;
++
++ return IMG_TRUE;
++}
++
++static enum PVRSRV_ERROR _ChainInsert(struct HASH_TABLE *pHash,
++ struct BUCKET *pBucket,
++ struct BUCKET **ppBucketTable, u32 uSize)
++{
++ u32 uIndex;
++
++ PVR_ASSERT(pBucket != NULL);
++ PVR_ASSERT(ppBucketTable != NULL);
++ PVR_ASSERT(uSize != 0);
++
++ if ((pBucket == NULL) || (ppBucketTable == NULL) || (uSize == 0)) {
++ PVR_DPF(PVR_DBG_ERROR, "_ChainInsert: invalid parameter");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++ pBucket->pNext = ppBucketTable[uIndex];
++ ppBucketTable[uIndex] = pBucket;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR _Rehash(struct HASH_TABLE *pHash,
++ struct BUCKET **ppOldTable, u32 uOldSize,
++ struct BUCKET **ppNewTable, u32 uNewSize)
++{
++ u32 uIndex;
++ for (uIndex = 0; uIndex < uOldSize; uIndex++) {
++ struct BUCKET *pBucket;
++ pBucket = ppOldTable[uIndex];
++ while (pBucket != NULL) {
++ struct BUCKET *pNextBucket = pBucket->pNext;
++ if (_ChainInsert(pHash, pBucket, ppNewTable, uNewSize)
++ != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_Rehash: call to _ChainInsert failed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++ pBucket = pNextBucket;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static IMG_BOOL _Resize(struct HASH_TABLE *pHash, u32 uNewSize)
++{
++ if (uNewSize != pHash->uSize) {
++ struct BUCKET **ppNewTable;
++ u32 uIndex;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x",
++ pHash->uSize, uNewSize, pHash->uCount);
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET *) * uNewSize,
++ (void **) &ppNewTable, NULL);
++ if (ppNewTable == NULL)
++ return IMG_FALSE;
++
++ for (uIndex = 0; uIndex < uNewSize; uIndex++)
++ ppNewTable[uIndex] = NULL;
++
++ if (_Rehash(pHash, pHash->ppBucketTable, pHash->uSize,
++ ppNewTable, uNewSize) != PVRSRV_OK)
++ return IMG_FALSE;
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET *) * pHash->uSize,
++ pHash->ppBucketTable, NULL);
++ pHash->ppBucketTable = ppNewTable;
++ pHash->uSize = uNewSize;
++ }
++ return IMG_TRUE;
++}
++
++struct HASH_TABLE *HASH_Create_Extended(u32 uInitialLen, size_t uKeySize,
++ u32 (*pfnHashFunc)(size_t uKeySize, void *pkey,
++ u32 uHashTabLen),
++ IMG_BOOL (*pfnKeyComp)(size_t uKeySize,
++ void *pKey1,
++ void *pkey2))
++{
++ struct HASH_TABLE *pHash;
++ u32 uIndex;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x",
++ uInitialLen);
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct HASH_TABLE),
++ (void **) &pHash, NULL) != PVRSRV_OK)
++ return NULL;
++
++ pHash->uCount = 0;
++ pHash->uSize = uInitialLen;
++ pHash->uMinimumSize = uInitialLen;
++ pHash->uKeySize = uKeySize;
++ pHash->pfnHashFunc = pfnHashFunc;
++ pHash->pfnKeyComp = pfnKeyComp;
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET *) * pHash->uSize,
++ (void **) &pHash->ppBucketTable, NULL);
++
++ if (pHash->ppBucketTable == NULL) {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(struct HASH_TABLE),
++ pHash, NULL);
++ return NULL;
++ }
++
++ for (uIndex = 0; uIndex < pHash->uSize; uIndex++)
++ pHash->ppBucketTable[uIndex] = NULL;
++ return pHash;
++}
++
++struct HASH_TABLE *HASH_Create(u32 uInitialLen)
++{
++ return HASH_Create_Extended(uInitialLen, sizeof(u32),
++ &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++void HASH_Delete(struct HASH_TABLE *pHash)
++{
++ if (pHash != NULL) {
++ PVR_DPF(PVR_DBG_MESSAGE, "HASH_Delete");
++
++ PVR_ASSERT(pHash->uCount == 0);
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET *) * pHash->uSize,
++ pHash->ppBucketTable, NULL);
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(struct HASH_TABLE),
++ pHash, NULL);
++ }
++}
++
++IMG_BOOL HASH_Insert_Extended(struct HASH_TABLE *pHash, void *pKey, u32 v)
++{
++ struct BUCKET *pBucket;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash,
++ pKey, v);
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "HASH_Insert_Extended: invalid parameter");
++ return IMG_FALSE;
++ }
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET) + pHash->uKeySize,
++ (void **) &pBucket, NULL) != PVRSRV_OK)
++ return IMG_FALSE;
++
++ pBucket->v = v;
++ OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
++ if (_ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize) !=
++ PVRSRV_OK) {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET) + pHash->uKeySize,
++ pBucket, NULL);
++ return IMG_FALSE;
++ }
++
++ pHash->uCount++;
++
++ if (pHash->uCount << 1 > pHash->uSize)
++ _Resize(pHash, pHash->uSize << 1);
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL HASH_Insert(struct HASH_TABLE *pHash, u32 k, u32 v)
++{
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v);
++
++ return HASH_Insert_Extended(pHash, &k, v);
++}
++
++u32 HASH_Remove_Extended(struct HASH_TABLE *pHash, void *pKey)
++{
++ struct BUCKET **ppBucket;
++ u32 uIndex;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, pKey=%08X", pHash,
++ pKey);
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL) {
++		PVR_DPF(PVR_DBG_ERROR,
++			"HASH_Remove_Extended: invalid parameter");
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL;
++ ppBucket = &((*ppBucket)->pNext))
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) {
++ struct BUCKET *pBucket = *ppBucket;
++ u32 v = pBucket->v;
++ (*ppBucket) = pBucket->pNext;
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct BUCKET) + pHash->uKeySize,
++ pBucket, NULL);
++
++ pHash->uCount--;
++
++ if (pHash->uSize > (pHash->uCount << 2) &&
++ pHash->uSize > pHash->uMinimumSize)
++
++ _Resize(pHash,
++ PRIVATE_MAX(pHash->uSize >> 1,
++ pHash->uMinimumSize));
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v);
++ return v;
++ }
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash,
++ pKey);
++ return 0;
++}
++
++u32 HASH_Remove(struct HASH_TABLE *pHash, u32 k)
++{
++ PVR_DPF(PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k);
++
++ return HASH_Remove_Extended(pHash, &k);
++}
++
++u32 HASH_Retrieve_Extended(struct HASH_TABLE *pHash, void *pKey)
++{
++ struct BUCKET **ppBucket;
++ u32 uIndex;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, pKey=%08X", pHash,
++ pKey);
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "HASH_Retrieve_Extended: invalid parameter");
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL;
++ ppBucket = &((*ppBucket)->pNext))
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) {
++ struct BUCKET *pBucket = *ppBucket;
++ u32 v = pBucket->v;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v);
++ return v;
++ }
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey);
++ return 0;
++}
++
++u32 HASH_Retrieve(struct HASH_TABLE *pHash, u32 k)
++{
++ PVR_DPF(PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash, k);
++ return HASH_Retrieve_Extended(pHash, &k);
++}
++
+diff --git a/drivers/gpu/pvr/hash.h b/drivers/gpu/pvr/hash.h
+new file mode 100644
+index 0000000..d0319ad
+--- /dev/null
++++ b/drivers/gpu/pvr/hash.h
+@@ -0,0 +1,51 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++struct HASH_TABLE;
++u32 HASH_Func_Default(size_t uKeySize, void *pKey, u32 uHashTabLen);
++IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2);
++struct HASH_TABLE *HASH_Create_Extended(u32 uInitialLen, size_t uKeySize,
++ u32 (*pfnHashFunc)(size_t uKeySize, void *pkey,
++ u32 uHashTabLen),
++ IMG_BOOL (*pfnKeyComp)(size_t uKeySize,
++ void *pKey1,
++ void *pkey2));
++struct HASH_TABLE *HASH_Create(u32 uInitialLen);
++void HASH_Delete(struct HASH_TABLE *pHash);
++IMG_BOOL HASH_Insert_Extended(struct HASH_TABLE *pHash, void *pKey, u32 v);
++IMG_BOOL HASH_Insert(struct HASH_TABLE *pHash, u32 k, u32 v);
++u32 HASH_Remove_Extended(struct HASH_TABLE *pHash, void *pKey);
++u32 HASH_Remove(struct HASH_TABLE *pHash, u32 k);
++u32 HASH_Retrieve_Extended(struct HASH_TABLE *pHash, void *pKey);
++u32 HASH_Retrieve(struct HASH_TABLE *pHash, u32 k);
++
++#endif
+diff --git a/drivers/gpu/pvr/img_defs.h b/drivers/gpu/pvr/img_defs.h
+new file mode 100644
+index 0000000..b0a25c2
+--- /dev/null
++++ b/drivers/gpu/pvr/img_defs.h
+@@ -0,0 +1,46 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__IMG_DEFS_H__)
++#define __IMG_DEFS_H__
++
++#include "img_types.h"
++
++#define IMG_SUCCESS 0
++
++#define IMG_NO_REG 1
++
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++#ifdef __GNUC__
++#define unref__ __attribute__ ((unused))
++#else
++#define unref__
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/img_types.h b/drivers/gpu/pvr/img_types.h
+new file mode 100644
+index 0000000..c52ba6c
+--- /dev/null
++++ b/drivers/gpu/pvr/img_types.h
+@@ -0,0 +1,69 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#include <linux/types.h>
++
++#if !defined(IMG_UINT32_MAX)
++#define IMG_UINT32_MAX 0xFFFFFFFFUL
++#endif
++
++typedef enum tag_img_bool {
++ IMG_FALSE = 0,
++ IMG_TRUE = 1,
++ IMG_FORCE_ALIGN = 0x7FFFFFFF
++} IMG_BOOL, *IMG_PBOOL;
++
++struct IMG_CPU_PHYADDR {
++ u32 uiAddr;
++};
++
++struct IMG_DEV_VIRTADDR {
++ u32 uiAddr;
++};
++
++struct IMG_DEV_PHYADDR {
++ u32 uiAddr;
++};
++
++struct IMG_SYS_PHYADDR {
++ u32 uiAddr;
++};
++
++struct SYSTEM_ADDR {
++
++ u32 ui32PageCount;
++ union {
++ struct IMG_SYS_PHYADDR sContig;
++ struct IMG_SYS_PHYADDR asNonContig[1];
++ } u;
++};
++
++#include "img_defs.h"
++
++#endif
+diff --git a/drivers/gpu/pvr/ioctldef.h b/drivers/gpu/pvr/ioctldef.h
+new file mode 100644
+index 0000000..36a1684
+--- /dev/null
++++ b/drivers/gpu/pvr/ioctldef.h
+@@ -0,0 +1,93 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF)
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP 0x00000001
++#define FILE_DEVICE_CD_ROM 0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
++#define FILE_DEVICE_CONTROLLER 0x00000004
++#define FILE_DEVICE_DATALINK 0x00000005
++#define FILE_DEVICE_DFS 0x00000006
++#define FILE_DEVICE_DISK 0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
++#define FILE_DEVICE_FILE_SYSTEM 0x00000009
++#define FILE_DEVICE_INPORT_PORT 0x0000000a
++#define FILE_DEVICE_KEYBOARD 0x0000000b
++#define FILE_DEVICE_MAILSLOT 0x0000000c
++#define FILE_DEVICE_MIDI_IN 0x0000000d
++#define FILE_DEVICE_MIDI_OUT 0x0000000e
++#define FILE_DEVICE_MOUSE 0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010
++#define FILE_DEVICE_NAMED_PIPE 0x00000011
++#define FILE_DEVICE_NETWORK 0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER 0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL 0x00000015
++#define FILE_DEVICE_PARALLEL_PORT 0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017
++#define FILE_DEVICE_PRINTER 0x00000018
++#define FILE_DEVICE_SCANNER 0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a
++#define FILE_DEVICE_SERIAL_PORT 0x0000001b
++#define FILE_DEVICE_SCREEN 0x0000001c
++#define FILE_DEVICE_SOUND 0x0000001d
++#define FILE_DEVICE_STREAMS 0x0000001e
++#define FILE_DEVICE_TAPE 0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
++#define FILE_DEVICE_TRANSPORT 0x00000021
++#define FILE_DEVICE_UNKNOWN 0x00000022
++#define FILE_DEVICE_VIDEO 0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
++#define FILE_DEVICE_WAVE_IN 0x00000025
++#define FILE_DEVICE_WAVE_OUT 0x00000026
++#define FILE_DEVICE_8042_PORT 0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
++#define FILE_DEVICE_BATTERY 0x00000029
++#define FILE_DEVICE_BUS_EXTENDER 0x0000002a
++#define FILE_DEVICE_MODEM 0x0000002b
++#define FILE_DEVICE_VDM 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE 0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access) ( \
++ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED 0
++#define METHOD_IN_DIRECT 1
++#define METHOD_OUT_DIRECT 2
++#define METHOD_NEITHER 3
++
++#define FILE_ANY_ACCESS 0
++#define FILE_READ_ACCESS 0x0001
++#define FILE_WRITE_ACCESS 0x0002
++
++#endif
+diff --git a/drivers/gpu/pvr/kernelbuffer.h b/drivers/gpu/pvr/kernelbuffer.h
+new file mode 100644
+index 0000000..da69d7e
+--- /dev/null
++++ b/drivers/gpu/pvr/kernelbuffer.h
+@@ -0,0 +1,55 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++#include "servicesext.h"
++
++struct PVRSRV_BC_SRV2BUFFER_KMJTABLE {
++ u32 ui32TableSize;
++ enum PVRSRV_ERROR (*pfnOpenBCDevice)(void **);
++ enum PVRSRV_ERROR (*pfnCloseBCDevice)(void *);
++ enum PVRSRV_ERROR (*pfnGetBCInfo)(void *, struct BUFFER_INFO *);
++ enum PVRSRV_ERROR (*pfnGetBCBuffer)(void *, u32,
++ struct PVRSRV_SYNC_DATA *, void **);
++ enum PVRSRV_ERROR (*pfnGetBufferAddr)(void *, void *,
++ struct IMG_SYS_PHYADDR **, u32 *,
++ void __iomem **, void **, IMG_BOOL *);
++};
++
++struct PVRSRV_BC_BUFFER2SRV_KMJTABLE {
++ u32 ui32TableSize;
++ enum PVRSRV_ERROR (*pfnPVRSRVRegisterBCDevice)(
++ struct PVRSRV_BC_SRV2BUFFER_KMJTABLE *, u32 *);
++ enum PVRSRV_ERROR (*pfnPVRSRVRemoveBCDevice)(u32);
++};
++
++IMG_BOOL PVRGetBufferClassJTable(
++ struct PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++
++#endif
+diff --git a/drivers/gpu/pvr/kerneldisplay.h b/drivers/gpu/pvr/kerneldisplay.h
+new file mode 100644
+index 0000000..c601906
+--- /dev/null
++++ b/drivers/gpu/pvr/kerneldisplay.h
+@@ -0,0 +1,104 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++#include <linux/module.h>
++
++#define DC_FLIP_COMMAND 0
++
++#define DC_STATE_NO_FLUSH_COMMANDS 0
++#define DC_STATE_FLUSH_COMMANDS 1
++
++struct PVRSRV_DC_SRV2DISP_KMJTABLE {
++ struct module *owner;
++ u32 ui32TableSize;
++ enum PVRSRV_ERROR (*pfnOpenDCDevice)(u32, void **,
++ struct PVRSRV_SYNC_DATA *);
++ enum PVRSRV_ERROR (*pfnCloseDCDevice)(void *);
++ enum PVRSRV_ERROR (*pfnEnumDCFormats)(void *, u32 *,
++ struct DISPLAY_FORMAT *);
++ enum PVRSRV_ERROR (*pfnEnumDCDims)(void *, struct DISPLAY_FORMAT *,
++ u32 *, struct DISPLAY_DIMS *);
++ enum PVRSRV_ERROR (*pfnGetDCSystemBuffer)(void *, void **);
++ enum PVRSRV_ERROR (*pfnGetDCInfo)(void *, struct DISPLAY_INFO *);
++ enum PVRSRV_ERROR (*pfnGetBufferAddr)(void *, void *,
++ struct IMG_SYS_PHYADDR **, u32 *,
++ void __iomem **, void **, IMG_BOOL *);
++ enum PVRSRV_ERROR (*pfnCreateDCSwapChain)(void *, u32,
++ struct DISPLAY_SURF_ATTRIBUTES *,
++ struct DISPLAY_SURF_ATTRIBUTES *,
++ u32, struct PVRSRV_SYNC_DATA **,
++ u32, void **, u32 *);
++ enum PVRSRV_ERROR (*pfnDestroyDCSwapChain)(void *, void *);
++ enum PVRSRV_ERROR (*pfnSetDCDstRect)(void *, void *, struct IMG_RECT *);
++ enum PVRSRV_ERROR (*pfnSetDCSrcRect)(void *, void *, struct IMG_RECT *);
++ enum PVRSRV_ERROR (*pfnSetDCDstColourKey)(void *, void *, u32);
++ enum PVRSRV_ERROR (*pfnSetDCSrcColourKey)(void *, void *, u32);
++ enum PVRSRV_ERROR (*pfnGetDCBuffers)(void *, void *, u32 *, void **);
++ void (*pfnSetDCState)(void *, u32);
++};
++
++struct PVRSRV_DC_DISP2SRV_KMJTABLE {
++ u32 ui32TableSize;
++ enum PVRSRV_ERROR (*pfnPVRSRVRegisterDCDevice)(
++ struct PVRSRV_DC_SRV2DISP_KMJTABLE*, u32 *);
++ enum PVRSRV_ERROR (*pfnPVRSRVRemoveDCDevice)(u32);
++ enum PVRSRV_ERROR (*pfnPVRSRVOEMFunction)(u32, void *, u32, void *,
++ u32);
++ enum PVRSRV_ERROR (*pfnPVRSRVRegisterCmdProcList)(u32,
++ IMG_BOOL (**)(void *, u32, void *), u32[][2],
++ u32);
++ enum PVRSRV_ERROR (*pfnPVRSRVRemoveCmdProcList)(u32, u32);
++ void (*pfnPVRSRVCmdComplete)(void *, IMG_BOOL);
++ enum PVRSRV_ERROR (*pfnPVRSRVRegisterSystemISRHandler)(
++ IMG_BOOL (*)(void *), void *, u32, u32);
++ enum PVRSRV_ERROR (*pfnPVRSRVRegisterPowerDevice)(u32,
++ enum PVRSRV_ERROR (*)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE),
++ void *, enum PVR_POWER_STATE, enum PVR_POWER_STATE);
++};
++
++struct DISPLAYCLASS_FLIP_COMMAND {
++ void *hExtDevice;
++ void *hExtSwapChain;
++ void *hExtBuffer;
++ void *hPrivateTag;
++ u32 ui32ClipRectCount;
++ struct IMG_RECT *psClipRect;
++ u32 ui32SwapInterval;
++};
++
++IMG_BOOL PVRGetDisplayClassJTable(struct PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++
++#endif
+diff --git a/drivers/gpu/pvr/lock.h b/drivers/gpu/pvr/lock.h
+new file mode 100644
+index 0000000..c3b6ff3
+--- /dev/null
++++ b/drivers/gpu/pvr/lock.h
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++#ifndef __LOCK_H__
++#define __LOCK_H__
++
++extern struct mutex gPVRSRVLock;
++
++#endif
+diff --git a/drivers/gpu/pvr/mem.c b/drivers/gpu/pvr/mem.c
+new file mode 100644
+index 0000000..6375cad
+--- /dev/null
++++ b/drivers/gpu/pvr/mem.c
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "pvr_bridge_km.h"
++
++static enum PVRSRV_ERROR FreeSharedSysMemCallBack(void *pvParam, u32 ui32Param)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ OSFreePages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ psKernelMemInfo->pvLinAddrKM,
++ psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO), psKernelMemInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVAllocSharedSysMemoryKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32Flags, u32 ui32Size,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ (void **) &psKernelMemInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: "
++ "Failed to alloc memory for meminfo");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
++
++ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++ psKernelMemInfo->ui32Flags = ui32Flags;
++ psKernelMemInfo->ui32AllocSize = ui32Size;
++
++ if (OSAllocPages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize, HOST_PAGESIZE(),
++ &psKernelMemInfo->pvLinAddrKM,
++ &psKernelMemInfo->sMemBlk.hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: "
++ "Failed to alloc memory for block");
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo, NULL);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem = ResManRegisterRes(
++ psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ psKernelMemInfo, 0,
++ FreeSharedSysMemCallBack);
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVFreeSharedSysMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psKernelMemInfo->sMemBlk.hResItem)
++ ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem);
++ else
++ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVDissociateMemFromResmanKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (!psKernelMemInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ if (psKernelMemInfo->sMemBlk.hResItem) {
++ eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem,
++ NULL);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDissociateMemFromResmanKM: "
++ "ResManDissociateRes failed");
++ PVR_DBG_BREAK;
++ return eError;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem = NULL;
++ }
++
++ return eError;
++}
+diff --git a/drivers/gpu/pvr/mm.c b/drivers/gpu/pvr/mm.c
+new file mode 100644
+index 0000000..29bdd93
+--- /dev/null
++++ b/drivers/gpu/pvr/mm.c
+@@ -0,0 +1,1501 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++#include <linux/dma-mapping.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "syscommon.h"
++#include "mutils.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++enum DEBUG_MEM_ALLOC_TYPE {
++ DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ DEBUG_MEM_ALLOC_TYPE_IO,
++ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ DEBUG_MEM_ALLOC_TYPE_COUNT
++};
++
++struct DEBUG_MEM_ALLOC_REC {
++ enum DEBUG_MEM_ALLOC_TYPE eAllocType;
++ void *pvKey;
++ void *pvCpuVAddr;
++ u32 ulCpuPAddr;
++ void *pvPrivateData;
++ u32 ui32Bytes;
++ pid_t pid;
++ char *pszFileName;
++ u32 ui32Line;
++
++ struct DEBUG_MEM_ALLOC_REC *psNext;
++};
++
++static struct DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static u32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static u32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static u32 g_SysRAMWaterMark;
++static u32 g_SysRAMHighWaterMark;
++
++static u32 g_IOMemWaterMark;
++static u32 g_IOMemHighWaterMark;
++
++static void DebugMemAllocRecordAdd(enum DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey, void *pvCpuVAddr,
++ u32 ulCpuPAddr, void *pvPrivateData,
++ u32 ui32Bytes, char *pszFileName,
++ u32 ui32Line);
++
++static void DebugMemAllocRecordRemove(enum DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey, char *pszFileName,
++ u32 ui32Line);
++
++static char *DebugMemAllocRecordTypeToString(
++ enum DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++static off_t printMemoryRecords(char *buffer, size_t size, off_t off);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++struct DEBUG_LINUX_MEM_AREA_REC {
++ struct LinuxMemArea *psLinuxMemArea;
++ u32 ui32Flags;
++ pid_t pid;
++
++ struct DEBUG_LINUX_MEM_AREA_REC *psNext;
++};
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static struct mutex g_sDebugMutex;
++#endif
++
++static struct DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static u32 g_LinuxMemAreaCount;
++static u32 g_LinuxMemAreaWaterMark;
++static u32 g_LinuxMemAreaHighWaterMark;
++
++static off_t printLinuxMemAreaRecords(char *buffer, size_t size, off_t off);
++#endif
++
++static struct kmem_cache *psLinuxMemAreaCache;
++
++
++static struct LinuxMemArea *LinuxMemAreaStructAlloc(void);
++static void LinuxMemAreaStructFree(struct LinuxMemArea *psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static void DebugLinuxMemAreaRecordAdd(struct LinuxMemArea *psLinuxMemArea,
++ u32 ui32Flags);
++static struct DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(
++ struct LinuxMemArea *psLinuxMemArea);
++static void DebugLinuxMemAreaRecordRemove(struct LinuxMemArea *psLinuxMemArea);
++#endif
++
++enum PVRSRV_ERROR LinuxMMInit(void)
++{
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ mutex_init(&g_sDebugMutex);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ int iStatus;
++ iStatus =
++ CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++ if (iStatus != 0)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++ int iStatus;
++ iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++ if (iStatus != 0)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++#endif
++ psLinuxMemAreaCache =
++ kmem_cache_create("img-mm", sizeof(struct LinuxMemArea), 0, 0,
++ NULL);
++ if (!psLinuxMemAreaCache) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: failed to allocate kmem_cache",
++ __func__);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++
++void LinuxMMCleanup(void)
++{
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ struct DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord =
++ g_LinuxMemAreaRecords, *psNextRecord;
++
++ if (g_LinuxMemAreaCount)
++ PVR_DPF(PVR_DBG_ERROR, "%s: BUG!: "
++ "There are %d struct LinuxMemArea "
++ "allocation unfreed (%ld bytes)",
++ __func__, g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark);
++
++ while (psCurrentRecord) {
++ struct LinuxMemArea *psLinuxMemArea;
++
++ psNextRecord = psCurrentRecord->psNext;
++ psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++ PVR_DPF(PVR_DBG_ERROR, "%s: BUG!: "
++ "Cleaning up Linux memory area (%p), "
++ "type=%s, size=%ld bytes",
++ __func__, psCurrentRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psCurrentRecord->
++ psLinuxMemArea->
++ eAreaType),
++ psCurrentRecord->psLinuxMemArea->
++ ui32ByteSize);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ psCurrentRecord = psNextRecord;
++ }
++ RemoveProcEntry("mem_areas");
++ }
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++ struct DEBUG_MEM_ALLOC_REC *psCurrentRecord =
++ g_MemoryRecords, *psNextRecord;
++
++ while (psCurrentRecord) {
++ psNextRecord = psCurrentRecord->psNext;
++ PVR_DPF(PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++ "type=%s CpuVAddr=%p CpuPAddr=0x%08lx, "
++ "allocated @ file=%s,line=%d",
++ __func__,
++ DebugMemAllocRecordTypeToString
++ (psCurrentRecord->eAllocType),
++ psCurrentRecord->pvCpuVAddr,
++ psCurrentRecord->ulCpuPAddr,
++ psCurrentRecord->pszFileName,
++ psCurrentRecord->ui32Line);
++ switch (psCurrentRecord->eAllocType) {
++ case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++ KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++ IOUnmapWrapper((__force __iomem void *)
++ psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IO:
++
++ DebugMemAllocRecordRemove
++ (DEBUG_MEM_ALLOC_TYPE_IO,
++ psCurrentRecord->pvKey, __FILE__,
++ __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++ VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++
++ DebugMemAllocRecordRemove
++ (DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ psCurrentRecord->pvKey, __FILE__,
++ __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++ KMemCacheFreeWrapper(psCurrentRecord->
++ pvPrivateData,
++ psCurrentRecord->
++ pvCpuVAddr);
++ break;
++ default:
++ PVR_ASSERT(0);
++ }
++ psCurrentRecord = psNextRecord;
++ }
++ RemoveProcEntry("meminfo");
++ }
++#endif
++
++ if (psLinuxMemAreaCache) {
++ kmem_cache_destroy(psLinuxMemAreaCache);
++ psLinuxMemAreaCache = NULL;
++ }
++}
++
++void *_KMallocWrapper(u32 ui32ByteSize, char *pszFileName,
++ u32 ui32Line)
++{
++ void *pvRet;
++ pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if (pvRet)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ pvRet, pvRet, 0, NULL, ui32ByteSize,
++ pszFileName, ui32Line);
++#endif
++ return pvRet;
++}
++
++void _KFreeWrapper(void *pvCpuVAddr, char *pszFileName,
++ u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr,
++ pszFileName, ui32Line);
++#endif
++ kfree(pvCpuVAddr);
++}
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static void DebugMemAllocRecordAdd(enum DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey, void *pvCpuVAddr,
++ u32 ulCpuPAddr, void *pvPrivateData,
++ u32 ui32Bytes, char *pszFileName,
++ u32 ui32Line)
++{
++ struct DEBUG_MEM_ALLOC_REC *psRecord;
++
++ mutex_lock(&g_sDebugMutex);
++
++ psRecord = kmalloc(sizeof(struct DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++
++ psRecord->eAllocType = eAllocType;
++ psRecord->pvKey = pvKey;
++ psRecord->pvCpuVAddr = pvCpuVAddr;
++ psRecord->ulCpuPAddr = ulCpuPAddr;
++ psRecord->pvPrivateData = pvPrivateData;
++ psRecord->pid = current->pid;
++ psRecord->ui32Bytes = ui32Bytes;
++ psRecord->pszFileName = pszFileName;
++ psRecord->ui32Line = ui32Line;
++
++ psRecord->psNext = g_MemoryRecords;
++ g_MemoryRecords = psRecord;
++
++ g_WaterMarkData[eAllocType] += ui32Bytes;
++ if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
++ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++
++ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) {
++ g_SysRAMWaterMark += ui32Bytes;
++ if (g_SysRAMWaterMark > g_SysRAMHighWaterMark)
++ g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++ } else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) {
++ g_IOMemWaterMark += ui32Bytes;
++ if (g_IOMemWaterMark > g_IOMemHighWaterMark)
++ g_IOMemHighWaterMark = g_IOMemWaterMark;
++ }
++
++ mutex_unlock(&g_sDebugMutex);
++}
++
++static void DebugMemAllocRecordRemove(enum DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey, char *pszFileName,
++ u32 ui32Line)
++{
++ struct DEBUG_MEM_ALLOC_REC **ppsCurrentRecord;
++
++ mutex_lock(&g_sDebugMutex);
++
++ for (ppsCurrentRecord = &g_MemoryRecords; *ppsCurrentRecord;
++ ppsCurrentRecord = &((*ppsCurrentRecord)->psNext))
++ if ((*ppsCurrentRecord)->eAllocType == eAllocType &&
++ (*ppsCurrentRecord)->pvKey == pvKey) {
++ struct DEBUG_MEM_ALLOC_REC *psNextRecord;
++
++ psNextRecord = (*ppsCurrentRecord)->psNext;
++ g_WaterMarkData[eAllocType] -=
++ (*ppsCurrentRecord)->ui32Bytes;
++
++ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES ||
++ eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) {
++ g_SysRAMWaterMark -=
++ (*ppsCurrentRecord)->ui32Bytes;
++ } else {
++ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ g_IOMemWaterMark -=
++ (*ppsCurrentRecord)->ui32Bytes;
++
++ }
++
++ kfree(*ppsCurrentRecord);
++ *ppsCurrentRecord = psNextRecord;
++ goto exit_unlock;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s "
++ "with pvKey=%p (called from %s, line %d\n",
++ __func__, DebugMemAllocRecordTypeToString(eAllocType), pvKey,
++ pszFileName, ui32Line);
++
++exit_unlock:
++ mutex_unlock(&g_sDebugMutex);
++}
++
++static char *DebugMemAllocRecordTypeToString(
++ enum DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++ char *apszDebugMemoryRecordTypes[] = {
++ "KMALLOC",
++ "VMALLOC",
++ "ALLOC_PAGES",
++ "IOREMAP",
++ "IO",
++ "KMEM_CACHE_ALLOC"
++ };
++ return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++void *_VMallocWrapper(u32 ui32Bytes, u32 ui32AllocFlags, char *pszFileName,
++ u32 ui32Line)
++{
++ pgprot_t PGProtFlags;
++ void *pvRet;
++
++ switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ case PVRSRV_HAP_CACHED:
++ PGProtFlags = PAGE_KERNEL;
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ PGProtFlags = PGPROT_WC(PAGE_KERNEL);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ PGProtFlags = PGPROT_UC(PAGE_KERNEL);
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "VMAllocWrapper: unknown mapping flags=0x%08lx",
++ ui32AllocFlags);
++ dump_stack();
++ return NULL;
++ }
++
++ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if (pvRet)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ pvRet, pvRet, 0, NULL,
++ PAGE_ALIGN(ui32Bytes),
++ pszFileName, ui32Line);
++#endif
++
++ return pvRet;
++}
++
++void _VFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr,
++ pszFileName, ui32Line);
++#endif
++ vfree(pvCpuVAddr);
++}
++
++struct LinuxMemArea *NewVMallocLinuxMemArea(u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ void *pvCpuVAddr;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea)
++ goto failed;
++
++ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++ if (!pvCpuVAddr)
++ goto failed;
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed:
++ PVR_DPF(PVR_DBG_ERROR, "%s: failed!", __func__);
++ if (psLinuxMemArea)
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++}
++
++void FreeVMallocLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++
++ PVR_DPF(PVR_DBG_MESSAGE, "%s: pvCpuVAddr: %p",
++ __func__,
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++void __iomem *_IORemapWrapper(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags,
++ char *pszFileName, u32 ui32Line)
++{
++ void __iomem *pvIORemapCookie = NULL;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ case PVRSRV_HAP_CACHED:
++ pvIORemapCookie = IOREMAP(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ pvIORemapCookie = IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ pvIORemapCookie = IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "IORemapWrapper: unknown mapping flags");
++ return NULL;
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if (pvIORemapCookie)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ (__force void *)pvIORemapCookie,
++ (__force void *)pvIORemapCookie,
++ BasePAddr.uiAddr,
++ NULL, ui32Bytes, pszFileName, ui32Line);
++#endif
++
++ return pvIORemapCookie;
++}
++
++void _IOUnmapWrapper(void __iomem *pvIORemapCookie, char *pszFileName,
++ u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ (__force void *)pvIORemapCookie,
++ pszFileName, ui32Line);
++#endif
++ iounmap(pvIORemapCookie);
++}
++
++struct LinuxMemArea *NewIORemapLinuxMemArea(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ void __iomem *pvIORemapCookie;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea)
++ return NULL;
++
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
++ if (!pvIORemapCookie) {
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeIORemapLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++static IMG_BOOL PagesAreContiguous(struct IMG_SYS_PHYADDR *psSysPhysAddr,
++ u32 ui32Bytes)
++{
++ u32 ui32;
++ u32 ui32AddrChk;
++ u32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
++
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages; ui32++, ui32AddrChk += PAGE_SIZE)
++ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
++ return IMG_FALSE;
++
++ return IMG_TRUE;
++}
++
++struct LinuxMemArea *NewExternalKVLinuxMemArea(struct IMG_SYS_PHYADDR
++ *pBasePAddr, void *pvCPUVAddr,
++ u32 ui32Bytes,
++ IMG_BOOL bPhysContig,
++ u32 ui32AreaFlags)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea)
++ return NULL;
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++ psLinuxMemArea->uData.sExternalKV.bPhysContig = bPhysContig ||
++ PagesAreContiguous(pBasePAddr, ui32Bytes);
++
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr =
++ *pBasePAddr;
++ else
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr =
++ pBasePAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeExternalKVLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++struct LinuxMemArea *NewIOLinuxMemArea(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ struct LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea)
++ return NULL;
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++ (void *)BasePAddr.uiAddr, NULL,
++ BasePAddr.uiAddr, NULL, ui32Bytes, "unknown", 0);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeIOLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++ (void *)psLinuxMemArea->uData.sIO.
++ CPUPhysAddr.uiAddr, __FILE__, __LINE__);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++struct LinuxMemArea *NewAllocPagesLinuxMemArea(u32 ui32Bytes,
++ u32 ui32AreaFlags)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ u32 ui32PageCount;
++ struct page **pvPageList;
++ void *hBlockPageList;
++ s32 i;
++ enum PVRSRV_ERROR eError;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea)
++ goto failed_area_alloc;
++
++ ui32PageCount = RANGE_TO_PAGES(ui32Bytes);
++ eError = OSAllocMem(0, sizeof(*pvPageList) * ui32PageCount,
++ (void **)&pvPageList, &hBlockPageList);
++ if (eError != PVRSRV_OK)
++ goto failed_page_list_alloc;
++
++ for (i = 0; i < ui32PageCount; i++) {
++ pvPageList[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
++ if (!pvPageList[i])
++ goto failed_alloc_pages;
++
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ pvPageList, NULL, 0, NULL, PAGE_ALIGN(ui32Bytes),
++ "unknown", 0);
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++ psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++ psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed_alloc_pages:
++ for (i--; i >= 0; i--)
++ __free_pages(pvPageList[i], 0);
++ OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList,
++ hBlockPageList);
++failed_page_list_alloc:
++ LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++ PVR_DPF(PVR_DBG_ERROR, "%s: failed", __func__);
++
++ return NULL;
++}
++
++void FreeAllocPagesLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ u32 ui32PageCount;
++ struct page **pvPageList;
++ void *hBlockPageList;
++ u32 i;
++
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
++ pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++ hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList,
++ __FILE__, __LINE__);
++#endif
++
++ for (i = 0; i < ui32PageCount; i++)
++ __free_pages(pvPageList[i], 0);
++
++ OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList,
++ hBlockPageList);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++struct page *LinuxMemAreaOffsetToPage(struct LinuxMemArea *psLinuxMemArea,
++ u32 ui32ByteOffset)
++{
++ u32 ui32PageIndex;
++ char *pui8Addr;
++
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ return
++ psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++ break;
++ case LINUX_MEM_AREA_VMALLOC:
++ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pui8Addr += ui32ByteOffset;
++ return vmalloc_to_page(pui8Addr);
++ break;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return LinuxMemAreaOffsetToPage(psLinuxMemArea->
++ uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->
++ uData.sSubAlloc.ui32ByteOffset +
++ ui32ByteOffset);
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "%s: Unsupported request for "
++ "struct page from struct LinuxMemArea with type=%s",
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType));
++ return NULL;
++ }
++}
++
++void *_KMemCacheAllocWrapper(struct kmem_cache *psCache,
++ gfp_t Flags,
++ char *pszFileName, u32 ui32Line)
++{
++ void *pvRet;
++
++ pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvRet, pvRet,
++ 0, psCache, kmem_cache_size(psCache),
++ pszFileName, ui32Line);
++#endif
++
++ return pvRet;
++}
++
++void _KMemCacheFreeWrapper(struct kmem_cache *psCache, void *pvObject,
++ char *pszFileName, u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject,
++ pszFileName, ui32Line);
++#endif
++
++ kmem_cache_free(psCache, pvObject);
++}
++
++const char *KMemCacheNameWrapper(struct kmem_cache *psCache)
++{
++
++ return "";
++}
++
++struct LinuxMemArea *NewSubLinuxMemArea(struct LinuxMemArea
++ *psParentLinuxMemArea, u32 ui32ByteOffset,
++ u32 ui32Bytes)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++
++ PVR_ASSERT((ui32ByteOffset + ui32Bytes) <=
++ psParentLinuxMemArea->ui32ByteSize);
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea)
++ return NULL;
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea =
++ psParentLinuxMemArea;
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ struct DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++ psParentRecord =
++ DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea,
++ psParentRecord->ui32Flags);
++ }
++#endif
++
++ return psLinuxMemArea;
++}
++
++static void FreeSubLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++static struct LinuxMemArea *LinuxMemAreaStructAlloc(void)
++{
++ return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++}
++
++static void LinuxMemAreaStructFree(struct LinuxMemArea *psLinuxMemArea)
++{
++ KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++
++}
++
++void LinuxMemAreaDeepFree(struct LinuxMemArea *psLinuxMemArea)
++{
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_VMALLOC:
++ FreeVMallocLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ FreeIORemapLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ FreeExternalKVLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IO:
++ FreeIOLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ FreeSubLinuxMemArea(psLinuxMemArea);
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "%s: Unknown are type (%d)\n",
++ __func__, psLinuxMemArea->eAreaType);
++ }
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static void DebugLinuxMemAreaRecordAdd(struct LinuxMemArea *psLinuxMemArea,
++ u32 ui32Flags)
++{
++ struct DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++ const char *pi8FlagsString;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) {
++ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++ if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
++ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++ }
++ g_LinuxMemAreaCount++;
++
++ psNewRecord = kmalloc(sizeof(struct DEBUG_LINUX_MEM_AREA_REC),
++ GFP_KERNEL);
++ if (psNewRecord) {
++ psNewRecord->psLinuxMemArea = psLinuxMemArea;
++ psNewRecord->ui32Flags = ui32Flags;
++ psNewRecord->pid = current->pid;
++ psNewRecord->psNext = g_LinuxMemAreaRecords;
++ g_LinuxMemAreaRecords = psNewRecord;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: failed to allocate linux memory area record.",
++ __func__);
++ }
++
++ pi8FlagsString = HAPFlagsToString(ui32Flags);
++ if (strstr(pi8FlagsString, "UNKNOWN"))
++ PVR_DPF(PVR_DBG_ERROR, "%s: Unexpected flags "
++ "(0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++ __func__, ui32Flags, psLinuxMemArea);
++
++ mutex_unlock(&g_sDebugMutex);
++}
++
++static struct DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(
++ struct LinuxMemArea *psLinuxMemArea)
++{
++ struct DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ mutex_lock(&g_sDebugMutex);
++
++ for (psCurrentRecord = g_LinuxMemAreaRecords;
++ psCurrentRecord; psCurrentRecord = psCurrentRecord->psNext)
++ if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
++ goto exit_unlock;
++
++exit_unlock:
++ mutex_unlock(&g_sDebugMutex);
++
++ return psCurrentRecord;
++}
++
++static void DebugLinuxMemAreaRecordRemove(struct LinuxMemArea *psLinuxMemArea)
++{
++ struct DEBUG_LINUX_MEM_AREA_REC **ppsCurrentRecord;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++ g_LinuxMemAreaCount--;
++
++ for (ppsCurrentRecord = &g_LinuxMemAreaRecords;
++ *ppsCurrentRecord;
++ ppsCurrentRecord = &((*ppsCurrentRecord)->psNext))
++ if ((*ppsCurrentRecord)->psLinuxMemArea == psLinuxMemArea) {
++ struct DEBUG_LINUX_MEM_AREA_REC *psNextRecord;
++
++ psNextRecord = (*ppsCurrentRecord)->psNext;
++ kfree(*ppsCurrentRecord);
++ *ppsCurrentRecord = psNextRecord;
++ goto exit_unlock;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: couldn't find an entry for psLinuxMemArea=%p\n", __func__,
++ psLinuxMemArea);
++
++exit_unlock:
++ mutex_unlock(&g_sDebugMutex);
++}
++#endif
++
++void *LinuxMemAreaToCpuVAddr(struct LinuxMemArea *psLinuxMemArea)
++{
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_VMALLOC:
++ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ case LINUX_MEM_AREA_IOREMAP:
++ return (void __force *)
++ psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ char *pAddr =
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.
++ sSubAlloc.
++ psParentLinuxMemArea);
++ if (!pAddr)
++ return NULL;
++ return pAddr +
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++ }
++ default:
++ return NULL;
++ }
++}
++
++struct IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(
++ struct LinuxMemArea *psLinuxMemArea,
++ u32 ui32ByteOffset)
++{
++ struct IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = 0;
++
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_IOREMAP:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ {
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig) {
++ CpuPAddr =
++ SysSysPAddrToCpuPAddr(
++ psLinuxMemArea->uData.
++ sExternalKV.uPhysAddr.
++ SysPhysAddr);
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ } else {
++ u32 ui32PageIndex =
++ PHYS_TO_PFN(ui32ByteOffset);
++ struct IMG_SYS_PHYADDR SysPAddr =
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.
++ pSysPhysAddr[ui32PageIndex];
++
++ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
++ CpuPAddr.uiAddr +=
++ ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ }
++ break;
++ }
++ case LINUX_MEM_AREA_IO:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_VMALLOC:
++ {
++ char *pCpuVAddr;
++ pCpuVAddr =
++ (char *) psLinuxMemArea->uData.sVmalloc.
++ pvVmallocAddress;
++ pCpuVAddr += ui32ByteOffset;
++ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
++ break;
++ }
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ {
++ struct page *page;
++ u32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ page =
++ psLinuxMemArea->uData.sPageList.
++ pvPageList[ui32PageIndex];
++ CpuPAddr.uiAddr = page_to_phys(page);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ break;
++ }
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psLinuxMemArea->uData.
++ sSubAlloc.
++ psParentLinuxMemArea,
++ psLinuxMemArea->uData.
++ sSubAlloc.ui32ByteOffset +
++ ui32ByteOffset);
++ break;
++ }
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Unknown struct LinuxMemArea type (%d)\n",
++ __func__, psLinuxMemArea->eAreaType);
++ }
++
++ PVR_ASSERT(CpuPAddr.uiAddr);
++ return CpuPAddr;
++}
++
++static void inv_cache_vmalloc(const struct LinuxMemArea *mem_area)
++{
++ struct page *pg;
++ void *kaddr;
++ size_t chunk;
++ u32 pg_cnt;
++ u32 pg_ofs;
++ u32 vaddr, vaddr_end;
++
++ extern void ___dma_single_dev_to_cpu(const void *, size_t,
++ enum dma_data_direction);
++
++ vaddr = (u32)mem_area->uData.sVmalloc.pvVmallocAddress;
++ vaddr_end = vaddr + mem_area->ui32ByteSize;
++ pg_cnt = (PAGE_ALIGN(vaddr_end) - (vaddr & PAGE_MASK)) / PAGE_SIZE;
++
++ while (pg_cnt--) {
++ pg = pfn_to_page(VMallocToPhys((void *)vaddr) >> PAGE_SHIFT);
++ kaddr = page_address(pg);
++ pg_ofs = vaddr & ~PAGE_MASK;
++ kaddr += pg_ofs;
++ chunk = min_t(ssize_t, vaddr_end - vaddr, PAGE_SIZE - pg_ofs);
++ ___dma_single_dev_to_cpu(kaddr, chunk, DMA_FROM_DEVICE);
++ vaddr += chunk;
++ }
++}
++
++static void inv_cache_page_list(const struct LinuxMemArea *mem_area)
++{
++ u32 pg_cnt;
++ struct page **pg_list;
++
++ extern void ___dma_single_dev_to_cpu(const void *, size_t,
++ enum dma_data_direction);
++
++ pg_cnt = RANGE_TO_PAGES(mem_area->ui32ByteSize);
++ pg_list = mem_area->uData.sPageList.pvPageList;
++ while (pg_cnt--)
++ ___dma_single_dev_to_cpu(page_address(*pg_list++), PAGE_SIZE,
++ DMA_FROM_DEVICE);
++}
++
++void inv_cache_mem_area(const struct LinuxMemArea *mem_area)
++{
++ switch (mem_area->eAreaType) {
++ case LINUX_MEM_AREA_VMALLOC:
++ inv_cache_vmalloc(mem_area);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ inv_cache_page_list(mem_area);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ case LINUX_MEM_AREA_IO:
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Not implemented for type (%d)\n",
++ __func__, mem_area->eAreaType);
++ BUG();
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Unknown LinuxMemArea type (%d)\n",
++ __func__, mem_area->eAreaType);
++ BUG();
++ }
++}
++
++IMG_BOOL LinuxMemAreaPhysIsContig(struct LinuxMemArea *psLinuxMemArea)
++{
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_IO:
++ return IMG_TRUE;
++
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
++
++ case LINUX_MEM_AREA_VMALLOC:
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return IMG_FALSE;
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.
++ psParentLinuxMemArea);
++
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Unknown struct LinuxMemArea type (%d)\n",
++ __func__, psLinuxMemArea->eAreaType);
++ break;
++ }
++ return IMG_FALSE;
++}
++
++const char *LinuxMemAreaTypeToString(enum LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++ switch (eMemAreaType) {
++ case LINUX_MEM_AREA_IOREMAP:
++ return "LINUX_MEM_AREA_IOREMAP";
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return "LINUX_MEM_AREA_EXTERNAL_KV";
++ case LINUX_MEM_AREA_IO:
++ return "LINUX_MEM_AREA_IO";
++ case LINUX_MEM_AREA_VMALLOC:
++ return "LINUX_MEM_AREA_VMALLOC";
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return "LINUX_MEM_AREA_SUB_ALLOC";
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return "LINUX_MEM_AREA_ALLOC_PAGES";
++ default:
++ PVR_ASSERT(0);
++ }
++
++ return "";
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static off_t printLinuxMemAreaRecords(char *buffer, size_t count, off_t off)
++{
++ struct DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ off_t Ret;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (!off) {
++ if (count < 500) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Number of Linux Memory Areas: %u\n"
++ "At the current water mark these areas "
++ "correspond to %u bytes "
++ "(excluding SUB areas)\n"
++ "At the highest water mark these areas "
++ "corresponded to %u bytes "
++ "(excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr", "Bytes", "Pid", "Flags");
++ goto unlock_and_return;
++ }
++
++ for (psRecord = g_LinuxMemAreaRecords; --off && psRecord;
++ psRecord = psRecord->psNext)
++ ;
++ if (!psRecord) {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if (count < 500) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ Ret = printAppend(buffer, count, 0,
++ "%8p %-24s %8p %08x %-8d %-5u %08x=(%s)\n",
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->
++ eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,
++ 0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize, psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++unlock_and_return:
++ mutex_unlock(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static off_t printMemoryRecords(char *buffer, size_t count, off_t off)
++{
++ struct DEBUG_MEM_ALLOC_REC *psRecord;
++ off_t Ret;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (!off) {
++ if (count < 1000) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ Ret = printAppend(buffer, count, 0, "%-60s: %d bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Current Water Mark of bytes reserved for "
++ "\"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Highest Water Mark of bytes allocated for "
++ "\"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Current Water Mark of bytes allocated via "
++ "kmem_cache_alloc",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "Highest Water Mark of bytes allocated via "
++ "kmem_cache_alloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret,
++ "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n", "Type",
++ "CpuVAddr", "CpuPAddr", "Bytes", "PID",
++ "PrivateData", "Filename:Line");
++
++
++ goto unlock_and_return;
++ }
++
++ if (count < 1000) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ for (psRecord = g_MemoryRecords; --off && psRecord;
++ psRecord = psRecord->psNext)
++ ;
++ if (!psRecord) {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ Ret = printAppend(buffer, count, 0,
++ "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n",
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes, psRecord->pid, "NULL",
++ psRecord->pszFileName, psRecord->ui32Line);
++ else
++ Ret = printAppend(buffer, count, 0,
++ "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n",
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes, psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName, psRecord->ui32Line);
++
++unlock_and_return:
++ mutex_unlock(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const char *HAPFlagsToString(u32 ui32Flags)
++{
++ static char szFlags[50];
++ s32 i32Pos = 0;
++ u32 ui32CacheTypeIndex, ui32MapTypeIndex;
++ char *apszCacheTypes[] = {
++ "UNCACHED",
++ "CACHED",
++ "WRITECOMBINE",
++ "UNKNOWN"
++ };
++ char *apszMapType[] = {
++ "KERNEL_ONLY",
++ "SINGLE_PROCESS",
++ "MULTI_PROCESS",
++ "FROM_EXISTING_PROCESS",
++ "NO_CPU_VIRTUAL",
++ "UNKNOWN"
++ };
++
++ if (ui32Flags & PVRSRV_HAP_UNCACHED) {
++ ui32CacheTypeIndex = 0;
++ } else if (ui32Flags & PVRSRV_HAP_CACHED) {
++ ui32CacheTypeIndex = 1;
++ } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) {
++ ui32CacheTypeIndex = 2;
++ } else {
++ ui32CacheTypeIndex = 3;
++ PVR_DPF(PVR_DBG_ERROR, "%s: unknown cache type (%u)",
++ __func__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK));
++ }
++
++ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) {
++ ui32MapTypeIndex = 0;
++ } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) {
++ ui32MapTypeIndex = 1;
++ } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) {
++ ui32MapTypeIndex = 2;
++ } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) {
++ ui32MapTypeIndex = 3;
++ } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) {
++ ui32MapTypeIndex = 4;
++ } else {
++ ui32MapTypeIndex = 5;
++ PVR_DPF(PVR_DBG_ERROR, "%s: unknown map type (%u)",
++ __func__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK));
++ }
++
++ i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++ if (i32Pos <= 0) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: sprintf for cache type %u failed (%d)", __func__,
++ ui32CacheTypeIndex, i32Pos);
++ szFlags[0] = 0;
++ } else {
++ sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++ }
++
++ return szFlags;
++}
++#endif
+diff --git a/drivers/gpu/pvr/mm.h b/drivers/gpu/pvr/mm.h
+new file mode 100644
+index 0000000..9484363
+--- /dev/null
++++ b/drivers/gpu/pvr/mm.h
+@@ -0,0 +1,267 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#include <linux/version.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include <linux/io.h>
++
++#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
++#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
++
++#define RANGE_TO_PAGES(range) \
++ (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
++
++#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
++
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) \
++ remap_pfn_range(vma, addr, pfn, size, prot)
++
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) \
++ io_remap_pfn_range(vma, addr, pfn, size, prot)
++
++#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page)
++
++static inline u32 VMallocToPhys(void *pCpuVAddr)
++{
++ return page_to_phys(vmalloc_to_page(pCpuVAddr)) +
++ ADDR_TO_PAGE_OFFSET(pCpuVAddr);
++
++}
++
++enum LINUX_MEM_AREA_TYPE {
++ LINUX_MEM_AREA_IOREMAP,
++ LINUX_MEM_AREA_EXTERNAL_KV,
++ LINUX_MEM_AREA_IO,
++ LINUX_MEM_AREA_VMALLOC,
++ LINUX_MEM_AREA_ALLOC_PAGES,
++ LINUX_MEM_AREA_SUB_ALLOC,
++ LINUX_MEM_AREA_TYPE_COUNT
++};
++
++struct LinuxMemArea;
++
++struct LinuxMemArea {
++ enum LINUX_MEM_AREA_TYPE eAreaType;
++ union _uData {
++ struct _sIORemap {
++ struct IMG_CPU_PHYADDR CPUPhysAddr;
++ void __iomem *pvIORemapCookie;
++ } sIORemap;
++ struct _sExternalKV {
++ IMG_BOOL bPhysContig;
++ union {
++ struct IMG_SYS_PHYADDR SysPhysAddr;
++ struct IMG_SYS_PHYADDR *pSysPhysAddr;
++ } uPhysAddr;
++ void *pvExternalKV;
++ } sExternalKV;
++ struct _sIO {
++ struct IMG_CPU_PHYADDR CPUPhysAddr;
++ } sIO;
++ struct _sVmalloc {
++ void *pvVmallocAddress;
++ } sVmalloc;
++ struct _sPageList {
++ struct page **pvPageList;
++ void *hBlockPageList;
++ } sPageList;
++ struct _sSubAlloc {
++ struct LinuxMemArea *psParentLinuxMemArea;
++ u32 ui32ByteOffset;
++ } sSubAlloc;
++ } uData;
++ u32 ui32ByteSize;
++ u32 ui32AreaFlags;
++ IMG_BOOL bMMapRegistered;
++ struct list_head sMMapItem;
++ struct list_head sMMapOffsetStructList;
++};
++
++struct kmem_cache;
++
++enum PVRSRV_ERROR LinuxMMInit(void);
++
++void LinuxMMCleanup(void);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) \
++ _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) \
++ _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++void *_KMallocWrapper(u32 ui32ByteSize, char *szFileName, u32 ui32Line);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++void _KFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) \
++ _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) \
++ _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++void *_VMallocWrapper(u32 ui32Bytes, u32 ui32AllocFlags, char *pszFileName,
++ u32 ui32Line);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++void _VFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line);
++
++struct LinuxMemArea *NewVMallocLinuxMemArea(u32 ui32Bytes, u32 ui32AreaFlags);
++
++void FreeVMallocLinuxMemArea(struct LinuxMemArea *psLinuxMemArea);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++void __iomem *_IORemapWrapper(struct IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, char *pszFileName,
++ u32 ui32Line);
++
++struct LinuxMemArea *NewIORemapLinuxMemArea(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32AreaFlags);
++
++void FreeIORemapLinuxMemArea(struct LinuxMemArea *psLinuxMemArea);
++
++struct LinuxMemArea *NewExternalKVLinuxMemArea(
++ struct IMG_SYS_PHYADDR *pBasePAddr, void *pvCPUVAddr,
++ u32 ui32Bytes, IMG_BOOL bPhysContig, u32 ui32AreaFlags);
++
++void FreeExternalKVLinuxMemArea(struct LinuxMemArea *psLinuxMemArea);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
++#else
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, NULL, 0)
++#endif
++void _IOUnmapWrapper(void __iomem *pvIORemapCookie, char *pszFileName,
++ u32 ui32Line);
++
++struct page *LinuxMemAreaOffsetToPage(struct LinuxMemArea *psLinuxMemArea,
++ u32 ui32ByteOffset);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) \
++ _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) \
++ _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++
++void *_KMemCacheAllocWrapper(struct kmem_cache *psCache, gfp_t Flags,
++ char *pszFileName, u32 ui32Line);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheFreeWrapper(psCache, pvObject) \
++ _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__)
++#else
++#define KMemCacheFreeWrapper(psCache, pvObject) \
++ _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0)
++#endif
++void _KMemCacheFreeWrapper(struct kmem_cache *psCache, void *pvObject,
++ char *pszFileName, u32 ui32Line);
++
++const char *KMemCacheNameWrapper(struct kmem_cache *psCache);
++
++struct LinuxMemArea *NewIOLinuxMemArea(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32AreaFlags);
++
++void FreeIOLinuxMemArea(struct LinuxMemArea *psLinuxMemArea);
++
++struct LinuxMemArea *NewAllocPagesLinuxMemArea(u32 ui32Bytes,
++ u32 ui32AreaFlags);
++
++void FreeAllocPagesLinuxMemArea(struct LinuxMemArea *psLinuxMemArea);
++
++struct LinuxMemArea *NewSubLinuxMemArea(
++ struct LinuxMemArea *psParentLinuxMemArea,
++ u32 ui32ByteOffset, u32 ui32Bytes);
++
++void LinuxMemAreaDeepFree(struct LinuxMemArea *psLinuxMemArea);
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++void LinuxMemAreaRegister(struct LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++void *LinuxMemAreaToCpuVAddr(struct LinuxMemArea *psLinuxMemArea);
++
++struct IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(
++ struct LinuxMemArea *psLinuxMemArea,
++ u32 ui32ByteOffset);
++
++#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) \
++ PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, \
++ ui32ByteOffset).uiAddr)
++
++void inv_cache_mem_area(const struct LinuxMemArea *mem_area);
++
++IMG_BOOL LinuxMemAreaPhysIsContig(struct LinuxMemArea *psLinuxMemArea);
++
++static inline struct LinuxMemArea *LinuxMemAreaRoot(struct LinuxMemArea
++ *psLinuxMemArea)
++{
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++ else
++ return psLinuxMemArea;
++}
++
++static inline enum LINUX_MEM_AREA_TYPE LinuxMemAreaRootType(struct LinuxMemArea
++ *psLinuxMemArea)
++{
++ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
++}
++
++const char *LinuxMemAreaTypeToString(enum LINUX_MEM_AREA_TYPE eMemAreaType);
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const char *HAPFlagsToString(u32 ui32Flags);
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/mmap.c b/drivers/gpu/pvr/mmap.c
+new file mode 100644
+index 0000000..7b3b784
+--- /dev/null
++++ b/drivers/gpu/pvr/mmap.c
+@@ -0,0 +1,922 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#include <linux/sched.h>
++#include <asm/current.h>
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "mutex.h"
++#include "handle.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "bridged_support.h"
++
++static struct mutex g_sMMapMutex;
++
++static struct kmem_cache *g_psMemmapCache;
++static LIST_HEAD(g_sMMapAreaList);
++static LIST_HEAD(g_sMMapOffsetStructList);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static u32 g_ui32RegisteredAreas;
++static u32 g_ui32TotalByteSize;
++#endif
++
++#define FIRST_PHYSICAL_PFN 0
++#define LAST_PHYSICAL_PFN 0x7ffffffful
++#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
++#define LAST_SPECIAL_PFN 0xfffffffful
++
++#define MAX_MMAP_HANDLE 0x7ffffffful
++
++static inline IMG_BOOL PFNIsPhysical(u32 pfn)
++{
++ return pfn >= FIRST_PHYSICAL_PFN && pfn <= LAST_PHYSICAL_PFN;
++}
++
++static inline IMG_BOOL PFNIsSpecial(u32 pfn)
++{
++ return pfn >= FIRST_SPECIAL_PFN && pfn <= LAST_SPECIAL_PFN;
++}
++
++static inline void *MMapOffsetToHandle(u32 pfn)
++{
++ if (PFNIsPhysical(pfn)) {
++ PVR_ASSERT(PFNIsPhysical(pfn));
++ return NULL;
++ }
++
++ return (void *)(pfn - FIRST_SPECIAL_PFN);
++}
++
++static inline u32 HandleToMMapOffset(void *hHandle)
++{
++ u32 ulHandle = (u32) hHandle;
++
++ if (PFNIsSpecial(ulHandle)) {
++ PVR_ASSERT(PFNIsSpecial(ulHandle));
++ return 0;
++ }
++
++ return ulHandle + FIRST_SPECIAL_PFN;
++ }
++
++static inline IMG_BOOL LinuxMemAreaUsesPhysicalMap(
++ struct LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
++}
++
++static inline u32 GetCurrentThreadID(void)
++{
++
++ return (u32) current->pid;
++}
++
++static struct KV_OFFSET_STRUCT *CreateOffsetStruct(struct LinuxMemArea
++ *psLinuxMemArea,
++ u32 ui32Offset,
++ u32 ui32RealByteSize)
++{
++ struct KV_OFFSET_STRUCT *psOffsetStruct;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const char *pszName =
++ LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
++ __func__, pszName, psLinuxMemArea,
++ psLinuxMemArea->ui32AreaFlags);
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC
++ || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType !=
++ LINUX_MEM_AREA_SUB_ALLOC);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++ if (psOffsetStruct == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRMMapRegisterArea: "
++ "Couldn't alloc another mapping record from cache");
++ return NULL;
++ }
++
++ psOffsetStruct->ui32MMapOffset = ui32Offset;
++ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++ psOffsetStruct->ui32Mapped = 0;
++ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
++ psOffsetStruct->ui32TID = GetCurrentThreadID();
++ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++ psOffsetStruct->ui32RefCount = 0;
++ psOffsetStruct->ui32UserVAddr = 0;
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ psOffsetStruct->pszName = pszName;
++#endif
++
++ list_add_tail(&psOffsetStruct->sAreaItem,
++ &psLinuxMemArea->sMMapOffsetStructList);
++
++ return psOffsetStruct;
++}
++
++static void DestroyOffsetStruct(struct KV_OFFSET_STRUCT *psOffsetStruct)
++{
++ list_del(&psOffsetStruct->sAreaItem);
++
++ if (psOffsetStruct->bOnMMapList)
++ list_del(&psOffsetStruct->sMMapItem);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "%s: Table entry: "
++ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __func__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0));
++
++ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++}
++
++static inline void DetermineUsersSizeAndByteOffset(struct LinuxMemArea
++ *psLinuxMemArea,
++ u32 *pui32RealByteSize,
++ u32 *pui32ByteOffset)
++{
++ u32 ui32PageAlignmentOffset;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++
++ *pui32ByteOffset = ui32PageAlignmentOffset;
++
++ *pui32RealByteSize =
++ PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++enum PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hMHandle, u32 *pui32MMapOffset,
++ u32 *pui32ByteOffset, u32 *pui32RealByteSize,
++ u32 *pui32UserVAddr)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ struct KV_OFFSET_STRUCT *psOffsetStruct;
++ void *hOSMemHandle;
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <=
++ MAX_MMAP_HANDLE);
++
++ eError =
++ PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle,
++ hMHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed",
++ __func__, hMHandle);
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ pui32RealByteSize, pui32ByteOffset);
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) {
++ if (psPerProc->ui32PID == psOffsetStruct->ui32PID) {
++ PVR_ASSERT(*pui32RealByteSize ==
++ psOffsetStruct->ui32RealByteSize);
++
++ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++ *pui32UserVAddr = 0;
++
++ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea)) {
++ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
++ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
++ } else {
++ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
++ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
++ }
++
++ psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset,
++ *pui32RealByteSize);
++ if (psOffsetStruct == NULL) {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
++ psOffsetStruct->bOnMMapList = IMG_TRUE;
++ psOffsetStruct->ui32RefCount++;
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRMMapReleaseMMapData(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hMHandle, IMG_BOOL *pbMUnmap,
++ u32 *pui32RealByteSize, u32 *pui32UserVAddr)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ struct KV_OFFSET_STRUCT *psOffsetStruct;
++ void *hOSMemHandle;
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <=
++ MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle,
++ hMHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed",
++ __func__, hMHandle);
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) {
++ if (psOffsetStruct->ui32PID == ui32PID) {
++ if (psOffsetStruct->ui32RefCount == 0) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: Attempt to "
++ "release mmap data with zero reference "
++ "count for offset struct 0x%p, "
++ "memory area 0x%p",
++ __func__, psOffsetStruct,
++ psLinuxMemArea);
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++
++ psOffsetStruct->ui32RefCount--;
++
++ *pbMUnmap = (psOffsetStruct->ui32RefCount == 0)
++ && (psOffsetStruct->ui32UserVAddr != 0);
++
++ *pui32UserVAddr =
++ (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
++ *pui32RealByteSize =
++ (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++ PVR_DPF(PVR_DBG_ERROR, "%s: Mapping data not found for handle "
++ "0x%lx (memory area 0x%p)",
++ __func__, hMHandle, psLinuxMemArea);
++
++ eError = PVRSRV_ERROR_GENERIC;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++static inline struct KV_OFFSET_STRUCT *FindOffsetStructByOffset(u32 ui32Offset,
++ u32 ui32RealByteSize)
++{
++ struct KV_OFFSET_STRUCT *psOffsetStruct;
++ u32 ui32TID = GetCurrentThreadID();
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++
++ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList,
++ sMMapItem) {
++ if (ui32Offset == psOffsetStruct->ui32MMapOffset &&
++ ui32RealByteSize == psOffsetStruct->ui32RealByteSize &&
++ psOffsetStruct->ui32PID == ui32PID)
++ if (!PFNIsPhysical(ui32Offset) ||
++ psOffsetStruct->ui32TID == ui32TID)
++ return psOffsetStruct;
++ }
++
++ return NULL;
++}
++
++static IMG_BOOL DoMapToUser(struct LinuxMemArea *psLinuxMemArea,
++ struct vm_area_struct *ps_vma, u32 ui32ByteOffset)
++{
++ u32 ui32ByteSize;
++
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea), ps_vma,
++ psLinuxMemArea->uData.sSubAlloc.
++ ui32ByteOffset + ui32ByteOffset);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
++
++ if (PFNIsPhysical(ps_vma->vm_pgoff)) {
++ int result;
++
++ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
++ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset)
++ == ps_vma->vm_pgoff);
++
++ result =
++ IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start,
++ ps_vma->vm_pgoff, ui32ByteSize,
++ ps_vma->vm_page_prot);
++
++ if (result == 0)
++ return IMG_TRUE;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "%s: Failed to map contiguous physical address "
++ "range (%d), trying non-contiguous path",
++ __func__, result);
++ }
++
++ {
++ u32 ulVMAPos;
++ u32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
++ u32 ui32PA;
++
++ for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd;
++ ui32PA += PAGE_SIZE) {
++ u32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++
++ if (!pfn_valid(pfn)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Error - PFN invalid: 0x%lx",
++ __func__, pfn);
++ return IMG_FALSE;
++ }
++ }
++
++ ulVMAPos = ps_vma->vm_start;
++ for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd;
++ ui32PA += PAGE_SIZE) {
++ u32 pfn;
++ struct page *psPage;
++ int result;
++
++ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++ PVR_ASSERT(pfn_valid(pfn));
++
++ psPage = pfn_to_page(pfn);
++
++ result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
++ if (result != 0) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Error - VM_INSERT_PAGE failed (%d)",
++ __func__, result);
++ return IMG_FALSE;
++ }
++ ulVMAPos += PAGE_SIZE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_BOOL CheckSize(struct LinuxMemArea *psLinuxMemArea, u32 ui32ByteSize)
++{
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ u32 ui32PageAlignmentOffset;
++ u32 ui32RealByteSize;
++ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++ ui32RealByteSize =
++ PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++ if (ui32RealByteSize < ui32ByteSize) {
++ PVR_DPF(PVR_DBG_ERROR, "Cannot mmap %ld bytes from: "
++ "%-8p %-8p %08lx %-8ld %-24s\n",
++ ui32ByteSize, psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0).uiAddr,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType));
++ return IMG_FALSE;
++ }
++ return IMG_TRUE;
++}
++
++static void MMapVOpenNoLock(struct vm_area_struct *ps_vma)
++{
++ struct KV_OFFSET_STRUCT *psOffsetStruct =
++ (struct KV_OFFSET_STRUCT *)ps_vma->vm_private_data;
++
++ PVR_ASSERT(psOffsetStruct != NULL);
++ psOffsetStruct->ui32Mapped++;
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++
++ if (psOffsetStruct->ui32Mapped > 1) {
++ PVR_DPF(PVR_DBG_WARNING,
++ "%s: Offset structure 0x%p is being shared "
++ "across processes (psOffsetStruct->ui32Mapped: %lu)",
++ __func__, psOffsetStruct, psOffsetStruct->ui32Mapped);
++ PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
++ }
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %ld, ui32Mapped %d",
++ __func__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset, psOffsetStruct->ui32Mapped);
++#endif
++
++}
++
++static void MMapVOpen(struct vm_area_struct *ps_vma)
++{
++ mutex_lock(&g_sMMapMutex);
++ MMapVOpenNoLock(ps_vma);
++ mutex_unlock(&g_sMMapMutex);
++}
++
++static void MMapVCloseNoLock(struct vm_area_struct *ps_vma)
++{
++ struct KV_OFFSET_STRUCT *psOffsetStruct =
++ (struct KV_OFFSET_STRUCT *)ps_vma->vm_private_data;
++
++ PVR_ASSERT(psOffsetStruct != NULL);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF(PVR_DBG_MESSAGE, "%s: psLinuxMemArea "
++ "0x%p, CpuVAddr 0x%p ui32MMapOffset %ld, ui32Mapped %d",
++ __func__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped);
++#endif
++
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++ psOffsetStruct->ui32Mapped--;
++ if (psOffsetStruct->ui32Mapped == 0) {
++ if (psOffsetStruct->ui32RefCount != 0)
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "%s: psOffsetStruct 0x%p has non-zero "
++ "reference count (ui32RefCount = %lu). "
++ "User mode address of start of mapping: 0x%lx",
++ __func__, psOffsetStruct,
++ psOffsetStruct->ui32RefCount,
++ psOffsetStruct->ui32UserVAddr);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ ps_vma->vm_private_data = NULL;
++}
++
++static void MMapVClose(struct vm_area_struct *ps_vma)
++{
++ mutex_lock(&g_sMMapMutex);
++ MMapVCloseNoLock(ps_vma);
++ mutex_unlock(&g_sMMapMutex);
++}
++
++static struct vm_operations_struct MMapIOOps = {
++ .open = MMapVOpen,
++ .close = MMapVClose
++};
++
++int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma)
++{
++ u32 ui32ByteSize;
++ struct KV_OFFSET_STRUCT *psOffsetStruct = NULL;
++ int iRetVal = 0;
++
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ mutex_lock(&g_sMMapMutex);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
++ " and ui32ByteSize %ld(0x%08lx)", __func__, ps_vma->vm_pgoff,
++ ui32ByteSize, ui32ByteSize);
++
++ if ((ps_vma->vm_flags & VM_WRITE) && !(ps_vma->vm_flags & VM_SHARED)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Cannot mmap non-shareable writable areas",
++ __func__);
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ psOffsetStruct =
++ FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
++ if (psOffsetStruct == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
++ __func__, ps_vma->vm_pgoff);
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++ list_del(&psOffsetStruct->sMMapItem);
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
++ __func__, psOffsetStruct->psLinuxMemArea);
++
++ if (!CheckSize(psOffsetStruct->psLinuxMemArea, ui32ByteSize)) {
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ ps_vma->vm_flags |= VM_RESERVED;
++ ps_vma->vm_flags |= VM_IO;
++
++ ps_vma->vm_flags |= VM_DONTEXPAND;
++
++ ps_vma->vm_flags |= VM_DONTCOPY;
++
++ ps_vma->vm_private_data = (void *)psOffsetStruct;
++
++ switch (psOffsetStruct->psLinuxMemArea->
++ ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ case PVRSRV_HAP_CACHED:
++
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "%s: unknown cache type", __func__);
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ ps_vma->vm_ops = &MMapIOOps;
++
++ if (!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0)) {
++ iRetVal = -EAGAIN;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0);
++
++ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
++
++ MMapVOpenNoLock(ps_vma);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++ __func__, ps_vma->vm_pgoff);
++
++unlock_and_return:
++ if (iRetVal != 0 && psOffsetStruct != NULL)
++ DestroyOffsetStruct(psOffsetStruct);
++
++ mutex_unlock(&g_sMMapMutex);
++
++ return iRetVal;
++}
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static off_t PrintMMapReg_helper(char *buffer, size_t size,
++ const struct KV_OFFSET_STRUCT *psOffsetStruct,
++ struct LinuxMemArea *psLinuxMemArea)
++{
++ off_t Ret;
++ u32 ui32RealByteSize;
++ u32 ui32ByteOffset;
++
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ Ret = printAppend(buffer, size, 0,
++ "%-8p %08x %-8p %08x %08x "
++ "%-8d %-24s %-5u %-8s %08x(%s)\n",
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++ return Ret;
++
++}
++
++static off_t PrintMMapRegistrations(char *buffer, size_t size, off_t off)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ off_t Ret;
++
++ mutex_lock(&g_sMMapMutex);
++
++ if (!off) {
++ Ret = printAppend(buffer, size, 0,
++ "Allocations registered for mmap: %u\n"
++ "In total these areas correspond to %u bytes\n"
++ "psLinuxMemArea UserVAddr KernelVAddr "
++ "CpuPAddr MMapOffset ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++ g_ui32RegisteredAreas, g_ui32TotalByteSize);
++
++ goto unlock_and_return;
++ }
++
++ if (size < 135) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(off != 0);
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem) {
++ struct KV_OFFSET_STRUCT *psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList,
++ sAreaItem) {
++ off--;
++ if (off == 0) {
++ Ret = PrintMMapReg_helper(buffer, size,
++ psOffsetStruct, psLinuxMemArea);
++ goto unlock_and_return;
++ }
++ }
++ }
++ Ret = END_OF_FILE;
++
++unlock_and_return:
++ mutex_unlock(&g_sMMapMutex);
++ return Ret;
++}
++#endif
++
++enum PVRSRV_ERROR PVRMMapRegisterArea(struct LinuxMemArea *psLinuxMemArea)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const char *pszName =
++ LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
++ __func__, pszName, psLinuxMemArea,
++ psLinuxMemArea->ui32AreaFlags);
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC
++ || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType !=
++ LINUX_MEM_AREA_SUB_ALLOC);
++
++ if (psLinuxMemArea->bMMapRegistered) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: psLinuxMemArea 0x%p is already registered",
++ __func__, psLinuxMemArea);
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
++
++ psLinuxMemArea->bMMapRegistered = IMG_TRUE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas++;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRMMapRemoveRegisteredArea(
++ struct LinuxMemArea *psLinuxMemArea)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++ struct KV_OFFSET_STRUCT *psOffsetStruct, *psTmpOffsetStruct;
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList,
++ sAreaItem) {
++ if (psOffsetStruct->ui32Mapped != 0) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: psOffsetStruct "
++ "0x%p for memory area "
++ "0x0x%p is still mapped; "
++ "psOffsetStruct->ui32Mapped %lu",
++ __func__, psOffsetStruct, psLinuxMemArea,
++ psOffsetStruct->ui32Mapped);
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ } else {
++
++ PVR_DPF(PVR_DBG_WARNING,
++ "%s: psOffsetStruct 0x%p was never mapped",
++ __func__, psOffsetStruct);
++ }
++
++ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0)
++ && psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ list_del(&psLinuxMemArea->sMMapItem);
++
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas--;
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++ return eError;
++}
++
++enum PVRSRV_ERROR LinuxMMapPerProcessConnect(struct PVRSRV_ENV_PER_PROCESS_DATA
++ *psEnvPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ return PVRSRV_OK;
++}
++
++void LinuxMMapPerProcessDisconnect(struct PVRSRV_ENV_PER_PROCESS_DATA
++ *psEnvPerProc)
++{
++ struct KV_OFFSET_STRUCT *psOffsetStruct, *psTmpOffsetStruct;
++ IMG_BOOL bWarn = IMG_FALSE;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ mutex_lock(&g_sMMapMutex);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct,
++ &g_sMMapOffsetStructList, sMMapItem) {
++ if (psOffsetStruct->ui32PID == ui32PID) {
++ if (!bWarn) {
++ PVR_DPF(PVR_DBG_WARNING, "%s: process has "
++ "unmapped offset structures. "
++ "Removing them",
++ __func__);
++ bWarn = IMG_TRUE;
++ }
++ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
++ PVR_ASSERT(psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ }
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++enum PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(struct PVRSRV_HANDLE_BASE
++ *psHandleBase)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: failed to set handle limit (%d)",
++ __func__, eError);
++ return eError;
++ }
++
++ return eError;
++}
++
++void PVRMMapInit(void)
++{
++ mutex_init(&g_sMMapMutex);
++
++ g_psMemmapCache =
++ kmem_cache_create("img-mmap", sizeof(struct KV_OFFSET_STRUCT),
++ 0, 0, NULL);
++ if (!g_psMemmapCache) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: failed to allocate kmem_cache",
++ __func__);
++ goto error;
++ }
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++
++ return;
++
++error:
++ PVRMMapCleanup();
++ return;
++}
++
++void PVRMMapCleanup(void)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (!list_empty(&g_sMMapAreaList)) {
++ struct LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Memory areas are still registered with MMap",
++ __func__);
++
++ PVR_TRACE("%s: Unregistering memory areas", __func__);
++ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea,
++ &g_sMMapAreaList, sMMapItem) {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: PVRMMapRemoveRegisteredArea failed (%d)",
++ __func__, eError);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ }
++ }
++ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
++
++ RemoveProcEntry("mmap");
++
++ if (g_psMemmapCache) {
++ kmem_cache_destroy(g_psMemmapCache);
++ g_psMemmapCache = NULL;
++ }
++}
+diff --git a/drivers/gpu/pvr/mmap.h b/drivers/gpu/pvr/mmap.h
+new file mode 100644
+index 0000000..3ab55ff
+--- /dev/null
++++ b/drivers/gpu/pvr/mmap.h
+@@ -0,0 +1,74 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include "perproc.h"
++#include "mm.h"
++
++struct KV_OFFSET_STRUCT {
++ u32 ui32Mapped;
++ u32 ui32MMapOffset;
++ u32 ui32RealByteSize;
++ struct LinuxMemArea *psLinuxMemArea;
++ u32 ui32TID;
++ u32 ui32PID;
++ IMG_BOOL bOnMMapList;
++ u32 ui32RefCount;
++ u32 ui32UserVAddr;
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ const char *pszName;
++#endif
++ struct list_head sMMapItem;
++ struct list_head sAreaItem;
++};
++
++void PVRMMapInit(void);
++void PVRMMapCleanup(void);
++
++enum PVRSRV_ERROR PVRMMapRegisterArea(struct LinuxMemArea *psLinuxMemArea);
++
++enum PVRSRV_ERROR PVRMMapRemoveRegisteredArea(
++ struct LinuxMemArea *psLinuxMemArea);
++
++enum PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hMHandle, u32 *pui32MMapOffset,
++ u32 *pui32ByteOffset, u32 *pui32RealByteSize,
++ u32 *pui32UserVAddr);
++
++enum PVRSRV_ERROR PVRMMapReleaseMMapData(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hMHandle, IMG_BOOL *pbMUnmap,
++ u32 *pui32RealByteSize, u32 *pui32UserVAddr);
++
++int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma);
++
++#endif
+diff --git a/drivers/gpu/pvr/mmu.c b/drivers/gpu/pvr/mmu.c
+new file mode 100644
+index 0000000..3219cb1
+--- /dev/null
++++ b/drivers/gpu/pvr/mmu.c
+@@ -0,0 +1,1442 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++
++#define UINT32_MAX_VALUE 0xFFFFFFFFUL
++
++struct MMU_PT_INFO {
++ void *hPTPageOSMemHandle;
++ void *PTPageCpuVAddr;
++ u32 ui32ValidPTECount;
++};
++
++struct MMU_CONTEXT {
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ void *pvPDCpuVAddr;
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ void *hPDOSMemHandle;
++ struct MMU_PT_INFO *apsPTInfoList[1024];
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ struct MMU_CONTEXT *psNext;
++};
++
++struct MMU_HEAP {
++ struct MMU_CONTEXT *psMMUContext;
++
++ u32 ui32PTBaseIndex;
++ u32 ui32PTPageCount;
++ u32 ui32PTEntryCount;
++
++ struct RA_ARENA *psVMArena;
++
++ struct DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++
++#if defined(PDUMP)
++static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR DevVAddr, size_t uSize,
++ IMG_BOOL bForUnmap, void *hUniqueTag);
++#endif
++
++#define PAGE_TEST 0
++
++
++void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
++}
++
++static void MMU_InvalidatePageTableCache(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
++}
++
++static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
++{
++ PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");
++
++ PVR_ASSERT(pMMUHeap != NULL);
++ PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
++
++ if (pMMUHeap == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
++ return IMG_FALSE;
++ }
++
++ pMMUHeap->ui32PTEntryCount =
++ pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
++
++ pMMUHeap->ui32PTBaseIndex =
++ (pMMUHeap->psDevArena->BaseDevVAddr.
++ uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
++ SGX_MMU_PAGE_SHIFT;
++
++ pMMUHeap->ui32PTPageCount =
++ (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
++ SGX_MMU_PT_SHIFT;
++
++ return IMG_TRUE;
++}
++
++static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
++{
++ u32 *pui32PDEntry;
++ u32 i;
++ u32 ui32PDIndex;
++ struct SYS_DATA *psSysData;
++ struct MMU_PT_INFO **ppsPTInfoList;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTables: "
++ "ERROR call to SysAcquireData failed");
++ return;
++ }
++
++ ui32PDIndex =
++ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
++ ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
++ 0);
++ }
++
++ PDUMPCOMMENT("Free page table (page count == %08X)",
++ pMMUHeap->ui32PTPageCount);
++ if (ppsPTInfoList[ui32PTIndex]
++ && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++ switch (pMMUHeap->psDevArena->DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ struct MMU_CONTEXT *psMMUContext =
++ (struct MMU_CONTEXT *)
++ pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while (psMMUContext) {
++ pui32PDEntry =
++ (u32 *) psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++ pui32PDEntry[ui32PTIndex] = 0;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *) &
++ pui32PDEntry[ui32PTIndex],
++ sizeof(u32), 0, IMG_FALSE,
++ PDUMP_PT_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ {
++
++ pui32PDEntry =
++ (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++ pui32PDEntry[ui32PTIndex] = 0;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *) &pui32PDEntry[ui32PTIndex],
++ sizeof(u32), 0, IMG_FALSE,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_DeferredFreePagetable: ERROR invalid heap type");
++ return;
++ }
++ }
++
++ if (ppsPTInfoList[ui32PTIndex] != NULL) {
++ if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
++ u32 *pui32Tmp;
++
++ pui32Tmp =
++ (u32 *) ppsPTInfoList[ui32PTIndex]->
++ PTPageCpuVAddr;
++
++ for (i = 0;
++ (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
++ i++)
++ pui32Tmp[i] = 0;
++
++ if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
++ psLocalDevMemArena == NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ ppsPTInfoList[ui32PTIndex]->
++ PTPageCpuVAddr,
++ ppsPTInfoList[ui32PTIndex]->
++ hPTPageOSMemHandle);
++ } else {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr =
++ OSMapLinToCPUPhys(ppsPTInfoList
++ [ui32PTIndex]->
++ PTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++ OSUnMapPhysToLin((void __force __iomem *)
++ ppsPTInfoList[ui32PTIndex]->
++ PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ ppsPTInfoList[ui32PTIndex]->
++ hPTPageOSMemHandle);
++
++ RA_Free(pMMUHeap->psDevArena->
++ psDeviceMemoryHeapInfo->
++ psLocalDevMemArena,
++ sSysPAddr.uiAddr, IMG_FALSE);
++ }
++
++ pMMUHeap->ui32PTEntryCount -= i;
++ } else {
++ pMMUHeap->ui32PTEntryCount -= 1024;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct MMU_PT_INFO),
++ ppsPTInfoList[ui32PTIndex], NULL);
++ ppsPTInfoList[ui32PTIndex] = NULL;
++ } else {
++ pMMUHeap->ui32PTEntryCount -= 1024;
++ }
++
++ PDUMPCOMMENT("Finished free page table (page count == %08X)",
++ pMMUHeap->ui32PTPageCount);
++}
++
++static void _DeferredFreePageTables(struct MMU_HEAP *pMMUHeap)
++{
++ u32 i;
++
++ for (i = 0; i < pMMUHeap->ui32PTPageCount; i++)
++ _DeferredFreePageTable(pMMUHeap, i);
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
++{
++ u32 ui32PTPageCount;
++ u32 ui32PDIndex;
++ u32 i;
++ u32 *pui32PDEntry;
++ struct MMU_PT_INFO **ppsPTInfoList;
++ struct SYS_DATA *psSysData;
++ struct IMG_DEV_VIRTADDR sHighDevVAddr;
++
++ PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return IMG_FALSE;
++
++ ui32PDIndex =
++ DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++ if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
++ (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
++
++ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
++ } else {
++ sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
++ (1 << (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT)) - 1;
++ }
++
++ ui32PTPageCount =
++ sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++ ui32PTPageCount -= ui32PDIndex;
++
++ pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
++ PDUMPCOMMENT("Page directory mods (page count == %08X)",
++ ui32PTPageCount);
++
++ for (i = 0; i < ui32PTPageCount; i++) {
++ if (ppsPTInfoList[i] == NULL) {
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct MMU_PT_INFO),
++ (void **) &ppsPTInfoList[i], NULL);
++ if (ppsPTInfoList[i] == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: "
++ "ERROR call to OSAllocMem failed");
++ return IMG_FALSE;
++ }
++ OSMemSet(ppsPTInfoList[i], 0,
++ sizeof(struct MMU_PT_INFO));
++ }
++
++ if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
++ ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++
++ PVR_ASSERT(pui32PDEntry[i] == 0);
++
++ if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
++ psLocalDevMemArena == NULL) {
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (void **)&ppsPTInfoList[i]->
++ PTPageCpuVAddr,
++ &ppsPTInfoList[i]->
++ hPTPageOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: "
++ "ERROR call to OSAllocPages failed");
++ return IMG_FALSE;
++ }
++
++ if (ppsPTInfoList[i]->PTPageCpuVAddr) {
++ sCpuPAddr =
++ OSMapLinToCPUPhys(ppsPTInfoList[i]->
++ PTPageCpuVAddr);
++ } else {
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(
++ ppsPTInfoList[i]->
++ hPTPageOSMemHandle,
++ 0);
++ }
++ sDevPAddr =
++ SysCpuPAddrToDevPAddr
++ (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ } else {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++
++ if (RA_Alloc(pMMUHeap->psDevArena->
++ psDeviceMemoryHeapInfo->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE, NULL, 0,
++ SGX_MMU_PAGE_SIZE,
++ &(sSysPAddr.uiAddr)) != IMG_TRUE) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: "
++ "ERROR call to RA_Alloc failed");
++ return IMG_FALSE;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ ppsPTInfoList[i]->PTPageCpuVAddr =
++ (void __force *)
++ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &ppsPTInfoList[i]->
++ hPTPageOSMemHandle);
++ if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: "
++ "ERROR failed to map page tables");
++ return IMG_FALSE;
++ }
++
++ sDevPAddr = SysCpuPAddrToDevPAddr
++ (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ }
++
++
++ OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
++ SGX_MMU_PAGE_SIZE);
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ ppsPTInfoList[i]->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PDUMP_PT_UNIQUETAG);
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ ppsPTInfoList[i]->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE, 0, IMG_TRUE,
++ PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++ switch (pMMUHeap->psDevArena->DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ struct MMU_CONTEXT *psMMUContext =
++ (struct MMU_CONTEXT *)pMMUHeap->
++ psMMUContext->psDevInfo->
++ pvMMUContextList;
++
++ while (psMMUContext) {
++ pui32PDEntry =
++ (u32 *)psMMUContext->
++ pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++ pui32PDEntry[i] =
++ sDevPAddr.uiAddr |
++ SGX_MMU_PDE_VALID;
++
++ PDUMPMEM2
++ (PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDEntry[i],
++ sizeof(u32), 0,
++ IMG_FALSE,
++ PDUMP_PD_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++
++ psMMUContext =
++ psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ {
++ pui32PDEntry[i] = sDevPAddr.uiAddr |
++ SGX_MMU_PDE_VALID;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDEntry[i],
++ sizeof(u32), 0,
++ IMG_FALSE, PDUMP_PD_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++
++ break;
++ }
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: "
++ "ERROR invalid heap type");
++ return IMG_FALSE;
++ }
++ }
++
++
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
++ psDevInfo);
++ } else {
++
++ PVR_ASSERT(pui32PDEntry[i] != 0);
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct MMU_CONTEXT **ppsMMUContext,
++ struct IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ u32 *pui32Tmp;
++ u32 i;
++ void *pvPDCpuVAddr;
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ struct MMU_CONTEXT *psMMUContext;
++ void *hPDOSMemHandle;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to SysAcquireData failed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL);
++ if (psMMUContext == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to OSAllocMem failed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));
++
++ psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ psMMUContext->psDevInfo = psDevInfo;
++
++ psMMUContext->psDeviceNode = psDeviceNode;
++
++ if (psDeviceNode->psLocalDevMemArena == NULL) {
++ if (OSAllocPages
++ (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
++ &hPDOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
++ "ERROR call to OSAllocPages failed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (pvPDCpuVAddr)
++ sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++ else
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++ sPDDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ } else {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++
++ if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
++ &(sSysPAddr.uiAddr)) != IMG_TRUE) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
++ "ERROR call to RA_Alloc failed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ sPDDevPAddr =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ pvPDCpuVAddr = (void __force *)
++ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
++ if (!pvPDCpuVAddr) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
++ "ERROR failed to map page tables");
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ PDUMPCOMMENT("Alloc page directory");
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++ if (pvPDCpuVAddr) {
++ pui32Tmp = (u32 *) pvPDCpuVAddr;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Initialise: pvPDCpuVAddr invalid");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ for (i = 0; i < SGX_MMU_PD_SIZE; i++)
++ pui32Tmp[i] = 0;
++
++ PDUMPCOMMENT("Page directory contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0,
++ IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++ psMMUContext->sPDDevPAddr = sPDDevPAddr;
++ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++ *ppsMMUContext = psMMUContext;
++
++ *psPDDevPAddr = sPDDevPAddr;
++
++ psMMUContext->psNext = (struct MMU_CONTEXT *)
++ psDevInfo->pvMMUContextList;
++ psDevInfo->pvMMUContextList = (void *) psMMUContext;
++
++
++ return PVRSRV_OK;
++}
++
++void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
++{
++ u32 *pui32Tmp, i;
++ struct SYS_DATA *psSysData;
++ struct MMU_CONTEXT **ppsMMUContext;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Finalise: ERROR call to SysAcquireData failed");
++ return;
++ }
++
++ PDUMPCOMMENT("Free page directory");
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++ pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
++
++ for (i = 0; i < SGX_MMU_PD_SIZE; i++)
++ pui32Tmp[i] = 0;
++
++ if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psMMUContext->pvPDCpuVAddr,
++ psMMUContext->hPDOSMemHandle);
++
++ } else {
++ struct IMG_SYS_PHYADDR sSysPAddr;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++ OSUnMapPhysToLin((void __iomem __force *)
++ psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psMMUContext->hPDOSMemHandle);
++
++ RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
++ sSysPAddr.uiAddr, IMG_FALSE);
++
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");
++
++ ppsMMUContext =
++ (struct MMU_CONTEXT **) &psMMUContext->psDevInfo->pvMMUContextList;
++ while (*ppsMMUContext) {
++ if (*ppsMMUContext == psMMUContext) {
++
++ *ppsMMUContext = psMMUContext->psNext;
++ break;
++ }
++
++ ppsMMUContext = &((*ppsMMUContext)->psNext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
++ psMMUContext, NULL);
++}
++
++void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
++ struct MMU_HEAP *psMMUHeap)
++{
++ u32 *pui32PDCpuVAddr = (u32 *)psMMUContext->pvPDCpuVAddr;
++ u32 *pui32KernelPDCpuVAddr = (u32 *)
++ psMMUHeap->psMMUContext->pvPDCpuVAddr;
++ u32 ui32PDEntry;
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++
++ pui32PDCpuVAddr +=
++ psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++ pui32KernelPDCpuVAddr +=
++ psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++
++ PDUMPCOMMENT("Page directory shared heap range copy");
++
++ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount;
++ ui32PDEntry++) {
++
++ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++
++ pui32PDCpuVAddr[ui32PDEntry] =
++ pui32KernelPDCpuVAddr[ui32PDEntry];
++ if (pui32PDCpuVAddr[ui32PDEntry]) {
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *) &pui32PDCpuVAddr[ui32PDEntry],
++ sizeof(u32), 0, IMG_FALSE,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++ bInvalidateDirectoryCache = IMG_TRUE;
++ }
++ }
++
++ if (bInvalidateDirectoryCache)
++ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++}
++
++static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
++ struct IMG_DEV_VIRTADDR sDevVAddr,
++ u32 ui32PageCount, void *hUniqueTag)
++{
++ u32 uPageSize = HOST_PAGESIZE();
++ struct IMG_DEV_VIRTADDR sTmpDevVAddr;
++ u32 i;
++ u32 ui32PDIndex;
++ u32 ui32PTIndex;
++ u32 *pui32Tmp;
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for (i = 0; i < ui32PageCount; i++) {
++ struct MMU_PT_INFO **ppsPTInfoList;
++
++ ui32PDIndex =
++ sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++
++ ppsPTInfoList =
++ &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
++ >> SGX_MMU_PAGE_SHIFT;
++
++ if (!ppsPTInfoList[0]) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "MMU_UnmapPagesAndFreePTs: "
++ "Invalid PT for alloc at VAddr:0x%08lX "
++ "(VaddrIni:0x%08lX AllocPage:%u) "
++ "PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
++ i, ui32PDIndex, ui32PTIndex);
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++
++ continue;
++ }
++
++ pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ if (!pui32Tmp)
++ continue;
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ } else {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "MMU_UnmapPagesAndFreePTs: "
++ "Page is already invalid for alloc at "
++ "VAddr:0x%08lX "
++ "(VAddrIni:0x%08lX AllocPage:%u) "
++ "PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
++ i, ui32PDIndex, ui32PTIndex);
++ }
++
++ PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
++ 0);
++ pui32Tmp[ui32PTIndex] = 0;
++ }
++
++ if (ppsPTInfoList[0]
++ && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
++ _DeferredFreePageTable(psMMUHeap,
++ ui32PDIndex - (psMMUHeap->
++ ui32PTBaseIndex >>
++ SGX_MMU_PT_SHIFT));
++ bInvalidateDirectoryCache = IMG_TRUE;
++ }
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++ }
++
++ if (bInvalidateDirectoryCache) {
++ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
++ psDevInfo);
++ } else {
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
++ psDevInfo);
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
++ IMG_TRUE, hUniqueTag);
++#endif
++}
++
++static void MMU_FreePageTables(void *pvMMUHeap, u32 ui32Start, u32 ui32End,
++ void *hUniqueTag)
++{
++ struct MMU_HEAP *pMMUHeap = (struct MMU_HEAP *)pvMMUHeap;
++ struct IMG_DEV_VIRTADDR Start;
++
++ Start.uiAddr = ui32Start;
++
++ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
++ (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE,
++ hUniqueTag);
++}
++
++struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
++ struct DEV_ARENA_DESCRIPTOR *psDevArena,
++ struct RA_ARENA **ppsVMArena)
++{
++ struct MMU_HEAP *pMMUHeap;
++ IMG_BOOL bRes;
++
++ PVR_ASSERT(psDevArena != NULL);
++
++ if (psDevArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_Create: invalid parameter");
++ return NULL;
++ }
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct MMU_HEAP), (void **)&pMMUHeap, NULL);
++ if (pMMUHeap == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Create: ERROR call to OSAllocMem failed");
++ return NULL;
++ }
++
++ pMMUHeap->psMMUContext = psMMUContext;
++ pMMUHeap->psDevArena = psDevArena;
++
++ bRes = _AllocPageTables(pMMUHeap);
++ if (!bRes) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Create: ERROR call to _AllocPageTables failed");
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
++ pMMUHeap, NULL);
++ return NULL;
++ }
++
++ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++ psDevArena->BaseDevVAddr.uiAddr,
++ psDevArena->ui32Size, NULL,
++ SGX_MMU_PAGE_SIZE, NULL, NULL,
++ MMU_FreePageTables, pMMUHeap);
++
++ if (pMMUHeap->psVMArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Create: ERROR call to RA_Create failed");
++ _DeferredFreePageTables(pMMUHeap);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
++ pMMUHeap, NULL);
++ return NULL;
++ }
++
++ *ppsVMArena = pMMUHeap->psVMArena;
++
++ return pMMUHeap;
++}
++
++void MMU_Delete(struct MMU_HEAP *pMMUHeap)
++{
++ if (pMMUHeap != NULL) {
++ PVR_DPF(PVR_DBG_MESSAGE, "MMU_Delete");
++
++ if (pMMUHeap->psVMArena)
++ RA_Delete(pMMUHeap->psVMArena);
++ _DeferredFreePageTables(pMMUHeap);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_HEAP),
++ pMMUHeap, NULL);
++ }
++}
++
++IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMUHeap, size_t uSize, u32 uFlags,
++ u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *psDevVAddr)
++{
++ IMG_BOOL bStatus;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++ uSize, uFlags, uDevVAddrAlignment);
++
++ if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
++ bStatus = RA_Alloc(pMMUHeap->psVMArena, uSize, NULL, 0,
++ uDevVAddrAlignment, &(psDevVAddr->uiAddr));
++ if (!bStatus) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Alloc: RA_Alloc of VMArena failed");
++ return bStatus;
++ }
++ }
++
++ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++
++
++ if (!bStatus) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Alloc: _DeferredAllocPagetables failed");
++ if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr,
++ IMG_FALSE);
++ }
++
++ return bStatus;
++}
++
++void MMU_Free(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
++ u32 ui32Size)
++{
++ PVR_ASSERT(pMMUHeap != NULL);
++
++ if (pMMUHeap == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_Free: invalid parameter");
++ return;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
++ DevVAddr.uiAddr);
++
++ if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
++ (DevVAddr.uiAddr + ui32Size <=
++ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
++ pMMUHeap->psDevArena->ui32Size)) {
++ RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
++ return;
++ }
++
++ BUG();
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
++ DevVAddr.uiAddr);
++}
++
++void MMU_Enable(struct MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++void MMU_Disable(struct MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++#if defined(PDUMP)
++static void MMU_PDumpPageTables(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR DevVAddr,
++ size_t uSize, IMG_BOOL bForUnmap, void *hUniqueTag)
++{
++ u32 ui32NumPTEntries;
++ u32 ui32PTIndex;
++ u32 *pui32PTEntry;
++
++ struct MMU_PT_INFO **ppsPTInfoList;
++ u32 ui32PDIndex;
++ u32 ui32PTDumpCount;
++
++ ui32NumPTEntries =
++ (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
++
++ ui32PDIndex =
++ DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++ PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
++ ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++ while (ui32NumPTEntries > 0) {
++ struct MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
++
++ if (ui32NumPTEntries <= 1024 - ui32PTIndex)
++ ui32PTDumpCount = ui32NumPTEntries;
++ else
++ ui32PTDumpCount = 1024 - ui32PTIndex;
++
++ if (psPTInfo) {
++ pui32PTEntry = (u32 *)psPTInfo->PTPageCpuVAddr;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PTEntry[ui32PTIndex],
++ ui32PTDumpCount * sizeof(u32), 0,
++ IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
++ }
++
++ ui32NumPTEntries -= ui32PTDumpCount;
++
++ ui32PTIndex = 0;
++ }
++
++ PDUMPCOMMENT("Finished page table mods %s",
++ bForUnmap ? "(for unmap)" : "");
++}
++#endif
++
++static void MMU_MapPage(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR DevVAddr,
++ struct IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
++{
++ u32 ui32Index;
++ u32 *pui32Tmp;
++ u32 ui32MMUFlags = 0;
++ struct MMU_PT_INFO **ppsPTInfoList;
++
++ if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
++ (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE))
++ ui32MMUFlags = 0;
++ else if (PVRSRV_MEM_READ & ui32MemFlags)
++ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++ else if (PVRSRV_MEM_WRITE & ui32MemFlags)
++ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++
++ if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
++ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++
++ if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
++ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++
++ ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++ ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++ pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_MapPage: "
++ "Page is already valid for alloc at "
++ "VAddr:0x%08lX PDIdx:%u PTIdx:%u",
++ DevVAddr.uiAddr,
++ DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT), ui32Index);
++
++ PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++
++ ppsPTInfoList[0]->ui32ValidPTECount++;
++
++ pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
++ | SGX_MMU_PTE_VALID | ui32MMUFlags;
++}
++
++void MMU_MapScatter(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
++ struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
++ u32 ui32MemFlags, void *hUniqueTag)
++{
++#if defined(PDUMP)
++ struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ u32 uCount, i;
++ struct IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_ASSERT(pMMUHeap != NULL);
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ for (i = 0, uCount = 0; uCount < uSize;
++ i++, uCount += SGX_MMU_PAGE_SIZE) {
++ struct IMG_SYS_PHYADDR sSysAddr;
++
++ sSysAddr = psSysAddr[i];
++
++ DevPAddr =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++ MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapScatter: "
++ "devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize);
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
++ hUniqueTag);
++#endif
++}
++
++void MMU_MapPages(struct MMU_HEAP *pMMUHeap, struct IMG_DEV_VIRTADDR DevVAddr,
++ struct IMG_SYS_PHYADDR SysPAddr, size_t uSize,
++ u32 ui32MemFlags, void *hUniqueTag)
++{
++ struct IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++ struct IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ u32 uCount;
++ u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
++ u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
++
++ PVR_ASSERT(pMMUHeap != NULL);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "MMU_MapPages: "
++ "mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++ pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize);
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++ if (ui32MemFlags & PVRSRV_MEM_DUMMY)
++ ui32PAdvance = 0;
++
++ for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
++ MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += ui32VAdvance;
++ DevPAddr.uiAddr += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE,
++ hUniqueTag);
++#endif
++}
++
++void MMU_MapShadow(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ size_t uByteSize, void *CpuVAddr, void *hOSMemHandle,
++ struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
++ void *hUniqueTag)
++{
++ u32 i;
++ u32 uOffset = 0;
++ struct IMG_DEV_VIRTADDR MapDevVAddr;
++ u32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
++ u32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "MMU_MapShadow: %08X, 0x%x, %08X",
++ MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr);
++
++ PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++ PVR_ASSERT(((u32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++ if (ui32MemFlags & PVRSRV_MEM_DUMMY)
++ ui32PAdvance = 0;
++
++ MapDevVAddr = MapBaseDevVAddr;
++ for (i = 0; i < uByteSize; i += ui32VAdvance) {
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ struct IMG_DEV_PHYADDR DevPAddr;
++
++ if (CpuVAddr)
++ CpuPAddr =
++ OSMapLinToCPUPhys((void *)((u32)CpuVAddr +
++ uOffset));
++ else
++ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++ DevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "0x%x: CpuVAddr=%08X, "
++ "CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++ uOffset, (u32)CpuVAddr + uOffset, CpuPAddr.uiAddr,
++ MapDevVAddr.uiAddr, DevPAddr.uiAddr);
++
++ MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++ MapDevVAddr.uiAddr += ui32VAdvance;
++ uOffset += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE,
++ hUniqueTag);
++#endif
++}
++
++void MMU_UnmapPages(struct MMU_HEAP *psMMUHeap,
++ struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount,
++ void *hUniqueTag)
++{
++ u32 uPageSize = HOST_PAGESIZE();
++ struct IMG_DEV_VIRTADDR sTmpDevVAddr;
++ u32 i;
++ u32 ui32PDIndex;
++ u32 ui32PTIndex;
++ u32 *pui32Tmp;
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for (i = 0; i < ui32PageCount; i++) {
++ struct MMU_PT_INFO **ppsPTInfoList;
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->
++ apsPTInfoList[ui32PDIndex];
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >>
++ SGX_MMU_PAGE_SHIFT;
++
++ if (!ppsPTInfoList[0]) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_UnmapPages: "
++ "ERROR Invalid PT for alloc at VAddr:0x%08lX "
++ "(VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u "
++ "PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
++ ui32PDIndex, ui32PTIndex);
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++
++ continue;
++ }
++
++ pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ else
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_UnmapPages: Page is already invalid "
++ "for alloc at VAddr:0x%08lX "
++ "(VAddrIni:0x%08lX AllocPage:%u) "
++ "PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
++ ui32PDIndex, ui32PTIndex);
++
++ PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++ pui32Tmp[ui32PTIndex] = 0;
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++ }
++
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount,
++ IMG_TRUE, hUniqueTag);
++#endif
++}
++
++struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++ u32 *pui32PageTable;
++ u32 ui32Index;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct MMU_PT_INFO **ppsPTInfoList;
++
++ ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++ if (!ppsPTInfoList[0]) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
++ sDevVPageAddr.uiAddr);
++ sDevPAddr.uiAddr = 0;
++ return sDevPAddr;
++ }
++
++ ui32Index =
++ (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++ pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++ sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
++
++ return sDevPAddr;
++}
++
++struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext)
++{
++ return pMMUContext->sPDDevPAddr;
++}
++
++enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
++ struct IMG_DEV_VIRTADDR sDevVAddr,
++ struct IMG_DEV_PHYADDR *pDevPAddr,
++ struct IMG_CPU_PHYADDR *pCpuPAddr)
++{
++ struct MMU_HEAP *pMMUHeap;
++ struct IMG_DEV_PHYADDR DevPAddr;
++
++ pMMUHeap = (struct MMU_HEAP *)BM_GetMMUHeap(hDevMemHeap);
++
++ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++ pCpuPAddr->uiAddr = DevPAddr.uiAddr;
++ pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++ return (pDevPAddr->uiAddr != 0) ?
++ PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
++ void *hDevMemContext,
++ struct IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ *psPDDevPAddr =
++ ((struct BM_CONTEXT *)hDevMemContext)->psMMUContext->sPDDevPAddr;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct RA_ARENA *psLocalDevMemArena;
++ void *hOSMemHandle = NULL;
++ u8 *pui8MemBlock = NULL;
++ struct IMG_SYS_PHYADDR sMemBlockSysPAddr;
++ struct IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed");
++ return eError;
++ }
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ if (psLocalDevMemArena == NULL) {
++
++ eError =
++ OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
++ &hOSMemHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
++ "ERROR call to OSAllocPages failed");
++ return eError;
++ }
++
++ if (pui8MemBlock) {
++ sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++ } else {
++
++ sMemBlockCpuPAddr =
++ OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++ }
++ } else {
++ if (RA_Alloc(psLocalDevMemArena, 3 * SGX_MMU_PAGE_SIZE,
++ NULL, 0, SGX_MMU_PAGE_SIZE,
++ &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
++ "ERROR call to RA_Alloc failed");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++ pui8MemBlock = (void __force *)OSMapPhysToLin(sMemBlockCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 3,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &hOSMemHandle);
++ if (!pui8MemBlock) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: "
++ "ERROR failed to map page tables");
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++
++ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++ psDevInfo->sBIFResetPDDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr =
++ psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++ psDevInfo->sBIFResetPageDevPAddr.uiAddr =
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++ psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
++ psDevInfo->pui32BIFResetPT =
++ (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
++
++ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++
++ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB,
++ SGX_MMU_PAGE_SIZE);
++
++ return PVRSRV_OK;
++}
++
++void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct RA_ARENA *psLocalDevMemArena;
++ struct IMG_SYS_PHYADDR sPDSysPAddr;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "MMU_BIFResetPDFree: "
++ "ERROR call to SysAcquireData failed");
++ return;
++ }
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ if (psLocalDevMemArena == NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BIFResetPD,
++ psDevInfo->hBIFResetPDOSMemHandle);
++ } else {
++ OSUnMapPhysToLin((void __force __iomem *)
++ psDevInfo->pui32BIFResetPD,
++ 3 * SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBIFResetPDOSMemHandle);
++
++ sPDSysPAddr =
++ SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->sBIFResetPDDevPAddr);
++ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
+diff --git a/drivers/gpu/pvr/mmu.h b/drivers/gpu/pvr/mmu.h
+new file mode 100644
+index 0000000..85d939e
+--- /dev/null
++++ b/drivers/gpu/pvr/mmu.h
+@@ -0,0 +1,85 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++#include "sgxinfokm.h"
++
++enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct MMU_CONTEXT **ppsMMUContext,
++ struct IMG_DEV_PHYADDR *psPDDevPAddr);
++
++void MMU_Finalise(struct MMU_CONTEXT *psMMUContext);
++
++void MMU_InsertHeap(struct MMU_CONTEXT *psMMUContext,
++ struct MMU_HEAP *psMMUHeap);
++
++struct MMU_HEAP *MMU_Create(struct MMU_CONTEXT *psMMUContext,
++ struct DEV_ARENA_DESCRIPTOR *psDevArena,
++ struct RA_ARENA **ppsVMArena);
++
++void MMU_Delete(struct MMU_HEAP *pMMU);
++
++IMG_BOOL MMU_Alloc(struct MMU_HEAP *pMMU, size_t uSize, u32 uFlags,
++ u32 uDevVAddrAlignment, struct IMG_DEV_VIRTADDR *pDevVAddr);
++
++void MMU_Free(struct MMU_HEAP *pMMU, struct IMG_DEV_VIRTADDR DevVAddr,
++ u32 ui32Size);
++
++void MMU_Enable(struct MMU_HEAP *pMMU);
++
++void MMU_Disable(struct MMU_HEAP *pMMU);
++
++void MMU_MapPages(struct MMU_HEAP *pMMU, struct IMG_DEV_VIRTADDR devVAddr,
++ struct IMG_SYS_PHYADDR SysPAddr, size_t uSize, u32 ui32MemFlags,
++ void *hUniqueTag);
++
++void MMU_MapShadow(struct MMU_HEAP *pMMU,
++ struct IMG_DEV_VIRTADDR MapBaseDevVAddr, size_t uSize,
++ void *CpuVAddr, void *hOSMemHandle,
++ struct IMG_DEV_VIRTADDR *pDevVAddr, u32 ui32MemFlags,
++ void *hUniqueTag);
++
++void MMU_UnmapPages(struct MMU_HEAP *pMMU, struct IMG_DEV_VIRTADDR dev_vaddr,
++ u32 ui32PageCount, void *hUniqueTag);
++
++void MMU_MapScatter(struct MMU_HEAP *pMMU, struct IMG_DEV_VIRTADDR DevVAddr,
++ struct IMG_SYS_PHYADDR *psSysAddr, size_t uSize,
++ u32 ui32MemFlags, void *hUniqueTag);
++
++struct IMG_DEV_PHYADDR MMU_GetPhysPageAddr(struct MMU_HEAP *pMMUHeap,
++ struct IMG_DEV_VIRTADDR sDevVPageAddr);
++
++struct IMG_DEV_PHYADDR MMU_GetPDDevPAddr(struct MMU_CONTEXT *pMMUContext);
++
++void MMU_InvalidateDirectoryCache(struct PVRSRV_SGXDEV_INFO *psDevInfo);
++
++enum PVRSRV_ERROR MMU_BIFResetPDAlloc(struct PVRSRV_SGXDEV_INFO *psDevInfo);
++
++void MMU_BIFResetPDFree(struct PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#endif
+diff --git a/drivers/gpu/pvr/module.c b/drivers/gpu/pvr/module.c
+new file mode 100644
+index 0000000..a38dcf3
+--- /dev/null
++++ b/drivers/gpu/pvr/module.c
+@@ -0,0 +1,304 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/miscdevice.h>
++
++#include <linux/platform_device.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "private_data.h"
++#include "lock.h"
++
++#define DRVNAME "pvrsrvkm"
++
++#ifdef DEBUG
++static int debug = DBGPRIV_WARNING;
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#endif
++
++struct mutex gPVRSRVLock;
++
++static int pvr_open(struct inode unref__ * inode, struct file *filp)
++{
++ struct PVRSRV_FILE_PRIVATE_DATA *priv;
++ void *block_alloc;
++ int ret = -ENOMEM;
++ enum PVRSRV_ERROR err;
++ u32 pid;
++
++ mutex_lock(&gPVRSRVLock);
++
++ pid = OSGetCurrentProcessIDKM();
++
++ if (PVRSRVProcessConnect(pid) != PVRSRV_OK)
++ goto err_unlock;
++
++ err = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*priv),
++ (void **)&priv, &block_alloc);
++
++ if (err != PVRSRV_OK)
++ goto err_unlock;
++
++ priv->ui32OpenPID = pid;
++ priv->hBlockAlloc = block_alloc;
++ filp->private_data = priv;
++
++ ret = 0;
++err_unlock:
++ mutex_unlock(&gPVRSRVLock);
++ return ret;
++}
++
++static int pvr_release(struct inode unref__ * inode, struct file *filp)
++{
++ struct PVRSRV_FILE_PRIVATE_DATA *priv;
++
++ mutex_lock(&gPVRSRVLock);
++
++ priv = filp->private_data;
++
++ PVRSRVProcessDisconnect(priv->ui32OpenPID);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*priv),
++ priv, priv->hBlockAlloc);
++
++ mutex_unlock(&gPVRSRVLock);
++ return 0;
++}
++
++static const struct file_operations pvr_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = PVRSRV_BridgeDispatchKM,
++ .open = pvr_open,
++ .release = pvr_release,
++ .mmap = PVRMMap,
++};
++
++static void pvr_shutdown(struct platform_device *pdev)
++{
++ PVR_TRACE("pvr_shutdown(pdev=%p)", pdev);
++
++ (void)PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3);
++}
++
++static int pvr_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ PVR_TRACE("pvr_suspend(pdev=%p)", pdev);
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
++ return -EINVAL;
++ return 0;
++}
++
++static int pvr_resume(struct platform_device *pdev)
++{
++ PVR_TRACE("pvr_resume(pdev=%p)", pdev);
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++ return -EINVAL;
++ return 0;
++}
++
++static void pvr_dev_release(struct device *pdev)
++{
++ PVR_DPF(PVR_DBG_WARNING, "pvr_dev_release(pdev=%p)", pdev);
++}
++
++static struct platform_device pvr_device = {
++ .name = DRVNAME,
++ .id = -1,
++ .dev = {
++ .release = pvr_dev_release
++ }
++};
++
++static struct miscdevice pvr_miscdevice = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = DRVNAME,
++ .fops = &pvr_fops,
++};
++
++static int __devinit pvr_probe(struct platform_device *pdev)
++{
++ struct SYS_DATA *sysdata;
++ int ret;
++
++ PVR_TRACE("pvr_probe(pdev=%p)", pdev);
++
++ if (SysAcquireData(&sysdata) != PVRSRV_OK &&
++ SysInitialise() != PVRSRV_OK) {
++ ret = -ENODEV;
++ goto err_exit;
++ }
++
++ ret = misc_register(&pvr_miscdevice);
++ if (ret < 0)
++ goto err_exit;
++
++ return 0;
++
++err_exit:
++ dev_err(&pdev->dev, "probe failed (%d)\n", ret);
++
++ return ret;
++}
++
++static int __devexit pvr_remove(struct platform_device *pdev)
++{
++ struct SYS_DATA *sysdata;
++ int ret;
++
++ PVR_TRACE("pvr_remove(pdev=%p)", pdev);
++
++ ret = misc_deregister(&pvr_miscdevice);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "remove failed (%d)\n", ret);
++ return ret;
++ }
++
++ if (SysAcquireData(&sysdata) == PVRSRV_OK)
++ SysDeinitialise(sysdata);
++
++ return 0;
++}
++
++
++static struct platform_driver pvr_driver = {
++ .driver = {
++ .name = DRVNAME,
++ },
++ .probe = pvr_probe,
++ .remove = __devexit_p(pvr_remove),
++ .suspend = pvr_suspend,
++ .resume = pvr_resume,
++ .shutdown = pvr_shutdown,
++};
++
++static int __init pvr_init(void)
++{
++ int error;
++
++ pvr_dbg_init();
++
++ PVR_TRACE("pvr_init");
++
++ mutex_init(&gPVRSRVLock);
++
++#ifdef DEBUG
++ PVRDebugSetLevel(debug);
++#endif
++
++ error = CreateProcEntries();
++ if (error < 0)
++ goto err1;
++
++ error = -ENOMEM;
++ if (LinuxMMInit() != PVRSRV_OK)
++ goto err2;
++
++ if (LinuxBridgeInit() != PVRSRV_OK)
++ goto err3;
++
++ PVRMMapInit();
++
++ error = platform_driver_register(&pvr_driver);
++ if (error < 0)
++ goto err4;
++
++ error = platform_device_register(&pvr_device);
++ if (error)
++ goto err5;
++
++ return 0;
++
++err5:
++ platform_driver_unregister(&pvr_driver);
++err4:
++ PVRMMapCleanup();
++ LinuxBridgeDeInit();
++err3:
++ LinuxMMCleanup();
++err2:
++ RemoveProcEntries();
++err1:
++ pr_err("%s: failed (%d)\n", __func__, error);
++
++ return error;
++}
++
++static void __exit pvr_cleanup(void)
++{
++ struct SYS_DATA *sysdata;
++
++ PVR_TRACE("pvr_cleanup");
++
++ SysAcquireData(&sysdata);
++
++ platform_device_unregister(&pvr_device);
++ platform_driver_unregister(&pvr_driver);
++
++ PVRMMapCleanup();
++ LinuxMMCleanup();
++ LinuxBridgeDeInit();
++ RemoveProcEntries();
++
++ PVR_TRACE("pvr_cleanup: unloading");
++
++ pvr_dbg_cleanup();
++}
++
++module_init(pvr_init);
++module_exit(pvr_cleanup);
++
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++MODULE_ALIAS("platform:" DRVNAME);
++
+diff --git a/drivers/gpu/pvr/mutex.h b/drivers/gpu/pvr/mutex.h
+new file mode 100644
+index 0000000..cbd963a
+--- /dev/null
++++ b/drivers/gpu/pvr/mutex.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_LINUX_MUTEX_H_
++#define __INCLUDED_LINUX_MUTEX_H_
++
++#include <linux/version.h>
++
++#include <linux/mutex.h>
++
++extern struct mutex gPVRSRVLock;
++
++#endif
+diff --git a/drivers/gpu/pvr/mutils.h b/drivers/gpu/pvr/mutils.h
+new file mode 100644
+index 0000000..47279ec
+--- /dev/null
++++ b/drivers/gpu/pvr/mutils.h
+@@ -0,0 +1,37 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MUTILS_H__
++#define __IMG_LINUX_MUTILS_H__
++
++#define PGPROT_WC(pv) pgprot_writecombine(pv)
++#define PGPROT_UC(pv) pgprot_noncached(pv)
++
++#define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
++#define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
++
++#endif
+diff --git a/drivers/gpu/pvr/ocpdefs.h b/drivers/gpu/pvr/ocpdefs.h
+new file mode 100644
+index 0000000..12c3b36
+--- /dev/null
++++ b/drivers/gpu/pvr/ocpdefs.h
+@@ -0,0 +1,294 @@
++/****************************************************************************
++ Name : ocpdefs.h
++ Author : PowerVR
++ Copyright : 2009 by Imagination Technologies Limited.
++ All rights reserved. No part of this software, either
++ material or conceptual may be copied or distributed,
++ transmitted, transcribed, stored in a retrieval system or
++ translated into any human or computer language in any form
++ by any means, electronic, mechanical, manual or otherwise,
++ or disclosed to third parties without the express written
++ permission of Imagination Technologies Limited,
++ Home Park Estate, Kings Langley, Hertfordshire,
++ WD4 8LZ, U.K.
++ Description :
++
++ Program Type :
++
++ Modifications :
++
++****************************************************************************/
++
++#ifndef _OCPDEFS_H_
++#define _OCPDEFS_H_
++
++#include "sysconfig.h"
++
++#define SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE \
++ (SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE + EUR_CR_OCP_REVISION)
++#define SYS_OMAP3430_OCP_REGS_SIZE 0x110
++
++/* Register EUR_CR_OCP_REVISION */
++#define EUR_CR_OCP_REVISION 0xFE00
++#define EUR_CR_OCP_REVISION_REV_MASK 0xFFFFFFFFUL
++#define EUR_CR_OCP_REVISION_REV_SHIFT 0
++#define EUR_CR_OCP_REVISION_REV_SIGNED 0
++
++/* Register EUR_CR_OCP_HWINFO */
++#define EUR_CR_OCP_HWINFO 0xFE04
++#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_MASK 0x00000003UL
++#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SHIFT 0
++#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SIGNED 0
++
++#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_MASK 0x00000004UL
++#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SHIFT 2
++#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SIGNED 0
++
++/* Register EUR_CR_OCP_SYSCONFIG */
++#define EUR_CR_OCP_SYSCONFIG 0xFE10
++#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_MASK 0x0000000CUL
++#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 2
++#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SIGNED 0
++
++#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_MASK 0x00000030UL
++#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 4
++#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQSTATUS_RAW_0 */
++#define EUR_CR_OCP_IRQSTATUS_RAW_0 0xFE24
++#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SHIFT 0
++#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQSTATUS_RAW_1 */
++#define EUR_CR_OCP_IRQSTATUS_RAW_1 0xFE28
++#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SHIFT 0
++#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQSTATUS_RAW_2 */
++#define EUR_CR_OCP_IRQSTATUS_RAW_2 0xFE2C
++#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SHIFT 0
++#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQSTATUS_0 */
++#define EUR_CR_OCP_IRQSTATUS_0 0xFE30
++#define EUR_CR_OCP_IRQSTATUS_0_INIT_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQSTATUS_0_INIT_SHIFT 0
++#define EUR_CR_OCP_IRQSTATUS_0_INIT_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQSTATUS_1 */
++#define EUR_CR_OCP_IRQSTATUS_1 0xFE34
++#define EUR_CR_OCP_IRQSTATUS_1_TARGET_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SHIFT 0
++#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQSTATUS_2 */
++#define EUR_CR_OCP_IRQSTATUS_2 0xFE38
++#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SHIFT 0
++#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQENABLE_SET_0 */
++#define EUR_CR_OCP_IRQENABLE_SET_0 0xFE3C
++#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SHIFT 0
++#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQENABLE_SET_1 */
++#define EUR_CR_OCP_IRQENABLE_SET_1 0xFE40
++#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SHIFT 0
++#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQENABLE_SET_2 */
++#define EUR_CR_OCP_IRQENABLE_SET_2 0xFE44
++#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SHIFT 0
++#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQENABLE_CLR_0 */
++#define EUR_CR_OCP_IRQENABLE_CLR_0 0xFE48
++#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SHIFT 0
++#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQENABLE_CLR_1 */
++#define EUR_CR_OCP_IRQENABLE_CLR_1 0xFE4C
++#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SHIFT 0
++#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SIGNED 0
++
++/* Register EUR_CR_OCP_IRQENABLE_CLR_2 */
++#define EUR_CR_OCP_IRQENABLE_CLR_2 0xFE50
++#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_MASK 0x00000001UL
++#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SHIFT 0
++#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SIGNED 0
++
++/* Register EUR_CR_OCP_PAGE_CONFIG */
++#define EUR_CR_OCP_PAGE_CONFIG 0xFF00
++#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_MASK 0x00000001UL
++#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SHIFT 0
++#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SIGNED 0
++
++#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_MASK 0x00000004UL
++#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SHIFT 2
++#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SIGNED 0
++
++#define EUR_CR_OCP_PAGE_CONFIG_SIZE_MASK 0x00000018UL
++#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SHIFT 3
++#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SIGNED 0
++
++/* Register EUR_CR_OCP_INTERRUPT_EVENT */
++#define EUR_CR_OCP_INTERRUPT_EVENT 0xFF04
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_MASK 0x00000001UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SHIFT 0
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_MASK 0x00000002UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SHIFT 1
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_MASK 0x00000004UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SHIFT 2
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_MASK 0x00000008UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SHIFT 3
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_MASK 0x00000010UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SHIFT 4
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_MASK 0x00000020UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SHIFT 5
++#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_MASK 0x00000100UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SHIFT 8
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_MASK 0x00000200UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SHIFT 9
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SIGNED 0
++
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_MASK 0x00000400UL
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SHIFT 10
++#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SIGNED 0
++
++/* Register EUR_CR_OCP_DEBUG_CONFIG */
++#define EUR_CR_OCP_DEBUG_CONFIG 0xFF08
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_MASK 0x00000003UL
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SHIFT 0
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_MASK 0x0000000CUL
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SHIFT 2
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_MASK 0x00000010UL
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SHIFT 4
++#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_MASK 0x00000020UL
++#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SHIFT 5
++#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK 0x80000000UL
++#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SHIFT 31
++#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SIGNED 0
++
++/* Register EUR_CR_OCP_DEBUG_STATUS */
++#define EUR_CR_OCP_DEBUG_STATUS 0xFF0C
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_MASK 0x00000003UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SHIFT 0
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_MASK 0x00000004UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SHIFT 2
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_MASK 0x00000008UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SHIFT 3
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_MASK 0x00000030UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SHIFT 4
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_MASK 0x000000C0UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SHIFT 6
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_MASK 0x00000300UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SHIFT 8
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_MASK 0x00000400UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SHIFT 10
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_MASK 0x00000800UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SHIFT 11
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_MASK 0x00001000UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SHIFT 12
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_MASK 0x00006000UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SHIFT 13
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_MASK 0x00008000UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SHIFT 15
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_MASK 0x00010000UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SHIFT 16
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_MASK 0x00020000UL
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SHIFT 17
++#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_MASK 0x001C0000UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SHIFT 18
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_MASK 0x03E00000UL
++#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SHIFT 21
++#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_MASK 0x04000000UL
++#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SHIFT 26
++#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_MASK 0x08000000UL
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SHIFT 27
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_MASK 0x10000000UL
++#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SHIFT 28
++#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_MASK 0x20000000UL
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SHIFT 29
++#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_MASK 0x40000000UL
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SHIFT 30
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SIGNED 0
++
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_MASK 0x80000000UL
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SHIFT 31
++#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SIGNED 0
++
++
++#endif /* _OCPDEFS_H_ */
++
++/*****************************************************************************
++ End of file (ocpdefs.h)
++*****************************************************************************/
+diff --git a/drivers/gpu/pvr/oemfuncs.h b/drivers/gpu/pvr/oemfuncs.h
+new file mode 100644
+index 0000000..a957a21
+--- /dev/null
++++ b/drivers/gpu/pvr/oemfuncs.h
+@@ -0,0 +1,41 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#include <linux/fs.h>
++
++struct PVRSRV_DC_OEM_JTABLE {
++ long (*pfnOEMBridgeDispatch)(struct file *, unsigned, unsigned long);
++ void *pvDummy1;
++ void *pvDummy2;
++ void *pvDummy3;
++};
++
++#define OEM_GET_EXT_FUNCS (1<<1)
++
++#endif
+diff --git a/drivers/gpu/pvr/omaplfb.h b/drivers/gpu/pvr/omaplfb.h
+new file mode 100644
+index 0000000..fe4b2bb
+--- /dev/null
++++ b/drivers/gpu/pvr/omaplfb.h
+@@ -0,0 +1,140 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __OMAPLFB_H__
++#define __OMAPLFB_H__
++
++#define OMAPLCD_IRQ 25
++
++#define OMAPLCD_SYSCONFIG 0x0410
++#define OMAPLCD_CONFIG 0x0444
++#define OMAPLCD_DEFAULT_COLOR0 0x044C
++#define OMAPLCD_TIMING_H 0x0464
++#define OMAPLCD_TIMING_V 0x0468
++#define OMAPLCD_POL_FREQ 0x046C
++#define OMAPLCD_DIVISOR 0x0470
++#define OMAPLCD_SIZE_DIG 0x0478
++#define OMAPLCD_SIZE_LCD 0x047C
++#define OMAPLCD_GFX_POSITION 0x0488
++#define OMAPLCD_GFX_SIZE 0x048C
++#define OMAPLCD_GFX_ATTRIBUTES 0x04a0
++#define OMAPLCD_GFX_FIFO_THRESHOLD 0x04a4
++#define OMAPLCD_GFX_WINDOW_SKIP 0x04b4
++
++#define OMAPLCD_IRQSTATUS 0x0418
++#define OMAPLCD_IRQENABLE 0x041c
++#define OMAPLCD_CONTROL 0x0440
++#define OMAPLCD_GFX_BA0 0x0480
++#define OMAPLCD_GFX_BA1 0x0484
++#define OMAPLCD_GFX_ROW_INC 0x04ac
++#define OMAPLCD_GFX_PIX_INC 0x04b0
++#define OMAPLCD_VID1_BA0 0x04bc
++#define OMAPLCD_VID1_BA1 0x04c0
++#define OMAPLCD_VID1_ROW_INC 0x04d8
++#define OMAPLCD_VID1_PIX_INC 0x04dc
++
++#define OMAP_CONTROL_GODIGITAL (1 << 6)
++#define OMAP_CONTROL_GOLCD (1 << 5)
++#define OMAP_CONTROL_DIGITALENABLE (1 << 1)
++#define OMAP_CONTROL_LCDENABLE (1 << 0)
++
++#define OMAPLCD_INTMASK_VSYNC (1 << 1)
++#define OMAPLCD_INTMASK_OFF 0
++
++struct OMAPLFB_BUFFER {
++ struct IMG_SYS_PHYADDR sSysAddr;
++ void __iomem *sCPUVAddr;
++ u32 ui32BufferSize;
++ struct PVRSRV_SYNC_DATA *psSyncData;
++ struct OMAPLFB_BUFFER *psNext;
++};
++
++struct OMAPLFB_SWAPCHAIN {
++
++ u32 ui32BufferCount;
++ struct OMAPLFB_BUFFER *psBuffer;
++ struct PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable;
++ IMG_BOOL bBlanked;
++};
++
++struct OMAPLFB_FBINFO {
++ struct IMG_SYS_PHYADDR sSysAddr;
++ void __iomem *sCPUVAddr;
++ u32 ui32FBSize;
++ u32 ui32BufferSize;
++ u32 ui32RoundedBufferSize;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32ByteStride;
++
++ enum PVRSRV_PIXEL_FORMAT ePixelFormat;
++};
++
++struct OMAPLFB_DEVINFO {
++ u32 ui32DeviceID;
++ struct DISPLAY_INFO sDisplayInfo;
++ struct OMAPLFB_BUFFER sSystemBuffer;
++ struct DISPLAY_FORMAT sDisplayFormat;
++ struct DISPLAY_DIMS sDisplayDim;
++ struct PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable;
++ struct PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable;
++ struct OMAPLFB_FBINFO sFBInfo;
++ u32 ui32RefCount;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain;
++ struct IMG_DEV_VIRTADDR sDisplayDevVAddr;
++ struct fb_info *psLINFBInfo;
++ struct notifier_block sLINNotifBlock;
++};
++
++#define OMAPLFB_PAGE_SIZE 4096
++#define OMAPLFB_PAGE_MASK (OMAPLFB_PAGE_SIZE - 1)
++#define OMAPLFB_PAGE_TRUNC (~OMAPLFB_PAGE_MASK)
++
++#define OMAPLFB_PAGE_ROUNDUP(x) (((x) + OMAPLFB_PAGE_MASK) & OMAPLFB_PAGE_TRUNC)
++
++#ifdef DEBUG
++#define DEBUG_PRINTK(x) printk x
++#else
++#define DEBUG_PRINTK(x)
++#endif
++
++#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
++#define DRVNAME "omaplfb"
++#define DEVNAME DRVNAME
++#define DRIVER_PREFIX DRVNAME
++
++enum PVRSRV_ERROR OMAPLFBInit(void);
++enum PVRSRV_ERROR OMAPLFBDeinit(void);
++
++void OMAPLFBDriverSuspend(void);
++void OMAPLFBDriverResume(void);
++
++void *OMAPLFBAllocKernelMem(u32 ui32Size);
++void OMAPLFBFreeKernelMem(void *pvMem);
++enum PVRSRV_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName,
++ IMG_BOOL (**ppfnFuncTable)(struct PVRSRV_DC_DISP2SRV_KMJTABLE *));
++
++#endif
+diff --git a/drivers/gpu/pvr/omaplfb_displayclass.c b/drivers/gpu/pvr/omaplfb_displayclass.c
+new file mode 100644
+index 0000000..a3bcc96
+--- /dev/null
++++ b/drivers/gpu/pvr/omaplfb_displayclass.c
+@@ -0,0 +1,852 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/notifier.h>
++
++#include <asm/div64.h>
++#include <video/sgx-util.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "omaplfb.h"
++
++static void *gpvAnchor;
++
++static int fb_idx;
++
++#define OMAPLFB_COMMAND_COUNT 1
++
++static IMG_BOOL (*pfnGetPVRJTable)(struct PVRSRV_DC_DISP2SRV_KMJTABLE *);
++
++static struct OMAPLFB_DEVINFO *GetAnchorPtr(void)
++{
++ return (struct OMAPLFB_DEVINFO *)gpvAnchor;
++}
++
++static void SetAnchorPtr(struct OMAPLFB_DEVINFO *psDevInfo)
++{
++ gpvAnchor = (void *) psDevInfo;
++}
++
++static int FrameBufferEvents(struct notifier_block *psNotif,
++ unsigned long event, void *data)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain;
++ struct fb_event *psFBEvent = (struct fb_event *)data;
++
++ if (event != FB_EVENT_BLANK)
++ return 0;
++
++ psDevInfo = GetAnchorPtr();
++ psSwapChain = psDevInfo->psSwapChain;
++ psSwapChain->bBlanked = (*(int *)psFBEvent->data != 0);
++
++ return 0;
++}
++
++static enum PVRSRV_ERROR UnblankDisplay(struct OMAPLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++ acquire_console_sem();
++ res = fb_blank(psDevInfo->psLINFBInfo, 0);
++ release_console_sem();
++ if (res != 0) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_blank failed (%d)", res);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR EnableLFBEventNotification(struct OMAPLFB_DEVINFO
++ *psDevInfo)
++{
++ int res;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
++ enum PVRSRV_ERROR eError;
++
++ memset(&psDevInfo->sLINNotifBlock, 0,
++ sizeof(psDevInfo->sLINNotifBlock));
++
++ psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
++
++ psSwapChain->bBlanked = IMG_FALSE;
++
++ res = fb_register_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_register_client failed (%d)", res);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR DisableLFBEventNotification(struct OMAPLFB_DEVINFO
++ *psDevInfo)
++{
++ int res;
++
++ res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_unregister_client failed (%d)", res);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR OpenDCDevice(u32 ui32DeviceID, void **phDevice,
++ struct PVRSRV_SYNC_DATA *psSystemBufferSyncData)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ enum PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(ui32DeviceID);
++
++ psDevInfo = GetAnchorPtr();
++
++ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return eError;
++ }
++
++ *phDevice = (void *) psDevInfo;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR CloseDCDevice(void *hDevice)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevice);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR EnumDCFormats(void *hDevice, u32 *pui32NumFormats,
++ struct DISPLAY_FORMAT *psFormat)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++
++ if (!hDevice || !pui32NumFormats)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++
++ *pui32NumFormats = 1;
++
++ if (psFormat)
++ psFormat[0] = psDevInfo->sDisplayFormat;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR EnumDCDims(void *hDevice,
++ struct DISPLAY_FORMAT *psFormat,
++ u32 *pui32NumDims, struct DISPLAY_DIMS *psDim)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++
++ if (!hDevice || !psFormat || !pui32NumDims)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++
++ *pui32NumDims = 1;
++
++ if (psDim)
++ psDim[0] = psDevInfo->sDisplayDim;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR GetDCSystemBuffer(void *hDevice, void **phBuffer)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++
++ if (!hDevice || !phBuffer)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++
++ *phBuffer = (void *) &psDevInfo->sSystemBuffer;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR GetDCInfo(void *hDevice, struct DISPLAY_INFO *psDCInfo)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++
++ if (!hDevice || !psDCInfo)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++
++ *psDCInfo = psDevInfo->sDisplayInfo;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR GetDCBufferAddr(void *hDevice, void *hBuffer,
++ struct IMG_SYS_PHYADDR **ppsSysAddr,
++ u32 *pui32ByteSize,
++ void __iomem **ppvCpuVAddr,
++ void **phOSMapInfo,
++ IMG_BOOL *pbIsContiguous)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ struct OMAPLFB_BUFFER *psSystemBuffer;
++
++ if (!hDevice)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++
++ if (!hBuffer)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ psSystemBuffer = (struct OMAPLFB_BUFFER *)hBuffer;
++
++ if (!ppsSysAddr)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ *ppsSysAddr = &psSystemBuffer->sSysAddr;
++
++ if (!pui32ByteSize)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ *pui32ByteSize = psDevInfo->sFBInfo.ui32BufferSize;
++
++ if (ppvCpuVAddr)
++ *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
++
++ if (phOSMapInfo)
++ *phOSMapInfo = (void *) 0;
++
++ if (pbIsContiguous)
++ *pbIsContiguous = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR CreateDCSwapChain(void *hDevice, u32 ui32Flags,
++ struct DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ struct DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ u32 ui32BufferCount,
++ struct PVRSRV_SYNC_DATA **ppsSyncData,
++ u32 ui32OEMFlags, void **phSwapChain,
++ u32 *pui32SwapChainID)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain;
++ struct OMAPLFB_BUFFER *psBuffer;
++ u32 i;
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_UNREFERENCED_PARAMETER(ui32OEMFlags);
++ PVR_UNREFERENCED_PARAMETER(pui32SwapChainID);
++
++ if (!hDevice || !psDstSurfAttrib || !psSrcSurfAttrib ||
++ !ppsSyncData || !phSwapChain)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++
++ if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0)
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++
++ if (psDevInfo->psSwapChain != NULL)
++ return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
++
++ if (ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
++ return PVRSRV_ERROR_TOOMANYBUFFERS;
++
++ if ((psDevInfo->sFBInfo.ui32RoundedBufferSize * ui32BufferCount) >
++ psDevInfo->sFBInfo.ui32FBSize)
++ return PVRSRV_ERROR_TOOMANYBUFFERS;
++
++ if (psDstSurfAttrib->pixelformat !=
++ psDevInfo->sDisplayFormat.pixelformat ||
++ psDstSurfAttrib->sDims.ui32ByteStride !=
++ psDevInfo->sDisplayDim.ui32ByteStride ||
++ psDstSurfAttrib->sDims.ui32Width !=
++ psDevInfo->sDisplayDim.ui32Width ||
++ psDstSurfAttrib->sDims.ui32Height !=
++ psDevInfo->sDisplayDim.ui32Height)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ if (psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat ||
++ psDstSurfAttrib->sDims.ui32ByteStride !=
++ psSrcSurfAttrib->sDims.ui32ByteStride ||
++ psDstSurfAttrib->sDims.ui32Width !=
++ psSrcSurfAttrib->sDims.ui32Width ||
++ psDstSurfAttrib->sDims.ui32Height !=
++ psSrcSurfAttrib->sDims.ui32Height)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ psSwapChain = (struct OMAPLFB_SWAPCHAIN *)
++ OMAPLFBAllocKernelMem(sizeof(struct OMAPLFB_SWAPCHAIN));
++ if (!psSwapChain)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ psBuffer = (struct OMAPLFB_BUFFER *)
++ OMAPLFBAllocKernelMem(sizeof(struct OMAPLFB_BUFFER) *
++ ui32BufferCount);
++ if (!psBuffer) {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeSwapChain;
++ }
++
++ psSwapChain->ui32BufferCount = ui32BufferCount;
++ psSwapChain->psBuffer = psBuffer;
++ psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
++
++ for (i = 0; i < ui32BufferCount - 1; i++)
++ psBuffer[i].psNext = &psBuffer[i + 1];
++
++ psBuffer[i].psNext = &psBuffer[0];
++
++ for (i = 0; i < ui32BufferCount; i++) {
++ u32 ui32BufferOffset = i *
++ psDevInfo->sFBInfo.ui32RoundedBufferSize;
++
++ psBuffer[i].psSyncData = ppsSyncData[i];
++
++ psBuffer[i].sSysAddr.uiAddr =
++ psDevInfo->sFBInfo.sSysAddr.uiAddr + ui32BufferOffset;
++ psBuffer[i].sCPUVAddr =
++ psDevInfo->sFBInfo.sCPUVAddr + ui32BufferOffset;
++ }
++
++ psDevInfo->psSwapChain = psSwapChain;
++
++ eError = EnableLFBEventNotification(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ printk(DRIVER_PREFIX
++ ": Couldn't enable framebuffer event notification\n");
++ goto ErrorFreeBuffer;
++ }
++
++ *phSwapChain = (void *) psSwapChain;
++
++ return PVRSRV_OK;
++
++ErrorFreeBuffer:
++ OMAPLFBFreeKernelMem(psBuffer);
++ErrorFreeSwapChain:
++ OMAPLFBFreeKernelMem(psSwapChain);
++
++ return eError;
++}
++
++static enum PVRSRV_ERROR DestroyDCSwapChain(void *hDevice, void *hSwapChain)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain;
++ enum PVRSRV_ERROR eError;
++
++ if (!hDevice || !hSwapChain)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++ psSwapChain = (struct OMAPLFB_SWAPCHAIN *)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ eError = DisableLFBEventNotification(psDevInfo);
++ if (eError != PVRSRV_OK)
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": Couldn't disable framebuffer event notification\n");
++
++ psDevInfo->psSwapChain = NULL;
++
++ OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
++ OMAPLFBFreeKernelMem(psSwapChain);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR SetDCDstRect(void *hDevice,
++ void *hSwapChain, struct IMG_RECT *psRect)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevice);
++ PVR_UNREFERENCED_PARAMETER(hSwapChain);
++ PVR_UNREFERENCED_PARAMETER(psRect);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static enum PVRSRV_ERROR SetDCSrcRect(void *hDevice,
++ void *hSwapChain, struct IMG_RECT *psRect)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevice);
++ PVR_UNREFERENCED_PARAMETER(hSwapChain);
++ PVR_UNREFERENCED_PARAMETER(psRect);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static enum PVRSRV_ERROR SetDCDstColourKey(void *hDevice, void *hSwapChain,
++ u32 ui32CKColour)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevice);
++ PVR_UNREFERENCED_PARAMETER(hSwapChain);
++ PVR_UNREFERENCED_PARAMETER(ui32CKColour);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static enum PVRSRV_ERROR SetDCSrcColourKey(void *hDevice, void *hSwapChain,
++ u32 ui32CKColour)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevice);
++ PVR_UNREFERENCED_PARAMETER(hSwapChain);
++ PVR_UNREFERENCED_PARAMETER(ui32CKColour);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static enum PVRSRV_ERROR GetDCBuffers(void *hDevice, void *hSwapChain,
++ u32 *pui32BufferCount, void **phBuffer)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain;
++ u32 i;
++
++ if (!hDevice || !hSwapChain || !pui32BufferCount || !phBuffer)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)hDevice;
++ psSwapChain = (struct OMAPLFB_SWAPCHAIN *)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ *pui32BufferCount = psSwapChain->ui32BufferCount;
++
++ for (i = 0; i < psSwapChain->ui32BufferCount; i++)
++ phBuffer[i] = (void *) &psSwapChain->psBuffer[i];
++
++ return PVRSRV_OK;
++}
++
++static IMG_BOOL ProcessFlip(void *hCmdCookie, u32 ui32DataSize, void *pvData)
++{
++ struct DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ struct OMAPLFB_DEVINFO *psDevInfo;
++ struct OMAPLFB_BUFFER *psBuffer;
++ struct OMAPLFB_SWAPCHAIN *psSwapChain;
++
++ if (!hCmdCookie || !pvData)
++ return IMG_FALSE;
++
++ psFlipCmd = (struct DISPLAYCLASS_FLIP_COMMAND *)pvData;
++
++ if (psFlipCmd == NULL
++ || sizeof(struct DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
++ return IMG_FALSE;
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)psFlipCmd->hExtDevice;
++
++ psBuffer = (struct OMAPLFB_BUFFER *)psFlipCmd->hExtBuffer;
++ psSwapChain = (struct OMAPLFB_SWAPCHAIN *)psFlipCmd->hExtSwapChain;
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++
++ return IMG_TRUE;
++}
++
++static void CalcSwapChainSize(struct OMAPLFB_DEVINFO *psDevInfo)
++{
++ if (psDevInfo->sFBInfo.ui32RoundedBufferSize)
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers =
++ psDevInfo->sFBInfo.ui32FBSize /
++ psDevInfo->sFBInfo.ui32RoundedBufferSize;
++ else
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 0;
++
++ if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers == 0) {
++ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 0;
++ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 0;
++ } else {
++ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
++ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
++ }
++
++ psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Maximum number of swap chain buffers: %u\n",
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
++}
++
++static void SetDevinfo(struct OMAPLFB_DEVINFO *psDevInfo)
++{
++ struct OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
++ struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
++ unsigned long FBSize;
++
++ FBSize = (psLINFBInfo->screen_size) != 0 ?
++ psLINFBInfo->screen_size : psLINFBInfo->fix.smem_len;
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer physical address: 0x%lx\n",
++ psLINFBInfo->fix.smem_start));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer virtual address: 0x%lx\n",
++ (unsigned long)psLINFBInfo->screen_base));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer size: %lu\n", FBSize));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer virtual width: %u\n",
++ psLINFBInfo->var.xres_virtual));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer virtual height: %u\n",
++ psLINFBInfo->var.yres_virtual));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer width: %u\n", psLINFBInfo->var.xres));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer height: %u\n", psLINFBInfo->var.yres));
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Framebuffer stride: %u\n",
++ psLINFBInfo->fix.line_length));
++
++ psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
++ psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base;
++
++ psPVRFBInfo->ui32Width = psLINFBInfo->var.xres_virtual;
++ psPVRFBInfo->ui32ByteStride = psLINFBInfo->fix.line_length;
++ psPVRFBInfo->ui32FBSize = FBSize;
++
++ /* Try double buffering */
++ psPVRFBInfo->ui32Height = psLINFBInfo->var.yres_virtual >> 1;
++ psPVRFBInfo->ui32BufferSize = psPVRFBInfo->ui32ByteStride *
++ psPVRFBInfo->ui32Height;
++ psPVRFBInfo->ui32RoundedBufferSize =
++ sgx_buffer_align(psPVRFBInfo->ui32ByteStride,
++ psPVRFBInfo->ui32BufferSize);
++
++ /* If the buffers aren't aligned assume single buffering */
++ if (psPVRFBInfo->ui32BufferSize != psPVRFBInfo->ui32RoundedBufferSize) {
++ psPVRFBInfo->ui32Height = psLINFBInfo->var.yres_virtual;
++ psPVRFBInfo->ui32BufferSize = psPVRFBInfo->ui32ByteStride *
++ psPVRFBInfo->ui32Height;
++ psPVRFBInfo->ui32RoundedBufferSize =
++ sgx_buffer_align(psPVRFBInfo->ui32ByteStride,
++ psPVRFBInfo->ui32BufferSize);
++ }
++
++ CalcSwapChainSize(psDevInfo);
++
++ if (psLINFBInfo->var.bits_per_pixel == 16) {
++ if ((psLINFBInfo->var.red.length == 5) &&
++ (psLINFBInfo->var.green.length == 6) &&
++ (psLINFBInfo->var.blue.length == 5) &&
++ (psLINFBInfo->var.red.offset == 11) &&
++ (psLINFBInfo->var.green.offset == 5) &&
++ (psLINFBInfo->var.blue.offset == 0) &&
++ (psLINFBInfo->var.red.msb_right == 0))
++ psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565;
++ else
++ printk("Unknown FB format\n");
++ } else if (psLINFBInfo->var.bits_per_pixel == 32) {
++ if ((psLINFBInfo->var.transp.length == 8) &&
++ (psLINFBInfo->var.red.length == 8) &&
++ (psLINFBInfo->var.green.length == 8) &&
++ (psLINFBInfo->var.blue.length == 8) &&
++ (psLINFBInfo->var.transp.offset == 24) &&
++ (psLINFBInfo->var.red.offset == 16) &&
++ (psLINFBInfo->var.green.offset == 8) &&
++ (psLINFBInfo->var.blue.offset == 0) &&
++ (psLINFBInfo->var.red.msb_right == 0))
++ psPVRFBInfo->ePixelFormat =
++ PVRSRV_PIXEL_FORMAT_ARGB8888;
++ else if ((psLINFBInfo->var.transp.length == 0) &&
++ (psLINFBInfo->var.red.length == 8) &&
++ (psLINFBInfo->var.green.length == 8) &&
++ (psLINFBInfo->var.blue.length == 8) &&
++ (psLINFBInfo->var.transp.offset == 0) &&
++ (psLINFBInfo->var.red.offset == 16) &&
++ (psLINFBInfo->var.green.offset == 8) &&
++ (psLINFBInfo->var.blue.offset == 0) &&
++ (psLINFBInfo->var.red.msb_right == 0))
++ psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB888;
++ else
++ printk(KERN_ERR "Unknown FB format\n");
++ } else {
++ printk(KERN_ERR "Unknown FB format\n");
++ }
++
++ psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
++ psDevInfo->sDisplayDim.ui32Width = psDevInfo->sFBInfo.ui32Width;
++ psDevInfo->sDisplayDim.ui32Height = psDevInfo->sFBInfo.ui32Height;
++ psDevInfo->sDisplayDim.ui32ByteStride =
++ psDevInfo->sFBInfo.ui32ByteStride;
++ psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
++ psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
++ psDevInfo->sSystemBuffer.ui32BufferSize =
++ psDevInfo->sFBInfo.ui32RoundedBufferSize;
++}
++
++static struct FB_EVENTS {
++ struct notifier_block notif;
++ struct OMAPLFB_DEVINFO *psDevInfo;
++} gFBEventsData;
++
++static int FBEvents(struct notifier_block *psNotif,
++ unsigned long event, void *data)
++{
++ if (event == FB_EVENT_MODE_CHANGE) {
++ struct FB_EVENTS *psEvents =
++ container_of(psNotif, struct FB_EVENTS, notif);
++ SetDevinfo(psEvents->psDevInfo);
++ }
++ return 0;
++}
++
++static enum PVRSRV_ERROR InitDev(struct OMAPLFB_DEVINFO *psDevInfo)
++{
++ struct fb_info *psLINFBInfo;
++ struct module *psLINFBOwner;
++ struct OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
++ enum PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ acquire_console_sem();
++
++ if (fb_idx < 0 || fb_idx >= num_registered_fb) {
++ eError = PVRSRV_ERROR_INVALID_DEVICE;
++ goto errRelSem;
++ }
++
++ psLINFBInfo = registered_fb[fb_idx];
++
++ psLINFBOwner = psLINFBInfo->fbops->owner;
++ if (!try_module_get(psLINFBOwner)) {
++ printk(KERN_INFO DRIVER_PREFIX
++ ": Couldn't get framebuffer module\n");
++
++ goto errRelSem;
++ }
++
++ if (psLINFBInfo->fbops->fb_open != NULL) {
++ int res;
++
++ res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0);
++ if (res != 0) {
++ printk(KERN_INFO DRIVER_PREFIX
++ ": Couldn't open framebuffer: %d\n", res);
++
++ goto errModPut;
++ }
++ }
++
++ psDevInfo->psLINFBInfo = psLINFBInfo;
++
++ SetDevinfo(psDevInfo);
++
++ gFBEventsData.notif.notifier_call = FBEvents;
++ gFBEventsData.psDevInfo = psDevInfo;
++ fb_register_client(&gFBEventsData.notif);
++
++ psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
++ psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;
++
++ eError = PVRSRV_OK;
++ goto errRelSem;
++
++errModPut:
++ module_put(psLINFBOwner);
++errRelSem:
++ release_console_sem();
++ return eError;
++}
++
++static void DeInitDev(struct OMAPLFB_DEVINFO *psDevInfo)
++{
++ struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
++ struct module *psLINFBOwner;
++
++ acquire_console_sem();
++
++ fb_unregister_client(&gFBEventsData.notif);
++
++ psLINFBOwner = psLINFBInfo->fbops->owner;
++
++ if (psLINFBInfo->fbops->fb_release != NULL)
++ (void)psLINFBInfo->fbops->fb_release(psLINFBInfo, 0);
++
++ module_put(psLINFBOwner);
++
++ release_console_sem();
++}
++
++enum PVRSRV_ERROR OMAPLFBInit(void)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo;
++
++ psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL) {
++ IMG_BOOL (*pfnCmdProcList[OMAPLFB_COMMAND_COUNT])
++ (void *, u32, void *);
++ u32 aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
++
++ psDevInfo = (struct OMAPLFB_DEVINFO *)
++ OMAPLFBAllocKernelMem(sizeof(struct OMAPLFB_DEVINFO));
++
++ if (!psDevInfo)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ memset(psDevInfo, 0, sizeof(struct OMAPLFB_DEVINFO));
++
++ SetAnchorPtr((void *) psDevInfo);
++
++ psDevInfo->ui32RefCount = 0;
++
++ if (InitDev(psDevInfo) != PVRSRV_OK)
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++ if (OMAPLFBGetLibFuncAddr("PVRGetDisplayClassJTable",
++ &pfnGetPVRJTable) != PVRSRV_OK)
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++ if (!(*pfnGetPVRJTable) (&psDevInfo->sPVRJTable))
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++ psDevInfo->psSwapChain = NULL;
++
++ CalcSwapChainSize(psDevInfo);
++
++ strncpy(psDevInfo->sDisplayInfo.szDisplayName,
++ DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
++
++ psDevInfo->sDCJTable.ui32TableSize =
++ sizeof(struct PVRSRV_DC_SRV2DISP_KMJTABLE);
++ psDevInfo->sDCJTable.owner = THIS_MODULE;
++ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
++ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
++ psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
++ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
++ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
++ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
++ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
++ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
++ psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
++ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
++ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
++ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
++ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
++ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
++ psDevInfo->sDCJTable.pfnSetDCState = NULL;
++
++ if (psDevInfo->sPVRJTable.
++ pfnPVRSRVRegisterDCDevice(&psDevInfo->sDCJTable,
++ &psDevInfo->ui32DeviceID) !=
++ PVRSRV_OK)
++ return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++
++ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
++
++ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
++ aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
++
++ if (psDevInfo->sPVRJTable.
++ pfnPVRSRVRegisterCmdProcList(psDevInfo->ui32DeviceID,
++ &pfnCmdProcList[0],
++ aui32SyncCountList,
++ OMAPLFB_COMMAND_COUNT) !=
++ PVRSRV_OK) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": Can't register callback\n");
++ return PVRSRV_ERROR_CANT_REGISTER_CALLBACK;
++ }
++
++ }
++
++ psDevInfo->ui32RefCount++;
++
++ return PVRSRV_OK;
++
++}
++
++enum PVRSRV_ERROR OMAPLFBDeinit(void)
++{
++ struct OMAPLFB_DEVINFO *psDevInfo, *psDevFirst;
++
++ psDevFirst = GetAnchorPtr();
++ psDevInfo = psDevFirst;
++
++ if (psDevInfo == NULL)
++ return PVRSRV_ERROR_GENERIC;
++
++ psDevInfo->ui32RefCount--;
++
++ if (psDevInfo->ui32RefCount == 0) {
++ struct PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable =
++ &psDevInfo->sPVRJTable;
++ if (psDevInfo->sPVRJTable.
++ pfnPVRSRVRemoveCmdProcList(psDevInfo->ui32DeviceID,
++ OMAPLFB_COMMAND_COUNT) !=
++ PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ if (psJTable->
++ pfnPVRSRVRemoveDCDevice(psDevInfo->ui32DeviceID) !=
++ PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ DeInitDev(psDevInfo);
++
++ OMAPLFBFreeKernelMem(psDevInfo);
++ }
++
++ SetAnchorPtr(NULL);
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/pvr/omaplfb_linux.c b/drivers/gpu/pvr/omaplfb_linux.c
+new file mode 100644
+index 0000000..9ed02c5
+--- /dev/null
++++ b/drivers/gpu/pvr/omaplfb_linux.c
+@@ -0,0 +1,168 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/module.h>
++
++#include <linux/pci.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++
++#include <linux/platform_device.h>
++
++#include <linux/io.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "omaplfb.h"
++#include "pvrmodule.h"
++
++#include <plat/display.h>
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++
++#define unref__ __attribute__ ((unused))
++
++void *OMAPLFBAllocKernelMem(u32 ui32Size)
++{
++ return kmalloc(ui32Size, GFP_KERNEL);
++}
++
++void OMAPLFBFreeKernelMem(void *pvMem)
++{
++ kfree(pvMem);
++}
++
++enum PVRSRV_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName,
++ IMG_BOOL (**ppfnFuncTable)(struct PVRSRV_DC_DISP2SRV_KMJTABLE *))
++{
++ if (strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ *ppfnFuncTable = PVRGetDisplayClassJTable;
++
++ return PVRSRV_OK;
++}
++
++static int OMAPLFBDriverSuspend_Entry(struct platform_device unref__ * pDevice,
++ pm_message_t unref__ state)
++{
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": OMAPLFBDriverSuspend_Entry\n"));
++ return 0;
++}
++
++static int OMAPLFBDriverResume_Entry(struct platform_device unref__ * pDevice)
++{
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverResume_Entry\n"));
++ return 0;
++}
++
++static void OMAPLFBDriverShutdown_Entry(struct platform_device unref__ *
++ pDevice)
++{
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": OMAPLFBDriverShutdown_Entry\n"));
++}
++
++static void OMAPLFBDeviceRelease_Entry(struct device unref__ * pDevice)
++{
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": OMAPLFBDriverRelease_Entry\n"));
++}
++
++static struct platform_driver omaplfb_driver = {
++ .driver = {
++ .name = DRVNAME,
++ },
++ .suspend = OMAPLFBDriverSuspend_Entry,
++ .resume = OMAPLFBDriverResume_Entry,
++ .shutdown = OMAPLFBDriverShutdown_Entry,
++};
++
++static struct platform_device omaplfb_device = {
++ .name = DEVNAME,
++ .id = -1,
++ .dev = {
++ .release = OMAPLFBDeviceRelease_Entry
++ }
++};
++
++static int __init OMAPLFB_Init(void)
++{
++ int error;
++
++ if (OMAPLFBInit() != PVRSRV_OK) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": OMAPLFB_Init: OMAPLFBInit failed\n");
++ return -ENODEV;
++ }
++ error = platform_driver_register(&omaplfb_driver);
++ if (error) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": OMAPLFB_Init: Unable to register platform driver (%d)\n",
++ error);
++
++ goto ExitDeinit;
++ }
++
++ error = platform_device_register(&omaplfb_device);
++ if (error) {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": OMAPLFB_Init: Unable to register platform device (%d)\n",
++ error);
++
++ goto ExitDriverUnregister;
++ }
++
++ return 0;
++
++ExitDriverUnregister:
++ platform_driver_unregister(&omaplfb_driver);
++
++ExitDeinit:
++ if (OMAPLFBDeinit() != PVRSRV_OK)
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": OMAPLFB_Init: OMAPLFBDeinit failed\n");
++
++ return -ENODEV;
++}
++
++static void __exit OMAPLFB_Cleanup(void)
++{
++ platform_device_unregister(&omaplfb_device);
++ platform_driver_unregister(&omaplfb_driver);
++
++ if (OMAPLFBDeinit() != PVRSRV_OK)
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": OMAPLFB_Cleanup: OMAPLFBDeinit failed\n");
++}
++
++module_init(OMAPLFB_Init);
++module_exit(OMAPLFB_Cleanup);
+diff --git a/drivers/gpu/pvr/osfunc.c b/drivers/gpu/pvr/osfunc.c
+new file mode 100644
+index 0000000..07f1662
+--- /dev/null
++++ b/drivers/gpu/pvr/osfunc.c
+@@ -0,0 +1,1585 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/io.h>
++#include <asm/page.h>
++#include <asm/system.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/hugetlb.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/uaccess.h>
++#include <linux/spinlock.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "event.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS (100)
++
++#define HOST_ALLOC_MEM_USING_KMALLOC ((void *)0)
++#define HOST_ALLOC_MEM_USING_VMALLOC ((void *)1)
++
++#define LINUX_KMALLOC_LIMIT PAGE_SIZE /* 4k */
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++enum PVRSRV_ERROR OSAllocMem(u32 ui32Flags, u32 ui32Size,
++ void **ppvCpuVAddr, void **phBlockAlloc)
++#else
++enum PVRSRV_ERROR _OSAllocMem(u32 ui32Flags, u32 ui32Size,
++ void **ppvCpuVAddr, void **phBlockAlloc,
++ char *pszFilename, u32 ui32Line)
++#endif
++{
++ u32 ui32Threshold;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ /* determine whether to go straight to vmalloc */
++ ui32Threshold = LINUX_KMALLOC_LIMIT;
++
++ if (ui32Size > ui32Threshold) {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED,
++ pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED);
++#endif
++ if (!*ppvCpuVAddr)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ if (phBlockAlloc)
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC;
++ } else {
++ /* default - try kmalloc first */
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _KMallocWrapper(ui32Size, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++
++ if (!*ppvCpuVAddr)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ if (phBlockAlloc)
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC;
++
++ }
++
++ return PVRSRV_OK;
++}
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++void OSFreeMem(u32 ui32Flags, u32 ui32Size, void *pvCpuVAddr, void *hBlockAlloc)
++#else
++void _OSFreeMem(u32 ui32Flags, u32 ui32Size, void *pvCpuVAddr,
++ void *hBlockAlloc, char *pszFilename, u32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ if (ui32Size > LINUX_KMALLOC_LIMIT) {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ VFreeWrapper(pvCpuVAddr);
++#endif
++ } else {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ KFreeWrapper(pvCpuVAddr);
++#endif
++ }
++}
++
++enum PVRSRV_ERROR OSAllocPages(u32 ui32AllocFlags, u32 ui32Size,
++ u32 ui32PageSize, void **ppvCpuVAddr,
++ void **phOSMemHandle)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
++
++ switch (ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea =
++ NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea =
++ NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ psLinuxMemArea =
++ NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n",
++ ui32AllocFlags);
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (void *) 0;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (ui32AllocFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED))
++ inv_cache_mem_area(psLinuxMemArea);
++
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++ *phOSMemHandle = psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSFreePages(u32 ui32AllocFlags, u32 ui32Bytes,
++ void *pvCpuVAddr, void *hOSMemHandle)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++ psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
++
++ switch (ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++ ui32AllocFlags, ui32Bytes, pvCpuVAddr,
++ hOSMemHandle);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "%s: invalid flags 0x%x\n",
++ __func__, ui32AllocFlags);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSGetSubMemHandle(void *hOSMemHandle, u32 ui32ByteOffset,
++ u32 ui32Bytes, u32 ui32Flags,
++ void **phOSMemHandleRet)
++{
++ struct LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++ enum PVRSRV_ERROR eError;
++
++ psParentLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
++
++ psLinuxMemArea =
++ NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++ if (!psLinuxMemArea) {
++ *phOSMemHandleRet = NULL;
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ *phOSMemHandleRet = psLinuxMemArea;
++
++ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++ return PVRSRV_OK;
++
++ eError = PVRMMapRegisterArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ goto failed_register_area;
++
++ return PVRSRV_OK;
++
++failed_register_area:
++ *phOSMemHandleRet = NULL;
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ return eError;
++}
++
++enum PVRSRV_ERROR OSReleaseSubMemHandle(void *hOSMemHandle, u32 ui32Flags)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ enum PVRSRV_ERROR eError;
++
++ psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++ if ((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0) {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ return eError;
++ }
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++struct IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(void *hOSMemHandle,
++ u32 ui32ByteOffset)
++{
++ PVR_ASSERT(hOSMemHandle);
++
++ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++void OSMemCopy(void *pvDst, void *pvSrc, u32 ui32Size)
++{
++ memcpy(pvDst, pvSrc, ui32Size);
++}
++
++void OSMemSet(void *pvDest, u8 ui8Value, u32 ui32Size)
++{
++ memset(pvDest, (int)ui8Value, (size_t) ui32Size);
++}
++
++char *OSStringCopy(char *pszDest, const char *pszSrc)
++{
++ return strcpy(pszDest, pszSrc);
++}
++
++s32 OSSNPrintf(char *pStr, u32 ui32Size, const char *pszFormat, ...)
++{
++ va_list argList;
++ s32 iCount;
++
++ va_start(argList, pszFormat);
++ iCount = vsnprintf(pStr, (size_t) ui32Size, pszFormat, argList);
++ va_end(argList);
++
++ return iCount;
++}
++
++void OSBreakResourceLock(struct PVRSRV_RESOURCE *psResource, u32 ui32ID)
++{
++ volatile u32 *pui32Access = (volatile u32 *)&psResource->ui32Lock;
++
++ if (*pui32Access)
++ if (psResource->ui32ID == ui32ID) {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ } else {
++ PVR_DPF(PVR_DBG_MESSAGE, "OSBreakResourceLock: "
++ "Resource is not locked for this process.");
++ } else
++ PVR_DPF(PVR_DBG_MESSAGE, "OSBreakResourceLock: "
++ "Resource is not locked");
++}
++
++enum PVRSRV_ERROR OSCreateResource(struct PVRSRV_RESOURCE *psResource)
++{
++ psResource->ui32ID = 0;
++ psResource->ui32Lock = 0;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSDestroyResource(struct PVRSRV_RESOURCE *psResource)
++{
++ OSBreakResourceLock(psResource, psResource->ui32ID);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSInitEnvData(void **ppvEnvSpecificData)
++{
++ struct ENV_DATA *psEnvData;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct ENV_DATA),
++ (void *)&psEnvData, NULL) != PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ memset(psEnvData, 0, sizeof(*psEnvData));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ &psEnvData->pvBridgeData, NULL) != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct ENV_DATA),
++ psEnvData, NULL);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ *ppvEnvSpecificData = psEnvData;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSDeInitEnvData(void *pvEnvSpecificData)
++{
++ struct ENV_DATA *psEnvData = (struct ENV_DATA *)pvEnvSpecificData;
++
++ PVR_ASSERT(!psEnvData->bMISRInstalled);
++ PVR_ASSERT(!psEnvData->bLISRInstalled);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ psEnvData->pvBridgeData, NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct ENV_DATA),
++ pvEnvSpecificData, NULL);
++
++ return PVRSRV_OK;
++}
++
++void OSReleaseThreadQuanta(void)
++{
++ schedule();
++}
++
++u32 OSClockus(void)
++{
++ u32 time, j = jiffies;
++
++ time = j * (1000000 / HZ);
++
++ return time;
++}
++
++void OSWaitus(u32 ui32Timeus)
++{
++ udelay(ui32Timeus);
++}
++
++u32 OSGetCurrentProcessIDKM(void)
++{
++ if (in_interrupt())
++ return KERNEL_ID;
++
++ return (u32) task_tgid_nr(current);
++}
++
++u32 OSGetPageSize(void)
++{
++ return PAGE_SIZE;
++}
++
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_BOOL bStatus = IMG_FALSE;
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)dev_id;
++ if (!psDeviceNode) {
++ PVR_DPF(PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n");
++ goto out;
++ }
++
++ bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++ if (bStatus) {
++ struct SYS_DATA *psSysData = psDeviceNode->psSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ queue_work(psEnvData->psMISRWorkqueue, &psEnvData->sMISRWork);
++ }
++
++out:
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++}
++
++enum PVRSRV_ERROR OSInstallDeviceLISR(void *pvSysData,
++ u32 ui32Irq,
++ char *pszISRName, void *pvDeviceNode)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled) {
++ PVR_DPF(PVR_DBG_ERROR, "OSInstallDeviceLISR: "
++ "An ISR has already been installed: IRQ %d cookie %x",
++ psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE("Installing device LISR %s on IRQ %d with cookie %x",
++ pszISRName, ui32Irq, pvDeviceNode);
++
++ if (request_irq(ui32Irq, DeviceISRWrapper,
++ IRQF_SHARED, pszISRName, pvDeviceNode)) {
++ PVR_DPF(PVR_DBG_ERROR, "OSInstallDeviceLISR: "
++ "Couldn't install device LISR on IRQ %d",
++ ui32Irq);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvDeviceNode;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSUninstallDeviceLISR(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSUninstallDeviceLISR: No LISR has been installed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE("Uninstalling device LISR on IRQ %d with cookie %x",
++ psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++static void MISRWrapper(struct work_struct *work)
++{
++ struct ENV_DATA *psEnvData = container_of(work, struct ENV_DATA,
++ sMISRWork);
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)psEnvData->pvSysData;
++ PVRSRVMISR(psSysData);
++}
++
++enum PVRSRV_ERROR OSInstallMISR(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSInstallMISR: An MISR has already been installed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE("Installing MISR with cookie %x", pvSysData);
++
++ psEnvData->pvSysData = pvSysData;
++ psEnvData->psMISRWorkqueue = create_singlethread_workqueue("sgx_misr");
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper);
++
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSUninstallMISR(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSUninstallMISR: No MISR has been installed");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE("Uninstalling MISR");
++
++ flush_workqueue(psEnvData->psMISRWorkqueue);
++ destroy_workqueue(psEnvData->psMISRWorkqueue);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSScheduleMISR(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ queue_work(psEnvData->psMISRWorkqueue, &psEnvData->sMISRWork);
++
++ return PVRSRV_OK;
++}
++
++
++#define OS_TAS(p) xchg((p), 1)
++enum PVRSRV_ERROR OSLockResource(struct PVRSRV_RESOURCE *psResource, u32 ui32ID)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (!OS_TAS(&psResource->ui32Lock))
++ psResource->ui32ID = ui32ID;
++ else
++ eError = PVRSRV_ERROR_GENERIC;
++
++ return eError;
++}
++
++enum PVRSRV_ERROR OSUnlockResource(struct PVRSRV_RESOURCE *psResource,
++ u32 ui32ID)
++{
++ volatile u32 *pui32Access = (volatile u32 *)&psResource->ui32Lock;
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (*pui32Access) {
++ if (psResource->ui32ID == ui32ID) {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSUnlockResource: "
++ "Resource %p is not locked with expected value.",
++ psResource);
++ PVR_DPF(PVR_DBG_MESSAGE, "Should be %x is actually %x",
++ ui32ID, psResource->ui32ID);
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSUnlockResource: Resource %p is not locked",
++ psResource);
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++IMG_BOOL OSIsResourceLocked(struct PVRSRV_RESOURCE *psResource, u32 ui32ID)
++{
++ volatile u32 *pui32Access = (volatile u32 *)&psResource->ui32Lock;
++
++ return (*(volatile u32 *)pui32Access == 1) &&
++ (psResource->ui32ID == ui32ID) ? IMG_TRUE : IMG_FALSE;
++}
++
++struct IMG_CPU_PHYADDR OSMapLinToCPUPhys(void *pvLinAddr)
++{
++ struct IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = (u32) VMallocToPhys(pvLinAddr);
++
++ return CpuPAddr;
++}
++
++void __iomem *OSMapPhysToLin(struct IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ if (phOSMemHandle)
++ *phOSMemHandle = (void *) 0;
++
++ if (ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) {
++ void __iomem *pvIORemapCookie;
++ pvIORemapCookie =
++ IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if (pvIORemapCookie == NULL)
++ return NULL;
++ return pvIORemapCookie;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSMapPhysToLin "
++ "should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ "(Use OSReservePhys otherwise)");
++ return NULL;
++ }
++
++}
++
++IMG_BOOL
++OSUnMapPhysToLin(void __iomem *pvLinAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, void *hPageAlloc)
++{
++ PVR_TRACE("%s: unmapping %d bytes from 0x%08x", __func__,
++ ui32Bytes, pvLinAddr);
++
++ PVR_UNREFERENCED_PARAMETER(hPageAlloc);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ if (ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) {
++ IOUnmapWrapper(pvLinAddr);
++ return IMG_TRUE;
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSUnMapPhysToLin "
++ "should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSUnReservePhys otherwise)");
++ return IMG_FALSE;
++ }
++
++}
++
++static enum PVRSRV_ERROR RegisterExternalMem(struct IMG_SYS_PHYADDR *pBasePAddr,
++ void *pvCPUVAddr, u32 ui32Bytes, IMG_BOOL bPhysContig,
++ u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea =
++ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr,
++ ui32Bytes, bPhysContig,
++ ui32MappingFlags);
++
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_GENERIC;
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea =
++ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr,
++ ui32Bytes, bPhysContig,
++ ui32MappingFlags);
++
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_GENERIC;
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ psLinuxMemArea =
++ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr,
++ ui32Bytes, bPhysContig,
++ ui32MappingFlags);
++
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_GENERIC;
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "OSRegisterMem : invalid flags 0x%x\n",
++ ui32MappingFlags);
++ *phOSMemHandle = (void *) 0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (void *) psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSRegisterMem(struct IMG_CPU_PHYADDR BasePAddr,
++ void *pvCPUVAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ struct IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
++
++ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE,
++ ui32MappingFlags, phOSMemHandle);
++}
++
++enum PVRSRV_ERROR OSRegisterDiscontigMem(struct IMG_SYS_PHYADDR *pBasePAddr,
++ void *pvCPUVAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags,
++ void **phOSMemHandle)
++{
++ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes,
++ IMG_FALSE, ui32MappingFlags, phOSMemHandle);
++}
++
++enum PVRSRV_ERROR OSUnRegisterMem(void *pvCpuVAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, void *hOSMemHandle)
++{
++ struct LinuxMemArea *psLinuxMemArea = (struct LinuxMemArea *)
++ hOSMemHandle;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __func__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle);
++ BUG();
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSUnRegisterMem : invalid flags 0x%x",
++ ui32MappingFlags);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSUnRegisterDiscontigMem(void *pvCpuVAddr, u32 ui32Bytes,
++ u32 ui32Flags, void *hOSMemHandle)
++{
++ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++}
++
++enum PVRSRV_ERROR OSReservePhys(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void **ppvCpuVAddr,
++ void **phOSMemHandle)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea =
++ NewIORemapLinuxMemArea(BasePAddr, ui32Bytes,
++ ui32MappingFlags);
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_GENERIC;
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea =
++ NewIOLinuxMemArea(BasePAddr, ui32Bytes,
++ ui32MappingFlags);
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_GENERIC;
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ psLinuxMemArea =
++ NewIORemapLinuxMemArea(BasePAddr, ui32Bytes,
++ ui32MappingFlags);
++ if (!psLinuxMemArea)
++ return PVRSRV_ERROR_GENERIC;
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "OSMapPhysToLin : invalid flags 0x%x\n",
++ ui32MappingFlags);
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (void *) 0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (void *) psLinuxMemArea;
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSUnReservePhys(void *pvCpuVAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void *hOSMemHandle)
++{
++ struct LinuxMemArea *psLinuxMemArea;
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ psLinuxMemArea = (struct LinuxMemArea *)hOSMemHandle;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __func__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSUnMapPhysToLin : invalid flags 0x%x",
++ ui32MappingFlags);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSBaseAllocContigMemory(u32 ui32Size, void **pvLinAddr,
++ struct IMG_CPU_PHYADDR *psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr);
++ PVR_DPF(PVR_DBG_ERROR, "%s: Not available", __func__);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++ void *pvKernLinAddr;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ pvKernLinAddr = _KMallocWrapper(ui32Size, __FILE__, __LINE__);
++#else
++ pvKernLinAddr = KMallocWrapper(ui32Size);
++#endif
++ if (!pvKernLinAddr)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ *pvLinAddr = pvKernLinAddr;
++
++ psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);
++
++ return PVRSRV_OK;
++#endif
++}
++
++enum PVRSRV_ERROR OSBaseFreeContigMemory(u32 ui32Size, void *pvLinAddr,
++ struct IMG_CPU_PHYADDR psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ PVR_DPF(PVR_DBG_WARNING, "%s: Not available", __func__);
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ KFreeWrapper(pvLinAddr);
++#endif
++ return PVRSRV_OK;
++}
++
++u32 OSReadHWReg(void __iomem *pvLinRegBaseAddr, u32 ui32Offset)
++{
++#if !defined(NO_HARDWARE)
++ return (u32)readl(pvLinRegBaseAddr + ui32Offset);
++#else
++ return *(u32 *)((u8 *) pvLinRegBaseAddr + ui32Offset);
++#endif
++}
++
++void OSWriteHWReg(void __iomem *pvLinRegBaseAddr, u32 ui32Offset, u32 ui32Value)
++{
++#if !defined(NO_HARDWARE)
++ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
++#else
++ *(u32 *)((u8 *)pvLinRegBaseAddr + ui32Offset) = ui32Value;
++#endif
++}
++
++#define OS_MAX_TIMERS 8
++
++struct TIMER_CALLBACK_DATA {
++ IMG_BOOL bInUse;
++ void (*pfnTimerFunc)(void *);
++ void *pvData;
++ struct timer_list sTimer;
++ u32 ui32Delay;
++ IMG_BOOL bActive;
++};
++
++static struct TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
++static DEFINE_SPINLOCK(sTimerStructLock);
++static void OSTimerCallbackWrapper(unsigned long ui32Data)
++{
++ struct TIMER_CALLBACK_DATA *psTimerCBData =
++ (struct TIMER_CALLBACK_DATA *)ui32Data;
++
++ if (!psTimerCBData->bActive)
++ return;
++
++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++
++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++void *OSAddTimer(void (*pfnTimerFunc)(void *), void *pvData, u32 ui32MsTimeout)
++{
++ struct TIMER_CALLBACK_DATA *psTimerCBData;
++ u32 ui32i;
++ unsigned long ulLockFlags;
++
++ if (!pfnTimerFunc) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAddTimer: passed invalid callback");
++ return NULL;
++ }
++
++ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) {
++ psTimerCBData = &sTimers[ui32i];
++ if (!psTimerCBData->bInUse) {
++ psTimerCBData->bInUse = IMG_TRUE;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
++
++ if (ui32i >= OS_MAX_TIMERS) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAddTimer: all timers are in use");
++ return NULL;
++ }
++
++ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++ psTimerCBData->pvData = pvData;
++ psTimerCBData->bActive = IMG_FALSE;
++
++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++ ? 1 : ((HZ * ui32MsTimeout) / 1000);
++
++ init_timer(&psTimerCBData->sTimer);
++
++ psTimerCBData->sTimer.function = OSTimerCallbackWrapper;
++ psTimerCBData->sTimer.data = (u32) psTimerCBData;
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ return (void *)(ui32i + 1);
++}
++
++static inline struct TIMER_CALLBACK_DATA *GetTimerStructure(void *hTimer)
++{
++ u32 ui32i = ((u32) hTimer) - 1;
++ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
++ return &sTimers[ui32i];
++}
++
++enum PVRSRV_ERROR OSRemoveTimer(void *hTimer)
++{
++ struct TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++ /* free timer callback data struct */
++ psTimerCBData->bInUse = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSEnableTimer(void *hTimer)
++{
++ struct TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++ psTimerCBData->bActive = IMG_TRUE;
++
++ add_timer(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSDisableTimer(void *hTimer)
++{
++ struct TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(psTimerCBData->bActive);
++
++ psTimerCBData->bActive = IMG_FALSE;
++
++ del_timer_sync(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSEventObjectCreate(const char *pszName,
++ struct PVRSRV_EVENTOBJECT *psEventObject)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (pszName) {
++ strncpy(psEventObject->szName, pszName,
++ EVENTOBJNAME_MAXLENGTH);
++ } else {
++ static u16 ui16NameIndex;
++ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH,
++ "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++ }
++
++ if (LinuxEventObjectListCreate(&psEventObject->hOSEventKM) !=
++ PVRSRV_OK)
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSEventObjectCreate: "
++ "psEventObject is not a valid pointer");
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++
++}
++
++enum PVRSRV_ERROR OSEventObjectDestroy(struct PVRSRV_EVENTOBJECT *psEventObject)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (psEventObject->hOSEventKM) {
++ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSEventObjectDestroy: "
++ "hOSEventKM is not a valid pointer");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSEventObjectDestroy: "
++ "psEventObject is not a valid pointer");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (hOSEventKM) {
++ eError =
++ LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSEventObjectWait: hOSEventKM is not a valid handle");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR OSEventObjectOpen(struct PVRSRV_EVENTOBJECT *psEventObject,
++ void **phOSEvent)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "LinuxEventObjectAdd: failed");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSEventObjectCreate: "
++ "psEventObject is not a valid pointer");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR OSEventObjectClose(struct PVRSRV_EVENTOBJECT *psEventObject,
++ void *hOSEventKM)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (LinuxEventObjectDelete
++ (psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "LinuxEventObjectDelete: failed");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSEventObjectDestroy: "
++ "psEventObject is not a valid pointer");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++
++}
++
++enum PVRSRV_ERROR OSEventObjectSignal(void *hOSEventKM)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (hOSEventKM) {
++ eError = LinuxEventObjectSignal(hOSEventKM);
++ } else {
++ PVR_DPF(PVR_DBG_ERROR, "OSEventObjectSignal: "
++ "hOSEventKM is not a valid handle");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(void)
++{
++ return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE;
++}
++
++enum PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest,
++ const void *pvSrc, u32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if (copy_to_user(pvDest, pvSrc, ui32Bytes) == 0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest,
++ const void __user *pvSrc, u32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if (copy_from_user(pvDest, pvSrc, ui32Bytes) == 0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(enum IMG_VERIFY_TEST eVerification,
++ const void __user *pvUserPtr, u32 ui32Bytes)
++{
++ int linuxType;
++
++ if (eVerification == PVR_VERIFY_READ) {
++ linuxType = VERIFY_READ;
++ } else {
++ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
++ linuxType = VERIFY_WRITE;
++ }
++
++ return access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++enum eWrapMemType {
++ WRAP_TYPE_CLEANUP,
++ WRAP_TYPE_GET_USER_PAGES,
++ WRAP_TYPE_FIND_VMA_PAGES,
++ WRAP_TYPE_FIND_VMA_PFN
++};
++
++struct sWrapMemInfo {
++ enum eWrapMemType eType;
++ int iNumPages;
++ struct page **ppsPages;
++ struct IMG_SYS_PHYADDR *psPhysAddr;
++ int iPageOffset;
++ int iContiguous;
++#if defined(DEBUG)
++ u32 ulStartAddr;
++ u32 ulBeyondEndAddr;
++ struct vm_area_struct *psVMArea;
++#endif
++};
++
++static void CheckPagesContiguous(struct sWrapMemInfo *psInfo)
++{
++ int i;
++ u32 ui32AddrChk;
++
++ BUG_ON(psInfo == NULL);
++
++ psInfo->iContiguous = 1;
++
++ for (i = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr;
++ i < psInfo->iNumPages; i++, ui32AddrChk += PAGE_SIZE)
++ if (psInfo->psPhysAddr[i].uiAddr != ui32AddrChk) {
++ psInfo->iContiguous = 0;
++ break;
++ }
++}
++
++static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea,
++ u32 ulCPUVAddr)
++{
++ pgd_t *psPGD;
++ pud_t *psPUD;
++ pmd_t *psPMD;
++ pte_t *psPTE;
++ struct mm_struct *psMM = psVMArea->vm_mm;
++ u32 ulPFN;
++ spinlock_t *psPTLock;
++ struct page *psPage;
++
++ psPGD = pgd_offset(psMM, ulCPUVAddr);
++ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
++ return NULL;
++
++ psPUD = pud_offset(psPGD, ulCPUVAddr);
++ if (pud_none(*psPUD) || pud_bad(*psPUD))
++ return NULL;
++
++ psPMD = pmd_offset(psPUD, ulCPUVAddr);
++ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
++ return NULL;
++
++ psPage = NULL;
++
++ psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr,
++ &psPTLock);
++ if ((pte_none(*psPTE) != 0) || (pte_present(*psPTE) == 0) ||
++ (pte_write(*psPTE) == 0))
++ goto exit_unlock;
++
++ ulPFN = pte_pfn(*psPTE);
++ if (!pfn_valid(ulPFN))
++ goto exit_unlock;
++
++ psPage = pfn_to_page(ulPFN);
++
++ get_page(psPage);
++
++exit_unlock:
++ pte_unmap_unlock(psPTE, psPTLock);
++
++ return psPage;
++}
++
++enum PVRSRV_ERROR OSReleasePhysPageAddr(void *hOSWrapMem)
++{
++ struct sWrapMemInfo *psInfo = (struct sWrapMemInfo *)hOSWrapMem;
++ int i;
++
++ BUG_ON(psInfo == NULL);
++
++ switch (psInfo->eType) {
++ case WRAP_TYPE_CLEANUP:
++ break;
++ case WRAP_TYPE_FIND_VMA_PFN:
++ break;
++ case WRAP_TYPE_GET_USER_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ struct page *psPage = psInfo->ppsPages[i];
++
++ if (!PageReserved(psPage))
++ SetPageDirty(psPage);
++ page_cache_release(psPage);
++ }
++ break;
++ }
++ case WRAP_TYPE_FIND_VMA_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ put_page_testzero(psInfo->ppsPages[i]);
++ break;
++ }
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "OSReleasePhysPageAddr: Unknown wrap type (%d)",
++ psInfo->eType);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ if (psInfo->ppsPages != NULL)
++ kfree(psInfo->ppsPages);
++
++ if (psInfo->psPhysAddr != NULL)
++ kfree(psInfo->psPhysAddr);
++
++ kfree(psInfo);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSAcquirePhysPageAddr(void *pvCPUVAddr, u32 ui32Bytes,
++ struct IMG_SYS_PHYADDR *psSysPAddr,
++ void **phOSWrapMem)
++{
++ u32 ulStartAddrOrig = (u32) pvCPUVAddr;
++ u32 ulAddrRangeOrig = (u32) ui32Bytes;
++ u32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
++ u32 ulStartAddr;
++ u32 ulAddrRange;
++ u32 ulBeyondEndAddr;
++ u32 ulAddr;
++ int iNumPagesMapped;
++ int i;
++ struct vm_area_struct *psVMArea;
++ struct sWrapMemInfo *psInfo;
++
++ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
++ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
++ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
++
++ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
++ if (psInfo == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "Couldn't allocate information structure");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psInfo, 0, sizeof(*psInfo));
++
++#if defined(DEBUG)
++ psInfo->ulStartAddr = ulStartAddrOrig;
++ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
++#endif
++
++ psInfo->iNumPages = (int)(ulAddrRange >> PAGE_SHIFT);
++ psInfo->iPageOffset = (int)(ulStartAddrOrig & ~PAGE_MASK);
++
++ psInfo->psPhysAddr =
++ kmalloc((size_t) psInfo->iNumPages * sizeof(*psInfo->psPhysAddr),
++ GFP_KERNEL);
++ if (psInfo->psPhysAddr == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "Couldn't allocate page array");
++ goto error_free;
++ }
++
++ psInfo->ppsPages =
++ kmalloc((size_t) psInfo->iNumPages * sizeof(*psInfo->ppsPages),
++ GFP_KERNEL);
++ if (psInfo->ppsPages == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "Couldn't allocate page array");
++ goto error_free;
++ }
++
++ down_read(&current->mm->mmap_sem);
++
++ iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr,
++ psInfo->iNumPages, 1, 0,
++ psInfo->ppsPages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++
++ if (iNumPagesMapped >= 0) {
++ if (iNumPagesMapped != psInfo->iNumPages) {
++ PVR_TRACE("OSAcquirePhysPageAddr: "
++ "Couldn't map all the pages needed "
++ "(wanted: %d, got %d)",
++ psInfo->iNumPages, iNumPagesMapped);
++
++ for (i = 0; i < iNumPagesMapped; i++)
++ page_cache_release(psInfo->ppsPages[i]);
++
++ goto error_free;
++ }
++
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ struct IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr =
++ page_to_pfn(psInfo->ppsPages[i]) << PAGE_SHIFT;
++ psInfo->psPhysAddr[i] =
++ SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++
++ }
++
++ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
++
++ goto exit_check;
++ }
++
++ PVR_TRACE("OSAcquirePhysPageAddr: "
++ "get_user_pages failed (%d), trying something else",
++ iNumPagesMapped);
++
++ down_read(&current->mm->mmap_sem);
++
++ psVMArea = find_vma(current->mm, ulStartAddrOrig);
++ if (psVMArea == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "Couldn't find memory region "
++ "containing start address %lx",
++ ulStartAddrOrig);
++
++ goto error_release_mmap_sem;
++ }
++#if defined(DEBUG)
++ psInfo->psVMArea = psVMArea;
++#endif
++
++ if (ulStartAddrOrig < psVMArea->vm_start) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "Start address %lx is outside of the "
++ "region returned by find_vma",
++ ulStartAddrOrig);
++ goto error_release_mmap_sem;
++ }
++
++ if (ulBeyondEndAddrOrig > psVMArea->vm_end) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "End address %lx is outside of the region "
++ "returned by find_vma",
++ ulBeyondEndAddrOrig);
++ goto error_release_mmap_sem;
++ }
++
++ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) !=
++ (VM_IO | VM_RESERVED)) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "Memory region does not represent memory "
++ "mapped I/O (VMA flags: 0x%lx)",
++ psVMArea->vm_flags);
++ goto error_release_mmap_sem;
++ }
++
++ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) !=
++ (VM_READ | VM_WRITE)) {
++ PVR_DPF(PVR_DBG_ERROR, "OSAcquirePhysPageAddr: "
++ "No read/write access to memory region "
++ "(VMA flags: 0x%lx)",
++ psVMArea->vm_flags);
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig;
++ ulAddr += PAGE_SIZE, i++) {
++ struct page *psPage;
++
++ BUG_ON(i >= psInfo->iNumPages);
++
++ psPage = CPUVAddrToPage(psVMArea, ulAddr);
++ if (psPage == NULL) {
++ int j;
++
++ PVR_TRACE("OSAcquirePhysPageAddr: "
++ "Couldn't lookup page structure "
++ "for address 0x%lx, trying something else",
++ ulAddr);
++
++ for (j = 0; j < i; j++)
++ put_page_testzero(psInfo->ppsPages[j]);
++ break;
++ }
++
++ psInfo->ppsPages[i] = psPage;
++ }
++
++ BUG_ON(i > psInfo->iNumPages);
++ if (i == psInfo->iNumPages) {
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ struct page *psPage = psInfo->ppsPages[i];
++ struct IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT;
++
++ psInfo->psPhysAddr[i] =
++ SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES;
++ } else {
++
++ if ((psVMArea->vm_flags & VM_PFNMAP) == 0) {
++ PVR_DPF(PVR_DBG_WARNING, "OSAcquirePhysPageAddr: "
++ "Region isn't a raw PFN mapping. "
++ "Giving up.");
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0;
++ ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++) {
++ struct IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = ((ulAddr - psVMArea->vm_start) +
++ (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK;
++
++ psInfo->psPhysAddr[i] =
++ SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++ BUG_ON(i != psInfo->iNumPages);
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
++
++ PVR_DPF(PVR_DBG_WARNING, "OSAcquirePhysPageAddr: "
++ "Region can't be locked down");
++ }
++
++ up_read(&current->mm->mmap_sem);
++
++exit_check:
++ CheckPagesContiguous(psInfo);
++
++ *phOSWrapMem = (void *) psInfo;
++
++ return PVRSRV_OK;
++
++error_release_mmap_sem:
++ up_read(&current->mm->mmap_sem);
++
++error_free:
++ psInfo->eType = WRAP_TYPE_CLEANUP;
++ OSReleasePhysPageAddr((void *)psInfo);
++ return PVRSRV_ERROR_GENERIC;
++}
+diff --git a/drivers/gpu/pvr/osfunc.h b/drivers/gpu/pvr/osfunc.h
+new file mode 100644
+index 0000000..0cad290
+--- /dev/null
++++ b/drivers/gpu/pvr/osfunc.h
+@@ -0,0 +1,232 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++#if defined(__KERNEL__)
++#include <linux/hardirq.h>
++#include <linux/string.h>
++#endif
++
++#define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP
++
++#define KERNEL_ID 0xffffffffL
++#define POWER_MANAGER_ID 0xfffffffeL
++#define ISR_ID 0xfffffffdL
++#define TIMER_ID 0xfffffffcL
++
++#define HOST_PAGESIZE OSGetPageSize
++#define HOST_PAGEMASK (~(HOST_PAGESIZE()-1))
++
++#define HOST_PAGEALIGN(addr) (((addr) + HOST_PAGESIZE() - 1) & \
++ HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK 0xf
++#define PVRSRV_OS_PAGEABLE_HEAP 0x1
++#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2
++
++u32 OSClockus(void);
++u32 OSGetPageSize(void);
++enum PVRSRV_ERROR OSInstallDeviceLISR(void *pvSysData, u32 ui32Irq,
++ char *pszISRName, void *pvDeviceNode);
++enum PVRSRV_ERROR OSUninstallDeviceLISR(void *pvSysData);
++enum PVRSRV_ERROR OSInstallSystemLISR(void *pvSysData, u32 ui32Irq);
++enum PVRSRV_ERROR OSUninstallSystemLISR(void *pvSysData);
++enum PVRSRV_ERROR OSInstallMISR(void *pvSysData);
++enum PVRSRV_ERROR OSUninstallMISR(void *pvSysData);
++enum PVRSRV_ERROR OSInitPerf(void *pvSysData);
++enum PVRSRV_ERROR OSCleanupPerf(void *pvSysData);
++struct IMG_CPU_PHYADDR OSMapLinToCPUPhys(void *pvLinAddr);
++void OSMemCopy(void *pvDst, void *pvSrc, u32 ui32Size);
++void __iomem *OSMapPhysToLin(struct IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void **phOSMemHandle);
++IMG_BOOL OSUnMapPhysToLin(void __iomem *pvLinAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, void *hPageAlloc);
++
++enum PVRSRV_ERROR OSReservePhys(struct IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes,
++ u32 ui32Flags, void **ppvCpuVAddr, void **phOSMemHandle);
++enum PVRSRV_ERROR OSUnReservePhys(void *pvCpuVAddr, u32 ui32Bytes,
++ u32 ui32Flags, void *hOSMemHandle);
++
++enum PVRSRV_ERROR OSRegisterDiscontigMem(struct IMG_SYS_PHYADDR *pBasePAddr,
++ void *pvCpuVAddr, u32 ui32Bytes, u32 ui32Flags,
++ void **phOSMemHandle);
++enum PVRSRV_ERROR OSUnRegisterDiscontigMem(void *pvCpuVAddr, u32 ui32Bytes,
++ u32 ui32Flags, void *hOSMemHandle);
++
++static inline enum PVRSRV_ERROR OSReserveDiscontigPhys(
++ struct IMG_SYS_PHYADDR *pBasePAddr, u32 ui32Bytes,
++ u32 ui32Flags, void **ppvCpuVAddr, void **phOSMemHandle)
++{
++ *ppvCpuVAddr = NULL;
++ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes,
++ ui32Flags, phOSMemHandle);
++}
++
++static inline enum PVRSRV_ERROR OSUnReserveDiscontigPhys(void *pvCpuVAddr,
++ u32 ui32Bytes, u32 ui32Flags, void *hOSMemHandle)
++{
++ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags,
++ hOSMemHandle);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSRegisterMem(struct IMG_CPU_PHYADDR BasePAddr,
++ void *pvCpuVAddr, u32 ui32Bytes, u32 ui32Flags,
++ void **phOSMemHandle);
++enum PVRSRV_ERROR OSUnRegisterMem(void *pvCpuVAddr, u32 ui32Bytes,
++ u32 ui32Flags, void *hOSMemHandle);
++
++enum PVRSRV_ERROR OSGetSubMemHandle(void *hOSMemHandle, u32 ui32ByteOffset,
++ u32 ui32Bytes, u32 ui32Flags, void **phOSMemHandleRet);
++enum PVRSRV_ERROR OSReleaseSubMemHandle(void *hOSMemHandle, u32 ui32Flags);
++
++u32 OSGetCurrentProcessIDKM(void);
++u32 OSGetCurrentThreadID(void);
++void OSMemSet(void *pvDest, u8 ui8Value, u32 ui32Size);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++enum PVRSRV_ERROR _OSAllocMem(u32 ui32Flags, u32 ui32Size, void **ppvLinAddr,
++ void **phBlockAlloc, char *pszFilename, u32 ui32Line);
++#define OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc) \
++ _OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc, \
++ __FILE__, __LINE__)
++void _OSFreeMem(u32 ui32Flags, u32 ui32Size, void *pvLinAddr,
++ void *hBlockAlloc, char *pszFilename, u32 ui32Line);
++#define OSFreeMem(ui32Flags, ui32Size, pvLinAddr, phBlockAlloc) \
++ _OSFreeMem(ui32Flags, ui32Size, pvLinAddr, phBlockAlloc, \
++ __FILE__, __LINE__)
++#else
++enum PVRSRV_ERROR OSAllocMem(u32 ui32Flags, u32 ui32Size, void **ppvLinAddr,
++ void **phBlockAlloc);
++void OSFreeMem(u32 ui32Flags, u32 ui32Size, void *pvLinAddr, void *hBlockAlloc);
++#endif
++enum PVRSRV_ERROR OSAllocPages(u32 ui32Flags, u32 ui32Size, u32 ui32PageSize,
++ void **ppvLinAddr, void **phPageAlloc);
++enum PVRSRV_ERROR OSFreePages(u32 ui32Flags, u32 ui32Size, void *pvLinAddr,
++ void *hPageAlloc);
++struct IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(void *hOSMemHandle,
++ u32 ui32ByteOffset);
++enum PVRSRV_ERROR OSInitEnvData(void **ppvEnvSpecificData);
++enum PVRSRV_ERROR OSDeInitEnvData(void *pvEnvSpecificData);
++char *OSStringCopy(char *pszDest, const char *pszSrc);
++s32 OSSNPrintf(char *pStr, u32 ui32Size, const char *pszFormat, ...);
++#define OSStringLength(pszString) strlen(pszString)
++
++enum PVRSRV_ERROR OSEventObjectCreate(const char *pszName,
++ struct PVRSRV_EVENTOBJECT *psEventObject);
++enum PVRSRV_ERROR OSEventObjectDestroy(
++ struct PVRSRV_EVENTOBJECT *psEventObject);
++enum PVRSRV_ERROR OSEventObjectSignal(void *hOSEventKM);
++enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM);
++enum PVRSRV_ERROR OSEventObjectOpen(struct PVRSRV_EVENTOBJECT *psEventObject,
++ void **phOSEvent);
++enum PVRSRV_ERROR OSEventObjectClose(struct PVRSRV_EVENTOBJECT *psEventObject,
++ void *hOSEventKM);
++
++enum PVRSRV_ERROR OSBaseAllocContigMemory(u32 ui32Size, void **pLinAddr,
++ struct IMG_CPU_PHYADDR *pPhysAddr);
++enum PVRSRV_ERROR OSBaseFreeContigMemory(u32 ui32Size, void *LinAddr,
++ struct IMG_CPU_PHYADDR PhysAddr);
++
++void *MapUserFromKernel(void *pvLinAddrKM, u32 ui32Size, void **phMemBlock);
++void *OSMapHWRegsIntoUserSpace(void *hDevCookie,
++ struct IMG_SYS_PHYADDR sRegAddr, u32 ulSize, void **ppvProcess);
++void OSUnmapHWRegsFromUserSpace(void *hDevCookie, void *pvUserAddr,
++ void *pvProcess);
++
++void UnmapUserFromKernel(void *pvLinAddrUM, u32 ui32Size, void *hMemBlock);
++
++enum PVRSRV_ERROR OSMapPhysToUserSpace(void *hDevCookie,
++ struct IMG_SYS_PHYADDR sCPUPhysAddr, u32 uiSizeInBytes,
++ u32 ui32CacheFlags, void **ppvUserAddr, u32 *puiActualSize,
++ void *hMappingHandle);
++
++enum PVRSRV_ERROR OSUnmapPhysToUserSpace(void *hDevCookie, void *pvUserAddr,
++ void *pvProcess);
++
++enum PVRSRV_ERROR OSLockResource(struct PVRSRV_RESOURCE *psResource,
++ u32 ui32ID);
++enum PVRSRV_ERROR OSUnlockResource(struct PVRSRV_RESOURCE *psResource,
++ u32 ui32ID);
++IMG_BOOL OSIsResourceLocked(struct PVRSRV_RESOURCE *psResource, u32 ui32ID);
++enum PVRSRV_ERROR OSCreateResource(struct PVRSRV_RESOURCE *psResource);
++enum PVRSRV_ERROR OSDestroyResource(struct PVRSRV_RESOURCE *psResource);
++void OSBreakResourceLock(struct PVRSRV_RESOURCE *psResource, u32 ui32ID);
++void OSWaitus(u32 ui32Timeus);
++void OSReleaseThreadQuanta(void);
++
++u32 OSReadHWReg(void __iomem *pvLinRegBaseAddr, u32 ui32Offset);
++void OSWriteHWReg(void __iomem *pvLinRegBaseAddr, u32 ui32Offset,
++ u32 ui32Value);
++
++void *OSAddTimer(void (*pfnTimerFunc)(void *), void *pvData, u32 ui32MsTimeout);
++enum PVRSRV_ERROR OSRemoveTimer(void *hTimer);
++enum PVRSRV_ERROR OSEnableTimer(void *hTimer);
++enum PVRSRV_ERROR OSDisableTimer(void *hTimer);
++
++enum PVRSRV_ERROR OSGetSysMemSize(u32 *pui32Bytes);
++
++enum HOST_PCI_INIT_FLAGS {
++ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
++ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
++ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++};
++
++enum PVRSRV_ERROR OSScheduleMISR(void *pvSysData);
++
++IMG_BOOL OSProcHasPrivSrvInit(void);
++
++enum IMG_VERIFY_TEST {
++ PVR_VERIFY_WRITE = 0,
++ PVR_VERIFY_READ
++};
++
++IMG_BOOL OSAccessOK(enum IMG_VERIFY_TEST eVerification,
++ const void __user *pvUserPtr, u32 ui32Bytes);
++
++enum PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest,
++ const void *pvSrc, u32 ui32Bytes);
++enum PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest,
++ const void __user *pvSrc, u32 ui32Bytes);
++
++enum PVRSRV_ERROR OSAcquirePhysPageAddr(void *pvCPUVAddr, u32 ui32Bytes,
++ struct IMG_SYS_PHYADDR *psSysPAddr,
++ void **phOSWrapMem);
++enum PVRSRV_ERROR OSReleasePhysPageAddr(void *hOSWrapMem);
++
++#if defined(__KERNEL__)
++#define OS_SUPPORTS_IN_LISR
++static inline IMG_BOOL OSInLISR(void unref__ * pvSysData)
++{
++ return in_irq();
++}
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/osperproc.c b/drivers/gpu/pvr/osperproc.c
+new file mode 100644
+index 0000000..dffd317
+--- /dev/null
++++ b/drivers/gpu/pvr/osperproc.c
+@@ -0,0 +1,84 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "osperproc.h"
++
++#include "env_perproc.h"
++#include "proc.h"
++
++enum PVRSRV_ERROR OSPerProcessPrivateDataInit(void **phOsPrivateData)
++{
++ enum PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ struct PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_ENV_PER_PROCESS_DATA),
++ phOsPrivateData, &hBlockAlloc);
++
++ if (eError != PVRSRV_OK) {
++ *phOsPrivateData = NULL;
++
++ PVR_DPF(PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __func__,
++ eError);
++ return eError;
++ }
++
++ psEnvPerProc = (struct PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
++ OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));
++
++ psEnvPerProc->hBlockAlloc = hBlockAlloc;
++
++ LinuxMMapPerProcessConnect(psEnvPerProc);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSPerProcessPrivateDataDeInit(void *hOsPrivateData)
++{
++ struct PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (hOsPrivateData == NULL)
++ return PVRSRV_OK;
++
++ psEnvPerProc = (struct PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;
++
++ LinuxMMapPerProcessDisconnect(psEnvPerProc);
++
++ RemovePerProcessProcDir(psEnvPerProc);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_ENV_PER_PROCESS_DATA),
++ hOsPrivateData, psEnvPerProc->hBlockAlloc);
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSPerProcessSetHandleOptions(struct PVRSRV_HANDLE_BASE
++ *psHandleBase)
++{
++ return LinuxMMapPerProcessHandleOptions(psHandleBase);
++}
+diff --git a/drivers/gpu/pvr/osperproc.h b/drivers/gpu/pvr/osperproc.h
+new file mode 100644
+index 0000000..891ab66
+--- /dev/null
++++ b/drivers/gpu/pvr/osperproc.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __OSPERPROC_H__
++#define __OSPERPROC_H__
++
++enum PVRSRV_ERROR OSPerProcessPrivateDataInit(void **phOsPrivateData);
++enum PVRSRV_ERROR OSPerProcessPrivateDataDeInit(void *hOsPrivateData);
++
++enum PVRSRV_ERROR OSPerProcessSetHandleOptions(
++ struct PVRSRV_HANDLE_BASE *psHandleBase);
++
++#endif
+diff --git a/drivers/gpu/pvr/pb.c b/drivers/gpu/pvr/pb.c
+new file mode 100644
+index 0000000..a614069
+--- /dev/null
++++ b/drivers/gpu/pvr/pb.c
+@@ -0,0 +1,419 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++
++static struct RESMAN_ITEM *psResItemCreateSharedPB;
++static struct PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB;
++
++static enum PVRSRV_ERROR SGXCleanupSharedPBDescCallback(void *pvParam,
++ u32 ui32Param);
++static enum PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(void *pvParam,
++ u32 ui32Param);
++
++enum PVRSRV_ERROR SGXFindSharedPBDescKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevCookie, IMG_BOOL bLockOnFailure,
++ u32 ui32TotalPBSize, void **phSharedPBDesc,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ u32 *ui32SharedPBDescSubKernelMemInfosCount)
++{
++ struct PVRSRV_STUB_PBDESC *psStubPBDesc;
++ struct PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = NULL;
++ struct PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ enum PVRSRV_ERROR eError;
++
++ psSGXDevInfo = ((struct PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != NULL) {
++ u32 i;
++ struct RESMAN_ITEM *psResItem;
++
++ if (psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) {
++ PVR_DPF(PVR_DBG_WARNING, "SGXFindSharedPBDescKM: "
++ "Shared PB requested with different size "
++ "(0x%x) from existing shared PB (0x%x) - "
++ "requested size ignored",
++ ui32TotalPBSize,
++ psStubPBDesc->ui32TotalPBSize);
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *) *
++ psStubPBDesc->ui32SubKernelMemInfosCount,
++ (void **) &ppsSharedPBDescSubKernelMemInfos,
++ NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXFindSharedPBDescKM: OSAllocMem failed");
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ExitNotFound;
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc, 0,
++ &SGXCleanupSharedPBDescCallback);
++
++ if (psResItem == NULL) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *)*
++ psStubPBDesc->ui32SubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos, NULL);
++
++ PVR_DPF(PVR_DBG_ERROR, "SGXFindSharedPBDescKM: "
++ "ResManRegisterRes failed");
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++
++ *ppsSharedPBDescKernelMemInfo =
++ psStubPBDesc->psSharedPBDescKernelMemInfo;
++ *ppsHWPBDescKernelMemInfo =
++ psStubPBDesc->psHWPBDescKernelMemInfo;
++ *ppsBlockKernelMemInfo =
++ psStubPBDesc->psBlockKernelMemInfo;
++
++ *ui32SharedPBDescSubKernelMemInfosCount =
++ psStubPBDesc->ui32SubKernelMemInfosCount;
++
++ *pppsSharedPBDescSubKernelMemInfos =
++ ppsSharedPBDescSubKernelMemInfos;
++
++ for (i = 0;
++ i < psStubPBDesc->ui32SubKernelMemInfosCount;
++ i++) {
++ ppsSharedPBDescSubKernelMemInfos[i] =
++ psStubPBDesc->ppsSubKernelMemInfos[i];
++ }
++
++ psStubPBDesc->ui32RefCount++;
++ *phSharedPBDesc = (void *) psResItem;
++ return PVRSRV_OK;
++ }
++
++ eError = PVRSRV_OK;
++ if (bLockOnFailure) {
++ if (psResItemCreateSharedPB == NULL) {
++ psResItemCreateSharedPB =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ psPerProc, 0,
++ &SGXCleanupSharedPBDescCreateLockCallback);
++
++ if (psResItemCreateSharedPB == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXFindSharedPBDescKM: "
++ "ResManRegisterRes failed");
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++ PVR_ASSERT(psPerProcCreateSharedPB == NULL);
++ psPerProcCreateSharedPB = psPerProc;
++ } else {
++ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++ }
++ExitNotFound:
++ *phSharedPBDesc = NULL;
++
++ return eError;
++}
++
++static enum PVRSRV_ERROR SGXCleanupSharedPBDescKM(
++ struct PVRSRV_STUB_PBDESC *psStubPBDescIn)
++{
++ struct PVRSRV_STUB_PBDESC **ppsStubPBDesc;
++ u32 i;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)psStubPBDescIn->hDevCookie;
++ psSGXDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ for (ppsStubPBDesc = (struct PVRSRV_STUB_PBDESC **)
++ &psSGXDevInfo->psStubPBDescListKM;
++ *ppsStubPBDesc != NULL;
++ ppsStubPBDesc = &(*ppsStubPBDesc)->psNext) {
++ struct PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
++
++ if (psStubPBDesc == psStubPBDescIn) {
++ psStubPBDesc->ui32RefCount--;
++ PVR_ASSERT((s32) psStubPBDesc->ui32RefCount >= 0);
++
++ if (psStubPBDesc->ui32RefCount == 0) {
++ *ppsStubPBDesc = psStubPBDesc->psNext;
++
++ for (i = 0;
++ i < psStubPBDesc->ui32SubKernelMemInfosCount;
++ i++)
++ PVRSRVFreeDeviceMemKM(psStubPBDesc->
++ hDevCookie,
++ psStubPBDesc->ppsSubKernelMemInfos[i]);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *)*
++ psStubPBDesc->
++ ui32SubKernelMemInfosCount,
++ psStubPBDesc->ppsSubKernelMemInfos,
++ NULL);
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDesc->
++ psBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDesc->hDevCookie,
++ psStubPBDesc->
++ psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDesc->
++ psSharedPBDescKernelMemInfo);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_STUB_PBDESC),
++ psStubPBDesc, NULL);
++
++ SGXCleanupRequest(psDeviceNode, NULL,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_SHAREDPBDESC);
++ }
++ return PVRSRV_OK;
++ }
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++static enum PVRSRV_ERROR SGXCleanupSharedPBDescCallback(void *pvParam,
++ u32 ui32Param)
++{
++ struct PVRSRV_STUB_PBDESC *psStubPBDesc =
++ (struct PVRSRV_STUB_PBDESC *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++static enum PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(void *pvParam,
++ u32 ui32Param)
++{
++#ifdef DEBUG
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc =
++ (struct PVRSRV_PER_PROCESS_DATA *)pvParam;
++ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
++#else
++ PVR_UNREFERENCED_PARAMETER(pvParam);
++#endif
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psPerProcCreateSharedPB = NULL;
++ psResItemCreateSharedPB = NULL;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXUnrefSharedPBDescKM(void *hSharedPBDesc)
++{
++ PVR_ASSERT(hSharedPBDesc != NULL);
++
++ ResManFreeResByPtr(hSharedPBDesc);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXAddSharedPBDescKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevCookie,
++ struct PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ u32 ui32TotalPBSize, void **phSharedPBDesc,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
++ u32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++ struct PVRSRV_STUB_PBDESC *psStubPBDesc = NULL;
++ enum PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++ u32 i;
++ struct PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ struct RESMAN_ITEM *psResItem;
++
++ if (psPerProcCreateSharedPB != psPerProc) {
++ goto NoAdd;
++ } else {
++ PVR_ASSERT(psResItemCreateSharedPB != NULL);
++
++ ResManFreeResByPtr(psResItemCreateSharedPB);
++
++ PVR_ASSERT(psResItemCreateSharedPB == NULL);
++ PVR_ASSERT(psPerProcCreateSharedPB == NULL);
++ }
++
++ psSGXDevInfo = (struct PVRSRV_SGXDEV_INFO *)
++ ((struct PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != NULL) {
++ if (psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) {
++ PVR_DPF(PVR_DBG_WARNING, "SGXAddSharedPBDescKM: "
++ "Shared PB requested with different size "
++ "(0x%x) from existing shared PB (0x%x) - "
++ "requested size ignored",
++ ui32TotalPBSize,
++ psStubPBDesc->ui32TotalPBSize);
++
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc, 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXAddSharedPBDescKM: "
++ "Failed to register existing shared "
++ "PBDesc with the resource manager");
++ goto NoAddKeepPB;
++ }
++
++ psStubPBDesc->ui32RefCount++;
++
++ *phSharedPBDesc = (void *) psResItem;
++ eRet = PVRSRV_OK;
++ goto NoAddKeepPB;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_STUB_PBDESC),
++ (void **)&psStubPBDesc, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++ "StubPBDesc");
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ psStubPBDesc->ppsSubKernelMemInfos = NULL;
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *) *
++ ui32SharedPBDescSubKernelMemInfosCount,
++ (void **)&psStubPBDesc->ppsSubKernelMemInfos, NULL) !=
++ PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to alloc "
++ "StubPBDesc->ppsSubKernelMemInfos");
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ if (PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ goto NoAdd;
++
++ if (PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ goto NoAdd;
++
++ if (PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++ != PVRSRV_OK)
++ goto NoAdd;
++
++ psStubPBDesc->ui32RefCount = 1;
++ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++
++ psStubPBDesc->ui32SubKernelMemInfosCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) {
++ psStubPBDesc->ppsSubKernelMemInfos[i] =
++ ppsSharedPBDescSubKernelMemInfos[i];
++ if (PVRSRVDissociateMemFromResmanKM
++ (ppsSharedPBDescSubKernelMemInfos[i]) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to dissociate shared PBDesc "
++ "from process");
++ goto NoAdd;
++ }
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0, &SGXCleanupSharedPBDescCallback);
++ if (psResItem == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to register shared PBDesc "
++ " with the resource manager");
++ goto NoAdd;
++ }
++ psStubPBDesc->hDevCookie = hDevCookie;
++
++ psStubPBDesc->psNext = psSGXDevInfo->psStubPBDescListKM;
++ psSGXDevInfo->psStubPBDescListKM = psStubPBDesc;
++
++ *phSharedPBDesc = (void *) psResItem;
++
++ return PVRSRV_OK;
++
++NoAdd:
++ if (psStubPBDesc) {
++ if (psStubPBDesc->ppsSubKernelMemInfos) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_KERNEL_MEM_INFO *) *
++ ui32SharedPBDescSubKernelMemInfosCount,
++ psStubPBDesc->ppsSubKernelMemInfos, NULL);
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_STUB_PBDESC), psStubPBDesc,
++ NULL);
++ }
++
++NoAddKeepPB:
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
++ PVRSRVFreeDeviceMemKM(hDevCookie,
++ ppsSharedPBDescSubKernelMemInfos[i]);
++
++ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++
++ return eRet;
++}
+diff --git a/drivers/gpu/pvr/pdump.c b/drivers/gpu/pvr/pdump.c
+new file mode 100644
+index 0000000..0dec52f
+--- /dev/null
++++ b/drivers/gpu/pvr/pdump.c
+@@ -0,0 +1,1271 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(PDUMP)
++#include <asm/atomic.h>
++#include <stdarg.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvrversion.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>
++
++static IMG_BOOL PDumpWriteString2(char *pszString, u32 ui32Flags);
++static IMG_BOOL PDumpWriteILock(struct DBG_STREAM *psStream, u8 *pui8Data,
++ u32 ui32Count, u32 ui32Flags);
++static void DbgSetFrame(struct DBG_STREAM *psStream, u32 ui32Frame);
++static u32 DbgGetFrame(struct DBG_STREAM *psStream);
++static void DbgSetMarker(struct DBG_STREAM *psStream, u32 ui32Marker);
++static u32 DbgWrite(struct DBG_STREAM *psStream, u8 *pui8Data,
++ u32 ui32BCount, u32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL 1
++
++#define MIN(a, b) (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
++
++static struct DBGKM_SERVICE_TABLE *gpfnDbgDrv;
++
++#define PDUMP_STREAM_PARAM2 0
++#define PDUMP_STREAM_SCRIPT2 1
++#define PDUMP_STREAM_DRIVERINFO 2
++#define PDUMP_NUM_STREAMS 3
++
++static char *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
++ "ScriptStream2",
++ "DriverInfoStream"
++};
++
++#define __PDBG_PDUMP_STATE_GET_MSG_STRING(ERROR) \
++ char *pszMsg = gsDBGPdumpState.pszMsg; \
++ if ((!pszMsg) || PDumpSuspended()) \
++ return ERROR
++
++#define __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(ERROR) \
++ char *pszScript = gsDBGPdumpState.pszScript; \
++ if ((!pszScript) || PDumpSuspended()) \
++ return ERROR
++
++#define __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(ERROR) \
++ char *pszScript = gsDBGPdumpState.pszScript; \
++ char *pszFile = gsDBGPdumpState.pszFile; \
++ if ((!pszScript) || (!pszFile) || PDumpSuspended()) \
++ return ERROR
++
++struct PDBG_PDUMP_STATE {
++ struct DBG_STREAM *psStream[PDUMP_NUM_STREAMS];
++ u32 ui32ParamFileNum;
++
++ char *pszMsg;
++ char *pszScript;
++ char *pszFile;
++
++};
++
++static struct PDBG_PDUMP_STATE gsDBGPdumpState = {
++ {NULL}, 0, NULL, NULL, NULL
++};
++
++#define SZ_MSG_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE - 1)
++#define SZ_SCRIPT_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE - 1)
++#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE - 1)
++
++static inline IMG_BOOL PDumpSuspended(void)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++void PDumpInit(void)
++{
++ u32 i = 0;
++
++ if (!gpfnDbgDrv) {
++ DBGDrvGetServiceTable((void **) &gpfnDbgDrv);
++
++ if (gpfnDbgDrv == NULL)
++ return;
++
++ if (!gsDBGPdumpState.pszFile)
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ SZ_FILENAME_SIZE_MAX,
++ (void **)&gsDBGPdumpState.pszFile,
++ NULL) != PVRSRV_OK)
++ goto init_failed;
++
++ if (!gsDBGPdumpState.pszMsg)
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ SZ_MSG_SIZE_MAX,
++ (void **)&gsDBGPdumpState.pszMsg,
++ NULL) != PVRSRV_OK)
++ goto init_failed;
++
++ if (!gsDBGPdumpState.pszScript)
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ SZ_SCRIPT_SIZE_MAX,
++ (void **)&gsDBGPdumpState.pszScript,
++ NULL) != PVRSRV_OK)
++ goto init_failed;
++
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++) {
++ gsDBGPdumpState.psStream[i] =
++ gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
++ DEBUG_CAPMODE_FRAMED,
++ DEBUG_OUTMODE_STREAMENABLE,
++ 0, 10);
++
++ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.
++ psStream[i],
++ DEBUG_CAPMODE_FRAMED,
++ 0xFFFFFFFF, 0xFFFFFFFF,
++ 1);
++ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i], 0);
++ }
++
++ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
++ PDUMPCOMMENT("Driver Product Version: %s (%s)",
++ PVRVERSION_STRING, PVRVERSION_FILE);
++ PDUMPCOMMENT("Start of Init Phase");
++ }
++
++ return;
++
++init_failed:
++
++ if (gsDBGPdumpState.pszFile) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszFile, NULL);
++ gsDBGPdumpState.pszFile = NULL;
++ }
++
++ if (gsDBGPdumpState.pszScript) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszScript, NULL);
++ gsDBGPdumpState.pszScript = NULL;
++ }
++
++ if (gsDBGPdumpState.pszMsg) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszMsg, NULL);
++ gsDBGPdumpState.pszMsg = NULL;
++ }
++
++ gpfnDbgDrv = NULL;
++}
++
++void PDumpDeInit(void)
++{
++ u32 i = 0;
++
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++)
++ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++
++ if (gsDBGPdumpState.pszFile) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszFile, NULL);
++ gsDBGPdumpState.pszFile = NULL;
++ }
++
++ if (gsDBGPdumpState.pszScript) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszScript, NULL);
++ gsDBGPdumpState.pszScript = NULL;
++ }
++
++ if (gsDBGPdumpState.pszMsg) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszMsg, NULL);
++ gsDBGPdumpState.pszMsg = NULL;
++ }
++
++ gpfnDbgDrv = NULL;
++}
++
++enum PVRSRV_ERROR PDumpStartInitPhaseKM(void)
++{
++ u32 i;
++
++ if (gpfnDbgDrv) {
++ PDUMPCOMMENT("Start Init Phase");
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++)
++ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.
++ psStream[i]);
++ }
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpStopInitPhaseKM(void)
++{
++ u32 i;
++
++ if (gpfnDbgDrv) {
++ PDUMPCOMMENT("Stop Init Phase");
++
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++)
++ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.
++ psStream[i]);
++ }
++ return PVRSRV_OK;
++}
++
++void PDumpComment(char *pszFormat, ...)
++{
++ va_list ap;
++
++ __PDBG_PDUMP_STATE_GET_MSG_STRING();
++
++ va_start(ap, pszFormat);
++ vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, ap);
++ va_end(ap);
++
++ PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++void PDumpCommentWithFlags(u32 ui32Flags, char *pszFormat, ...)
++{
++ va_list ap;
++
++ __PDBG_PDUMP_STATE_GET_MSG_STRING();
++
++ va_start(ap, pszFormat);
++ vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, ap);
++ va_end(ap);
++
++ PDumpCommentKM(pszMsg, ui32Flags);
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(void)
++{
++ return gpfnDbgDrv->pfnIsLastCaptureFrame(
++ gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++IMG_BOOL PDumpIsCaptureFrameKM(void)
++{
++ if (PDumpSuspended())
++ return IMG_FALSE;
++ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_SCRIPT2],
++ IMG_FALSE);
++}
++
++enum PVRSRV_ERROR PDumpRegWithFlagsKM(u32 ui32Reg, u32 ui32Data, u32 ui32Flags)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "WRW :SGXREG:0x%8.8X 0x%8.8X\r\n", ui32Reg, ui32Data);
++ PDumpWriteString2(pszScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++void PDumpReg(u32 ui32Reg, u32 ui32Data)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "WRW :SGXREG:0x%8.8X 0x%8.8X\r\n", ui32Reg, ui32Data);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++}
++
++enum PVRSRV_ERROR PDumpRegPolWithFlagsKM(u32 ui32RegAddr, u32 ui32RegValue,
++ u32 ui32Mask, u32 ui32Flags)
++{
++#define POLL_DELAY 1000
++#define POLL_COUNT_LONG (2000000000 / POLL_DELAY)
++#define POLL_COUNT_SHORT (1000000 / POLL_DELAY)
++
++ u32 ui32PollCount;
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask &
++ EUR_CR_EVENT_STATUS_TA_FINISHED_MASK)) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask &
++ EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK)) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask &
++ EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK)))
++ ui32PollCount = POLL_COUNT_LONG;
++ else
++ ui32PollCount = POLL_COUNT_SHORT;
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "POL :SGXREG:0x%8.8X 0x%8.8X 0x%8.8X %d %u %d\r\n",
++ ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount,
++ POLL_DELAY);
++ PDumpWriteString2(pszScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpRegPolKM(u32 ui32RegAddr, u32 ui32RegValue, u32 ui32Mask)
++{
++ return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask,
++ PDUMP_FLAGS_CONTINUOUS);
++}
++
++void PDumpMallocPages(enum PVRSRV_DEVICE_TYPE eDeviceType, u32 ui32DevVAddr,
++ void *pvLinAddr, void *hOSMemHandle, u32 ui32NumBytes,
++ u32 ui32PageSize, void *hUniqueTag)
++{
++ u32 ui32Offset;
++ u32 ui32NumPages;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32Page;
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++
++ PVR_ASSERT(((u32) ui32DevVAddr & (ui32PageSize - 1)) == 0);
++ PVR_ASSERT(hOSMemHandle);
++ PVR_ASSERT(((u32) ui32NumBytes & (ui32PageSize - 1)) == 0);
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "-- MALLOC :SGXMEM:VA_%8.8X 0x%8.8X %u\r\n", ui32DevVAddr,
++ ui32NumBytes, ui32PageSize);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++ ui32Offset = 0;
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ while (ui32NumPages--) {
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
++ ui32Offset += ui32PageSize;
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ ui32Page = sDevPAddr.uiAddr / ui32PageSize;
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "MALLOC :SGXMEM:PA_%8.8X%8.8X %u %u 0x%8.8X\r\n",
++ (u32)hUniqueTag, ui32Page * ui32PageSize,
++ ui32PageSize, ui32PageSize, ui32Page * ui32PageSize);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++}
++
++void PDumpMallocPageTable(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void *pvLinAddr, u32 ui32PTSize, void *hUniqueTag)
++{
++ u8 *pui8LinAddr;
++ u32 ui32NumPages;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32Page;
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((u32) pvLinAddr & (ui32PTSize - 1)) == 0);
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8X %lu\r\n", ui32PTSize,
++ SGX_MMU_PAGE_SIZE);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++ pui8LinAddr = (u8 *) pvLinAddr;
++
++ ui32NumPages = 1;
++
++ while (ui32NumPages--) {
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "MALLOC :SGXMEM:PA_%8.8X%8.8lX 0x%lX %lu 0x%8.8lX\r\n",
++ (u32)hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++ pui8LinAddr += SGX_MMU_PAGE_SIZE;
++ }
++}
++
++void PDumpFreePages(struct BM_HEAP *psBMHeap, struct IMG_DEV_VIRTADDR sDevVAddr,
++ u32 ui32NumBytes, u32 ui32PageSize, void *hUniqueTag,
++ IMG_BOOL bInterleaved)
++{
++ u32 ui32NumPages, ui32PageCounter;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((u32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0);
++ PVR_ASSERT(((u32) ui32NumBytes & (ui32PageSize - 1)) == 0);
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- FREE :SGXMEM:VA_%8.8X\r\n",
++ sDevVAddr.uiAddr);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages;
++ ui32PageCounter++) {
++ if (!bInterleaved || (ui32PageCounter % 2) == 0) {
++ sDevPAddr =
++ psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->
++ pMMUHeap,
++ sDevVAddr);
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "FREE :SGXMEM:PA_%8.8X%8.8X\r\n",
++ (u32)hUniqueTag, sDevPAddr.uiAddr);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++ } else {
++
++ }
++
++ sDevVAddr.uiAddr += ui32PageSize;
++ }
++}
++
++void PDumpFreePageTable(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void *pvLinAddr, u32 ui32PTSize, void *hUniqueTag)
++{
++ u8 *pui8LinAddr;
++ u32 ui32NumPages;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32Page;
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((u32) pvLinAddr & (ui32PTSize - 1)) == 0);
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++ pui8LinAddr = (u8 *) pvLinAddr;
++
++ ui32NumPages = 1;
++
++ while (ui32NumPages--) {
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++ pui8LinAddr += SGX_MMU_PAGE_SIZE;
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "FREE :SGXMEM:PA_%8.8X%8.8lX\r\n", (u32)hUniqueTag,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++}
++
++void PDumpPDReg(u32 ui32Reg, u32 ui32Data, void *hUniqueTag)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "WRW :SGXREG:0x%8.8X :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX\r\n",
++ ui32Reg, (u32)hUniqueTag, ui32Data & ~(SGX_MMU_PAGE_SIZE - 1),
++ ui32Data & (SGX_MMU_PAGE_SIZE - 1));
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++}
++
++void PDumpPDRegWithFlags(u32 ui32Reg, u32 ui32Data, u32 ui32Flags,
++ void *hUniqueTag)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "WRW :SGXREG:0x%8.8X :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX\r\n",
++ ui32Reg, (u32) hUniqueTag, ui32Data & ~(SGX_MMU_PAGE_SIZE - 1),
++ ui32Data & (SGX_MMU_PAGE_SIZE - 1));
++ PDumpWriteString2(pszScript, ui32Flags);
++}
++
++enum PVRSRV_ERROR PDumpMemPolKM(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset, u32 ui32Value, u32 ui32Mask,
++ enum PDUMP_POLL_OPERATOR eOperator,
++ IMG_BOOL bLastFrame, IMG_BOOL bOverwrite,
++ void *hUniqueTag)
++{
++#define MEMPOLL_DELAY (1000)
++#define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY)
++
++ u32 ui32PageOffset;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct IMG_DEV_VIRTADDR sDevVPageAddr;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ u32 ui32Flags;
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++ PVR_ASSERT((ui32Offset + sizeof(u32)) <=
++ psMemInfo->ui32AllocSize);
++
++ if (gsDBGPdumpState.ui32ParamFileNum == 0)
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++ else
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%u.prm",
++ gsDBGPdumpState.ui32ParamFileNum);
++
++ ui32Flags = 0;
++
++ if (bLastFrame)
++ ui32Flags |= PDUMP_FLAGS_LASTFRAME;
++
++ if (bOverwrite)
++ ui32Flags |= PDUMP_FLAGS_RESETLFBUFFER;
++
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++ ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1);
++
++ sDevVPageAddr.uiAddr =
++ psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX, "POL :SGXMEM:"
++ "PA_%8.8X%8.8lX:0x%8.8lX 0x%8.8X 0x%8.8X %d %d %d\r\n",
++ (u32)hUniqueTag, sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++ ui32Value, ui32Mask, eOperator, MEMPOLL_COUNT, MEMPOLL_DELAY);
++ PDumpWriteString2(pszScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpMemKM(void *pvAltLinAddr,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset, u32 ui32Bytes, u32 ui32Flags,
++ void *hUniqueTag)
++{
++ u32 ui32PageByteOffset;
++ u8 *pui8DataLinAddr = NULL;
++ struct IMG_DEV_VIRTADDR sDevVPageAddr;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ u32 ui32ParamOutPos;
++ u32 ui32CurrentOffset;
++ u32 ui32BytesRemaining;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++
++ if (ui32Bytes == 0)
++ return PVRSRV_OK;
++
++ if (pvAltLinAddr) {
++ pui8DataLinAddr = pvAltLinAddr;
++ } else {
++ if (psMemInfo->pvLinAddrKM)
++ pui8DataLinAddr =
++ (u8 *) psMemInfo->pvLinAddrKM + ui32Offset;
++
++ }
++
++ PVR_ASSERT(pui8DataLinAddr);
++
++ ui32ParamOutPos =
++ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_PARAM2]);
++
++ if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++ pui8DataLinAddr, ui32Bytes, ui32Flags))
++ return PVRSRV_ERROR_GENERIC;
++
++ if (gsDBGPdumpState.ui32ParamFileNum == 0) {
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++ } else {
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%u.prm",
++ gsDBGPdumpState.ui32ParamFileNum);
++ }
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "-- LDB :SGXMEM:VA_%8.8X:0x%8.8X 0x%8.8X 0x%8.8X %s\r\n",
++ psMemInfo->sDevVAddr.uiAddr,
++ ui32Offset, ui32Bytes, ui32ParamOutPos, pszFile);
++ PDumpWriteString2(pszScript, ui32Flags);
++
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++ ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1);
++
++ sDevVAddr = psMemInfo->sDevVAddr;
++ sDevVAddr.uiAddr += ui32Offset;
++
++ ui32BytesRemaining = ui32Bytes;
++ ui32CurrentOffset = ui32Offset;
++
++ while (ui32BytesRemaining > 0) {
++ u32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32CurrentOffset);
++
++ sDevVPageAddr.uiAddr =
++ psMemInfo->sDevVAddr.uiAddr + ui32CurrentOffset -
++ ui32PageByteOffset;
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++ sDevPAddr.uiAddr += ui32PageByteOffset;
++
++ if (ui32PageByteOffset) {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining,
++ PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++
++ ui32PageByteOffset = 0;
++ }
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX, "LDB :SGXMEM:"
++ "PA_%8.8X%8.8lX:0x%8.8lX 0x%8.8X 0x%8.8X %s\r\n",
++ (u32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++ ui32BlockBytes, ui32ParamOutPos, pszFile);
++ PDumpWriteString2(pszScript, ui32Flags);
++
++ ui32BytesRemaining -= ui32BlockBytes;
++ ui32CurrentOffset += ui32BlockBytes;
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++ PVR_ASSERT(ui32BytesRemaining == 0);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpMem2KM(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void *pvLinAddr, u32 ui32Bytes, u32 ui32Flags,
++ IMG_BOOL bInitialisePages, void *hUniqueTag1,
++ void *hUniqueTag2)
++{
++ u32 ui32NumPages;
++ u32 ui32PageOffset;
++ u32 ui32BlockBytes;
++ u8 *pui8LinAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ u32 ui32Offset;
++ u32 ui32ParamOutPos;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++ if (ui32Flags)
++ ;
++
++ if (!pvLinAddr)
++ return PVRSRV_ERROR_GENERIC;
++
++ ui32ParamOutPos =
++ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_PARAM2]);
++
++ if (bInitialisePages) {
++
++ if (!PDumpWriteILock
++ (gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], pvLinAddr,
++ ui32Bytes, PDUMP_FLAGS_CONTINUOUS))
++ return PVRSRV_ERROR_GENERIC;
++
++ if (gsDBGPdumpState.ui32ParamFileNum == 0)
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++ else
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%u.prm",
++ gsDBGPdumpState.ui32ParamFileNum);
++ }
++
++ ui32PageOffset = (u32) pvLinAddr & (HOST_PAGESIZE() - 1);
++ ui32NumPages =
++ (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - 1) /
++ HOST_PAGESIZE();
++ pui8LinAddr = (u8 *) pvLinAddr;
++
++ while (ui32NumPages--) {
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++ else
++ ui32BlockBytes = ui32Bytes;
++
++ if (bInitialisePages) {
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX, "LDB :SGXMEM:"
++ "PA_%8.8X%8.8lX:0x%8.8lX 0x%8.8X "
++ "0x%8.8X %s\r\n",
++ (u32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++ ui32BlockBytes, ui32ParamOutPos, pszFile);
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++ } else {
++ for (ui32Offset = 0; ui32Offset < ui32BlockBytes;
++ ui32Offset += sizeof(u32)) {
++ u32 ui32PTE =
++ *((u32 *) (pui8LinAddr +
++ ui32Offset));
++
++ if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0) {
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++"WRW :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX\r\n",
++ (u32)hUniqueTag1,
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ ~(SGX_MMU_PAGE_SIZE - 1),
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ (SGX_MMU_PAGE_SIZE - 1),
++ (u32)hUniqueTag2,
++ ui32PTE &
++ SGX_MMU_PDE_ADDR_MASK,
++ ui32PTE &
++ ~SGX_MMU_PDE_ADDR_MASK);
++ } else {
++ PVR_ASSERT(!
++ (ui32PTE &
++ SGX_MMU_PTE_VALID));
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "WRW :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX 0x%8.8X%8.8X\r\n",
++ (u32) hUniqueTag1,
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ ~(SGX_MMU_PAGE_SIZE - 1),
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ (SGX_MMU_PAGE_SIZE - 1),
++ ui32PTE, (u32)hUniqueTag2);
++ }
++ PDumpWriteString2(pszScript,
++ PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++
++ ui32PageOffset = 0;
++ ui32Bytes -= ui32BlockBytes;
++ pui8LinAddr += ui32BlockBytes;
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpPDDevPAddrKM(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ struct IMG_DEV_PHYADDR sPDDevPAddr,
++ void *hUniqueTag1, void *hUniqueTag2)
++{
++ u32 ui32ParamOutPos;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++ u32 ui32PageByteOffset;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++ struct IMG_DEV_VIRTADDR sDevVPageAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++ ui32ParamOutPos =
++ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_PARAM2]);
++
++ if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++ (u8 *)&sPDDevPAddr, sizeof(struct IMG_DEV_PHYADDR),
++ PDUMP_FLAGS_CONTINUOUS))
++ return PVRSRV_ERROR_GENERIC;
++
++ if (gsDBGPdumpState.ui32ParamFileNum == 0)
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++ else
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%u.prm",
++ gsDBGPdumpState.ui32ParamFileNum);
++
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++ ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1);
++
++ sDevVAddr = psMemInfo->sDevVAddr;
++ sDevVAddr.uiAddr += ui32Offset;
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++ sDevPAddr.uiAddr += ui32PageByteOffset;
++
++ if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0) {
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++"WRW :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX\r\n",
++ (u32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++ (u32)hUniqueTag2,
++ sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK,
++ sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++ } else {
++ PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "WRW :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX 0x%8.8X\r\n",
++ (u32)hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++ sPDDevPAddr.uiAddr);
++ }
++ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpSetFrameKM(u32 ui32Frame)
++{
++ u32 ui32Stream;
++
++ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
++ if (gsDBGPdumpState.psStream[ui32Stream])
++ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream],
++ ui32Frame);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR PDumpGetFrameKM(u32 *pui32Frame)
++{
++ *pui32Frame =
++ DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpCommentKM(char *pszComment, u32 ui32Flags)
++{
++ u32 ui32Count = 0;
++ enum PVRSRV_ERROR eError;
++ __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC);
++
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ eError = PVRSRV_ERROR_GENERIC;
++ else
++ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++
++ if (!PDumpWriteString2("-- ", ui32Flags))
++ return eError;
++
++ snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszComment);
++
++ while ((pszMsg[ui32Count] != 0) && (ui32Count < SZ_MSG_SIZE_MAX))
++ ui32Count++;
++
++ if ((pszMsg[ui32Count - 1] != '\n') && (ui32Count < SZ_MSG_SIZE_MAX)) {
++ pszMsg[ui32Count] = '\n';
++ ui32Count++;
++ pszMsg[ui32Count] = '\0';
++ }
++ if ((pszMsg[ui32Count - 2] != '\r') && (ui32Count < SZ_MSG_SIZE_MAX)) {
++ pszMsg[ui32Count - 1] = '\r';
++ pszMsg[ui32Count] = '\n';
++ ui32Count++;
++ pszMsg[ui32Count] = '\0';
++ }
++
++ PDumpWriteString2(pszMsg, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpDriverInfoKM(char *pszString, u32 ui32Flags)
++{
++ u32 ui32Count = 0;
++ __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC);
++
++ snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszString);
++
++ while ((pszMsg[ui32Count] != 0) && (ui32Count < SZ_MSG_SIZE_MAX))
++ ui32Count++;
++
++ if ((pszMsg[ui32Count - 1] != '\n') && (ui32Count < SZ_MSG_SIZE_MAX)) {
++ pszMsg[ui32Count] = '\n';
++ ui32Count++;
++ pszMsg[ui32Count] = '\0';
++ }
++ if ((pszMsg[ui32Count - 2] != '\r') && (ui32Count < SZ_MSG_SIZE_MAX)) {
++ pszMsg[ui32Count - 1] = '\r';
++ pszMsg[ui32Count] = '\n';
++ ui32Count++;
++ pszMsg[ui32Count] = '\0';
++ }
++
++ if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_DRIVERINFO],
++ (u8 *) pszMsg, ui32Count, ui32Flags)) {
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ return PVRSRV_ERROR_GENERIC;
++ else
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpBitmapKM(char *pszFileName, u32 ui32FileOffset,
++ u32 ui32Width, u32 ui32Height, u32 ui32StrideInBytes,
++ struct IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size, enum PDUMP_PIXEL_FORMAT ePixelFormat,
++ enum PDUMP_MEM_FORMAT eMemFormat, u32 ui32PDumpFlags)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "\r\n-- Dump bitmap of render\r\n");
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "SII %s %s.bin :SGXMEM:v:0x%08X 0x%08X "
++ "0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\r\n",
++ pszFileName, pszFileName, sDevBaseAddr.uiAddr, ui32Size,
++ ui32FileOffset, ePixelFormat, ui32Width, ui32Height,
++ ui32StrideInBytes, eMemFormat);
++
++ PDumpWriteString2(pszScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpReadRegKM(char *pszFileName, u32 ui32FileOffset,
++ u32 ui32Address, u32 ui32Size, u32 ui32PDumpFlags)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "SAB :SGXREG:0x%08X 0x%08X %s\r\n",
++ ui32Address, ui32FileOffset, pszFileName);
++
++ PDumpWriteString2(pszScript, ui32PDumpFlags);
++
++ return PVRSRV_OK;
++}
++
++static IMG_BOOL PDumpWriteString2(char *pszString, u32 ui32Flags)
++{
++ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2],
++ (u8 *)pszString, strlen(pszString), ui32Flags);
++}
++
++static IMG_BOOL PDumpWriteILock(struct DBG_STREAM *psStream, u8 *pui8Data,
++ u32 ui32Count, u32 ui32Flags)
++{
++ u32 ui32Written = 0;
++ u32 ui32Off = 0;
++
++ if (!psStream || PDumpSuspended() || (ui32Flags & PDUMP_FLAGS_NEVER))
++ return IMG_TRUE;
++
++ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]) {
++ u32 ui32ParamOutPos =
++ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.
++ psStream
++ [PDUMP_STREAM_PARAM2]);
++
++ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]
++ &&
++ PDumpWriteString2
++ ("\r\n-- Splitting pdump output file\r\n\r\n",
++ ui32Flags))) {
++ DbgSetMarker(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_PARAM2],
++ ui32ParamOutPos);
++ gsDBGPdumpState.ui32ParamFileNum++;
++ }
++ }
++
++ while (((u32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF)) {
++ ui32Written =
++ DbgWrite(psStream, &pui8Data[ui32Off], ui32Count,
++ ui32Flags);
++
++ if (ui32Written == 0)
++ OSReleaseThreadQuanta();
++
++ if (ui32Written != 0xFFFFFFFF) {
++ ui32Off += ui32Written;
++ ui32Count -= ui32Written;
++ }
++ }
++
++ if (ui32Written == 0xFFFFFFFF)
++ return IMG_FALSE;
++
++ return IMG_TRUE;
++}
++
++static void DbgSetFrame(struct DBG_STREAM *psStream, u32 ui32Frame)
++{
++ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++static u32 DbgGetFrame(struct DBG_STREAM *psStream)
++{
++ return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static void DbgSetMarker(struct DBG_STREAM *psStream, u32 ui32Marker)
++{
++ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static u32 DbgWrite(struct DBG_STREAM *psStream, u8 *pui8Data,
++ u32 ui32BCount, u32 ui32Flags)
++{
++ u32 ui32BytesWritten;
++
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) {
++ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) &&
++ (psStream->ui32Start == 0xFFFFFFFF) &&
++ (psStream->ui32End == 0xFFFFFFFF) &&
++ psStream->bInitPhaseComplete)
++ ui32BytesWritten = ui32BCount;
++ else
++ ui32BytesWritten =
++ gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data,
++ ui32BCount, 1);
++ } else if (ui32Flags & PDUMP_FLAGS_LASTFRAME) {
++ u32 ui32DbgFlags;
++
++ ui32DbgFlags = 0;
++ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++
++ ui32BytesWritten =
++ gpfnDbgDrv->pfnWriteLF(psStream, pui8Data,
++ ui32BCount, 1, ui32DbgFlags);
++ } else {
++ ui32BytesWritten =
++ gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data,
++ ui32BCount, 1);
++ }
++
++ return ui32BytesWritten;
++}
++
++IMG_BOOL PDumpTestNextFrame(u32 ui32CurrentFrame)
++{
++ IMG_BOOL bFrameDumped;
++
++ bFrameDumped = IMG_FALSE;
++ PDumpSetFrameKM(ui32CurrentFrame + 1);
++ bFrameDumped = PDumpIsCaptureFrameKM();
++ PDumpSetFrameKM(ui32CurrentFrame);
++
++ return bFrameDumped;
++}
++
++void PDump3DSignatureRegisters(u32 ui32DumpFrameNum, IMG_BOOL bLastFrame,
++ u32 *pui32Registers, u32 ui32NumRegisters)
++{
++ u32 ui32FileOffset, ui32Flags;
++ u32 i;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = 0;
++
++ PDUMPCOMMENTWITHFLAGS(ui32Flags,
++ "\r\n-- Dump 3D signature registers\r\n");
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%u_3d.sig",
++ ui32DumpFrameNum);
++
++ for (i = 0; i < ui32NumRegisters; i++) {
++ PDumpReadRegKM(pszFile, ui32FileOffset, pui32Registers[i],
++ sizeof(u32), ui32Flags);
++ ui32FileOffset += sizeof(u32);
++ }
++}
++
++static void PDumpCountRead(char *pszFileName, u32 ui32Address, u32 ui32Size,
++ u32 *pui32FileOffset, IMG_BOOL bLastFrame)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++ "SAB :SGXREG:0x%08X 0x%08X %s\r\n", ui32Address,
++ *pui32FileOffset, pszFileName);
++ PDumpWriteString2(pszScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++
++ *pui32FileOffset += ui32Size;
++}
++
++void PDumpCounterRegisters(u32 ui32DumpFrameNum, IMG_BOOL bLastFrame,
++ u32 *pui32Registers, u32 ui32NumRegisters)
++{
++ u32 ui32FileOffset;
++ u32 i;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING();
++
++ PDUMPCOMMENTWITHFLAGS(bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0,
++ "\r\n-- Dump counter registers\r\n");
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%u.perf",
++ ui32DumpFrameNum);
++ ui32FileOffset = 0;
++
++ for (i = 0; i < ui32NumRegisters; i++)
++ PDumpCountRead(pszFile, pui32Registers[i], sizeof(u32),
++ &ui32FileOffset, bLastFrame);
++}
++
++void PDumpTASignatureRegisters(u32 ui32DumpFrameNum, u32 ui32TAKickCount,
++ IMG_BOOL bLastFrame, u32 *pui32Registers,
++ u32 ui32NumRegisters)
++{
++ u32 ui32FileOffset, ui32Flags;
++ u32 i;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ PDUMPCOMMENTWITHFLAGS(ui32Flags,
++ "\r\n-- Dump TA signature registers\r\n");
++ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%u_ta.sig",
++ ui32DumpFrameNum);
++
++ ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(u32);
++
++ for (i = 0; i < ui32NumRegisters; i++) {
++ PDumpReadRegKM(pszFile, ui32FileOffset, pui32Registers[i],
++ sizeof(u32), ui32Flags);
++ ui32FileOffset += sizeof(u32);
++ }
++}
++
++void PDumpRegRead(const u32 ui32RegOffset, u32 ui32Flags)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%X\r\n",
++ ui32RegOffset);
++ PDumpWriteString2(pszScript, ui32Flags);
++}
++
++void PDumpCycleCountRegRead(const u32 ui32RegOffset, IMG_BOOL bLastFrame)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%X\r\n",
++ ui32RegOffset);
++ PDumpWriteString2(pszScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++}
++
++void PDumpHWPerfCBKM(char *pszFileName, u32 ui32FileOffset,
++ struct IMG_DEV_VIRTADDR sDevBaseAddr, u32 ui32Size,
++ u32 ui32PDumpFlags)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "SAB :SGXMEM:v:0x%08X 0x%08X 0x%08X %s.bin\r\n",
++ sDevBaseAddr.uiAddr, ui32Size, ui32FileOffset, pszFileName);
++
++ PDumpWriteString2(pszScript, ui32PDumpFlags);
++}
++
++void PDumpCBP(struct PVRSRV_KERNEL_MEM_INFO *psROffMemInfo,
++ u32 ui32ROffOffset, u32 ui32WPosVal, u32 ui32PacketSize,
++ u32 ui32BufferSize, u32 ui32Flags, void *hUniqueTag)
++{
++ u32 ui32PageOffset;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ struct IMG_DEV_VIRTADDR sDevVPageAddr;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ PVR_ASSERT((ui32ROffOffset + sizeof(u32)) <=
++ psROffMemInfo->ui32AllocSize);
++
++ sDevVAddr = psROffMemInfo->sDevVAddr;
++
++ sDevVAddr.uiAddr += ui32ROffOffset;
++
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psROffMemInfo->sMemBlk.hOSMemHandle,
++ ui32ROffOffset);
++ ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1);
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++
++ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ snprintf(pszScript,
++ SZ_SCRIPT_SIZE_MAX,
++ "CBP :SGXMEM:PA_%8.8X%8.8lX:0x%8.8lX 0x%8.8X 0x%8.8X 0x%8.8X\r\n",
++ (u32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++ ui32WPosVal, ui32PacketSize, ui32BufferSize);
++ PDumpWriteString2(pszScript, ui32Flags);
++}
++
++void PDumpIDLWithFlags(u32 ui32Clocks, u32 ui32Flags)
++{
++ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++ sprintf(pszScript, "IDL %u\r\n", ui32Clocks);
++ PDumpWriteString2(pszScript, ui32Flags);
++}
++
++void PDumpIDL(u32 ui32Clocks)
++{
++ PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++
++void PDumpSuspendKM(void)
++{
++ atomic_inc(&gsPDumpSuspended);
++}
++
++void PDumpResumeKM(void)
++{
++ atomic_dec(&gsPDumpSuspended);
++}
++
++#endif
+diff --git a/drivers/gpu/pvr/pdump_common.c b/drivers/gpu/pvr/pdump_common.c
+new file mode 100644
+index 0000000..4f0e6f2
+--- /dev/null
++++ b/drivers/gpu/pvr/pdump_common.c
+@@ -0,0 +1,237 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(PDUMP)
++#include "services_headers.h"
++#include "pdump_km.h"
++
++#if !defined(PDUMP_TEMP_BUFFER_SIZE)
++#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024L)
++#endif
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++#define PTR_PLUS(t, p, x) ((t *)(((char *)(p)) + (x)))
++#define VPTR_PLUS(p, x) PTR_PLUS(void, p, x)
++#define VPTR_INC(p, x) (p = VPTR_PLUS(p, x))
++#define MAX_PDUMP_MMU_CONTEXTS 10
++static void *gpvTempBuffer;
++static void *ghTempBufferBlockAlloc;
++static u16 gui16MMUContextUsage;
++
++static void *GetTempBuffer(void)
++{
++ if (gpvTempBuffer == NULL) {
++ enum PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ &gpvTempBuffer,
++ &ghTempBufferBlockAlloc);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "GetTempBuffer: OSAllocMem failed: %d",
++ eError);
++ }
++
++ return gpvTempBuffer;
++}
++
++static void FreeTempBuffer(void)
++{
++ if (gpvTempBuffer != NULL) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PDUMP_TEMP_BUFFER_SIZE,
++ gpvTempBuffer, ghTempBufferBlockAlloc);
++ gpvTempBuffer = NULL;
++ }
++}
++
++void PDumpInitCommon(void)
++{
++ (void)GetTempBuffer();
++ PDumpInit();
++}
++
++void PDumpDeInitCommon(void)
++{
++ FreeTempBuffer();
++ PDumpDeInit();
++}
++
++enum PVRSRV_ERROR PDumpMemUM(struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *pvAltLinAddrUM, void *pvLinAddrUM,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset, u32 ui32Bytes, u32 ui32Flags,
++ void *hUniqueTag)
++{
++ void *pvAddrUM;
++ void *pvAddrKM;
++ u32 ui32BytesDumped;
++ u32 ui32CurrentOffset;
++
++ if (psMemInfo->pvLinAddrKM != NULL && pvAltLinAddrUM == NULL)
++ return PDumpMemKM(NULL, psMemInfo, ui32Offset, ui32Bytes,
++ ui32Flags, hUniqueTag);
++
++ pvAddrUM = (pvAltLinAddrUM != NULL) ? pvAltLinAddrUM :
++ ((pvLinAddrUM != NULL) ? VPTR_PLUS(pvLinAddrUM,
++ ui32Offset) : NULL);
++
++ pvAddrKM = GetTempBuffer();
++
++ PVR_ASSERT(pvAddrUM != NULL && pvAddrKM != NULL);
++ if (pvAddrUM == NULL || pvAddrKM == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE)
++ PDumpCommentWithFlags(ui32Flags,
++ "Dumping 0x%8.8lx bytes of memory, in blocks of 0x%8.8lx bytes",
++ ui32Bytes, (u32) PDUMP_TEMP_BUFFER_SIZE);
++
++ ui32CurrentOffset = ui32Offset;
++ for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;) {
++ enum PVRSRV_ERROR eError;
++ u32 ui32BytesToDump =
++ MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);
++
++ eError = OSCopyFromUser(psPerProc,
++ pvAddrKM, pvAddrUM, ui32BytesToDump);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpMemUM: OSCopyFromUser failed (%d), eError");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PDumpMemKM(pvAddrKM, psMemInfo, ui32CurrentOffset,
++ ui32BytesToDump, ui32Flags, hUniqueTag);
++
++ if (eError != PVRSRV_OK) {
++ if (ui32BytesDumped != 0)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpMemUM: PDumpMemKM failed (%d)",
++ eError);
++ PVR_ASSERT(ui32BytesDumped == 0);
++ return eError;
++ }
++
++ VPTR_INC(pvAddrUM, ui32BytesToDump);
++ ui32CurrentOffset += ui32BytesToDump;
++ ui32BytesDumped += ui32BytesToDump;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR _PdumpAllocMMUContext(u32 *pui32MMUContextID)
++{
++ u32 i;
++
++ for (i = 0; i < MAX_PDUMP_MMU_CONTEXTS; i++)
++ if ((gui16MMUContextUsage & (1UL << i)) == 0) {
++ gui16MMUContextUsage |= 1UL << i;
++ *pui32MMUContextID = i;
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "_PdumpAllocMMUContext: no free MMU context ids");
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++static enum PVRSRV_ERROR _PdumpFreeMMUContext(u32 ui32MMUContextID)
++{
++ if (ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS) {
++
++ gui16MMUContextUsage &= ~(1UL << ui32MMUContextID);
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "_PdumpFreeMMUContext: MMU context ids invalid");
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR PDumpSetMMUContext(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace, u32 *pui32MMUContextID,
++ u32 ui32MMUType, void *hUniqueTag1,
++ void *pvPDCPUAddr)
++{
++ u8 *pui8LinAddr = (u8 *) pvPDCPUAddr;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ struct IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32MMUContextID;
++ enum PVRSRV_ERROR eError;
++
++ eError = _PdumpAllocMMUContext(&ui32MMUContextID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d",
++ eError);
++ return eError;
++ }
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ sDevPAddr.uiAddr &= ~PVRSRV_4K_PAGE_SIZE;
++
++ PDumpComment("Set MMU Context\r\n");
++
++ PDumpComment("MMU :%s:v%d %d :%s:PA_%8.8lX%8.8lX\r\n",
++ pszMemSpace, ui32MMUContextID, ui32MMUType, pszMemSpace,
++ hUniqueTag1, sDevPAddr.uiAddr);
++
++ *pui32MMUContextID = ui32MMUContextID;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PDumpClearMMUContext(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace,
++ u32 ui32MMUContextID, u32 ui32MMUType)
++{
++ enum PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++ PDumpComment("Clear MMU Context\r\n");
++
++ PDumpComment("MMU :%s:v%d %d\r\n",
++ pszMemSpace, ui32MMUContextID, ui32MMUType);
++
++ eError = _PdumpFreeMMUContext(ui32MMUContextID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d",
++ eError);
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
+diff --git a/drivers/gpu/pvr/pdump_km.h b/drivers/gpu/pvr/pdump_km.h
+new file mode 100644
+index 0000000..958c333
+--- /dev/null
++++ b/drivers/gpu/pvr/pdump_km.h
+@@ -0,0 +1,268 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++
++#define PDUMP_FLAGS_NEVER 0x08000000
++#define PDUMP_FLAGS_TOOUT2MEM 0x10000000
++#define PDUMP_FLAGS_LASTFRAME 0x20000000
++#define PDUMP_FLAGS_RESETLFBUFFER 0x40000000
++#define PDUMP_FLAGS_CONTINUOUS 0x80000000
++
++#define PDUMP_PD_UNIQUETAG ((void *)0)
++#define PDUMP_PT_UNIQUETAG ((void *)0)
++
++#ifndef PDUMP
++#define MAKEUNIQUETAG(hMemInfo) (0)
++#endif
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo) \
++ (((struct BM_BUF *)(((struct PVRSRV_KERNEL_MEM_INFO *) \
++ hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++#define PDUMP_REG_FUNC_NAME PDumpReg
++
++enum PVRSRV_ERROR PDumpMemPolKM(struct PVRSRV_KERNEL_MEM_INFO
++ *psMemInfo, u32 ui32Offset,
++ u32 ui32Value, u32 ui32Mask,
++ enum PDUMP_POLL_OPERATOR eOperator,
++ IMG_BOOL bLastFrame,
++ IMG_BOOL bOverwrite,
++ void *hUniqueTag);
++
++enum PVRSRV_ERROR PDumpMemUM(struct PVRSRV_PER_PROCESS_DATA
++ *psProcData, void *pvAltLinAddr,
++ void *pvLinAddr,
++ struct PVRSRV_KERNEL_MEM_INFO
++ *psMemInfo, u32 ui32Offset,
++ u32 ui32Bytes, u32 ui32Flags,
++ void *hUniqueTag);
++
++enum PVRSRV_ERROR PDumpMemKM(void *pvAltLinAddr,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo, u32 ui32Offset,
++ u32 ui32Bytes, u32 ui32Flags, void *hUniqueTag);
++
++enum PVRSRV_ERROR PDumpMemPagesKM(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_DEV_PHYADDR *pPages, u32 ui32NumPages,
++ struct IMG_DEV_VIRTADDR sDevAddr, u32 ui32Start,
++ u32 ui32Length, u32 ui32Flags, void *hUniqueTag);
++
++enum PVRSRV_ERROR PDumpMem2KM(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void *pvLinAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ void *hUniqueTag1, void *hUniqueTag2);
++void PDumpInitCommon(void);
++void PDumpDeInitCommon(void);
++void PDumpInit(void);
++void PDumpDeInit(void);
++enum PVRSRV_ERROR PDumpStartInitPhaseKM(void);
++enum PVRSRV_ERROR PDumpStopInitPhaseKM(void);
++enum PVRSRV_ERROR PDumpSetFrameKM(u32 ui32Frame);
++enum PVRSRV_ERROR PDumpCommentKM(char *pszComment, u32 ui32Flags);
++enum PVRSRV_ERROR PDumpDriverInfoKM(char *pszString, u32 ui32Flags);
++enum PVRSRV_ERROR PDumpRegWithFlagsKM(u32 ui32RegAddr, u32 ui32RegValue,
++ u32 ui32Flags);
++
++enum PVRSRV_ERROR PDumpBitmapKM(char *pszFileName, u32 ui32FileOffset,
++ u32 ui32Width, u32 ui32Height, u32 ui32StrideInBytes,
++ struct IMG_DEV_VIRTADDR sDevBaseAddr, u32 ui32Size,
++ enum PDUMP_PIXEL_FORMAT ePixelFormat,
++ enum PDUMP_MEM_FORMAT eMemFormat, u32 ui32PDumpFlags);
++void PDumpHWPerfCBKM(char *pszFileName, u32 ui32FileOffset,
++ struct IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size, u32 ui32PDumpFlags);
++enum PVRSRV_ERROR PDumpReadRegKM(char *pszFileName, u32 ui32FileOffset,
++ u32 ui32Address, u32 ui32Size, u32 ui32PDumpFlags);
++void PDUMP_REG_FUNC_NAME(u32 dwReg, u32 dwData);
++
++void PDumpMsvdxRegRead(const char *const pRegRegion, const u32 dwRegOffset);
++
++void PDumpMsvdxRegWrite(const char *const pRegRegion, const u32 dwRegOffset,
++ const u32 dwData);
++
++enum PVRSRV_ERROR PDumpMsvdxRegPol(const char *const pRegRegion,
++ const u32 ui32Offset, const u32 ui32CheckFuncIdExt,
++ const u32 ui32RequValue, const u32 ui32Enable,
++ const u32 ui32PollCount, const u32 ui32TimeOut);
++
++enum PVRSRV_ERROR PDumpMsvdxWriteRef(const char *const pRegRegion,
++ const u32 ui32VLROffset, const u32 ui32Physical);
++
++void PDumpComment(char *pszFormat, ...);
++
++void PDumpCommentWithFlags(u32 ui32Flags, char *pszFormat, ...);
++enum PVRSRV_ERROR PDumpRegPolKM(u32 ui32RegAddr, u32 ui32RegValue,
++ u32 ui32Mask);
++enum PVRSRV_ERROR PDumpRegPolWithFlagsKM(u32 ui32RegAddr, u32 ui32RegValue,
++ u32 ui32Mask, u32 ui32Flags);
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(void);
++IMG_BOOL PDumpIsCaptureFrameKM(void);
++
++void PDumpMallocPages(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ u32 ui32DevVAddr, void *pvLinAddr, void *hOSMemHandle,
++ u32 ui32NumBytes, u32 ui32PageSize, void *hUniqueTag);
++void PDumpMallocPagesPhys(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ u32 ui32DevVAddr, u32 *pui32PhysPages, u32 ui32NumPages,
++ void *hUniqueTag);
++void PDumpMallocPageTable(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void *pvLinAddr, u32 ui32NumBytes, void *hUniqueTag);
++enum PVRSRV_ERROR PDumpSetMMUContext(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace, u32 *pui32MMUContextID,
++ u32 ui32MMUType, void *hUniqueTag1,
++ void *pvPDCPUAddr);
++enum PVRSRV_ERROR PDumpClearMMUContext(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace,
++ u32 ui32MMUContextID, u32 ui32MMUType);
++void PDumpFreePages(struct BM_HEAP *psBMHeap,
++ struct IMG_DEV_VIRTADDR sDevVAddr, u32 ui32NumBytes,
++ u32 ui32PageSize, void *hUniqueTag, IMG_BOOL bInterleaved);
++void PDumpFreePageTable(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void *pvLinAddr, u32 ui32NumBytes, void *hUniqueTag);
++void PDumpPDReg(u32 ui32Reg, u32 ui32dwData, void *hUniqueTag);
++void PDumpPDRegWithFlags(u32 ui32Reg, u32 ui32Data, u32 ui32Flags,
++ void *hUniqueTag);
++
++enum PVRSRV_ERROR PDumpPDDevPAddrKM(struct PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset, struct IMG_DEV_PHYADDR sPDDevPAddr,
++ void *hUniqueTag1, void *hUniqueTag2);
++
++IMG_BOOL PDumpTestNextFrame(u32 ui32CurrentFrame);
++
++void PDumpTASignatureRegisters(u32 ui32DumpFrameNum,
++ u32 ui32TAKickCount, IMG_BOOL bLastFrame,
++ u32 *pui32Registers, u32 ui32NumRegisters);
++
++void PDump3DSignatureRegisters(u32 ui32DumpFrameNum, IMG_BOOL bLastFrame,
++ u32 *pui32Registers, u32 ui32NumRegisters);
++
++void PDumpRegRead(const u32 dwRegOffset, u32 ui32Flags);
++
++void PDumpCycleCountRegRead(const u32 dwRegOffset, IMG_BOOL bLastFrame);
++
++void PDumpCounterRegisters(u32 ui32DumpFrameNum, IMG_BOOL bLastFrame,
++ u32 *pui32Registers, u32 ui32NumRegisters);
++
++void PDumpCBP(struct PVRSRV_KERNEL_MEM_INFO *psROffMemInfo,
++ u32 ui32ROffOffset,
++ u32 ui32WPosVal,
++ u32 ui32PacketSize,
++ u32 ui32BufferSize, u32 ui32Flags, void *hUniqueTag);
++
++void PDumpIDLWithFlags(u32 ui32Clocks, u32 ui32Flags);
++void PDumpIDL(u32 ui32Clocks);
++
++void PDumpSuspendKM(void);
++void PDumpResumeKM(void);
++
++#define PDUMPMEMPOL PDumpMemPolKM
++#define PDUMPMEM PDumpMemKM
++#define PDUMPMEM2 PDumpMem2KM
++#define PDUMPMEMUM PDumpMemUM
++#define PDUMPINIT PDumpInitCommon
++#define PDUMPDEINIT PDumpDeInitCommon
++#define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM
++#define PDUMPTESTFRAME PDumpIsCaptureFrameKM
++#define PDUMPTESTNEXTFRAME PDumpTestNextFrame
++#define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM
++#define PDUMPREG PDUMP_REG_FUNC_NAME
++#define PDUMPCOMMENT PDumpComment
++#define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
++#define PDUMPREGPOL PDumpRegPolKM
++#define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM
++#define PDUMPMALLOCPAGES PDumpMallocPages
++#define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
++#define PDUMPSETMMUCONTEXT PDumpSetMMUContext
++#define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
++#define PDUMPFREEPAGES PDumpFreePages
++#define PDUMPFREEPAGETABLE PDumpFreePageTable
++#define PDUMPPDREG PDumpPDReg
++#define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
++#define PDUMPCBP PDumpCBP
++#define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys
++#define PDUMPENDINITPHASE PDumpStopInitPhaseKM
++#define PDUMPMSVDXREGWRITE PDumpMsvdxRegWrite
++#define PDUMPMSVDXREGREAD PDumpMsvdxRegRead
++#define PDUMPMSVDXPOL PDumpMsvdxRegPol
++#define PDUMPMSVDXWRITEREF PDumpMsvdxWriteRef
++#define PDUMPBITMAPKM PDumpBitmapKM
++#define PDUMPDRIVERINFO PDumpDriverInfoKM
++#define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
++#define PDUMPIDL PDumpIDL
++#define PDUMPSUSPEND PDumpSuspendKM
++#define PDUMPRESUME PDumpResumeKM
++
++#else
++#define PDUMPMEMPOL(args...)
++#define PDUMPMEM(args...)
++#define PDUMPMEM2(args...)
++#define PDUMPMEMUM(args...)
++#define PDUMPINIT(args...)
++#define PDUMPDEINIT(args...)
++#define PDUMPISLASTFRAME(args...)
++#define PDUMPTESTFRAME(args...)
++#define PDUMPTESTNEXTFRAME(args...)
++#define PDUMPREGWITHFLAGS(args...)
++#define PDUMPREG(args...)
++#define PDUMPCOMMENT(args...)
++#define PDUMPREGPOL(args...)
++#define PDUMPREGPOLWITHFLAGS(args...)
++#define PDUMPMALLOCPAGES(args...)
++#define PDUMPMALLOCPAGETABLE(args...)
++#define PDUMPSETMMUCONTEXT(args...)
++#define PDUMPCLEARMMUCONTEXT(args...)
++#define PDUMPFREEPAGES(args...)
++#define PDUMPFREEPAGETABLE(args...)
++#define PDUMPPDREG(args...)
++#define PDUMPPDREGWITHFLAGS(args...)
++#define PDUMPSYNC(args...)
++#define PDUMPCOPYTOMEM(args...)
++#define PDUMPWRITE(args...)
++#define PDUMPCBP(args...)
++#define PDUMPCOMMENTWITHFLAGS(args...)
++#define PDUMPMALLOCPAGESPHYS(args...)
++#define PDUMPENDINITPHASE(args...)
++#define PDUMPMSVDXREG(args...)
++#define PDUMPMSVDXREGWRITE(args...)
++#define PDUMPMSVDXREGREAD(args...)
++#define PDUMPMSVDXPOLEQ(args...)
++#define PDUMPMSVDXPOL(args...)
++#define PDUMPBITMAPKM(args...)
++#define PDUMPDRIVERINFO(args...)
++#define PDUMPIDLWITHFLAGS(args...)
++#define PDUMPIDL(args...)
++#define PDUMPSUSPEND(args...)
++#define PDUMPRESUME(args...)
++#define PDUMPMSVDXWRITEREF(args...)
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/pdumpdefs.h b/drivers/gpu/pvr/pdumpdefs.h
+new file mode 100644
+index 0000000..dec488b
+--- /dev/null
++++ b/drivers/gpu/pvr/pdumpdefs.h
+@@ -0,0 +1,92 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++enum PDUMP_PIXEL_FORMAT {
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
++
++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++};
++
++enum PDUMP_MEM_FORMAT {
++ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
++
++ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++};
++
++enum PDUMP_POLL_OPERATOR {
++ PDUMP_POLL_OPERATOR_EQUAL = 0,
++ PDUMP_POLL_OPERATOR_LESS = 1,
++ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++ PDUMP_POLL_OPERATOR_GREATER = 3,
++ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/perproc.c b/drivers/gpu/pvr/perproc.c
+new file mode 100644
+index 0000000..d0d4f4c
+--- /dev/null
++++ b/drivers/gpu/pvr/perproc.c
+@@ -0,0 +1,266 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++#include "osperproc.h"
++
++#define HASH_TAB_INIT_SIZE 32
++
++static struct HASH_TABLE *psHashTab;
++
++static enum PVRSRV_ERROR FreePerProcessData(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ enum PVRSRV_ERROR eError;
++ u32 uiPerProc;
++
++ PVR_ASSERT(psPerProc != NULL);
++
++ if (psPerProc == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "FreePerProcessData: invalid parameter");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uiPerProc = HASH_Remove(psHashTab, (u32)psPerProc->ui32PID);
++ if (uiPerProc == 0) {
++ PVR_DPF(PVR_DBG_ERROR, "FreePerProcessData: "
++ "Couldn't find process in per-process data hash table");
++
++ PVR_ASSERT(psPerProc->ui32PID == 0);
++ } else {
++ PVR_ASSERT((struct PVRSRV_PER_PROCESS_DATA *)
++ uiPerProc == psPerProc);
++ PVR_ASSERT(((struct PVRSRV_PER_PROCESS_DATA *)uiPerProc)->
++ ui32PID == psPerProc->ui32PID);
++ }
++
++ if (psPerProc->psHandleBase != NULL) {
++ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "FreePerProcessData: "
++ "Couldn't free handle base for process (%d)",
++ eError);
++ return eError;
++ }
++ }
++
++ if (psPerProc->hPerProcData != NULL) {
++ eError =
++ PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ psPerProc->hPerProcData,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "FreePerProcessData: "
++ "Couldn't release per-process data handle (%d)",
++ eError);
++ return eError;
++ }
++ }
++
++ eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "FreePerProcessData: "
++ "OSPerProcessPrivateDataDeInit failed (%d)",
++ eError);
++ return eError;
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(*psPerProc), psPerProc,
++ psPerProc->hBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++struct PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(u32 ui32PID)
++{
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != NULL);
++
++ psPerProc =
++ (struct PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab,
++ (u32) ui32PID);
++ return psPerProc;
++}
++
++enum PVRSRV_ERROR PVRSRVPerProcessDataConnect(u32 ui32PID)
++{
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc;
++ void *hBlockAlloc;
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psHashTab != NULL);
++
++ psPerProc = (struct PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab,
++ (u32)ui32PID);
++ if (psPerProc == NULL) {
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc), (void **)&psPerProc,
++ &hBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "Couldn't allocate per-process data (%d)",
++ eError);
++ return eError;
++ }
++ OSMemSet(psPerProc, 0, sizeof(*psPerProc));
++ psPerProc->hBlockAlloc = hBlockAlloc;
++
++ if (!HASH_Insert(psHashTab, (u32) ui32PID, (u32)psPerProc)) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "Couldn't insert per-process data into hash table");
++ eError = PVRSRV_ERROR_GENERIC;
++ goto failure;
++ }
++
++ psPerProc->ui32PID = ui32PID;
++ psPerProc->ui32RefCount = 0;
++
++ eError =
++ OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "OSPerProcessPrivateDataInit failed (%d)",
++ eError);
++ goto failure;
++ }
++
++ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psPerProc->hPerProcData,
++ psPerProc,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "Couldn't allocate handle for per-process data (%d)",
++ eError);
++ goto failure;
++ }
++
++ eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "Couldn't allocate handle base for process (%d)",
++ eError);
++ goto failure;
++ }
++
++ eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "Couldn't set handle options (%d)",
++ eError);
++ goto failure;
++ }
++
++ eError =
++ PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: "
++ "Couldn't register with the resource manager");
++ goto failure;
++ }
++ }
++
++ psPerProc->ui32RefCount++;
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
++ ui32PID, psPerProc->ui32RefCount);
++
++ return eError;
++
++failure:
++ (void)FreePerProcessData(psPerProc);
++ return eError;
++}
++
++void PVRSRVPerProcessDataDisconnect(u32 ui32PID)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != NULL);
++
++ psPerProc = (struct PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab,
++ (u32)ui32PID);
++ if (psPerProc == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: "
++ "Couldn't locate per-process data for PID %u",
++ ui32PID);
++ } else {
++ psPerProc->ui32RefCount--;
++ if (psPerProc->ui32RefCount == 0) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataDisconnect: "
++ "Last close from process 0x%x received",
++ ui32PID);
++
++ PVRSRVResManDisconnect(psPerProc->hResManContext,
++ IMG_FALSE);
++
++ eError = FreePerProcessData(psPerProc);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataDisconnect: "
++ "Error freeing per-process data");
++ }
++ }
++
++ eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: "
++ "Purge of global handle pool failed (%d)",
++ eError);
++}
++
++enum PVRSRV_ERROR PVRSRVPerProcessDataInit(void)
++{
++ PVR_ASSERT(psHashTab == NULL);
++
++ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++ if (psHashTab == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: "
++ "Couldn't create per-process data hash table");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVPerProcessDataDeInit(void)
++{
++ if (psHashTab != NULL) {
++ HASH_Delete(psHashTab);
++ psHashTab = NULL;
++ }
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/pvr/perproc.h b/drivers/gpu/pvr/perproc.h
+new file mode 100644
+index 0000000..a89b0e4
+--- /dev/null
++++ b/drivers/gpu/pvr/perproc.h
+@@ -0,0 +1,79 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++struct PVRSRV_PER_PROCESS_DATA {
++ u32 ui32PID;
++ void *hBlockAlloc;
++ struct RESMAN_CONTEXT *hResManContext;
++ void *hPerProcData;
++ struct PVRSRV_HANDLE_BASE *psHandleBase;
++
++ IMG_BOOL bHandlesBatched;
++ u32 ui32RefCount;
++
++ IMG_BOOL bInitProcess;
++
++ void *hOsPrivateData;
++};
++
++struct PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(u32 ui32PID);
++
++enum PVRSRV_ERROR PVRSRVPerProcessDataConnect(u32 ui32PID);
++void PVRSRVPerProcessDataDisconnect(u32 ui32PID);
++
++enum PVRSRV_ERROR PVRSRVPerProcessDataInit(void);
++enum PVRSRV_ERROR PVRSRVPerProcessDataDeInit(void);
++
++static inline struct PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(void)
++{
++ return PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++}
++
++static inline void *PVRSRVProcessPrivateData(struct PVRSRV_PER_PROCESS_DATA
++ *psPerProc)
++{
++ return (psPerProc != NULL) ? psPerProc->hOsPrivateData : NULL;
++}
++
++static inline void *PVRSRVPerProcessPrivateData(u32 ui32PID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID));
++}
++
++static inline void *PVRSRVFindPerProcessPrivateData(void)
++{
++ return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData());
++}
++
++#endif
+diff --git a/drivers/gpu/pvr/power.c b/drivers/gpu/pvr/power.c
+new file mode 100644
+index 0000000..35adf6a
+--- /dev/null
++++ b/drivers/gpu/pvr/power.c
+@@ -0,0 +1,628 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++#include "services_headers.h"
++#include "pdump_km.h"
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++
++static IMG_BOOL gbInitServerRunning;
++static IMG_BOOL gbInitServerRan;
++static IMG_BOOL gbInitSuccessful;
++static DEFINE_MUTEX(hPowerAndFreqLock);
++static DECLARE_WAIT_QUEUE_HEAD(hDvfsWq);
++static IMG_BOOL gbDvfsActive;
++
++enum PVRSRV_ERROR PVRSRVSetInitServerState(enum PVRSRV_INIT_SERVER_STATE
++ eInitServerState, IMG_BOOL bState)
++{
++
++ switch (eInitServerState) {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ gbInitServerRunning = bState;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ gbInitServerRan = bState;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ gbInitSuccessful = bState;
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSetInitServerState : Unknown state %d",
++ eInitServerState);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PVRSRVGetInitServerState(
++ enum PVRSRV_INIT_SERVER_STATE eInitServerState)
++{
++ IMG_BOOL bReturnVal;
++
++ switch (eInitServerState) {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ bReturnVal = gbInitServerRunning;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ bReturnVal = gbInitServerRan;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ bReturnVal = gbInitSuccessful;
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetInitServerState : Unknown state %d",
++ eInitServerState);
++ bReturnVal = IMG_FALSE;
++ }
++
++ return bReturnVal;
++}
++
++static IMG_BOOL _IsSystemStatePowered(enum PVR_POWER_STATE eSystemPowerState)
++{
++ return (IMG_BOOL)(eSystemPowerState < PVRSRV_POWER_STATE_D2);
++}
++
++void PVRSRVDvfsLock(void)
++{
++ mutex_lock(&hPowerAndFreqLock);
++ gbDvfsActive = 1;
++ mutex_unlock(&hPowerAndFreqLock);
++}
++
++void PVRSRVDvfsUnlock(void)
++{
++ mutex_lock(&hPowerAndFreqLock);
++ gbDvfsActive = 0;
++ wake_up(&hDvfsWq);
++ mutex_unlock(&hPowerAndFreqLock);
++}
++
++enum PVRSRV_ERROR PVRSRVPowerLock(u32 ui32CallerID, IMG_BOOL bSystemPowerEvent)
++{
++ if (ui32CallerID == TIMER_ID) {
++ if (!mutex_trylock(&hPowerAndFreqLock))
++ return PVRSRV_ERROR_RETRY;
++
++ if (gbDvfsActive) {
++ mutex_unlock(&hPowerAndFreqLock);
++ return PVRSRV_ERROR_RETRY;
++ }
++ } else
++ mutex_lock(&hPowerAndFreqLock);
++
++ while (gbDvfsActive) {
++ DEFINE_WAIT(__wait);
++ prepare_to_wait(&hDvfsWq, &__wait, TASK_UNINTERRUPTIBLE);
++ mutex_unlock(&hPowerAndFreqLock);
++ schedule();
++ mutex_lock(&hPowerAndFreqLock);
++ finish_wait(&hDvfsWq, &__wait);
++ }
++ return PVRSRV_OK;
++}
++
++void PVRSRVPowerUnlock(u32 ui32CallerID)
++{
++ mutex_unlock(&hPowerAndFreqLock);
++}
++
++static enum PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices,
++ u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++ enum PVR_POWER_STATE eNewDevicePowerState;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psPowerDevice = psSysData->psPowerDeviceList;
++ while (psPowerDevice) {
++ if (bAllDevices ||
++ (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) {
++ eNewDevicePowerState =
++ (eNewPowerState == PVRSRV_POWER_Unspecified) ?
++ psPowerDevice->eDefaultPowerState :
++ eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState !=
++ eNewDevicePowerState) {
++ if (psPowerDevice->pfnPrePower != NULL) {
++ eError =
++ psPowerDevice->
++ pfnPrePower(psPowerDevice->
++ hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ pr_err
++ ("pfnPrePower failed (%u)\n",
++ eError);
++ return eError;
++ }
++ }
++
++ eError = SysDevicePrePowerState(
++ psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ pr_err("SysDevicePrePowerState failed "
++ "(%u)\n", eError);
++ return eError;
++ }
++ }
++ }
++
++ psPowerDevice = psPowerDevice->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices,
++ u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++ enum PVR_POWER_STATE eNewDevicePowerState;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psPowerDevice = psSysData->psPowerDeviceList;
++ while (psPowerDevice) {
++ if (bAllDevices ||
++ (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) {
++ eNewDevicePowerState = (eNewPowerState ==
++ PVRSRV_POWER_Unspecified) ? psPowerDevice->
++ eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState !=
++ eNewDevicePowerState) {
++ eError = SysDevicePostPowerState(
++ psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ pr_err("SysDevicePostPowerState "
++ "failed (%u)\n", eError);
++ return eError;
++ }
++
++ if (psPowerDevice->pfnPostPower != NULL) {
++ eError =
++ psPowerDevice->
++ pfnPostPower(psPowerDevice->
++ hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ pr_err
++ ("pfnPostPower failed "
++ "(%u)\n", eError);
++ return eError;
++ }
++ }
++
++ psPowerDevice->eCurrentPowerState =
++ eNewDevicePowerState;
++ }
++ }
++
++ psPowerDevice = psPowerDevice->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState,
++ u32 ui32CallerID, IMG_BOOL bRetainMutex)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ return eError;
++#if defined(PDUMP)
++ if (eNewPowerState == PVRSRV_POWER_Unspecified) {
++ eError =
++ PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex,
++ PVRSRV_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ goto Exit;
++ eError =
++ PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex,
++ PVRSRV_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ goto Exit;
++
++ PDUMPSUSPEND();
++ }
++#endif
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex,
++ eNewPowerState);
++ if (eError != PVRSRV_OK) {
++ if (eNewPowerState == PVRSRV_POWER_Unspecified)
++ PDUMPRESUME();
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex,
++ eNewPowerState);
++
++ if (eNewPowerState == PVRSRV_POWER_Unspecified)
++ PDUMPRESUME();
++
++Exit:
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSetDevicePowerStateKM : "
++ "Transition to %d FAILED 0x%x",
++ eNewPowerState, eError);
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(
++ enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ enum PVR_POWER_STATE eNewDevicePowerState;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ if (_IsSystemStatePowered(eNewPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState)) {
++ if (_IsSystemStatePowered(eNewPowerState))
++ eNewDevicePowerState = PVRSRV_POWER_Unspecified;
++ else
++ eNewDevicePowerState = PVRSRV_POWER_STATE_D3;
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0,
++ eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++ }
++
++ if (eNewPowerState != psSysData->eCurrentPowerState) {
++ eError = SysSystemPrePowerState(eNewPowerState);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++ }
++
++ return eError;
++
++ErrorExit:
++
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSystemPrePowerStateKM: "
++ "Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewPowerState, eError);
++
++ psSysData->eFailedPowerState = eNewPowerState;
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(
++ enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ enum PVR_POWER_STATE eNewDevicePowerState;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ goto Exit;
++
++ if (eNewPowerState != psSysData->eCurrentPowerState) {
++ eError = SysSystemPostPowerState(eNewPowerState);
++ if (eError != PVRSRV_OK)
++ goto Exit;
++ }
++
++ if (_IsSystemStatePowered(eNewPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState)) {
++ if (_IsSystemStatePowered(eNewPowerState))
++ eNewDevicePowerState = PVRSRV_POWER_Unspecified;
++ else
++ eNewDevicePowerState = PVRSRV_POWER_STATE_D3;
++
++ eError =
++ PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0,
++ eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ goto Exit;
++ }
++
++ PVR_DPF(PVR_DBG_WARNING, "PVRSRVSystemPostPowerStateKM: "
++ "System Power Transition from %d to %d OK",
++ psSysData->eCurrentPowerState, eNewPowerState);
++
++ psSysData->eCurrentPowerState = eNewPowerState;
++
++Exit:
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ if (_IsSystemStatePowered(eNewPowerState) &&
++ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ PVRSRVCommandCompleteCallbacks();
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVSetPowerStateKM(enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ eError = PVRSRVSystemPrePowerStateKM(eNewPowerState);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++
++ eError = PVRSRVSystemPostPowerStateKM(eNewPowerState);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++
++ psSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSetPowerStateKM: "
++ "Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewPowerState, eError);
++
++ psSysData->eFailedPowerState = eNewPowerState;
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVRegisterPowerDevice(u32 ui32DeviceIndex,
++ enum PVRSRV_ERROR (*pfnPrePower)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*pfnPostPower)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*pfnPreClockSpeedChange)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*pfnPostClockSpeedChange)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE),
++ void *hDevCookie, enum PVR_POWER_STATE eCurrentPowerState,
++ enum PVR_POWER_STATE eDefaultPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++
++ if (pfnPrePower == NULL && pfnPostPower == NULL)
++ return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_POWER_DEV),
++ (void **) &psPowerDevice, NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRegisterPowerDevice: "
++ "Failed to alloc struct PVRSRV_POWER_DEV");
++ return eError;
++ }
++
++ psPowerDevice->pfnPrePower = pfnPrePower;
++ psPowerDevice->pfnPostPower = pfnPostPower;
++ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
++ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
++ psPowerDevice->hDevCookie = hDevCookie;
++ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++ psPowerDevice->psNext = psSysData->psPowerDeviceList;
++ psSysData->psPowerDeviceList = psPowerDevice;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVRemovePowerDevice(u32 ui32DeviceIndex)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psCurrent, *psPrevious;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psCurrent = psSysData->psPowerDeviceList;
++ psPrevious = NULL;
++
++ while (psCurrent)
++ if (psCurrent->ui32DeviceIndex == ui32DeviceIndex) {
++ if (psPrevious)
++ psPrevious->psNext = psCurrent->psNext;
++ else
++ psSysData->psPowerDeviceList =
++ psCurrent->psNext;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_POWER_DEV), psCurrent,
++ NULL);
++ break;
++ } else {
++ psPrevious = psCurrent;
++ psCurrent = psCurrent->psNext;
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PVRSRVIsDevicePowered(u32 ui32DeviceIndex)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return IMG_FALSE;
++
++ if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource,
++ KERNEL_ID) ||
++ OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
++ return IMG_FALSE;
++
++ psPowerDevice = psSysData->psPowerDeviceList;
++ while (psPowerDevice) {
++ if (psPowerDevice->ui32DeviceIndex == ui32DeviceIndex)
++ return (IMG_BOOL)(psPowerDevice->eCurrentPowerState ==
++ PVRSRV_POWER_STATE_D0);
++ psPowerDevice = psPowerDevice->psNext;
++ }
++
++ return IMG_FALSE;
++}
++
++enum PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(u32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ void *pvInfo)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psPowerDevice = psSysData->psPowerDeviceList;
++ while (psPowerDevice) {
++ if (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)
++ if (psPowerDevice->pfnPreClockSpeedChange) {
++ eError =
++ psPowerDevice->
++ pfnPreClockSpeedChange(psPowerDevice->
++ hDevCookie,
++ bIdleDevice,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ pr_err
++ ("pfnPreClockSpeedChange failed\n");
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDevicePreClockSpeedChange : "
++ "Device %u failed, error:0x%x",
++ ui32DeviceIndex, eError);
++ break;
++ }
++ }
++ psPowerDevice = psPowerDevice->psNext;
++ }
++
++ if (bIdleDevice && eError != PVRSRV_OK)
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ return eError;
++}
++
++void PVRSRVDevicePostClockSpeedChange(u32 ui32DeviceIndex, IMG_BOOL bIdleDevice,
++ void *pvInfo)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return;
++
++ psPowerDevice = psSysData->psPowerDeviceList;
++ while (psPowerDevice) {
++ if (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)
++ if (psPowerDevice->pfnPostClockSpeedChange) {
++ eError =
++ psPowerDevice->
++ pfnPostClockSpeedChange(psPowerDevice->
++ hDevCookie,
++ bIdleDevice,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ pr_err
++ ("pfnPostClockSpeedChange "
++ "failed\n");
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDevicePostClockSpeedChange : "
++ "Device %u failed, error:0x%x",
++ ui32DeviceIndex, eError);
++ }
++ }
++ psPowerDevice = psPowerDevice->psNext;
++ }
++}
+diff --git a/drivers/gpu/pvr/power.h b/drivers/gpu/pvr/power.h
+new file mode 100644
+index 0000000..807f167
+--- /dev/null
++++ b/drivers/gpu/pvr/power.h
+@@ -0,0 +1,104 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++struct PVRSRV_POWER_DEV {
++ enum PVRSRV_ERROR (*pfnPrePower)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE);
++ enum PVRSRV_ERROR (*pfnPostPower)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE);
++ enum PVRSRV_ERROR (*pfnPreClockSpeedChange)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE);
++ enum PVRSRV_ERROR (*pfnPostClockSpeedChange)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE);
++ void *hDevCookie;
++ u32 ui32DeviceIndex;
++ enum PVR_POWER_STATE eDefaultPowerState;
++ enum PVR_POWER_STATE eCurrentPowerState;
++ struct PVRSRV_POWER_DEV *psNext;
++
++};
++
++enum PVRSRV_INIT_SERVER_STATE {
++ PVRSRV_INIT_SERVER_Unspecified = -1,
++ PVRSRV_INIT_SERVER_RUNNING = 0,
++ PVRSRV_INIT_SERVER_RAN = 1,
++ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
++ PVRSRV_INIT_SERVER_NUM = 3,
++ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
++};
++
++IMG_BOOL PVRSRVGetInitServerState(enum PVRSRV_INIT_SERVER_STATE
++ eInitServerState);
++
++enum PVRSRV_ERROR PVRSRVSetInitServerState(enum PVRSRV_INIT_SERVER_STATE
++ eInitServerState,
++ IMG_BOOL bState);
++
++enum PVRSRV_ERROR PVRSRVPowerLock(u32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent);
++void PVRSRVPowerUnlock(u32 ui32CallerID);
++void PVRSRVDvfsLock(void);
++void PVRSRVDvfsUnlock(void);
++
++enum PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState, u32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++enum PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(
++ enum PVR_POWER_STATE eNewPowerState);
++enum PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(
++ enum PVR_POWER_STATE eNewPowerState);
++
++enum PVRSRV_ERROR PVRSRVSetPowerStateKM(enum PVR_POWER_STATE ePVRState);
++
++enum PVRSRV_ERROR PVRSRVRegisterPowerDevice(u32 ui32DeviceIndex,
++ enum PVRSRV_ERROR (*pfnPrePower)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*pfnPostPower)(void *, enum PVR_POWER_STATE,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*pfnPreClockSpeedChange)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE),
++ enum PVRSRV_ERROR (*pfnPostClockSpeedChange)(void *, IMG_BOOL,
++ enum PVR_POWER_STATE),
++ void *hDevCookie, enum PVR_POWER_STATE eCurrentPowerState,
++ enum PVR_POWER_STATE eDefaultPowerState);
++
++enum PVRSRV_ERROR PVRSRVRemovePowerDevice(u32 ui32DeviceIndex);
++
++IMG_BOOL PVRSRVIsDevicePowered(u32 ui32DeviceIndex);
++
++enum PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(u32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ void *pvInfo);
++
++void PVRSRVDevicePostClockSpeedChange(u32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ void *pvInfo);
++
++#endif
+diff --git a/drivers/gpu/pvr/private_data.h b/drivers/gpu/pvr/private_data.h
+new file mode 100644
+index 0000000..d0192c1
+--- /dev/null
++++ b/drivers/gpu/pvr/private_data.h
+@@ -0,0 +1,35 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_PRIVATE_DATA_H_
++#define __INCLUDED_PRIVATE_DATA_H_
++
++struct PVRSRV_FILE_PRIVATE_DATA {
++ u32 ui32OpenPID;
++ void *hBlockAlloc;
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/proc.c b/drivers/gpu/pvr/proc.c
+new file mode 100644
+index 0000000..0ade0b9
+--- /dev/null
++++ b/drivers/gpu/pvr/proc.c
+@@ -0,0 +1,421 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++#include "perproc.h"
++#include "env_perproc.h"
++
++/* The proc entry for our /proc/pvr directory */
++
++static struct proc_dir_entry *dir;
++
++static off_t procDumpSysNodes(char *buf, size_t size, off_t off);
++static off_t procDumpVersion(char *buf, size_t size, off_t off);
++
++static const char PVRProcDirRoot[] = "pvr";
++
++off_t printAppend(char *buffer, size_t size, off_t off, const char *format, ...)
++{
++ int n;
++ int space = size - off;
++ va_list ap;
++
++ PVR_ASSERT(space >= 0);
++
++ va_start(ap, format);
++ n = vsnprintf(buffer + off, space, format, ap);
++ va_end(ap);
++
++ if (n >= space || n < 0) {
++
++ buffer[size - 1] = 0;
++ return size - 1;
++ } else {
++ return off + n;
++ }
++}
++
++static int pvr_read_proc(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ off_t (*pprn)(char *, size_t, off_t) = data;
++
++ off_t len = pprn(page, count, off);
++
++ if (len == END_OF_FILE) {
++ len = 0;
++ *eof = 1;
++ } else if (!len) {
++ *start = (char *)0;
++ } else {
++ *start = (char *)1;
++ }
++
++ return len;
++}
++
++static int CreateProcEntryInDir(struct proc_dir_entry *pdir, const char *name,
++ read_proc_t rhandler, write_proc_t whandler,
++ void *data)
++{
++ struct proc_dir_entry *file;
++ mode_t mode;
++
++ if (!pdir) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreateProcEntryInDir: parent directory doesn't exist");
++
++ return -ENOMEM;
++ }
++
++ mode = S_IFREG;
++
++ if (rhandler)
++ mode |= S_IRUGO;
++
++ if (whandler)
++ mode |= S_IWUSR;
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file) {
++ file->read_proc = rhandler;
++ file->write_proc = whandler;
++ file->data = data;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "Created proc entry %s in %s", name,
++ pdir->name);
++
++ return 0;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreateProcEntry: cannot create proc entry %s in %s", name,
++ pdir->name);
++
++ return -ENOMEM;
++}
++
++int CreateProcEntry(const char *name, read_proc_t rhandler,
++ write_proc_t whandler, void *data)
++{
++ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
++}
++
++int CreatePerProcessProcEntry(const char *name, read_proc_t rhandler,
++ write_proc_t whandler, void *data)
++{
++ struct PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ u32 ui32PID;
++
++ if (!dir) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: /proc/%s doesn't exist",
++ PVRProcDirRoot);
++
++ return -ENOMEM;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: no per process data");
++
++ return -ENOMEM;
++ }
++
++ if (!psPerProc->psProcDir) {
++ char dirname[16];
++ int ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID);
++
++ if (ret <= 0 || ret >= sizeof(dirname)) {
++ PVR_DPF(PVR_DBG_ERROR, "CreatePerProcessProcEntries: "
++ "couldn't generate per process proc "
++ "directory name \"%u\"",
++ ui32PID);
++
++ return -ENOMEM;
++ } else {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: "
++ "couldn't create per process proc "
++ "directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID);
++
++ return -ENOMEM;
++ }
++ }
++ }
++
++ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler,
++ whandler, data);
++}
++
++int CreateProcReadEntry(const char *name,
++ off_t (handler)(char *, size_t, off_t))
++{
++ struct proc_dir_entry *file;
++
++ if (!dir) {
++ PVR_DPF(PVR_DBG_ERROR, "CreateProcReadEntry: "
++ "cannot make proc entry /proc/%s/%s: no parent",
++ PVRProcDirRoot, name);
++
++ return -ENOMEM;
++ }
++
++ file =
++ create_proc_read_entry(name, S_IFREG | S_IRUGO, dir, pvr_read_proc,
++ (void *)handler);
++
++ if (file)
++ return 0;
++
++ PVR_DPF(PVR_DBG_ERROR, "CreateProcReadEntry: "
++ "cannot make proc entry /proc/%s/%s: no memory",
++ PVRProcDirRoot, name);
++
++ return -ENOMEM;
++}
++
++int CreateProcEntries(void)
++{
++ dir = proc_mkdir(PVRProcDirRoot, NULL);
++
++ if (!dir) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreateProcEntries: cannot make /proc/%s directory",
++ PVRProcDirRoot);
++
++ return -ENOMEM;
++ }
++
++ if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++ CreateProcReadEntry("version", procDumpVersion) ||
++ CreateProcReadEntry("nodes", procDumpSysNodes)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreateProcEntries: couldn't make /proc/%s files",
++ PVRProcDirRoot);
++
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ if (CreateProcEntry
++ ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, NULL)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "CreateProcEntries: couldn't make /proc/%s/debug_level",
++ PVRProcDirRoot);
++
++ return -ENOMEM;
++ }
++#endif
++
++ return 0;
++}
++
++void RemoveProcEntry(const char *name)
++{
++ if (dir) {
++ remove_proc_entry(name, dir);
++ PVR_DPF(PVR_DBG_MESSAGE, "Removing /proc/%s/%s",
++ PVRProcDirRoot, name);
++ }
++}
++
++void RemovePerProcessProcEntry(const char *name)
++{
++ struct PVRSRV_ENV_PER_PROCESS_DATA *psPerProc =
++ PVRSRVFindPerProcessPrivateData();
++
++ if (!psPerProc) {
++ PVR_DPF(PVR_DBG_ERROR, "CreatePerProcessProcEntries: "
++ "can't remove %s, no per process data",
++ name);
++ return;
++ }
++
++ if (psPerProc->psProcDir) {
++ remove_proc_entry(name, psPerProc->psProcDir);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "Removing proc entry %s from %s",
++ name, psPerProc->psProcDir->name);
++ }
++}
++
++void RemovePerProcessProcDir(struct PVRSRV_ENV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->psProcDir) {
++ while (psPerProc->psProcDir->subdir) {
++ PVR_DPF(PVR_DBG_WARNING,
++ "Belatedly removing /proc/%s/%s/%s",
++ PVRProcDirRoot, psPerProc->psProcDir->name,
++ psPerProc->psProcDir->subdir->name);
++
++ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
++ }
++ RemoveProcEntry(psPerProc->psProcDir->name);
++ }
++}
++
++void RemoveProcEntries(void)
++{
++#ifdef DEBUG
++ RemoveProcEntry("debug_level");
++#endif
++ RemoveProcEntry("queue");
++ RemoveProcEntry("nodes");
++ RemoveProcEntry("version");
++
++ while (dir->subdir) {
++ PVR_DPF(PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s",
++ PVRProcDirRoot, dir->subdir->name);
++
++ RemoveProcEntry(dir->subdir->name);
++ }
++
++ remove_proc_entry(PVRProcDirRoot, NULL);
++}
++
++static off_t procDumpVersion(char *buf, size_t size, off_t off)
++{
++ struct SYS_DATA *psSysData;
++
++ if (off == 0)
++ return printAppend(buf, size, 0, "Version %s (%s) %s\n",
++ PVRVERSION_STRING, PVR_BUILD_TYPE,
++ PVR_BUILD_DIR);
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ if (off == 1) {
++ char *pszSystemVersionString = "None";
++
++ if (psSysData->pszVersionString)
++ pszSystemVersionString = psSysData->pszVersionString;
++
++ if (strlen(pszSystemVersionString) +
++ strlen("System Version String: \n") + 1 > size)
++ return 0;
++ return printAppend(buf, size, 0, "System Version String: %s\n",
++ pszSystemVersionString);
++ }
++
++ return END_OF_FILE;
++}
++
++static const char *deviceTypeToString(enum PVRSRV_DEVICE_TYPE deviceType)
++{
++ switch (deviceType) {
++ default:
++ {
++ static char text[10];
++ sprintf(text, "?%x", deviceType);
++ return text;
++ }
++ }
++}
++
++static const char *deviceClassToString(enum PVRSRV_DEVICE_CLASS deviceClass)
++{
++ switch (deviceClass) {
++ case PVRSRV_DEVICE_CLASS_3D:
++ {
++ return "3D";
++ }
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ return "display";
++ }
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ return "buffer";
++ }
++ default:
++ {
++ static char text[10];
++
++ sprintf(text, "?%x", deviceClass);
++ return text;
++ }
++ }
++}
++
++static off_t procDumpSysNodes(char *buf, size_t size, off_t off)
++{
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_DEVICE_NODE *psDevNode;
++ off_t len;
++
++ if (size < 80)
++ return 0;
++
++ if (off == 0)
++ return printAppend(buf, size, 0,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return PVRSRV_ERROR_GENERIC;
++
++ for (psDevNode = psSysData->psDeviceNodeList;
++ --off && psDevNode; psDevNode = psDevNode->psNext)
++ ;
++
++ if (!psDevNode)
++ return END_OF_FILE;
++
++ len = printAppend(buf, size, 0,
++ "%p %-8s %-8s %4d %2u %p %3u %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++ return len;
++}
+diff --git a/drivers/gpu/pvr/proc.h b/drivers/gpu/pvr/proc.h
+new file mode 100644
+index 0000000..942b2ea
+--- /dev/null
++++ b/drivers/gpu/pvr/proc.h
+@@ -0,0 +1,54 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>
++#include <linux/proc_fs.h>
++
++#define END_OF_FILE ((off_t) -1)
++
++off_t printAppend(char *buffer, size_t size, off_t off,
++ const char *format, ...)
++ __attribute__ ((format(printf, 4, 5)));
++
++int CreateProcEntries(void);
++int CreateProcReadEntry(const char *name,
++ off_t (handler)(char *, size_t, off_t));
++int CreateProcEntry(const char *name, read_proc_t rhandler,
++ write_proc_t whandler, void *data);
++
++int CreatePerProcessProcEntry(const char *name, read_proc_t rhandler,
++ write_proc_t whandler, void *data);
++
++void RemoveProcEntry(const char *name);
++
++void RemovePerProcessProcEntry(const char *name);
++
++void RemoveProcEntries(void);
++
++#endif
+diff --git a/drivers/gpu/pvr/pvr_bridge.h b/drivers/gpu/pvr/pvr_bridge.h
+new file mode 100644
+index 0000000..d41e73d
+--- /dev/null
++++ b/drivers/gpu/pvr/pvr_bridge.h
+@@ -0,0 +1,1107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++
++#include "servicesint.h"
++
++
++#include <linux/ioctl.h>
++
++#define PVRSRV_IOC_GID 'g'
++#define PVRSRV_IO(INDEX) \
++ _IO(PVRSRV_IOC_GID, INDEX, struct PVRSRV_BRIDGE_PACKAGE)
++#define PVRSRV_IOW(INDEX) \
++ _IOW(PVRSRV_IOC_GID, INDEX, struct PVRSRV_BRIDGE_PACKAGE)
++#define PVRSRV_IOR(INDEX) \
++ _IOR(PVRSRV_IOC_GID, INDEX, struct PVRSRV_BRIDGE_PACKAGE)
++#define PVRSRV_IOWR(INDEX) \
++ _IOWR(PVRSRV_IOC_GID, INDEX, struct PVRSRV_BRIDGE_PACKAGE)
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0
++#define PVRSRV_BRIDGE_ENUM_DEVICES \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_CONNECT_SERVICES \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
++#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
++#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++#define PVRSRV_BRIDGE_CACHE_FLUSH_DRM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST \
++ (PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST \
++ (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SIM_CMD_LAST \
++ (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST \
++ (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST \
++ (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST \
++ (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_FB_STATS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_STATS_CMD_LAST \
++ (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST \
++ (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_RELEASE_MISC_INFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MISC_CMD_LAST \
++ (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST \
++ (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST \
++ (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST \
++ (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_PDUMP_REG \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_PDUMP_REGPOL \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_PDUMP_COMMENT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_PDUMP_PDREG \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST \
++ (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST \
++ (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_OEM_CMD_LAST \
++ (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST \
++ (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST \
++ (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST \
++ (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST \
++ (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST \
++ (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST \
++ (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST \
++ (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST \
++ (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST \
++ (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST \
++ (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST \
++ (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST \
++ (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST \
++ (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST \
++ (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST \
++ (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST \
++ (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST \
++ (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MODIFY_SYNC_OPS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST \
++ (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD \
++ (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
++
++#define PVRSRV_KERNEL_MODE_CLIENT 1
++
++struct PVRSRV_BRIDGE_RETURN {
++ enum PVRSRV_ERROR eError;
++ void *pvData;
++};
++
++struct PVRSRV_BRIDGE_PACKAGE {
++ u32 ui32BridgeID;
++ u32 ui32Size;
++ void __user *pvParamIn;
++ u32 ui32InBufferSize;
++ void __user *pvParamOut;
++ u32 ui32OutBufferSize;
++
++ void *hKernelServices;
++};
++
++struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO {
++ u32 ui32BridgeFlags;
++ u32 uiDevIndex;
++ enum PVRSRV_DEVICE_TYPE eDeviceType;
++};
++
++struct PVRSRV_BRIDGE_IN_ENUMCLASS {
++ u32 ui32BridgeFlags;
++ enum PVRSRV_DEVICE_CLASS sDeviceClass;
++};
++
++struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO {
++ u32 ui32BridgeFlags;
++ enum PVRSRV_DEVICE_CLASS DeviceClass;
++ void *pvDevInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hDevMemContext;
++};
++
++struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hDevMemContext;
++};
++
++struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hDevMemHeap;
++ u32 ui32Attribs;
++ u32 ui32Size;
++ u32 ui32Alignment;
++};
++
++struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ void *pvLinAddr;
++ void *hMappingInfo;
++};
++
++#define DRM_PVR2D_CFLUSH_FROM_GPU 1
++#define DRM_PVR2D_CFLUSH_TO_GPU 2
++
++struct PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ u32 ui32Type;
++ u32 ui32Virt;
++ u32 ui32Length;
++};
++
++struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++
++};
++
++struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++};
++
++struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM {
++ u32 ui32BridgeFlags;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ u32 ui32QueueSize;
++};
++
++struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct PVRSRV_QUEUE_INFO *psQueueInfo;
++
++};
++
++struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA {
++ u32 ui32BridgeFlags;
++ void *hMHandle;
++};
++
++struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA {
++ u32 ui32BridgeFlags;
++ void *hMHandle;
++};
++
++struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM {
++ u32 ui32BridgeFlags;
++ void *hDevMemHeap;
++ struct IMG_DEV_VIRTADDR *psDevVAddr;
++ u32 ui32Size;
++ u32 ui32Alignment;
++};
++
++struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES {
++ enum PVRSRV_ERROR eError;
++ void *hKernelServices;
++};
++
++struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY {
++ u32 ui32BridgeFlags;
++ void *hKernelMemInfo;
++ void *hDstDevMemHeap;
++};
++
++struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo;
++ struct PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct IMG_SYS_PHYADDR *psSysPAddr;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY {
++ u32 ui32BridgeFlags;
++ void *hDeviceClassBuffer;
++ void *hDevMemContext;
++
++};
++
++struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ void *hMappingInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ u32 ui32Offset;
++ u32 ui32Value;
++ u32 ui32Mask;
++ IMG_BOOL bLastFrame;
++ IMG_BOOL bOverwrite;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_BOOL bIsRead;
++ u32 ui32Value;
++ u32 ui32Mask;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM {
++ u32 ui32BridgeFlags;
++ void *pvLinAddr;
++ void *pvAltLinAddr;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ u32 ui32Offset;
++ u32 ui32Bytes;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC {
++ u32 ui32BridgeFlags;
++ void *pvAltLinAddr;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ u32 ui32Offset;
++ u32 ui32Bytes;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_HWREG sHWReg;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_HWREG sHWReg;
++ u32 ui32Mask;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_HWREG sHWReg;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES {
++ u32 ui32BridgeFlags;
++ void *hKernelMemInfo;
++ struct IMG_DEV_PHYADDR *pPages;
++ u32 ui32NumPages;
++ struct IMG_DEV_VIRTADDR sDevAddr;
++ u32 ui32Start;
++ u32 ui32Length;
++ IMG_BOOL bContinuous;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT {
++ u32 ui32BridgeFlags;
++ char szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME {
++ u32 ui32BridgeFlags;
++ u32 ui32Frame;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP {
++ u32 ui32BridgeFlags;
++ char szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ u32 ui32FileOffset;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32StrideInBytes;
++ struct IMG_DEV_VIRTADDR sDevBaseAddr;
++ u32 ui32Size;
++ enum PDUMP_PIXEL_FORMAT ePixelFormat;
++ enum PDUMP_MEM_FORMAT eMemFormat;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_READREG {
++ u32 ui32BridgeFlags;
++ char szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ u32 ui32FileOffset;
++ u32 ui32Address;
++ u32 ui32Size;
++ u32 ui32Flags;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO {
++ u32 ui32BridgeFlags;
++ char szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_BOOL bContinuous;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR {
++ u32 ui32BridgeFlags;
++ void *hKernelMemInfo;
++ u32 ui32Offset;
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ {
++ u32 ui32BridgeFlags;
++ u32 ui32RegOffset;
++ IMG_BOOL bLastFrame;
++};
++
++struct PVRSRV_BRIDGE_OUT_ENUMDEVICE {
++ enum PVRSRV_ERROR eError;
++ u32 ui32NumDevices;
++ struct PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++};
++
++struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO {
++
++ enum PVRSRV_ERROR eError;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_OUT_ENUMCLASS {
++ enum PVRSRV_ERROR eError;
++ u32 ui32NumDevices;
++ u32 ui32DevID[PVRSRV_MAX_DEVICES];
++};
++
++struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE {
++ u32 ui32BridgeFlags;
++ u32 ui32DeviceID;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE {
++ enum PVRSRV_ERROR eError;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hDevMemContext;
++ void *pvLinAddr;
++ u32 ui32ByteSize;
++ u32 ui32PageOffset;
++ IMG_BOOL bPhysContig;
++ u32 ui32NumPageTableEntries;
++ struct IMG_SYS_PHYADDR __user *psSysPAddr;
++};
++
++struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY {
++ u32 ui32BridgeFlags;
++ void *hKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++};
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4
++#define PVRSRV_MAX_DC_CLIP_RECTS 32
++
++struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS {
++ enum PVRSRV_ERROR eError;
++ u32 ui32Count;
++ struct DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++};
++
++struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ struct DISPLAY_FORMAT sFormat;
++};
++
++struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS {
++ enum PVRSRV_ERROR eError;
++ u32 ui32Count;
++ struct DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO {
++ enum PVRSRV_ERROR eError;
++ struct DISPLAY_INFO sDisplayInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER {
++ enum PVRSRV_ERROR eError;
++ void *hBuffer;
++};
++
++struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ u32 ui32Flags;
++ struct DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++ struct DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++ u32 ui32BufferCount;
++ u32 ui32OEMFlags;
++ u32 ui32SwapChainID;
++};
++
++struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN {
++ enum PVRSRV_ERROR eError;
++ void *hSwapChain;
++ u32 ui32SwapChainID;
++};
++
++struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ void *hSwapChain;
++};
++
++struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ void *hSwapChain;
++ struct IMG_RECT sRect;
++};
++
++struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ void *hSwapChain;
++ u32 ui32CKColour;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ void *hSwapChain;
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS {
++ enum PVRSRV_ERROR eError;
++ u32 ui32BufferCount;
++ void *ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++};
++
++struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ void *hBuffer;
++ u32 ui32SwapInterval;
++ void *hPrivateTag;
++ u32 ui32ClipRectCount;
++ struct IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++};
++
++struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ void *hSwapChain;
++};
++
++struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE {
++ u32 ui32BridgeFlags;
++ u32 ui32DeviceID;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE {
++ enum PVRSRV_ERROR eError;
++ void *hDeviceKM;
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO {
++ enum PVRSRV_ERROR eError;
++ struct BUFFER_INFO sBufferInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER {
++ u32 ui32BridgeFlags;
++ void *hDeviceKM;
++ u32 ui32BufferIndex;
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER {
++ enum PVRSRV_ERROR eError;
++ void *hBuffer;
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO {
++ enum PVRSRV_ERROR eError;
++ u32 ui32ClientHeapCount;
++ struct PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++};
++
++struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT {
++ enum PVRSRV_ERROR eError;
++ void *hDevMemContext;
++ u32 ui32ClientHeapCount;
++ struct PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++};
++
++struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP {
++ enum PVRSRV_ERROR eError;
++ void *hDevMemHeap;
++};
++
++struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++};
++
++struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM {
++ enum PVRSRV_ERROR eError;
++ void *hMemInfo;
++
++};
++
++struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER {
++ enum PVRSRV_ERROR eError;
++ void *pvLinAddr;
++ void *hMappingInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM {
++ enum PVRSRV_ERROR eError;
++ u32 ui32Total;
++ u32 ui32Free;
++ u32 ui32LargestBlock;
++};
++
++#include "pvrmmap.h"
++struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA {
++ enum PVRSRV_ERROR eError;
++ u32 ui32MMapOffset;
++ u32 ui32ByteOffset;
++ u32 ui32RealByteSize;
++ u32 ui32UserVAddr;
++};
++
++struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA {
++ enum PVRSRV_ERROR eError;
++ IMG_BOOL bMUnmap;
++ u32 ui32UserVAddr;
++ u32 ui32RealByteSize;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_MISC_INFO {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_MISC_INFO sMiscInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_MISC_INFO sMiscInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_MISC_INFO sMiscInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_MISC_INFO sMiscInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING {
++ enum PVRSRV_ERROR eError;
++ IMG_BOOL bIsCapturing;
++};
++
++struct PVRSRV_BRIDGE_IN_GET_FB_STATS {
++ u32 ui32BridgeFlags;
++ u32 ui32Total;
++ u32 ui32Available;
++};
++
++struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct IMG_SYS_PHYADDR sSysPhysAddr;
++ u32 uiSizeInBytes;
++};
++
++struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE {
++ void *pvUserAddr;
++ u32 uiActualSize;
++ void *pvProcess;
++};
++
++struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *pvUserAddr;
++ void *pvProcess;
++};
++
++struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP {
++ void **ppvTbl;
++ u32 uiTblSize;
++};
++
++struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *pvProcess;
++};
++
++struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS {
++ struct IMG_SYS_PHYADDR sRegsPhysBase;
++ void *pvRegsBase;
++ void *pvProcess;
++ u32 ulNoOfEntries;
++ void *pvTblLinAddr;
++};
++
++struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *pvProcess;
++ void *pvRegsBase;
++};
++
++struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ u32 ui32StatusAndMask;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT {
++ u32 ui32BridgeFlags;
++ IMG_BOOL bInitSuccesful;
++};
++
++struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM {
++ u32 ui32BridgeFlags;
++ u32 ui32Flags;
++ u32 ui32Size;
++};
++
++struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM {
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM {
++ u32 ui32BridgeFlags;
++ void *hKernelMemInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM {
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ struct PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM {
++ u32 ui32BridgeFlags;
++ struct PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++};
++
++struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM {
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR {
++ u32 ui32BridgeFlags;
++ void *hDevMemContext;
++};
++
++struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR {
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT {
++ u32 ui32BridgeFlags;
++ void *hOSEventKM;
++};
++
++struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN {
++ struct PVRSRV_EVENTOBJECT sEventObject;
++};
++
++struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN {
++ void *hOSEvent;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE {
++ struct PVRSRV_EVENTOBJECT sEventObject;
++ void *hOSEventKM;
++};
++
++struct PVRSRV_BRIDGE_IN_MODIFY_SYNC_OPS {
++ u32 ui32BridgeFlags;
++ void *hKernelSyncInfo;
++ u32 ui32ModifyFlags;
++
++};
++
++struct PVRSRV_BRIDGE_OUT_MODIFY_SYNC_OPS {
++ enum PVRSRV_ERROR eError;
++ u32 ui32ReadOpsPending;
++ u32 ui32ReadOpsComplete;
++ u32 ui32WriteOpsPending;
++ u32 ui32WriteOpsComplete;
++
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/pvr_bridge_k.c b/drivers/gpu/pvr/pvr_bridge_k.c
+new file mode 100644
+index 0000000..9053a3e
+--- /dev/null
++++ b/drivers/gpu/pvr/pvr_bridge_k.c
+@@ -0,0 +1,191 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "pvr_bridge_km.h"
++#include "perproc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "private_data.h"
++
++#include "sgx_bridge.h"
++
++#include "bridged_pvr_bridge.h"
++
++
++#if defined(DEBUG_BRIDGE_KM)
++static off_t printLinuxBridgeStats(char *buffer, size_t size, off_t off);
++#endif
++
++enum PVRSRV_ERROR LinuxBridgeInit(void)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ {
++ int iStatus;
++ iStatus =
++ CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++ if (iStatus != 0)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++#endif
++ return CommonBridgeInit();
++}
++
++void LinuxBridgeDeInit(void)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ RemoveProcEntry("bridge_stats");
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++static off_t printLinuxBridgeStats(char *buffer, size_t count, off_t off)
++{
++ struct PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++ off_t Ret;
++
++ mutex_lock(&gPVRSRVLock);
++
++ if (!off) {
++ if (count < 500) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Total ioctl call count = %u\n"
++ "Total number of bytes copied via copy_from_user = %u\n"
++ "Total number of bytes copied via copy_to_user = %u\n"
++ "Total number of bytes copied via copy_*_user = %u\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes +
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name", "Wrapper Function",
++ "Call Count", "copy_from_user Bytes",
++ "copy_to_user Bytes");
++
++ goto unlock_and_return;
++ }
++
++ if (off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if (count < 300) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psEntry = &g_BridgeDispatchTable[off - 1];
++ Ret = printAppend(buffer, count, 0,
++ "%-45s %-40s %-10u %-20u %-10u\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++ mutex_unlock(&gPVRSRVLock);
++ return Ret;
++}
++#endif
++
++long PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ u32 ui32BridgeID = PVRSRV_GET_BRIDGE_ID(cmd);
++ struct PVRSRV_BRIDGE_PACKAGE __user *psBridgePackageUM =
++ (struct PVRSRV_BRIDGE_PACKAGE __user *)arg;
++ struct PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc;
++ int err = -EFAULT;
++
++ mutex_lock(&gPVRSRVLock);
++
++ if (!OSAccessOK(PVR_VERIFY_WRITE, psBridgePackageUM,
++ sizeof(struct PVRSRV_BRIDGE_PACKAGE))) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Received invalid pointer to function arguments",
++ __func__);
++
++ goto unlock_and_return;
++ }
++
++ if (OSCopyFromUser(NULL, &sBridgePackageKM, psBridgePackageUM,
++ sizeof(struct PVRSRV_BRIDGE_PACKAGE)) != PVRSRV_OK)
++ goto unlock_and_return;
++
++ if (ui32BridgeID !=
++ PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES)) {
++ enum PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (void **)&psPerProc,
++ sBridgePackageKM.hKernelServices,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Invalid kernel services handle (%d)",
++ __func__, eError);
++ goto unlock_and_return;
++ }
++
++ if (psPerProc->ui32PID != ui32PID) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: Process %d tried to access data "
++ "belonging to process %d", __func__,
++ ui32PID, psPerProc->ui32PID);
++ goto unlock_and_return;
++ }
++ } else {
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++ "Couldn't create per-process data area");
++ goto unlock_and_return;
++ }
++ }
++
++ sBridgePackageKM.ui32BridgeID = PVRSRV_GET_BRIDGE_ID(
++ sBridgePackageKM.ui32BridgeID);
++
++ err = BridgedDispatchKM(psPerProc, &sBridgePackageKM);
++ if (err != PVRSRV_OK)
++ goto unlock_and_return;
++
++unlock_and_return:
++ mutex_unlock(&gPVRSRVLock);
++ return err;
++}
+diff --git a/drivers/gpu/pvr/pvr_bridge_km.h b/drivers/gpu/pvr/pvr_bridge_km.h
+new file mode 100644
+index 0000000..0c60a42
+--- /dev/null
++++ b/drivers/gpu/pvr/pvr_bridge_km.h
+@@ -0,0 +1,190 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#include <linux/fs.h> /* for struct file */
++
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++enum PVRSRV_ERROR LinuxBridgeInit(void);
++void LinuxBridgeDeInit(void);
++
++enum PVRSRV_ERROR PVRSRVEnumerateDevicesKM(u32 *pui32NumDevices,
++ struct PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++enum PVRSRV_ERROR PVRSRVAcquireDeviceDataKM(u32 uiDevIndex,
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void **phDevCookie);
++
++enum PVRSRV_ERROR PVRSRVCreateCommandQueueKM(u32 ui32QueueSize,
++ struct PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++enum PVRSRV_ERROR PVRSRVDestroyCommandQueueKM(
++ struct PVRSRV_QUEUE_INFO *psQueueInfo);
++
++enum PVRSRV_ERROR PVRSRVGetDeviceMemHeapsKM(void *hDevCookie,
++ struct PVRSRV_HEAP_INFO *psHeapInfo);
++
++enum PVRSRV_ERROR PVRSRVCreateDeviceMemContextKM(void *hDevCookie,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void **phDevMemContext, u32 *pui32ClientHeapCount,
++ struct PVRSRV_HEAP_INFO *psHeapInfo, IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared);
++
++enum PVRSRV_ERROR PVRSRVDestroyDeviceMemContextKM(void *hDevCookie,
++ void *hDevMemContext, IMG_BOOL *pbCreated);
++
++enum PVRSRV_ERROR PVRSRVGetDeviceMemHeapInfoKM(void *hDevCookie,
++ void *hDevMemContext, u32 *pui32ClientHeapCount,
++ struct PVRSRV_HEAP_INFO *psHeapInfo, IMG_BOOL *pbShared);
++
++enum PVRSRV_ERROR PVRSRVAllocDeviceMemKM(void *hDevCookie,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, void *hDevMemHeap,
++ u32 ui32Flags, u32 ui32Size, u32 ui32Alignment,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++enum PVRSRV_ERROR PVRSRVFreeDeviceMemKM(void *hDevCookie,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++enum PVRSRV_ERROR PVRSRVDissociateDeviceMemKM(void *hDevCookie,
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++enum PVRSRV_ERROR PVRSRVReserveDeviceVirtualMemKM(void *hDevMemHeap,
++ struct IMG_DEV_VIRTADDR *psDevVAddr, u32 ui32Size,
++ u32 ui32Alignment, struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++enum PVRSRV_ERROR PVRSRVFreeDeviceVirtualMemKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++enum PVRSRV_ERROR PVRSRVMapDeviceMemoryKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ struct PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ void *hDstDevMemHeap,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++enum PVRSRV_ERROR PVRSRVUnmapDeviceMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++enum PVRSRV_ERROR PVRSRVWrapExtMemoryKM(void *hDevCookie,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, void *hDevMemContext,
++ u32 ui32ByteSize, u32 ui32PageOffset, IMG_BOOL bPhysContig,
++ struct IMG_SYS_PHYADDR *psSysAddr, void *pvLinAddr,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++enum PVRSRV_ERROR PVRSRVUnwrapExtMemoryKM(struct PVRSRV_KERNEL_MEM_INFO
++ *psMemInfo);
++
++enum PVRSRV_ERROR PVRSRVEnumerateDCKM(enum PVRSRV_DEVICE_CLASS DeviceClass,
++ u32 *pui32DevCount, u32 *pui32DevID);
++
++enum PVRSRV_ERROR PVRSRVOpenDCDeviceKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32DeviceID, void *hDevCookie, void **phDeviceKM);
++
++enum PVRSRV_ERROR PVRSRVCloseDCDeviceKM(void *hDeviceKM,
++ IMG_BOOL bResManCallback);
++
++enum PVRSRV_ERROR PVRSRVEnumDCFormatsKM(void *hDeviceKM, u32 *pui32Count,
++ struct DISPLAY_FORMAT *psFormat);
++
++enum PVRSRV_ERROR PVRSRVEnumDCDimsKM(void *hDeviceKM,
++ struct DISPLAY_FORMAT *psFormat, u32 *pui32Count,
++ struct DISPLAY_DIMS *psDim);
++
++enum PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(void *hDeviceKM, void **phBuffer);
++
++enum PVRSRV_ERROR PVRSRVGetDCInfoKM(void *hDeviceKM,
++ struct DISPLAY_INFO *psDisplayInfo);
++
++enum PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, void *hDeviceKM,
++ u32 ui32Flags, struct DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ struct DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ u32 ui32BufferCount, u32 ui32OEMFlags, void **phSwapChain,
++ u32 *pui32SwapChainID);
++
++enum PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(void *hSwapChain);
++enum PVRSRV_ERROR PVRSRVSetDCDstRectKM(void *hDeviceKM, void *hSwapChain,
++ struct IMG_RECT *psRect);
++enum PVRSRV_ERROR PVRSRVSetDCSrcRectKM(void *hDeviceKM, void *hSwapChain,
++ struct IMG_RECT *psRect);
++enum PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(void *hDeviceKM, void *hSwapChain,
++ u32 ui32CKColour);
++enum PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(void *hDeviceKM, void *hSwapChain,
++ u32 ui32CKColour);
++enum PVRSRV_ERROR PVRSRVGetDCBuffersKM(void *hDeviceKM, void *hSwapChain,
++ u32 *pui32BufferCount, void **phBuffer);
++enum PVRSRV_ERROR PVRSRVSwapToDCBufferKM(void *hDeviceKM, void *hBuffer,
++ u32 ui32SwapInterval, void *hPrivateTag,
++ u32 ui32ClipRectCount, struct IMG_RECT *psClipRect);
++enum PVRSRV_ERROR PVRSRVSwapToDCSystemKM(void *hDeviceKM, void *hSwapChain);
++
++enum PVRSRV_ERROR PVRSRVOpenBCDeviceKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32DeviceID, void *hDevCookie, void **phDeviceKM);
++enum PVRSRV_ERROR PVRSRVCloseBCDeviceKM(void *hDeviceKM,
++ IMG_BOOL bResManCallback);
++
++enum PVRSRV_ERROR PVRSRVGetBCInfoKM(void *hDeviceKM,
++ struct BUFFER_INFO *psBufferInfo);
++enum PVRSRV_ERROR PVRSRVGetBCBufferKM(void *hDeviceKM,
++ u32 ui32BufferIndex, void **phBuffer);
++
++enum PVRSRV_ERROR PVRSRVMapDeviceClassMemoryKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, void *hDevMemContext,
++ void *hDeviceClassBuffer,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo, void **phOSMapInfo);
++
++enum PVRSRV_ERROR PVRSRVUnmapDeviceClassMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++enum PVRSRV_ERROR PVRSRVGetFreeDeviceMemKM(u32 ui32Flags, u32 *pui32Total,
++ u32 *pui32Free, u32 *pui32LargestBlock);
++enum PVRSRV_ERROR PVRSRVAllocSyncInfoKM(void *hDevCookie, void *hDevMemContext,
++ struct PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++enum PVRSRV_ERROR PVRSRVFreeSyncInfoKM(
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
++
++enum PVRSRV_ERROR PVRSRVGetMiscInfoKM(struct PVRSRV_MISC_INFO *psMiscInfo);
++
++enum PVRSRV_ERROR PVRSRVGetFBStatsKM(u32 *pui32Total, u32 *pui32Available);
++
++enum PVRSRV_ERROR PVRSRVAllocSharedSysMemoryKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc, u32 ui32Flags,
++ u32 ui32Size, struct PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++enum PVRSRV_ERROR PVRSRVFreeSharedSysMemoryKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++enum PVRSRV_ERROR PVRSRVDissociateMemFromResmanKM(
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++long PVRSRV_BridgeDispatchKM(struct file *, unsigned, unsigned long);
++
++#endif
+diff --git a/drivers/gpu/pvr/pvr_debug.c b/drivers/gpu/pvr/pvr_debug.c
+new file mode 100644
+index 0000000..eec9075
+--- /dev/null
++++ b/drivers/gpu/pvr/pvr_debug.c
+@@ -0,0 +1,353 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/io.h>
++#include <linux/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/hardirq.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/tty.h>
++#include <linux/debugfs.h>
++#include <stdarg.h>
++#include "img_types.h"
++#include "servicesext.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "sgxinfokm.h"
++#include "sgxutils.h"
++
++u32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
++
++static char gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
++
++static char gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
++static struct mutex gsDebugMutexNonIRQ;
++static DEFINE_SPINLOCK(gsDebugLockIRQ);
++#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
++static inline void GetBufferLock(unsigned long *pulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
++ else
++ mutex_lock(&gsDebugMutexNonIRQ);
++}
++
++static inline void ReleaseBufferLock(unsigned long ulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
++ else
++ mutex_unlock(&gsDebugMutexNonIRQ);
++}
++
++static inline void SelectBuffer(char **ppszBuf, u32 *pui32BufSiz)
++{
++ if (USE_SPIN_LOCK) {
++ *ppszBuf = gszBufferIRQ;
++ *pui32BufSiz = sizeof(gszBufferIRQ);
++ } else {
++ *ppszBuf = gszBufferNonIRQ;
++ *pui32BufSiz = sizeof(gszBufferNonIRQ);
++ }
++}
++
++static IMG_BOOL VBAppend(char *pszBuf, u32 ui32BufSiz, const char *pszFormat,
++ va_list VArgs)
++{
++ u32 ui32Used;
++ u32 ui32Space;
++ s32 i32Len;
++ ui32Used = strlen(pszBuf);
++ BUG_ON(ui32Used >= ui32BufSiz);
++ ui32Space = ui32BufSiz - ui32Used;
++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
++ pszBuf[ui32BufSiz - 1] = 0;
++ return i32Len < 0 || i32Len >= ui32Space;
++}
++
++static IMG_BOOL BAppend(char *pszBuf, u32 ui32BufSiz, const char *pszFormat,
++ ...)
++{
++ va_list VArgs;
++ IMG_BOOL bTrunc;
++
++ va_start(VArgs, pszFormat);
++
++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
++
++ va_end(VArgs);
++
++ return bTrunc;
++}
++
++void PVRSRVDebugPrintf(u32 ui32DebugLevel,
++ const char *pszFileName,
++ u32 ui32Line, const char *pszFormat, ...)
++{
++ IMG_BOOL bTrace, bDebug;
++ char *pszLeafName;
++
++ pszLeafName = (char *)strrchr(pszFileName, '\\');
++
++ if (pszLeafName)
++ pszFileName = pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug) {
++ va_list vaArgs;
++		unsigned long ulLockFlags = 0; /* suppress gcc warning */
++ char *pszBuf;
++ u32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ if (bDebug) {
++ switch (ui32DebugLevel) {
++ case DBGPRIV_FATAL:
++ strncpy(pszBuf, "PVR_K:(Fatal): ",
++ (ui32BufSiz - 1));
++ break;
++ case DBGPRIV_ERROR:
++ strncpy(pszBuf, "PVR_K:(Error): ",
++ (ui32BufSiz - 1));
++ break;
++ case DBGPRIV_WARNING:
++ strncpy(pszBuf, "PVR_K:(Warning): ",
++ (ui32BufSiz - 1));
++ break;
++ case DBGPRIV_MESSAGE:
++ strncpy(pszBuf, "PVR_K:(Message): ",
++ (ui32BufSiz - 1));
++ break;
++ case DBGPRIV_VERBOSE:
++ strncpy(pszBuf, "PVR_K:(Verbose): ",
++ (ui32BufSiz - 1));
++ break;
++ default:
++ strncpy(pszBuf,
++ "PVR_K:(Unknown message level)",
++ (ui32BufSiz - 1));
++ break;
++ }
++ } else {
++ strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 1));
++ }
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n",
++ pszBuf);
++ } else {
++ if (!bTrace) {
++ if (BAppend
++ (pszBuf, ui32BufSiz, " [%lu, %s]", ui32Line,
++ pszFileName))
++ printk(KERN_INFO
++ "PVR_K:(Message Truncated): %s\n",
++ pszBuf);
++ else
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++ va_end(vaArgs);
++ }
++}
++
++void PVRSRVDebugAssertFail(const char *pszFile, u32 uLine)
++{
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine,
++ "Debug assertion failed!");
++ BUG();
++}
++
++void PVRSRVTrace(const char *pszFormat, ...)
++{
++ va_list VArgs;
++ unsigned long ulLockFlags = 0; /* suppress gcc warning */
++ char *pszBuf;
++ u32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(VArgs, pszFormat);
++ GetBufferLock(&ulLockFlags);
++ strncpy(pszBuf, "PVR: ", (ui32BufSiz - 1));
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ else
++ printk(KERN_INFO "%s\n", pszBuf);
++ ReleaseBufferLock(ulLockFlags);
++ va_end(VArgs);
++}
++
++void PVRDebugSetLevel(u32 uDebugLevel)
++{
++ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",
++ (unsigned)uDebugLevel);
++
++ gPVRDebugLevel = uDebugLevel;
++}
++
++int PVRDebugProcSetLevel(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++#define _PROC_SET_BUFFER_SZ 2
++ char data_buffer[_PROC_SET_BUFFER_SZ];
++
++ if (count != _PROC_SET_BUFFER_SZ) {
++ return -EINVAL;
++ } else {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRDebugSetLevel(data_buffer[0] - '0');
++ }
++ return count;
++}
++
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count,
++ int *eof, void *data)
++{
++ if (off == 0) {
++ *start = (char *)1;
++ return printAppend(page, count, 0, "%u\n", gPVRDebugLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++
++#ifdef CONFIG_DEBUG_FS
++
++static struct dentry *debugfs_dentry;
++static u32 pvr_reset;
++
++static struct PVRSRV_DEVICE_NODE *get_sgx_node(void)
++{
++ struct SYS_DATA *sysdata;
++ struct PVRSRV_DEVICE_NODE *node;
++
++ if (SysAcquireData(&sysdata) != PVRSRV_OK)
++ return NULL;
++
++ for (node = sysdata->psDeviceNodeList; node; node = node->psNext)
++ if (node->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
++ break;
++
++ return node;
++}
++
++static int pvr_dbg_reset(void *data, u64 val)
++{
++ struct PVRSRV_DEVICE_NODE *node;
++ enum PVRSRV_ERROR err;
++
++ if (val != 1)
++ return 0;
++
++ node = get_sgx_node();
++ if (!node)
++ return -ENODEV;
++
++ err = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
++ PVRSRV_POWER_STATE_D0,
++ KERNEL_ID, IMG_TRUE);
++ if (err != PVRSRV_OK)
++ return -EIO;
++
++ /*
++ * Yes, this is kinda braindead. KERNEL_ID, IMG_TRUE above means
++ * take the power lock - not just try lock - and keep it. TIMER_ID
++ * here means that we have already the power lock, so don't take it.
++ * Also - regardless of the ID - the following will release the lock.
++ * Finally we pass KERNEL_ID again to take and release the lock.
++ * Yay!
++ */
++ HWRecoveryResetSGX(node, 0, TIMER_ID);
++
++ SGXTestActivePowerEvent(node, KERNEL_ID);
++
++ return 0;
++}
++
++static int pvr_dbg_set(void *data, u64 val)
++{
++ u32 *var = data;
++
++ if (var == &pvr_reset)
++ return pvr_dbg_reset(data, val);
++
++ BUG();
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(pvr_dbg_fops, NULL, pvr_dbg_set, "%llu\n");
++
++static int pvr_init_debugfs(void)
++{
++ debugfs_dentry = debugfs_create_file("reset_sgx", S_IWUGO, NULL,
++ &pvr_reset, &pvr_dbg_fops);
++
++ return debugfs_dentry ? 0 : -ENODEV;
++}
++
++static void pvr_cleanup_debugfs(void)
++{
++ debugfs_remove(debugfs_dentry);
++}
++
++#else /* !CONFIG_DEBUG_FS */
++
++static int pvr_init_debugfs(void)
++{
++ return 0;
++}
++
++static void pvr_cleanup_debugfs(void) { }
++
++#endif
++
++void pvr_dbg_init(void)
++{
++ mutex_init(&gsDebugMutexNonIRQ);
++ pvr_init_debugfs();
++}
++
++void pvr_dbg_cleanup(void)
++{
++ pvr_cleanup_debugfs();
++}
++
+diff --git a/drivers/gpu/pvr/pvr_debug.h b/drivers/gpu/pvr/pvr_debug.h
+new file mode 100644
+index 0000000..a47d718
+--- /dev/null
++++ b/drivers/gpu/pvr/pvr_debug.h
+@@ -0,0 +1,110 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++#include <linux/fs.h>
++
++#include "img_types.h"
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN 512
++
++#define DBGPRIV_FATAL 0x01UL
++#define DBGPRIV_ERROR 0x02UL
++#define DBGPRIV_WARNING 0x04UL
++#define DBGPRIV_MESSAGE 0x08UL
++#define DBGPRIV_VERBOSE 0x10UL
++#define DBGPRIV_CALLTRACE 0x20UL
++#define DBGPRIV_ALLOC 0x40UL
++#define DBGPRIV_ALLLEVELS (DBGPRIV_FATAL | DBGPRIV_ERROR | \
++ DBGPRIV_WARNING | DBGPRIV_MESSAGE | \
++ DBGPRIV_VERBOSE)
++
++#define PVR_DBG_FATAL DBGPRIV_FATAL
++#define PVR_DBG_ERROR DBGPRIV_ERROR
++#define PVR_DBG_WARNING DBGPRIV_WARNING
++#define PVR_DBG_MESSAGE DBGPRIV_MESSAGE
++#define PVR_DBG_VERBOSE DBGPRIV_VERBOSE
++#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE
++#define PVR_DBG_ALLOC DBGPRIV_ALLOC
++
++#if defined(DEBUG)
++
++#define PVR_ASSERT(EXPR) \
++ do { \
++ if (!(EXPR)) \
++ PVRSRVDebugAssertFail(__FILE__, __LINE__); \
++ } while (0)
++
++#define PVR_DPF(level, fmt, ...) \
++ PVRSRVDebugPrintf(level, __FILE__, __LINE__, fmt, ## __VA_ARGS__)
++
++#define PVR_TRACE(fmt, ...) \
++ PVRSRVTrace(fmt, ##__VA_ARGS__)
++
++void PVRSRVDebugAssertFail(const char *pszFile, u32 ui32Line);
++void PVRSRVDebugPrintf(u32 ui32DebugLevel, const char *pszFileName,
++ u32 ui32Line, const char *pszFormat, ...);
++void PVRSRVTrace(const char *pszFormat, ...);
++
++int PVRDebugProcSetLevel(struct file *file, const char __user *buffer,
++ unsigned long count, void *data);
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count,
++ int *eof, void *data);
++void PVRDebugSetLevel(u32 uDebugLevel);
++
++void pvr_dbg_init(void);
++void pvr_dbg_cleanup(void);
++
++#define PVR_DBG_BREAK
++
++#else
++
++#if defined(TIMING)
++
++#define PVR_ASSERT(EXPR) do { } while (0)
++#define PVR_DPF(level, fmt, ...) do { } while (0)
++#define PVR_TRACE(fmt, ...) \
++ PVRSRVTrace(fmt, ##__VA_ARGS__)
++#define PVR_DBG_BREAK do { } while (0)
++
++void PVRSRVTrace(const char *pszFormat, ...);
++
++#else
++
++#define PVR_ASSERT(EXPR) do { } while (0)
++#define PVR_DPF(level, fmt, ...) do { } while (0)
++#define PVR_TRACE(fmt, ...) do { } while (0)
++#define PVR_DBG_BREAK do { } while (0)
++
++static inline void pvr_dbg_init(void) {};
++static inline void pvr_dbg_cleanup(void) {};
++
++#endif
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/pvrconfig.h b/drivers/gpu/pvr/pvrconfig.h
+new file mode 100644
+index 0000000..54f8549
+--- /dev/null
++++ b/drivers/gpu/pvr/pvrconfig.h
+@@ -0,0 +1,36 @@
++#ifndef _PVRCONFIG_H
++#define _PVRCONFIG_H
++
++#define SGX530 1
++
++#ifdef CONFIG_PVR_DEBUG
++# define PVR_BUILD_TYPE "debug"
++# define DEBUG 1
++#elif defined(CONFIG_PVR_TIMING)
++# define PVR_BUILD_TYPE "timing"
++# define TIMING 1
++#elif defined(CONFIG_PVR_RELEASE_N900)
++# define PVR_BUILD_TYPE "release"
++#endif
++
++#ifdef CONFIG_PVR_NO_HARDWARE
++# define NO_HARDWARE 1
++#endif
++
++#ifdef DEBUG
++# define DEBUG_LINUX_MEMORY_ALLOCATIONS 1
++# define DEBUG_LINUX_MEM_AREAS 1
++# define DEBUG_LINUX_MMAP_AREAS 1
++# define DEBUG_BRIDGE_KM 1
++
++# if (defined CONFIG_PVR_DEBUG_PDUMP) || (defined CONFIG_PVR_DEBUG_PDUMP_MODULE)
++# define PDUMP 1
++# endif
++
++#endif
++
++#ifdef CONFIG_PVR_EDM_DEBUG
++# define PVRSRV_USSE_EDM_STATUS_DEBUG 1
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/pvrmmap.h b/drivers/gpu/pvr/pvrmmap.h
+new file mode 100644
+index 0000000..c84e4d8
+--- /dev/null
++++ b/drivers/gpu/pvr/pvrmmap.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++enum PVRSRV_ERROR PVRPMapKMem(void *hModule, void **ppvLinAddr,
++ void *pvLinAddrKM, void **phMappingInfo,
++ void *hMHandle);
++
++IMG_BOOL PVRUnMapKMem(void *hModule, void *hMappingInfo, void *hMHandle);
++
++#endif
+diff --git a/drivers/gpu/pvr/pvrmodule.h b/drivers/gpu/pvr/pvrmodule.h
+new file mode 100644
+index 0000000..5f77d1c
+--- /dev/null
++++ b/drivers/gpu/pvr/pvrmodule.h
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRMODULE_H_
++#define _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif
+diff --git a/drivers/gpu/pvr/pvrsrv.c b/drivers/gpu/pvr/pvrsrv.c
+new file mode 100644
+index 0000000..6db31c0
+--- /dev/null
++++ b/drivers/gpu/pvr/pvrsrv.c
+@@ -0,0 +1,906 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/io.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pvr_bridge_km.h"
++#include "handle.h"
++#include "perproc.h"
++#include "pdump_km.h"
++#include "ra.h"
++
++#include "pvrversion.h"
++enum PVRSRV_ERROR AllocateDeviceID(struct SYS_DATA *psSysData, u32 *pui32DevID)
++{
++ struct SYS_DEVICE_ID *psDeviceWalker;
++ struct SYS_DEVICE_ID *psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++ while (psDeviceWalker < psDeviceEnd) {
++ if (!psDeviceWalker->bInUse) {
++ psDeviceWalker->bInUse = IMG_TRUE;
++ *pui32DevID = psDeviceWalker->uiID;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "AllocateDeviceID: No free and valid device IDs available!");
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR FreeDeviceID(struct SYS_DATA *psSysData, u32 ui32DevID)
++{
++ struct SYS_DEVICE_ID *psDeviceWalker;
++ struct SYS_DEVICE_ID *psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++ while (psDeviceWalker < psDeviceEnd) {
++ if ((psDeviceWalker->uiID == ui32DevID) &&
++ (psDeviceWalker->bInUse)) {
++ psDeviceWalker->bInUse = IMG_FALSE;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "FreeDeviceID: no matching dev ID that is in use!");
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR PVRSRVEnumerateDevicesKM(u32 *pui32NumDevices,
++ struct PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 i;
++
++ if (!pui32NumDevices || !psDevIdList) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVEnumerateDevicesKM: Invalid params");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVEnumerateDevicesKM: Failed to get SysData");
++ return eError;
++ }
++
++ for (i = 0; i < PVRSRV_MAX_DEVICES; i++)
++ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++
++ *pui32NumDevices = 0;
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ for (i = 0; psDeviceNode != NULL; i++) {
++ if (psDeviceNode->sDevId.eDeviceType !=
++ PVRSRV_DEVICE_TYPE_EXT) {
++ *psDevIdList++ = psDeviceNode->sDevId;
++ (*pui32NumDevices)++;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVInit(struct SYS_DATA *psSysData)
++{
++ enum PVRSRV_ERROR eError;
++
++ eError = ResManInit();
++ if (eError != PVRSRV_OK)
++ goto Error;
++
++ eError = PVRSRVPerProcessDataInit();
++ if (eError != PVRSRV_OK)
++ goto Error;
++
++ eError = PVRSRVHandleInit();
++ if (eError != PVRSRV_OK)
++ goto Error;
++
++ eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++ if (eError != PVRSRV_OK)
++ goto Error;
++
++ psSysData->eCurrentPowerState = PVRSRV_POWER_STATE_D0;
++ psSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++
++#if defined(PDUMP)
++ psSysData->bPowerUpPDumped = IMG_FALSE;
++#endif
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_EVENTOBJECT),
++ (void **) &psSysData->psGlobalEventObject,
++ NULL) != PVRSRV_OK)
++
++ goto Error;
++
++ if (OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT",
++ psSysData->psGlobalEventObject) != PVRSRV_OK)
++ goto Error;
++
++ return eError;
++
++Error:
++ PVRSRVDeInit(psSysData);
++ return eError;
++}
++
++void PVRSRVDeInit(struct SYS_DATA *psSysData)
++{
++ enum PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ if (psSysData == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVDeInit: "
++ "PVRSRVHandleDeInit failed - invalid param");
++ return;
++ }
++
++ if (psSysData->psGlobalEventObject) {
++ OSEventObjectDestroy(psSysData->psGlobalEventObject);
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(struct PVRSRV_EVENTOBJECT),
++ psSysData->psGlobalEventObject, NULL);
++ }
++
++ eError = PVRSRVHandleDeInit();
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDeInit: PVRSRVHandleDeInit failed");
++
++ eError = PVRSRVPerProcessDataDeInit();
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed");
++
++ ResManDeInit();
++}
++
++enum PVRSRV_ERROR PVRSRVRegisterDevice(struct SYS_DATA *psSysData,
++ enum PVRSRV_ERROR(*pfnRegisterDevice)
++ (struct PVRSRV_DEVICE_NODE *),
++ u32 ui32SOCInterruptBit,
++ u32 *pui32DeviceIndex)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE),
++ (void **) &psDeviceNode, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRegisterDevice : "
++ "Failed to alloc memory for psDeviceNode");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psDeviceNode, 0, sizeof(struct PVRSRV_DEVICE_NODE));
++
++ eError = pfnRegisterDevice(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE), psDeviceNode,
++ NULL);
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRegisterDevice : "
++ "Failed to register device");
++ return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++ }
++
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->psSysData = psSysData;
++ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++
++ psDeviceNode->psNext = psSysData->psDeviceNodeList;
++ psSysData->psDeviceNodeList = psDeviceNode;
++
++ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVInitialiseDevice(u32 ui32DevIndex)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice");
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVInitialiseDevice: Failed to get SysData");
++ return eError;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++
++ while (psDeviceNode) {
++ if (psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++ goto FoundDevice;
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVInitialiseDevice: requested device is not present");
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++FoundDevice:
++
++ PVR_ASSERT(psDeviceNode->ui32RefCount > 0);
++
++ eError = PVRSRVResManConnect(NULL, &psDeviceNode->hResManContext);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVInitialiseDevice: "
++ "Failed PVRSRVResManConnect call");
++ return eError;
++ }
++
++ if (psDeviceNode->pfnInitDevice != NULL) {
++ eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVInitialiseDevice: "
++ "Failed InitDevice call");
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem");
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVFinaliseSystem: Failed to get SysData");
++ return eError;
++ }
++
++ if (bInitSuccessful) {
++ eError = SysFinalise();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVFinaliseSystem: SysFinalise failed (%d)",
++ eError);
++ return eError;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ eError =
++ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.
++ ui32DeviceIndex,
++ PVRSRV_POWER_Unspecified,
++ KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVFinaliseSystem: "
++ "Failed PVRSRVSetDevicePowerStateKM "
++ "call (device index: %d)",
++ psDeviceNode->sDevId.ui32DeviceIndex);
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ if (psDeviceNode->pfnInitDeviceCompatCheck) {
++ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVFinaliseSystem: "
++ "Failed PVRSRVDevInitCompatCheck "
++ "call (device index: %d)",
++ psDeviceNode->sDevId.
++ ui32DeviceIndex);
++ return eError;
++ }
++ }
++ psDeviceNode = psDeviceNode->psNext;
++
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVDevInitCompatCheck(struct PVRSRV_DEVICE_NODE
++ *psDeviceNode)
++{
++
++ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
++}
++
++enum PVRSRV_ERROR PVRSRVAcquireDeviceDataKM(u32 ui32DevIndex,
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void **phDevCookie)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM");
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVAcquireDeviceDataKM: Failed to get SysData");
++ return eError;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++
++ if (eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN) {
++ while (psDeviceNode) {
++ if (psDeviceNode->sDevId.eDeviceType == eDeviceType)
++ goto FoundDevice;
++ psDeviceNode = psDeviceNode->psNext;
++ }
++ } else {
++ while (psDeviceNode) {
++ if (psDeviceNode->sDevId.ui32DeviceIndex ==
++ ui32DevIndex) {
++ goto FoundDevice;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVAcquireDeviceDataKM: requested device is not present");
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++FoundDevice:
++
++ PVR_ASSERT(psDeviceNode->ui32RefCount > 0);
++
++ if (phDevCookie)
++ *phDevCookie = (void *) psDeviceNode;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVDeinitialiseDevice(u32 ui32DevIndex)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct PVRSRV_DEVICE_NODE **ppsDevNode;
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDeinitialiseDevice: Failed to get SysData");
++ return eError;
++ }
++
++ ppsDevNode = &psSysData->psDeviceNodeList;
++ while (*ppsDevNode) {
++ if ((*ppsDevNode)->sDevId.ui32DeviceIndex == ui32DevIndex) {
++ psDeviceNode = *ppsDevNode;
++ goto FoundDevice;
++ }
++ ppsDevNode = &((*ppsDevNode)->psNext);
++ }
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDeinitialiseDevice: requested device %d is not present",
++ ui32DevIndex);
++
++ return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++ PVRSRV_POWER_STATE_D3,
++ KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVDeinitialiseDevice: "
++ "Failed PVRSRVSetDevicePowerStateKM call");
++ return eError;
++ }
++
++ ResManFreeResByCriteria(psDeviceNode->hResManContext,
++ RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ NULL, 0);
++
++ if (psDeviceNode->pfnDeInitDevice != NULL) {
++ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVDeinitialiseDevice: "
++ "Failed DeInitDevice call");
++ return eError;
++ }
++ }
++
++ PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE);
++ psDeviceNode->hResManContext = NULL;
++
++ *ppsDevNode = psDeviceNode->psNext;
++
++ FreeDeviceID(psSysData, ui32DevIndex);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_DEVICE_NODE), psDeviceNode, NULL);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PollForValueKM(u32 __iomem *pui32LinMemAddr,
++ u32 ui32Value, u32 ui32Mask, u32 ui32Waitus,
++ u32 ui32Tries)
++{
++ u32 uiMaxTime;
++
++ uiMaxTime = ui32Tries * ui32Waitus;
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime) {
++ if ((readl(pui32LinMemAddr) & ui32Mask) == ui32Value)
++ return PVRSRV_OK;
++ OSWaitus(ui32Waitus);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++enum PVRSRV_ERROR PVRSRVGetMiscInfoKM(struct PVRSRV_MISC_INFO *psMiscInfo)
++{
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++
++ if (!psMiscInfo) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoKM: invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psMiscInfo->ui32StatePresent = 0;
++
++ if (psMiscInfo->ui32StateRequest & ~(
++ PVRSRV_MISC_INFO_TIMER_PRESENT |
++ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT |
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT |
++ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT |
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoKM: invalid state request flags");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoKM: Failed to get SysData");
++ return eError;
++ }
++
++ if (((psMiscInfo->ui32StateRequest &
++ PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
++ (psSysData->pvSOCTimerRegisterKM != NULL)) {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++ psMiscInfo->pvSOCTimerRegisterKM =
++ psSysData->pvSOCTimerRegisterKM;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle =
++ psSysData->hSOCTimerRegisterOSMemHandle;
++ } else {
++ psMiscInfo->pvSOCTimerRegisterKM = NULL;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = NULL;
++ }
++
++ if (((psMiscInfo->ui32StateRequest &
++ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
++ (psSysData->pvSOCClockGateRegsBase != NULL)) {
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++ psMiscInfo->pvSOCClockGateRegs =
++ psSysData->pvSOCClockGateRegsBase;
++ psMiscInfo->ui32SOCClockGateRegsSize =
++ psSysData->ui32SOCClockGateRegsSize;
++ }
++
++ if (((psMiscInfo->ui32StateRequest &
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
++ (psMiscInfo->pszMemoryStr != NULL)) {
++ struct RA_ARENA **ppArena;
++ struct BM_HEAP *psBMHeap;
++ struct BM_CONTEXT *psBMContext;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ char *pszStr;
++ u32 ui32StrLen;
++ s32 i32Count;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++ ppArena = &psSysData->apsLocalDevMemArena[0];
++ while (*ppArena) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ RA_GetStats(*ppArena, &pszStr, &ui32StrLen);
++
++ ppArena++;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ OSSNPrintf(pszStr, 100, "\n\nDevice Type %d:\n",
++ psDeviceNode->sDevId.eDeviceType);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ OSSNPrintf(pszStr, 100,
++ "\nKernel Context:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ psBMHeap =
++ psDeviceNode->sDevMemoryInfo.
++ pBMKernelContext->psBMHeap;
++ while (psBMHeap) {
++ if (psBMHeap->pImportArena) {
++ RA_GetStats(psBMHeap->
++ pImportArena,
++ &pszStr,
++ &ui32StrLen);
++ }
++
++ if (psBMHeap->pVMArena) {
++ RA_GetStats(psBMHeap->pVMArena,
++ &pszStr,
++ &ui32StrLen);
++ }
++ psBMHeap = psBMHeap->psNext;
++ }
++ }
++
++ psBMContext = psDeviceNode->sDevMemoryInfo.pBMContext;
++ while (psBMContext) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ OSSNPrintf(pszStr, 100,
++ "\nApplication Context (hDevMemContext) 0x%08X:\n",
++ (void *)psBMContext);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ psBMHeap = psBMContext->psBMHeap;
++ while (psBMHeap) {
++ if (psBMHeap->pImportArena) {
++ RA_GetStats(psBMHeap->
++ pImportArena,
++ &pszStr,
++ &ui32StrLen);
++ }
++
++ if (psBMHeap->pVMArena) {
++ RA_GetStats(psBMHeap->pVMArena,
++ &pszStr,
++ &ui32StrLen);
++ }
++ psBMHeap = psBMHeap->psNext;
++ }
++ psBMContext = psBMContext->psNext;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ i32Count = OSSNPrintf(pszStr, 100, "\n\0");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (((psMiscInfo->ui32StateRequest &
++ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
++ (psSysData->psGlobalEventObject != NULL)) {
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++ psMiscInfo->sGlobalEventObject =
++ *psSysData->psGlobalEventObject;
++ }
++
++ if (((psMiscInfo->ui32StateRequest &
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL) &&
++ ((psMiscInfo->ui32StateRequest &
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL) &&
++ (psMiscInfo->pszMemoryStr != NULL)) {
++ char *pszStr;
++ u32 ui32StrLen;
++ u32 ui32LenStrPerNum = 12;
++ s32 i32Count;
++ int i;
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT;
++
++ psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
++ psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
++ psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH;
++ psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ for (i = 0; i < 4; i++) {
++ if (ui32StrLen < ui32LenStrPerNum)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%ld",
++ psMiscInfo->aui32DDKVersion[i]);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ if (i != 3) {
++ i32Count = OSSNPrintf(pszStr, 2, ".");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVGetFBStatsKM(u32 *pui32Total, u32 *pui32Available)
++{
++ u32 ui32Total = 0, i = 0;
++ u32 ui32Available = 0;
++
++ *pui32Total = 0;
++ *pui32Available = 0;
++
++ while (BM_ContiguousStatistics(i, &ui32Total, &ui32Available) ==
++ IMG_TRUE) {
++ *pui32Total += ui32Total;
++ *pui32Available += ui32Available;
++
++ i++;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PVRSRVDeviceLISR(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ u32 ui32InterruptSource;
++
++ if (!psDeviceNode) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n");
++ goto out;
++ }
++ psSysData = psDeviceNode->psSysData;
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
++ if (ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit) {
++ if (psDeviceNode->pfnDeviceISR != NULL)
++ bStatus =
++ (*psDeviceNode->pfnDeviceISR) (psDeviceNode->
++ pvISRData);
++
++ SysClearInterrupts(psSysData,
++ psDeviceNode->ui32SOCInterruptBit);
++ }
++
++out:
++ return bStatus;
++}
++
++IMG_BOOL PVRSRVSystemLISR(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = pvSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ u32 ui32InterruptSource;
++ u32 ui32ClearInterrupts = 0;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ if (!psSysData) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n");
++ goto out;
++ }
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, NULL);
++
++ if (ui32InterruptSource == 0)
++ goto out;
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode != NULL) {
++ if (psDeviceNode->pfnDeviceISR != NULL)
++ if (ui32InterruptSource & psDeviceNode->
++ ui32SOCInterruptBit) {
++ if ((*psDeviceNode->pfnDeviceISR)
++ (psDeviceNode->pvISRData))
++ bStatus = IMG_TRUE;
++
++ ui32ClearInterrupts |=
++ psDeviceNode->ui32SOCInterruptBit;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ SysClearInterrupts(psSysData, ui32ClearInterrupts);
++
++out:
++ return bStatus;
++}
++
++void PVRSRVMISR(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = pvSysData;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ if (!psSysData) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n");
++ return;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode != NULL) {
++ if (psDeviceNode->pfnDeviceMISR != NULL)
++ (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->
++ pvISRData);
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) ==
++ PVRSRV_ERROR_PROCESSING_BLOCKED)
++ PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
++
++ if (psSysData->psGlobalEventObject) {
++ void *hOSEventKM =
++ psSysData->psGlobalEventObject->hOSEventKM;
++ if (hOSEventKM)
++ OSEventObjectSignal(hOSEventKM);
++ }
++}
++
++enum PVRSRV_ERROR PVRSRVProcessConnect(u32 ui32PID)
++{
++ return PVRSRVPerProcessDataConnect(ui32PID);
++}
++
++void PVRSRVProcessDisconnect(u32 ui32PID)
++{
++ PVRSRVPerProcessDataDisconnect(ui32PID);
++}
++
++enum PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(void *hArena, u8 *pbyBuffer,
++ u32 *puiBufSize, IMG_BOOL bSave)
++{
++ u32 uiBytesSaved = 0;
++ void *pvLocalMemCPUVAddr;
++ struct RA_SEGMENT_DETAILS sSegDetails;
++
++ if (hArena == NULL)
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ sSegDetails.uiSize = 0;
++ sSegDetails.sCpuPhyAddr.uiAddr = 0;
++ sSegDetails.hSegment = NULL;
++
++ while (RA_GetNextLiveSegment(hArena, &sSegDetails))
++ if (pbyBuffer == NULL) {
++ uiBytesSaved +=
++ sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++ } else {
++ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) +
++ sSegDetails.uiSize) > *puiBufSize)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "PVRSRVSaveRestoreLiveSegments: "
++ "Base %08x size %08x",
++ sSegDetails.sCpuPhyAddr.uiAddr,
++ sSegDetails.uiSize);
++
++ pvLocalMemCPUVAddr = (void __force *)
++ OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++ if (pvLocalMemCPUVAddr == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSaveRestoreLiveSegments: "
++ "Failed to map local memory to host");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ if (bSave) {
++ OSMemCopy(pbyBuffer, &sSegDetails.uiSize,
++ sizeof(sSegDetails.uiSize));
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr,
++ sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ } else {
++ u32 uiSize;
++
++ OSMemCopy(&uiSize, pbyBuffer,
++ sizeof(sSegDetails.uiSize));
++
++ if (uiSize != sSegDetails.uiSize) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVSaveRestoreLiveSegments:"
++ " Segment size error");
++ } else {
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer,
++ sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ }
++
++ uiBytesSaved +=
++ sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++ OSUnMapPhysToLin((void __force __iomem *)
++ pvLocalMemCPUVAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++ }
++
++ if (pbyBuffer == NULL)
++ *puiBufSize = uiBytesSaved;
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/pvr/pvrversion.h b/drivers/gpu/pvr/pvrversion.h
+new file mode 100644
+index 0000000..5dc4779
+--- /dev/null
++++ b/drivers/gpu/pvr/pvrversion.h
+@@ -0,0 +1,37 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 4
++#define PVRVERSION_BRANCH 14
++#define PVRVERSION_BUILD 2514
++#define PVRVERSION_STRING "1.4.14.2514"
++#define PVRVERSION_FILE "eurasiacon.pj"
++
++#endif
+diff --git a/drivers/gpu/pvr/queue.c b/drivers/gpu/pvr/queue.c
+new file mode 100644
+index 0000000..71fa425
+--- /dev/null
++++ b/drivers/gpu/pvr/queue.c
+@@ -0,0 +1,828 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "proc.h"
++
++static int QueuePrintCommands(struct PVRSRV_QUEUE_INFO *psQueue, char *buffer,
++ size_t size)
++{
++ off_t off = 0;
++ int cmds = 0;
++ u32 ui32ReadOffset = psQueue->ui32ReadOffset;
++ u32 ui32WriteOffset = psQueue->ui32WriteOffset;
++ struct PVRSRV_COMMAND *psCmd;
++
++ while (ui32ReadOffset != ui32WriteOffset) {
++ psCmd = (struct PVRSRV_COMMAND *)((u32) psQueue->pvLinQueueKM +
++ ui32ReadOffset);
++
++ off = printAppend(buffer, size, off,
++ "%p %p %5u %6u %3u %5u %2u %2u %3u \n",
++ psQueue, psCmd, psCmd->ui32ProcessID,
++ psCmd->CommandType, psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex, psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount, psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++ if (cmds == 0)
++ off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++ return off;
++}
++
++off_t QueuePrintQueues(char *buffer, size_t size, off_t off)
++{
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_QUEUE_INFO *psQueue;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return END_OF_FILE;
++
++ if (!off)
++ return printAppend(buffer, size, 0,
++ "Command Queues\nQueue CmdPtr "
++ "Pid Command Size DevInd DSC SSC #Data ...\n");
++
++ for (psQueue = psSysData->psQueueList; --off && psQueue;
++ psQueue = psQueue->psNextKM)
++ ;
++
++ return psQueue ?
++ QueuePrintCommands(psQueue, buffer, size) : END_OF_FILE;
++}
++
++#define GET_SPACE_IN_CMDQ(psQueue) \
++ (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset) + \
++ (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
++ psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size) & \
++ (psQueue->ui32QueueSize - 1);
++
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
++ (ui32OpsComplete >= ui32OpsPending)
++
++static u32 NearestPower2(u32 ui32Value)
++{
++ u32 ui32Temp, ui32Result = 1;
++
++ if (!ui32Value)
++ return 0;
++
++ ui32Temp = ui32Value - 1;
++ while (ui32Temp) {
++ ui32Result <<= 1;
++ ui32Temp >>= 1;
++ }
++
++ return ui32Result;
++}
++
++enum PVRSRV_ERROR PVRSRVCreateCommandQueueKM(u32 ui32QueueSize,
++ struct PVRSRV_QUEUE_INFO **ppsQueueInfo)
++{
++ struct PVRSRV_QUEUE_INFO *psQueueInfo;
++ u32 ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++ void *hMemBlock;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_QUEUE_INFO),
++ (void **) &psQueueInfo, &hMemBlock) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVCreateCommandQueueKM: "
++ "Failed to alloc queue struct");
++ goto ErrorExit;
++ }
++ OSMemSet(psQueueInfo, 0, sizeof(struct PVRSRV_QUEUE_INFO));
++
++ psQueueInfo->hMemBlock[0] = hMemBlock;
++ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
++ &psQueueInfo->pvLinQueueKM, &hMemBlock) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVCreateCommandQueueKM: "
++ "Failed to alloc queue buffer");
++ goto ErrorExit;
++ }
++
++ psQueueInfo->hMemBlock[1] = hMemBlock;
++ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++ psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++ if (psSysData->psQueueList == NULL) {
++ eError = OSCreateResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++ }
++
++ if (OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID) != PVRSRV_OK)
++ goto ErrorExit;
++
++ psQueueInfo->psNextKM = psSysData->psQueueList;
++ psSysData->psQueueList = psQueueInfo;
++
++ if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) !=
++ PVRSRV_OK)
++ goto ErrorExit;
++
++ *ppsQueueInfo = psQueueInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psQueueInfo) {
++ if (psQueueInfo->pvLinQueueKM)
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_QUEUE_INFO),
++ psQueueInfo, psQueueInfo->hMemBlock[0]);
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR PVRSRVDestroyCommandQueueKM(
++ struct PVRSRV_QUEUE_INFO *psQueueInfo)
++{
++ struct PVRSRV_QUEUE_INFO *psQueue;
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psQueue = psSysData->psQueueList;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (psQueueInfo->ui32ReadOffset ==
++ psQueueInfo->ui32WriteOffset) {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVDestroyCommandQueueKM : Failed to empty queue");
++ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++ goto ErrorExit;
++ }
++
++ eError = OSLockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++
++ if (psQueue == psQueueInfo) {
++ psSysData->psQueueList = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM, psQueueInfo->hMemBlock[1]);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_QUEUE_INFO),
++ psQueueInfo, psQueueInfo->hMemBlock[0]);
++ } else {
++ while (psQueue) {
++ if (psQueue->psNextKM == psQueueInfo) {
++ psQueue->psNextKM = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (!psQueue) {
++ eError =
++ OSUnlockResource(&psSysData->sQProcessResource,
++ KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto ErrorExit;
++ }
++ }
++
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++
++ if (psSysData->psQueueList == NULL) {
++ eError = OSDestroyResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ goto ErrorExit;
++ }
++
++ErrorExit:
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVGetQueueSpaceKM(struct PVRSRV_QUEUE_INFO *psQueue,
++ u32 ui32ParamSize, void **ppvSpace)
++{
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++ ui32ParamSize = (ui32ParamSize + 3) & 0xFFFFFFFC;
++
++ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE) {
++ PVR_DPF(PVR_DBG_WARNING,
++ "PVRSRVGetQueueSpace: max command size is %d bytes",
++ PVRSRV_MAX_CMD_SIZE);
++ return PVRSRV_ERROR_CMD_TOO_BIG;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize) {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout == IMG_TRUE) {
++ *ppvSpace = NULL;
++
++ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++ } else {
++ *ppvSpace = (void *)(psQueue->ui32WriteOffset +
++ (u32)psQueue->pvLinQueueUM);
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVInsertCommandKM(struct PVRSRV_QUEUE_INFO *psQueue,
++ struct PVRSRV_COMMAND **ppsCommand,
++ u32 ui32DevIndex, u16 CommandType,
++ u32 ui32DstSyncCount,
++ struct PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ u32 ui32SrcSyncCount,
++ struct PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ u32 ui32DataByteSize)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_COMMAND *psCommand;
++ u32 ui32CommandSize;
++ u32 i;
++
++ ui32DataByteSize = (ui32DataByteSize + 3) & 0xFFFFFFFC;
++
++ ui32CommandSize = sizeof(struct PVRSRV_COMMAND) +
++ ((ui32DstSyncCount + ui32SrcSyncCount) *
++ sizeof(struct PVRSRV_SYNC_OBJECT)) + ui32DataByteSize;
++
++ eError = PVRSRVGetQueueSpaceKM(psQueue, ui32CommandSize,
++ (void **) &psCommand);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++ psCommand->ui32CmdSize = ui32CommandSize;
++ psCommand->ui32DevIndex = ui32DevIndex;
++ psCommand->CommandType = CommandType;
++ psCommand->ui32DstSyncCount = ui32DstSyncCount;
++ psCommand->ui32SrcSyncCount = ui32SrcSyncCount;
++ psCommand->psDstSync =
++ (struct PVRSRV_SYNC_OBJECT *)(((u8 *) psCommand) +
++ sizeof(struct PVRSRV_COMMAND));
++
++ psCommand->psSrcSync =
++ (struct PVRSRV_SYNC_OBJECT *)(((u8 *) psCommand->psDstSync) +
++ (ui32DstSyncCount *
++ sizeof(struct PVRSRV_SYNC_OBJECT)));
++
++ psCommand->pvData =
++ (struct PVRSRV_SYNC_OBJECT *)(((u8 *) psCommand->psSrcSync) +
++ (ui32SrcSyncCount *
++ sizeof(struct PVRSRV_SYNC_OBJECT)));
++
++ psCommand->ui32DataSize = ui32DataByteSize;
++
++ for (i = 0; i < ui32DstSyncCount; i++) {
++ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++ psCommand->psDstSync[i].ui32WriteOpsPending =
++ PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
++ psCommand->psDstSync[i].ui32ReadOpsPending =
++ PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
++ }
++
++ for (i = 0; i < ui32SrcSyncCount; i++) {
++ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++ psCommand->psSrcSync[i].ui32WriteOpsPending =
++ PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
++ psCommand->psSrcSync[i].ui32ReadOpsPending =
++ PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);
++ }
++
++ *ppsCommand = psCommand;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR PVRSRVSubmitCommandKM(struct PVRSRV_QUEUE_INFO *psQueue,
++ struct PVRSRV_COMMAND *psCommand)
++{
++
++ if (psCommand->ui32DstSyncCount > 0) {
++ psCommand->psDstSync = (struct PVRSRV_SYNC_OBJECT *)
++ (((u8 *)psQueue->pvLinQueueKM) +
++ psQueue->ui32WriteOffset +
++ sizeof(struct PVRSRV_COMMAND));
++ }
++
++ if (psCommand->ui32SrcSyncCount > 0) {
++ psCommand->psSrcSync = (struct PVRSRV_SYNC_OBJECT *)
++ (((u8 *)psQueue->pvLinQueueKM) +
++ psQueue->ui32WriteOffset +
++ sizeof(struct PVRSRV_COMMAND) +
++ (psCommand->ui32DstSyncCount *
++ sizeof(struct PVRSRV_SYNC_OBJECT)));
++ }
++
++ psCommand->pvData = (struct PVRSRV_SYNC_OBJECT *)
++ (((u8 *)psQueue->pvLinQueueKM) +
++ psQueue->ui32WriteOffset +
++ sizeof(struct PVRSRV_COMMAND) +
++ (psCommand->ui32DstSyncCount *
++ sizeof(struct PVRSRV_SYNC_OBJECT)) +
++ (psCommand->ui32SrcSyncCount *
++ sizeof(struct PVRSRV_SYNC_OBJECT)));
++
++ UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR PVRSRVProcessCommand(struct SYS_DATA *psSysData,
++ struct PVRSRV_COMMAND *psCommand,
++ IMG_BOOL bFlush)
++{
++ struct PVRSRV_SYNC_OBJECT *psWalkerObj;
++ struct PVRSRV_SYNC_OBJECT *psEndObj;
++ u32 i;
++ struct COMMAND_COMPLETE_DATA *psCmdCompleteData;
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ u32 ui32WriteOpsComplete;
++ u32 ui32ReadOpsComplete;
++
++ psWalkerObj = psCommand->psDstSync;
++ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++ while (psWalkerObj < psEndObj) {
++ struct PVRSRV_SYNC_DATA *psSyncData =
++ psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete !=
++ psWalkerObj->ui32ReadOpsPending)) {
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete,
++ psWalkerObj->ui32ReadOpsPending)) {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++
++ psWalkerObj++;
++ }
++
++ psWalkerObj = psCommand->psSrcSync;
++ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++ while (psWalkerObj < psEndObj) {
++ struct PVRSRV_SYNC_DATA *psSyncData =
++ psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++
++ if ((ui32WriteOpsComplete !=
++ psWalkerObj->ui32WriteOpsPending) ||
++ (ui32ReadOpsComplete !=
++ psWalkerObj->ui32ReadOpsPending)) {
++ if (!bFlush &&
++ SYNCOPS_STALE(ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending) &&
++ SYNCOPS_STALE(ui32ReadOpsComplete,
++ psWalkerObj->ui32ReadOpsPending)) {
++ PVR_DPF(PVR_DBG_WARNING,
++ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x "
++ "ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++ psSyncData, ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending);
++ }
++
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete,
++ psWalkerObj->ui32ReadOpsPending)) {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++ psWalkerObj++;
++ }
++
++ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++ psCommand->ui32DevIndex);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psCmdCompleteData =
++ psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->
++ CommandType];
++ if (psCmdCompleteData->bInUse)
++
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++
++ psCmdCompleteData->bInUse = IMG_TRUE;
++
++ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++ for (i = 0; i < psCommand->ui32DstSyncCount; i++)
++ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++
++ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++ for (i = 0; i < psCommand->ui32SrcSyncCount; i++)
++ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++
++ if (psSysData->ppfnCmdProcList[psCommand->ui32DevIndex]
++ [psCommand->CommandType]((void *)
++ psCmdCompleteData,
++ psCommand->ui32DataSize,
++ psCommand->pvData) == IMG_FALSE) {
++ psCmdCompleteData->bInUse = IMG_FALSE;
++ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVProcessQueues(u32 ui32CallerID, IMG_BOOL bFlush)
++{
++ struct PVRSRV_QUEUE_INFO *psQueue;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_COMMAND *psCommand;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ enum PVRSRV_ERROR eError;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ psSysData->bReProcessQueues = IMG_FALSE;
++
++ eError = OSLockResource(&psSysData->sQProcessResource, ui32CallerID);
++ if (eError != PVRSRV_OK) {
++ psSysData->bReProcessQueues = IMG_TRUE;
++
++ if (ui32CallerID == ISR_ID) {
++ if (bFlush) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVProcessQueues: "
++ "Couldn't acquire queue processing "
++ "lock for FLUSH");
++ } else {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "PVRSRVProcessQueues: "
++ "Couldn't acquire queue "
++ "processing lock");
++ }
++ } else {
++ PVR_DPF(PVR_DBG_MESSAGE, "PVRSRVProcessQueues: "
++ "Queue processing lock-acquire failed "
++ "when called from the Services driver.");
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "This is due to MISR queue processing "
++ "being interrupted by the Services driver.");
++ }
++
++ return PVRSRV_OK;
++ }
++
++ psQueue = psSysData->psQueueList;
++
++ if (!psQueue) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "No Queues installed - cannot process commands");
++ }
++
++ if (bFlush)
++ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++
++ while (psQueue) {
++ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset) {
++ psCommand = (struct PVRSRV_COMMAND *)((u32) psQueue->
++ pvLinQueueKM + psQueue->ui32ReadOffset);
++
++ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush)
++ == PVRSRV_OK) {
++ UPDATE_QUEUE_ROFF(psQueue,
++ psCommand->ui32CmdSize)
++ if (bFlush)
++ continue;
++ }
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (bFlush)
++ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode != NULL) {
++ if (psDeviceNode->bReProcessDeviceCommandComplete &&
++ psDeviceNode->pfnDeviceCommandComplete != NULL) {
++ (*psDeviceNode->
++ pfnDeviceCommandComplete) (psDeviceNode);
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++
++ if (psSysData->bReProcessQueues)
++ return PVRSRV_ERROR_PROCESSING_BLOCKED;
++
++ return PVRSRV_OK;
++}
++
++void PVRSRVCommandCompleteKM(void *hCmdCookie, IMG_BOOL bScheduleMISR)
++{
++ u32 i;
++ struct COMMAND_COMPLETE_DATA *psCmdCompleteData =
++ (struct COMMAND_COMPLETE_DATA *)hCmdCookie;
++ struct SYS_DATA *psSysData;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return;
++
++ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount; i++) {
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->
++ ui32WriteOpsComplete++;
++ }
++
++ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount; i++) {
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->
++ ui32ReadOpsComplete++;
++ }
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++
++ PVRSRVCommandCompleteCallbacks();
++
++ if (bScheduleMISR)
++ OSScheduleMISR(psSysData);
++}
++
++void PVRSRVCommandCompleteCallbacks(void)
++{
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVCommandCompleteCallbacks: "
++ "SysAcquireData failed");
++ return;
++ }
++
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode != NULL) {
++ if (psDeviceNode->pfnDeviceCommandComplete != NULL)
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ psDeviceNode = psDeviceNode->psNext;
++ }
++}
++
++enum PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(u32 ui32DevIndex,
++ IMG_BOOL (**ppfnCmdProcList)(void *, u32, void *),
++ u32 ui32MaxSyncsPerCmd[][2], u32 ui32CmdCount)
++{
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++ u32 i;
++ u32 ui32AllocSize;
++ IMG_BOOL (**ppfnCmdProc)(void *, u32, void *);
++ struct COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++ if (ui32DevIndex >= SYS_DEVICE_COUNT) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: SysAcquireData failed");
++ return eError;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, ui32CmdCount *
++ sizeof(IMG_BOOL (*)(void *, u32, void *)),
++ (void **)&psSysData->ppfnCmdProcList[ui32DevIndex],
++ NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: Failed to alloc queue");
++ return eError;
++ }
++
++ ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++ for (i = 0; i < ui32CmdCount; i++)
++ ppfnCmdProc[i] = ppfnCmdProcList[i];
++
++ ui32AllocSize = ui32CmdCount * sizeof(struct COMMAND_COMPLETE_DATA *);
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (void **) &psSysData->
++ ppsCmdCompleteData[ui32DevIndex], NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: Failed to alloc CC data");
++ goto ErrorExit;
++ }
++
++ /* clear the list to ensure that we don't try to access uninitialised
++ * pointer in the 'error' execution path */
++ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex], 0x00,
++ ui32AllocSize);
++
++ for (i = 0; i < ui32CmdCount; i++) {
++ ui32AllocSize = sizeof(struct COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0] + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(struct PVRSRV_SYNC_OBJECT));
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (void **)&psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i],
++ NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVRegisterCmdProcListKM: "
++ "Failed to alloc cmd %d",
++ i);
++ goto ErrorExit;
++ }
++
++ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00,
++ ui32AllocSize);
++
++ psCmdCompleteData =
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++ psCmdCompleteData->psDstSync = (struct PVRSRV_SYNC_OBJECT *)
++ (((u32) psCmdCompleteData) +
++ sizeof(struct COMMAND_COMPLETE_DATA));
++ psCmdCompleteData->psSrcSync = (struct PVRSRV_SYNC_OBJECT *)
++ (((u32) psCmdCompleteData->psDstSync) +
++ (sizeof(struct PVRSRV_SYNC_OBJECT) *
++ ui32MaxSyncsPerCmd[i][0]));
++ psCmdCompleteData->ui32AllocSize = ui32AllocSize;
++ }
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex] != NULL) {
++ for (i = 0; i < ui32CmdCount; i++) {
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] !=
++ NULL) {
++ psCmdCompleteData =
++ psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i];
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psCmdCompleteData->ui32AllocSize,
++ psCmdCompleteData, NULL);
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(struct COMMAND_COMPLETE_DATA *),
++ psSysData->ppsCmdCompleteData[ui32DevIndex],
++ NULL);
++ }
++
++ if (psSysData->ppfnCmdProcList[ui32DevIndex] != NULL) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0,
++ psSysData->ppfnCmdProcList[ui32DevIndex], NULL);
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(u32 ui32DevIndex, u32 ui32CmdCount)
++{
++ struct SYS_DATA *psSysData;
++ enum PVRSRV_ERROR eError;
++ u32 i;
++
++ if (ui32DevIndex >= SYS_DEVICE_COUNT) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex);
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: SysAcquireData failed");
++ return eError;
++ }
++
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex] == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: Invalid command array");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ } else {
++ for (i = 0; i < ui32CmdCount; i++) {
++
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] !=
++ NULL) {
++ struct COMMAND_COMPLETE_DATA *
++ psCmdCompleteData = psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i];
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psCmdCompleteData->ui32AllocSize,
++ psCmdCompleteData, NULL);
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(struct COMMAND_COMPLETE_DATA *),
++ psSysData->ppsCmdCompleteData[ui32DevIndex], NULL);
++ }
++
++ if (psSysData->ppfnCmdProcList[ui32DevIndex] != NULL) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount *
++ sizeof(IMG_BOOL (*)(void *, u32, void *)),
++ psSysData->ppfnCmdProcList[ui32DevIndex], NULL);
++ }
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/pvr/queue.h b/drivers/gpu/pvr/queue.h
+new file mode 100644
+index 0000000..09f5479
+--- /dev/null
++++ b/drivers/gpu/pvr/queue.h
+@@ -0,0 +1,81 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \
++ psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++struct COMMAND_COMPLETE_DATA {
++ IMG_BOOL bInUse;
++
++ u32 ui32DstSyncCount;
++ u32 ui32SrcSyncCount;
++ struct PVRSRV_SYNC_OBJECT *psDstSync;
++ struct PVRSRV_SYNC_OBJECT *psSrcSync;
++ u32 ui32AllocSize;
++};
++
++enum PVRSRV_ERROR PVRSRVProcessQueues(u32 ui32CallerID, IMG_BOOL bFlush);
++
++#if defined(__KERNEL__)
++#include <linux/types.h>
++off_t QueuePrintQueues(char *buffer, size_t size, off_t off);
++#endif
++
++enum PVRSRV_ERROR PVRSRVCreateCommandQueueKM(u32 ui32QueueSize,
++ struct PVRSRV_QUEUE_INFO **ppsQueueInfo);
++enum PVRSRV_ERROR PVRSRVDestroyCommandQueueKM(
++ struct PVRSRV_QUEUE_INFO *psQueueInfo);
++
++enum PVRSRV_ERROR PVRSRVInsertCommandKM(struct PVRSRV_QUEUE_INFO *psQueue,
++ struct PVRSRV_COMMAND **ppsCommand, u32 ui32DevIndex,
++ u16 CommandType, u32 ui32DstSyncCount,
++ struct PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ u32 ui32SrcSyncCount,
++ struct PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ u32 ui32DataByteSize);
++
++enum PVRSRV_ERROR PVRSRVGetQueueSpaceKM(struct PVRSRV_QUEUE_INFO *psQueue,
++ u32 ui32ParamSize, void **ppvSpace);
++
++enum PVRSRV_ERROR PVRSRVSubmitCommandKM(struct PVRSRV_QUEUE_INFO *psQueue,
++ struct PVRSRV_COMMAND *psCommand);
++
++void PVRSRVCommandCompleteKM(void *hCmdCookie, IMG_BOOL bScheduleMISR);
++
++void PVRSRVCommandCompleteCallbacks(void);
++
++enum PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(u32 ui32DevIndex,
++ IMG_BOOL (**ppfnCmdProcList)(void *, u32, void *),
++ u32 ui32MaxSyncsPerCmd[][2], u32 ui32CmdCount);
++enum PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(u32 ui32DevIndex,
++ u32 ui32CmdCount);
++
++
++#endif
+diff --git a/drivers/gpu/pvr/ra.c b/drivers/gpu/pvr/ra.c
+new file mode 100644
+index 0000000..4d002a6
+--- /dev/null
++++ b/drivers/gpu/pvr/ra.c
+@@ -0,0 +1,1163 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#include <linux/kernel.h>
++#include "proc.h"
++
++
++#define MINIMUM_HASH_SIZE 64
++
++struct BT {
++ enum bt_type {
++ btt_span,
++ btt_free,
++ btt_live
++ } type;
++
++ u32 base;
++ size_t uSize;
++
++ struct BT *pNextSegment;
++ struct BT *pPrevSegment;
++
++ struct BT *pNextFree;
++ struct BT *pPrevFree;
++
++ struct BM_MAPPING *psMapping;
++};
++struct BT;
++
++struct RA_ARENA {
++ char *name;
++ u32 uQuantum;
++ IMG_BOOL(*pImportAlloc)(void *, size_t uSize, size_t *pActualSize,
++ struct BM_MAPPING **ppsMapping, u32 uFlags,
++ u32 *pBase);
++ void (*pImportFree)(void *, u32, struct BM_MAPPING *psMapping);
++ void (*pBackingStoreFree)(void *, u32, u32, void *);
++ void *pImportHandle;
++#define FREE_TABLE_LIMIT 32
++ struct BT *aHeadFree[FREE_TABLE_LIMIT];
++ struct BT *pHeadSegment;
++ struct BT *pTailSegment;
++ struct HASH_TABLE *pSegmentHash;
++#ifdef RA_STATS
++ struct RA_STATISTICS sStatistics;
++#endif
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE 32
++ char szProcInfoName[PROC_NAME_SIZE];
++ char szProcSegsName[PROC_NAME_SIZE];
++ IMG_BOOL bInitProcEntry;
++#endif
++};
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static int RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof,
++ void *data);
++static int RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof,
++ void *data);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static char *ReplaceSpaces(char *const pS)
++{
++ char *pT;
++
++ for (pT = pS; *pT != 0; pT++)
++ if (*pT == ' ' || *pT == '\t')
++ *pT = '_';
++
++ return pS;
++}
++#endif
++
++static IMG_BOOL _RequestAllocFail(void *_h, size_t _uSize, size_t *_pActualSize,
++ struct BM_MAPPING **_ppsMapping,
++ u32 _uFlags, u32 *_pBase)
++{
++ PVR_UNREFERENCED_PARAMETER(_h);
++ PVR_UNREFERENCED_PARAMETER(_uSize);
++ PVR_UNREFERENCED_PARAMETER(_pActualSize);
++ PVR_UNREFERENCED_PARAMETER(_ppsMapping);
++ PVR_UNREFERENCED_PARAMETER(_uFlags);
++ PVR_UNREFERENCED_PARAMETER(_pBase);
++
++ return IMG_FALSE;
++}
++
++static u32 pvr_log2(size_t n)
++{
++ u32 l = 0;
++ n >>= 1;
++ while (n > 0) {
++ n >>= 1;
++ l++;
++ }
++ return l;
++}
++
++static enum PVRSRV_ERROR _SegmentListInsertAfter(struct RA_ARENA *pArena,
++ struct BT *pInsertionPoint,
++ struct BT *pBT)
++{
++ PVR_ASSERT(pArena != NULL);
++ PVR_ASSERT(pInsertionPoint != NULL);
++
++ if ((pInsertionPoint == NULL) || (pArena == NULL)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_SegmentListInsertAfter: invalid parameters");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBT->pNextSegment = pInsertionPoint->pNextSegment;
++ pBT->pPrevSegment = pInsertionPoint;
++ if (pInsertionPoint->pNextSegment == NULL)
++ pArena->pTailSegment = pBT;
++ else
++ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
++ pInsertionPoint->pNextSegment = pBT;
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR _SegmentListInsert(struct RA_ARENA *pArena,
++ struct BT *pBT)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (pArena->pHeadSegment == NULL) {
++ pArena->pHeadSegment = pArena->pTailSegment = pBT;
++ pBT->pNextSegment = pBT->pPrevSegment = NULL;
++ } else {
++ struct BT *pBTScan;
++ if (pBT->base < pArena->pHeadSegment->base) {
++ pBT->pNextSegment = pArena->pHeadSegment;
++ pArena->pHeadSegment->pPrevSegment = pBT;
++ pArena->pHeadSegment = pBT;
++ pBT->pPrevSegment = NULL;
++ } else {
++ pBTScan = pArena->pHeadSegment;
++
++ while ((pBTScan->pNextSegment != NULL) &&
++ (pBT->base >= pBTScan->pNextSegment->base))
++ pBTScan = pBTScan->pNextSegment;
++
++ eError = _SegmentListInsertAfter(pArena, pBTScan, pBT);
++ if (eError != PVRSRV_OK)
++ return eError;
++ }
++ }
++ return eError;
++}
++
++static void _SegmentListRemove(struct RA_ARENA *pArena, struct BT *pBT)
++{
++ if (pBT->pPrevSegment == NULL)
++ pArena->pHeadSegment = pBT->pNextSegment;
++ else
++ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++ if (pBT->pNextSegment == NULL)
++ pArena->pTailSegment = pBT->pPrevSegment;
++ else
++ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static struct BT *_SegmentSplit(struct RA_ARENA *pArena, struct BT *pBT,
++ size_t uSize)
++{
++ struct BT *pNeighbour;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_SegmentSplit: invalid parameter - pArena");
++ return NULL;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BT),
++ (void **) &pNeighbour, NULL) != PVRSRV_OK)
++ return NULL;
++
++ pNeighbour->pPrevSegment = pBT;
++ pNeighbour->pNextSegment = pBT->pNextSegment;
++ if (pBT->pNextSegment == NULL)
++ pArena->pTailSegment = pNeighbour;
++ else
++ pBT->pNextSegment->pPrevSegment = pNeighbour;
++ pBT->pNextSegment = pNeighbour;
++
++ pNeighbour->type = btt_free;
++ pNeighbour->uSize = pBT->uSize - uSize;
++ pNeighbour->base = pBT->base + uSize;
++ pNeighbour->psMapping = pBT->psMapping;
++ pBT->uSize = uSize;
++ return pNeighbour;
++}
++
++static void _FreeListInsert(struct RA_ARENA *pArena, struct BT *pBT)
++{
++ u32 uIndex;
++ uIndex = pvr_log2(pBT->uSize);
++ pBT->type = btt_free;
++ pBT->pNextFree = pArena->aHeadFree[uIndex];
++ pBT->pPrevFree = NULL;
++ if (pArena->aHeadFree[uIndex] != NULL)
++ pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++ pArena->aHeadFree[uIndex] = pBT;
++}
++
++static void _FreeListRemove(struct RA_ARENA *pArena, struct BT *pBT)
++{
++ u32 uIndex;
++ uIndex = pvr_log2(pBT->uSize);
++ if (pBT->pNextFree != NULL)
++ pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++ if (pBT->pPrevFree == NULL)
++ pArena->aHeadFree[uIndex] = pBT->pNextFree;
++ else
++ pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++static struct BT *_BuildSpanMarker(u32 base, size_t uSize)
++{
++ struct BT *pBT;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BT),
++ (void **) &pBT, NULL) != PVRSRV_OK)
++ return NULL;
++
++ pBT->type = btt_span;
++ pBT->base = base;
++ pBT->uSize = uSize;
++ pBT->psMapping = NULL;
++
++ return pBT;
++}
++
++static struct BT *_BuildBT(u32 base, size_t uSize)
++{
++ struct BT *pBT;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct BT),
++ (void **) &pBT, NULL) != PVRSRV_OK)
++ return NULL;
++
++ pBT->type = btt_free;
++ pBT->base = base;
++ pBT->uSize = uSize;
++
++ return pBT;
++}
++
++static struct BT *_InsertResource(struct RA_ARENA *pArena, u32 base,
++ size_t uSize)
++{
++ struct BT *pBT;
++ PVR_ASSERT(pArena != NULL);
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_InsertResource: invalid parameter - pArena");
++ return NULL;
++ }
++
++ pBT = _BuildBT(base, uSize);
++ if (pBT != NULL) {
++ if (_SegmentListInsert(pArena, pBT) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_InsertResource: call to _SegmentListInsert failed");
++ return NULL;
++ }
++ _FreeListInsert(pArena, pBT);
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount += uSize;
++ pArena->sStatistics.uFreeResourceCount += uSize;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ }
++ return pBT;
++}
++
++static struct BT *_InsertResourceSpan(struct RA_ARENA *pArena, u32 base,
++ size_t uSize)
++{
++ enum PVRSRV_ERROR eError;
++ struct BT *pSpanStart;
++ struct BT *pSpanEnd;
++ struct BT *pBT;
++
++ PVR_ASSERT(pArena != NULL);
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_InsertResourceSpan: invalid parameter - pArena");
++ return NULL;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++ pArena->name, base, uSize);
++
++ pSpanStart = _BuildSpanMarker(base, uSize);
++ if (pSpanStart == NULL)
++ goto fail_start;
++ pSpanEnd = _BuildSpanMarker(base + uSize, 0);
++ if (pSpanEnd == NULL)
++ goto fail_end;
++
++ pBT = _BuildBT(base, uSize);
++ if (pBT == NULL)
++ goto fail_bt;
++
++ eError = _SegmentListInsert(pArena, pSpanStart);
++ if (eError != PVRSRV_OK)
++ goto fail_SegListInsert;
++
++ eError = _SegmentListInsertAfter(pArena, pSpanStart, pBT);
++ if (eError != PVRSRV_OK)
++ goto fail_SegListInsert;
++
++ _FreeListInsert(pArena, pBT);
++
++ eError = _SegmentListInsertAfter(pArena, pBT, pSpanEnd);
++ if (eError != PVRSRV_OK)
++ goto fail_SegListInsert;
++
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount += uSize;
++#endif
++ return pBT;
++
++fail_SegListInsert:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), pBT, NULL);
++fail_bt:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), pSpanEnd, NULL);
++fail_end:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), pSpanStart, NULL);
++fail_start:
++ return NULL;
++}
++
++static void _FreeBT(struct RA_ARENA *pArena, struct BT *pBT,
++ IMG_BOOL bFreeBackingStore)
++{
++ struct BT *pNeighbour;
++ u32 uOrigBase;
++ size_t uOrigSize;
++
++ PVR_ASSERT(pArena != NULL);
++ PVR_ASSERT(pBT != NULL);
++
++ if ((pArena == NULL) || (pBT == NULL)) {
++ PVR_DPF(PVR_DBG_ERROR, "_FreeBT: invalid parameter");
++ return;
++ }
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount--;
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += pBT->uSize;
++#endif
++
++ uOrigBase = pBT->base;
++ uOrigSize = pBT->uSize;
++
++ pNeighbour = pBT->pPrevSegment;
++ if (pNeighbour != NULL && pNeighbour->type == btt_free &&
++ pNeighbour->base + pNeighbour->uSize == pBT->base) {
++ _FreeListRemove(pArena, pNeighbour);
++ _SegmentListRemove(pArena, pNeighbour);
++ pBT->base = pNeighbour->base;
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT),
++ pNeighbour, NULL);
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++ pNeighbour = pBT->pNextSegment;
++ if (pNeighbour != NULL && pNeighbour->type == btt_free &&
++ pBT->base + pBT->uSize == pNeighbour->base) {
++ _FreeListRemove(pArena, pNeighbour);
++ _SegmentListRemove(pArena, pNeighbour);
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT),
++ pNeighbour, NULL);
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++ if (pArena->pBackingStoreFree != NULL && bFreeBackingStore) {
++ u32 uRoundedStart, uRoundedEnd;
++
++ uRoundedStart = (uOrigBase / pArena->uQuantum) *
++ pArena->uQuantum;
++
++ if (uRoundedStart < pBT->base)
++ uRoundedStart += pArena->uQuantum;
++
++ uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum -
++ 1) / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedEnd > (pBT->base + pBT->uSize))
++ uRoundedEnd -= pArena->uQuantum;
++
++ if (uRoundedStart < uRoundedEnd)
++ pArena->pBackingStoreFree(pArena->pImportHandle,
++ uRoundedStart, uRoundedEnd,
++ (void *) 0);
++ }
++
++ if (pBT->pNextSegment != NULL && pBT->pNextSegment->type == btt_span &&
++ pBT->pPrevSegment != NULL && pBT->pPrevSegment->type == btt_span) {
++ struct BT *next = pBT->pNextSegment;
++ struct BT *prev = pBT->pPrevSegment;
++ _SegmentListRemove(pArena, next);
++ _SegmentListRemove(pArena, prev);
++ _SegmentListRemove(pArena, pBT);
++ pArena->pImportFree(pArena->pImportHandle, pBT->base,
++ pBT->psMapping);
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++ pArena->sStatistics.uExportCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount -= pBT->uSize;
++ pArena->sStatistics.uTotalResourceCount -= pBT->uSize;
++#endif
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), next,
++ NULL);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), prev,
++ NULL);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), pBT,
++ NULL);
++ } else
++ _FreeListInsert(pArena, pBT);
++}
++
++static int alloc_from_bt(struct RA_ARENA *arena, struct BT *bt, u32 start,
++ size_t size, u32 align,
++ struct BM_MAPPING **new_mapping, u32 *new_base)
++{
++ _FreeListRemove(arena, bt);
++ PVR_ASSERT(bt->type == btt_free);
++#ifdef RA_STATS
++ arena->sStatistics.uLiveSegmentCount++;
++ arena->sStatistics.uFreeSegmentCount--;
++ arena->sStatistics.uFreeResourceCount -= bt->uSize;
++#endif
++ if (start > bt->base) {
++ struct BT *next_bt;
++
++ next_bt = _SegmentSplit(arena, bt, start - bt->base);
++
++ if (!next_bt) {
++ PVR_DPF(PVR_DBG_ERROR, "_AttemptAllocAligned: "
++ "Front split failed");
++
++ _FreeListInsert(arena, bt);
++ return -1;
++ }
++
++ _FreeListInsert(arena, bt);
++#ifdef RA_STATS
++ arena->sStatistics.uFreeSegmentCount++;
++ arena->sStatistics.uFreeResourceCount += bt->uSize;
++#endif
++ bt = next_bt;
++ }
++
++ if (bt->uSize > size) {
++ struct BT *next_bt;
++ next_bt = _SegmentSplit(arena, bt, size);
++
++ if (!next_bt) {
++ PVR_DPF(PVR_DBG_ERROR, "_AttemptAllocAligned: "
++ "Back split failed");
++
++ _FreeListInsert(arena, bt);
++ return -1;
++ }
++
++ _FreeListInsert(arena, next_bt);
++#ifdef RA_STATS
++ arena->sStatistics.uFreeSegmentCount++;
++ arena->sStatistics.uFreeResourceCount += next_bt->uSize;
++#endif
++ }
++
++ bt->type = btt_live;
++
++ if (!HASH_Insert(arena->pSegmentHash, bt->base, (u32)bt)) {
++ _FreeBT(arena, bt, IMG_FALSE);
++ return -1;
++ }
++
++ if (new_mapping)
++ *new_mapping = bt->psMapping;
++
++ *new_base = bt->base;
++
++ return 0;
++}
++
++static IMG_BOOL _AttemptAllocAligned(struct RA_ARENA *pArena, size_t uSize,
++ struct BM_MAPPING **ppsMapping, u32 uFlags, u32 uAlignment,
++ u32 *base)
++{
++ u32 uIndex;
++ PVR_ASSERT(pArena != NULL);
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "_AttemptAllocAligned: invalid parameter - pArena");
++ return IMG_FALSE;
++ }
++
++ uIndex = pvr_log2(uSize);
++
++ while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex] == NULL)
++ uIndex++;
++
++ for (; uIndex < FREE_TABLE_LIMIT; uIndex++) {
++ struct BT *pBT;
++
++ pBT = pArena->aHeadFree[uIndex];
++ if (!pBT)
++ continue;
++
++ for (; pBT != NULL; pBT = pBT->pNextFree) {
++ u32 aligned_base;
++
++ if (uAlignment > 1)
++ aligned_base = (pBT->base + uAlignment -
++ 1) / uAlignment * uAlignment;
++ else
++ aligned_base = pBT->base;
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "RA_AttemptAllocAligned: pBT-base=0x%x "
++ "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++ pBT->base, pBT->uSize, aligned_base, uSize);
++
++ if (pBT->base + pBT->uSize < aligned_base + uSize)
++ continue;
++
++ if (pBT->psMapping && pBT->psMapping->ui32Flags !=
++ uFlags) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "AttemptAllocAligned: mismatch in "
++ "flags. Import has %x, request was %x",
++ pBT->psMapping->ui32Flags, uFlags);
++ continue;
++ }
++
++ if (alloc_from_bt(pArena, pBT, aligned_base, uSize,
++ uFlags, ppsMapping, base) < 0)
++ return IMG_FALSE;
++
++ return IMG_TRUE;
++ }
++ }
++
++ return IMG_FALSE;
++}
++
++struct RA_ARENA *RA_Create(char *name, u32 base, size_t uSize,
++ struct BM_MAPPING *psMapping, size_t uQuantum,
++ IMG_BOOL(*imp_alloc) (void *, size_t uSize,
++ size_t *pActualSize,
++ struct BM_MAPPING **ppsMapping,
++ u32 _flags, u32 *pBase),
++ void (*imp_free) (void *, u32, struct BM_MAPPING *),
++ void(*backingstore_free) (void *, u32, u32, void *),
++ void *pImportHandle)
++{
++ struct RA_ARENA *pArena;
++ struct BT *pBT;
++ int i;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "RA_Create: "
++ "name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++ name, base, uSize, imp_alloc, imp_free);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*pArena),
++ (void **) &pArena, NULL) != PVRSRV_OK)
++ goto arena_fail;
++
++ pArena->name = name;
++ pArena->pImportAlloc =
++ (imp_alloc != NULL) ? imp_alloc : _RequestAllocFail;
++ pArena->pImportFree = imp_free;
++ pArena->pBackingStoreFree = backingstore_free;
++ pArena->pImportHandle = pImportHandle;
++ for (i = 0; i < FREE_TABLE_LIMIT; i++)
++ pArena->aHeadFree[i] = NULL;
++ pArena->pHeadSegment = NULL;
++ pArena->pTailSegment = NULL;
++ pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount = 0;
++ pArena->sStatistics.uLiveSegmentCount = 0;
++ pArena->sStatistics.uFreeSegmentCount = 0;
++ pArena->sStatistics.uFreeResourceCount = 0;
++ pArena->sStatistics.uTotalResourceCount = 0;
++ pArena->sStatistics.uCumulativeAllocs = 0;
++ pArena->sStatistics.uCumulativeFrees = 0;
++ pArena->sStatistics.uImportCount = 0;
++ pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ if (strcmp(pArena->name, "") != 0) {
++ int ret;
++ int (*pfnCreateProcEntry) (const char *, read_proc_t,
++ write_proc_t, void *);
++
++ pArena->bInitProcEntry =
++ !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++ pfnCreateProcEntry = pArena->bInitProcEntry ? CreateProcEntry :
++ CreatePerProcessProcEntry;
++
++ ret = snprintf(pArena->szProcInfoName,
++ sizeof(pArena->szProcInfoName), "ra_info_%s",
++ pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcInfoName)) {
++ (void)pfnCreateProcEntry(ReplaceSpaces
++ (pArena->szProcInfoName),
++ RA_DumpInfo, NULL, pArena);
++ } else {
++ pArena->szProcInfoName[0] = 0;
++ PVR_DPF(PVR_DBG_ERROR, "RA_Create: "
++ "couldn't create ra_info proc entry for arena %s",
++ pArena->name);
++ }
++
++ ret = snprintf(pArena->szProcSegsName,
++ sizeof(pArena->szProcSegsName), "ra_segs_%s",
++ pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcInfoName)) {
++ (void)pfnCreateProcEntry(ReplaceSpaces
++ (pArena->szProcSegsName),
++ RA_DumpSegs, NULL, pArena);
++ } else {
++ pArena->szProcSegsName[0] = 0;
++ PVR_DPF(PVR_DBG_ERROR, "RA_Create: "
++ "couldn't create ra_segs proc entry for arena %s",
++ pArena->name);
++ }
++ }
++#endif
++
++ pArena->pSegmentHash = HASH_Create(MINIMUM_HASH_SIZE);
++ if (pArena->pSegmentHash == NULL)
++ goto hash_fail;
++ if (uSize > 0) {
++ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++ pBT = _InsertResource(pArena, base, uSize);
++ if (pBT == NULL)
++ goto insert_fail;
++ pBT->psMapping = psMapping;
++
++ }
++ return pArena;
++
++insert_fail:
++ HASH_Delete(pArena->pSegmentHash);
++hash_fail:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct RA_ARENA), pArena,
++ NULL);
++arena_fail:
++ return NULL;
++}
++
++void RA_Delete(struct RA_ARENA *pArena)
++{
++ u32 uIndex;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "RA_Delete: invalid parameter - pArena");
++ return;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "RA_Delete: name='%s'", pArena->name);
++
++ for (uIndex = 0; uIndex < FREE_TABLE_LIMIT; uIndex++)
++ pArena->aHeadFree[uIndex] = NULL;
++
++ while (pArena->pHeadSegment != NULL) {
++ struct BT *pBT = pArena->pHeadSegment;
++ PVR_ASSERT(pBT->type == btt_free);
++ _SegmentListRemove(pArena, pBT);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BT), pBT,
++ NULL);
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++#endif
++ }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ {
++ void (*pfnRemoveProcEntry) (const char *);
++
++ pfnRemoveProcEntry =
++ pArena->
++ bInitProcEntry ? RemoveProcEntry :
++ RemovePerProcessProcEntry;
++
++ if (pArena->szProcInfoName[0] != 0)
++ pfnRemoveProcEntry(pArena->szProcInfoName);
++
++ if (pArena->szProcSegsName[0] != 0)
++ pfnRemoveProcEntry(pArena->szProcSegsName);
++ }
++#endif
++ HASH_Delete(pArena->pSegmentHash);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct RA_ARENA), pArena,
++ NULL);
++}
++
++IMG_BOOL RA_TestDelete(struct RA_ARENA *pArena)
++{
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena != NULL)
++ while (pArena->pHeadSegment != NULL) {
++ struct BT *pBT = pArena->pHeadSegment;
++ if (pBT->type != btt_free)
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL RA_Add(struct RA_ARENA *pArena, u32 base, size_t uSize)
++{
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "RA_Add: invalid parameter - pArena");
++ return IMG_FALSE;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base,
++ uSize);
++
++ uSize = (uSize + pArena->uQuantum - 1) /
++ pArena->uQuantum * pArena->uQuantum;
++ return (IMG_BOOL)(_InsertResource(pArena, base, uSize) != NULL);
++}
++
++IMG_BOOL RA_Alloc(struct RA_ARENA *pArena, size_t uRequestSize,
++ struct BM_MAPPING **ppsMapping, u32 uFlags, u32 uAlignment,
++ u32 *base)
++{
++ IMG_BOOL bResult;
++ size_t uSize = uRequestSize;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "RA_Alloc: invalid parameter - pArena");
++ return IMG_FALSE;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "RA_Alloc: "
++ "arena='%s', size=0x%x(0x%x), alignment=0x%x",
++ pArena->name, uSize, uRequestSize, uAlignment);
++
++ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++ uAlignment, base);
++ if (!bResult) {
++ struct BM_MAPPING *psImportMapping;
++ u32 import_base;
++ size_t uImportSize = uSize;
++
++ if (uAlignment > pArena->uQuantum)
++ uImportSize += (uAlignment - 1);
++
++ uImportSize =
++ ((uImportSize + pArena->uQuantum - 1) /
++ pArena->uQuantum) * pArena->uQuantum;
++
++ bResult =
++ pArena->pImportAlloc(pArena->pImportHandle, uImportSize,
++ &uImportSize, &psImportMapping, uFlags,
++ &import_base);
++ if (bResult) {
++ struct BT *pBT;
++ pBT = _InsertResourceSpan(pArena, import_base,
++ uImportSize);
++
++ if (pBT == NULL) {
++ pArena->pImportFree(pArena->pImportHandle,
++ import_base,
++ psImportMapping);
++ PVR_DPF(PVR_DBG_MESSAGE, "RA_Alloc: "
++ "name='%s', size=0x%x failed!",
++ pArena->name, uSize);
++
++ return IMG_FALSE;
++ }
++ pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += uImportSize;
++ pArena->sStatistics.uImportCount++;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ bResult = _AttemptAllocAligned(pArena, uSize,
++ ppsMapping, uFlags, uAlignment,
++ base);
++ if (!bResult)
++ PVR_DPF(PVR_DBG_MESSAGE, "RA_Alloc: "
++ "name='%s' uAlignment failed!",
++ pArena->name);
++ }
++ }
++#ifdef RA_STATS
++ if (bResult)
++ pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++ pArena->name, uSize, *base, bResult);
++
++ return bResult;
++}
++
++void RA_Free(struct RA_ARENA *pArena, u32 base, IMG_BOOL bFreeBackingStore)
++{
++ struct BT *pBT;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "RA_Free: invalid parameter - pArena");
++ return;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "RA_Free: name='%s', base=0x%x", pArena->name, base);
++
++ pBT = (struct BT *)HASH_Remove(pArena->pSegmentHash, base);
++ PVR_ASSERT(pBT != NULL);
++
++ if (pBT) {
++ PVR_ASSERT(pBT->base == base);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++ _FreeBT(pArena, pBT, bFreeBackingStore);
++ }
++}
++
++IMG_BOOL RA_GetNextLiveSegment(void *hArena,
++ struct RA_SEGMENT_DETAILS *psSegDetails)
++{
++ struct BT *pBT;
++
++ if (psSegDetails->hSegment) {
++ pBT = (struct BT *)psSegDetails->hSegment;
++ } else {
++ struct RA_ARENA *pArena = (struct RA_ARENA *)hArena;
++ pBT = pArena->pHeadSegment;
++ }
++
++ while (pBT != NULL) {
++ if (pBT->type == btt_live) {
++ psSegDetails->uiSize = pBT->uSize;
++ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++ psSegDetails->hSegment = (void *) pBT->pNextSegment;
++
++ return IMG_TRUE;
++ }
++
++ pBT = pBT->pNextSegment;
++ }
++
++ psSegDetails->uiSize = 0;
++ psSegDetails->sCpuPhyAddr.uiAddr = 0;
++ psSegDetails->hSegment = (void *) -1;
++
++ return IMG_FALSE;
++}
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined(RA_STATS)
++static char *_BTType(int eType)
++{
++ switch (eType) {
++ case btt_span:
++ return "span";
++ case btt_free:
++ return "free";
++ case btt_live:
++ return "live";
++ }
++ return "junk";
++}
++#endif
++
++#if defined(ENABLE_RA_DUMP)
++void RA_Dump(struct RA_ARENA *pArena)
++{
++ struct BT *pBT;
++ PVR_ASSERT(pArena != NULL);
++ PVR_DPF(PVR_DBG_MESSAGE, "Arena '%s':", pArena->name);
++ PVR_DPF(PVR_DBG_MESSAGE,
++ " alloc=%08X free=%08X handle=%08X quantum=%d",
++ pArena->pImportAlloc, pArena->pImportFree,
++ pArena->pImportHandle, pArena->uQuantum);
++ PVR_DPF(PVR_DBG_MESSAGE, " segment Chain:");
++ if (pArena->pHeadSegment != NULL &&
++ pArena->pHeadSegment->pPrevSegment != NULL)
++ PVR_DPF(PVR_DBG_MESSAGE,
++ " error: head boundary tag has invalid pPrevSegment");
++ if (pArena->pTailSegment != NULL &&
++ pArena->pTailSegment->pNextSegment != NULL)
++ PVR_DPF(PVR_DBG_MESSAGE,
++ " error: tail boundary tag has invalid pNextSegment");
++
++ for (pBT = pArena->pHeadSegment; pBT != NULL; pBT = pBT->pNextSegment)
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "\tbase=0x%x size=0x%x type=%s ref=%08X",
++ (u32) pBT->base, pBT->uSize, _BTType(pBT->type),
++ pBT->pRef);
++
++}
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static int RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof,
++ void *data)
++{
++ struct BT *pBT = NULL;
++ int len = 0;
++ struct RA_ARENA *pArena = (struct RA_ARENA *)data;
++
++ if (count < 80) {
++ *start = (char *)0;
++ return 0;
++ }
++ *eof = 0;
++ *start = (char *)1;
++ if (off == 0)
++ return printAppend(page, count, 0,
++ "Arena \"%s\"\nBase Size Type Ref\n",
++ pArena->name);
++ for (pBT = pArena->pHeadSegment; --off && pBT;
++ pBT = pBT->pNextSegment)
++ ;
++ if (pBT)
++ len = printAppend(page, count, 0, "%08x %8x %4s %08x\n",
++ (unsigned)pBT->base, (unsigned)pBT->uSize,
++ _BTType(pBT->type), (unsigned)pBT->psMapping);
++ else
++ *eof = 1;
++ return len;
++}
++
++static int RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof,
++ void *data)
++{
++ int len = 0;
++ struct RA_ARENA *pArena = (struct RA_ARENA *)data;
++
++ if (count < 80) {
++ *start = (char *)0;
++ return 0;
++ }
++ *eof = 0;
++ switch (off) {
++ case 0:
++ len = printAppend(page, count, 0, "quantum\t\t\t%u\n",
++ pArena->uQuantum);
++ break;
++ case 1:
++ len = printAppend(page, count, 0, "import_handle\t\t%08X\n",
++ (unsigned)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 2:
++ len = printAppend(page, count, 0, "span count\t\t%u\n",
++ pArena->sStatistics.uSpanCount);
++ break;
++ case 3:
++ len = printAppend(page, count, 0, "live segment count\t%u\n",
++ pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 4:
++ len = printAppend(page, count, 0, "free segment count\t%u\n",
++ pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 5:
++ len = printAppend(page, count, 0,
++ "free resource count\t%u (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (unsigned)pArena->sStatistics.
++ uFreeResourceCount);
++ break;
++ case 6:
++ len = printAppend(page, count, 0, "total allocs\t\t%u\n",
++ pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 7:
++ len = printAppend(page, count, 0, "total frees\t\t%u\n",
++ pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 8:
++ len = printAppend(page, count, 0, "import count\t\t%u\n",
++ pArena->sStatistics.uImportCount);
++ break;
++ case 9:
++ len = printAppend(page, count, 0, "export count\t\t%u\n",
++ pArena->sStatistics.uExportCount);
++ break;
++#endif
++
++ default:
++ *eof = 1;
++ }
++ *start = (char *)1;
++ return len;
++}
++#endif
++
++#ifdef RA_STATS
++enum PVRSRV_ERROR RA_GetStats(struct RA_ARENA *pArena, char **ppszStr,
++ u32 *pui32StrLen)
++{
++ char *pszStr = *ppszStr;
++ u32 ui32StrLen = *pui32StrLen;
++ s32 i32Count;
++ struct BT *pBT;
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100,
++ " allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n",
++ pArena->pImportAlloc, pArena->pImportFree,
++ pArena->pImportHandle, pArena->uQuantum);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%lu\n",
++ pArena->sStatistics.uSpanCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%lu\n",
++ pArena->sStatistics.uLiveSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%lu\n",
++ pArena->sStatistics.uFreeSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (unsigned)pArena->sStatistics.uFreeResourceCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n",
++ pArena->sStatistics.uCumulativeAllocs);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n",
++ pArena->sStatistics.uCumulativeFrees);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%lu\n",
++ pArena->sStatistics.uImportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%lu\n",
++ pArena->sStatistics.uExportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ if (pArena->pHeadSegment != NULL &&
++ pArena->pHeadSegment->pPrevSegment != NULL) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100,
++ " error: head boundary tag has invalid pPrevSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (pArena->pTailSegment != NULL &&
++ pArena->pTailSegment->pNextSegment != NULL) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100,
++ " error: tail boundary tag has invalid pNextSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ for (pBT = pArena->pHeadSegment; pBT != NULL;
++ pBT = pBT->pNextSegment) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100,
++ "\tbase=0x%x size=0x%x type=%s ref=%08X\n",
++ (u32) pBT->base, pBT->uSize, _BTType(pBT->type),
++ pBT->psMapping);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ *ppszStr = pszStr;
++ *pui32StrLen = ui32StrLen;
++
++ return PVRSRV_OK;
++}
++#endif
+diff --git a/drivers/gpu/pvr/ra.h b/drivers/gpu/pvr/ra.h
+new file mode 100644
+index 0000000..2f9ceea
+--- /dev/null
++++ b/drivers/gpu/pvr/ra.h
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++struct RA_ARENA;
++struct BM_MAPPING;
++
++#define RA_STATS
++
++struct RA_STATISTICS {
++ u32 uSpanCount;
++ u32 uLiveSegmentCount;
++ u32 uFreeSegmentCount;
++ u32 uTotalResourceCount;
++ u32 uFreeResourceCount;
++ u32 uCumulativeAllocs;
++ u32 uCumulativeFrees;
++ u32 uImportCount;
++ u32 uExportCount;
++};
++struct RA_STATISTICS;
++
++struct RA_SEGMENT_DETAILS {
++ u32 uiSize;
++ struct IMG_CPU_PHYADDR sCpuPhyAddr;
++ void *hSegment;
++};
++struct RA_SEGMENT_DETAILS;
++
++struct RA_ARENA *RA_Create(char *name, u32 base, size_t uSize,
++ struct BM_MAPPING *psMapping, size_t uQuantum,
++ IMG_BOOL(*imp_alloc)(void *_h, size_t uSize,
++ size_t *pActualSize,
++ struct BM_MAPPING **ppsMapping,
++ u32 uFlags, u32 *pBase),
++ void (*imp_free)(void *, u32, struct BM_MAPPING *),
++ void (*backingstore_free)(void *, u32, u32, void *),
++ void *import_handle);
++
++void RA_Delete(struct RA_ARENA *pArena);
++
++IMG_BOOL RA_TestDelete(struct RA_ARENA *pArena);
++
++IMG_BOOL RA_Add(struct RA_ARENA *pArena, u32 base, size_t uSize);
++
++IMG_BOOL RA_Alloc(struct RA_ARENA *pArena, size_t uSize,
++ struct BM_MAPPING **ppsMapping, u32 uFlags, u32 uAlignment,
++ u32 *pBase);
++
++void RA_Free(struct RA_ARENA *pArena, u32 base, IMG_BOOL bFreeBackingStore);
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total) \
++{ \
++ if (total < 100) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++}
++
++#define UPDATE_SPACE(str, count, total) \
++{ \
++ if (count == -1) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++ else { \
++ str += count; \
++ total -= count; \
++ } \
++}
++
++IMG_BOOL RA_GetNextLiveSegment(void *hArena,
++ struct RA_SEGMENT_DETAILS *psSegDetails);
++
++enum PVRSRV_ERROR RA_GetStats(struct RA_ARENA *pArena, char **ppszStr,
++ u32 *pui32StrLen);
++
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/resman.c b/drivers/gpu/pvr/resman.c
+new file mode 100644
+index 0000000..5b9766f
+--- /dev/null
++++ b/drivers/gpu/pvr/resman.c
+@@ -0,0 +1,540 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/sched.h>
++#include <linux/hardirq.h>
++
++#include <linux/semaphore.h>
++
++#include "services_headers.h"
++#include "resman.h"
++
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ do { \
++ if (in_interrupt()) { \
++ printk(KERN_ERR "ISR cannot take RESMAN mutex\n"); \
++ BUG(); \
++ } else \
++ down(&lock); \
++} while (0)
++#define RELEASE_SYNC_OBJ up(&lock)
++
++
++#define RESMAN_SIGNATURE 0x12345678
++
++struct RESMAN_ITEM {
++#ifdef DEBUG
++ u32 ui32Signature;
++#endif
++ struct RESMAN_ITEM **ppsThis;
++ struct RESMAN_ITEM *psNext;
++
++ u32 ui32Flags;
++ u32 ui32ResType;
++
++ void *pvParam;
++ u32 ui32Param;
++
++ enum PVRSRV_ERROR (*pfnFreeResource)(void *pvParam, u32 ui32Param);
++};
++
++struct RESMAN_CONTEXT {
++#ifdef DEBUG
++ u32 ui32Signature;
++#endif
++ struct RESMAN_CONTEXT **ppsThis;
++ struct RESMAN_CONTEXT *psNext;
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc;
++ struct RESMAN_ITEM *psResItemList;
++
++};
++
++struct RESMAN_LIST {
++ struct RESMAN_CONTEXT *psContextList;
++};
++
++static struct RESMAN_LIST *gpsResList;
++
++#define PRINT_RESLIST(x, y, z)
++
++static void FreeResourceByPtr(struct RESMAN_ITEM *psItem,
++ IMG_BOOL bExecuteCallback);
++
++static int FreeResourceByCriteria(struct RESMAN_CONTEXT *psContext,
++ u32 ui32SearchCriteria,
++ u32 ui32ResType, void *pvParam,
++ u32 ui32Param,
++ IMG_BOOL bExecuteCallback);
++
++#ifdef DEBUG
++static void ValidateResList(struct RESMAN_LIST *psResList);
++#define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++#define VALIDATERESLIST()
++#endif
++
++enum PVRSRV_ERROR ResManInit(void)
++{
++ if (gpsResList == NULL) {
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*gpsResList),
++ (void **) &gpsResList,
++ NULL) != PVRSRV_OK)
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++ gpsResList->psContextList = NULL;
++
++ VALIDATERESLIST();
++ }
++
++ return PVRSRV_OK;
++}
++
++void ResManDeInit(void)
++{
++ if (gpsResList != NULL)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList),
++ gpsResList, NULL);
++}
++
++enum PVRSRV_ERROR PVRSRVResManConnect(void *hPerProc,
++ struct RESMAN_CONTEXT **phResManContext)
++{
++ enum PVRSRV_ERROR eError;
++ struct RESMAN_CONTEXT *psResManContext;
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext),
++ (void **) &psResManContext, NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "PVRSRVResManConnect: "
++ "ERROR allocating new RESMAN context struct");
++
++ VALIDATERESLIST();
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++ }
++#ifdef DEBUG
++ psResManContext->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psResManContext->psResItemList = NULL;
++ psResManContext->psPerProc = hPerProc;
++
++ psResManContext->psNext = gpsResList->psContextList;
++ psResManContext->ppsThis = &gpsResList->psContextList;
++ gpsResList->psContextList = psResManContext;
++ if (psResManContext->psNext)
++ psResManContext->psNext->ppsThis = &(psResManContext->psNext);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ *phResManContext = psResManContext;
++
++ return PVRSRV_OK;
++}
++
++static inline bool warn_unfreed_res(void)
++{
++ return !(current->flags & PF_SIGNALED);
++}
++
++static int free_one_res(struct RESMAN_CONTEXT *ctx, u32 restype)
++{
++ int freed;
++
++ freed = FreeResourceByCriteria(ctx, RESMAN_CRITERIA_RESTYPE, restype,
++ NULL, 0, IMG_TRUE);
++ if (freed && warn_unfreed_res())
++ PVR_DPF(DBGPRIV_WARNING, "pvr: %s: cleaning up %d "
++ "unfreed resource of type %d\n",
++ current->comm, freed, restype);
++
++ return freed;
++}
++
++void PVRSRVResManDisconnect(struct RESMAN_CONTEXT *ctx, IMG_BOOL bKernelContext)
++{
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ PRINT_RESLIST(gpsResList, ctx, IMG_TRUE);
++
++ if (!bKernelContext) {
++ int i = 0;
++
++ i += free_one_res(ctx, RESMAN_TYPE_OS_USERMODE_MAPPING);
++ i += free_one_res(ctx, RESMAN_TYPE_EVENT_OBJECT);
++ i += free_one_res(ctx, RESMAN_TYPE_HW_RENDER_CONTEXT);
++ i += free_one_res(ctx, RESMAN_TYPE_HW_TRANSFER_CONTEXT);
++ i += free_one_res(ctx, RESMAN_TYPE_HW_2D_CONTEXT);
++ i += free_one_res(ctx, RESMAN_TYPE_TRANSFER_CONTEXT);
++ i += free_one_res(ctx, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK);
++ i += free_one_res(ctx, RESMAN_TYPE_SHARED_PB_DESC);
++ i += free_one_res(ctx, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN);
++ i += free_one_res(ctx, RESMAN_TYPE_DISPLAYCLASS_DEVICE);
++ i += free_one_res(ctx, RESMAN_TYPE_BUFFERCLASS_DEVICE);
++ i += free_one_res(ctx, RESMAN_TYPE_DEVICECLASSMEM_MAPPING);
++ i += free_one_res(ctx, RESMAN_TYPE_DEVICEMEM_WRAP);
++ i += free_one_res(ctx, RESMAN_TYPE_DEVICEMEM_MAPPING);
++ i += free_one_res(ctx, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION);
++ i += free_one_res(ctx, RESMAN_TYPE_DEVICEMEM_ALLOCATION);
++ i += free_one_res(ctx, RESMAN_TYPE_DEVICEMEM_CONTEXT);
++
++ if (i && warn_unfreed_res())
++ pr_warning("pvr: %s: cleaning up %d "
++ "unfreed resources\n",
++ current->comm, i);
++ }
++
++ PVR_ASSERT(ctx->psResItemList == NULL);
++
++ *(ctx->ppsThis) = ctx->psNext;
++ if (ctx->psNext)
++ ctx->psNext->ppsThis = ctx->ppsThis;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct RESMAN_CONTEXT),
++ ctx, NULL);
++
++ VALIDATERESLIST();
++
++ PRINT_RESLIST(gpsResList, ctx, IMG_FALSE);
++
++ RELEASE_SYNC_OBJ;
++}
++
++struct RESMAN_ITEM *ResManRegisterRes(struct RESMAN_CONTEXT *psResManContext,
++ u32 ui32ResType, void *pvParam,
++ u32 ui32Param,
++ enum PVRSRV_ERROR (*pfnFreeResource)
++ (void *pvParam, u32 ui32Param))
++{
++ struct RESMAN_ITEM *psNewResItem;
++
++ PVR_ASSERT(psResManContext != NULL);
++ PVR_ASSERT(ui32ResType != 0);
++
++ if (psResManContext == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "ResManRegisterRes: "
++ "invalid parameter - psResManContext");
++ return (struct RESMAN_ITEM *)NULL;
++ }
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ PVR_DPF(PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++ "FreeFunc %08X",
++ psResManContext, ui32ResType, (u32) pvParam,
++ ui32Param, pfnFreeResource);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct RESMAN_ITEM), (void **) &psNewResItem,
++ NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "ResManRegisterRes: "
++ "ERROR allocating new resource item");
++
++ RELEASE_SYNC_OBJ;
++
++ return (struct RESMAN_ITEM *)NULL;
++ }
++
++#ifdef DEBUG
++ psNewResItem->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psNewResItem->ui32ResType = ui32ResType;
++ psNewResItem->pvParam = pvParam;
++ psNewResItem->ui32Param = ui32Param;
++ psNewResItem->pfnFreeResource = pfnFreeResource;
++ psNewResItem->ui32Flags = 0;
++
++ psNewResItem->ppsThis = &psResManContext->psResItemList;
++ psNewResItem->psNext = psResManContext->psResItemList;
++ psResManContext->psResItemList = psNewResItem;
++ if (psNewResItem->psNext)
++ psNewResItem->psNext->ppsThis = &psNewResItem->psNext;
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ return psNewResItem;
++}
++
++void ResManFreeResByPtr(struct RESMAN_ITEM *psResItem)
++{
++ BUG_ON(!psResItem);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "ResManFreeResByPtr: freeing resource at %08X", psResItem);
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ FreeResourceByPtr(psResItem, IMG_TRUE);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++}
++
++void ResManFreeResByCriteria(struct RESMAN_CONTEXT *psResManContext,
++ u32 ui32SearchCriteria, u32 ui32ResType,
++ void *pvParam, u32 ui32Param)
++{
++ PVR_ASSERT(psResManContext != NULL);
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ PVR_DPF(PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++ psResManContext, ui32SearchCriteria, ui32ResType,
++ (u32) pvParam, ui32Param);
++
++ (void)FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
++ ui32ResType, pvParam, ui32Param,
++ IMG_TRUE);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++}
++
++enum PVRSRV_ERROR ResManDissociateRes(struct RESMAN_ITEM *psResItem,
++ struct RESMAN_CONTEXT *psNewResManContext)
++{
++ PVR_ASSERT(psResItem != NULL);
++
++ if (psResItem == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "ResManDissociateRes: invalid parameter - psResItem");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++#ifdef DEBUG
++ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ if (psNewResManContext != NULL) {
++ if (psResItem->psNext)
++ psResItem->psNext->ppsThis = psResItem->ppsThis;
++ *psResItem->ppsThis = psResItem->psNext;
++
++ psResItem->ppsThis = &psNewResManContext->psResItemList;
++ psResItem->psNext = psNewResManContext->psResItemList;
++ psNewResManContext->psResItemList = psResItem;
++ if (psResItem->psNext)
++ psResItem->psNext->ppsThis = &psResItem->psNext;
++ } else {
++ FreeResourceByPtr(psResItem, IMG_FALSE);
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR ResManFindResourceByPtr(
++ struct RESMAN_CONTEXT *psResManContext,
++ struct RESMAN_ITEM *psItem)
++{
++ struct RESMAN_ITEM *psCurItem;
++
++ PVR_ASSERT(psResManContext != NULL);
++ PVR_ASSERT(psItem != NULL);
++
++ if ((psItem == NULL) || (psResManContext == NULL)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "ResManFindResourceByPtr: invalid parameter");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ ACQUIRE_SYNC_OBJ;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psResManContext,
++ psItem->ui32ResType, (u32) psItem->pvParam,
++ psItem->ui32Param, psItem->pfnFreeResource,
++ psItem->ui32Flags);
++
++ psCurItem = psResManContext->psResItemList;
++
++ while (psCurItem != NULL) {
++ if (psCurItem != psItem) {
++ psCurItem = psCurItem->psNext;
++ } else {
++ RELEASE_SYNC_OBJ;
++ return PVRSRV_OK;
++ }
++ }
++
++ RELEASE_SYNC_OBJ;
++
++ return PVRSRV_ERROR_NOT_OWNER;
++}
++
++static void FreeResourceByPtr(struct RESMAN_ITEM *psItem,
++ IMG_BOOL bExecuteCallback)
++{
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext);
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psItem->ui32ResType, (u32) psItem->pvParam,
++ psItem->ui32Param, psItem->pfnFreeResource,
++ psItem->ui32Flags);
++
++ if (psItem->psNext)
++ psItem->psNext->ppsThis = psItem->ppsThis;
++ *psItem->ppsThis = psItem->psNext;
++
++ RELEASE_SYNC_OBJ;
++
++ if (bExecuteCallback &&
++ psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param) !=
++ PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR, "FreeResourceByPtr: "
++ "ERROR calling FreeResource function");
++
++ ACQUIRE_SYNC_OBJ;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct RESMAN_ITEM), psItem,
++ NULL);
++}
++
++static int FreeResourceByCriteria(struct RESMAN_CONTEXT *psResManContext,
++ u32 ui32SearchCriteria, u32 ui32ResType,
++ void *pvParam, u32 ui32Param,
++ IMG_BOOL bExecuteCallback)
++{
++ struct RESMAN_ITEM *psCurItem;
++ bool bMatch;
++ int freed = 0;
++
++ psCurItem = psResManContext->psResItemList;
++
++ while (psCurItem != NULL) {
++ bMatch = IMG_TRUE;
++
++ if (((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) != 0UL) &&
++ (psCurItem->ui32ResType != ui32ResType))
++ bMatch = IMG_FALSE;
++ else if (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) !=
++ 0UL) && (psCurItem->pvParam != pvParam))
++ bMatch = IMG_FALSE;
++ else if (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) !=
++ 0UL) && (psCurItem->ui32Param != ui32Param))
++ bMatch = IMG_FALSE;
++
++ if (!bMatch) {
++ psCurItem = psCurItem->psNext;
++ } else {
++ FreeResourceByPtr(psCurItem, bExecuteCallback);
++ psCurItem = psResManContext->psResItemList;
++ freed++;
++ }
++ }
++
++ return freed;
++}
++
++#ifdef DEBUG
++static void ValidateResList(struct RESMAN_LIST *psResList)
++{
++ struct RESMAN_ITEM *psCurItem, **ppsThisItem;
++ struct RESMAN_CONTEXT *psCurContext, **ppsThisContext;
++
++ if (psResList == NULL) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "ValidateResList: resman not initialised yet");
++ return;
++ }
++
++ psCurContext = psResList->psContextList;
++ ppsThisContext = &psResList->psContextList;
++
++ while (psCurContext != NULL) {
++ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurContext->ppsThis != ppsThisContext) {
++ PVR_DPF(PVR_DBG_WARNING, "psCC=%08X "
++ "psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X",
++ psCurContext, psCurContext->ppsThis,
++ psCurContext->psNext, ppsThisContext);
++ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
++ }
++
++ psCurItem = psCurContext->psResItemList;
++ ppsThisItem = &psCurContext->psResItemList;
++ while (psCurItem != NULL) {
++ PVR_ASSERT(psCurItem->ui32Signature ==
++ RESMAN_SIGNATURE);
++ if (psCurItem->ppsThis != ppsThisItem) {
++ PVR_DPF(PVR_DBG_WARNING, "psCurItem=%08X "
++ "psCurItem->ppsThis=%08X "
++ "psCurItem->psNext=%08X "
++ "ppsThisItem=%08X",
++ psCurItem, psCurItem->ppsThis,
++ psCurItem->psNext, ppsThisItem);
++ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++ }
++
++ ppsThisItem = &psCurItem->psNext;
++ psCurItem = psCurItem->psNext;
++ }
++
++ ppsThisContext = &psCurContext->psNext;
++ psCurContext = psCurContext->psNext;
++ }
++}
++#endif
+diff --git a/drivers/gpu/pvr/resman.h b/drivers/gpu/pvr/resman.h
+new file mode 100644
+index 0000000..60c2a3b
+--- /dev/null
++++ b/drivers/gpu/pvr/resman.h
+@@ -0,0 +1,92 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++enum {
++ RESMAN_TYPE_SHARED_PB_DESC = 1,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ RESMAN_TYPE_TRANSFER_CONTEXT,
++
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++
++ RESMAN_TYPE_OS_USERMODE_MAPPING,
++
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ RESMAN_TYPE_EVENT_OBJECT,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++
++ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION
++};
++
++#define RESMAN_CRITERIA_ALL 0x00000000
++#define RESMAN_CRITERIA_RESTYPE 0x00000001
++#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002
++#define RESMAN_CRITERIA_UI32_PARAM 0x00000004
++
++struct RESMAN_ITEM;
++struct RESMAN_CONTEXT;
++
++enum PVRSRV_ERROR ResManInit(void);
++void ResManDeInit(void);
++
++struct RESMAN_ITEM *ResManRegisterRes(struct RESMAN_CONTEXT *hResManContext,
++ u32 ui32ResType, void *pvParam,
++ u32 ui32Param,
++ enum PVRSRV_ERROR (*pfnFreeResource)
++ (void *pvParam, u32 ui32Param));
++
++void ResManFreeResByPtr(struct RESMAN_ITEM *psResItem);
++
++void ResManFreeResByCriteria(struct RESMAN_CONTEXT *hResManContext,
++ u32 ui32SearchCriteria, u32 ui32ResType, void *pvParam,
++ u32 ui32Param);
++
++enum PVRSRV_ERROR ResManDissociateRes(struct RESMAN_ITEM *psResItem,
++ struct RESMAN_CONTEXT
++ *psNewResManContext);
++
++enum PVRSRV_ERROR ResManFindResourceByPtr(struct RESMAN_CONTEXT *hResManContext,
++ struct RESMAN_ITEM *psItem);
++
++enum PVRSRV_ERROR PVRSRVResManConnect(void *hPerProc,
++ struct RESMAN_CONTEXT **phResManContext);
++
++void PVRSRVResManDisconnect(struct RESMAN_CONTEXT *hResManContext,
++ IMG_BOOL bKernelContext);
++
++#endif
+diff --git a/drivers/gpu/pvr/services.h b/drivers/gpu/pvr/services.h
+new file mode 100644
+index 0000000..33d4931
+--- /dev/null
++++ b/drivers/gpu/pvr/services.h
+@@ -0,0 +1,237 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++struct SYS_DATA;
++
++#define PVRSRV_4K_PAGE_SIZE 4096UL
++
++#define PVRSRV_MAX_CMD_SIZE 1024
++
++#define PVRSRV_MAX_DEVICES 16
++
++#define EVENTOBJNAME_MAXLENGTH 50
++
++#define PVRSRV_MEM_READ (1UL<<0)
++#define PVRSRV_MEM_WRITE (1UL<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT (1UL<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ (1UL<<3)
++#define PVRSRV_MEM_INTERLEAVED (1UL<<4)
++#define PVRSRV_MEM_DUMMY (1UL<<5)
++#define PVRSRV_MEM_EDM_PROTECT (1UL<<6)
++#define PVRSRV_MEM_ZERO (1UL<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1UL<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1UL<<9)
++#define PVRSRV_MEM_NO_RESMAN (1UL<<10)
++#define PVRSRV_MEM_EXPORTED (1UL<<11)
++
++#define PVRSRV_HAP_CACHED (1UL<<12)
++#define PVRSRV_HAP_UNCACHED (1UL<<13)
++#define PVRSRV_HAP_WRITECOMBINE (1UL<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED | \
++ PVRSRV_HAP_UNCACHED | \
++ PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY (1UL<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS (1UL<<16)
++#define PVRSRV_HAP_MULTI_PROCESS (1UL<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1UL<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL (1UL<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY | \
++ PVRSRV_HAP_SINGLE_PROCESS | \
++ PVRSRV_HAP_MULTI_PROCESS | \
++ PVRSRV_HAP_FROM_EXISTING_PROCESS | \
++ PVRSRV_HAP_NO_CPU_VIRTUAL)
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT 24
++
++#define PVRSRV_MAP_NOUSERVIRTUAL (1UL << 27)
++
++#define PVRSRV_NO_CONTEXT_LOSS 0
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1
++#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80
++
++#define PVRSRV_DEFAULT_DEV_COOKIE 1
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT (1UL << 0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1UL << 1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1UL << 2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1UL << 3)
++#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1UL << 4)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002
++
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_WOP_INC 0x00000001
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_ROP_INC 0x00000002
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_WOC_INC 0x00000004
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_ROC_INC 0x00000008
++
++enum PVRSRV_DEVICE_TYPE {
++ PVRSRV_DEVICE_TYPE_UNKNOWN = 0,
++ PVRSRV_DEVICE_TYPE_MBX1 = 1,
++ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2,
++
++ PVRSRV_DEVICE_TYPE_M24VA = 3,
++ PVRSRV_DEVICE_TYPE_MVDA2 = 4,
++ PVRSRV_DEVICE_TYPE_MVED1 = 5,
++ PVRSRV_DEVICE_TYPE_MSVDX = 6,
++
++ PVRSRV_DEVICE_TYPE_SGX = 7,
++
++ PVRSRV_DEVICE_TYPE_VGX = 8,
++
++ PVRSRV_DEVICE_TYPE_EXT = 9,
++
++ PVRSRV_DEVICE_TYPE_LAST = 9,
++
++ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff
++};
++
++#define HEAP_ID(_dev_ , _dev_heap_idx_) \
++ (((_dev_) << 24) | ((_dev_heap_idx_) & ((1 << 24) - 1)))
++
++#define HEAP_IDX(_heap_id_) \
++ ((_heap_id_) & ((1 << 24) - 1))
++
++#define HEAP_DEV(_heap_id_) \
++ ((_heap_id_) >> 24)
++
++#define PVRSRV_UNDEFINED_HEAP_ID (~0LU)
++
++enum IMG_MODULE_ID {
++ IMG_EGL = 0x00000001,
++ IMG_OPENGLES1 = 0x00000002,
++ IMG_OPENGLES2 = 0x00000003,
++ IMG_D3DM = 0x00000004,
++ IMG_SRV_UM = 0x00000005,
++ IMG_OPENVG = 0x00000006,
++ IMG_SRVCLIENT = 0x00000007,
++ IMG_VISTAKMD = 0x00000008,
++ IMG_VISTA3DNODE = 0x00000009,
++ IMG_VISTAMVIDEONODE = 0x0000000A,
++ IMG_VISTAVPBNODE = 0x0000000B,
++ IMG_OPENGL = 0x0000000C,
++ IMG_D3D = 0x0000000D
++};
++
++struct PVRSRV_CONNECTION {
++ void *hServices;
++ u32 ui32ProcessID;
++};
++
++struct PVRSRV_DEV_DATA {
++ struct PVRSRV_CONNECTION sConnection;
++ void *hDevCookie;
++};
++
++struct PVRSRV_HWREG {
++ u32 ui32RegAddr;
++ u32 ui32RegVal;
++};
++
++struct PVRSRV_MEMBLK {
++ struct IMG_DEV_VIRTADDR sDevVirtAddr;
++ void *hOSMemHandle;
++ void *hOSWrapMem;
++ void *hBuffer;
++ void *hResItem;
++ struct IMG_SYS_PHYADDR *psIntSysPAddr;
++};
++
++struct PVRSRV_KERNEL_MEM_INFO;
++
++struct PVRSRV_CLIENT_MEM_INFO {
++ void *pvLinAddr;
++ void *pvLinAddrKM;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++ u32 ui32Flags;
++ u32 ui32ClientFlags;
++ u32 ui32AllocSize;
++ struct PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo;
++ void *hMappingInfo;
++ void *hKernelMemInfo;
++ void *hResItem;
++ struct PVRSRV_CLIENT_MEM_INFO *psNext;
++};
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++struct PVRSRV_HEAP_INFO {
++ u32 ui32HeapID;
++ void *hDevMemHeap;
++ struct IMG_DEV_VIRTADDR sDevVAddrBase;
++ u32 ui32HeapByteSize;
++ u32 ui32Attribs;
++};
++
++struct PVRSRV_DEVICE_IDENTIFIER {
++ enum PVRSRV_DEVICE_TYPE eDeviceType;
++ enum PVRSRV_DEVICE_CLASS eDeviceClass;
++ u32 ui32DeviceIndex;
++
++};
++
++struct PVRSRV_EVENTOBJECT {
++ char szName[EVENTOBJNAME_MAXLENGTH];
++ void *hOSEventKM;
++};
++
++struct PVRSRV_MISC_INFO {
++ u32 ui32StateRequest;
++ u32 ui32StatePresent;
++
++ void *pvSOCTimerRegisterKM;
++ void *pvSOCTimerRegisterUM;
++ void *hSOCTimerRegisterOSMemHandle;
++ void *hSOCTimerRegisterMappingInfo;
++
++ void *pvSOCClockGateRegs;
++ u32 ui32SOCClockGateRegsSize;
++
++ char *pszMemoryStr;
++ u32 ui32MemoryStrLen;
++
++ struct PVRSRV_EVENTOBJECT sGlobalEventObject;
++ void *hOSGlobalEvent;
++
++ u32 aui32DDKVersion[4];
++};
++
++enum PVRSRV_ERROR AllocateDeviceID(struct SYS_DATA *psSysData, u32 *pui32DevID);
++enum PVRSRV_ERROR FreeDeviceID(struct SYS_DATA *psSysData, u32 ui32DevID);
++
++#endif
+diff --git a/drivers/gpu/pvr/services_headers.h b/drivers/gpu/pvr/services_headers.h
+new file mode 100644
+index 0000000..d443fae
+--- /dev/null
++++ b/drivers/gpu/pvr/services_headers.h
+@@ -0,0 +1,42 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++
++#endif
+diff --git a/drivers/gpu/pvr/servicesext.h b/drivers/gpu/pvr/servicesext.h
+new file mode 100644
+index 0000000..3a1cb43
+--- /dev/null
++++ b/drivers/gpu/pvr/servicesext.h
+@@ -0,0 +1,435 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#include "img_types.h"
++
++#define PVRSRV_LOCKFLG_READONLY 1
++
++enum PVRSRV_ERROR {
++ PVRSRV_OK = 0,
++ PVRSRV_ERROR_GENERIC = 1,
++ PVRSRV_ERROR_OUT_OF_MEMORY = 2,
++ PVRSRV_ERROR_TOO_FEW_BUFFERS = 3,
++ PVRSRV_ERROR_SYMBOL_NOT_FOUND = 4,
++ PVRSRV_ERROR_OUT_OF_HSPACE = 5,
++ PVRSRV_ERROR_INVALID_PARAMS = 6,
++ PVRSRV_ERROR_TILE_MAP_FAILED = 7,
++ PVRSRV_ERROR_INIT_FAILURE = 8,
++ PVRSRV_ERROR_CANT_REGISTER_CALLBACK = 9,
++ PVRSRV_ERROR_INVALID_DEVICE = 10,
++ PVRSRV_ERROR_NOT_OWNER = 11,
++ PVRSRV_ERROR_BAD_MAPPING = 12,
++ PVRSRV_ERROR_TIMEOUT = 13,
++ PVRSRV_ERROR_NO_PRIMARY = 14,
++ PVRSRV_ERROR_FLIP_CHAIN_EXISTS = 15,
++ PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA = 16,
++ PVRSRV_ERROR_SCENE_INVALID = 17,
++ PVRSRV_ERROR_STREAM_ERROR = 18,
++ PVRSRV_ERROR_INVALID_INTERRUPT = 19,
++ PVRSRV_ERROR_FAILED_DEPENDENCIES = 20,
++ PVRSRV_ERROR_CMD_NOT_PROCESSED = 21,
++ PVRSRV_ERROR_CMD_TOO_BIG = 22,
++ PVRSRV_ERROR_DEVICE_REGISTER_FAILED = 23,
++ PVRSRV_ERROR_FIFO_SPACE = 24,
++ PVRSRV_ERROR_TA_RECOVERY = 25,
++ PVRSRV_ERROR_INDOSORLOWPOWER = 26,
++ PVRSRV_ERROR_TOOMANYBUFFERS = 27,
++ PVRSRV_ERROR_NOT_SUPPORTED = 28,
++ PVRSRV_ERROR_PROCESSING_BLOCKED = 29,
++
++ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE = 31,
++ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE = 32,
++ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS = 33,
++ PVRSRV_ERROR_RETRY = 34,
++
++ PVRSRV_ERROR_DDK_VERSION_MISMATCH = 35,
++ PVRSRV_ERROR_BUILD_MISMATCH = 36,
++
++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++};
++
++enum PVRSRV_DEVICE_CLASS {
++ PVRSRV_DEVICE_CLASS_3D = 0,
++ PVRSRV_DEVICE_CLASS_DISPLAY = 1,
++ PVRSRV_DEVICE_CLASS_BUFFER = 2,
++ PVRSRV_DEVICE_CLASS_VIDEO = 3,
++
++ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff
++};
++
++enum PVR_POWER_STATE {
++ PVRSRV_POWER_Unspecified = -1,
++ PVRSRV_POWER_STATE_D0 = 0,
++ PVRSRV_POWER_STATE_D1 = 1,
++ PVRSRV_POWER_STATE_D2 = 2,
++ PVRSRV_POWER_STATE_D3 = 3,
++ PVRSRV_POWER_STATE_D4 = 4,
++
++ PVRSRV_POWER_STATE_FORCE_I32 = 0x7fffffff
++};
++
++enum PVRSRV_PIXEL_FORMAT {
++ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0,
++ PVRSRV_PIXEL_FORMAT_RGB565 = 1,
++ PVRSRV_PIXEL_FORMAT_RGB555 = 2,
++ PVRSRV_PIXEL_FORMAT_RGB888 = 3,
++ PVRSRV_PIXEL_FORMAT_BGR888 = 4,
++ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8,
++ PVRSRV_PIXEL_FORMAT_PAL12 = 13,
++ PVRSRV_PIXEL_FORMAT_PAL8 = 14,
++ PVRSRV_PIXEL_FORMAT_PAL4 = 15,
++ PVRSRV_PIXEL_FORMAT_PAL2 = 16,
++ PVRSRV_PIXEL_FORMAT_PAL1 = 17,
++ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18,
++ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19,
++ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20,
++ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21,
++ PVRSRV_PIXEL_FORMAT_YV12 = 22,
++ PVRSRV_PIXEL_FORMAT_I420 = 23,
++ PVRSRV_PIXEL_FORMAT_IMC2 = 25,
++
++ PVRSRV_PIXEL_FORMAT_XRGB8888,
++ PVRSRV_PIXEL_FORMAT_XBGR8888,
++ PVRSRV_PIXEL_FORMAT_XRGB4444,
++ PVRSRV_PIXEL_FORMAT_ARGB8332,
++ PVRSRV_PIXEL_FORMAT_A2RGB10,
++ PVRSRV_PIXEL_FORMAT_A2BGR10,
++ PVRSRV_PIXEL_FORMAT_P8,
++ PVRSRV_PIXEL_FORMAT_L8,
++ PVRSRV_PIXEL_FORMAT_A8L8,
++ PVRSRV_PIXEL_FORMAT_A4L4,
++ PVRSRV_PIXEL_FORMAT_L16,
++ PVRSRV_PIXEL_FORMAT_L6V5U5,
++ PVRSRV_PIXEL_FORMAT_V8U8,
++ PVRSRV_PIXEL_FORMAT_V16U16,
++ PVRSRV_PIXEL_FORMAT_QWVU8888,
++ PVRSRV_PIXEL_FORMAT_XLVU8888,
++ PVRSRV_PIXEL_FORMAT_QWVU16,
++ PVRSRV_PIXEL_FORMAT_D16,
++ PVRSRV_PIXEL_FORMAT_D24S8,
++ PVRSRV_PIXEL_FORMAT_D24X8,
++
++ PVRSRV_PIXEL_FORMAT_ABGR16,
++ PVRSRV_PIXEL_FORMAT_ABGR16F,
++ PVRSRV_PIXEL_FORMAT_ABGR32,
++ PVRSRV_PIXEL_FORMAT_ABGR32F,
++ PVRSRV_PIXEL_FORMAT_B10GR11,
++ PVRSRV_PIXEL_FORMAT_GR88,
++ PVRSRV_PIXEL_FORMAT_BGR32,
++ PVRSRV_PIXEL_FORMAT_GR32,
++ PVRSRV_PIXEL_FORMAT_E5BGR9,
++
++ PVRSRV_PIXEL_FORMAT_DXT1,
++ PVRSRV_PIXEL_FORMAT_DXT23,
++ PVRSRV_PIXEL_FORMAT_DXT45,
++
++ PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++ PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++
++ PVRSRV_PIXEL_FORMAT_NV11,
++ PVRSRV_PIXEL_FORMAT_NV12,
++
++ PVRSRV_PIXEL_FORMAT_YUY2,
++ PVRSRV_PIXEL_FORMAT_YUV420,
++ PVRSRV_PIXEL_FORMAT_YUV444,
++ PVRSRV_PIXEL_FORMAT_VUY444,
++ PVRSRV_PIXEL_FORMAT_YUYV,
++ PVRSRV_PIXEL_FORMAT_YVYU,
++ PVRSRV_PIXEL_FORMAT_UYVY,
++ PVRSRV_PIXEL_FORMAT_VYUY,
++
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY,
++
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT,
++
++ PVRSRV_PIXEL_FORMAT_B32G32R32,
++ PVRSRV_PIXEL_FORMAT_B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_SINT,
++
++ PVRSRV_PIXEL_FORMAT_G32R32,
++ PVRSRV_PIXEL_FORMAT_G32R32F,
++ PVRSRV_PIXEL_FORMAT_G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_G32R32_SINT,
++
++ PVRSRV_PIXEL_FORMAT_D32F,
++ PVRSRV_PIXEL_FORMAT_R32,
++ PVRSRV_PIXEL_FORMAT_R32F,
++ PVRSRV_PIXEL_FORMAT_R32_UINT,
++ PVRSRV_PIXEL_FORMAT_R32_SINT,
++
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16F,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM,
++
++ PVRSRV_PIXEL_FORMAT_G16R16,
++ PVRSRV_PIXEL_FORMAT_G16R16F,
++ PVRSRV_PIXEL_FORMAT_G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_SNORM,
++
++ PVRSRV_PIXEL_FORMAT_R16,
++ PVRSRV_PIXEL_FORMAT_R16F,
++ PVRSRV_PIXEL_FORMAT_R16_UINT,
++ PVRSRV_PIXEL_FORMAT_R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_R16_SINT,
++ PVRSRV_PIXEL_FORMAT_R16_SNORM,
++
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM,
++
++ PVRSRV_PIXEL_FORMAT_G8R8,
++ PVRSRV_PIXEL_FORMAT_G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_SNORM,
++
++ PVRSRV_PIXEL_FORMAT_A8,
++ PVRSRV_PIXEL_FORMAT_R8,
++ PVRSRV_PIXEL_FORMAT_R8_UINT,
++ PVRSRV_PIXEL_FORMAT_R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_R8_SINT,
++ PVRSRV_PIXEL_FORMAT_R8_SNORM,
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT,
++
++ PVRSRV_PIXEL_FORMAT_B10G11R11,
++ PVRSRV_PIXEL_FORMAT_B10G11R11F,
++
++ PVRSRV_PIXEL_FORMAT_X24G8R32,
++ PVRSRV_PIXEL_FORMAT_G8R24,
++ PVRSRV_PIXEL_FORMAT_E5B9G9R9,
++ PVRSRV_PIXEL_FORMAT_R1,
++
++ PVRSRV_PIXEL_FORMAT_BC1,
++ PVRSRV_PIXEL_FORMAT_BC1_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC1_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC2,
++ PVRSRV_PIXEL_FORMAT_BC2_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC2_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC3,
++ PVRSRV_PIXEL_FORMAT_BC3_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC3_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC4,
++ PVRSRV_PIXEL_FORMAT_BC4_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC4_SNORM,
++ PVRSRV_PIXEL_FORMAT_BC5,
++ PVRSRV_PIXEL_FORMAT_BC5_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC5_SNORM,
++
++ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++};
++
++enum PVRSRV_ALPHA_FORMAT {
++ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001,
++ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002,
++ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F,
++};
++
++enum PVRSRV_COLOURSPACE_FORMAT {
++ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000,
++ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000,
++ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000,
++};
++
++enum PVRSRV_ROTATION {
++ PVRSRV_ROTATE_0 = 0,
++ PVRSRV_ROTATE_90 = 1,
++ PVRSRV_ROTATE_180 = 2,
++ PVRSRV_ROTATE_270 = 3,
++ PVRSRV_FLIP_Y
++};
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1)
++#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2)
++
++struct PVRSRV_SYNC_DATA {
++
++ u32 ui32WriteOpsPending;
++ volatile u32 ui32WriteOpsComplete;
++
++ u32 ui32ReadOpsPending;
++ volatile u32 ui32ReadOpsComplete;
++
++ u32 ui32LastOpDumpVal;
++ u32 ui32LastReadOpDumpVal;
++
++};
++
++struct PVRSRV_CLIENT_SYNC_INFO {
++ struct PVRSRV_SYNC_DATA *psSyncData;
++ struct IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++ struct IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ void *hMappingInfo;
++ void *hKernelSyncInfo;
++};
++
++struct PVRSRV_RESOURCE {
++ volatile u32 ui32Lock;
++ u32 ui32ID;
++};
++
++struct IMG_RECT {
++ s32 x0;
++ s32 y0;
++ s32 x1;
++ s32 y1;
++};
++
++struct IMG_RECT_16 {
++ s16 x0;
++ s16 y0;
++ s16 x1;
++ s16 y1;
++};
++
++struct DISPLAY_DIMS {
++ u32 ui32ByteStride;
++ u32 ui32Width;
++ u32 ui32Height;
++};
++
++struct DISPLAY_FORMAT {
++ enum PVRSRV_PIXEL_FORMAT pixelformat;
++};
++
++struct DISPLAY_SURF_ATTRIBUTES {
++ enum PVRSRV_PIXEL_FORMAT pixelformat;
++ struct DISPLAY_DIMS sDims;
++};
++
++struct DISPLAY_MODE_INFO {
++ enum PVRSRV_PIXEL_FORMAT pixelformat;
++ struct DISPLAY_DIMS sDims;
++ u32 ui32RefreshHZ;
++ u32 ui32OEMFlags;
++};
++
++#define MAX_DISPLAY_NAME_SIZE (50)
++
++struct DISPLAY_INFO {
++ u32 ui32MaxSwapChains;
++ u32 ui32MaxSwapChainBuffers;
++ u32 ui32MinSwapInterval;
++ u32 ui32MaxSwapInterval;
++ char szDisplayName[MAX_DISPLAY_NAME_SIZE];
++};
++
++struct ACCESS_INFO {
++ u32 ui32Size;
++ u32 ui32FBPhysBaseAddress;
++ u32 ui32FBMemAvailable;
++ u32 ui32SysPhysBaseAddress;
++ u32 ui32SysSize;
++ u32 ui32DevIRQ;
++};
++
++struct PVRSRV_CURSOR_SHAPE {
++ u16 ui16Width;
++ u16 ui16Height;
++ s16 i16XHot;
++ s16 i16YHot;
++
++ void *pvMask;
++ s16 i16MaskByteStride;
++
++ void *pvColour;
++ s16 i16ColourByteStride;
++ enum PVRSRV_PIXEL_FORMAT eColourPixelFormat;
++};
++
++#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION (1<<3)
++
++struct PVRSRV_CURSOR_INFO {
++ u32 ui32Flags;
++ IMG_BOOL bVisible;
++ s16 i16XPos;
++ s16 i16YPos;
++ struct PVRSRV_CURSOR_SHAPE sCursorShape;
++ u32 ui32Rotation;
++};
++
++struct PVRSRV_REGISTRY_INFO {
++ u32 ui32DevCookie;
++ char *pszKey;
++ char *pszValue;
++ char *pszBuf;
++ u32 ui32BufSize;
++};
++
++enum PVRSRV_ERROR PVRSRVReadRegistryString(
++ struct PVRSRV_REGISTRY_INFO *psRegInfo);
++enum PVRSRV_ERROR PVRSRVWriteRegistryString(
++ struct PVRSRV_REGISTRY_INFO *psRegInfo);
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1)
++
++struct BUFFER_INFO {
++ u32 ui32BufferCount;
++ u32 ui32BufferDeviceID;
++ enum PVRSRV_PIXEL_FORMAT pixelformat;
++ u32 ui32ByteStride;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32Flags;
++};
++
++enum OVERLAY_DEINTERLACE_MODE {
++ WEAVE = 0x0,
++ BOB_ODD,
++ BOB_EVEN,
++ BOB_EVEN_NONINTERLEAVED
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/servicesint.h b/drivers/gpu/pvr/servicesint.h
+new file mode 100644
+index 0000000..d18adec
+--- /dev/null
++++ b/drivers/gpu/pvr/servicesint.h
+@@ -0,0 +1,173 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT 500
++
++#define DRIVERNAME_MAXLENGTH 100
++
++struct PVRSRV_KERNEL_MEM_INFO {
++
++ void *pvLinAddrKM;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++ u32 ui32Flags;
++ u32 ui32AllocSize;
++ struct PVRSRV_MEMBLK sMemBlk;
++
++ void *pvSysBackupBuffer;
++
++ u32 ui32RefCount;
++
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++};
++
++struct PVRSRV_KERNEL_SYNC_INFO {
++ struct PVRSRV_SYNC_DATA *psSyncData;
++ struct IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++ struct IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ struct PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM;
++};
++
++struct PVRSRV_DEVICE_SYNC_OBJECT {
++
++ u32 ui32ReadOpsPendingVal;
++ struct IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ u32 ui32WriteOpsPendingVal;
++ struct IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++};
++
++struct PVRSRV_SYNC_OBJECT {
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++ u32 ui32WriteOpsPending;
++ u32 ui32ReadOpsPending;
++};
++
++struct PVRSRV_COMMAND {
++ u32 ui32CmdSize;
++ u32 ui32DevIndex;
++ u32 CommandType;
++ u32 ui32DstSyncCount;
++ u32 ui32SrcSyncCount;
++ struct PVRSRV_SYNC_OBJECT *psDstSync;
++ struct PVRSRV_SYNC_OBJECT *psSrcSync;
++ u32 ui32DataSize;
++ u32 ui32ProcessID;
++ void *pvData;
++};
++
++struct PVRSRV_QUEUE_INFO {
++ void *pvLinQueueKM;
++ void *pvLinQueueUM;
++ volatile u32 ui32ReadOffset;
++ volatile u32 ui32WriteOffset;
++ u32 *pui32KickerAddrKM;
++ u32 *pui32KickerAddrUM;
++ u32 ui32QueueSize;
++
++ u32 ui32ProcessID;
++
++ void *hMemBlock[2];
++
++ struct PVRSRV_QUEUE_INFO *psNextKM;
++};
++
++struct PVRSRV_DEVICECLASS_BUFFER {
++ enum PVRSRV_ERROR (*pfnGetBufferAddr)(void *, void *,
++ struct IMG_SYS_PHYADDR **, u32 *,
++ void __iomem **, void **, IMG_BOOL *);
++ void *hDevMemContext;
++ void *hExtDevice;
++ void *hExtBuffer;
++ struct PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++};
++
++struct PVRSRV_CLIENT_DEVICECLASS_INFO {
++ void *hDeviceKM;
++ void *hServices;
++};
++
++static inline u32 PVRSRVGetWriteOpsPending(
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ u32 ui32WriteOpsPending;
++
++ if (bIsReadOp)
++ ui32WriteOpsPending =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ else
++ ui32WriteOpsPending =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++ return ui32WriteOpsPending;
++}
++
++static inline u32 PVRSRVGetReadOpsPending(
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ u32 ui32ReadOpsPending;
++
++ if (bIsReadOp)
++ ui32ReadOpsPending =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ else
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ return ui32ReadOpsPending;
++}
++
++enum PVRSRV_ERROR PVRSRVQueueCommand(void *hQueueInfo,
++ struct PVRSRV_COMMAND *psCommand);
++
++enum PVRSRV_ERROR PVRSRVGetMMUContextPDDevPAddr(
++ const struct PVRSRV_CONNECTION *psConnection,
++ void *hDevMemContext,
++ struct IMG_DEV_PHYADDR *sPDDevPAddr);
++
++enum PVRSRV_ERROR PVRSRVAllocSharedSysMem(
++ const struct PVRSRV_CONNECTION *psConnection,
++ u32 ui32Flags, u32 ui32Size,
++ struct PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++enum PVRSRV_ERROR PVRSRVFreeSharedSysMem(
++ const struct PVRSRV_CONNECTION *psConnection,
++ struct PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++enum PVRSRV_ERROR PVRSRVUnrefSharedSysMem(
++ const struct PVRSRV_CONNECTION *psConnection,
++ struct PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++enum PVRSRV_ERROR PVRSRVMapMemInfoMem(
++ const struct PVRSRV_CONNECTION *psConnection,
++ void *hKernelMemInfo,
++ struct PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++#endif
+diff --git a/drivers/gpu/pvr/sgx530defs.h b/drivers/gpu/pvr/sgx530defs.h
+new file mode 100644
+index 0000000..1a796ee
+--- /dev/null
++++ b/drivers/gpu/pvr/sgx530defs.h
+@@ -0,0 +1,471 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGX530DEFS_KM_H_
++#define _SGX530DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL 0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++
++#define EUR_CR_CLKGATESTATUS 0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++
++#define EUR_CR_CLKGATECTLOVR 0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++
++#define EUR_CR_CORE_ID 0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFF
++#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
++#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000
++#define EUR_CR_CORE_ID_ID_SHIFT 16
++
++#define EUR_CR_CORE_REVISION 0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FF
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
++#define EUR_CR_CORE_MAKE_REV(maj, min, maint) ( \
++ (((maj) << EUR_CR_CORE_REVISION_MAJOR_SHIFT) & \
++ EUR_CR_CORE_REVISION_MAJOR_MASK) | \
++ (((min) << EUR_CR_CORE_REVISION_MINOR_SHIFT) & \
++ EUR_CR_CORE_REVISION_MINOR_MASK) | \
++ (((maint) << EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT) & \
++ EUR_CR_CORE_REVISION_MAINTENANCE_MASK) \
++ )
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++
++#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++
++#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++
++#define EUR_CR_SOFT_RESET 0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
++
++#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++
++#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++
++#define EUR_CR_EVENT_STATUS2 0x0118
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++
++#define EUR_CR_EVENT_STATUS 0x012C
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++
++#define EUR_CR_EVENT_HOST_ENABLE 0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++
++#define EUR_CR_EVENT_HOST_CLEAR 0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++
++#define EUR_CR_PDS 0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++
++#define EUR_CR_PDS_EXEC_BASE 0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
++
++#define EUR_CR_EVENT_KICKER 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
++
++#define EUR_CR_EVENT_KICK 0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001
++#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
++
++#define EUR_CR_EVENT_TIMER 0x0ACC
++#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000
++#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
++#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFF
++#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
++
++#define EUR_CR_PDS_INV0 0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV0_DSC_SHIFT 0
++
++#define EUR_CR_PDS_INV1 0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV1_DSC_SHIFT 0
++
++#define EUR_CR_PDS_INV2 0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV2_DSC_SHIFT 0
++
++#define EUR_CR_PDS_INV3 0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV3_DSC_SHIFT 0
++
++#define EUR_CR_PDS_INV_CSC 0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
++
++#define EUR_CR_PDS_PC_BASE 0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFF
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
++
++#define EUR_CR_BIF_CTRL 0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++
++#define EUR_CR_BIF_INT_STAT 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFF
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
++#define EUR_CR_BIF_FAULT 0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
++
++#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++
++#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++
++#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
++
++#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FF
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++
++#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
++
++#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
++
++#define EUR_CR_2D_BLIT_STATUS 0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFF
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
++
++#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000E
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++
++#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFF
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++
++#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFF
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
++#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#endif
+diff --git a/drivers/gpu/pvr/sgx_bridge.h b/drivers/gpu/pvr/sgx_bridge.h
+new file mode 100644
+index 0000000..0d12fce
+--- /dev/null
++++ b/drivers/gpu/pvr/sgx_bridge.h
+@@ -0,0 +1,388 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++#define PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+6)
++
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)
++
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2 \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++
++#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
++
++#define PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
++#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
++#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
++#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
++#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB \
++ PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++#endif
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++
++struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR {
++ u32 ui32BridgeFlags;
++ void *hDevMemHeap;
++ struct IMG_DEV_VIRTADDR sDevVAddr;
++};
++
++struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR {
++ enum PVRSRV_ERROR eError;
++ struct IMG_DEV_PHYADDR DevPAddr;
++ struct IMG_CPU_PHYADDR CpuPAddr;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hDevMemContext;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR {
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_GETCLIENTINFO {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO {
++ struct SGX_INTERNAL_DEVINFO sSGXInternalDevInfo;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO {
++ struct SGX_CLIENT_INFO sClientInfo;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct SGX_CLIENT_INFO sClientInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_IN_DOKICK {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct SGX_CCB_KICK sCCBKick;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct PVRSRV_TRANSFER_SGX_KICK sKick;
++};
++
++
++struct PVRSRV_BRIDGE_IN_READREGDWORD {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ char *pszKey;
++ char *pszValue;
++};
++
++struct PVRSRV_BRIDGE_OUT_READREGDWORD {
++ enum PVRSRV_ERROR eError;
++ u32 ui32Data;
++};
++
++struct PVRSRV_BRIDGE_IN_SCHEDULECOMMAND {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ enum SGXMKIF_COMMAND_TYPE eCommandType;
++ struct SGXMKIF_COMMAND *psCommandData;
++
++};
++
++struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct SGX_MISC_INFO __user *psMiscInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT {
++ enum PVRSRV_ERROR eError;
++ struct SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2 {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct SGX_BRIDGE_INIT_INFO sInitInfo;
++};
++
++struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hKernSyncInfo;
++ IMG_BOOL bWaitForComplete;
++};
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ IMG_BOOL bLockOnFailure;
++ u32 ui32TotalPBSize;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC {
++ void *hKernelMemInfo;
++ void *hSharedPBDesc;
++ void *hSharedPBDescKernelMemInfoHandle;
++ void *hHWPBDescKernelMemInfoHandle;
++ void *hBlockKernelMemInfoHandle;
++ void *ahSharedPBDescSubKernelMemInfoHandles
++ [PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++ u32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC {
++ u32 ui32BridgeFlags;
++ void *hSharedPBDesc;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC {
++ enum PVRSRV_ERROR eError;
++};
++
++struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hSharedPBDescKernelMemInfo;
++ void *hHWPBDescKernelMemInfo;
++ void *hBlockKernelMemInfo;
++ u32 ui32TotalPBSize;
++ void * __user *phKernelMemInfoHandles;
++ u32 ui32KernelMemInfoHandlesCount;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC {
++ enum PVRSRV_ERROR eError;
++ void *hSharedPBDesc;
++};
++
++#ifdef PDUMP
++struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY {
++ u32 ui32BridgeFlags;
++ struct SGX_KICKTA_DUMP_BUFFER __user *psBufferArray;
++ u32 ui32BufferArrayLength;
++ IMG_BOOL bDumpPolls;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS {
++ u32 ui32BridgeFlags;
++ u32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ u32 *pui32Registers;
++ u32 ui32NumRegisters;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS {
++ u32 ui32BridgeFlags;
++ u32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ u32 *pui32Registers;
++ u32 ui32NumRegisters;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS {
++ u32 ui32BridgeFlags;
++ u32 ui32DumpFrameNum;
++ u32 ui32TAKickCount;
++ IMG_BOOL bLastFrame;
++ u32 *pui32Registers;
++ u32 ui32NumRegisters;
++};
++
++struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ char szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ u32 ui32FileOffset;
++ u32 ui32PDumpFlags;
++
++};
++
++#endif
++
++struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT {
++ enum PVRSRV_ERROR eError;
++ void *hHWRenderContext;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hHWRenderContext;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT {
++ enum PVRSRV_ERROR eError;
++ void *hHWTransferContext;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ void *hHWTransferContext;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ struct IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ u32 ui32Reg;
++ IMG_BOOL bNew;
++ u32 ui32New;
++ u32 ui32NewReset;
++ u32 ui32CountersReg;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS {
++ enum PVRSRV_ERROR eError;
++ u32 ui32Old;
++ u32 ui32Time;
++ IMG_BOOL bActive;
++ struct PVRSRV_SGXDEV_DIFF_INFO sDiffs;
++};
++
++struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB {
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ u32 ui32ArraySize;
++ struct PVRSRV_SGX_HWPERF_CB_ENTRY __user *psHWPerfCBData;
++};
++
++struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB {
++ enum PVRSRV_ERROR eError;
++ u32 ui32DataCount;
++ u32 ui32ClockSpeed;
++ u32 ui32HostTimeStamp;
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/sgx_bridge_km.h b/drivers/gpu/pvr/sgx_bridge_km.h
+new file mode 100644
+index 0000000..2819a7e
+--- /dev/null
++++ b/drivers/gpu/pvr/sgx_bridge_km.h
+@@ -0,0 +1,109 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++
++enum PVRSRV_ERROR SGXSubmitTransferKM(void *hDevHandle,
++ struct PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++
++enum PVRSRV_ERROR SGXDoKickKM(void *hDevHandle,
++ struct SGX_CCB_KICK *psCCBKick);
++
++enum PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
++ struct IMG_DEV_VIRTADDR sDevVAddr,
++ struct IMG_DEV_PHYADDR *pDevPAddr,
++ struct IMG_CPU_PHYADDR *pCpuPAddr);
++
++enum PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
++ void *hDevMemContext, struct IMG_DEV_PHYADDR *psPDDevPAddr);
++
++enum PVRSRV_ERROR SGXGetClientInfoKM(void *hDevCookie,
++ struct SGX_CLIENT_INFO *psClientInfo);
++
++enum PVRSRV_ERROR SGXGetMiscInfoKM(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ struct SGX_MISC_INFO *psMiscInfo,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++enum PVRSRV_ERROR SGXReadDiffCountersKM(void *hDevHandle, u32 ui32Reg,
++ u32 *pui32Old, IMG_BOOL bNew, u32 ui32New,
++ u32 ui32NewReset, u32 ui32CountersReg,
++ u32 *pui32Time, IMG_BOOL *pbActive,
++ struct PVRSRV_SGXDEV_DIFF_INFO *psDiffs);
++enum PVRSRV_ERROR SGXReadHWPerfCBKM(void *hDevHandle, u32 ui32ArraySize,
++ struct PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
++ u32 *pui32DataCount, u32 *pui32ClockSpeed,
++ u32 *pui32HostTimeStamp);
++
++enum PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(
++ struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete);
++
++enum PVRSRV_ERROR SGXGetInfoForSrvinitKM(void *hDevHandle,
++ struct SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++enum PVRSRV_ERROR DevInitSGXPart2KM(struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevHandle,
++ struct SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++enum PVRSRV_ERROR SGXFindSharedPBDescKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevCookie, IMG_BOOL bLockOnFailure, u32 ui32TotalPBSize,
++ void **phSharedPBDesc,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ u32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++enum PVRSRV_ERROR SGXUnrefSharedPBDescKM(void *hSharedPBDesc);
++
++enum PVRSRV_ERROR SGXAddSharedPBDescKM(
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevCookie,
++ struct PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ struct PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ u32 ui32TotalPBSize, void **phSharedPBDesc,
++ struct PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++ u32 ui32SharedPBDescSubKernelMemInfosCount);
++
++enum PVRSRV_ERROR SGXGetInternalDevInfoKM(void *hDevCookie,
++ struct SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++int sgx_force_reset(void);
++
++#endif
+diff --git a/drivers/gpu/pvr/sgx_options.h b/drivers/gpu/pvr/sgx_options.h
+new file mode 100644
+index 0000000..ce59e49
+--- /dev/null
++++ b/drivers/gpu/pvr/sgx_options.h
+@@ -0,0 +1,178 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(DEBUG) || defined(INTERNAL_TEST)
++#define DEBUG_SET_OFFSET OPTIONS_BIT0
++#define OPTIONS_BIT0 0x1
++#else
++#define OPTIONS_BIT0 0x0
++#endif
++
++#if defined(PDUMP) || defined(INTERNAL_TEST)
++#define PDUMP_SET_OFFSET OPTIONS_BIT1
++#define OPTIONS_BIT1 (0x1 << 1)
++#else
++#define OPTIONS_BIT1 0x0
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined(INTERNAL_TEST)
++#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2
++#define OPTIONS_BIT2 (0x1 << 2)
++#else
++#define OPTIONS_BIT2 0x0
++#endif
++
++#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3
++#define OPTIONS_BIT3 (0x1 << 3)
++
++#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
++#define OPTIONS_BIT4 (0x1 << 4)
++
++#if defined(INTERNAL_TEST)
++#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5
++#define OPTIONS_BIT5 (0x1 << 5)
++#else
++#define OPTIONS_BIT5 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6
++#define OPTIONS_BIT6 (0x1 << 6)
++#else
++#define OPTIONS_BIT6 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_DONT_SWITCH_OFF_FEATURES_SET_OFFSET OPTIONS_BIT7
++#define OPTIONS_BIT7 (0x1 << 7)
++#else
++#define OPTIONS_BIT7 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8
++#define OPTIONS_BIT8 (0x1 << 8)
++#else
++#define OPTIONS_BIT8 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9
++#define OPTIONS_BIT9 (0x1 << 9)
++#else
++#define OPTIONS_BIT9 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10
++#define OPTIONS_BIT10 (0x1 << 10)
++#else
++#define OPTIONS_BIT10 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET OPTIONS_BIT11
++#define OPTIONS_BIT11 (0x1 << 11)
++#else
++#define OPTIONS_BIT11 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET OPTIONS_BIT12
++#define OPTIONS_BIT12 (0x1 << 12)
++#else
++#define OPTIONS_BIT12 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FEATURE_RENDER_TARGET_ARRAYS_SET_OFFSET OPTIONS_BIT13
++#define OPTIONS_BIT13 (0x1 << 13)
++#else
++#define OPTIONS_BIT13 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT14
++#define OPTIONS_BIT14 (0x1 << 14)
++#else
++#define OPTIONS_BIT14 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT15
++#define OPTIONS_BIT15 (0x1 << 15)
++#else
++#define OPTIONS_BIT15 0x0
++#endif
++
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT16
++#define OPTIONS_BIT16 (0x1 << 16)
++
++#if defined(INTERNAL_TEST)
++#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT17
++#define OPTIONS_BIT17 (0x1 << 17)
++#else
++#define OPTIONS_BIT17 0x0
++#endif
++
++#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT18
++#define OPTIONS_BIT18 (0x1 << 18)
++
++#define OPTIONS_BIT19 (0x1 << 19)
++
++#if defined(INTERNAL_TEST)
++#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT20
++#define OPTIONS_BIT20 (0x1 << 20)
++#else
++#define OPTIONS_BIT20 0x0
++#endif
++
++#define SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT21
++#define OPTIONS_BIT21 (0x1 << 21)
++
++#if defined(INTERNAL_TEST)
++#define USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET OPTIONS_BIT22
++#define OPTIONS_BIT22 (0x1 << 22)
++#else
++#define OPTIONS_BIT22 0x0
++#endif
++
++#if defined(INTERNAL_TEST)
++#define OPTIONS_HIGHBYTE \
++ ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET)
++#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL
++#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
++#else
++#define OPTIONS_HIGHBYTE 0x0
++#endif
++
++#define SGX_BUILD_OPTIONS ( \
++ OPTIONS_BIT0 | OPTIONS_BIT1 | OPTIONS_BIT2 | OPTIONS_BIT3 | \
++ OPTIONS_BIT4 | OPTIONS_BIT5 | OPTIONS_BIT6 | OPTIONS_BIT7 | \
++ OPTIONS_BIT8 | OPTIONS_BIT9 | OPTIONS_BIT10 | OPTIONS_BIT11 | \
++ OPTIONS_BIT12 | OPTIONS_BIT13 | OPTIONS_BIT14 | OPTIONS_BIT15 | \
++ OPTIONS_BIT16 | OPTIONS_BIT17 | OPTIONS_BIT18 | OPTIONS_BIT19 | \
++ OPTIONS_BIT20 | OPTIONS_BIT21 | OPTIONS_BIT22 | OPTIONS_HIGHBYTE)
+diff --git a/drivers/gpu/pvr/sgxapi_km.h b/drivers/gpu/pvr/sgxapi_km.h
+new file mode 100644
+index 0000000..a1ac0fe
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxapi_km.h
+@@ -0,0 +1,237 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++
++#include "sgxdefs.h"
++
++#if defined(__KERNEL__)
++#include <linux/unistd.h>
++#else
++#include <unistd.h>
++#endif
++
++#define SGX_UNDEFINED_HEAP_ID (~0LU)
++#define SGX_GENERAL_HEAP_ID 0
++#define SGX_TADATA_HEAP_ID 1
++#define SGX_KERNEL_CODE_HEAP_ID 2
++#define SGX_KERNEL_DATA_HEAP_ID 3
++#define SGX_PIXELSHADER_HEAP_ID 4
++#define SGX_VERTEXSHADER_HEAP_ID 5
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7
++#define SGX_SYNCINFO_HEAP_ID 8
++#define SGX_3DPARAMETERS_HEAP_ID 9
++#define SGX_MAX_HEAP_ID 10
++
++#define SGX_MAX_TA_STATUS_VALS 32
++#define SGX_MAX_3D_STATUS_VALS 2
++
++#define SGX_MAX_SRC_SYNCS 4
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9
++
++#define PVRSRV_SGX_HWPERF_INVALID 0x1
++
++#define PVRSRV_SGX_HWPERF_TRANSFER 0x2
++#define PVRSRV_SGX_HWPERF_TA 0x3
++#define PVRSRV_SGX_HWPERF_3D 0x4
++#define PVRSRV_SGX_HWPERF_2D 0x5
++
++#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101
++#define PVRSRV_SGX_HWPERF_MK_TA 0x102
++#define PVRSRV_SGX_HWPERF_MK_3D 0x103
++#define PVRSRV_SGX_HWPERF_MK_2D 0x104
++
++#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28
++#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK \
++ ((1 << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_START \
++ (0 << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_END \
++ (1 << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START \
++ (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END \
++ (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_START \
++ (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_END \
++ (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_START \
++ (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_END \
++ (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_START \
++ (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_END \
++ (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START \
++ (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END \
++ (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START \
++ (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END \
++ (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START \
++ (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END \
++ (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START \
++ (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END \
++ (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_OFF 0x0
++#define PVRSRV_SGX_HWPERF_GRAPHICS_ON (1UL << 0)
++#define PVRSRV_SGX_HWPERF_MK_EXECUTION_ON (1UL << 1)
++
++struct PVRSRV_SGX_HWPERF_CB_ENTRY {
++ u32 ui32FrameNo;
++ u32 ui32Type;
++ u32 ui32Ordinal;
++ u32 ui32Clocksx16;
++ u32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++};
++
++struct PVRSRV_SGX_HWPERF_CBDATA {
++ u32 ui32FrameNo;
++ u32 ui32Type;
++ u32 ui32StartTimeWraps;
++ u32 ui32StartTime;
++ u32 ui32EndTimeWraps;
++ u32 ui32EndTime;
++ u32 ui32ClockSpeed;
++ u32 ui32TimeMax;
++};
++
++struct SGX_MISC_INFO_HWPERF_RETRIEVE_CB {
++ struct PVRSRV_SGX_HWPERF_CBDATA *psHWPerfData;
++ u32 ui32ArraySize;
++ u32 ui32DataCount;
++ u32 ui32Time;
++};
++
++struct CTL_STATUS {
++ struct IMG_DEV_VIRTADDR sStatusDevAddr;
++ u32 ui32StatusValue;
++};
++
++enum SGX_MISC_INFO_REQUEST {
++ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++ SGX_MISC_INFO_REQUEST_SGXREV,
++ SGX_MISC_INFO_REQUEST_DRIVER_SGXREV,
++ SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++ SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff
++};
++
++struct PVRSRV_SGX_MISCINFO_FEATURES {
++ u32 ui32CoreRev;
++ u32 ui32CoreID;
++ u32 ui32DDKVersion;
++ u32 ui32DDKBuild;
++ u32 ui32CoreIdSW;
++ u32 ui32CoreRevSW;
++ u32 ui32BuildOptions;
++};
++
++struct SGX_MISC_INFO {
++ enum SGX_MISC_INFO_REQUEST eRequest;
++
++ union {
++ u32 reserved;
++ struct PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ u32 ui32SGXClockSpeed;
++ u32 ui32NewHWPerfStatus;
++ struct SGX_MISC_INFO_HWPERF_RETRIEVE_CB sRetrieveCB;
++ } uData;
++};
++
++#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256
++
++struct SGX_KICKTA_DUMPBITMAP {
++ struct IMG_DEV_VIRTADDR sDevBaseAddr;
++ u32 ui32Flags;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32Stride;
++ u32 ui32PDUMPFormat;
++ u32 ui32BytesPP;
++ char pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++};
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE 16
++
++struct PVRSRV_SGX_PDUMP_CONTEXT {
++ u32 ui32CacheControl;
++};
++
++struct SGX_KICKTA_DUMP_ROFF {
++ void *hKernelMemInfo;
++ u32 uiAllocIndex;
++ u32 ui32Offset;
++ u32 ui32Value;
++ char *pszName;
++};
++
++struct SGX_KICKTA_DUMP_BUFFER {
++ u32 ui32SpaceUsed;
++ u32 ui32Start;
++ u32 ui32End;
++ u32 ui32BufferSize;
++ u32 ui32BackEndLength;
++ u32 uiAllocIndex;
++ void *hKernelMemInfo;
++ void *pvLinAddr;
++ char *pszName;
++};
++
++#ifdef PDUMP
++struct SGX_KICKTA_PDUMP {
++
++ struct SGX_KICKTA_DUMPBITMAP *psPDumpBitmapArray;
++ u32 ui32PDumpBitmapSize;
++
++ struct SGX_KICKTA_DUMP_BUFFER *psBufferArray;
++ u32 ui32BufferArraySize;
++
++ struct SGX_KICKTA_DUMP_ROFF *psROffArray;
++ u32 ui32ROffArraySize;
++};
++#endif
++
++#define SGX_MAX_TRANSFER_STATUS_VALS 2
++#define SGX_MAX_TRANSFER_SYNC_OPS 5
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxconfig.h b/drivers/gpu/pvr/sgxconfig.h
+new file mode 100644
+index 0000000..589e264
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxconfig.h
+@@ -0,0 +1,75 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION 1
++#define DEV_MINOR_VERSION 0
++
++#define SGX_GENERAL_HEAP_BASE 0x01800000
++#define SGX_GENERAL_HEAP_SIZE (0x06C00000-0x00001000)
++
++#define SGX_3DPARAMETERS_HEAP_BASE 0x08400000
++#define SGX_3DPARAMETERS_HEAP_SIZE (0x04000000-0x00001000)
++
++#define SGX_TADATA_HEAP_BASE 0x0C400000
++#define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
++
++#define SGX_SYNCINFO_HEAP_BASE 0x0D400000
++#define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
++
++#define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0D800000
++#define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++#define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E000000
++#define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++#define SGX_RESERVED_CODE_HEAP_BASE 0x0E800000
++#define SGX_RESERVED_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++#define SGX_KERNEL_CODE_HEAP_BASE 0x0EC00000
++#define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++#define SGX_KERNEL_DATA_HEAP_BASE 0x0F000000
++#define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-0x00001000)
++
++#define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
++#define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++#define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
++#define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++#define SGX_CORE_IDENTIFIED
++
++#if !defined(SGX_CORE_IDENTIFIED)
++#error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxcoretypes.h b/drivers/gpu/pvr/sgxcoretypes.h
+new file mode 100644
+index 0000000..0eb5146
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxcoretypes.h
+@@ -0,0 +1,41 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXCORETYPES_KM_H_
++#define _SGXCORETYPES_KM_H_
++
++enum SGX_CORE_ID_TYPE {
++ SGX_CORE_ID_INVALID = 0,
++ SGX_CORE_ID_530 = 2,
++ SGX_CORE_ID_535 = 3,
++};
++
++struct SGX_CORE_INFO {
++ enum SGX_CORE_ID_TYPE eID;
++ u32 uiRev;
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxdefs.h b/drivers/gpu/pvr/sgxdefs.h
+new file mode 100644
+index 0000000..1e0cfec
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxdefs.h
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define _SGXDEFS_H_
++
++#ifndef SGX530
++#error unsupported SGX revision
++#endif
++
++#include "sgx530defs.h"
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxerrata.h b/drivers/gpu/pvr/sgxerrata.h
+new file mode 100644
+index 0000000..1aeaa9a
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxerrata.h
+@@ -0,0 +1,34 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++#ifndef SGX530
++#error unsupported SGX version
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxfeaturedefs.h b/drivers/gpu/pvr/sgxfeaturedefs.h
+new file mode 100644
+index 0000000..b36e6be
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxfeaturedefs.h
+@@ -0,0 +1,40 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SGX530
++#error unsupported SGX version
++#endif
++
++#define SGX_CORE_FRIENDLY_NAME "SGX530"
++#define SGX_CORE_ID SGX_CORE_ID_530
++#define SGX_FEATURE_ADDRESS_SPACE_SIZE 28
++#define SGX_FEATURE_AUTOCLOCKGATING
++#define SGX_FEATURE_MP_CORE_COUNT 1
++#define SUPPORT_SGX_PRIORITY_SCHEDULING
++
++#include "img_types.h"
++
++#include "sgxcoretypes.h"
+diff --git a/drivers/gpu/pvr/sgxinfo.h b/drivers/gpu/pvr/sgxinfo.h
+new file mode 100644
+index 0000000..1245140
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxinfo.h
+@@ -0,0 +1,338 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++
++#include "servicesint.h"
++
++#include "services.h"
++#include "sgxapi_km.h"
++
++#define SGX_MP_CORE_SELECT(x, i) (x)
++
++#define SGX_MAX_DEV_DATA 24
++#define SGX_MAX_INIT_MEM_HANDLES 16
++
++#define SGX_BIF_DIR_LIST_INDEX_EDM 0
++
++struct SGX_BRIDGE_INFO_FOR_SRVINIT {
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ struct PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++};
++
++struct SGX_BRIDGE_INIT_INFO {
++ void *hKernelCCBMemInfo;
++ void *hKernelCCBCtlMemInfo;
++ void *hKernelCCBEventKickerMemInfo;
++ void *hKernelSGXHostCtlMemInfo;
++ void *hKernelSGXTA3DCtlMemInfo;
++ void *hKernelSGXMiscMemInfo;
++ u32 ui32HostKickAddress;
++ u32 ui32GetMiscInfoAddress;
++ void *hKernelHWPerfCBMemInfo;
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ void *hKernelEDMStatusBufferMemInfo;
++#endif
++
++ u32 ui32EDMTaskReg0;
++ u32 ui32EDMTaskReg1;
++
++ u32 ui32ClkGateStatusReg;
++ u32 ui32ClkGateStatusMask;
++
++ u32 ui32CacheControl;
++
++ u32 asInitDevData[SGX_MAX_DEV_DATA];
++ void *asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++ struct SGX_INIT_SCRIPTS sScripts;
++
++};
++
++struct SGXMKIF_COMMAND {
++ u32 ui32ServiceAddress;
++ u32 ui32Data[3];
++};
++
++struct PVRSRV_SGX_KERNEL_CCB {
++ struct SGXMKIF_COMMAND asCommands[256];
++};
++
++struct PVRSRV_SGX_CCB_CTL {
++ u32 ui32WriteOffset;
++ u32 ui32ReadOffset;
++};
++
++#define SGX_AUXCCBFLAGS_SHARED 0x00000001
++
++enum SGXMKIF_COMMAND_TYPE {
++ SGXMKIF_COMMAND_EDM_KICK = 0,
++ SGXMKIF_COMMAND_VIDEO_KICK = 1,
++ SGXMKIF_COMMAND_REQUEST_SGXMISCINFO = 2,
++
++ SGXMKIF_COMMAND_FORCE_I32 = -1,
++
++};
++
++#define PVRSRV_CCBFLAGS_RASTERCMD 0x1
++#define PVRSRV_CCBFLAGS_TRANSFERCMD 0x2
++#define PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD 0x3
++#define PVRSRV_CCBFLAGS_POWERCMD 0x5
++
++#define PVRSRV_POWERCMD_POWEROFF 0x1
++#define PVRSRV_POWERCMD_IDLE 0x2
++
++#define SGX_BIF_INVALIDATE_PTCACHE 0x1
++#define SGX_BIF_INVALIDATE_PDCACHE 0x2
++
++struct SGXMKIF_HWDEVICE_SYNC_LIST {
++ struct IMG_DEV_VIRTADDR sAccessDevAddr;
++ u32 ui32NumSyncObjects;
++
++ struct PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1];
++};
++
++struct SGX_DEVICE_SYNC_LIST {
++ struct SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList;
++
++ void *hKernelHWSyncListMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo;
++ struct PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo;
++
++ volatile u32 *pui32Lock;
++
++ struct SGX_DEVICE_SYNC_LIST *psNext;
++
++ u32 ui32NumSyncObjects;
++ void *ahSyncHandles[1];
++};
++
++struct SGX_INTERNEL_STATUS_UPDATE {
++ struct CTL_STATUS sCtlStatus;
++ void *hKernelMemInfo;
++ /* pdump specific - required? */
++ u32 ui32LastStatusUpdateDumpVal;
++};
++
++struct SGX_CCB_KICK {
++ enum SGXMKIF_COMMAND_TYPE eCommand;
++ struct SGXMKIF_COMMAND sCommand;
++ void *hCCBKernelMemInfo;
++
++ u32 ui32NumDstSyncObjects;
++ void *hKernelHWSyncListMemInfo;
++ void *sDstSyncHandle;
++
++ u32 ui32NumTAStatusVals;
++ u32 ui32Num3DStatusVals;
++
++ void *ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++ void *ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++
++ IMG_BOOL bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++ IMG_BOOL bTerminateOrAbort;
++#endif
++ IMG_BOOL bKickRender;
++
++ u32 ui32CCBOffset;
++
++ u32 ui32NumSrcSyncs;
++ void *ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++
++ IMG_BOOL bTADependency;
++ void *hTA3DSyncInfo;
++
++ void *hTASyncInfo;
++ void *h3DSyncInfo;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++#if defined(NO_HARDWARE)
++ u32 ui32WriteOpsPendingVal;
++#endif
++};
++
++#define SGX_KERNEL_USE_CODE_BASE_INDEX 15
++
++struct SGXMKIF_HOST_CTL {
++
++ u32 ui32PowerStatus;
++ u32 ui32uKernelDetectedLockups;
++ u32 ui32HostDetectedLockups;
++ u32 ui32HWRecoverySampleRate;
++ u32 ui32ActivePowManSampleRate;
++ u32 ui32InterruptFlags;
++ u32 ui32InterruptClearFlags;
++
++ u32 ui32ResManFlags;
++ struct IMG_DEV_VIRTADDR sResManCleanupData;
++
++ u32 ui32NumActivePowerEvents;
++
++ u32 ui32HWPerfFlags;
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ /* !< See SGXMK_STATUS_BUFFER */
++ struct IMG_DEV_VIRTADDR sEDMStatusBuffer;
++#endif
++
++ /*< to count time wraps in the Timer task */
++ u32 ui32TimeWraps;
++};
++
++struct SGX_CLIENT_INFO {
++ u32 ui32ProcessID;
++ void *pvProcess;
++ struct PVRSRV_MISC_INFO sMiscInfo;
++
++ u32 asDevData[SGX_MAX_DEV_DATA];
++
++};
++
++struct SGX_INTERNAL_DEVINFO {
++ u32 ui32Flags;
++ void *hHostCtlKernelMemInfoHandle;
++ IMG_BOOL bForcePTOff;
++};
++
++#define SGXTQ_MAX_STATUS (SGX_MAX_TRANSFER_STATUS_VALS + 2)
++
++#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001
++#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002
++#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004
++#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008
++struct SGXMKIF_CMDTA_SHARED {
++ u32 ui32NumTAStatusVals;
++ u32 ui32Num3DStatusVals;
++
++ u32 ui32TATQSyncWriteOpsPendingVal;
++ struct IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr;
++ u32 ui32TATQSyncReadOpsPendingVal;
++ struct IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr;
++
++ u32 ui323DTQSyncWriteOpsPendingVal;
++ struct IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr;
++ u32 ui323DTQSyncReadOpsPendingVal;
++ struct IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr;
++
++ u32 ui32NumSrcSyncs;
++ struct PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS];
++
++ struct CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++ struct CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++
++ struct PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency;
++
++};
++
++struct SGXMKIF_TRANSFERCMD_SHARED {
++
++ u32 ui32SrcReadOpPendingVal;
++ struct IMG_DEV_VIRTADDR sSrcReadOpsCompleteDevAddr;
++
++ u32 ui32SrcWriteOpPendingVal;
++ struct IMG_DEV_VIRTADDR sSrcWriteOpsCompleteDevAddr;
++
++ u32 ui32DstReadOpPendingVal;
++ struct IMG_DEV_VIRTADDR sDstReadOpsCompleteDevAddr;
++
++ u32 ui32DstWriteOpPendingVal;
++ struct IMG_DEV_VIRTADDR sDstWriteOpsCompleteDevAddr;
++
++ u32 ui32TASyncWriteOpsPendingVal;
++ struct IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr;
++ u32 ui32TASyncReadOpsPendingVal;
++ struct IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr;
++
++ u32 ui323DSyncWriteOpsPendingVal;
++ struct IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr;
++ u32 ui323DSyncReadOpsPendingVal;
++ struct IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr;
++
++ u32 ui32NumStatusVals;
++ struct CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS];
++};
++
++struct PVRSRV_TRANSFER_SGX_KICK {
++ void *hCCBMemInfo;
++ u32 ui32SharedCmdCCBOffset;
++
++ struct IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++
++ void *hTASyncInfo;
++ void *h3DSyncInfo;
++
++ u32 ui32NumSrcSync;
++ void *ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ u32 ui32NumDstSync;
++ void *ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ u32 ui32Flags;
++
++ u32 ui32PDumpFlags;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++};
++
++#define PVRSRV_SGX_DIFF_NUM_COUNTERS 9
++
++struct PVRSRV_SGXDEV_DIFF_INFO {
++ u32 aui32Counters[PVRSRV_SGX_DIFF_NUM_COUNTERS];
++ u32 ui32Time[2];
++ u32 ui32Marker[2];
++};
++
++#define SGXMKIF_HWPERF_CB_SIZE 0x100
++
++struct SGXMKIF_HWPERF_CB_ENTRY {
++ u32 ui32FrameNo;
++ u32 ui32Type;
++ u32 ui32Ordinal;
++ u32 ui32TimeWraps;
++ u32 ui32Time;
++ u32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++};
++
++struct SGXMKIF_HWPERF_CB {
++ u32 ui32Woff;
++ u32 ui32Roff;
++ u32 ui32OrdinalGRAPHICS;
++ u32 ui32OrdinalMK_EXECUTION;
++ struct SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE];
++};
++
++struct PVRSRV_SGX_MISCINFO_INFO {
++ u32 ui32MiscInfoFlags;
++ struct PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxinfokm.h b/drivers/gpu/pvr/sgxinfokm.h
+new file mode 100644
+index 0000000..f2acb46
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxinfokm.h
+@@ -0,0 +1,262 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include <linux/workqueue.h>
++#include "sgxdefs.h"
++#include "device.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++
++#define SGX_HOSTPORT_PRESENT 0x00000001UL
++
++#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4)
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5)
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0)
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1)
++
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST 0x01UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST 0x02UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST 0x04UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_2DC_REQUEST 0x08UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_SHAREDPBDESC 0x10UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD 0x20UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT 0x40UL
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE 0x80UL
++
++#define PVRSRV_USSE_MISCINFO_READY 0x1UL
++
++struct PVRSRV_SGX_CCB_INFO;
++
++struct PVRSRV_SGXDEV_INFO {
++ enum PVRSRV_DEVICE_TYPE eDeviceType;
++ enum PVRSRV_DEVICE_CLASS eDeviceClass;
++
++ u8 ui8VersionMajor;
++ u8 ui8VersionMinor;
++ u32 ui32CoreConfig;
++ u32 ui32CoreFlags;
++
++ void __iomem *pvRegsBaseKM;
++
++ void *hRegMapping;
++
++ struct IMG_SYS_PHYADDR sRegsPhysBase;
++
++ u32 ui32RegSize;
++
++ u32 ui32CoreClockSpeed;
++ u32 ui32uKernelTimerClock;
++
++ void *psStubPBDescListKM;
++
++ struct IMG_DEV_PHYADDR sKernelPDDevPAddr;
++
++ void *pvDeviceMemoryHeap;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelCCBMemInfo;
++ struct PVRSRV_SGX_KERNEL_CCB *psKernelCCB;
++ struct PVRSRV_SGX_CCB_INFO *psKernelCCBInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelCCBCtlMemInfo;
++ struct PVRSRV_SGX_CCB_CTL *psKernelCCBCtl;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelCCBEventKickerMemInfo;
++ u32 *pui32KernelCCBEventKicker;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo;
++ u32 ui32HostKickAddress;
++ u32 ui32GetMiscInfoAddress;
++ u32 ui32KickTACounter;
++ u32 ui32KickTARenderCounter;
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelHWPerfCBMemInfo;
++ struct PVRSRV_SGXDEV_DIFF_INFO sDiffInfo;
++ u32 ui32HWGroupRequested;
++ u32 ui32HWReset;
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ /*!< Meminfo for EDM status buffer */
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelEDMStatusBufferMemInfo;
++#endif
++
++ u32 ui32ClientRefCount;
++
++ u32 ui32CacheControl;
++
++ void *pvMMUContextList;
++
++ IMG_BOOL bForcePTOff;
++
++ u32 ui32EDMTaskReg0;
++ u32 ui32EDMTaskReg1;
++
++ u32 ui32ClkGateStatusReg;
++ u32 ui32ClkGateStatusMask;
++ struct SGX_INIT_SCRIPTS sScripts;
++
++ void *hBIFResetPDOSMemHandle;
++ struct IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
++ struct IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
++ struct IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
++ u32 *pui32BIFResetPD;
++ u32 *pui32BIFResetPT;
++
++ void *hTimer;
++ u32 ui32TimeStamp;
++ u32 ui32NumResets;
++
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl;
++
++ struct PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
++
++ u32 ui32Flags;
++
++#if defined(PDUMP)
++ struct PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
++#endif
++
++
++ u32 asSGXDevData[SGX_MAX_DEV_DATA];
++
++};
++
++struct SGX_TIMING_INFORMATION {
++ u32 ui32CoreClockSpeed;
++ u32 ui32HWRecoveryFreq;
++ u32 ui32ActivePowManLatencyms;
++ u32 ui32uKernelFreq;
++};
++
++struct SGX_DEVICE_MAP {
++ u32 ui32Flags;
++
++ struct IMG_SYS_PHYADDR sRegsSysPBase;
++ struct IMG_CPU_PHYADDR sRegsCpuPBase;
++ void __iomem *pvRegsCpuVBase;
++ u32 ui32RegsSize;
++
++ struct IMG_SYS_PHYADDR sLocalMemSysPBase;
++ struct IMG_DEV_PHYADDR sLocalMemDevPBase;
++ struct IMG_CPU_PHYADDR sLocalMemCpuPBase;
++ u32 ui32LocalMemSize;
++
++ u32 ui32IRQ;
++};
++
++struct PVRSRV_STUB_PBDESC;
++struct PVRSRV_STUB_PBDESC {
++ u32 ui32RefCount;
++ u32 ui32TotalPBSize;
++ struct PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
++ u32 ui32SubKernelMemInfosCount;
++ void *hDevCookie;
++ struct PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ struct PVRSRV_STUB_PBDESC *psNext;
++};
++
++struct PVRSRV_SGX_CCB_INFO {
++ struct PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo;
++ struct SGXMKIF_COMMAND *psCommands;
++ u32 *pui32WriteOffset;
++ volatile u32 *pui32ReadOffset;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++};
++
++struct timer_work_data {
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct delayed_work work;
++ struct workqueue_struct *work_queue;
++ unsigned int interval;
++ bool armed;
++};
++
++enum PVRSRV_ERROR SGXRegisterDevice(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++enum PVRSRV_ERROR SGXOSTimerEnable(struct timer_work_data *data);
++enum PVRSRV_ERROR SGXOSTimerCancel(struct timer_work_data *data);
++struct timer_work_data *
++SGXOSTimerInit(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++void SGXOSTimerDeInit(struct timer_work_data *data);
++
++void HWRecoveryResetSGX(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ u32 ui32Component, u32 ui32CallerID);
++void SGXReset(struct PVRSRV_SGXDEV_INFO *psDevInfo, u32 ui32PDUMPFlags);
++
++enum PVRSRV_ERROR SGXInitialise(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bHardwareRecovery);
++enum PVRSRV_ERROR SGXDeinitialise(void *hDevCookie);
++
++void SGXStartTimer(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bStartOSTimer);
++
++enum PVRSRV_ERROR SGXPrePowerStateExt(void *hDevHandle,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState);
++
++enum PVRSRV_ERROR SGXPostPowerStateExt(void *hDevHandle,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState);
++
++enum PVRSRV_ERROR SGXPreClockSpeedChange(void *hDevHandle,
++ IMG_BOOL bIdleDevice,
++ enum PVR_POWER_STATE
++ eCurrentPowerState);
++
++enum PVRSRV_ERROR SGXPostClockSpeedChange(void *hDevHandle,
++ IMG_BOOL bIdleDevice,
++ enum PVR_POWER_STATE
++ eCurrentPowerState);
++
++enum PVRSRV_ERROR SGXDevInitCompatCheck(struct PVRSRV_DEVICE_NODE
++ *psDeviceNode);
++
++void SysGetSGXTimingInformation(struct SGX_TIMING_INFORMATION *psSGXTimingInfo);
++
++#if defined(NO_HARDWARE)
++static inline void NoHardwareGenerateEvent(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32StatusRegister,
++ u32 ui32StatusValue,
++ u32 ui32StatusMask)
++{
++ u32 ui32RegVal;
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++ ui32RegVal &= ~ui32StatusMask;
++ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxinit.c b/drivers/gpu/pvr/sgxinit.c
+new file mode 100644
+index 0000000..3d09250
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxinit.c
+@@ -0,0 +1,1622 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include <linux/workqueue.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#include "sgxutils.h"
++#include "pvrversion.h"
++#include "sgx_options.h"
++
++static IMG_BOOL SGX_ISRHandler(void *pvData);
++
++static u32 gui32EventStatusServicesByISR;
++
++static enum PVRSRV_ERROR SGXGetBuildInfoKM(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++static void SGXCommandComplete(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (OSInLISR(psDeviceNode->psSysData))
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ else
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++}
++
++static u32 DeinitDevInfo(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ if (psDevInfo->psKernelCCBInfo != NULL)
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_SGX_CCB_INFO),
++ psDevInfo->psKernelCCBInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR InitDevInfo(struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ enum PVRSRV_ERROR eError;
++
++ struct PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = NULL;
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ psDevInfo->sScripts = psInitInfo->sScripts;
++
++ psDevInfo->psKernelCCBMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
++ psDevInfo->psKernelCCB =
++ (struct PVRSRV_SGX_KERNEL_CCB *)psDevInfo->psKernelCCBMemInfo->
++ pvLinAddrKM;
++
++ psDevInfo->psKernelCCBCtlMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
++ psDevInfo->psKernelCCBCtl =
++ (struct PVRSRV_SGX_CCB_CTL *)psDevInfo->psKernelCCBCtlMemInfo->
++ pvLinAddrKM;
++
++ psDevInfo->psKernelCCBEventKickerMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)
++ psInitInfo->hKernelCCBEventKickerMemInfo;
++ psDevInfo->pui32KernelCCBEventKicker =
++ (u32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXHostCtlMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->
++ hKernelSGXHostCtlMemInfo;
++ psDevInfo->psSGXHostCtl = (struct SGXMKIF_HOST_CTL __force __iomem *)
++ psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXTA3DCtlMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->
++ hKernelSGXTA3DCtlMemInfo;
++
++ psDevInfo->psKernelSGXMiscMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo;
++
++ psDevInfo->psKernelHWPerfCBMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ psDevInfo->psKernelEDMStatusBufferMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psInitInfo->
++ hKernelEDMStatusBufferMemInfo;
++#endif
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_SGX_CCB_INFO),
++ (void **)&psKernelCCBInfo, NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "InitDevInfo: Failed to alloc memory");
++ goto failed_allockernelccb;
++ }
++
++ OSMemSet(psKernelCCBInfo, 0, sizeof(struct PVRSRV_SGX_CCB_INFO));
++ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
++ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
++ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
++ psKernelCCBInfo->pui32WriteOffset =
++ &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++ psKernelCCBInfo->pui32ReadOffset =
++ &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++ psDevInfo->ui32HostKickAddress = psInitInfo->ui32HostKickAddress;
++
++ psDevInfo->ui32GetMiscInfoAddress = psInitInfo->ui32GetMiscInfoAddress;
++
++ psDevInfo->bForcePTOff = IMG_FALSE;
++
++ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
++ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
++
++ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData,
++ sizeof(psDevInfo->asSGXDevData));
++
++ return PVRSRV_OK;
++
++failed_allockernelccb:
++ DeinitDevInfo(psDevInfo);
++
++ return eError;
++}
++
++static enum PVRSRV_ERROR SGXRunScript(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ union SGX_INIT_COMMAND *psScript,
++ u32 ui32NumInitCommands)
++{
++ u32 ui32PC;
++ union SGX_INIT_COMMAND *psComm;
++
++ for (ui32PC = 0, psComm = psScript;
++ ui32PC < ui32NumInitCommands; ui32PC++, psComm++) {
++ switch (psComm->eOp) {
++ case SGX_INIT_OP_WRITE_HW_REG:
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ psComm->sWriteHWReg.ui32Offset,
++ psComm->sWriteHWReg.ui32Value);
++ PDUMPREG(psComm->sWriteHWReg.ui32Offset,
++ psComm->sWriteHWReg.ui32Value);
++ break;
++ }
++#if defined(PDUMP)
++ case SGX_INIT_OP_PDUMP_HW_REG:
++ {
++ PDUMPREG(psComm->sPDumpHWReg.ui32Offset,
++ psComm->sPDumpHWReg.ui32Value);
++ break;
++ }
++#endif
++ case SGX_INIT_OP_HALT:
++ {
++ return PVRSRV_OK;
++ }
++ case SGX_INIT_OP_ILLEGAL:
++
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXRunScript: PC %d: Illegal command: %d",
++ ui32PC, psComm->eOp);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR SGXInitialise(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bHardwareRecovery)
++{
++ enum PVRSRV_ERROR eError;
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "SGX initialisation script part 1\n");
++ eError =
++ SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1,
++ SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXInitialise: SGXRunScript (part 1) failed (%d)",
++ eError);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "End of SGX initialisation script part 1\n");
++
++ SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++#endif
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "SGX initialisation script part 2\n");
++ eError =
++ SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2,
++ SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXInitialise: SGXRunScript (part 2) failed (%d)",
++ eError);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "End of SGX initialisation script part 2\n");
++
++ SGXStartTimer(psDevInfo, (IMG_BOOL)!bHardwareRecovery);
++
++ if (bHardwareRecovery) {
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ psDevInfo->psSGXHostCtl;
++
++ if (PollForValueKM(&psSGXHostCtl->ui32InterruptClearFlags, 0,
++ PVRSRV_USSE_EDM_INTERRUPT_HWR,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT, 1000) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXInitialise: "
++ "Wait for uKernel HW Recovery failed");
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_RETRY;
++ }
++ }
++
++ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset ==
++ psDevInfo->psKernelCCBCtl->ui32WriteOffset);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXDeinitialise(void *hDevCookie)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = (struct PVRSRV_SGXDEV_INFO *)
++ hDevCookie;
++ enum PVRSRV_ERROR eError;
++
++ if (psDevInfo->pvRegsBaseKM == NULL)
++ return PVRSRV_OK;
++
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands,
++ SGX_MAX_DEINIT_COMMANDS);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXDeinitialise: SGXRunScript failed (%d)", eError);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR DevInitSGXPart1(void *pvDeviceNode)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ void *hKernelDevMemContext;
++ struct IMG_DEV_PHYADDR sPDDevPAddr;
++ u32 i;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = (struct PVRSRV_DEVICE_NODE *)
++ pvDeviceNode;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap =
++ psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++ enum PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGX Initialisation Part 1");
++
++ PDUMPCOMMENT("SGX Core Version Information: %s",
++ SGX_CORE_FRIENDLY_NAME);
++ PDUMPCOMMENT("SGX Core Revision Information: multi rev support");
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_SGXDEV_INFO),
++ (void **)&psDevInfo, NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevInitSGXPart1 : Failed to alloc memory for DevInfo");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psDevInfo, 0, sizeof(struct PVRSRV_SGXDEV_INFO));
++
++ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
++ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pvDevice = (void *) psDevInfo;
++
++ psDevInfo->pvDeviceMemoryHeap = (void *) psDeviceMemoryHeap;
++
++ hKernelDevMemContext = BM_CreateContext(psDeviceNode, &sPDDevPAddr,
++ NULL, NULL);
++
++ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++ for (i = 0; i < psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++) {
++ void *hDevMemHeap;
++
++ switch (psDeviceMemoryHeap[i].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ hDevMemHeap =
++ BM_CreateHeap(hKernelDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++ break;
++ }
++ }
++ }
++
++ eError = MMU_BIFResetPDAlloc(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevInitSGX : Failed to alloc memory for BIF reset");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXGetInfoForSrvinitKM(void *hDevHandle,
++ struct SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ enum PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++ eError =
++ PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXGetInfoForSrvinit: "
++ "PVRSRVGetDeviceMemHeapsKM failed (%d)",
++ eError);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR DevInitSGXPart2KM(struct PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void *hDevHandle,
++ struct SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ enum PVRSRV_ERROR eError;
++ struct SGX_DEVICE_MAP *psSGXDeviceMap;
++ enum PVR_POWER_STATE eDefaultPowerState;
++ u32 l;
++
++ PDUMPCOMMENT("SGX Initialisation Part 2");
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "DevInitSGXPart2KM: "
++ "Failed to load EDM program");
++ goto failed_init_dev_info;
++ }
++
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (void **) &psSGXDeviceMap);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "DevInitSGXPart2KM: "
++ "Failed to get device memory map!");
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ if (psSGXDeviceMap->pvRegsCpuVBase) {
++ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
++ } else {
++ psDevInfo->pvRegsBaseKM =
++ OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++ psSGXDeviceMap->ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_UNCACHED,
++ NULL);
++ if (!psDevInfo->pvRegsBaseKM) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevInitSGXPart2KM: Failed to map in regs\n");
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++ psDeviceNode->pvISRData = psDeviceNode;
++
++ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++ l = readl(&psDevInfo->psSGXHostCtl->ui32PowerStatus);
++ l |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++ writel(l, &psDevInfo->psSGXHostCtl->ui32PowerStatus);
++ eDefaultPowerState = PVRSRV_POWER_STATE_D3;
++
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ SGXPrePowerStateExt,
++ SGXPostPowerStateExt,
++ SGXPreClockSpeedChange,
++ SGXPostClockSpeedChange,
++ (void *) psDeviceNode,
++ PVRSRV_POWER_STATE_D3,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "DevInitSGXPart2KM: "
++ "failed to register device with power manager");
++ return eError;
++ }
++
++ OSMemSet(psDevInfo->psKernelCCB, 0,
++ sizeof(struct PVRSRV_SGX_KERNEL_CCB));
++ OSMemSet(psDevInfo->psKernelCCBCtl, 0,
++ sizeof(struct PVRSRV_SGX_CCB_CTL));
++ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++ PDUMPCOMMENT("Initialise Kernel CCB");
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBMemInfo, 0,
++ sizeof(struct PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Control");
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBCtlMemInfo, 0,
++ sizeof(struct PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++ psDevInfo->hTimer = SGXOSTimerInit(psDeviceNode);
++ if (!psDevInfo->hTimer)
++ PVR_DPF(PVR_DBG_ERROR, "DevInitSGXPart2KM : "
++ "Failed to initialize HW recovery timer");
++
++ return PVRSRV_OK;
++
++failed_init_dev_info:
++ return eError;
++}
++
++static enum PVRSRV_ERROR DevDeInitSGX(void *pvDeviceNode)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode =
++ (struct PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ enum PVRSRV_ERROR eError;
++ u32 ui32Heap;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ struct SGX_DEVICE_MAP *psSGXDeviceMap;
++
++ if (!psDevInfo) {
++ PVR_DPF(PVR_DBG_ERROR, "DevDeInitSGX: Null DevInfo");
++ return PVRSRV_OK;
++ }
++ if (psDevInfo->hTimer) {
++ SGXOSTimerCancel(psDevInfo->hTimer);
++ SGXOSTimerDeInit(psDevInfo->hTimer);
++ psDevInfo->hTimer = NULL;
++ }
++
++ MMU_BIFResetPDFree(psDevInfo);
++
++ DeinitDevInfo(psDevInfo);
++
++
++ psDeviceMemoryHeap =
++ (struct DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
++ for (ui32Heap = 0;
++ ui32Heap < psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ ui32Heap++) {
++ switch (psDeviceMemoryHeap[ui32Heap].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap !=
++ NULL)
++ BM_DestroyHeap(psDeviceMemoryHeap
++ [ui32Heap].hDevMemHeap);
++ break;
++ }
++ }
++ }
++
++ if (!pvr_put_ctx(psDeviceNode->sDevMemoryInfo.pBMKernelContext))
++ pr_err("%s: kernel context still in use, can't free it",
++ __func__);
++
++ eError = PVRSRVRemovePowerDevice(
++ ((struct PVRSRV_DEVICE_NODE *)pvDeviceNode)->
++ sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (void **)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DevDeInitSGX: Failed to get device memory map!");
++ return eError;
++ }
++
++ if (!psSGXDeviceMap->pvRegsCpuVBase)
++ if (psDevInfo->pvRegsBaseKM != NULL)
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32RegSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED,
++ NULL);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct PVRSRV_SGXDEV_INFO), psDevInfo, NULL);
++
++ psDeviceNode->pvDevice = NULL;
++
++ if (psDeviceMemoryHeap != NULL)
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(struct DEVICE_MEMORY_HEAP_INFO) *
++ psDeviceNode->sDevMemoryInfo.ui32HeapCount,
++ psDeviceMemoryHeap, NULL);
++
++ return PVRSRV_OK;
++}
++
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++
++#define SGXMK_TRACE_BUFFER_SIZE 512
++
++static void dump_edm(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ u32 *trace_buffer =
++ psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
++ u32 last_code, write_offset;
++ int i;
++
++ last_code = *trace_buffer;
++ trace_buffer++;
++ write_offset = *trace_buffer;
++
++ pr_err("Last SGX microkernel status code: 0x%x\n", last_code);
++
++ trace_buffer++;
++ /* Dump the status values */
++
++ for (i = 0; i < SGXMK_TRACE_BUFFER_SIZE; i++) {
++ u32 *buf;
++ buf = trace_buffer + (((write_offset + i) %
++ SGXMK_TRACE_BUFFER_SIZE) * 4);
++ pr_err("(MKT%u) %8.8X %8.8X %8.8X %8.8X\n", i,
++ buf[2], buf[3], buf[1], buf[0]);
++ }
++}
++#else
++static void dump_edm(struct PVRSRV_SGXDEV_INFO *psDevInfo) {}
++#endif
++
++static void dump_sgx_registers(struct PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ pr_err("EVENT_STATUS = 0x%08X\n"
++ "EVENT_STATUS2 = 0x%08X\n"
++ "BIF_CTRL = 0x%08X\n"
++ "BIF_INT_STAT = 0x%08X\n"
++ "BIF_MEM_REQ_STAT = 0x%08X\n"
++ "BIF_FAULT = 0x%08X\n"
++ "CLKGATECTL = 0x%08X\n",
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_EVENT_STATUS),
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_EVENT_STATUS2),
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_CTRL),
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_INT_STAT),
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_BIF_FAULT),
++ readl(psDevInfo->pvRegsBaseKM + EUR_CR_CLKGATECTL));
++}
++
++
++void HWRecoveryResetSGX(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ u32 ui32Component, u32 ui32CallerID)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ psDevInfo->psSGXHostCtl;
++ u32 l;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Component);
++
++ /* SGXOSTimer already has the lock as it needs to read SGX registers */
++ if (ui32CallerID != TIMER_ID) {
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_WARNING, "HWRecoveryResetSGX: "
++ "Power transition in progress");
++ return;
++ }
++ }
++
++ l = readl(&psSGXHostCtl->ui32InterruptClearFlags);
++ l |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++ writel(l, &psSGXHostCtl->ui32InterruptClearFlags);
++
++ pr_err("HWRecoveryResetSGX: SGX Hardware Recovery triggered\n");
++
++ dump_sgx_registers(psDevInfo);
++ dump_edm(psDevInfo);
++
++ PDUMPSUSPEND();
++
++ do {
++ eError = SGXInitialise(psDevInfo, IMG_TRUE);
++ } while (eError == PVRSRV_ERROR_RETRY);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "HWRecoveryResetSGX: SGXInitialise failed (%d)",
++ eError);
++
++ PDUMPRESUME();
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++ PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
++}
++
++static unsigned long sgx_reset_forced;
++
++static void SGXOSTimer(struct work_struct *work)
++{
++ struct timer_work_data *data = container_of(work,
++ struct timer_work_data,
++ work.work);
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = data->psDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ static u32 ui32EDMTasks;
++ static u32 ui32LockupCounter;
++ static u32 ui32NumResets;
++ u32 ui32CurrentEDMTasks;
++ IMG_BOOL bLockup = IMG_FALSE;
++ IMG_BOOL bPoweredDown;
++ enum PVRSRV_ERROR eError;
++
++ psDevInfo->ui32TimeStamp++;
++
++ eError = PVRSRVPowerLock(TIMER_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK) {
++ /*
++ * If a power transition is in progress then we're not really
++ * sure what the state of world is going to be after, so we
++ * just "pause" HW recovery and hopefully next time around we
++ * get the lock and can decide what to do
++ */
++ goto rearm;
++ }
++
++#if defined(NO_HARDWARE)
++ bPoweredDown = IMG_TRUE;
++#else
++ bPoweredDown = (IMG_BOOL) !SGXIsDevicePowered(psDeviceNode);
++#endif
++
++ if (bPoweredDown) {
++ ui32LockupCounter = 0;
++ } else {
++ ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32EDMTaskReg0);
++ if (psDevInfo->ui32EDMTaskReg1 != 0)
++ ui32CurrentEDMTasks ^=
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32EDMTaskReg1);
++ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++ (psDevInfo->ui32NumResets == ui32NumResets)) {
++ ui32LockupCounter++;
++ if (ui32LockupCounter == 3) {
++ ui32LockupCounter = 0;
++ PVR_DPF(PVR_DBG_ERROR, "SGXOSTimer() "
++ "detected SGX lockup (0x%x tasks)",
++ ui32EDMTasks);
++
++ bLockup = IMG_TRUE;
++ }
++ } else {
++ ui32LockupCounter = 0;
++ ui32EDMTasks = ui32CurrentEDMTasks;
++ ui32NumResets = psDevInfo->ui32NumResets;
++ }
++ }
++
++ bLockup |= cmpxchg(&sgx_reset_forced, 1, 0);
++
++ if (bLockup) {
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ psDevInfo->psSGXHostCtl;
++ u32 l;
++
++ l = readl(&psSGXHostCtl->ui32HostDetectedLockups);
++ l++;
++ writel(l, &psSGXHostCtl->ui32HostDetectedLockups);
++
++ /* Note: This will release the lock when done */
++ HWRecoveryResetSGX(psDeviceNode, 0, TIMER_ID);
++ } else
++ PVRSRVPowerUnlock(TIMER_ID);
++
++ rearm:
++ queue_delayed_work(data->work_queue, &data->work,
++ msecs_to_jiffies(data->interval));
++}
++
++struct timer_work_data *
++SGXOSTimerInit(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct timer_work_data *data;
++
++ data = kzalloc(sizeof(struct timer_work_data), GFP_KERNEL);
++ if (!data)
++ return NULL;
++
++ data->work_queue = create_workqueue("SGXOSTimer");
++ if (!data->work_queue) {
++ kfree(data);
++ return NULL;
++ }
++
++ data->interval = 150;
++ data->psDeviceNode = psDeviceNode;
++ INIT_DELAYED_WORK(&data->work, SGXOSTimer);
++
++ return data;
++}
++
++void SGXOSTimerDeInit(struct timer_work_data *data)
++{
++ destroy_workqueue(data->work_queue);
++ kfree(data);
++}
++
++enum PVRSRV_ERROR SGXOSTimerEnable(struct timer_work_data *data)
++{
++ if (!data)
++ return PVRSRV_ERROR_GENERIC;
++
++ if (queue_delayed_work(data->work_queue, &data->work,
++ msecs_to_jiffies(data->interval))) {
++ data->armed = true;
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++enum PVRSRV_ERROR SGXOSTimerCancel(struct timer_work_data *data)
++{
++ if (!data)
++ return PVRSRV_ERROR_GENERIC;
++
++ cancel_delayed_work_sync(&data->work);
++ data->armed = false;
++
++ return PVRSRV_OK;
++}
++
++int sgx_force_reset(void)
++{
++ return !cmpxchg(&sgx_reset_forced, 0, 1);
++}
++
++static IMG_BOOL SGX_ISRHandler(void *pvData)
++{
++ IMG_BOOL bInterruptProcessed = IMG_FALSE;
++
++ {
++ u32 ui32EventStatus, ui32EventEnable;
++ u32 ui32EventClear = 0;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ if (pvData == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGX_ISRHandler: Invalid params\n");
++ return bInterruptProcessed;
++ }
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)pvData;
++ psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ ui32EventStatus =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ EUR_CR_EVENT_HOST_ENABLE);
++
++ gui32EventStatusServicesByISR = ui32EventStatus;
++
++ ui32EventStatus &= ui32EventEnable;
++
++ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++
++ if (ui32EventClear) {
++ bInterruptProcessed = IMG_TRUE;
++
++ ui32EventClear |=
++ EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++ }
++ }
++
++ return bInterruptProcessed;
++}
++
++static void SGX_MISRHandler(void *pvData)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode =
++ (struct PVRSRV_DEVICE_NODE *)pvData;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ psDevInfo->psSGXHostCtl;
++ u32 l1, l2;
++
++ l1 = readl(&psSGXHostCtl->ui32InterruptFlags);
++ l2 = readl(&psSGXHostCtl->ui32InterruptClearFlags);
++ if ((l1 & PVRSRV_USSE_EDM_INTERRUPT_HWR) &&
++ !(l2 & PVRSRV_USSE_EDM_INTERRUPT_HWR))
++ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
++
++ if (psDeviceNode->bReProcessDeviceCommandComplete)
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
++}
++
++enum PVRSRV_ERROR SGXRegisterDevice(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
++ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pfnInitDevice = DevInitSGXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitSGX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = SGXDevInitCompatCheck;
++
++ psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++ psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++ psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++ psDeviceNode->pfnMMUCreate = MMU_Create;
++ psDeviceNode->pfnMMUDelete = MMU_Delete;
++ psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++ psDeviceNode->pfnMMUFree = MMU_Free;
++ psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++ psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++ psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++ psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++ psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++ psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++
++ psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ psDevMemoryInfo->ui32AddressSpaceSizeLog2 =
++ SGX_FEATURE_ADDRESS_SPACE_SIZE;
++
++ psDevMemoryInfo->ui32Flags = 0;
++ psDevMemoryInfo->ui32HeapCount = SGX_MAX_HEAP_ID;
++ psDevMemoryInfo->ui32SyncHeapID = SGX_SYNCINFO_HEAP_ID;
++
++ psDevMemoryInfo->ui32MappingHeapID = SGX_GENERAL_HEAP_ID;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct DEVICE_MEMORY_HEAP_INFO) *
++ psDevMemoryInfo->ui32HeapCount,
++ (void **) &psDevMemoryInfo->psDeviceMemoryHeap,
++ NULL) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXRegisterDevice : "
++ "Failed to alloc memory for "
++ "struct DEVICE_MEMORY_HEAP_INFO");
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0,
++ sizeof(struct DEVICE_MEMORY_HEAP_INFO) *
++ psDevMemoryInfo->ui32HeapCount);
++
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_GENERAL_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapSize =
++ SGX_GENERAL_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszName = "General";
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszBSName = "General BS";
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_TADATA_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapSize =
++ SGX_TADATA_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszName = "TA Data";
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszBSName = "TA Data BS";
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_KERNEL_CODE_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapSize =
++ SGX_KERNEL_CODE_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszName = "Kernel Code";
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszBSName =
++ "Kernel Code BS";
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_KERNEL_DATA_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].ui32HeapSize =
++ SGX_KERNEL_DATA_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].pszName = "KernelData";
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].pszBSName = "KernelData BS";
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_PIXELSHADER_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapSize =
++ SGX_PIXELSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszName = "PixelShaderUSSE";
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszBSName =
++ "PixelShaderUSSE BS";
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_VERTEXSHADER_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapSize =
++ SGX_VERTEXSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszName =
++ "VertexShaderUSSE";
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszBSName =
++ "VertexShaderUSSE BS";
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapSize =
++ SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszName =
++ "PDSPixelCodeData";
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszBSName =
++ "PDSPixelCodeData BS";
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].sDevVAddrBase.
++ uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapSize =
++ SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszName =
++ "PDSVertexCodeData";
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszBSName =
++ "PDSVertexCodeData BS";
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_SYNCINFO_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapSize =
++ SGX_SYNCINFO_HEAP_SIZE;
++
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszName = "CacheCoherent";
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszBSName = "CacheCoherent BS";
++
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].sDevVAddrBase.uiAddr =
++ SGX_3DPARAMETERS_HEAP_BASE;
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapSize =
++ SGX_3DPARAMETERS_HEAP_SIZE;
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszName = "3DParameters";
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszBSName =
++ "3DParameters BS";
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++ PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType =
++ DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32DataPageSize =
++ SGX_MMU_PAGE_SIZE;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXGetClientInfoKM(void *hDevCookie,
++ struct SGX_CLIENT_INFO *psClientInfo)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)
++ ((struct PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psDevInfo->ui32ClientRefCount++;
++#ifdef PDUMP
++ if (psDevInfo->ui32ClientRefCount == 1)
++ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++#endif
++ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData,
++ sizeof(psClientInfo->asDevData));
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXDevInitCompatCheck(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ enum PVRSRV_ERROR eError;
++#if !defined(NO_HARDWARE)
++ u32 ui32BuildOptions, ui32BuildOptionsMismatch;
++ struct PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++#endif
++
++ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXDevInitCompatCheck: Device not of type SGX");
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit;
++ }
++ psDevInfo = psDeviceNode->pvDevice;
++ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++#if !defined(NO_HARDWARE)
++
++ eError = SGXGetBuildInfoKM(psDevInfo, psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXDevInitCompatCheck: "
++ "Unable to validate device DDK version");
++ goto exit;
++ }
++ psSGXFeatures =
++ &((struct PVRSRV_SGX_MISCINFO_INFO *)(psMemInfo->pvLinAddrKM))->
++ sSGXFeatures;
++ if ((psSGXFeatures->ui32DDKVersion !=
++ ((PVRVERSION_MAJ << 16) | (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH)) ||
++ (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD)) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXDevInitCompatCheck: "
++ "Incompatible driver DDK revision (%ld)"
++ "/device DDK revision (%ld).",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild);
++ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
++ goto exit;
++ } else {
++ PVR_DPF(PVR_DBG_WARNING, "(Success) SGXInit: "
++ "driver DDK (%ld) and device DDK (%ld) match",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild);
++ }
++
++ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
++ if (ui32BuildOptions != (SGX_BUILD_OPTIONS)) {
++ ui32BuildOptionsMismatch =
++ ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
++ if (((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0)
++ PVR_DPF(PVR_DBG_ERROR, "SGXInit: "
++ "Mismatch in driver and microkernel build "
++ "options; extra options present in driver: "
++ "(0x%lx)",
++ (SGX_BUILD_OPTIONS) &
++ ui32BuildOptionsMismatch);
++
++ if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ PVR_DPF(PVR_DBG_ERROR, "SGXInit: "
++ "Mismatch in driver and microkernel build "
++ "options; extra options present in "
++ "microkernel: (0x%lx)",
++ ui32BuildOptions & ui32BuildOptionsMismatch);
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto exit;
++ } else {
++ PVR_DPF(PVR_DBG_WARNING, "(Success) SGXInit: "
++ "Driver and microkernel build options match.");
++ }
++
++#endif
++ eError = PVRSRV_OK;
++exit:
++ return eError;
++}
++
++static
++enum PVRSRV_ERROR SGXGetBuildInfoKM(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ enum PVRSRV_ERROR eError;
++ struct SGXMKIF_COMMAND sCommandData;
++ struct PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ struct PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ psDevInfo->psKernelSGXMiscMemInfo;
++
++ if (!psMemInfo->pvLinAddrKM) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXGetMiscInfoKM: Invalid address.");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
++ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
++
++ OSMemSet(psMemInfo->pvLinAddrKM, 0,
++ sizeof(struct PVRSRV_SGX_MISCINFO_INFO));
++
++ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr;
++
++ OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures));
++
++ mb();
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode,
++ SGXMKIF_COMMAND_REQUEST_SGXMISCINFO,
++ &sCommandData, KERNEL_ID, 0);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXGetMiscInfoKM: SGXScheduleCCBCommandKM failed.");
++ return eError;
++ }
++
++#if !defined(NO_HARDWARE)
++ {
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if ((psSGXMiscInfoInt->
++ ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) !=
++ 0) {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout)
++ return PVRSRV_ERROR_TIMEOUT;
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXGetMiscInfoKM(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ struct SGX_MISC_INFO *psMiscInfo,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ switch (psMiscInfo->eRequest) {
++ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++ {
++ psMiscInfo->uData.ui32SGXClockSpeed =
++ psDevInfo->ui32CoreClockSpeed;
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_SGXREV:
++ {
++ struct PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ psDevInfo->psKernelSGXMiscMemInfo;
++
++ SGXGetBuildInfoKM(psDevInfo, psDeviceNode);
++ psSGXFeatures =
++ &((struct PVRSRV_SGX_MISCINFO_INFO *)(psMemInfo->
++ pvLinAddrKM))->sSGXFeatures;
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: "
++ "Core 0x%lx, sw ID 0x%lx, "
++ "sw Rev 0x%lx\n",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreIdSW,
++ psSGXFeatures->ui32CoreRevSW);
++ PVR_DPF(PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: "
++ "DDK version 0x%lx, DDK build 0x%lx\n",
++ psSGXFeatures->ui32DDKVersion,
++ psSGXFeatures->ui32DDKBuild);
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
++ {
++ struct PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ psDevInfo->psKernelSGXMiscMemInfo;
++ struct PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ psSGXFeatures = &((struct PVRSRV_SGX_MISCINFO_INFO *)(
++ psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++ OSMemSet(psMemInfo->pvLinAddrKM, 0,
++ sizeof(struct PVRSRV_SGX_MISCINFO_INFO));
++
++ psSGXFeatures->ui32DDKVersion =
++ (PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) | PVRVERSION_BRANCH;
++ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
++ {
++ struct SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ unsigned ui32MatchingFlags;
++
++ if ((psMiscInfo->uData.ui32NewHWPerfStatus &
++ ~(PVRSRV_SGX_HWPERF_GRAPHICS_ON |
++ PVRSRV_SGX_HWPERF_MK_EXECUTION_ON)) != 0) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ ui32MatchingFlags = readl(&psDevInfo->
++ psSGXHostCtl->ui32HWPerfFlags);
++ ui32MatchingFlags &=
++ psMiscInfo->uData.ui32NewHWPerfStatus;
++ if ((ui32MatchingFlags & PVRSRV_SGX_HWPERF_GRAPHICS_ON)
++ == 0UL) {
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffff;
++ }
++ if ((ui32MatchingFlags &
++ PVRSRV_SGX_HWPERF_MK_EXECUTION_ON) == 0UL) {
++ psHWPerfCB->ui32OrdinalMK_EXECUTION =
++ 0xffffffffUL;
++ }
++
++
++ writel(psMiscInfo->uData.ui32NewHWPerfStatus,
++ &psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "SGX ukernel HWPerf status %u\n",
++ readl(&psDevInfo->psSGXHostCtl->
++ ui32HWPerfFlags));
++ PDUMPMEM(NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL,
++ ui32HWPerfFlags),
++ sizeof(psDevInfo->psSGXHostCtl->
++ ui32HWPerfFlags),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelSGXHostCtlMemInfo));
++#endif
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++ {
++
++ struct SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ u32 l;
++
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffffUL;
++
++ l = readl(&psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++ l |= PVRSRV_SGX_HWPERF_GRAPHICS_ON;
++ writel(l, &psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++ {
++ writel(0, &psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++ {
++ struct SGX_MISC_INFO_HWPERF_RETRIEVE_CB *psRetrieve =
++ &psMiscInfo->uData.sRetrieveCB;
++ struct SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ unsigned i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff
++ && i < psRetrieve->ui32ArraySize; i++) {
++ struct SGXMKIF_HWPERF_CB_ENTRY *psData =
++ &psHWPerfCB->psHWPerfCBData[psHWPerfCB->
++ ui32Roff];
++
++ psRetrieve->psHWPerfData[i].ui32FrameNo =
++ psData->ui32FrameNo;
++ psRetrieve->psHWPerfData[i].ui32Type =
++ (psData->ui32Type &
++ PVRSRV_SGX_HWPERF_TYPE_OP_MASK);
++ psRetrieve->psHWPerfData[i].ui32StartTime =
++ psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32StartTimeWraps =
++ psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32EndTime =
++ psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32EndTimeWraps =
++ psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32ClockSpeed =
++ psDevInfo->ui32CoreClockSpeed;
++ psRetrieve->psHWPerfData[i].ui32TimeMax =
++ psDevInfo->ui32uKernelTimerClock;
++ psHWPerfCB->ui32Roff =
++ (psHWPerfCB->ui32Roff + 1) &
++ (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++ psRetrieve->ui32DataCount = i;
++ psRetrieve->ui32Time = OSClockus();
++ return PVRSRV_OK;
++ }
++ default:
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++}
++
++enum PVRSRV_ERROR SGXReadDiffCountersKM(void *hDevHandle, u32 ui32Reg,
++ u32 *pui32Old, IMG_BOOL bNew, u32 ui32New,
++ u32 ui32NewReset, u32 ui32CountersReg,
++ u32 *pui32Time, IMG_BOOL *pbActive,
++ struct PVRSRV_SGXDEV_DIFF_INFO *psDiffs)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct PVRSRV_POWER_DEV *psPowerDevice;
++ IMG_BOOL bPowered = IMG_FALSE;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ if (bNew)
++ psDevInfo->ui32HWGroupRequested = ui32New;
++ psDevInfo->ui32HWReset |= ui32NewReset;
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ SysAcquireData(&psSysData);
++
++ psPowerDevice = psSysData->psPowerDeviceList;
++ while (psPowerDevice) {
++ if (psPowerDevice->ui32DeviceIndex ==
++ psDeviceNode->sDevId.ui32DeviceIndex) {
++ bPowered =
++ (IMG_BOOL)(psPowerDevice->eCurrentPowerState ==
++ PVRSRV_POWER_STATE_D0);
++ break;
++ }
++
++ psPowerDevice = psPowerDevice->psNext;
++ }
++
++ *pbActive = bPowered;
++
++ {
++ struct PVRSRV_SGXDEV_DIFF_INFO sNew,
++ *psPrev = &psDevInfo->sDiffInfo;
++ u32 i;
++
++ sNew.ui32Time[0] = OSClockus();
++ *pui32Time = sNew.ui32Time[0];
++ if (sNew.ui32Time[0] != psPrev->ui32Time[0] && bPowered) {
++
++ *pui32Old =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg);
++
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i) {
++ sNew.aui32Counters[i] =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ ui32CountersReg + (i * 4));
++ }
++
++ if (psDevInfo->ui32HWGroupRequested != *pui32Old) {
++ if (psDevInfo->ui32HWReset != 0) {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ ui32Reg,
++ psDevInfo->
++ ui32HWGroupRequested |
++ psDevInfo->ui32HWReset);
++ psDevInfo->ui32HWReset = 0;
++ }
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg,
++ psDevInfo->ui32HWGroupRequested);
++ }
++
++ sNew.ui32Marker[0] = psDevInfo->ui32KickTACounter;
++ sNew.ui32Marker[1] = psDevInfo->ui32KickTARenderCounter;
++
++ sNew.ui32Time[1] = readl(
++ &psDevInfo->psSGXHostCtl->ui32TimeWraps);
++
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i) {
++ psDiffs->aui32Counters[i] =
++ sNew.aui32Counters[i] -
++ psPrev->aui32Counters[i];
++ }
++
++ psDiffs->ui32Marker[0] =
++ sNew.ui32Marker[0] - psPrev->ui32Marker[0];
++ psDiffs->ui32Marker[1] =
++ sNew.ui32Marker[1] - psPrev->ui32Marker[1];
++
++ psDiffs->ui32Time[0] =
++ sNew.ui32Time[0] - psPrev->ui32Time[0];
++ psDiffs->ui32Time[1] =
++ sNew.ui32Time[1] - psPrev->ui32Time[1];
++
++ *psPrev = sNew;
++ } else {
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i)
++ psDiffs->aui32Counters[i] = 0;
++
++ psDiffs->ui32Marker[0] = 0;
++ psDiffs->ui32Marker[1] = 0;
++
++ psDiffs->ui32Time[0] = 0;
++ psDiffs->ui32Time[1] = 0;
++ }
++ }
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ SGXTestActivePowerEvent(psDeviceNode, KERNEL_ID);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR SGXReadHWPerfCBKM(void *hDevHandle, u32 ui32ArraySize,
++ struct PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry,
++ u32 *pui32DataCount, u32 *pui32ClockSpeed,
++ u32 *pui32HostTimeStamp)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ struct SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ unsigned i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
++ i++) {
++ struct SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry =
++ &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
++ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
++ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
++ psClientHWPerfEntry[i].ui32Clocksx16 =
++ SGXConvertTimeStamp(psDevInfo, psMKPerfEntry->ui32TimeWraps,
++ psMKPerfEntry->ui32Time);
++ OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0],
++ &psMKPerfEntry->ui32Counters[0],
++ sizeof(psMKPerfEntry->ui32Counters));
++
++ psHWPerfCB->ui32Roff =
++ (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++
++ *pui32DataCount = i;
++ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ *pui32HostTimeStamp = OSClockus();
++
++ return eError;
++}
+diff --git a/drivers/gpu/pvr/sgxkick.c b/drivers/gpu/pvr/sgxkick.c
+new file mode 100644
+index 0000000..d6ccb1f
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxkick.c
+@@ -0,0 +1,504 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined(PDUMP)
++#include "sgxapi_km.h"
++#include "pdump_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++enum PVRSRV_ERROR SGXDoKickKM(void *hDevHandle,
++ struct SGX_CCB_KICK *psCCBKick)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ struct PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hCCBKernelMemInfo;
++ struct SGXMKIF_CMDTA_SHARED *psTACmd;
++ u32 i;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ if (psCCBKick->bKickRender)
++ ++psDevInfo->ui32KickTARenderCounter;
++ ++psDevInfo->ui32KickTACounter;
++
++ if (!CCB_OFFSET_IS_VALID
++ (struct SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick,
++ ui32CCBOffset)) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psTACmd =
++ CCB_DATA_FROM_OFFSET(struct SGXMKIF_CMDTA_SHARED, psCCBMemInfo,
++ psCCBKick, ui32CCBOffset);
++
++ if (psCCBKick->hTA3DSyncInfo) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psCCBKick->bTADependency)
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ if (psCCBKick->hTASyncInfo != NULL) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psCCBKick->hTASyncInfo;
++
++ psTACmd->sTATQSyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui32TATQSyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui32TATQSyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psCCBKick->h3DSyncInfo;
++
++ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui323DTQSyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui323DTQSyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
++ if (psCCBKick->ui32NumTAStatusVals != 0) {
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psCCBKick->ahTAStatusSyncInfo[i];
++
++ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++ }
++
++ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
++ if (psCCBKick->ui32Num3DStatusVals != 0) {
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ah3DStatusSyncInfo[i];
++
++ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++ }
++
++ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ahSrcKernelSyncInfo[i];
++
++ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ }
++
++ if (psCCBKick->bFirstKickOrResume &&
++ psCCBKick->ui32NumDstSyncObjects > 0) {
++ struct PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psCCBKick->
++ hKernelHWSyncListMemInfo;
++ struct SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList =
++ psHWDstSyncListMemInfo->pvLinAddrKM;
++ u32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
++
++ PVR_ASSERT(((struct PVRSRV_KERNEL_MEM_INFO *)psCCBKick->
++ hKernelHWSyncListMemInfo)->ui32AllocSize >=
++ (sizeof(struct SGXMKIF_HWDEVICE_SYNC_LIST) +
++ (sizeof(struct PVRSRV_DEVICE_SYNC_OBJECT) *
++ ui32NumDstSyncs)));
++
++ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
++ PDUMPMEM(NULL,
++ psHWDstSyncListMemInfo, 0,
++ sizeof(struct SGXMKIF_HWDEVICE_SYNC_LIST),
++ 0, MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++#endif
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ sDstSyncHandle;
++ i = 0;
++ if (psSyncInfo) {
++ psHWDeviceSyncList->asSyncData[i].
++ sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psHWDeviceSyncList->asSyncData[i].
++ sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psHWDeviceSyncList->asSyncData[i].
++ ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psHWDeviceSyncList->asSyncData[i].
++ ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->
++ ui32WriteOpsPending++;
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ u32 ui32ModifiedValue;
++ u32 ui32SyncOffset = offsetof(
++ struct SGXMKIF_HWDEVICE_SYNC_LIST,
++ asSyncData) + (i *
++ sizeof(
++ struct PVRSRV_DEVICE_SYNC_OBJECT));
++ u32 ui32WOpsOffset = ui32SyncOffset +
++ offsetof(
++ struct PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal);
++ u32 ui32ROpsOffset = ui32SyncOffset +
++ offsetof(
++ struct PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal);
++
++ PDUMPCOMMENT("HWDeviceSyncObject for RT: "
++ "%i\r\n", i);
++
++ PDUMPMEM(NULL, psHWDstSyncListMemInfo,
++ ui32SyncOffset, sizeof(
++ struct PVRSRV_DEVICE_SYNC_OBJECT),
++ 0, MAKEUNIQUETAG(
++ psHWDstSyncListMemInfo));
++
++ if ((psSyncInfo->psSyncData->
++ ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal == 0)) {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n",
++ i);
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(struct
++ PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(struct PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32WriteOpsComplete),
++ 0, MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->
++ ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify RT %d WOpPendingVal "
++ "in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psHWDstSyncListMemInfo, ui32WOpsOffset,
++ sizeof(u32), 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ PDUMPCOMMENT("Modify RT %d ROpsPendingVal "
++ "in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psHWDstSyncListMemInfo,
++ ui32ROpsOffset, sizeof(u32), 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++#endif
++ } else {
++ psHWDeviceSyncList->asSyncData[i].
++ sWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psHWDeviceSyncList->asSyncData[i].
++ sReadOpsCompleteDevVAddr.uiAddr = 0;
++
++ psHWDeviceSyncList->asSyncData[i].
++ ui32ReadOpsPendingVal = 0;
++ psHWDeviceSyncList->asSyncData[i].
++ ui32WriteOpsPendingVal = 0;
++ }
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ PDUMPCOMMENT("Shared part of TA command\r\n");
++
++ PDUMPMEM(psTACmd, psCCBMemInfo, psCCBKick->ui32CCBDumpWOff,
++ sizeof(struct SGXMKIF_CMDTA_SHARED), 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ u32 ui32ModifiedValue;
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ahSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal ==
++ 0)) {
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(struct PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32ReadOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(struct PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32WriteOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue =
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_CMDTA_SHARED,
++ asSrcSyncs) +
++ (i *
++ sizeof(struct PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(struct PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_CMDTA_SHARED,
++ asSrcSyncs) +
++ (i *
++ sizeof(struct PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(struct PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ahTAStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_CMDTA_SHARED,
++ sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(u32), 0, MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psCCBKick->ah3DStatusSyncInfo[i];
++
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_CMDTA_SHARED,
++ sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(u32), 0, MAKEUNIQUETAG(psCCBMemInfo));
++ }
++ }
++#endif
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, psCCBKick->eCommand,
++ &psCCBKick->sCommand, KERNEL_ID, 0);
++ if (eError == PVRSRV_ERROR_RETRY) {
++ if (psCCBKick->bFirstKickOrResume &&
++ psCCBKick->ui32NumDstSyncObjects > 0) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psCCBKick->sDstSyncHandle;
++ if (psSyncInfo) {
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ psSyncInfo->psSyncData->
++ ui32LastOpDumpVal--;
++#endif
++ }
++ }
++
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++
++ return eError;
++ } else if (PVRSRV_OK != eError) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXDoKickKM: SGXScheduleCCBCommandKM failed.");
++ return eError;
++ }
++
++#if defined(NO_HARDWARE)
++
++ if (psCCBKick->hTA3DSyncInfo) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++
++ if (psCCBKick->bTADependency) {
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != NULL) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != NULL) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ahTAStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++ }
++
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ahSrcKernelSyncInfo[i];
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ }
++
++ if (psCCBKick->bTerminateOrAbort) {
++ if (psCCBKick->ui32NumDstSyncObjects > 0) {
++ struct PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psCCBKick->
++ hKernelHWSyncListMemInfo;
++ struct SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList =
++ psHWDstSyncListMemInfo->pvLinAddrKM;
++
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ sDstSyncHandle;
++ if (psSyncInfo)
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psHWDeviceSyncList->asSyncData[0].
++ ui32WriteOpsPendingVal + 1;
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->
++ ah3DStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++ }
++ }
++#endif
++
++ return eError;
++}
+diff --git a/drivers/gpu/pvr/sgxmmu.h b/drivers/gpu/pvr/sgxmmu.h
+new file mode 100644
+index 0000000..d633e29
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxmmu.h
+@@ -0,0 +1,57 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT 12
++#define SGX_MMU_PAGE_SIZE (1UL << SGX_MMU_PAGE_SHIFT)
++#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1UL)
++
++#define SGX_MMU_PD_SHIFT 10
++#define SGX_MMU_PD_SIZE (1UL << SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK 0xFFC00000UL
++
++#define SGX_MMU_PDE_ADDR_MASK 0xFFFFF000UL
++#define SGX_MMU_PDE_VALID 0x00000001UL
++#define SGX_MMU_PDE_PAGE_SIZE_4K 0x00000000UL
++#define SGX_MMU_PDE_WRITEONLY 0x00000002UL
++#define SGX_MMU_PDE_READONLY 0x00000004UL
++#define SGX_MMU_PDE_CACHECONSISTENT 0x00000008UL
++#define SGX_MMU_PDE_EDMPROTECT 0x00000010UL
++
++#define SGX_MMU_PT_SHIFT 10
++#define SGX_MMU_PT_SIZE (1UL << SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK 0x003FF000UL
++
++#define SGX_MMU_PTE_ADDR_MASK 0xFFFFF000UL
++#define SGX_MMU_PTE_VALID 0x00000001UL
++#define SGX_MMU_PTE_WRITEONLY 0x00000002UL
++#define SGX_MMU_PTE_READONLY 0x00000004UL
++#define SGX_MMU_PTE_CACHECONSISTENT 0x00000008UL
++#define SGX_MMU_PTE_EDMPROTECT 0x00000010UL
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxpower.c b/drivers/gpu/pvr/sgxpower.c
+new file mode 100644
+index 0000000..79b6c61
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxpower.c
+@@ -0,0 +1,398 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++#include <linux/io.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfokm.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++enum PVR_DEVICE_POWER_STATE {
++
++ PVR_DEVICE_POWER_STATE_ON = 0,
++ PVR_DEVICE_POWER_STATE_IDLE = 1,
++ PVR_DEVICE_POWER_STATE_OFF = 2,
++
++ PVR_DEVICE_POWER_STATE_FORCE_I32 = 0x7fffffff
++};
++
++static enum PVR_DEVICE_POWER_STATE MapDevicePowerState(enum PVR_POWER_STATE
++ ePowerState)
++{
++ enum PVR_DEVICE_POWER_STATE eDevicePowerState;
++
++ switch (ePowerState) {
++ case PVRSRV_POWER_STATE_D0:
++ {
++ eDevicePowerState = PVR_DEVICE_POWER_STATE_ON;
++ break;
++ }
++ case PVRSRV_POWER_STATE_D3:
++ {
++ eDevicePowerState = PVR_DEVICE_POWER_STATE_OFF;
++ break;
++ }
++ default:
++ {
++ PVR_DPF(PVR_DBG_ERROR,
++ "MapDevicePowerState: Invalid state: %ld",
++ ePowerState);
++ eDevicePowerState = PVR_DEVICE_POWER_STATE_FORCE_I32;
++ PVR_DBG_BREAK;
++ }
++ }
++
++ return eDevicePowerState;
++}
++
++static void SGXGetTimingInfo(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ struct SGX_TIMING_INFORMATION sSGXTimingInfo = { 0 };
++ u32 ui32ActivePowManSampleRate;
++ struct timer_work_data *data = psDevInfo->hTimer;
++
++ SysGetSGXTimingInformation(&sSGXTimingInfo);
++
++ if (data) {
++ BUG_ON(data->armed);
++ /*
++ * The magic calculation below sets the hardware lock-up
++ * detection and recovery timer interval to ~150msecs.
++ * The interval length will be scaled based on the SGX
++ * functional clock frequency. The higher the frequency
++ * the shorter the interval and vice versa.
++ */
++ data->interval = 150 * SYS_SGX_PDS_TIMER_FREQ /
++ sSGXTimingInfo.ui32uKernelFreq;
++ }
++
++ writel(sSGXTimingInfo.ui32uKernelFreq /
++ sSGXTimingInfo.ui32HWRecoveryFreq,
++ &psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate);
++
++ psDevInfo->ui32CoreClockSpeed = sSGXTimingInfo.ui32CoreClockSpeed;
++ psDevInfo->ui32uKernelTimerClock =
++ sSGXTimingInfo.ui32CoreClockSpeed /
++ sSGXTimingInfo.ui32uKernelFreq;
++
++ ui32ActivePowManSampleRate =
++ sSGXTimingInfo.ui32uKernelFreq *
++ sSGXTimingInfo.ui32ActivePowManLatencyms / 1000;
++ ui32ActivePowManSampleRate += 1;
++ writel(ui32ActivePowManSampleRate,
++ &psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate);
++}
++
++void SGXStartTimer(struct PVRSRV_SGXDEV_INFO *psDevInfo, IMG_BOOL bStartOSTimer)
++{
++ u32 ui32RegVal;
++
++ ui32RegVal =
++ EUR_CR_EVENT_TIMER_ENABLE_MASK | psDevInfo->ui32uKernelTimerClock;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_TIMER, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_EVENT_TIMER, ui32RegVal,
++ PDUMP_FLAGS_CONTINUOUS);
++
++ if (bStartOSTimer) {
++ enum PVRSRV_ERROR eError;
++
++ eError = SGXOSTimerEnable(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXStartTimer : Failed to enable host timer");
++ }
++}
++
++static void SGXPollForClockGating(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32Register,
++ u32 ui32RegisterValue, char *pszComment)
++{
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32Register);
++ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
++ PVR_UNREFERENCED_PARAMETER(pszComment);
++
++#if !defined(NO_HARDWARE)
++ if (psDevInfo != NULL)
++ if (PollForValueKM
++ ((u32 __iomem *)psDevInfo->pvRegsBaseKM +
++ (ui32Register >> 2), 0,
++ ui32RegisterValue, MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ PVR_DPF(PVR_DBG_ERROR, "SGXPrePowerState: %s failed.",
++ pszComment);
++
++#endif
++
++ PDUMPCOMMENT(pszComment);
++ PDUMPREGPOL(ui32Register, 0, ui32RegisterValue);
++}
++
++static enum PVRSRV_ERROR SGXPrePowerState(void *hDevHandle,
++ enum PVR_DEVICE_POWER_STATE eNewPowerState,
++ enum PVR_DEVICE_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVR_DEVICE_POWER_STATE_ON)) {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ u32 ui32PowerCmd, ui32CompleteStatus;
++ struct SGXMKIF_COMMAND sCommand = { 0 };
++
++ eError = SGXOSTimerCancel(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXPrePowerState: Failed to disable timer");
++ return eError;
++ }
++
++ if (eNewPowerState == PVR_DEVICE_POWER_STATE_OFF) {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
++ ui32CompleteStatus =
++ PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
++ PDUMPCOMMENT("SGX power off request");
++ } else {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
++ ui32CompleteStatus =
++ PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
++ PDUMPCOMMENT("SGX idle request");
++ }
++
++ sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_POWERCMD;
++ sCommand.ui32Data[1] = ui32PowerCmd;
++
++ eError =
++ SGXScheduleCCBCommand(psDevInfo, SGXMKIF_COMMAND_EDM_KICK,
++ &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXPrePowerState: "
++ "Failed to submit power down command");
++ return eError;
++ }
++
++#if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXPrePowerState: "
++ "Wait for SGX ukernel power transition failed.");
++ PVR_DBG_BREAK;
++ }
++#endif
++
++#if defined(PDUMP)
++ PDUMPCOMMENT
++ ("TA/3D CCB Control - Wait for power event on uKernel.");
++ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL, ui32PowerStatus),
++ ui32CompleteStatus, ui32CompleteStatus,
++ PDUMP_POLL_OPERATOR_EQUAL, IMG_FALSE, IMG_FALSE,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelSGXHostCtlMemInfo));
++#endif
++
++ SGXPollForClockGating(psDevInfo,
++ psDevInfo->ui32ClkGateStatusReg,
++ psDevInfo->ui32ClkGateStatusMask,
++ "Wait for SGX clock gating");
++
++ if (eNewPowerState == PVR_DEVICE_POWER_STATE_OFF) {
++ eError = SGXDeinitialise(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXPrePowerState: "
++ "SGXDeinitialise failed: %lu",
++ eError);
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static enum PVRSRV_ERROR SGXPostPowerState(void *hDevHandle,
++ enum PVR_DEVICE_POWER_STATE eNewPowerState,
++ enum PVR_DEVICE_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVR_DEVICE_POWER_STATE_ON)) {
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ psDevInfo->psSGXHostCtl;
++
++ writel(0, &psSGXHostCtl->ui32PowerStatus);
++ PDUMPCOMMENT("TA/3D CCB Control - Reset power status");
++#if defined(PDUMP)
++ PDUMPMEM(NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL, ui32PowerStatus),
++ sizeof(u32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ if (eCurrentPowerState == PVR_DEVICE_POWER_STATE_OFF) {
++
++ SGXGetTimingInfo(psDeviceNode);
++
++ eError = SGXInitialise(psDevInfo, IMG_FALSE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXPostPowerState: SGXInitialise failed");
++ return eError;
++ }
++ } else {
++
++ struct SGXMKIF_COMMAND sCommand = { 0 };
++
++ SGXStartTimer(psDevInfo, IMG_TRUE);
++
++ sCommand.ui32Data[0] =
++ PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
++ eError =
++ SGXScheduleCCBCommand(psDevInfo,
++ SGXMKIF_COMMAND_EDM_KICK,
++ &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXPostPowerState failed to schedule CCB command: %lu",
++ eError);
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXPrePowerStateExt(void *hDevHandle,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState)
++{
++ enum PVR_DEVICE_POWER_STATE eNewDevicePowerState =
++ MapDevicePowerState(eNewPowerState);
++ enum PVR_DEVICE_POWER_STATE eCurrentDevicePowerState =
++ MapDevicePowerState(eCurrentPowerState);
++
++ return SGXPrePowerState(hDevHandle, eNewDevicePowerState,
++ eCurrentDevicePowerState);
++}
++
++enum PVRSRV_ERROR SGXPostPowerStateExt(void *hDevHandle,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ enum PVR_DEVICE_POWER_STATE eNewDevicePowerState =
++ MapDevicePowerState(eNewPowerState);
++ enum PVR_DEVICE_POWER_STATE eCurrentDevicePowerState =
++ MapDevicePowerState(eCurrentPowerState);
++
++ eError =
++ SGXPostPowerState(hDevHandle, eNewDevicePowerState,
++ eCurrentDevicePowerState);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "SGXPostPowerState : SGX Power Transition from %d to %d OK",
++ eCurrentPowerState, eNewPowerState);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR SGXPreClockSpeedChange(void *hDevHandle, IMG_BOOL bIdleDevice,
++ enum PVR_POWER_STATE eCurrentPowerState)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ if (eCurrentPowerState == PVRSRV_POWER_STATE_D0)
++ if (bIdleDevice) {
++ PDUMPSUSPEND();
++ eError =
++ SGXPrePowerState(hDevHandle,
++ PVR_DEVICE_POWER_STATE_IDLE,
++ PVR_DEVICE_POWER_STATE_ON);
++ if (eError != PVRSRV_OK) {
++ PDUMPRESUME();
++ return eError;
++ }
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "SGXPreClockSpeedChange: SGX clock speed was %luHz",
++ psDevInfo->ui32CoreClockSpeed);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SGXPostClockSpeedChange(void *hDevHandle,
++ IMG_BOOL bIdleDevice,
++ enum PVR_POWER_STATE eCurrentPowerState)
++{
++ struct PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ u32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;
++
++ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed);
++
++ if (eCurrentPowerState == PVRSRV_POWER_STATE_D0) {
++ SGXGetTimingInfo(psDeviceNode);
++ if (bIdleDevice) {
++ enum PVRSRV_ERROR eError;
++ eError =
++ SGXPostPowerState(hDevHandle,
++ PVR_DEVICE_POWER_STATE_ON,
++ PVR_DEVICE_POWER_STATE_IDLE);
++ PDUMPRESUME();
++ if (eError != PVRSRV_OK)
++ return eError;
++ } else {
++ SGXStartTimer(psDevInfo, IMG_TRUE);
++ }
++
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE, "SGXPostClockSpeedChange: "
++ "SGX clock speed changed from %luHz to %luHz",
++ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed);
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/pvr/sgxreset.c b/drivers/gpu/pvr/sgxreset.c
+new file mode 100644
+index 0000000..ad76a01
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxreset.c
+@@ -0,0 +1,223 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++static void SGXResetSoftReset(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bResetBIF, u32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ u32 ui32SoftResetRegVal =
++ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_SOFT_RESET_TA_RESET_MASK |
++ EUR_CR_SOFT_RESET_USE_RESET_MASK |
++ EUR_CR_SOFT_RESET_ISP_RESET_MASK | EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
++#endif
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ if (bResetBIF)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET,
++ ui32SoftResetRegVal);
++ if (bPDump)
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal,
++ ui32PDUMPFlags);
++}
++
++static void SGXResetSleep(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32PDUMPFlags, IMG_BOOL bPDump)
++{
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++ if (bPDump) {
++ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++ PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++ }
++
++}
++
++static void SGXResetInvalDC(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32PDUMPFlags, IMG_BOOL bPDump)
++{
++ u32 ui32RegVal;
++
++ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++ if (PollForValueKM(
++ (u32 __iomem *)((u8 __iomem *)psDevInfo->pvRegsBaseKM +
++ EUR_CR_BIF_MEM_REQ_STAT),
++ 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT, WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "Wait for DC invalidate failed.");
++ PVR_DBG_BREAK;
++ }
++
++ if (bPDump)
++ PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0,
++ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ ui32PDUMPFlags);
++}
++
++void SGXReset(struct PVRSRV_SGXDEV_INFO *psDevInfo, u32 ui32PDUMPFlags)
++{
++ u32 ui32RegVal;
++
++ const u32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
++
++
++#ifndef PDUMP
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ psDevInfo->ui32NumResets++;
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags,
++ "Start of SGX reset sequence\r\n");
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++ ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ for (;;) {
++ u32 ui32BifIntStat =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ struct IMG_DEV_VIRTADDR sBifFault;
++ u32 ui32PDIndex, ui32PTIndex;
++
++ if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++ break;
++
++ sBifFault.uiAddr =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_DPF(PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x",
++ ui32BifIntStat, sBifFault.uiAddr);
++ ui32PDIndex =
++ sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex =
++ (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags,
++ IMG_FALSE);
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] =
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr |
++ SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] =
++ psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR,
++ ui32RegVal);
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2,
++ ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags,
++ IMG_FALSE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++ }
++
++ {
++ u32 ui32EDMDirListReg;
++
++#if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
++#else
++
++ ui32EDMDirListReg =
++ EUR_CR_BIF_DIR_LIST_BASE1 +
++ 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg,
++ psDevInfo->sKernelPDDevPAddr.uiAddr);
++ PDUMPPDREGWITHFLAGS(ui32EDMDirListReg,
++ psDevInfo->sKernelPDDevPAddr.uiAddr,
++ ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++ }
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PVR_DPF(PVR_DBG_MESSAGE, "Soft Reset of SGX");
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
+diff --git a/drivers/gpu/pvr/sgxscript.h b/drivers/gpu/pvr/sgxscript.h
+new file mode 100644
+index 0000000..2bbb0ba
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxscript.h
+@@ -0,0 +1,65 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++
++#define SGX_MAX_INIT_COMMANDS 64
++#define SGX_MAX_DEINIT_COMMANDS 16
++
++enum SGX_INIT_OPERATION {
++ SGX_INIT_OP_ILLEGAL = 0,
++ SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++ SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++ SGX_INIT_OP_HALT
++};
++
++union SGX_INIT_COMMAND {
++ enum SGX_INIT_OPERATION eOp;
++ struct {
++ enum SGX_INIT_OPERATION eOp;
++ u32 ui32Offset;
++ u32 ui32Value;
++ } sWriteHWReg;
++#if defined(PDUMP)
++ struct {
++ enum SGX_INIT_OPERATION eOp;
++ u32 ui32Offset;
++ u32 ui32Value;
++ } sPDumpHWReg;
++#endif
++};
++
++struct SGX_INIT_SCRIPTS {
++ union SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS];
++ union SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS];
++ union SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++};
++
++#endif
+diff --git a/drivers/gpu/pvr/sgxtransfer.c b/drivers/gpu/pvr/sgxtransfer.c
+new file mode 100644
+index 0000000..6cc159e
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxtransfer.c
+@@ -0,0 +1,290 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++enum PVRSRV_ERROR SGXSubmitTransferKM(void *hDevHandle,
++ struct PVRSRV_TRANSFER_SGX_KICK *psKick)
++{
++ struct PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo =
++ (struct PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ struct SGXMKIF_COMMAND sCommand = { 0 };
++ struct SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ enum PVRSRV_ERROR eError;
++
++ if (!CCB_OFFSET_IS_VALID
++ (struct SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick,
++ ui32SharedCmdCCBOffset)) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXSubmitTransferKM: Invalid CCB offset");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psSharedTransferCmd =
++ CCB_DATA_FROM_OFFSET(struct SGXMKIF_TRANSFERCMD_SHARED,
++ psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psKick->hTASyncInfo;
++
++ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui32TASyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ } else {
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psKick->h3DSyncInfo;
++
++ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui323DSyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ } else {
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) {
++ if (psKick->ui32NumSrcSync > 0) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psKick->ahSrcSyncInfo[0];
++
++ psSharedTransferCmd->ui32SrcWriteOpPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32SrcReadOpPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sSrcWriteOpsCompleteDevAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sSrcReadOpsCompleteDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ if (psKick->ui32NumDstSync > 0) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psKick->ahDstSyncInfo[0];
++ psSharedTransferCmd->ui32DstWriteOpPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32DstReadOpPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ psSharedTransferCmd->sDstWriteOpsCompleteDevAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sDstReadOpsCompleteDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ if (psKick->ui32NumSrcSync > 0) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)
++ psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ }
++ if (psKick->ui32NumDstSync > 0) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++ if (psKick->ui32NumDstSync > 1 || psKick->ui32NumSrcSync > 1) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "Transfer command doesn't support "
++ "more than 1 sync object per src/dst\ndst: %d, src: %d",
++ psKick->ui32NumDstSync, psKick->ui32NumSrcSync);
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM() ||
++ ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) {
++ PDUMPCOMMENT("Shared part of transfer command\r\n");
++ PDUMPMEM(psSharedTransferCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(struct SGXMKIF_TRANSFERCMD_SHARED),
++ psKick->ui32PDumpFlags, MAKEUNIQUETAG(psCCBMemInfo));
++
++ if ((psKick->ui32NumSrcSync > 0) &&
++ ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo = psKick->ahSrcSyncInfo[0];
++
++ PDUMPCOMMENT
++ ("Hack src surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_TRANSFERCMD_SHARED,
++ ui32SrcWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT
++ ("Hack src surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_TRANSFERCMD_SHARED,
++ ui32SrcReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++ if ((psKick->ui32NumDstSync > 0) &&
++ ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo = psKick->ahDstSyncInfo[0];
++
++ PDUMPCOMMENT
++ ("Hack dest surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_TRANSFERCMD_SHARED,
++ ui32DstWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT
++ ("Hack dest surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(struct SGXMKIF_TRANSFERCMD_SHARED,
++ ui32DstReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ if ((psKick->ui32NumSrcSync > 0) &&
++ ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ }
++
++ if ((psKick->ui32NumDstSync > 0) &&
++ ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
++ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_COMMAND_EDM_KICK,
++ &sCommand, KERNEL_ID,
++ psKick->ui32PDumpFlags);
++
++#if defined(NO_HARDWARE)
++ if (!(psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE)) {
++ u32 i;
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for (i = 0; i < psKick->ui32NumDstSync; i++) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ ahDstSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ }
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo =
++ (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (struct PVRSRV_KERNEL_SYNC_INFO *)psKick->
++ h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++#endif
++
++ return eError;
++}
++
+diff --git a/drivers/gpu/pvr/sgxutils.c b/drivers/gpu/pvr/sgxutils.c
+new file mode 100644
+index 0000000..feb3504
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxutils.c
+@@ -0,0 +1,750 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#include <linux/tty.h>
++#include <linux/io.h>
++
++static void SGXPostActivePowerEvent(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ u32 ui32CallerID)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ psDevInfo->psSGXHostCtl;
++ u32 l;
++
++ l = readl(&psSGXHostCtl->ui32NumActivePowerEvents);
++ l++;
++ writel(l, &psSGXHostCtl->ui32NumActivePowerEvents);
++
++ l = readl(&psSGXHostCtl->ui32PowerStatus);
++ if (l & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) {
++ if (ui32CallerID == ISR_ID)
++ psDeviceNode->bReProcessDeviceCommandComplete =
++ IMG_TRUE;
++ else
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++}
++
++void SGXTestActivePowerEvent(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ u32 ui32CallerID)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++ u32 l;
++
++ l = readl(&psSGXHostCtl->ui32InterruptFlags);
++ if (!(l & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER))
++ return;
++
++ l = readl(&psSGXHostCtl->ui32InterruptClearFlags);
++ if (l & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER)
++ return;
++
++ /* Microkernel is idle and is requesting to be powered down. */
++ l = readl(&psSGXHostCtl->ui32InterruptClearFlags);
++ l |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++ writel(l, &psSGXHostCtl->ui32InterruptClearFlags);
++
++ PDUMPSUSPEND();
++
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.
++ ui32DeviceIndex,
++ PVRSRV_POWER_STATE_D3,
++ ui32CallerID, IMG_FALSE);
++ if (eError == PVRSRV_OK)
++ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
++ if (eError == PVRSRV_ERROR_RETRY) {
++ l = readl(&psSGXHostCtl->ui32InterruptClearFlags);
++ l &= ~PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++ writel(l, &psSGXHostCtl->ui32InterruptClearFlags);
++ eError = PVRSRV_OK;
++ }
++
++ PDUMPRESUME();
++
++ if (eError != PVRSRV_OK) /* eError is an enum (int): use %d */
++ PVR_DPF(PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%d",
++ eError);
++}
++
++static inline struct SGXMKIF_COMMAND *SGXAcquireKernelCCBSlot(
++ struct PVRSRV_SGX_CCB_INFO *psCCB)
++{
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (((*psCCB->pui32WriteOffset + 1) & 255) !=
++ *psCCB->pui32ReadOffset) {
++ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++ }
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ return NULL;
++}
++
++enum PVRSRV_ERROR SGXScheduleCCBCommand(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ enum SGXMKIF_COMMAND_TYPE eCommandType,
++ struct SGXMKIF_COMMAND *psCommandData,
++ u32 ui32CallerID, u32 ui32PDumpFlags)
++{
++ struct PVRSRV_SGX_CCB_INFO *psKernelCCB;
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++ struct SGXMKIF_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++ void *pvDumpCommand;
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32CallerID);
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++#endif
++
++ psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++ if (!psSGXCommand) {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++
++ psCommandData->ui32Data[2] = psDevInfo->ui32CacheControl;
++
++#if defined(PDUMP)
++
++ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++ psDevInfo->ui32CacheControl = 0;
++
++ *psSGXCommand = *psCommandData;
++
++ switch (eCommandType) {
++ case SGXMKIF_COMMAND_EDM_KICK:
++ psSGXCommand->ui32ServiceAddress =
++ psDevInfo->ui32HostKickAddress;
++ break;
++ case SGXMKIF_COMMAND_REQUEST_SGXMISCINFO:
++ psSGXCommand->ui32ServiceAddress =
++ psDevInfo->ui32GetMiscInfoAddress;
++ break;
++ case SGXMKIF_COMMAND_VIDEO_KICK:
++ default:
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXScheduleCCBCommandKM: Unknown command type: %d",
++ eCommandType);
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++ }
++
++#if defined(PDUMP)
++ if (ui32CallerID != ISR_ID) {
++ PDUMPCOMMENTWITHFLAGS(0,
++ "Poll for space in the Kernel CCB\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(struct PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, 0xff,
++ PDUMP_POLL_OPERATOR_NOTEQUAL, IMG_FALSE, IMG_FALSE,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++ PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB command\r\n");
++ pvDumpCommand =
++ (void *)((u8 *)psKernelCCB->psCCBMemInfo->
++ pvLinAddrKM +
++ (*psKernelCCB->pui32WriteOffset *
++ sizeof(struct SGXMKIF_COMMAND)));
++
++ PDUMPMEM(pvDumpCommand,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff *
++ sizeof(struct SGXMKIF_COMMAND),
++ sizeof(struct SGXMKIF_COMMAND), ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff *
++ sizeof(struct SGXMKIF_COMMAND) +
++ offsetof(struct SGXMKIF_COMMAND, ui32Data[2]),
++ sizeof(u32), ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ if (PDumpIsCaptureFrameKM() ||
++ ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ psDevInfo->sPDContext.ui32CacheControl = 0;
++ }
++#endif
++ *psKernelCCB->pui32WriteOffset =
++ (*psKernelCCB->pui32WriteOffset + 1) & 255;
++
++#if defined(PDUMP)
++ if (ui32CallerID != ISR_ID) {
++ if (PDumpIsCaptureFrameKM() ||
++ ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ psKernelCCB->ui32CCBDumpWOff =
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++
++ PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB write offset\r\n");
++ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++ psKernelCCB->psCCBCtlMemInfo,
++ offsetof(struct PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++ sizeof(u32), ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB event kicker\r\n");
++ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(u32), ui32PDumpFlags,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelCCBEventKickerMemInfo));
++ PDUMPCOMMENTWITHFLAGS(0, "Event kick\r\n");
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK, 0);
++ }
++#endif
++ *psDevInfo->pui32KernelCCBEventKicker =
++ (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++
++#if defined(NO_HARDWARE)
++
++ *psKernelCCB->pui32ReadOffset =
++ (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++ return eError;
++}
++
++enum PVRSRV_ERROR SGXScheduleCCBCommandKM(
++ struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ enum SGXMKIF_COMMAND_TYPE eCommandType,
++ struct SGXMKIF_COMMAND *psCommandData,
++ u32 ui32CallerID, u32 ui32PDumpFlags)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PDUMPSUSPEND();
++
++ eError =
++ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_POWER_STATE_D0, ui32CallerID,
++ IMG_TRUE);
++
++ PDUMPRESUME();
++
++ if (eError == PVRSRV_OK) {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
++ } else {
++ if (eError == PVRSRV_ERROR_RETRY) {
++ if (ui32CallerID == ISR_ID) {
++ psDeviceNode->bReProcessDeviceCommandComplete =
++ IMG_TRUE;
++ eError = PVRSRV_OK;
++ } else {
++
++ }
++ } else /* ui32CallerID is u32 (%u); eError is an enum (%d) */
++ PVR_DPF(PVR_DBG_ERROR, "SGXScheduleCCBCommandKM "
++ "failed to acquire lock - "
++ "ui32CallerID:%u eError:%d",
++ ui32CallerID, eError);
++ return eError;
++ }
++
++ eError = SGXScheduleCCBCommand(psDevInfo, eCommandType, psCommandData,
++ ui32CallerID, ui32PDumpFlags);
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ if (ui32CallerID != ISR_ID)
++ SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
++
++ return eError;
++}
++
++enum PVRSRV_ERROR SGXScheduleProcessQueuesKM(struct PVRSRV_DEVICE_NODE
++ *psDeviceNode)
++{
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ struct SGXMKIF_HOST_CTL *psHostCtl =
++ psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++ u32 ui32PowerStatus;
++ struct SGXMKIF_COMMAND sCommand = { 0 };
++
++ ui32PowerStatus = psHostCtl->ui32PowerStatus;
++ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ return PVRSRV_OK;
++
++ sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
++ eError =
++ SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_COMMAND_EDM_KICK,
++ &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXScheduleProcessQueuesKM failed "
++ "to schedule CCB command: %d",
++ eError);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL SGXIsDevicePowered(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++enum PVRSRV_ERROR SGXGetInternalDevInfoKM(void *hDevCookie,
++ struct SGX_INTERNAL_DEVINFO
++ *psSGXInternalDevInfo)
++{
++ struct PVRSRV_SGXDEV_INFO *psDevInfo = (struct PVRSRV_SGXDEV_INFO *)
++ ((struct PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
++
++ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
++ (void *)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++ return PVRSRV_OK;
++}
++
++#if defined(PDUMP) && !defined(EDM_USSE_HWDEBUG)
++#define PDUMP_SGX_CLEANUP
++#endif
++
++void SGXCleanupRequest(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ u32 ui32ResManRequestFlag)
++{
++ struct PVRSRV_SGXDEV_INFO *psSGXDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ struct PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo =
++ psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++ struct SGXMKIF_HOST_CTL __iomem *psSGXHostCtl =
++ (struct SGXMKIF_HOST_CTL __iomem __force *)
++ psSGXHostCtlMemInfo->pvLinAddrKM;
++#if defined(PDUMP_SGX_CLEANUP)
++ void *hUniqueTag = MAKEUNIQUETAG(psSGXHostCtlMemInfo);
++#endif
++ u32 l;
++
++ if (readl(&psSGXHostCtl->ui32PowerStatus) &
++ PVRSRV_USSE_EDM_POWMAN_NO_WORK) {
++ ;
++ } else {
++ if (psSGXDevInfo->ui32CacheControl &
++ SGX_BIF_INVALIDATE_PDCACHE) {
++ l = readl(&psSGXHostCtl->ui32ResManFlags);
++ l |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
++ writel(l, &psSGXHostCtl->ui32ResManFlags);
++
++ psSGXDevInfo->ui32CacheControl ^=
++ SGX_BIF_INVALIDATE_PDCACHE;
++ }
++ if (psSGXDevInfo->ui32CacheControl &
++ SGX_BIF_INVALIDATE_PTCACHE) {
++ l = readl(&psSGXHostCtl->ui32ResManFlags);
++ l |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
++ writel(l, &psSGXHostCtl->ui32ResManFlags);
++
++ psSGXDevInfo->ui32CacheControl ^=
++ SGX_BIF_INVALIDATE_PTCACHE;
++ }
++
++ if (psHWDataDevVAddr == NULL)
++ writel(0, &psSGXHostCtl->sResManCleanupData.uiAddr);
++ else
++ writel(psHWDataDevVAddr->uiAddr,
++ &psSGXHostCtl->sResManCleanupData.uiAddr);
++
++ l = readl(&psSGXHostCtl->ui32ResManFlags);
++ l |= ui32ResManRequestFlag;
++ writel(l, &psSGXHostCtl->ui32ResManFlags);
++
++#if defined(PDUMP_SGX_CLEANUP)
++
++ PDUMPCOMMENTWITHFLAGS(0,
++ "TA/3D CCB Control - Request clean-up event on uKernel...");
++ PDUMPMEM(NULL, psSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL,
++ sResManCleanupData.uiAddr), sizeof(u32), 0,
++ hUniqueTag);
++ PDUMPMEM(&ui32ResManRequestFlag, psSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL, ui32ResManFlags),
++ sizeof(u32), 0, hUniqueTag);
++#else
++ PDUMPCOMMENTWITHFLAGS(0, "Clean-up event on uKernel disabled");
++#endif
++
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++#if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psSGXHostCtl->ui32ResManFlags,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXCleanupRequest: "
++ "Wait for uKernel to clean up failed");
++ PVR_DBG_BREAK;
++ }
++#endif
++
++#if defined(PDUMP_SGX_CLEANUP)
++
++ PDUMPCOMMENTWITHFLAGS(0, "TA/3D CCB Control - "
++ "Wait for clean-up request to complete...");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL, ui32ResManFlags),
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL, IMG_FALSE, IMG_FALSE,
++ hUniqueTag);
++#endif
++
++ l = readl(&psSGXHostCtl->ui32ResManFlags);
++ l &= ~ui32ResManRequestFlag;
++ writel(l, &psSGXHostCtl->ui32ResManFlags);
++
++ l = readl(&psSGXHostCtl->ui32ResManFlags);
++ l &= ~PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE;
++ writel(l, &psSGXHostCtl->ui32ResManFlags);
++
++#if defined(PDUMP_SGX_CLEANUP)
++ PDUMPMEM(NULL, psSGXHostCtlMemInfo,
++ offsetof(struct SGXMKIF_HOST_CTL, ui32ResManFlags),
++ sizeof(u32), 0, hUniqueTag);
++#endif
++ }
++}
++
++struct SGX_HW_RENDER_CONTEXT_CLEANUP {
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++ void *hBlockAlloc;
++ struct RESMAN_ITEM *psResItem;
++};
++
++static enum PVRSRV_ERROR SGXCleanupHWRenderContextCallback(void *pvParam,
++ u32 ui32Param)
++{
++ struct SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWRenderContextDevVAddr,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++struct SGX_HW_TRANSFER_CONTEXT_CLEANUP {
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++ void *hBlockAlloc;
++ struct RESMAN_ITEM *psResItem;
++};
++
++static enum PVRSRV_ERROR SGXCleanupHWTransferContextCallback(void *pvParam,
++ u32 ui32Param)
++{
++ struct SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup =
++ (struct SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWTransferContextDevVAddr,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++void *SGXRegisterHWRenderContextKM(void *psDeviceNode,
++ struct IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ enum PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ struct SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++ struct RESMAN_ITEM *psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct SGX_HW_RENDER_CONTEXT_CLEANUP),
++ (void **) &psCleanup, &hBlockAlloc);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXRegisterHWRenderContextKM: "
++ "Couldn't allocate memory for struct "
++ "SGX_HW_RENDER_CONTEXT_CLEANUP structure");
++ return NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ (void *) psCleanup,
++ 0, &SGXCleanupHWRenderContextCallback);
++
++ if (psResItem == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: "
++ "ResManRegisterRes failed");
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (void *)psCleanup;
++}
++
++enum PVRSRV_ERROR SGXUnregisterHWRenderContextKM(void *hHWRenderContext)
++{
++ struct SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWRenderContext != NULL);
++
++ psCleanup = (struct SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++ if (psCleanup == NULL) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXUnregisterHWRenderContextKM: invalid parameter");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ ResManFreeResByPtr(psCleanup->psResItem);
++
++ return PVRSRV_OK;
++}
++
++void *SGXRegisterHWTransferContextKM(void *psDeviceNode,
++ struct IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ enum PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ struct SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++ struct RESMAN_ITEM *psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ (void **) &psCleanup, &hBlockAlloc);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGXRegisterHWTransferContextKM: "
++ "Couldn't allocate memory for struct "
++ "SGX_HW_TRANSFER_CONTEXT_CLEANUP structure");
++ return NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ psCleanup,
++ 0, &SGXCleanupHWTransferContextCallback);
++
++ if (psResItem == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: "
++ "ResManRegisterRes failed");
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (void *)psCleanup;
++}
++
++enum PVRSRV_ERROR SGXUnregisterHWTransferContextKM(void *hHWTransferContext)
++{
++ struct SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWTransferContext != NULL);
++
++ psCleanup =
++ (struct SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
++
++ if (psCleanup == NULL) {
++ PVR_DPF(PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: "
++ "invalid parameter");
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ ResManFreeResByPtr(psCleanup->psResItem);
++
++ return PVRSRV_OK;
++}
++
++static inline IMG_BOOL SGX2DQuerySyncOpsComplete(
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ u32 ui32ReadOpsPending, u32 ui32WriteOpsPending)
++{
++ struct PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ return (IMG_BOOL)((psSyncData->ui32ReadOpsComplete >=
++ ui32ReadOpsPending) &&
++ (psSyncData->ui32WriteOpsComplete >=
++ ui32WriteOpsPending));
++}
++
++enum PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(
++ struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ struct PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete)
++{
++ u32 ui32ReadOpsPending, ui32WriteOpsPending;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ PVR_DPF(PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start");
++
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (SGX2DQuerySyncOpsComplete
++ (psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) {
++
++ PVR_DPF(PVR_DBG_CALLTRACE,
++ "SGX2DQueryBlitsCompleteKM: No wait. Blits complete.");
++ return PVRSRV_OK;
++ }
++
++ if (!bWaitForComplete) {
++
++ PVR_DPF(PVR_DBG_CALLTRACE,
++ "SGX2DQueryBlitsCompleteKM: No wait. Ops pending.");
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling.");
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++
++ if (SGX2DQuerySyncOpsComplete
++ (psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) {
++
++ PVR_DPF(PVR_DBG_CALLTRACE,
++ "SGX2DQueryBlitsCompleteKM: "
++ "Wait over. Blits complete.");
++ return PVRSRV_OK;
++ }
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF(PVR_DBG_ERROR,
++ "SGX2DQueryBlitsCompleteKM: Timed out. Ops pending.");
++
++#if defined(DEBUG)
++ {
++ struct PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ PVR_TRACE("SGX2DQueryBlitsCompleteKM: "
++ "Syncinfo: %p, Syncdata: %p",
++ psSyncInfo, psSyncData);
++
++ PVR_TRACE("SGX2DQueryBlitsCompleteKM: "
++ "Read ops complete: %d, Read ops pending: %d",
++ psSyncData->ui32ReadOpsComplete,
++ psSyncData->ui32ReadOpsPending);
++ PVR_TRACE("SGX2DQueryBlitsCompleteKM: "
++ "Write ops complete: %d, Write ops pending: %d",
++ psSyncData->ui32WriteOpsComplete,
++ psSyncData->ui32WriteOpsPending);
++
++ }
++#endif
++ return PVRSRV_ERROR_TIMEOUT;
++}
++
++void SGXFlushHWRenderTargetKM(void *psDeviceNode,
++ struct IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr);
++
++ SGXCleanupRequest((struct PVRSRV_DEVICE_NODE *)psDeviceNode,
++ &sHWRTDataSetDevVAddr,
++ PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST);
++}
++
++u32 SGXConvertTimeStamp(struct PVRSRV_SGXDEV_INFO *psDevInfo, u32 ui32TimeWraps,
++ u32 ui32Time)
++{
++ u64 ui64Clocks;
++ u32 ui32Clocksx16;
++
++ ui64Clocks = ((u64) ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
++ (psDevInfo->ui32uKernelTimerClock -
++ (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
++ ui32Clocksx16 = (u32) (ui64Clocks / 16);
++
++ return ui32Clocksx16;
++}
+diff --git a/drivers/gpu/pvr/sgxutils.h b/drivers/gpu/pvr/sgxutils.h
+new file mode 100644
+index 0000000..66b61e8
+--- /dev/null
++++ b/drivers/gpu/pvr/sgxutils.h
+@@ -0,0 +1,77 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "perproc.h"
++
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++ ((sizeof(type) <= (psCCBMemInfo)->ui32AllocSize) && \
++ ((psCCBKick)->offset <= (psCCBMemInfo)->ui32AllocSize - sizeof(type)))
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++ ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
++ (psCCBKick)->offset))
++
++void SGXTestActivePowerEvent(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ u32 ui32CallerID);
++
++enum PVRSRV_ERROR SGXScheduleCCBCommand(
++ struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ enum SGXMKIF_COMMAND_TYPE eCommandType,
++ struct SGXMKIF_COMMAND *psCommandData,
++ u32 ui32CallerID, u32 ui32PDumpFlags);
++enum PVRSRV_ERROR SGXScheduleCCBCommandKM(
++ struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ enum SGXMKIF_COMMAND_TYPE eCommandType,
++ struct SGXMKIF_COMMAND *psCommandData,
++ u32 ui32CallerID, u32 ui32PDumpFlags);
++
++enum PVRSRV_ERROR SGXScheduleProcessQueuesKM(
++ struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL SGXIsDevicePowered(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++void *SGXRegisterHWRenderContextKM(void *psDeviceNode,
++ struct IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++void *SGXRegisterHWTransferContextKM(void *psDeviceNode,
++ struct IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ struct PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++void SGXFlushHWRenderTargetKM(void *psDeviceNode,
++ struct IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr);
++
++enum PVRSRV_ERROR SGXUnregisterHWRenderContextKM(void *hHWRenderContext);
++
++enum PVRSRV_ERROR SGXUnregisterHWTransferContextKM(void *hHWTransferContext);
++
++u32 SGXConvertTimeStamp(struct PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32TimeWraps, u32 ui32Time);
++
++void SGXCleanupRequest(struct PVRSRV_DEVICE_NODE *psDeviceNode,
++ struct IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ u32 ui32ResManRequestFlag);
++
+diff --git a/drivers/gpu/pvr/srvkm.h b/drivers/gpu/pvr/srvkm.h
+new file mode 100644
+index 0000000..3dbcd24
+--- /dev/null
++++ b/drivers/gpu/pvr/srvkm.h
+@@ -0,0 +1,50 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++enum PVRSRV_ERROR PVRSRVProcessConnect(u32 ui32PID);
++void PVRSRVProcessDisconnect(u32 ui32PID);
++ void PVRSRVSetDCState(u32 ui32State);
++ enum PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(void *hArena,
++ u8 *pbyBuffer,
++ u32 *puiBufSize,
++ IMG_BOOL bSave);
++
++#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
++{ \
++ u32 uiOffset, uiStart, uiCurrent; \
++ for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart+1; \
++ (uiCurrent - uiStart + uiOffset) < TIMEOUT; \
++ uiCurrent = OSClockus(), \
++ uiOffset = uiCurrent < uiStart ? \
++ IMG_UINT32_MAX - uiStart : uiOffset, \
++ uiStart = uiCurrent < uiStart ? 0 : uiStart)
++
++#define END_LOOP_UNTIL_TIMEOUT() \
++}
++#endif
+diff --git a/drivers/gpu/pvr/syscommon.h b/drivers/gpu/pvr/syscommon.h
+new file mode 100644
+index 0000000..4d47e7f
+--- /dev/null
++++ b/drivers/gpu/pvr/syscommon.h
+@@ -0,0 +1,179 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"
++#include "sysinfo.h"
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#if defined(NO_HARDWARE) && defined(__KERNEL__)
++#include <linux/io.h>
++#endif
++
++struct SYS_DEVICE_ID {
++ u32 uiID;
++ IMG_BOOL bInUse;
++
++};
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4
++
++struct SYS_DATA {
++ u32 ui32NumDevices;
++ struct SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT];
++ struct PVRSRV_DEVICE_NODE *psDeviceNodeList;
++ struct PVRSRV_POWER_DEV *psPowerDeviceList;
++ struct PVRSRV_RESOURCE sPowerStateChangeResource;
++ enum PVR_POWER_STATE eCurrentPowerState;
++ enum PVR_POWER_STATE eFailedPowerState;
++ u32 ui32CurrentOSPowerState;
++ struct PVRSRV_QUEUE_INFO *psQueueList;
++ struct PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList;
++ void *pvEnvSpecificData;
++ void *pvSysSpecificData;
++ struct PVRSRV_RESOURCE sQProcessResource;
++ void *pvSOCRegsBase;
++ void *hSOCTimerRegisterOSMemHandle;
++ u32 *pvSOCTimerRegisterKM;
++ void *pvSOCClockGateRegsBase;
++ u32 ui32SOCClockGateRegsSize;
++ IMG_BOOL (**ppfnCmdProcList[SYS_DEVICE_COUNT])(void *, u32, void *);
++
++ struct COMMAND_COMPLETE_DATA **ppsCmdCompleteData[SYS_DEVICE_COUNT];
++
++ IMG_BOOL bReProcessQueues;
++
++ struct RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS];
++
++ char *pszVersionString;
++ struct PVRSRV_EVENTOBJECT *psGlobalEventObject;
++#if defined(PDUMP)
++ IMG_BOOL bPowerUpPDumped;
++#endif
++};
++
++enum PVRSRV_ERROR SysInitialise(void);
++enum PVRSRV_ERROR SysFinalise(void);
++
++enum PVRSRV_ERROR SysDeinitialise(struct SYS_DATA *psSysData);
++
++enum PVRSRV_ERROR SysGetDeviceMemoryMap(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void **ppvDeviceMap);
++
++void SysRegisterExternalDevice(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++void SysRemoveExternalDevice(struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++u32 SysGetInterruptSource(struct SYS_DATA *psSysData,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode);
++
++void SysClearInterrupts(struct SYS_DATA *psSysData,
++ u32 ui32ClearBits);
++
++enum PVRSRV_ERROR SysResetDevice(u32 ui32DeviceIndex);
++
++enum PVRSRV_ERROR SysSystemPrePowerState(enum PVR_POWER_STATE eNewPowerState);
++enum PVRSRV_ERROR SysSystemPostPowerState(enum PVR_POWER_STATE eNewPowerState);
++enum PVRSRV_ERROR SysDevicePrePowerState(u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState);
++enum PVRSRV_ERROR SysDevicePostPowerState(u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE
++ eCurrentPowerState);
++
++enum PVRSRV_ERROR SysOEMFunction(u32 ui32ID,
++ void *pvIn,
++ u32 ulInSize,
++ void *pvOut, u32 ulOutSize);
++
++struct IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_CPU_PHYADDR cpu_paddr);
++struct IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_SYS_PHYADDR SysPAddr);
++struct IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_DEV_PHYADDR SysPAddr);
++struct IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(struct IMG_SYS_PHYADDR SysPAddr);
++struct IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr(struct IMG_CPU_PHYADDR cpu_paddr);
++
++extern struct SYS_DATA *gpsSysData;
++
++
++static inline enum PVRSRV_ERROR SysAcquireData(struct SYS_DATA **ppsSysData)
++{
++ *ppsSysData = gpsSysData;
++
++ if (!gpsSysData)
++ return PVRSRV_ERROR_GENERIC;
++
++ return PVRSRV_OK;
++}
++
++static inline enum PVRSRV_ERROR SysInitialiseCommon(struct SYS_DATA *psSysData)
++{
++ enum PVRSRV_ERROR eError;
++ eError = PVRSRVInit(psSysData);
++ return eError;
++}
++
++static inline void SysDeinitialiseCommon(struct SYS_DATA *psSysData)
++{
++ PVRSRVDeInit(psSysData);
++ OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++
++#if !(defined(NO_HARDWARE) && defined(__KERNEL__))
++#define SysReadHWReg(p, o) OSReadHWReg(p, o)
++#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
++#else
++static inline u32 SysReadHWReg(void *pvLinRegBaseAddr, u32 ui32Offset)
++{
++ return (u32)readl(pvLinRegBaseAddr + ui32Offset);
++}
++
++static inline void SysWriteHWReg(void *pvLinRegBaseAddr, u32 ui32Offset,
++ u32 ui32Value)
++{
++ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
++}
++#endif
++
++bool sgx_is_530(void);
++u32 sgx_get_rev(void);
++void sgx_ocp_write_reg(u32 reg, u32 val);
++unsigned long sgx_get_max_freq(void);
++
++#endif
+diff --git a/drivers/gpu/pvr/sysconfig.c b/drivers/gpu/pvr/sysconfig.c
+new file mode 100644
+index 0000000..78466b7
+--- /dev/null
++++ b/drivers/gpu/pvr/sysconfig.c
+@@ -0,0 +1,818 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "pdump_km.h"
++#include "sgxinfokm.h"
++#include "syslocal.h"
++#include "sysconfig.h"
++#include "syscommon.h"
++#include "img_types.h"
++#include "ocpdefs.h"
++#include "pvr_bridge_km.h"
++
++struct SYS_DATA *gpsSysData;
++static struct SYS_DATA gsSysData;
++
++static struct SYS_SPECIFIC_DATA gsSysSpecificData;
++struct SYS_SPECIFIC_DATA *gpsSysSpecificData;
++
++static u32 gui32SGXDeviceID;
++static struct SGX_DEVICE_MAP gsSGXDeviceMap;
++static struct PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++
++#define DEVICE_SGX_INTERRUPT (1 << 0)
++
++#if defined(NO_HARDWARE)
++static void *gsSGXRegsCPUVAddr;
++#endif
++
++static void __iomem *ocp_base;
++
++static enum PVRSRV_ERROR SysLocateDevices(struct SYS_DATA *psSysData)
++{
++#if defined(NO_HARDWARE)
++ enum PVRSRV_ERROR eError;
++ struct IMG_CPU_PHYADDR sCpuPAddr;
++#endif
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++
++#if defined(NO_HARDWARE)
++
++ eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE,
++ &gsSGXRegsCPUVAddr, &sCpuPAddr);
++ if (eError != PVRSRV_OK)
++ return eError;
++ gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
++ gsSGXDeviceMap.sRegsSysPBase =
++ SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
++ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++
++ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
++
++ OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
++
++ gsSGXDeviceMap.ui32IRQ = 0;
++
++#else
++
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr =
++ SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
++ gsSGXDeviceMap.sRegsCpuPBase =
++ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++
++ gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++#ifndef NO_HARDWARE
++u32 sgx_get_rev(void)
++{
++ /*
++ * Ugly solution, used until we have proper per device instances
++ * passed to functions and get rid of most if not all globals.
++ */
++ struct SYS_SPECIFIC_DATA *sysd = gpsSysSpecificData;
++ void __iomem *regs;
++ static u32 rev = -1UL;
++ int err;
++
++ if (rev != -1UL)
++ return rev;
++
++ regs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ SYS_OMAP3430_SGX_REGS_SIZE,
++ PVRSRV_HAP_UNCACHED | PVRSRV_HAP_KERNEL_ONLY,
++ NULL);
++ if (!regs)
++ return 0;
++
++ err = clk_enable(sysd->psSGX_FCK);
++ BUG_ON(err);
++ err = clk_enable(sysd->psSGX_ICK);
++ BUG_ON(err);
++
++ rev = OSReadHWReg(regs, EUR_CR_CORE_REVISION);
++
++ clk_disable(sysd->psSGX_ICK);
++ clk_disable(sysd->psSGX_FCK);
++
++ OSUnMapPhysToLin(regs, SYS_OMAP3430_SGX_REGS_SIZE,
++ PVRSRV_HAP_UNCACHED | PVRSRV_HAP_KERNEL_ONLY, NULL);
++
++ return rev;
++}
++
++unsigned long sgx_get_max_freq(void)
++{
++ if (sgx_is_530()) {
++ switch (sgx_get_rev()) {
++ case EUR_CR_CORE_MAKE_REV(1, 2, 1):
++ return SYS_SGX_MAX_FREQ_530R121;
++ case EUR_CR_CORE_MAKE_REV(1, 2, 5):
++ return SYS_SGX_MAX_FREQ_530R125;
++ }
++ }
++ BUG();
++}
++
++#else
++
++u32 sgx_get_rev(void)
++{
++ return 0;
++}
++
++unsigned long sgx_get_max_freq()
++{
++ return SYS_SGX_MAX_FREQ_NO_HW;
++}
++
++#endif
++
++bool sgx_is_530(void)
++{
++#ifdef SGX530
++ return true;
++#endif
++ return false;
++}
++
++char *SysCreateVersionString(struct IMG_CPU_PHYADDR sRegRegion)
++{
++ static char aszVersionString[100];
++ struct SYS_DATA *psSysData;
++ u32 ui32SGXRevision;
++ s32 i32Count;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ return NULL;
++
++ ui32SGXRevision = sgx_get_rev();
++
++ i32Count = OSSNPrintf(aszVersionString, 100,
++ "SGX revision = %u.%u.%u",
++ (unsigned)((ui32SGXRevision &
++ EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (unsigned)((ui32SGXRevision &
++ EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (unsigned)((ui32SGXRevision &
++ EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++
++ if (i32Count == -1)
++ return NULL;
++
++ return aszVersionString;
++}
++
++static int sgx_ocp_init(void)
++{
++ struct IMG_SYS_PHYADDR sys_pbase;
++ struct IMG_CPU_PHYADDR cpu_pbase;
++
++ sys_pbase.uiAddr = SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE;
++ cpu_pbase = SysSysPAddrToCpuPAddr(sys_pbase);
++
++ ocp_base = OSMapPhysToLin(cpu_pbase, SYS_OMAP3430_OCP_REGS_SIZE,
++ PVRSRV_HAP_UNCACHED | PVRSRV_HAP_KERNEL_ONLY,
++ NULL);
++
++ if (!ocp_base) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: Failed to map OCP registers",
++ __func__);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void sgx_ocp_cleanup(void)
++{
++ OSUnMapPhysToLin(ocp_base, SYS_OMAP3430_OCP_REGS_SIZE,
++ PVRSRV_HAP_UNCACHED | PVRSRV_HAP_KERNEL_ONLY, NULL);
++}
++
++void sgx_ocp_write_reg(u32 reg, u32 val)
++{
++ BUG_ON(!ocp_base);
++
++ /* OCP offsets are based at EUR_CR_OCP_REVISION */
++ reg -= EUR_CR_OCP_REVISION;
++ OSWriteHWReg(ocp_base, reg, val);
++}
++
++enum PVRSRV_ERROR SysInitialise(void)
++{
++ u32 i;
++ enum PVRSRV_ERROR eError;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ struct IMG_CPU_PHYADDR TimerRegPhysBase;
++
++ gpsSysData = &gsSysData;
++
++ gpsSysSpecificData = &gsSysSpecificData;
++
++ gpsSysData->pvSysSpecificData = gpsSysSpecificData;
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to setup env structure");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_ENVDATA);
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++ for (i = 0; i < SYS_DEVICE_COUNT; i++) {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = NULL;
++ gpsSysData->psQueueList = NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed in SysInitialiseCommon");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++
++ TimerRegPhysBase.uiAddr =
++ SYS_OMAP3430_GP11TIMER_PHYS_BASE + SYS_OMAP3430_GPTIMER_REGS;
++ gpsSysData->pvSOCTimerRegisterKM = NULL;
++ gpsSysData->hSOCTimerRegisterOSMemHandle = NULL;
++ OSReservePhys(TimerRegPhysBase, 4,
++ PVRSRV_HAP_MULTI_PROCESS | PVRSRV_HAP_UNCACHED,
++ (void **)&gpsSysData->pvSOCTimerRegisterKM,
++ &gpsSysData->hSOCTimerRegisterOSMemHandle);
++
++ gpsSysSpecificData->ui32SrcClockDiv = 3;
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to locate devices");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
++
++ if (sgx_ocp_init() < 0) {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to register device!");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_REGDEV);
++
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ switch (psDeviceNode->sDevId.eDeviceType) {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ struct DEVICE_MEMORY_HEAP_INFO
++ *psDeviceMemoryHeap;
++
++ psDeviceNode->psLocalDevMemArena = NULL;
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap;
++
++ for (i = 0; i < psDevMemoryInfo->ui32HeapCount;
++ i++)
++ psDeviceMemoryHeap[i].ui32Attribs |=
++ PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++
++ gpsSGXDevNode = psDeviceNode;
++ gsSysSpecificData.psSGXDevNode = psDeviceNode;
++
++ break;
++ }
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "SysInitialise: "
++ "Failed to find SGX device node!");
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT);
++
++ eError = InitSystemClocks(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to init system clocks (%d)",
++ eError);
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++
++ eError = EnableSystemClocks(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to Enable system clocks (%d)",
++ eError);
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
++
++ eError = OSInitPerf(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to init DVFS (%d)", eError);
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_PERF);
++
++ eError = EnableSGXClocks(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to Enable SGX clocks (%d)",
++ eError);
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++
++ eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to initialise device!");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_INITDEV);
++
++ gpsSysData->pszVersionString =
++ SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
++ if (!gpsSysData->pszVersionString)
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysFinalise: Failed to create a system version string");
++ else
++ PVR_DPF(PVR_DBG_WARNING, "SysFinalise: Version string: %s",
++ gpsSysData->pszVersionString);
++
++ DisableSGXClocks(gpsSysData);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SysFinalise(void)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ eError = EnableSGXClocks(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysInitialise: Failed to Enable SGX clocks (%d)",
++ eError);
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++
++
++ eError = OSInstallMISR(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SysFinalise: Failed to install MISR");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_MISR);
++
++ eError =
++ OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR",
++ gpsSGXDevNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SysFinalise: Failed to install ISR");
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR);
++
++ DisableSGXClocks(gpsSysData);
++
++ gpsSysSpecificData->bSGXInitComplete = IMG_TRUE;
++
++ return eError;
++}
++
++enum PVRSRV_ERROR SysDeinitialise(struct SYS_DATA *psSysData)
++{
++ enum PVRSRV_ERROR eError;
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR)) {
++ eError = OSUninstallDeviceLISR(psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SysDeinitialise: "
++ "OSUninstallDeviceLISR failed");
++ return eError;
++ }
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR)) {
++ eError = OSUninstallMISR(psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SysDeinitialise: "
++ "OSUninstallMISR failed");
++ return eError;
++ }
++ }
++
++ sgx_ocp_cleanup();
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV)) {
++ PVR_ASSERT(SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));
++
++ eError = EnableSGXClocks(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysDeinitialise: EnableSGXClocks failed");
++ return eError;
++ }
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++
++ DisableSGXClocks(gpsSysData);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR, "SysDeinitialise: "
++ "failed to de-init the device");
++ return eError;
++ }
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
++ DisableSystemClocks(gpsSysData);
++
++ CleanupSystemClocks(gpsSysData);
++
++ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_PERF)) {
++ eError = OSCleanupPerf(psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysDeinitialise: OSCleanupDvfs failed");
++ return eError;
++ }
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA)) {
++ eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysDeinitialise: failed to de-init env structure");
++ return eError;
++ }
++ }
++
++ if (gpsSysData->pvSOCTimerRegisterKM)
++ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM, 4,
++ PVRSRV_HAP_MULTI_PROCESS | PVRSRV_HAP_UNCACHED,
++ gpsSysData->hSOCTimerRegisterOSMemHandle);
++
++ SysDeinitialiseCommon(gpsSysData);
++
++#if defined(NO_HARDWARE)
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))
++
++ OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE,
++ gsSGXRegsCPUVAddr,
++ gsSGXDeviceMap.sRegsCpuPBase);
++#endif
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT))
++ PDUMPDEINIT();
++
++ gpsSysSpecificData->ui32SysSpecificData = 0;
++ gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;
++
++ gpsSysData = NULL;
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SysGetDeviceMemoryMap(enum PVRSRV_DEVICE_TYPE eDeviceType,
++ void **ppvDeviceMap)
++{
++ switch (eDeviceType) {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ *ppvDeviceMap = (void *) &gsSGXDeviceMap;
++ break;
++ default:
++ PVR_DPF(PVR_DBG_ERROR, "SysGetDeviceMemoryMap: "
++ "unsupported device type");
++ }
++ return PVRSRV_OK;
++}
++
++struct IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_CPU_PHYADDR CpuPAddr)
++{
++ struct IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++struct IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(struct IMG_SYS_PHYADDR sys_paddr)
++{
++ struct IMG_CPU_PHYADDR cpu_paddr;
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++struct IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr(struct IMG_CPU_PHYADDR cpu_paddr)
++{
++ struct IMG_SYS_PHYADDR sys_paddr;
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++struct IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_SYS_PHYADDR SysPAddr)
++{
++ struct IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++struct IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(
++ enum PVRSRV_DEVICE_TYPE eDeviceType,
++ struct IMG_DEV_PHYADDR DevPAddr)
++{
++ struct IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++void SysRegisterExternalDevice(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++void SysRemoveExternalDevice(struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++u32 SysGetInterruptSource(struct SYS_DATA *psSysData,
++ struct PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++#if defined(NO_HARDWARE)
++
++ return 0xFFFFFFFF;
++#else
++
++ return psDeviceNode->ui32SOCInterruptBit;
++#endif
++}
++
++void SysClearInterrupts(struct SYS_DATA *psSysData, u32 ui32ClearBits)
++{
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++
++ OSReadHWReg(((struct PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->
++ pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR);
++}
++
++enum PVRSRV_ERROR SysSystemPrePowerState(enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (eNewPowerState == PVRSRV_POWER_STATE_D3) {
++ PVR_TRACE("SysSystemPrePowerState: Entering state D3");
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR)) {
++ eError = OSUninstallDeviceLISR(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysSystemPrePowerState: "
++ "OSUninstallDeviceLISR failed "
++ "(%d)",
++ eError);
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)) {
++ DisableSystemClocks(gpsSysData);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
++ }
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR SysSystemPostPowerState(enum PVR_POWER_STATE eNewPowerState)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (eNewPowerState == PVRSRV_POWER_STATE_D0) {
++ PVR_TRACE("SysSystemPostPowerState: Entering state D0");
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS)) {
++ eError = EnableSystemClocks(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysSystemPostPowerState: "
++ "EnableSystemClocks failed (%d)",
++ eError);
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
++ }
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR)) {
++ eError =
++ OSInstallDeviceLISR(gpsSysData,
++ gsSGXDeviceMap.ui32IRQ,
++ "SGX ISR", gpsSGXDevNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "SysSystemPostPowerState: "
++ "OSInstallDeviceLISR failed "
++ "to install ISR (%d)",
++ eError);
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
++ }
++ }
++ return eError;
++}
++
++enum PVRSRV_ERROR SysDevicePrePowerState(u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState)
++{
++ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++
++ if (ui32DeviceIndex != gui32SGXDeviceID)
++ return PVRSRV_OK;
++ if (eNewPowerState == PVRSRV_POWER_STATE_D3) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "SysDevicePrePowerState: SGX Entering state D3");
++ DisableSGXClocks(gpsSysData);
++ }
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR SysDevicePostPowerState(u32 ui32DeviceIndex,
++ enum PVR_POWER_STATE eNewPowerState,
++ enum PVR_POWER_STATE eCurrentPowerState)
++{
++ enum PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++
++ if (ui32DeviceIndex != gui32SGXDeviceID)
++ return eError;
++ if (eCurrentPowerState == PVRSRV_POWER_STATE_D3) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "SysDevicePostPowerState: SGX Leaving state D3");
++ eError = EnableSGXClocks(gpsSysData);
++ }
++
++ return eError;
++}
++
++enum PVRSRV_ERROR SysOEMFunction(u32 ui32ID, void *pvIn, u32 ulInSize,
++ void *pvOut, u32 ulOutSize)
++{
++ PVR_UNREFERENCED_PARAMETER(ui32ID);
++ PVR_UNREFERENCED_PARAMETER(pvIn);
++ PVR_UNREFERENCED_PARAMETER(ulInSize);
++ PVR_UNREFERENCED_PARAMETER(pvOut);
++ PVR_UNREFERENCED_PARAMETER(ulOutSize);
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(struct PVRSRV_DC_OEM_JTABLE))) {
++
++ struct PVRSRV_DC_OEM_JTABLE *psOEMJTable =
++ (struct PVRSRV_DC_OEM_JTABLE *)pvOut;
++ psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
+diff --git a/drivers/gpu/pvr/sysconfig.h b/drivers/gpu/pvr/sysconfig.h
+new file mode 100644
+index 0000000..3ae1c6d
+--- /dev/null
++++ b/drivers/gpu/pvr/sysconfig.h
+@@ -0,0 +1,53 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME "OMAP3430"
++
++#define SYS_SGX_MAX_FREQ_NO_HW 200000000
++#define SYS_SGX_MAX_FREQ_530R121 110000000
++#define SYS_SGX_MAX_FREQ_530R125 200000000
++
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ 100
++#define SYS_SGX_PDS_TIMER_FREQ 1000
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS 1
++
++#define SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE 0x50000000
++#define SYS_OMAP3430_SGX_REGS_SIZE 0x4000
++
++#define SYS_OMAP3430_SGX_IRQ 21
++
++#define SYS_OMAP3430_GP11TIMER_PHYS_BASE 0x48088000
++#define SYS_OMAP3430_GPTIMER_ENABLE 0x24
++#define SYS_OMAP3430_GPTIMER_REGS 0x28
++#define SYS_OMAP3430_GPTIMER_TSICR 0x40
++#define SYS_OMAP3430_GPTIMER_SIZE 1024
++
++#endif
+diff --git a/drivers/gpu/pvr/sysinfo.h b/drivers/gpu/pvr/sysinfo.h
+new file mode 100644
+index 0000000..2af219d
+--- /dev/null
++++ b/drivers/gpu/pvr/sysinfo.h
+@@ -0,0 +1,94 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US 500000
++#define WAIT_TRY_COUNT 10000
++
++enum SYS_DEVICE_TYPE {
++ SYS_DEVICE_SGX = 0,
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++};
++
++#define SYS_DEVICE_COUNT 3
++
++#define PRM_REG32(offset) (offset)
++#define CM_REG32(offset) (offset)
++
++#define CM_FCLKEN_SGX CM_REG32(0xB00)
++#define CM_FCLKEN_SGX_EN_3D 0x00000002
++
++#define CM_ICLKEN_SGX CM_REG32(0xB10)
++#define CM_ICLKEN_SGX_EN_SGX 0x00000001
++
++#define CM_IDLEST_SGX CM_REG32(0xB20)
++#define CM_IDLEST_SGX_ST_SGX 0x00000001
++
++#define CM_CLKSEL_SGX CM_REG32(0xB40)
++#define CM_CLKSEL_SGX_MASK 0x0000000f
++#define CM_CLKSEL_SGX_L3DIV3 0x00000000
++#define CM_CLKSEL_SGX_L3DIV4 0x00000001
++#define CM_CLKSEL_SGX_L3DIV6 0x00000002
++#define CM_CLKSEL_SGX_96M 0x00000003
++
++#define CM_SLEEPDEP_SGX CM_REG32(0xB44)
++#define CM_CLKSTCTRL_SGX CM_REG32(0xB48)
++#define CM_CLKSTCTRL_SGX_AUTOSTATE 0x00008001
++
++#define CM_CLKSTST_SGX CM_REG32(0xB4C)
++#define CM_CLKSTST_SGX_STATUS_VALID 0x00000001
++
++#define RM_RSTST_SGX PRM_REG32(0xB58)
++#define RM_RSTST_SGX_RST_MASK 0x0000000F
++#define RM_RSTST_SGX_COREDOMAINWKUP_RST 0x00000008
++#define RM_RSTST_SGX_DOMAINWKUP_RST 0x00000004
++#define RM_RSTST_SGX_GLOBALWARM_RST 0x00000002
++#define RM_RSTST_SGX_GLOBALCOLD_RST 0x00000001
++
++#define PM_WKDEP_SGXi PRM_REG32(0xBC8)
++#define PM_WKDEP_SGX_EN_WAKEUP 0x00000010
++#define PM_WKDEP_SGX_EN_MPU 0x00000002
++#define PM_WKDEP_SGX_EN_CORE 0x00000001
++
++#define PM_PWSTCTRL_SGX PRM_REG32(0xBE0)
++#define PM_PWSTCTRL_SGX_POWERSTATE_MASK 0x00000003
++#define PM_PWSTCTRL_SGX_OFF 0x00000000
++#define PM_PWSTCTRL_SGX_RETENTION 0x00000001
++#define PM_PWSTCTRL_SGX_ON 0x00000003
++
++#define PM_PWSTST_SGX PRM_REG32(0xBE4)
++#define PM_PWSTST_SGX_INTRANSITION 0x00100000
++#define PM_PWSTST_SGX_CLKACTIVITY 0x00080000
++#define PM_PWSTST_SGX_POWERSTATE_MASK 0x00000003
++#define PM_PWSTST_SGX_OFF 0x00000003
++#define PM_PWSTST_SGX_RETENTION 0x00000001
++#define PM_PWSTST_SGX_ON 0x00000000
++
++#define PM_PREPWSTST_SGX PRM_REG32(0xBE8)
++
++#endif
+diff --git a/drivers/gpu/pvr/syslocal.h b/drivers/gpu/pvr/syslocal.h
+new file mode 100644
+index 0000000..03f39e6
+--- /dev/null
++++ b/drivers/gpu/pvr/syslocal.h
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++#include <linux/version.h>
++#include <linux/clk.h>
++#include <linux/spinlock.h>
++#include <asm/atomic.h>
++
++#include <linux/semaphore.h>
++#include <linux/resource.h>
++
++char *SysCreateVersionString(struct IMG_CPU_PHYADDR sRegRegion);
++
++enum PVRSRV_ERROR InitSystemClocks(struct SYS_DATA *psSysData);
++void CleanupSystemClocks(struct SYS_DATA *psSysData);
++void DisableSystemClocks(struct SYS_DATA *psSysData);
++enum PVRSRV_ERROR EnableSystemClocks(struct SYS_DATA *psSysData);
++
++void DisableSGXClocks(struct SYS_DATA *psSysData);
++enum PVRSRV_ERROR EnableSGXClocks(struct SYS_DATA *psSysData);
++
++#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS 0x00000001
++#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002
++#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004
++#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008
++#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV 0x00000010
++#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000020
++#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000040
++#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000080
++#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000100
++
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
++#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
++#define SYS_SPECIFIC_DATA_ENABLE_PERF 0x00000800
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) \
++ ((void)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) \
++ ((void)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) \
++ (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++struct SYS_SPECIFIC_DATA {
++ u32 ui32SysSpecificData;
++ struct PVRSRV_DEVICE_NODE *psSGXDevNode;
++ IMG_BOOL bSGXInitComplete;
++ u32 ui32SrcClockDiv;
++ IMG_BOOL bConstraintNotificationsEnabled;
++ atomic_t sSGXClocksEnabled;
++ spinlock_t sPowerLock;
++ atomic_t sPowerLockCPU;
++ spinlock_t sNotifyLock;
++ atomic_t sNotifyLockCPU;
++ IMG_BOOL bCallVDD2PostFunc;
++
++ struct clk *psCORE_CK;
++ struct clk *psSGX_FCK;
++ struct clk *psSGX_ICK;
++ struct clk *psMPU_CK;
++#if defined(DEBUG) || defined(TIMING)
++ struct clk *psGPT11_FCK;
++ struct clk *psGPT11_ICK;
++ void __iomem *gpt_base;
++#endif
++ struct constraint_handle *pVdd2Handle;
++};
++
++extern struct SYS_SPECIFIC_DATA *gpsSysSpecificData;
++
++#endif
+diff --git a/drivers/gpu/pvr/sysutils.c b/drivers/gpu/pvr/sysutils.c
+new file mode 100644
+index 0000000..79e1c87
+--- /dev/null
++++ b/drivers/gpu/pvr/sysutils.c
+@@ -0,0 +1,719 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/hardirq.h>
++#include <plat/omap-pm.h>
++#include <linux/bug.h>
++#include <plat/clock.h>
++#include <plat/cpu.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "sgxapi_km.h"
++#include "sysconfig.h"
++#include "sgxinfokm.h"
++#include "syslocal.h"
++#include "env_data.h"
++#include "ocpdefs.h"
++
++#define HZ_TO_MHZ(m) ((m) / 1000000)
++
++static inline unsigned long scale_by_rate(unsigned long val,
++ unsigned long rate1,
++ unsigned long rate2)
++{
++ if (rate1 >= rate2)
++ return val * (rate1 / rate2);
++
++ return val / (rate2 / rate1);
++}
++
++static inline unsigned long scale_prop_to_SGX_clock(unsigned long val,
++ unsigned long rate)
++{
++ return scale_by_rate(val, rate, sgx_get_max_freq());
++}
++
++void SysGetSGXTimingInformation(struct SGX_TIMING_INFORMATION *psTimingInfo)
++{
++ unsigned long rate;
++
++#if defined(NO_HARDWARE)
++ rate = SYS_SGX_MAX_FREQ_NO_HW;
++#else
++ PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
++
++ rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
++ PVR_ASSERT(rate != 0);
++#endif
++ psTimingInfo->ui32CoreClockSpeed = rate;
++ psTimingInfo->ui32HWRecoveryFreq =
++ scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
++ psTimingInfo->ui32uKernelFreq =
++ scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);
++ psTimingInfo->ui32ActivePowManLatencyms =
++ SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++}
++
++static int vdd2_post_func(struct notifier_block *n, unsigned long event,
++ void *ptr)
++{
++ PVR_UNREFERENCED_PARAMETER(n);
++ PVR_UNREFERENCED_PARAMETER(event);
++ PVR_UNREFERENCED_PARAMETER(ptr);
++
++ if (atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0 &&
++ gpsSysSpecificData->bSGXInitComplete) {
++#if defined(DEBUG)
++ unsigned long rate;
++
++ rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
++
++ PVR_ASSERT(rate != 0);
++
++ PVR_TRACE("%s: SGX clock rate: %dMHz", __func__,
++ HZ_TO_MHZ(rate));
++#endif
++ PVRSRVDevicePostClockSpeedChange(gpsSysSpecificData->
++ psSGXDevNode->sDevId.
++ ui32DeviceIndex, IMG_TRUE,
++ NULL);
++ }
++ return 0;
++}
++
++static int vdd2_pre_func(struct notifier_block *n, unsigned long event,
++ void *ptr)
++{
++ PVR_UNREFERENCED_PARAMETER(n);
++ PVR_UNREFERENCED_PARAMETER(event);
++ PVR_UNREFERENCED_PARAMETER(ptr);
++
++ if (atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0 &&
++ gpsSysSpecificData->bSGXInitComplete) {
++ BUG_ON(gpsSysData->eCurrentPowerState > PVRSRV_POWER_STATE_D1);
++ PVRSRVDevicePreClockSpeedChange(gpsSysSpecificData->
++ psSGXDevNode->sDevId.
++ ui32DeviceIndex, IMG_TRUE,
++ NULL);
++ }
++
++ return 0;
++}
++
++static int vdd2_pre_post_func(struct notifier_block *n, unsigned long event,
++ void *ptr)
++{
++ struct clk_notifier_data *cnd;
++
++ PVR_UNREFERENCED_PARAMETER(n);
++
++ cnd = (struct clk_notifier_data *)ptr;
++
++ PVR_TRACE("vdd2_pre_post_func: %s clock rate = %lu",
++ (CLK_PRE_RATE_CHANGE == event) ? "old" :
++ (CLK_POST_RATE_CHANGE == event) ? "new" :
++ "???",
++ cnd->rate);
++
++ if (CLK_PRE_RATE_CHANGE == event) {
++ PVRSRVDvfsLock();
++ PVR_TRACE("vdd2_pre_post_func: CLK_PRE_RATE_CHANGE event");
++ vdd2_pre_func(n, event, ptr);
++ } else if (CLK_POST_RATE_CHANGE == event) {
++ PVR_TRACE("vdd2_pre_post_func: CLK_POST_RATE_CHANGE event");
++ vdd2_post_func(n, event, ptr);
++ PVRSRVDvfsUnlock();
++ } else if (CLK_ABORT_RATE_CHANGE == event) {
++ PVR_TRACE("vdd2_pre_post_func: CLK_ABORT_RATE_CHANGE event");
++ PVRSRVDvfsUnlock();
++ } else {
++ printk(KERN_ERR "vdd2_pre_post_func: unexpected event (%lu)\n",
++ event);
++ PVR_DPF(PVR_DBG_ERROR,
++ "vdd2_pre_post_func: unexpected event (%lu)", event);
++ }
++ PVR_TRACE("vdd2_pre_post_func end.");
++ return 0;
++}
++
++static struct notifier_block vdd2_pre_post = {
++ vdd2_pre_post_func,
++ NULL
++};
++
++static void RegisterConstraintNotifications(struct SYS_SPECIFIC_DATA
++ *psSysSpecData)
++{
++ PVR_TRACE("Registering constraint notifications");
++
++ clk_notifier_register(psSysSpecData->psSGX_FCK, &vdd2_pre_post);
++ PVR_TRACE("VDD2 constraint notifications registered");
++}
++
++static void UnRegisterConstraintNotifications(struct SYS_SPECIFIC_DATA
++ *psSysSpecData)
++{
++ PVR_TRACE("Unregistering constraint notifications");
++
++ clk_notifier_unregister(psSysSpecData->psSGX_FCK, &vdd2_pre_post);
++}
++
++static struct device sgx_dev;
++static int sgx_clock_enabled;
++
++/* return value: current sgx load
++ * 0 - not busy
++ * 100 - busy
++ */
++static unsigned int sgx_current_load(void)
++{
++ enum PVRSRV_ERROR eError;
++ struct SYS_DATA *psSysData;
++ struct SYS_SPECIFIC_DATA *psSysSpecData;
++ struct PVRSRV_DEVICE_NODE *psDeviceNode;
++ static unsigned int kicks_prev;
++ static long time_prev;
++
++ eError = SysAcquireData(&psSysData);
++ if (eError != PVRSRV_OK)
++ return 0;
++ psSysSpecData =
++ (struct SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
++ if (!psSysSpecData ||
++ atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
++ return 0;
++ psDeviceNode = psSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ if ((psDeviceNode->sDevId.eDeviceType ==
++ PVRSRV_DEVICE_TYPE_SGX) &&
++ psDeviceNode->pvDevice) {
++ struct PVRSRV_SGXDEV_INFO *psDevInfo =
++ (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ unsigned int kicks = psDevInfo->ui32KickTACounter;
++ unsigned int load;
++ long time_elapsed;
++
++ time_elapsed = jiffies - time_prev;
++ if (likely(time_elapsed))
++ load =
++ 1000 * (kicks - kicks_prev) / time_elapsed;
++ else
++ load = 0;
++ kicks_prev = kicks;
++ time_prev += time_elapsed;
++ /*
++ * if the period between calls to this function was
++ * too long, then load stats are invalid
++ */
++ if (time_elapsed > 5 * HZ)
++ return 0;
++ /*pr_err("SGX load %u\n", load); */
++
++ /*
++ * 'load' shows how many times sgx was kicked
++ * per 1000 jiffies
++ * 150 is arbitrarily chosen threshold.
++ * If the number of kicks is below threshold
++ * then sgx is doing
++ * some small jobs and we can keep the clock freq low.
++ */
++ if (load < 150)
++ return 0;
++ else
++ return 100;
++ }
++ psDeviceNode = psDeviceNode->psNext;
++ }
++ return 0;
++}
++
++static void sgx_lock_perf(struct work_struct *work)
++{
++ int vdd1, vdd2;
++ static int bHigh;
++ int high;
++ unsigned int load;
++ struct delayed_work *d_work =
++ container_of(work, struct delayed_work, work);
++ struct ENV_DATA *psEnvData =
++ container_of(d_work, struct ENV_DATA, sPerfWork);
++
++ load = sgx_current_load();
++ if (load) {
++ vdd1 = 500000000;
++ vdd2 = 400000;
++ high = 1;
++ } else {
++ vdd1 = 0;
++ vdd2 = 0;
++ high = 0;
++ }
++ if (high != bHigh) {
++ omap_pm_set_min_bus_tput(&sgx_dev, OCP_INITIATOR_AGENT, vdd2);
++ bHigh = high;
++ }
++
++ if (sgx_clock_enabled || load)
++ queue_delayed_work(psEnvData->psPerfWorkqueue,
++ &psEnvData->sPerfWork, HZ / 5);
++}
++
++static void sgx_need_perf(struct SYS_DATA *psSysData, int ena)
++{
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ sgx_clock_enabled = ena;
++ cancel_delayed_work(&psEnvData->sPerfWork);
++ queue_delayed_work(psEnvData->psPerfWorkqueue, &psEnvData->sPerfWork,
++ 0);
++}
++
++enum PVRSRV_ERROR OSInitPerf(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->psPerfWorkqueue) {
++ PVR_DPF(PVR_DBG_ERROR, "OSInitPerf: already inited");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE("Initing DVFS %x", pvSysData);
++
++ psEnvData->psPerfWorkqueue = create_singlethread_workqueue("sgx_perf");
++ INIT_DELAYED_WORK(&psEnvData->sPerfWork, sgx_lock_perf);
++
++ return PVRSRV_OK;
++}
++
++enum PVRSRV_ERROR OSCleanupPerf(void *pvSysData)
++{
++ struct SYS_DATA *psSysData = (struct SYS_DATA *)pvSysData;
++ struct ENV_DATA *psEnvData =
++ (struct ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->psPerfWorkqueue) {
++ PVR_DPF(PVR_DBG_ERROR, "OSCleanupPerf: not inited");
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE("Cleaning up DVFS");
++
++ sgx_clock_enabled = 0;
++ flush_workqueue(psEnvData->psPerfWorkqueue);
++ destroy_workqueue(psEnvData->psPerfWorkqueue);
++
++ return PVRSRV_OK;
++}
++
++static inline void setup_int_bypass(void)
++{
++ if (cpu_is_omap3630())
++ sgx_ocp_write_reg(EUR_CR_OCP_DEBUG_CONFIG,
++ EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
++}
++
++#ifndef NO_HARDWARE
++
++static enum PVRSRV_ERROR sgx_force_enable_clocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData =
++ (struct SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
++ int res;
++
++ res = clk_enable(psSysSpecData->psSGX_FCK);
++ if (res < 0) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: "
++ "Couldn't enable SGX functional clock (%d)",
++ __func__, res);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ res = clk_enable(psSysSpecData->psSGX_ICK);
++ if (res < 0) {
++ PVR_DPF(PVR_DBG_ERROR, "%s: "
++ "Couldn't enable SGX interface clock (%d)",
++ __func__, res);
++
++ clk_disable(psSysSpecData->psSGX_FCK);
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ setup_int_bypass();
++
++ return PVRSRV_OK;
++}
++
++static void sgx_force_disable_clocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData =
++ (struct SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
++
++ if (psSysSpecData->psSGX_ICK)
++ clk_disable(psSysSpecData->psSGX_ICK);
++
++ if (psSysSpecData->psSGX_FCK)
++ clk_disable(psSysSpecData->psSGX_FCK);
++}
++
++#else /* NO_HARDWARE */
++
++static enum PVRSRV_ERROR sgx_force_enable_clocks(struct SYS_DATA *psSYsData)
++{
++ return PVRSRV_OK;
++}
++
++static void sgx_force_disable_clocks(struct SYS_DATA *psSYsData)
++{
++}
++
++#endif /* NO_HARDWARE */
++
++static bool force_clocks_on(void)
++{
++#ifdef CONFIG_PVR_FORCE_CLOCKS_ON
++ return true;
++#else
++ return false;
++#endif
++}
++
++enum PVRSRV_ERROR EnableSGXClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData =
++ (struct SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
++ enum PVRSRV_ERROR res = PVRSRV_OK;
++
++ if (atomic_xchg(&psSysSpecData->sSGXClocksEnabled, 1))
++ return PVRSRV_OK;
++
++ /*
++ * In case of force clocks on we have already enabled the clocks
++ * at init time.
++ */
++ if (!force_clocks_on())
++ res = sgx_force_enable_clocks(psSysData);
++
++ if (res == PVRSRV_OK) {
++ BUG_ON(!atomic_read(&psSysSpecData->sSGXClocksEnabled));
++ sgx_need_perf(psSysData, 1);
++ } else {
++ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
++ }
++
++ return res;
++}
++
++void DisableSGXClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData =
++ (struct SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
++
++ if (!atomic_xchg(&psSysSpecData->sSGXClocksEnabled, 0))
++ return;
++
++ if (!force_clocks_on())
++ sgx_force_disable_clocks(psSysData);
++
++ BUG_ON(atomic_read(&psSysSpecData->sSGXClocksEnabled));
++
++ sgx_need_perf(psSysData, 0);
++}
++
++static enum PVRSRV_ERROR InitSgxClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++ struct clk *psCLK;
++ struct clk *core_ck = NULL;
++
++ psCLK = clk_get(NULL, "sgx_fck");
++ if (IS_ERR(psCLK))
++ goto err0;
++ psSysSpecData->psSGX_FCK = psCLK;
++
++ psCLK = clk_get(NULL, "sgx_ick");
++ if (IS_ERR(psCLK))
++ goto err1;
++ psSysSpecData->psSGX_ICK = psCLK;
++
++ core_ck = clk_get(NULL, "core_ck");
++ if (IS_ERR(core_ck))
++ goto err2;
++ if (clk_set_parent(psSysSpecData->psSGX_FCK, core_ck) < 0) {
++ clk_put(core_ck);
++ goto err2;
++ }
++ clk_put(core_ck);
++
++ RegisterConstraintNotifications(psSysSpecData);
++ return PVRSRV_OK;
++
++err2:
++ clk_put(psSysSpecData->psSGX_ICK);
++err1:
++ clk_put(psSysSpecData->psSGX_FCK);
++err0:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: couldn't init clocks fck %p ick %p core %p", __func__,
++ psSysSpecData->psSGX_FCK, psSysSpecData->psSGX_ICK, core_ck);
++ psSysSpecData->psSGX_FCK = NULL;
++ psSysSpecData->psSGX_ICK = NULL;
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++static void CleanupSgxClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++ UnRegisterConstraintNotifications(psSysSpecData);
++
++ if (psSysSpecData->psSGX_ICK) {
++ clk_put(psSysSpecData->psSGX_ICK);
++ psSysSpecData->psSGX_ICK = NULL;
++ }
++
++ if (psSysSpecData->psSGX_FCK) {
++ clk_put(psSysSpecData->psSGX_FCK);
++ psSysSpecData->psSGX_FCK = NULL;
++ }
++}
++
++#if defined(DEBUG) || defined(TIMING)
++static inline u32 gpt_read_reg(struct SYS_DATA *psSysData, u32 reg)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++
++ return __raw_readl(psSysSpecData->gpt_base + reg);
++}
++
++static inline void gpt_write_reg(struct SYS_DATA *psSysData, u32 reg, u32 val)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++
++ __raw_writel(val, psSysSpecData->gpt_base + reg);
++}
++
++static enum PVRSRV_ERROR InitDebugClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++ struct clk *psCLK;
++ struct clk *sys_ck = NULL;
++ u32 rate;
++
++ psCLK = clk_get(NULL, "mpu_ck");
++ if (IS_ERR(psCLK))
++ goto err0;
++ psSysSpecData->psMPU_CK = psCLK;
++
++ psCLK = clk_get(NULL, "gpt11_fck");
++ if (IS_ERR(psCLK))
++ goto err1;
++ psSysSpecData->psGPT11_FCK = psCLK;
++
++ psCLK = clk_get(NULL, "gpt11_ick");
++ if (IS_ERR(psCLK))
++ goto err2;
++ psSysSpecData->psGPT11_ICK = psCLK;
++
++ sys_ck = clk_get(NULL, "sys_ck");
++ if (IS_ERR(sys_ck))
++ goto err3;
++ if (clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
++ if (clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck) < 0) {
++ clk_put(sys_ck);
++ goto err3;
++ }
++ clk_put(sys_ck);
++
++ PVR_TRACE("GPTIMER11 clock is %dMHz",
++ HZ_TO_MHZ(clk_get_rate(psSysSpecData->psGPT11_FCK)));
++
++ psSysSpecData->gpt_base = ioremap(SYS_OMAP3430_GP11TIMER_PHYS_BASE,
++ SYS_OMAP3430_GPTIMER_SIZE);
++ if (!psSysSpecData->gpt_base)
++ goto err3;
++
++ clk_enable(psSysSpecData->psGPT11_ICK);
++ clk_enable(psSysSpecData->psGPT11_FCK);
++
++ rate = gpt_read_reg(psSysData, SYS_OMAP3430_GPTIMER_TSICR);
++ if (!(rate & 4)) {
++ PVR_TRACE("Setting GPTIMER11 mode to posted "
++ "(currently is non-posted)");
++ gpt_write_reg(psSysData, SYS_OMAP3430_GPTIMER_TSICR, rate | 4);
++ }
++
++ clk_disable(psSysSpecData->psGPT11_FCK);
++ clk_disable(psSysSpecData->psGPT11_ICK);
++
++ return PVRSRV_OK;
++
++err3:
++ clk_put(psSysSpecData->psGPT11_ICK);
++err2:
++ clk_put(psSysSpecData->psGPT11_FCK);
++err1:
++ clk_put(psSysSpecData->psMPU_CK);
++err0:
++ PVR_DPF(PVR_DBG_ERROR,
++ "%s: couldn't init clocks: mpu %p sys %p fck %p ick %p",
++ __func__, psSysSpecData->psMPU_CK, sys_ck,
++ psSysSpecData->psGPT11_FCK, psSysSpecData->psGPT11_ICK);
++
++ psSysSpecData->psMPU_CK = NULL;
++ psSysSpecData->psGPT11_FCK = NULL;
++ psSysSpecData->psGPT11_ICK = NULL;
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++static void CleanupDebugClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++
++ if (psSysSpecData->psMPU_CK) {
++ clk_put(psSysSpecData->psMPU_CK);
++ psSysSpecData->psMPU_CK = NULL;
++ }
++ if (psSysSpecData->psGPT11_FCK) {
++ clk_put(psSysSpecData->psGPT11_FCK);
++ psSysSpecData->psGPT11_FCK = NULL;
++ }
++ if (psSysSpecData->psGPT11_ICK) {
++ clk_put(psSysSpecData->psGPT11_ICK);
++ psSysSpecData->psGPT11_ICK = NULL;
++ }
++}
++
++static enum PVRSRV_ERROR EnableDebugClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++
++ if (clk_enable(psSysSpecData->psGPT11_FCK) < 0)
++ goto err0;
++
++ if (clk_enable(psSysSpecData->psGPT11_ICK) < 0)
++ goto err1;
++
++ gpt_write_reg(psSysData, SYS_OMAP3430_GPTIMER_ENABLE, 3);
++
++ return PVRSRV_OK;
++
++err1:
++ clk_disable(psSysSpecData->psGPT11_FCK);
++err0:
++ PVR_DPF(PVR_DBG_ERROR, "%s: can't enable clocks", __func__);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++static inline void DisableDebugClocks(struct SYS_DATA *psSysData)
++{
++ struct SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData;
++
++ gpt_write_reg(psSysData, SYS_OMAP3430_GPTIMER_ENABLE, 0);
++
++ clk_disable(psSysSpecData->psGPT11_ICK);
++ clk_disable(psSysSpecData->psGPT11_FCK);
++}
++
++#else
++
++inline enum PVRSRV_ERROR InitDebugClocks(struct SYS_DATA *psSysData)
++{
++ return PVRSRV_OK;
++}
++
++static inline void CleanupDebugClocks(struct SYS_DATA *psSysData)
++{
++}
++
++static inline enum PVRSRV_ERROR EnableDebugClocks(struct SYS_DATA *psSysData)
++{
++ return PVRSRV_OK;
++}
++
++static inline void DisableDebugClocks(struct SYS_DATA *psSysData)
++{
++}
++#endif
++
++enum PVRSRV_ERROR InitSystemClocks(struct SYS_DATA *psSysData)
++{
++ if (InitSgxClocks(psSysData) != PVRSRV_OK)
++ goto err0;
++
++ if (InitDebugClocks(psSysData) != PVRSRV_OK)
++ goto err1;
++
++ return PVRSRV_OK;
++
++err1:
++ CleanupSgxClocks(psSysData);
++err0:
++ return PVRSRV_ERROR_GENERIC;
++}
++
++void CleanupSystemClocks(struct SYS_DATA *psSysData)
++{
++ CleanupDebugClocks(psSysData);
++ CleanupSgxClocks(psSysData);
++}
++
++enum PVRSRV_ERROR EnableSystemClocks(struct SYS_DATA *psSysData)
++{
++ PVR_TRACE("EnableSystemClocks: Enabling System Clocks");
++
++ /*
++ * We force clocks on by increasing their refcount here during
++ * module init time and decreasing it at cleanup time.
++ */
++ if (force_clocks_on())
++ sgx_force_enable_clocks(gpsSysData);
++ if (EnableDebugClocks(psSysData) != PVRSRV_OK)
++ goto err1;
++
++ return PVRSRV_OK;
++
++err1:
++ return PVRSRV_ERROR_GENERIC;
++}
++
++void DisableSystemClocks(struct SYS_DATA *psSysData)
++{
++ PVR_TRACE("DisableSystemClocks: Disabling System Clocks");
++
++ DisableDebugClocks(psSysData);
++ /* Decrease the clocks' refcount that was increased at init time. */
++ if (force_clocks_on())
++ sgx_force_disable_clocks(gpsSysData);
++}
+diff --git a/drivers/gpu/pvr/tools/Makefile b/drivers/gpu/pvr/tools/Makefile
+new file mode 100644
+index 0000000..27314da
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/Makefile
+@@ -0,0 +1,29 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++
++
++objs-$(CONFIG_PVR_TOOLS) += dbgdrv
++
++dbgdrv-objs := main.c dbgdriv.c ioctl.c hostfunc.c \
++ hotkey.c
++
+diff --git a/drivers/gpu/pvr/tools/dbgdriv.c b/drivers/gpu/pvr/tools/dbgdriv.c
+new file mode 100644
+index 0000000..1ab5e70
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/dbgdriv.c
+@@ -0,0 +1,1652 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/string.h>
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++#define LAST_FRAME_BUF_SIZE 1024
++
++struct DBG_LASTFRAME_BUFFER {
++ struct DBG_STREAM *psStream;
++ u8 ui8Buffer[LAST_FRAME_BUF_SIZE];
++ u32 ui32BufLen;
++ struct DBG_LASTFRAME_BUFFER *psNext;
++};
++
++static struct DBG_STREAM *g_psStreamList;
++static struct DBG_LASTFRAME_BUFFER *g_psLFBufferList;
++
++static u32 g_ui32LOff;
++static u32 g_ui32Line;
++static u32 g_ui32MonoLines = 25;
++
++static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE;
++static u32 g_ui32HotkeyMiddumpStart = 0xffffffff;
++static u32 g_ui32HotkeyMiddumpEnd = 0xffffffff;
++
++void *g_pvAPIMutex;
++
++IMG_BOOL gbDumpThisFrame = IMG_FALSE;
++
++static u32 SpaceInStream(struct DBG_STREAM *psStream);
++static IMG_BOOL ExpandStreamBuffer(struct DBG_STREAM *psStream,
++ u32 ui32NewSize);
++struct DBG_LASTFRAME_BUFFER *FindLFBuf(struct DBG_STREAM *psStream);
++
++struct DBGKM_SERVICE_TABLE g_sDBGKMServices = {
++ sizeof(struct DBGKM_SERVICE_TABLE),
++ ExtDBGDrivCreateStream,
++ ExtDBGDrivDestroyStream,
++ ExtDBGDrivFindStream,
++ ExtDBGDrivWriteString,
++ ExtDBGDrivReadString,
++ ExtDBGDrivWrite,
++ ExtDBGDrivRead,
++ ExtDBGDrivSetCaptureMode,
++ ExtDBGDrivSetOutputMode,
++ ExtDBGDrivSetDebugLevel,
++ ExtDBGDrivSetFrame,
++ ExtDBGDrivGetFrame,
++ ExtDBGDrivOverrideMode,
++ ExtDBGDrivDefaultMode,
++ ExtDBGDrivWrite2,
++ ExtDBGDrivWriteStringCM,
++ ExtDBGDrivWriteCM,
++ ExtDBGDrivSetMarker,
++ ExtDBGDrivGetMarker,
++ ExtDBGDrivStartInitPhase,
++ ExtDBGDrivStopInitPhase,
++ ExtDBGDrivIsCaptureFrame,
++ ExtDBGDrivWriteLF,
++ ExtDBGDrivReadLF,
++ ExtDBGDrivGetStreamOffset,
++ ExtDBGDrivSetStreamOffset,
++ ExtDBGDrivIsLastCaptureFrame,
++ ExtDBGDrivWaitForEvent
++};
++
++void *ExtDBGDrivCreateStream(char *pszName,
++ u32 ui32CapMode,
++ u32 ui32OutMode,
++ u32 ui32Flags,
++ u32 ui32Size)
++{
++ void *pvRet;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet =
++ DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags,
++ ui32Size);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++void ExtDBGDrivDestroyStream(struct DBG_STREAM *psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDestroyStream(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void *ExtDBGDrivFindStream(char *pszName,
++ IMG_BOOL bResetStream)
++{
++ void *pvRet;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet = DBGDrivFindStream(pszName, bResetStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++u32 ExtDBGDrivWriteString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWriteString(psStream, pszString, ui32Level);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivReadString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Limit)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivReadString(psStream, pszString, ui32Limit);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivWrite(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivRead(struct DBG_STREAM *psStream,
++ IMG_BOOL bReadInitBuffer,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret =
++ DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void ExtDBGDrivSetCaptureMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode,
++ u32 ui32Start,
++ u32 ui32End,
++ u32 ui32SampleRate)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End,
++ ui32SampleRate);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void ExtDBGDrivSetOutputMode(struct DBG_STREAM *psStream,
++ u32 ui32OutMode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetOutputMode(psStream, ui32OutMode);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void ExtDBGDrivSetDebugLevel(struct DBG_STREAM *psStream,
++ u32 ui32DebugLevel)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetDebugLevel(psStream, ui32DebugLevel);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void ExtDBGDrivSetFrame(struct DBG_STREAM *psStream, u32 ui32Frame)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetFrame(psStream, ui32Frame);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++u32 ExtDBGDrivGetFrame(struct DBG_STREAM *psStream)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivGetFrame(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivIsLastCaptureFrame(struct DBG_STREAM *psStream)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivIsLastCaptureFrame(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivIsCaptureFrame(struct DBG_STREAM *psStream,
++ IMG_BOOL bCheckPreviousFrame)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void ExtDBGDrivOverrideMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivOverrideMode(psStream, ui32Mode);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void ExtDBGDrivDefaultMode(struct DBG_STREAM *psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDefaultMode(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++u32 ExtDBGDrivWrite2(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivWriteStringCM(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWriteStringCM(psStream, pszString, ui32Level);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivWriteCM(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret =
++ DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void ExtDBGDrivSetMarker(struct DBG_STREAM *psStream,
++ u32 ui32Marker)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetMarker(psStream, ui32Marker);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++u32 ExtDBGDrivGetMarker(struct DBG_STREAM *psStream)
++{
++ u32 ui32Marker;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Marker = DBGDrivGetMarker(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Marker;
++}
++
++u32 ExtDBGDrivWriteLF(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level,
++ u32 ui32Flags)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret =
++ DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level,
++ ui32Flags);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++u32 ExtDBGDrivReadLF(struct DBG_STREAM *psStream,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void ExtDBGDrivStartInitPhase(struct DBG_STREAM *psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStartInitPhase(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void ExtDBGDrivStopInitPhase(struct DBG_STREAM *psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStopInitPhase(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++u32 ExtDBGDrivGetStreamOffset(struct DBG_STREAM *psStream)
++{
++ u32 ui32Ret;
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivGetStreamOffset(psStream);
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void ExtDBGDrivSetStreamOffset(struct DBG_STREAM *psStream,
++ u32 ui32StreamOffset)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetStreamOffset(psStream, ui32StreamOffset);
++
++ HostReleaseMutex(g_pvAPIMutex);
++}
++
++void ExtDBGDrivWaitForEvent(enum DBG_EVENT eEvent)
++{
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ DBGDrivWaitForEvent(eEvent);
++#else
++ PVR_UNREFERENCED_PARAMETER(eEvent);
++#endif
++}
++
++u32 AtoI(char *szIn)
++{
++ u32 ui32Len = 0;
++ u32 ui32Value = 0;
++ u32 ui32Digit = 1;
++ u32 ui32Base = 10;
++ int iPos;
++ char bc;
++
++ while (szIn[ui32Len] > 0)
++ ui32Len++;
++
++ if (ui32Len == 0)
++ return 0;
++
++ iPos = 0;
++ while (szIn[iPos] == '0')
++ iPos++;
++ if (szIn[iPos] == '\0')
++ return 0;
++ if (szIn[iPos] == 'x' || szIn[iPos] == 'X') {
++ ui32Base = 16;
++ szIn[iPos] = '0';
++ }
++
++ for (iPos = ui32Len - 1; iPos >= 0; iPos--) {
++ bc = szIn[iPos];
++
++ if ((bc >= 'a') && (bc <= 'f') && ui32Base == 16)
++ bc -= 'a' - 0xa;
++ else if ((bc >= 'A') && (bc <= 'F') && ui32Base == 16)
++ bc -= 'A' - 0xa;
++ else if ((bc >= '0') && (bc <= '9'))
++ bc -= '0';
++ else
++ return 0;
++
++ ui32Value += bc * ui32Digit;
++
++ ui32Digit = ui32Digit * ui32Base;
++ }
++ return ui32Value;
++}
++
++IMG_BOOL StreamValid(struct DBG_STREAM *psStream)
++{
++ struct DBG_STREAM *psThis;
++
++ psThis = g_psStreamList;
++
++ while (psThis)
++ if (psStream && (psThis == psStream))
++ return IMG_TRUE;
++ else
++ psThis = psThis->psNext;
++
++ return IMG_FALSE;
++}
++
++void Write(struct DBG_STREAM *psStream, u8 *pui8Data,
++ u32 ui32InBuffSize)
++{
++
++ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size) {
++ u32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
++ u32 ui32B2 = ui32InBuffSize - ui32B1;
++
++ HostMemCopy((void *) (psStream->ui32Base +
++ psStream->ui32WPtr),
++ (void *) pui8Data, ui32B1);
++
++ HostMemCopy((void *) psStream->ui32Base,
++ (void *) ((u32) pui8Data + ui32B1),
++ ui32B2);
++
++ psStream->ui32WPtr = ui32B2;
++ } else {
++ HostMemCopy((void *) (psStream->ui32Base +
++ psStream->ui32WPtr),
++ (void *) pui8Data, ui32InBuffSize);
++
++ psStream->ui32WPtr += ui32InBuffSize;
++
++ if (psStream->ui32WPtr == psStream->ui32Size)
++ psStream->ui32WPtr = 0;
++ }
++ psStream->ui32DataWritten += ui32InBuffSize;
++}
++
++void MonoOut(char *pszString, IMG_BOOL bNewLine)
++{
++ u32 i;
++ char *pScreen;
++
++ pScreen = (char *)DBGDRIV_MONOBASE;
++
++ pScreen += g_ui32Line * 160;
++
++ i = 0;
++ do {
++ pScreen[g_ui32LOff + (i * 2)] = pszString[i];
++ pScreen[g_ui32LOff + (i * 2) + 1] = 127;
++ i++;
++ } while ((pszString[i] != 0) && (i < 4096));
++
++ g_ui32LOff += i * 2;
++
++ if (bNewLine) {
++ g_ui32LOff = 0;
++ g_ui32Line++;
++ }
++
++ if (g_ui32Line == g_ui32MonoLines) {
++ g_ui32Line = g_ui32MonoLines - 1;
++
++ HostMemCopy((void *) DBGDRIV_MONOBASE,
++ (void *) (DBGDRIV_MONOBASE + 160),
++ 160 * (g_ui32MonoLines - 1));
++
++ HostMemSet((void *) (DBGDRIV_MONOBASE +
++ (160 * (g_ui32MonoLines - 1))), 0,
++ 160);
++ }
++}
++
++void AppendName(char *pszOut, char *pszBase, char *pszName)
++{
++ u32 i;
++ u32 ui32Off;
++
++ i = 0;
++
++ while (pszBase[i] != 0) {
++ pszOut[i] = pszBase[i];
++ i++;
++ }
++
++ ui32Off = i;
++ i = 0;
++
++ while (pszName[i] != 0) {
++ pszOut[ui32Off + i] = pszName[i];
++ i++;
++ }
++
++ pszOut[ui32Off + i] = pszName[i];
++}
++
++void *DBGDrivCreateStream(char *pszName,
++ u32 ui32CapMode,
++ u32 ui32OutMode,
++ u32 ui32Flags,
++ u32 ui32Size)
++{
++ struct DBG_STREAM *psStream;
++ struct DBG_STREAM *psInitStream;
++ struct DBG_LASTFRAME_BUFFER *psLFBuffer;
++ u32 ui32Off;
++ void *pvBase;
++
++ psStream = (struct DBG_STREAM *)DBGDrivFindStream(pszName, IMG_FALSE);
++
++ if (psStream)
++ return (void *)psStream;
++
++ psStream = HostNonPageablePageAlloc(1);
++ psInitStream = HostNonPageablePageAlloc(1);
++ psLFBuffer = HostNonPageablePageAlloc(1);
++ if ((!psStream) || (!psInitStream) || (!psLFBuffer)
++ ) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DBGDriv: Couldn't create buffer !!!!!\n\r");
++ return NULL;
++ }
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ else
++ pvBase = HostPageablePageAlloc(ui32Size);
++
++ if (!pvBase) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DBGDriv: Couldn't create buffer !!!!!\n\r");
++ HostNonPageablePageFree(psStream);
++ return NULL;
++ }
++
++ psStream->psNext = 0;
++ psStream->ui32Flags = ui32Flags;
++ psStream->ui32Base = (u32) pvBase;
++ psStream->ui32Size = ui32Size * 4096;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = 0;
++ psStream->ui32CapMode = ui32CapMode;
++ psStream->ui32OutMode = ui32OutMode;
++ psStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psStream->ui32DefaultMode = ui32CapMode;
++ psStream->ui32Start = 0;
++ psStream->ui32End = 0;
++ psStream->ui32Current = 0;
++ psStream->ui32SampleRate = 1;
++ psStream->ui32Access = 0;
++ psStream->ui32Timeout = 0;
++ psStream->ui32Marker = 0;
++ psStream->bInitPhaseComplete = IMG_FALSE;
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ else
++ pvBase = HostPageablePageAlloc(ui32Size);
++
++ if (!pvBase) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "DBGDriv: Couldn't create buffer !!!!!\n\r");
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ HostNonPageablePageFree((void *) psStream->
++ ui32Base);
++ else
++ HostPageablePageFree((void *) psStream->ui32Base);
++ HostNonPageablePageFree(psStream);
++ return NULL;
++ }
++
++ psInitStream->psNext = 0;
++ psInitStream->ui32Flags = ui32Flags;
++ psInitStream->ui32Base = (u32) pvBase;
++ psInitStream->ui32Size = ui32Size * 4096;
++ psInitStream->ui32RPtr = 0;
++ psInitStream->ui32WPtr = 0;
++ psInitStream->ui32DataWritten = 0;
++ psInitStream->ui32CapMode = ui32CapMode;
++ psInitStream->ui32OutMode = ui32OutMode;
++ psInitStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psInitStream->ui32DefaultMode = ui32CapMode;
++ psInitStream->ui32Start = 0;
++ psInitStream->ui32End = 0;
++ psInitStream->ui32Current = 0;
++ psInitStream->ui32SampleRate = 1;
++ psInitStream->ui32Access = 0;
++ psInitStream->ui32Timeout = 0;
++ psInitStream->ui32Marker = 0;
++ psInitStream->bInitPhaseComplete = IMG_FALSE;
++
++ psStream->psInitStream = psInitStream;
++
++ psLFBuffer->psStream = psStream;
++ psLFBuffer->ui32BufLen = 0;
++
++ g_bHotkeyMiddump = IMG_FALSE;
++ g_ui32HotkeyMiddumpStart = 0xffffffff;
++ g_ui32HotkeyMiddumpEnd = 0xffffffff;
++
++ ui32Off = 0;
++
++ do {
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++ ui32Off++;
++ } while ((pszName[ui32Off] != 0)
++ && (ui32Off < (4096 - sizeof(struct DBG_STREAM))));
++
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++ psStream->psNext = g_psStreamList;
++ g_psStreamList = psStream;
++
++ psLFBuffer->psNext = g_psLFBufferList;
++ g_psLFBufferList = psLFBuffer;
++
++ return (void *)psStream;
++}
++
++void DBGDrivDestroyStream(struct DBG_STREAM *psStream)
++{
++ struct DBG_STREAM *psStreamThis;
++ struct DBG_STREAM *psStreamPrev;
++ struct DBG_LASTFRAME_BUFFER *psLFBuffer;
++ struct DBG_LASTFRAME_BUFFER *psLFThis;
++ struct DBG_LASTFRAME_BUFFER *psLFPrev;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n",
++ psStream->szName);
++
++ if (!StreamValid(psStream))
++ return;
++
++ psLFBuffer = FindLFBuf(psStream);
++
++ psStreamThis = g_psStreamList;
++ psStreamPrev = 0;
++
++ while (psStreamThis)
++ if (psStreamThis == psStream) {
++ if (psStreamPrev)
++ psStreamPrev->psNext = psStreamThis->psNext;
++ else
++ g_psStreamList = psStreamThis->psNext;
++
++ psStreamThis = 0;
++ } else {
++ psStreamPrev = psStreamThis;
++ psStreamThis = psStreamThis->psNext;
++ }
++
++ psLFThis = g_psLFBufferList;
++ psLFPrev = 0;
++
++ while (psLFThis)
++ if (psLFThis == psLFBuffer) {
++ if (psLFPrev)
++ psLFPrev->psNext = psLFThis->psNext;
++ else
++ g_psLFBufferList = psLFThis->psNext;
++
++ psLFThis = 0;
++ } else {
++ psLFPrev = psLFThis;
++ psLFThis = psLFThis->psNext;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ DeactivateHotKeys();
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) {
++ HostNonPageablePageFree((void *) psStream->ui32Base);
++ HostNonPageablePageFree((void *) psStream->psInitStream->
++ ui32Base);
++ } else {
++ HostPageablePageFree((void *) psStream->ui32Base);
++ HostPageablePageFree((void *) psStream->psInitStream->
++ ui32Base);
++ }
++
++ HostNonPageablePageFree(psStream->psInitStream);
++ HostNonPageablePageFree(psStream);
++ HostNonPageablePageFree(psLFBuffer);
++
++ if (g_psStreamList == 0)
++ PVR_DPF(PVR_DBG_MESSAGE, "DBGDriv: Stream list now empty");
++
++ return;
++}
++
++void *DBGDrivFindStream(char *pszName,
++ IMG_BOOL bResetStream)
++{
++ struct DBG_STREAM *psStream;
++ struct DBG_STREAM *psThis;
++ u32 ui32Off;
++ IMG_BOOL bAreSame;
++
++ psStream = 0;
++
++ for (psThis = g_psStreamList; psThis != NULL;
++ psThis = psThis->psNext) {
++ bAreSame = IMG_TRUE;
++ ui32Off = 0;
++
++ if (strlen(psThis->szName) == strlen(pszName)) {
++ while ((psThis->szName[ui32Off] != 0)
++ && (pszName[ui32Off] != 0) && (ui32Off < 128)
++ && bAreSame) {
++ if (psThis->szName[ui32Off] != pszName[ui32Off])
++ bAreSame = IMG_FALSE;
++
++ ui32Off++;
++ }
++ } else {
++ bAreSame = IMG_FALSE;
++ }
++
++ if (bAreSame) {
++ psStream = psThis;
++ break;
++ }
++ }
++
++ if (bResetStream && psStream) {
++ static char szComment[] = "-- Init phase terminated\r\n";
++ psStream->psInitStream->ui32RPtr = 0;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten =
++ psStream->psInitStream->ui32DataWritten;
++ if (psStream->bInitPhaseComplete == IMG_FALSE) {
++ if (psStream->ui32Flags & DEBUG_FLAGS_TEXTSTREAM)
++ DBGDrivWrite2(psStream, (u8 *) szComment,
++ sizeof(szComment) - 1, 0x01);
++ psStream->bInitPhaseComplete = IMG_TRUE;
++ }
++ }
++
++ return (void *)psStream;
++}
++
++u32 DBGDrivWriteStringCM(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level)
++{
++
++ if (!StreamValid(psStream))
++ return 0xFFFFFFFF;
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) {
++ if (!(psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE))
++ return 0;
++ } else {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ if ((psStream->ui32Current != g_ui32HotKeyFrame)
++ || (g_bHotKeyPressed == IMG_FALSE))
++ return 0;
++ }
++
++ return DBGDrivWriteString(psStream, pszString, ui32Level);
++
++}
++
++u32 DBGDrivWriteString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level)
++{
++ u32 ui32Len;
++ u32 ui32Space;
++ u32 ui32WPtr;
++ u8 *pui8Buffer;
++
++ if (!StreamValid(psStream))
++ return 0xFFFFFFFF;
++
++ if (!(psStream->ui32DebugLevel & ui32Level))
++ return 0xFFFFFFFF;
++
++ if (!(psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC)) {
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG)
++ PVR_DPF(PVR_DBG_MESSAGE, "%s: %s\r\n",
++ psStream->szName, pszString);
++
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_MONO) {
++ MonoOut(psStream->szName, IMG_FALSE);
++ MonoOut(": ", IMG_FALSE);
++ MonoOut(pszString, IMG_TRUE);
++ }
++ }
++
++ if (!((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) ||
++ (psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC)
++ )
++ )
++ return 0xFFFFFFFF;
++
++ ui32Space = SpaceInStream(psStream);
++
++ if (ui32Space > 0)
++ ui32Space--;
++
++ ui32Len = 0;
++ ui32WPtr = psStream->ui32WPtr;
++ pui8Buffer = (u8 *) psStream->ui32Base;
++
++ while ((pszString[ui32Len] != 0) && (ui32Len < ui32Space)) {
++ pui8Buffer[ui32WPtr] = pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ ui32WPtr = 0;
++ }
++
++ if (ui32Len < ui32Space) {
++
++ pui8Buffer[ui32WPtr] = pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ ui32WPtr = 0;
++
++ psStream->ui32WPtr = ui32WPtr;
++ psStream->ui32DataWritten += ui32Len;
++ } else {
++ ui32Len = 0;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32Len)
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++#endif
++
++ return ui32Len;
++}
++
++u32 DBGDrivReadString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Limit)
++{
++ u32 ui32OutLen;
++ u32 ui32Len;
++ u32 ui32Offset;
++ u8 *pui8Buff;
++
++ if (!StreamValid(psStream))
++ return 0;
++
++ pui8Buff = (u8 *) psStream->ui32Base;
++ ui32Offset = psStream->ui32RPtr;
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ return 0;
++
++ ui32Len = 0;
++ while ((pui8Buff[ui32Offset] != 0)
++ && (ui32Offset != psStream->ui32WPtr)) {
++ ui32Offset++;
++ ui32Len++;
++
++ if (ui32Offset == psStream->ui32Size)
++ ui32Offset = 0;
++ }
++
++ ui32OutLen = ui32Len + 1;
++
++ if (ui32Len > ui32Limit)
++ return 0;
++
++ ui32Offset = psStream->ui32RPtr;
++ ui32Len = 0;
++
++ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit)) {
++ pszString[ui32Len] = pui8Buff[ui32Offset];
++ ui32Offset++;
++ ui32Len++;
++
++ if (ui32Offset == psStream->ui32Size)
++ ui32Offset = 0;
++ }
++
++ pszString[ui32Len] = pui8Buff[ui32Offset];
++
++ psStream->ui32RPtr = ui32Offset + 1;
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ psStream->ui32RPtr = 0;
++
++ return ui32OutLen;
++}
++
++u32 DBGDrivWrite(struct DBG_STREAM *psMainStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level)
++{
++ u32 ui32Space;
++ struct DBG_STREAM *psStream;
++
++ if (!StreamValid(psMainStream))
++ return 0xFFFFFFFF;
++
++ if (!(psMainStream->ui32DebugLevel & ui32Level))
++ return 0xFFFFFFFF;
++
++ if (psMainStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) {
++ if (!(psMainStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE))
++ return 0xFFFFFFFF;
++ } else {
++ if (psMainStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ if ((psMainStream->ui32Current != g_ui32HotKeyFrame)
++ || (g_bHotKeyPressed == IMG_FALSE))
++ return 0xFFFFFFFF;
++
++ }
++
++ if (psMainStream->bInitPhaseComplete)
++ psStream = psMainStream;
++ else
++ psStream = psMainStream->psInitStream;
++
++ ui32Space = SpaceInStream(psStream);
++
++ if (!(psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE))
++ return 0;
++
++ if (ui32Space < 8)
++ return 0;
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ ui32InBuffSize = ui32Space - 8;
++
++ Write(psStream, (u8 *) &ui32InBuffSize, 4);
++ Write(psStream, pui8InBuf, ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++#endif
++ return ui32InBuffSize;
++}
++
++u32 DBGDrivWriteCM(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level)
++{
++
++ if (!StreamValid(psStream))
++ return 0xFFFFFFFF;
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) {
++ if (!(psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE))
++ return 0xFFFFFFFF;
++ } else {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ if ((psStream->ui32Current != g_ui32HotKeyFrame)
++ || (g_bHotKeyPressed == IMG_FALSE))
++ return 0xFFFFFFFF;
++ }
++
++ return DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++}
++
++u32 DBGDrivWrite2(struct DBG_STREAM *psMainStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level)
++{
++ u32 ui32Space;
++ struct DBG_STREAM *psStream;
++
++ if (!StreamValid(psMainStream))
++ return 0xFFFFFFFF;
++
++ if (!(psMainStream->ui32DebugLevel & ui32Level))
++ return 0xFFFFFFFF;
++
++ if (psMainStream->bInitPhaseComplete)
++ psStream = psMainStream;
++ else
++ psStream = psMainStream->psInitStream;
++
++ ui32Space = SpaceInStream(psStream);
++
++ if (!(psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE))
++ return 0;
++
++ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION) {
++
++ if (ui32Space < 32)
++ return 0;
++ } else {
++ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4))) {
++ u32 ui32NewBufSize;
++
++ ui32NewBufSize = 2 * psStream->ui32Size;
++
++ if (ui32InBuffSize > psStream->ui32Size)
++ ui32NewBufSize += ui32InBuffSize;
++
++ if (!ExpandStreamBuffer(psStream, ui32NewBufSize))
++ if (ui32Space < 32)
++ return 0;
++
++ ui32Space = SpaceInStream(psStream);
++ }
++ }
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ ui32InBuffSize = ui32Space - 4;
++
++ Write(psStream, pui8InBuf, ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++#endif
++ return ui32InBuffSize;
++}
++
++u32 DBGDrivRead(struct DBG_STREAM *psMainStream,
++ IMG_BOOL bReadInitBuffer,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf)
++{
++ u32 ui32Data;
++ struct DBG_STREAM *psStream;
++
++ if (!StreamValid(psMainStream))
++ return 0;
++
++ if (bReadInitBuffer)
++ psStream = psMainStream->psInitStream;
++ else
++ psStream = psMainStream;
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ return 0;
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr) {
++ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
++ } else {
++ ui32Data =
++ psStream->ui32WPtr + (psStream->ui32Size -
++ psStream->ui32RPtr);
++ }
++
++ if (ui32Data > ui32OutBuffSize)
++ ui32Data = ui32OutBuffSize;
++
++ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size) {
++ u32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
++ u32 ui32B2 = ui32Data - ui32B1;
++
++ HostMemCopy((void *) pui8OutBuf,
++ (void *) (psStream->ui32Base +
++ psStream->ui32RPtr), ui32B1);
++
++ HostMemCopy((void *) ((u32) pui8OutBuf + ui32B1),
++ (void *) psStream->ui32Base, ui32B2);
++
++ psStream->ui32RPtr = ui32B2;
++ } else {
++ HostMemCopy((void *) pui8OutBuf,
++ (void *) (psStream->ui32Base +
++ psStream->ui32RPtr), ui32Data);
++
++ psStream->ui32RPtr += ui32Data;
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ psStream->ui32RPtr = 0;
++ }
++
++ return ui32Data;
++}
++
++void DBGDrivSetCaptureMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode,
++ u32 ui32Start,
++ u32 ui32End,
++ u32 ui32SampleRate)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32CapMode = ui32Mode;
++ psStream->ui32DefaultMode = ui32Mode;
++ psStream->ui32Start = ui32Start;
++ psStream->ui32End = ui32End;
++ psStream->ui32SampleRate = ui32SampleRate;
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ ActivateHotKeys(psStream);
++}
++
++void DBGDrivSetOutputMode(struct DBG_STREAM *psStream,
++ u32 ui32OutMode)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32OutMode = ui32OutMode;
++}
++
++void DBGDrivSetDebugLevel(struct DBG_STREAM *psStream,
++ u32 ui32DebugLevel)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32DebugLevel = ui32DebugLevel;
++}
++
++void DBGDrivSetFrame(struct DBG_STREAM *psStream, u32 ui32Frame)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32Current = ui32Frame;
++
++ if ((ui32Frame >= psStream->ui32Start) &&
++ (ui32Frame <= psStream->ui32End) &&
++ (((ui32Frame - psStream->ui32Start) % psStream->ui32SampleRate) ==
++ 0))
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ else
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++
++ if (g_bHotkeyMiddump) {
++ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) &&
++ (ui32Frame <= g_ui32HotkeyMiddumpEnd) &&
++ (((ui32Frame -
++ g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) ==
++ 0)) {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ } else {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ if (psStream->ui32Current > g_ui32HotkeyMiddumpEnd)
++ g_bHotkeyMiddump = IMG_FALSE;
++ }
++ }
++
++ if (g_bHotKeyRegistered) {
++ g_bHotKeyRegistered = IMG_FALSE;
++
++ PVR_DPF(PVR_DBG_MESSAGE, "Hotkey pressed (%08x)!\n",
++ psStream);
++
++ if (!g_bHotKeyPressed) {
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++
++ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ && (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)) {
++ if (!g_bHotkeyMiddump) {
++
++ g_ui32HotkeyMiddumpStart =
++ g_ui32HotKeyFrame + 1;
++ g_ui32HotkeyMiddumpEnd = 0xffffffff;
++ g_bHotkeyMiddump = IMG_TRUE;
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "Sampling every %d frame(s)\n",
++ psStream->ui32SampleRate);
++ } else {
++
++ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame;
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "Turning off sampling\n");
++ }
++ }
++
++ }
++
++ if (psStream->ui32Current > g_ui32HotKeyFrame)
++ g_bHotKeyPressed = IMG_FALSE;
++}
++
++u32 DBGDrivGetFrame(struct DBG_STREAM *psStream)
++{
++
++ if (!StreamValid(psStream))
++ return 0;
++
++ return psStream->ui32Current;
++}
++
++u32 DBGDrivIsLastCaptureFrame(struct DBG_STREAM *psStream)
++{
++ u32 ui32NextFrame;
++
++ if (!StreamValid(psStream))
++ return IMG_FALSE;
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) {
++ ui32NextFrame =
++ psStream->ui32Current + psStream->ui32SampleRate;
++ if (ui32NextFrame > psStream->ui32End)
++ return IMG_TRUE;
++ }
++ return IMG_FALSE;
++}
++
++u32 DBGDrivIsCaptureFrame(struct DBG_STREAM *psStream,
++ IMG_BOOL bCheckPreviousFrame)
++{
++ u32 ui32FrameShift = bCheckPreviousFrame ? 1 : 0;
++
++ if (!StreamValid(psStream))
++ return IMG_FALSE;
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) {
++
++ if (g_bHotkeyMiddump) {
++ if ((psStream->ui32Current >=
++ (g_ui32HotkeyMiddumpStart - ui32FrameShift))
++ && (psStream->ui32Current <=
++ (g_ui32HotkeyMiddumpEnd - ui32FrameShift))
++ &&
++ ((((psStream->ui32Current + ui32FrameShift) -
++ g_ui32HotkeyMiddumpStart) %
++ psStream->ui32SampleRate) == 0))
++ return IMG_TRUE;
++ } else {
++ if ((psStream->ui32Current >=
++ (psStream->ui32Start - ui32FrameShift))
++ && (psStream->ui32Current <=
++ (psStream->ui32End - ui32FrameShift))
++ &&
++ ((((psStream->ui32Current + ui32FrameShift) -
++ psStream->ui32Start) %
++ psStream->ui32SampleRate) == 0))
++ return IMG_TRUE;
++ }
++ } else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) {
++ if ((psStream->ui32Current ==
++ (g_ui32HotKeyFrame - ui32FrameShift))
++ && (g_bHotKeyPressed))
++ return IMG_TRUE;
++ }
++
++
++ return IMG_FALSE;
++}
++
++void DBGDrivOverrideMode(struct DBG_STREAM *psStream, u32 ui32Mode)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32CapMode = ui32Mode;
++}
++
++void DBGDrivDefaultMode(struct DBG_STREAM *psStream)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32CapMode = psStream->ui32DefaultMode;
++}
++
++void DBGDrivSetMarker(struct DBG_STREAM *psStream, u32 ui32Marker)
++{
++
++ if (!StreamValid(psStream))
++ return;
++
++ psStream->ui32Marker = ui32Marker;
++}
++
++u32 DBGDrivGetMarker(struct DBG_STREAM *psStream)
++{
++
++ if (!StreamValid(psStream))
++ return 0;
++
++ return psStream->ui32Marker;
++}
++
++u32 DBGDrivGetStreamOffset(struct DBG_STREAM *psMainStream)
++{
++ struct DBG_STREAM *psStream;
++
++ if (!StreamValid(psMainStream))
++ return 0;
++
++ if (psMainStream->bInitPhaseComplete)
++ psStream = psMainStream;
++ else
++ psStream = psMainStream->psInitStream;
++
++ return psStream->ui32DataWritten;
++}
++
++void DBGDrivSetStreamOffset(struct DBG_STREAM *psMainStream,
++ u32 ui32StreamOffset)
++{
++ struct DBG_STREAM *psStream;
++
++ if (!StreamValid(psMainStream))
++ return;
++
++ if (psMainStream->bInitPhaseComplete)
++ psStream = psMainStream;
++ else
++ psStream = psMainStream->psInitStream;
++
++ psStream->ui32DataWritten = ui32StreamOffset;
++}
++
++u32 DBGDrivGetServiceTable(void)
++{
++ return (u32)&g_sDBGKMServices;
++}
++
++u32 DBGDrivWriteLF(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level,
++ u32 ui32Flags)
++{
++ struct DBG_LASTFRAME_BUFFER *psLFBuffer;
++
++ if (!StreamValid(psStream))
++ return 0xFFFFFFFF;
++
++ if (!(psStream->ui32DebugLevel & ui32Level))
++ return 0xFFFFFFFF;
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) {
++ if (!(psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE))
++ return 0xFFFFFFFF;
++ } else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame)
++ || (g_bHotKeyPressed == IMG_FALSE))
++ return 0xFFFFFFFF;
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++ if (ui32Flags & WRITELF_FLAGS_RESETBUF) {
++
++ ui32InBuffSize =
++ (ui32InBuffSize >
++ LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE :
++ ui32InBuffSize;
++ HostMemCopy((void *) psLFBuffer->ui8Buffer,
++ (void *) pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen = ui32InBuffSize;
++ } else {
++
++ ui32InBuffSize =
++ ((psLFBuffer->ui32BufLen + ui32InBuffSize) >
++ LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE -
++ psLFBuffer->
++ ui32BufLen) : ui32InBuffSize;
++ HostMemCopy((void *) (&psLFBuffer->
++ ui8Buffer[psLFBuffer->ui32BufLen]),
++ (void *) pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen += ui32InBuffSize;
++ }
++
++ return ui32InBuffSize;
++}
++
++u32 DBGDrivReadLF(struct DBG_STREAM *psStream,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf)
++{
++ struct DBG_LASTFRAME_BUFFER *psLFBuffer;
++ u32 ui32Data;
++
++ if (!StreamValid(psStream))
++ return 0;
++
++ psLFBuffer = FindLFBuf(psStream);
++
++ ui32Data =
++ (ui32OutBuffSize <
++ psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen;
++
++ HostMemCopy((void *) pui8OutBuf, (void *) psLFBuffer->ui8Buffer,
++ ui32Data);
++
++ return ui32Data;
++}
++
++void DBGDrivStartInitPhase(struct DBG_STREAM *psStream)
++{
++ psStream->bInitPhaseComplete = IMG_FALSE;
++}
++
++void DBGDrivStopInitPhase(struct DBG_STREAM *psStream)
++{
++ psStream->bInitPhaseComplete = IMG_TRUE;
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++void DBGDrivWaitForEvent(enum DBG_EVENT eEvent)
++{
++ HostWaitForEvent(eEvent);
++}
++#endif
++
++static IMG_BOOL ExpandStreamBuffer(struct DBG_STREAM *psStream, u32 ui32NewSize)
++{
++ void *pvNewBuf;
++ u32 ui32NewSizeInPages;
++ u32 ui32NewWOffset;
++ u32 ui32SpaceInOldBuf;
++
++ if (psStream->ui32Size >= ui32NewSize)
++ return IMG_FALSE;
++
++ ui32SpaceInOldBuf = SpaceInStream(psStream);
++
++ ui32NewSizeInPages = ((ui32NewSize + 0xfff) & ~0xfff) / 4096;
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
++ else
++ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
++
++ if (pvNewBuf == NULL)
++ return IMG_FALSE;
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr) {
++
++ HostMemCopy((void *) pvNewBuf,
++ (void *) (psStream->ui32Base +
++ psStream->ui32RPtr),
++ psStream->ui32WPtr - psStream->ui32RPtr);
++ } else {
++ u32 ui32FirstCopySize;
++
++ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
++
++ HostMemCopy((void *) pvNewBuf,
++ (void *) (psStream->ui32Base +
++ psStream->ui32RPtr),
++ ui32FirstCopySize);
++
++ HostMemCopy((void *) ((u32) pvNewBuf +
++ ui32FirstCopySize),
++ (void *) psStream->ui32Base,
++ psStream->ui32WPtr);
++ }
++
++ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ HostNonPageablePageFree((void *) psStream->ui32Base);
++ else
++ HostPageablePageFree((void *) psStream->ui32Base);
++
++ psStream->ui32Base = (u32) pvNewBuf;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = ui32NewWOffset;
++ psStream->ui32Size = ui32NewSizeInPages * 4096;
++
++ return IMG_TRUE;
++}
++
++static u32 SpaceInStream(struct DBG_STREAM *psStream)
++{
++ u32 ui32Space;
++
++ if (psStream->ui32RPtr > psStream->ui32WPtr)
++ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
++ else
++ ui32Space =
++ psStream->ui32RPtr + (psStream->ui32Size -
++ psStream->ui32WPtr);
++
++ return ui32Space;
++}
++
++void DestroyAllStreams(void)
++{
++ while (g_psStreamList != NULL)
++ DBGDrivDestroyStream(g_psStreamList);
++ return;
++}
++
++struct DBG_LASTFRAME_BUFFER *FindLFBuf(struct DBG_STREAM *psStream)
++{
++ struct DBG_LASTFRAME_BUFFER *psLFBuffer;
++
++ psLFBuffer = g_psLFBufferList;
++
++ while (psLFBuffer) {
++ if (psLFBuffer->psStream == psStream)
++ break;
++
++ psLFBuffer = psLFBuffer->psNext;
++ }
++
++ return psLFBuffer;
++}
+diff --git a/drivers/gpu/pvr/tools/dbgdriv.h b/drivers/gpu/pvr/tools/dbgdriv.h
+new file mode 100644
+index 0000000..4e2ebc3
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/dbgdriv.h
+@@ -0,0 +1,183 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRIV_
++#define _DBGDRIV_
++
++#define BUFFER_SIZE (64 * PAGESIZE)
++
++#define DBGDRIV_VERSION 0x100
++#define MAX_PROCESSES 2
++#define BLOCK_USED 0x01
++#define BLOCK_LOCKED 0x02
++#define DBGDRIV_MONOBASE 0x000B0000
++
++extern void *g_pvAPIMutex;
++
++void *DBGDrivCreateStream(char *pszName,
++ u32 ui32CapMode,
++ u32 ui32OutMode,
++ u32 ui32Flags,
++ u32 ui32Pages);
++void DBGDrivDestroyStream(struct DBG_STREAM *psStream);
++void *DBGDrivFindStream(char *pszName,
++ IMG_BOOL bResetStream);
++u32 DBGDrivWriteString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level);
++u32 DBGDrivReadString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Limit);
++u32 DBGDrivWrite(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level);
++u32 DBGDrivWrite2(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level);
++u32 DBGDrivRead(struct DBG_STREAM *psStream,
++ IMG_BOOL bReadInitBuffer,
++ u32 ui32OutBufferSize,
++ u8 *pui8OutBuf);
++void DBGDrivSetCaptureMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode,
++ u32 ui32Start,
++ u32 ui32Stop,
++ u32 ui32SampleRate);
++void DBGDrivSetOutputMode(struct DBG_STREAM *psStream,
++ u32 ui32OutMode);
++void DBGDrivSetDebugLevel(struct DBG_STREAM *psStream,
++ u32 ui32DebugLevel);
++void DBGDrivSetFrame(struct DBG_STREAM *psStream,
++ u32 ui32Frame);
++u32 DBGDrivGetFrame(struct DBG_STREAM *psStream);
++void DBGDrivOverrideMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode);
++void DBGDrivDefaultMode(struct DBG_STREAM *psStream);
++u32 DBGDrivGetServiceTable(void);
++u32 DBGDrivWriteStringCM(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level);
++u32 DBGDrivWriteCM(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level);
++void DBGDrivSetMarker(struct DBG_STREAM *psStream,
++ u32 ui32Marker);
++u32 DBGDrivGetMarker(struct DBG_STREAM *psStream);
++u32 DBGDrivIsLastCaptureFrame(struct DBG_STREAM *psStream);
++u32 DBGDrivIsCaptureFrame(struct DBG_STREAM *psStream,
++ IMG_BOOL bCheckPreviousFrame);
++u32 DBGDrivWriteLF(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level,
++ u32 ui32Flags);
++u32 DBGDrivReadLF(struct DBG_STREAM *psStream,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf);
++void DBGDrivStartInitPhase(struct DBG_STREAM *psStream);
++void DBGDrivStopInitPhase(struct DBG_STREAM *psStream);
++u32 DBGDrivGetStreamOffset(struct DBG_STREAM *psStream);
++void DBGDrivSetStreamOffset(struct DBG_STREAM *psStream, u32 ui32StreamOffset);
++void DBGDrivWaitForEvent(enum DBG_EVENT eEvent);
++
++void DestroyAllStreams(void);
++
++u32 AtoI(char *szIn);
++
++void HostMemSet(void *pvDest, u8 ui8Value, u32 ui32Size);
++void HostMemCopy(void *pvDest, void *pvSrc, u32 ui32Size);
++IMG_BOOL StreamValid(struct DBG_STREAM *psStream);
++void Write(struct DBG_STREAM *psStream, u8 *pui8Data,
++ u32 ui32InBuffSize);
++void MonoOut(char *pszString, IMG_BOOL bNewLine);
++
++void *ExtDBGDrivCreateStream(char *pszName,
++ u32 ui32CapMode,
++ u32 ui32OutMode,
++ u32 ui32Flags,
++ u32 ui32Size);
++void ExtDBGDrivDestroyStream(struct DBG_STREAM *psStream);
++void *ExtDBGDrivFindStream(char *pszName,
++ IMG_BOOL bResetStream);
++u32 ExtDBGDrivWriteString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Level);
++u32 ExtDBGDrivReadString(struct DBG_STREAM *psStream,
++ char *pszString,
++ u32 ui32Limit);
++u32 ExtDBGDrivWrite(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level);
++u32 ExtDBGDrivRead(struct DBG_STREAM *psStream,
++ IMG_BOOL bReadInitBuffer,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf);
++void ExtDBGDrivSetCaptureMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode,
++ u32 ui32Start,
++ u32 ui32End,
++ u32 ui32SampleRate);
++void ExtDBGDrivSetOutputMode(struct DBG_STREAM *psStream,
++ u32 ui32OutMode);
++void ExtDBGDrivSetDebugLevel(struct DBG_STREAM *psStream,
++ u32 ui32DebugLevel);
++void ExtDBGDrivSetFrame(struct DBG_STREAM *psStream,
++ u32 ui32Frame);
++u32 ExtDBGDrivGetFrame(struct DBG_STREAM *psStream);
++void ExtDBGDrivOverrideMode(struct DBG_STREAM *psStream,
++ u32 ui32Mode);
++void ExtDBGDrivDefaultMode(struct DBG_STREAM *psStream);
++u32 ExtDBGDrivWrite2(struct DBG_STREAM *psStream, u8 *pui8InBuf,
++ u32 ui32InBuffSize, u32 ui32Level);
++u32 ExtDBGDrivWriteStringCM(struct DBG_STREAM *psStream, char *pszString,
++ u32 ui32Level);
++u32 ExtDBGDrivWriteCM(struct DBG_STREAM *psStream, u8 *pui8InBuf,
++ u32 ui32InBuffSize, u32 ui32Level);
++void ExtDBGDrivSetMarker(struct DBG_STREAM *psStream, u32 ui32Marker);
++u32 ExtDBGDrivGetMarker(struct DBG_STREAM *psStream);
++void ExtDBGDrivStartInitPhase(struct DBG_STREAM *psStream);
++void ExtDBGDrivStopInitPhase(struct DBG_STREAM *psStream);
++u32 ExtDBGDrivIsLastCaptureFrame(struct DBG_STREAM *psStream);
++u32 ExtDBGDrivIsCaptureFrame(struct DBG_STREAM *psStream,
++ IMG_BOOL bCheckPreviousFrame);
++u32 ExtDBGDrivWriteLF(struct DBG_STREAM *psStream,
++ u8 *pui8InBuf,
++ u32 ui32InBuffSize,
++ u32 ui32Level,
++ u32 ui32Flags);
++u32 ExtDBGDrivReadLF(struct DBG_STREAM *psStream,
++ u32 ui32OutBuffSize,
++ u8 *pui8OutBuf);
++u32 ExtDBGDrivGetStreamOffset(struct DBG_STREAM *psStream);
++void ExtDBGDrivSetStreamOffset(struct DBG_STREAM *psStream,
++ u32 ui32StreamOffset);
++void ExtDBGDrivWaitForEvent(enum DBG_EVENT eEvent);
++
++#endif
+diff --git a/drivers/gpu/pvr/tools/hostfunc.c b/drivers/gpu/pvr/tools/hostfunc.c
+new file mode 100644
+index 0000000..efc9f68
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/hostfunc.c
+@@ -0,0 +1,267 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <asm/page.h>
++#include <linux/vmalloc.h>
++#include <linux/mutex.h>
++#include <linux/hardirq.h>
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "hostfunc.h"
++
++#define PVR_STRING_TERMINATOR '\0'
++#define PVR_IS_FILE_SEPARATOR(character) \
++ (((character) == '\\') || ((character) == '/'))
++
++static u32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++void PVRSRVDebugPrintf(u32 ui32DebugLevel,
++ const char *pszFileName,
++ u32 ui32Line, const char *pszFormat, ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++ char *pszLeafName;
++
++ pszLeafName = (char *)strrchr(pszFileName, '\\');
++
++ if (pszLeafName)
++ pszFileName = pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug) {
++ va_list vaArgs;
++ static char szBuffer[256];
++
++ va_start(vaArgs, pszFormat);
++
++ if (bDebug) {
++ switch (ui32DebugLevel) {
++ case DBGPRIV_FATAL:
++ {
++ strcpy(szBuffer, "PVR_K:(Fatal): ");
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strcpy(szBuffer, "PVR_K:(Error): ");
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strcpy(szBuffer, "PVR_K:(Warning): ");
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strcpy(szBuffer, "PVR_K:(Message): ");
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strcpy(szBuffer, "PVR_K:(Verbose): ");
++ break;
++ }
++ default:
++ {
++ strcpy(szBuffer,
++ "PVR_K:(Unknown message level)");
++ break;
++ }
++ }
++ } else {
++ strcpy(szBuffer, "PVR_K: ");
++ }
++
++ vsnprintf(&szBuffer[strlen(szBuffer)], sizeof(szBuffer) - strlen(szBuffer), pszFormat, vaArgs);
++
++ if (!bTrace)
++ snprintf(&szBuffer[strlen(szBuffer)], sizeof(szBuffer) - strlen(szBuffer), " [%d, %s]",
++ (int)ui32Line, pszFileName);
++
++ printk(KERN_INFO "%s\r\n", szBuffer);
++
++ va_end(vaArgs);
++ }
++}
++
++void HostMemSet(void *pvDest, u8 ui8Value, u32 ui32Size)
++{
++ memset(pvDest, (int)ui8Value, (size_t) ui32Size);
++}
++
++void HostMemCopy(void *pvDst, void *pvSrc, u32 ui32Size)
++{
++ memcpy(pvDst, pvSrc, ui32Size);
++}
++
++u32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName,
++ u32 *pui32Data)
++{
++
++ return 0;
++}
++
++void *HostPageablePageAlloc(u32 ui32Pages)
++{
++ return (void *)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++void HostPageablePageFree(void *pvBase)
++{
++ vfree(pvBase);
++}
++
++void *HostNonPageablePageAlloc(u32 ui32Pages)
++{
++ return (void *)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++void HostNonPageablePageFree(void *pvBase)
++{
++ vfree(pvBase);
++}
++
++void *HostMapKrnBufIntoUser(void *pvKrnAddr, u32 ui32Size,
++ void **ppvMdl)
++{
++
++ return NULL;
++}
++
++void HostUnMapKrnBufFromUser(void *pvUserAddr, void *pvMdl,
++ void *pvProcess)
++{
++
++}
++
++void HostCreateRegDeclStreams(void)
++{
++
++}
++
++void *HostCreateMutex(void)
++{
++ struct mutex *psSem;
++
++ psSem = kmalloc(sizeof(*psSem), GFP_KERNEL);
++ if (psSem)
++ mutex_init(psSem);
++
++ return psSem;
++}
++
++void HostAquireMutex(void *pvMutex)
++{
++ BUG_ON(in_interrupt());
++
++#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS)
++ if (!mutex_trylock((struct mutex *)pvMutex)) {
++ printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n");
++ mutex_lock((struct mutex *)pvMutex);
++ }
++#else
++ mutex_lock((struct mutex *)pvMutex);
++#endif
++}
++
++void HostReleaseMutex(void *pvMutex)
++{
++ mutex_unlock((struct mutex *)pvMutex);
++}
++
++void HostDestroyMutex(void *pvMutex)
++{
++ kfree(pvMutex);
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++#define EVENT_WAIT_TIMEOUT_MS 500
++#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
++
++static int iStreamData;
++static wait_queue_head_t sStreamDataEvent;
++
++s32 HostCreateEventObjects(void)
++{
++ init_waitqueue_head(&sStreamDataEvent);
++
++ return 0;
++}
++
++void HostWaitForEvent(enum DBG_EVENT eEvent)
++{
++ switch (eEvent) {
++ case DBG_EVENT_STREAM_DATA:
++
++ wait_event_interruptible_timeout(sStreamDataEvent,
++ iStreamData != 0,
++ EVENT_WAIT_TIMEOUT_JIFFIES);
++ iStreamData = 0;
++ break;
++ default:
++
++ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
++ break;
++ }
++}
++
++void HostSignalEvent(enum DBG_EVENT eEvent)
++{
++ switch (eEvent) {
++ case DBG_EVENT_STREAM_DATA:
++ iStreamData = 1;
++ wake_up_interruptible(&sStreamDataEvent);
++ break;
++ default:
++ break;
++ }
++}
++
++void HostDestroyEventObjects(void)
++{
++}
++#endif
+diff --git a/drivers/gpu/pvr/tools/hostfunc.h b/drivers/gpu/pvr/tools/hostfunc.h
+new file mode 100644
+index 0000000..64f8823
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/hostfunc.h
+@@ -0,0 +1,58 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOSTFUNC_
++#define _HOSTFUNC_
++
++#define HOST_PAGESIZE (4096)
++#define DBG_MEMORY_INITIALIZER (0xe2)
++
++u32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName,
++ u32 *pui32Data);
++
++void *HostPageablePageAlloc(u32 ui32Pages);
++void HostPageablePageFree(void *pvBase);
++void *HostNonPageablePageAlloc(u32 ui32Pages);
++void HostNonPageablePageFree(void *pvBase);
++
++void *HostMapKrnBufIntoUser(void *pvKrnAddr, u32 ui32Size, void **ppvMdl);
++void HostUnMapKrnBufFromUser(void *pvUserAddr, void *pvMdl, void *pvProcess);
++
++void HostCreateRegDeclStreams(void);
++
++void *HostCreateMutex(void);
++void HostAquireMutex(void *pvMutex);
++void HostReleaseMutex(void *pvMutex);
++void HostDestroyMutex(void *pvMutex);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++s32 HostCreateEventObjects(void);
++void HostWaitForEvent(enum DBG_EVENT eEvent);
++void HostSignalEvent(enum DBG_EVENT eEvent);
++void HostDestroyEventObjects(void);
++#endif
++
++#endif
+diff --git a/drivers/gpu/pvr/tools/hotkey.c b/drivers/gpu/pvr/tools/hotkey.c
+new file mode 100644
+index 0000000..b5a2eb8
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/hotkey.c
+@@ -0,0 +1,101 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++u32 g_ui32HotKeyFrame = 0xFFFFFFFF;
++IMG_BOOL g_bHotKeyPressed = IMG_FALSE;
++IMG_BOOL g_bHotKeyRegistered = IMG_FALSE;
++
++struct PRIVATEHOTKEYDATA g_PrivateHotKeyData;
++
++void ReadInHotKeys(void)
++{
++ g_PrivateHotKeyData.ui32ScanCode = 0x58;
++ g_PrivateHotKeyData.ui32ShiftState = 0x0;
++
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode",
++ &g_PrivateHotKeyData.ui32ScanCode);
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState",
++ &g_PrivateHotKeyData.ui32ShiftState);
++}
++
++void RegisterKeyPressed(u32 dwui32ScanCode, struct HOTKEYINFO *pInfo)
++{
++ struct DBG_STREAM *psStream;
++
++ PVR_UNREFERENCED_PARAMETER(pInfo);
++
++ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode) {
++ PVR_DPF(PVR_DBG_MESSAGE, "PDUMP Hotkey pressed !\n");
++
++ psStream = (struct DBG_STREAM *)
++ g_PrivateHotKeyData.sHotKeyInfo.pvStream;
++
++ if (!g_bHotKeyPressed) {
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++ }
++}
++
++void ActivateHotKeys(struct DBG_STREAM *psStream)
++{
++
++ ReadInHotKeys();
++
++ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey) {
++ if (g_PrivateHotKeyData.ui32ScanCode != 0) {
++ PVR_DPF(PVR_DBG_MESSAGE,
++ "Activate HotKey for PDUMP.\n");
++
++ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream;
++
++ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode,
++ g_PrivateHotKeyData.ui32ShiftState,
++ &g_PrivateHotKeyData.sHotKeyInfo);
++ } else {
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++ }
++}
++
++void DeactivateHotKeys(void)
++{
++ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0) {
++ PVR_DPF(PVR_DBG_MESSAGE, "Deactivate HotKey.\n");
++
++ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey);
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++}
+diff --git a/drivers/gpu/pvr/tools/hotkey.h b/drivers/gpu/pvr/tools/hotkey.h
+new file mode 100644
+index 0000000..56c559f
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/hotkey.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOTKEY_
++#define _HOTKEY_
++
++struct HOTKEYINFO {
++ u8 ui8ScanCode;
++ u8 ui8Type;
++ u8 ui8Flag;
++ u8 ui8Filler1;
++ u32 ui32ShiftState;
++ u32 ui32HotKeyProc;
++ void *pvStream;
++ u32 hHotKey;
++};
++
++struct PRIVATEHOTKEYDATA {
++ u32 ui32ScanCode;
++ u32 ui32ShiftState;
++ struct HOTKEYINFO sHotKeyInfo;
++};
++
++extern u32 g_ui32HotKeyFrame;
++extern IMG_BOOL g_bHotKeyPressed;
++extern IMG_BOOL g_bHotKeyRegistered;
++
++void ReadInHotKeys(void);
++void ActivateHotKeys(struct DBG_STREAM *psStream);
++void DeactivateHotKeys(void);
++
++void RemoveHotKey(u32 hHotKey);
++void DefineHotKey(u32 ui32ScanCode, u32 ui32ShiftState,
++ struct HOTKEYINFO *psInfo);
++void RegisterKeyPressed(u32 ui32ScanCode, struct HOTKEYINFO *psInfo);
++
++#endif
+diff --git a/drivers/gpu/pvr/tools/ioctl.c b/drivers/gpu/pvr/tools/ioctl.c
+new file mode 100644
+index 0000000..ddba527
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/ioctl.c
+@@ -0,0 +1,399 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_types.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++
++u32 DBGDIOCDrivCreateStream(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ struct DBG_IN_CREATESTREAM *psIn;
++ void **ppvOut;
++ static char name[32];
++
++ psIn = (struct DBG_IN_CREATESTREAM *)pvInBuffer;
++ ppvOut = (void **)pvOutBuffer;
++
++
++ if (copy_from_user(name, psIn->pszName, 32) != 0)
++ return IMG_FALSE;
++ *ppvOut =
++ ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode,
++ 0, psIn->ui32Pages);
++
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivDestroyStream(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ u32 *pStream;
++ struct DBG_STREAM *psStream;
++
++ pStream = (u32 *) pvInBuffer;
++ psStream = (struct DBG_STREAM *)*pStream;
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivDestroyStream(psStream);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivGetStream(void *pvInBuffer, void *pvOutBuffer)
++{
++ struct DBG_IN_FINDSTREAM *psParams;
++ u32 *pui32Stream;
++
++ psParams = (struct DBG_IN_FINDSTREAM *)pvInBuffer;
++ pui32Stream = (u32 *) pvOutBuffer;
++
++ *pui32Stream =
++ (u32) ExtDBGDrivFindStream(psParams->pszName,
++ psParams->bResetStream);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWriteString(void *pvInBuffer, void *pvOutBuffer)
++{
++ struct DBG_IN_WRITESTRING *psParams;
++ u32 *pui32OutLen;
++
++ psParams = (struct DBG_IN_WRITESTRING *)pvInBuffer;
++ pui32OutLen = (u32 *) pvOutBuffer;
++
++ *pui32OutLen =
++ ExtDBGDrivWriteString((struct DBG_STREAM *)psParams->pvStream,
++ psParams->pszString, psParams->ui32Level);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWriteStringCM(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ struct DBG_IN_WRITESTRING *psParams;
++ u32 *pui32OutLen;
++
++ psParams = (struct DBG_IN_WRITESTRING *)pvInBuffer;
++ pui32OutLen = (u32 *) pvOutBuffer;
++
++ *pui32OutLen =
++ ExtDBGDrivWriteStringCM((struct DBG_STREAM *)psParams->pvStream,
++ psParams->pszString, psParams->ui32Level);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivReadString(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pui32OutLen;
++ struct DBG_IN_READSTRING *psParams;
++
++ psParams = (struct DBG_IN_READSTRING *)pvInBuffer;
++ pui32OutLen = (u32 *) pvOutBuffer;
++
++ *pui32OutLen =
++ ExtDBGDrivReadString(psParams->pvStream, psParams->pszString,
++ psParams->ui32StringLen);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWrite(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pui32BytesCopied;
++ struct DBG_IN_WRITE *psInParams;
++
++ psInParams = (struct DBG_IN_WRITE *)pvInBuffer;
++ pui32BytesCopied = (u32 *) pvOutBuffer;
++
++ *pui32BytesCopied =
++ ExtDBGDrivWrite((struct DBG_STREAM *)psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32TransferSize,
++ psInParams->ui32Level);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWrite2(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pui32BytesCopied;
++ struct DBG_IN_WRITE *psInParams;
++
++ psInParams = (struct DBG_IN_WRITE *)pvInBuffer;
++ pui32BytesCopied = (u32 *) pvOutBuffer;
++
++ *pui32BytesCopied =
++ ExtDBGDrivWrite2((struct DBG_STREAM *)psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32TransferSize,
++ psInParams->ui32Level);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWriteCM(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pui32BytesCopied;
++ struct DBG_IN_WRITE *psInParams;
++
++ psInParams = (struct DBG_IN_WRITE *)pvInBuffer;
++ pui32BytesCopied = (u32 *) pvOutBuffer;
++
++ *pui32BytesCopied =
++ ExtDBGDrivWriteCM((struct DBG_STREAM *)psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32TransferSize,
++ psInParams->ui32Level);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivRead(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pui32BytesCopied;
++ struct DBG_IN_READ *psInParams;
++
++ psInParams = (struct DBG_IN_READ *)pvInBuffer;
++ pui32BytesCopied = (u32 *) pvOutBuffer;
++
++ *pui32BytesCopied =
++ ExtDBGDrivRead((struct DBG_STREAM *)psInParams->pvStream,
++ psInParams->bReadInitBuffer,
++ psInParams->ui32OutBufferSize,
++ psInParams->pui8OutBuffer);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivSetCaptureMode(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ struct DBG_IN_SETDEBUGMODE *psParams;
++
++ psParams = (struct DBG_IN_SETDEBUGMODE *)pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetCaptureMode((struct DBG_STREAM *)psParams->pvStream,
++ psParams->ui32Mode,
++ psParams->ui32Start,
++ psParams->ui32End, psParams->ui32SampleRate);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivSetOutMode(void *pvInBuffer, void *pvOutBuffer)
++{
++ struct DBG_IN_SETDEBUGOUTMODE *psParams;
++
++ psParams = (struct DBG_IN_SETDEBUGOUTMODE *)pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetOutputMode((struct DBG_STREAM *)psParams->pvStream,
++ psParams->ui32Mode);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivSetDebugLevel(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ struct DBG_IN_SETDEBUGLEVEL *psParams;
++
++ psParams = (struct DBG_IN_SETDEBUGLEVEL *)pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetDebugLevel((struct DBG_STREAM *)psParams->pvStream,
++ psParams->ui32Level);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivSetFrame(void *pvInBuffer, void *pvOutBuffer)
++{
++ struct DBG_IN_SETFRAME *psParams;
++
++ psParams = (struct DBG_IN_SETFRAME *)pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetFrame((struct DBG_STREAM *)psParams->pvStream,
++ psParams->ui32Frame);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivGetFrame(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pStream;
++ struct DBG_STREAM *psStream;
++ u32 *pui32Current;
++
++ pStream = (u32 *) pvInBuffer;
++ psStream = (struct DBG_STREAM *)*pStream;
++ pui32Current = (u32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetFrame(psStream);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivIsCaptureFrame(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ struct DBG_IN_ISCAPTUREFRAME *psParams;
++ u32 *pui32Current;
++
++ psParams = (struct DBG_IN_ISCAPTUREFRAME *)pvInBuffer;
++ pui32Current = (u32 *) pvOutBuffer;
++
++ *pui32Current =
++ ExtDBGDrivIsCaptureFrame((struct DBG_STREAM *)psParams->pvStream,
++ psParams->bCheckPreviousFrame);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivOverrideMode(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ struct DBG_IN_OVERRIDEMODE *psParams;
++
++ psParams = (struct DBG_IN_OVERRIDEMODE *)pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivOverrideMode((struct DBG_STREAM *)psParams->pvStream,
++ psParams->ui32Mode);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivDefaultMode(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pStream;
++ struct DBG_STREAM *psStream;
++
++ pStream = (u32 *) pvInBuffer;
++ psStream = (struct DBG_STREAM *)*pStream;
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivDefaultMode(psStream);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivSetMarker(void *pvInBuffer, void *pvOutBuffer)
++{
++ struct DBG_IN_SETMARKER *psParams;
++
++ psParams = (struct DBG_IN_SETMARKER *)pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetMarker((struct DBG_STREAM *)psParams->pvStream,
++ psParams->ui32Marker);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivGetMarker(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pStream;
++ struct DBG_STREAM *psStream;
++ u32 *pui32Current;
++
++ pStream = (u32 *) pvInBuffer;
++ psStream = (struct DBG_STREAM *)*pStream;
++ pui32Current = (u32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetMarker(psStream);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivGetServiceTable(void *pvInBuffer,
++ void *pvOutBuffer)
++{
++ u32 *pui32Out;
++
++ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
++ pui32Out = (u32 *) pvOutBuffer;
++
++ *pui32Out = DBGDrivGetServiceTable();
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWriteLF(void *pvInBuffer, void *pvOutBuffer)
++{
++ struct DBG_IN_WRITE_LF *psInParams;
++ u32 *pui32BytesCopied;
++
++ psInParams = (struct DBG_IN_WRITE_LF *)pvInBuffer;
++ pui32BytesCopied = (u32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteLF(psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32BufferSize,
++ psInParams->ui32Level,
++ psInParams->ui32Flags);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivReadLF(void *pvInBuffer, void *pvOutBuffer)
++{
++ u32 *pui32BytesCopied;
++ struct DBG_IN_READ *psInParams;
++
++ psInParams = (struct DBG_IN_READ *)pvInBuffer;
++ pui32BytesCopied = (u32 *) pvOutBuffer;
++
++ *pui32BytesCopied =
++ ExtDBGDrivReadLF((struct DBG_STREAM *)psInParams->pvStream,
++ psInParams->ui32OutBufferSize,
++ psInParams->pui8OutBuffer);
++
++ return IMG_TRUE;
++}
++
++u32 DBGDIOCDrivWaitForEvent(void *pvInBuffer, void *pvOutBuffer)
++{
++ enum DBG_EVENT eEvent = (enum DBG_EVENT)(*(u32 *) pvInBuffer);
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivWaitForEvent(eEvent);
++
++ return IMG_TRUE;
++}
+diff --git a/drivers/gpu/pvr/tools/ioctl.h b/drivers/gpu/pvr/tools/ioctl.h
+new file mode 100644
+index 0000000..fcb1ff1
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/ioctl.h
+@@ -0,0 +1,81 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _IOCTL_
++#define _IOCTL_
++
++u32 DBGDIOCDrivCreateStream(void *, void *);
++u32 DBGDIOCDrivDestroyStream(void *, void *);
++u32 DBGDIOCDrivGetStream(void *, void *);
++u32 DBGDIOCDrivWriteString(void *, void *);
++u32 DBGDIOCDrivReadString(void *, void *);
++u32 DBGDIOCDrivWrite(void *, void *);
++u32 DBGDIOCDrivWrite2(void *, void *);
++u32 DBGDIOCDrivRead(void *, void *);
++u32 DBGDIOCDrivSetCaptureMode(void *, void *);
++u32 DBGDIOCDrivSetOutMode(void *, void *);
++u32 DBGDIOCDrivSetDebugLevel(void *, void *);
++u32 DBGDIOCDrivSetFrame(void *, void *);
++u32 DBGDIOCDrivGetFrame(void *, void *);
++u32 DBGDIOCDrivOverrideMode(void *, void *);
++u32 DBGDIOCDrivDefaultMode(void *, void *);
++u32 DBGDIOCDrivGetServiceTable(void *, void *);
++u32 DBGDIOCDrivWriteStringCM(void *, void *);
++u32 DBGDIOCDrivWriteCM(void *, void *);
++u32 DBGDIOCDrivSetMarker(void *, void *);
++u32 DBGDIOCDrivGetMarker(void *, void *);
++u32 DBGDIOCDrivIsCaptureFrame(void *, void *);
++u32 DBGDIOCDrivWriteLF(void *, void *);
++u32 DBGDIOCDrivReadLF(void *, void *);
++u32 DBGDIOCDrivWaitForEvent(void *, void *);
++
++u32(*g_DBGDrivProc[])(void *, void *) = {
++DBGDIOCDrivCreateStream,
++ DBGDIOCDrivDestroyStream,
++ DBGDIOCDrivGetStream,
++ DBGDIOCDrivWriteString,
++ DBGDIOCDrivReadString,
++ DBGDIOCDrivWrite,
++ DBGDIOCDrivRead,
++ DBGDIOCDrivSetCaptureMode,
++ DBGDIOCDrivSetOutMode,
++ DBGDIOCDrivSetDebugLevel,
++ DBGDIOCDrivSetFrame,
++ DBGDIOCDrivGetFrame,
++ DBGDIOCDrivOverrideMode,
++ DBGDIOCDrivDefaultMode,
++ DBGDIOCDrivGetServiceTable,
++ DBGDIOCDrivWrite2,
++ DBGDIOCDrivWriteStringCM,
++ DBGDIOCDrivWriteCM,
++ DBGDIOCDrivSetMarker,
++ DBGDIOCDrivGetMarker,
++ DBGDIOCDrivIsCaptureFrame,
++ DBGDIOCDrivWriteLF, DBGDIOCDrivReadLF, DBGDIOCDrivWaitForEvent};
++
++#define MAX_DBGVXD_W32_API (sizeof(g_DBGDrivProc)/sizeof(g_DBGDrivProc[0]))
++
++#endif
+diff --git a/drivers/gpu/pvr/tools/linuxsrv.h b/drivers/gpu/pvr/tools/linuxsrv.h
+new file mode 100644
+index 0000000..822ba4e
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/linuxsrv.h
+@@ -0,0 +1,47 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _LINUXSRV_H__
++#define _LINUXSRV_H__
++
++struct IOCTL_PACKAGE {
++ u32 ui32Cmd;
++ u32 ui32Size;
++ void *pInBuffer;
++ u32 ui32InBufferSize;
++ void *pOutBuffer;
++ u32 ui32OutBufferSize;
++};
++
++u32 DeviceIoControl(u32 hDevice,
++ u32 ui32ControlCode,
++ void *pInBuffer,
++ u32 ui32InBufferSize,
++ void *pOutBuffer,
++ u32 ui32OutBufferSize,
++ u32 *pui32BytesReturned);
++
++#endif
+diff --git a/drivers/gpu/pvr/tools/main.c b/drivers/gpu/pvr/tools/main.c
+new file mode 100644
+index 0000000..5de15cc
+--- /dev/null
++++ b/drivers/gpu/pvr/tools/main.c
+@@ -0,0 +1,197 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/kdev_t.h>
++#include <linux/pci.h>
++#include <linux/list.h>
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++#include <linux/uaccess.h>
++
++#include "img_types.h"
++#include "linuxsrv.h"
++#include "ioctl.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hostfunc.h"
++#include "pvr_debug.h"
++
++#define DRVNAME "dbgdrv"
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++
++static int AssignedMajorNumber;
++
++static long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
++
++static int dbgdrv_open(struct inode unref__ * pInode,
++ struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_release(struct inode unref__ * pInode,
++ struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_mmap(struct file *pFile, struct vm_area_struct *ps_vma)
++{
++ return 0;
++}
++
++static const struct file_operations dbgdrv_fops = {
++owner: THIS_MODULE,
++unlocked_ioctl : dbgdrv_ioctl,
++open : dbgdrv_open,
++release : dbgdrv_release,
++mmap : dbgdrv_mmap,
++};
++
++void DBGDrvGetServiceTable(void **fn_table)
++{
++ *fn_table = &g_sDBGKMServices;
++
++}
++EXPORT_SYMBOL(DBGDrvGetServiceTable);
++
++void cleanup_module(void)
++{
++ if (AssignedMajorNumber > 0)
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++
++ if (g_pvAPIMutex != NULL)
++ HostDestroyMutex(g_pvAPIMutex);
++
++ return;
++}
++
++int init_module(void)
++{
++ g_pvAPIMutex = HostCreateMutex();
++ if (g_pvAPIMutex == NULL) {
++ cleanup_module();
++ return -ENOMEM;
++ }
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++ (void)HostCreateEventObjects();
++#endif
++
++ AssignedMajorNumber =
++ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
++
++ if (AssignedMajorNumber <= 0) {
++ PVR_DPF(PVR_DBG_ERROR, " unable to get major\n");
++ cleanup_module();
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static long dbgdrv_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct IOCTL_PACKAGE *pIP = (struct IOCTL_PACKAGE *)arg;
++
++ char *buffer, *in, *out;
++
++ if ((pIP->ui32InBufferSize > (PAGE_SIZE >> 1))
++ || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1))) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "Sizes of the buffers are too large, cannot do ioctl\n");
++ return -1;
++ }
++
++ buffer = (char *)HostPageablePageAlloc(1);
++ if (!buffer) {
++ PVR_DPF(PVR_DBG_ERROR,
++ "Failed to allocate buffer, cannot do ioctl\n");
++ return -EFAULT;
++ }
++
++ in = buffer;
++ out = buffer + (PAGE_SIZE >> 1);
++
++ if (copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0)
++ goto init_failed;
++
++ cmd = ((pIP->ui32Cmd >> 2) & 0xFFF) - 0x801;
++
++ if (pIP->ui32Cmd == DEBUG_SERVICE_READ) {
++ char *ui8Tmp;
++ u32 *pui32BytesCopied = (u32 *) out;
++ struct DBG_IN_READ *psReadInParams = (struct DBG_IN_READ *)in;
++
++ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize);
++ if (!ui8Tmp)
++ goto init_failed;
++ *pui32BytesCopied = ExtDBGDrivRead((struct DBG_STREAM *)
++ psReadInParams->pvStream,
++ psReadInParams->bReadInitBuffer,
++ psReadInParams->ui32OutBufferSize, ui8Tmp);
++ if (copy_to_user(psReadInParams->pui8OutBuffer, ui8Tmp,
++ *pui32BytesCopied) != 0) {
++ vfree(ui8Tmp);
++ goto init_failed;
++ }
++ vfree(ui8Tmp);
++ } else {
++ (g_DBGDrivProc[cmd]) (in, out);
++ }
++
++ if (copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0)
++ goto init_failed;
++
++ HostPageablePageFree((void *) buffer);
++ return 0;
++
++init_failed:
++ HostPageablePageFree((void *) buffer);
++ return -EFAULT;
++}
++
++void RemoveHotKey(unsigned hHotKey)
++{
++
++}
++
++void DefineHotKey(unsigned ScanCode, unsigned ShiftState, void *pInfo)
++{
++
++}
++
+diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
+index 3d94a14..84f0d49 100644
+--- a/drivers/video/Kconfig
++++ b/drivers/video/Kconfig
+@@ -17,6 +17,8 @@ source "drivers/gpu/vga/Kconfig"
+
+ source "drivers/gpu/drm/Kconfig"
+
++source "drivers/gpu/pvr/Kconfig"
++
+ config VGASTATE
+ tristate
+ default n
+diff --git a/include/video/sgx-util.h b/include/video/sgx-util.h
+new file mode 100644
+index 0000000..4a5bd7f
+--- /dev/null
++++ b/include/video/sgx-util.h
+@@ -0,0 +1,64 @@
++/*
++ * SGX utility functions
++ *
++ * Copyright (C) 2010 Nokia Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++#ifndef __SGX_UTIL_H
++#define __SGX_UTIL_H
++
++#include <linux/kernel.h>
++
++#define OMAPLFB_PAGE_SIZE 4096
++
++/* Greatest common divisor */
++static unsigned long gcd(unsigned long a, unsigned long b)
++{
++ unsigned long r;
++
++ if (a < b) {
++ r = a;
++ a = b;
++ b = r;
++ }
++
++ while ((r = a % b) != 0) {
++ a = b;
++ b = r;
++ }
++
++ return b;
++}
++
++/*
++ * Work out the smallest size that is aligned to both 4K (for the SGX)
++ * and line length (for the fbdev driver).
++ */
++static unsigned int sgx_buffer_align(unsigned stride, unsigned size)
++{
++ unsigned lcm;
++
++ if (!stride || !size)
++ return 0;
++
++ lcm = stride * OMAPLFB_PAGE_SIZE / gcd(stride,
++ OMAPLFB_PAGE_SIZE);
++
++ return roundup(size, lcm);
++}
++
++#endif /* __SGX_UTIL_H */
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch
new file mode 100644
index 0000000000..de9e842729
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch
@@ -0,0 +1,759 @@
+From d8e8a44e5c44c88f9b0b8b542f86c9cce1e8dccc Mon Sep 17 00:00:00 2001
+From: Mikko Ylinen <mikko.k.ylinen@nokia.com>
+Date: Fri, 9 Apr 2010 12:20:52 +0300
+Subject: [PATCH 05/11] mfd: twl4030: Driver for twl4030 madc module
+
+This ADC allows monitoring of analog signals such as battery levels,
+temperatures, etc.
+
+Several people have contributed to this driver on the linux-omap list.
+
+Signed-off-by: Amit Kucheria <amit.kucheria@verdurent.com>
+Signed-off-by: Tuukka Tikkanen <tuukka.tikkanen@nokia.com>
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ drivers/mfd/Kconfig | 21 ++
+ drivers/mfd/Makefile | 3 +-
+ drivers/mfd/twl4030-madc.c | 549 ++++++++++++++++++++++++++++++++++++++
+ include/linux/i2c/twl4030-madc.h | 126 +++++++++
+ 4 files changed, 698 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/mfd/twl4030-madc.c
+ create mode 100644 include/linux/i2c/twl4030-madc.h
+
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 9da0e50..10ca037 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -188,6 +188,27 @@ config MFD_TC35892
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
++config TWL4030_MADC
++ tristate "TWL4030 MADC Driver"
++ depends on TWL4030_CORE
++ help
++ The TWL4030 Monitoring ADC driver enables the host
++ processor to monitor analog signals using analog-to-digital
++ conversions on the input source. TWL4030 MADC provides the
++ following features:
++ - Single 10-bit ADC with successive approximation register (SAR) conversion;
++ - Analog multiplexer for 16 inputs;
++ - Seven (of the 16) inputs are freely available;
++ - Battery voltage monitoring;
++ - Concurrent conversion request management;
++ - Interrupt signal to Primary Interrupt Handler;
++ - Averaging feature;
++ - Selective enable/disable of the averaging feature.
++
++ Say 'y' here to statically link this module into the kernel or 'm'
++ to build it as a dynamically loadable module. The module will be
++ called twl4030-madc.ko
++
+ config MFD_TMIO
+ bool
+ default n
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index fb503e7..570592f 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -34,8 +34,9 @@ obj-$(CONFIG_TPS6507X) += tps6507x.o
+ obj-$(CONFIG_MENELAUS) += menelaus.o
+
+ obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
+-obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
++obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
+ obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
++obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
+
+ obj-$(CONFIG_MFD_MC13783) += mc13783-core.o
+
+diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
+new file mode 100644
+index 0000000..657ce87
+--- /dev/null
++++ b/drivers/mfd/twl4030-madc.c
+@@ -0,0 +1,549 @@
++/*
++ * TWL4030 MADC module driver
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/platform_device.h>
++#include <linux/miscdevice.h>
++#include <linux/i2c/twl.h>
++#include <linux/i2c/twl4030-madc.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++
++#define TWL4030_MADC_PFX "twl4030-madc: "
++
++struct twl4030_madc_data {
++ struct device *dev;
++ struct mutex lock;
++ struct work_struct ws;
++ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
++ int imr;
++ int isr;
++};
++
++static struct twl4030_madc_data *the_madc;
++static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
++ int chan, int on);
++
++static
++const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
++ [TWL4030_MADC_RT] = {
++ .sel = TWL4030_MADC_RTSELECT_LSB,
++ .avg = TWL4030_MADC_RTAVERAGE_LSB,
++ .rbase = TWL4030_MADC_RTCH0_LSB,
++ },
++ [TWL4030_MADC_SW1] = {
++ .sel = TWL4030_MADC_SW1SELECT_LSB,
++ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
++ .rbase = TWL4030_MADC_GPCH0_LSB,
++ .ctrl = TWL4030_MADC_CTRL_SW1,
++ },
++ [TWL4030_MADC_SW2] = {
++ .sel = TWL4030_MADC_SW2SELECT_LSB,
++ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
++ .rbase = TWL4030_MADC_GPCH0_LSB,
++ .ctrl = TWL4030_MADC_CTRL_SW2,
++ },
++};
++
++static int twl4030_madc_read(struct twl4030_madc_data *madc, u8 reg)
++{
++ int ret;
++ u8 val;
++
++ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, reg);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to read register 0x%X\n", reg);
++ return ret;
++ }
++
++ return val;
++}
++
++static void twl4030_madc_write(struct twl4030_madc_data *madc, u8 reg, u8 val)
++{
++ int ret;
++
++ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, reg);
++ if (ret)
++ dev_err(madc->dev, "unable to write register 0x%X\n", reg);
++}
++
++static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
++{
++ u8 msb, lsb;
++
++ /* For each ADC channel, we have MSB and LSB register pair. MSB address
++ * is always LSB address+1. reg parameter is the addr of LSB register */
++ msb = twl4030_madc_read(madc, reg + 1);
++ lsb = twl4030_madc_read(madc, reg);
++
++ return (int)(((msb << 8) | lsb) >> 6);
++}
++
++static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
++ u8 reg_base, u16 channels, int *buf)
++{
++ int count = 0;
++ u8 reg, i;
++
++ if (unlikely(!buf))
++ return 0;
++
++ for (i = 0; i < TWL4030_MADC_MAX_CHANNELS; i++) {
++ if (channels & (1<<i)) {
++ reg = reg_base + 2*i;
++ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
++ count++;
++ }
++ }
++ return count;
++}
++
++static void twl4030_madc_enable_irq(struct twl4030_madc_data *madc, int id)
++{
++ u8 val;
++
++ val = twl4030_madc_read(madc, madc->imr);
++ val &= ~(1 << id);
++ twl4030_madc_write(madc, madc->imr, val);
++}
++
++static void twl4030_madc_disable_irq(struct twl4030_madc_data *madc, int id)
++{
++ u8 val;
++
++ val = twl4030_madc_read(madc, madc->imr);
++ val |= (1 << id);
++ twl4030_madc_write(madc, madc->imr, val);
++}
++
++static irqreturn_t twl4030_madc_irq_handler(int irq, void *_madc)
++{
++ struct twl4030_madc_data *madc = _madc;
++ u8 isr_val, imr_val;
++ int i;
++
++#ifdef CONFIG_LOCKDEP
++ /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
++ * we don't want and can't tolerate. Although it might be
++ * friendlier not to borrow this thread context...
++ */
++ local_irq_enable();
++#endif
++
++ /* Use COR to ack interrupts since we have no shared IRQs in ISRx */
++ isr_val = twl4030_madc_read(madc, madc->isr);
++ imr_val = twl4030_madc_read(madc, madc->imr);
++
++ isr_val &= ~imr_val;
++
++ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
++
++ if (!(isr_val & (1<<i)))
++ continue;
++
++ twl4030_madc_disable_irq(madc, i);
++ madc->requests[i].result_pending = 1;
++ }
++
++ schedule_work(&madc->ws);
++
++ return IRQ_HANDLED;
++}
++
++static void twl4030_madc_work(struct work_struct *ws)
++{
++ const struct twl4030_madc_conversion_method *method;
++ struct twl4030_madc_data *madc;
++ struct twl4030_madc_request *r;
++ int len, i;
++
++ madc = container_of(ws, struct twl4030_madc_data, ws);
++ mutex_lock(&madc->lock);
++
++ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
++
++ r = &madc->requests[i];
++
++ /* No pending results for this method, move to next one */
++ if (!r->result_pending)
++ continue;
++
++ method = &twl4030_conversion_methods[r->method];
++
++ /* Read results */
++ len = twl4030_madc_read_channels(madc, method->rbase,
++ r->channels, r->rbuf);
++
++ /* Return results to caller */
++ if (r->func_cb != NULL) {
++ r->func_cb(len, r->channels, r->rbuf);
++ r->func_cb = NULL;
++ }
++
++ /* Free request */
++ r->result_pending = 0;
++ r->active = 0;
++ }
++
++ mutex_unlock(&madc->lock);
++}
++
++static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
++ struct twl4030_madc_request *req)
++{
++ struct twl4030_madc_request *p;
++
++ p = &madc->requests[req->method];
++
++ memcpy(p, req, sizeof *req);
++
++ twl4030_madc_enable_irq(madc, req->method);
++
++ return 0;
++}
++
++static inline void twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
++ int conv_method)
++{
++ const struct twl4030_madc_conversion_method *method;
++
++ method = &twl4030_conversion_methods[conv_method];
++
++ switch (conv_method) {
++ case TWL4030_MADC_SW1:
++ case TWL4030_MADC_SW2:
++ twl4030_madc_write(madc, method->ctrl, TWL4030_MADC_SW_START);
++ break;
++ case TWL4030_MADC_RT:
++ default:
++ break;
++ }
++}
++
++static int twl4030_madc_wait_conversion_ready(
++ struct twl4030_madc_data *madc,
++ unsigned int timeout_ms, u8 status_reg)
++{
++ unsigned long timeout;
++
++ timeout = jiffies + msecs_to_jiffies(timeout_ms);
++ do {
++ int reg;
++
++ reg = twl4030_madc_read(madc, status_reg);
++ if (unlikely(reg < 0))
++ return reg;
++ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
++ return 0;
++ } while (!time_after(jiffies, timeout));
++
++ return -EAGAIN;
++}
++
++static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on);
++int twl4030_madc_conversion(struct twl4030_madc_request *req)
++{
++ const struct twl4030_madc_conversion_method *method;
++ u8 ch_msb, ch_lsb;
++ int ret;
++
++ if (unlikely(!req))
++ return -EINVAL;
++
++ mutex_lock(&the_madc->lock);
++
++ twl4030_madc_set_power(the_madc, 1);
++
++ /* Do we have a conversion request ongoing */
++ if (the_madc->requests[req->method].active) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ ch_msb = (req->channels >> 8) & 0xff;
++ ch_lsb = req->channels & 0xff;
++
++ method = &twl4030_conversion_methods[req->method];
++
++ /* Select channels to be converted */
++ twl4030_madc_write(the_madc, method->sel + 1, ch_msb);
++ twl4030_madc_write(the_madc, method->sel, ch_lsb);
++
++ /* Select averaging for all channels if do_avg is set */
++ if (req->do_avg) {
++ twl4030_madc_write(the_madc, method->avg + 1, ch_msb);
++ twl4030_madc_write(the_madc, method->avg, ch_lsb);
++ }
++
++ if ((req->type == TWL4030_MADC_IRQ_ONESHOT) && (req->func_cb != NULL)) {
++ twl4030_madc_set_irq(the_madc, req);
++ twl4030_madc_start_conversion(the_madc, req->method);
++ the_madc->requests[req->method].active = 1;
++ ret = 0;
++ goto out;
++ }
++
++ /* With RT method we should not be here anymore */
++ if (req->method == TWL4030_MADC_RT) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ twl4030_madc_start_conversion(the_madc, req->method);
++ the_madc->requests[req->method].active = 1;
++
++ /* Wait until conversion is ready (ctrl register returns EOC) */
++ ret = twl4030_madc_wait_conversion_ready(the_madc, 5, method->ctrl);
++ if (ret) {
++ dev_dbg(the_madc->dev, "conversion timeout!\n");
++ the_madc->requests[req->method].active = 0;
++ goto out;
++ }
++
++ ret = twl4030_madc_read_channels(the_madc, method->rbase, req->channels,
++ req->rbuf);
++
++ the_madc->requests[req->method].active = 0;
++
++ twl4030_madc_set_power(the_madc, 0);
++
++out:
++ mutex_unlock(&the_madc->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(twl4030_madc_conversion);
++
++static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
++ int chan, int on)
++{
++ int ret;
++ u8 regval;
++
++ /* Current generator is only available for ADCIN0 and ADCIN1. NB:
++ * ADCIN1 current generator only works when AC or VBUS is present */
++ if (chan > 1)
++ return EINVAL;
++
++ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
++ &regval, TWL4030_BCI_BCICTL1);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to read register 0x%X\n",
++ TWL4030_BCI_BCICTL1);
++ return ret;
++ }
++
++ if (on) {
++ regval |= (chan) ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
++ regval |= TWL4030_BCI_MESBAT;
++ } else {
++ regval &= (chan) ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
++ regval &= ~TWL4030_BCI_MESBAT;
++ }
++
++ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
++ regval, TWL4030_BCI_BCICTL1);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to write register 0x%X\n",
++ TWL4030_BCI_BCICTL1);
++ }
++ return ret;
++}
++
++static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
++{
++ int ret = 0;
++ u8 regval;
++
++ if (on) {
++ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1);
++ regval |= TWL4030_MADC_MADCON;
++ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval);
++
++ ret |= twl4030_madc_set_current_generator(madc, 0, 1);
++
++ } else {
++ ret |= twl4030_madc_set_current_generator(madc, 0, 0);
++
++ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1);
++ regval &= ~TWL4030_MADC_MADCON;
++ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval);
++ }
++ return ret;
++}
++
++static long twl4030_madc_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct twl4030_madc_user_parms par;
++ int val, ret;
++
++ ret = copy_from_user(&par, (void __user *) arg, sizeof(par));
++ if (ret) {
++ dev_dbg(the_madc->dev, "copy_from_user: %d\n", ret);
++ return -EACCES;
++ }
++
++ switch (cmd) {
++ case TWL4030_MADC_IOCX_ADC_RAW_READ: {
++ struct twl4030_madc_request req;
++ if (par.channel >= TWL4030_MADC_MAX_CHANNELS)
++ return -EINVAL;
++
++ req.channels = (1 << par.channel);
++ req.do_avg = par.average;
++ req.method = TWL4030_MADC_SW1;
++ req.func_cb = NULL;
++ req.type = TWL4030_MADC_WAIT;
++
++ val = twl4030_madc_conversion(&req);
++ if (likely(val > 0)) {
++ par.status = 0;
++ par.result = (u16)req.rbuf[par.channel];
++ } else if (val == 0) {
++ par.status = -ENODATA;
++ } else {
++ par.status = val;
++ }
++ break;
++ }
++ default:
++ return -EINVAL;
++ }
++
++ ret = copy_to_user((void __user *) arg, &par, sizeof(par));
++ if (ret) {
++ dev_dbg(the_madc->dev, "copy_to_user: %d\n", ret);
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static const struct file_operations twl4030_madc_fileops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = twl4030_madc_ioctl
++};
++
++static struct miscdevice twl4030_madc_device = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "twl4030-adc",
++ .fops = &twl4030_madc_fileops
++};
++
++static int __init twl4030_madc_probe(struct platform_device *pdev)
++{
++ struct twl4030_madc_data *madc;
++ struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
++
++ int ret;
++ u8 regval;
++
++ madc = kzalloc(sizeof *madc, GFP_KERNEL);
++ if (!madc)
++ return -ENOMEM;
++
++ if (!pdata) {
++ dev_dbg(&pdev->dev, "platform_data not available\n");
++ ret = -EINVAL;
++ goto err_pdata;
++ }
++
++ madc->dev = &pdev->dev;
++ madc->imr = (pdata->irq_line == 1) ? TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
++ madc->isr = (pdata->irq_line == 1) ? TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
++
++ ret = misc_register(&twl4030_madc_device);
++ if (ret) {
++ dev_dbg(&pdev->dev, "could not register misc_device\n");
++ goto err_misc;
++ }
++
++ ret = request_irq(platform_get_irq(pdev, 0), twl4030_madc_irq_handler,
++ 0, "twl4030_madc", madc);
++ if (ret) {
++ dev_dbg(&pdev->dev, "could not request irq\n");
++ goto err_irq;
++ }
++
++ platform_set_drvdata(pdev, madc);
++ mutex_init(&madc->lock);
++ INIT_WORK(&madc->ws, twl4030_madc_work);
++
++ the_madc = madc;
++
++ return 0;
++
++err_irq:
++ misc_deregister(&twl4030_madc_device);
++
++err_misc:
++err_pdata:
++ kfree(madc);
++
++ return ret;
++}
++
++static int __exit twl4030_madc_remove(struct platform_device *pdev)
++{
++ struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
++
++ free_irq(platform_get_irq(pdev, 0), madc);
++ cancel_work_sync(&madc->ws);
++ misc_deregister(&twl4030_madc_device);
++
++ return 0;
++}
++
++static struct platform_driver twl4030_madc_driver = {
++ .probe = twl4030_madc_probe,
++ .remove = __exit_p(twl4030_madc_remove),
++ .driver = {
++ .name = "twl4030_madc",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init twl4030_madc_init(void)
++{
++ return platform_driver_register(&twl4030_madc_driver);
++}
++module_init(twl4030_madc_init);
++
++static void __exit twl4030_madc_exit(void)
++{
++ platform_driver_unregister(&twl4030_madc_driver);
++}
++module_exit(twl4030_madc_exit);
++
++MODULE_ALIAS("platform:twl4030-madc");
++MODULE_AUTHOR("Nokia Corporation");
++MODULE_DESCRIPTION("twl4030 ADC driver");
++MODULE_LICENSE("GPL");
++
+diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
+new file mode 100644
+index 0000000..24523b5
+--- /dev/null
++++ b/include/linux/i2c/twl4030-madc.h
+@@ -0,0 +1,126 @@
++/*
++ * include/linux/i2c/twl4030-madc.h
++ *
++ * TWL4030 MADC module driver header
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef _TWL4030_MADC_H
++#define _TWL4030_MADC_H
++
++struct twl4030_madc_conversion_method {
++ u8 sel;
++ u8 avg;
++ u8 rbase;
++ u8 ctrl;
++};
++
++#define TWL4030_MADC_MAX_CHANNELS 16
++
++struct twl4030_madc_request {
++ u16 channels;
++ u16 do_avg;
++ u16 method;
++ u16 type;
++ int active;
++ int result_pending;
++ int rbuf[TWL4030_MADC_MAX_CHANNELS];
++ void (*func_cb)(int len, int channels, int *buf);
++};
++
++enum conversion_methods {
++ TWL4030_MADC_RT,
++ TWL4030_MADC_SW1,
++ TWL4030_MADC_SW2,
++ TWL4030_MADC_NUM_METHODS
++};
++
++enum sample_type {
++ TWL4030_MADC_WAIT,
++ TWL4030_MADC_IRQ_ONESHOT,
++ TWL4030_MADC_IRQ_REARM
++};
++
++#define TWL4030_MADC_CTRL1 0x00
++#define TWL4030_MADC_CTRL2 0x01
++
++#define TWL4030_MADC_RTSELECT_LSB 0x02
++#define TWL4030_MADC_SW1SELECT_LSB 0x06
++#define TWL4030_MADC_SW2SELECT_LSB 0x0A
++
++#define TWL4030_MADC_RTAVERAGE_LSB 0x04
++#define TWL4030_MADC_SW1AVERAGE_LSB 0x08
++#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C
++
++#define TWL4030_MADC_CTRL_SW1 0x12
++#define TWL4030_MADC_CTRL_SW2 0x13
++
++#define TWL4030_MADC_RTCH0_LSB 0x17
++#define TWL4030_MADC_GPCH0_LSB 0x37
++
++#define TWL4030_MADC_MADCON (1<<0) /* MADC power on */
++#define TWL4030_MADC_BUSY (1<<0) /* MADC busy */
++#define TWL4030_MADC_EOC_SW (1<<1) /* MADC conversion completion */
++#define TWL4030_MADC_SW_START (1<<5) /* MADC SWx start conversion */
++
++#define TWL4030_MADC_ADCIN0 (1<<0)
++#define TWL4030_MADC_ADCIN1 (1<<1)
++#define TWL4030_MADC_ADCIN2 (1<<2)
++#define TWL4030_MADC_ADCIN3 (1<<3)
++#define TWL4030_MADC_ADCIN4 (1<<4)
++#define TWL4030_MADC_ADCIN5 (1<<5)
++#define TWL4030_MADC_ADCIN6 (1<<6)
++#define TWL4030_MADC_ADCIN7 (1<<7)
++#define TWL4030_MADC_ADCIN8 (1<<8)
++#define TWL4030_MADC_ADCIN9 (1<<9)
++#define TWL4030_MADC_ADCIN10 (1<<10)
++#define TWL4030_MADC_ADCIN11 (1<<11)
++#define TWL4030_MADC_ADCIN12 (1<<12)
++#define TWL4030_MADC_ADCIN13 (1<<13)
++#define TWL4030_MADC_ADCIN14 (1<<14)
++#define TWL4030_MADC_ADCIN15 (1<<15)
++
++/* Fixed channels */
++#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1
++#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8
++#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9
++#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10
++#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11
++#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12
++
++/* BCI related - XXX To be moved elsewhere */
++#define TWL4030_BCI_BCICTL1 0x23
++#define TWL4030_BCI_MESBAT (1<<1)
++#define TWL4030_BCI_TYPEN (1<<4)
++#define TWL4030_BCI_ITHEN (1<<3)
++
++#define TWL4030_MADC_IOC_MAGIC '`'
++#define TWL4030_MADC_IOCX_ADC_RAW_READ _IO(TWL4030_MADC_IOC_MAGIC, 0)
++
++struct twl4030_madc_user_parms {
++ int channel;
++ int average;
++ int status;
++ u16 result;
++};
++
++int twl4030_madc_conversion(struct twl4030_madc_request *conv);
++
++#endif
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD
new file mode 100644
index 0000000000..de9e842729
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD
@@ -0,0 +1,759 @@
+From d8e8a44e5c44c88f9b0b8b542f86c9cce1e8dccc Mon Sep 17 00:00:00 2001
+From: Mikko Ylinen <mikko.k.ylinen@nokia.com>
+Date: Fri, 9 Apr 2010 12:20:52 +0300
+Subject: [PATCH 05/11] mfd: twl4030: Driver for twl4030 madc module
+
+This ADC allows monitoring of analog signals such as battery levels,
+temperatures, etc.
+
+Several people have contributed to this driver on the linux-omap list.
+
+Signed-off-by: Amit Kucheria <amit.kucheria@verdurent.com>
+Signed-off-by: Tuukka Tikkanen <tuukka.tikkanen@nokia.com>
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ drivers/mfd/Kconfig | 21 ++
+ drivers/mfd/Makefile | 3 +-
+ drivers/mfd/twl4030-madc.c | 549 ++++++++++++++++++++++++++++++++++++++
+ include/linux/i2c/twl4030-madc.h | 126 +++++++++
+ 4 files changed, 698 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/mfd/twl4030-madc.c
+ create mode 100644 include/linux/i2c/twl4030-madc.h
+
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 9da0e50..10ca037 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -188,6 +188,27 @@ config MFD_TC35892
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
++config TWL4030_MADC
++ tristate "TWL4030 MADC Driver"
++ depends on TWL4030_CORE
++ help
++ The TWL4030 Monitoring ADC driver enables the host
++ processor to monitor analog signals using analog-to-digital
++ conversions on the input source. TWL4030 MADC provides the
++ following features:
++ - Single 10-bit ADC with successive approximation register (SAR) conversion;
++ - Analog multiplexer for 16 inputs;
++ - Seven (of the 16) inputs are freely available;
++ - Battery voltage monitoring;
++ - Concurrent conversion request management;
++ - Interrupt signal to Primary Interrupt Handler;
++ - Averaging feature;
++ - Selective enable/disable of the averaging feature.
++
++ Say 'y' here to statically link this module into the kernel or 'm'
++ to build it as a dynamically loadable module. The module will be
++ called twl4030-madc.ko
++
+ config MFD_TMIO
+ bool
+ default n
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index fb503e7..570592f 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -34,8 +34,9 @@ obj-$(CONFIG_TPS6507X) += tps6507x.o
+ obj-$(CONFIG_MENELAUS) += menelaus.o
+
+ obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
+-obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
++obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
+ obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
++obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
+
+ obj-$(CONFIG_MFD_MC13783) += mc13783-core.o
+
+diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
+new file mode 100644
+index 0000000..657ce87
+--- /dev/null
++++ b/drivers/mfd/twl4030-madc.c
+@@ -0,0 +1,549 @@
++/*
++ * TWL4030 MADC module driver
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/platform_device.h>
++#include <linux/miscdevice.h>
++#include <linux/i2c/twl.h>
++#include <linux/i2c/twl4030-madc.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++
++#define TWL4030_MADC_PFX "twl4030-madc: "
++
++struct twl4030_madc_data {
++ struct device *dev;
++ struct mutex lock;
++ struct work_struct ws;
++ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
++ int imr;
++ int isr;
++};
++
++static struct twl4030_madc_data *the_madc;
++static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
++ int chan, int on);
++
++static
++const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
++ [TWL4030_MADC_RT] = {
++ .sel = TWL4030_MADC_RTSELECT_LSB,
++ .avg = TWL4030_MADC_RTAVERAGE_LSB,
++ .rbase = TWL4030_MADC_RTCH0_LSB,
++ },
++ [TWL4030_MADC_SW1] = {
++ .sel = TWL4030_MADC_SW1SELECT_LSB,
++ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
++ .rbase = TWL4030_MADC_GPCH0_LSB,
++ .ctrl = TWL4030_MADC_CTRL_SW1,
++ },
++ [TWL4030_MADC_SW2] = {
++ .sel = TWL4030_MADC_SW2SELECT_LSB,
++ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
++ .rbase = TWL4030_MADC_GPCH0_LSB,
++ .ctrl = TWL4030_MADC_CTRL_SW2,
++ },
++};
++
++static int twl4030_madc_read(struct twl4030_madc_data *madc, u8 reg)
++{
++ int ret;
++ u8 val;
++
++ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, reg);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to read register 0x%X\n", reg);
++ return ret;
++ }
++
++ return val;
++}
++
++static void twl4030_madc_write(struct twl4030_madc_data *madc, u8 reg, u8 val)
++{
++ int ret;
++
++ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, reg);
++ if (ret)
++ dev_err(madc->dev, "unable to write register 0x%X\n", reg);
++}
++
++static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
++{
++ u8 msb, lsb;
++
++ /* For each ADC channel, we have MSB and LSB register pair. MSB address
++ * is always LSB address+1. reg parameter is the addr of LSB register */
++ msb = twl4030_madc_read(madc, reg + 1);
++ lsb = twl4030_madc_read(madc, reg);
++
++ return (int)(((msb << 8) | lsb) >> 6);
++}
++
++static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
++ u8 reg_base, u16 channels, int *buf)
++{
++ int count = 0;
++ u8 reg, i;
++
++ if (unlikely(!buf))
++ return 0;
++
++ for (i = 0; i < TWL4030_MADC_MAX_CHANNELS; i++) {
++ if (channels & (1<<i)) {
++ reg = reg_base + 2*i;
++ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
++ count++;
++ }
++ }
++ return count;
++}
++
++static void twl4030_madc_enable_irq(struct twl4030_madc_data *madc, int id)
++{
++ u8 val;
++
++ val = twl4030_madc_read(madc, madc->imr);
++ val &= ~(1 << id);
++ twl4030_madc_write(madc, madc->imr, val);
++}
++
++static void twl4030_madc_disable_irq(struct twl4030_madc_data *madc, int id)
++{
++ u8 val;
++
++ val = twl4030_madc_read(madc, madc->imr);
++ val |= (1 << id);
++ twl4030_madc_write(madc, madc->imr, val);
++}
++
++static irqreturn_t twl4030_madc_irq_handler(int irq, void *_madc)
++{
++ struct twl4030_madc_data *madc = _madc;
++ u8 isr_val, imr_val;
++ int i;
++
++#ifdef CONFIG_LOCKDEP
++ /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
++ * we don't want and can't tolerate. Although it might be
++ * friendlier not to borrow this thread context...
++ */
++ local_irq_enable();
++#endif
++
++ /* Use COR to ack interrupts since we have no shared IRQs in ISRx */
++ isr_val = twl4030_madc_read(madc, madc->isr);
++ imr_val = twl4030_madc_read(madc, madc->imr);
++
++ isr_val &= ~imr_val;
++
++ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
++
++ if (!(isr_val & (1<<i)))
++ continue;
++
++ twl4030_madc_disable_irq(madc, i);
++ madc->requests[i].result_pending = 1;
++ }
++
++ schedule_work(&madc->ws);
++
++ return IRQ_HANDLED;
++}
++
++static void twl4030_madc_work(struct work_struct *ws)
++{
++ const struct twl4030_madc_conversion_method *method;
++ struct twl4030_madc_data *madc;
++ struct twl4030_madc_request *r;
++ int len, i;
++
++ madc = container_of(ws, struct twl4030_madc_data, ws);
++ mutex_lock(&madc->lock);
++
++ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
++
++ r = &madc->requests[i];
++
++ /* No pending results for this method, move to next one */
++ if (!r->result_pending)
++ continue;
++
++ method = &twl4030_conversion_methods[r->method];
++
++ /* Read results */
++ len = twl4030_madc_read_channels(madc, method->rbase,
++ r->channels, r->rbuf);
++
++ /* Return results to caller */
++ if (r->func_cb != NULL) {
++ r->func_cb(len, r->channels, r->rbuf);
++ r->func_cb = NULL;
++ }
++
++ /* Free request */
++ r->result_pending = 0;
++ r->active = 0;
++ }
++
++ mutex_unlock(&madc->lock);
++}
++
++static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
++ struct twl4030_madc_request *req)
++{
++ struct twl4030_madc_request *p;
++
++ p = &madc->requests[req->method];
++
++ memcpy(p, req, sizeof *req);
++
++ twl4030_madc_enable_irq(madc, req->method);
++
++ return 0;
++}
++
++static inline void twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
++ int conv_method)
++{
++ const struct twl4030_madc_conversion_method *method;
++
++ method = &twl4030_conversion_methods[conv_method];
++
++ switch (conv_method) {
++ case TWL4030_MADC_SW1:
++ case TWL4030_MADC_SW2:
++ twl4030_madc_write(madc, method->ctrl, TWL4030_MADC_SW_START);
++ break;
++ case TWL4030_MADC_RT:
++ default:
++ break;
++ }
++}
++
++static int twl4030_madc_wait_conversion_ready(
++ struct twl4030_madc_data *madc,
++ unsigned int timeout_ms, u8 status_reg)
++{
++ unsigned long timeout;
++
++ timeout = jiffies + msecs_to_jiffies(timeout_ms);
++ do {
++ int reg;
++
++ reg = twl4030_madc_read(madc, status_reg);
++ if (unlikely(reg < 0))
++ return reg;
++ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
++ return 0;
++ } while (!time_after(jiffies, timeout));
++
++ return -EAGAIN;
++}
++
++static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on);
++int twl4030_madc_conversion(struct twl4030_madc_request *req)
++{
++ const struct twl4030_madc_conversion_method *method;
++ u8 ch_msb, ch_lsb;
++ int ret;
++
++ if (unlikely(!req))
++ return -EINVAL;
++
++ mutex_lock(&the_madc->lock);
++
++ twl4030_madc_set_power(the_madc, 1);
++
++ /* Do we have a conversion request ongoing */
++ if (the_madc->requests[req->method].active) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ ch_msb = (req->channels >> 8) & 0xff;
++ ch_lsb = req->channels & 0xff;
++
++ method = &twl4030_conversion_methods[req->method];
++
++ /* Select channels to be converted */
++ twl4030_madc_write(the_madc, method->sel + 1, ch_msb);
++ twl4030_madc_write(the_madc, method->sel, ch_lsb);
++
++ /* Select averaging for all channels if do_avg is set */
++ if (req->do_avg) {
++ twl4030_madc_write(the_madc, method->avg + 1, ch_msb);
++ twl4030_madc_write(the_madc, method->avg, ch_lsb);
++ }
++
++ if ((req->type == TWL4030_MADC_IRQ_ONESHOT) && (req->func_cb != NULL)) {
++ twl4030_madc_set_irq(the_madc, req);
++ twl4030_madc_start_conversion(the_madc, req->method);
++ the_madc->requests[req->method].active = 1;
++ ret = 0;
++ goto out;
++ }
++
++ /* With RT method we should not be here anymore */
++ if (req->method == TWL4030_MADC_RT) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ twl4030_madc_start_conversion(the_madc, req->method);
++ the_madc->requests[req->method].active = 1;
++
++ /* Wait until conversion is ready (ctrl register returns EOC) */
++ ret = twl4030_madc_wait_conversion_ready(the_madc, 5, method->ctrl);
++ if (ret) {
++ dev_dbg(the_madc->dev, "conversion timeout!\n");
++ the_madc->requests[req->method].active = 0;
++ goto out;
++ }
++
++ ret = twl4030_madc_read_channels(the_madc, method->rbase, req->channels,
++ req->rbuf);
++
++ the_madc->requests[req->method].active = 0;
++
++ twl4030_madc_set_power(the_madc, 0);
++
++out:
++ mutex_unlock(&the_madc->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(twl4030_madc_conversion);
++
++static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
++ int chan, int on)
++{
++ int ret;
++ u8 regval;
++
++ /* Current generator is only available for ADCIN0 and ADCIN1. NB:
++ * ADCIN1 current generator only works when AC or VBUS is present */
++ if (chan > 1)
++ return -EINVAL;
++
++ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
++ &regval, TWL4030_BCI_BCICTL1);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to read register 0x%X\n",
++ TWL4030_BCI_BCICTL1);
++ return ret;
++ }
++
++ if (on) {
++ regval |= (chan) ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
++ regval |= TWL4030_BCI_MESBAT;
++ } else {
++ regval &= (chan) ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
++ regval &= ~TWL4030_BCI_MESBAT;
++ }
++
++ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
++ regval, TWL4030_BCI_BCICTL1);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to write register 0x%X\n",
++ TWL4030_BCI_BCICTL1);
++ }
++ return ret;
++}
++
++static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
++{
++ int ret = 0;
++ u8 regval;
++
++ if (on) {
++ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1);
++ regval |= TWL4030_MADC_MADCON;
++ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval);
++
++ ret |= twl4030_madc_set_current_generator(madc, 0, 1);
++
++ } else {
++ ret |= twl4030_madc_set_current_generator(madc, 0, 0);
++
++ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1);
++ regval &= ~TWL4030_MADC_MADCON;
++ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval);
++ }
++ return ret;
++}
++
++static long twl4030_madc_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct twl4030_madc_user_parms par;
++ int val, ret;
++
++ ret = copy_from_user(&par, (void __user *) arg, sizeof(par));
++ if (ret) {
++ dev_dbg(the_madc->dev, "copy_from_user: %d\n", ret);
++ return -EACCES;
++ }
++
++ switch (cmd) {
++ case TWL4030_MADC_IOCX_ADC_RAW_READ: {
++ struct twl4030_madc_request req;
++ if (par.channel >= TWL4030_MADC_MAX_CHANNELS)
++ return -EINVAL;
++
++ req.channels = (1 << par.channel);
++ req.do_avg = par.average;
++ req.method = TWL4030_MADC_SW1;
++ req.func_cb = NULL;
++ req.type = TWL4030_MADC_WAIT;
++
++ val = twl4030_madc_conversion(&req);
++ if (likely(val > 0)) {
++ par.status = 0;
++ par.result = (u16)req.rbuf[par.channel];
++ } else if (val == 0) {
++ par.status = -ENODATA;
++ } else {
++ par.status = val;
++ }
++ break;
++ }
++ default:
++ return -EINVAL;
++ }
++
++ ret = copy_to_user((void __user *) arg, &par, sizeof(par));
++ if (ret) {
++ dev_dbg(the_madc->dev, "copy_to_user: %d\n", ret);
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static const struct file_operations twl4030_madc_fileops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = twl4030_madc_ioctl
++};
++
++static struct miscdevice twl4030_madc_device = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "twl4030-adc",
++ .fops = &twl4030_madc_fileops
++};
++
++static int __init twl4030_madc_probe(struct platform_device *pdev)
++{
++ struct twl4030_madc_data *madc;
++ struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
++
++ int ret;
++ u8 regval;
++
++ madc = kzalloc(sizeof *madc, GFP_KERNEL);
++ if (!madc)
++ return -ENOMEM;
++
++ if (!pdata) {
++ dev_dbg(&pdev->dev, "platform_data not available\n");
++ ret = -EINVAL;
++ goto err_pdata;
++ }
++
++ madc->dev = &pdev->dev;
++ madc->imr = (pdata->irq_line == 1) ? TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
++ madc->isr = (pdata->irq_line == 1) ? TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
++
++ ret = misc_register(&twl4030_madc_device);
++ if (ret) {
++ dev_dbg(&pdev->dev, "could not register misc_device\n");
++ goto err_misc;
++ }
++
++ ret = request_irq(platform_get_irq(pdev, 0), twl4030_madc_irq_handler,
++ 0, "twl4030_madc", madc);
++ if (ret) {
++ dev_dbg(&pdev->dev, "could not request irq\n");
++ goto err_irq;
++ }
++
++ platform_set_drvdata(pdev, madc);
++ mutex_init(&madc->lock);
++ INIT_WORK(&madc->ws, twl4030_madc_work);
++
++ the_madc = madc;
++
++ return 0;
++
++err_irq:
++ misc_deregister(&twl4030_madc_device);
++
++err_misc:
++err_pdata:
++ kfree(madc);
++
++ return ret;
++}
++
++static int __exit twl4030_madc_remove(struct platform_device *pdev)
++{
++ struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
++
++ free_irq(platform_get_irq(pdev, 0), madc);
++ cancel_work_sync(&madc->ws);
++ misc_deregister(&twl4030_madc_device);
++
++ return 0;
++}
++
++static struct platform_driver twl4030_madc_driver = {
++ .probe = twl4030_madc_probe,
++ .remove = __exit_p(twl4030_madc_remove),
++ .driver = {
++ .name = "twl4030_madc",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init twl4030_madc_init(void)
++{
++ return platform_driver_register(&twl4030_madc_driver);
++}
++module_init(twl4030_madc_init);
++
++static void __exit twl4030_madc_exit(void)
++{
++ platform_driver_unregister(&twl4030_madc_driver);
++}
++module_exit(twl4030_madc_exit);
++
++MODULE_ALIAS("platform:twl4030-madc");
++MODULE_AUTHOR("Nokia Corporation");
++MODULE_DESCRIPTION("twl4030 ADC driver");
++MODULE_LICENSE("GPL");
++
+diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
+new file mode 100644
+index 0000000..24523b5
+--- /dev/null
++++ b/include/linux/i2c/twl4030-madc.h
+@@ -0,0 +1,126 @@
++/*
++ * include/linux/i2c/twl4030-madc.h
++ *
++ * TWL4030 MADC module driver header
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef _TWL4030_MADC_H
++#define _TWL4030_MADC_H
++
++struct twl4030_madc_conversion_method {
++ u8 sel;
++ u8 avg;
++ u8 rbase;
++ u8 ctrl;
++};
++
++#define TWL4030_MADC_MAX_CHANNELS 16
++
++struct twl4030_madc_request {
++ u16 channels;
++ u16 do_avg;
++ u16 method;
++ u16 type;
++ int active;
++ int result_pending;
++ int rbuf[TWL4030_MADC_MAX_CHANNELS];
++ void (*func_cb)(int len, int channels, int *buf);
++};
++
++enum conversion_methods {
++ TWL4030_MADC_RT,
++ TWL4030_MADC_SW1,
++ TWL4030_MADC_SW2,
++ TWL4030_MADC_NUM_METHODS
++};
++
++enum sample_type {
++ TWL4030_MADC_WAIT,
++ TWL4030_MADC_IRQ_ONESHOT,
++ TWL4030_MADC_IRQ_REARM
++};
++
++#define TWL4030_MADC_CTRL1 0x00
++#define TWL4030_MADC_CTRL2 0x01
++
++#define TWL4030_MADC_RTSELECT_LSB 0x02
++#define TWL4030_MADC_SW1SELECT_LSB 0x06
++#define TWL4030_MADC_SW2SELECT_LSB 0x0A
++
++#define TWL4030_MADC_RTAVERAGE_LSB 0x04
++#define TWL4030_MADC_SW1AVERAGE_LSB 0x08
++#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C
++
++#define TWL4030_MADC_CTRL_SW1 0x12
++#define TWL4030_MADC_CTRL_SW2 0x13
++
++#define TWL4030_MADC_RTCH0_LSB 0x17
++#define TWL4030_MADC_GPCH0_LSB 0x37
++
++#define TWL4030_MADC_MADCON (1<<0) /* MADC power on */
++#define TWL4030_MADC_BUSY (1<<0) /* MADC busy */
++#define TWL4030_MADC_EOC_SW (1<<1) /* MADC conversion completion */
++#define TWL4030_MADC_SW_START (1<<5) /* MADC SWx start conversion */
++
++#define TWL4030_MADC_ADCIN0 (1<<0)
++#define TWL4030_MADC_ADCIN1 (1<<1)
++#define TWL4030_MADC_ADCIN2 (1<<2)
++#define TWL4030_MADC_ADCIN3 (1<<3)
++#define TWL4030_MADC_ADCIN4 (1<<4)
++#define TWL4030_MADC_ADCIN5 (1<<5)
++#define TWL4030_MADC_ADCIN6 (1<<6)
++#define TWL4030_MADC_ADCIN7 (1<<7)
++#define TWL4030_MADC_ADCIN8 (1<<8)
++#define TWL4030_MADC_ADCIN9 (1<<9)
++#define TWL4030_MADC_ADCIN10 (1<<10)
++#define TWL4030_MADC_ADCIN11 (1<<11)
++#define TWL4030_MADC_ADCIN12 (1<<12)
++#define TWL4030_MADC_ADCIN13 (1<<13)
++#define TWL4030_MADC_ADCIN14 (1<<14)
++#define TWL4030_MADC_ADCIN15 (1<<15)
++
++/* Fixed channels */
++#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1
++#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8
++#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9
++#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10
++#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11
++#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12
++
++/* BCI related - XXX To be moved elsewhere */
++#define TWL4030_BCI_BCICTL1 0x23
++#define TWL4030_BCI_MESBAT (1<<1)
++#define TWL4030_BCI_TYPEN (1<<4)
++#define TWL4030_BCI_ITHEN (1<<3)
++
++#define TWL4030_MADC_IOC_MAGIC '`'
++#define TWL4030_MADC_IOCX_ADC_RAW_READ _IO(TWL4030_MADC_IOC_MAGIC, 0)
++
++struct twl4030_madc_user_parms {
++ int channel;
++ int average;
++ int status;
++ u16 result;
++};
++
++int twl4030_madc_conversion(struct twl4030_madc_request *conv);
++
++#endif
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD_0 b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD_0
new file mode 100644
index 0000000000..de9e842729
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch~HEAD_0
@@ -0,0 +1,759 @@
+From d8e8a44e5c44c88f9b0b8b542f86c9cce1e8dccc Mon Sep 17 00:00:00 2001
+From: Mikko Ylinen <mikko.k.ylinen@nokia.com>
+Date: Fri, 9 Apr 2010 12:20:52 +0300
+Subject: [PATCH 05/11] mfd: twl4030: Driver for twl4030 madc module
+
+This ADC allows monitoring of analog signals such as battery levels,
+temperatures, etc.
+
+Several people have contributed to this driver on the linux-omap list.
+
+Signed-off-by: Amit Kucheria <amit.kucheria@verdurent.com>
+Signed-off-by: Tuukka Tikkanen <tuukka.tikkanen@nokia.com>
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ drivers/mfd/Kconfig | 21 ++
+ drivers/mfd/Makefile | 3 +-
+ drivers/mfd/twl4030-madc.c | 549 ++++++++++++++++++++++++++++++++++++++
+ include/linux/i2c/twl4030-madc.h | 126 +++++++++
+ 4 files changed, 698 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/mfd/twl4030-madc.c
+ create mode 100644 include/linux/i2c/twl4030-madc.h
+
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 9da0e50..10ca037 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -188,6 +188,27 @@ config MFD_TC35892
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
++config TWL4030_MADC
++ tristate "TWL4030 MADC Driver"
++ depends on TWL4030_CORE
++ help
++ The TWL4030 Monitoring ADC driver enables the host
++ processor to monitor analog signals using analog-to-digital
++ conversions on the input source. TWL4030 MADC provides the
++ following features:
++ - Single 10-bit ADC with successive approximation register (SAR) conversion;
++ - Analog multiplexer for 16 inputs;
++ - Seven (of the 16) inputs are freely available;
++ - Battery voltage monitoring;
++ - Concurrent conversion request management;
++ - Interrupt signal to Primary Interrupt Handler;
++ - Averaging feature;
++ - Selective enable/disable of the averaging feature.
++
++ Say 'y' here to statically link this module into the kernel or 'm'
++ to build it as a dynamically loadable module. The module will be
++ called twl4030-madc.ko
++
+ config MFD_TMIO
+ bool
+ default n
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index fb503e7..570592f 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -34,8 +34,9 @@ obj-$(CONFIG_TPS6507X) += tps6507x.o
+ obj-$(CONFIG_MENELAUS) += menelaus.o
+
+ obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
+-obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
++obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
+ obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
++obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
+
+ obj-$(CONFIG_MFD_MC13783) += mc13783-core.o
+
+diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
+new file mode 100644
+index 0000000..657ce87
+--- /dev/null
++++ b/drivers/mfd/twl4030-madc.c
+@@ -0,0 +1,549 @@
++/*
++ * TWL4030 MADC module driver
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/platform_device.h>
++#include <linux/miscdevice.h>
++#include <linux/i2c/twl.h>
++#include <linux/i2c/twl4030-madc.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++
++#define TWL4030_MADC_PFX "twl4030-madc: "
++
++struct twl4030_madc_data {
++ struct device *dev;
++ struct mutex lock;
++ struct work_struct ws;
++ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
++ int imr;
++ int isr;
++};
++
++static struct twl4030_madc_data *the_madc;
++static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
++ int chan, int on);
++
++static
++const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
++ [TWL4030_MADC_RT] = {
++ .sel = TWL4030_MADC_RTSELECT_LSB,
++ .avg = TWL4030_MADC_RTAVERAGE_LSB,
++ .rbase = TWL4030_MADC_RTCH0_LSB,
++ },
++ [TWL4030_MADC_SW1] = {
++ .sel = TWL4030_MADC_SW1SELECT_LSB,
++ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
++ .rbase = TWL4030_MADC_GPCH0_LSB,
++ .ctrl = TWL4030_MADC_CTRL_SW1,
++ },
++ [TWL4030_MADC_SW2] = {
++ .sel = TWL4030_MADC_SW2SELECT_LSB,
++ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
++ .rbase = TWL4030_MADC_GPCH0_LSB,
++ .ctrl = TWL4030_MADC_CTRL_SW2,
++ },
++};
++
++static int twl4030_madc_read(struct twl4030_madc_data *madc, u8 reg)
++{
++ int ret;
++ u8 val;
++
++ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, reg);
++ if (ret) {
++ dev_dbg(madc->dev, "unable to read register 0x%X\n", reg);
++ return ret;
++ }
++
++ return val;
++}
++
++static void twl4030_madc_write(struct twl4030_madc_data *madc, u8 reg, u8 val)
++{
++ int ret;
++
++ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, reg);
++ if (ret)
++ dev_err(madc->dev, "unable to write register 0x%X\n", reg);
++}
++
++static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
++{
++ u8 msb, lsb;
++
++ /* For each ADC channel, we have MSB and LSB register pair. MSB address
++ * is always LSB address+1. reg parameter is the addr of LSB register */
++ msb = twl4030_madc_read(madc, reg + 1);
++ lsb = twl4030_madc_read(madc, reg);
++
++ return (int)(((msb << 8) | lsb) >> 6);
++}
++
++static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
++ u8 reg_base, u16 channels, int *buf)
++{
++ int count = 0;
++ u8 reg, i;
++
++ if (unlikely(!buf))
++ return 0;
++
++ for (i = 0; i < TWL4030_MADC_MAX_CHANNELS; i++) {
++ if (channels & (1<<i)) {
++ reg = reg_base + 2*i;
++ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
++ count++;
++ }
++ }
++ return count;
++}
++
++static void twl4030_madc_enable_irq(struct twl4030_madc_data *madc, int id)
++{
++ u8 val;
++
++ val = twl4030_madc_read(madc, madc->imr);
++ val &= ~(1 << id);
++ twl4030_madc_write(madc, madc->imr, val);
++}
++
++static void twl4030_madc_disable_irq(struct twl4030_madc_data *madc, int id)
++{
++ u8 val;
++
++ val = twl4030_madc_read(madc, madc->imr);
++ val |= (1 << id);
++ twl4030_madc_write(madc, madc->imr, val);
++}
++
++static irqreturn_t twl4030_madc_irq_handler(int irq, void *_madc)
++{
++ struct twl4030_madc_data *madc = _madc;
++ u8 isr_val, imr_val;
++ int i;
++
++#ifdef CONFIG_LOCKDEP
++ /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
++ * we don't want and can't tolerate. Although it might be
++ * friendlier not to borrow this thread context...
++ */
++ local_irq_enable();
++#endif
++
++ /* Use COR to ack interrupts since we have no shared IRQs in ISRx */
++ isr_val = twl4030_madc_read(madc, madc->isr);
++ imr_val = twl4030_madc_read(madc, madc->imr);
++
++ isr_val &= ~imr_val;
++
++ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
++
++ if (!(isr_val & (1<<i)))
++ continue;
++
++ twl4030_madc_disable_irq(madc, i);
++ madc->requests[i].result_pending = 1;
++ }
++
++ schedule_work(&madc->ws);
++
++ return IRQ_HANDLED;
++}
++
++static void twl4030_madc_work(struct work_struct *ws)
++{
++ const struct twl4030_madc_conversion_method *method;
++ struct twl4030_madc_data *madc;
++ struct twl4030_madc_request *r;
++ int len, i;
++
++ madc = container_of(ws, struct twl4030_madc_data, ws);
++ mutex_lock(&madc->lock);
++
++ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
++
++ r = &madc->requests[i];
++
++ /* No pending results for this method, move to next one */
++ if (!r->result_pending)
++ continue;
++
++ method = &twl4030_conversion_methods[r->method];
++
++ /* Read results */
++ len = twl4030_madc_read_channels(madc, method->rbase,
++ r->channels, r->rbuf);
++
++ /* Return results to caller */
++ if (r->func_cb != NULL) {
++ r->func_cb(len, r->channels, r->rbuf);
++ r->func_cb = NULL;
++ }
++
++ /* Free request */
++ r->result_pending = 0;
++ r->active = 0;
++ }
++
++ mutex_unlock(&madc->lock);
++}
++
++static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
++ struct twl4030_madc_request *req)
++{
++ struct twl4030_madc_request *p;
++
++ p = &madc->requests[req->method];
++
++ memcpy(p, req, sizeof *req);
++
++ twl4030_madc_enable_irq(madc, req->method);
++
++ return 0;
++}
++
++static inline void twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
++ int conv_method)
++{
++ const struct twl4030_madc_conversion_method *method;
++
++ method = &twl4030_conversion_methods[conv_method];
++
++ switch (conv_method) {
++ case TWL4030_MADC_SW1:
++ case TWL4030_MADC_SW2:
++ twl4030_madc_write(madc, method->ctrl, TWL4030_MADC_SW_START);
++ break;
++ case TWL4030_MADC_RT:
++ default:
++ break;
++ }
++}
++
++static int twl4030_madc_wait_conversion_ready(
++ struct twl4030_madc_data *madc,
++ unsigned int timeout_ms, u8 status_reg)
++{
++ unsigned long timeout;
++
++ timeout = jiffies + msecs_to_jiffies(timeout_ms);
++ do {
++ int reg;
++
++ reg = twl4030_madc_read(madc, status_reg);
++ if (unlikely(reg < 0))
++ return reg;
++ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
++ return 0;
++ } while (!time_after(jiffies, timeout));
++
++ return -EAGAIN;
++}
++
++static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on);
++int twl4030_madc_conversion(struct twl4030_madc_request *req)
++{
++ const struct twl4030_madc_conversion_method *method;
++ u8 ch_msb, ch_lsb;
++ int ret;
++
++ if (unlikely(!req))
++ return -EINVAL;
++
++ mutex_lock(&the_madc->lock);
++
++ twl4030_madc_set_power(the_madc, 1);
++
++ /* Do we have a conversion request ongoing */
++ if (the_madc->requests[req->method].active) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ ch_msb = (req->channels >> 8) & 0xff;
++ ch_lsb = req->channels & 0xff;
++
++ method = &twl4030_conversion_methods[req->method];
++
++ /* Select channels to be converted */
++ twl4030_madc_write(the_madc, method->sel + 1, ch_msb);
++ twl4030_madc_write(the_madc, method->sel, ch_lsb);
++
++ /* Select averaging for all channels if do_avg is set */
++ if (req->do_avg) {
++ twl4030_madc_write(the_madc, method->avg + 1, ch_msb);
++ twl4030_madc_write(the_madc, method->avg, ch_lsb);
++ }
++
++ if ((req->type == TWL4030_MADC_IRQ_ONESHOT) && (req->func_cb != NULL)) {
++ twl4030_madc_set_irq(the_madc, req);
++ twl4030_madc_start_conversion(the_madc, req->method);
++ the_madc->requests[req->method].active = 1;
++ ret = 0;
++ goto out;
++ }
++
++ /* With RT method we should not be here anymore */
++ if (req->method == TWL4030_MADC_RT) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ twl4030_madc_start_conversion(the_madc, req->method);
++ the_madc->requests[req->method].active = 1;
++
++ /* Wait until conversion is ready (ctrl register returns EOC) */
++ ret = twl4030_madc_wait_conversion_ready(the_madc, 5, method->ctrl);
++ if (ret) {
++ dev_dbg(the_madc->dev, "conversion timeout!\n");
++ the_madc->requests[req->method].active = 0;
++ goto out;
++ }
++
++ ret = twl4030_madc_read_channels(the_madc, method->rbase, req->channels,
++ req->rbuf);
++
++ the_madc->requests[req->method].active = 0;
++
++ twl4030_madc_set_power(the_madc, 0);
++
++out:
++ mutex_unlock(&the_madc->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(twl4030_madc_conversion);
++
++static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
++					      int chan, int on)
++{
++	int ret;
++	u8 regval;
++
++	/* Current generator is only available for ADCIN0 and ADCIN1. NB:
++	 * ADCIN1 current generator only works when AC or VBUS is present */
++	if (chan < 0 || chan > 1)
++		return -EINVAL;
++
++	ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
++			      &regval, TWL4030_BCI_BCICTL1);
++	if (ret) {
++		dev_dbg(madc->dev, "unable to read register 0x%X\n",
++			TWL4030_BCI_BCICTL1);
++		return ret;
++	}
++
++	if (on) {
++		regval |= (chan) ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
++		regval |= TWL4030_BCI_MESBAT;
++	} else {
++		regval &= (chan) ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
++		regval &= ~TWL4030_BCI_MESBAT;
++	}
++
++	ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
++			       regval, TWL4030_BCI_BCICTL1);
++	if (ret) {
++		dev_dbg(madc->dev, "unable to write register 0x%X\n",
++			TWL4030_BCI_BCICTL1);
++	}
++	return ret;
++}
++
++static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
++{
++ int ret = 0;
++ u8 regval;
++
++ if (on) {
++ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1);
++ regval |= TWL4030_MADC_MADCON;
++ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval);
++
++ ret |= twl4030_madc_set_current_generator(madc, 0, 1);
++
++ } else {
++ ret |= twl4030_madc_set_current_generator(madc, 0, 0);
++
++ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1);
++ regval &= ~TWL4030_MADC_MADCON;
++ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval);
++ }
++ return ret;
++}
++
++static long twl4030_madc_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct twl4030_madc_user_parms par;
++ int val, ret;
++
++ ret = copy_from_user(&par, (void __user *) arg, sizeof(par));
++ if (ret) {
++ dev_dbg(the_madc->dev, "copy_from_user: %d\n", ret);
++ return -EACCES;
++ }
++
++ switch (cmd) {
++ case TWL4030_MADC_IOCX_ADC_RAW_READ: {
++ struct twl4030_madc_request req;
++ if (par.channel >= TWL4030_MADC_MAX_CHANNELS)
++ return -EINVAL;
++
++ req.channels = (1 << par.channel);
++ req.do_avg = par.average;
++ req.method = TWL4030_MADC_SW1;
++ req.func_cb = NULL;
++ req.type = TWL4030_MADC_WAIT;
++
++ val = twl4030_madc_conversion(&req);
++ if (likely(val > 0)) {
++ par.status = 0;
++ par.result = (u16)req.rbuf[par.channel];
++ } else if (val == 0) {
++ par.status = -ENODATA;
++ } else {
++ par.status = val;
++ }
++ break;
++ }
++ default:
++ return -EINVAL;
++ }
++
++ ret = copy_to_user((void __user *) arg, &par, sizeof(par));
++ if (ret) {
++ dev_dbg(the_madc->dev, "copy_to_user: %d\n", ret);
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static const struct file_operations twl4030_madc_fileops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = twl4030_madc_ioctl
++};
++
++static struct miscdevice twl4030_madc_device = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "twl4030-adc",
++ .fops = &twl4030_madc_fileops
++};
++
++static int twl4030_madc_probe(struct platform_device *pdev)
++{
++	struct twl4030_madc_data *madc;
++	struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
++
++	int ret;
++
++	madc = kzalloc(sizeof *madc, GFP_KERNEL);
++	if (!madc)
++		return -ENOMEM;
++
++	if (!pdata) {
++		dev_dbg(&pdev->dev, "platform_data not available\n");
++		ret = -EINVAL;
++		goto err_pdata;
++	}
++
++	madc->dev = &pdev->dev;
++	madc->imr = (pdata->irq_line == 1) ? TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
++	madc->isr = (pdata->irq_line == 1) ? TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
++
++	/* Lock, work and drvdata must be ready before the IRQ is requested */
++	platform_set_drvdata(pdev, madc);
++	mutex_init(&madc->lock);
++	INIT_WORK(&madc->ws, twl4030_madc_work);
++
++	ret = misc_register(&twl4030_madc_device);
++	if (ret) {
++		dev_dbg(&pdev->dev, "could not register misc_device\n");
++		goto err_misc;
++	}
++
++	ret = request_irq(platform_get_irq(pdev, 0), twl4030_madc_irq_handler,
++			  0, "twl4030_madc", madc);
++	if (ret) {
++		dev_dbg(&pdev->dev, "could not request irq\n");
++		goto err_irq;
++	}
++
++	the_madc = madc;
++
++	return 0;
++
++err_irq:
++	misc_deregister(&twl4030_madc_device);
++
++err_misc:
++err_pdata:
++	kfree(madc);
++
++	return ret;
++}
++
++static int __exit twl4030_madc_remove(struct platform_device *pdev)
++{
++ struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
++
++ free_irq(platform_get_irq(pdev, 0), madc);
++ cancel_work_sync(&madc->ws);
++ misc_deregister(&twl4030_madc_device);
++
++ return 0;
++}
++
++static struct platform_driver twl4030_madc_driver = {
++ .probe = twl4030_madc_probe,
++ .remove = __exit_p(twl4030_madc_remove),
++ .driver = {
++ .name = "twl4030_madc",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init twl4030_madc_init(void)
++{
++ return platform_driver_register(&twl4030_madc_driver);
++}
++module_init(twl4030_madc_init);
++
++static void __exit twl4030_madc_exit(void)
++{
++ platform_driver_unregister(&twl4030_madc_driver);
++}
++module_exit(twl4030_madc_exit);
++
++MODULE_ALIAS("platform:twl4030-madc");
++MODULE_AUTHOR("Nokia Corporation");
++MODULE_DESCRIPTION("twl4030 ADC driver");
++MODULE_LICENSE("GPL");
++
+diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
+new file mode 100644
+index 0000000..24523b5
+--- /dev/null
++++ b/include/linux/i2c/twl4030-madc.h
+@@ -0,0 +1,126 @@
++/*
++ * include/linux/i2c/twl4030-madc.h
++ *
++ * TWL4030 MADC module driver header
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef _TWL4030_MADC_H
++#define _TWL4030_MADC_H
++
++struct twl4030_madc_conversion_method {
++ u8 sel;
++ u8 avg;
++ u8 rbase;
++ u8 ctrl;
++};
++
++#define TWL4030_MADC_MAX_CHANNELS 16
++
++struct twl4030_madc_request {
++ u16 channels;
++ u16 do_avg;
++ u16 method;
++ u16 type;
++ int active;
++ int result_pending;
++ int rbuf[TWL4030_MADC_MAX_CHANNELS];
++ void (*func_cb)(int len, int channels, int *buf);
++};
++
++enum conversion_methods {
++ TWL4030_MADC_RT,
++ TWL4030_MADC_SW1,
++ TWL4030_MADC_SW2,
++ TWL4030_MADC_NUM_METHODS
++};
++
++enum sample_type {
++ TWL4030_MADC_WAIT,
++ TWL4030_MADC_IRQ_ONESHOT,
++ TWL4030_MADC_IRQ_REARM
++};
++
++#define TWL4030_MADC_CTRL1 0x00
++#define TWL4030_MADC_CTRL2 0x01
++
++#define TWL4030_MADC_RTSELECT_LSB 0x02
++#define TWL4030_MADC_SW1SELECT_LSB 0x06
++#define TWL4030_MADC_SW2SELECT_LSB 0x0A
++
++#define TWL4030_MADC_RTAVERAGE_LSB 0x04
++#define TWL4030_MADC_SW1AVERAGE_LSB 0x08
++#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C
++
++#define TWL4030_MADC_CTRL_SW1 0x12
++#define TWL4030_MADC_CTRL_SW2 0x13
++
++#define TWL4030_MADC_RTCH0_LSB 0x17
++#define TWL4030_MADC_GPCH0_LSB 0x37
++
++#define TWL4030_MADC_MADCON (1<<0) /* MADC power on */
++#define TWL4030_MADC_BUSY (1<<0) /* MADC busy */
++#define TWL4030_MADC_EOC_SW (1<<1) /* MADC conversion completion */
++#define TWL4030_MADC_SW_START (1<<5) /* MADC SWx start conversion */
++
++#define TWL4030_MADC_ADCIN0 (1<<0)
++#define TWL4030_MADC_ADCIN1 (1<<1)
++#define TWL4030_MADC_ADCIN2 (1<<2)
++#define TWL4030_MADC_ADCIN3 (1<<3)
++#define TWL4030_MADC_ADCIN4 (1<<4)
++#define TWL4030_MADC_ADCIN5 (1<<5)
++#define TWL4030_MADC_ADCIN6 (1<<6)
++#define TWL4030_MADC_ADCIN7 (1<<7)
++#define TWL4030_MADC_ADCIN8 (1<<8)
++#define TWL4030_MADC_ADCIN9 (1<<9)
++#define TWL4030_MADC_ADCIN10 (1<<10)
++#define TWL4030_MADC_ADCIN11 (1<<11)
++#define TWL4030_MADC_ADCIN12 (1<<12)
++#define TWL4030_MADC_ADCIN13 (1<<13)
++#define TWL4030_MADC_ADCIN14 (1<<14)
++#define TWL4030_MADC_ADCIN15 (1<<15)
++
++/* Fixed channels */
++#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1
++#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8
++#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9
++#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10
++#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11
++#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12
++
++/* BCI related - XXX To be moved elsewhere */
++#define TWL4030_BCI_BCICTL1 0x23
++#define TWL4030_BCI_MESBAT (1<<1)
++#define TWL4030_BCI_TYPEN (1<<4)
++#define TWL4030_BCI_ITHEN (1<<3)
++
++#define TWL4030_MADC_IOC_MAGIC '`'
++#define TWL4030_MADC_IOCX_ADC_RAW_READ _IO(TWL4030_MADC_IOC_MAGIC, 0)
++
++struct twl4030_madc_user_parms {
++ int channel;
++ int average;
++ int status;
++ u16 result;
++};
++
++int twl4030_madc_conversion(struct twl4030_madc_request *conv);
++
++#endif
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-n900-modem-support.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-n900-modem-support.patch
new file mode 100644
index 0000000000..9fb4181fca
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-n900-modem-support.patch
@@ -0,0 +1,7773 @@
+From b1735bfe4a0a2d37132066866735af01a3ed0000 Mon Sep 17 00:00:00 2001
+From: Kai Vehmanen <kai.vehmanen@nokia.com>
+Date: Thu, 10 Jun 2010 14:13:16 +0300
+Subject: [PATCH 1/2] Cellular modem support for N900
+
+This patch is combination of following patches:
+
+1. OMAP2/3 PRCM: export cm_read_mod_reg and cm_write_mod_reg
+
+Export functions needed by the OMAP HSI driver.
+
+Partially reverts 42d75e7df324268c99153d9cc7d48e780b54d059.
+
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+2. HSI: Introducing HSI framework
+
+Adds HSI framework in to the linux kernel.
+
+High Speed Synchronous Serial Interface (HSI) is a
+serial interface mainly used for connecting application
+engines (APE) with cellular modem engines (CMT) in cellular
+handsets.
+
+HSI provides multiplexing for up to 16 logical channels,
+low-latency and full duplex communication.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+3. HSI: omap_ssi: Introducing OMAP SSI driver
+
+Introduces the OMAP SSI driver in the kernel.
+
+The Synchronous Serial Interface (SSI) is a legacy version
+of HSI. As in the case of HSI, it is mainly used to connect
+Application engines (APE) with cellular modem engines (CMT)
+in cellular handsets.
+
+It provides a multichannel, full-duplex, multi-core communication
+with no reference clock. The OMAP SSI block is capable of reaching
+speeds of 110 Mbit/s.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+4. HSI: omap_ssi: Add OMAP SSI to the kernel configuration
+
+Add OMAP SSI device and driver to the kernel configuration
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+5. HSI: hsi_char: Add HSI char device driver
+
+Add HSI char device driver to the kernel.
+
+Signed-off-by: Andras Domokos <andras.domokos@nokia.com>
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+6. HSI: hsi_char: Add HSI char device kernel configuration
+
+Add HSI character device kernel configuration
+
+Signed-off-by: Andras Domokos <andras.domokos@nokia.com>
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+7. HSI: Add HSI API documentation
+
+Add an entry for HSI in the device-drivers section of
+the kernel documentation.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+8. HSI: ssip: Add SSI protocol to the kernel configuration
+
+Add the SSI protocol aka McSAAB to the kernel configuration.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+9. HSI: cmt_speech: Add CMT_SPEECH driver
+
+Introduces the cmt_speech driver. This driver implements
+a character device interface for transferring speech data
+frames over HSI/SSI.
+
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+
+10. HSI: cmt_speech: Add CMT SPEECH to the configuration
+
+Add CMT SPEECH to the kernel configuration.
+
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+
+11. CMT: Introduces the Nokia CMT driver.
+
+Add the Nokia CMT driver. The CMT driver takes care of the
+APE-CMT hardware outside the SSI/HSI communication.
+
+The initial implementation introduces a CMT reset notification
+interface for protocols drivers like cmt_speech and ssi_protocol.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+12. CMT: Add Nokia CMT driver to the configuration
+
+Add the Nokia CMT driver to the kernel configuration.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+13. rx51: Add SSI related support to board
+
+Add OMAP SSI configuration for the board.
+Add CMT device to the board.
+Add HSI/SSI device clients to the board.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+14. HSI: ssip: Introducing SSI protocol (aka McSAAB)
+
+Introduces the SSI protocol aka McSAAB.
+
+The SSI protocol is a link layer protocol for SSI, which
+is used to transport Phonet messages between APE and CMT.
+
+Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+
+15. HSI: cmt_speech: Adapt to kernel API changes in 2.6.35
+
+Use the updated pm_qos_params.h interface.
+
+Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com>
+---
+ Documentation/DocBook/device-drivers.tmpl | 17
+ arch/arm/mach-omap2/Makefile | 3
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 65
+ arch/arm/mach-omap2/prcm.c | 2
+ arch/arm/mach-omap2/ssi.c | 139 ++
+ arch/arm/plat-omap/include/plat/ssi.h | 204 +++
+ drivers/Kconfig | 2
+ drivers/Makefile | 1
+ drivers/hsi/Kconfig | 16
+ drivers/hsi/Makefile | 5
+ drivers/hsi/clients/Kconfig | 32
+ drivers/hsi/clients/Makefile | 7
+ drivers/hsi/clients/cmt_speech.c | 1415 +++++++++++++++++++++
+ drivers/hsi/clients/hsi_char.c | 1053 +++++++++++++++
+ drivers/hsi/clients/ssi_protocol.c | 1153 +++++++++++++++++
+ drivers/hsi/controllers/Kconfig | 21
+ drivers/hsi/controllers/Makefile | 5
+ drivers/hsi/controllers/omap_ssi.c | 1819 +++++++++++++++++++++++++++
+ drivers/hsi/hsi.c | 515 +++++++
+ drivers/misc/Kconfig | 1
+ drivers/misc/Makefile | 1
+ drivers/misc/cmt/Kconfig | 9
+ drivers/misc/cmt/Makefile | 5
+ drivers/misc/cmt/cmt.c | 223 +++
+ include/linux/Kbuild | 1
+ include/linux/cmt.h | 53
+ include/linux/cs-protocol.h | 116 +
+ include/linux/hsi/Kbuild | 1
+ include/linux/hsi/hsi.h | 376 +++++
+ include/linux/hsi/hsi_char.h | 66
+ include/linux/hsi/omap_ssi_hack.h | 37
+ include/linux/hsi/ssip_slave.h | 38
+ 32 files changed, 7401 insertions(+)
+ create mode 100644 arch/arm/mach-omap2/ssi.c
+ create mode 100644 arch/arm/plat-omap/include/plat/ssi.h
+ create mode 100644 drivers/hsi/Kconfig
+ create mode 100644 drivers/hsi/Makefile
+ create mode 100644 drivers/hsi/clients/Kconfig
+ create mode 100644 drivers/hsi/clients/Makefile
+ create mode 100644 drivers/hsi/clients/cmt_speech.c
+ create mode 100644 drivers/hsi/clients/hsi_char.c
+ create mode 100644 drivers/hsi/clients/ssi_protocol.c
+ create mode 100644 drivers/hsi/controllers/Kconfig
+ create mode 100644 drivers/hsi/controllers/Makefile
+ create mode 100644 drivers/hsi/controllers/omap_ssi.c
+ create mode 100644 drivers/hsi/hsi.c
+ create mode 100644 drivers/misc/cmt/Kconfig
+ create mode 100644 drivers/misc/cmt/Makefile
+ create mode 100644 drivers/misc/cmt/cmt.c
+ create mode 100644 include/linux/cmt.h
+ create mode 100644 include/linux/cs-protocol.h
+ create mode 100644 include/linux/hsi/Kbuild
+ create mode 100644 include/linux/hsi/hsi.h
+ create mode 100644 include/linux/hsi/hsi_char.h
+ create mode 100644 include/linux/hsi/omap_ssi_hack.h
+ create mode 100644 include/linux/hsi/ssip_slave.h
+
+--- a/Documentation/DocBook/device-drivers.tmpl
++++ b/Documentation/DocBook/device-drivers.tmpl
+@@ -428,4 +428,21 @@
+ !Edrivers/i2c/i2c-core.c
+ </chapter>
+
++ <chapter id="hsi">
++ <title>High Speed Synchronous Serial Interface (HSI)</title>
++
++ <para>
++ High Speed Synchronous Serial Interface (HSI) is a
++ serial interface mainly used for connecting application
++ engines (APE) with cellular modem engines (CMT) in cellular
++ handsets.
++
++ HSI provides multiplexing for up to 16 logical channels,
++ low-latency and full duplex communication.
++ </para>
++
++!Iinclude/linux/hsi/hsi.h
++!Edrivers/hsi/hsi.c
++ </chapter>
++
+ </book>
+--- a/arch/arm/mach-omap2/Makefile
++++ b/arch/arm/mach-omap2/Makefile
+@@ -94,6 +94,9 @@
+ i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
+ obj-y += $(i2c-omap-m) $(i2c-omap-y)
+
++omap-ssi-$(CONFIG_OMAP_SSI) := ssi.o
++obj-y += $(omap-ssi-m) $(omap-ssi-y)
++
+ # Specific board support
+ obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
+ obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -27,6 +27,8 @@
+ #include <linux/bluetooth/hci_h4p.h>
+ #include <media/radio-si4713.h>
+ #include <media/si4713.h>
++#include <linux/hsi/hsi.h>
++#include <linux/cmt.h>
+
+ #include <plat/mcspi.h>
+ #include <plat/mux.h>
+@@ -37,6 +39,7 @@
+ #include <plat/onenand.h>
+ #include <plat/gpmc-smc91x.h>
+ #include <plat/serial.h>
++#include <plat/ssi.h>
+
+ #include <../drivers/staging/iio/light/tsl2563.h>
+ #include <linux/lis3lv02d.h>
+@@ -1135,6 +1138,66 @@
+ printk(KERN_ERR "Bluetooth device registration failed\n");
+ }
+
++static struct cmt_platform_data rx51_cmt_pdata = {
++ .cmt_rst_ind_gpio = 72,
++};
++
++static struct platform_device rx51_cmt_device = {
++ .name = "cmt",
++ .id = -1,
++ .dev = {
++ .platform_data = &rx51_cmt_pdata,
++ },
++};
++
++static void __init rx51_cmt_init(void)
++{
++ int err;
++
++ err = platform_device_register(&rx51_cmt_device);
++ if (err < 0)
++ pr_err("Could not register CMT device\n");
++}
++
++static struct omap_ssi_board_config __initdata rx51_ssi_config = {
++ .num_ports = 1,
++ .cawake_gpio = { 151 },
++};
++
++static struct hsi_board_info __initdata rx51_ssi_cl[] = {
++ [0] = {
++ .name = "hsi_char",
++ .hsi_id = 0,
++ .port = 0,
++ },
++ [1] = {
++ .name = "ssi_protocol",
++ .hsi_id = 0,
++ .port = 0,
++ .tx_cfg = {
++ .mode = HSI_MODE_FRAME,
++ .channels = 4,
++ .speed = 55000,
++ .arb_mode = HSI_ARB_RR,
++ },
++ .rx_cfg = {
++ .mode = HSI_MODE_FRAME,
++ .channels = 4,
++ },
++ },
++ [2] = {
++ .name = "cmt_speech",
++ .hsi_id = 0,
++ .port = 0,
++ },
++};
++
++static void __init rx51_ssi_init(void)
++{
++ omap_ssi_config(&rx51_ssi_config);
++ hsi_register_board_info(rx51_ssi_cl, ARRAY_SIZE(rx51_ssi_cl));
++}
++
+ void __init rx51_peripherals_init(void)
+ {
+ rx51_i2c_init();
+@@ -1145,6 +1208,8 @@
+ rx51_init_tsc2005();
+ rx51_bt_init();
+ rx51_init_si4713();
++ rx51_cmt_init();
++ rx51_ssi_init();
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ omap2_hsmmc_init(mmc);
+--- a/arch/arm/mach-omap2/prcm.c
++++ b/arch/arm/mach-omap2/prcm.c
+@@ -220,12 +220,14 @@
+ {
+ return __omap_prcm_read(cm_base, module, idx);
+ }
++EXPORT_SYMBOL(cm_read_mod_reg);
+
+ /* Write into a register in a CM module */
+ void cm_write_mod_reg(u32 val, s16 module, u16 idx)
+ {
+ __omap_prcm_write(val, cm_base, module, idx);
+ }
++EXPORT_SYMBOL(cm_write_mod_reg);
+
+ /* Read-modify-write a register in a CM module. Caller must lock */
+ u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
+--- /dev/null
++++ b/arch/arm/mach-omap2/ssi.c
+@@ -0,0 +1,139 @@
++/*
++ * linux/arch/arm/mach-omap2/ssi.c
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <linux/platform_device.h>
++#include <plat/omap-pm.h>
++#include <plat/ssi.h>
++
++static struct omap_ssi_platform_data ssi_pdata = {
++ .num_ports = SSI_NUM_PORTS,
++ .get_dev_context_loss_count = omap_pm_get_dev_context_loss_count,
++};
++
++static struct resource ssi_resources[] = {
++ /* SSI controller */
++ [0] = {
++ .start = 0x48058000,
++ .end = 0x48058fff,
++ .name = "omap_ssi_sys",
++ .flags = IORESOURCE_MEM,
++ },
++ /* GDD */
++ [1] = {
++ .start = 0x48059000,
++ .end = 0x48059fff,
++ .name = "omap_ssi_gdd",
++ .flags = IORESOURCE_MEM,
++ },
++ [2] = {
++ .start = 71,
++ .end = 71,
++ .name = "ssi_gdd",
++ .flags = IORESOURCE_IRQ,
++ },
++ /* SSI port 1 */
++ [3] = {
++ .start = 0x4805a000,
++ .end = 0x4805a7ff,
++ .name = "omap_ssi_sst1",
++ .flags = IORESOURCE_MEM,
++ },
++ [4] = {
++ .start = 0x4805a800,
++ .end = 0x4805afff,
++ .name = "omap_ssi_ssr1",
++ .flags = IORESOURCE_MEM,
++ },
++ [5] = {
++ .start = 67,
++ .end = 67,
++ .name = "ssi_p1_mpu_irq0",
++ .flags = IORESOURCE_IRQ,
++ },
++ [6] = {
++ .start = 69,
++ .end = 69,
++ .name = "ssi_p1_mpu_irq1",
++ .flags = IORESOURCE_IRQ,
++ },
++ [7] = {
++ .start = 0,
++ .end = 0,
++ .name = "ssi_p1_cawake",
++ .flags = IORESOURCE_IRQ | IORESOURCE_UNSET,
++ },
++};
++
++static void ssi_pdev_release(struct device *dev)
++{
++}
++
++static struct platform_device ssi_pdev = {
++ .name = "omap_ssi",
++ .id = 0,
++ .num_resources = ARRAY_SIZE(ssi_resources),
++ .resource = ssi_resources,
++ .dev = {
++ .release = ssi_pdev_release,
++ .platform_data = &ssi_pdata,
++ },
++};
++
++int __init omap_ssi_config(struct omap_ssi_board_config *ssi_config)
++{
++ unsigned int port, offset, cawake_gpio;
++ int err;
++
++ ssi_pdata.num_ports = ssi_config->num_ports;
++ for (port = 0, offset = 7; port < ssi_config->num_ports;
++ port++, offset += 5) {
++ cawake_gpio = ssi_config->cawake_gpio[port];
++ if (!cawake_gpio)
++ continue; /* Nothing to do */
++ err = gpio_request(cawake_gpio, "cawake");
++ if (err < 0)
++ goto rback;
++ gpio_direction_input(cawake_gpio);
++ ssi_resources[offset].start = gpio_to_irq(cawake_gpio);
++ ssi_resources[offset].flags &= ~IORESOURCE_UNSET;
++ ssi_resources[offset].flags |= IORESOURCE_IRQ_HIGHEDGE |
++ IORESOURCE_IRQ_LOWEDGE;
++ }
++
++ return 0;
++rback:
++ dev_err(&ssi_pdev.dev, "Request cawake (gpio%d) failed\n", cawake_gpio);
++ while (port > 0)
++ gpio_free(ssi_config->cawake_gpio[--port]);
++
++ return err;
++}
++
++static int __init ssi_init(void)
++{
++ return platform_device_register(&ssi_pdev);
++}
++subsys_initcall(ssi_init);
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/ssi.h
+@@ -0,0 +1,204 @@
++/*
++ * plat/ssi.h
++ *
++ * Hardware definitions for SSI.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef __OMAP_SSI_REGS_H__
++#define __OMAP_SSI_REGS_H__
++
++#define SSI_NUM_PORTS 1
++/*
++ * SSI SYS registers
++ */
++#define SSI_REVISION_REG 0
++# define SSI_REV_MAJOR 0xf0
++# define SSI_REV_MINOR 0xf
++#define SSI_SYSCONFIG_REG 0x10
++# define SSI_AUTOIDLE (1 << 0)
++# define SSI_SOFTRESET (1 << 1)
++# define SSI_SIDLEMODE_FORCE 0
++# define SSI_SIDLEMODE_NO (1 << 3)
++# define SSI_SIDLEMODE_SMART (1 << 4)
++# define SSI_SIDLEMODE_MASK 0x18
++# define SSI_MIDLEMODE_FORCE 0
++# define SSI_MIDLEMODE_NO (1 << 12)
++# define SSI_MIDLEMODE_SMART (1 << 13)
++# define SSI_MIDLEMODE_MASK 0x3000
++#define SSI_SYSSTATUS_REG 0x14
++# define SSI_RESETDONE 1
++#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2))
++#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8))
++# define SSI_DATAACCEPT(channel) (1 << (channel))
++# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8))
++# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16))
++# define SSI_ERROROCCURED (1 << 24)
++# define SSI_BREAKDETECTED (1 << 25)
++#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800
++#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804
++# define SSI_GDD_LCH(channel) (1 << (channel))
++#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10))
++#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10))
++#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10))
++# define SSI_WAKE(channel) (1 << (channel))
++# define SSI_WAKE_MASK 0xff
++
++/*
++ * SSI SST registers
++ */
++#define SSI_SST_ID_REG 0
++#define SSI_SST_MODE_REG 4
++# define SSI_MODE_VAL_MASK 3
++# define SSI_MODE_SLEEP 0
++# define SSI_MODE_STREAM 1
++# define SSI_MODE_FRAME 2
++# define SSI_MODE_MULTIPOINTS 3
++#define SSI_SST_FRAMESIZE_REG 8
++# define SSI_FRAMESIZE_DEFAULT 31
++#define SSI_SST_TXSTATE_REG 0xc
++# define SSI_TXSTATE_IDLE 0
++#define SSI_SST_BUFSTATE_REG 0x10
++# define SSI_FULL(channel) (1 << (channel))
++#define SSI_SST_DIVISOR_REG 0x18
++# define SSI_MAX_DIVISOR 127
++#define SSI_SST_BREAK_REG 0x20
++#define SSI_SST_CHANNELS_REG 0x24
++# define SSI_CHANNELS_DEFAULT 4
++#define SSI_SST_ARBMODE_REG 0x28
++# define SSI_ARBMODE_ROUNDROBIN 0
++# define SSI_ARBMODE_PRIORITY 1
++#define SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
++#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
++
++/*
++ * SSI SSR registers
++ */
++#define SSI_SSR_ID_REG 0
++#define SSI_SSR_MODE_REG 4
++#define SSI_SSR_FRAMESIZE_REG 8
++#define SSI_SSR_RXSTATE_REG 0xc
++#define SSI_SSR_BUFSTATE_REG 0x10
++# define SSI_NOTEMPTY(channel) (1 << (channel))
++#define SSI_SSR_BREAK_REG 0x1c
++#define SSI_SSR_ERROR_REG 0x20
++#define SSI_SSR_ERRORACK_REG 0x24
++#define SSI_SSR_OVERRUN_REG 0x2c
++#define SSI_SSR_OVERRUNACK_REG 0x30
++#define SSI_SSR_TIMEOUT_REG 0x34
++# define SSI_TIMEOUT_DEFAULT 0
++#define SSI_SSR_CHANNELS_REG 0x28
++#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
++#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
++
++/*
++ * SSI GDD registers
++ */
++#define SSI_GDD_HW_ID_REG 0
++#define SSI_GDD_PPORT_ID_REG 0x10
++#define SSI_GDD_MPORT_ID_REG 0x14
++#define SSI_GDD_PPORT_SR_REG 0x20
++#define SSI_GDD_MPORT_SR_REG 0x24
++# define SSI_ACTIVE_LCH_NUM_MASK 0xff
++#define SSI_GDD_TEST_REG 0x40
++# define SSI_TEST 1
++#define SSI_GDD_GCR_REG 0x100
++# define SSI_CLK_AUTOGATING_ON (1 << 3)
++# define SSI_FREE (1 << 2)
++# define SSI_SWITCH_OFF (1 << 0)
++#define SSI_GDD_GRST_REG 0x200
++# define SSI_SWRESET 1
++#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40))
++# define SSI_DST_BURST_EN_MASK 0xc000
++# define SSI_DST_SINGLE_ACCESS0 0
++# define SSI_DST_SINGLE_ACCESS (1 << 14)
++# define SSI_DST_BURST_4x32_BIT (2 << 14)
++# define SSI_DST_BURST_8x32_BIT (3 << 14)
++# define SSI_DST_MASK 0x1e00
++# define SSI_DST_MEMORY_PORT (8 << 9)
++# define SSI_DST_PERIPHERAL_PORT (9 << 9)
++# define SSI_SRC_BURST_EN_MASK 0x180
++# define SSI_SRC_SINGLE_ACCESS0 0
++# define SSI_SRC_SINGLE_ACCESS (1 << 7)
++# define SSI_SRC_BURST_4x32_BIT (2 << 7)
++# define SSI_SRC_BURST_8x32_BIT (3 << 7)
++# define SSI_SRC_MASK 0x3c
++# define SSI_SRC_MEMORY_PORT (8 << 2)
++# define SSI_SRC_PERIPHERAL_PORT (9 << 2)
++# define SSI_DATA_TYPE_MASK 3
++# define SSI_DATA_TYPE_S32 2
++#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40))
++# define SSI_DST_AMODE_MASK (3 << 14)
++# define SSI_DST_AMODE_CONST 0
++# define SSI_DST_AMODE_POSTINC (1 << 12)
++# define SSI_SRC_AMODE_MASK (3 << 12)
++# define SSI_SRC_AMODE_CONST 0
++# define SSI_SRC_AMODE_POSTINC (1 << 12)
++# define SSI_CCR_ENABLE (1 << 7)
++# define SSI_CCR_SYNC_MASK 0x1f
++#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40))
++# define SSI_BLOCK_IE (1 << 5)
++# define SSI_HALF_IE (1 << 2)
++# define SSI_TOUT_IE (1 << 0)
++#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40))
++# define SSI_CSR_SYNC (1 << 6)
++# define SSI_CSR_BLOCK (1 << 5)
++# define SSI_CSR_HALF (1 << 2)
++# define SSI_CSR_TOUR (1 << 0)
++#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40))
++#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40))
++#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40))
++#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40))
++#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40))
++#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40))
++# define SSI_ENABLE_LNK (1 << 15)
++# define SSI_STOP_LNK (1 << 14)
++# define SSI_NEXT_CH_ID_MASK 0xf
++
++/**
++ * struct omap_ssi_platform_data - OMAP SSI platform data
++ * @num_ports: Number of ports on the controller
++ * @get_dev_context_loss_count: Pointer to omap_pm_get_dev_context_loss_count
++ */
++struct omap_ssi_platform_data {
++ unsigned int num_ports;
++ int (*get_dev_context_loss_count)(struct device *dev);
++};
++
++/**
++ * struct omap_ssi_board_config - SSI board configuration
++ * @num_ports: Number of ports in use
++ * @cawake_gpio: Array of cawake gpio lines
++ */
++struct omap_ssi_board_config {
++ unsigned int num_ports;
++ int cawake_gpio[SSI_NUM_PORTS];
++};
++
++#ifdef CONFIG_OMAP_SSI_CONFIG
++extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
++#else
++static inline int omap_ssi_config(struct omap_ssi_board_config *ssi_config)
++{
++ return 0;
++}
++#endif /* CONFIG_OMAP_SSI_CONFIG */
++
++#endif /* __OMAP_SSI_REGS_H__ */
+--- a/drivers/Kconfig
++++ b/drivers/Kconfig
+@@ -50,6 +50,8 @@
+
+ source "drivers/spi/Kconfig"
+
++source "drivers/hsi/Kconfig"
++
+ source "drivers/pps/Kconfig"
+
+ source "drivers/gpio/Kconfig"
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -47,6 +47,7 @@
+ obj-$(CONFIG_ATA) += ata/
+ obj-$(CONFIG_MTD) += mtd/
+ obj-$(CONFIG_SPI) += spi/
++obj-$(CONFIG_HSI) += hsi/
+ obj-y += net/
+ obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_FUSION) += message/
+--- /dev/null
++++ b/drivers/hsi/Kconfig
+@@ -0,0 +1,16 @@
++#
++# HSI driver configuration
++#
++menuconfig HSI
++ bool "HSI support"
++ ---help---
++ The "High speed synchronous Serial Interface" is
++ synchronous serial interface used mainly to connect
++ application engines and cellular modems.
++
++if HSI
++
++source "drivers/hsi/controllers/Kconfig"
++source "drivers/hsi/clients/Kconfig"
++
++endif # HSI
+--- /dev/null
++++ b/drivers/hsi/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for HSI
++#
++obj-$(CONFIG_HSI) += hsi.o
++obj-y += controllers/ clients/
+--- /dev/null
++++ b/drivers/hsi/clients/Kconfig
+@@ -0,0 +1,32 @@
++#
++# HSI clients configuration
++#
++
++comment "HSI clients"
++
++config SSI_PROTOCOL
++ tristate "SSI protocol"
++ default n
++ depends on HSI && OMAP_SSI && CMT && PHONET
++ ---help---
++ If you say Y here, you will enable the SSI protocol aka McSAAB.
++
++ If unsure, say N.
++
++config HSI_CHAR
++ tristate "HSI/SSI character driver"
++ depends on HSI
++ ---help---
++ If you say Y here, you will enable the HSI/SSI character driver.
++ This driver provides a simple character device interface for
++ serial communication with the cellular modem over HSI/SSI bus.
++
++config HSI_CMT_SPEECH
++ tristate "HSI/SSI CMT speech driver"
++ depends on HSI && SSI_PROTOCOL
++ ---help---
++ If you say Y here, you will enable the HSI CMT speech driver.
++ This driver implements a character device interface for transferring
++ speech data frames over HSI. This driver is used in e.g. Nokia N900.
++
++ If unsure, say Y, or else you will not be able to make voice calls.
+--- /dev/null
++++ b/drivers/hsi/clients/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile for HSI clients
++#
++
++obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o
++obj-$(CONFIG_HSI_CHAR) += hsi_char.o
++obj-$(CONFIG_HSI_CMT_SPEECH) += cmt_speech.o
+--- /dev/null
++++ b/drivers/hsi/clients/cmt_speech.c
+@@ -0,0 +1,1415 @@
++/*
++ * cmt_speech.c - HSI CMT speech driver
++ *
++ * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
++ * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/sched.h>
++#include <linux/ioctl.h>
++#include <linux/uaccess.h>
++#include <linux/pm_qos_params.h>
++#include <linux/hsi/hsi.h>
++#include <linux/hsi/ssip_slave.h>
++#include <linux/cs-protocol.h>
++
++#define CS_MMAP_SIZE PAGE_SIZE
++#define DRIVER_NAME "cmt_speech"
++
++struct char_queue {
++ struct list_head list;
++ u32 msg;
++};
++
++struct cs_char {
++ unsigned int opened;
++ struct hsi_client *cl;
++ struct cs_hsi_iface *hi;
++ struct list_head chardev_queue;
++ struct list_head dataind_queue;
++ int dataind_pending;
++ /* mmap things */
++ unsigned long mmap_base;
++ unsigned long mmap_size;
++ spinlock_t lock;
++ struct fasync_struct *async_queue;
++ wait_queue_head_t wait;
++ wait_queue_head_t datawait;
++};
++
++#define SSI_CHANNEL_STATE_READING 1
++#define SSI_CHANNEL_STATE_WRITING (1 << 1)
++#define SSI_CHANNEL_STATE_POLL (1 << 2)
++#define SSI_CHANNEL_STATE_SYNC (1 << 3)
++#define SSI_CHANNEL_STATE_ERROR (1 << 4)
++
++#define CONTROL_HSI_CH 1
++#define DATA_HSI_CH 2
++
++#define TARGET_MASK 0xf000000
++#define TARGET_REMOTE (1 << CS_DOMAIN_SHIFT)
++#define TARGET_LOCAL 0
++
++/* Number of pre-allocated commands buffers */
++#define CS_MAX_CMDS 4
++
++/*
++ * During data transfers, transactions must be handled
++ * within 20ms (fixed value in cmtspeech HSI protocol)
++ */
++#define CS_QOS_LATENCY_FOR_DATA_USEC 20000
++
++#define RX_PTR_BOUNDARY_SHIFT 8
++#define RX_PTR_MAX_SHIFT (RX_PTR_BOUNDARY_SHIFT + \
++ CS_MAX_BUFFERS_SHIFT)
++struct cs_hsi_iface {
++ struct hsi_client *cl;
++ struct hsi_client *master;
++
++ unsigned int iface_state;
++ unsigned int wakeline_state;
++ unsigned int control_state;
++ unsigned int data_state;
++
++ /* state exposed to application */
++ struct cs_mmap_config_block *mmap_cfg;
++
++ unsigned long mmap_base;
++ unsigned long mmap_size;
++
++ unsigned int rx_slot;
++ unsigned int tx_slot;
++
++ /* note: for security reasons, we do not trust the contents of
++ * mmap_cfg, but instead duplicate the variables here */
++ unsigned int buf_size;
++ unsigned int rx_bufs;
++ unsigned int tx_bufs;
++ unsigned int rx_ptr_boundary;
++ unsigned int rx_offsets[CS_MAX_BUFFERS];
++ unsigned int tx_offsets[CS_MAX_BUFFERS];
++ /* size of aligned memory blocks */
++ unsigned int slot_size;
++ unsigned int flags;
++
++ struct list_head cmdqueue;
++
++ struct hsi_msg *data_rx_msg;
++ struct hsi_msg *data_tx_msg;
++
++ struct pm_qos_request_list *pm_qos_req;
++
++ spinlock_t lock;
++};
++
++static struct cs_char cs_char_data;
++
++static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
++static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
++
++static inline void rx_ptr_shift_too_big(void)
++{
++ BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
++}
++
++static void cs_notify(u32 message, struct list_head *head)
++{
++ struct char_queue *entry;
++
++ spin_lock(&cs_char_data.lock);
++
++ if (!cs_char_data.opened) {
++ spin_unlock(&cs_char_data.lock);
++ goto out;
++ }
++
++ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++ if (!entry) {
++ dev_err(&cs_char_data.cl->device,
++ "Can't allocate new entry for the queue.\n");
++ spin_unlock(&cs_char_data.lock);
++ goto out;
++ }
++
++ entry->msg = message;
++ list_add_tail(&entry->list, head);
++
++ spin_unlock(&cs_char_data.lock);
++
++ wake_up_interruptible(&cs_char_data.wait);
++ kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);
++
++out:
++ return;
++}
++
++static u32 cs_pop_entry(struct list_head *head)
++{
++ struct char_queue *entry;
++ u32 data;
++
++ entry = list_entry(head->next, struct char_queue, list);
++ data = entry->msg;
++ list_del(&entry->list);
++ kfree(entry);
++
++ return data;
++}
++
++static void cs_notify_control(u32 message)
++{
++ cs_notify(message, &cs_char_data.chardev_queue);
++}
++
++static void cs_notify_data(u32 message, int maxlength)
++{
++ cs_notify(message, &cs_char_data.dataind_queue);
++
++ spin_lock(&cs_char_data.lock);
++ ++cs_char_data.dataind_pending;
++ while (cs_char_data.dataind_pending > maxlength &&
++ !list_empty(&cs_char_data.dataind_queue)) {
++ dev_dbg(&cs_char_data.cl->device, "data notification "
++ "queue overrun (%u entries)\n", cs_char_data.dataind_pending);
++
++ cs_pop_entry(&cs_char_data.dataind_queue);
++ --cs_char_data.dataind_pending;
++ }
++ spin_unlock(&cs_char_data.lock);
++}
++
++static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
++{
++ u32 *data;
++
++ data = sg_virt(msg->sgt.sgl);
++ *data = cmd;
++}
++
++static inline u32 cs_get_cmd(struct hsi_msg *msg)
++{
++ u32 *data;
++
++ data = sg_virt(msg->sgt.sgl);
++
++ return *data;
++}
++
++static void cs_release_cmd(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++
++ list_add_tail(&msg->link, &hi->cmdqueue);
++}
++
++static void cs_cmd_destructor(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++
++ spin_lock(&hi->lock);
++
++ dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");
++
++ if (hi->iface_state != CS_STATE_CLOSED)
++ dev_err(&hi->cl->device, "Cmd flushed while driver active\n");
++
++ if (msg->ttype == HSI_MSG_READ)
++ hi->control_state &=
++ ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
++ else if (msg->ttype == HSI_MSG_WRITE &&
++ hi->control_state & SSI_CHANNEL_STATE_WRITING)
++ hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
++
++ cs_release_cmd(msg);
++
++ spin_unlock(&hi->lock);
++}
++
++static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi)
++{
++ struct hsi_msg *msg;
++
++ BUG_ON(list_empty(&ssi->cmdqueue));
++
++ msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
++ list_del(&msg->link);
++ msg->destructor = cs_cmd_destructor;
++
++ return msg;
++}
++
++static void cs_free_cmds(struct cs_hsi_iface *ssi)
++{
++ struct hsi_msg *msg, *tmp;
++
++ list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
++ list_del(&msg->link);
++ msg->destructor = NULL;
++ kfree(sg_virt(msg->sgt.sgl));
++ hsi_free_msg(msg);
++ }
++}
++
++static int cs_alloc_cmds(struct cs_hsi_iface *hi)
++{
++ struct hsi_msg *msg;
++ u32 *buf;
++ unsigned int i;
++
++ INIT_LIST_HEAD(&hi->cmdqueue);
++
++ for (i = 0; i < CS_MAX_CMDS; i++) {
++ msg = hsi_alloc_msg(1, GFP_ATOMIC);
++ if (!msg)
++ goto out;
++ buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
++ if (!buf) {
++ hsi_free_msg(msg);
++ goto out;
++ }
++ sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
++ msg->channel = CONTROL_HSI_CH;
++ msg->context = hi;
++ list_add_tail(&msg->link, &hi->cmdqueue);
++ }
++
++ return 0;
++
++out:
++ cs_free_cmds(hi);
++ return -ENOMEM;
++}
++
++static void cs_hsi_data_destructor(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++ const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX";
++
++ dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);
++
++ spin_lock(&hi->lock);
++ if (hi->iface_state != CS_STATE_CLOSED)
++ dev_err(&cs_char_data.cl->device,
++ "Data %s flush while device active\n", dir);
++ if (msg->ttype == HSI_MSG_READ)
++ hi->data_state &=
++ ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
++ else
++ hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
++ spin_unlock(&hi->lock);
++
++ msg->status = HSI_STATUS_COMPLETED;
++ wake_up_interruptible(&cs_char_data.datawait);
++}
++
++static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
++{
++ struct hsi_msg *txmsg, *rxmsg;
++ int res = 0;
++
++ rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
++ if (!rxmsg) {
++ res = -ENOMEM;
++ goto out1;
++ }
++ rxmsg->channel = DATA_HSI_CH;
++ rxmsg->destructor = cs_hsi_data_destructor;
++ rxmsg->context = hi;
++
++ txmsg = hsi_alloc_msg(1, GFP_KERNEL);
++ if (!txmsg) {
++ res = -ENOMEM;
++ goto out2;
++ }
++ txmsg->channel = DATA_HSI_CH;
++ txmsg->destructor = cs_hsi_data_destructor;
++ txmsg->context = hi;
++
++ hi->data_rx_msg = rxmsg;
++ hi->data_tx_msg = txmsg;
++
++ return 0;
++
++out2:
++ hsi_free_msg(rxmsg);
++out1:
++ return res;
++}
++
++static void cs_hsi_free_data_msg(struct hsi_msg *msg)
++{
++ WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
++ msg->status != HSI_STATUS_ERROR);
++ hsi_free_msg(msg);
++}
++
++static void cs_hsi_free_data(struct cs_hsi_iface *hi)
++{
++ cs_hsi_free_data_msg(hi->data_rx_msg);
++ cs_hsi_free_data_msg(hi->data_tx_msg);
++}
++
++static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
++ struct hsi_msg *msg, const char *info,
++ unsigned int *state)
++{
++ spin_lock(&hi->lock);
++ dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
++ info, msg->status, *state);
++}
++
++static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
++{
++ spin_unlock(&hi->lock);
++}
++
++static inline void __cs_hsi_error_read_bits(unsigned int *state)
++{
++ *state |= SSI_CHANNEL_STATE_ERROR;
++ *state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
++}
++
++static inline void __cs_hsi_error_write_bits(unsigned int *state)
++{
++ *state |= SSI_CHANNEL_STATE_ERROR;
++ *state &= ~SSI_CHANNEL_STATE_WRITING;
++}
++
++static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
++ struct hsi_msg *msg)
++{
++ __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
++ cs_release_cmd(msg);
++ __cs_hsi_error_read_bits(&hi->control_state);
++ __cs_hsi_error_post(hi);
++}
++
++static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
++ struct hsi_msg *msg)
++{
++ __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
++ cs_release_cmd(msg);
++ __cs_hsi_error_write_bits(&hi->control_state);
++ __cs_hsi_error_post(hi);
++
++}
++
++static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
++{
++ __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
++ __cs_hsi_error_read_bits(&hi->data_state);
++ __cs_hsi_error_post(hi);
++}
++
++static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
++ struct hsi_msg *msg)
++{
++ __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
++ __cs_hsi_error_write_bits(&hi->data_state);
++ __cs_hsi_error_post(hi);
++}
++
++static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
++{
++ u32 cmd = cs_get_cmd(msg);
++ struct cs_hsi_iface *hi = msg->context;
++
++ spin_lock(&hi->lock);
++ hi->control_state &= ~SSI_CHANNEL_STATE_READING;
++ if (msg->status == HSI_STATUS_ERROR) {
++ dev_err(&hi->cl->device, "Control RX error detected\n");
++ cs_hsi_control_read_error(hi, msg);
++ spin_unlock(&hi->lock);
++ goto out;
++ }
++ dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
++ cs_release_cmd(msg);
++ if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
++ struct timespec *tstamp =
++ &hi->mmap_cfg->tstamp_rx_ctrl;
++ do_posix_clock_monotonic_gettime(tstamp);
++ }
++ spin_unlock(&hi->lock);
++
++ cs_notify_control(cmd);
++
++out:
++ cs_hsi_read_on_control(hi);
++}
++
++static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++ int ret;
++
++ if (msg->status == HSI_STATUS_ERROR) {
++ dev_err(&hi->cl->device, "Control peek RX error detected\n");
++ cs_hsi_control_read_error(hi, msg);
++ return;
++ }
++
++ WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));
++
++ dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
++ msg->sgt.nents = 1;
++ msg->complete = cs_hsi_read_on_control_complete;
++ ret = hsi_async_read(hi->cl, msg);
++ if (ret)
++ cs_hsi_control_read_error(hi, msg);
++}
++
++static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
++{
++ struct hsi_msg *msg;
++ int ret;
++
++ spin_lock(&hi->lock);
++ if (hi->control_state & SSI_CHANNEL_STATE_READING) {
++ dev_err(&hi->cl->device, "Control read already pending (%d)\n",
++ hi->control_state);
++ spin_unlock(&hi->lock);
++ return;
++ }
++ if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
++ dev_err(&hi->cl->device, "Control read error (%d)\n",
++ hi->control_state);
++ spin_unlock(&hi->lock);
++ return;
++ }
++ hi->control_state |= SSI_CHANNEL_STATE_READING;
++ dev_dbg(&hi->cl->device, "Issuing RX on control\n");
++ msg = cs_claim_cmd(hi);
++ spin_unlock(&hi->lock);
++
++ msg->sgt.nents = 0;
++ msg->complete = cs_hsi_peek_on_control_complete;
++ ret = hsi_async_read(hi->cl, msg);
++ if (ret)
++ cs_hsi_control_read_error(hi, msg);
++}
++
++static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++ if (msg->status == HSI_STATUS_COMPLETED) {
++ spin_lock(&hi->lock);
++ hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
++ cs_release_cmd(msg);
++ spin_unlock(&hi->lock);
++ } else if (msg->status == HSI_STATUS_ERROR) {
++ cs_hsi_control_write_error(hi, msg);
++ } else {
++ dev_err(&hi->cl->device,
++ "unexpected status in control write callback %d\n",
++ msg->status);
++ }
++}
++
++static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
++{
++ struct hsi_msg *msg;
++ int ret;
++
++ spin_lock(&hi->lock);
++ if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
++ spin_unlock(&hi->lock);
++ return -EIO;
++ }
++ if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
++ dev_err(&hi->cl->device,
++ "Write still pending on control channel.\n");
++ spin_unlock(&hi->lock);
++ return -EBUSY;
++ }
++ hi->control_state |= SSI_CHANNEL_STATE_WRITING;
++ msg = cs_claim_cmd(hi);
++ spin_unlock(&hi->lock);
++
++ cs_set_cmd(msg, message);
++ msg->sgt.nents = 1;
++ msg->complete = cs_hsi_write_on_control_complete;
++ dev_dbg(&hi->cl->device,
++ "Sending control message %08X\n", message);
++ ret = hsi_async_write(hi->cl, msg);
++ if (ret) {
++ dev_err(&hi->cl->device,
++ "async_write failed with %d\n", ret);
++ cs_hsi_control_write_error(hi, msg);
++ }
++
++ /*
++ * Make sure control read is always pending when issuing
++ * new control writes. This is needed as the controller
++ * may flush our messages if e.g. the peer device reboots
++ * unexpectedly (and we cannot directly resubmit a new read from
++ * the message destructor; see cs_cmd_destructor()).
++ */
++ if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
++ dev_err(&hi->cl->device, "Restarting control reads\n");
++ cs_hsi_read_on_control(hi);
++ }
++
++ return 0;
++}
++
++static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++ u32 payload;
++
++ if (unlikely(msg->status == HSI_STATUS_ERROR)) {
++ cs_hsi_data_read_error(hi, msg);
++ goto out;
++ }
++
++ spin_lock(&hi->lock);
++ WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
++ hi->data_state &= ~SSI_CHANNEL_STATE_READING;
++ payload = CS_RX_DATA_RECEIVED;
++ payload |= hi->rx_slot;
++ hi->rx_slot++;
++ hi->rx_slot %= hi->rx_ptr_boundary;
++ /* expose current rx ptr in mmap area */
++ hi->mmap_cfg->rx_ptr = hi->rx_slot;
++ spin_unlock(&hi->lock);
++
++ cs_notify_data(payload, hi->rx_bufs);
++ cs_hsi_read_on_data(hi);
++
++out:
++ wake_up_interruptible(&cs_char_data.datawait);
++}
++
++static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++ u32 *address;
++ int ret;
++
++ if (msg->status == HSI_STATUS_ERROR) {
++ cs_hsi_data_read_error(hi, msg);
++ return;
++ }
++
++ spin_lock(&hi->lock);
++ WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
++ hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
++ hi->data_state |= SSI_CHANNEL_STATE_READING;
++ spin_unlock(&hi->lock);
++
++ address = (u32 *)(hi->mmap_base +
++ hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
++ sg_init_one(msg->sgt.sgl, address, hi->buf_size);
++ msg->sgt.nents = 1;
++ msg->complete = cs_hsi_read_on_data_complete;
++ ret = hsi_async_read(hi->cl, msg);
++ if (ret)
++ cs_hsi_data_read_error(hi, msg);
++}
++
++/**
++ * Read/write transaction is ongoing. Returns false if in
++ * SSI_CHANNEL_STATE_POLL state.
++ */
++static inline int cs_state_xfer_active(unsigned int state)
++{
++ return (state & SSI_CHANNEL_STATE_WRITING) ||
++ (state & SSI_CHANNEL_STATE_READING);
++}
++
++/**
++ * No pending read/writes
++ */
++static inline int cs_state_idle(unsigned int state)
++{
++ return !(state & ~SSI_CHANNEL_STATE_ERROR);
++}
++
++static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
++{
++ struct hsi_msg *rxmsg;
++ int ret;
++
++ spin_lock(&hi->lock);
++ if (hi->data_state &
++ (SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
++ dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
++ hi->data_state);
++ spin_unlock(&hi->lock);
++ return;
++ }
++ hi->data_state |= SSI_CHANNEL_STATE_POLL;
++ spin_unlock(&hi->lock);
++
++ rxmsg = hi->data_rx_msg;
++ sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
++ rxmsg->sgt.nents = 0;
++ rxmsg->complete = cs_hsi_peek_on_data_complete;
++
++ ret = hsi_async_read(hi->cl, rxmsg);
++ if (ret)
++ cs_hsi_data_read_error(hi, rxmsg);
++}
++
++static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
++{
++ struct cs_hsi_iface *hi = msg->context;
++
++ if (msg->status == HSI_STATUS_COMPLETED) {
++ spin_lock(&hi->lock);
++ hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
++ spin_unlock(&hi->lock);
++ } else {
++ cs_hsi_data_write_error(hi, msg);
++ }
++
++ wake_up_interruptible(&cs_char_data.datawait);
++}
++
++static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
++{
++ u32 *address;
++ struct hsi_msg *txmsg;
++ int ret;
++
++ spin_lock(&hi->lock);
++ if (hi->iface_state != CS_STATE_CONFIGURED) {
++ dev_err(&hi->cl->device, "Not configured, aborting\n");
++ ret = -EINVAL;
++ goto error;
++ }
++ if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
++ dev_err(&hi->cl->device, "HSI error, aborting\n");
++ ret = -EIO;
++ goto error;
++ }
++ if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
++ dev_err(&hi->cl->device, "Write pending on data channel.\n");
++ ret = -EBUSY;
++ goto error;
++ }
++ hi->data_state |= SSI_CHANNEL_STATE_WRITING;
++ spin_unlock(&hi->lock);
++
++ hi->tx_slot = slot;
++ address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
++ txmsg = hi->data_tx_msg;
++ sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
++ txmsg->complete = cs_hsi_write_on_data_complete;
++ ret = hsi_async_write(hi->cl, txmsg);
++ if (ret)
++ cs_hsi_data_write_error(hi, txmsg);
++
++ return ret;
++
++error:
++ spin_unlock(&hi->lock);
++ if (ret == -EIO)
++ cs_hsi_data_write_error(hi, hi->data_tx_msg);
++
++ return ret;
++}
++
++static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
++{
++ return hi->iface_state;
++}
++
++static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
++{
++ int ret = 0;
++
++ local_bh_disable();
++ switch (cmd & TARGET_MASK) {
++ case TARGET_REMOTE:
++ ret = cs_hsi_write_on_control(hi, cmd);
++ break;
++ case TARGET_LOCAL:
++ if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
++ ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
++ else
++ ret = -EINVAL;
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ local_bh_enable();
++
++ return ret;
++}
++
++static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi,
++ unsigned int new_state)
++{
++ int change = 0;
++
++ spin_lock_bh(&hi->lock);
++ if (hi->wakeline_state != new_state) {
++ hi->wakeline_state = new_state;
++ change = 1;
++ dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
++ new_state, hi->cl);
++ }
++ spin_unlock_bh(&hi->lock);
++
++ if (change) {
++ if (new_state)
++ ssip_slave_start_tx(hi->master);
++ else
++ ssip_slave_stop_tx(hi->master);
++ }
++
++ dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
++ new_state, hi->cl);
++}
++
++static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
++{
++ hi->rx_bufs = rx_bufs;
++ hi->tx_bufs = tx_bufs;
++ hi->mmap_cfg->rx_bufs = rx_bufs;
++ hi->mmap_cfg->tx_bufs = tx_bufs;
++
++ if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
++ /*
++ * For more robust overrun detection, let the rx
++ * pointer run in range 0..'boundary-1'. Boundary
++ * is a multiple of rx_bufs, and limited in max size
++ * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
++ * calculation.
++ */
++ hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
++ hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
++ } else {
++ hi->rx_ptr_boundary = hi->rx_bufs;
++ }
++}
++
++static int check_buf_params(struct cs_hsi_iface *hi,
++ const struct cs_buffer_config *buf_cfg)
++{
++ size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
++ (buf_cfg->rx_bufs + buf_cfg->tx_bufs);
++ size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
++ int r = 0;
++
++ if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
++ buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
++ r = -EINVAL;
++ } else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
++ dev_err(&hi->cl->device, "No space for the requested buffer "
++ "configuration\n");
++ r = -ENOBUFS;
++ }
++
++ return r;
++}
++
++static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
++{
++ int r = 0;
++
++ spin_lock_bh(&hi->lock);
++ hi->data_state |= SSI_CHANNEL_STATE_SYNC;
++ spin_unlock_bh(&hi->lock);
++
++ for ( ; ; ) {
++ DEFINE_WAIT(wait);
++ unsigned int data_state;
++
++ spin_lock_bh(&hi->lock);
++ data_state = hi->data_state;
++ spin_unlock_bh(&hi->lock);
++
++ if (data_state & SSI_CHANNEL_STATE_ERROR) {
++ r = -EIO;
++ break;
++ }
++ if (!cs_state_xfer_active(data_state)) {
++ dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
++ break;
++ }
++ if (signal_pending(current)) {
++ dev_dbg(&hi->cl->device, "hsi_data_sync got signal\n");
++ r = -ERESTARTSYS;
++ break;
++ }
++ dev_dbg(&hi->cl->device, "hsi_data_sync in data state %u\n",
++ data_state);
++ prepare_to_wait_exclusive(&cs_char_data.datawait, &wait,
++ TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&cs_char_data.datawait, &wait);
++ }
++ spin_lock_bh(&hi->lock);
++ hi->data_state &= ~SSI_CHANNEL_STATE_SYNC;
++ WARN_ON(cs_state_xfer_active(hi->data_state));
++ spin_unlock_bh(&hi->lock);
++
++ return r;
++}
++
++static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
++ struct cs_buffer_config *buf_cfg)
++{
++ unsigned int data_start, i;
++
++ BUG_ON(hi->buf_size == 0);
++
++ set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);
++
++ hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
++ dev_dbg(&hi->cl->device,
++ "setting slot size to %u, buf size %u, align %u\n",
++ hi->slot_size, hi->buf_size, L1_CACHE_BYTES);
++
++ data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
++ dev_dbg(&hi->cl->device,
++ "setting data start at %u, cfg block %u, align %u\n",
++ data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
++
++ for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
++ hi->rx_offsets[i] = data_start + i * hi->slot_size;
++ hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
++ dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
++ i, hi->rx_offsets[i]);
++ }
++ for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
++ hi->tx_offsets[i] = data_start +
++ (i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
++ hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
++ dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
++ i, hi->rx_offsets[i]);
++ }
++
++ hi->iface_state = CS_STATE_CONFIGURED;
++}
++
++static void cs_hsi_data_disable(struct cs_hsi_iface *hi)
++{
++ if (hi->iface_state == CS_STATE_CONFIGURED) {
++ dev_dbg(&hi->cl->device,
++ "closing data channel with slot size 0\n");
++ hi->iface_state = CS_STATE_OPENED;
++ }
++}
++
++static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
++ struct cs_buffer_config *buf_cfg)
++{
++ int r = 0;
++ unsigned int old_state;
++
++ /*
++ * make sure that no non-zero data reads are ongoing before
++ * proceeding to change the buffer layout
++ */
++ r = cs_hsi_data_sync(hi);
++ if (r < 0)
++ return r;
++
++ WARN_ON(cs_state_xfer_active(hi->data_state));
++
++ spin_lock_bh(&hi->lock);
++ r = check_buf_params(hi, buf_cfg);
++ if (r < 0)
++ goto error;
++
++ hi->buf_size = buf_cfg->buf_size;
++ hi->mmap_cfg->buf_size = hi->buf_size;
++ hi->flags = buf_cfg->flags;
++
++ hi->rx_slot = 0;
++ hi->tx_slot = 0;
++ hi->slot_size = 0;
++
++ old_state = hi->iface_state;
++
++ if (hi->buf_size)
++ cs_hsi_data_enable(hi, buf_cfg);
++ else
++ cs_hsi_data_disable(hi);
++
++ spin_unlock_bh(&hi->lock);
++
++ if (hi->iface_state == CS_STATE_CONFIGURED &&
++ old_state != hi->iface_state) {
++ hi->pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
++ CS_QOS_LATENCY_FOR_DATA_USEC);
++ local_bh_disable();
++ cs_hsi_read_on_data(hi);
++ local_bh_enable();
++ } else if (old_state == CS_STATE_CONFIGURED &&
++ hi->iface_state != old_state) {
++ if (hi->pm_qos_req) {
++ pm_qos_remove_request(hi->pm_qos_req);
++ hi->pm_qos_req = NULL;
++ }
++ }
++ return r;
++
++error:
++ spin_unlock_bh(&hi->lock);
++ return r;
++}
++
++static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
++ unsigned long mmap_base, unsigned long mmap_size)
++{
++ int err = 0;
++ struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);
++
++ dev_dbg(&cl->device, "cs_hsi_start\n");
++
++ if (!hsi_if) {
++ err = -ENOMEM;
++ goto leave0;
++ }
++ spin_lock_init(&hsi_if->lock);
++ hsi_if->cl = cl;
++ hsi_if->iface_state = CS_STATE_CLOSED;
++ hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
++ hsi_if->mmap_base = mmap_base;
++ hsi_if->mmap_size = mmap_size;
++ memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
++ err = cs_alloc_cmds(hsi_if);
++ if (err < 0) {
++ dev_err(&cl->device, "Unable to alloc HSI messages\n");
++ goto leave1;
++ }
++ err = cs_hsi_alloc_data(hsi_if);
++ if (err < 0) {
++ dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
++ goto leave2;
++ }
++ err = hsi_claim_port(cl, 1);
++ if (err < 0) {
++ dev_err(&cl->device,
++ "Could not open, HSI port already claimed\n");
++ goto leave3;
++ }
++ hsi_if->master = ssip_slave_get_master(cl);
++ if (IS_ERR(hsi_if->master)) {
++ dev_err(&cl->device, "Could not get HSI master client\n");
++ goto leave4;
++ }
++ hsi_if->iface_state = CS_STATE_OPENED;
++ local_bh_disable();
++ cs_hsi_read_on_control(hsi_if);
++ local_bh_enable();
++
++ dev_dbg(&cl->device, "cs_hsi_start...done\n");
++
++ BUG_ON(!hi);
++ *hi = hsi_if;
++
++ return 0;
++
++leave4:
++ hsi_release_port(cl);
++leave3:
++ cs_hsi_free_data(hsi_if);
++leave2:
++ cs_free_cmds(hsi_if);
++leave1:
++ kfree(hsi_if);
++leave0:
++ dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");
++
++ return err;
++}
++
++static void cs_hsi_stop(struct cs_hsi_iface *hi)
++{
++ dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
++ cs_hsi_set_wakeline(hi, 0);
++ ssip_slave_put_master(hi->master);
++
++ /* hsi_release_port() needs to be called with CS_STATE_CLOSED */
++ hi->iface_state = CS_STATE_CLOSED;
++ hsi_release_port(hi->cl);
++
++ /*
++ * hsi_release_port() should flush out all the pending
++ * messages, so cs_state_idle() should be true for both
++ * control and data channels.
++ */
++ WARN_ON(!cs_state_idle(hi->control_state));
++ WARN_ON(!cs_state_idle(hi->data_state));
++
++ if (hi->pm_qos_req) {
++ pm_qos_remove_request(hi->pm_qos_req);
++ hi->pm_qos_req = 0;
++ }
++
++ spin_lock_bh(&hi->lock);
++ cs_hsi_free_data(hi);
++ cs_free_cmds(hi);
++ spin_unlock_bh(&hi->lock);
++ kfree(hi);
++}
++
++static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct cs_char *csdata = vma->vm_private_data;
++ struct page *page;
++
++ page = virt_to_page(csdata->mmap_base);
++ get_page(page);
++ vmf->page = page;
++
++ return 0;
++}
++
++static struct vm_operations_struct cs_char_vm_ops = {
++ .fault = cs_char_vma_fault,
++};
++
++static int cs_char_fasync(int fd, struct file *file, int on)
++{
++ struct cs_char *csdata = file->private_data;
++
++ if (fasync_helper(fd, file, on, &csdata->async_queue) >= 0)
++ return 0;
++ else
++ return -EIO;
++}
++
++static unsigned int cs_char_poll(struct file *file, poll_table *wait)
++{
++ struct cs_char *csdata = file->private_data;
++ unsigned int ret = 0;
++
++ poll_wait(file, &cs_char_data.wait, wait);
++ spin_lock_bh(&csdata->lock);
++ if (!list_empty(&csdata->chardev_queue))
++ ret = POLLIN | POLLRDNORM;
++ else if (!list_empty(&csdata->dataind_queue))
++ ret = POLLIN | POLLRDNORM;
++ spin_unlock_bh(&csdata->lock);
++
++ return ret;
++}
++
++static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
++ loff_t *unused)
++{
++ struct cs_char *csdata = file->private_data;
++ u32 data;
++ ssize_t retval;
++
++ if (count < sizeof(data))
++ return -EINVAL;
++
++ for ( ; ; ) {
++ DEFINE_WAIT(wait);
++
++ spin_lock_bh(&csdata->lock);
++ if (!list_empty(&csdata->chardev_queue)) {
++ data = cs_pop_entry(&csdata->chardev_queue);
++ } else if (!list_empty(&csdata->dataind_queue)) {
++ data = cs_pop_entry(&csdata->dataind_queue);
++ --csdata->dataind_pending;
++
++ } else {
++ data = 0;
++ }
++ spin_unlock_bh(&csdata->lock);
++
++ if (data)
++ break;
++ if (file->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto out;
++ } else if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ goto out;
++ }
++ prepare_to_wait_exclusive(&csdata->wait, &wait,
++ TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&csdata->wait, &wait);
++ }
++
++ retval = put_user(data, (u32 __user *)buf);
++ if (!retval)
++ retval = sizeof(data);
++
++out:
++ return retval;
++}
++
++static ssize_t cs_char_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *unused)
++{
++ struct cs_char *csdata = file->private_data;
++ u32 data;
++ int err;
++ ssize_t retval;
++
++ if (count < sizeof(data))
++ return -EINVAL;
++
++ if (get_user(data, (u32 __user *)buf))
++ retval = -EFAULT;
++ else
++ retval = count;
++
++ err = cs_hsi_command(csdata->hi, data);
++ if (err < 0)
++ retval = err;
++
++ return retval;
++}
++
++static int cs_char_ioctl(struct inode *unused, struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ struct cs_char *csdata = file->private_data;
++ int r = 0;
++
++ switch (cmd) {
++ case CS_GET_STATE: {
++ unsigned int state;
++
++ state = cs_hsi_get_state(csdata->hi);
++ if (copy_to_user((void __user *)arg, &state, sizeof(state)))
++ r = -EFAULT;
++ }
++ break;
++ case CS_SET_WAKELINE: {
++ unsigned int state;
++
++ if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
++ r = -EFAULT;
++ else
++ cs_hsi_set_wakeline(csdata->hi, state);
++ }
++ break;
++ case CS_GET_IF_VERSION: {
++ unsigned int ifver = CS_IF_VERSION;
++
++ if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
++ r = -EFAULT;
++ break;
++ }
++ case CS_CONFIG_BUFS: {
++ struct cs_buffer_config buf_cfg;
++
++ if (copy_from_user(&buf_cfg, (void __user *)arg,
++ sizeof(buf_cfg)))
++ r = -EFAULT;
++ else
++ r = cs_hsi_buf_config(csdata->hi, &buf_cfg);
++ break;
++ }
++ default:
++ r = -ENOTTY;
++ break;
++ }
++
++ return r;
++}
++
++static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ if (vma->vm_end < vma->vm_start)
++ return -EINVAL;
++
++ if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1)
++ return -EINVAL;
++
++ vma->vm_flags |= VM_RESERVED;
++ vma->vm_ops = &cs_char_vm_ops;
++ vma->vm_private_data = file->private_data;
++
++ return 0;
++}
++
++static int cs_char_open(struct inode *unused, struct file *file)
++{
++ int ret = 0;
++
++ spin_lock_bh(&cs_char_data.lock);
++ if (cs_char_data.opened) {
++ ret = -EBUSY;
++ spin_unlock_bh(&cs_char_data.lock);
++ goto out;
++ }
++ cs_char_data.mmap_base = get_zeroed_page(GFP_ATOMIC);
++ if (!cs_char_data.mmap_base) {
++ dev_err(&cs_char_data.cl->device,
++ "Shared memory allocation failed.\n");
++ ret = -ENOMEM;
++ spin_unlock_bh(&cs_char_data.lock);
++ goto out;
++ }
++ cs_char_data.mmap_size = CS_MMAP_SIZE;
++ cs_char_data.dataind_pending = 0;
++ cs_char_data.opened = 1;
++ file->private_data = &cs_char_data;
++ spin_unlock_bh(&cs_char_data.lock);
++
++ BUG_ON(cs_char_data.hi);
++
++ ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl,
++ cs_char_data.mmap_base, cs_char_data.mmap_size);
++ if (ret) {
++ dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
++ goto out;
++ }
++
++out:
++ return ret;
++}
++
++static void cs_free_char_queue(struct list_head *head)
++{
++ struct char_queue *entry;
++ struct list_head *cursor, *next;
++
++ if (!list_empty(head)) {
++ list_for_each_safe(cursor, next, head) {
++ entry = list_entry(cursor, struct char_queue, list);
++ list_del(&entry->list);
++ kfree(entry);
++ }
++ }
++
++}
++
++static int cs_char_release(struct inode *unused, struct file *file)
++{
++ struct cs_char *csdata = file->private_data;
++ struct cs_hsi_iface *hi;
++
++ spin_lock_bh(&csdata->lock);
++ hi = csdata->hi;
++ csdata->hi = NULL;
++ free_page(csdata->mmap_base);
++ csdata->opened = 0;
++ cs_free_char_queue(&csdata->chardev_queue);
++ cs_free_char_queue(&csdata->dataind_queue);
++ spin_unlock_bh(&csdata->lock);
++
++ if (hi)
++ cs_hsi_stop(hi);
++
++ return 0;
++}
++
++static const struct file_operations cs_char_fops = {
++ .owner = THIS_MODULE,
++ .read = cs_char_read,
++ .write = cs_char_write,
++ .poll = cs_char_poll,
++ .ioctl = cs_char_ioctl,
++ .mmap = cs_char_mmap,
++ .open = cs_char_open,
++ .release = cs_char_release,
++ .fasync = cs_char_fasync,
++};
++
++static struct miscdevice cs_char_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = DRIVER_NAME,
++ .fops = &cs_char_fops
++};
++
++static int __init cs_hsi_client_probe(struct device *dev)
++{
++ int err = 0;
++ struct hsi_client *cl = to_hsi_client(dev);
++
++ dev_dbg(dev, "hsi_client_probe\n");
++ init_waitqueue_head(&cs_char_data.wait);
++ init_waitqueue_head(&cs_char_data.datawait);
++ spin_lock_init(&cs_char_data.lock);
++ cs_char_data.opened = 0;
++ cs_char_data.cl = cl;
++ cs_char_data.hi = NULL;
++ INIT_LIST_HEAD(&cs_char_data.chardev_queue);
++ INIT_LIST_HEAD(&cs_char_data.dataind_queue);
++
++ err = misc_register(&cs_char_miscdev);
++ if (err)
++ dev_err(dev, "Failed to register\n");
++
++ return err;
++}
++
++static int __exit cs_hsi_client_remove(struct device *dev)
++{
++ struct cs_hsi_iface *hi;
++
++ dev_dbg(dev, "hsi_client_remove\n");
++ misc_deregister(&cs_char_miscdev);
++ spin_lock_bh(&cs_char_data.lock);
++ hi = cs_char_data.hi;
++ cs_char_data.hi = NULL;
++ spin_unlock_bh(&cs_char_data.lock);
++ if (hi)
++ cs_hsi_stop(hi);
++
++ return 0;
++}
++
++static struct hsi_client_driver cs_hsi_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .probe = cs_hsi_client_probe,
++ .remove = cs_hsi_client_remove,
++ },
++};
++
++static int __init cs_char_init(void)
++{
++ int err = 0;
++
++ err = hsi_register_client_driver(&cs_hsi_driver);
++ if (err)
++ pr_err(DRIVER_NAME ": Error when registering driver %d\n", err);
++
++ return err;
++}
++module_init(cs_char_init);
++
++static void __exit cs_char_exit(void)
++{
++ hsi_unregister_client_driver(&cs_hsi_driver);
++}
++module_exit(cs_char_exit);
++
++MODULE_ALIAS("hsi:cmt_speech");
++MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
++MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
++MODULE_DESCRIPTION("CMT speech driver");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/hsi/clients/hsi_char.c
+@@ -0,0 +1,1053 @@
++/*
++ * hsi-char.c
++ *
++ * HSI character device driver, implements the character device
++ * interface.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Andras Domokos <andras.domokos@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <asm/atomic.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/poll.h>
++#include <linux/ioctl.h>
++#include <linux/wait.h>
++#include <linux/fs.h>
++#include <linux/sched.h>
++#include <linux/device.h>
++#include <linux/cdev.h>
++#include <linux/uaccess.h>
++#include <linux/scatterlist.h>
++#include <linux/hsi/hsi.h>
++#include <linux/hsi/hsi_char.h>
++
++#define HSI_CHAR_CHANNELS 8
++#define HSI_CHAR_DEVS 8
++#define HSI_CHAR_MSGS 4
++
++#define HSI_CHST_UNAVAIL 0 /* SBZ! */
++#define HSI_CHST_AVAIL 1
++
++#define HSI_CHST_CLOSED (0 << 4)
++#define HSI_CHST_CLOSING (1 << 4)
++#define HSI_CHST_OPENING (2 << 4)
++#define HSI_CHST_OPENED (3 << 4)
++
++#define HSI_CHST_READOFF (0 << 8)
++#define HSI_CHST_READON (1 << 8)
++#define HSI_CHST_READING (2 << 8)
++
++#define HSI_CHST_WRITEOFF (0 << 12)
++#define HSI_CHST_WRITEON (1 << 12)
++#define HSI_CHST_WRITING (2 << 12)
++
++#define HSI_CHST_OC_MASK 0xf0
++#define HSI_CHST_RD_MASK 0xf00
++#define HSI_CHST_WR_MASK 0xf000
++
++#define HSI_CHST_OC(c) ((c)->state & HSI_CHST_OC_MASK)
++#define HSI_CHST_RD(c) ((c)->state & HSI_CHST_RD_MASK)
++#define HSI_CHST_WR(c) ((c)->state & HSI_CHST_WR_MASK)
++
++#define HSI_CHST_OC_SET(c, v) \
++ do { \
++ (c)->state &= ~HSI_CHST_OC_MASK; \
++ (c)->state |= v; \
++ } while (0);
++
++#define HSI_CHST_RD_SET(c, v) \
++ do { \
++ (c)->state &= ~HSI_CHST_RD_MASK; \
++ (c)->state |= v; \
++ } while (0);
++
++#define HSI_CHST_WR_SET(c, v) \
++ do { \
++ (c)->state &= ~HSI_CHST_WR_MASK; \
++ (c)->state |= v; \
++ } while (0);
++
++#define HSI_CHAR_POLL_RST (-1)
++#define HSI_CHAR_POLL_OFF 0
++#define HSI_CHAR_POLL_ON 1
++
++#define HSI_CHAR_RX 0
++#define HSI_CHAR_TX 1
++
++struct hsi_char_channel {
++ int ch;
++ unsigned int state;
++ int wlrefcnt;
++ int rxpoll;
++ struct hsi_client *cl;
++ struct list_head free_msgs_list;
++ struct list_head rx_msgs_queue;
++ struct list_head tx_msgs_queue;
++ int poll_event;
++ spinlock_t lock;
++ struct fasync_struct *async_queue;
++ wait_queue_head_t rx_wait;
++ wait_queue_head_t tx_wait;
++};
++
++struct hsi_char_client_data {
++ atomic_t refcnt;
++ int attached;
++ atomic_t breq;
++ struct hsi_char_channel channels[HSI_CHAR_DEVS];
++};
++
++static unsigned int max_data_size = 0x1000;
++module_param(max_data_size, uint, 1);
++MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
++
++static int channels_map[HSI_CHAR_DEVS] = {0, -1, -1 , -1, -1, -1, -1, -1};
++module_param_array(channels_map, int, NULL, 0);
++MODULE_PARM_DESC(channels_map, "Array of HSI channels ([0...7]) to be probed");
++
++static dev_t hsi_char_dev;
++static struct hsi_char_client_data hsi_char_cl_data;
++
++static int hsi_char_rx_poll(struct hsi_char_channel *channel);
++
++static int __devinit hsi_char_probe(struct device *dev)
++{
++ struct hsi_char_client_data *cl_data = &hsi_char_cl_data;
++ struct hsi_char_channel *channel = cl_data->channels;
++ struct hsi_client *cl = to_hsi_client(dev);
++ int i;
++
++ for (i = 0; i < HSI_CHAR_DEVS; i++) {
++ if (channel->state == HSI_CHST_AVAIL)
++ channel->cl = cl;
++ channel++;
++ }
++ cl->hsi_start_rx = NULL;
++ cl->hsi_stop_rx = NULL;
++ atomic_set(&cl_data->refcnt, 0);
++ atomic_set(&cl_data->breq, 1);
++ cl_data->attached = 0;
++ hsi_client_set_drvdata(cl, cl_data);
++
++ return 0;
++}
++
++static int __devexit hsi_char_remove(struct device *dev)
++{
++ struct hsi_client *cl = to_hsi_client(dev);
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
++ struct hsi_char_channel *channel = cl_data->channels;
++ int i;
++
++ for (i = 0; i < HSI_CHAR_DEVS; i++) {
++ if (!(channel->state & HSI_CHST_AVAIL))
++ continue;
++ if (cl_data->attached) {
++ hsi_release_port(channel->cl);
++ cl_data->attached = 0;
++ }
++ channel->state = HSI_CHST_UNAVAIL;
++ channel->cl = NULL;
++ channel++;
++ }
++
++ return 0;
++}
++
++static int hsi_char_fasync(int fd, struct file *file, int on)
++{
++ struct hsi_char_channel *channel = file->private_data;
++
++ if (fasync_helper(fd, file, on, &channel->async_queue) < 0)
++ return -EIO;
++
++ return 0;
++}
++
++static unsigned int hsi_char_poll(struct file *file, poll_table *wait)
++{
++ struct hsi_char_channel *channel = file->private_data;
++ unsigned int ret;
++
++ spin_lock_bh(&channel->lock);
++ poll_wait(file, &channel->rx_wait, wait);
++ poll_wait(file, &channel->tx_wait, wait);
++ ret = channel->poll_event;
++ spin_unlock_bh(&channel->lock);
++ hsi_char_rx_poll(channel);
++
++ return ret;
++}
++
++static inline void hsi_char_msg_len_set(struct hsi_msg *msg, unsigned int len)
++{
++ msg->sgt.sgl->length = len;
++}
++
++static inline unsigned int hsi_char_msg_len_get(struct hsi_msg *msg)
++{
++ return msg->sgt.sgl->length;
++}
++
++static void hsi_char_data_available(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
++ int ret;
++
++ if (msg->status == HSI_STATUS_ERROR) {
++ ret = hsi_async_read(channel->cl, msg);
++ if (ret < 0) {
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ channel->rxpoll = HSI_CHAR_POLL_OFF;
++ spin_unlock_bh(&channel->lock);
++ }
++ } else {
++ spin_lock_bh(&channel->lock);
++ channel->rxpoll = HSI_CHAR_POLL_OFF;
++ channel->poll_event |= (POLLIN | POLLRDNORM);
++ spin_unlock_bh(&channel->lock);
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ spin_unlock_bh(&channel->lock);
++ wake_up_interruptible(&channel->rx_wait);
++ }
++}
++
++static void hsi_char_rx_poll_destructor(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
++
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ channel->rxpoll = HSI_CHAR_POLL_RST;
++ spin_unlock_bh(&channel->lock);
++}
++
++static int hsi_char_rx_poll(struct hsi_char_channel *channel)
++{
++ struct hsi_msg *msg;
++ int ret = 0;
++
++ spin_lock_bh(&channel->lock);
++ if (list_empty(&channel->free_msgs_list)) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ if (channel->rxpoll == HSI_CHAR_POLL_ON)
++ goto out;
++ msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link);
++ list_del(&msg->link);
++ channel->rxpoll = HSI_CHAR_POLL_ON;
++ spin_unlock_bh(&channel->lock);
++ hsi_char_msg_len_set(msg, 0);
++ msg->complete = hsi_char_data_available;
++ msg->destructor = hsi_char_rx_poll_destructor;
++ /* don't touch msg->context! */
++ ret = hsi_async_read(channel->cl, msg);
++ spin_lock_bh(&channel->lock);
++ if (ret < 0) {
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ channel->rxpoll = HSI_CHAR_POLL_OFF;
++ goto out;
++ }
++out:
++ spin_unlock_bh(&channel->lock);
++
++ return ret;
++}
++
++static void hsi_char_rx_poll_rst(struct hsi_client *cl)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
++ struct hsi_char_channel *channel = cl_data->channels;
++ int i;
++
++ for (i = 0; i < HSI_CHAR_DEVS; i++) {
++ if ((HSI_CHST_OC(channel) == HSI_CHST_OPENED) &&
++ (channel->rxpoll == HSI_CHAR_POLL_RST))
++ hsi_char_rx_poll(channel);
++ channel++;
++ }
++}
++
++static void hsi_char_rx_completed(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
++
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->rx_msgs_queue);
++ spin_unlock_bh(&channel->lock);
++ wake_up_interruptible(&channel->rx_wait);
++}
++
++static void hsi_char_rx_msg_destructor(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
++
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
++ spin_unlock_bh(&channel->lock);
++}
++
++static void hsi_char_rx_cancel(struct hsi_char_channel *channel)
++{
++ hsi_flush(channel->cl);
++ hsi_char_rx_poll_rst(channel->cl);
++}
++
++static void hsi_char_tx_completed(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
++
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->tx_msgs_queue);
++ channel->poll_event |= (POLLOUT | POLLWRNORM);
++ spin_unlock_bh(&channel->lock);
++ wake_up_interruptible(&channel->tx_wait);
++}
++
++static void hsi_char_tx_msg_destructor(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
++
++ spin_lock_bh(&channel->lock);
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
++ spin_unlock_bh(&channel->lock);
++}
++
++static void hsi_char_tx_cancel(struct hsi_char_channel *channel)
++{
++ hsi_flush(channel->cl);
++ hsi_char_rx_poll_rst(channel->cl);
++}
++
++static ssize_t hsi_char_read(struct file *file, char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ struct hsi_char_channel *channel = file->private_data;
++ struct hsi_msg *msg = NULL;
++ ssize_t ret;
++
++ if (len == 0) {
++ channel->poll_event &= ~POLLPRI;
++ return 0;
++ }
++ channel->poll_event &= ~POLLPRI;
++
++ if (!IS_ALIGNED(len, sizeof(u32)))
++ return -EINVAL;
++
++ if (len > max_data_size)
++ len = max_data_size;
++
++ spin_lock_bh(&channel->lock);
++ if (HSI_CHST_RD(channel) != HSI_CHST_READOFF) {
++ ret = -EBUSY;
++ goto out;
++ }
++ if (list_empty(&channel->free_msgs_list)) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link);
++ list_del(&msg->link);
++ spin_unlock_bh(&channel->lock);
++ hsi_char_msg_len_set(msg, len);
++ msg->complete = hsi_char_rx_completed;
++ msg->destructor = hsi_char_rx_msg_destructor;
++ ret = hsi_async_read(channel->cl, msg);
++ spin_lock_bh(&channel->lock);
++ if (ret < 0)
++ goto out;
++ HSI_CHST_RD_SET(channel, HSI_CHST_READING);
++ msg = NULL;
++
++ for ( ; ; ) {
++ DEFINE_WAIT(wait);
++
++ if (!list_empty(&channel->rx_msgs_queue)) {
++ msg = list_first_entry(&channel->rx_msgs_queue,
++ struct hsi_msg, link);
++ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
++ channel->poll_event &= ~(POLLIN | POLLRDNORM);
++ list_del(&msg->link);
++ spin_unlock_bh(&channel->lock);
++ if (msg->status == HSI_STATUS_ERROR) {
++ ret = -EIO;
++ } else {
++ ret = copy_to_user((void __user *)buf,
++ msg->context,
++ hsi_char_msg_len_get(msg));
++ if (ret)
++ ret = -EFAULT;
++ else
++ ret = hsi_char_msg_len_get(msg);
++ }
++ spin_lock_bh(&channel->lock);
++ break;
++ } else if (signal_pending(current)) {
++ spin_unlock_bh(&channel->lock);
++ hsi_char_rx_cancel(channel);
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
++ ret = -EINTR;
++ break;
++ }
++
++ prepare_to_wait(&channel->rx_wait, &wait, TASK_INTERRUPTIBLE);
++ spin_unlock_bh(&channel->lock);
++
++ schedule();
++
++ spin_lock_bh(&channel->lock);
++ finish_wait(&channel->rx_wait, &wait);
++ }
++out:
++ if (msg)
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ spin_unlock_bh(&channel->lock);
++
++ return ret;
++}
++
++static ssize_t hsi_char_write(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ struct hsi_char_channel *channel = file->private_data;
++ struct hsi_msg *msg = NULL;
++ ssize_t ret;
++
++ if ((len == 0) || !IS_ALIGNED(len, sizeof(u32)))
++ return -EINVAL;
++
++ if (len > max_data_size)
++ len = max_data_size;
++
++ spin_lock_bh(&channel->lock);
++ if (HSI_CHST_WR(channel) != HSI_CHST_WRITEOFF) {
++ ret = -EBUSY;
++ goto out;
++ }
++ if (list_empty(&channel->free_msgs_list)) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link);
++ list_del(&msg->link);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEON);
++ spin_unlock_bh(&channel->lock);
++
++ if (copy_from_user(msg->context, (void __user *)buf, len)) {
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
++ ret = -EFAULT;
++ goto out;
++ }
++
++ hsi_char_msg_len_set(msg, len);
++ msg->complete = hsi_char_tx_completed;
++ msg->destructor = hsi_char_tx_msg_destructor;
++ channel->poll_event &= ~(POLLOUT | POLLWRNORM);
++ ret = hsi_async_write(channel->cl, msg);
++ spin_lock_bh(&channel->lock);
++ if (ret < 0) {
++ channel->poll_event |= (POLLOUT | POLLWRNORM);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
++ goto out;
++ }
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITING);
++ msg = NULL;
++
++ for ( ; ; ) {
++ DEFINE_WAIT(wait);
++
++ if (!list_empty(&channel->tx_msgs_queue)) {
++ msg = list_first_entry(&channel->tx_msgs_queue,
++ struct hsi_msg, link);
++ list_del(&msg->link);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
++ if (msg->status == HSI_STATUS_ERROR)
++ ret = -EIO;
++ else
++ ret = hsi_char_msg_len_get(msg);
++ break;
++ } else if (signal_pending(current)) {
++ spin_unlock_bh(&channel->lock);
++ hsi_char_tx_cancel(channel);
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
++ ret = -EINTR;
++ break;
++ }
++ prepare_to_wait(&channel->tx_wait, &wait, TASK_INTERRUPTIBLE);
++ spin_unlock_bh(&channel->lock);
++
++ schedule();
++
++ spin_lock_bh(&channel->lock);
++ finish_wait(&channel->tx_wait, &wait);
++ }
++out:
++ if (msg)
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++
++ spin_unlock_bh(&channel->lock);
++
++ return ret;
++}
++
++static void hsi_char_bcast_break(struct hsi_client *cl)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
++ struct hsi_char_channel *channel = cl_data->channels;
++ int i;
++
++ for (i = 0; i < HSI_CHAR_DEVS; i++) {
++ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED)
++ continue;
++ channel->poll_event |= POLLPRI;
++ wake_up_interruptible(&channel->rx_wait);
++ wake_up_interruptible(&channel->tx_wait);
++ channel++;
++ }
++}
++
++static void hsi_char_break_received(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++ int ret;
++
++ hsi_char_bcast_break(msg->cl);
++ ret = hsi_async_read(msg->cl, msg);
++ if (ret < 0) {
++ hsi_free_msg(msg);
++ atomic_inc(&cl_data->breq);
++ }
++}
++
++static void hsi_char_break_req_destructor(struct hsi_msg *msg)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
++
++ hsi_free_msg(msg);
++ atomic_inc(&cl_data->breq);
++}
++
++static int hsi_char_break_request(struct hsi_client *cl)
++{
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++ int ret = 0;
++
++ if (!atomic_dec_and_test(&cl_data->breq)) {
++ atomic_inc(&cl_data->breq);
++ return -EBUSY;
++ }
++ msg = hsi_alloc_msg(0, GFP_KERNEL);
++ if (!msg)
++ return -ENOMEM;
++ msg->break_frame = 1;
++ msg->complete = hsi_char_break_received;
++ msg->destructor = hsi_char_break_req_destructor;
++ ret = hsi_async_read(cl, msg);
++ if (ret < 0)
++ hsi_free_msg(msg);
++
++ return ret;
++}
++
++static int hsi_char_break_send(struct hsi_client *cl)
++{
++ struct hsi_msg *msg;
++ int ret = 0;
++
++ msg = hsi_alloc_msg(0, GFP_ATOMIC);
++ if (!msg)
++ return -ENOMEM;
++ msg->break_frame = 1;
++ msg->complete = hsi_free_msg;
++ msg->destructor = hsi_free_msg;
++ ret = hsi_async_write(cl, msg);
++ if (ret < 0)
++ hsi_free_msg(msg);
++
++ return ret;
++}
++
++static void hsi_char_reset(struct hsi_client *cl)
++{
++ hsi_flush(cl);
++ hsi_char_rx_poll_rst(cl);
++}
++
++static inline int ssi_check_common_cfg(struct hsi_config *cfg)
++{
++ if ((cfg->mode != HSI_MODE_STREAM) && (cfg->mode != HSI_MODE_FRAME))
++ return -EINVAL;
++ if ((cfg->channels == 0) || (cfg->channels > HSI_CHAR_CHANNELS))
++ return -EINVAL;
++ if (cfg->channels & (cfg->channels - 1))
++ return -EINVAL;
++ if ((cfg->flow != HSI_FLOW_SYNC) && (cfg->flow != HSI_FLOW_PIPE))
++ return -EINVAL;
++
++ return 0;
++}
++
++static inline int ssi_check_rx_cfg(struct hsi_config *cfg)
++{
++ return ssi_check_common_cfg(cfg);
++}
++
++static inline int ssi_check_tx_cfg(struct hsi_config *cfg)
++{
++ int ret = ssi_check_common_cfg(cfg);
++
++ if (ret < 0)
++ return ret;
++ if ((cfg->arb_mode != HSI_ARB_RR) && (cfg->arb_mode != HSI_ARB_PRIO))
++ return -EINVAL;
++
++ return 0;
++}
++
++static inline int hsi_char_cfg_set(struct hsi_client *cl,
++ struct hsi_config *cfg, int dir)
++{
++ struct hsi_config *rxtx_cfg;
++ int ret = 0;
++
++ if (dir == HSI_CHAR_RX) {
++ rxtx_cfg = &cl->rx_cfg;
++ ret = ssi_check_rx_cfg(cfg);
++ } else {
++ rxtx_cfg = &cl->tx_cfg;
++ ret = ssi_check_tx_cfg(cfg);
++ }
++ if (ret < 0)
++ return ret;
++
++ *rxtx_cfg = *cfg;
++ ret = hsi_setup(cl);
++ if (ret < 0)
++ return ret;
++
++ if ((dir == HSI_CHAR_RX) && (cfg->mode == HSI_MODE_FRAME))
++ hsi_char_break_request(cl);
++
++ return ret;
++}
++
++static inline void hsi_char_cfg_get(struct hsi_client *cl,
++ struct hsi_config *cfg, int dir)
++{
++ struct hsi_config *rxtx_cfg;
++
++ if (dir == HSI_CHAR_RX)
++ rxtx_cfg = &cl->rx_cfg;
++ else
++ rxtx_cfg = &cl->tx_cfg;
++ *cfg = *rxtx_cfg;
++}
++
++static inline void hsi_char_rx2icfg(struct hsi_config *cfg,
++ struct hsc_rx_config *rx_cfg)
++{
++ cfg->mode = rx_cfg->mode;
++ cfg->flow = rx_cfg->flow;
++ cfg->channels = rx_cfg->channels;
++ cfg->speed = 0;
++ cfg->arb_mode = 0;
++}
++
++static inline void hsi_char_tx2icfg(struct hsi_config *cfg,
++ struct hsc_tx_config *tx_cfg)
++{
++ cfg->mode = tx_cfg->mode;
++ cfg->flow = tx_cfg->flow;
++ cfg->channels = tx_cfg->channels;
++ cfg->speed = tx_cfg->speed;
++ cfg->arb_mode = tx_cfg->arb_mode;
++}
++
++static inline void hsi_char_rx2ecfg(struct hsc_rx_config *rx_cfg,
++ struct hsi_config *cfg)
++{
++ rx_cfg->mode = cfg->mode;
++ rx_cfg->flow = cfg->flow;
++ rx_cfg->channels = cfg->channels;
++}
++
++static inline void hsi_char_tx2ecfg(struct hsc_tx_config *tx_cfg,
++ struct hsi_config *cfg)
++{
++ tx_cfg->mode = cfg->mode;
++ tx_cfg->flow = cfg->flow;
++ tx_cfg->channels = cfg->channels;
++ tx_cfg->speed = cfg->speed;
++ tx_cfg->arb_mode = cfg->arb_mode;
++}
++
++static int hsi_char_ioctl(struct inode *inode, struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ struct hsi_char_channel *channel = file->private_data;
++ unsigned int state;
++ struct hsi_config cfg;
++ struct hsc_rx_config rx_cfg;
++ struct hsc_tx_config tx_cfg;
++ int ret = 0;
++
++ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED)
++ return -EINVAL;
++
++ switch (cmd) {
++ case HSC_RESET:
++ hsi_char_reset(channel->cl);
++ break;
++ case HSC_SET_PM:
++ if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
++ return -EFAULT;
++ if (state == HSC_PM_DISABLE) {
++ ret = hsi_start_tx(channel->cl);
++ if (!ret)
++ channel->wlrefcnt++;
++ } else if ((state == HSC_PM_ENABLE)
++ && (channel->wlrefcnt > 0)) {
++ ret = hsi_stop_tx(channel->cl);
++ if (!ret)
++ channel->wlrefcnt--;
++ } else {
++ ret = -EINVAL;
++ }
++ break;
++ case HSC_SEND_BREAK:
++ return hsi_char_break_send(channel->cl);
++ case HSC_SET_RX:
++ if (copy_from_user(&rx_cfg, (void __user *)arg, sizeof(rx_cfg)))
++ return -EFAULT;
++ hsi_char_rx2icfg(&cfg, &rx_cfg);
++ return hsi_char_cfg_set(channel->cl, &cfg, HSI_CHAR_RX);
++ case HSC_GET_RX:
++ hsi_char_cfg_get(channel->cl, &cfg, HSI_CHAR_RX);
++ hsi_char_rx2ecfg(&rx_cfg, &cfg);
++ if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg)))
++ return -EFAULT;
++ break;
++ case HSC_SET_TX:
++ if (copy_from_user(&tx_cfg, (void __user *)arg, sizeof(tx_cfg)))
++ return -EFAULT;
++ hsi_char_tx2icfg(&cfg, &tx_cfg);
++ return hsi_char_cfg_set(channel->cl, &cfg, HSI_CHAR_TX);
++ case HSC_GET_TX:
++ hsi_char_cfg_get(channel->cl, &cfg, HSI_CHAR_TX);
++ hsi_char_tx2ecfg(&tx_cfg, &cfg);
++ if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg)))
++ return -EFAULT;
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return ret;
++}
++
++static inline struct hsi_msg *hsi_char_msg_alloc(unsigned int alloc_size)
++{
++ struct hsi_msg *msg;
++ void *buf;
++
++ msg = hsi_alloc_msg(1, GFP_KERNEL);
++ if (!msg)
++ goto out;
++ buf = kmalloc(alloc_size, GFP_KERNEL);
++ if (!buf) {
++ hsi_free_msg(msg);
++ goto out;
++ }
++ sg_init_one(msg->sgt.sgl, buf, alloc_size);
++ msg->context = buf;
++ return msg;
++out:
++ return NULL;
++}
++
++static inline void hsi_char_msg_free(struct hsi_msg *msg)
++{
++ msg->complete = NULL;
++ msg->destructor = NULL;
++ kfree(sg_virt(msg->sgt.sgl));
++ hsi_free_msg(msg);
++}
++
++static inline void hsi_char_msgs_free(struct hsi_char_channel *channel)
++{
++ struct hsi_msg *msg, *tmp;
++
++ list_for_each_entry_safe(msg, tmp, &channel->free_msgs_list, link) {
++ list_del(&msg->link);
++ hsi_char_msg_free(msg);
++ }
++ list_for_each_entry_safe(msg, tmp, &channel->rx_msgs_queue, link) {
++ list_del(&msg->link);
++ hsi_char_msg_free(msg);
++ }
++ list_for_each_entry_safe(msg, tmp, &channel->tx_msgs_queue, link) {
++ list_del(&msg->link);
++ hsi_char_msg_free(msg);
++ }
++}
++
++static inline int hsi_char_msgs_alloc(struct hsi_char_channel *channel)
++{
++ struct hsi_msg *msg;
++ int i;
++
++ for (i = 0; i < HSI_CHAR_MSGS; i++) {
++ msg = hsi_char_msg_alloc(max_data_size);
++ if (!msg)
++ goto out;
++ msg->channel = channel->ch;
++ list_add_tail(&msg->link, &channel->free_msgs_list);
++ }
++ return 0;
++out:
++ hsi_char_msgs_free(channel);
++
++ return -ENOMEM;
++}
++
++static int hsi_char_open(struct inode *inode, struct file *file)
++{
++ struct hsi_char_client_data *cl_data = &hsi_char_cl_data;
++ struct hsi_char_channel *channel = cl_data->channels + iminor(inode);
++ int ret = 0, refcnt;
++
++ if (channel->state == HSI_CHST_UNAVAIL)
++ return -ENODEV;
++
++ spin_lock_bh(&channel->lock);
++ if (HSI_CHST_OC(channel) != HSI_CHST_CLOSED) {
++ ret = -EBUSY;
++ goto out;
++ }
++ HSI_CHST_OC_SET(channel, HSI_CHST_OPENING);
++ spin_unlock_bh(&channel->lock);
++
++ refcnt = atomic_inc_return(&cl_data->refcnt);
++ if (refcnt == 1) {
++ if (cl_data->attached) {
++ atomic_dec(&cl_data->refcnt);
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
++ ret = -EBUSY;
++ goto out;
++ }
++ ret = hsi_claim_port(channel->cl, 0);
++ if (ret < 0) {
++ atomic_dec(&cl_data->refcnt);
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
++ goto out;
++ }
++ hsi_setup(channel->cl);
++ } else if (!cl_data->attached) {
++ atomic_dec(&cl_data->refcnt);
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
++ ret = -ENODEV;
++ goto out;
++ }
++ ret = hsi_char_msgs_alloc(channel);
++
++ if (ret < 0) {
++ refcnt = atomic_dec_return(&cl_data->refcnt);
++ if (!refcnt)
++ hsi_release_port(channel->cl);
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
++ goto out;
++ }
++ if (refcnt == 1)
++ cl_data->attached = 1;
++ channel->wlrefcnt = 0;
++ channel->rxpoll = HSI_CHAR_POLL_OFF;
++ channel->poll_event = (POLLOUT | POLLWRNORM);
++ file->private_data = channel;
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_OC_SET(channel, HSI_CHST_OPENED);
++out:
++ spin_unlock_bh(&channel->lock);
++
++ return ret;
++}
++
++static int hsi_char_release(struct inode *inode, struct file *file)
++{
++ struct hsi_char_channel *channel = file->private_data;
++ struct hsi_char_client_data *cl_data = hsi_client_drvdata(channel->cl);
++ int ret = 0, refcnt;
++
++ spin_lock_bh(&channel->lock);
++ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED)
++ goto out;
++ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSING);
++ spin_unlock_bh(&channel->lock);
++
++ hsi_flush(channel->cl);
++ while (channel->wlrefcnt > 0) {
++ hsi_stop_tx(channel->cl);
++ channel->wlrefcnt--;
++ }
++
++ refcnt = atomic_dec_return(&cl_data->refcnt);
++ if (!refcnt) {
++ hsi_release_port(channel->cl);
++ cl_data->attached = 0;
++ }
++
++ hsi_char_msgs_free(channel);
++
++ spin_lock_bh(&channel->lock);
++ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
++ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
++ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
++out:
++ spin_unlock_bh(&channel->lock);
++
++ return ret;
++}
++
++static const struct file_operations hsi_char_fops = {
++ .owner = THIS_MODULE,
++ .read = hsi_char_read,
++ .write = hsi_char_write,
++ .poll = hsi_char_poll,
++ .ioctl = hsi_char_ioctl,
++ .open = hsi_char_open,
++ .release = hsi_char_release,
++ .fasync = hsi_char_fasync,
++};
++
++static struct hsi_client_driver hsi_char_driver = {
++ .driver = {
++ .name = "hsi_char",
++ .owner = THIS_MODULE,
++ .probe = hsi_char_probe,
++ .remove = hsi_char_remove,
++ },
++};
++
++static inline void hsi_char_channel_init(struct hsi_char_channel *channel)
++{
++ channel->state = HSI_CHST_AVAIL;
++ INIT_LIST_HEAD(&channel->free_msgs_list);
++ init_waitqueue_head(&channel->rx_wait);
++ init_waitqueue_head(&channel->tx_wait);
++ spin_lock_init(&channel->lock);
++ INIT_LIST_HEAD(&channel->rx_msgs_queue);
++ INIT_LIST_HEAD(&channel->tx_msgs_queue);
++}
++
++static struct cdev hsi_char_cdev;
++
++static int __init hsi_char_init(void)
++{
++ char devname[] = "hsi_char";
++ struct hsi_char_client_data *cl_data = &hsi_char_cl_data;
++ struct hsi_char_channel *channel = cl_data->channels;
++ unsigned long ch_mask = 0;
++ int ret, i;
++
++ if ((max_data_size < 4) || (max_data_size > 0x10000) ||
++ (max_data_size & (max_data_size - 1))) {
++ pr_err("Invalid max read/write data size");
++ return -EINVAL;
++ }
++
++ for (i = 0; i < HSI_CHAR_DEVS && channels_map[i] >= 0; i++) {
++ if (channels_map[i] >= HSI_CHAR_DEVS) {
++ pr_err("Invalid HSI/SSI channel specified");
++ return -EINVAL;
++ }
++ set_bit(channels_map[i], &ch_mask);
++ }
++
++ if (i == 0) {
++ pr_err("No HSI channels available");
++ return -EINVAL;
++ }
++
++ memset(cl_data->channels, 0, sizeof(cl_data->channels));
++ for (i = 0; i < HSI_CHAR_DEVS; i++) {
++ channel->ch = i;
++ channel->state = HSI_CHST_UNAVAIL;
++ if (test_bit(i, &ch_mask))
++ hsi_char_channel_init(channel);
++ channel++;
++ }
++
++ ret = hsi_register_client_driver(&hsi_char_driver);
++ if (ret) {
++ pr_err("Error while registering HSI/SSI driver %d", ret);
++ return ret;
++ }
++
++ ret = alloc_chrdev_region(&hsi_char_dev, 0, HSI_CHAR_DEVS, devname);
++ if (ret < 0) {
++ hsi_unregister_client_driver(&hsi_char_driver);
++ return ret;
++ }
++
++ cdev_init(&hsi_char_cdev, &hsi_char_fops);
++ cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_CHAR_DEVS);
++ pr_info("HSI/SSI char device loaded\n");
++
++ return 0;
++}
++module_init(hsi_char_init);
++
++static void __exit hsi_char_exit(void)
++{
++ cdev_del(&hsi_char_cdev);
++ unregister_chrdev_region(hsi_char_dev, HSI_CHAR_DEVS);
++ hsi_unregister_client_driver(&hsi_char_driver);
++ pr_info("HSI char device removed\n");
++}
++module_exit(hsi_char_exit);
++
++MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
++MODULE_ALIAS("hsi:hsi_char");
++MODULE_DESCRIPTION("HSI character device");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/hsi/clients/ssi_protocol.c
+@@ -0,0 +1,1153 @@
++/*
++ * ssi_protocol.c
++ *
++ * Implementation of the SSI McSAAB improved protocol.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <asm/atomic.h>
++#include <linux/clk.h>
++#include <linux/cmt.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <linux/if_ether.h>
++#include <linux/if_arp.h>
++#include <linux/if_phonet.h>
++#include <linux/init.h>
++#include <linux/irq.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/notifier.h>
++#include <linux/scatterlist.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/timer.h>
++#include <linux/hsi/hsi.h>
++#include <linux/hsi/ssip_slave.h>
++#include <linux/hsi/omap_ssi_hack.h>
++
++#define SSIP_TXQUEUE_LEN 100
++#define SSIP_MAX_MTU 65535
++#define SSIP_DEFAULT_MTU 4000
++#define PN_MEDIA_SOS 21
++#define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */
++#define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */
++#define SSIP_KATOUT 15 /* 15 msecs */
++#define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */
++#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
++#define SSIP_CMT_LOADER_SYNC 0x11223344
++/*
++ * SSI protocol command definitions
++ */
++#define SSIP_COMMAND(data) ((data) >> 28)
++#define SSIP_PAYLOAD(data) ((data) & 0xfffffff)
++/* Commands */
++#define SSIP_SW_BREAK 0
++#define SSIP_BOOTINFO_REQ 1
++#define SSIP_BOOTINFO_RESP 2
++#define SSIP_WAKETEST_RESULT 3
++#define SSIP_START_TRANS 4
++#define SSIP_READY 5
++/* Payloads */
++#define SSIP_DATA_VERSION(data) ((data) & 0xff)
++#define SSIP_LOCAL_VERID 1
++#define SSIP_WAKETEST_OK 0
++#define SSIP_WAKETEST_FAILED 1
++#define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff)
++#define SSIP_MSG_ID(data) ((data) & 0xff)
++/* Generic Command */
++#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff))
++/* Commands for the control channel */
++#define SSIP_BOOTINFO_REQ_CMD(ver) \
++ SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
++#define SSIP_BOOTINFO_RESP_CMD(ver) \
++ SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
++#define SSIP_START_TRANS_CMD(pdulen, id) \
++ SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
++#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0)
++#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0)
++
++/* Main state machine states */
++enum {
++ INIT,
++ HANDSHAKE,
++ ACTIVE,
++};
++
++/* Send state machine states */
++enum {
++ SEND_IDLE,
++ WAIT4READY,
++ SEND_READY,
++ SENDING,
++ SENDING_SWBREAK,
++};
++
++/* Receive state machine states */
++enum {
++ RECV_IDLE,
++ RECV_READY,
++ RECEIVING,
++};
++
++/**
++ * struct ssi_protocol - SSI protocol (McSAAB) data
++ * @main_state: Main state machine
++ * @send_state: TX state machine
++ * @recv_state: RX state machine
++ * @waketest: Flag to follow wake line test
++ * @rxid: RX data id
++ * @txid: TX data id
++ * @txqueue_len: TX queue length
++ * @tx_wd: TX watchdog
++ * @rx_wd: RX watchdog
++ * @keep_alive: Workaround for SSI HW bug
++ * @lock: To serialize access to this struct
++ * @netdev: Phonet network device
++ * @nb: CMT reset notification block
++ * @cmt: Reference to the CMT device
++ * @txqueue: TX data queue
++ * @cmdqueue: Queue of free commands
++ * @cl: HSI client own reference
++ * @link: Link for ssip_list
++ * @tx_usecount: Refcount to keep track the slaves that use the wake line
++ */
++struct ssi_protocol {
++ unsigned int main_state;
++ unsigned int send_state;
++ unsigned int recv_state;
++ unsigned int waketest:1;
++ u8 rxid;
++ u8 txid;
++ unsigned int txqueue_len;
++ struct timer_list tx_wd;
++ struct timer_list rx_wd;
++ struct timer_list keep_alive; /* wake-up workaround */
++ spinlock_t lock;
++ struct net_device *netdev;
++ struct notifier_block nb;
++ struct cmt_device *cmt;
++ struct list_head txqueue;
++ struct list_head cmdqueue;
++ struct hsi_client *cl;
++ struct list_head link;
++ atomic_t tx_usecnt;
++};
++
++/* List of ssi protocol instances */
++static LIST_HEAD(ssip_list);
++
++static void ssip_rxcmd_complete(struct hsi_msg *msg);
++
++static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
++{
++ u32 *data;
++
++ data = sg_virt(msg->sgt.sgl);
++ *data = cmd;
++}
++
++static inline u32 ssip_get_cmd(struct hsi_msg *msg)
++{
++ u32 *data;
++
++ data = sg_virt(msg->sgt.sgl);
++
++ return *data;
++}
++
++static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
++{
++ skb_frag_t *frag;
++ struct scatterlist *sg;
++ int i;
++
++ BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));
++
++ sg = msg->sgt.sgl;
++ sg_set_buf(sg, skb->data, skb_headlen(skb));
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ sg = sg_next(sg);
++ BUG_ON(!sg);
++ frag = &skb_shinfo(skb)->frags[i];
++ sg_set_page(sg, frag->page, frag->size, frag->page_offset);
++ }
++}
++
++static void ssip_free_data(struct hsi_msg *msg)
++{
++ struct sk_buff *skb;
++
++ skb = msg->context;
++ pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
++ skb);
++ msg->destructor = NULL;
++ dev_kfree_skb(skb);
++ hsi_free_msg(msg);
++}
++
++static struct hsi_msg *ssip_alloc_data(struct sk_buff *skb, gfp_t flags)
++{
++ struct hsi_msg *msg;
++
++ msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
++ if (!msg)
++ return NULL;
++ ssip_skb_to_msg(skb, msg);
++ msg->destructor = ssip_free_data;
++ msg->channel = 3;
++ msg->context = skb;
++
++ return msg;
++}
++
++static inline void ssip_release_cmd(struct hsi_msg *msg)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);
++
++ dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
++ spin_lock_bh(&ssi->lock);
++ list_add_tail(&msg->link, &ssi->cmdqueue);
++ spin_unlock_bh(&ssi->lock);
++}
++
++static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
++{
++ struct hsi_msg *msg;
++
++ BUG_ON(list_empty(&ssi->cmdqueue));
++
++ spin_lock_bh(&ssi->lock);
++ msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
++ list_del(&msg->link);
++ spin_unlock_bh(&ssi->lock);
++ msg->destructor = ssip_release_cmd;
++
++ return msg;
++}
++
++static void ssip_free_cmds(struct ssi_protocol *ssi)
++{
++ struct hsi_msg *msg, *tmp;
++
++ list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
++ list_del(&msg->link);
++ msg->destructor = NULL;
++ kfree(sg_virt(msg->sgt.sgl));
++ hsi_free_msg(msg);
++ }
++}
++
++static int ssip_alloc_cmds(struct ssi_protocol *ssi)
++{
++ struct hsi_msg *msg;
++ u32 *buf;
++ unsigned int i;
++
++ for (i = 0; i < SSIP_MAX_CMDS; i++) {
++ msg = hsi_alloc_msg(1, GFP_KERNEL);
++ if (!msg)
++ goto out;
++ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
++ if (!buf) {
++ hsi_free_msg(msg);
++ goto out;
++ }
++ sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
++ msg->channel = 0;
++ list_add_tail(&msg->link, &ssi->cmdqueue);
++ }
++
++ return 0;
++out:
++ ssip_free_cmds(ssi);
++
++ return -ENOMEM;
++}
++
++static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
++{
++ ssi->recv_state = state;
++ switch (state) {
++ case RECV_IDLE:
++ del_timer(&ssi->rx_wd);
++ if (ssi->send_state == SEND_IDLE)
++ del_timer(&ssi->keep_alive);
++ break;
++ case RECV_READY:
++ /* CMT speech workaround */
++ if (atomic_read(&ssi->tx_usecnt))
++ break;
++ /* Otherwise fall through */
++ case RECEIVING:
++ mod_timer(&ssi->keep_alive, jiffies +
++ msecs_to_jiffies(SSIP_KATOUT));
++ mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
++ break;
++ default:
++ break;
++ }
++}
++
++static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
++{
++ ssi->send_state = state;
++ switch (state) {
++ case SEND_IDLE:
++ case SEND_READY:
++ del_timer(&ssi->tx_wd);
++ if (ssi->recv_state == RECV_IDLE)
++ del_timer(&ssi->keep_alive);
++ break;
++ case WAIT4READY:
++ case SENDING:
++ case SENDING_SWBREAK:
++ mod_timer(&ssi->keep_alive,
++ jiffies + msecs_to_jiffies(SSIP_KATOUT));
++ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
++ break;
++ default:
++ break;
++ }
++}
++
++struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
++{
++ struct hsi_client *master = ERR_PTR(-ENODEV);
++ struct ssi_protocol *ssi;
++
++ list_for_each_entry(ssi, &ssip_list, link)
++ if (slave->device.parent == ssi->cl->device.parent) {
++ master = ssi->cl;
++ break;
++ }
++
++ return master;
++}
++EXPORT_SYMBOL_GPL(ssip_slave_get_master);
++
++int ssip_slave_start_tx(struct hsi_client *master)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(master);
++
++ dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
++ spin_lock_bh(&ssi->lock);
++ if (ssi->send_state == SEND_IDLE) {
++ ssip_set_txstate(ssi, WAIT4READY);
++ hsi_start_tx(master);
++ }
++ spin_unlock_bh(&ssi->lock);
++ atomic_inc(&ssi->tx_usecnt);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(ssip_slave_start_tx);
++
++int ssip_slave_stop_tx(struct hsi_client *master)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(master);
++
++ WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);
++
++ if (atomic_dec_and_test(&ssi->tx_usecnt)) {
++ spin_lock_bh(&ssi->lock);
++ if ((ssi->send_state == SEND_READY) ||
++ (ssi->send_state == WAIT4READY)) {
++ ssip_set_txstate(ssi, SEND_IDLE);
++ hsi_stop_tx(master);
++ }
++ spin_unlock_bh(&ssi->lock);
++ }
++ dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);
++
++static void ssip_reset(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct list_head *head, *tmp;
++ struct hsi_msg *msg;
++
++ if (netif_running(ssi->netdev))
++ netif_carrier_off(ssi->netdev);
++ hsi_flush(cl);
++ spin_lock_bh(&ssi->lock);
++ if (ssi->send_state != SEND_IDLE)
++ hsi_stop_tx(cl);
++ if (ssi->waketest)
++ ssi_waketest(cl, 0);
++ del_timer(&ssi->rx_wd);
++ del_timer(&ssi->tx_wd);
++ del_timer(&ssi->keep_alive);
++ ssi->main_state = 0;
++ ssi->send_state = 0;
++ ssi->recv_state = 0;
++ ssi->waketest = 0;
++ ssi->rxid = 0;
++ ssi->txid = 0;
++ list_for_each_safe(head, tmp, &ssi->txqueue) {
++ msg = list_entry(head, struct hsi_msg, link);
++ dev_dbg(&cl->device, "Pending TX data\n");
++ list_del(head);
++ ssip_free_data(msg);
++ }
++ ssi->txqueue_len = 0;
++ spin_unlock_bh(&ssi->lock);
++}
++
++static void ssip_dump_state(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++
++ spin_lock_bh(&ssi->lock);
++ dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
++ dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
++ dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
++ dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
++ "Online" : "Offline");
++ dev_err(&cl->device, "Wake test %d\n", ssi->waketest);
++ dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
++ dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);
++
++ list_for_each_entry(msg, &ssi->txqueue, link)
++ dev_err(&cl->device, "pending TX data (%p)\n", msg);
++ spin_unlock_bh(&ssi->lock);
++}
++
++static void ssip_error(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++
++ ssip_dump_state(cl);
++ ssip_reset(cl);
++ msg = ssip_claim_cmd(ssi);
++ msg->complete = ssip_rxcmd_complete;
++ hsi_async_read(cl, msg);
++}
++
++static void ssip_keep_alive(unsigned long data)
++{
++ struct hsi_client *cl = (struct hsi_client *)data;
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++
++ dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
++ ssi->main_state, ssi->recv_state, ssi->send_state);
++
++ spin_lock(&ssi->lock);
++ if (ssi->recv_state == RECV_IDLE)
++ switch (ssi->send_state) {
++ case SEND_READY:
++ if (atomic_read(&ssi->tx_usecnt) == 0)
++ break;
++ /*
++ * Fall through. Workaround for cmt-speech
++ * in that case we rely on audio timers.
++ */
++ case SEND_IDLE:
++ spin_unlock(&ssi->lock);
++ return;
++ }
++ mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
++ spin_unlock(&ssi->lock);
++}
++
++static void ssip_wd(unsigned long data)
++{
++ struct hsi_client *cl = (struct hsi_client *)data;
++
++ dev_err(&cl->device, "Watchdog trigerred\n");
++ ssip_error(cl);
++}
++
++static void ssip_start_rx(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++
++ dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
++ ssi->recv_state);
++ spin_lock(&ssi->lock);
++ /*
++ * We can have two UP events in a row due to a short low
++ * high transition. Therefore we need to ignore the second UP event.
++ */
++ if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
++ spin_unlock(&ssi->lock);
++ return;
++ }
++ ssip_set_rxstate(ssi, RECV_READY);
++ spin_unlock(&ssi->lock);
++
++ msg = ssip_claim_cmd(ssi);
++ ssip_set_cmd(msg, SSIP_READY_CMD);
++ msg->complete = ssip_release_cmd;
++ dev_dbg(&cl->device, "Send READY\n");
++ hsi_async_write(cl, msg);
++}
++
++static void ssip_stop_rx(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++
++ dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
++ spin_lock(&ssi->lock);
++ if (likely(ssi->main_state == ACTIVE))
++ ssip_set_rxstate(ssi, RECV_IDLE);
++ spin_unlock(&ssi->lock);
++}
++
++static void ssip_free_strans(struct hsi_msg *msg)
++{
++ ssip_free_data(msg->context);
++ ssip_release_cmd(msg);
++}
++
++static void ssip_strans_complete(struct hsi_msg *msg)
++{
++ struct hsi_client *cl = msg->cl;
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *data;
++
++ data = msg->context;
++ ssip_release_cmd(msg);
++ spin_lock(&ssi->lock);
++ ssip_set_txstate(ssi, SENDING);
++ spin_unlock(&ssi->lock);
++ hsi_async_write(cl, data);
++}
++
++static int ssip_xmit(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg, *dmsg;
++ struct sk_buff *skb;
++
++ spin_lock_bh(&ssi->lock);
++ if (list_empty(&ssi->txqueue)) {
++ spin_unlock_bh(&ssi->lock);
++ return 0;
++ }
++ dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
++ list_del(&dmsg->link);
++ ssi->txqueue_len--;
++ spin_unlock_bh(&ssi->lock);
++
++ msg = ssip_claim_cmd(ssi);
++ skb = dmsg->context;
++ msg->context = dmsg;
++ msg->complete = ssip_strans_complete;
++ msg->destructor = ssip_free_strans;
++
++ spin_lock_bh(&ssi->lock);
++ ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
++ ssi->txid));
++ ssi->txid++;
++ ssip_set_txstate(ssi, SENDING);
++ spin_unlock_bh(&ssi->lock);
++
++ dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
++ SSIP_BYTES_TO_FRAMES(skb->len));
++
++ return hsi_async_write(cl, msg);
++}
++
++/* In soft IRQ context */
++static void ssip_pn_rx(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++
++ if (unlikely(!netif_running(dev))) {
++ dev_dbg(&dev->dev, "Drop RX packet\n");
++ dev->stats.rx_dropped++;
++ dev_kfree_skb(skb);
++ return;
++ }
++ if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
++ dev_dbg(&dev->dev, "Error drop RX packet\n");
++ dev->stats.rx_errors++;
++ dev->stats.rx_length_errors++;
++ dev_kfree_skb(skb);
++ return;
++ }
++ dev->stats.rx_packets++;
++ dev->stats.rx_bytes += skb->len;
++#ifdef __LITTLE_ENDIAN
++ ((u16 *)skb->data)[2] = swab16(((u16 *)skb->data)[2]);
++ dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
++ ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));
++#endif
++ skb->protocol = htons(ETH_P_PHONET);
++ skb_reset_mac_header(skb);
++ __skb_pull(skb, 1);
++ netif_rx(skb);
++}
++
++static void ssip_rx_data_complete(struct hsi_msg *msg)
++{
++ struct hsi_client *cl = msg->cl;
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct sk_buff *skb;
++
++ if (msg->status == HSI_STATUS_ERROR) {
++ dev_err(&cl->device, "RX data error\n");
++ ssip_free_data(msg);
++ ssip_error(cl);
++ return;
++ }
++ del_timer(&ssi->rx_wd); /* FIXME: Revisit */
++ skb = msg->context;
++ ssip_pn_rx(skb);
++ hsi_free_msg(msg);
++}
++
++static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++
++ /* Workaround: Ignore CMT Loader message leftover */
++ if (cmd == SSIP_CMT_LOADER_SYNC)
++ return;
++
++ switch (ssi->main_state) {
++ case ACTIVE:
++ dev_err(&cl->device, "Boot info req on active state\n");
++ ssip_error(cl);
++ /* Fall through */
++ case INIT:
++ spin_lock(&ssi->lock);
++ ssi->main_state = HANDSHAKE;
++ if (!ssi->waketest) {
++ ssi->waketest = 1;
++ ssi_waketest(cl, 1); /* FIXME: To be removed */
++ }
++ /* Start boot handshake watchdog */
++ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
++ spin_unlock(&ssi->lock);
++ dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
++ if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
++ dev_warn(&cl->device, "boot info req verid mismatch\n");
++ msg = ssip_claim_cmd(ssi);
++ ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
++ msg->complete = ssip_release_cmd;
++ hsi_async_write(cl, msg);
++ break;
++ case HANDSHAKE:
++ /* Ignore */
++ break;
++ default:
++ dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
++ break;
++ }
++}
++
++static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++
++ if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
++ dev_warn(&cl->device, "boot info resp verid mismatch\n");
++
++ spin_lock(&ssi->lock);
++ if (ssi->main_state != ACTIVE)
++ /* Use tx_wd as a boot watchdog in non ACTIVE state */
++ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
++ else
++ dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
++ ssi->main_state);
++ spin_unlock(&ssi->lock);
++}
++
++static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ unsigned int wkres = SSIP_PAYLOAD(cmd);
++
++ spin_lock(&ssi->lock);
++ if (ssi->main_state != HANDSHAKE) {
++ dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
++ ssi->main_state);
++ spin_unlock(&ssi->lock);
++ return;
++ }
++ if (ssi->waketest) {
++ ssi->waketest = 0;
++ ssi_waketest(cl, 0); /* FIXME: To be removed */
++ }
++ ssi->main_state = ACTIVE;
++ del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
++ spin_unlock(&ssi->lock);
++
++ dev_notice(&cl->device, "WAKELINES TEST %s\n",
++ wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
++ if (wkres & SSIP_WAKETEST_FAILED) {
++ ssip_error(cl);
++ return;
++ }
++ dev_dbg(&cl->device, "CMT is ONLINE\n");
++ netif_wake_queue(ssi->netdev);
++ netif_carrier_on(ssi->netdev);
++}
++
++static void ssip_rx_ready(struct hsi_client *cl)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++
++ spin_lock(&ssi->lock);
++ if (unlikely(ssi->main_state != ACTIVE)) {
++ dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
++ ssi->send_state, ssi->main_state);
++ spin_unlock(&ssi->lock);
++ return;
++ }
++ if (ssi->send_state != WAIT4READY) {
++ dev_dbg(&cl->device, "Ignore spurious READY command\n");
++ spin_unlock(&ssi->lock);
++ return;
++ }
++ ssip_set_txstate(ssi, SEND_READY);
++ spin_unlock(&ssi->lock);
++ ssip_xmit(cl);
++}
++
++static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
++{
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct sk_buff *skb;
++ struct hsi_msg *msg;
++ int len = SSIP_PDU_LENGTH(cmd);
++
++ dev_dbg(&cl->device, "RX strans: %d frames\n", len);
++ spin_lock(&ssi->lock);
++ if (unlikely(ssi->main_state != ACTIVE)) {
++ dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
++ ssi->send_state, ssi->main_state);
++ spin_unlock(&ssi->lock);
++ return;
++ }
++ ssip_set_rxstate(ssi, RECEIVING);
++ if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
++ dev_err(&cl->device, "START TRANS id %d expeceted %d\n",
++ SSIP_MSG_ID(cmd), ssi->rxid);
++ spin_unlock(&ssi->lock);
++ goto out1;
++ }
++ ssi->rxid++;
++ spin_unlock(&ssi->lock);
++ skb = netdev_alloc_skb(ssi->netdev, len * 4);
++ if (unlikely(!skb)) {
++ dev_err(&cl->device, "No memory for rx skb\n");
++ goto out1;
++ }
++ skb->dev = ssi->netdev;
++ skb_put(skb, len * 4);
++ msg = ssip_alloc_data(skb, GFP_ATOMIC);
++ if (unlikely(!msg)) {
++ dev_err(&cl->device, "No memory for RX data msg\n");
++ goto out2;
++ }
++ msg->complete = ssip_rx_data_complete;
++ hsi_async_read(cl, msg);
++
++ return;
++out2:
++ dev_kfree_skb(skb);
++out1:
++ ssip_error(cl);
++}
++
++static void ssip_rxcmd_complete(struct hsi_msg *msg)
++{
++ struct hsi_client *cl = msg->cl;
++ u32 cmd = ssip_get_cmd(msg);
++ unsigned int cmdid = SSIP_COMMAND(cmd);
++
++ if (msg->status == HSI_STATUS_ERROR) {
++ dev_err(&cl->device, "RX error detected\n");
++ ssip_release_cmd(msg);
++ ssip_error(cl);
++ return;
++ }
++ hsi_async_read(cl, msg);
++ dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
++ switch (cmdid) {
++ case SSIP_SW_BREAK:
++ /* Ignored */
++ break;
++ case SSIP_BOOTINFO_REQ:
++ ssip_rx_bootinforeq(cl, cmd);
++ break;
++ case SSIP_BOOTINFO_RESP:
++ ssip_rx_bootinforesp(cl, cmd);
++ break;
++ case SSIP_WAKETEST_RESULT:
++ ssip_rx_waketest(cl, cmd);
++ break;
++ case SSIP_START_TRANS:
++ ssip_rx_strans(cl, cmd);
++ break;
++ case SSIP_READY:
++ ssip_rx_ready(cl);
++ break;
++ default:
++ dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
++ break;
++ }
++}
++
++static void ssip_swbreak_complete(struct hsi_msg *msg)
++{
++ struct hsi_client *cl = msg->cl;
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++
++ ssip_release_cmd(msg);
++ spin_lock(&ssi->lock);
++ if (list_empty(&ssi->txqueue)) {
++ if (atomic_read(&ssi->tx_usecnt)) {
++ ssip_set_txstate(ssi, SEND_READY);
++ } else {
++ ssip_set_txstate(ssi, SEND_IDLE);
++ hsi_stop_tx(cl);
++ }
++ spin_unlock(&ssi->lock);
++ } else {
++ spin_unlock(&ssi->lock);
++ ssip_xmit(cl);
++ }
++ netif_wake_queue(ssi->netdev);
++}
++
++static void ssip_tx_data_complete(struct hsi_msg *msg)
++{
++ struct hsi_client *cl = msg->cl;
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *cmsg;
++
++ if (msg->status == HSI_STATUS_ERROR) {
++ dev_err(&cl->device, "TX data error\n");
++ ssip_error(cl);
++ goto out;
++ }
++ spin_lock(&ssi->lock);
++ if (list_empty(&ssi->txqueue)) {
++ ssip_set_txstate(ssi, SENDING_SWBREAK);
++ spin_unlock(&ssi->lock);
++ cmsg = ssip_claim_cmd(ssi);
++ ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
++ cmsg->complete = ssip_swbreak_complete;
++ dev_dbg(&cl->device, "Send SWBREAK\n");
++ hsi_async_write(cl, cmsg);
++ } else {
++ spin_unlock(&ssi->lock);
++ ssip_xmit(cl);
++ }
++out:
++ ssip_free_data(msg);
++}
++
++static int ssip_pn_open(struct net_device *dev)
++{
++ struct hsi_client *cl = to_hsi_client(dev->dev.parent);
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++ int err;
++
++ err = hsi_claim_port(cl, 1);
++ if (err < 0) {
++ dev_err(&cl->device, "SSI port already claimed\n");
++ return err;
++ }
++ dev_dbg(&cl->device, "Configuring SSI port\n");
++ hsi_setup(cl);
++ spin_lock_bh(&ssi->lock);
++ if (!ssi->waketest) {
++ ssi->waketest = 1;
++ ssi_waketest(cl, 1); /* FIXME: To be removed */
++ }
++ ssi->main_state = HANDSHAKE;
++ spin_unlock_bh(&ssi->lock);
++ dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
++ msg = ssip_claim_cmd(ssi);
++ ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
++ msg->complete = ssip_release_cmd;
++ hsi_async_write(cl, msg);
++ dev_dbg(&cl->device, "Issuing RX command\n");
++ msg = ssip_claim_cmd(ssi);
++ msg->complete = ssip_rxcmd_complete;
++ hsi_async_read(cl, msg);
++
++ return 0;
++}
++
++static int ssip_pn_stop(struct net_device *dev)
++{
++ struct hsi_client *cl = to_hsi_client(dev->dev.parent);
++
++ ssip_reset(cl);
++ hsi_release_port(cl);
++
++ return 0;
++}
++
++static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
++{
++ if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
++ return -EINVAL;
++ dev->mtu = new_mtu;
++
++ return 0;
++}
++
++static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct hsi_client *cl = to_hsi_client(dev->dev.parent);
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++ struct hsi_msg *msg;
++
++ if ((skb->protocol != htons(ETH_P_PHONET)) ||
++ (skb->len < SSIP_MIN_PN_HDR))
++ goto drop;
++ /* Pad to 32-bits - FIXME: Revisit*/
++ if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
++ goto drop;
++
++ /*
++ * Modem sends Phonet messages over SSI with its own endianness...
++ * Assume that modem has the same endianness as we do.
++ */
++ if (skb_cow_head(skb, 0))
++ goto drop;
++#ifdef __LITTLE_ENDIAN
++ ((u16 *)skb->data)[2] = swab16(((u16 *)skb->data)[2]);
++#endif
++ msg = ssip_alloc_data(skb, GFP_ATOMIC);
++ if (!msg) {
++ dev_dbg(&cl->device, "Dropping tx data: No memory\n");
++ goto drop;
++ }
++ msg->complete = ssip_tx_data_complete;
++
++ spin_lock_bh(&ssi->lock);
++ if (unlikely(ssi->main_state != ACTIVE)) {
++ spin_unlock_bh(&ssi->lock);
++ dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
++ goto drop2;
++ }
++ list_add_tail(&msg->link, &ssi->txqueue);
++ ssi->txqueue_len++;
++ if (dev->tx_queue_len < ssi->txqueue_len) {
++ dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
++ netif_stop_queue(dev);
++ }
++ if (ssi->send_state == SEND_IDLE) {
++ ssip_set_txstate(ssi, WAIT4READY);
++ spin_unlock_bh(&ssi->lock);
++ dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
++ hsi_start_tx(cl);
++ } else if (ssi->send_state == SEND_READY) {
++ /* Needed for cmt-speech workaround */
++ dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
++ ssi->txqueue_len);
++ spin_unlock_bh(&ssi->lock);
++ ssip_xmit(cl);
++ } else {
++ spin_unlock_bh(&ssi->lock);
++ }
++ dev->stats.tx_packets++;
++ dev->stats.tx_bytes += skb->len;
++
++ return 0;
++drop2:
++ hsi_free_msg(msg);
++drop:
++ dev->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++
++ return 0;
++}
++
++/* CMT reset support */
++static int ssip_cmt_event(struct notifier_block *nb, unsigned long event,
++ void *data)
++{
++ struct ssi_protocol *ssi = container_of(nb, struct ssi_protocol, nb);
++
++ if (event != CMT_RESET)
++ return NOTIFY_DONE;
++
++ dev_err(&ssi->cl->device, "CMT reset detected !\n");
++ ssip_error(ssi->cl);
++
++ return NOTIFY_DONE;
++}
++
++static const struct net_device_ops ssip_pn_ops = {
++ .ndo_open = ssip_pn_open,
++ .ndo_stop = ssip_pn_stop,
++ .ndo_start_xmit = ssip_pn_xmit,
++ .ndo_change_mtu = ssip_pn_set_mtu,
++};
++
++static void ssip_pn_setup(struct net_device *dev)
++{
++ dev->features = 0;
++ dev->netdev_ops = &ssip_pn_ops;
++ dev->type = ARPHRD_PHONET;
++ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
++ dev->mtu = SSIP_DEFAULT_MTU;
++ dev->hard_header_len = 1;
++ dev->dev_addr[0] = PN_MEDIA_SOS;
++ dev->addr_len = 1;
++ dev->tx_queue_len = SSIP_TXQUEUE_LEN;
++
++ dev->destructor = free_netdev;
++ dev->header_ops = &phonet_header_ops;
++}
++
++static int __init ssi_protocol_probe(struct device *dev)
++{
++ static const char ifname[] = "phonet%d";
++ struct hsi_client *cl = to_hsi_client(dev);
++ struct ssi_protocol *ssi;
++ int err;
++
++ ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
++ if (!ssi) {
++ dev_err(dev, "No memory for ssi protocol\n");
++ return -ENOMEM;
++ }
++ spin_lock_init(&ssi->lock);
++ init_timer_deferrable(&ssi->rx_wd);
++ init_timer_deferrable(&ssi->tx_wd);
++ init_timer(&ssi->keep_alive);
++ ssi->rx_wd.data = (unsigned long)cl;
++ ssi->rx_wd.function = ssip_wd;
++ ssi->tx_wd.data = (unsigned long)cl;
++ ssi->tx_wd.function = ssip_wd;
++ ssi->keep_alive.data = (unsigned long)cl;
++ ssi->keep_alive.function = ssip_keep_alive;
++ INIT_LIST_HEAD(&ssi->txqueue);
++ INIT_LIST_HEAD(&ssi->cmdqueue);
++ ssi->nb.notifier_call = ssip_cmt_event;
++ ssi->nb.priority = INT_MAX;
++ atomic_set(&ssi->tx_usecnt, 0);
++ hsi_client_set_drvdata(cl, ssi);
++ ssi->cl = cl;
++ err = ssip_alloc_cmds(ssi);
++ if (err < 0) {
++ dev_err(dev, "No memory for commands\n");
++ goto out;
++ }
++ ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup);
++ if (!ssi->netdev) {
++ dev_err(dev, "No memory for netdev\n");
++ err = -ENOMEM;
++ goto out1;
++ }
++ SET_NETDEV_DEV(ssi->netdev, dev);
++ netif_carrier_off(ssi->netdev);
++ err = register_netdev(ssi->netdev);
++ if (err < 0) {
++ dev_err(dev, "Register netdev failed (%d)\n", err);
++ free_netdev(ssi->netdev);
++ goto out1;
++ }
++ ssi->cmt = cmt_get("cmt");
++ if (IS_ERR(ssi->cmt)) {
++ err = PTR_ERR(ssi->cmt);
++ dev_err(dev, "Could not get CMT (%d)\n", err);
++ goto out2;
++ }
++ err = cmt_notifier_register(ssi->cmt, &ssi->nb);
++ if (err < 0) {
++ dev_err(dev, "Register CMT notifier failed (%d)\n", err);
++ goto out3;
++ }
++ cl->hsi_start_rx = ssip_start_rx;
++ cl->hsi_stop_rx = ssip_stop_rx;
++ list_add(&ssi->link, &ssip_list);
++
++ return 0;
++out3:
++ cmt_put(ssi->cmt);
++out2:
++ unregister_netdev(ssi->netdev);
++out1:
++ ssip_free_cmds(ssi);
++out:
++ kfree(ssi);
++
++ return err;
++}
++
++static int __exit ssi_protocol_remove(struct device *dev)
++{
++ struct hsi_client *cl = to_hsi_client(dev);
++ struct ssi_protocol *ssi = hsi_client_drvdata(cl);
++
++ list_del(&ssi->link);
++ cl->hsi_start_rx = NULL;
++ cl->hsi_stop_rx = NULL;
++ cmt_notifier_unregister(ssi->cmt, &ssi->nb);
++ cmt_put(ssi->cmt);
++ unregister_netdev(ssi->netdev);
++ ssip_free_cmds(ssi);
++ hsi_client_set_drvdata(cl, NULL);
++ kfree(ssi);
++
++ return 0;
++}
++
++static struct hsi_client_driver ssip_driver = {
++ .driver = {
++ .name = "ssi_protocol",
++ .owner = THIS_MODULE,
++ .probe = ssi_protocol_probe,
++ .remove = ssi_protocol_remove,
++ },
++};
++
++static int __init ssip_init(void)
++{
++ pr_info("SSI protocol aka McSAAB added\n");
++
++ return hsi_register_client_driver(&ssip_driver);
++}
++module_init(ssip_init);
++
++static void __exit ssip_exit(void)
++{
++ hsi_unregister_client_driver(&ssip_driver);
++ pr_info("SSI protocol driver removed\n");
++}
++module_exit(ssip_exit);
++
++MODULE_ALIAS("hsi:ssi_protocol");
++MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>,"
++ "Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
++MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/hsi/controllers/Kconfig
+@@ -0,0 +1,21 @@
++#
++# HSI controllers configuration
++#
++comment "HSI controllers"
++
++config OMAP_SSI
++ tristate "OMAP SSI hardware driver"
++ depends on ARCH_OMAP && HSI
++ default n
++ ---help---
++ If you say Y here, you will enable the OMAP SSI hardware driver.
++
++ If unsure, say N.
++
++if OMAP_SSI
++
++config OMAP_SSI_CONFIG
++ boolean
++ default y
++
++endif # OMAP_SSI
+--- /dev/null
++++ b/drivers/hsi/controllers/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for HSI controllers drivers
++#
++
++obj-$(CONFIG_OMAP_SSI) += omap_ssi.o
+--- /dev/null
++++ b/drivers/hsi/controllers/omap_ssi.c
+@@ -0,0 +1,1819 @@
++/*
++ * omap_ssi.c
++ *
++ * Implements the OMAP SSI driver.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++#include <linux/err.h>
++#include <linux/ioport.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/clk.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include <linux/scatterlist.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/hsi/hsi.h>
++#include <linux/hsi/omap_ssi_hack.h>
++#include <linux/debugfs.h>
++#include <plat/omap-pm.h>
++#include <plat/clock.h>
++#include <plat/ssi.h>
++#include <../arch/arm/mach-omap2/cm.h>
++
++#define SSI_MAX_CHANNELS 8
++#define SSI_MAX_GDD_LCH 8
++#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
++
++/**
++ * struct gdd_trn - GDD transaction data
++ * @msg: Pointer to the HSI message being served
++ * @sg: Pointer to the current sg entry being served
++ */
++struct gdd_trn {
++ struct hsi_msg *msg;
++ struct scatterlist *sg;
++};
++
++/**
++ * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context
++ * @mode: Bit transmission mode
++ * @channels: Number of channels
++ * @frame_size: Frame size in bits
++ * @timeout: RX frame timeout
++ * @divisor: TX divider
++ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
++ */
++struct omap_ssm_ctx {
++ u32 mode;
++ u32 channels;
++ u32 frame_size;
++ union {
++ u32 timeout; /* Rx Only */
++ struct {
++ u32 arb_mode;
++ u32 divisor;
++ }; /* Tx only */
++ };
++};
++
++/**
++ * struct omap_ssi_port - OMAP SSI port data
++ * @dev: device associated to the port (HSI port)
++ * @sst_dma: SSI transmitter physical base address
++ * @ssr_dma: SSI receiver physical base address
++ * @sst_base: SSI transmitter base address
++ * @ssr_base: SSI receiver base address
++ * @wk_lock: spin lock to serialize access to the wake lines
++ * @lock: Spin lock to serialize access to the SSI port
++ * @channels: Current number of channels configured (1,2,4 or 8)
++ * @txqueue: TX message queues
++ * @rxqueue: RX message queues
++ * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode)
++ * @irq: IRQ number
++ * @wake_irq: IRQ number for incoming wake line (-1 if none)
++ * @pio_tasklet: Bottom half for PIO transfers and events
++ * @wake_tasklet: Bottom half for incoming wake events
++ * @wkin_cken: Keep track of clock references due to the incoming wake line
++ * @wk_refcount: Reference count for output wake line
++ * @sys_mpu_enable: Context for the interrupt enable register for irq 0
++ * @sst: Context for the synchronous serial transmitter
++ * @ssr: Context for the synchronous serial receiver
++ */
++struct omap_ssi_port {
++ struct device *dev;
++ dma_addr_t sst_dma;
++ dma_addr_t ssr_dma;
++ unsigned long sst_base;
++ unsigned long ssr_base;
++ spinlock_t wk_lock;
++ spinlock_t lock;
++ unsigned int channels;
++ struct list_head txqueue[SSI_MAX_CHANNELS];
++ struct list_head rxqueue[SSI_MAX_CHANNELS];
++ struct list_head brkqueue;
++ unsigned int irq;
++ int wake_irq;
++ struct tasklet_struct pio_tasklet;
++ struct tasklet_struct wake_tasklet;
++ unsigned int wktest:1; /* FIXME: HACK to be removed */
++ unsigned int wkin_cken:1; /* Workaround */
++ int wk_refcount;
++ /* OMAP SSI port context */
++ u32 sys_mpu_enable; /* We use only one irq */
++ struct omap_ssm_ctx sst;
++ struct omap_ssm_ctx ssr;
++};
++
++/**
++ * struct omap_ssi_controller - OMAP SSI controller data
++ * @dev: device associated to the controller (HSI controller)
++ * @sys: SSI I/O base address
++ * @gdd: GDD I/O base address
++ * @ick: SSI interconnect clock
++ * @fck: SSI functional clock
++ * @ck_refcount: References count for clocks
++ * @gdd_irq: IRQ line for GDD
++ * @gdd_tasklet: bottom half for DMA transfers
++ * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers
++ * @lock: lock to serialize access to GDD
++ * @ck_lock: lock to serialize access to the clocks
++ * @fck_nb: DVFS notifier block
++ * @rate_change: flag to know if we are in the middle of a DVFS transition
++ * @loss_count: To follow if we need to restore context or not
++ * @sysconfig: SSI controller saved context
++ * @gdd_gcr: SSI GDD saved context
++ * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any
++ * @port: Array of pointers of the ports of the controller
++ * @dir: Debugfs SSI root directory
++ */
++struct omap_ssi_controller {
++ struct device *dev;
++ unsigned long sys;
++ unsigned long gdd;
++ struct clk *ick;
++ struct clk *fck;
++ int ck_refcount;
++ unsigned int gdd_irq;
++ struct tasklet_struct gdd_tasklet;
++ struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH];
++ spinlock_t lock;
++ spinlock_t ck_lock;
++ struct notifier_block fck_nb;
++ u32 fck_rate;
++ unsigned int rate_change:1;
++ int loss_count;
++ /* OMAP SSI Controller context */
++ u32 sysconfig;
++ u32 gdd_gcr;
++ int (*get_loss)(struct device *dev);
++ struct omap_ssi_port **port;
++#ifdef CONFIG_DEBUG_FS
++ struct dentry *dir;
++#endif
++};
++
++static inline unsigned int ssi_wakein(struct hsi_port *port)
++{
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++
++ return gpio_get_value(irq_to_gpio(omap_port->wake_irq));
++}
++
++/*
++ * PM workaround for hw bug: SSI FCLK gets bypassed if DPLL3 goes into idle.
++ */
++static void ssi_disable_dpll3_autoidle(void)
++{
++ u32 v;
++
++ v = cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
++ v &= ~0x7;
++ cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE);
++}
++
++/*
++ * PM workaround for hw bug: SSI FCLK gets bypassed if DPLL3 goes into idle.
++ */
++static void ssi_enable_dpll3_autoidle(void)
++{
++ u32 v;
++
++ v = cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
++ v |= 1;
++ cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE);
++}
++
++static int ssi_for_each_port(struct hsi_controller *ssi, void *data,
++ int (*fn)(struct omap_ssi_port *p, void *data))
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned int i = 0;
++ int err = 0;
++
++ for (i = 0; ((i < ssi->num_ports) && !err); i++)
++ err = (*fn)(omap_ssi->port[i], data);
++
++ return err;
++}
++
++static int ssi_set_port_mode(struct omap_ssi_port *omap_port, void *data)
++{
++ u32 *mode = data;
++
++ __raw_writel(*mode, omap_port->sst_base + SSI_SST_MODE_REG);
++ __raw_writel(*mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
++ /* OCP barrier */
++ *mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
++
++ return 0;
++}
++
++static inline void ssi_set_mode(struct hsi_controller *ssi, u32 mode)
++{
++ ssi_for_each_port(ssi, &mode, ssi_set_port_mode);
++}
++
++static int ssi_restore_port_mode(struct omap_ssi_port *omap_port, void *data)
++{
++ u32 mode;
++
++ __raw_writel(omap_port->sst.mode,
++ omap_port->sst_base + SSI_SST_MODE_REG);
++ __raw_writel(omap_port->ssr.mode,
++ omap_port->ssr_base + SSI_SSR_MODE_REG);
++ /* OCP barrier */
++ mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
++
++ return 0;
++}
++
++static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
++ void *data)
++{
++ struct omap_ssi_controller *omap_ssi = container_of(nb,
++ struct omap_ssi_controller, fck_nb);
++ struct hsi_controller *ssi = to_hsi_controller(omap_ssi->dev);
++
++ spin_lock_bh(&omap_ssi->ck_lock);
++ switch (event) {
++ case CLK_PRE_RATE_CHANGE:
++ omap_ssi->rate_change = 1;
++ if (omap_ssi->ck_refcount > 0)
++ ssi_set_mode(ssi, SSI_MODE_SLEEP);
++ break;
++ case CLK_ABORT_RATE_CHANGE:
++ case CLK_POST_RATE_CHANGE:
++ if ((omap_ssi->ck_refcount > 0) && (omap_ssi->rate_change))
++ ssi_for_each_port(ssi, NULL, ssi_restore_port_mode);
++ omap_ssi->rate_change = 0;
++ break;
++ default:
++ break;
++ }
++ spin_unlock_bh(&omap_ssi->ck_lock);
++
++ return NOTIFY_DONE;
++}
++
++static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port, void *data)
++{
++ struct hsi_port *port = to_hsi_port(omap_port->dev);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long base = omap_port->sst_base;
++
++ __raw_writel(omap_port->sys_mpu_enable,
++ omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ /* SST context */
++ __raw_writel(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
++ __raw_writel(omap_port->sst.divisor, base + SSI_SST_DIVISOR_REG);
++ __raw_writel(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
++ __raw_writel(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);
++ /* SSR context */
++ base = omap_port->ssr_base;
++ __raw_writel(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
++ __raw_writel(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
++ __raw_writel(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);
++
++ return 0;
++}
++
++static int ssi_save_port_ctx(struct omap_ssi_port *omap_port, void *data)
++{
++ struct hsi_port *port = to_hsi_port(omap_port->dev);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ omap_port->sys_mpu_enable = __raw_readl(omap_ssi->sys +
++ SSI_MPU_ENABLE_REG(port->num, 0));
++
++ return 0;
++}
++
++static int ssi_clk_enable(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ int err = 0;
++
++ spin_lock_bh(&omap_ssi->ck_lock);
++ if (omap_ssi->ck_refcount++)
++ goto out;
++
++ ssi_disable_dpll3_autoidle();
++ err = clk_enable(omap_ssi->fck);
++ if (unlikely(err < 0))
++ goto out;
++ err = clk_enable(omap_ssi->ick);
++ if (unlikely(err < 0)) {
++ clk_disable(omap_ssi->fck);
++ goto out;
++ }
++ if ((omap_ssi->get_loss) && (omap_ssi->loss_count ==
++ (*omap_ssi->get_loss)(ssi->device.parent)))
++ goto mode; /* We always need to restore the mode */
++
++ __raw_writel(omap_ssi->sysconfig, omap_ssi->sys + SSI_SYSCONFIG_REG);
++ __raw_writel(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG);
++
++ ssi_for_each_port(ssi, NULL, ssi_restore_port_ctx);
++mode:
++ if (!omap_ssi->rate_change)
++ ssi_for_each_port(ssi, NULL, ssi_restore_port_mode);
++out:
++ spin_unlock_bh(&omap_ssi->ck_lock);
++
++ return err;
++}
++
++static void ssi_clk_disable(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ spin_lock_bh(&omap_ssi->ck_lock);
++ WARN_ON(omap_ssi->ck_refcount <= 0);
++ if (--omap_ssi->ck_refcount)
++ goto out;
++
++ if (!omap_ssi->rate_change)
++ ssi_set_mode(ssi, SSI_MODE_SLEEP);
++
++ if (omap_ssi->get_loss)
++ omap_ssi->loss_count =
++ (*omap_ssi->get_loss)(ssi->device.parent);
++
++ ssi_for_each_port(ssi, NULL, ssi_save_port_ctx);
++ clk_disable(omap_ssi->ick);
++ clk_disable(omap_ssi->fck);
++
++ ssi_enable_dpll3_autoidle();
++out:
++ spin_unlock_bh(&omap_ssi->ck_lock);
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int ssi_debug_show(struct seq_file *m, void *p)
++{
++ struct hsi_controller *ssi = m->private;
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long sys = omap_ssi->sys;
++
++ ssi_clk_enable(ssi);
++ seq_printf(m, "REVISION\t: 0x%08x\n",
++ __raw_readl(sys + SSI_REVISION_REG));
++ seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
++ __raw_readl(sys + SSI_SYSCONFIG_REG));
++ seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
++ __raw_readl(sys + SSI_SYSSTATUS_REG));
++ ssi_clk_disable(ssi);
++
++ return 0;
++}
++
++static int ssi_debug_port_show(struct seq_file *m, void *p)
++{
++ struct hsi_port *port = m->private;
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long base = omap_ssi->sys;
++ unsigned int ch;
++
++ ssi_clk_enable(ssi);
++ if (omap_port->wake_irq > 0)
++ seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
++ seq_printf(m, "WAKE\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_WAKE_REG(port->num)));
++ seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
++ __raw_readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
++ seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
++ __raw_readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
++ /* SST */
++ base = omap_port->sst_base;
++ seq_printf(m, "\nSST\n===\n");
++ seq_printf(m, "MODE\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_MODE_REG));
++ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_FRAMESIZE_REG));
++ seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_DIVISOR_REG));
++ seq_printf(m, "CHANNELS\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_CHANNELS_REG));
++ seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_ARBMODE_REG));
++ seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_TXSTATE_REG));
++ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_BUFSTATE_REG));
++ seq_printf(m, "BREAK\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SST_BREAK_REG));
++ for (ch = 0; ch < omap_port->channels; ch++) {
++ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
++ __raw_readl(base + SSI_SST_BUFFER_CH_REG(ch)));
++ }
++ /* SSR */
++ base = omap_port->ssr_base;
++ seq_printf(m, "\nSSR\n===\n");
++ seq_printf(m, "MODE\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_MODE_REG));
++ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_FRAMESIZE_REG));
++ seq_printf(m, "CHANNELS\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_CHANNELS_REG));
++ seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_TIMEOUT_REG));
++ seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_RXSTATE_REG));
++ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_BUFSTATE_REG));
++ seq_printf(m, "BREAK\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_BREAK_REG));
++ seq_printf(m, "ERROR\t\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_ERROR_REG));
++ seq_printf(m, "ERRORACK\t: 0x%08x\n",
++ __raw_readl(base + SSI_SSR_ERRORACK_REG));
++ for (ch = 0; ch < omap_port->channels; ch++) {
++ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
++ __raw_readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
++ }
++ ssi_clk_disable(ssi);
++
++ return 0;
++}
++
++static int ssi_debug_gdd_show(struct seq_file *m, void *p)
++{
++ struct hsi_controller *ssi = m->private;
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long gdd = omap_ssi->gdd;
++ int lch;
++
++ ssi_clk_enable(ssi);
++ seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
++ __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG));
++ seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
++ __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG));
++ seq_printf(m, "HW_ID\t\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_HW_ID_REG));
++ seq_printf(m, "PPORT_ID\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_PPORT_ID_REG));
++ seq_printf(m, "MPORT_ID\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_MPORT_ID_REG));
++ seq_printf(m, "TEST\t\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_TEST_REG));
++ seq_printf(m, "GCR\t\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_GCR_REG));
++
++ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
++ seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
++ seq_printf(m, "CSDP\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CSDP_REG(lch)));
++ seq_printf(m, "CCR\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CCR_REG(lch)));
++ seq_printf(m, "CICR\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CICR_REG(lch)));
++ seq_printf(m, "CSR\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CSR_REG(lch)));
++ seq_printf(m, "CSSA\t\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_CSSA_REG(lch)));
++ seq_printf(m, "CDSA\t\t: 0x%08x\n",
++ __raw_readl(gdd + SSI_GDD_CDSA_REG(lch)));
++ seq_printf(m, "CEN\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CEN_REG(lch)));
++ seq_printf(m, "CSAC\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CSAC_REG(lch)));
++ seq_printf(m, "CDAC\t\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CDAC_REG(lch)));
++ seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
++ __raw_readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch)));
++ }
++ ssi_clk_disable(ssi);
++
++ return 0;
++}
++
++static int ssi_regs_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, ssi_debug_show, inode->i_private);
++}
++
++static int ssi_port_regs_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, ssi_debug_port_show, inode->i_private);
++}
++
++static int ssi_gdd_regs_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, ssi_debug_gdd_show, inode->i_private);
++}
++
++static const struct file_operations ssi_regs_fops = {
++ .open = ssi_regs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static const struct file_operations ssi_port_regs_fops = {
++ .open = ssi_port_regs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static const struct file_operations ssi_gdd_regs_fops = {
++ .open = ssi_gdd_regs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
++ void *data)
++{
++ struct hsi_port *port = to_hsi_port(omap_port->dev);
++ struct dentry *dir = data;
++
++ dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
++ if (IS_ERR(dir))
++ return PTR_ERR(dir);
++ debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
++
++ return 0;
++}
++
++static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct dentry *dir;
++ int err;
++
++ /* SSI controller */
++ omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL);
++ if (IS_ERR(omap_ssi->dir))
++ return PTR_ERR(omap_ssi->dir);
++
++ debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi,
++ &ssi_regs_fops);
++ /* SSI GDD (DMA) */
++ dir = debugfs_create_dir("gdd", omap_ssi->dir);
++ if (IS_ERR(dir))
++ goto rback;
++ debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops);
++ /* SSI ports */
++ err = ssi_for_each_port(ssi, omap_ssi->dir, ssi_debug_add_port);
++ if (err < 0)
++ goto rback;
++
++ return 0;
++rback:
++ debugfs_remove_recursive(omap_ssi->dir);
++
++ return PTR_ERR(dir);
++}
++
++static void ssi_debug_remove_ctrl(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ debugfs_remove_recursive(omap_ssi->dir);
++}
++#endif /* CONFIG_DEBUG_FS */
++
++static int ssi_claim_lch(struct hsi_msg *msg)
++{
++
++ struct hsi_port *port = hsi_get_port(msg->cl);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ int lch;
++
++ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
++ if (!omap_ssi->gdd_trn[lch].msg) {
++ omap_ssi->gdd_trn[lch].msg = msg;
++ omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
++ return lch;
++ }
++
++ return -EBUSY;
++}
++
++static int ssi_start_pio(struct hsi_msg *msg)
++{
++ struct hsi_port *port = hsi_get_port(msg->cl);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ u32 val;
++
++ ssi_clk_enable(ssi);
++ if (msg->ttype == HSI_MSG_WRITE) {
++ val = SSI_DATAACCEPT(msg->channel);
++ ssi_clk_enable(ssi); /* Hold clocks for pio writes */
++ } else {
++ val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
++ }
++ dev_dbg(&port->device, "Single %s transfer\n",
++ msg->ttype ? "write" : "read");
++ val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ ssi_clk_disable(ssi);
++ msg->actual_len = 0;
++ msg->status = HSI_STATUS_PROCEEDING;
++
++ return 0;
++}
++
++static int ssi_start_dma(struct hsi_msg *msg, int lch)
++{
++ struct hsi_port *port = hsi_get_port(msg->cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long gdd = omap_ssi->gdd;
++ int err;
++ u16 csdp;
++ u16 ccr;
++ u32 s_addr;
++ u32 d_addr;
++ u32 tmp;
++
++ if (msg->ttype == HSI_MSG_READ) {
++ err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
++ DMA_FROM_DEVICE);
++ if (err < 0) {
++ dev_dbg(&ssi->device, "DMA map SG failed !\n");
++ return err;
++ }
++ csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
++ SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
++ SSI_DATA_TYPE_S32;
++ ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
++ ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
++ SSI_CCR_ENABLE;
++ s_addr = omap_port->ssr_dma +
++ SSI_SSR_BUFFER_CH_REG(msg->channel);
++ d_addr = sg_dma_address(msg->sgt.sgl);
++ } else {
++ err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
++ DMA_TO_DEVICE);
++ if (err < 0) {
++ dev_dbg(&ssi->device, "DMA map SG failed !\n");
++ return err;
++ }
++ csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
++ SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
++ SSI_DATA_TYPE_S32;
++ ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
++ ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
++ SSI_CCR_ENABLE;
++ s_addr = sg_dma_address(msg->sgt.sgl);
++ d_addr = omap_port->sst_dma +
++ SSI_SST_BUFFER_CH_REG(msg->channel);
++ }
++ dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x"
++ " d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr);
++ ssi_clk_enable(ssi); /* Hold clocks during the transfer */
++ __raw_writew(csdp, gdd + SSI_GDD_CSDP_REG(lch));
++ __raw_writew(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
++ __raw_writel(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
++ __raw_writel(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
++ __raw_writew(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
++ gdd + SSI_GDD_CEN_REG(lch));
++ tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ tmp |= SSI_GDD_LCH(lch);
++ __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ __raw_writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
++ msg->status = HSI_STATUS_PROCEEDING;
++
++ return 0;
++}
++
++static int ssi_start_transfer(struct list_head *queue)
++{
++ struct hsi_msg *msg;
++ int lch = -1;
++
++ if (list_empty(queue))
++ return 0;
++ msg = list_first_entry(queue, struct hsi_msg, link);
++ if (msg->status != HSI_STATUS_QUEUED)
++ return 0;
++ if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
++ lch = ssi_claim_lch(msg);
++ if (lch >= 0)
++ return ssi_start_dma(msg, lch);
++ else
++ return ssi_start_pio(msg);
++}
++
++/*
++ * FIXME: Horrible HACK needed until we remove the useless wakeline test
++ * in the CMT. To be removed !!!!
++ */
++void ssi_waketest(struct hsi_client *cl, unsigned int enable)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ omap_port->wktest = !!enable;
++ if (omap_port->wktest) {
++ ssi_clk_enable(ssi);
++ __raw_writel(SSI_WAKE(0),
++ omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
++ } else {
++ __raw_writel(SSI_WAKE(0),
++ omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
++ ssi_clk_disable(ssi);
++ }
++}
++EXPORT_SYMBOL_GPL(ssi_waketest);
++
++static void ssi_error(struct hsi_port *port)
++{
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct hsi_msg *msg;
++ unsigned int i;
++ u32 err;
++ u32 val;
++ u32 tmp;
++
++ /* ACK error */
++ err = __raw_readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
++ dev_err(&port->device, "SSI error: 0x%02x\n", err);
++ if (!err) {
++ dev_dbg(&port->device, "spurious SSI error ignored!\n");
++ return;
++ }
++ /* Cancel all GDD read transfers */
++ for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
++ msg = omap_ssi->gdd_trn[i].msg;
++ if ((msg) && (msg->ttype == HSI_MSG_READ)) {
++ __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
++ val |= (1 << i);
++ omap_ssi->gdd_trn[i].msg = NULL;
++ }
++ }
++ tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ tmp &= ~val;
++ __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ /* Cancel all PIO read transfers */
++ tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
++ __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ /* Signal the error to all current pending read requests */
++ for (i = 0; i < omap_port->channels; i++) {
++ if (list_empty(&omap_port->rxqueue[i]))
++ continue;
++ msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
++ link);
++ list_del(&msg->link);
++ msg->status = HSI_STATUS_ERROR;
++ msg->complete(msg);
++ /* Now restart queued reads if any */
++ ssi_start_transfer(&omap_port->rxqueue[i]);
++ }
++ /* ACK error */
++ __raw_writel(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
++}
++
++static void ssi_break_complete(struct hsi_port *port)
++{
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct hsi_msg *msg;
++ struct hsi_msg *tmp;
++ u32 val;
++
++ dev_dbg(&port->device, "HWBREAK received\n");
++
++ spin_lock(&omap_port->lock);
++ val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ val &= ~SSI_BREAKDETECTED;
++ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ __raw_writel(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
++ spin_unlock(&omap_port->lock);
++
++ list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
++ msg->status = HSI_STATUS_COMPLETED;
++ list_del(&msg->link);
++ msg->complete(msg);
++ }
++
++}
++
++static int ssi_async_break(struct hsi_msg *msg)
++{
++ struct hsi_port *port = hsi_get_port(msg->cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ int err = 0;
++ u32 tmp;
++
++ ssi_clk_enable(ssi);
++ if (msg->ttype == HSI_MSG_WRITE) {
++ if (omap_port->sst.mode != SSI_MODE_FRAME) {
++ err = -EINVAL;
++ goto out;
++ }
++ __raw_writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
++ msg->status = HSI_STATUS_COMPLETED;
++ msg->complete(msg);
++ } else {
++ if (omap_port->ssr.mode != SSI_MODE_FRAME) {
++ err = -EINVAL;
++ goto out;
++ }
++ spin_lock_bh(&omap_port->lock);
++ tmp = __raw_readl(omap_ssi->sys +
++ SSI_MPU_ENABLE_REG(port->num, 0));
++ __raw_writel(tmp | SSI_BREAKDETECTED,
++ omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ msg->status = HSI_STATUS_PROCEEDING;
++ list_add_tail(&msg->link, &omap_port->brkqueue);
++ spin_unlock_bh(&omap_port->lock);
++ }
++out:
++ ssi_clk_disable(ssi);
++
++ return err;
++}
++
++static int ssi_async(struct hsi_msg *msg)
++{
++ struct hsi_port *port = hsi_get_port(msg->cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct list_head *queue;
++ int err;
++
++ BUG_ON(!msg);
++
++ if (msg->sgt.nents > 1)
++ return -ENOSYS; /* TODO: Add sg support */
++
++ if (msg->break_frame)
++ return ssi_async_break(msg);
++
++ if (msg->ttype) {
++ BUG_ON(msg->channel >= omap_port->sst.channels);
++ queue = &omap_port->txqueue[msg->channel];
++ } else {
++ BUG_ON(msg->channel >= omap_port->ssr.channels);
++ queue = &omap_port->rxqueue[msg->channel];
++ }
++ msg->status = HSI_STATUS_QUEUED;
++ spin_lock_bh(&omap_port->lock);
++ list_add_tail(&msg->link, queue);
++ err = ssi_start_transfer(queue);
++ spin_unlock_bh(&omap_port->lock);
++
++ dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
++ msg->status, msg->ttype, msg->channel);
++
++ return err;
++}
++
++static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
++{
++ struct list_head *node, *tmp;
++ struct hsi_msg *msg;
++
++ list_for_each_safe(node, tmp, queue) {
++ msg = list_entry(node, struct hsi_msg, link);
++ if ((cl) && (cl != msg->cl))
++ continue;
++ list_del(node);
++ pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
++ msg->channel, msg, msg->sgt.sgl->length,
++ msg->ttype, msg->context);
++ if (msg->destructor)
++ msg->destructor(msg);
++ else
++ hsi_free_msg(msg);
++ }
++}
++
++static u32 ssi_calculate_div(struct hsi_controller *ssi, u32 max_speed)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ u32 tx_fckrate = omap_ssi->fck_rate;
++
++ /* / 2 : SSI TX clock is always half of the SSI functional clock */
++ tx_fckrate >>= 1;
++ /* Round down when tx_fckrate % max_speed == 0 */
++ tx_fckrate--;
++ dev_dbg(&ssi->device, "TX divisor is %d for fck_rate %d speed %d\n",
++ tx_fckrate / max_speed, omap_ssi->fck_rate, max_speed);
++
++ return tx_fckrate / max_speed;
++}
++
++static int ssi_setup(struct hsi_client *cl)
++{
++ struct hsi_port *port = to_hsi_port(cl->device.parent);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ unsigned long sst = omap_port->sst_base;
++ unsigned long ssr = omap_port->ssr_base;
++ u32 div = 0;
++ u32 val;
++ int err = 0;
++
++ ssi_clk_enable(ssi);
++ spin_lock_bh(&omap_port->lock);
++
++ if (cl->tx_cfg.speed)
++ div = ssi_calculate_div(ssi, cl->tx_cfg.speed);
++
++ if (div > SSI_MAX_DIVISOR) {
++ dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
++ cl->tx_cfg.speed, div);
++ err = -EINVAL;
++ goto out;
++ }
++ /* Set TX/RX module to sleep to stop TX/RX during cfg update */
++ __raw_writel(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
++ __raw_writel(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
++ /* Flush posted write */
++ val = __raw_readl(ssr + SSI_SSR_MODE_REG);
++ /* TX */
++ __raw_writel(31, sst + SSI_SST_FRAMESIZE_REG);
++ __raw_writel(div, sst + SSI_SST_DIVISOR_REG);
++ __raw_writel(cl->tx_cfg.channels, sst + SSI_SST_CHANNELS_REG);
++ __raw_writel(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
++ __raw_writel(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
++ /* RX */
++ __raw_writel(31, ssr + SSI_SSR_FRAMESIZE_REG);
++ __raw_writel(cl->rx_cfg.channels, ssr + SSI_SSR_CHANNELS_REG);
++ __raw_writel(0, ssr + SSI_SSR_TIMEOUT_REG);
++ /* Cleanup the break queue if we leave FRAME mode */
++ if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
++ (cl->rx_cfg.mode != SSI_MODE_FRAME))
++ ssi_flush_queue(&omap_port->brkqueue, cl);
++ __raw_writel(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
++ omap_port->channels = max(cl->rx_cfg.channels, cl->tx_cfg.channels);
++ /* Shadow registering for OFF mode */
++ /* SST */
++ omap_port->sst.divisor = div;
++ omap_port->sst.frame_size = 31;
++ omap_port->sst.channels = cl->tx_cfg.channels;
++ omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
++ omap_port->sst.mode = cl->tx_cfg.mode;
++ /* SSR */
++ omap_port->ssr.frame_size = 31;
++ omap_port->ssr.timeout = 0;
++ omap_port->ssr.channels = cl->rx_cfg.channels;
++ omap_port->ssr.mode = cl->rx_cfg.mode;
++out:
++ spin_unlock_bh(&omap_port->lock);
++ ssi_clk_disable(ssi);
++
++ return err;
++}
++
++static void ssi_cleanup_queues(struct hsi_client *cl)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct hsi_msg *msg;
++ unsigned int i;
++ u32 rxbufstate = 0;
++ u32 txbufstate = 0;
++ u32 status = SSI_ERROROCCURED;
++ u32 tmp;
++
++ ssi_flush_queue(&omap_port->brkqueue, cl);
++ if (list_empty(&omap_port->brkqueue))
++ status |= SSI_BREAKDETECTED;
++
++ for (i = 0; i < omap_port->channels; i++) {
++ msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
++ link);
++ if ((msg) && (msg->cl == cl)) {
++ txbufstate |= (1 << i);
++ status |= SSI_DATAACCEPT(i);
++ /* Release the clocks writes, also GDD ones */
++ ssi_clk_disable(ssi);
++ }
++ ssi_flush_queue(&omap_port->txqueue[i], cl);
++ msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
++ link);
++ if ((msg) && (msg->cl == cl)) {
++ rxbufstate |= (1 << i);
++ status |= SSI_DATAAVAILABLE(i);
++ }
++ ssi_flush_queue(&omap_port->rxqueue[i], cl);
++ /* Check if we keep the error detection interrupt armed */
++ if (!list_empty(&omap_port->rxqueue[i]))
++ status &= ~SSI_ERROROCCURED;
++ }
++ /* Cleanup write buffers */
++ tmp = __raw_readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
++ tmp &= ~txbufstate;
++ __raw_writel(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
++ /* Cleanup read buffers */
++ tmp = __raw_readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
++ tmp &= ~rxbufstate;
++ __raw_writel(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
++ /* Disarm and ack pending interrupts */
++ tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ tmp &= ~status;
++ __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ __raw_writel(status, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
++}
++
++static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct hsi_msg *msg;
++ unsigned int i;
++ u32 val = 0;
++ u32 tmp;
++
++ for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
++ msg = omap_ssi->gdd_trn[i].msg;
++ if ((!msg) || (msg->cl != cl))
++ continue;
++ __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
++ val |= (1 << i);
++ /*
++ * Clock references for write will be handled in
++ * ssi_cleanup_queues
++ */
++ if (msg->ttype == HSI_MSG_READ)
++ ssi_clk_disable(ssi);
++ omap_ssi->gdd_trn[i].msg = NULL;
++ }
++ tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ tmp &= ~val;
++ __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
++}
++
++static int ssi_release(struct hsi_client *cl)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++
++ ssi_clk_enable(ssi);
++ spin_lock_bh(&omap_port->lock);
++ /* Stop all the pending DMA requests for that client */
++ ssi_cleanup_gdd(ssi, cl);
++ /* Now cleanup all the queues */
++ ssi_cleanup_queues(cl);
++ spin_unlock_bh(&omap_port->lock);
++ ssi_clk_disable(ssi);
++
++ return 0;
++}
++
++static int ssi_flush(struct hsi_client *cl)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct hsi_msg *msg;
++ unsigned long sst = omap_port->sst_base;
++ unsigned long ssr = omap_port->ssr_base;
++ unsigned int i;
++ u32 err;
++
++ ssi_clk_enable(ssi);
++ spin_lock_bh(&omap_port->lock);
++ /* Stop all DMA transfers */
++ for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
++ msg = omap_ssi->gdd_trn[i].msg;
++ if (!msg || (port != hsi_get_port(msg->cl)))
++ continue;
++ __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
++ if (msg->ttype == HSI_MSG_READ)
++ ssi_clk_disable(ssi);
++ omap_ssi->gdd_trn[i].msg = NULL;
++ }
++ /* Flush all SST buffers */
++ __raw_writel(0, sst + SSI_SST_BUFSTATE_REG);
++ __raw_writel(0, sst + SSI_SST_TXSTATE_REG);
++ /* Flush all SSR buffers */
++ __raw_writel(0, ssr + SSI_SSR_RXSTATE_REG);
++ __raw_writel(0, ssr + SSI_SSR_BUFSTATE_REG);
++ /* Flush all errors */
++ err = __raw_readl(ssr + SSI_SSR_ERROR_REG);
++ __raw_writel(err, ssr + SSI_SSR_ERRORACK_REG);
++ /* Flush break */
++ __raw_writel(0, ssr + SSI_SSR_BREAK_REG);
++ /* Clear interrupts */
++ __raw_writel(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ __raw_writel(0xffffff00,
++ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
++ __raw_writel(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ __raw_writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
++ /* Dequeue all pending requests */
++ for (i = 0; i < omap_port->channels; i++) {
++ /* Release write clocks */
++ if (!list_empty(&omap_port->txqueue[i]))
++ ssi_clk_disable(ssi);
++ ssi_flush_queue(&omap_port->txqueue[i], NULL);
++ ssi_flush_queue(&omap_port->rxqueue[i], NULL);
++ }
++ ssi_flush_queue(&omap_port->brkqueue, NULL);
++ spin_unlock_bh(&omap_port->lock);
++ ssi_clk_disable(ssi);
++
++ return 0;
++}
++
++static int ssi_start_tx(struct hsi_client *cl)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);
++
++ spin_lock_bh(&omap_port->wk_lock);
++ if (omap_port->wk_refcount++) {
++ spin_unlock_bh(&omap_port->wk_lock);
++ return 0;
++ }
++ ssi_clk_enable(ssi); /* Grab clocks */
++ __raw_writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
++ spin_unlock_bh(&omap_port->wk_lock);
++
++ return 0;
++}
++
++static int ssi_stop_tx(struct hsi_client *cl)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);
++
++ spin_lock_bh(&omap_port->wk_lock);
++ BUG_ON(!omap_port->wk_refcount);
++ if (--omap_port->wk_refcount) {
++ spin_unlock_bh(&omap_port->wk_lock);
++ return 0;
++ }
++ __raw_writel(SSI_WAKE(0),
++ omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
++ ssi_clk_disable(ssi); /* Release clocks */
++ spin_unlock_bh(&omap_port->wk_lock);
++
++ return 0;
++}
++
++static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
++{
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct hsi_msg *msg;
++ u32 *buf;
++ u32 val;
++
++ spin_lock(&omap_port->lock);
++ msg = list_first_entry(queue, struct hsi_msg, link);
++ if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
++ msg->actual_len = 0;
++ msg->status = HSI_STATUS_PENDING;
++ }
++ if (msg->status == HSI_STATUS_PROCEEDING) {
++ buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
++ if (msg->ttype == HSI_MSG_WRITE)
++ __raw_writel(*buf, omap_port->sst_base +
++ SSI_SST_BUFFER_CH_REG(msg->channel));
++ else
++ *buf = __raw_readl(omap_port->ssr_base +
++ SSI_SSR_BUFFER_CH_REG(msg->channel));
++ dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
++ msg->ttype, *buf);
++ msg->actual_len += sizeof(*buf);
++ if (msg->actual_len >= msg->sgt.sgl->length)
++ msg->status = HSI_STATUS_COMPLETED;
++ /*
++ * Wait for the last written frame to be really sent before
++ * we call the complete callback
++ */
++ if ((msg->status == HSI_STATUS_PROCEEDING) ||
++ ((msg->status == HSI_STATUS_COMPLETED) &&
++ (msg->ttype == HSI_MSG_WRITE)))
++ goto out;
++
++ }
++ if (msg->status == HSI_STATUS_PROCEEDING)
++ goto out;
++ /* Transfer completed at this point */
++ val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ if (msg->ttype == HSI_MSG_WRITE) {
++ val &= ~SSI_DATAACCEPT(msg->channel);
++ ssi_clk_disable(ssi); /* Release clocks for write transfer */
++ } else {
++ val &= ~SSI_DATAAVAILABLE(msg->channel);
++ }
++ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ list_del(&msg->link);
++ spin_unlock(&omap_port->lock);
++ msg->complete(msg);
++ spin_lock(&omap_port->lock);
++ ssi_start_transfer(queue);
++out:
++ spin_unlock(&omap_port->lock);
++}
++
++static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg;
++ struct hsi_port *port = to_hsi_port(msg->cl->device.parent);
++ unsigned int dir;
++ u32 csr;
++ u32 val;
++
++ spin_lock(&omap_ssi->lock);
++
++ val = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++ val &= ~SSI_GDD_LCH(lch);
++ __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
++
++ if (msg->ttype == HSI_MSG_READ) {
++ dir = DMA_FROM_DEVICE;
++ val = SSI_DATAAVAILABLE(msg->channel);
++ ssi_clk_disable(ssi);
++ } else {
++ dir = DMA_TO_DEVICE;
++ val = SSI_DATAACCEPT(msg->channel);
++ /* Keep clocks reference for write pio event */
++ }
++ dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir);
++ csr = __raw_readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch));
++ omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */
++ if (csr & SSI_CSR_TOUR) { /* Timeout error */
++ msg->status = HSI_STATUS_ERROR;
++ msg->actual_len = 0;
++ list_del(&msg->link); /* Dequeue msg */
++ spin_unlock(&omap_ssi->lock);
++ msg->complete(msg);
++ return;
++ }
++
++ val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
++
++ msg->status = HSI_STATUS_COMPLETED;
++ msg->actual_len = sg_dma_len(msg->sgt.sgl);
++ spin_unlock(&omap_ssi->lock);
++}
++
++static void ssi_gdd_tasklet(unsigned long dev)
++{
++ struct hsi_controller *ssi = (struct hsi_controller *)dev;
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long sys = omap_ssi->sys;
++ unsigned int lch;
++ u32 status_reg;
++
++ ssi_clk_enable(ssi);
++
++ status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
++ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
++ if (status_reg & SSI_GDD_LCH(lch))
++ ssi_gdd_complete(ssi, lch);
++ }
++ __raw_writel(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG);
++ status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
++ ssi_clk_disable(ssi);
++ if (status_reg)
++ tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
++ else
++ enable_irq(omap_ssi->gdd_irq);
++
++}
++
++static irqreturn_t ssi_gdd_isr(int irq, void *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
++ disable_irq_nosync(irq);
++
++ return IRQ_HANDLED;
++}
++
++static void ssi_pio_tasklet(unsigned long ssi_port)
++{
++ struct hsi_port *port = (struct hsi_port *)ssi_port;
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned long sys = omap_ssi->sys;
++ unsigned int ch;
++ u32 status_reg;
++
++ ssi_clk_enable(ssi);
++ status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
++ status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
++
++ for (ch = 0; ch < omap_port->channels; ch++) {
++ if (status_reg & SSI_DATAACCEPT(ch))
++ ssi_pio_complete(port, &omap_port->txqueue[ch]);
++ if (status_reg & SSI_DATAAVAILABLE(ch))
++ ssi_pio_complete(port, &omap_port->rxqueue[ch]);
++ }
++ if (status_reg & SSI_BREAKDETECTED)
++ ssi_break_complete(port);
++ if (status_reg & SSI_ERROROCCURED)
++ ssi_error(port);
++ __raw_writel(status_reg, sys + SSI_MPU_STATUS_REG(port->num, 0));
++
++ status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
++ status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
++ ssi_clk_disable(ssi);
++
++ if (status_reg)
++ tasklet_hi_schedule(&omap_port->pio_tasklet);
++ else
++ enable_irq(omap_port->irq);
++}
++
++static irqreturn_t ssi_pio_isr(int irq, void *port)
++{
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++
++ tasklet_hi_schedule(&omap_port->pio_tasklet);
++ disable_irq_nosync(irq);
++
++ return IRQ_HANDLED;
++}
++
++static void ssi_wake_tasklet(unsigned long ssi_port)
++{
++ struct hsi_port *port = (struct hsi_port *)ssi_port;
++ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ if (ssi_wakein(port)) {
++ /**
++ * We can have a quick High-Low-High transition in the line.
++ * In such a case if we have long interrupt latencies,
++ * we can miss the low event or get twice a high event.
++ * This workaround will avoid breaking the clock reference
++		 * count when such a situation occurs.
++ */
++ if (!omap_port->wkin_cken) {
++ omap_port->wkin_cken = 1;
++ ssi_clk_enable(ssi);
++ }
++ if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
++ __raw_writel(SSI_WAKE(0),
++ omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
++ }
++ hsi_event(port, HSI_EVENT_START_RX);
++ } else {
++ if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
++ __raw_writel(SSI_WAKE(0),
++ omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
++ }
++ hsi_event(port, HSI_EVENT_STOP_RX);
++ if (omap_port->wkin_cken) {
++ ssi_clk_disable(ssi);
++ omap_port->wkin_cken = 0;
++ }
++ }
++}
++
++static irqreturn_t ssi_wake_isr(int irq, void *ssi_port)
++{
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);
++
++ tasklet_hi_schedule(&omap_port->wake_tasklet);
++
++ return IRQ_HANDLED;
++}
++
++static int __init ssi_port_irq(struct hsi_port *port,
++ struct platform_device *pd)
++{
++ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++ struct resource *irq;
++ int err;
++
++ irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 1);
++ if (!irq) {
++ dev_err(&port->device, "Port IRQ resource missing\n");
++ return -ENXIO;
++ }
++ omap_port->irq = irq->start;
++ tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
++ (unsigned long)port);
++ err = devm_request_irq(&pd->dev, omap_port->irq, ssi_pio_isr,
++ IRQF_DISABLED, irq->name, port);
++ if (err < 0)
++ dev_err(&port->device, "Request IRQ %d failed (%d)\n",
++ omap_port->irq, err);
++ return err;
++}
++
++static int __init ssi_wake_irq(struct hsi_port *port,
++				struct platform_device *pd)
++{
++	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
++	struct resource *irq;
++	int err;
++
++	irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 3);
++	if (!irq) {
++		dev_err(&port->device, "Wake in IRQ resource missing\n");
++		return -ENXIO;
++	}
++	if (irq->flags & IORESOURCE_UNSET) {
++		dev_info(&port->device, "No Wake in support\n");
++		omap_port->wake_irq = -1;
++		return 0;
++	}
++	omap_port->wake_irq = irq->start;
++	tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
++							(unsigned long)port);
++	err = devm_request_irq(&pd->dev, omap_port->wake_irq, ssi_wake_isr,
++		IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
++		irq->name, port);
++	if (err < 0) {
++		/* Do not touch the IRQ further if we failed to request it */
++		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", omap_port->wake_irq, err);
++		return err;
++	}
++	err = enable_irq_wake(omap_port->wake_irq);
++	if (err < 0)
++		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n", omap_port->wake_irq, err);
++
++	return err;
++}
++
++static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
++{
++ unsigned int ch;
++
++ for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
++ INIT_LIST_HEAD(&omap_port->txqueue[ch]);
++ INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
++ }
++ INIT_LIST_HEAD(&omap_port->brkqueue);
++}
++
++static int __init ssi_get_iomem(struct platform_device *pd,
++		unsigned int num, unsigned long *base, dma_addr_t *phy)
++{
++	struct resource *mem;
++	struct resource *ioarea;
++
++	mem = platform_get_resource(pd, IORESOURCE_MEM, num);
++	if (!mem) {
++		dev_err(&pd->dev, "IO memory region missing (%d)\n", num);
++		return -ENXIO;
++	}
++	ioarea = devm_request_mem_region(&pd->dev, mem->start,
++					resource_size(mem), dev_name(&pd->dev));
++	if (!ioarea) {
++		dev_err(&pd->dev, "%s IO memory region request failed\n",
++								mem->name);
++		return -ENXIO;
++	}
++	*base = (unsigned long)devm_ioremap(&pd->dev, mem->start,
++							resource_size(mem));
++	/* Check the ioremap result, not the always-valid out pointer */
++	if (!*base) {
++		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
++		return -ENXIO;
++	}
++	if (phy)
++		*phy = mem->start;
++
++	return 0;
++}
++
++static int __init ssi_ports_init(struct hsi_controller *ssi,
++ struct platform_device *pd)
++{
++ struct hsi_port *port;
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ struct omap_ssi_port *omap_port;
++ unsigned int i;
++ int err;
++
++ omap_ssi->port = devm_kzalloc(&pd->dev,
++ sizeof(omap_port) * ssi->num_ports, GFP_KERNEL);
++ if (!omap_ssi->port)
++ return -ENOMEM;
++
++ for (i = 0; i < ssi->num_ports; i++) {
++ port = &ssi->port[i];
++ omap_port = devm_kzalloc(&pd->dev, sizeof(*omap_port),
++ GFP_KERNEL);
++ if (!omap_port)
++ return -ENOMEM;
++ port->async = ssi_async;
++ port->setup = ssi_setup;
++ port->flush = ssi_flush;
++ port->start_tx = ssi_start_tx;
++ port->stop_tx = ssi_stop_tx;
++ port->release = ssi_release;
++ hsi_port_set_drvdata(port, omap_port);
++ /* Get SST base addresses*/
++ err = ssi_get_iomem(pd, ((i * 2) + 2), &omap_port->sst_base,
++ &omap_port->sst_dma);
++ if (err < 0)
++ return err;
++ /* Get SSR base addresses */
++ err = ssi_get_iomem(pd, ((i * 2) + 3), &omap_port->ssr_base,
++ &omap_port->ssr_dma);
++ if (err < 0)
++ return err;
++ err = ssi_port_irq(port, pd);
++ if (err < 0)
++ return err;
++ err = ssi_wake_irq(port, pd);
++ if (err < 0)
++ return err;
++ ssi_queues_init(omap_port);
++ spin_lock_init(&omap_port->lock);
++ spin_lock_init(&omap_port->wk_lock);
++ omap_port->dev = &port->device;
++ omap_ssi->port[i] = omap_port;
++ }
++
++ return 0;
++}
++
++static void ssi_ports_exit(struct hsi_controller *ssi)
++{
++ struct omap_ssi_port *omap_port;
++ unsigned int i;
++
++ for (i = 0; i < ssi->num_ports; i++) {
++ omap_port = hsi_port_drvdata(&ssi->port[i]);
++ WARN_ON(omap_port->wk_refcount != 0);
++ tasklet_kill(&omap_port->wake_tasklet);
++ tasklet_kill(&omap_port->pio_tasklet);
++ }
++}
++
++static int __init ssi_clk_get(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ int err;
++
++ omap_ssi->ick = clk_get(&ssi->device, "ssi_ick");
++ if (IS_ERR(omap_ssi->ick)) {
++ dev_err(&ssi->device, "Interface clock missing\n");
++ return PTR_ERR(omap_ssi->ick);
++ }
++ omap_ssi->fck = clk_get(&ssi->device, "ssi_ssr_fck");
++ if (IS_ERR(omap_ssi->fck)) {
++ dev_err(&ssi->device, "Functional clock missing\n");
++ err = PTR_ERR(omap_ssi->fck);
++ goto out1;
++ }
++ omap_ssi->fck_nb.notifier_call = ssi_clk_event;
++ omap_ssi->fck_nb.priority = INT_MAX;
++ err = clk_notifier_register(omap_ssi->fck, &omap_ssi->fck_nb);
++ if (err < 0) {
++ dev_err(&ssi->device, "Could not register SSI FCK notifier\n");
++ goto out2;
++ }
++
++ return 0;
++out2:
++ clk_put(omap_ssi->fck);
++out1:
++ clk_put(omap_ssi->ick);
++
++ return err;
++}
++
++static void ssi_clk_put(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ WARN_ON(omap_ssi->ck_refcount != 0);
++
++ clk_notifier_unregister(omap_ssi->fck, &omap_ssi->fck_nb);
++ clk_put(omap_ssi->ick);
++ clk_put(omap_ssi->fck);
++}
++
++static int __init ssi_add_controller(struct hsi_controller *ssi,
++ struct platform_device *pd)
++{
++ struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
++ struct omap_ssi_controller *omap_ssi;
++ struct resource *irq;
++ int err;
++
++ omap_ssi = devm_kzalloc(&pd->dev, sizeof(*omap_ssi), GFP_KERNEL);
++ if (!omap_ssi) {
++ dev_err(&pd->dev, "not enough memory for omap ssi\n");
++ return -ENOMEM;
++ }
++ ssi->id = pd->id;
++ ssi->device.parent = &pd->dev;
++ dev_set_name(&ssi->device, "ssi%d", ssi->id);
++ hsi_controller_set_drvdata(ssi, omap_ssi);
++ omap_ssi->dev = &ssi->device;
++ err = ssi_get_iomem(pd, 0, &omap_ssi->sys, NULL);
++ if (err < 0)
++ return err;
++ err = ssi_get_iomem(pd, 1, &omap_ssi->gdd, NULL);
++ if (err < 0)
++ return err;
++ irq = platform_get_resource(pd, IORESOURCE_IRQ, 0);
++ if (!irq) {
++ dev_err(&pd->dev, "GDD IRQ resource missing\n");
++ return -ENXIO;
++ }
++ omap_ssi->gdd_irq = irq->start;
++ tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet,
++ (unsigned long)ssi);
++ err = devm_request_irq(&pd->dev, omap_ssi->gdd_irq, ssi_gdd_isr,
++ IRQF_DISABLED, irq->name, ssi);
++ if (err < 0) {
++ dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)",
++ omap_ssi->gdd_irq, err);
++ return err;
++ }
++ err = ssi_ports_init(ssi, pd);
++ if (err < 0)
++ return err;
++ omap_ssi->get_loss = omap_ssi_pdata->get_dev_context_loss_count;
++ spin_lock_init(&omap_ssi->lock);
++ spin_lock_init(&omap_ssi->ck_lock);
++
++ err = ssi_clk_get(ssi);
++ if (err < 0)
++ return err;
++
++ err = hsi_register_controller(ssi);
++ if (err < 0)
++ ssi_clk_put(ssi);
++
++ return err;
++}
++
++static int __init ssi_hw_init(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++ unsigned int i;
++ u32 val;
++ int err;
++
++ err = ssi_clk_enable(ssi);
++ if (err < 0) {
++ dev_err(&ssi->device, "Failed to enable the clocks %d\n", err);
++ return err;
++ }
++	/* Resetting SSI controller */
++ __raw_writel(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
++ val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
++ for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
++ msleep(10);
++ val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
++ }
++ if (!(val & SSI_RESETDONE)) {
++ dev_err(&ssi->device, "SSI HW reset failed\n");
++ ssi_clk_disable(ssi);
++ return -EIO;
++ }
++	/* Resetting GDD */
++ __raw_writel(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
++ /* Get FCK rate */
++ omap_ssi->fck_rate = (u32)clk_get_rate(omap_ssi->fck) / 1000; /* KHz */
++ dev_dbg(&ssi->device, "SSI fck rate %d KHz\n", omap_ssi->fck_rate);
++ /* Set default PM settings */
++ val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
++ __raw_writel(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
++ omap_ssi->sysconfig = val;
++ __raw_writel(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
++ omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
++ ssi_clk_disable(ssi);
++
++ return 0;
++}
++
++static void ssi_remove_controller(struct hsi_controller *ssi)
++{
++ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
++
++ ssi_ports_exit(ssi);
++ tasklet_kill(&omap_ssi->gdd_tasklet);
++ ssi_clk_put(ssi);
++ hsi_unregister_controller(ssi);
++}
++
++static int __init ssi_probe(struct platform_device *pd)
++{
++ struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
++ struct hsi_controller *ssi;
++ int err;
++
++ if (!omap_ssi_pdata) {
++ dev_err(&pd->dev, "No OMAP SSI platform data\n");
++ return -EINVAL;
++ }
++ ssi = hsi_alloc_controller(omap_ssi_pdata->num_ports, GFP_KERNEL);
++ if (!ssi) {
++ dev_err(&pd->dev, "No memory for controller\n");
++ return -ENOMEM;
++ }
++ platform_set_drvdata(pd, ssi);
++ err = ssi_add_controller(ssi, pd);
++ if (err < 0)
++ goto out1;
++ err = ssi_hw_init(ssi);
++ if (err < 0)
++ goto out2;
++#ifdef CONFIG_DEBUG_FS
++ err = ssi_debug_add_ctrl(ssi);
++ if (err < 0)
++ goto out2;
++#endif
++ return err;
++out2:
++ ssi_remove_controller(ssi);
++out1:
++ platform_set_drvdata(pd, NULL);
++ hsi_free_controller(ssi);
++
++ return err;
++}
++
++static int __exit ssi_remove(struct platform_device *pd)
++{
++ struct hsi_controller *ssi = platform_get_drvdata(pd);
++
++#ifdef CONFIG_DEBUG_FS
++ ssi_debug_remove_ctrl(ssi);
++#endif
++ ssi_remove_controller(ssi);
++ platform_set_drvdata(pd, NULL);
++ hsi_free_controller(ssi);
++
++ return 0;
++}
++
++static struct platform_driver ssi_pdriver = {
++ .remove = __exit_p(ssi_remove),
++ .driver = {
++ .name = "omap_ssi",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init omap_ssi_init(void)
++{
++ pr_info("OMAP SSI hw driver loaded\n");
++ return platform_driver_probe(&ssi_pdriver, ssi_probe);
++}
++module_init(omap_ssi_init);
++
++static void __exit omap_ssi_exit(void)
++{
++ platform_driver_unregister(&ssi_pdriver);
++ pr_info("OMAP SSI driver removed\n");
++}
++module_exit(omap_ssi_exit);
++
++MODULE_ALIAS("platform:omap_ssi");
++MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
++MODULE_DESCRIPTION("Synchronous Serial Interface Driver");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/hsi/hsi.c
+@@ -0,0 +1,515 @@
++/*
++ * hsi.c
++ *
++ * HSI core.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++#include <linux/hsi/hsi.h>
++#include <linux/rwsem.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/kobject.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++
++struct hsi_cl_info {
++ struct list_head list;
++ struct hsi_board_info info;
++};
++
++static LIST_HEAD(hsi_board_list);
++
++static struct device_type hsi_ctrl = {
++ .name = "hsi_controller",
++};
++
++static struct device_type hsi_cl = {
++ .name = "hsi_client",
++};
++
++static struct device_type hsi_port = {
++ .name = "hsi_port",
++};
++
++static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
++ char *buf)
++{
++ return sprintf(buf, "hsi:%s\n", dev_name(dev));
++}
++
++static struct device_attribute hsi_bus_dev_attrs[] = {
++ __ATTR_RO(modalias),
++ __ATTR_NULL,
++};
++
++static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
++
++ return 0;
++}
++
++static int hsi_bus_match(struct device *dev, struct device_driver *driver)
++{
++ return strcmp(dev_name(dev), driver->name) == 0;
++}
++
++static struct bus_type hsi_bus_type = {
++ .name = "hsi",
++ .dev_attrs = hsi_bus_dev_attrs,
++ .match = hsi_bus_match,
++ .uevent = hsi_bus_uevent,
++};
++
++static void hsi_client_release(struct device *dev)
++{
++ kfree(to_hsi_client(dev));
++}
++
++static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
++{
++	struct hsi_client *cl;
++	unsigned long flags;
++
++	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
++	if (!cl)
++		return;
++	cl->device.type = &hsi_cl;
++	cl->tx_cfg = info->tx_cfg;
++	cl->rx_cfg = info->rx_cfg;
++	cl->device.bus = &hsi_bus_type;
++	cl->device.parent = &port->device;
++	cl->device.release = hsi_client_release;
++	/* Never pass external data as a format string */
++	dev_set_name(&cl->device, "%s", info->name);
++	cl->device.platform_data = info->platform_data;
++	if (info->archdata)
++		cl->device.archdata = *info->archdata;
++	if (device_register(&cl->device) < 0) {
++		pr_err("hsi: failed to register client: %s\n", info->name);
++		kfree(cl);
++		return;
++	}
++	spin_lock_irqsave(&port->clock, flags);
++	list_add_tail(&cl->link, &port->clients);
++	spin_unlock_irqrestore(&port->clock, flags);
++}
++
++/**
++ * hsi_register_board_info - Register HSI clients information
++ * @info: Array of HSI clients on the board
++ * @len: Length of the array
++ *
++ * HSI clients are statically declared and registered on board files.
++ *
++ * HSI clients will be automatically registered to the HSI bus once the
++ * controller and the port where the clients wishes to attach are registered
++ * to it.
++ *
++ * Return -errno on failure, 0 on success.
++ */
++int __init hsi_register_board_info(struct hsi_board_info const *info,
++ unsigned int len)
++{
++ struct hsi_cl_info *cl_info;
++
++ cl_info = kzalloc(sizeof(*cl_info) * len, GFP_KERNEL);
++ if (!cl_info)
++ return -ENOMEM;
++
++ for (; len; len--, info++, cl_info++) {
++ cl_info->info = *info;
++ list_add_tail(&cl_info->list, &hsi_board_list);
++ }
++
++ return 0;
++}
++
++static void hsi_scan_board_info(struct hsi_controller *hsi)
++{
++ struct hsi_cl_info *cl_info;
++ struct hsi_port *p;
++
++ list_for_each_entry(cl_info, &hsi_board_list, list)
++ if (cl_info->info.hsi_id == hsi->id) {
++ p = hsi_find_port_num(hsi, cl_info->info.port);
++ if (!p)
++ continue;
++ hsi_new_client(p, &cl_info->info);
++ }
++}
++
++static int hsi_remove_client(struct device *dev, void *data)
++{
++ struct hsi_client *cl = to_hsi_client(dev);
++ struct hsi_port *port = to_hsi_port(dev->parent);
++ unsigned long flags;
++
++ spin_lock_irqsave(&port->clock, flags);
++ list_del(&cl->link);
++ spin_unlock_irqrestore(&port->clock, flags);
++ device_unregister(dev);
++
++ return 0;
++}
++
++static int hsi_remove_port(struct device *dev, void *data)
++{
++ device_for_each_child(dev, NULL, hsi_remove_client);
++ device_unregister(dev);
++
++ return 0;
++}
++
++static void hsi_controller_release(struct device *dev)
++{
++}
++
++static void hsi_port_release(struct device *dev)
++{
++}
++
++/**
++ * hsi_unregister_controller - Unregister an HSI controller
++ * @hsi: The HSI controller to register
++ */
++void hsi_unregister_controller(struct hsi_controller *hsi)
++{
++ device_for_each_child(&hsi->device, NULL, hsi_remove_port);
++ device_unregister(&hsi->device);
++}
++EXPORT_SYMBOL_GPL(hsi_unregister_controller);
++
++/**
++ * hsi_register_controller - Register an HSI controller and its ports
++ * @hsi: The HSI controller to register
++ *
++ * Returns -errno on failure, 0 on success.
++ */
++int hsi_register_controller(struct hsi_controller *hsi)
++{
++ unsigned int i;
++ int err;
++
++ hsi->device.type = &hsi_ctrl;
++ hsi->device.bus = &hsi_bus_type;
++ hsi->device.release = hsi_controller_release;
++ err = device_register(&hsi->device);
++ if (err < 0)
++ return err;
++ for (i = 0; i < hsi->num_ports; i++) {
++ hsi->port[i].device.parent = &hsi->device;
++ hsi->port[i].device.bus = &hsi_bus_type;
++ hsi->port[i].device.release = hsi_port_release;
++ hsi->port[i].device.type = &hsi_port;
++ INIT_LIST_HEAD(&hsi->port[i].clients);
++ spin_lock_init(&hsi->port[i].clock);
++ err = device_register(&hsi->port[i].device);
++ if (err < 0)
++ goto out;
++ }
++ /* Populate HSI bus with HSI clients */
++ hsi_scan_board_info(hsi);
++
++ return 0;
++out:
++ hsi_unregister_controller(hsi);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(hsi_register_controller);
++
++/**
++ * hsi_register_client_driver - Register an HSI client to the HSI bus
++ * @drv: HSI client driver to register
++ *
++ * Returns -errno on failure, 0 on success.
++ */
++int hsi_register_client_driver(struct hsi_client_driver *drv)
++{
++ drv->driver.bus = &hsi_bus_type;
++
++ return driver_register(&drv->driver);
++}
++EXPORT_SYMBOL_GPL(hsi_register_client_driver);
++
++static inline int hsi_dummy_msg(struct hsi_msg *msg)
++{
++ return 0;
++}
++
++static inline int hsi_dummy_cl(struct hsi_client *cl)
++{
++ return 0;
++}
++
++/**
++ * hsi_alloc_controller - Allocate an HSI controller and its ports
++ * @n_ports: Number of ports on the HSI controller
++ * @flags: Kernel allocation flags
++ *
++ * Return NULL on failure or a pointer to an hsi_controller on success.
++ */
++struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
++{
++ struct hsi_controller *hsi;
++ struct hsi_port *port;
++ unsigned int i;
++
++ if (!n_ports)
++ return NULL;
++
++ port = kzalloc(sizeof(*port)*n_ports, flags);
++ if (!port)
++ return NULL;
++ hsi = kzalloc(sizeof(*hsi), flags);
++ if (!hsi)
++ goto out;
++ for (i = 0; i < n_ports; i++) {
++ dev_set_name(&port[i].device, "port%d", i);
++ port[i].num = i;
++ port[i].async = hsi_dummy_msg;
++ port[i].setup = hsi_dummy_cl;
++ port[i].flush = hsi_dummy_cl;
++ port[i].start_tx = hsi_dummy_cl;
++ port[i].stop_tx = hsi_dummy_cl;
++ port[i].release = hsi_dummy_cl;
++ mutex_init(&port[i].lock);
++ }
++ hsi->num_ports = n_ports;
++ hsi->port = port;
++
++ return hsi;
++out:
++ kfree(port);
++
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(hsi_alloc_controller);
++
++/**
++ * hsi_free_controller - Free an HSI controller
++ * @hsi: Pointer to HSI controller
++ */
++void hsi_free_controller(struct hsi_controller *hsi)
++{
++ if (!hsi)
++ return;
++
++ kfree(hsi->port);
++ kfree(hsi);
++}
++EXPORT_SYMBOL_GPL(hsi_free_controller);
++
++/**
++ * hsi_free_msg - Free an HSI message
++ * @msg: Pointer to the HSI message
++ *
++ * Client is responsible to free the buffers pointed by the scatterlists.
++ */
++void hsi_free_msg(struct hsi_msg *msg)
++{
++ if (!msg)
++ return;
++ sg_free_table(&msg->sgt);
++ kfree(msg);
++}
++EXPORT_SYMBOL_GPL(hsi_free_msg);
++
++/**
++ * hsi_alloc_msg - Allocate an HSI message
++ * @nents: Number of memory entries
++ * @flags: Kernel allocation flags
++ *
++ * nents can be 0. This mainly makes sense for read transfer.
++ * In that case, HSI drivers will call the complete callback when
++ * there is data to be read without consuming it.
++ *
++ * Return NULL on failure or a pointer to an hsi_msg on success.
++ */
++struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags)
++{
++ struct hsi_msg *msg;
++ int err;
++
++ msg = kzalloc(sizeof(*msg), flags);
++ if (!msg)
++ return NULL;
++
++ if (!nents)
++ return msg;
++
++ err = sg_alloc_table(&msg->sgt, nents, flags);
++ if (unlikely(err)) {
++ kfree(msg);
++ msg = NULL;
++ }
++
++ return msg;
++}
++EXPORT_SYMBOL_GPL(hsi_alloc_msg);
++
++/**
++ * hsi_async - Submit an HSI transfer to the controller
++ * @cl: HSI client sending the transfer
++ * @msg: The HSI transfer passed to controller
++ *
++ * The HSI message must have the channel, ttype, complete and destructor
++ * fields set beforehand. If nents > 0 then the client has to initialize
++ * also the scatterlists to point to the buffers to write to or read from.
++ *
++ * HSI controllers rely on pre-allocated buffers from their clients and they
++ * do not allocate buffers on their own.
++ *
++ * Once the HSI message transfer finishes, the HSI controller calls the
++ * complete callback with the status and actual_len fields of the HSI message
++ * updated. The complete callback can be called before returning from
++ * hsi_async.
++ *
++ * Returns -errno on failure or 0 on success
++ */
++int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++
++ if (!hsi_port_claimed(cl))
++ return -EACCES;
++
++ WARN_ON_ONCE(!msg->destructor || !msg->complete);
++ msg->cl = cl;
++
++ return port->async(msg);
++}
++EXPORT_SYMBOL_GPL(hsi_async);
++
++/**
++ * hsi_claim_port - Claim the HSI client's port
++ * @cl: HSI client that wants to claim its port
++ * @share: Flag to indicate if the client wants to share the port or not.
++ *
++ * Returns -errno on failure, 0 on success.
++ */
++int hsi_claim_port(struct hsi_client *cl, unsigned int share)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++ int err = 0;
++
++ mutex_lock(&port->lock);
++ if ((port->claimed) && (!port->shared || !share)) {
++ err = -EBUSY;
++ goto out;
++ }
++ port->claimed++;
++ port->shared = !!share;
++ cl->pclaimed = 1;
++out:
++ mutex_unlock(&port->lock);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(hsi_claim_port);
++
++/**
++ * hsi_release_port - Release the HSI client's port
++ * @cl: HSI client which previously claimed its port
++ */
++void hsi_release_port(struct hsi_client *cl)
++{
++ struct hsi_port *port = hsi_get_port(cl);
++
++ /* Allow HW driver to do some cleanup */
++ port->release(cl);
++ mutex_lock(&port->lock);
++ if (cl->pclaimed)
++ port->claimed--;
++ BUG_ON(port->claimed < 0);
++ cl->pclaimed = 0;
++ if (!port->claimed)
++ port->shared = 0;
++ mutex_unlock(&port->lock);
++}
++EXPORT_SYMBOL_GPL(hsi_release_port);
++
++static int hsi_start_rx(struct hsi_client *cl, void *data)
++{
++ if (cl->hsi_start_rx)
++ (*cl->hsi_start_rx)(cl);
++
++ return 0;
++}
++
++static int hsi_stop_rx(struct hsi_client *cl, void *data)
++{
++ if (cl->hsi_stop_rx)
++ (*cl->hsi_stop_rx)(cl);
++
++ return 0;
++}
++
++static int hsi_port_for_each_client(struct hsi_port *port, void *data,
++ int (*fn)(struct hsi_client *cl, void *data))
++{
++ struct hsi_client *cl;
++
++ spin_lock(&port->clock);
++ list_for_each_entry(cl, &port->clients, link) {
++ spin_unlock(&port->clock);
++ (*fn)(cl, data);
++ spin_lock(&port->clock);
++ }
++ spin_unlock(&port->clock);
++
++ return 0;
++}
++
++/**
++ * hsi_event - Notifies clients about port events
++ * @port: Port where the event occurred
++ * @event: The event type
++ *
++ * Clients should not be concerned about wake line behavior. However, due
++ * to a race condition in HSI HW protocol, clients need to be notified
++ * about wake line changes, so they can implement a workaround for it.
++ *
++ * Events:
++ * HSI_EVENT_START_RX - Incoming wake line high
++ * HSI_EVENT_STOP_RX - Incoming wake line down
++ */
++void hsi_event(struct hsi_port *port, unsigned int event)
++{
++ int (*fn)(struct hsi_client *cl, void *data);
++
++ switch (event) {
++ case HSI_EVENT_START_RX:
++ fn = hsi_start_rx;
++ break;
++ case HSI_EVENT_STOP_RX:
++ fn = hsi_stop_rx;
++ break;
++ default:
++ return;
++ }
++ hsi_port_for_each_client(port, NULL, fn);
++}
++EXPORT_SYMBOL_GPL(hsi_event);
++
++static int __init hsi_init(void)
++{
++ return bus_register(&hsi_bus_type);
++}
++postcore_initcall(hsi_init);
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -381,6 +381,7 @@
+ module will be called bh1770glc. If unsure, say N here.
+
+ source "drivers/misc/c2port/Kconfig"
++source "drivers/misc/cmt/Kconfig"
+ source "drivers/misc/eeprom/Kconfig"
+ source "drivers/misc/cb710/Kconfig"
+ source "drivers/misc/iwmc3200top/Kconfig"
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -35,3 +35,4 @@
+ obj-y += cb710/
+ obj-$(CONFIG_X86_MRST) += koski_hwid.o
+ obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
++obj-$(CONFIG_CMT) += cmt/
+--- /dev/null
++++ b/drivers/misc/cmt/Kconfig
+@@ -0,0 +1,9 @@
++#
++# CMT
++#
++config CMT
++ tristate "Enable CMT support"
++ ---help---
++ If you say Y here, you will enable CMT support.
++
++ If unsure, say Y, or else you will not be able to use the CMT.
+--- /dev/null
++++ b/drivers/misc/cmt/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for CMT module
++#
++obj-$(CONFIG_CMT) += cmt.o
++
+--- /dev/null
++++ b/drivers/misc/cmt/cmt.c
+@@ -0,0 +1,223 @@
++/*
++ * cmt.c
++ *
++ * CMT support.
++ *
++ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <asm/atomic.h>
++#include <linux/cmt.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/notifier.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++
++/**
++ * struct cmt_device - CMT device data
++ * @cmt_rst_ind_tasklet: Bottom half for CMT reset line events
++ * @cmt_rst_ind_gpio: GPIO number of the CMT reset line
++ * @n_head: List of notifiers registered to get CMT events
++ * @node: Link on the list of available CMTs
++ * @device: Reference to the CMT platform device
++ */
++struct cmt_device {
++ struct tasklet_struct cmt_rst_ind_tasklet;
++ unsigned int cmt_rst_ind_gpio;
++ struct atomic_notifier_head n_head;
++ struct list_head node;
++ struct device *device;
++};
++
++static LIST_HEAD(cmt_list); /* List of CMT devices */
++
++int cmt_notifier_register(struct cmt_device *cmtdev, struct notifier_block *nb)
++{
++ struct cmt_device *cmt;
++ int err = -ENODEV;
++
++ if ((!cmtdev) || (!nb))
++ return -EINVAL;
++
++ list_for_each_entry(cmt, &cmt_list, node)
++ if (cmt == cmtdev) {
++ err = atomic_notifier_chain_register(&cmt->n_head, nb);
++ break;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(cmt_notifier_register);
++
++int cmt_notifier_unregister(struct cmt_device *cmtdev,
++ struct notifier_block *nb)
++{
++ struct cmt_device *cmt;
++ int err = -ENODEV;
++
++ if ((!cmtdev) || (!nb))
++ return -EINVAL;
++
++ list_for_each_entry(cmt, &cmt_list, node)
++ if (cmt == cmtdev) {
++ err = atomic_notifier_chain_unregister(&cmt->n_head,
++ nb);
++ break;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(cmt_notifier_unregister);
++
++struct cmt_device *cmt_get(const char *name)
++{
++ struct cmt_device *p, *cmt = ERR_PTR(-ENODEV);
++
++ list_for_each_entry(p, &cmt_list, node)
++ if (strcmp(name, dev_name(p->device)) == 0) {
++ cmt = p;
++ break;
++ }
++
++ return cmt;
++}
++EXPORT_SYMBOL_GPL(cmt_get);
++
++void cmt_put(struct cmt_device *cmtdev)
++{
++}
++EXPORT_SYMBOL_GPL(cmt_put);
++
++static void do_cmt_rst_ind_tasklet(unsigned long cmtdev)
++{
++ struct cmt_device *cmt = (struct cmt_device *)cmtdev;
++
++ dev_dbg(cmt->device, "*** CMT rst line change detected (%d) ***\n",
++ gpio_get_value(cmt->cmt_rst_ind_gpio));
++ atomic_notifier_call_chain(&cmt->n_head, CMT_RESET, NULL);
++}
++
++static irqreturn_t cmt_rst_ind_isr(int irq, void *cmtdev)
++{
++ struct cmt_device *cmt = (struct cmt_device *)cmtdev;
++
++ tasklet_schedule(&cmt->cmt_rst_ind_tasklet);
++
++ return IRQ_HANDLED;
++}
++
++static int __init cmt_probe(struct platform_device *pd)
++{
++ struct cmt_platform_data *pdata = pd->dev.platform_data;
++ struct cmt_device *cmt;
++ int irq;
++ int err;
++
++ if (!pdata) {
++ pr_err("CMT: No platform_data found on cmt device\n");
++ return -ENXIO;
++ }
++ cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
++ if (!cmt) {
++ dev_err(&pd->dev, "Could not allocate memory for cmtdev\n");
++ return -ENOMEM;
++ }
++
++ cmt->device = &pd->dev;
++ cmt->cmt_rst_ind_gpio = pdata->cmt_rst_ind_gpio;
++ err = gpio_request(cmt->cmt_rst_ind_gpio, "cmt_rst_ind");
++ if (err < 0) {
++ dev_err(&pd->dev, "Request cmt_rst_ind gpio%d failed\n",
++ cmt->cmt_rst_ind_gpio);
++ goto rback1;
++ }
++ gpio_direction_input(cmt->cmt_rst_ind_gpio);
++ tasklet_init(&cmt->cmt_rst_ind_tasklet, do_cmt_rst_ind_tasklet,
++ (unsigned long)cmt);
++ irq = gpio_to_irq(cmt->cmt_rst_ind_gpio);
++ err = request_irq(irq, cmt_rst_ind_isr,
++ IRQF_DISABLED | IRQF_TRIGGER_RISING, "cmt_rst_ind", cmt);
++ if (err < 0) {
++ dev_err(&pd->dev, "Request cmt_rst_ind irq(%d) failed\n", irq);
++ goto rback2;
++ }
++ enable_irq_wake(irq);
++ ATOMIC_INIT_NOTIFIER_HEAD(&cmt->n_head);
++ list_add(&cmt->node, &cmt_list);
++ platform_set_drvdata(pd, cmt);
++
++ return 0;
++rback2:
++ gpio_free(cmt->cmt_rst_ind_gpio);
++rback1:
++ kfree(cmt);
++
++ return err;
++}
++
++static int __exit cmt_remove(struct platform_device *pd)
++{
++ struct cmt_device *cmt = platform_get_drvdata(pd);
++
++ if (!cmt)
++ return 0;
++ platform_set_drvdata(pd, NULL);
++ list_del(&cmt->node);
++ disable_irq_wake(gpio_to_irq(cmt->cmt_rst_ind_gpio));
++ free_irq(gpio_to_irq(cmt->cmt_rst_ind_gpio), cmt);
++ tasklet_kill(&cmt->cmt_rst_ind_tasklet);
++ gpio_free(cmt->cmt_rst_ind_gpio);
++ kfree(cmt);
++
++ return 0;
++}
++
++static struct platform_driver cmt_driver = {
++ .remove = __exit_p(cmt_remove),
++ .driver = {
++ .name = "cmt",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init cmt_init(void)
++{
++ pr_notice("CMT driver\n");
++
++ return platform_driver_probe(&cmt_driver, cmt_probe);
++}
++module_init(cmt_init);
++
++static void __exit cmt_exit(void)
++{
++ pr_notice("CMT driver exited\n");
++ platform_driver_unregister(&cmt_driver);
++}
++module_exit(cmt_exit);
++
++MODULE_AUTHOR("Carlos Chinea, Nokia");
++MODULE_DESCRIPTION("CMT related support");
++MODULE_LICENSE("GPL");
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -2,6 +2,7 @@
+ header-y += can/
+ header-y += dvb/
+ header-y += hdlc/
++header-y += hsi/
+ header-y += isdn/
+ header-y += nfsd/
+ header-y += raid/
+--- /dev/null
++++ b/include/linux/cmt.h
+@@ -0,0 +1,53 @@
++/*
++ * cmt.h
++ *
++ * CMT support header
++ *
++ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef __NOKIA_CMT_H__
++#define __NOKIA_CMT_H__
++
++#include <linux/notifier.h>
++
++/*
++ * NOKIA CMT notifier events
++ */
++enum {
++ CMT_RESET,
++};
++
++struct cmt_device;
++
++/*
++ * struct cmt_platform_data - CMT platform data
++ * @cmt_rst_ind_gpio: GPIO line number for the CMT reset line
++ */
++struct cmt_platform_data {
++ unsigned int cmt_rst_ind_gpio;
++};
++
++struct cmt_device *cmt_get(const char *name);
++void cmt_put(struct cmt_device *cmt);
++int cmt_notifier_register(struct cmt_device *cmtdev,
++ struct notifier_block *nb);
++int cmt_notifier_unregister(struct cmt_device *cmtdev,
++ struct notifier_block *nb);
++#endif /* __NOKIA_CMT_H__ */
+--- /dev/null
++++ b/include/linux/cs-protocol.h
+@@ -0,0 +1,116 @@
++/*
++ * include/linux/cs-protocol.h - cmt_speech interface definitions
++ *
++ * Implemented by:
++ * - drivers/misc/cmt-speech/
++ *
++ * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
++ * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef _CS_PROTOCOL_H
++#define _CS_PROTOCOL_H
++
++#include <linux/types.h>
++#include <linux/ioctl.h>
++
++/* chardev parameters */
++#define CS_DEV_FILE_NAME "/dev/cmt_speech"
++
++/* user-space API versioning */
++#define CS_IF_VERSION 2
++
++/* APE kernel <-> user space messages */
++#define CS_CMD_SHIFT 28
++#define CS_DOMAIN_SHIFT 24
++
++#define CS_CMD_MASK 0xff000000
++#define CS_PARAM_MASK 0xffffff
++
++#define CS_CMD(id, dom) \
++ (((id) << CS_CMD_SHIFT) | ((dom) << CS_DOMAIN_SHIFT))
++
++#define CS_ERROR CS_CMD(1, 0)
++#define CS_RX_DATA_RECEIVED CS_CMD(2, 0)
++#define CS_TX_DATA_READY CS_CMD(3, 0)
++#define CS_TX_DATA_SENT CS_CMD(4, 0)
++
++/* params to CS_ERROR indication */
++#define CS_ERR_PEER_RESET 0
++
++/* ioctl interface */
++
++/* parameters to CS_CONFIG_BUFS ioctl */
++#define CS_FEAT_TSTAMP_RX_CTRL (1 << 0)
++#define CS_FEAT_ROLLING_RX_COUNTER (2 << 0)
++
++/* parameters to CS_GET_STATE ioctl */
++#define CS_STATE_CLOSED 0
++#define CS_STATE_OPENED 1 /* resource allocated */
++#define CS_STATE_CONFIGURED 2 /* data path active */
++
++/* maximum number of TX/RX buffers */
++#define CS_MAX_BUFFERS_SHIFT 4
++#define CS_MAX_BUFFERS (1 << CS_MAX_BUFFERS_SHIFT)
++
++/* Parameters for setting up the data buffers */
++struct cs_buffer_config {
++ __u32 rx_bufs; /* number of RX buffer slots */
++ __u32 tx_bufs; /* number of TX buffer slots */
++ __u32 buf_size; /* bytes */
++ __u32 flags; /* see CS_FEAT_* */
++ __u32 reserved[4];
++};
++
++/*
++ * Struct describing the layout and contents of the driver mmap area.
++ * This information is meant as read-only information for the application.
++ */
++struct cs_mmap_config_block {
++ __u32 reserved1;
++ __u32 buf_size; /* 0=disabled, otherwise the transfer size */
++ __u32 rx_bufs; /* # of RX buffers */
++ __u32 tx_bufs; /* # of TX buffers */
++ __u32 reserved2;
++ /* array of offsets within the mmap area for each RX and TX buffer */
++ __u32 rx_offsets[CS_MAX_BUFFERS];
++ __u32 tx_offsets[CS_MAX_BUFFERS];
++ __u32 rx_ptr;
++ __u32 rx_ptr_boundary;
++ __u32 reserved3[2];
++ /*
++ * if enabled with CS_FEAT_TSTAMP_RX_CTRL, monotonic
++ * timestamp taken when the last control command was received
++ */
++ struct timespec tstamp_rx_ctrl;
++};
++
++#define CS_IO_MAGIC 'C'
++
++#define CS_IOW(num, dtype) _IOW(CS_IO_MAGIC, num, dtype)
++#define CS_IOR(num, dtype) _IOR(CS_IO_MAGIC, num, dtype)
++#define CS_IOWR(num, dtype) _IOWR(CS_IO_MAGIC, num, dtype)
++#define CS_IO(num) _IO(CS_IO_MAGIC, num)
++
++#define CS_GET_STATE CS_IOR(21, unsigned int)
++#define CS_SET_WAKELINE CS_IOW(23, unsigned int)
++#define CS_GET_IF_VERSION CS_IOR(30, unsigned int)
++#define CS_CONFIG_BUFS CS_IOW(31, struct cs_buffer_config)
++
++#endif /* _CS_PROTOCOL_H */
+--- /dev/null
++++ b/include/linux/hsi/Kbuild
+@@ -0,0 +1 @@
++header-y += hsi_char.h
+--- /dev/null
++++ b/include/linux/hsi/hsi.h
+@@ -0,0 +1,376 @@
++/*
++ * hsi.h
++ *
++ * HSI core header file.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef __LINUX_HSI_H__
++#define __LINUX_HSI_H__
++
++#include <linux/device.h>
++#include <linux/mutex.h>
++#include <linux/scatterlist.h>
++#include <linux/spinlock.h>
++#include <linux/list.h>
++
++/* HSI message ttype */
++#define HSI_MSG_READ 0
++#define HSI_MSG_WRITE 1
++
++/* HSI configuration values */
++#define HSI_MODE_STREAM 1
++#define HSI_MODE_FRAME 2
++#define HSI_FLOW_SYNC 0 /* Synchronized flow */
++#define HSI_FLOW_PIPE 1 /* Pipelined flow */
++#define HSI_ARB_RR 0 /* Round-robin arbitration */
++#define HSI_ARB_PRIO 1 /* Channel priority arbitration */
++
++#define HSI_MAX_CHANNELS 16
++
++/* HSI message status codes */
++enum {
++ HSI_STATUS_COMPLETED, /* Message transfer is completed */
++ HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */
++ HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */
++ HSI_STATUS_QUEUED, /* Message waiting to be served */
++ HSI_STATUS_ERROR, /* Error when message transfer was ongoing */
++};
++
++/* HSI port event codes */
++enum {
++ HSI_EVENT_START_RX,
++ HSI_EVENT_STOP_RX,
++};
++
++/**
++ * struct hsi_config - Configuration for RX/TX HSI modules
++ * @mode: Bit transmission mode (STREAM or FRAME)
++ * @flow: Flow type (SYNCHRONIZED or PIPELINE)
++ * @channels: Number of channels to use [1..16]
++ * @speed: Max bit transmission speed (Kbit/s)
++ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
++ */
++struct hsi_config {
++ unsigned int mode;
++ unsigned int flow;
++ unsigned int channels;
++ unsigned int speed;
++ unsigned int arb_mode; /* TX only */
++};
++
++/**
++ * struct hsi_board_info - HSI client board info
++ * @name: Name for the HSI device
++ * @hsi_id: HSI controller id where the client sits
++ * @port: Port number in the controller where the client sits
++ * @tx_cfg: HSI TX configuration
++ * @rx_cfg: HSI RX configuration
++ * @platform_data: Platform related data
++ * @archdata: Architecture-dependent device data
++ */
++struct hsi_board_info {
++ const char *name;
++ int hsi_id;
++ unsigned int port;
++ struct hsi_config tx_cfg;
++ struct hsi_config rx_cfg;
++ void *platform_data;
++ struct dev_archdata *archdata;
++};
++
++#ifdef CONFIG_HSI
++extern int hsi_register_board_info(struct hsi_board_info const *info,
++ unsigned int len);
++#else
++static inline int hsi_register_board_info(struct hsi_board_info const *info,
++ unsigned int len)
++{
++ return 0;
++}
++#endif
++
++/**
++ * struct hsi_client - HSI client attached to an HSI port
++ * @device: Driver model representation of the device
++ * @tx_cfg: HSI TX configuration
++ * @rx_cfg: HSI RX configuration
++ * @hsi_start_rx: Called after incoming wake line goes high
++ * @hsi_stop_rx: Called after incoming wake line goes low
++ */
++struct hsi_client {
++ struct device device;
++ struct hsi_config tx_cfg;
++ struct hsi_config rx_cfg;
++ void (*hsi_start_rx)(struct hsi_client *cl);
++ void (*hsi_stop_rx)(struct hsi_client *cl);
++ /* private: */
++ unsigned int pclaimed:1;
++ struct list_head link;
++};
++
++#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
++
++static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
++{
++ dev_set_drvdata(&cl->device, data);
++}
++
++static inline void *hsi_client_drvdata(struct hsi_client *cl)
++{
++ return dev_get_drvdata(&cl->device);
++}
++
++/**
++ * struct hsi_client_driver - Driver associated to an HSI client
++ * @driver: Driver model representation of the driver
++ */
++struct hsi_client_driver {
++ struct device_driver driver;
++};
++
++#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
++ driver)
++
++int hsi_register_client_driver(struct hsi_client_driver *drv);
++
++static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
++{
++ driver_unregister(&drv->driver);
++}
++
++/**
++ * struct hsi_msg - HSI message descriptor
++ * @link: Free to use by the current descriptor owner
++ * @cl: HSI device client that issues the transfer
++ * @sgt: Head of the scatterlist array
++ * @context: Client context data associated to the transfer
++ * @complete: Transfer completion callback
++ * @destructor: Destructor to free resources when flushing
++ * @status: Status of the transfer when completed
++ * @actual_len: Actual length of data transferred on completion
++ * @channel: Channel where to TX/RX the message
++ * @ttype: Transfer type (TX if set, RX otherwise)
++ * @break_frame: if true HSI will send/receive a break frame (FRAME MODE)
++ */
++struct hsi_msg {
++ struct list_head link;
++ struct hsi_client *cl;
++ struct sg_table sgt;
++ void *context;
++
++ void (*complete)(struct hsi_msg *msg);
++ void (*destructor)(struct hsi_msg *msg);
++
++ int status;
++ unsigned int actual_len;
++ unsigned int channel;
++ unsigned int ttype:1;
++ unsigned int break_frame:1;
++};
++
++struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
++void hsi_free_msg(struct hsi_msg *msg);
++
++/**
++ * struct hsi_port - HSI port device
++ * @device: Driver model representation of the device
++ * @tx_cfg: Current TX path configuration
++ * @rx_cfg: Current RX path configuration
++ * @num: Port number
++ * @shared: Set when port can be shared by different clients
++ * @claimed: Reference count of clients which claimed the port
++ * @lock: Serialize port claim
++ * @async: Asynchronous transfer callback
++ * @setup: Callback to set the HSI client configuration
++ * @flush: Callback to clean the HW state and destroy all pending transfers
++ * @start_tx: Callback to inform that a client wants to TX data
++ * @stop_tx: Callback to inform that a client no longer wishes to TX data
++ * @release: Callback to inform that a client no longer uses the port
++ * @clients: List of hsi_clients using the port.
++ * @clock: Lock to serialize access to the clients list.
++ */
++struct hsi_port {
++ struct device device;
++ struct hsi_config tx_cfg;
++ struct hsi_config rx_cfg;
++ unsigned int num;
++ unsigned int shared:1;
++ int claimed;
++ struct mutex lock;
++ int (*async)(struct hsi_msg *msg);
++ int (*setup)(struct hsi_client *cl);
++ int (*flush)(struct hsi_client *cl);
++ int (*start_tx)(struct hsi_client *cl);
++ int (*stop_tx)(struct hsi_client *cl);
++ int (*release)(struct hsi_client *cl);
++ struct list_head clients;
++ spinlock_t clock;
++};
++
++#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
++#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
++
++void hsi_event(struct hsi_port *port, unsigned int event);
++int hsi_claim_port(struct hsi_client *cl, unsigned int share);
++void hsi_release_port(struct hsi_client *cl);
++
++static inline int hsi_port_claimed(struct hsi_client *cl)
++{
++ return cl->pclaimed;
++}
++
++static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data)
++{
++ dev_set_drvdata(&port->device, data);
++}
++
++static inline void *hsi_port_drvdata(struct hsi_port *port)
++{
++ return dev_get_drvdata(&port->device);
++}
++
++/**
++ * struct hsi_controller - HSI controller device
++ * @device: Driver model representation of the device
++ * @id: HSI controller ID
++ * @num_ports: Number of ports in the HSI controller
++ * @port: Array of HSI ports
++ */
++struct hsi_controller {
++ struct device device;
++ int id;
++ unsigned int num_ports;
++ struct hsi_port *port;
++};
++
++#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
++
++struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
++void hsi_free_controller(struct hsi_controller *hsi);
++int hsi_register_controller(struct hsi_controller *hsi);
++void hsi_unregister_controller(struct hsi_controller *hsi);
++
++static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi,
++ void *data)
++{
++ dev_set_drvdata(&hsi->device, data);
++}
++
++static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
++{
++ return dev_get_drvdata(&hsi->device);
++}
++
++static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
++ unsigned int num)
++{
++ return (num < hsi->num_ports) ? &hsi->port[num] : NULL;
++}
++
++/*
++ * API for HSI clients
++ */
++int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
++
++/**
++ * hsi_setup - Configure the client's port
++ * @cl: Pointer to the HSI client
++ *
++ * When sharing ports, clients should either rely on a single
++ * client setup or have the same setup for all of them.
++ *
++ * Return -errno on failure, 0 on success
++ */
++static inline int hsi_setup(struct hsi_client *cl)
++{
++ if (!hsi_port_claimed(cl))
++ return -EACCES;
++ return hsi_get_port(cl)->setup(cl);
++}
++
++/**
++ * hsi_flush - Flush all pending transactions on the client's port
++ * @cl: Pointer to the HSI client
++ *
++ * This function will destroy all pending hsi_msg in the port and reset
++ * the HW port so it is ready to receive and transmit from a clean state.
++ *
++ * Return -errno on failure, 0 on success
++ */
++static inline int hsi_flush(struct hsi_client *cl)
++{
++ if (!hsi_port_claimed(cl))
++ return -EACCES;
++ return hsi_get_port(cl)->flush(cl);
++}
++
++/**
++ * hsi_async_read - Submit a read transfer
++ * @cl: Pointer to the HSI client
++ * @msg: HSI message descriptor of the transfer
++ *
++ * Return -errno on failure, 0 on success
++ */
++static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
++{
++ msg->ttype = HSI_MSG_READ;
++ return hsi_async(cl, msg);
++}
++
++/**
++ * hsi_async_write - Submit a write transfer
++ * @cl: Pointer to the HSI client
++ * @msg: HSI message descriptor of the transfer
++ *
++ * Return -errno on failure, 0 on success
++ */
++static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
++{
++ msg->ttype = HSI_MSG_WRITE;
++ return hsi_async(cl, msg);
++}
++
++/**
++ * hsi_start_tx - Signal the port that the client wants to start a TX
++ * @cl: Pointer to the HSI client
++ *
++ * Return -errno on failure, 0 on success
++ */
++static inline int hsi_start_tx(struct hsi_client *cl)
++{
++ if (!hsi_port_claimed(cl))
++ return -EACCES;
++ return hsi_get_port(cl)->start_tx(cl);
++}
++
++/**
++ * hsi_stop_tx - Signal the port that the client no longer wants to transmit
++ * @cl: Pointer to the HSI client
++ *
++ * Return -errno on failure, 0 on success
++ */
++static inline int hsi_stop_tx(struct hsi_client *cl)
++{
++ if (!hsi_port_claimed(cl))
++ return -EACCES;
++ return hsi_get_port(cl)->stop_tx(cl);
++}
++#endif /* __LINUX_HSI_H__ */
+--- /dev/null
++++ b/include/linux/hsi/hsi_char.h
+@@ -0,0 +1,66 @@
++/*
++ * hsi_char.h
++ *
++ * Part of the HSI character device driver.
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Andras Domokos <andras.domokos at nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++
++#ifndef __HSI_CHAR_H
++#define __HSI_CHAR_H
++
++#define HSI_CHAR_MAGIC 'k'
++#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype)
++#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype)
++#define HSC_IOWR(num, dtype) _IOWR(HSI_CHAR_MAGIC, num, dtype)
++#define HSC_IO(num) _IO(HSI_CHAR_MAGIC, num)
++
++#define HSC_RESET HSC_IO(16)
++#define HSC_SET_PM HSC_IO(17)
++#define HSC_SEND_BREAK HSC_IO(18)
++#define HSC_SET_RX HSC_IOW(19, struct hsc_rx_config)
++#define HSC_GET_RX HSC_IOW(20, struct hsc_rx_config)
++#define HSC_SET_TX HSC_IOW(21, struct hsc_tx_config)
++#define HSC_GET_TX HSC_IOW(22, struct hsc_tx_config)
++
++#define HSC_PM_DISABLE 0
++#define HSC_PM_ENABLE 1
++
++#define HSC_MODE_STREAM 1
++#define HSC_MODE_FRAME 2
++#define HSC_FLOW_SYNC 0
++#define HSC_ARB_RR 0
++#define HSC_ARB_PRIO 1
++
++struct hsc_rx_config {
++ uint32_t mode;
++ uint32_t flow;
++ uint32_t channels;
++};
++
++struct hsc_tx_config {
++ uint32_t mode;
++ uint32_t flow;
++ uint32_t channels;
++ uint32_t speed;
++ uint32_t arb_mode;
++};
++
++#endif /* __HSI_CHAR_H */
+--- /dev/null
++++ b/include/linux/hsi/omap_ssi_hack.h
+@@ -0,0 +1,37 @@
++/*
++ * omap_ssi_hack.h
++ *
++ * OMAP SSI HACK header file.
++ *
++ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef __LINUX_OMAP_SSI_HACK_H__
++#define __LINUX_OMAP_SSI_HACK_H__
++
++/*
++ * FIXME: This file is to be removed asap.
++ * This is only use to implement a horrible hack to support the useless
++ * wakeline test until is removed in the CMT
++ */
++#include <linux/hsi/hsi.h>
++
++void ssi_waketest(struct hsi_client *cl, unsigned int enable);
++
++#endif /* __OMAP_SSI_HACK_H__ */
+--- /dev/null
++++ b/include/linux/hsi/ssip_slave.h
+@@ -0,0 +1,38 @@
++/*
++ * ssip_slave.h
++ *
++ * SSIP slave support header file
++ *
++ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
++ *
++ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef __LINUX_SSIP_SLAVE_H__
++#define __LINUX_SSIP_SLAVE_H__
++
++#include <linux/hsi/hsi.h>
++
++static inline void ssip_slave_put_master(struct hsi_client *master)
++{
++}
++
++struct hsi_client *ssip_slave_get_master(struct hsi_client *slave);
++int ssip_slave_start_tx(struct hsi_client *master);
++int ssip_slave_stop_tx(struct hsi_client *master);
++
++#endif /* __LINUX_SSIP_SLAVE_H__ */
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-omap3isp-rx51.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-omap3isp-rx51.patch
new file mode 100644
index 0000000000..5fae8856c7
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-omap3isp-rx51.patch
@@ -0,0 +1,33781 @@
+From 1a96dc43fd0e1dac3fb95ae110a1cd0b0df60a5c Mon Sep 17 00:00:00 2001
+From: Sakari Ailus <sakari.ailus@nokia.com>
+Date: Fri, 31 Oct 2008 10:20:45 +0200
+Subject: [PATCH] omap3isp-rx51 driver
+
+Top commit from git://gitorious.org/maemo-multimedia/omap3isp-rx51.git
+
+rx51: Select VIDEO_MACH_RX51 in MACH_NOKIA_RX51
+
+Commit b2513bdc065358e9a74f170b8554540850d65bf8 ("rx51: Fix Kconfig
+breakage for camera") moved the RX51 video Kconfig option to the IGEP v2
+board instead of the RX51 board. Fix it.
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ Documentation/video4linux/v4l2-framework.txt | 62 +
+ arch/arm/mach-omap2/Kconfig | 10 +
+ arch/arm/mach-omap2/Makefile | 1 +
+ arch/arm/mach-omap2/board-rx51-camera.c | 640 +++++++
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 10 +
+ arch/arm/mach-omap2/devices.c | 46 +-
+ arch/arm/mach-omap2/devices.h | 17 +
+ arch/arm/plat-omap/include/mach/isp_user.h | 639 +++++++
+ arch/arm/plat-omap/include/plat/omap34xx.h | 16 +-
+ arch/arm/plat-omap/iovmm.c | 46 +-
+ arch/arm/plat-omap/omap-pm-noop.c | 2 +-
+ drivers/media/Makefile | 8 +-
+ drivers/media/media-device.c | 330 ++++
+ drivers/media/media-devnode.c | 480 ++++++
+ drivers/media/media-entity.c | 685 ++++++++
+ drivers/media/video/Kconfig | 54 +
+ drivers/media/video/Makefile | 11 +-
+ drivers/media/video/ad5820.c | 485 ++++++
+ drivers/media/video/adp1653.c | 567 +++++++
+ drivers/media/video/et8ek8.c | 1082 ++++++++++++
+ drivers/media/video/et8ek8.h | 79 +
+ drivers/media/video/isp/Makefile | 13 +
+ drivers/media/video/isp/bluegamma_table.h | 1040 ++++++++++++
+ drivers/media/video/isp/cfa_coef_table.h | 603 +++++++
+ drivers/media/video/isp/greengamma_table.h | 1040 ++++++++++++
+ drivers/media/video/isp/isp.c | 1840 +++++++++++++++++++++
+ drivers/media/video/isp/isp.h | 393 +++++
+ drivers/media/video/isp/ispccdc.c | 2292 +++++++++++++++++++++++++
+ drivers/media/video/isp/ispccdc.h | 189 +++
+ drivers/media/video/isp/ispccp2.c | 1127 +++++++++++++
+ drivers/media/video/isp/ispccp2.h | 89 +
+ drivers/media/video/isp/ispcsi2.c | 1232 ++++++++++++++
+ drivers/media/video/isp/ispcsi2.h | 157 ++
+ drivers/media/video/isp/ispcsiphy.c | 245 +++
+ drivers/media/video/isp/ispcsiphy.h | 72 +
+ drivers/media/video/isp/isph3a.h | 111 ++
+ drivers/media/video/isp/isph3a_aewb.c | 351 ++++
+ drivers/media/video/isp/isph3a_af.c | 396 +++++
+ drivers/media/video/isp/isphist.c | 508 ++++++
+ drivers/media/video/isp/isphist.h | 34 +
+ drivers/media/video/isp/isppreview.c | 2295 ++++++++++++++++++++++++++
+ drivers/media/video/isp/isppreview.h | 257 +++
+ drivers/media/video/isp/ispqueue.c | 1077 ++++++++++++
+ drivers/media/video/isp/ispqueue.h | 175 ++
+ drivers/media/video/isp/ispreg.h | 1803 ++++++++++++++++++++
+ drivers/media/video/isp/ispresizer.c | 1732 +++++++++++++++++++
+ drivers/media/video/isp/ispresizer.h | 137 ++
+ drivers/media/video/isp/ispstat.c | 1036 ++++++++++++
+ drivers/media/video/isp/ispstat.h | 163 ++
+ drivers/media/video/isp/ispvideo.c | 1150 +++++++++++++
+ drivers/media/video/isp/ispvideo.h | 144 ++
+ drivers/media/video/isp/luma_enhance_table.h | 144 ++
+ drivers/media/video/isp/noise_filter_table.h | 79 +
+ drivers/media/video/isp/redgamma_table.h | 1040 ++++++++++++
+ drivers/media/video/mt9m001.c | 26 +-
+ drivers/media/video/mt9m111.c | 20 +-
+ drivers/media/video/mt9t031.c | 24 +-
+ drivers/media/video/mt9t112.c | 14 +-
+ drivers/media/video/mt9v022.c | 26 +-
+ drivers/media/video/ov772x.c | 18 +-
+ drivers/media/video/ov9640.c | 12 +-
+ drivers/media/video/rj54n1cb0c.c | 26 +-
+ drivers/media/video/smia-sensor.c | 942 +++++++++++
+ drivers/media/video/smia-sensor.h | 39 +
+ drivers/media/video/smiaregs.c | 734 ++++++++
+ drivers/media/video/soc_camera.c | 2 +-
+ drivers/media/video/soc_mediabus.c | 2 +-
+ drivers/media/video/tw9910.c | 20 +-
+ drivers/media/video/v4l2-common.c | 17 +-
+ drivers/media/video/v4l2-dev.c | 62 +-
+ drivers/media/video/v4l2-device.c | 63 +-
+ drivers/media/video/v4l2-int-device.c | 31 +-
+ drivers/media/video/v4l2-subdev.c | 323 ++++
+ include/linux/Kbuild | 3 +
+ include/linux/i2c/twl.h | 10 +
+ include/linux/media.h | 77 +
+ include/linux/v4l2-mediabus.h | 96 ++
+ include/linux/v4l2-subdev.h | 104 ++
+ include/linux/videodev2.h | 33 +
+ include/media/ad5820.h | 63 +
+ include/media/adp1653.h | 87 +
+ include/media/media-device.h | 74 +
+ include/media/media-devnode.h | 97 ++
+ include/media/media-entity.h | 112 ++
+ include/media/smiaregs.h | 155 ++
+ include/media/soc_mediabus.h | 3 +-
+ include/media/v4l2-chip-ident.h | 12 +
+ include/media/v4l2-common.h | 25 +-
+ include/media/v4l2-dev.h | 24 +-
+ include/media/v4l2-device.h | 2 +
+ include/media/v4l2-int-device.h | 8 +-
+ include/media/v4l2-mediabus.h | 82 -
+ include/media/v4l2-subdev.h | 114 ++-
+ 93 files changed, 31541 insertions(+), 275 deletions(-)
+ create mode 100644 arch/arm/mach-omap2/board-rx51-camera.c
+ create mode 100644 arch/arm/mach-omap2/devices.h
+ create mode 100644 arch/arm/plat-omap/include/mach/isp_user.h
+ create mode 100644 drivers/media/media-device.c
+ create mode 100644 drivers/media/media-devnode.c
+ create mode 100644 drivers/media/media-entity.c
+ create mode 100644 drivers/media/video/ad5820.c
+ create mode 100644 drivers/media/video/adp1653.c
+ create mode 100644 drivers/media/video/et8ek8.c
+ create mode 100644 drivers/media/video/et8ek8.h
+ create mode 100644 drivers/media/video/isp/Makefile
+ create mode 100644 drivers/media/video/isp/bluegamma_table.h
+ create mode 100644 drivers/media/video/isp/cfa_coef_table.h
+ create mode 100644 drivers/media/video/isp/greengamma_table.h
+ create mode 100644 drivers/media/video/isp/isp.c
+ create mode 100644 drivers/media/video/isp/isp.h
+ create mode 100644 drivers/media/video/isp/ispccdc.c
+ create mode 100644 drivers/media/video/isp/ispccdc.h
+ create mode 100644 drivers/media/video/isp/ispccp2.c
+ create mode 100644 drivers/media/video/isp/ispccp2.h
+ create mode 100644 drivers/media/video/isp/ispcsi2.c
+ create mode 100644 drivers/media/video/isp/ispcsi2.h
+ create mode 100644 drivers/media/video/isp/ispcsiphy.c
+ create mode 100644 drivers/media/video/isp/ispcsiphy.h
+ create mode 100644 drivers/media/video/isp/isph3a.h
+ create mode 100644 drivers/media/video/isp/isph3a_aewb.c
+ create mode 100644 drivers/media/video/isp/isph3a_af.c
+ create mode 100644 drivers/media/video/isp/isphist.c
+ create mode 100644 drivers/media/video/isp/isphist.h
+ create mode 100644 drivers/media/video/isp/isppreview.c
+ create mode 100644 drivers/media/video/isp/isppreview.h
+ create mode 100644 drivers/media/video/isp/ispqueue.c
+ create mode 100644 drivers/media/video/isp/ispqueue.h
+ create mode 100644 drivers/media/video/isp/ispreg.h
+ create mode 100644 drivers/media/video/isp/ispresizer.c
+ create mode 100644 drivers/media/video/isp/ispresizer.h
+ create mode 100644 drivers/media/video/isp/ispstat.c
+ create mode 100644 drivers/media/video/isp/ispstat.h
+ create mode 100644 drivers/media/video/isp/ispvideo.c
+ create mode 100644 drivers/media/video/isp/ispvideo.h
+ create mode 100644 drivers/media/video/isp/luma_enhance_table.h
+ create mode 100644 drivers/media/video/isp/noise_filter_table.h
+ create mode 100644 drivers/media/video/isp/redgamma_table.h
+ create mode 100644 drivers/media/video/smia-sensor.c
+ create mode 100644 drivers/media/video/smia-sensor.h
+ create mode 100644 drivers/media/video/smiaregs.c
+ create mode 100644 drivers/media/video/v4l2-subdev.c
+ create mode 100644 include/linux/media.h
+ create mode 100644 include/linux/v4l2-mediabus.h
+ create mode 100644 include/linux/v4l2-subdev.h
+ create mode 100644 include/media/ad5820.h
+ create mode 100644 include/media/adp1653.h
+ create mode 100644 include/media/media-device.h
+ create mode 100644 include/media/media-devnode.h
+ create mode 100644 include/media/media-entity.h
+ create mode 100644 include/media/smiaregs.h
+ delete mode 100644 include/media/v4l2-mediabus.h
+
+diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
+index e831aac..dbcc619 100644
+--- a/Documentation/video4linux/v4l2-framework.txt
++++ b/Documentation/video4linux/v4l2-framework.txt
+@@ -192,6 +192,11 @@ You also need a way to go from the low-level struct to v4l2_subdev. For the
+ common i2c_client struct the i2c_set_clientdata() call is used to store a
+ v4l2_subdev pointer, for other busses you may have to use other methods.
+
++Bridges might also need to store per-subdev private data, such as a pointer to
++bridge-specific per-subdev private data. The v4l2_subdev structure provides
++host private data for that purpose that can be accessed with
++v4l2_get_subdev_hostdata() and v4l2_set_subdev_hostdata().
++
+ From the bridge driver perspective you load the sub-device module and somehow
+ obtain the v4l2_subdev pointer. For i2c devices this is easy: you call
+ i2c_get_clientdata(). For other busses something similar needs to be done.
+@@ -314,6 +319,63 @@ controlled through GPIO pins. This distinction is only relevant when setting
+ up the device, but once the subdev is registered it is completely transparent.
+
+
++V4L2 sub-device userspace API
++-----------------------------
++
++Beside exposing a kernel API through the v4l2_subdev_ops structure, V4L2
++sub-devices can also be controlled directly by userspace applications.
++
++When a sub-device is registered, a device node named v4l-subdevX can be created
++in /dev. If the sub-device supports direct userspace configuration it must set
++the V4L2_SUBDEV_FL_HAS_DEVNODE flag before being registered.
++
++For I2C and SPI sub-devices, the v4l2_device driver can disable registration of
++the device node if it wants to control the sub-device on its own. In that case
++it must set the v4l2_i2c_new_subdev_board or v4l2_spi_new_subdev enable_devnode
++argument to 0. Setting the argument to 1 will only enable device node
++registration if the sub-device driver has set the V4L2_SUBDEV_FL_HAS_DEVNODE
++flag.
++
++The device node handles a subset of the V4L2 API.
++
++VIDIOC_QUERYCTRL
++VIDIOC_QUERYMENU
++VIDIOC_G_CTRL
++VIDIOC_S_CTRL
++VIDIOC_G_EXT_CTRLS
++VIDIOC_S_EXT_CTRLS
++VIDIOC_TRY_EXT_CTRLS
++
++ The controls ioctls are identical to the ones defined in V4L2. They
++ behave identically, with the only exception that they deal only with
++ controls implemented in the sub-device. Depending on the driver, those
++ controls can be also be accessed through one (or several) V4L2 device
++ nodes.
++
++VIDIOC_DQEVENT
++VIDIOC_SUBSCRIBE_EVENT
++VIDIOC_UNSUBSCRIBE_EVENT
++
++ The events ioctls are identical to the ones defined in V4L2. They
++ behave identically, with the only exception that they deal only with
++ events generated by the sub-device. Depending on the driver, those
++ events can also be reported by one (or several) V4L2 device nodes.
++
++ Sub-device drivers that want to use events need to set the
++ V4L2_SUBDEV_USES_EVENTS v4l2_subdev::flags and initialize
++ v4l2_subdev::nevents to events queue depth before registering the
++ sub-device. After registration events can be queued as usual on the
++ v4l2_subdev::devnode device node.
++
++ To properly support events, the poll() file operation is also
++ implemented.
++
++Private ioctls
++
++ All ioctls not in the above list are passed directly to the sub-device
++ driver through the core::ioctl operation.
++
++
+ I2C sub-device drivers
+ ----------------------
+
+diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
+index b31b6f1..c44ce2e 100644
+--- a/arch/arm/mach-omap2/Kconfig
++++ b/arch/arm/mach-omap2/Kconfig
+@@ -120,6 +120,16 @@ config MACH_NOKIA_RX51
+ bool "Nokia RX-51 board"
+ depends on ARCH_OMAP3
+ select OMAP_PACKAGE_CBB
++ select VIDEO_MACH_RX51 if VIDEO_HELPER_CHIPS_AUTO
++
++config VIDEO_MACH_RX51
++ tristate "Nokia RX-51 board camera"
++ depends on MACH_NOKIA_RX51 && VIDEO_DEV && TWL4030_CORE
++ select VIDEO_OMAP3
++ select VIDEO_ET8EK8
++ select VIDEO_AD5820
++ select VIDEO_ADP1653
++ select VIDEO_SMIA_SENSOR
+
+ config MACH_OMAP_ZOOM2
+ bool "OMAP3 Zoom2 board"
+diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
+index b3df2f4..c9c8d30 100644
+--- a/arch/arm/mach-omap2/Makefile
++++ b/arch/arm/mach-omap2/Makefile
+@@ -119,6 +119,7 @@ obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o \
+ hsmmc.o \
+ board-sdp-flash.o
+ obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
++obj-$(CONFIG_VIDEO_MACH_RX51) += board-rx51-camera.o
+ obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \
+ board-rx51-sdram.o \
+ board-rx51-peripherals.o \
+diff --git a/arch/arm/mach-omap2/board-rx51-camera.c b/arch/arm/mach-omap2/board-rx51-camera.c
+new file mode 100644
+index 0000000..5e741a2
+--- /dev/null
++++ b/arch/arm/mach-omap2/board-rx51-camera.c
+@@ -0,0 +1,640 @@
++/*
++ * arch/arm/mach-omap2/board-rx51-camera.c
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/i2c.h>
++#include <linux/i2c/twl.h>
++#include <linux/delay.h>
++#include <linux/mm.h>
++#include <linux/platform_device.h>
++#include <linux/videodev2.h>
++
++#include <asm/gpio.h>
++#include <plat/control.h>
++
++#include "../../../drivers/media/video/isp/isp.h"
++#include "../../../drivers/media/video/isp/ispreg.h"
++#include "../../../drivers/media/video/et8ek8.h"
++#include "../../../drivers/media/video/smia-sensor.h"
++
++#include <media/ad5820.h>
++#include <media/adp1653.h>
++#include <media/smiaregs.h>
++
++#include "devices.h"
++
++#define ADP1653_GPIO_ENABLE 88 /* Used for resetting ADP1653 */
++#define ADP1653_GPIO_INT 167 /* Fault interrupt */
++#define ADP1653_GPIO_STROBE 126 /* Pin used in cam_strobe mode ->
++ * control using ISP drivers */
++
++#define STINGRAY_RESET_GPIO 102
++#define ACMELITE_RESET_GPIO 97 /* Used also to MUX between cameras */
++
++#define RX51_CAMERA_STINGRAY 0
++#define RX51_CAMERA_ACMELITE 1
++
++#define RX51_SENSOR 1
++#define RX51_LENS 2
++
++#define GPIO_DIR_OUTPUT 0
++
++/*
++ *
++ * Power control
++ *
++ */
++
++/* Assign camera to peripheral power group P3 */
++#define CAMERA_DEV_GRP (0x4 << 5)
++#define VAUX2_1V8 0x05
++#define VAUX3_1V8 0x01
++#define VAUX4_2V8 0x09
++
++/* Earlier rx51 builds require VAUX3. */
++#define NEEDS_VAUX3 (system_rev >= 0x100 && system_rev < 0x900)
++
++static struct rx51_camera {
++ int okay;
++ int inuse;
++} rx51_camera[2];
++
++static DEFINE_MUTEX(rx51_camera_mutex);
++
++/* Acquires the given slave `which' for camera if possible.
++ * Returns the bitmask containing previously acquired slaves for the device.
++ */
++static int rx51_camera_acquire_slave(int camera, int which)
++{
++ int other = 1 - camera;
++ int old_which;
++
++ if (!rx51_camera[camera].okay)
++ return -EINVAL;
++
++ if (rx51_camera[other].inuse)
++ return -EBUSY;
++
++ old_which = rx51_camera[camera].inuse;
++ rx51_camera[camera].inuse |= which;
++
++ return old_which;
++}
++
++/* Releases the given slave `which' for camera.
++ * Returns the bitmask containing still acquired slaves for the device.
++ */
++static int rx51_camera_release_slave(int camera, int which)
++{
++ rx51_camera[camera].inuse &= ~which;
++
++ return rx51_camera[camera].inuse;
++}
++
++static int rx51_camera_power_on_nolock(int camera)
++{
++ int rval;
++
++ /* Reset Stingray */
++ gpio_set_value(STINGRAY_RESET_GPIO, 0);
++
++ /* Mux to Stingray and reset Acme Lite */
++ gpio_set_value(ACMELITE_RESET_GPIO, 0);
++
++ /* VAUX2=1.8 V (muxer voltage) */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ VAUX2_1V8, TWL4030_VAUX2_DEDICATED);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ CAMERA_DEV_GRP, TWL4030_VAUX2_DEV_GRP);
++ if (rval)
++ goto out;
++
++ /* Off & sleep -> Active state */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0xEE, TWL4030_VAUX2_REMAP);
++ if (rval)
++ goto out;
++
++ /* VAUX4=2.8 V (camera VANA) */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ VAUX4_2V8, TWL4030_VAUX4_DEDICATED);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ CAMERA_DEV_GRP, TWL4030_VAUX4_DEV_GRP);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0xEE, TWL4030_VAUX4_REMAP);
++ if (rval)
++ goto out;
++
++ if (NEEDS_VAUX3) {
++ /* VAUX3=1.8 V (camera VDIG) */
++ printk(KERN_INFO "%s: VAUX3 on for old board\n", __func__);
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ VAUX3_1V8,
++ TWL4030_VAUX3_DEDICATED);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ CAMERA_DEV_GRP,
++ TWL4030_VAUX3_DEV_GRP);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0xEE, TWL4030_VAUX3_REMAP);
++ if (rval)
++ goto out;
++ }
++
++ /* Let the voltages stabilize */
++ udelay(15);
++
++ /* XSHUTDOWN on, enable camera and set muxer */
++ gpio_set_value(camera == RX51_CAMERA_STINGRAY ?
++ STINGRAY_RESET_GPIO : ACMELITE_RESET_GPIO, 1);
++
++ /* CONTROL_CSIRXFE */
++ omap_writel(
++ /*
++ * CSIb receiver data/clock or data/strobe mode
++ *
++ * Stingray uses data/strobe.
++ */
++ ((camera ? 0 : 1) << 10)
++ | BIT(12) /* Enable differential transceiver */
++ | BIT(13) /* Disable reset */
++ , OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE);
++
++ /* Let the voltages stabilize */
++ udelay(15);
++
++ return 0;
++
++out:
++ printk(KERN_ALERT "%s: Error %d in writing to TWL4030!\n", __func__,
++ rval);
++
++ return rval;
++}
++
++static int rx51_camera_power_on(int camera, int which)
++{
++ int rval;
++
++ mutex_lock(&rx51_camera_mutex);
++
++ rval = rx51_camera_acquire_slave(camera, which);
++
++ if (!rval)
++ rval = rx51_camera_power_on_nolock(camera);
++ else if (rval > 0)
++ rval = 0;
++
++ mutex_unlock(&rx51_camera_mutex);
++
++ if (rval < 0)
++ printk(KERN_INFO "%s: power_on camera %d which %d failed\n",
++ __func__, camera, which);
++
++ return rval;
++}
++
++static void rx51_camera_power_off_nolock(int camera)
++{
++ int rval;
++
++ /* Reset cameras */
++ gpio_set_value(STINGRAY_RESET_GPIO, 0);
++ gpio_set_value(ACMELITE_RESET_GPIO, 0);
++
++ /* VAUX2 (muxer voltage) off */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0, TWL4030_VAUX2_DEV_GRP);
++ if (rval)
++ goto out;
++ /* Off & sleep -> Off state */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0x00, TWL4030_VAUX2_REMAP);
++ if (rval)
++ goto out;
++
++ /* VAUX4 (camera VANA) off */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0, TWL4030_VAUX4_DEV_GRP);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0x00, TWL4030_VAUX4_REMAP);
++ if (rval)
++ goto out;
++
++ if (NEEDS_VAUX3) {
++ printk(KERN_INFO "%s: VAUX3 off for old board\n", __func__);
++ /* VAUX3 (camera VDIG) off */
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0, TWL4030_VAUX3_DEV_GRP);
++ if (rval)
++ goto out;
++ rval = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
++ 0x00, TWL4030_VAUX3_REMAP);
++ if (rval)
++ goto out;
++ }
++
++ return;
++
++out:
++ printk(KERN_ALERT "%s: Error %d in writing to TWL4030!\n", __func__,
++ rval);
++}
++
++static void rx51_camera_power_off(int camera, int which)
++{
++ int rval;
++
++ mutex_lock(&rx51_camera_mutex);
++
++ rval = rx51_camera_release_slave(camera, which);
++ if (!rval)
++ rx51_camera_power_off_nolock(camera);
++
++ mutex_unlock(&rx51_camera_mutex);
++}
++
++static void __init rx51_stingray_init(void)
++{
++ if (gpio_request(STINGRAY_RESET_GPIO, "stingray reset") != 0) {
++ printk(KERN_INFO "%s: unable to acquire Stingray reset gpio\n",
++ __FUNCTION__);
++ return;
++ }
++
++ /* XSHUTDOWN off, reset */
++ gpio_direction_output(STINGRAY_RESET_GPIO, 0);
++ rx51_camera_power_off_nolock(RX51_CAMERA_STINGRAY);
++ rx51_camera[RX51_CAMERA_STINGRAY].okay = 1;
++ rx51_camera[RX51_CAMERA_STINGRAY].inuse = 0;
++}
++
++static void __init rx51_acmelite_init(void)
++{
++ if (gpio_request(ACMELITE_RESET_GPIO, "acmelite reset") != 0) {
++ printk(KERN_INFO "%s: unable to acquire Acme Lite reset gpio\n",
++ __FUNCTION__);
++ return;
++ }
++
++ /* XSHUTDOWN off, reset */
++ gpio_direction_output(ACMELITE_RESET_GPIO, 0);
++ rx51_camera_power_off_nolock(RX51_CAMERA_ACMELITE);
++ rx51_camera[RX51_CAMERA_ACMELITE].okay = 1;
++ rx51_camera[RX51_CAMERA_ACMELITE].inuse = 0;
++}
++
++static int __init rx51_adp1653_init(void)
++{
++ int err;
++
++ err = gpio_request(ADP1653_GPIO_ENABLE, "adp1653 enable");
++ if (err) {
++ printk(KERN_ERR ADP1653_NAME
++ " Failed to request EN gpio\n");
++ err = -ENODEV;
++ goto err_omap_request_gpio;
++ }
++
++ err = gpio_request(ADP1653_GPIO_INT, "adp1653 interrupt");
++ if (err) {
++ printk(KERN_ERR ADP1653_NAME " Failed to request IRQ gpio\n");
++ err = -ENODEV;
++ goto err_omap_request_gpio_2;
++ }
++
++ err = gpio_request(ADP1653_GPIO_STROBE, "adp1653 strobe");
++ if (err) {
++ printk(KERN_ERR ADP1653_NAME
++ " Failed to request STROBE gpio\n");
++ err = -ENODEV;
++ goto err_omap_request_gpio_3;
++ }
++
++ gpio_direction_output(ADP1653_GPIO_ENABLE, 0);
++ gpio_direction_input(ADP1653_GPIO_INT);
++ gpio_direction_output(ADP1653_GPIO_STROBE, 0);
++
++ return 0;
++
++err_omap_request_gpio_3:
++ gpio_free(ADP1653_GPIO_INT);
++
++err_omap_request_gpio_2:
++ gpio_free(ADP1653_GPIO_ENABLE);
++
++err_omap_request_gpio:
++ return err;
++}
++
++static int __init rx51_camera_hw_init(void)
++{
++ int rval;
++
++ rval = rx51_adp1653_init();
++ if (rval)
++ return rval;
++
++ mutex_init(&rx51_camera_mutex);
++ rx51_stingray_init();
++ rx51_acmelite_init();
++
++ return 0;
++}
++
++/*
++ *
++ * Stingray
++ *
++ */
++
++#define STINGRAY_XCLK ISP_XCLK_A
++
++static int rx51_stingray_configure_interface(struct v4l2_subdev *subdev,
++ struct smia_mode *mode)
++{
++ struct isp_device *isp = v4l2_dev_to_isp_device(subdev->v4l2_dev);
++ static const int S = 8;
++ unsigned int pixel_clock;
++
++ /* Calculate average pixel clock per line. Assume buffers can spread
++ * the data over horizontal blanking time. Rounding upwards. */
++ pixel_clock =
++ mode->window_width
++ * (((mode->pixel_clock + (1<<S) - 1) >> S) + mode->width - 1)
++ / mode->width;
++ pixel_clock <<= S;
++ isp_set_pixel_clock(isp, pixel_clock);
++
++ return 0;
++}
++
++static int rx51_stingray_set_xclk(struct v4l2_subdev *subdev, int hz)
++{
++ struct isp_device *isp = v4l2_dev_to_isp_device(subdev->v4l2_dev);
++
++ isp_set_xclk(isp, hz, STINGRAY_XCLK);
++
++ return 0;
++}
++
++static int rx51_stingray_s_power(struct v4l2_subdev *subdev, int on)
++{
++ if (on)
++ return rx51_camera_power_on(RX51_CAMERA_STINGRAY, RX51_SENSOR);
++ else
++ rx51_camera_power_off(RX51_CAMERA_STINGRAY, RX51_SENSOR);
++
++ return 0;
++}
++
++static struct et8ek8_platform_data rx51_et8ek8_platform_data = {
++ .configure_interface = rx51_stingray_configure_interface,
++ .set_xclk = rx51_stingray_set_xclk,
++ .s_power = rx51_stingray_s_power,
++};
++
++/*
++ *
++ * AD5820
++ *
++ */
++
++static int ad5820_s_power(struct v4l2_subdev *subdev, int on)
++{
++ if (on)
++ return rx51_camera_power_on(RX51_CAMERA_STINGRAY, RX51_LENS);
++ else
++ rx51_camera_power_off(RX51_CAMERA_STINGRAY, RX51_LENS);
++
++ return 0;
++}
++
++static struct ad5820_platform_data rx51_ad5820_platform_data = {
++ .s_power = ad5820_s_power,
++
++};
++
++/*
++ *
++ * ADP1653
++ *
++ */
++
++static int rx51_adp1653_power(struct v4l2_subdev *subdev, int on)
++{
++ gpio_set_value(ADP1653_GPIO_ENABLE, on);
++ if (on) {
++ /* Some delay is apparently required. */
++ udelay(20);
++ }
++
++ return 0;
++}
++
++static struct adp1653_platform_data rx51_adp1653_platform_data = {
++ .power = rx51_adp1653_power,
++ /* Must be limited to 500 ms in RX-51 */
++ .max_flash_timeout = 500000, /* us */
++ /* Must be limited to 320 mA in RX-51 B3 and newer hardware */
++ .max_flash_intensity = 19,
++ /* Must be limited to 50 mA in RX-51 */
++ .max_torch_intensity = 1,
++ .max_indicator_intensity = ADP1653_REG_OUT_SEL_ILED_MAX,
++};
++
++/*
++ *
++ * Acmelite
++ *
++ */
++
++#define ACMELITE_XCLK ISP_XCLK_A
++
++static int rx51_acmelite_configure_interface(struct v4l2_subdev *subdev,
++ int width, int height)
++{
++ struct isp_device *isp = v4l2_dev_to_isp_device(subdev->v4l2_dev);
++
++ isp_set_pixel_clock(isp, 0);
++
++ return 0;
++}
++
++static int rx51_acmelite_set_xclk(struct v4l2_subdev *subdev, int hz)
++{
++ struct isp_device *isp = v4l2_dev_to_isp_device(subdev->v4l2_dev);
++
++ isp_set_xclk(isp, hz, ACMELITE_XCLK);
++
++ return 0;
++}
++
++static int rx51_acmelite_set_power(struct v4l2_subdev *subdev, int on)
++{
++ if (on)
++ return rx51_camera_power_on(RX51_CAMERA_ACMELITE, RX51_SENSOR);
++ else
++ rx51_camera_power_off(RX51_CAMERA_ACMELITE, RX51_SENSOR);
++
++ return 0;
++}
++
++static struct smia_sensor_platform_data rx51_smia_sensor_platform_data = {
++ .configure_interface = rx51_acmelite_configure_interface,
++ .set_xclk = rx51_acmelite_set_xclk,
++ .set_power = rx51_acmelite_set_power,
++};
++
++/*
++ *
++ * Init it all
++ *
++ */
++
++#ifdef CONFIG_VIDEO_MACH_RX51_OLD_I2C
++#define ET8EK8_I2C_BUS_NUM 2
++#define AD5820_I2C_BUS_NUM 2
++#define ADP1653_I2C_BUS_NUM 3
++#define SMIA_SENSOR_I2C_BUS_NUM 2
++#else /* CONFIG_VIDEO_MACH_RX51_OLD_I2C */
++#define ET8EK8_I2C_BUS_NUM 3
++#define AD5820_I2C_BUS_NUM 3
++#define ADP1653_I2C_BUS_NUM 2
++#define SMIA_SENSOR_I2C_BUS_NUM 2
++#endif
++
++static struct i2c_board_info rx51_camera_i2c_devices[] = {
++ {
++ I2C_BOARD_INFO(ET8EK8_NAME, ET8EK8_I2C_ADDR),
++ .platform_data = &rx51_et8ek8_platform_data,
++ },
++ {
++ I2C_BOARD_INFO(AD5820_NAME, AD5820_I2C_ADDR),
++ .platform_data = &rx51_ad5820_platform_data,
++ },
++ {
++ I2C_BOARD_INFO(ADP1653_NAME, ADP1653_I2C_ADDR),
++ .platform_data = &rx51_adp1653_platform_data,
++ },
++ {
++ I2C_BOARD_INFO(SMIA_SENSOR_NAME, SMIA_SENSOR_I2C_ADDR),
++ .platform_data = &rx51_smia_sensor_platform_data,
++ },
++};
++
++static struct v4l2_subdev_i2c_board_info rx51_camera_primary_subdevs[] = {
++ {
++ .board_info = &rx51_camera_i2c_devices[0],
++ .i2c_adapter_id = ET8EK8_I2C_BUS_NUM,
++ .module_name = "et8ek8",
++ },
++ {
++ .board_info = &rx51_camera_i2c_devices[1],
++ .i2c_adapter_id = AD5820_I2C_BUS_NUM,
++ .module_name = "ad5820",
++ },
++ {
++ .board_info = &rx51_camera_i2c_devices[2],
++ .i2c_adapter_id = ADP1653_I2C_BUS_NUM,
++ .module_name = "adp1653",
++ },
++ { NULL, 0, NULL, },
++};
++
++static struct v4l2_subdev_i2c_board_info rx51_camera_secondary_subdevs[] = {
++ {
++ .board_info = &rx51_camera_i2c_devices[3],
++ .i2c_adapter_id = SMIA_SENSOR_I2C_BUS_NUM,
++ .module_name = "smia-sensor",
++ },
++ { NULL, 0, NULL, },
++};
++
++static struct isp_v4l2_subdevs_group rx51_camera_subdevs[] = {
++ {
++ .subdevs = rx51_camera_primary_subdevs,
++ .interface = ISP_INTERFACE_CCP2B_PHY1,
++ .bus = { .ccp2 = {
++ .strobe_clk_pol = 0,
++ .crc = 1,
++ .ccp2_mode = 1,
++ .phy_layer = 1,
++ .vpclk_div = 1,
++ } },
++ },
++ {
++ .subdevs = rx51_camera_secondary_subdevs,
++ .interface = ISP_INTERFACE_CCP2B_PHY1,
++ .bus = { .ccp2 = {
++ .strobe_clk_pol = 0,
++ .crc = 1,
++ .ccp2_mode = 1,
++ .phy_layer = 1,
++ .vpclk_div = 1,
++ } },
++ },
++ { NULL, 0, },
++};
++
++static struct isp_platform_data rx51_isp_platform_data = {
++ .subdevs = rx51_camera_subdevs,
++};
++
++static int __init rx51_camera_init(void)
++{
++ int err;
++
++ err = rx51_camera_hw_init();
++ if (err)
++ return err;
++
++ omap3isp_device.dev.platform_data = &rx51_isp_platform_data;
++
++ return platform_device_register(&omap3isp_device);
++}
++
++static void __exit rx51_camera_exit(void)
++{
++ platform_device_unregister(&omap3isp_device);
++
++ gpio_free(ADP1653_GPIO_ENABLE);
++ gpio_free(ADP1653_GPIO_INT);
++ gpio_free(ADP1653_GPIO_STROBE);
++}
++
++module_init(rx51_camera_init);
++module_exit(rx51_camera_exit);
++
++MODULE_LICENSE("GPL");
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 6d6e213..c431d47 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -552,6 +552,14 @@ static struct regulator_init_data rx51_vaux1 = {
+ .consumer_supplies = rx51_vaux1_consumers,
+ };
+
++static struct regulator_consumer_supply rx51_vaux2_consumers[] = {
++ REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */
++ REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */
++ {
++ .supply = "vaux2",
++ },
++};
++
+ static struct regulator_init_data rx51_vaux2 = {
+ .constraints = {
+ .name = "VCSI",
+@@ -562,6 +570,8 @@ static struct regulator_init_data rx51_vaux2 = {
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
++ .num_consumer_supplies = ARRAY_SIZE(rx51_vaux2_consumers),
++ .consumer_supplies = rx51_vaux2_consumers,
+ };
+
+ /* VAUX3 - adds more power to VIO_18 rail */
+diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
+index 03e6c9e..61e5136 100644
+--- a/arch/arm/mach-omap2/devices.c
++++ b/arch/arm/mach-omap2/devices.c
+@@ -32,6 +32,8 @@
+
+ #include "mux.h"
+
++#include "devices.h"
++
+ #if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
+
+ static struct resource cam_resources[] = {
+@@ -107,13 +109,33 @@ static struct resource omap3isp_resources[] = {
+ .flags = IORESOURCE_MEM,
+ },
+ {
+- .start = OMAP3430_ISP_CSI2A_BASE,
+- .end = OMAP3430_ISP_CSI2A_END,
++ .start = OMAP3430_ISP_CSI2A_REGS1_BASE,
++ .end = OMAP3430_ISP_CSI2A_REGS1_END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = OMAP3430_ISP_CSIPHY2_BASE,
++ .end = OMAP3430_ISP_CSIPHY2_END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = OMAP3630_ISP_CSI2A_REGS2_BASE,
++ .end = OMAP3630_ISP_CSI2A_REGS2_END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = OMAP3630_ISP_CSI2C_REGS1_BASE,
++ .end = OMAP3630_ISP_CSI2C_REGS1_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+- .start = OMAP3430_ISP_CSI2PHY_BASE,
+- .end = OMAP3430_ISP_CSI2PHY_END,
++ .start = OMAP3630_ISP_CSIPHY1_BASE,
++ .end = OMAP3630_ISP_CSIPHY1_END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = OMAP3630_ISP_CSI2C_REGS2_BASE,
++ .end = OMAP3630_ISP_CSI2C_REGS2_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+@@ -122,16 +144,28 @@ static struct resource omap3isp_resources[] = {
+ }
+ };
+
+-static struct platform_device omap3isp_device = {
++static void omap3isp_release(struct device *dev)
++{
++ /* Zero the device structure to avoid re-initialization complaints from
++ * kobject when the device will be re-registered.
++ */
++ memset(dev, 0, sizeof(*dev));
++ dev->release = omap3isp_release;
++}
++
++struct platform_device omap3isp_device = {
+ .name = "omap3isp",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(omap3isp_resources),
+ .resource = omap3isp_resources,
++ .dev = {
++ .release = omap3isp_release,
++ },
+ };
++EXPORT_SYMBOL_GPL(omap3isp_device);
+
+ static inline void omap_init_camera(void)
+ {
+- platform_device_register(&omap3isp_device);
+ }
+ #else
+ static inline void omap_init_camera(void)
+diff --git a/arch/arm/mach-omap2/devices.h b/arch/arm/mach-omap2/devices.h
+new file mode 100644
+index 0000000..f312d49
+--- /dev/null
++++ b/arch/arm/mach-omap2/devices.h
+@@ -0,0 +1,17 @@
++/*
++ * arch/arm/mach-omap2/devices.h
++ *
++ * OMAP2 platform device setup/initialization
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP_DEVICES_H
++#define __ARCH_ARM_MACH_OMAP_DEVICES_H
++
++extern struct platform_device omap3isp_device;
++
++#endif
+diff --git a/arch/arm/plat-omap/include/mach/isp_user.h b/arch/arm/plat-omap/include/mach/isp_user.h
+new file mode 100644
+index 0000000..7d1eb47
+--- /dev/null
++++ b/arch/arm/plat-omap/include/mach/isp_user.h
+@@ -0,0 +1,639 @@
++/*
++ * isp_user.h
++ *
++ * Include file for OMAP ISP module in TI's OMAP3.
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Mohit Jalori <mjalori@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_USER_H
++#define OMAP_ISP_USER_H
++
++#include <linux/types.h>
++
++/* ISP Private IOCTLs */
++#define VIDIOC_PRIVATE_ISP_CCDC_CFG \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct ispccdc_update_config)
++#define VIDIOC_PRIVATE_ISP_PRV_CFG \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct ispprv_update_config)
++#define VIDIOC_PRIVATE_ISP_AEWB_CFG \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 3, struct isph3a_aewb_config)
++#define VIDIOC_PRIVATE_ISP_HIST_CFG \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct isphist_config)
++#define VIDIOC_PRIVATE_ISP_AF_CFG \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct isph3a_af_config)
++#define VIDIOC_PRIVATE_ISP_STAT_REQ \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct ispstat_data)
++#define VIDIOC_PRIVATE_ISP_STAT_EN \
++ _IOWR('V', BASE_VIDIOC_PRIVATE + 7, unsigned long)
++
++/* Events */
++
++#define V4L2_EVENT_OMAP3ISP_CLASS (V4L2_EVENT_PRIVATE_START | 0x100)
++#define V4L2_EVENT_OMAP3ISP_AEWB (V4L2_EVENT_OMAP3ISP_CLASS | 0x1)
++#define V4L2_EVENT_OMAP3ISP_AF (V4L2_EVENT_OMAP3ISP_CLASS | 0x2)
++#define V4L2_EVENT_OMAP3ISP_HIST (V4L2_EVENT_OMAP3ISP_CLASS | 0x3)
++#define V4L2_EVENT_OMAP3ISP_HS_VS (V4L2_EVENT_OMAP3ISP_CLASS | 0x4)
++
++struct ispstat_event_status {
++ __u32 frame_number;
++ __u16 config_counter;
++ __u8 buf_err;
++};
++
++/* AE/AWB related structures and flags*/
++
++/* Flags for update field */
++#define REQUEST_STATISTICS (1 << 0)
++#define SET_COLOR_GAINS (1 << 1)
++#define SET_DIGITAL_GAIN (1 << 2)
++#define SET_EXPOSURE (1 << 3)
++#define SET_ANALOG_GAIN (1 << 4)
++
++/* H3A Range Constants */
++#define AEWB_MAX_SATURATION_LIM 1023
++#define AEWB_MIN_WIN_H 2
++#define AEWB_MAX_WIN_H 256
++#define AEWB_MIN_WIN_W 6
++#define AEWB_MAX_WIN_W 256
++#define AEWB_MIN_WINVC 1
++#define AEWB_MIN_WINHC 1
++#define AEWB_MAX_WINVC 128
++#define AEWB_MAX_WINHC 36
++#define AEWB_MAX_WINSTART 4095
++#define AEWB_MIN_SUB_INC 2
++#define AEWB_MAX_SUB_INC 32
++#define AEWB_MAX_BUF_SIZE 83600
++
++#define AF_IIRSH_MIN 0
++#define AF_IIRSH_MAX 4095
++#define AF_PAXEL_HORIZONTAL_COUNT_MIN 1
++#define AF_PAXEL_HORIZONTAL_COUNT_MAX 36
++#define AF_PAXEL_VERTICAL_COUNT_MIN 1
++#define AF_PAXEL_VERTICAL_COUNT_MAX 128
++#define AF_PAXEL_INCREMENT_MIN 2
++#define AF_PAXEL_INCREMENT_MAX 32
++#define AF_PAXEL_HEIGHT_MIN 2
++#define AF_PAXEL_HEIGHT_MAX 256
++#define AF_PAXEL_WIDTH_MIN 16
++#define AF_PAXEL_WIDTH_MAX 256
++#define AF_PAXEL_HZSTART_MIN 1
++#define AF_PAXEL_HZSTART_MAX 4095
++#define AF_PAXEL_VTSTART_MIN 0
++#define AF_PAXEL_VTSTART_MAX 4095
++#define AF_THRESHOLD_MAX 255
++#define AF_COEF_MAX 4095
++#define AF_PAXEL_SIZE 48
++#define AF_MAX_BUF_SIZE 221184
++
++/**
++ * struct isph3a_aewb_config - AE AWB configuration reset values.
++ * @saturation_limit: Saturation limit.
++ * @win_height: Window Height. Range 2 - 256, even values only.
++ * @win_width: Window Width. Range 6 - 256, even values only.
++ * @ver_win_count: Vertical Window Count. Range 1 - 128.
++ * @hor_win_count: Horizontal Window Count. Range 1 - 36.
++ * @ver_win_start: Vertical Window Start. Range 0 - 4095.
++ * @hor_win_start: Horizontal Window Start. Range 0 - 4095.
++ * @blk_ver_win_start: Black Vertical Windows Start. Range 0 - 4095.
++ * @blk_win_height: Black Window Height. Range 2 - 256, even values only.
++ * @subsample_ver_inc: Subsample Vertical points increment Range 2 - 32, even
++ * values only.
++ * @subsample_hor_inc: Subsample Horizontal points increment Range 2 - 32, even
++ * values only.
++ * @alaw_enable: AEW ALAW EN flag.
++ * @aewb_enable: AE AWB stats generation EN flag.
++ */
++struct isph3a_aewb_config {
++ /*
++ * Common fields.
++ * They should be the first ones and must be in the same order as in
++ * ispstat_generic_config struct.
++ */
++ __u32 buf_size;
++ __u16 config_counter;
++
++ /* Private fields */
++ __u16 saturation_limit;
++ __u16 win_height;
++ __u16 win_width;
++ __u16 ver_win_count;
++ __u16 hor_win_count;
++ __u16 ver_win_start;
++ __u16 hor_win_start;
++ __u16 blk_ver_win_start;
++ __u16 blk_win_height;
++ __u16 subsample_ver_inc;
++ __u16 subsample_hor_inc;
++ __u8 alaw_enable;
++};
++
++/**
++ * struct ispstat_data - Structure of statistics data sent to or received from user
++ * @buf: Pointer to pass to user.
++ * @frame_number: Frame number of requested stats.
++ * @cur_frame: Current frame number being processed.
++ * @buf_size: Buffer size requested and returned.
++ * @ts: Timestamp of returned framestats.
++ */
++struct ispstat_data {
++ struct timeval ts;
++ void __user *buf;
++ __u32 buf_size;
++ __u16 frame_number;
++ __u16 cur_frame;
++ __u16 config_counter;
++ __u16 new_bufs; /* Deprecated */
++};
++
++
++/* Histogram related structs */
++
++/* Flags for number of bins */
++#define HIST_BINS_32 0
++#define HIST_BINS_64 1
++#define HIST_BINS_128 2
++#define HIST_BINS_256 3
++
++/* Number of bins * 4 colors * 4-bytes word */
++#define HIST_MEM_SIZE_BINS(n) ((1 << ((n)+5))*4*4)
++
++#define HIST_MEM_SIZE 1024
++#define HIST_MIN_REGIONS 1
++#define HIST_MAX_REGIONS 4
++#define HIST_MAX_WB_GAIN 255
++#define HIST_MIN_WB_GAIN 0
++#define HIST_MAX_BIT_WIDTH 14
++#define HIST_MIN_BIT_WIDTH 8
++#define HIST_MAX_WG 4
++#define HIST_MAX_BUF_SIZE 4096
++
++/* Source */
++#define HIST_SOURCE_CCDC 0
++#define HIST_SOURCE_MEM 1
++
++/* CFA pattern */
++#define HIST_CFA_BAYER 0
++#define HIST_CFA_FOVEONX3 1
++
++struct isphist_region {
++ __u16 h_start;
++ __u16 h_end;
++ __u16 v_start;
++ __u16 v_end;
++};
++
++struct isphist_config {
++ /*
++ * Common fields.
++ * They should be the first ones and must be in the same order as in
++ * ispstat_generic_config struct.
++ */
++ __u32 buf_size;
++ __u16 config_counter;
++
++ __u8 num_acc_frames; /* Num of image frames to be processed and
++ accumulated for each histogram frame */
++ __u16 hist_bins; /* number of bins: 32, 64, 128, or 256 */
++ __u8 cfa; /* BAYER or FOVEON X3 */
++ __u8 wg[HIST_MAX_WG]; /* White Balance Gain */
++ __u8 num_regions; /* number of regions to be configured */
++ struct isphist_region region[HIST_MAX_REGIONS];
++};
++
++/* Auto Focus related structs */
++
++#define AF_NUM_COEF 11
++
++enum isph3a_af_fvmode {
++ AF_MODE_SUMMED = 0,
++ AF_MODE_PEAK = 1
++};
++
++/* Red, Green, and blue pixel location in the AF windows */
++enum isph3a_af_rgbpos {
++ AF_GR_GB_BAYER = 0, /* GR and GB as Bayer pattern */
++ AF_RG_GB_BAYER = 1, /* RG and GB as Bayer pattern */
++ AF_GR_BG_BAYER = 2, /* GR and BG as Bayer pattern */
++ AF_RG_BG_BAYER = 3, /* RG and BG as Bayer pattern */
++ AF_GG_RB_CUSTOM = 4, /* GG and RB as custom pattern */
++ AF_RB_GG_CUSTOM = 5 /* RB and GG as custom pattern */
++};
++
++/* Contains the information regarding the Horizontal Median Filter */
++struct isph3a_af_hmf {
++ __u8 enable; /* Status of Horizontal Median Filter */
++ __u8 threshold; /* Threshold Value for Horizontal Median Filter */
++};
++
++/* Contains the information regarding the IIR Filters */
++struct isph3a_af_iir {
++ __u16 h_start; /* IIR horizontal start */
++ __u16 coeff_set0[AF_NUM_COEF]; /* IIR Filter coefficient for set 0 */
++ __u16 coeff_set1[AF_NUM_COEF]; /* IIR Filter coefficient for set 1 */
++};
++
++/* Contains the information regarding the Paxels Structure in AF Engine */
++struct isph3a_af_paxel {
++ __u16 h_start; /* Horizontal Start Position */
++ __u16 v_start; /* Vertical Start Position */
++ __u8 width; /* Width of the Paxel */
++ __u8 height; /* Height of the Paxel */
++ __u8 h_cnt; /* Horizontal Count */
++ __u8 v_cnt; /* Vertical Count */
++ __u8 line_inc; /* Line Increment */
++};
++
++/* Contains the parameters required for hardware set up of AF Engine */
++struct isph3a_af_config {
++ /*
++ * Common fields.
++ * They should be the first ones and must be in the same order as in
++ * ispstat_generic_config struct.
++ */
++ __u32 buf_size;
++ __u16 config_counter;
++
++ struct isph3a_af_hmf hmf; /*HMF configurations */
++ struct isph3a_af_iir iir; /*IIR filter configurations */
++ struct isph3a_af_paxel paxel; /*Paxel parameters */
++ enum isph3a_af_rgbpos rgb_pos; /*RGB Positions */
++ enum isph3a_af_fvmode fvmode; /*Accumulator mode */
++ __u8 alaw_enable; /*AF ALAW status */
++};
++
++/* ISP CCDC structs */
++
++/* Abstraction layer CCDC configurations */
++#define ISP_ABS_CCDC_ALAW (1 << 0)
++#define ISP_ABS_CCDC_LPF (1 << 1)
++#define ISP_ABS_CCDC_BLCLAMP (1 << 2)
++#define ISP_ABS_CCDC_BCOMP (1 << 3)
++#define ISP_ABS_CCDC_FPC (1 << 4)
++#define ISP_ABS_CCDC_CULL (1 << 5)
++#define ISP_ABS_CCDC_COLPTN (1 << 6)
++#define ISP_ABS_CCDC_CONFIG_LSC (1 << 7)
++#define ISP_ABS_TBL_LSC (1 << 8)
++
++#define RGB_MAX 3
++
++/* Enumeration constants for Alaw input width */
++enum alaw_ipwidth {
++ ALAW_BIT12_3 = 0x3,
++ ALAW_BIT11_2 = 0x4,
++ ALAW_BIT10_1 = 0x5,
++ ALAW_BIT9_0 = 0x6
++};
++
++/* Enumeration constants for Video Port */
++enum vpin {
++ BIT12_3 = 3,
++ BIT11_2 = 4,
++ BIT10_1 = 5,
++ BIT9_0 = 6
++};
++
++/**
++ * struct ispccdc_lsc_config - Structure for LSC configuration.
++ * @offset: Table Offset of the gain table.
++ * @gain_mode_n: Vertical dimension of a paxel in LSC configuration.
++ * @gain_mode_m: Horizontal dimension of a paxel in LSC configuration.
++ * @gain_format: Gain table format.
++ * @fmtsph: Start pixel horizontal from start of the HS sync pulse.
++ * @fmtlnh: Number of pixels in horizontal direction to use for the data
++ * reformatter.
++ * @fmtslv: Start line from start of VS sync pulse for the data reformatter.
++ * @fmtlnv: Number of lines in vertical direction for the data reformatter.
++ * @initial_x: X position, in pixels, of the first active pixel in reference
++ * to the first active paxel. Must be an even number.
++ * @initial_y: Y position, in pixels, of the first active pixel in reference
++ * to the first active paxel. Must be an even number.
++ * @size: Size of LSC gain table. Filled when loaded from userspace.
++ */
++struct ispccdc_lsc_config {
++ __u16 offset;
++ __u8 gain_mode_n;
++ __u8 gain_mode_m;
++ __u8 gain_format;
++ __u16 fmtsph;
++ __u16 fmtlnh;
++ __u16 fmtslv;
++ __u16 fmtlnv;
++ __u8 initial_x;
++ __u8 initial_y;
++ __u32 size;
++};
++
++/**
++ * struct ispccdc_bclamp - Structure for Optical & Digital black clamp subtract
++ * @obgain: Optical black average gain.
++ * @obstpixel: Start Pixel w.r.t. HS pulse in Optical black sample.
++ * @oblines: Optical Black Sample lines.
++ * @oblen: Optical Black Sample Length.
++ * @dcsubval: Digital Black Clamp subtract value.
++ */
++struct ispccdc_bclamp {
++ __u8 obgain;
++ __u8 obstpixel;
++ __u8 oblines;
++ __u8 oblen;
++ __u16 dcsubval;
++};
++
++/**
++ * ispccdc_fpc - Structure for FPC
++ * @fpnum: Number of faulty pixels to be corrected in the frame.
++ * @fpcaddr: Memory address of the FPC Table
++ */
++struct ispccdc_fpc {
++ __u16 fpnum;
++ __u32 fpcaddr;
++};
++
++/**
++ * ispccdc_blcomp - Structure for Black Level Compensation parameters.
++ * @b_mg: B/Mg pixels. 2's complement. -128 to +127.
++ * @gb_g: Gb/G pixels. 2's complement. -128 to +127.
++ * @gr_cy: Gr/Cy pixels. 2's complement. -128 to +127.
++ * @r_ye: R/Ye pixels. 2's complement. -128 to +127.
++ */
++struct ispccdc_blcomp {
++ __u8 b_mg;
++ __u8 gb_g;
++ __u8 gr_cy;
++ __u8 r_ye;
++};
++
++/**
++ * ispccdc_culling - Structure for Culling parameters.
++ * @v_pattern: Vertical culling pattern.
++ * @h_odd: Horizontal Culling pattern for odd lines.
++ * @h_even: Horizontal Culling pattern for even lines.
++ */
++struct ispccdc_culling {
++ __u8 v_pattern;
++ __u16 h_odd;
++ __u16 h_even;
++};
++
++/**
++ * ispccdc_update_config - Structure for CCDC configuration.
++ * @update: Specifies which CCDC registers should be updated.
++ * @flag: Specifies which CCDC functions should be enabled.
++ * @alawip: Enable/Disable A-Law compression.
++ * @bclamp: Black clamp control register.
++ * @blcomp: Black level compensation value for RGrGbB Pixels. 2's complement.
++ * @fpc: Number of faulty pixels corrected in the frame, address of FPC table.
++ * @cull: Cull control register.
++ * @colptn: Color pattern of the sensor.
++ * @lsc: Pointer to LSC gain table.
++ */
++struct ispccdc_update_config {
++ __u16 update;
++ __u16 flag;
++ enum alaw_ipwidth alawip;
++ struct ispccdc_bclamp __user *bclamp;
++ struct ispccdc_blcomp __user *blcomp;
++ struct ispccdc_fpc __user *fpc;
++ struct ispccdc_lsc_config __user *lsc_cfg;
++ struct ispccdc_culling __user *cull;
++ __u32 colptn;
++ __u8 __user *lsc;
++};
++
++/* Preview configurations */
++#define ISP_PREV_LUMAENH (1 << 0)
++#define ISP_PREV_INVALAW (1 << 1)
++#define ISP_PREV_HRZ_MED (1 << 2)
++#define ISP_PREV_CFA (1 << 3)
++#define ISP_PREV_CHROMA_SUPP (1 << 4)
++#define ISP_PREV_WB (1 << 5)
++#define ISP_PREV_BLKADJ (1 << 6)
++#define ISP_PREV_RGB2RGB (1 << 7)
++#define ISP_PREV_COLOR_CONV (1 << 8)
++#define ISP_PREV_YC_LIMIT (1 << 9)
++#define ISP_PREV_DEFECT_COR (1 << 10)
++#define ISP_PREV_GAMMABYPASS (1 << 11)
++#define ISP_PREV_DRK_FRM_CAPTURE (1 << 12)
++#define ISP_PREV_DRK_FRM_SUBTRACT (1 << 13)
++#define ISP_PREV_LENS_SHADING (1 << 14)
++#define ISP_PREV_NF (1 << 15)
++#define ISP_PREV_GAMMA (1 << 16)
++
++#define ISPPRV_NF_TBL_SIZE 64
++#define ISPPRV_CFA_TBL_SIZE 576
++#define ISPPRV_GAMMA_TBL_SIZE 1024
++#define ISPPRV_YENH_TBL_SIZE 128
++
++#define ISPPRV_DETECT_CORRECT_CHANNELS 4
++
++/**
++ * struct ispprev_hmed - Structure for Horizontal Median Filter.
++ * @odddist: Distance between consecutive pixels of same color in the odd line.
++ * @evendist: Distance between consecutive pixels of same color in the even
++ * line.
++ * @thres: Horizontal median filter threshold.
++ */
++struct ispprev_hmed {
++ __u8 odddist;
++ __u8 evendist;
++ __u8 thres;
++};
++
++/*
++ * Enumeration for CFA Formats supported by preview
++ */
++enum cfa_fmt {
++ CFAFMT_BAYER, CFAFMT_SONYVGA, CFAFMT_RGBFOVEON,
++ CFAFMT_DNSPL, CFAFMT_HONEYCOMB, CFAFMT_RRGGBBFOVEON
++};
++
++/**
++ * struct ispprev_cfa - Structure for CFA Interpolation.
++ * @format: CFA Format Enum value supported by preview.
++ * @gradthrs_vert: CFA Gradient Threshold - Vertical.
++ * @gradthrs_horz: CFA Gradient Threshold - Horizontal.
++ * @table: Pointer to the CFA table.
++ */
++struct ispprev_cfa {
++ enum cfa_fmt format;
++ __u8 gradthrs_vert;
++ __u8 gradthrs_horz;
++ __u32 table[ISPPRV_CFA_TBL_SIZE];
++};
++
++/**
++ * struct ispprev_csup - Structure for Chrominance Suppression.
++ * @gain: Gain.
++ * @thres: Threshold.
++ * @hypf_en: Flag to enable/disable the High Pass Filter.
++ */
++struct ispprev_csup {
++ __u8 gain;
++ __u8 thres;
++ __u8 hypf_en;
++};
++
++/**
++ * struct ispprev_wbal - Structure for White Balance.
++ * @dgain: Digital gain (U10Q8).
++ * @coef3: White balance gain - COEF 3 (U8Q5).
++ * @coef2: White balance gain - COEF 2 (U8Q5).
++ * @coef1: White balance gain - COEF 1 (U8Q5).
++ * @coef0: White balance gain - COEF 0 (U8Q5).
++ */
++struct ispprev_wbal {
++ __u16 dgain;
++ __u8 coef3;
++ __u8 coef2;
++ __u8 coef1;
++ __u8 coef0;
++};
++
++/**
++ * struct ispprev_blkadj - Structure for Black Adjustment.
++ * @red: Black level offset adjustment for Red in 2's complement format
++ * @green: Black level offset adjustment for Green in 2's complement format
++ * @blue: Black level offset adjustment for Blue in 2's complement format
++ */
++struct ispprev_blkadj {
++ /*Black level offset adjustment for Red in 2's complement format */
++ __u8 red;
++ /*Black level offset adjustment for Green in 2's complement format */
++ __u8 green;
++ /* Black level offset adjustment for Blue in 2's complement format */
++ __u8 blue;
++};
++
++/**
++ * struct ispprev_rgbtorgb - Structure for RGB to RGB Blending.
++ * @matrix: Blending values(S12Q8 format)
++ * [RR] [GR] [BR]
++ * [RG] [GG] [BG]
++ * [RB] [GB] [BB]
++ * @offset: Blending offset value for R,G,B in 2's complement integer format.
++ */
++struct ispprev_rgbtorgb {
++ __u16 matrix[RGB_MAX][RGB_MAX];
++ __u16 offset[RGB_MAX];
++};
++
++/**
++ * struct ispprev_csc - Structure for Color Space Conversion from RGB-YCbYCr
++ * @matrix: Color space conversion coefficients(S10Q8)
++ * [CSCRY] [CSCGY] [CSCBY]
++ * [CSCRCB] [CSCGCB] [CSCBCB]
++ * [CSCRCR] [CSCGCR] [CSCBCR]
++ * @offset: CSC offset values for Y offset, CB offset and CR offset respectively
++ */
++struct ispprev_csc {
++ __u16 matrix[RGB_MAX][RGB_MAX];
++ __s16 offset[RGB_MAX];
++};
++
++/**
++ * struct ispprev_yclimit - Structure for Y, C Value Limit.
++ * @minC: Minimum C value
++ * @maxC: Maximum C value
++ * @minY: Minimum Y value
++ * @maxY: Maximum Y value
++ */
++struct ispprev_yclimit {
++ __u8 minC;
++ __u8 maxC;
++ __u8 minY;
++ __u8 maxY;
++};
++
++/**
++ * struct ispprev_dcor - Structure for Defect correction.
++ * @couplet_mode_en: Flag to enable or disable the couplet dc Correction in NF
++ * @detect_correct: Thresholds for correction bit 0:10 detect 16:25 correct
++ */
++struct ispprev_dcor {
++ __u8 couplet_mode_en;
++ __u32 detect_correct[ISPPRV_DETECT_CORRECT_CHANNELS];
++};
++
++/**
++ * struct ispprev_nf - Structure for Noise Filter
++ * @spread: Spread value to be used in Noise Filter
++ * @table: Pointer to the Noise Filter table
++ */
++struct ispprev_nf {
++ __u8 spread;
++ __u32 table[ISPPRV_NF_TBL_SIZE];
++};
++
++/**
++ * struct ispprev_gtables - Structure for gamma correction tables.
++ * @red: Array for red gamma table.
++ * @green: Array for green gamma table.
++ * @blue: Array for blue gamma table.
++ */
++struct ispprev_gtables {
++ __u32 red[ISPPRV_GAMMA_TBL_SIZE];
++ __u32 green[ISPPRV_GAMMA_TBL_SIZE];
++ __u32 blue[ISPPRV_GAMMA_TBL_SIZE];
++};
++
++/**
++ * struct ispprev_luma - Structure for luma enhancement.
++ * @table: Array for luma enhancement table.
++ */
++struct ispprev_luma {
++ __u32 table[ISPPRV_YENH_TBL_SIZE];
++};
++
++/**
++ * struct ispprv_update_config - Structure for Preview Configuration (user).
++ * @update: Specifies which ISP Preview registers should be updated.
++ * @flag: Specifies which ISP Preview functions should be enabled.
++ * @shading_shift: 3bit value of shift used in shading compensation.
++ * @luma: Pointer to luma enhancement structure.
++ * @hmed: Pointer to structure containing the odd and even distance.
++ * between the pixels in the image along with the filter threshold.
++ * @cfa: Pointer to structure containing the CFA interpolation table, CFA.
++ * format in the image, vertical and horizontal gradient threshold.
++ * @csup: Pointer to Structure for Chrominance Suppression coefficients.
++ * @wbal: Pointer to structure for White Balance.
++ * @blkadj: Pointer to structure for Black Adjustment.
++ * @rgb2rgb: Pointer to structure for RGB to RGB Blending.
++ * @csc: Pointer to structure for Color Space Conversion from RGB-YCbYCr.
++ * @yclimit: Pointer to structure for Y, C Value Limit.
++ * @dcor: Pointer to structure for defect correction.
++ * @nf: Pointer to structure for Noise Filter
++ * @gamma: Pointer to gamma structure.
++ */
++struct ispprv_update_config {
++ __u32 update;
++ __u32 flag;
++ __u32 shading_shift;
++ struct ispprev_luma __user *luma;
++ struct ispprev_hmed __user *hmed;
++ struct ispprev_cfa __user *cfa;
++ struct ispprev_csup __user *csup;
++ struct ispprev_wbal __user *wbal;
++ struct ispprev_blkadj __user *blkadj;
++ struct ispprev_rgbtorgb __user *rgb2rgb;
++ struct ispprev_csc __user *csc;
++ struct ispprev_yclimit __user *yclimit;
++ struct ispprev_dcor __user *dcor;
++ struct ispprev_nf __user *nf;
++ struct ispprev_gtables __user *gamma;
++};
++
++#endif /* OMAP_ISP_USER_H */
+diff --git a/arch/arm/plat-omap/include/plat/omap34xx.h b/arch/arm/plat-omap/include/plat/omap34xx.h
+index 98fc8b4..b9e8588 100644
+--- a/arch/arm/plat-omap/include/plat/omap34xx.h
++++ b/arch/arm/plat-omap/include/plat/omap34xx.h
+@@ -56,8 +56,12 @@
+ #define OMAP3430_ISP_RESZ_BASE (OMAP3430_ISP_BASE + 0x1000)
+ #define OMAP3430_ISP_SBL_BASE (OMAP3430_ISP_BASE + 0x1200)
+ #define OMAP3430_ISP_MMU_BASE (OMAP3430_ISP_BASE + 0x1400)
+-#define OMAP3430_ISP_CSI2A_BASE (OMAP3430_ISP_BASE + 0x1800)
+-#define OMAP3430_ISP_CSI2PHY_BASE (OMAP3430_ISP_BASE + 0x1970)
++#define OMAP3430_ISP_CSI2A_REGS1_BASE (OMAP3430_ISP_BASE + 0x1800)
++#define OMAP3430_ISP_CSIPHY2_BASE (OMAP3430_ISP_BASE + 0x1970)
++#define OMAP3630_ISP_CSI2A_REGS2_BASE (OMAP3430_ISP_BASE + 0x19C0)
++#define OMAP3630_ISP_CSI2C_REGS1_BASE (OMAP3430_ISP_BASE + 0x1C00)
++#define OMAP3630_ISP_CSIPHY1_BASE (OMAP3430_ISP_BASE + 0x1D70)
++#define OMAP3630_ISP_CSI2C_REGS2_BASE (OMAP3430_ISP_BASE + 0x1DC0)
+
+ #define OMAP3430_ISP_END (OMAP3430_ISP_BASE + 0x06F)
+ #define OMAP3430_ISP_CBUFF_END (OMAP3430_ISP_CBUFF_BASE + 0x077)
+@@ -69,8 +73,12 @@
+ #define OMAP3430_ISP_RESZ_END (OMAP3430_ISP_RESZ_BASE + 0x0AB)
+ #define OMAP3430_ISP_SBL_END (OMAP3430_ISP_SBL_BASE + 0x0FB)
+ #define OMAP3430_ISP_MMU_END (OMAP3430_ISP_MMU_BASE + 0x06F)
+-#define OMAP3430_ISP_CSI2A_END (OMAP3430_ISP_CSI2A_BASE + 0x16F)
+-#define OMAP3430_ISP_CSI2PHY_END (OMAP3430_ISP_CSI2PHY_BASE + 0x007)
++#define OMAP3430_ISP_CSI2A_REGS1_END (OMAP3430_ISP_CSI2A_REGS1_BASE + 0x16F)
++#define OMAP3430_ISP_CSIPHY2_END (OMAP3430_ISP_CSIPHY2_BASE + 0x00B)
++#define OMAP3630_ISP_CSI2A_REGS2_END (OMAP3630_ISP_CSI2A_REGS2_BASE + 0x3F)
++#define OMAP3630_ISP_CSI2C_REGS1_END (OMAP3630_ISP_CSI2C_REGS1_BASE + 0x16F)
++#define OMAP3630_ISP_CSIPHY1_END (OMAP3630_ISP_CSIPHY1_BASE + 0x00B)
++#define OMAP3630_ISP_CSI2C_REGS2_END (OMAP3630_ISP_CSI2C_REGS2_BASE + 0x3F)
+
+ #define OMAP34XX_HSUSB_OTG_BASE (L4_34XX_BASE + 0xAB000)
+ #define OMAP34XX_USBTLL_BASE (L4_34XX_BASE + 0x62000)
+diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
+index 8ce0de2..2f47561 100644
+--- a/arch/arm/plat-omap/iovmm.c
++++ b/arch/arm/plat-omap/iovmm.c
+@@ -60,6 +60,15 @@
+
+ static struct kmem_cache *iovm_area_cachep;
+
++/* return the offset of the first scatterlist entry in a sg table */
++static unsigned int sgtable_offset(const struct sg_table *sgt)
++{
++ if (!sgt || !sgt->nents)
++ return 0;
++
++ return sgt->sgl->offset;
++}
++
+ /* return total bytes of sg buffers */
+ static size_t sgtable_len(const struct sg_table *sgt)
+ {
+@@ -72,11 +81,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes;
+
+- bytes = sg_dma_len(sg);
++ bytes = sg_dma_len(sg) + sg->offset;
+
+ if (!iopgsz_ok(bytes)) {
+- pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
+- __func__, i, bytes);
++ pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
++ __func__, i, bytes, sg->offset);
++ return 0;
++ }
++
++ if (i && sg->offset) {
++ pr_err("%s: sg[%d] offset not allowed in internal "
++ "entries\n", __func__, i);
+ return 0;
+ }
+
+@@ -114,6 +129,16 @@ static unsigned int sgtable_nents(size_t bytes)
+ return nr_entries;
+ }
+
++static struct scatterlist *sg_alloc(unsigned int nents, gfp_t gfp_mask)
++{
++ return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
++}
++
++static void sg_free(struct scatterlist *sg, unsigned int nents)
++{
++ kfree(sg);
++}
++
+ /* allocate and initialize sg_table header(a kind of 'superblock') */
+ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+ {
+@@ -139,7 +164,7 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+- err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
++ err = __sg_alloc_table(sgt, nr_entries, -1, GFP_KERNEL, sg_alloc);
+ if (err) {
+ kfree(sgt);
+ return ERR_PTR(err);
+@@ -156,7 +181,7 @@ static void sgtable_free(struct sg_table *sgt)
+ if (!sgt)
+ return;
+
+- sg_free_table(sgt);
++ __sg_free_table(sgt, -1, sg_free);
+ kfree(sgt);
+
+ pr_debug("%s: sgt:%p\n", __func__, sgt);
+@@ -190,8 +215,8 @@ static void *vmap_sg(const struct sg_table *sgt)
+ u32 pa;
+ int err;
+
+- pa = sg_phys(sg);
+- bytes = sg_dma_len(sg);
++ pa = sg_phys(sg) - sg->offset;
++ bytes = sg_dma_len(sg) + sg->offset;
+
+ BUG_ON(bytes != PAGE_SIZE);
+
+@@ -463,8 +488,8 @@ static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
+ size_t bytes;
+ struct iotlb_entry e;
+
+- pa = sg_phys(sg);
+- bytes = sg_dma_len(sg);
++ pa = sg_phys(sg) - sg->offset;
++ bytes = sg_dma_len(sg) + sg->offset;
+
+ flags &= ~IOVMF_PGSZ_MASK;
+ pgsz = bytes_to_iopgsz(bytes);
+@@ -645,7 +670,7 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
+ if (IS_ERR_VALUE(da))
+ vunmap_sg(va);
+
+- return da;
++ return da + sgtable_offset(sgt);
+ }
+ EXPORT_SYMBOL_GPL(iommu_vmap);
+
+@@ -664,6 +689,7 @@ struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
+ * 'sgt' is allocated before 'iommu_vmalloc()' is called.
+ * Just returns 'sgt' to the caller to free
+ */
++ da &= PAGE_MASK;
+ sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
+ if (!sgt)
+ dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c
+index 9418f56..88202c3 100644
+--- a/arch/arm/plat-omap/omap-pm-noop.c
++++ b/arch/arm/plat-omap/omap-pm-noop.c
+@@ -84,7 +84,7 @@ void omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r)
+ * TI CDP code can call constraint_set here on the VDD2 OPP.
+ */
+ }
+-EXPORT_SYMBOL(omap_pm_set_min_bus_tput);
++EXPORT_SYMBOL_GPL(omap_pm_set_min_bus_tput);
+
+ void omap_pm_set_max_dev_wakeup_lat(struct device *dev, long t)
+ {
+diff --git a/drivers/media/Makefile b/drivers/media/Makefile
+index 499b081..a425581 100644
+--- a/drivers/media/Makefile
++++ b/drivers/media/Makefile
+@@ -2,7 +2,11 @@
+ # Makefile for the kernel multimedia device drivers.
+ #
+
++media-objs := media-device.o media-devnode.o media-entity.o
++
++obj-$(CONFIG_MEDIA_SUPPORT) += media.o
++
+ obj-y += common/ IR/ video/
+
+-obj-$(CONFIG_VIDEO_DEV) += radio/
+-obj-$(CONFIG_DVB_CORE) += dvb/
++obj-$(CONFIG_VIDEO_DEV) += radio/
++obj-$(CONFIG_DVB_CORE) += dvb/
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+new file mode 100644
+index 0000000..3cd31f9
+--- /dev/null
++++ b/drivers/media/media-device.c
+@@ -0,0 +1,330 @@
++/*
++ * Media device support.
++ *
++ * Copyright (C) 2009 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/types.h>
++#include <linux/ioctl.h>
++#include <linux/media.h>
++
++#include <media/media-device.h>
++#include <media/media-devnode.h>
++#include <media/media-entity.h>
++
++static int media_device_open(struct file *filp)
++{
++ return 0;
++}
++
++static int media_device_close(struct file *filp)
++{
++ return 0;
++}
++
++static struct media_entity *find_entity(struct media_device *mdev, u32 id)
++{
++ struct media_entity *entity;
++ int next = id & MEDIA_ENTITY_ID_FLAG_NEXT;
++
++ id &= ~MEDIA_ENTITY_ID_FLAG_NEXT;
++
++ spin_lock(&mdev->lock);
++
++ media_device_for_each_entity(entity, mdev) {
++ if ((entity->id == id && !next) ||
++ (entity->id > id && next)) {
++ spin_unlock(&mdev->lock);
++ return entity;
++ }
++ }
++
++ spin_unlock(&mdev->lock);
++
++ return NULL;
++}
++
++static long media_device_enum_entities(struct media_device *mdev,
++ struct media_user_entity __user *uent)
++{
++ struct media_entity *ent;
++ struct media_user_entity u_ent;
++
++ if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
++ return -EFAULT;
++
++ ent = find_entity(mdev, u_ent.id);
++
++ if (ent == NULL)
++ return -EINVAL;
++
++ u_ent.id = ent->id;
++ u_ent.name[0] = '\0';
++ if (ent->name)
++ strlcpy(u_ent.name, ent->name, sizeof(u_ent.name));
++ u_ent.type = ent->type;
++ u_ent.subtype = ent->subtype;
++ u_ent.pads = ent->num_pads;
++ u_ent.links = ent->num_links - ent->num_backlinks;
++ u_ent.v4l.major = ent->v4l.major;
++ u_ent.v4l.minor = ent->v4l.minor;
++ if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
++ return -EFAULT;
++ return 0;
++}
++
++static void media_device_kpad_to_upad(const struct media_entity_pad *kpad,
++ struct media_user_pad *upad)
++{
++ upad->entity = kpad->entity->id;
++ upad->index = kpad->index;
++ upad->type = kpad->type;
++}
++
++static long media_device_enum_links(struct media_device *mdev,
++ struct media_user_links __user *ulinks)
++{
++ struct media_entity *entity;
++ struct media_user_links links;
++
++ if (copy_from_user(&links, ulinks, sizeof(links)))
++ return -EFAULT;
++
++ entity = find_entity(mdev, links.entity);
++ if (entity == NULL)
++ return -EINVAL;
++
++ if (links.pads) {
++ unsigned int p;
++
++ for (p = 0; p < entity->num_pads; p++) {
++ struct media_user_pad pad;
++ media_device_kpad_to_upad(&entity->pads[p], &pad);
++ if (copy_to_user(&links.pads[p], &pad, sizeof(pad)))
++ return -EFAULT;
++ }
++ }
++
++ if (links.links) {
++ struct media_user_link __user *ulink;
++ unsigned int l;
++
++ for (l = 0, ulink = links.links; l < entity->num_links; l++) {
++ struct media_user_link link;
++
++ /* Ignore backlinks. */
++ if (entity->links[l].source->entity != entity)
++ continue;
++
++ media_device_kpad_to_upad(entity->links[l].source,
++ &link.source);
++ media_device_kpad_to_upad(entity->links[l].sink,
++ &link.sink);
++ link.flags = entity->links[l].flags;
++ if (copy_to_user(ulink, &link, sizeof(*ulink)))
++ return -EFAULT;
++ ulink++;
++ }
++ }
++ if (copy_to_user(ulinks, &links, sizeof(*ulinks)))
++ return -EFAULT;
++ return 0;
++}
++
++static long media_device_setup_link(struct media_device *mdev,
++ struct media_user_link __user *_ulink)
++{
++ struct media_entity_link *link = NULL;
++ struct media_user_link ulink;
++ struct media_entity *source;
++ struct media_entity *sink;
++ int ret;
++
++ if (copy_from_user(&ulink, _ulink, sizeof(ulink)))
++ return -EFAULT;
++
++ /* Find the source and sink entities and link.
++ */
++ source = find_entity(mdev, ulink.source.entity);
++ sink = find_entity(mdev, ulink.sink.entity);
++
++ if (source == NULL || sink == NULL)
++ return -EINVAL;
++
++ if (ulink.source.index >= source->num_pads ||
++ ulink.sink.index >= sink->num_pads)
++ return -EINVAL;
++
++ link = media_entity_find_link(&source->pads[ulink.source.index],
++ &sink->pads[ulink.sink.index]);
++ if (link == NULL)
++ return -EINVAL;
++
++ /* Setup the link on both entities. */
++ ret = __media_entity_setup_link(link, ulink.flags);
++
++ if (copy_to_user(_ulink, &ulink, sizeof(ulink)))
++ return -EFAULT;
++
++ return ret;
++}
++
++static long media_device_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct media_devnode *devnode = media_devnode_data(filp);
++ struct media_device *dev = to_media_device(devnode);
++ long ret;
++
++ switch (cmd) {
++ case MEDIA_IOC_ENUM_ENTITIES:
++ ret = media_device_enum_entities(dev,
++ (struct media_user_entity __user *)arg);
++ break;
++
++ case MEDIA_IOC_ENUM_LINKS:
++ mutex_lock(&dev->graph_mutex);
++ ret = media_device_enum_links(dev,
++ (struct media_user_links __user *)arg);
++ mutex_unlock(&dev->graph_mutex);
++ break;
++
++ case MEDIA_IOC_SETUP_LINK:
++ mutex_lock(&dev->graph_mutex);
++ ret = media_device_setup_link(dev,
++ (struct media_user_link __user *)arg);
++ mutex_unlock(&dev->graph_mutex);
++ break;
++
++ default:
++ ret = -ENOIOCTLCMD;
++ }
++
++ return ret;
++}
++
++static const struct media_file_operations media_device_fops = {
++ .owner = THIS_MODULE,
++ .open = media_device_open,
++ .unlocked_ioctl = media_device_ioctl,
++ .release = media_device_close,
++};
++
++static void media_device_release(struct media_devnode *mdev)
++{
++}
++
++/**
++ * media_device_register - register a media device
++ * @mdev: The media device
++ *
++ * The caller is responsible for initializing the media device before
++ * registration. The following fields must be set:
++ *
++ * - dev should point to the parent device. The field can be NULL when no
++ * parent device is available (for instance with ISA devices).
++ * - name should be set to the device name. If the name is empty a parent
++ * device must be set. In that case the name will be set to the parent
++ * device driver name followed by a space and the parent device name.
++ */
++int __must_check media_device_register(struct media_device *mdev)
++{
++ mdev->entity_id = 1;
++ INIT_LIST_HEAD(&mdev->entities);
++ spin_lock_init(&mdev->lock);
++ mutex_init(&mdev->graph_mutex);
++
++ /* If dev == NULL, then name must be filled in by the caller */
++ if (mdev->dev == NULL && WARN_ON(!mdev->name[0]))
++ return 0;
++
++ /* Set name to driver name + device name if it is empty. */
++ if (!mdev->name[0])
++ snprintf(mdev->name, sizeof(mdev->name), "%s %s",
++ mdev->dev->driver->name, dev_name(mdev->dev));
++
++ /* Register the device node. */
++ mdev->devnode.fops = &media_device_fops;
++ mdev->devnode.parent = mdev->dev;
++ strlcpy(mdev->devnode.name, mdev->name, sizeof(mdev->devnode.name));
++ mdev->devnode.release = media_device_release;
++ return media_devnode_register(&mdev->devnode, MEDIA_TYPE_DEVICE);
++}
++EXPORT_SYMBOL_GPL(media_device_register);
++
++/**
++ * media_device_unregister - unregister a media device
++ * @mdev: The media device
++ *
++ */
++void media_device_unregister(struct media_device *mdev)
++{
++ struct media_entity *entity;
++ struct media_entity *next;
++
++ list_for_each_entry_safe(entity, next, &mdev->entities, list)
++ media_device_unregister_entity(entity);
++
++ media_devnode_unregister(&mdev->devnode);
++}
++EXPORT_SYMBOL_GPL(media_device_unregister);
++
++/**
++ * media_device_register_entity - Register an entity with a media device
++ * @mdev: The media device
++ * @entity: The entity
++ */
++int __must_check media_device_register_entity(struct media_device *mdev,
++ struct media_entity *entity)
++{
++ /* Warn if we apparently re-register an entity */
++ WARN_ON(entity->parent != NULL);
++ entity->parent = mdev;
++
++ /* find_entity() relies on entities being stored in increasing IDs
++ * order. Don't change that without modifying find_entity().
++ */
++ spin_lock(&mdev->lock);
++ entity->id = mdev->entity_id++;
++ list_add_tail(&entity->list, &mdev->entities);
++ spin_unlock(&mdev->lock);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(media_device_register_entity);
++
++/**
++ * media_device_unregister_entity - Unregister an entity
++ * @entity: The entity
++ *
++ * If the entity has never been registered this function will return
++ * immediately.
++ */
++void media_device_unregister_entity(struct media_entity *entity)
++{
++ struct media_device *mdev = entity->parent;
++
++ if (mdev == NULL)
++ return;
++
++ spin_lock(&mdev->lock);
++ list_del(&entity->list);
++ spin_unlock(&mdev->lock);
++ entity->parent = NULL;
++}
++EXPORT_SYMBOL_GPL(media_device_unregister_entity);
++
+diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
+new file mode 100644
+index 0000000..0ee37ae
+--- /dev/null
++++ b/drivers/media/media-devnode.c
+@@ -0,0 +1,480 @@
++/*
++ * Media device node
++ *
++ * Generic media device node infrastructure to register and unregister
++ * character devices using a dynamic major number and proper reference
++ * counting.
++ *
++ * Copyright 2009 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * Based on drivers/media/video/v4l2_dev.c code authored by
++ *
++ * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
++ * Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/kmod.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/smp_lock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/uaccess.h>
++#include <asm/system.h>
++
++#include <media/media-devnode.h>
++
++#define MEDIA_NUM_DEVICES 256
++#define MEDIA_NAME "media"
++
++static dev_t media_dev_t;
++
++/*
++ * sysfs stuff
++ */
++
++static ssize_t show_name(struct device *cd,
++ struct device_attribute *attr, char *buf)
++{
++ struct media_devnode *mdev = to_media_devnode(cd);
++
++ return sprintf(buf, "%.*s\n", (int)sizeof(mdev->name), mdev->name);
++}
++
++static struct device_attribute media_devnode_attrs[] = {
++ __ATTR(name, S_IRUGO, show_name, NULL),
++ __ATTR_NULL
++};
++
++/*
++ * Active devices
++ */
++static struct media_devnode *media_devnodes[MEDIA_NUM_DEVICES];
++static DEFINE_MUTEX(media_devnode_lock);
++static DECLARE_BITMAP(devnode_nums[MEDIA_TYPE_MAX], MEDIA_NUM_DEVICES);
++
++/* Device node utility functions */
++
++/* Note: these utility functions all assume that type is in the range
++ [0, MEDIA_TYPE_MAX-1]. */
++
++/* Return the bitmap corresponding to type. */
++static inline unsigned long *devnode_bits(int type)
++{
++ return devnode_nums[type];
++}
++
++/* Mark device node number mdev->num as used */
++static inline void devnode_set(struct media_devnode *mdev)
++{
++ set_bit(mdev->num, devnode_bits(mdev->type));
++}
++
++/* Mark device node number mdev->num as unused */
++static inline void devnode_clear(struct media_devnode *mdev)
++{
++ clear_bit(mdev->num, devnode_bits(mdev->type));
++}
++
++/* Try to find a free device node number in the range [from, to> */
++static inline int devnode_find(struct media_devnode *mdev, int from, int to)
++{
++ return find_next_zero_bit(devnode_bits(mdev->type), to, from);
++}
++
++static inline void media_get(struct media_devnode *mdev)
++{
++ get_device(&mdev->dev);
++}
++
++static inline void media_put(struct media_devnode *mdev)
++{
++ put_device(&mdev->dev);
++}
++
++/* Called when the last user of the media device exits. */
++static void media_devnode_release(struct device *cd)
++{
++ struct media_devnode *mdev = to_media_devnode(cd);
++
++ mutex_lock(&media_devnode_lock);
++ if (media_devnodes[mdev->minor] != mdev) {
++ mutex_unlock(&media_devnode_lock);
++ /* should not happen */
++ WARN_ON(1);
++ return;
++ }
++
++ /* Free up this device for reuse */
++ media_devnodes[mdev->minor] = NULL;
++
++ /* Delete the cdev on this minor as well */
++ cdev_del(mdev->cdev);
++ /* Just in case some driver tries to access this from the release()
++ * callback.
++ */
++ mdev->cdev = NULL;
++
++ /* Mark device node number as free */
++ devnode_clear(mdev);
++
++ mutex_unlock(&media_devnode_lock);
++
++ /* Release media_devnode and perform other cleanups as needed. */
++ if (mdev->release)
++ mdev->release(mdev);
++}
++
++static struct class media_class = {
++ .name = MEDIA_NAME,
++ .dev_attrs = media_devnode_attrs,
++};
++
++struct media_devnode *media_devnode_data(struct file *file)
++{
++ return media_devnodes[iminor(file->f_path.dentry->d_inode)];
++}
++EXPORT_SYMBOL(media_devnode_data);
++
++static ssize_t media_read(struct file *filp, char __user *buf,
++ size_t sz, loff_t *off)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++
++ if (!mdev->fops->read)
++ return -EINVAL;
++ if (!media_devnode_is_registered(mdev))
++ return -EIO;
++ return mdev->fops->read(filp, buf, sz, off);
++}
++
++static ssize_t media_write(struct file *filp, const char __user *buf,
++ size_t sz, loff_t *off)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++
++ if (!mdev->fops->write)
++ return -EINVAL;
++ if (!media_devnode_is_registered(mdev))
++ return -EIO;
++ return mdev->fops->write(filp, buf, sz, off);
++}
++
++static unsigned int media_poll(struct file *filp,
++ struct poll_table_struct *poll)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++
++ if (!mdev->fops->poll || !media_devnode_is_registered(mdev))
++ return DEFAULT_POLLMASK;
++ return mdev->fops->poll(filp, poll);
++}
++
++static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++ int ret = -ENOTTY;
++
++ /* Allow ioctl to continue even if the device was unregistered.
++ Things like dequeueing buffers might still be useful. */
++ if (mdev->fops->unlocked_ioctl)
++ ret = mdev->fops->unlocked_ioctl(filp, cmd, arg);
++ else if (mdev->fops->ioctl) {
++ lock_kernel();
++ ret = mdev->fops->ioctl(filp, cmd, arg);
++ unlock_kernel();
++ }
++
++ return ret;
++}
++
++#ifdef CONFIG_MMU
++#define media_get_unmapped_area NULL
++#else
++static unsigned long media_get_unmapped_area(struct file *filp,
++ unsigned long addr, unsigned long len, unsigned long pgoff,
++ unsigned long flags)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++
++ if (!mdev->fops->get_unmapped_area)
++ return -ENOSYS;
++ if (!media_devnode_is_registered(mdev))
++ return -ENODEV;
++ return mdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
++}
++#endif
++
++static int media_mmap(struct file *filp, struct vm_area_struct *vm)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++
++ if (!mdev->fops->mmap || !media_devnode_is_registered(mdev))
++ return -ENODEV;
++ return mdev->fops->mmap(filp, vm);
++}
++
++/* Override for the open function */
++static int media_open(struct inode *inode, struct file *filp)
++{
++ struct media_devnode *mdev;
++ int ret = 0;
++
++ /* Check if the media device is available */
++ mutex_lock(&media_devnode_lock);
++ mdev = media_devnode_data(filp);
++ /* return ENODEV if the media device has been removed
++ already or if it is not registered anymore. */
++ if (mdev == NULL || !media_devnode_is_registered(mdev)) {
++ mutex_unlock(&media_devnode_lock);
++ return -ENODEV;
++ }
++ /* and increase the device refcount */
++ media_get(mdev);
++ mutex_unlock(&media_devnode_lock);
++ if (mdev->fops->open)
++ ret = mdev->fops->open(filp);
++
++ /* decrease the refcount in case of an error */
++ if (ret)
++ media_put(mdev);
++ return ret;
++}
++
++/* Override for the release function */
++static int media_release(struct inode *inode, struct file *filp)
++{
++ struct media_devnode *mdev = media_devnode_data(filp);
++ int ret = 0;
++
++ if (mdev->fops->release)
++ mdev->fops->release(filp);
++
++ /* decrease the refcount unconditionally since the release()
++ return value is ignored. */
++ media_put(mdev);
++ return ret;
++}
++
++static const struct file_operations media_devnode_fops = {
++ .owner = THIS_MODULE,
++ .read = media_read,
++ .write = media_write,
++ .open = media_open,
++ .get_unmapped_area = media_get_unmapped_area,
++ .mmap = media_mmap,
++ .unlocked_ioctl = media_ioctl,
++#ifdef CONFIG_COMPAT
++/* .compat_ioctl = media_compat_ioctl32, */
++#endif
++ .release = media_release,
++ .poll = media_poll,
++ .llseek = no_llseek,
++};
++
++/**
++ * media_devnode_register - register a media device node
++ * @mdev: media device node structure we want to register
++ * @type: type of device node to register
++ *
++ * The registration code assigns minor numbers and device node numbers based
++ * on the requested type and registers the new device node with the kernel. An
++ * error is returned if no free minor or device node number could be found, or
++ * if the registration of the device node failed.
++ *
++ * Zero is returned on success.
++ *
++ * Note that if the media_devnode_register call fails, the release() callback of
++ * the media_devnode structure is *not* called, so the caller is responsible for
++ * freeing any data.
++ *
++ * Valid types are
++ *
++ * %MEDIA_TYPE_DEVICE - A media device
++ */
++int __must_check media_devnode_register(struct media_devnode *mdev, int type)
++{
++ const char *name_base;
++ int minor_offset = 0;
++ int minor_cnt = MEDIA_NUM_DEVICES;
++ void *priv;
++ int ret;
++ int nr;
++ int i;
++
++ /* Part 1: check device type. */
++ name_base = media_devnode_type_name(type);
++ if (name_base == NULL) {
++ printk(KERN_ERR "%s called with unknown type: %d\n",
++ __func__, type);
++ return -EINVAL;
++ }
++
++ mdev->type = type;
++ mdev->cdev = NULL;
++
++ /* Part 2: find a free minor and device node number. */
++
++ /* Pick a device node number */
++ mutex_lock(&media_devnode_lock);
++ nr = devnode_find(mdev, 0, minor_cnt);
++ if (nr == minor_cnt) {
++ printk(KERN_ERR "could not get a free device node number\n");
++ mutex_unlock(&media_devnode_lock);
++ return -ENFILE;
++ }
++
++ /* The device node number and minor numbers are independent, so we just
++ * find the first free minor number.
++ */
++ for (i = 0; i < MEDIA_NUM_DEVICES; i++)
++ if (media_devnodes[i] == NULL)
++ break;
++ if (i == MEDIA_NUM_DEVICES) {
++ mutex_unlock(&media_devnode_lock);
++ printk(KERN_ERR "could not get a free minor\n");
++ return -ENFILE;
++ }
++
++ mdev->minor = i + minor_offset;
++ mdev->num = nr;
++ devnode_set(mdev);
++
++ /* Should not happen since we thought this minor was free */
++ WARN_ON(media_devnodes[mdev->minor] != NULL);
++ mutex_unlock(&media_devnode_lock);
++
++ /* Part 3: Initialize the character device */
++ mdev->cdev = cdev_alloc();
++ if (mdev->cdev == NULL) {
++ ret = -ENOMEM;
++ goto cleanup;
++ }
++ mdev->cdev->ops = &media_devnode_fops;
++ mdev->cdev->owner = mdev->fops->owner;
++ ret = cdev_add(mdev->cdev, MKDEV(MAJOR(media_dev_t), mdev->minor), 1);
++ if (ret < 0) {
++ printk(KERN_ERR "%s: cdev_add failed\n", __func__);
++ kfree(mdev->cdev);
++ mdev->cdev = NULL;
++ goto cleanup;
++ }
++
++ /* Part 4: register the device with sysfs
++ *
++ * Zeroing struct device will clear the device's drvdata, so make a
++ * copy and put it back.
++ * */
++ priv = dev_get_drvdata(&mdev->dev);
++ memset(&mdev->dev, 0, sizeof(mdev->dev));
++ dev_set_drvdata(&mdev->dev, priv);
++ mdev->dev.class = &media_class;
++ mdev->dev.devt = MKDEV(MAJOR(media_dev_t), mdev->minor);
++ if (mdev->parent)
++ mdev->dev.parent = mdev->parent;
++ dev_set_name(&mdev->dev, "%s%d", name_base, mdev->num);
++ ret = device_register(&mdev->dev);
++ if (ret < 0) {
++ printk(KERN_ERR "%s: device_register failed\n", __func__);
++ goto cleanup;
++ }
++ /* Register the release callback that will be called when the last
++ reference to the device goes away. */
++ mdev->dev.release = media_devnode_release;
++
++ /* Part 5: Activate this minor. The char device can now be used. */
++ set_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
++ mutex_lock(&media_devnode_lock);
++ media_devnodes[mdev->minor] = mdev;
++ mutex_unlock(&media_devnode_lock);
++ return 0;
++
++cleanup:
++ mutex_lock(&media_devnode_lock);
++ if (mdev->cdev)
++ cdev_del(mdev->cdev);
++ devnode_clear(mdev);
++ mutex_unlock(&media_devnode_lock);
++ return ret;
++}
++
++/**
++ * media_devnode_unregister - unregister a media device node
++ * @mdev: the device node to unregister
++ *
++ * This unregisters the passed device. Future open calls will be met with
++ * errors.
++ *
++ * This function can safely be called if the device node has never been
++ * registered or has already been unregistered.
++ */
++void media_devnode_unregister(struct media_devnode *mdev)
++{
++ /* Check if mdev was ever registered at all */
++ if (!media_devnode_is_registered(mdev))
++ return;
++
++ mutex_lock(&media_devnode_lock);
++ clear_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
++ mutex_unlock(&media_devnode_lock);
++ device_unregister(&mdev->dev);
++}
++
++const char *media_devnode_type_name(int type)
++{
++ switch (type) {
++ case MEDIA_TYPE_DEVICE:
++ return "media";
++ default:
++ return NULL;
++ }
++}
++
++/*
++ * Initialise media for linux
++ */
++static int __init media_devnode_init(void)
++{
++ int ret;
++
++ printk(KERN_INFO "Linux media interface: v0.10\n");
++ ret = alloc_chrdev_region(&media_dev_t, 0, MEDIA_NUM_DEVICES,
++ MEDIA_NAME);
++ if (ret < 0) {
++ printk(KERN_WARNING "media: unable to allcoate major\n");
++ return ret;
++ }
++
++ ret = class_register(&media_class);
++ if (ret < 0) {
++ unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES);
++ printk(KERN_WARNING "media: class_register failed\n");
++ return -EIO;
++ }
++
++ return 0;
++}
++
++static void __exit media_devnode_exit(void)
++{
++ class_unregister(&media_class);
++ unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES);
++}
++
++module_init(media_devnode_init)
++module_exit(media_devnode_exit)
++
++MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
++MODULE_DESCRIPTION("Device node registration for media drivers");
++MODULE_LICENSE("GPL");
++
+diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
+new file mode 100644
+index 0000000..8742d60
+--- /dev/null
++++ b/drivers/media/media-entity.c
+@@ -0,0 +1,685 @@
++/*
++ * Media Entity support
++ *
++ * Copyright (C) 2009 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <media/media-entity.h>
++#include <media/media-device.h>
++
++/**
++ * media_entity_init - Initialize a media entity
++ *
++ * @num_pads: Total number of input and output pads.
++ * @extra_links: Initial estimate of the number of extra links.
++ * @pads: Array of 'num_pads' pads.
++ *
++ * The total number of pads is an intrinsic property of entities known by the
++ * entity driver, while the total number of links depends on hardware design
++ * and is an extrinsic property unknown to the entity driver. However, in most
++ * use cases the entity driver can guess the number of links which can safely
++ * be assumed to be equal to or larger than the number of pads.
++ *
++ * For those reasons the links array can be preallocated based on the entity
++ * driver guess and will be reallocated later if extra links need to be
++ * created.
++ *
++ * This function allocates a links array with enough space to hold at least
++ * 'num_pads' + 'extra_links' elements. The media_entity::max_links field will
++ * be set to the number of allocated elements.
++ *
++ * The pads array is managed by the entity driver and passed to
++ * media_entity_init() where its pointer will be stored in the entity structure.
++ */
++int
++media_entity_init(struct media_entity *entity, u8 num_pads,
++ struct media_entity_pad *pads, u8 extra_links)
++{
++ struct media_entity_link *links;
++ unsigned int max_links = num_pads + extra_links;
++ unsigned int i;
++
++ links = kzalloc(max_links * sizeof(links[0]), GFP_KERNEL);
++ if (links == NULL)
++ return -ENOMEM;
++
++ entity->max_links = max_links;
++ entity->num_links = 0;
++ entity->num_backlinks = 0;
++ entity->num_pads = num_pads;
++ entity->pads = pads;
++ entity->links = links;
++
++ for (i = 0; i < num_pads; i++) {
++ pads[i].entity = entity;
++ pads[i].index = i;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(media_entity_init);
++
++void
++media_entity_cleanup(struct media_entity *entity)
++{
++ kfree(entity->links);
++}
++EXPORT_SYMBOL(media_entity_cleanup);
++
++/* -----------------------------------------------------------------------------
++ * Graph traversal
++ */
++
++static struct media_entity *media_entity_other(struct media_entity *entity,
++ struct media_entity_link *link)
++{
++ if (link->source->entity == entity)
++ return link->sink->entity;
++ else
++ return link->source->entity;
++}
++
++/* push an entity to traversal stack */
++static void stack_push(struct media_entity_graph *graph,
++ struct media_entity *entity)
++{
++ if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) {
++ WARN_ON(1);
++ return;
++ }
++ graph->top++;
++ graph->stack[graph->top].link = 0;
++ graph->stack[graph->top].entity = entity;
++}
++
++static struct media_entity *stack_pop(struct media_entity_graph *graph)
++{
++ struct media_entity *entity;
++
++ entity = graph->stack[graph->top].entity;
++ graph->top--;
++
++ return entity;
++}
++
++#define stack_peek(en) ((en)->stack[(en)->top - 1].entity)
++#define link_top(en) ((en)->stack[(en)->top].link)
++#define stack_top(en) ((en)->stack[(en)->top].entity)
++
++/**
++ * media_entity_graph_walk_start - Start walking the media graph at a given entity
++ * @graph: Media graph structure that will be used to walk the graph
++ * @entity: Starting entity
++ *
++ * This function initializes the graph traversal structure to walk the entities
++ * graph starting at the given entity. The traversal structure must not be
++ * modified by the caller during graph traversal. When done the structure can
++ * safely be freed.
++ */
++void media_entity_graph_walk_start(struct media_entity_graph *graph,
++ struct media_entity *entity)
++{
++ graph->top = 0;
++ graph->stack[graph->top].entity = NULL;
++ stack_push(graph, entity);
++}
++EXPORT_SYMBOL_GPL(media_entity_graph_walk_start);
++
++/**
++ * media_entity_graph_walk_next - Get the next entity in the graph
++ * @graph: Media graph structure
++ *
++ * Perform a depth-first traversal of the given media entities graph.
++ *
++ * The graph structure must have been previously initialized with a call to
++ * media_entity_graph_walk_start().
++ *
++ * Return the next entity in the graph or NULL if the whole graph have been
++ * traversed.
++ */
++struct media_entity *
++media_entity_graph_walk_next(struct media_entity_graph *graph)
++{
++ if (stack_top(graph) == NULL)
++ return NULL;
++
++ /*
++ * Depth first search. Push entity to stack and continue from
++ * top of the stack until no more entities on the level can be
++ * found.
++ */
++ while (link_top(graph) < stack_top(graph)->num_links) {
++ struct media_entity *entity = stack_top(graph);
++ struct media_entity_link *link =
++ &entity->links[link_top(graph)];
++ struct media_entity *next;
++
++ /* The link is not active so we do not follow. */
++ if (!(link->flags & MEDIA_LINK_FLAG_ACTIVE)) {
++ link_top(graph)++;
++ continue;
++ }
++
++ /* Get the entity in the other end of the link . */
++ next = media_entity_other(entity, link);
++
++ /* Was it the entity we came here from? */
++ if (next == stack_peek(graph)) {
++ link_top(graph)++;
++ continue;
++ }
++
++ /* Push the new entity to stack and start over. */
++ link_top(graph)++;
++ stack_push(graph, next);
++ }
++
++ return stack_pop(graph);
++}
++EXPORT_SYMBOL_GPL(media_entity_graph_walk_next);
++
++/**
++ * media_entity_graph_lock - Lock all entities in a graph
++ * @entity: Starting entity
++ * @pipe: Media pipeline to be assigned to all entities in the graph.
++ *
++ * Lock all entities connected to a given entity through active links, either
++ * directly or indirectly. The given pipeline is assigned to every entity in
++ * the graph and stored in the media_entity pipe field.
++ *
++ * Calls to this function can be nested, in which case the same number of
++ * media_entity_graph_unlock() calls will be required to unlock the graph. The
++ * pipeline pointer must be identical for all nested calls to
++ * media_entity_graph_lock().
++ */
++void media_entity_graph_lock(struct media_entity *entity,
++ struct media_pipeline *pipe)
++{
++ struct media_device *mdev = entity->parent;
++ struct media_entity_graph graph;
++
++ mutex_lock(&mdev->graph_mutex);
++
++ media_entity_graph_walk_start(&graph, entity);
++
++ while ((entity = media_entity_graph_walk_next(&graph))) {
++ entity->lock_count++;
++ WARN_ON(entity->pipe && entity->pipe != pipe);
++ entity->pipe = pipe;
++ }
++
++ mutex_unlock(&mdev->graph_mutex);
++}
++EXPORT_SYMBOL_GPL(media_entity_graph_lock);
++
++/**
++ * media_entity_graph_unlock - Unlock all entities in a graph
++ * @entity: Starting entity
++ *
++ * Unlock all entities connected to a given entity through active links, either
++ * directly or indirectly. The media_entity pipe field is reset to NULL on the
++ * last nested unlock call.
++ */
++void media_entity_graph_unlock(struct media_entity *entity)
++{
++ struct media_device *mdev = entity->parent;
++ struct media_entity_graph graph;
++
++ mutex_lock(&mdev->graph_mutex);
++
++ media_entity_graph_walk_start(&graph, entity);
++
++ while ((entity = media_entity_graph_walk_next(&graph))) {
++ entity->lock_count--;
++ if (entity->lock_count == 0)
++ entity->pipe = NULL;
++ }
++
++ mutex_unlock(&mdev->graph_mutex);
++}
++EXPORT_SYMBOL_GPL(media_entity_graph_unlock);
++
++/* -----------------------------------------------------------------------------
++ * Power state handling
++ */
++
++/*
++ * Return power count of nodes directly or indirectly connected to
++ * a given entity.
++ */
++static int media_entity_count_node(struct media_entity *entity)
++{
++ struct media_entity_graph graph;
++ int use = 0;
++
++ media_entity_graph_walk_start(&graph, entity);
++
++ while ((entity = media_entity_graph_walk_next(&graph))) {
++ if (entity->type == MEDIA_ENTITY_TYPE_NODE)
++ use += entity->use_count;
++ }
++
++ return use;
++}
++
++/* Apply use count to an entity. */
++static void media_entity_use_apply_one(struct media_entity *entity, int change)
++{
++ entity->use_count += change;
++ WARN_ON(entity->use_count < 0);
++}
++
++/*
++ * Apply use count change to an entity and change power state based on
++ * new use count.
++ */
++static int media_entity_power_apply_one(struct media_entity *entity, int change)
++{
++ int ret = 0;
++
++ if (entity->use_count == 0 && change > 0 &&
++ entity->ops && entity->ops->set_power) {
++ ret = entity->ops->set_power(entity, 1);
++ if (ret)
++ return ret;
++ }
++
++ media_entity_use_apply_one(entity, change);
++
++ if (entity->use_count == 0 && change < 0 &&
++ entity->ops && entity->ops->set_power)
++ ret = entity->ops->set_power(entity, 0);
++
++ return ret;
++}
++
++/*
++ * Apply power change to all connected entities. This ignores the
++ * nodes.
++ */
++static int media_entity_power_apply(struct media_entity *entity, int change)
++{
++ struct media_entity_graph graph;
++ struct media_entity *first = entity;
++ int ret = 0;
++
++ if (!change)
++ return 0;
++
++ media_entity_graph_walk_start(&graph, entity);
++
++ while (!ret && (entity = media_entity_graph_walk_next(&graph)))
++ if (entity->type != MEDIA_ENTITY_TYPE_NODE)
++ ret = media_entity_power_apply_one(entity, change);
++
++ if (!ret)
++ return 0;
++
++ media_entity_graph_walk_start(&graph, first);
++
++ while ((first = media_entity_graph_walk_next(&graph))
++ && first != entity)
++ if (first->type != MEDIA_ENTITY_TYPE_NODE)
++ media_entity_power_apply_one(first, -change);
++
++ return ret;
++}
++
++/* Apply the power state changes when connecting two entities. */
++static int media_entity_power_connect(struct media_entity *one,
++ struct media_entity *theother)
++{
++ int power_one = media_entity_count_node(one);
++ int power_theother = media_entity_count_node(theother);
++ int ret = 0;
++
++ ret = media_entity_power_apply(one, power_theother);
++ if (ret < 0)
++ return ret;
++
++ return media_entity_power_apply(theother, power_one);
++}
++
++static void media_entity_power_disconnect(struct media_entity *one,
++ struct media_entity *theother)
++{
++ int power_one = media_entity_count_node(one);
++ int power_theother = media_entity_count_node(theother);
++
++ media_entity_power_apply(one, -power_theother);
++ media_entity_power_apply(theother, -power_one);
++}
++
++/*
++ * Apply use count change to graph and change power state of entities
++ * accordingly.
++ */
++static int media_entity_node_power_change(struct media_entity *entity,
++ int change)
++{
++ /* Apply use count to node. */
++ media_entity_use_apply_one(entity, change);
++
++ /* Apply power change to connected non-nodes. */
++ return media_entity_power_apply(entity, change);
++}
++
++/*
++ * Node entity use changes are reflected on power state of all
++ * connected (directly or indirectly) entities whereas non-node entity
++ * use count changes are limited to that very entity.
++ */
++static int media_entity_use_change(struct media_entity *entity, int change)
++{
++ if (entity->type == MEDIA_ENTITY_TYPE_NODE)
++ return media_entity_node_power_change(entity, change);
++ else
++ return media_entity_power_apply_one(entity, change);
++}
++
++/* user open()s media entity */
++static struct media_entity *__media_entity_get(struct media_entity *entity)
++{
++ if (media_entity_use_change(entity, 1))
++ return NULL;
++
++ return entity;
++}
++
++/* user release()s media entity */
++static void __media_entity_put(struct media_entity *entity)
++{
++ media_entity_use_change(entity, -1);
++}
++
++/* user open()s media entity */
++struct media_entity *media_entity_get(struct media_entity *entity)
++{
++ struct media_entity *e;
++
++ if (entity == NULL)
++ return NULL;
++
++ if (entity->parent->dev &&
++ !try_module_get(entity->parent->dev->driver->owner))
++ return NULL;
++
++ mutex_lock(&entity->parent->graph_mutex);
++ e = __media_entity_get(entity);
++ mutex_unlock(&entity->parent->graph_mutex);
++
++ if (e == NULL && entity->parent->dev)
++ module_put(entity->parent->dev->driver->owner);
++
++ return e;
++}
++EXPORT_SYMBOL_GPL(media_entity_get);
++
++/* user release()s media entity */
++void media_entity_put(struct media_entity *entity)
++{
++ if (entity == NULL)
++ return;
++
++ mutex_lock(&entity->parent->graph_mutex);
++ __media_entity_put(entity);
++ mutex_unlock(&entity->parent->graph_mutex);
++
++ if (entity->parent->dev)
++ module_put(entity->parent->dev->driver->owner);
++}
++EXPORT_SYMBOL_GPL(media_entity_put);
++
++/* -----------------------------------------------------------------------------
++ * Links management
++ */
++
++static struct
++media_entity_link *media_entity_add_link(struct media_entity *entity)
++{
++ if (entity->num_links >= entity->max_links) {
++ struct media_entity_link *links = entity->links;
++ unsigned int max_links = entity->max_links + 2;
++ unsigned int i;
++
++ links = krealloc(links, max_links * sizeof(*links), GFP_KERNEL);
++ if (links == NULL)
++ return NULL;
++
++ for (i = 0; i < entity->num_links; i++)
++ links[i].other->other = &links[i];
++
++ entity->max_links = max_links;
++ entity->links = links;
++ }
++
++ return &entity->links[entity->num_links++];
++}
++
++int
++media_entity_create_link(struct media_entity *source, u8 source_pad,
++ struct media_entity *sink, u8 sink_pad, u32 flags)
++{
++ struct media_entity_link *link;
++ struct media_entity_link *backlink;
++
++ BUG_ON(source == NULL || sink == NULL);
++ BUG_ON(source_pad >= source->num_pads);
++ BUG_ON(sink_pad >= sink->num_pads);
++
++ link = media_entity_add_link(source);
++ if (link == NULL)
++ return -ENOMEM;
++
++ link->source = &source->pads[source_pad];
++ link->sink = &sink->pads[sink_pad];
++ link->flags = flags;
++
++ /* Create the backlink. Backlinks are used to help graph traversal and
++ * are not reported to userspace.
++ */
++ backlink = media_entity_add_link(sink);
++ if (backlink == NULL) {
++ source->num_links--;
++ return -ENOMEM;
++ }
++
++ backlink->source = &source->pads[source_pad];
++ backlink->sink = &sink->pads[sink_pad];
++ backlink->flags = flags;
++
++ link->other = backlink;
++ backlink->other = link;
++
++ sink->num_backlinks++;
++
++ return 0;
++}
++EXPORT_SYMBOL(media_entity_create_link);
++
++static int __media_entity_setup_link_notify(struct media_entity_link *link,
++ u32 flags)
++{
++ const u32 mask = MEDIA_LINK_FLAG_ACTIVE;
++ int ret;
++
++ /* Notify both entities. */
++ ret = media_entity_call(link->source->entity, link_setup,
++ link->source, link->sink, flags);
++ if (ret < 0 && ret != -ENOIOCTLCMD)
++ return ret;
++
++ ret = media_entity_call(link->sink->entity, link_setup,
++ link->sink, link->source, flags);
++ if (ret < 0 && ret != -ENOIOCTLCMD) {
++ media_entity_call(link->source->entity, link_setup,
++ link->source, link->sink, link->flags);
++ return ret;
++ }
++
++ link->flags = (link->flags & ~mask) | (flags & mask);
++ link->other->flags = link->flags;
++
++ return 0;
++}
++
++/**
++ * __media_entity_setup_link - Configure a media link
++ * @link: The link being configured
++ * @flags: Link configuration flags
++ *
++ * The bulk of link setup is handled by the two entities connected through the
++ * link. This function notifies both entities of the link configuration change.
++ *
++ * If the link is immutable or if the current and new configuration are
++ * identical, return immediately.
++ *
++ * The user is expected to hold link->source->parent->mutex. If not,
++ * media_entity_setup_link() should be used instead.
++ */
++int
++__media_entity_setup_link(struct media_entity_link *link, u32 flags)
++{
++ struct media_entity *source, *sink;
++ int ret = -EBUSY;
++
++ if (link == NULL)
++ return -EINVAL;
++
++ if (link->flags & MEDIA_LINK_FLAG_IMMUTABLE)
++ return link->flags == flags ? 0 : -EINVAL;
++
++ if (link->flags == flags)
++ return 0;
++
++ if (link->source->entity->lock_count || link->sink->entity->lock_count)
++ return -EBUSY;
++
++ source = __media_entity_get(link->source->entity);
++ if (!source)
++ return ret;
++
++ sink = __media_entity_get(link->sink->entity);
++ if (!sink)
++ goto err___media_entity_get;
++
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ ret = media_entity_power_connect(source, sink);
++ if (ret < 0)
++ goto err_media_entity_power_connect;
++ }
++
++ ret = __media_entity_setup_link_notify(link, flags);
++ if (ret < 0)
++ goto err___media_entity_setup_link_notify;
++
++ if (!(flags & MEDIA_LINK_FLAG_ACTIVE))
++ media_entity_power_disconnect(source, sink);
++
++ __media_entity_put(sink);
++ __media_entity_put(source);
++
++ return 0;
++
++err___media_entity_setup_link_notify:
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ media_entity_power_disconnect(source, sink);
++err_media_entity_power_connect:
++ __media_entity_put(sink);
++err___media_entity_get:
++ __media_entity_put(source);
++
++ return ret;
++}
++
++int media_entity_setup_link(struct media_entity_link *link, u32 flags)
++{
++ int ret;
++
++ mutex_lock(&link->source->entity->parent->graph_mutex);
++ ret = __media_entity_setup_link(link, flags);
++ mutex_unlock(&link->source->entity->parent->graph_mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(media_entity_setup_link);
++
++/**
++ * media_entity_find_link - Find a link between two pads
++ * @source: Source pad
++ * @sink: Sink pad
++ *
++ * Return a pointer to the link between the two entities. If no such link
++ * exists, return NULL.
++ */
++struct media_entity_link *
++media_entity_find_link(struct media_entity_pad *source,
++ struct media_entity_pad *sink)
++{
++ struct media_entity_link *link;
++ unsigned int i;
++
++ for (i = 0; i < source->entity->num_links; ++i) {
++ link = &source->entity->links[i];
++
++ if (link->source->entity == source->entity &&
++ link->source->index == source->index &&
++ link->sink->entity == sink->entity &&
++ link->sink->index == sink->index)
++ return link;
++ }
++
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(media_entity_find_link);
++
++/**
++ * media_entity_remote_pad - Locate the pad at the remote end of a link
++ * @entity: Local entity
++ * @pad: Pad at the local end of the link
++ *
++ * Search for a remote pad connected to the given pad by iterating over all
++ * links originating or terminating at that pad until an active link is found.
++ *
++ * Return a pointer to the pad at the remote end of the first found active link,
++ * or NULL if no active link has been found.
++ */
++struct media_entity_pad *
++media_entity_remote_pad(struct media_entity_pad *pad)
++{
++ unsigned int i;
++
++ for (i = 0; i < pad->entity->num_links; i++) {
++ struct media_entity_link *link = &pad->entity->links[i];
++
++ if (!(link->flags & MEDIA_LINK_FLAG_ACTIVE))
++ continue;
++
++ if (link->source == pad)
++ return link->sink;
++
++ if (link->sink == pad)
++ return link->source;
++ }
++
++ return NULL;
++
++}
++EXPORT_SYMBOL_GPL(media_entity_remote_pad);
++
+diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
+index bdbc9d3..1cd06a2 100644
+--- a/drivers/media/video/Kconfig
++++ b/drivers/media/video/Kconfig
+@@ -336,6 +336,28 @@ config VIDEO_TCM825X
+ This is a driver for the Toshiba TCM825x VGA camera sensor.
+ It is used for example in Nokia N800.
+
++config VIDEO_ET8EK8
++ tristate "ET8EK8 camera sensor support"
++ depends on I2C && VIDEO_V4L2
++ select VIDEO_SMIAREGS
++ ---help---
++ This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
++ It is used for example in Nokia RX51.
++
++config VIDEO_AD5820
++ tristate "AD5820 lens voice coil support"
++ depends on I2C && VIDEO_V4L2
++ ---help---
++ This is a driver for the AD5820 camera lens voice coil.
++ It is used for example in Nokia RX51.
++
++config VIDEO_ADP1653
++ tristate "ADP1653 flash support"
++ depends on I2C && VIDEO_V4L2
++ ---help---
++ This is a driver for the ADP1653 flash. It is used for
++ example in Nokia RX51.
++
+ config VIDEO_SAA7110
+ tristate "Philips SAA7110 video decoder"
+ depends on VIDEO_V4L2 && I2C
+@@ -410,6 +432,15 @@ config VIDEO_VPX3220
+ To compile this driver as a module, choose M here: the
+ module will be called vpx3220.
+
++config VIDEO_SMIA_SENSOR
++ tristate "Generic SMIA-compatible camera sensor support"
++ depends on I2C && VIDEO_V4L2
++ select VIDEO_SMIAREGS
++ ---help---
++ This is a generic driver for SMIA-compatible camera sensors.
++ It works at least with ST VS6555 and Toshiba TCM8330MD
++ VGA camera sensors.
++
+ comment "Video and audio decoders"
+
+ source "drivers/media/video/cx25840/Kconfig"
+@@ -853,6 +884,29 @@ config VIDEO_CAFE_CCIC
+ CMOS camera controller. This is the controller found on first-
+ generation OLPC systems.
+
++config VIDEO_OMAP3
++ tristate "OMAP 3 Camera support"
++ select OMAP_IOMMU
++ depends on VIDEO_V4L2 && ARCH_OMAP3430
++ ---help---
++ Driver for an OMAP 3 camera controller.
++
++config VIDEO_OMAP3_DEBUG
++ bool "OMAP 3 Camera debug messages"
++ depends on VIDEO_OMAP3
++ ---help---
++ Enable debug messages on OMAP 3 camera controller driver.
++
++config VIDEO_SMIAREGS
++ tristate "Generic SMIA I2C register access and register list helper"
++ depends on I2C
++ ---help---
++ This allows writing and reading SMIA image sensors' I2C registers
++ easily.
++
++ Also a few helper functions are provided to work with binary
++ register lists.
++
+ config SOC_CAMERA
+ tristate "SoC camera support"
+ depends on VIDEO_V4L2 && HAS_DMA && I2C
+diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
+index cc93859..9c02678 100644
+--- a/drivers/media/video/Makefile
++++ b/drivers/media/video/Makefile
+@@ -11,7 +11,7 @@ stkwebcam-objs := stk-webcam.o stk-sensor.o
+ omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
+
+ videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+- v4l2-event.o
++ v4l2-event.o v4l2-subdev.o
+
+ # V4L2 core modules
+
+@@ -49,6 +49,7 @@ obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
+ obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
+ obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
+ obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
++obj-$(CONFIG_VIDEO_SMIA_SENSOR) += smia-sensor.o
+ obj-$(CONFIG_VIDEO_BT819) += bt819.o
+ obj-$(CONFIG_VIDEO_BT856) += bt856.o
+ obj-$(CONFIG_VIDEO_BT866) += bt866.o
+@@ -126,6 +127,14 @@ obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
+
+ obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+
++obj-$(CONFIG_VIDEO_OMAP3) += isp/
++
++obj-$(CONFIG_VIDEO_SMIAREGS) += smiaregs.o
++
++obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8.o
++obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
++obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
++
+ obj-$(CONFIG_USB_DABUSB) += dabusb.o
+ obj-$(CONFIG_USB_OV511) += ov511.o
+ obj-$(CONFIG_USB_SE401) += se401.o
+diff --git a/drivers/media/video/ad5820.c b/drivers/media/video/ad5820.c
+new file mode 100644
+index 0000000..4b4cbc9
+--- /dev/null
++++ b/drivers/media/video/ad5820.c
+@@ -0,0 +1,485 @@
++/*
++ * drivers/media/video/ad5820.c
++ *
++ * AD5820 DAC driver for camera voice coil focus.
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Copyright (C) 2007 Texas Instruments
++ *
++ * Contact: Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ *
++ * Based on af_d88.c by Texas Instruments.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/bitops.h>
++#include <linux/kernel.h>
++
++#include <mach/io.h>
++#include <mach/gpio.h>
++
++#include <media/ad5820.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-device.h>
++
++#include <media/smiaregs.h>
++
++#define CODE_TO_RAMP_US(s) ((s) == 0 ? 0 : (1 << ((s) - 1)) * 50)
++#define RAMP_US_TO_CODE(c) fls(((c) + ((c)>>1)) / 50)
++
++#define CTRL_FOCUS_ABSOLUTE 0
++#define CTRL_FOCUS_RAMP_TIME 1
++#define CTRL_FOCUS_RAMP_MODE 2
++
++static struct v4l2_queryctrl ad5820_ctrls[] = {
++ /* Minimum current is 0 mA, maximum is 100 mA. Thus,
++ * 1 code is equivalent to 100/1023 = 0.0978 mA.
++ * Nevertheless, we do not use [mA] for focus position,
++ * because it is meaningless for user. Meaningful would
++ * be to use focus distance or even its inverse, but
++	 * since the driver doesn't have sufficient knowledge
++ * to do the conversion, we will just use abstract codes here.
++ * In any case, smaller value = focus position farther from camera.
++ * The default zero value means focus at infinity,
++ * and also least current consumption.
++ */
++ {
++ .id = V4L2_CID_FOCUS_ABSOLUTE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Focus, Absolute",
++ .minimum = 0,
++ .maximum = 1023,
++ .step = 1,
++ .default_value = 0,
++ .flags = 0,
++ },
++ {
++ .id = V4L2_CID_FOCUS_AD5820_RAMP_TIME,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Focus ramping time [us]",
++ .minimum = 0,
++ .maximum = 3200,
++ .step = 50,
++ .default_value = 0,
++ .flags = 0,
++ },
++ {
++ .id = V4L2_CID_FOCUS_AD5820_RAMP_MODE,
++ .type = V4L2_CTRL_TYPE_MENU,
++ .name = "Focus ramping mode",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = 0,
++ },
++};
++
++/**
++ * @brief I2C write using i2c_transfer().
++ * @param coil - the driver data structure
++ * @param data - register value to be written
++ * @returns nonnegative on success, negative if failed
++ */
++static int ad5820_write(struct ad5820_device *coil, u16 data)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
++ struct i2c_msg msg;
++ int r;
++
++ if (!client->adapter)
++ return -ENODEV;
++
++ data = cpu_to_be16(data);
++ msg.addr = client->addr;
++ msg.flags = 0;
++ msg.len = 2;
++ msg.buf = (u8 *)&data;
++
++ r = i2c_transfer(client->adapter, &msg, 1);
++ if (r < 0) {
++ dev_err(&client->dev, "write failed, error %d\n", r);
++ return r;
++ }
++
++ return 0;
++}
++
++/**
++ * @brief I2C read using i2c_transfer().
++ * @param coil - the driver data structure
++ * @returns unsigned 16-bit register value on success, negative if failed
++ */
++static int ad5820_read(struct ad5820_device *coil)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
++ struct i2c_msg msg;
++ int r;
++ u16 data = 0;
++
++ if (!client->adapter)
++ return -ENODEV;
++
++ msg.addr = client->addr;
++ msg.flags = I2C_M_RD;
++ msg.len = 2;
++ msg.buf = (u8 *)&data;
++
++ r = i2c_transfer(client->adapter, &msg, 1);
++ if (r < 0) {
++ dev_err(&client->dev, "read failed, error %d\n", r);
++ return r;
++ }
++
++ return be16_to_cpu(data);
++}
++
++/* Calculate status word and write it to the device based on current
++ * values of V4L2 controls. It is assumed that the stored V4L2 control
++ * values are properly limited and rounded. */
++static int ad5820_update_hw(struct ad5820_device *coil)
++{
++ u16 status;
++
++ if (!coil->power)
++ return 0;
++
++ status = RAMP_US_TO_CODE(coil->focus_ramp_time);
++ status |= coil->focus_ramp_mode
++ ? AD5820_RAMP_MODE_64_16 : AD5820_RAMP_MODE_LINEAR;
++ status |= coil->focus_absolute << AD5820_DAC_SHIFT;
++
++ if (coil->standby)
++ status |= AD5820_POWER_DOWN;
++
++ return ad5820_write(coil, status);
++}
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++static int
++ad5820_get_chip_ident(struct v4l2_subdev *subdev,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_AD5820, 0);
++}
++
++static int
++ad5820_set_config(struct v4l2_subdev *subdev, int irq, void *platform_data)
++{
++ static const int CHECK_VALUE = 0x3FF0;
++
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ u16 status = AD5820_POWER_DOWN | CHECK_VALUE;
++ int rval;
++
++ if (platform_data == NULL)
++ return -ENODEV;
++
++ coil->platform_data = platform_data;
++
++ coil->focus_absolute =
++ ad5820_ctrls[CTRL_FOCUS_ABSOLUTE].default_value;
++ coil->focus_ramp_time =
++ ad5820_ctrls[CTRL_FOCUS_RAMP_TIME].default_value;
++ coil->focus_ramp_mode =
++ ad5820_ctrls[CTRL_FOCUS_RAMP_MODE].default_value;
++
++ /* Detect that the chip is there */
++ rval = coil->platform_data->s_power(subdev, 1);
++ if (rval)
++ goto not_detected;
++ rval = ad5820_write(coil, status);
++ if (rval)
++ goto not_detected;
++ rval = ad5820_read(coil);
++ if (rval != status)
++ goto not_detected;
++
++ coil->platform_data->s_power(subdev, 0);
++ return 0;
++
++not_detected:
++ dev_err(&client->dev, "not detected\n");
++ return -ENODEV;
++}
++
++static int
++ad5820_query_ctrl(struct v4l2_subdev *subdev, struct v4l2_queryctrl *ctrl)
++{
++ return smia_ctrl_query(ad5820_ctrls, ARRAY_SIZE(ad5820_ctrls), ctrl);
++}
++
++static int
++ad5820_query_menu(struct v4l2_subdev *subdev, struct v4l2_querymenu *qm)
++{
++ switch (qm->id) {
++ case V4L2_CID_FOCUS_AD5820_RAMP_MODE:
++ if (qm->index & ~1)
++ return -EINVAL;
++ strcpy(qm->name, qm->index == 0 ? "Linear ramp" : "64/16 ramp");
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int
++ad5820_get_ctrl(struct v4l2_subdev *subdev, struct v4l2_control *vc)
++{
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++
++ switch (vc->id) {
++ case V4L2_CID_FOCUS_ABSOLUTE:
++ vc->value = coil->focus_absolute;
++ break;
++ case V4L2_CID_FOCUS_AD5820_RAMP_TIME:
++ vc->value = coil->focus_ramp_time;
++ break;
++ case V4L2_CID_FOCUS_AD5820_RAMP_MODE:
++ vc->value = coil->focus_ramp_mode;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int
++ad5820_set_ctrl(struct v4l2_subdev *subdev, struct v4l2_control *vc)
++{
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++ u32 code;
++ int r = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_FOCUS_ABSOLUTE:
++ coil->focus_absolute = clamp(vc->value,
++ ad5820_ctrls[CTRL_FOCUS_ABSOLUTE].minimum,
++ ad5820_ctrls[CTRL_FOCUS_ABSOLUTE].maximum);
++ r = ad5820_update_hw(coil);
++ break;
++
++ case V4L2_CID_FOCUS_AD5820_RAMP_TIME:
++ code = clamp(vc->value,
++ ad5820_ctrls[CTRL_FOCUS_RAMP_TIME].minimum,
++ ad5820_ctrls[CTRL_FOCUS_RAMP_TIME].maximum);
++ code = RAMP_US_TO_CODE(code);
++ coil->focus_ramp_time = CODE_TO_RAMP_US(code);
++ break;
++
++ case V4L2_CID_FOCUS_AD5820_RAMP_MODE:
++ coil->focus_ramp_mode = clamp(vc->value,
++ ad5820_ctrls[CTRL_FOCUS_RAMP_MODE].minimum,
++ ad5820_ctrls[CTRL_FOCUS_RAMP_MODE].maximum);
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return r;
++}
++
++static int
++ad5820_set_power(struct v4l2_subdev *subdev, int on)
++{
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++ int was_on = coil->power;
++ int ret;
++
++ /* If requesting current state, nothing to be done. */
++ if (coil->power == on)
++ return 0;
++
++	/* If powering off, go to standby first as real power off may be denied
++ * by the hardware (single power line control for both coil and sensor).
++ */
++ if (!on) {
++ coil->standby = 1;
++ ret = ad5820_update_hw(coil);
++ if (ret)
++ goto fail;
++ }
++
++ /* Set the hardware power state. This will turn the power line on or
++ * off.
++ */
++ ret = coil->platform_data->s_power(subdev, on);
++ if (ret)
++ goto fail;
++
++ coil->power = on;
++
++ /* If powering on, restore the hardware settings. */
++ if (on) {
++ coil->standby = 0;
++ ret = ad5820_update_hw(coil);
++ if (ret)
++ goto fail;
++ }
++
++ return 0;
++
++fail:
++ /* Try to restore original state and return error code */
++ coil->power = was_on;
++ coil->standby = !was_on;
++
++ coil->platform_data->s_power(subdev, coil->power);
++ ad5820_update_hw(coil);
++
++ return ret;
++}
++
++static const struct v4l2_subdev_core_ops ad5820_core_ops = {
++ .g_chip_ident = ad5820_get_chip_ident,
++ .s_config = ad5820_set_config,
++ .queryctrl = ad5820_query_ctrl,
++ .querymenu = ad5820_query_menu,
++ .g_ctrl = ad5820_get_ctrl,
++ .s_ctrl = ad5820_set_ctrl,
++ .s_power = ad5820_set_power,
++};
++
++static const struct v4l2_subdev_ops ad5820_ops = {
++ .core = &ad5820_core_ops,
++};
++
++/* --------------------------------------------------------------------------
++ * I2C driver
++ */
++#ifdef CONFIG_PM
++
++static int ad5820_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++
++ if (!coil->power)
++ return 0;
++
++ return coil->platform_data->s_power(subdev, 0);
++}
++
++static int ad5820_resume(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++
++ if (!coil->power)
++ return 0;
++
++ coil->power = 0;
++ return ad5820_set_power(subdev, 1);
++}
++
++#else
++
++#define ad5820_suspend NULL
++#define ad5820_resume NULL
++
++#endif /* CONFIG_PM */
++
++static const struct media_entity_operations ad5820_entity_ops = {
++ .set_power = v4l2_subdev_set_power,
++};
++
++static int ad5820_probe(struct i2c_client *client,
++ const struct i2c_device_id *devid)
++{
++ struct ad5820_device *coil;
++ int ret = 0;
++
++ coil = kzalloc(sizeof(*coil), GFP_KERNEL);
++ if (coil == NULL)
++ return -ENOMEM;
++
++ v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
++ coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ coil->subdev.entity.ops = &ad5820_entity_ops;
++ ret = media_entity_init(&coil->subdev.entity, 0, NULL, 0);
++ if (ret < 0)
++ kfree(coil);
++
++ return ret;
++}
++
++static int __exit ad5820_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct ad5820_device *coil = to_ad5820_device(subdev);
++
++ v4l2_device_unregister_subdev(&coil->subdev);
++ media_entity_cleanup(&coil->subdev.entity);
++ kfree(coil);
++ return 0;
++}
++
++static const struct i2c_device_id ad5820_id_table[] = {
++ { AD5820_NAME, 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
++
++static struct i2c_driver ad5820_i2c_driver = {
++ .driver = {
++ .name = AD5820_NAME,
++ },
++ .probe = ad5820_probe,
++ .remove = __exit_p(ad5820_remove),
++ .suspend = ad5820_suspend,
++ .resume = ad5820_resume,
++ .id_table = ad5820_id_table,
++};
++
++static int __init ad5820_init(void)
++{
++ int rval;
++
++ rval = i2c_add_driver(&ad5820_i2c_driver);
++ if (rval)
++ printk(KERN_INFO "%s: failed registering " AD5820_NAME "\n",
++ __func__);
++
++ return rval;
++}
++
++static void __exit ad5820_exit(void)
++{
++ i2c_del_driver(&ad5820_i2c_driver);
++}
++
++
++module_init(ad5820_init);
++module_exit(ad5820_exit);
++
++MODULE_AUTHOR("Tuukka Toivonen <tuukka.o.toivonen@nokia.com>");
++MODULE_DESCRIPTION("AD5820 camera lens driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
+new file mode 100644
+index 0000000..7dd2256
+--- /dev/null
++++ b/drivers/media/video/adp1653.c
+@@ -0,0 +1,567 @@
++/*
++ * drivers/media/video/adp1653.c
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ * NOTES:
++ * - Torch and Indicator lights are enabled by just increasing
++ * intensity from zero
++ * - Increasing Flash light intensity does nothing until it is
++ * strobed (strobe control set to 1)
++ * - Strobing flash disables Torch light (sets intensity to zero).
++ * This might be changed later.
++ *
++ * TODO:
++ * - fault interrupt handling
++ * - faster strobe (use i/o pin instead of i2c)
++ * - should ensure that the pin is in some sane state even if not used
++ * - strobe control could return whether flash is still on (measure time)
++ * - power doesn't need to be ON if all lights are off
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>
++#include <linux/version.h>
++#include <media/adp1653.h>
++#include <media/smiaregs.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-device.h>
++
++#define TIMEOUT_US_TO_CODE(t) ((820000 + 27300 - (t))/54600)
++#define TIMEOUT_CODE_TO_US(c) (820000 - (c) * 54600)
++
++/* Write values into ADP1653 registers. Do nothing if power is off. */
++static int adp1653_update_hw(struct adp1653_flash *flash)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
++ u8 out_sel;
++ u8 config;
++ int rval;
++
++ if (!flash->power)
++ return 0;
++
++ out_sel = flash->indicator_intensity << ADP1653_REG_OUT_SEL_ILED_SHIFT;
++ /* Set torch intensity to zero--prevents false triggering of SC Fault */
++ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
++ if (rval < 0)
++ return rval;
++
++ if (flash->torch_intensity > 0) {
++ /* Torch mode, light immediately on, duration indefinite */
++ out_sel |= flash->torch_intensity
++ << ADP1653_REG_OUT_SEL_HPLED_SHIFT;
++ config = 0;
++ } else {
++ /* Flash mode, light on with strobe, duration from timer */
++ out_sel |= flash->flash_intensity
++ << ADP1653_REG_OUT_SEL_HPLED_SHIFT;
++ config = ADP1653_REG_CONFIG_TMR_CFG;
++ config |= TIMEOUT_US_TO_CODE(flash->flash_timeout)
++ << ADP1653_REG_CONFIG_TMR_SET_SHIFT;
++ }
++
++ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
++ if (rval < 0)
++ return rval;
++
++ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_CONFIG, config);
++ if (rval < 0)
++ return rval;
++
++ return 0;
++}
++
++static int adp1653_strobe(struct adp1653_flash *flash)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
++ int rval;
++
++ if (flash->torch_intensity > 0) {
++ /* Disabling torch enables flash in update_hw() */
++ flash->torch_intensity = 0;
++ rval = adp1653_update_hw(flash);
++ if (rval)
++ return rval;
++ }
++
++ if (flash->platform_data->strobe)
++ /* Hardware-specific strobe using I/O pin */
++ return flash->platform_data->strobe(&flash->subdev);
++
++ /* Software strobe using i2c */
++ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE,
++ ADP1653_REG_SW_STROBE_SW_STROBE);
++ if (rval)
++ return rval;
++ return i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, 0);
++}
++
++static int adp1653_get_fault(struct adp1653_flash *flash)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
++
++ return i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
++}
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++static int
++adp1653_get_chip_ident(struct v4l2_subdev *subdev,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADP1653, 0);
++}
++
++#define CTRL_FLASH_STROBE 0
++#define CTRL_FLASH_TIMEOUT 1
++#define CTRL_FLASH_INTENSITY 2
++#define CTRL_TORCH_INTENSITY 3
++#define CTRL_INDICATOR_INTENSITY 4
++#define CTRL_FLASH_FAULT_SCP 5
++#define CTRL_FLASH_FAULT_OT 6
++#define CTRL_FLASH_FAULT_TMR 7
++#define CTRL_FLASH_FAULT_OV 8
++
++static const struct v4l2_queryctrl adp1653_ctrls[] = {
++ {
++ .id = V4L2_CID_FLASH_STROBE,
++ .type = V4L2_CTRL_TYPE_BUTTON,
++ .name = "Flash strobe",
++ .minimum = 0,
++ .maximum = 0,
++ .step = 0,
++ .default_value = 0,
++ .flags = V4L2_CTRL_FLAG_UPDATE,
++ },
++
++ {
++ .id = V4L2_CID_FLASH_TIMEOUT,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Flash timeout [us]",
++ .minimum = 1000,
++ .maximum = 820000,
++ .step = 54600,
++ .default_value = 1000,
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++ {
++ .id = V4L2_CID_FLASH_INTENSITY,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Flash intensity",
++ .minimum = ADP1653_FLASH_INTENSITY_MIN,
++ .maximum = ADP1653_FLASH_INTENSITY_MAX,
++ .step = 1,
++ .default_value = ADP1653_FLASH_INTENSITY_MIN,
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++ {
++ .id = V4L2_CID_TORCH_INTENSITY,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Torch intensity",
++ .minimum = ADP1653_TORCH_INTENSITY_MIN,
++ .maximum = ADP1653_TORCH_INTENSITY_MAX,
++ .step = 1,
++ .default_value = ADP1653_TORCH_INTENSITY_MIN,
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++ {
++ .id = V4L2_CID_INDICATOR_INTENSITY,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Indicator intensity",
++ .minimum = ADP1653_INDICATOR_INTENSITY_MIN,
++ .maximum = ADP1653_INDICATOR_INTENSITY_MAX,
++ .step = 1,
++ .default_value = ADP1653_INDICATOR_INTENSITY_MIN,
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++
++ /* Faults */
++ {
++ .id = V4L2_CID_FLASH_ADP1653_FAULT_SCP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Short-circuit fault",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = V4L2_CTRL_FLAG_READ_ONLY,
++ },
++ {
++ .id = V4L2_CID_FLASH_ADP1653_FAULT_OT,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Overtemperature fault",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = V4L2_CTRL_FLAG_READ_ONLY,
++ },
++ {
++ .id = V4L2_CID_FLASH_ADP1653_FAULT_TMR,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Timeout fault",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = V4L2_CTRL_FLAG_READ_ONLY,
++ },
++ {
++ .id = V4L2_CID_FLASH_ADP1653_FAULT_OV,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Overvoltage fault",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = V4L2_CTRL_FLAG_READ_ONLY,
++ }
++};
++
++static int
++adp1653_query_ctrl(struct v4l2_subdev *subdev, struct v4l2_queryctrl *ctrl)
++{
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++ int rval;
++
++ rval = smia_ctrl_query(adp1653_ctrls, ARRAY_SIZE(adp1653_ctrls), ctrl);
++ if (rval < 0)
++ return rval;
++
++ /* Override global values with platform-specific data. */
++ switch (ctrl->id) {
++ case V4L2_CID_FLASH_TIMEOUT:
++ ctrl->maximum = flash->platform_data->max_flash_timeout;
++ ctrl->default_value = flash->platform_data->max_flash_timeout;
++ break;
++ case V4L2_CID_FLASH_INTENSITY:
++ ctrl->maximum = flash->platform_data->max_flash_intensity;
++ break;
++ case V4L2_CID_TORCH_INTENSITY:
++ ctrl->maximum = flash->platform_data->max_torch_intensity;
++ break;
++ case V4L2_CID_INDICATOR_INTENSITY:
++ ctrl->maximum = flash->platform_data->max_indicator_intensity;
++ break;
++ }
++
++ return 0;
++}
++
++static int
++adp1653_get_ctrl(struct v4l2_subdev *subdev, struct v4l2_control *vc)
++{
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++
++ switch (vc->id) {
++ case V4L2_CID_FLASH_TIMEOUT:
++ vc->value = flash->flash_timeout;
++ break;
++ case V4L2_CID_FLASH_INTENSITY:
++ vc->value = flash->flash_intensity;
++ break;
++ case V4L2_CID_TORCH_INTENSITY:
++ vc->value = flash->torch_intensity;
++ break;
++ case V4L2_CID_INDICATOR_INTENSITY:
++ vc->value = flash->indicator_intensity;
++ break;
++
++ case V4L2_CID_FLASH_ADP1653_FAULT_SCP:
++ vc->value = (adp1653_get_fault(flash)
++ & ADP1653_REG_FAULT_FLT_SCP) != 0;
++ break;
++ case V4L2_CID_FLASH_ADP1653_FAULT_OT:
++ vc->value = (adp1653_get_fault(flash)
++ & ADP1653_REG_FAULT_FLT_OT) != 0;
++ break;
++ case V4L2_CID_FLASH_ADP1653_FAULT_TMR:
++ vc->value = (adp1653_get_fault(flash)
++ & ADP1653_REG_FAULT_FLT_TMR) != 0;
++ break;
++ case V4L2_CID_FLASH_ADP1653_FAULT_OV:
++ vc->value = (adp1653_get_fault(flash)
++ & ADP1653_REG_FAULT_FLT_OV) != 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int
++adp1653_set_ctrl(struct v4l2_subdev *subdev, struct v4l2_control *vc)
++{
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++ const struct v4l2_queryctrl *ctrl;
++ unsigned int index;
++ s32 maximum;
++ u32 *value;
++
++ switch (vc->id) {
++ case V4L2_CID_FLASH_STROBE:
++ return adp1653_strobe(flash);
++
++ case V4L2_CID_FLASH_TIMEOUT:
++ index = CTRL_FLASH_TIMEOUT;
++ maximum = flash->platform_data->max_flash_timeout;
++ value = &flash->flash_timeout;
++ break;
++ case V4L2_CID_FLASH_INTENSITY:
++ index = CTRL_FLASH_INTENSITY;
++ maximum = flash->platform_data->max_flash_intensity;
++ value = &flash->flash_intensity;
++ break;
++ case V4L2_CID_TORCH_INTENSITY:
++ index = CTRL_TORCH_INTENSITY;
++ maximum = flash->platform_data->max_torch_intensity;
++ value = &flash->torch_intensity;
++ break;
++ case V4L2_CID_INDICATOR_INTENSITY:
++ index = CTRL_INDICATOR_INTENSITY;
++ maximum = flash->platform_data->max_indicator_intensity;
++ value = &flash->indicator_intensity;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ ctrl = &adp1653_ctrls[index];
++ vc->value = clamp(vc->value, ctrl->minimum, maximum);
++ vc->value = DIV_ROUND_CLOSEST(vc->value - ctrl->minimum, ctrl->step);
++ vc->value = vc->value * ctrl->step + ctrl->minimum;
++ *value = vc->value;
++
++ return adp1653_update_hw(flash);
++}
++
++static int
++adp1653_set_config(struct v4l2_subdev *subdev, int irq, void *platform_data)
++{
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++
++ if (platform_data == NULL)
++ return -EINVAL;
++
++ flash->platform_data = platform_data;
++
++ flash->flash_timeout =
++ flash->platform_data->max_flash_timeout;
++ flash->flash_intensity =
++ adp1653_ctrls[CTRL_FLASH_INTENSITY].default_value;
++ flash->torch_intensity =
++ adp1653_ctrls[CTRL_TORCH_INTENSITY].default_value;
++ flash->indicator_intensity =
++ adp1653_ctrls[CTRL_INDICATOR_INTENSITY].default_value;
++
++ return 0;
++}
++
++static int
++adp1653_init_device(struct adp1653_flash *flash)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
++ int rval;
++
++ /* Clear FAULT register by writing zero to OUT_SEL */
++ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
++ if (rval < 0) {
++ dev_err(&client->dev, "failed writing fault register\n");
++ return -EIO;
++ }
++
++ /* Read FAULT register */
++ rval = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
++ if (rval < 0) {
++ dev_err(&client->dev, "failed reading fault register\n");
++ return -EIO;
++ }
++
++ if ((rval & 0x0f) != 0) {
++ dev_err(&client->dev, "device fault\n");
++ return -EIO;
++ }
++
++ rval = adp1653_update_hw(flash);
++ if (rval) {
++ dev_err(&client->dev,
++ "adp1653_update_hw failed at %s\n", __func__);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++static int
++adp1653_set_power(struct v4l2_subdev *subdev, int on)
++{
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++ int rval = 0;
++
++ if (on == flash->power)
++ return 0;
++
++ rval = flash->platform_data->power(subdev, on);
++ if (rval)
++ return rval;
++
++ flash->power = on;
++ if (!on)
++ return 0;
++
++ rval = adp1653_init_device(flash);
++ if (rval) {
++ flash->platform_data->power(subdev, 0);
++ flash->power = 0;
++ }
++
++ return rval;
++}
++
++static const struct v4l2_subdev_core_ops adp1653_core_ops = {
++ .g_chip_ident = adp1653_get_chip_ident,
++ .s_config = adp1653_set_config,
++ .queryctrl = adp1653_query_ctrl,
++ .g_ctrl = adp1653_get_ctrl,
++ .s_ctrl = adp1653_set_ctrl,
++ .s_power = adp1653_set_power,
++};
++
++static const struct v4l2_subdev_ops adp1653_ops = {
++ .core = &adp1653_core_ops,
++};
++
++/* --------------------------------------------------------------------------
++ * I2C driver
++ */
++#ifdef CONFIG_PM
++
++static int adp1653_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++
++ if (!flash->power)
++ return 0;
++
++ return flash->platform_data->power(subdev, 0);
++}
++
++static int adp1653_resume(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++
++ if (!flash->power)
++ return 0;
++
++ flash->power = 0;
++ return adp1653_set_power(subdev, 1);
++}
++
++#else
++
++#define adp1653_suspend NULL
++#define adp1653_resume NULL
++
++#endif /* CONFIG_PM */
++
++static const struct media_entity_operations adp1653_entity_ops = {
++ .set_power = v4l2_subdev_set_power,
++};
++
++static int adp1653_probe(struct i2c_client *client,
++ const struct i2c_device_id *devid)
++{
++ struct adp1653_flash *flash;
++ int ret;
++
++ flash = kzalloc(sizeof(*flash), GFP_KERNEL);
++ if (flash == NULL)
++ return -ENOMEM;
++
++ v4l2_i2c_subdev_init(&flash->subdev, client, &adp1653_ops);
++ flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ flash->subdev.entity.ops = &adp1653_entity_ops;
++ ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
++ if (ret < 0)
++ kfree(flash);
++
++ return ret;
++}
++
++static int __exit adp1653_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct adp1653_flash *flash = to_adp1653_flash(subdev);
++
++ v4l2_device_unregister_subdev(&flash->subdev);
++ media_entity_cleanup(&flash->subdev.entity);
++ kfree(flash);
++ return 0;
++}
++
++static const struct i2c_device_id adp1653_id_table[] = {
++ { ADP1653_NAME, 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, adp1653_id_table);
++
++static struct i2c_driver adp1653_i2c_driver = {
++ .driver = {
++ .name = ADP1653_NAME,
++ },
++ .probe = adp1653_probe,
++ .remove = __exit_p(adp1653_remove),
++ .suspend = adp1653_suspend,
++ .resume = adp1653_resume,
++ .id_table = adp1653_id_table,
++};
++
++static int __init adp1653_init(void)
++{
++ int rval;
++
++ rval = i2c_add_driver(&adp1653_i2c_driver);
++ if (rval)
++ printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__);
++
++ return rval;
++}
++
++static void __exit adp1653_exit(void)
++{
++ i2c_del_driver(&adp1653_i2c_driver);
++}
++
++module_init(adp1653_init);
++module_exit(adp1653_exit);
++
++MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
++MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/et8ek8.c b/drivers/media/video/et8ek8.c
+new file mode 100644
+index 0000000..1f4cf9a
+--- /dev/null
++++ b/drivers/media/video/et8ek8.c
+@@ -0,0 +1,1082 @@
++/*
++ * drivers/media/video/et8ek8.c
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * Based on code from Toni Leinonen <toni.leinonen@offcode.fi>.
++ *
++ * This driver is based on the Micron MT9T012 camera imager driver
++ * (C) Texas Instruments.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/firmware.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/v4l2-mediabus.h>
++
++#include <media/smiaregs.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-subdev.h>
++
++#include "et8ek8.h"
++
++#define ET8EK8_XCLK_HZ 9600000
++
++#define CTRL_GAIN 0
++#define CTRL_EXPOSURE 1
++#define CTRL_TEST_PATTERN 2
++
++#define CID_TO_CTRL(id) ((id)==V4L2_CID_GAIN ? CTRL_GAIN : \
++ (id)==V4L2_CID_EXPOSURE ? CTRL_EXPOSURE : \
++ (id)==V4L2_CID_TEST_PATTERN ? CTRL_TEST_PATTERN : \
++ -EINVAL)
++
++enum et8ek8_versions {
++ ET8EK8_REV_1 = 0x0001,
++ ET8EK8_REV_2,
++};
++
++/*
++ * This table describes what should be written to the sensor register
++ * for each gain value. The gain(index in the table) is in terms of
++ * 0.1EV, i.e. 10 indexes in the table give 2 time more gain [0] in
++ * the *analog gain, [1] in the digital gain
++ *
++ * Analog gain [dB] = 20*log10(regvalue/32); 0x20..0x100
++ */
++static struct et8ek8_gain {
++ u16 analog;
++ u16 digital;
++} const et8ek8_gain_table[] = {
++ { 32, 0}, /* x1 */
++ { 34, 0},
++ { 37, 0},
++ { 39, 0},
++ { 42, 0},
++ { 45, 0},
++ { 49, 0},
++ { 52, 0},
++ { 56, 0},
++ { 60, 0},
++ { 64, 0}, /* x2 */
++ { 69, 0},
++ { 74, 0},
++ { 79, 0},
++ { 84, 0},
++ { 91, 0},
++ { 97, 0},
++ {104, 0},
++ {111, 0},
++ {119, 0},
++ {128, 0}, /* x4 */
++ {137, 0},
++ {147, 0},
++ {158, 0},
++ {169, 0},
++ {181, 0},
++ {194, 0},
++ {208, 0},
++ {223, 0},
++ {239, 0},
++ {256, 0}, /* x8 */
++ {256, 73},
++ {256, 152},
++ {256, 236},
++ {256, 327},
++ {256, 424},
++ {256, 528},
++ {256, 639},
++ {256, 758},
++ {256, 886},
++ {256, 1023}, /* x16 */
++};
++
++/* Register definitions */
++#define REG_REVISION_NUMBER_L 0x1200
++#define REG_REVISION_NUMBER_H 0x1201
++
++#define PRIV_MEM_START_REG 0x0008
++#define PRIV_MEM_WIN_SIZE 8
++
++#define ET8EK8_I2C_DELAY 3 /* msec delay b/w accesses */
++
++#define USE_CRC 1
++
++/* Called to change the V4L2 gain control value. This function
++ * rounds and clamps the given value and updates the V4L2 control value.
++ * If power is on, also updates the sensor analog and digital gains.
++ * gain is in 0.1 EV (exposure value) units.
++ */
++static int et8ek8_set_gain(struct et8ek8_sensor *sensor, s32 gain)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
++ struct et8ek8_gain new;
++ int r;
++
++ sensor->controls[CTRL_GAIN].value = clamp(gain,
++ sensor->controls[CTRL_GAIN].minimum,
++ sensor->controls[CTRL_GAIN].maximum);
++
++ if (!sensor->power)
++ return 0;
++
++ new = et8ek8_gain_table[sensor->controls[CTRL_GAIN].value];
++
++ /* FIXME: optimise I2C writes! */
++ r = smia_i2c_write_reg(client, SMIA_REG_8BIT,
++ 0x124a, new.analog >> 8);
++ if (r)
++ return r;
++ r = smia_i2c_write_reg(client, SMIA_REG_8BIT,
++ 0x1249, new.analog & 0xff);
++ if (r)
++ return r;
++
++ r = smia_i2c_write_reg(client, SMIA_REG_8BIT,
++ 0x124d, new.digital >> 8);
++ if (r)
++ return r;
++ r = smia_i2c_write_reg(client, SMIA_REG_8BIT,
++ 0x124c, new.digital & 0xff);
++
++ return r;
++}
++
++/* Called to change the V4L2 exposure control value. This function
++ * rounds and clamps the given value and updates the V4L2 control value.
++ * If power is on, also update the sensor exposure time.
++ * exptime is in microseconds.
++ */
++static int et8ek8_set_exposure(struct et8ek8_sensor *sensor, s32 exptime)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
++ unsigned int clock; /* Pixel clock in Hz>>10 fixed point */
++ unsigned int rt; /* Row time in .8 fixed point */
++ unsigned int rows; /* Exposure value as written to HW (ie. rows) */
++
++ exptime = clamp(exptime, sensor->controls[CTRL_EXPOSURE].minimum,
++ sensor->controls[CTRL_EXPOSURE].maximum);
++
++ /* Assume that the maximum exposure time is at most ~8 s,
++ * and the maximum width (with blanking) ~8000 pixels.
++ * The formula here is in principle as simple as
++ * rows = exptime / 1e6 / width * pixel_clock
++ * but to get accurate results while coping with value ranges,
++ * have to do some fixed point math.
++ */
++ clock = sensor->current_reglist->mode.pixel_clock;
++ clock = (clock + (1 << 9)) >> 10;
++ rt = sensor->current_reglist->mode.width * (1000000 >> 2);
++ rt = (rt + (clock >> 1)) / clock;
++ rows = ((exptime << 8) + (rt >> 1)) / rt;
++
++ /* Set the V4L2 control for exposure time to the rounded value */
++ sensor->controls[CTRL_EXPOSURE].value = (rt * rows + (1 << 7)) >> 8;
++
++ if (!sensor->power)
++ return 0;
++
++ return smia_i2c_write_reg(client, SMIA_REG_16BIT, 0x1243, swab16(rows));
++}
++
++static int et8ek8_set_test_pattern(struct et8ek8_sensor *sensor, s32 mode)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
++ int cbh_mode, cbv_mode, tp_mode, din_sw, r1420, rval;
++
++ if (mode < 0 || mode > 8)
++ return -EINVAL;
++
++ sensor->controls[CTRL_TEST_PATTERN].value = mode;
++
++ if (!sensor->power)
++ return 0;
++
++ /* Values for normal mode */
++ cbh_mode = 0;
++ cbv_mode = 0;
++ tp_mode = 0;
++ din_sw = 0x00;
++ r1420 = 0xF0;
++
++ if (mode != 0) {
++ /* Test pattern mode */
++ if (mode < 5) {
++ cbh_mode = 1;
++ cbv_mode = 1;
++ tp_mode = mode + 3;
++ } else {
++ cbh_mode = 0;
++ cbv_mode = 0;
++ tp_mode = mode - 4 + 3;
++ }
++ din_sw = 0x01;
++ r1420 = 0xE0;
++ }
++
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x111B, tp_mode << 4);
++ if (rval)
++ goto out;
++
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x1121, cbh_mode << 7);
++ if (rval)
++ goto out;
++
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x1124, cbv_mode << 7);
++ if (rval)
++ goto out;
++
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x112C, din_sw);
++ if (rval)
++ goto out;
++
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x1420, r1420);
++ if (rval)
++ goto out;
++
++out:
++ return rval;
++
++}
++
++static int et8ek8_update_controls(struct v4l2_subdev *subdev)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ unsigned int rt; /* Row time in us */
++ unsigned int clock; /* Pixel clock in Hz>>2 fixed point */
++ int i;
++
++ if (sensor->current_reglist->mode.pixel_clock <= 0 ||
++ sensor->current_reglist->mode.width <= 0) {
++ dev_err(&client->dev, "bad firmware\n");
++ return -EIO;
++ }
++
++ clock = sensor->current_reglist->mode.pixel_clock;
++ clock = (clock + (1 << 1)) >> 2;
++ rt = sensor->current_reglist->mode.width * (1000000 >> 2);
++ rt = (rt + (clock >> 1)) / clock;
++
++ sensor->controls[CTRL_EXPOSURE].minimum = rt;
++ sensor->controls[CTRL_EXPOSURE].maximum =
++ sensor->current_reglist->mode.max_exp * rt;
++ sensor->controls[CTRL_EXPOSURE].step = rt;
++ sensor->controls[CTRL_EXPOSURE].default_value =
++ sensor->controls[CTRL_EXPOSURE].maximum;
++ if (sensor->controls[CTRL_EXPOSURE].value == 0)
++ sensor->controls[CTRL_EXPOSURE].value =
++ sensor->controls[CTRL_EXPOSURE].maximum;
++
++ /* Adjust V4L2 control values and write them to the sensor */
++
++ for (i=0; i<ARRAY_SIZE(sensor->controls); i++) {
++ int rval = sensor->controls[i].set(sensor,
++ sensor->controls[i].value);
++ if (rval)
++ return rval;
++ }
++ return 0;
++}
++
++static int et8ek8_configure(struct v4l2_subdev *subdev)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ int rval;
++
++ rval = et8ek8_update_controls(subdev);
++ if (rval)
++ goto fail;
++
++ rval = smia_i2c_write_regs(client, sensor->current_reglist->regs);
++ if (rval)
++ goto fail;
++
++ rval = sensor->platform_data->configure_interface(
++ subdev, &sensor->current_reglist->mode);
++ if (rval)
++ goto fail;
++
++ return 0;
++
++fail:
++ dev_err(&client->dev, "sensor configuration failed\n");
++ return rval;
++}
++
++static int et8ek8_s_stream(struct v4l2_subdev *subdev, int streaming)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++
++ if (streaming)
++ return smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x1252, 0xB0);
++ else
++ return smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x1252, 0x30);
++}
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++static int et8ek8_power_off(struct v4l2_subdev *subdev)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ int rval;
++
++ rval = sensor->platform_data->s_power(subdev, 0);
++ if (rval)
++ return rval;
++ udelay(1);
++ rval = sensor->platform_data->set_xclk(subdev, 0);
++ return rval;
++}
++
++static int et8ek8_power_on(struct v4l2_subdev *subdev)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ unsigned int hz = ET8EK8_XCLK_HZ;
++ int val, rval;
++
++ if (sensor->current_reglist)
++ hz = sensor->current_reglist->mode.ext_clock;
++
++ rval = sensor->platform_data->set_xclk(subdev, hz);
++ if (rval)
++ goto out;
++
++ udelay(10); /* I wish this is a good value */
++
++ rval = sensor->platform_data->s_power(subdev, 1);
++ if (rval)
++ goto out;
++
++ msleep(5000*1000/hz+1); /* Wait 5000 cycles */
++
++ if (sensor->meta_reglist) {
++ rval = smia_i2c_reglist_find_write(client,
++ sensor->meta_reglist,
++ SMIA_REGLIST_POWERON);
++ if (rval)
++ goto out;
++ }
++
++#ifdef USE_CRC
++ rval = smia_i2c_read_reg(client,
++ SMIA_REG_8BIT, 0x1263, &val);
++ if (rval)
++ goto out;
++#if USE_CRC
++ val |= (1<<4);
++#else
++ val &= ~(1<<4);
++#endif
++ rval = smia_i2c_write_reg(client,
++ SMIA_REG_8BIT, 0x1263, val);
++ if (rval)
++ goto out;
++#endif
++
++out:
++ if (rval)
++ et8ek8_power_off(subdev);
++
++ return rval;
++}
++
++static struct v4l2_queryctrl et8ek8_ctrls[] = {
++ {
++ .id = V4L2_CID_GAIN,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Gain [0.1 EV]",
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++ {
++ .id = V4L2_CID_EXPOSURE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Exposure time [us]",
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++ {
++ .id = V4L2_CID_TEST_PATTERN,
++ .type = V4L2_CTRL_TYPE_MENU,
++ .name = "Test pattern mode",
++ .flags = 0,
++ .minimum = 0,
++ .maximum = 8,
++ .step = 1,
++ .default_value = 0,
++ },
++};
++
++static const __u32 et8ek8_mode_ctrls[] = {
++ V4L2_CID_MODE_FRAME_WIDTH,
++ V4L2_CID_MODE_FRAME_HEIGHT,
++ V4L2_CID_MODE_VISIBLE_WIDTH,
++ V4L2_CID_MODE_VISIBLE_HEIGHT,
++ V4L2_CID_MODE_PIXELCLOCK,
++ V4L2_CID_MODE_SENSITIVITY,
++ V4L2_CID_MODE_OPSYSCLOCK,
++};
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev video operations
++ */
++static int et8ek8_enum_mbus_code(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ return smia_reglist_enum_mbus_code(sensor->meta_reglist, code);
++}
++
++static int et8ek8_enum_frame_size(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ return smia_reglist_enum_frame_size(sensor->meta_reglist, fse);
++}
++
++static int et8ek8_enum_frame_ival(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_interval_enum *fie)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ return smia_reglist_enum_frame_ival(sensor->meta_reglist, fie);
++}
++
++static struct v4l2_mbus_framefmt *
++__et8ek8_get_pad_format(struct et8ek8_sensor *sensor, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ switch (which) {
++ case V4L2_SUBDEV_FORMAT_PROBE:
++ return v4l2_subdev_get_probe_format(fh, pad);
++ case V4L2_SUBDEV_FORMAT_ACTIVE:
++ return &sensor->format;
++ default:
++ return NULL;
++ }
++}
++
++static int et8ek8_get_pad_format(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __et8ek8_get_pad_format(sensor, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ *fmt = *format;
++ return 0;
++}
++
++static int et8ek8_set_pad_format(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct v4l2_mbus_framefmt *format;
++ struct smia_reglist *reglist;
++
++ format = __et8ek8_get_pad_format(sensor, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ reglist = smia_reglist_find_mode_fmt(sensor->meta_reglist, fmt);
++ smia_reglist_to_mbus(reglist, fmt);
++ *format = *fmt;
++
++ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
++ sensor->current_reglist = reglist;
++
++ return 0;
++}
++
++static int et8ek8_get_frame_interval(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_frame_interval *fi)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ memset(fi, 0, sizeof(*fi));
++ fi->interval = sensor->current_reglist->mode.timeperframe;
++
++ return 0;
++}
++
++static int et8ek8_set_frame_interval(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_frame_interval *fi)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct smia_reglist *reglist;
++
++ reglist = smia_reglist_find_mode_ival(sensor->meta_reglist,
++ sensor->current_reglist,
++ &fi->interval);
++
++ if (!reglist)
++ return -EINVAL;
++
++ if (sensor->power &&
++ sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock)
++ return -EINVAL;
++
++ sensor->current_reglist = reglist;
++
++ return et8ek8_update_controls(subdev);
++}
++
++static int et8ek8_g_priv_mem(struct v4l2_subdev *subdev)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ unsigned int length = ET8EK8_PRIV_MEM_SIZE;
++ unsigned int offset = 0;
++ u8 *ptr = sensor->priv_mem;
++ int rval = 0;
++
++ /* Read the EEPROM window-by-window, each window 8 bytes */
++ do {
++ u8 buffer[PRIV_MEM_WIN_SIZE];
++ struct i2c_msg msg;
++ int bytes, i;
++ int ofs;
++
++ /* Set the current window */
++ rval = smia_i2c_write_reg(client,
++ SMIA_REG_8BIT,
++ 0x0001,
++ 0xe0 | (offset >> 3));
++ if (rval < 0)
++ goto out;
++
++ /* Wait for status bit */
++ for (i = 0; i < 1000; ++i) {
++ u32 status;
++ rval = smia_i2c_read_reg(client,
++ SMIA_REG_8BIT,
++ 0x0003,
++ &status);
++ if (rval < 0)
++ goto out;
++ if ((status & 0x08) == 0)
++ break;
++ msleep(1);
++ };
++
++ if (i == 1000) {
++ rval = -EIO;
++ goto out;
++ }
++
++ /* Read window, 8 bytes at once, and copy to user space */
++ ofs = offset & 0x07; /* Offset within this window */
++ bytes = length + ofs > 8 ? 8-ofs : length;
++ msg.addr = client->addr;
++ msg.flags = 0;
++ msg.len = 2;
++ msg.buf = buffer;
++ ofs += PRIV_MEM_START_REG;
++ buffer[0] = (u8)(ofs >> 8);
++ buffer[1] = (u8)(ofs & 0xFF);
++ rval = i2c_transfer(client->adapter, &msg, 1);
++ if (rval < 0)
++ goto out;
++ mdelay(ET8EK8_I2C_DELAY);
++ msg.addr = client->addr;
++ msg.len = bytes;
++ msg.flags = I2C_M_RD;
++ msg.buf = buffer;
++ memset(buffer, 0, sizeof(buffer));
++ rval = i2c_transfer(client->adapter, &msg, 1);
++ if (rval < 0)
++ goto out;
++ rval = 0;
++ memcpy(ptr, buffer, bytes);
++
++ length -= bytes;
++ offset += bytes;
++ ptr += bytes;
++ } while (length > 0);
++
++out:
++ return rval;
++}
++
++static int et8ek8_dev_init(struct v4l2_subdev *subdev)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ char name[SMIA_MAX_LEN];
++ int rval, rev_l, rev_h;
++
++ rval = et8ek8_power_on(subdev);
++ if (rval)
++ return -ENODEV;
++
++ if (smia_i2c_read_reg(client, SMIA_REG_8BIT,
++ REG_REVISION_NUMBER_L, &rev_l) != 0
++ || smia_i2c_read_reg(client, SMIA_REG_8BIT,
++ REG_REVISION_NUMBER_H, &rev_h) != 0) {
++ dev_err(&client->dev,
++ "no et8ek8 sensor detected\n");
++ rval = -ENODEV;
++ goto out_poweroff;
++ }
++ sensor->version = (rev_h << 8) + rev_l;
++ if (sensor->version != ET8EK8_REV_1
++ && sensor->version != ET8EK8_REV_2)
++ dev_info(&client->dev,
++ "unknown version 0x%x detected, "
++ "continuing anyway\n", sensor->version);
++
++ snprintf(name, sizeof(name), "%s-%4.4x.bin", ET8EK8_NAME,
++ sensor->version);
++ if (request_firmware(&sensor->fw, name,
++ &client->dev)) {
++ dev_err(&client->dev,
++ "can't load firmware %s\n", name);
++ rval = -ENODEV;
++ goto out_poweroff;
++ }
++ sensor->meta_reglist =
++ (struct smia_meta_reglist *)sensor->fw->data;
++ rval = smia_reglist_import(sensor->meta_reglist);
++ if (rval) {
++ dev_err(&client->dev,
++ "invalid register list %s, import failed\n",
++ name);
++ goto out_release;
++ }
++
++ sensor->current_reglist =
++ smia_reglist_find_type(sensor->meta_reglist,
++ SMIA_REGLIST_MODE);
++ if (!sensor->current_reglist) {
++ dev_err(&client->dev,
++ "invalid register list %s, no mode found\n",
++ name);
++ rval = -ENODEV;
++ goto out_release;
++ }
++
++ rval = smia_i2c_reglist_find_write(client,
++ sensor->meta_reglist,
++ SMIA_REGLIST_POWERON);
++ if (rval) {
++ dev_err(&client->dev,
++ "invalid register list %s, no POWERON mode found\n",
++ name);
++ goto out_release;
++ }
++ rval = et8ek8_s_stream(subdev, 1); /* Needed to be able to read EEPROM */
++ if (rval)
++ goto out_release;
++ rval = et8ek8_g_priv_mem(subdev);
++ if (rval)
++ dev_warn(&client->dev,
++ "can not read OTP (EEPROM) memory from sensor\n");
++ rval = et8ek8_s_stream(subdev, 0);
++ if (rval)
++ goto out_release;
++
++ rval = et8ek8_power_off(subdev);
++ if (rval)
++ goto out_release;
++
++ return 0;
++
++out_release:
++ release_firmware(sensor->fw);
++out_poweroff:
++ sensor->meta_reglist = NULL;
++ sensor->fw = NULL;
++ et8ek8_power_off(subdev);
++
++ return rval;
++}
++
++/* --------------------------------------------------------------------------
++ * sysfs attributes
++ */
++static ssize_t
++et8ek8_priv_mem_read(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++#if PAGE_SIZE < ET8EK8_PRIV_MEM_SIZE
++#error PAGE_SIZE too small!
++#endif
++
++ memcpy(buf, sensor->priv_mem, ET8EK8_PRIV_MEM_SIZE);
++
++ return ET8EK8_PRIV_MEM_SIZE;
++}
++static DEVICE_ATTR(priv_mem, S_IRUGO, et8ek8_priv_mem_read, NULL);
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev core operations
++ */
++static int
++et8ek8_get_chip_ident(struct v4l2_subdev *subdev,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ET8EK8, 0);
++}
++
++static int
++et8ek8_set_config(struct v4l2_subdev *subdev, int irq, void *platform_data)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ struct v4l2_mbus_framefmt *format;
++ int rval;
++
++ if (platform_data == NULL)
++ return -ENODEV;
++
++ sensor->platform_data = platform_data;
++
++ if (device_create_file(&client->dev, &dev_attr_priv_mem) != 0) {
++ dev_err(&client->dev, "could not register sysfs entry\n");
++ return -EBUSY;
++ }
++
++ /* Gain is initialized here permanently */
++ sensor->controls[CTRL_GAIN].minimum = 0;
++ sensor->controls[CTRL_GAIN].maximum = ARRAY_SIZE(et8ek8_gain_table) - 1;
++ sensor->controls[CTRL_GAIN].step = 1;
++ sensor->controls[CTRL_GAIN].default_value = 0;
++ sensor->controls[CTRL_GAIN].value = 0;
++ sensor->controls[CTRL_GAIN].set = et8ek8_set_gain;
++
++ /* Exposure parameters may change at each mode change, just zero here */
++ sensor->controls[CTRL_EXPOSURE].minimum = 0;
++ sensor->controls[CTRL_EXPOSURE].maximum = 0;
++ sensor->controls[CTRL_EXPOSURE].step = 0;
++ sensor->controls[CTRL_EXPOSURE].default_value = 0;
++ sensor->controls[CTRL_EXPOSURE].value = 0;
++ sensor->controls[CTRL_EXPOSURE].set = et8ek8_set_exposure;
++
++ /* Test pattern mode control */
++ sensor->controls[CTRL_TEST_PATTERN].minimum =
++ et8ek8_ctrls[CTRL_TEST_PATTERN].minimum;
++ sensor->controls[CTRL_TEST_PATTERN].maximum =
++ et8ek8_ctrls[CTRL_TEST_PATTERN].maximum;
++ sensor->controls[CTRL_TEST_PATTERN].step =
++ et8ek8_ctrls[CTRL_TEST_PATTERN].step;
++ sensor->controls[CTRL_TEST_PATTERN].default_value =
++ et8ek8_ctrls[CTRL_TEST_PATTERN].default_value;
++ sensor->controls[CTRL_TEST_PATTERN].value = 0;
++ sensor->controls[CTRL_TEST_PATTERN].set = et8ek8_set_test_pattern;
++
++ rval = et8ek8_dev_init(subdev);
++ if (rval)
++ return rval;
++
++ format = __et8ek8_get_pad_format(sensor, NULL, 0,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ return 0;
++}
++
++static int et8ek8_query_ctrl(struct v4l2_subdev *subdev,
++ struct v4l2_queryctrl *a)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ int rval, ctrl;
++
++ rval = smia_ctrl_query(et8ek8_ctrls, ARRAY_SIZE(et8ek8_ctrls), a);
++ if (rval) {
++ return smia_mode_query(et8ek8_mode_ctrls,
++ ARRAY_SIZE(et8ek8_mode_ctrls), a);
++ }
++
++ ctrl = CID_TO_CTRL(a->id);
++ if (ctrl < 0)
++ return ctrl;
++
++ a->minimum = sensor->controls[ctrl].minimum;
++ a->maximum = sensor->controls[ctrl].maximum;
++ a->step = sensor->controls[ctrl].step;
++ a->default_value = sensor->controls[ctrl].default_value;
++
++ return 0;
++}
++
++static int et8ek8_query_menu(struct v4l2_subdev *subdev,
++ struct v4l2_querymenu *qm)
++{
++ static const char *menu_name[] = {
++ "Normal",
++ "Vertical colorbar",
++ "Horizontal colorbar",
++ "Scale",
++ "Ramp",
++ "Small vertical colorbar",
++ "Small horizontal colorbar",
++ "Small scale",
++ "Small ramp",
++ };
++
++ switch (qm->id) {
++ case V4L2_CID_TEST_PATTERN:
++ if (qm->index >= ARRAY_SIZE(menu_name))
++ return -EINVAL;
++ strcpy(qm->name, menu_name[qm->index]);
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int et8ek8_get_ctrl(struct v4l2_subdev *subdev,
++ struct v4l2_control *vc)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ int ctrl;
++
++ int rval = smia_mode_g_ctrl(et8ek8_mode_ctrls,
++ ARRAY_SIZE(et8ek8_mode_ctrls),
++ vc, &sensor->current_reglist->mode);
++ if (rval == 0)
++ return 0;
++
++ ctrl = CID_TO_CTRL(vc->id);
++ if (ctrl < 0)
++ return ctrl;
++ vc->value = sensor->controls[ctrl].value;
++ return 0;
++}
++
++static int et8ek8_set_ctrl(struct v4l2_subdev *subdev,
++ struct v4l2_control *vc)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ int ctrl = CID_TO_CTRL(vc->id);
++ if (ctrl < 0)
++ return ctrl;
++ return sensor->controls[ctrl].set(sensor, vc->value);
++}
++
++static int et8ek8_set_power(struct v4l2_subdev *subdev, int on)
++{
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++ int rval = 0;
++
++ /* If we are already in this mode, do nothing */
++ if (sensor->power == on)
++ return 0;
++
++ /* Disable power if so requested (it was enabled) */
++ if (!on) {
++ rval = et8ek8_power_off(subdev);
++ goto out;
++ }
++
++ /* Either STANDBY or ON requested */
++
++ /* Enable power and move to standby if it was off */
++ if (on) {
++ rval = et8ek8_power_on(subdev);
++ if (rval)
++ goto out;
++ }
++
++ /* Now sensor is powered (standby or streaming) */
++
++ if (on) {
++ /* Standby -> streaming */
++ rval = et8ek8_configure(subdev);
++ if (rval) {
++ et8ek8_power_off(subdev);
++ goto out;
++ }
++ }
++
++out:
++ if (rval == 0)
++ sensor->power = on;
++
++ return rval;
++}
++
++static const struct v4l2_subdev_video_ops et8ek8_video_ops = {
++ .s_stream = et8ek8_s_stream,
++ .g_frame_interval = et8ek8_get_frame_interval,
++ .s_frame_interval = et8ek8_set_frame_interval,
++};
++
++static const struct v4l2_subdev_core_ops et8ek8_core_ops = {
++ .g_chip_ident = et8ek8_get_chip_ident,
++ .s_config = et8ek8_set_config,
++ .queryctrl = et8ek8_query_ctrl,
++ .querymenu = et8ek8_query_menu,
++ .g_ctrl = et8ek8_get_ctrl,
++ .s_ctrl = et8ek8_set_ctrl,
++ .s_power = et8ek8_set_power,
++};
++
++static const struct v4l2_subdev_pad_ops et8ek8_pad_ops = {
++ .enum_mbus_code = et8ek8_enum_mbus_code,
++ .enum_frame_size = et8ek8_enum_frame_size,
++ .enum_frame_interval = et8ek8_enum_frame_ival,
++ .get_fmt = et8ek8_get_pad_format,
++ .set_fmt = et8ek8_set_pad_format,
++};
++
++static const struct v4l2_subdev_ops et8ek8_ops = {
++ .core = &et8ek8_core_ops,
++ .video = &et8ek8_video_ops,
++ .pad = &et8ek8_pad_ops,
++};
++
++/* --------------------------------------------------------------------------
++ * I2C driver
++ */
++#ifdef CONFIG_PM
++
++static int et8ek8_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ if (!sensor->power)
++ return 0;
++
++ return et8ek8_set_power(subdev, 0);
++}
++
++static int et8ek8_resume(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ if (!sensor->power)
++ return 0;
++
++ sensor->power = 0;
++ return et8ek8_set_power(subdev, 1);
++}
++
++#else
++
++#define et8ek8_suspend NULL
++#define et8ek8_resume NULL
++
++#endif /* CONFIG_PM */
++
++static const struct media_entity_operations et8ek8_entity_ops = {
++ .set_power = v4l2_subdev_set_power,
++};
++
++static int et8ek8_probe(struct i2c_client *client,
++ const struct i2c_device_id *devid)
++{
++ struct et8ek8_sensor *sensor;
++ int ret;
++
++ sensor = kzalloc(sizeof(*sensor), GFP_KERNEL);
++ if (sensor == NULL)
++ return -ENOMEM;
++
++ v4l2_i2c_subdev_init(&sensor->subdev, client, &et8ek8_ops);
++ sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ sensor->pad.type = MEDIA_PAD_TYPE_OUTPUT;
++ sensor->subdev.entity.ops = &et8ek8_entity_ops;
++ ret = media_entity_init(&sensor->subdev.entity, 1, &sensor->pad, 0);
++ if (ret < 0)
++ kfree(sensor);
++
++ return ret;
++}
++
++static int __exit et8ek8_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
++
++ v4l2_device_unregister_subdev(&sensor->subdev);
++ device_remove_file(&client->dev, &dev_attr_priv_mem);
++ media_entity_cleanup(&sensor->subdev.entity);
++ release_firmware(sensor->fw);
++ kfree(sensor);
++ return 0;
++}
++
++static const struct i2c_device_id et8ek8_id_table[] = {
++ { ET8EK8_NAME, 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
++
++static struct i2c_driver et8ek8_i2c_driver = {
++ .driver = {
++ .name = ET8EK8_NAME,
++ },
++ .probe = et8ek8_probe,
++ .remove = __exit_p(et8ek8_remove),
++ .suspend = et8ek8_suspend,
++ .resume = et8ek8_resume,
++ .id_table = et8ek8_id_table,
++};
++
++static int __init et8ek8_init(void)
++{
++ int rval;
++
++ rval = i2c_add_driver(&et8ek8_i2c_driver);
++ if (rval)
++ printk(KERN_INFO "%s: failed registering " ET8EK8_NAME "\n",
++ __func__);
++
++ return rval;
++}
++
++static void __exit et8ek8_exit(void)
++{
++ i2c_del_driver(&et8ek8_i2c_driver);
++}
++
++module_init(et8ek8_init);
++module_exit(et8ek8_exit);
++
++MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
++MODULE_DESCRIPTION("Toshiba ET8EK8 camera sensor driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/et8ek8.h b/drivers/media/video/et8ek8.h
+new file mode 100644
+index 0000000..11f12db
+--- /dev/null
++++ b/drivers/media/video/et8ek8.h
+@@ -0,0 +1,79 @@
++/*
++ * drivers/media/video/et8ek8.h
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef ET8EK8_H
++#define ET8EK8_H
++
++#include <linux/i2c.h>
++#include <media/smiaregs.h>
++#include <media/v4l2-subdev.h>
++#include <media/media-entity.h>
++
++#define ET8EK8_NAME "et8ek8"
++#define ET8EK8_I2C_ADDR (0x7C >> 1)
++
++#define ET8EK8_PRIV_MEM_SIZE 128
++#define ET8EK8_NCTRLS 3
++
++struct et8ek8_platform_data {
++ int (*g_priv)(struct v4l2_subdev *subdev, void *priv);
++ int (*configure_interface)(struct v4l2_subdev *subdev,
++ struct smia_mode *mode);
++ int (*set_xclk)(struct v4l2_subdev *subdev, int hz);
++ int (*s_power)(struct v4l2_subdev *subdev, int on);
++};
++
++struct et8ek8_sensor;
++
++/* Current values for V4L2 controls */
++struct et8ek8_control {
++ s32 minimum;
++ s32 maximum;
++ s32 step;
++ s32 default_value;
++ s32 value;
++ int (*set)(struct et8ek8_sensor *sensor, s32 value);
++};
++
++#define to_et8ek8_sensor(sd) container_of(sd, struct et8ek8_sensor, subdev)
++
++struct et8ek8_sensor {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pad;
++ struct v4l2_mbus_framefmt format;
++ struct et8ek8_platform_data *platform_data;
++
++ u16 version;
++
++ struct et8ek8_control controls[ET8EK8_NCTRLS];
++
++ struct smia_reglist *current_reglist;
++
++ const struct firmware *fw;
++ struct smia_meta_reglist *meta_reglist;
++ u8 priv_mem[ET8EK8_PRIV_MEM_SIZE];
++
++ int power : 1;
++};
++
++#endif /* ET8EK8_H */
+diff --git a/drivers/media/video/isp/Makefile b/drivers/media/video/isp/Makefile
+new file mode 100644
+index 0000000..b1b3447
+--- /dev/null
++++ b/drivers/media/video/isp/Makefile
+@@ -0,0 +1,13 @@
++# Makefile for OMAP3 ISP driver
++
++ifdef CONFIG_VIDEO_OMAP3_DEBUG
++EXTRA_CFLAGS += -DDEBUG
++endif
++
++omap3-isp-objs += \
++ isp.o ispqueue.o ispvideo.o \
++ ispcsiphy.o ispccp2.o ispcsi2.o \
++ ispccdc.o isppreview.o ispresizer.o \
++ ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
++
++obj-$(CONFIG_VIDEO_OMAP3) += omap3-isp.o
+diff --git a/drivers/media/video/isp/bluegamma_table.h b/drivers/media/video/isp/bluegamma_table.h
+new file mode 100644
+index 0000000..301382a
+--- /dev/null
++++ b/drivers/media/video/isp/bluegamma_table.h
+@@ -0,0 +1,1040 @@
++/*
++ * bluegamma_table.h
++ *
++ * Gamma Table values for BLUE for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++0,
++0,
++1,
++2,
++3,
++3,
++4,
++5,
++6,
++8,
++10,
++12,
++14,
++16,
++18,
++20,
++22,
++23,
++25,
++26,
++28,
++29,
++31,
++32,
++34,
++35,
++36,
++37,
++39,
++40,
++41,
++42,
++43,
++44,
++45,
++46,
++47,
++48,
++49,
++50,
++51,
++52,
++52,
++53,
++54,
++55,
++56,
++57,
++58,
++59,
++60,
++61,
++62,
++63,
++63,
++64,
++65,
++66,
++66,
++67,
++68,
++69,
++69,
++70,
++71,
++72,
++72,
++73,
++74,
++75,
++75,
++76,
++77,
++78,
++78,
++79,
++80,
++81,
++81,
++82,
++83,
++84,
++84,
++85,
++86,
++87,
++88,
++88,
++89,
++90,
++91,
++91,
++92,
++93,
++94,
++94,
++95,
++96,
++97,
++97,
++98,
++98,
++99,
++99,
++100,
++100,
++101,
++101,
++102,
++103,
++104,
++104,
++105,
++106,
++107,
++108,
++108,
++109,
++110,
++111,
++111,
++112,
++113,
++114,
++114,
++115,
++116,
++117,
++117,
++118,
++119,
++119,
++120,
++120,
++121,
++121,
++122,
++122,
++123,
++123,
++124,
++124,
++125,
++125,
++126,
++126,
++127,
++127,
++128,
++128,
++129,
++129,
++130,
++130,
++131,
++131,
++132,
++132,
++133,
++133,
++134,
++134,
++135,
++135,
++136,
++136,
++137,
++137,
++138,
++138,
++139,
++139,
++140,
++140,
++141,
++141,
++142,
++142,
++143,
++143,
++144,
++144,
++145,
++145,
++146,
++146,
++147,
++147,
++148,
++148,
++149,
++149,
++150,
++150,
++151,
++151,
++152,
++152,
++153,
++153,
++153,
++153,
++154,
++154,
++154,
++154,
++155,
++155,
++156,
++156,
++157,
++157,
++158,
++158,
++158,
++159,
++159,
++159,
++160,
++160,
++160,
++161,
++161,
++162,
++162,
++163,
++163,
++164,
++164,
++164,
++164,
++165,
++165,
++165,
++165,
++166,
++166,
++167,
++167,
++168,
++168,
++169,
++169,
++170,
++170,
++170,
++170,
++171,
++171,
++171,
++171,
++172,
++172,
++173,
++173,
++174,
++174,
++175,
++175,
++176,
++176,
++176,
++176,
++177,
++177,
++177,
++177,
++178,
++178,
++178,
++178,
++179,
++179,
++179,
++179,
++180,
++180,
++180,
++180,
++181,
++181,
++181,
++181,
++182,
++182,
++182,
++182,
++183,
++183,
++183,
++183,
++184,
++184,
++184,
++184,
++185,
++185,
++185,
++185,
++186,
++186,
++186,
++186,
++187,
++187,
++187,
++187,
++188,
++188,
++188,
++188,
++189,
++189,
++189,
++189,
++190,
++190,
++190,
++190,
++191,
++191,
++191,
++191,
++192,
++192,
++192,
++192,
++193,
++193,
++193,
++193,
++194,
++194,
++194,
++194,
++195,
++195,
++195,
++195,
++196,
++196,
++196,
++196,
++197,
++197,
++197,
++197,
++198,
++198,
++198,
++198,
++199,
++199,
++199,
++199,
++200,
++200,
++200,
++200,
++201,
++201,
++201,
++201,
++202,
++202,
++202,
++203,
++203,
++203,
++203,
++204,
++204,
++204,
++204,
++205,
++205,
++205,
++205,
++206,
++206,
++206,
++206,
++207,
++207,
++207,
++207,
++208,
++208,
++208,
++208,
++209,
++209,
++209,
++209,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++212,
++212,
++212,
++212,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++214,
++214,
++214,
++214,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++216,
++216,
++216,
++216,
++217,
++217,
++217,
++217,
++218,
++218,
++218,
++218,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++220,
++220,
++220,
++220,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++222,
++222,
++222,
++222,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++224,
++224,
++224,
++224,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++226,
++226,
++226,
++226,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++228,
++228,
++228,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++230,
++230,
++230,
++230,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++233,
++233,
++233,
++233,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++235,
++235,
++235,
++235,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++237,
++237,
++237,
++237,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++239,
++239,
++239,
++239,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++241,
++241,
++241,
++241,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++243,
++243,
++243,
++243,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++245,
++245,
++245,
++245,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++247,
++247,
++247,
++247,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++249,
++249,
++249,
++249,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++251,
++251,
++251,
++251,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++254,
++254,
++254,
++254,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255
+diff --git a/drivers/media/video/isp/cfa_coef_table.h b/drivers/media/video/isp/cfa_coef_table.h
+new file mode 100644
+index 0000000..8cafa1f
+--- /dev/null
++++ b/drivers/media/video/isp/cfa_coef_table.h
+@@ -0,0 +1,603 @@
++/*
++ * cfa_coef_table.h
++ *
++ * Copyright (C) 2009 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * Written by Gjorgji Rosikopulos
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++244,
++0,
++247,
++0,
++12,
++27,
++36,
++247,
++250,
++0,
++27,
++0,
++4,
++250,
++12,
++244,
++248,
++0,
++0,
++0,
++0,
++40,
++0,
++0,
++244,
++12,
++250,
++4,
++0,
++27,
++0,
++250,
++247,
++36,
++27,
++12,
++0,
++247,
++0,
++244,
++0,
++0,
++40,
++0,
++0,
++0,
++0,
++248,
++244,
++0,
++247,
++0,
++12,
++27,
++36,
++247,
++250,
++0,
++27,
++0,
++4,
++250,
++12,
++244,
++248,
++0,
++0,
++0,
++0,
++40,
++0,
++0,
++244,
++12,
++250,
++4,
++0,
++27,
++0,
++250,
++247,
++36,
++27,
++12,
++0,
++247,
++0,
++244,
++0,
++0,
++40,
++0,
++0,
++0,
++0,
++248,
++244,
++0,
++247,
++0,
++12,
++27,
++36,
++247,
++250,
++0,
++27,
++0,
++4,
++250,
++12,
++244,
++248,
++0,
++0,
++0,
++0,
++40,
++0,
++0,
++244,
++12,
++250,
++4,
++0,
++27,
++0,
++250,
++247,
++36,
++27,
++12,
++0,
++247,
++0,
++244,
++0,
++0,
++40,
++0,
++0,
++0,
++0,
++248,
++0,
++247,
++0,
++244,
++247,
++36,
++27,
++12,
++0,
++27,
++0,
++250,
++244,
++12,
++250,
++4,
++0,
++0,
++0,
++248,
++0,
++0,
++40,
++0,
++4,
++250,
++12,
++244,
++250,
++0,
++27,
++0,
++12,
++27,
++36,
++247,
++244,
++0,
++247,
++0,
++0,
++40,
++0,
++0,
++248,
++0,
++0,
++0,
++0,
++247,
++0,
++244,
++247,
++36,
++27,
++12,
++0,
++27,
++0,
++250,
++244,
++12,
++250,
++4,
++0,
++0,
++0,
++248,
++0,
++0,
++40,
++0,
++4,
++250,
++12,
++244,
++250,
++0,
++27,
++0,
++12,
++27,
++36,
++247,
++244,
++0,
++247,
++0,
++0,
++40,
++0,
++0,
++248,
++0,
++0,
++0,
++0,
++247,
++0,
++244,
++247,
++36,
++27,
++12,
++0,
++27,
++0,
++250,
++244,
++12,
++250,
++4,
++0,
++0,
++0,
++248,
++0,
++0,
++40,
++0,
++4,
++250,
++12,
++244,
++250,
++0,
++27,
++0,
++12,
++27,
++36,
++247,
++244,
++0,
++247,
++0,
++0,
++40,
++0,
++0,
++248,
++0,
++0,
++0,
++4,
++250,
++12,
++244,
++250,
++0,
++27,
++0,
++12,
++27,
++36,
++247,
++244,
++0,
++247,
++0,
++0,
++0,
++0,
++248,
++0,
++0,
++40,
++0,
++0,
++247,
++0,
++244,
++247,
++36,
++27,
++12,
++0,
++27,
++0,
++250,
++244,
++12,
++250,
++4,
++0,
++40,
++0,
++0,
++248,
++0,
++0,
++0,
++4,
++250,
++12,
++244,
++250,
++0,
++27,
++0,
++12,
++27,
++36,
++247,
++244,
++0,
++247,
++0,
++0,
++0,
++0,
++248,
++0,
++0,
++40,
++0,
++0,
++247,
++0,
++244,
++247,
++36,
++27,
++12,
++0,
++27,
++0,
++250,
++244,
++12,
++250,
++4,
++0,
++40,
++0,
++0,
++248,
++0,
++0,
++0,
++4,
++250,
++12,
++244,
++250,
++0,
++27,
++0,
++12,
++27,
++36,
++247,
++244,
++0,
++247,
++0,
++0,
++0,
++0,
++248,
++0,
++0,
++40,
++0,
++0,
++247,
++0,
++244,
++247,
++36,
++27,
++12,
++0,
++27,
++0,
++250,
++244,
++12,
++250,
++4,
++0,
++40,
++0,
++0,
++248,
++0,
++0,
++0,
++244,
++12,
++250,
++4,
++0,
++27,
++0,
++250,
++247,
++36,
++27,
++12,
++0,
++247,
++0,
++244,
++248,
++0,
++0,
++0,
++0,
++40,
++0,
++0,
++244,
++0,
++247,
++0,
++12,
++27,
++36,
++247,
++250,
++0,
++27,
++0,
++4,
++250,
++12,
++244,
++0,
++0,
++40,
++0,
++0,
++0,
++0,
++248,
++244,
++12,
++250,
++4,
++0,
++27,
++0,
++250,
++247,
++36,
++27,
++12,
++0,
++247,
++0,
++244,
++248,
++0,
++0,
++0,
++0,
++40,
++0,
++0,
++244,
++0,
++247,
++0,
++12,
++27,
++36,
++247,
++250,
++0,
++27,
++0,
++4,
++250,
++12,
++244,
++0,
++0,
++40,
++0,
++0,
++0,
++0,
++248,
++244,
++12,
++250,
++4,
++0,
++27,
++0,
++250,
++247,
++36,
++27,
++12,
++0,
++247,
++0,
++244,
++248,
++0,
++0,
++0,
++0,
++40,
++0,
++0,
++244,
++0,
++247,
++0,
++12,
++27,
++36,
++247,
++250,
++0,
++27,
++0,
++4,
++250,
++12,
++244,
++0,
++0,
++40,
++0,
++0,
++0,
++0,
++248
++
+diff --git a/drivers/media/video/isp/greengamma_table.h b/drivers/media/video/isp/greengamma_table.h
+new file mode 100644
+index 0000000..0f5c5e4
+--- /dev/null
++++ b/drivers/media/video/isp/greengamma_table.h
+@@ -0,0 +1,1040 @@
++/*
++ * greengamma_table.h
++ *
++ * Gamma Table values for GREEN for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++0,
++0,
++1,
++2,
++3,
++3,
++4,
++5,
++6,
++8,
++10,
++12,
++14,
++16,
++18,
++20,
++22,
++23,
++25,
++26,
++28,
++29,
++31,
++32,
++34,
++35,
++36,
++37,
++39,
++40,
++41,
++42,
++43,
++44,
++45,
++46,
++47,
++48,
++49,
++50,
++51,
++52,
++52,
++53,
++54,
++55,
++56,
++57,
++58,
++59,
++60,
++61,
++62,
++63,
++63,
++64,
++65,
++66,
++66,
++67,
++68,
++69,
++69,
++70,
++71,
++72,
++72,
++73,
++74,
++75,
++75,
++76,
++77,
++78,
++78,
++79,
++80,
++81,
++81,
++82,
++83,
++84,
++84,
++85,
++86,
++87,
++88,
++88,
++89,
++90,
++91,
++91,
++92,
++93,
++94,
++94,
++95,
++96,
++97,
++97,
++98,
++98,
++99,
++99,
++100,
++100,
++101,
++101,
++102,
++103,
++104,
++104,
++105,
++106,
++107,
++108,
++108,
++109,
++110,
++111,
++111,
++112,
++113,
++114,
++114,
++115,
++116,
++117,
++117,
++118,
++119,
++119,
++120,
++120,
++121,
++121,
++122,
++122,
++123,
++123,
++124,
++124,
++125,
++125,
++126,
++126,
++127,
++127,
++128,
++128,
++129,
++129,
++130,
++130,
++131,
++131,
++132,
++132,
++133,
++133,
++134,
++134,
++135,
++135,
++136,
++136,
++137,
++137,
++138,
++138,
++139,
++139,
++140,
++140,
++141,
++141,
++142,
++142,
++143,
++143,
++144,
++144,
++145,
++145,
++146,
++146,
++147,
++147,
++148,
++148,
++149,
++149,
++150,
++150,
++151,
++151,
++152,
++152,
++153,
++153,
++153,
++153,
++154,
++154,
++154,
++154,
++155,
++155,
++156,
++156,
++157,
++157,
++158,
++158,
++158,
++159,
++159,
++159,
++160,
++160,
++160,
++161,
++161,
++162,
++162,
++163,
++163,
++164,
++164,
++164,
++164,
++165,
++165,
++165,
++165,
++166,
++166,
++167,
++167,
++168,
++168,
++169,
++169,
++170,
++170,
++170,
++170,
++171,
++171,
++171,
++171,
++172,
++172,
++173,
++173,
++174,
++174,
++175,
++175,
++176,
++176,
++176,
++176,
++177,
++177,
++177,
++177,
++178,
++178,
++178,
++178,
++179,
++179,
++179,
++179,
++180,
++180,
++180,
++180,
++181,
++181,
++181,
++181,
++182,
++182,
++182,
++182,
++183,
++183,
++183,
++183,
++184,
++184,
++184,
++184,
++185,
++185,
++185,
++185,
++186,
++186,
++186,
++186,
++187,
++187,
++187,
++187,
++188,
++188,
++188,
++188,
++189,
++189,
++189,
++189,
++190,
++190,
++190,
++190,
++191,
++191,
++191,
++191,
++192,
++192,
++192,
++192,
++193,
++193,
++193,
++193,
++194,
++194,
++194,
++194,
++195,
++195,
++195,
++195,
++196,
++196,
++196,
++196,
++197,
++197,
++197,
++197,
++198,
++198,
++198,
++198,
++199,
++199,
++199,
++199,
++200,
++200,
++200,
++200,
++201,
++201,
++201,
++201,
++202,
++202,
++202,
++203,
++203,
++203,
++203,
++204,
++204,
++204,
++204,
++205,
++205,
++205,
++205,
++206,
++206,
++206,
++206,
++207,
++207,
++207,
++207,
++208,
++208,
++208,
++208,
++209,
++209,
++209,
++209,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++212,
++212,
++212,
++212,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++214,
++214,
++214,
++214,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++216,
++216,
++216,
++216,
++217,
++217,
++217,
++217,
++218,
++218,
++218,
++218,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++220,
++220,
++220,
++220,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++222,
++222,
++222,
++222,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++224,
++224,
++224,
++224,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++226,
++226,
++226,
++226,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++228,
++228,
++228,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++230,
++230,
++230,
++230,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++233,
++233,
++233,
++233,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++235,
++235,
++235,
++235,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++237,
++237,
++237,
++237,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++239,
++239,
++239,
++239,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++241,
++241,
++241,
++241,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++243,
++243,
++243,
++243,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++245,
++245,
++245,
++245,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++247,
++247,
++247,
++247,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++249,
++249,
++249,
++249,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++251,
++251,
++251,
++251,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++254,
++254,
++254,
++254,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255
+diff --git a/drivers/media/video/isp/isp.c b/drivers/media/video/isp/isp.c
+new file mode 100644
+index 0000000..4dc11e2
+--- /dev/null
++++ b/drivers/media/video/isp/isp.c
+@@ -0,0 +1,1840 @@
++/*
++ * isp.c
++ *
++ * Driver Library for ISP Control module in TI's OMAP3 Camera ISP
++ * ISP interface and IRQ related APIs are defined here.
++ *
++ * Copyright (C) 2007--2009 Texas Instruments
++ * Copyright (C) 2006--2010 Nokia Corporation
++ *
++ * Contributors:
++ * Sameer Venkatraman <sameerv@ti.com>
++ * Mohit Jalori <mjalori@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ * Toni Leinonen <toni.leinonen@nokia.com>
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <asm/cacheflush.h>
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++
++#include <media/v4l2-common.h>
++#include <media/v4l2-device.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "ispccdc.h"
++#include "isppreview.h"
++#include "ispresizer.h"
++#include "ispcsi2.h"
++#include "ispccp2.h"
++#include "isph3a.h"
++#include "isphist.h"
++
++static void isp_save_ctx(struct isp_device *isp);
++
++static void isp_restore_ctx(struct isp_device *isp);
++
++const static struct isp_res_mapping isp_res_maps[] = {
++ {
++ .isp_rev = ISP_REVISION_2_0,
++ .map = 1 << OMAP3_ISP_IOMEM_MAIN |
++ 1 << OMAP3_ISP_IOMEM_CBUFF |
++ 1 << OMAP3_ISP_IOMEM_CCP2 |
++ 1 << OMAP3_ISP_IOMEM_CCDC |
++ 1 << OMAP3_ISP_IOMEM_HIST |
++ 1 << OMAP3_ISP_IOMEM_H3A |
++ 1 << OMAP3_ISP_IOMEM_PREV |
++ 1 << OMAP3_ISP_IOMEM_RESZ |
++ 1 << OMAP3_ISP_IOMEM_SBL |
++ 1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
++ 1 << OMAP3_ISP_IOMEM_CSIPHY2,
++ },
++ {
++ .isp_rev = ISP_REVISION_15_0,
++ .map = 1 << OMAP3_ISP_IOMEM_MAIN |
++ 1 << OMAP3_ISP_IOMEM_CBUFF |
++ 1 << OMAP3_ISP_IOMEM_CCP2 |
++ 1 << OMAP3_ISP_IOMEM_CCDC |
++ 1 << OMAP3_ISP_IOMEM_HIST |
++ 1 << OMAP3_ISP_IOMEM_H3A |
++ 1 << OMAP3_ISP_IOMEM_PREV |
++ 1 << OMAP3_ISP_IOMEM_RESZ |
++ 1 << OMAP3_ISP_IOMEM_SBL |
++ 1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
++ 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
++ 1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
++ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
++ 1 << OMAP3_ISP_IOMEM_CSIPHY1 |
++ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2,
++ },
++};
++
++/* Structure for saving/restoring ISP module registers */
++static struct isp_reg isp_reg_list[] = {
++ {OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_GRESET_LENGTH, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_PSTRB_REPLAY, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_FRAME, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_PSTRB_DELAY, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_STRB_DELAY, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_SHUT_DELAY, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_PSTRB_LENGTH, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_STRB_LENGTH, 0},
++ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_SHUT_LENGTH, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF_SYSCONFIG, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF_IRQENABLE, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_CTRL, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_CTRL, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_START, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_START, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_END, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_END, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_WINDOWSIZE, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_WINDOWSIZE, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_THRESHOLD, 0},
++ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_THRESHOLD, 0},
++ {0, ISP_TOK_TERM, 0}
++};
++
++/*
++ * isp_flush - Post pending L3 bus writes by doing a register readback
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ *
++ * In order to force posting of pending writes, we need to write and
++ * readback the same register, in this case the revision register.
++ *
++ * See this link for reference:
++ * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
++ */
++void isp_flush(struct isp_device *isp)
++{
++ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
++}
++
++/*
++ * isp_enable_interrupts - Enable ISP interrupts.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++static void isp_enable_interrupts(struct isp_device *isp)
++{
++ static const u32 irq = IRQ0ENABLE_CSIA_IRQ
++ | IRQ0ENABLE_CSIB_IRQ
++ | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
++ | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
++ | IRQ0ENABLE_CCDC_VD0_IRQ
++ | IRQ0ENABLE_CCDC_VD1_IRQ
++ | IRQ0ENABLE_HS_VS_IRQ
++ | IRQ0ENABLE_HIST_DONE_IRQ
++ | IRQ0ENABLE_H3A_AWB_DONE_IRQ
++ | IRQ0ENABLE_H3A_AF_DONE_IRQ
++ | IRQ0ENABLE_PRV_DONE_IRQ
++ | IRQ0ENABLE_RSZ_DONE_IRQ;
++
++ isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
++ isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
++}
++
++/*
++ * isp_disable_interrupts - Disable ISP interrupts.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++static void isp_disable_interrupts(struct isp_device *isp)
++{
++ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
++}
++
++/**
++ * isp_set_xclk - Configures the specified cam_xclk to the desired frequency.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @xclk: Desired frequency of the clock in Hz. 0 = stable low, 1 is stable high
++ * @xclksel: XCLK to configure (0 = A, 1 = B).
++ *
++ * Configures the specified MCLK divisor in the ISP timing control register
++ * (TCTRL_CTRL) to generate the desired xclk clock value.
++ *
++ * Divisor = cam_mclk_hz / xclk
++ *
++ * Returns the final frequency that is actually being generated
++ **/
++u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel)
++{
++ u32 divisor;
++ u32 currentxclk;
++ unsigned long mclk_hz;
++
++ if (!isp_get(isp))
++ return 0;
++
++ mclk_hz = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
++
++ if (xclk >= mclk_hz) {
++ divisor = ISPTCTRL_CTRL_DIV_BYPASS;
++ currentxclk = mclk_hz;
++ } else if (xclk >= 2) {
++ divisor = mclk_hz / xclk;
++ if (divisor >= ISPTCTRL_CTRL_DIV_BYPASS)
++ divisor = ISPTCTRL_CTRL_DIV_BYPASS - 1;
++ currentxclk = mclk_hz / divisor;
++ } else {
++ divisor = xclk;
++ currentxclk = 0;
++ }
++
++ switch (xclksel) {
++ case 0:
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
++ ~ISPTCTRL_CTRL_DIVA_MASK,
++ divisor << ISPTCTRL_CTRL_DIVA_SHIFT);
++ dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n",
++ currentxclk);
++ break;
++ case 1:
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
++ ~ISPTCTRL_CTRL_DIVB_MASK,
++ divisor << ISPTCTRL_CTRL_DIVB_SHIFT);
++ dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n",
++ currentxclk);
++ break;
++ default:
++ isp_put(isp);
++ dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested "
++ "xclk. Must be 0 (A) or 1 (B).\n");
++ return -EINVAL;
++ }
++
++ /* Do we go from stable whatever to clock? */
++ if (divisor >= 2 && isp->xclk_divisor[xclksel] < 2)
++ isp_get(isp);
++ /* Stopping the clock. */
++ else if (divisor < 2 && isp->xclk_divisor[xclksel] >= 2)
++ isp_put(isp);
++
++ isp->xclk_divisor[xclksel] = divisor;
++
++ isp_put(isp);
++
++ return currentxclk;
++}
++EXPORT_SYMBOL(isp_set_xclk);
++
++/*
++ * isp_power_settings - Sysconfig settings, for Power Management.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @idle: Consider idle state.
++ *
++ * Sets the power settings for the ISP, and SBL bus.
++ */
++static void isp_power_settings(struct isp_device *isp, int idle)
++{
++ if (idle) {
++ isp_reg_writel(isp,
++ (ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY <<
++ ISP_SYSCONFIG_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
++ if (omap_rev() == OMAP3430_REV_ES1_0) {
++ isp_reg_writel(isp, ISPCSI1_AUTOIDLE |
++ (ISPCSI1_MIDLEMODE_SMARTSTANDBY <<
++ ISPCSI1_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_CSI2A_REGS1,
++ ISP_CSIA_SYSCONFIG);
++ isp_reg_writel(isp, ISPCSI1_AUTOIDLE |
++ (ISPCSI1_MIDLEMODE_SMARTSTANDBY <<
++ ISPCSI1_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_CCP2,
++ ISP_CSIB_SYSCONFIG);
++ }
++ isp_reg_writel(isp, ISPCTRL_SBL_AUTOIDLE, OMAP3_ISP_IOMEM_MAIN,
++ ISP_CTRL);
++
++ } else {
++ isp_reg_writel(isp,
++ (ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY <<
++ ISP_SYSCONFIG_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
++ if (omap_rev() == OMAP3430_REV_ES1_0) {
++ isp_reg_writel(isp, ISPCSI1_AUTOIDLE |
++ (ISPCSI1_MIDLEMODE_FORCESTANDBY <<
++ ISPCSI1_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_CSI2A_REGS1,
++ ISP_CSIA_SYSCONFIG);
++
++ isp_reg_writel(isp, ISPCSI1_AUTOIDLE |
++ (ISPCSI1_MIDLEMODE_FORCESTANDBY <<
++ ISPCSI1_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_CCP2,
++ ISP_CSIB_SYSCONFIG);
++ }
++
++ isp_reg_writel(isp, ISPCTRL_SBL_AUTOIDLE, OMAP3_ISP_IOMEM_MAIN,
++ ISP_CTRL);
++ }
++}
++
++/*
++ * Configure the bridge and lane shifter. Valid inputs are
++ *
++ * CCDC_INPUT_PARALLEL: Parallel interface
++ * CCDC_INPUT_CSI2A: CSI2a receiver
++ * CCDC_INPUT_CCP2B: CCP2b receiver
++ * CCDC_INPUT_CSI2C: CSI2c receiver
++ *
++ * The bridge and lane shifter are configured according to the selected input
++ * and the ISP platform data.
++ */
++void isp_configure_bridge(struct isp_device *isp, enum ccdc_input_entity input,
++ const struct isp_parallel_platform_data *pdata)
++{
++ u32 ispctrl_val;
++
++ ispctrl_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
++ ispctrl_val &= ISPCTRL_SHIFT_MASK;
++ ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
++ ispctrl_val &= ISPCTRL_PAR_SER_CLK_SEL_MASK;
++ ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
++
++ switch (input) {
++ case CCDC_INPUT_PARALLEL:
++ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
++ ispctrl_val |= pdata->data_lane_shift << ISPCTRL_SHIFT_SHIFT;
++ ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
++ ispctrl_val |= pdata->bridge << ISPCTRL_PAR_BRIDGE_SHIFT;
++ break;
++
++ case CCDC_INPUT_CSI2A:
++ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
++ break;
++
++ case CCDC_INPUT_CCP2B:
++ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
++ break;
++
++ case CCDC_INPUT_CSI2C:
++ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
++ break;
++
++ default:
++ return;
++ }
++
++ ispctrl_val &= ~ISPCTRL_SYNC_DETECT_MASK;
++ ispctrl_val |= ISPCTRL_SYNC_DETECT_VSRISE;
++
++ isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
++}
++
++/**
++ * isp_set_pixel_clock - Configures the ISP pixel clock
++ * @isp: OMAP3 ISP device
++ * @pixelclk: Average pixel clock in Hz
++ *
++ * Set the average pixel clock required by the sensor. The ISP will use the
++ * lowest possible memory bandwidth settings compatible with the clock.
++ **/
++void isp_set_pixel_clock(struct isp_device *isp, unsigned int pixelclk)
++{
++ isp->isp_ccdc.vpcfg.pixelclk = pixelclk;
++}
++EXPORT_SYMBOL(isp_set_pixel_clock);
++
++void isphist_dma_done(struct isp_device *isp)
++{
++ if (ispccdc_busy(&isp->isp_ccdc) || ispstat_busy(&isp->isp_hist)) {
++ /* Histogram cannot be enabled in this frame anymore */
++ atomic_set(&isp->isp_hist.buf_err, 1);
++ dev_dbg(isp->dev, "hist: Out of synchronization with "
++ "CCDC. Ignoring next buffer.\n");
++ }
++}
++
++static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
++{
++ static const char *name[] = {
++ "CSIA_IRQ",
++ "res1",
++ "res2",
++ "CSIB_LCM_IRQ",
++ "CSIB_IRQ",
++ "res5",
++ "res6",
++ "res7",
++ "CCDC_VD0_IRQ",
++ "CCDC_VD1_IRQ",
++ "CCDC_VD2_IRQ",
++ "CCDC_ERR_IRQ",
++ "H3A_AF_DONE_IRQ",
++ "H3A_AWB_DONE_IRQ",
++ "res14",
++ "res15",
++ "HIST_DONE_IRQ",
++ "CCDC_LSC_DONE",
++ "CCDC_LSC_PREFETCH_COMPLETED",
++ "CCDC_LSC_PREFETCH_ERROR",
++ "PRV_DONE_IRQ",
++ "CBUFF_IRQ",
++ "res22",
++ "res23",
++ "RSZ_DONE_IRQ",
++ "OVF_IRQ",
++ "res26",
++ "res27",
++ "MMU_ERR_IRQ",
++ "OCP_ERR_IRQ",
++ "SEC_ERR_IRQ",
++ "HS_VS_IRQ",
++ };
++ int i;
++
++ dev_dbg(isp->dev, "");
++
++ for (i = 0; i < ARRAY_SIZE(name); i++) {
++ if ((1 << i) & irqstatus)
++ printk(KERN_CONT "%s ", name[i]);
++ }
++ printk(KERN_CONT "\n");
++}
++
++static void isp_isr_sbl(struct isp_device *isp)
++{
++ struct device *dev = isp->dev;
++ u32 sbl_pcr;
++
++ /*
++ * Handle shared buffer logic overflows for video buffers.
++ * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
++ */
++ sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
++ isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
++ sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
++
++ if (sbl_pcr)
++ dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
++
++ if (sbl_pcr & (ISPSBL_PCR_CCDC_WBL_OVF | ISPSBL_PCR_CSIA_WBL_OVF
++ | ISPSBL_PCR_CSIB_WBL_OVF)) {
++ isp->isp_ccdc.error = 1;
++ if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
++ isp->isp_prev.error = 1;
++ if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
++ isp->isp_res.error = 1;
++ }
++
++ if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
++ isp->isp_prev.error = 1;
++ if (isp->isp_res.input == RESIZER_INPUT_VP &&
++ !(isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER))
++ isp->isp_res.error = 1;
++ }
++
++ if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
++ | ISPSBL_PCR_RSZ2_WBL_OVF
++ | ISPSBL_PCR_RSZ3_WBL_OVF
++ | ISPSBL_PCR_RSZ4_WBL_OVF))
++ isp->isp_res.error = 1;
++
++ if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
++ ispstat_sbl_overflow(&isp->isp_af);
++
++ if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
++ ispstat_sbl_overflow(&isp->isp_aewb);
++}
++
++/*
++ * isp_isr - Interrupt Service Routine for Camera ISP module.
++ * @irq: Not used currently.
++ * @_pdev: Pointer to the platform device associated with the OMAP3 ISP.
++ *
++ * Handles the corresponding callback if plugged in.
++ *
++ * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
++ * IRQ wasn't handled.
++ */
++static irqreturn_t isp_isr(int irq, void *_isp)
++{
++ static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
++ IRQ0STATUS_CCDC_LSC_DONE_IRQ |
++ IRQ0STATUS_CCDC_VD0_IRQ |
++ IRQ0STATUS_CCDC_VD1_IRQ |
++ IRQ0STATUS_HS_VS_IRQ;
++ struct isp_device *isp = _isp;
++ u32 irqstatus;
++ int ret;
++
++ irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
++ isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
++
++ isp_isr_sbl(isp);
++
++ if (irqstatus & IRQ0STATUS_CSIA_IRQ) {
++ ret = isp_csi2_isr(&isp->isp_csi2a);
++ if (ret)
++ isp->isp_ccdc.error = 1;
++ }
++
++ if (irqstatus & IRQ0STATUS_CSIB_IRQ) {
++ ret = ispccp2_isr(isp);
++ if (ret)
++ isp->isp_ccdc.error = 1;
++ }
++
++ if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
++ if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
++ isppreview_isr_frame_sync(&isp->isp_prev);
++ if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
++ ispresizer_isr_frame_sync(&isp->isp_res);
++ ispstat_isr_frame_sync(&isp->isp_aewb);
++ ispstat_isr_frame_sync(&isp->isp_af);
++ ispstat_isr_frame_sync(&isp->isp_hist);
++ }
++
++ if (irqstatus & ccdc_events)
++ ispccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
++
++ if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
++ if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
++ ispresizer_isr_frame_sync(&isp->isp_res);
++ isppreview_isr(&isp->isp_prev);
++ }
++
++ if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
++ ispresizer_isr(&isp->isp_res);
++
++ if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
++ ispstat_isr(&isp->isp_aewb);
++
++ if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
++ ispstat_isr(&isp->isp_af);
++
++ if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
++ ispstat_isr(&isp->isp_hist);
++
++ isp_flush(isp);
++
++#if defined(DEBUG) && defined(ISP_ISR_DEBUG)
++ isp_isr_dbg(isp, irqstatus);
++#endif
++
++ return IRQ_HANDLED;
++}
++
++/* Device name, needed for resource tracking layer */
++static struct device_driver camera_drv = {
++ .name = "camera"
++};
++
++static struct device camera_dev = {
++ .driver = &camera_drv,
++};
++
++/* -----------------------------------------------------------------------------
++ * Pipeline management
++ */
++
++/*
++ * isp_pipeline_enable - Enable streaming on a pipeline
++ * @pipe: ISP pipeline
++ * @mode: Stream mode (single shot or continuous)
++ *
++ * Walk the entities chain starting at the pipeline output video node and start
++ * all modules in the chain in the given mode.
++ *
++ * Return 0 if successfull, or the return value of the failed video::s_stream
++ * operation otherwise.
++ */
++static int isp_pipeline_enable(struct isp_pipeline *pipe,
++ enum isp_pipeline_stream_state mode)
++{
++ struct isp_device *isp = pipe->output->isp;
++ struct media_entity_pad *pad;
++ struct media_entity *entity;
++ struct v4l2_subdev *subdev;
++ unsigned long flags;
++ int ret = 0;
++
++ spin_lock_irqsave(&pipe->lock, flags);
++ pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
++ spin_unlock_irqrestore(&pipe->lock, flags);
++
++ entity = &pipe->output->video.entity;
++ while (1) {
++ pad = &entity->pads[0];
++ if (pad->type != MEDIA_PAD_TYPE_INPUT)
++ break;
++
++ pad = media_entity_remote_pad(pad);
++ if (pad == NULL ||
++ pad->entity->type != MEDIA_ENTITY_TYPE_SUBDEV)
++ break;
++
++ entity = pad->entity;
++ subdev = media_entity_to_v4l2_subdev(entity);
++
++ ret = v4l2_subdev_call(subdev, video, s_stream, mode);
++ if (ret < 0 && ret != -ENOIOCTLCMD)
++ break;
++
++ if (subdev == &isp->isp_ccdc.subdev) {
++ v4l2_subdev_call(&isp->isp_aewb.subdev, video,
++ s_stream, mode);
++ v4l2_subdev_call(&isp->isp_af.subdev, video,
++ s_stream, mode);
++ v4l2_subdev_call(&isp->isp_hist.subdev, video,
++ s_stream, mode);
++ }
++ }
++
++ return ret;
++}
++
++static int isp_pipeline_wait_resizer(struct isp_device *isp)
++{
++ return ispresizer_busy(&isp->isp_res);
++}
++
++static int isp_pipeline_wait_preview(struct isp_device *isp)
++{
++ return isppreview_busy(&isp->isp_prev);
++}
++
++static int isp_pipeline_wait_ccdc(struct isp_device *isp)
++{
++ return ispstat_busy(&isp->isp_af)
++ || ispstat_busy(&isp->isp_aewb)
++ || ispstat_busy(&isp->isp_hist)
++ || ispccdc_busy(&isp->isp_ccdc);
++}
++
++#define ISP_STOP_TIMEOUT msecs_to_jiffies(1000)
++
++static int isp_pipeline_wait(struct isp_device *isp,
++ int(*busy)(struct isp_device *isp))
++{
++ unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
++
++ while (!time_after(jiffies, timeout)) {
++ if (!busy(isp))
++ return 0;
++ }
++
++ return 1;
++}
++
++/*
++ * isp_pipeline_disable - Disable streaming on a pipeline
++ * @pipe: ISP pipeline
++ *
++ * Walk the entities chain starting at the pipeline output video node and stop
++ * all modules in the chain. Wait synchronously for the modules to be stopped if
++ * necessary.
++ *
++ * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
++ * can't be stopped (in which case a software reset of the ISP is probably
++ * necessary).
++ */
++static int isp_pipeline_disable(struct isp_pipeline *pipe)
++{
++ struct isp_device *isp = pipe->output->isp;
++ struct media_entity_pad *pad;
++ struct media_entity *entity;
++ struct v4l2_subdev *subdev;
++ int failure = 0;
++ int ret;
++
++ /*
++ * We need to stop all the modules after CCDC first or they'll
++ * never stop since they may not get a full frame from CCDC.
++ */
++ entity = &pipe->output->video.entity;
++ while (1) {
++ pad = &entity->pads[0];
++ if (pad->type != MEDIA_PAD_TYPE_INPUT)
++ break;
++
++ pad = media_entity_remote_pad(pad);
++ if (pad == NULL ||
++ pad->entity->type != MEDIA_ENTITY_TYPE_SUBDEV)
++ break;
++
++ entity = pad->entity;
++ subdev = media_entity_to_v4l2_subdev(entity);
++
++ v4l2_subdev_call(subdev, video, s_stream, 0);
++
++ if (subdev == &isp->isp_res.subdev) {
++ ret = isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
++ } else if (subdev == &isp->isp_prev.subdev) {
++ ret = isp_pipeline_wait(isp, isp_pipeline_wait_preview);
++ } else if (subdev == &isp->isp_ccdc.subdev) {
++ v4l2_subdev_call(&isp->isp_aewb.subdev,
++ video, s_stream, 0);
++ v4l2_subdev_call(&isp->isp_af.subdev,
++ video, s_stream, 0);
++ v4l2_subdev_call(&isp->isp_hist.subdev,
++ video, s_stream, 0);
++ ret = isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
++ } else {
++ ret = 0;
++ }
++
++ if (ret) {
++ dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
++ failure = -ETIMEDOUT;
++ }
++ }
++
++ return failure;
++}
++
++/*
++ * isp_pipeline_set_stream - Enable/disable streaming on a pipeline
++ * @pipe: ISP pipeline
++ * @state: Stream state (stopped, single shot or continuous)
++ *
++ * Set the pipeline to the given stream state. Pipelines can be started in
++ * single-shot or continuous mode.
++ *
++ * Return 0 if successfull, or the return value of the failed video::s_stream
++ * operation otherwise.
++ */
++int isp_pipeline_set_stream(struct isp_pipeline *pipe,
++ enum isp_pipeline_stream_state state)
++{
++ if (state == ISP_PIPELINE_STREAM_STOPPED)
++ return isp_pipeline_disable(pipe);
++ else
++ return isp_pipeline_enable(pipe, state);
++}
++
++/*
++ * __isp_disable_modules - Disable ISP submodules with a timeout to be idle.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @suspend: If 0, disable modules; if 1, send modules to suspend state.
++ *
++ * Returns 0 if stop/suspend left in idle state all the submodules properly,
++ * or returns 1 if a general Reset is required to stop/suspend the submodules.
++ */
++static int __isp_disable_modules(struct isp_device *isp, int suspend)
++{
++ unsigned long timeout;
++ int reset = 0;
++
++ /*
++ * We need to stop all the modules after CCDC first or they'll
++ * never stop since they may not get a full frame from CCDC.
++ */
++ if (suspend) {
++ ispstat_suspend(&isp->isp_af);
++ ispstat_suspend(&isp->isp_aewb);
++ ispstat_suspend(&isp->isp_hist);
++ } else {
++ v4l2_subdev_call(&isp->isp_aewb.subdev, video, s_stream, 0);
++ v4l2_subdev_call(&isp->isp_af.subdev, video, s_stream, 0);
++ v4l2_subdev_call(&isp->isp_hist.subdev, video, s_stream, 0);
++ }
++
++ v4l2_subdev_call(&isp->isp_res.subdev, video, s_stream, 0);
++ v4l2_subdev_call(&isp->isp_prev.subdev, video, s_stream, 0);
++
++ timeout = jiffies + ISP_STOP_TIMEOUT;
++ while (ispstat_busy(&isp->isp_af)
++ || ispstat_busy(&isp->isp_aewb)
++ || ispstat_busy(&isp->isp_hist)
++ || isppreview_busy(&isp->isp_prev)
++ || ispresizer_busy(&isp->isp_res)) {
++ if (time_after(jiffies, timeout)) {
++ dev_info(isp->dev, "can't stop non-ccdc modules.\n");
++ reset = 1;
++ break;
++ }
++ msleep(1);
++ }
++
++ /* Let's stop CCDC now. */
++ v4l2_subdev_call(&isp->isp_ccdc.subdev, video, s_stream, 0);
++
++ timeout = jiffies + ISP_STOP_TIMEOUT;
++ while (ispccdc_busy(&isp->isp_ccdc)) {
++ if (time_after(jiffies, timeout)) {
++ dev_info(isp->dev, "can't stop ccdc module.\n");
++ reset = 1;
++ break;
++ }
++ msleep(1);
++ }
++
++ v4l2_subdev_call(&isp->isp_csi2a.subdev, video, s_stream, 0);
++ v4l2_subdev_call(&isp->isp_ccp2.subdev, video, s_stream, 0);
++
++ return reset;
++}
++
++/*
++ * isp_suspend_modules - Suspend ISP submodules.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ *
++ * Returns 0 if suspend left in idle state all the submodules properly,
++ * or returns 1 if a general Reset is required to suspend the submodules.
++ */
++static int isp_suspend_modules(struct isp_device *isp)
++{
++ return __isp_disable_modules(isp, 1);
++}
++
++/*
++ * isp_resume_modules - Resume ISP submodules.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++static void isp_resume_modules(struct isp_device *isp)
++{
++ ispstat_resume(&isp->isp_hist);
++ ispstat_resume(&isp->isp_aewb);
++ ispstat_resume(&isp->isp_af);
++}
++
++/*
++ * isp_reset - Reset ISP with a timeout wait for idle.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++static int isp_reset(struct isp_device *isp)
++{
++ unsigned long timeout = 0;
++
++ isp_reg_writel(isp,
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
++ | ISP_SYSCONFIG_SOFTRESET,
++ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
++ while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
++ ISP_SYSSTATUS) & 0x1)) {
++ if (timeout++ > 10000) {
++ dev_alert(isp->dev, "cannot reset ISP\n");
++ return -ETIMEDOUT;
++ }
++ udelay(1);
++ }
++
++ return 0;
++}
++
++/*
++ * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ *
++ * Routine for saving the context of each module in the ISP.
++ * CCDC, HIST, H3A, PREV, RESZ and MMU.
++ */
++static void isp_save_ctx(struct isp_device *isp)
++{
++ isp_save_context(isp, isp_reg_list);
++ ispccp2_save_context(isp);
++ ispccdc_save_context(isp);
++ if (isp->iommu)
++ iommu_save_ctx(isp->iommu);
++ isppreview_save_context(isp);
++ ispresizer_save_context(isp);
++}
++
++/*
++ * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ *
++ * Routine for restoring the context of each module in the ISP.
++ * CCDC, HIST, H3A, PREV, RESZ and MMU.
++ */
++static void isp_restore_ctx(struct isp_device *isp)
++{
++ isp_restore_context(isp, isp_reg_list);
++ ispccp2_restore_context(isp);
++ ispccdc_restore_context(isp);
++ if (isp->iommu)
++ iommu_restore_ctx(isp->iommu);
++ isppreview_restore_context(isp);
++ ispresizer_restore_context(isp);
++}
++
++/* -----------------------------------------------------------------------------
++ * SBL resources management
++ */
++#define OMAP3_ISP_SBL_READ (OMAP3_ISP_SBL_CSI1_READ | \
++ OMAP3_ISP_SBL_CCDC_LSC_READ | \
++ OMAP3_ISP_SBL_PREVIEW_READ | \
++ OMAP3_ISP_SBL_RESIZER_READ)
++#define OMAP3_ISP_SBL_WRITE (OMAP3_ISP_SBL_CSI1_WRITE | \
++ OMAP3_ISP_SBL_CSI2A_WRITE | \
++ OMAP3_ISP_SBL_CSI2C_WRITE | \
++ OMAP3_ISP_SBL_CCDC_WRITE | \
++ OMAP3_ISP_SBL_PREVIEW_WRITE)
++
++void isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
++{
++ u32 sbl = 0;
++
++ isp->sbl_resources |= res;
++
++ if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
++ sbl |= ISPCTRL_SBL_SHARED_RPORTA;
++
++ if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
++ sbl |= ISPCTRL_SBL_SHARED_RPORTB;
++
++ if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
++ sbl |= ISPCTRL_SBL_SHARED_WPORTC;
++
++ if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
++ sbl |= ISPCTRL_SBL_WR0_RAM_EN;
++
++ if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
++ sbl |= ISPCTRL_SBL_WR1_RAM_EN;
++
++ if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
++ sbl |= ISPCTRL_SBL_RD_RAM_EN;
++
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
++}
++
++void isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
++{
++ u32 sbl = 0;
++
++ isp->sbl_resources &= ~res;
++
++ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
++ sbl |= ISPCTRL_SBL_SHARED_RPORTA;
++
++ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
++ sbl |= ISPCTRL_SBL_SHARED_RPORTB;
++
++ if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
++ sbl |= ISPCTRL_SBL_SHARED_WPORTC;
++
++ if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
++ sbl |= ISPCTRL_SBL_WR0_RAM_EN;
++
++ if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
++ sbl |= ISPCTRL_SBL_WR1_RAM_EN;
++
++ if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
++ sbl |= ISPCTRL_SBL_RD_RAM_EN;
++
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, ~sbl);
++}
++
++/* --------------------------------------------------------------------------
++ * Clock management
++ */
++
++/*
++ * isp_enable_clocks - Enable ISP clocks
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ *
++ * Return 0 if successful, or clk_enable return value if any of tthem fails.
++ */
++static int isp_enable_clocks(struct isp_device *isp)
++{
++ int r;
++ unsigned long rate;
++ int divisor;
++
++ /*
++ * cam_mclk clock chain:
++ * dpll4 -> dpll4_m5 -> dpll4_m5x2 -> cam_mclk
++ *
++ * In OMAP3630 dpll4_m5x2 != 2 x dpll4_m5 but both are
++ * set to the same value. Hence the rate set for dpll4_m5
++ * has to be twice of what is set on OMAP3430 to get
++ * the required value for cam_mclk
++ */
++ if (cpu_is_omap3630())
++ divisor = 1;
++ else
++ divisor = 2;
++
++ r = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
++ if (r) {
++ dev_err(isp->dev, "clk_enable cam_ick failed\n");
++ goto out_clk_enable_ick;
++ }
++ r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
++ CM_CAM_MCLK_HZ/divisor);
++ if (r) {
++ dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
++ goto out_clk_enable_mclk;
++ }
++ r = clk_enable(isp->clock[ISP_CLK_CAM_MCLK]);
++ if (r) {
++ dev_err(isp->dev, "clk_enable cam_mclk failed\n");
++ goto out_clk_enable_mclk;
++ }
++ rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
++ if (rate != CM_CAM_MCLK_HZ)
++ dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
++ " expected : %d\n"
++ " actual : %ld\n", CM_CAM_MCLK_HZ, rate);
++ r = clk_enable(isp->clock[ISP_CLK_CSI2_FCK]);
++ if (r) {
++ dev_err(isp->dev, "clk_enable csi2_fck failed\n");
++ goto out_clk_enable_csi2_fclk;
++ }
++ return 0;
++
++out_clk_enable_csi2_fclk:
++ clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
++out_clk_enable_mclk:
++ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
++out_clk_enable_ick:
++ return r;
++}
++
++/*
++ * isp_disable_clocks - Disable ISP clocks
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++static void isp_disable_clocks(struct isp_device *isp)
++{
++ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
++ clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
++ clk_disable(isp->clock[ISP_CLK_CSI2_FCK]);
++}
++
++static const char *isp_clocks[] = {
++ "cam_ick",
++ "cam_mclk",
++ "dpll4_m5_ck",
++ "csi2_96m_fck",
++ "l3_ick",
++};
++
++static void isp_put_clocks(struct isp_device *isp)
++{
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
++ if (isp->clock[i]) {
++ clk_put(isp->clock[i]);
++ isp->clock[i] = NULL;
++ }
++ }
++}
++
++static int isp_get_clocks(struct isp_device *isp)
++{
++ struct clk *clk;
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
++ clk = clk_get(&camera_dev, isp_clocks[i]);
++ if (IS_ERR(clk)) {
++ dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
++ isp_put_clocks(isp);
++ return PTR_ERR(clk);
++ }
++
++ isp->clock[i] = clk;
++ }
++
++ return 0;
++}
++
++/*
++ * isp_get - Acquire the ISP resource.
++ *
++ * Initializes the clocks for the first acquire.
++ *
++ * Increment the reference count on the ISP. If the first reference is taken,
++ * enable clocks and power-up all submodules.
++ *
++ * Return a pointer to the ISP device structure, or NULL if an error occured.
++ */
++struct isp_device *isp_get(struct isp_device *isp)
++{
++ struct isp_device *__isp = NULL;
++
++ if (isp == NULL)
++ return NULL;
++
++ mutex_lock(&isp->isp_mutex);
++ if (++isp->ref_count != 1) {
++ __isp = isp;
++ goto out;
++ }
++
++ if (isp_enable_clocks(isp) < 0)
++ goto out;
++
++ /* We don't want to restore context before saving it! */
++ if (isp->has_context)
++ isp_restore_ctx(isp);
++ else
++ isp->has_context = 1;
++
++ isp_enable_interrupts(isp);
++ __isp = isp;
++
++out:
++ mutex_unlock(&isp->isp_mutex);
++
++ return __isp;
++}
++
++/*
++ * isp_put - Release the ISP
++ *
++ * Decrement the reference count on the ISP. If the last reference is released,
++ * power-down all submodules, disable clocks and free temporary buffers.
++ */
++void isp_put(struct isp_device *isp)
++{
++ if (isp == NULL)
++ return;
++
++ mutex_lock(&isp->isp_mutex);
++ if (--isp->ref_count == 0) {
++ isp_disable_interrupts(isp);
++ isp_save_ctx(isp);
++ isp_disable_clocks(isp);
++ }
++ mutex_unlock(&isp->isp_mutex);
++}
++
++/*
++ * isp_save_context - Saves the values of the ISP module registers.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @reg_list: Structure containing pairs of register address and value to
++ * modify on OMAP.
++ */
++void isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
++{
++ struct isp_reg *next = reg_list;
++
++ for (; next->reg != ISP_TOK_TERM; next++)
++ next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
++}
++
++/*
++ * isp_restore_context - Restores the values of the ISP module registers.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @reg_list: Structure containing pairs of register address and value to
++ * modify on OMAP.
++ */
++void isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
++{
++ struct isp_reg *next = reg_list;
++
++ for (; next->reg != ISP_TOK_TERM; next++)
++ isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
++}
++
++/* --------------------------------------------------------------------------
++ * Platform device driver
++ */
++
++/*
++ * isp_print_status - Prints the values of the ISP Control Module registers
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++#define ISP_PRINT_REGISTER(isp, name)\
++ dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
++#define SBL_PRINT_REGISTER(isp, name)\
++ dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
++
++void isp_print_status(struct isp_device *isp)
++{
++ dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
++
++ ISP_PRINT_REGISTER(isp, SYSCONFIG);
++ ISP_PRINT_REGISTER(isp, SYSSTATUS);
++ ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
++ ISP_PRINT_REGISTER(isp, IRQ0STATUS);
++ ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
++ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
++ ISP_PRINT_REGISTER(isp, CTRL);
++ ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
++ ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
++ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
++ ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
++ ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
++ ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
++ ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
++ ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
++
++ SBL_PRINT_REGISTER(isp, PCR);
++ SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
++
++ dev_dbg(isp->dev, "--------------------------------------------\n");
++}
++
++#ifdef CONFIG_PM
++
++/*
++ * isp_suspend - Suspend routine for the ISP
++ * @dev: Pointer to ISP device
++ *
++ * Always returns 0.
++ */
++static int isp_suspend(struct device *dev)
++{
++ struct isp_device *isp = dev_get_drvdata(dev);
++ int reset;
++
++ WARN_ON(mutex_is_locked(&isp->isp_mutex));
++
++ if (isp->ref_count == 0)
++ goto out;
++
++ isp_disable_interrupts(isp);
++ reset = isp_suspend_modules(isp);
++ isp_save_ctx(isp);
++ if (reset)
++ isp_reset(isp);
++
++ isp_disable_clocks(isp);
++
++out:
++ return 0;
++}
++
++/*
++ * isp_resume - Resume routine for the ISP
++ * @dev: Pointer to ISP device
++ *
++ * Returns 0 if successful, or isp_enable_clocks return value otherwise.
++ */
++static int isp_resume(struct device *dev)
++{
++ struct isp_device *isp = dev_get_drvdata(dev);
++ int ret_err = 0;
++
++ if (isp->ref_count == 0)
++ goto out;
++
++ ret_err = isp_enable_clocks(isp);
++ if (ret_err)
++ goto out;
++ isp_restore_ctx(isp);
++ isp_enable_interrupts(isp);
++ isp_resume_modules(isp);
++
++out:
++ return ret_err;
++}
++
++#else
++
++#define isp_suspend NULL
++#define isp_resume NULL
++
++#endif /* CONFIG_PM */
++
++static void isp_unregister_entities(struct isp_device *isp)
++{
++ isp_csi2_unregister_entities(&isp->isp_csi2a);
++ isp_ccp2_unregister_entities(&isp->isp_ccp2);
++ isp_ccdc_unregister_entities(&isp->isp_ccdc);
++ isp_preview_unregister_entities(&isp->isp_prev);
++ isp_resizer_unregister_entities(&isp->isp_res);
++ ispstat_unregister_entities(&isp->isp_aewb);
++ ispstat_unregister_entities(&isp->isp_af);
++ ispstat_unregister_entities(&isp->isp_hist);
++
++ v4l2_device_unregister(&isp->v4l2_dev);
++ media_device_unregister(&isp->media_dev);
++}
++
++/*
++ * isp_register_subdev_group - Register a group of subdevices
++ * @isp: OMAP3 ISP device
++ * @board_info: I2C subdevs board information array
++ *
++ * Register all I2C subdevices in the board_info array. The array must be
++ * terminated by a NULL entry, and the first entry must be the sensor.
++ *
++ * Return a pointer to the sensor media entity if it has been successfully
++ * registered, or NULL otherwise.
++ */
++static struct v4l2_subdev *
++isp_register_subdev_group(struct isp_device *isp,
++ struct v4l2_subdev_i2c_board_info *board_info)
++{
++ struct v4l2_subdev *sensor = NULL;
++ unsigned int first;
++
++ if (board_info->board_info == NULL)
++ return NULL;
++
++ for (first = 1; board_info->board_info; ++board_info, first = 0) {
++ struct v4l2_subdev *subdev;
++ struct i2c_adapter *adapter;
++
++ adapter = i2c_get_adapter(board_info->i2c_adapter_id);
++ if (adapter == NULL) {
++ printk(KERN_ERR "%s: Unable to get I2C adapter %d for "
++ "device %s\n", __func__,
++ board_info->i2c_adapter_id,
++ board_info->board_info->type);
++ continue;
++ }
++
++ subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev,
++ adapter, board_info->module_name,
++ board_info->board_info, NULL, 1);
++ if (subdev == NULL) {
++ printk(KERN_ERR "%s: Unable to register subdev %s\n",
++ __func__, board_info->board_info->type);
++ continue;
++ }
++
++ if (first)
++ sensor = subdev;
++ }
++
++ return sensor;
++}
++
++static int isp_register_entities(struct isp_device *isp)
++{
++ struct isp_platform_data *pdata = isp->pdata;
++ struct isp_v4l2_subdevs_group *subdevs;
++ int ret;
++
++ isp->media_dev.dev = isp->dev;
++ ret = media_device_register(&isp->media_dev);
++ if (ret < 0) {
++ printk(KERN_ERR "%s: Media device registration failed (%d)\n",
++ __func__, ret);
++ return ret;
++ }
++
++ isp->v4l2_dev.mdev = &isp->media_dev;
++ ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
++ if (ret < 0) {
++ printk(KERN_ERR "%s: V4L2 device registration failed (%d)\n",
++ __func__, ret);
++ goto done;
++ }
++
++ /* Register internal entities */
++ ret = isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = isp_preview_register_entities(&isp->isp_prev, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = ispstat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = ispstat_register_entities(&isp->isp_af, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ ret = ispstat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
++ if (ret < 0)
++ goto done;
++
++ /* Register external entities */
++ for (subdevs = pdata->subdevs; subdevs->subdevs; ++subdevs) {
++ struct v4l2_subdev *sensor;
++ struct media_entity *input;
++ unsigned int flags;
++ unsigned int pad;
++
++ sensor = isp_register_subdev_group(isp, subdevs->subdevs);
++ if (sensor == NULL)
++ continue;
++
++ sensor->host_priv = subdevs;
++
++ /* Connect the sensor to the correct interface module. Parallel
++ * sensors are connected directly to the CCDC, while serial
++ * sensors are connected to the CSI2a, CCP2b or CSI2c receiver
++ * through CSIPHY1 or CSIPHY2.
++ */
++ switch (subdevs->interface) {
++ case ISP_INTERFACE_PARALLEL:
++ input = &isp->isp_ccdc.subdev.entity;
++ pad = CCDC_PAD_SINK;
++ flags = 0;
++ break;
++
++ case ISP_INTERFACE_CSI2A_PHY2:
++ input = &isp->isp_csi2a.subdev.entity;
++ pad = CSI2_PAD_SINK;
++ flags = MEDIA_LINK_FLAG_IMMUTABLE
++ | MEDIA_LINK_FLAG_ACTIVE;
++ break;
++
++ case ISP_INTERFACE_CCP2B_PHY1:
++ case ISP_INTERFACE_CCP2B_PHY2:
++ input = &isp->isp_ccp2.subdev.entity;
++ pad = CCP2_PAD_SINK;
++ flags = 0;
++ break;
++
++ case ISP_INTERFACE_CSI2C_PHY1:
++ input = &isp->isp_csi2c.subdev.entity;
++ pad = CSI2_PAD_SINK;
++ flags = MEDIA_LINK_FLAG_IMMUTABLE
++ | MEDIA_LINK_FLAG_ACTIVE;
++ break;
++
++ default:
++ printk(KERN_ERR "%s: invalid interface type %u\n",
++ __func__, subdevs->interface);
++ ret = -EINVAL;
++ goto done;
++ }
++
++ ret = media_entity_create_link(&sensor->entity, 0, input, pad,
++ flags);
++ if (ret < 0)
++ goto done;
++ }
++
++done:
++ if (ret < 0)
++ isp_unregister_entities(isp);
++
++ return ret;
++}
++
++static void isp_cleanup_modules(struct isp_device *isp)
++{
++ isp_h3a_aewb_cleanup(isp);
++ isp_h3a_af_cleanup(isp);
++ isp_hist_cleanup(isp);
++ isp_resizer_cleanup(isp);
++ isp_preview_cleanup(isp);
++ isp_ccdc_cleanup(isp);
++ isp_ccp2_cleanup(isp);
++ isp_csi2_cleanup(isp);
++}
++
++static int isp_initialize_modules(struct isp_device *isp)
++{
++ int ret;
++
++ ret = isp_csiphy_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "CSI PHY initialization failed\n");
++ goto error_csiphy;
++ }
++
++ ret = isp_csi2_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "CSI2 initialization failed\n");
++ goto error_csi2;
++ }
++
++ ret = isp_ccp2_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "CCP2 initialization failed\n");
++ goto error_ccp2;
++ }
++
++ ret = isp_ccdc_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "CCDC initialization failed\n");
++ goto error_ccdc;
++ }
++
++ ret = isp_preview_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "Preview initialization failed\n");
++ goto error_preview;
++ }
++
++ ret = isp_resizer_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "Resizer initialization failed\n");
++ goto error_resizer;
++ }
++
++ ret = isp_hist_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "Histogram initialization failed\n");
++ goto error_hist;
++ }
++
++ ret = isp_h3a_aewb_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "H3A AEWB initialization failed\n");
++ goto error_h3a_aewb;
++ }
++
++ ret = isp_h3a_af_init(isp);
++ if (ret < 0) {
++ dev_err(isp->dev, "H3A AF initialization failed\n");
++ goto error_h3a_af;
++ }
++
++ /* Connect the submodules. */
++ ret = media_entity_create_link(
++ &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
++ &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
++ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
++ &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
++ &isp->isp_aewb.subdev.entity, 0, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
++ &isp->isp_af.subdev.entity, 0, 0);
++ if (ret < 0)
++ goto error_link;
++
++ ret = media_entity_create_link(
++ &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
++ &isp->isp_hist.subdev.entity, 0, 0);
++ if (ret < 0)
++ goto error_link;
++
++ return 0;
++
++error_link:
++ isp_h3a_af_cleanup(isp);
++error_h3a_af:
++ isp_h3a_aewb_cleanup(isp);
++error_h3a_aewb:
++ isp_hist_cleanup(isp);
++error_hist:
++ isp_resizer_cleanup(isp);
++error_resizer:
++ isp_preview_cleanup(isp);
++error_preview:
++ isp_ccdc_cleanup(isp);
++error_ccdc:
++ isp_ccp2_cleanup(isp);
++error_ccp2:
++ isp_csi2_cleanup(isp);
++error_csi2:
++error_csiphy:
++ return ret;
++}
++
++/*
++ * isp_remove - Remove ISP platform device
++ * @pdev: Pointer to ISP platform device
++ *
++ * Always returns 0.
++ */
++static int isp_remove(struct platform_device *pdev)
++{
++ struct isp_device *isp = platform_get_drvdata(pdev);
++ int i;
++
++ isp_unregister_entities(isp);
++ isp_cleanup_modules(isp);
++
++ isp_get(isp);
++ iommu_put(isp->iommu);
++ isp_put(isp);
++
++ free_irq(isp->irq_num, isp);
++ isp_put_clocks(isp);
++
++ for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
++ if (isp->mmio_base[i]) {
++ iounmap(isp->mmio_base[i]);
++ isp->mmio_base[i] = NULL;
++ }
++
++ if (isp->mmio_base_phys[i]) {
++ release_mem_region(isp->mmio_base_phys[i],
++ isp->mmio_size[i]);
++ isp->mmio_base_phys[i] = 0;
++ }
++ }
++
++ regulator_put(isp->isp_csiphy1.vdd);
++ regulator_put(isp->isp_csiphy2.vdd);
++ kfree(isp);
++
++ return 0;
++}
++
++static int isp_map_mem_resource(struct platform_device *pdev,
++ struct isp_device *isp,
++ enum isp_mem_resources res)
++{
++ struct resource *mem;
++
++ /* request the mem region for the camera registers */
++
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
++ if (!mem) {
++ dev_err(isp->dev, "no mem resource?\n");
++ return -ENODEV;
++ }
++
++ if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
++ dev_err(isp->dev,
++ "cannot reserve camera register I/O region\n");
++ return -ENODEV;
++ }
++ isp->mmio_base_phys[res] = mem->start;
++ isp->mmio_size[res] = resource_size(mem);
++
++ /* map the region */
++ isp->mmio_base[res] = ioremap_nocache(isp->mmio_base_phys[res],
++ isp->mmio_size[res]);
++ if (!isp->mmio_base[res]) {
++ dev_err(isp->dev, "cannot map camera register I/O region\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++/*
++ * isp_probe - Probe ISP platform device
++ * @pdev: Pointer to ISP platform device
++ *
++ * Returns 0 if successful,
++ * -ENOMEM if no memory available,
++ * -ENODEV if no platform device resources found
++ * or no space for remapping registers,
++ * -EINVAL if couldn't install ISR,
++ * or clk_get return error value.
++ */
++static int isp_probe(struct platform_device *pdev)
++{
++ struct isp_platform_data *pdata = pdev->dev.platform_data;
++ struct isp_device *isp;
++ int ret;
++ int i, m;
++
++ if (pdata == NULL)
++ return -EINVAL;
++
++ isp = kzalloc(sizeof(*isp), GFP_KERNEL);
++ if (!isp) {
++ dev_err(&pdev->dev, "could not allocate memory\n");
++ return -ENOMEM;
++ }
++
++ mutex_init(&isp->isp_mutex);
++ spin_lock_init(&isp->stat_lock);
++
++ isp->dev = &pdev->dev;
++ isp->pdata = pdata;
++ isp->ref_count = 0;
++
++ isp->raw_dmamask = DMA_BIT_MASK(32);
++ isp->dev->dma_mask = &isp->raw_dmamask;
++ isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ platform_set_drvdata(pdev, isp);
++
++ /* Regulators */
++ isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
++ isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
++
++ /* Clocks */
++ ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_get_clocks(isp);
++ if (ret < 0)
++ goto error;
++
++ if (isp_get(isp) == NULL)
++ goto error;
++
++ ret = isp_reset(isp);
++ if (ret < 0)
++ goto error_isp;
++
++ /* Memory resources */
++ isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
++ dev_info(isp->dev, "Revision %d.%d found\n",
++ (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
++
++ for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
++ if (isp->revision == isp_res_maps[m].isp_rev)
++ break;
++
++ if (m == ARRAY_SIZE(isp_res_maps)) {
++ dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
++ (isp->revision & 0xf0) >> 4, isp->revision & 0xf);
++ ret = -ENODEV;
++ goto error_isp;
++ }
++
++ for (i = 1; i < OMAP3_ISP_IOMEM_LAST; i++) {
++ if (isp_res_maps[m].map & 1 << i) {
++ ret = isp_map_mem_resource(pdev, isp, i);
++ if (ret)
++ goto error_isp;
++ }
++ }
++
++ /* IOMMU */
++ isp->iommu = iommu_get("isp");
++ if (IS_ERR_OR_NULL(isp->iommu)) {
++ ret = -ENODEV;
++ isp_put(isp);
++ goto error_isp;
++ }
++
++ /* Interrupt */
++ isp->irq_num = platform_get_irq(pdev, 0);
++ if (isp->irq_num <= 0) {
++ dev_err(isp->dev, "No IRQ resource\n");
++ ret = -ENODEV;
++ goto error_isp;
++ }
++
++ if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
++ dev_err(isp->dev, "Unable to request IRQ\n");
++ ret = -EINVAL;
++ goto error_isp;
++ }
++
++ /* Entities */
++ ret = isp_initialize_modules(isp);
++ if (ret < 0)
++ goto error_irq;
++
++ ret = isp_register_entities(isp);
++ if (ret < 0)
++ goto error_modules;
++
++ isp_power_settings(isp, 1);
++ isp_put(isp);
++
++ return 0;
++
++error_modules:
++ isp_cleanup_modules(isp);
++error_irq:
++ free_irq(isp->irq_num, isp);
++error_isp:
++ iommu_put(isp->iommu);
++ isp_put(isp);
++error:
++ isp_put_clocks(isp);
++
++ for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
++ if (isp->mmio_base[i]) {
++ iounmap(isp->mmio_base[i]);
++ isp->mmio_base[i] = NULL;
++ }
++
++ if (isp->mmio_base_phys[i]) {
++ release_mem_region(isp->mmio_base_phys[i],
++ isp->mmio_size[i]);
++ isp->mmio_base_phys[i] = 0;
++ }
++ }
++ regulator_put(isp->isp_csiphy2.vdd);
++ regulator_put(isp->isp_csiphy1.vdd);
++ platform_set_drvdata(pdev, NULL);
++ kfree(isp);
++
++ return ret;
++}
++
++static struct dev_pm_ops omap3isp_pm_ops = {
++ .suspend = isp_suspend,
++ .resume = isp_resume,
++};
++
++static struct platform_driver omap3isp_driver = {
++ .probe = isp_probe,
++ .remove = isp_remove,
++ .driver = {
++ .name = "omap3isp",
++ .pm = &omap3isp_pm_ops,
++ },
++};
++
++/*
++ * isp_init - ISP module initialization.
++ */
++static int __init isp_init(void)
++{
++ return platform_driver_register(&omap3isp_driver);
++}
++
++/*
++ * isp_cleanup - ISP module cleanup.
++ */
++static void __exit isp_cleanup(void)
++{
++ platform_driver_unregister(&omap3isp_driver);
++}
++
++module_init(isp_init);
++module_exit(isp_cleanup);
++
++MODULE_AUTHOR("Texas Instruments");
++MODULE_DESCRIPTION("OMAP3 ISP driver");
++MODULE_LICENSE("GPL");
++
+diff --git a/drivers/media/video/isp/isp.h b/drivers/media/video/isp/isp.h
+new file mode 100644
+index 0000000..92fab08
+--- /dev/null
++++ b/drivers/media/video/isp/isp.h
+@@ -0,0 +1,393 @@
++/*
++ * isp.h
++ *
++ * Top level public header file for ISP Control module in
++ * TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments.
++ * Copyright (C) 2009 Nokia.
++ *
++ * Contributors:
++ * Sameer Venkatraman <sameerv@ti.com>
++ * Mohit Jalori
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_TOP_H
++#define OMAP_ISP_TOP_H
++
++#include <media/v4l2-device.h>
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <plat/iommu.h>
++#include <plat/iovmm.h>
++
++#include "ispstat.h"
++#include "ispccdc.h"
++#include "ispreg.h"
++#include "ispresizer.h"
++#include "isppreview.h"
++#include "ispcsiphy.h"
++#include "ispcsi2.h"
++#include "ispccp2.h"
++
++#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
++
++#define ISP_TOK_TERM 0xFFFFFFFF /*
++ * terminating token for ISP
++ * modules reg list
++ */
++#define to_isp_device(ptr_module) \
++ container_of(ptr_module, struct isp_device, isp_##ptr_module)
++#define to_device(ptr_module) \
++ (to_isp_device(ptr_module)->dev)
++
++enum isp_mem_resources {
++ OMAP3_ISP_IOMEM_MAIN,
++ OMAP3_ISP_IOMEM_CBUFF,
++ OMAP3_ISP_IOMEM_CCP2,
++ OMAP3_ISP_IOMEM_CCDC,
++ OMAP3_ISP_IOMEM_HIST,
++ OMAP3_ISP_IOMEM_H3A,
++ OMAP3_ISP_IOMEM_PREV,
++ OMAP3_ISP_IOMEM_RESZ,
++ OMAP3_ISP_IOMEM_SBL,
++ OMAP3_ISP_IOMEM_CSI2A_REGS1,
++ OMAP3_ISP_IOMEM_CSIPHY2,
++ OMAP3_ISP_IOMEM_CSI2A_REGS2,
++ OMAP3_ISP_IOMEM_CSI2C_REGS1,
++ OMAP3_ISP_IOMEM_CSIPHY1,
++ OMAP3_ISP_IOMEM_CSI2C_REGS2,
++ OMAP3_ISP_IOMEM_LAST
++};
++
++enum isp_sbl_resource {
++ OMAP3_ISP_SBL_CSI1_READ = 0x1,
++ OMAP3_ISP_SBL_CSI1_WRITE = 0x2,
++ OMAP3_ISP_SBL_CSI2A_WRITE = 0x4,
++ OMAP3_ISP_SBL_CSI2C_WRITE = 0x8,
++ OMAP3_ISP_SBL_CCDC_LSC_READ = 0x10,
++ OMAP3_ISP_SBL_CCDC_WRITE = 0x20,
++ OMAP3_ISP_SBL_PREVIEW_READ = 0x40,
++ OMAP3_ISP_SBL_PREVIEW_WRITE = 0x80,
++ OMAP3_ISP_SBL_RESIZER_READ = 0x100,
++ OMAP3_ISP_SBL_RESIZER_WRITE = 0x200,
++};
++
++enum isp_interface_type {
++ ISP_INTERFACE_PARALLEL,
++ ISP_INTERFACE_CSI2A_PHY2,
++ ISP_INTERFACE_CCP2B_PHY1,
++ ISP_INTERFACE_CCP2B_PHY2,
++ ISP_INTERFACE_CSI2C_PHY1,
++};
++
++#define ISP_REVISION_1_0 0x10
++#define ISP_REVISION_2_0 0x20
++#define ISP_REVISION_15_0 0xF0
++
++/*
++ * struct isp_res_mapping - Map ISP io resources to ISP revision.
++ * @isp_rev: ISP_REVISION_x_x
++ * @map: bitmap for enum isp_mem_resources
++ */
++struct isp_res_mapping {
++ u32 isp_rev;
++ u32 map;
++};
++
++/*
++ * struct isp_reg - Structure for ISP register values.
++ * @reg: 32-bit Register address.
++ * @val: 32-bit Register value.
++ */
++struct isp_reg {
++ enum isp_mem_resources mmio_range;
++ u32 reg;
++ u32 val;
++};
++
++/**
++ * struct isp_parallel_platform_data - Parallel interface platform data
++ * @data_lane_shift: Data lane shifter
++ * 0 - CAMEXT[13:0] -> CAM[13:0]
++ * 1 - CAMEXT[13:2] -> CAM[11:0]
++ * 2 - CAMEXT[13:4] -> CAM[9:0]
++ * 3 - CAMEXT[13:6] -> CAM[7:0]
++ * @clk_pol: Pixel clock polarity
++ * 0 - Non Inverted, 1 - Inverted
++ * @bridge: CCDC Bridge input control
++ * ISPCTRL_PAR_BRIDGE_DISABLE - Disable
++ * ISPCTRL_PAR_BRIDGE_LENDIAN - Little endian
++ * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian
++ */
++struct isp_parallel_platform_data {
++ unsigned int data_lane_shift:2;
++ unsigned int clk_pol:1;
++ unsigned int bridge:4;
++};
++
++/**
++ * struct isp_ccp2_platform_data - CCP2 interface platform data
++ * @strobe_clk_pol: Strobe/clock polarity
++ * 0 - Non Inverted, 1 - Inverted
++ * @crc: Enable the cyclic redundancy check
++ * @ccp2_mode: Enable CCP2 compatibility mode
++ * 0 - MIPI-CSI1 mode, 1 - CCP2 mode
++ * @phy_layer: Physical layer selection
++ * ISPCCP2_CTRL_PHY_SEL_CLOCK - Data/clock physical layer
++ * ISPCCP2_CTRL_PHY_SEL_STROBE - Data/strobe physical layer
++ * @vpclk_div: Video port output clock control
++ */
++struct isp_ccp2_platform_data {
++ unsigned int strobe_clk_pol:1;
++ unsigned int crc:1;
++ unsigned int ccp2_mode:1;
++ unsigned int phy_layer:1;
++ unsigned int vpclk_div:2;
++};
++
++/**
++ * struct isp_csi2_platform_data - CSI2 interface platform data
++ * @crc: Enable the cyclic redundancy check
++ * @vpclk_div: Video port output clock control
++ */
++struct isp_csi2_platform_data {
++ unsigned crc:1;
++ unsigned vpclk_div:2;
++};
++
++struct isp_v4l2_subdevs_group {
++ struct v4l2_subdev_i2c_board_info *subdevs;
++ enum isp_interface_type interface;
++ union {
++ struct isp_parallel_platform_data parallel;
++ struct isp_ccp2_platform_data ccp2;
++ struct isp_csi2_platform_data csi2;
++ } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
++};
++
++struct isp_platform_data {
++ struct isp_v4l2_subdevs_group *subdevs;
++};
++
++/*
++ * struct isp_device - ISP device structure.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @revision: Stores current ISP module revision.
++ * @irq_num: Currently used IRQ number.
++ * @mmio_base: Array with kernel base addresses for ioremapped ISP register
++ * regions.
++ * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
++ * regions.
++ * @mmio_size: Array with ISP register regions size in bytes.
++ * @raw_dmamask: Raw DMA mask
++ * @stat_lock: Spinlock for handling statistics
++ * @isp_mutex: Mutex for serializing requests to ISP.
++ * @has_context: Context has been saved at least once and can be restored.
++ * @ref_count: Reference count for handling multiple ISP requests.
++ * @cam_ick: Pointer to camera interface clock structure.
++ * @cam_mclk: Pointer to camera functional clock structure.
++ * @dpll4_m5_ck: Pointer to DPLL4 M5 clock structure.
++ * @csi2_fck: Pointer to camera CSI2 complexIO clock structure.
++ * @l3_ick: Pointer to OMAP3 L3 bus interface clock.
++ * @irq: Currently attached ISP ISR callbacks information structure.
++ * @isp_af: Pointer to current settings for ISP AutoFocus SCM.
++ * @isp_hist: Pointer to current settings for ISP Histogram SCM.
++ * @isp_h3a: Pointer to current settings for ISP Auto Exposure and
++ * White Balance SCM.
++ * @isp_res: Pointer to current settings for ISP Resizer.
++ * @isp_prev: Pointer to current settings for ISP Preview.
++ * @isp_ccdc: Pointer to current settings for ISP CCDC.
++ * @iommu: Pointer to requested IOMMU instance for ISP.
++ *
++ * This structure is used to store the OMAP ISP Information.
++ */
++struct isp_device {
++ struct v4l2_device v4l2_dev;
++ struct media_device media_dev;
++ struct device *dev;
++ u32 revision;
++
++ /* platform HW resources */
++ struct isp_platform_data *pdata;
++ unsigned int irq_num;
++
++ void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
++ unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
++ resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
++
++ u64 raw_dmamask;
++
++ /* ISP Obj */
++ spinlock_t stat_lock; /* common lock for statistic drivers */
++ struct mutex isp_mutex; /* For handling ref_count field */
++ int has_context;
++ int ref_count;
++ u32 xclk_divisor[2]; /* Two clocks, a and b. */
++#define ISP_CLK_CAM_ICK 0
++#define ISP_CLK_CAM_MCLK 1
++#define ISP_CLK_DPLL4_M5_CK 2
++#define ISP_CLK_CSI2_FCK 3
++#define ISP_CLK_L3_ICK 4
++ struct clk *clock[5];
++
++ /* ISP modules */
++ struct ispstat isp_af;
++ struct ispstat isp_aewb;
++ struct ispstat isp_hist;
++ struct isp_res_device isp_res;
++ struct isp_prev_device isp_prev;
++ struct isp_ccdc_device isp_ccdc;
++ struct isp_csi2_device isp_csi2a;
++ struct isp_csi2_device isp_csi2c;
++ struct isp_ccp2_device isp_ccp2;
++ struct isp_csiphy isp_csiphy1;
++ struct isp_csiphy isp_csiphy2;
++
++ unsigned int sbl_resources;
++
++ struct iommu *iommu;
++};
++
++#define v4l2_dev_to_isp_device(dev) \
++ container_of(dev, struct isp_device, v4l2_dev)
++
++void isphist_dma_done(struct isp_device *isp);
++
++void isp_flush(struct isp_device *isp);
++
++int isp_pipeline_set_stream(struct isp_pipeline *pipe,
++ enum isp_pipeline_stream_state state);
++void isp_configure_bridge(struct isp_device *isp, enum ccdc_input_entity input,
++ const struct isp_parallel_platform_data *pdata);
++
++#define ISP_XCLK_NONE -1
++#define ISP_XCLK_A 0
++#define ISP_XCLK_B 1
++
++u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel);
++
++void isp_set_pixel_clock(struct isp_device *isp, unsigned int pixelclk);
++
++struct isp_device *isp_get(struct isp_device *isp);
++void isp_put(struct isp_device *isp);
++
++void isp_save_context(struct isp_device *isp, struct isp_reg *);
++
++void isp_restore_context(struct isp_device *isp, struct isp_reg *);
++
++void isp_print_status(struct isp_device *isp);
++
++void isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res);
++void isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res);
++
++int omap3isp_register_entities(struct platform_device *pdev,
++ struct v4l2_device *v4l2_dev);
++void omap3isp_unregister_entities(struct platform_device *pdev);
++
++/*
++ * isp_reg_readl - Read value of an OMAP3 ISP register
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @isp_mmio_range: Range to which the register offset refers to.
++ * @reg_offset: Register offset to read from.
++ *
++ * Returns an unsigned 32 bit value with the required register contents.
++ */
++static inline
++u32 isp_reg_readl(struct isp_device *isp, enum isp_mem_resources isp_mmio_range,
++ u32 reg_offset)
++{
++ return __raw_readl(isp->mmio_base[isp_mmio_range] + reg_offset);
++}
++
++/*
++ * isp_reg_writel - Write value to an OMAP3 ISP register
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @reg_value: 32 bit value to write to the register.
++ * @isp_mmio_range: Range to which the register offset refers to.
++ * @reg_offset: Register offset to write into.
++ */
++static inline
++void isp_reg_writel(struct isp_device *isp, u32 reg_value,
++ enum isp_mem_resources isp_mmio_range, u32 reg_offset)
++{
++ __raw_writel(reg_value, isp->mmio_base[isp_mmio_range] + reg_offset);
++}
++
++/*
++ * isp_reg_and - Do AND binary operation within an OMAP3 ISP register value
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @mmio_range: Range to which the register offset refers to.
++ * @reg: Register offset to work on.
++ * @and_bits: 32 bit value which would be 'ANDed' with current register value.
++ */
++static inline
++void isp_reg_and(struct isp_device *isp, enum isp_mem_resources mmio_range,
++ u32 reg, u32 and_bits)
++{
++ u32 v = isp_reg_readl(isp, mmio_range, reg);
++
++ isp_reg_writel(isp, v & and_bits, mmio_range, reg);
++}
++
++/*
++ * isp_reg_or - Do OR binary operation within an OMAP3 ISP register value
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @mmio_range: Range to which the register offset refers to.
++ * @reg: Register offset to work on.
++ * @or_bits: 32 bit value which would be 'ORed' with current register value.
++ */
++static inline
++void isp_reg_or(struct isp_device *isp, enum isp_mem_resources mmio_range,
++ u32 reg, u32 or_bits)
++{
++ u32 v = isp_reg_readl(isp, mmio_range, reg);
++
++ isp_reg_writel(isp, v | or_bits, mmio_range, reg);
++}
++
++/*
++ * isp_reg_and_or - Do AND and OR binary ops within an OMAP3 ISP register value
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ * @mmio_range: Range to which the register offset refers to.
++ * @reg: Register offset to work on.
++ * @and_bits: 32 bit value which would be 'ANDed' with current register value.
++ * @or_bits: 32 bit value which would be 'ORed' with current register value.
++ *
++ * The AND operation is done first, and then the OR operation. Mostly useful
++ * when clearing a group of bits before setting a value.
++ */
++static inline
++void isp_reg_and_or(struct isp_device *isp, enum isp_mem_resources mmio_range,
++ u32 reg, u32 and_bits, u32 or_bits)
++{
++ u32 v = isp_reg_readl(isp, mmio_range, reg);
++
++ isp_reg_writel(isp, (v & and_bits) | or_bits, mmio_range, reg);
++}
++
++static inline enum v4l2_buf_type
++isp_pad_buffer_type(const struct v4l2_subdev *subdev, int pad)
++{
++ if (pad >= subdev->entity.num_pads)
++ return 0;
++
++ if (subdev->entity.pads[pad].type == MEDIA_PAD_TYPE_INPUT)
++ return V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ else
++ return V4L2_BUF_TYPE_VIDEO_CAPTURE;
++}
++
++#endif /* OMAP_ISP_TOP_H */
+diff --git a/drivers/media/video/isp/ispccdc.c b/drivers/media/video/isp/ispccdc.c
+new file mode 100644
+index 0000000..131057a
+--- /dev/null
++++ b/drivers/media/video/isp/ispccdc.c
+@@ -0,0 +1,2292 @@
++/*
++ * ispccdc.c
++ *
++ * Driver Library for CCDC module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Senthilvadivu Guruswamy <svadivu@ti.com>
++ * Pallavi Kulkarni <p-kulkarni@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/module.h>
++#include <linux/uaccess.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <media/v4l2-event.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "ispccdc.h"
++
++static struct v4l2_mbus_framefmt *
++__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which);
++
++/* Structure for saving/restoring CCDC module registers*/
++static struct isp_reg ispccdc_reg_list[] = {
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HD_VD_WID, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PIX_LINES, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CULLING, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR0, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR1, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR2, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR3, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR4, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR5, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR6, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR7, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGEVEN0, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGEVEN1, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGODD0, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGODD1, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_INITIAL, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE, 0},
++ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_OFFSET, 0},
++ {0, ISP_TOK_TERM, 0}
++};
++
++const static unsigned int ccdc_fmts[] = {
++ V4L2_MBUS_FMT_SGRBG10_1X10,
++ V4L2_MBUS_FMT_SRGGB10_1X10,
++ V4L2_MBUS_FMT_SBGGR10_1X10,
++ V4L2_MBUS_FMT_SGBRG10_1X10,
++};
++
++/*
++ * ispccdc_save_context - Save values of the CCDC module registers
++ * @isp: Device pointer specific to the OMAP3 ISP.
++ */
++void ispccdc_save_context(struct isp_device *isp)
++{
++ isp_save_context(isp, ispccdc_reg_list);
++}
++
++/*
++ * ispccdc_restore_context - Restore values of the CCDC module registers
++ * @isp: Pointer to ISP device
++ */
++void ispccdc_restore_context(struct isp_device *isp)
++{
++ isp_restore_context(isp, ispccdc_reg_list);
++}
++
++/*
++ * ispccdc_print_status - Print current CCDC Module register values.
++ * @ccdc: Pointer to ISP CCDC device.
++ *
++ * Also prints other debug information stored in the CCDC module.
++ */
++#define CCDC_PRINT_REGISTER(isp, name)\
++ dev_dbg(isp->dev, "###CCDC " #name "=0x%08x\n", \
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_##name))
++
++static void ispccdc_print_status(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ dev_dbg(isp->dev, "-------------CCDC Register dump-------------\n");
++
++ CCDC_PRINT_REGISTER(isp, PCR);
++ CCDC_PRINT_REGISTER(isp, SYN_MODE);
++ CCDC_PRINT_REGISTER(isp, HD_VD_WID);
++ CCDC_PRINT_REGISTER(isp, PIX_LINES);
++ CCDC_PRINT_REGISTER(isp, HORZ_INFO);
++ CCDC_PRINT_REGISTER(isp, VERT_START);
++ CCDC_PRINT_REGISTER(isp, VERT_LINES);
++ CCDC_PRINT_REGISTER(isp, CULLING);
++ CCDC_PRINT_REGISTER(isp, HSIZE_OFF);
++ CCDC_PRINT_REGISTER(isp, SDOFST);
++ CCDC_PRINT_REGISTER(isp, SDR_ADDR);
++ CCDC_PRINT_REGISTER(isp, CLAMP);
++ CCDC_PRINT_REGISTER(isp, DCSUB);
++ CCDC_PRINT_REGISTER(isp, COLPTN);
++ CCDC_PRINT_REGISTER(isp, BLKCMP);
++ CCDC_PRINT_REGISTER(isp, FPC);
++ CCDC_PRINT_REGISTER(isp, FPC_ADDR);
++ CCDC_PRINT_REGISTER(isp, VDINT);
++ CCDC_PRINT_REGISTER(isp, ALAW);
++ CCDC_PRINT_REGISTER(isp, REC656IF);
++ CCDC_PRINT_REGISTER(isp, CFG);
++ CCDC_PRINT_REGISTER(isp, FMTCFG);
++ CCDC_PRINT_REGISTER(isp, FMT_HORZ);
++ CCDC_PRINT_REGISTER(isp, FMT_VERT);
++ CCDC_PRINT_REGISTER(isp, PRGEVEN0);
++ CCDC_PRINT_REGISTER(isp, PRGEVEN1);
++ CCDC_PRINT_REGISTER(isp, PRGODD0);
++ CCDC_PRINT_REGISTER(isp, PRGODD1);
++ CCDC_PRINT_REGISTER(isp, VP_OUT);
++ CCDC_PRINT_REGISTER(isp, LSC_CONFIG);
++ CCDC_PRINT_REGISTER(isp, LSC_INITIAL);
++ CCDC_PRINT_REGISTER(isp, LSC_TABLE_BASE);
++ CCDC_PRINT_REGISTER(isp, LSC_TABLE_OFFSET);
++
++ dev_dbg(isp->dev, "--------------------------------------------\n");
++}
++
++/*
++ * ispccdc_busy - Get busy state of the CCDC.
++ * @ccdc: Pointer to ISP CCDC device.
++ */
++int ispccdc_busy(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) &
++ ISPCCDC_PCR_BUSY;
++}
++
++/* -----------------------------------------------------------------------------
++ * Lens Shading Compensation
++ */
++
++/*
++ * ispccdc_lsc_validate_config - Check that LSC configuration is valid.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @lsc_cfg: the LSC configuration to check.
++ *
++ * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid.
++ */
++static int ispccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
++ struct ispccdc_lsc_config *lsc_cfg)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ struct v4l2_mbus_framefmt *format;
++ unsigned int paxel_width, paxel_height;
++ unsigned int paxel_shift_x, paxel_shift_y;
++ unsigned int min_width, min_height, min_size;
++ unsigned int input_width, input_height;
++
++ paxel_shift_x = lsc_cfg->gain_mode_m;
++ paxel_shift_y = lsc_cfg->gain_mode_n;
++
++ if ((paxel_shift_x < 2) || (paxel_shift_x > 6) ||
++ (paxel_shift_y < 2) || (paxel_shift_y > 6)) {
++ dev_dbg(isp->dev, "CCDC: LSC: Invalid paxel size\n");
++ return -EINVAL;
++ }
++
++ if (lsc_cfg->offset & 3) {
++ dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of "
++ "4\n");
++ return -EINVAL;
++ }
++
++ if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) {
++ dev_dbg(isp->dev, "CCDC: LSC: initial_x and y must be even\n");
++ return -EINVAL;
++ }
++
++ format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ input_width = format->width;
++ input_height = format->height;
++
++ /* Calculate minimum bytesize for validation */
++ paxel_width = 1 << paxel_shift_x;
++ min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1)
++ >> paxel_shift_x) + 1;
++
++ paxel_height = 1 << paxel_shift_y;
++ min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1)
++ >> paxel_shift_y) + 1;
++
++ min_size = 4 * min_width * min_height;
++ if (min_size > lsc_cfg->size) {
++ dev_dbg(isp->dev, "CCDC: LSC: too small table\n");
++ return -EINVAL;
++ }
++ if (lsc_cfg->offset < (min_width * 4)) {
++ dev_dbg(isp->dev, "CCDC: LSC: Offset is too small\n");
++ return -EINVAL;
++ }
++ if ((lsc_cfg->size / lsc_cfg->offset) < min_height) {
++ dev_dbg(isp->dev, "CCDC: LSC: Wrong size/offset combination\n");
++ return -EINVAL;
++ }
++ return 0;
++}
++
++/*
++ * ispccdc_lsc_program_table - Program Lens Shading Compensation table address.
++ * @ccdc: Pointer to ISP CCDC device.
++ */
++static void ispccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr)
++{
++ isp_reg_writel(to_isp_device(ccdc), addr,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
++}
++
++/*
++ * ispccdc_lsc_setup_regs - Configures the lens shading compensation module
++ * @ccdc: Pointer to ISP CCDC device.
++ */
++static void ispccdc_lsc_setup_regs(struct isp_ccdc_device *ccdc,
++ struct ispccdc_lsc_config *cfg)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ int reg;
++
++ isp_reg_writel(isp, cfg->offset, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_LSC_TABLE_OFFSET);
++
++ reg = 0;
++ reg |= cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT;
++ reg |= cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT;
++ reg |= cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT;
++ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG);
++
++ reg = 0;
++ reg &= ~ISPCCDC_LSC_INITIAL_X_MASK;
++ reg |= cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT;
++ reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK;
++ reg |= cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT;
++ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_LSC_INITIAL);
++}
++
++static int ispccdc_lsc_wait_prefetch(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ unsigned int wait;
++
++ isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
++ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
++
++ /* timeout 1 ms */
++ for (wait = 0; wait < 1000; wait++) {
++ if (isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS) &
++ IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ) {
++ isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
++ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
++ return 0;
++ }
++
++ rmb();
++ udelay(1);
++ }
++
++ return -ETIMEDOUT;
++}
++
++/*
++ * __ispccdc_lsc_enable - Enables/Disables the Lens Shading Compensation module.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @enable: 0 Disables LSC, 1 Enables LSC.
++ */
++static int __ispccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ const struct v4l2_mbus_framefmt *format =
++ __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++
++ if ((format->code != V4L2_MBUS_FMT_SGRBG10_1X10) &&
++ (format->code != V4L2_MBUS_FMT_SRGGB10_1X10) &&
++ (format->code != V4L2_MBUS_FMT_SBGGR10_1X10) &&
++ (format->code != V4L2_MBUS_FMT_SGBRG10_1X10))
++ return -EINVAL;
++
++ if (enable)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_LSC_READ);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
++ ~ISPCCDC_LSC_ENABLE, enable ? ISPCCDC_LSC_ENABLE : 0);
++
++ if (enable) {
++ if (ispccdc_lsc_wait_prefetch(ccdc) < 0) {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_LSC_CONFIG, ~ISPCCDC_LSC_ENABLE);
++ ccdc->lsc.state = LSC_STATE_STOPPED;
++			dev_warn(to_device(ccdc), "LSC prefetch timeout\n");
++ return -ETIMEDOUT;
++ }
++ ccdc->lsc.state = LSC_STATE_RUNNING;
++ } else {
++ ccdc->lsc.state = LSC_STATE_STOPPING;
++ }
++
++ return 0;
++}
++
++static int ispccdc_lsc_busy(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG) &
++ ISPCCDC_LSC_BUSY;
++}
++
++/* __ispccdc_lsc_configure - Configure LSC engine with new configuration
++ * and/or table in interrupt context
++ * @ccdc: Pointer to ISP CCDC device.
++ *
++ */
++static int __ispccdc_lsc_configure(struct isp_ccdc_device *ccdc,
++ struct ispccdc_lsc_config_req *req)
++{
++ if (!req->enable)
++ return 0;
++
++ if (ispccdc_lsc_busy(ccdc))
++ return -EBUSY;
++
++ ispccdc_lsc_setup_regs(ccdc, &req->config);
++ ispccdc_lsc_program_table(ccdc, req->table);
++ return 0;
++}
++
++/*
++ * ispccdc_lsc_error_handler - Handle LSC prefetch error scenario.
++ * @ccdc: Pointer to ISP CCDC device.
++ *
++ * Disables LSC, and defers enablement to shadow registers update time.
++ */
++static void ispccdc_lsc_error_handler(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ /*
++ * From OMAP3 TRM: When this event is pending, the module
++ * goes into transparent mode (output =input). Normal
++ * operation can be resumed at the start of the next frame
++ * after:
++ * 1) Clearing this event
++ * 2) Disabling the LSC module
++ * 3) Enabling it
++ */
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_LSC_CONFIG, ~ISPCCDC_LSC_ENABLE);
++ ccdc->lsc.state = LSC_STATE_STOPPED;
++}
++
++static int ispccdc_lsc_queue_req(struct isp_ccdc_device *ccdc,
++ struct ispccdc_lsc_config *cfg,
++ u32 table, int enable)
++{
++ struct ispccdc_lsc_config_req *req;
++ unsigned long flags;
++
++ req = kzalloc(sizeof(*req), GFP_KERNEL);
++ if (req == NULL)
++ return -ENOMEM;
++
++ if (enable) {
++ req->config = *cfg;
++ req->table = table;
++ req->enable = 1;
++ }
++
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++ list_add_tail(&req->list, &ccdc->lsc.req_queue);
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++ return 0;
++}
++
++static struct ispccdc_lsc_config_req *
++ispccdc_lsc_dequeue_req(struct isp_ccdc_device *ccdc, struct list_head *queue)
++{
++ if (list_empty(queue))
++ return NULL;
++
++ return list_first_entry(queue, struct ispccdc_lsc_config_req, list);
++}
++
++static void ispccdc_lsc_init_queue(struct isp_ccdc_device *ccdc)
++{
++ INIT_LIST_HEAD(&ccdc->lsc.req_queue);
++ INIT_LIST_HEAD(&ccdc->lsc.free_queue);
++}
++
++static void ispccdc_lsc_free_queue(struct isp_ccdc_device *ccdc,
++ struct list_head *queue)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ struct ispccdc_lsc_config_req *req, *n;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++ list_for_each_entry_safe(req, n, queue, list) {
++ list_del(&req->list);
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++ if (req->table)
++ iommu_vfree(isp->iommu, req->table);
++ kfree(req);
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++ }
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++}
++
++static void ispccdc_lsc_free_table_work(struct work_struct *work)
++{
++ struct isp_ccdc_device *ccdc;
++ struct ispccdc_lsc *lsc;
++
++ lsc = container_of(work, struct ispccdc_lsc, table_work);
++ ccdc = container_of(lsc, struct isp_ccdc_device, lsc);
++
++ ispccdc_lsc_free_queue(ccdc, &lsc->free_queue);
++}
++
++static int ispccdc_lsc_config(struct isp_ccdc_device *ccdc,
++ struct ispccdc_update_config *config)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ struct ispccdc_lsc_config cfg;
++ void *table_new_va;
++ u32 table_new = 0;
++ int enable = 0;
++ u16 update;
++ int ret;
++
++ update = config->update & (ISP_ABS_CCDC_CONFIG_LSC | ISP_ABS_TBL_LSC);
++ if (!update)
++ return 0;
++
++ if (update != (ISP_ABS_CCDC_CONFIG_LSC | ISP_ABS_TBL_LSC)) {
++ dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table "
++ "need to be supplied\n", __func__);
++ return -EINVAL;
++ }
++
++ if (config->flag & ISP_ABS_CCDC_CONFIG_LSC) {
++ if (copy_from_user(&cfg, config->lsc_cfg, sizeof(cfg)))
++ return -EFAULT;
++ if (ispccdc_lsc_validate_config(ccdc, &cfg))
++ return -EINVAL;
++ enable = 1;
++ } else {
++ goto queue;
++ }
++
++ table_new = iommu_vmalloc(isp->iommu, 0, cfg.size, IOMMU_FLAG);
++ if (IS_ERR_VALUE(table_new))
++ return -ENOMEM;
++ table_new_va = da_to_va(isp->iommu, table_new);
++ if (copy_from_user(table_new_va, config->lsc, cfg.size)) {
++ iommu_vfree(isp->iommu, table_new);
++ return -EFAULT;
++ }
++
++queue:
++ ret = ispccdc_lsc_queue_req(ccdc, &cfg, table_new, enable);
++ if (ret < 0 && table_new)
++ iommu_vfree(isp->iommu, table_new);
++ return ret;
++}
++
++static inline int ispccdc_lsc_is_configured(struct isp_ccdc_device *ccdc)
++{
++ struct ispccdc_lsc_config_req *req;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++ req = ccdc->lsc.active;
++ if (req && req->enable) {
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++ return 1;
++ }
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++ return 0;
++}
++
++/* -----------------------------------------------------------------------------
++ * Parameters configuration
++ */
++
++/*
++ * ispccdc_config_black_clamp - Configures the clamp parameters in CCDC.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @bclamp: Structure containing the optical black average gain, optical black
++ * sample length, sample lines, and the start pixel position of the
++ * samples w.r.t the HS pulse.
++ *
++ * Configures the clamp parameters in CCDC, using either the optical black
++ * clamp or the digital clamp. For a digital clamp, it ensures that a valid
++ * DC subtraction level is set.
++ *
++ * Returns always 0 when completed.
++ */
++static int ispccdc_config_black_clamp(struct isp_ccdc_device *ccdc,
++ struct ispccdc_bclamp *bclamp)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ u32 bclamp_val;
++
++ if (ccdc->obclamp_en) {
++ bclamp_val = bclamp->obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT;
++ bclamp_val |= bclamp->oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT;
++ bclamp_val |= bclamp->oblines << ISPCCDC_CLAMP_OBSLN_SHIFT;
++ bclamp_val |= bclamp->obstpixel << ISPCCDC_CLAMP_OBST_SHIFT;
++ isp_reg_writel(isp, bclamp_val,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP);
++ } else {
++ isp_reg_writel(isp, bclamp->dcsubval,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB);
++ }
++ return 0;
++}
++
++/*
++ * ispccdc_enable_black_clamp - Enables/Disables the optical black clamp.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @enable: 0 Disables optical black clamp, 1 Enables optical black clamp.
++ *
++ * Enables or disables the optical black clamp. When disabled, the digital
++ * clamp operates.
++ */
++static void ispccdc_enable_black_clamp(struct isp_ccdc_device *ccdc,
++ u8 enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP,
++ ~ISPCCDC_CLAMP_CLAMPEN,
++ enable ? ISPCCDC_CLAMP_CLAMPEN : 0);
++ ccdc->obclamp_en = enable;
++}
++
++/*
++ * ispccdc_config_fpc - Configures the Faulty Pixel Correction parameters.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @fpc: Structure containing the number of faulty pixels corrected in the
++ * frame, address of the FPC table.
++ *
++ * Returns 0 if successful, or -EINVAL if FPC Address is not on the 64 byte
++ * boundary.
++ */
++static int ispccdc_config_fpc(struct isp_ccdc_device *ccdc,
++ struct ispccdc_fpc *fpc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ u32 fpc_val = 0;
++
++ fpc_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
++
++ isp_reg_writel(isp, fpc_val & (~ISPCCDC_FPC_FPCEN),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
++ isp_reg_writel(isp, fpc->fpcaddr,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR);
++ isp_reg_writel(isp, fpc_val | (fpc->fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
++ return 0;
++}
++
++/*
++ * ispccdc_enable_fpc - Enable Faulty Pixel Correction.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @enable: 0 Disables FPC, 1 Enables FPC.
++ */
++static void ispccdc_enable_fpc(struct isp_ccdc_device *ccdc, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC,
++ ~ISPCCDC_FPC_FPCEN, enable ? ISPCCDC_FPC_FPCEN : 0);
++}
++
++/*
++ * ispccdc_config_black_comp - Configure Black Level Compensation.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @blcomp: Structure containing the black level compensation value for RGrGbB
++ * pixels. in 2's complement.
++ */
++static void ispccdc_config_black_comp(struct isp_ccdc_device *ccdc,
++ struct ispccdc_blcomp *blcomp)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ u32 blcomp_val = 0;
++
++ blcomp_val |= blcomp->b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT;
++ blcomp_val |= blcomp->gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT;
++ blcomp_val |= blcomp->gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT;
++ blcomp_val |= blcomp->r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT;
++
++ isp_reg_writel(isp, blcomp_val, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_BLKCMP);
++}
++
++/*
++ * ispccdc_config_culling - Configure culling parameters.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @cull: Structure containing the vertical culling pattern, and horizontal
++ * culling pattern for odd and even lines.
++ */
++static void ispccdc_config_culling(struct isp_ccdc_device *ccdc,
++ struct ispccdc_culling *cull)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ u32 culling_val = 0;
++
++ culling_val |= cull->v_pattern << ISPCCDC_CULLING_CULV_SHIFT;
++ culling_val |= cull->h_even << ISPCCDC_CULLING_CULHEVN_SHIFT;
++ culling_val |= cull->h_odd << ISPCCDC_CULLING_CULHODD_SHIFT;
++
++ isp_reg_writel(isp, culling_val, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_CULLING);
++}
++
++/*
++ * ispccdc_enable_lpf - Enable Low-Pass Filter (LPF).
++ * @ccdc: Pointer to ISP CCDC device.
++ * @enable: 0 Disables LPF, 1 Enables LPF
++ */
++static void ispccdc_enable_lpf(struct isp_ccdc_device *ccdc, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE,
++ ~ISPCCDC_SYN_MODE_LPF,
++ enable ? ISPCCDC_SYN_MODE_LPF : 0);
++}
++
++/*
++ * ispccdc_config_alaw - Configure the input width for A-law compression.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @ipwidth: Input width for A-law
++ */
++static void ispccdc_config_alaw(struct isp_ccdc_device *ccdc,
++ enum alaw_ipwidth ipwidth)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_writel(isp, ipwidth << ISPCCDC_ALAW_GWDI_SHIFT,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW);
++}
++
++/*
++ * ispccdc_enable_alaw - Enable A-law compression.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @enable: 0 - Disables A-law, 1 - Enables A-law
++ */
++static void ispccdc_enable_alaw(struct isp_ccdc_device *ccdc, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW,
++ ~ISPCCDC_ALAW_CCDTBL,
++ enable ? ISPCCDC_ALAW_CCDTBL : 0);
++}
++
++/*
++ * ispccdc_config_imgattr - Configure sensor image specific attributes.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @colptn: Color pattern of the sensor.
++ */
++static void ispccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_writel(isp, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN);
++}
++
++/*
++ * ispccdc_config - Set CCDC configuration from userspace
++ * @ccdc: Pointer to ISP CCDC device.
++ * @ccdc_struct: Structure containing CCDC configuration sent from userspace.
++ *
++ * Returns 0 if successful, -EINVAL if the pointer to the configuration
++ * structure is null, or the copy_from_user function fails to copy user space
++ * memory to kernel space memory.
++ */
++static int ispccdc_config(struct isp_ccdc_device *ccdc,
++ struct ispccdc_update_config *ccdc_struct)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ struct ispccdc_bclamp bclamp_t;
++ struct ispccdc_blcomp blcomp_t;
++ struct ispccdc_culling cull_t;
++ unsigned long flags;
++ int ret = 0;
++
++ if (ccdc_struct == NULL)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ccdc->lock, flags);
++ ccdc->shadow_update = 1;
++ spin_unlock_irqrestore(&ccdc->lock, flags);
++
++ if (ISP_ABS_CCDC_ALAW & ccdc_struct->flag) {
++ if (ISP_ABS_CCDC_ALAW & ccdc_struct->update)
++ ispccdc_config_alaw(ccdc, ccdc_struct->alawip);
++ ispccdc_enable_alaw(ccdc, 1);
++ } else if (ISP_ABS_CCDC_ALAW & ccdc_struct->update)
++ ispccdc_enable_alaw(ccdc, 0);
++
++ if (ISP_ABS_CCDC_LPF & ccdc_struct->flag)
++ ispccdc_enable_lpf(ccdc, 1);
++ else
++ ispccdc_enable_lpf(ccdc, 0);
++
++ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->flag) {
++ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) {
++ if (copy_from_user(&bclamp_t, ccdc_struct->bclamp,
++ sizeof(struct ispccdc_bclamp))) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ ispccdc_enable_black_clamp(ccdc, 1);
++ ispccdc_config_black_clamp(ccdc, &bclamp_t);
++ } else
++ ispccdc_enable_black_clamp(ccdc, 1);
++ } else {
++ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) {
++ if (copy_from_user(&bclamp_t, ccdc_struct->bclamp,
++ sizeof(struct ispccdc_bclamp))) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ ispccdc_enable_black_clamp(ccdc, 0);
++ ispccdc_config_black_clamp(ccdc, &bclamp_t);
++ }
++ }
++
++ if (ISP_ABS_CCDC_BCOMP & ccdc_struct->update) {
++ if (copy_from_user(&blcomp_t, ccdc_struct->blcomp,
++ sizeof(blcomp_t))) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ ispccdc_config_black_comp(ccdc, &blcomp_t);
++ }
++
++ if (ISP_ABS_CCDC_FPC & ccdc_struct->flag) {
++ if (ISP_ABS_CCDC_FPC & ccdc_struct->update) {
++ struct ispccdc_fpc fpc_t;
++ u32 fpc_table_m;
++ void *fpc_table;
++ u32 fpc_table_old;
++ u32 fpc_table_size;
++
++ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
++ return -EBUSY;
++
++ if (copy_from_user(&fpc_t, ccdc_struct->fpc,
++ sizeof(fpc_t))) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ /*
++ * fpc_table_m must be 64-bytes aligned, but it's
++ * already done by iommu_vmalloc().
++ */
++ fpc_table_size = fpc_t.fpnum * 4;
++ fpc_table_m = iommu_vmalloc(isp->iommu, 0,
++ fpc_table_size, IOMMU_FLAG);
++ if (IS_ERR_VALUE(fpc_table_m)) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ fpc_table = da_to_va(isp->iommu, fpc_table_m);
++ if (copy_from_user(fpc_table,
++ (__force void __user *)
++ fpc_t.fpcaddr,
++ fpc_table_size)) {
++ iommu_vfree(isp->iommu, fpc_table_m);
++ ret = -EFAULT;
++ goto out;
++ }
++ fpc_t.fpcaddr = fpc_table_m;
++
++ spin_lock_irqsave(&ccdc->lock, flags);
++ fpc_table_old = ccdc->fpc_table_add_m;
++ ccdc->fpc_table_add = fpc_table;
++ ccdc->fpc_table_add_m = fpc_table_m;
++ ispccdc_config_fpc(ccdc, &fpc_t);
++ spin_unlock_irqrestore(&ccdc->lock, flags);
++
++ if (fpc_table_old != 0)
++ iommu_vfree(isp->iommu, fpc_table_old);
++ }
++ ispccdc_enable_fpc(ccdc, 1);
++ } else if (ISP_ABS_CCDC_FPC & ccdc_struct->update)
++ ispccdc_enable_fpc(ccdc, 0);
++
++ if (ISP_ABS_CCDC_CULL & ccdc_struct->update) {
++ if (copy_from_user(&cull_t, ccdc_struct->cull,
++ sizeof(cull_t))) {
++ ret = -EFAULT;
++ goto out;
++ }
++ ispccdc_config_culling(ccdc, &cull_t);
++ }
++
++ ret = ispccdc_lsc_config(ccdc, ccdc_struct);
++ if (ret)
++ goto out;
++
++ if (ISP_ABS_CCDC_COLPTN & ccdc_struct->update)
++ ispccdc_config_imgattr(ccdc, ccdc_struct->colptn);
++
++out:
++ if (ret == -EFAULT)
++ dev_err(to_device(ccdc),
++ "ccdc: user provided bad configuration data address");
++
++ if (ret == -ENOMEM)
++ dev_err(to_device(ccdc),
++ "ccdc: can not allocate memory");
++
++ ccdc->shadow_update = 0;
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * Format- and pipeline-related configuration helpers
++ */
++
++/*
++ * ispccdc_config_vp - Configure the Video Port.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @vpcfg: Structure containing the Video Port input frequency, and the 10 bit
++ * format.
++ */
++static void ispccdc_config_vp(struct isp_ccdc_device *ccdc,
++ struct ispccdc_vp *vpcfg)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
++ struct isp_device *isp = to_isp_device(ccdc);
++ unsigned long l3_ick = pipe->l3_ick;
++ unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
++ unsigned int div = 0;
++ u32 fmtcfg_vp = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_FMTCFG);
++
++ fmtcfg_vp &= ISPCCDC_FMTCFG_VPIN_MASK & ISPCCDC_FMTCFG_VPIF_FRQ_MASK;
++
++ switch (vpcfg->bitshift_sel) {
++ case BIT9_0:
++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0;
++ break;
++ case BIT10_1:
++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1;
++ break;
++ case BIT11_2:
++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2;
++ break;
++ case BIT12_3:
++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3;
++ break;
++ };
++
++ if (pipe->input)
++ div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
++ else if (vpcfg->pixelclk)
++ div = l3_ick / vpcfg->pixelclk;
++
++ div = clamp(div, 2U, max_div);
++ fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
++
++ isp_reg_writel(isp, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
++}
++
++/*
++ * ispccdc_enable_vp - Enable Video Port.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @enable: 0 Disables VP, 1 Enables VP
++ *
++ * This is needed for outputting image to Preview, H3A and HIST ISP submodules.
++ */
++static void ispccdc_enable_vp(struct isp_ccdc_device *ccdc, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG,
++ ~ISPCCDC_FMTCFG_VPEN,
++ enable ? ISPCCDC_FMTCFG_VPEN : 0);
++}
++
++/*
++ * ispccdc_config_outlineoffset - Configure memory saving output line offset
++ * @ccdc: Pointer to ISP CCDC device.
++ * @offset: Address offset to start a new line. Must be twice the
++ * Output width and aligned on 32 byte boundary
++ * @oddeven: Specifies the odd/even line pattern to be chosen to store the
++ * output.
++ * @numlines: Set the value 0-3 for +1-4lines, 4-7 for -1-4lines.
++ *
++ * - Configures the output line offset when stored in memory
++ * - Sets the odd/even line pattern to store the output
++ * (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4))
++ * - Configures the number of even and odd line fields in case of rearranging
++ * the lines.
++ *
++ * Returns 0 if successful, or -EINVAL if the offset is not in 32 byte
++ * boundary.
++ */
++static int ispccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
++ u32 offset, u8 oddeven, u8 numlines)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_writel(isp, offset & 0xffff,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF);
++
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
++ ~ISPCCDC_SDOFST_FINV);
++
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
++ ~ISPCCDC_SDOFST_FOFST_4L);
++
++ switch (oddeven) {
++ case EVENEVEN:
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT);
++ break;
++ case ODDEVEN:
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT);
++ break;
++ case EVENODD:
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT);
++ break;
++ case ODDODD:
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++/*
++ * ispccdc_set_outaddr - Set memory address to save output image
++ * @ccdc: Pointer to ISP CCDC device.
++ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
++ *
++ * Sets the memory address where the output will be saved.
++ */
++static void ispccdc_set_outaddr(struct isp_ccdc_device *ccdc, u32 addr)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR);
++}
++
++/*
++ * ispccdc_config_sync_if - Set CCDC sync interface params between sensor and CCDC.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @syncif: Structure containing the sync parameters like field state, CCDC in
++ * master/slave mode, raw/yuv data, polarity of data, field, hs, vs
++ * signals.
++ */
++static void ispccdc_config_sync_if(struct isp_ccdc_device *ccdc,
++ struct ispccdc_syncif *syncif)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ u32 syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_SYN_MODE);
++
++ syn_mode |= ISPCCDC_SYN_MODE_VDHDEN;
++
++ if (syncif->fldstat)
++ syn_mode |= ISPCCDC_SYN_MODE_FLDSTAT;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_FLDSTAT;
++
++ syn_mode &= ISPCCDC_SYN_MODE_DATSIZ_MASK;
++ switch (syncif->datsz) {
++ case 8:
++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8;
++ break;
++ case 10:
++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10;
++ break;
++ case 11:
++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11;
++ break;
++ case 12:
++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12;
++ break;
++ };
++
++ if (syncif->fldmode)
++ syn_mode |= ISPCCDC_SYN_MODE_FLDMODE;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_FLDMODE;
++
++ if (syncif->datapol)
++ syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_DATAPOL;
++
++ if (syncif->fldpol)
++ syn_mode |= ISPCCDC_SYN_MODE_FLDPOL;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_FLDPOL;
++
++ if (syncif->hdpol)
++ syn_mode |= ISPCCDC_SYN_MODE_HDPOL;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_HDPOL;
++
++ if (syncif->vdpol)
++ syn_mode |= ISPCCDC_SYN_MODE_VDPOL;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_VDPOL;
++
++ if (syncif->ccdc_mastermode) {
++ syn_mode |= ISPCCDC_SYN_MODE_FLDOUT | ISPCCDC_SYN_MODE_VDHDOUT;
++ isp_reg_writel(isp,
++ syncif->hs_width << ISPCCDC_HD_VD_WID_HDW_SHIFT
++ | syncif->vs_width << ISPCCDC_HD_VD_WID_VDW_SHIFT,
++ OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_HD_VD_WID);
++
++ isp_reg_writel(isp,
++ syncif->ppln << ISPCCDC_PIX_LINES_PPLN_SHIFT
++ | syncif->hlprf << ISPCCDC_PIX_LINES_HLPRF_SHIFT,
++ OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_PIX_LINES);
++ } else
++ syn_mode &= ~(ISPCCDC_SYN_MODE_FLDOUT |
++ ISPCCDC_SYN_MODE_VDHDOUT);
++
++ isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
++
++ if (!syncif->bt_r656_en)
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_CCDC,
++ ISPCCDC_REC656IF, ~ISPCCDC_REC656IF_R656ON);
++}
++
++/* CCDC formats descriptions */
++static const u32 ccdc_sgrbg_pattern =
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
++
++static const u32 ccdc_srggb_pattern =
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
++
++static const u32 ccdc_sbggr_pattern =
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
++
++static const u32 ccdc_sgbrg_pattern =
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
++
++static void ccdc_configure(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++ struct isp_parallel_platform_data *pdata = NULL;
++ struct media_entity_pad *pad;
++ struct v4l2_subdev *sensor;
++ struct v4l2_mbus_framefmt *format;
++ struct v4l2_pix_format pix;
++ unsigned long flags;
++ u32 syn_mode;
++ u32 ccdc_pattern;
++
++ if (ccdc->input == CCDC_INPUT_PARALLEL) {
++ pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]);
++ sensor = media_entity_to_v4l2_subdev(pad->entity);
++ pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
++ ->bus.parallel;
++ }
++
++ isp_configure_bridge(isp, ccdc->input, pdata);
++
++ syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
++
++ /* Use the raw, unprocessed data when writing to memory. The H3A and
++ * histogram modules are still fed with lens shading corrected data.
++ */
++ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR;
++
++ if (ccdc->output & CCDC_OUTPUT_MEMORY)
++ syn_mode |= ISPCCDC_SYN_MODE_WEN;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_WEN;
++
++ if (ccdc->output & CCDC_OUTPUT_RESIZER)
++ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ;
++ else
++ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;
++
++ isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
++
++ /* CCDC_PAD_SINK */
++ format = &ccdc->formats[CCDC_PAD_SINK];
++
++ /* Mosaic filter */
++ switch (format->code) {
++ case V4L2_MBUS_FMT_SRGGB10_1X10:
++ ccdc_pattern = ccdc_srggb_pattern;
++ break;
++ case V4L2_MBUS_FMT_SBGGR10_1X10:
++ ccdc_pattern = ccdc_sbggr_pattern;
++ break;
++ case V4L2_MBUS_FMT_SGBRG10_1X10:
++ ccdc_pattern = ccdc_sgbrg_pattern;
++ break;
++ default:
++ /* Use GRBG */
++ ccdc_pattern = ccdc_sgrbg_pattern;
++ break;
++ }
++ ispccdc_config_imgattr(ccdc, ccdc_pattern);
++
++ /* Generate VD0 on the last line of the image and VD1 on the
++ * 2/3 height line.
++ */
++ isp_reg_writel(isp, ((format->height - 2) << ISPCCDC_VDINT_0_SHIFT) |
++ ((format->height * 2 / 3) << ISPCCDC_VDINT_1_SHIFT),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);
++
++ /* CCDC_PAD_SOURCE_OF */
++ format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
++
++ isp_reg_writel(isp, (0 << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
++ ((format->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
++ isp_reg_writel(isp, 0 << ISPCCDC_VERT_START_SLV0_SHIFT,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
++ isp_reg_writel(isp, (format->height - 1)
++ << ISPCCDC_VERT_LINES_NLV_SHIFT,
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
++
++ isp_video_mbus_to_pix(&ccdc->video_out, format, &pix);
++ ispccdc_config_outlineoffset(ccdc, pix.bytesperline, 0, 0);
++
++ /* CCDC_PAD_SOURCE_VP */
++ format = &ccdc->formats[CCDC_PAD_SOURCE_VP];
++
++ isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
++ (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
++ isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
++ ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);
++
++ isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
++ (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);
++
++ /* Setup LSC. Disable it if not supported for the selected
++ * resolution.
++ */
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++ if (!list_empty(&ccdc->lsc.req_queue)) {
++ struct ispccdc_lsc_config_req *req;
++
++ req = ispccdc_lsc_dequeue_req(ccdc, &ccdc->lsc.req_queue);
++ list_del(&req->list);
++ if (ispccdc_lsc_validate_config(ccdc, &req->config) < 0) {
++ list_add_tail(&req->list, &ccdc->lsc.free_queue);
++ schedule_work(&ccdc->lsc.table_work);
++ } else {
++ WARN_ON(ccdc->lsc.active);
++ ccdc->lsc.active = req;
++ __ispccdc_lsc_configure(ccdc, req);
++ }
++ }
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++
++ ispccdc_print_status(ccdc);
++}
++
++static void __ispccdc_enable(struct isp_ccdc_device *ccdc, int enable)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
++ ~ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);
++}
++
++static int ispccdc_disable(struct isp_ccdc_device *ccdc)
++{
++ unsigned long flags;
++ int ret = 0;
++
++ if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT)
++ goto skip_wait;
++
++ spin_lock_irqsave(&ccdc->lock, flags);
++ ccdc->stopping = 1;
++ spin_unlock_irqrestore(&ccdc->lock, flags);
++
++ ret = wait_event_timeout(ccdc->wait, ccdc->stopping == 0,
++ msecs_to_jiffies(2000));
++ if (ret == 0) {
++ ret = -ETIMEDOUT;
++ ccdc->stopping = 0;
++ dev_warn(to_device(ccdc), "CCDC stop timeout!\n");
++ }
++
++skip_wait:
++ isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ);
++
++ ispccdc_lsc_free_queue(ccdc, &ccdc->lsc.req_queue);
++ ispccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
++ ispccdc_lsc_init_queue(ccdc);
++ if (ccdc->lsc.active) {
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ if (ccdc->lsc.active->table)
++ iommu_vfree(isp->iommu, ccdc->lsc.active->table);
++ kfree(ccdc->lsc.active);
++ ccdc->lsc.active = NULL;
++ }
++
++ return ret > 0 ? 0 : ret;
++}
++
++static void ispccdc_enable(struct isp_ccdc_device *ccdc)
++{
++ if (ispccdc_lsc_is_configured(ccdc))
++ __ispccdc_lsc_enable(ccdc, 1);
++ __ispccdc_enable(ccdc, 1);
++}
++
++/* -----------------------------------------------------------------------------
++ * Interrupt handling
++ */
++
++/*
++ * ispccdc_sbl_busy - Poll idle state of CCDC and related SBL memory write bits
++ * @ccdc: Pointer to ISP CCDC device.
++ *
++ * Returns zero if the CCDC is idle and the image has been written to
++ * memory, too.
++ */
++static int ispccdc_sbl_busy(struct isp_ccdc_device *ccdc)
++{
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ return ispccdc_busy(ccdc)
++ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) &
++ ISPSBL_CCDC_WR_0_DATA_READY)
++ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) &
++ ISPSBL_CCDC_WR_0_DATA_READY)
++ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) &
++ ISPSBL_CCDC_WR_0_DATA_READY)
++ | (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) &
++ ISPSBL_CCDC_WR_0_DATA_READY);
++}
++
++/*
++ * ispccdc_sbl_wait_idle - Wait until the CCDC and related SBL are idle
++ * @ccdc: Pointer to ISP CCDC device.
++ * @max_wait: Max retry count in us for wait for idle/busy transition.
++ */
++static int ispccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
++ unsigned int max_wait)
++{
++ unsigned int wait = 0;
++
++ if (max_wait == 0)
++ max_wait = 10000; /* 10 ms */
++
++ for (wait = 0; wait <= max_wait; wait++) {
++ if (!ispccdc_sbl_busy(ccdc))
++ return 0;
++
++ rmb();
++ udelay(1);
++ }
++
++ return -EBUSY;
++}
++
++static void ispccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
++{
++ struct video_device *vdev = &ccdc->subdev.devnode;
++ struct v4l2_event event;
++
++ memset(&event, 0, sizeof(event));
++ event.type = V4L2_EVENT_OMAP3ISP_HS_VS;
++
++ v4l2_event_queue(vdev, &event);
++}
++
++/*
++ * ispccdc_lsc_isr - Handle LSC events
++ * @ccdc: Pointer to ISP CCDC device.
++ * @events: LSC events
++ */
++static void ispccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
++{
++ struct ispccdc_lsc_config_req *req;
++ unsigned long flags;
++
++ if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
++ ispccdc_lsc_error_handler(ccdc);
++ ccdc->error = 1;
++ dev_dbg(to_device(ccdc), "lsc prefetch error\n");
++ }
++
++ if (!(events & IRQ0STATUS_CCDC_LSC_DONE_IRQ))
++ return;
++
++ /* This is an exception. Start of frame and LSC_DONE interrupt
++ * have been received at the same time. Skip this event and wait
++ * for better times.
++ */
++ if ((events & IRQ0STATUS_HS_VS_IRQ) &&
++ (events & IRQ0STATUS_CCDC_LSC_DONE_IRQ)) {
++ return;
++ }
++
++ /* A LSC_DONE interrupt occurred; there are two cases
++ * 1. stopping for reconfiguration
++ * 2. stopping because of STREAM OFF command
++ */
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++
++ if (ccdc->lsc.state == LSC_STATE_STOPPING) {
++ ccdc->lsc.state = LSC_STATE_STOPPED;
++ goto done;
++ }
++
++ if (ccdc->lsc.state != LSC_STATE_RECONFIG)
++ goto done;
++
++ /* LSC was in RECONFIG state; the engine is now stopped */
++ ccdc->lsc.state = LSC_STATE_STOPPED;
++
++ /* The LSC engine is stopped at this point. Get first entry from
++ * request queue without deleting it
++ */
++ req = ispccdc_lsc_dequeue_req(ccdc, &ccdc->lsc.req_queue);
++ if (req == NULL)
++ goto done;
++
++ /* If we have an old active configuration, it's time to free it */
++ if (ccdc->lsc.active)
++ list_add_tail(&ccdc->lsc.active->list, &ccdc->lsc.free_queue);
++
++ /* Modify active pointer to the current configuration entry and
++ * delete new request from request queue
++ */
++ list_del(&req->list);
++ ccdc->lsc.active = req;
++
++ /* The user wants to disable LSC, nothing to do */
++ if (!req->enable) {
++ isp_sbl_disable(to_isp_device(ccdc),
++ OMAP3_ISP_SBL_CCDC_LSC_READ);
++ goto done;
++ }
++
++ if (__ispccdc_lsc_configure(ccdc, req) == 0)
++ __ispccdc_lsc_enable(ccdc, 1);
++
++ schedule_work(&ccdc->lsc.table_work);
++done:
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++}
++
++static int ispccdc_isr_buffer(struct isp_ccdc_device *ccdc)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
++ struct isp_device *isp = to_isp_device(ccdc);
++ struct isp_buffer *buffer;
++ int restart = 0;
++
++ /* The CCDC generates VD0 interrupts even when disabled (the datasheet
++ * doesn't explicitly state if that's supposed to happen or not, so it
++ * can be considered as a hardware bug or as a feature, but we have to
++ * deal with it anyway). Disabling the CCDC when no buffer is available
++ * would thus not be enough, we need to handle the situation explicitly.
++ */
++ if (list_empty(&ccdc->video_out.dmaqueue))
++ goto done;
++
++ /* We're in continuous mode, and memory writes were disabled due to a
++ * buffer underrun. Reenable them now that we have a buffer. The buffer
++ * address has been set in ccdc_video_queue.
++ */
++ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
++ restart = 1;
++ ccdc->underrun = 0;
++ goto done;
++ }
++
++ if (ispccdc_sbl_wait_idle(ccdc, 1000)) {
++ dev_info(isp->dev, "CCDC won't become idle!\n");
++ goto done;
++ }
++
++ buffer = isp_video_buffer_next(&ccdc->video_out, ccdc->error);
++ if (buffer != NULL) {
++ ispccdc_set_outaddr(ccdc, buffer->isp_addr);
++ restart = 1;
++ }
++
++ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
++
++ if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT &&
++ isp_pipeline_ready(pipe))
++ isp_pipeline_set_stream(pipe,
++ ISP_PIPELINE_STREAM_SINGLESHOT);
++
++done:
++ ccdc->error = 0;
++ return restart;
++}
++
++/*
++ * ispccdc_vd0_isr - Handle VD0 event
++ * @ccdc: Pointer to ISP CCDC device.
++ *
++ * Executes LSC deferred enablement before next frame starts.
++ */
++static void ispccdc_vd0_isr(struct isp_ccdc_device *ccdc)
++{
++ unsigned long flags;
++ int restart = 0;
++
++ if (ccdc->output & CCDC_OUTPUT_MEMORY)
++ restart = ispccdc_isr_buffer(ccdc);
++
++ spin_lock_irqsave(&ccdc->lock, flags);
++ if (ccdc->stopping) {
++ ccdc->stopping = 0;
++ spin_unlock_irqrestore(&ccdc->lock, flags);
++ wake_up(&ccdc->wait);
++ return;
++ }
++ spin_unlock_irqrestore(&ccdc->lock, flags);
++
++ if (restart)
++ ispccdc_enable(ccdc);
++}
++
++/*
++ * ispccdc_vd1_isr - Handle VD1 event
++ * @ccdc: Pointer to ISP CCDC device.
++ */
++static void ispccdc_vd1_isr(struct isp_ccdc_device *ccdc)
++{
++ struct ispccdc_lsc_config_req *req;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
++
++ /* We are about to stop the CCDC, with or without LSC */
++ if (ccdc->stopping || (ccdc->output & CCDC_OUTPUT_MEMORY) ||
++ (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT)) {
++ __ispccdc_lsc_enable(ccdc, 0);
++ __ispccdc_enable(ccdc, 0);
++ goto done;
++ }
++
++ /* Get first entry from queue but without deleting it */
++ req = ispccdc_lsc_dequeue_req(ccdc, &ccdc->lsc.req_queue);
++ if (req == NULL)
++ goto done;
++
++ /* Run time LSC update */
++ switch (ccdc->lsc.state) {
++ case LSC_STATE_RUNNING:
++ /* LSC need to be reconfigured. Stop it here and on next
++ * LSC_DONE IRQ do the appropriate changes in registers
++ */
++ __ispccdc_lsc_enable(ccdc, 0);
++ ccdc->lsc.state = LSC_STATE_RECONFIG;
++ break;
++
++ case LSC_STATE_STOPPED:
++ if (!req->enable)
++ break;
++ /* LSC has been in stopped state, enable it now */
++ if (__ispccdc_lsc_configure(ccdc, req) == 0) {
++ if (ccdc->lsc.active)
++ list_add_tail(&ccdc->lsc.active->list,
++ &ccdc->lsc.free_queue);
++ ccdc->lsc.active = req;
++ list_del(&req->list);
++ __ispccdc_lsc_enable(ccdc, 1);
++ schedule_work(&ccdc->lsc.table_work);
++ }
++ break;
++
++ case LSC_STATE_STOPPING:
++ case LSC_STATE_RECONFIG:
++ /* shouldn't happen */
++ break;
++ }
++
++done:
++ spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
++}
++
++/*
++ * ispccdc_isr - Configure CCDC during interframe time.
++ * @ccdc: Pointer to ISP CCDC device.
++ * @events: CCDC events
++ */
++int ispccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
++{
++ if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED)
++ return 0;
++
++ if (events & IRQ0STATUS_CCDC_VD1_IRQ)
++ ispccdc_vd1_isr(ccdc);
++
++ ispccdc_lsc_isr(ccdc, events);
++
++ if (events & IRQ0STATUS_CCDC_VD0_IRQ)
++ ispccdc_vd0_isr(ccdc);
++
++ if (events & IRQ0STATUS_HS_VS_IRQ)
++ ispccdc_hs_vs_isr(ccdc);
++
++ return 0;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP video operations
++ */
++
++static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
++{
++ struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;
++
++ if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
++ return -ENODEV;
++
++ ispccdc_set_outaddr(ccdc, buffer->isp_addr);
++
++ /* We now have a buffer queued on the output, restart the pipeline in
++ * on the next CCDC interrupt if running in continuous mode (or when
++ * starting the stream).
++ */
++ ccdc->underrun = 1;
++
++ return 0;
++}
++
++static const struct isp_video_operations ccdc_video_ops = {
++ .queue = ccdc_video_queue,
++};
++
++/* -----------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++
++/*
++ * ccdc_get_ctrl - V4L2 control get handler
++ * @sd: ISP CCDC V4L2 subdevice
++ * @ctrl: V4L2 control
++ *
++ * Return 0 on success or a negative error code otherwise.
++ */
++static int ccdc_get_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ return -EINVAL;
++}
++
++/*
++ * ccdc_set_ctrl - V4L2 control set handler
++ * @sd: ISP CCDC V4L2 subdevice
++ * @ctrl: V4L2 control
++ *
++ * Return 0 on success or a negative error code otherwise.
++ */
++static int ccdc_set_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ return -EINVAL;
++}
++
++/*
++ * ccdc_ioctl - CCDC module private ioctl's
++ * @sd: ISP CCDC V4L2 subdevice
++ * @cmd: ioctl command
++ * @arg: ioctl argument
++ *
++ * Return 0 on success or a negative error code otherwise.
++ */
++static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ int ret;
++
++ switch (cmd) {
++ case VIDIOC_PRIVATE_ISP_CCDC_CFG:
++ mutex_lock(&ccdc->ioctl_lock);
++ ret = ispccdc_config(ccdc, arg);
++ mutex_unlock(&ccdc->ioctl_lock);
++ break;
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return ret;
++}
++
++/*
++ * ccdc_set_power - Power on/off the CCDC module
++ * @sd: ISP CCDC V4L2 subdevice
++ * @on: power on/off
++ *
++ * Return 0 on success or a negative error code otherwise.
++ */
++static int ccdc_set_power(struct v4l2_subdev *sd, int on)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ if (on) {
++ if (!isp_get(isp))
++ return -EBUSY;
++ } else {
++ isp_put(isp);
++ }
++
++ return 0;
++}
++
++static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub)
++{
++ if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS)
++ return -EINVAL;
++
++ return v4l2_event_subscribe(fh, sub);
++}
++
++static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub)
++{
++ return v4l2_event_unsubscribe(fh, sub);
++}
++
++/*
++ * ccdc_set_stream - Enable/Disable streaming on the CCDC module
++ * @sd: ISP CCDC V4L2 subdevice
++ * @enable: Enable/disable stream
++ *
++ * When writing to memory, the CCDC hardware can't be enabled without a memory
++ * buffer to write to. As the s_stream operation is called in response to a
++ * STREAMON call without any buffer queued yet, just update the enabled field
++ * and return immediately. The CCDC will be enabled in ccdc_isr_buffer().
++ *
++ * When not writing to memory enable the CCDC immediately.
++ */
++static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(ccdc);
++ int ret = 0;
++
++ if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED) {
++ if (enable == ISP_PIPELINE_STREAM_STOPPED)
++ return 0;
++
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
++ ISPCTRL_CCDC_RAM_EN | ISPCTRL_CCDC_CLK_EN);
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
++ ISPCCDC_CFG_VDLC);
++
++ ccdc_configure(ccdc);
++
++ /* TODO: Don't configure the video port if all of its output
++ * links are inactive.
++ */
++ ispccdc_config_vp(ccdc, &ccdc->vpcfg);
++ ispccdc_enable_vp(ccdc, 1);
++ }
++
++ switch (enable) {
++ case ISP_PIPELINE_STREAM_CONTINUOUS:
++ if (ccdc->output & CCDC_OUTPUT_MEMORY)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
++
++ if (ccdc->underrun || !(ccdc->output & CCDC_OUTPUT_MEMORY))
++ ispccdc_enable(ccdc);
++
++ ccdc->underrun = 0;
++ break;
++
++ case ISP_PIPELINE_STREAM_SINGLESHOT:
++ if (ccdc->output & CCDC_OUTPUT_MEMORY &&
++ ccdc->state != ISP_PIPELINE_STREAM_SINGLESHOT)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
++
++ ispccdc_enable(ccdc);
++ break;
++
++ case ISP_PIPELINE_STREAM_STOPPED:
++ ret = ispccdc_disable(ccdc);
++ if (ccdc->output & CCDC_OUTPUT_MEMORY)
++ isp_sbl_disable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
++ ~(ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN));
++ ccdc->underrun = 0;
++ break;
++ }
++
++ ccdc->state = enable;
++ return ret;
++}
++
++static struct v4l2_mbus_framefmt *
++__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return v4l2_subdev_get_probe_format(fh, pad);
++ else
++ return &ccdc->formats[pad];
++}
++
++/*
++ * ccdc_try_format - Try video format on a pad
++ * @ccdc: ISP CCDC device
++ * @fh : V4L2 subdev file handle
++ * @pad: Pad number
++ * @fmt: Format
++ */
++static void
++ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct v4l2_mbus_framefmt *format;
++ unsigned int width = fmt->width;
++ unsigned int height = fmt->height;
++ unsigned int i;
++
++ switch (pad) {
++ case CCDC_PAD_SINK:
++ /* TODO: If the CCDC output formatter pad is connected directly
++ * to the resizer, only YUV formats can be used.
++ */
++ for (i = 0; i < ARRAY_SIZE(ccdc_fmts); i++) {
++ if (fmt->code == ccdc_fmts[i])
++ break;
++ }
++
++ /* If not found, use SGRBG10 as default */
++ if (i >= ARRAY_SIZE(ccdc_fmts))
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++
++ /* Clamp the input size. */
++ fmt->width = clamp_t(u32, width, 32, 4096);
++ fmt->height = clamp_t(u32, height, 32, 4096);
++ break;
++
++ case CCDC_PAD_SOURCE_OF:
++ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
++ memcpy(fmt, format, sizeof(*fmt));
++
++ /* The data formatter truncates the number of horizontal output
++ * pixels to a multiple of 16. To avoid clipping data, allow
++ * callers to request an output size bigger than the input size
++ * up to the nearest multiple of 16.
++ */
++ fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
++ fmt->width &= ~15;
++ fmt->height = clamp_t(u32, height, 32, fmt->height);
++ break;
++
++ case CCDC_PAD_SOURCE_VP:
++ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
++ memcpy(fmt, format, sizeof(*fmt));
++
++ /* The number of lines that can be clocked out from the video
++ * port output must be at least one line less than the number
++ * of input lines.
++ */
++ fmt->width = clamp_t(u32, width, 32, fmt->width);
++ fmt->height = clamp_t(u32, height, 32, fmt->height - 1);
++ break;
++ }
++
++ /* Data is written to memory unpacked, each 10-bit pixel is stored on
++ * 2 bytes.
++ */
++ fmt->colorspace = V4L2_COLORSPACE_SRGB;
++ fmt->field = V4L2_FIELD_NONE;
++}
++
++/*
++ * ccdc_enum_mbus_code - Handle pixel format enumeration
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
++ * return -EINVAL or zero on success
++ */
++static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ switch (code->pad) {
++ case CCDC_PAD_SINK:
++ if (code->index >= ARRAY_SIZE(ccdc_fmts))
++ return -EINVAL;
++
++ code->code = ccdc_fmts[code->index];
++ break;
++
++ case CCDC_PAD_SOURCE_OF:
++ case CCDC_PAD_SOURCE_VP:
++ /* No format conversion inside CCDC */
++ if (code->index != 0)
++ return -EINVAL;
++
++ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_PROBE);
++
++ code->code = format->code;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt format;
++
++ if (fse->index != 0)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = 1;
++ format.height = 1;
++ ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_PROBE);
++ fse->min_width = format.width;
++ fse->min_height = format.height;
++
++ if (format.code != fse->code)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = -1;
++ format.height = -1;
++ ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_PROBE);
++ fse->max_width = format.width;
++ fse->max_height = format.height;
++
++ return 0;
++}
++
++/*
++ * ccdc_get_format - Retrieve the video format on a pad
++ * @sd : ISP CCDC V4L2 subdevice
++ * @fh : V4L2 subdev file handle
++ * @pad: Pad number
++ * @fmt: Format
++ *
++ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
++ * to the format type.
++ */
++static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __ccdc_get_format(ccdc, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ memcpy(fmt, format, sizeof(*fmt));
++ return 0;
++}
++
++/*
++ * ccdc_set_format - Set the video format on a pad
++ * @sd : ISP CCDC V4L2 subdevice
++ * @fh : V4L2 subdev file handle
++ * @pad: Pad number
++ * @fmt: Format
++ *
++ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
++ * to the format type.
++ */
++static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __ccdc_get_format(ccdc, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ ccdc_try_format(ccdc, fh, pad, fmt, which);
++ memcpy(format, fmt, sizeof(*format));
++
++ /* Propagate the format from sink to source */
++ if (pad == CCDC_PAD_SINK) {
++ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, which);
++ memcpy(format, fmt, sizeof(*format));
++ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, which);
++
++ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_VP, which);
++ memcpy(format, fmt, sizeof(*format));
++ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_VP, format, which);
++ }
++
++ return 0;
++}
++
++/* V4L2 subdev core operations */
++static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = {
++ .g_ctrl = ccdc_get_ctrl,
++ .s_ctrl = ccdc_set_ctrl,
++ .ioctl = ccdc_ioctl,
++ .s_power = ccdc_set_power,
++ .subscribe_event = ccdc_subscribe_event,
++ .unsubscribe_event = ccdc_unsubscribe_event,
++};
++
++/* V4L2 subdev video operations */
++static const struct v4l2_subdev_video_ops ccdc_v4l2_video_ops = {
++ .s_stream = ccdc_set_stream,
++};
++
++/* V4L2 subdev pad operations */
++static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = {
++ .enum_mbus_code = ccdc_enum_mbus_code,
++ .enum_frame_size = ccdc_enum_frame_size,
++ .get_fmt = ccdc_get_format,
++ .set_fmt = ccdc_set_format,
++};
++
++/* V4L2 subdev operations */
++static const struct v4l2_subdev_ops ccdc_v4l2_ops = {
++ .core = &ccdc_v4l2_core_ops,
++ .video = &ccdc_v4l2_video_ops,
++ .pad = &ccdc_v4l2_pad_ops,
++};
++
++/* -----------------------------------------------------------------------------
++ * Media entity operations
++ */
++
++/*
++ * ccdc_link_setup - Setup CCDC connections
++ * @entity: CCDC media entity
++ * @local: Pad at the local end of the link
++ * @remote: Pad at the remote end of the link
++ * @flags: Link flags
++ *
++ * return -EINVAL or zero on success
++ */
++static int ccdc_link_setup(struct media_entity *entity,
++ const struct media_entity_pad *local,
++ const struct media_entity_pad *remote, u32 flags)
++{
++ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(ccdc);
++
++ switch (local->index | (remote->entity->type << 16)) {
++ case CCDC_PAD_SINK | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* Read from the sensor (parallel interface), CCP2, CSI2a or
++ * CSI2c.
++ */
++ if (!(flags & MEDIA_LINK_FLAG_ACTIVE)) {
++ ccdc->input = CCDC_INPUT_NONE;
++ break;
++ }
++
++ if (ccdc->input != CCDC_INPUT_NONE)
++ return -EBUSY;
++
++ if (remote->entity == &isp->isp_ccp2.subdev.entity)
++ ccdc->input = CCDC_INPUT_CCP2B;
++ else if (remote->entity == &isp->isp_csi2a.subdev.entity)
++ ccdc->input = CCDC_INPUT_CSI2A;
++ else if (remote->entity == &isp->isp_csi2c.subdev.entity)
++ ccdc->input = CCDC_INPUT_CSI2C;
++ else
++ ccdc->input = CCDC_INPUT_PARALLEL;
++
++ break;
++
++ case CCDC_PAD_SOURCE_VP | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* Write to preview engine, histogram and H3A. When none of
++ * those links are active, the video port can be disabled.
++ */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ ccdc->output |= CCDC_OUTPUT_PREVIEW;
++ else
++ ccdc->output &= ~CCDC_OUTPUT_PREVIEW;
++ break;
++
++ case CCDC_PAD_SOURCE_OF | (MEDIA_ENTITY_TYPE_NODE << 16):
++ /* Write to memory */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ ccdc->output |= CCDC_OUTPUT_MEMORY;
++ else
++ ccdc->output &= ~CCDC_OUTPUT_MEMORY;
++ break;
++
++ case CCDC_PAD_SOURCE_OF | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* Write to resizer */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ ccdc->output |= CCDC_OUTPUT_RESIZER;
++ else
++ ccdc->output &= ~CCDC_OUTPUT_RESIZER;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* media operations */
++static const struct media_entity_operations ccdc_media_ops = {
++ .link_setup = ccdc_link_setup,
++ .set_power = v4l2_subdev_set_power,
++};
++
++/*
++ * isp_ccdc_init_entities - Initialize V4L2 subdev and media entity
++ * @ccdc: ISP CCDC module
++ *
++ * Return 0 on success and a negative error code on failure.
++ */
++static int isp_ccdc_init_entities(struct isp_ccdc_device *ccdc)
++{
++ struct v4l2_subdev *sd = &ccdc->subdev;
++ struct media_entity_pad *pads = ccdc->pads;
++ struct media_entity *me = &sd->entity;
++ int ret;
++
++ ccdc->input = CCDC_INPUT_NONE;
++
++ v4l2_subdev_init(sd, &ccdc_v4l2_ops);
++ strlcpy(sd->name, "OMAP3 ISP CCDC", sizeof(sd->name));
++ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
++ v4l2_set_subdevdata(sd, ccdc);
++ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
++ sd->nevents = OMAP3ISP_CCDC_NEVENTS;
++
++ pads[CCDC_PAD_SINK].type = MEDIA_PAD_TYPE_INPUT;
++ pads[CCDC_PAD_SOURCE_VP].type = MEDIA_PAD_TYPE_OUTPUT;
++ pads[CCDC_PAD_SOURCE_OF].type = MEDIA_PAD_TYPE_OUTPUT;
++
++ me->ops = &ccdc_media_ops;
++ ret = media_entity_init(me, CCDC_PADS_NUM, pads, 0);
++ if (ret < 0)
++ return ret;
++
++ ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ ccdc->video_out.ops = &ccdc_video_ops;
++ ccdc->video_out.isp = to_isp_device(ccdc);
++ ccdc->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
++ ccdc->video_out.alignment = 32;
++
++ ret = isp_video_init(&ccdc->video_out, "CCDC");
++ if (ret < 0)
++ return ret;
++
++ /* Connect the CCDC subdev to the video node. */
++ ret = media_entity_create_link(&ccdc->subdev.entity, CCDC_PAD_SOURCE_OF,
++ &ccdc->video_out.video.entity, 0, 0);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
++void isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
++{
++ media_entity_cleanup(&ccdc->subdev.entity);
++
++ v4l2_device_unregister_subdev(&ccdc->subdev);
++ isp_video_unregister(&ccdc->video_out);
++}
++
++int isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
++ struct v4l2_device *vdev)
++{
++ int ret;
++
++ /* Register the subdev and video node. */
++ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&ccdc->video_out, vdev);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++error:
++ isp_ccdc_unregister_entities(ccdc);
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP CCDC initialisation and cleanup
++ */
++
++/*
++ * isp_ccdc_init - CCDC module initialization.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ *
++ * TODO: Get the initialisation values from platform data.
++ *
++ * Return 0 on success or a negative error code otherwise.
++ */
++int isp_ccdc_init(struct isp_device *isp)
++{
++ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
++
++ spin_lock_init(&ccdc->lock);
++ init_waitqueue_head(&ccdc->wait);
++ mutex_init(&ccdc->ioctl_lock);
++
++ ccdc->shadow_update = 0;
++
++ INIT_WORK(&ccdc->lsc.table_work, ispccdc_lsc_free_table_work);
++ ccdc->lsc.state = LSC_STATE_STOPPED;
++ ispccdc_lsc_init_queue(ccdc);
++ spin_lock_init(&ccdc->lsc.req_lock);
++
++ ccdc->fpc_table_add_m = 0;
++
++ ccdc->syncif.ccdc_mastermode = 0;
++ ccdc->syncif.datapol = 0;
++ ccdc->syncif.datsz = 10;
++ ccdc->syncif.fldmode = 0;
++ ccdc->syncif.fldout = 0;
++ ccdc->syncif.fldpol = 0;
++ ccdc->syncif.fldstat = 0;
++ ccdc->syncif.hdpol = 0;
++ ccdc->syncif.vdpol = 0;
++
++ ccdc->blkcfg.oblen = 0;
++ ccdc->blkcfg.dcsubval = 64;
++
++ ccdc->vpcfg.bitshift_sel = BIT9_0;
++ ccdc->vpcfg.pixelclk = 0;
++
++ ispccdc_config_sync_if(ccdc, &ccdc->syncif);
++ ispccdc_config_black_clamp(ccdc, &ccdc->blkcfg);
++
++ return isp_ccdc_init_entities(ccdc);
++}
++
++/*
++ * isp_ccdc_cleanup - CCDC module cleanup.
++ * @dev: Device pointer specific to the OMAP3 ISP.
++ */
++void isp_ccdc_cleanup(struct isp_device *isp)
++{
++ struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
++
++ /* Free lsc old table */
++ flush_work(&ccdc->lsc.table_work);
++
++ if (ccdc->fpc_table_add_m != 0)
++ iommu_vfree(isp->iommu, ccdc->fpc_table_add_m);
++}
+diff --git a/drivers/media/video/isp/ispccdc.h b/drivers/media/video/isp/ispccdc.h
+new file mode 100644
+index 0000000..d5eab98
+--- /dev/null
++++ b/drivers/media/video/isp/ispccdc.h
+@@ -0,0 +1,189 @@
++/*
++ * ispccdc.h
++ *
++ * Driver header file for CCDC module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Senthilvadivu Guruswamy <svadivu@ti.com>
++ * Pallavi Kulkarni <p-kulkarni@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_CCDC_H
++#define OMAP_ISP_CCDC_H
++
++#include <linux/workqueue.h>
++#include <mach/isp_user.h>
++#include "ispvideo.h"
++
++enum ccdc_input_entity {
++ CCDC_INPUT_NONE,
++ CCDC_INPUT_PARALLEL,
++ CCDC_INPUT_CSI2A,
++ CCDC_INPUT_CCP2B,
++ CCDC_INPUT_CSI2C
++};
++
++#define CCDC_OUTPUT_MEMORY (1 << 0)
++#define CCDC_OUTPUT_PREVIEW (1 << 1)
++#define CCDC_OUTPUT_RESIZER (1 << 2)
++
++#define OMAP3ISP_CCDC_NEVENTS 16
++
++/*
++ * struct ispccdc_syncif - Structure for Sync Interface between sensor and CCDC
++ * @ccdc_mastermode: Master mode. 1 - Master, 0 - Slave.
++ * @fldstat: Field state. 0 - Odd Field, 1 - Even Field.
++ * @datsz: Data size.
++ * @fldmode: 0 - Progressive, 1 - Interlaced.
++ * @datapol: 0 - Positive, 1 - Negative.
++ * @fldpol: 0 - Positive, 1 - Negative.
++ * @hdpol: 0 - Positive, 1 - Negative.
++ * @vdpol: 0 - Positive, 1 - Negative.
++ * @fldout: 0 - Input, 1 - Output.
++ * @hs_width: Width of the Horizontal Sync pulse, used for HS/VS Output.
++ * @vs_width: Width of the Vertical Sync pulse, used for HS/VS Output.
++ * @ppln: Number of pixels per line, used for HS/VS Output.
++ * @hlprf: Number of half lines per frame, used for HS/VS Output.
++ * @bt_r656_en: 1 - Enable ITU-R BT656 mode, 0 - Sync mode.
++ */
++struct ispccdc_syncif {
++ u8 ccdc_mastermode;
++ u8 fldstat;
++ u8 datsz;
++ u8 fldmode;
++ u8 datapol;
++ u8 fldpol;
++ u8 hdpol;
++ u8 vdpol;
++ u8 fldout;
++ u8 hs_width;
++ u8 vs_width;
++ u8 ppln;
++ u8 hlprf;
++ u8 bt_r656_en;
++};
++
++/*
++ * struct ispccdc_vp - Structure for Video Port parameters
++ * @bitshift_sel: Video port input select. 3 - bits 12-3, 4 - bits 11-2,
++ * 5 - bits 10-1, 6 - bits 9-0.
++ * @pixelclk: Input pixel clock in Hz
++ */
++struct ispccdc_vp {
++ enum vpin bitshift_sel;
++ unsigned int pixelclk;
++};
++
++enum ispccdc_lsc_state {
++ LSC_STATE_STOPPED = 0,
++ LSC_STATE_STOPPING = 1,
++ LSC_STATE_RUNNING = 2,
++ LSC_STATE_RECONFIG = 3,
++};
++
++struct ispccdc_lsc_config_req {
++ struct list_head list;
++ struct ispccdc_lsc_config config;
++ unsigned char enable;
++ u32 table;
++};
++
++/*
++ * ispccdc_lsc - CCDC LSC parameters
++ * @update_config: Set when user changes config
++ * @request_enable: Whether LSC is requested to be enabled
++ * @config: LSC config set by user
++ * @update_table: Set when user provides a new LSC table to table_new
++ * @table_new: LSC table set by user, ISP address
++ * @table_inuse: LSC table currently in use, ISP address
++ */
++struct ispccdc_lsc {
++ enum ispccdc_lsc_state state;
++ struct work_struct table_work;
++
++ /* LSC queue of configurations */
++ spinlock_t req_lock;
++ struct list_head req_queue; /* requested configurations */
++ struct list_head free_queue; /* configurations for freeing */
++ struct ispccdc_lsc_config_req *active; /* active configuration */
++};
++
++/* Sink and source CCDC pads */
++#define CCDC_PAD_SINK 0
++#define CCDC_PAD_SOURCE_OF 1
++#define CCDC_PAD_SOURCE_VP 2
++#define CCDC_PADS_NUM 3
++
++/*
++ * struct isp_ccdc_device - Structure for the CCDC module to store its own
++ * information
++ * @subdev: V4L2 subdevice
++ * @pads: Sink and source media entity pads
++ * @formats: Active video formats
++ * @video_out: Output video node
++ * @error: A hardware error occurred during capture
++ * @ccdcin_woffset: CCDC input horizontal offset.
++ * @ccdcin_hoffset: CCDC input vertical offset.
++ * @crop_w: Crop width.
++ * @crop_h: Crop height.
++ * @syncif_ipmod: Image
++ * @obclamp_en: Data input format.
++ * @fpc_table_add_m: ISP MMU mapped address of the current used FPC table.
++ * @fpc_table_add: Virtual address of the current used FPC table.
++ * @shadow_update: non-zero when user is updating CCDC configuration
++ * @enabled: Whether the CCDC is enabled
++ * @underrun: A buffer underrun occurred and a new buffer has been queued
++ * @lock: serializes shadow_update with interrupt handler
++ */
++struct isp_ccdc_device {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pads[CCDC_PADS_NUM];
++ struct v4l2_mbus_framefmt formats[CCDC_PADS_NUM];
++
++ enum ccdc_input_entity input;
++ unsigned int output;
++ struct isp_video video_out;
++ unsigned int error;
++
++ u8 obclamp_en;
++ unsigned long fpc_table_add_m;
++ u32 *fpc_table_add;
++ struct ispccdc_lsc lsc;
++ struct ispccdc_bclamp blkcfg;
++ struct ispccdc_syncif syncif;
++ struct ispccdc_vp vpcfg;
++
++ unsigned int shadow_update:1,
++ underrun:1;
++ enum isp_pipeline_stream_state state;
++ spinlock_t lock;
++ wait_queue_head_t wait;
++ unsigned int stopping;
++ struct mutex ioctl_lock;
++};
++
++struct isp_device;
++
++int isp_ccdc_init(struct isp_device *isp);
++void isp_ccdc_cleanup(struct isp_device *isp);
++int isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
++ struct v4l2_device *vdev);
++void isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc);
++
++int ispccdc_busy(struct isp_ccdc_device *isp_ccdc);
++int ispccdc_isr(struct isp_ccdc_device *isp_ccdc, u32 events);
++void ispccdc_save_context(struct isp_device *isp);
++void ispccdc_restore_context(struct isp_device *isp);
++
++#endif /* OMAP_ISP_CCDC_H */
+diff --git a/drivers/media/video/isp/ispccp2.c b/drivers/media/video/isp/ispccp2.c
+new file mode 100644
+index 0000000..8ae6c75
+--- /dev/null
++++ b/drivers/media/video/isp/ispccp2.c
+@@ -0,0 +1,1127 @@
++/*
++ * ispccp2.c
++ *
++ * Driver Library for CCP2 module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2010 Nokia Corporation.
++ * Copyright (C) 2010 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * RaniSuneela <r-m@ti.com>
++ *
++ * Based on code by:
++ * Contributors of isp driver:
++ * Sameer Venkatraman <sameerv@ti.com>
++ * Mohit Jalori <mjalori@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ * Toni Leinonen <toni.leinonen@nokia.com>
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/uaccess.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "ispccp2.h"
++
++/* Number of LCX channels */
++#define CCP2_LCx_CHANS_NUM 3
++/* Max/Min size for CCP2 video port */
++#define ISPCCP2_DAT_START_MIN 0
++#define ISPCCP2_DAT_START_MAX 4095
++#define ISPCCP2_DAT_SIZE_MIN 0
++#define ISPCCP2_DAT_SIZE_MAX 4095
++#define ISPCCP2_VPCLK_FRACDIV 65536
++#define ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP 0x12
++#define ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP 0x16
++/* Max/Min size for CCP2 memory channel */
++#define ISPCCP2_LCM_HSIZE_COUNT_MIN 16
++#define ISPCCP2_LCM_HSIZE_COUNT_MAX 8191
++#define ISPCCP2_LCM_HSIZE_SKIP_MIN 0
++#define ISPCCP2_LCM_HSIZE_SKIP_MAX 8191
++#define ISPCCP2_LCM_VSIZE_MIN 1
++#define ISPCCP2_LCM_VSIZE_MAX 8191
++#define ISPCCP2_LCM_HWORDS_MIN 1
++#define ISPCCP2_LCM_HWORDS_MAX 4095
++#define ISPCCP2_LCM_CTRL_BURST_SIZE_32X 5
++#define ISPCCP2_LCM_CTRL_READ_THROTTLE_FULL 0
++#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 2
++#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 2
++#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 3
++#define ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 3
++#define ISPCCP2_LCM_CTRL_DST_PORT_VP 0
++#define ISPCCP2_LCM_CTRL_DST_PORT_MEM 1
++
++/* Set only the required bits */
++#define BIT_SET(var, shift, mask, val) \
++ do { \
++ var = ((var) & ~((mask) << (shift))) \
++ | ((val) << (shift)); \
++ } while (0)
++
++/* Structure for saving/restoring ccp2 module registers */
++/* Saving/Restoring only registers modified here */
++static struct isp_reg ispccp2_reg_list[] = {
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQENABLE, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0), 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_START(0), 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_SIZE(0), 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_OFST(0), 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_VSIZE, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_HSIZE, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_PREFETCH, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_ADDR, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_OFST, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_DST_ADDR, 0x0000},
++ {OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_DST_OFST, 0x0000},
++ {0, ISP_TOK_TERM, 0x0000}
++};
++
++/*
++ * ispccp2_save_context - Saves values of ccp2 registers.
++ */
++void ispccp2_save_context(struct isp_device *isp)
++{
++ isp_save_context(isp, ispccp2_reg_list);
++}
++
++/*
++ * ispccp2_restore_context - Restores ccp2 register values.
++ */
++void ispccp2_restore_context(struct isp_device *isp)
++{
++ isp_restore_context(isp, ispccp2_reg_list);
++}
++
++/*
++ * ispccp2_print_status - Print current CCP2 module register values.
++ */
++#define CCP2_PRINT_REGISTER(isp, name)\
++ dev_dbg(isp->dev, "###CCP2 " #name "=0x%08x\n", \
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_##name))
++
++static void ispccp2_print_status(struct isp_ccp2_device *ccp2)
++{
++ struct isp_device *isp = to_isp_device(ccp2);
++
++ dev_dbg(isp->dev, "-------------CCP2 Register dump-------------\n");
++
++ CCP2_PRINT_REGISTER(isp, SYSCONFIG);
++ CCP2_PRINT_REGISTER(isp, SYSSTATUS);
++ CCP2_PRINT_REGISTER(isp, LC01_IRQENABLE);
++ CCP2_PRINT_REGISTER(isp, LC01_IRQSTATUS);
++ CCP2_PRINT_REGISTER(isp, LC23_IRQENABLE);
++ CCP2_PRINT_REGISTER(isp, LC23_IRQSTATUS);
++ CCP2_PRINT_REGISTER(isp, LCM_IRQENABLE);
++ CCP2_PRINT_REGISTER(isp, LCM_IRQSTATUS);
++ CCP2_PRINT_REGISTER(isp, CTRL);
++ CCP2_PRINT_REGISTER(isp, LCx_CTRL(0));
++ CCP2_PRINT_REGISTER(isp, LCx_CODE(0));
++ CCP2_PRINT_REGISTER(isp, LCx_STAT_START(0));
++ CCP2_PRINT_REGISTER(isp, LCx_STAT_SIZE(0));
++ CCP2_PRINT_REGISTER(isp, LCx_SOF_ADDR(0));
++ CCP2_PRINT_REGISTER(isp, LCx_EOF_ADDR(0));
++ CCP2_PRINT_REGISTER(isp, LCx_DAT_START(0));
++ CCP2_PRINT_REGISTER(isp, LCx_DAT_SIZE(0));
++ CCP2_PRINT_REGISTER(isp, LCx_DAT_PING_ADDR(0));
++ CCP2_PRINT_REGISTER(isp, LCx_DAT_PONG_ADDR(0));
++ CCP2_PRINT_REGISTER(isp, LCx_DAT_OFST(0));
++ CCP2_PRINT_REGISTER(isp, LCM_CTRL);
++ CCP2_PRINT_REGISTER(isp, LCM_VSIZE);
++ CCP2_PRINT_REGISTER(isp, LCM_HSIZE);
++ CCP2_PRINT_REGISTER(isp, LCM_PREFETCH);
++ CCP2_PRINT_REGISTER(isp, LCM_SRC_ADDR);
++ CCP2_PRINT_REGISTER(isp, LCM_SRC_OFST);
++ CCP2_PRINT_REGISTER(isp, LCM_DST_ADDR);
++ CCP2_PRINT_REGISTER(isp, LCM_DST_OFST);
++
++ dev_dbg(isp->dev, "--------------------------------------------\n");
++}
++
++/*
++ * ispccp2_reset - Reset the CCP2
++ * @isp: pointer to isp device
++ */
++static void ispccp2_reset(struct isp_device *isp)
++{
++ int i = 0;
++
++ /* Reset the CSI1/CCP2B and wait for reset to complete */
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG,
++ ISPCCP2_SYSCONFIG_SOFT_RESET);
++ while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSSTATUS) &
++ ISPCCP2_SYSSTATUS_RESET_DONE)) {
++ udelay(10);
++ if (i++ > 10) { /* try read 10 times */
++ dev_warn(isp->dev,
++ "omap3_isp: timeout waiting for ccp2 reset\n");
++ break;
++ }
++ }
++}
++
++/*
++ * ispccp2_if_enable - Enable CCP2 interface.
++ * @isp: pointer to isp device
++ * @enable: enable/disable flag
++ */
++static void ispccp2_if_enable(struct isp_device *isp, u8 enable)
++{
++ int i;
++
++ /* Enable/Disable all the LCx channels */
++ for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
++ ~(ISPCCP2_LCx_CTRL_CHAN_EN),
++ enable ? ISPCCP2_LCx_CTRL_CHAN_EN : 0);
++
++ /* Enable/Disable ccp2 interface in ccp2 mode */
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
++ ~(ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN),
++ enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);
++}
++
++/*
++ * ispccp2_mem_enable - Enable CCP2 memory interface.
++ * @isp: pointer to isp device
++ * @enable: enable/disable flag
++ */
++static void ispccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(ccp2);
++
++ if (enable)
++ ispccp2_if_enable(isp, 0);
++
++ /* Enable/Disable ccp2 interface in ccp2 mode */
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
++ ~ISPCCP2_CTRL_MODE, enable ? ISPCCP2_CTRL_MODE : 0);
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL,
++ ~ISPCCP2_LCM_CTRL_CHAN_EN,
++ enable ? ISPCCP2_LCM_CTRL_CHAN_EN : 0);
++}
++
++/*
++ * ispccp2_phyif_config - Initialize CCP2 phy interface config
++ * @isp: Pointer to ISP device structure.
++ * @config: CCP2 platform data
++ *
++ * Configure the CCP2 physical interface module from platform data.
++ *
++ * Returns -EIO if strobe is chosen in CSI1 mode, or 0 on success.
++ */
++static int ispccp2_phyif_config(struct isp_device *isp,
++ const struct isp_ccp2_platform_data *pdata)
++{
++ u32 val;
++
++ /* CCP2B mode */
++ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) |
++ ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE;
++ /* Data/strobe physical layer */
++ BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
++ pdata->phy_layer);
++ BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
++ pdata->strobe_clk_pol);
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
++
++ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
++ if (!(val & ISPCCP2_CTRL_MODE)) {
++ if (pdata->ccp2_mode)
++ dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
++ if (pdata->phy_layer == ISPCCP2_CTRL_PHY_SEL_STROBE)
++ /* Strobe mode requires CCP2 */
++ return -EIO;
++ }
++
++ return 0;
++}
++
++/*
++ * ispccp2_vp_config - Initialize CCP2 video port interface.
++ * @isp: Pointer to ISP device structure.
++ * @vpclk_div: Video port divisor
++ *
++ * Configure the CCP2 video port with the given clock divisor. The valid divisor
++ * values depend on the ISP revision:
++ *
++ * - revision 1.0 and 2.0 1 to 4
++ * - revision 15.0 1 to 65536
++ *
++ * The exact divisor value used might differ from the requested value, as ISP
++ * revision 15.0 represent the divisor by 65536 divided by an integer.
++ */
++static void ispccp2_vp_config(struct isp_device *isp, unsigned int vpclk_div)
++{
++ u32 val;
++
++ /* ISPCCP2_CTRL Video port */
++ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
++ val |= ISPCCP2_CTRL_VP_ONLY_EN; /* Disable the memory write port */
++
++ if (isp->revision == ISP_REVISION_15_0) {
++ vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 65536);
++ vpclk_div = min(ISPCCP2_VPCLK_FRACDIV / vpclk_div, 65535U);
++ BIT_SET(val, ISPCCP2_CTRL_VPCLK_DIV_SHIFT,
++ ISPCCP2_CTRL_VPCLK_DIV_MASK, vpclk_div);
++ } else {
++ vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 4);
++ BIT_SET(val, ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT,
++ ISPCCP2_CTRL_VP_OUT_CTRL_MASK, vpclk_div - 1);
++ }
++
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
++}
++
++/*
++ * ispccp2_lcx_config - Initialize CCP2 logical channel interface.
++ * @isp: Pointer to ISP device structure.
++ * @config: Pointer to ISP LCx config structure.
++ *
++ * This will analyze the parameters passed by the interface config
++ * and configure CSI1/CCP2 logical channel
++ *
++ */
++static void ispccp2_lcx_config(struct isp_device *isp,
++ struct isp_interface_lcx_config *config)
++{
++ u32 val, format;
++
++ switch (config->format) {
++ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
++ format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
++ break;
++ case V4L2_MBUS_FMT_SGRBG10_1X10:
++ default:
++ format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP; /* RAW10+VP */
++ break;
++ }
++ /* ISPCCP2_LCx_CTRL logical channel #0 */
++ val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0))
++ | (ISPCCP2_LCx_CTRL_REGION_EN); /* Region */
++
++ if (isp->revision == ISP_REVISION_15_0) {
++ /* CRC */
++ BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0,
++ ISPCCP2_LCx_CTRL_CRC_MASK,
++ config->crc);
++ /* Format = RAW10+VP or RAW8+DPCM10+VP*/
++ BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0,
++ ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0, format);
++ } else {
++ BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT,
++ ISPCCP2_LCx_CTRL_CRC_MASK,
++ config->crc);
++
++ BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT,
++ ISPCCP2_LCx_CTRL_FORMAT_MASK, format);
++ }
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0));
++
++ /* ISPCCP2_DAT_START for logical channel #0 */
++ isp_reg_writel(isp, config->data_start << ISPCCP2_LCx_DAT_SHIFT,
++ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_START(0));
++
++ /* ISPCCP2_DAT_SIZE for logical channel #0 */
++ isp_reg_writel(isp, config->data_size << ISPCCP2_LCx_DAT_SHIFT,
++ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_SIZE(0));
++
++ /* Clear status bits for logical channel #0 */
++ val = ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
++
++ /* Clear IRQ status bits for logical channel #0 */
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LC01_IRQSTATUS);
++}
++
++/*
++ * ispccp2_if_configure - Configure ccp2 with data from sensor
++ * @ccp2: ISP ccp2 V4L2 subdevice
++ *
++ * Return 0 on success or a negative error code
++ */
++static int ispccp2_if_configure(struct isp_ccp2_device *ccp2)
++{
++ const struct isp_v4l2_subdevs_group *pdata;
++ struct isp_device *isp = to_isp_device(ccp2);
++ struct v4l2_mbus_framefmt *format;
++ struct media_entity_pad *pad;
++ struct v4l2_subdev *sensor;
++ u32 lines = 0;
++ int ret;
++
++ pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]);
++ sensor = media_entity_to_v4l2_subdev(pad->entity);
++ pdata = sensor->host_priv;
++
++ ret = ispccp2_phyif_config(isp, &pdata->bus.ccp2);
++ if (ret < 0)
++ return ret;
++
++ ispccp2_vp_config(isp, pdata->bus.ccp2.vpclk_div + 1);
++
++ v4l2_subdev_call(sensor, sensor, g_skip_top_lines, &lines);
++
++ format = &ccp2->formats[CCP2_PAD_SINK];
++
++ ccp2->if_cfg.data_start = lines;
++ ccp2->if_cfg.crc = pdata->bus.ccp2.crc;
++ ccp2->if_cfg.format = format->code;
++ ccp2->if_cfg.data_size = format->height;
++
++ ispccp2_lcx_config(isp, &ccp2->if_cfg);
++
++ return 0;
++}
++
++static int ispccp2_adjust_bandwidth(struct isp_ccp2_device *ccp2)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
++ struct isp_device *isp = to_isp_device(ccp2);
++ const struct v4l2_mbus_framefmt *ofmt = &ccp2->formats[CCP2_PAD_SOURCE];
++ unsigned long l3_ick = pipe->l3_ick;
++ struct v4l2_fract *timeperframe;
++ unsigned int vpclk_div = 2;
++ unsigned int value;
++ u64 bound;
++ u64 area;
++
++ /* Compute the minimum clock divisor, based on the pipeline maximum
++ * data rate. This is an absolute lower bound if we don't want SBL
++ * overflows, so round the value up.
++ */
++ vpclk_div = max_t(unsigned int, DIV_ROUND_UP(l3_ick, pipe->max_rate),
++ vpclk_div);
++
++ /* Compute the maximum clock divisor, based on the requested frame rate.
++ * This is a soft lower bound to achieve a frame rate equal or higher
++ * than the requested value, so round the value down.
++ */
++ timeperframe = &pipe->max_timeperframe;
++
++ if (timeperframe->numerator) {
++ area = ofmt->width * ofmt->height;
++ bound = div_u64(area * timeperframe->denominator,
++ timeperframe->numerator);
++ value = min_t(u64, bound, l3_ick);
++ vpclk_div = max_t(unsigned int, l3_ick / value, vpclk_div);
++ }
++
++ dev_dbg(isp->dev, "%s: minimum clock divisor = %u\n", __func__,
++ vpclk_div);
++
++ return vpclk_div;
++}
++
++/*
++ * ispccp2_mem_configure - Initialize CCP2 memory input/output interface
++ * @ccp2: Pointer to CCP2 device structure
++ * @config: Pointer to ISP mem interface config structure
++ *
++ * This will analyze the parameters passed by the interface config
++ * structure, and configure the respective registers for proper
++ * CSI1/CCP2 memory input.
++ */
++static void ispccp2_mem_configure(struct isp_ccp2_device *ccp2,
++ struct isp_interface_mem_config *config)
++{
++ struct isp_device *isp = to_isp_device(ccp2);
++ u32 sink_pixcode = ccp2->formats[CCP2_PAD_SINK].code;
++ u32 source_pixcode = ccp2->formats[CCP2_PAD_SOURCE].code;
++ unsigned int dpcm_decompress = 0;
++ u32 val, hwords;
++
++ if (sink_pixcode != source_pixcode &&
++ sink_pixcode == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) {
++ dpcm_decompress = 1;
++ config->src_ofst = 0;
++ }
++
++ isp_reg_writel(isp, (ISPCSI1_MIDLEMODE_SMARTSTANDBY <<
++ ISPCSI1_MIDLEMODE_SHIFT),
++ OMAP3_ISP_IOMEM_CCP2, ISP_CSIB_SYSCONFIG);
++
++ /* Hsize, Skip */
++ isp_reg_writel(isp, ISPCCP2_LCM_HSIZE_SKIP_MIN |
++ (config->hsize_count << ISPCCP2_LCM_HSIZE_SHIFT),
++ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_HSIZE);
++
++ /* Vsize, no. of lines */
++ isp_reg_writel(isp, config->vsize_count << ISPCCP2_LCM_VSIZE_SHIFT,
++ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_VSIZE);
++
++ isp_reg_writel(isp, config->src_ofst, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LCM_SRC_OFST);
++
++ /* Source and Destination formats */
++ val = ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 <<
++ ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT;
++
++ if (dpcm_decompress) {
++ /* source format is RAW8 */
++ val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 <<
++ ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
++
++ /* RAW8 + DPCM10 - simple predictor */
++ val |= ISPCCP2_LCM_CTRL_SRC_DPCM_PRED;
++
++ /* enable source DPCM decompression */
++ val |= ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 <<
++ ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT;
++ } else {
++ /* source format is RAW10 */
++ val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 <<
++ ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
++ }
++
++ /* Burst size to 32x64 */
++ val |= ISPCCP2_LCM_CTRL_BURST_SIZE_32X <<
++ ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT;
++
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL);
++
++ /* Prefetch setup */
++ if (dpcm_decompress)
++ hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
++ config->hsize_count) >> 3;
++ else
++ hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
++ config->hsize_count) >> 2;
++
++ isp_reg_writel(isp, hwords << ISPCCP2_LCM_PREFETCH_SHIFT,
++ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_PREFETCH);
++
++ /* Video port */
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
++ ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE);
++ ispccp2_vp_config(isp, ispccp2_adjust_bandwidth(ccp2));
++
++ /* Clear LCM interrupts */
++ isp_reg_writel(isp, ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ |
++ ISPCCP2_LCM_IRQSTATUS_EOF_IRQ,
++ OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQSTATUS);
++
++	/* Enable LCM interrupts */
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQENABLE,
++ (ISPCCP2_LCM_IRQSTATUS_EOF_IRQ |
++ ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ));
++}
++
++/*
++ * ispccp2_set_inaddr - Sets memory address of input frame.
++ * @addr: 32bit memory address aligned on 32byte boundary.
++ *
++ * Configures the memory address from which the input frame is to be read.
++ */
++static void ispccp2_set_inaddr(struct isp_ccp2_device *ccp2, u32 addr)
++{
++ struct isp_device *isp = to_isp_device(ccp2);
++
++ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_ADDR);
++}
++
++/* -----------------------------------------------------------------------------
++ * Interrupt handling
++ */
++
++static void ispccp2_isr_buffer(struct isp_ccp2_device *ccp2)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
++ struct isp_buffer *buffer;
++
++ buffer = isp_video_buffer_next(&ccp2->video_in, ccp2->error);
++ if (buffer != NULL)
++ ispccp2_set_inaddr(ccp2, buffer->isp_addr);
++
++ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
++
++ if (ccp2->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
++ if (isp_pipeline_ready(pipe))
++ isp_pipeline_set_stream(pipe,
++ ISP_PIPELINE_STREAM_SINGLESHOT);
++ }
++
++ ccp2->error = 0;
++}
++
++/*
++ * ispccp2_isr - Handle ISP CCP2 interrupts
++ * @isp: Device pointer specific to the OMAP3 ISP.
++ *
++ * This will handle the CCP2 interrupts
++ *
++ * Returns -EIO in case of error, or 0 on success.
++ */
++int ispccp2_isr(struct isp_device *isp)
++{
++ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
++ int ret = 0;
++ static const u32 ISPCCP2_LC01_ERROR =
++ ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
++ ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
++ u32 lcx_irqstatus, lcm_irqstatus;
++
++ /* First clear the interrupts */
++ lcx_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LC01_IRQSTATUS);
++ isp_reg_writel(isp, lcx_irqstatus, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LC01_IRQSTATUS);
++
++ lcm_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LCM_IRQSTATUS);
++ isp_reg_writel(isp, lcm_irqstatus, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LCM_IRQSTATUS);
++ /* Errors */
++ if (lcx_irqstatus & ISPCCP2_LC01_ERROR) {
++ ccp2->error = 1;
++ dev_dbg(isp->dev, "CCP2 err:%x\n", lcx_irqstatus);
++ return -EIO;
++ }
++
++ if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ) {
++ ccp2->error = 1;
++ dev_dbg(isp->dev, "CCP2 OCP err:%x\n", lcm_irqstatus);
++ ret = -EIO;
++ }
++
++ /* Handle queued buffers on frame end interrupts */
++ if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
++ ispccp2_isr_buffer(ccp2);
++
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++
++const static unsigned int ccp2_fmts[] = {
++ V4L2_MBUS_FMT_SGRBG10_1X10,
++ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
++};
++
++/*
++ * __ispccp2_get_format - helper function for getting ccp2 format
++ * @ccp2 : pointer to ccp2 structure
++ * @fh : V4L2 subdev file handle
++ * @pad : pad number
++ * @which : wanted subdev format
++ * return format structure or NULL on error
++ */
++static struct v4l2_mbus_framefmt *
++__ispccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return v4l2_subdev_get_probe_format(fh, pad);
++ else
++ return &ccp2->formats[pad];
++}
++
++/*
++ * ispccp2_try_format - Handle try format by pad subdev method
++ * @ccp2 : ISP CCP2 device
++ * @fh : V4L2 subdev file handle
++ * @pad : pad num
++ * @fmt : pointer to v4l2 mbus format structure
++ * @which : wanted subdev format
++ */
++static void ispccp2_try_format(struct isp_ccp2_device *ccp2,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct v4l2_mbus_framefmt *format;
++
++ switch (pad) {
++ case CCP2_PAD_SINK:
++ if (fmt->code != V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++
++ if (ccp2->input == CCP2_INPUT_SENSOR) {
++ fmt->width = clamp_t(u32, fmt->width,
++ ISPCCP2_DAT_START_MIN,
++ ISPCCP2_DAT_START_MAX);
++ fmt->height = clamp_t(u32, fmt->height,
++ ISPCCP2_DAT_SIZE_MIN,
++ ISPCCP2_DAT_SIZE_MAX);
++ } else if (ccp2->input == CCP2_INPUT_MEMORY) {
++ fmt->width = clamp_t(u32, fmt->width,
++ ISPCCP2_LCM_HSIZE_COUNT_MIN,
++ ISPCCP2_LCM_HSIZE_COUNT_MAX);
++ fmt->width &= ~15;
++ fmt->height = clamp_t(u32, fmt->height,
++ ISPCCP2_LCM_VSIZE_MIN,
++ ISPCCP2_LCM_VSIZE_MAX);
++ }
++ break;
++
++ case CCP2_PAD_SOURCE:
++ /* Source format - copy sink format and change pixel code
++ * to SGRBG10_1X10 as we don't support CCP2 write to memory.
++ * When CCP2 write to memory feature will be added this
++ * should be changed properly.
++ */
++ format = __ispccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which);
++ memcpy(fmt, format, sizeof(*fmt));
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++ break;
++ }
++
++ fmt->field = V4L2_FIELD_NONE;
++ fmt->colorspace = V4L2_COLORSPACE_SRGB;
++}
++
++/*
++ * ispccp2_enum_mbus_code - Handle pixel format enumeration
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
++ * return -EINVAL or zero on success
++ */
++static int ispccp2_enum_mbus_code(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ if (code->pad == CCP2_PAD_SINK) {
++ if (code->index >= ARRAY_SIZE(ccp2_fmts))
++ return -EINVAL;
++
++ code->code = ccp2_fmts[code->index];
++ } else {
++ if (code->index != 0)
++ return -EINVAL;
++
++ format = __ispccp2_get_format(ccp2, fh, CCP2_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ code->code = format->code;
++ }
++
++ return 0;
++}
++
++static int ispccp2_enum_frame_size(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt format;
++
++ if (fse->index != 0)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = 1;
++ format.height = 1;
++ ispccp2_try_format(ccp2, fh, fse->pad, &format,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ fse->min_width = format.width;
++ fse->min_height = format.height;
++
++ if (format.code != fse->code)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = -1;
++ format.height = -1;
++ ispccp2_try_format(ccp2, fh, fse->pad, &format,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ fse->max_width = format.width;
++ fse->max_height = format.height;
++
++ return 0;
++}
++
++/*
++ * ispccp2_get_format - Handle get format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad : pad num
++ * @fmt : pointer to v4l2 mbus format structure
++ * @which : wanted subdev format
++ * return -EINVAL or zero on sucess
++ */
++static int ispccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __ispccp2_get_format(ccp2, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ memcpy(fmt, format, sizeof(*fmt));
++
++ return 0;
++}
++
++/*
++ * ispccp2_set_format - Handle set format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad : pad num
++ * @fmt : pointer to v4l2 mbus format structure
++ * @which : wanted subdev format
++ * returns zero
++ */
++static int ispccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __ispccp2_get_format(ccp2, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ ispccp2_try_format(ccp2, fh, pad, fmt, which);
++ memcpy(format, fmt, sizeof(*format));
++
++ /* Propagate the format from sink to source */
++ if (pad == CCP2_PAD_SINK) {
++ format = __ispccp2_get_format(ccp2, fh, CCP2_PAD_SOURCE, which);
++ memcpy(format, fmt, sizeof(*format));
++ ispccp2_try_format(ccp2, fh, CCP2_PAD_SOURCE, format, which);
++ }
++
++ return 0;
++}
++
++/*
++ * ispccp2_s_stream - Enable/Disable streaming on ccp2 subdev
++ * @sd : pointer to v4l2 subdev structure
++ * @enable: 1 == Enable, 0 == Disable
++ * return zero
++ */
++static int ispccp2_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(ccp2);
++ int ret;
++
++ switch (enable) {
++ case ISP_PIPELINE_STREAM_CONTINUOUS:
++ if (ccp2->phy) {
++ ret = isp_csiphy_acquire(ccp2->phy);
++ if (ret < 0)
++ return ret;
++ }
++
++ ispccp2_if_configure(ccp2);
++ ispccp2_print_status(ccp2);
++
++ /* Enable CSI1/CCP2 interface */
++ ispccp2_if_enable(isp, 1);
++ break;
++
++ case ISP_PIPELINE_STREAM_SINGLESHOT:
++ if (ccp2->state != ISP_PIPELINE_STREAM_SINGLESHOT) {
++ struct v4l2_mbus_framefmt *format;
++ struct v4l2_pix_format pix;
++
++ format = &ccp2->formats[CCP2_PAD_SINK];
++ isp_video_mbus_to_pix(&ccp2->video_in, format, &pix);
++
++ ccp2->mem_cfg.hsize_count = format->width;
++ ccp2->mem_cfg.vsize_count = format->height;
++ ccp2->mem_cfg.src_ofst = pix.bytesperline;
++
++ ispccp2_mem_configure(ccp2, &ccp2->mem_cfg);
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI1_READ);
++ ispccp2_print_status(ccp2);
++ }
++ ispccp2_mem_enable(ccp2, 1);
++ break;
++
++ case ISP_PIPELINE_STREAM_STOPPED:
++ if (ccp2->input == CCP2_INPUT_MEMORY) {
++ ispccp2_mem_enable(ccp2, 0);
++ isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI1_READ);
++ } else if (ccp2->input == CCP2_INPUT_SENSOR) {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_CCP2,
++ ISPCCP2_LC01_IRQENABLE,
++ ~IRQ0ENABLE_CCP2_LC0_IRQ);
++
++ /* Disable CSI1/CCP2 interface */
++ ispccp2_if_enable(isp, 0);
++ if (ccp2->phy)
++ isp_csiphy_release(ccp2->phy);
++ }
++ break;
++ }
++
++ ccp2->state = enable;
++ return 0;
++}
++
++
++/* subdev video operations */
++static const struct v4l2_subdev_video_ops ispccp2_sd_video_ops = {
++ .s_stream = ispccp2_s_stream,
++};
++
++/* subdev pad operations */
++static const struct v4l2_subdev_pad_ops ispccp2_sd_pad_ops = {
++ .enum_mbus_code = ispccp2_enum_mbus_code,
++ .enum_frame_size = ispccp2_enum_frame_size,
++ .get_fmt = ispccp2_get_format,
++ .set_fmt = ispccp2_set_format,
++};
++
++/* subdev operations */
++static const struct v4l2_subdev_ops ispccp2_sd_ops = {
++ .video = &ispccp2_sd_video_ops,
++ .pad = &ispccp2_sd_pad_ops,
++};
++
++/* --------------------------------------------------------------------------
++ * ISP ccp2 video device node
++ */
++
++/*
++ * ispccp2_video_queue - Queue video buffer.
++ * @video : Pointer to isp video structure
++ * @buffer: Pointer to isp_buffer structure
++ * return -EIO or zero on success
++ */
++static int
++ispccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
++{
++ struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
++
++ ispccp2_set_inaddr(ccp2, buffer->isp_addr);
++ return 0;
++}
++
++static const struct isp_video_operations ispccp2_video_ops = {
++ .queue = ispccp2_video_queue,
++};
++
++/* -----------------------------------------------------------------------------
++ * Media entity operations
++ */
++
++/*
++ * ispccp2_link_setup - Setup ccp2 connections.
++ * @entity : Pointer to media entity structure
++ * @local : Pointer to local pad array
++ * @remote : Pointer to remote pad array
++ * @flags : Link flags
++ * return -EINVAL on error or zero on success
++ */
++static int ispccp2_link_setup(struct media_entity *entity,
++ const struct media_entity_pad *local,
++ const struct media_entity_pad *remote, u32 flags)
++{
++ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++ struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
++
++ switch (local->index | (remote->entity->type << 16)) {
++ case CCP2_PAD_SINK | (MEDIA_ENTITY_TYPE_NODE << 16):
++ /* read from memory */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ if (ccp2->input == CCP2_INPUT_SENSOR)
++ return -EBUSY;
++ ccp2->input = CCP2_INPUT_MEMORY;
++ } else {
++ if (ccp2->input == CCP2_INPUT_MEMORY)
++ ccp2->input = CCP2_INPUT_NONE;
++ }
++ break;
++
++ case CCP2_PAD_SINK | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* read from sensor/phy */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ if (ccp2->input == CCP2_INPUT_MEMORY)
++ return -EBUSY;
++ ccp2->input = CCP2_INPUT_SENSOR;
++ } else {
++ if (ccp2->input == CCP2_INPUT_SENSOR)
++ ccp2->input = CCP2_INPUT_NONE;
++ } break;
++
++ case CCP2_PAD_SOURCE | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* write to video port/ccdc */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ ccp2->output = CCP2_OUTPUT_CCDC;
++ else
++ ccp2->output = CCP2_OUTPUT_NONE;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* media operations */
++static const struct media_entity_operations ccp2_media_ops = {
++ .link_setup = ispccp2_link_setup,
++};
++
++/*
++ * isp_ccp2_init_entities - Initialize ccp2 subdev and media entity.
++ * @ccp2 : Pointer to ispccp2 structure
++ * return negative error code or zero on success
++ */
++static int isp_ccp2_init_entities(struct isp_ccp2_device *ccp2)
++{
++ struct v4l2_subdev *sd = &ccp2->subdev;
++ struct media_entity_pad *pads = ccp2->pads;
++ struct media_entity *me = &sd->entity;
++ int ret;
++
++ ccp2->input = CCP2_INPUT_NONE;
++ ccp2->output = CCP2_OUTPUT_NONE;
++
++ v4l2_subdev_init(sd, &ispccp2_sd_ops);
++ strlcpy(sd->name, "OMAP3 ISP CCP2", sizeof(sd->name));
++ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
++ v4l2_set_subdevdata(sd, ccp2);
++ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ pads[CCP2_PAD_SINK].type = MEDIA_PAD_TYPE_INPUT;
++ pads[CCP2_PAD_SOURCE].type = MEDIA_PAD_TYPE_OUTPUT;
++
++ me->ops = &ccp2_media_ops;
++ ret = media_entity_init(me, CCP2_PADS_NUM, pads, 0);
++ if (ret < 0)
++ return ret;
++
++ ccp2->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ ccp2->video_in.alignment = 32;
++ ccp2->video_in.isp = to_isp_device(ccp2);
++ ccp2->video_in.ops = &ispccp2_video_ops;
++ ccp2->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
++
++ ret = isp_video_init(&ccp2->video_in, "CCP2");
++ if (ret < 0)
++ return ret;
++
++ /* Connect the video node to the ccp2 subdev. */
++ ret = media_entity_create_link(&ccp2->video_in.video.entity, 0,
++ &ccp2->subdev.entity, CCP2_PAD_SINK, 0);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
++/*
++ * isp_ccp2_unregister_entities - Unregister media entities: subdev
++ * @ccp2 - Pointer to ccp2 device
++ */
++void isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2)
++{
++ media_entity_cleanup(&ccp2->subdev.entity);
++
++ v4l2_device_unregister_subdev(&ccp2->subdev);
++ isp_video_unregister(&ccp2->video_in);
++}
++
++/*
++ * ispccp2_register_entities - Register the subdev media entity
++ * @ccp2 - Pointer to ccp2 device
++ * @vdev - Pointer to v4l device
++ * return negative error code or zero on success
++ */
++
++int isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
++ struct v4l2_device *vdev)
++{
++ int ret;
++
++ /* Register the subdev and video nodes. */
++ ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&ccp2->video_in, vdev);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++error:
++ isp_ccp2_unregister_entities(ccp2);
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP ccp2 initialisation and cleanup
++ */
++
++/*
++ * ispccp2_cleanup - CCP2 un-initialization
++ * @isp : Pointer to ISP device
++ */
++void isp_ccp2_cleanup(struct isp_device *isp)
++{
++}
++
++/*
++ * isp_ccp2_init - CCP2 initialization.
++ * @isp : Pointer to ISP device
++ * return negative error code or zero on success
++ */
++int isp_ccp2_init(struct isp_device *isp)
++{
++ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
++ int ret;
++
++ /* On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
++ * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
++ * configured.
++ *
++ * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
++ */
++ if (isp->revision == ISP_REVISION_15_0)
++ ccp2->phy = &isp->isp_csiphy1;
++
++ ret = isp_ccp2_init_entities(ccp2);
++ if (ret < 0)
++ goto out;
++
++ ispccp2_reset(isp);
++out:
++ if (ret)
++ isp_ccp2_cleanup(isp);
++
++ return ret;
++}
++
+diff --git a/drivers/media/video/isp/ispccp2.h b/drivers/media/video/isp/ispccp2.h
+new file mode 100644
+index 0000000..58792f6
+--- /dev/null
++++ b/drivers/media/video/isp/ispccp2.h
+@@ -0,0 +1,89 @@
++/*
++ *ispccp2.h
++ *
++ * Copyright (C) 2010 Nokia Corporation.
++ * Copyright (C) 2010 Texas Instruments.
++ *
++ * Contributors:
++ * RaniSuneela M <r-m@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_CCP2_API_H
++#define OMAP_ISP_CCP2_API_H
++
++#include <linux/videodev2.h>
++
++struct isp_device;
++struct isp_csiphy;
++
++/* Sink and source ccp2 pads */
++#define CCP2_PAD_SINK 0
++#define CCP2_PAD_SOURCE 1
++#define CCP2_PADS_NUM 2
++
++/* CCP2 input media entity */
++enum ccp2_input_entity {
++ CCP2_INPUT_NONE,
++ CCP2_INPUT_SENSOR,
++ CCP2_INPUT_MEMORY,
++};
++
++/* CCP2 output media entity */
++enum ccp2_output_entity {
++ CCP2_OUTPUT_NONE,
++ CCP2_OUTPUT_CCDC,
++ CCP2_OUTPUT_MEMORY,
++};
++
++
++/* Logical channel configuration */
++struct isp_interface_lcx_config {
++ int crc;
++ u32 data_start;
++ u32 data_size;
++ u32 format;
++};
++
++/* Memory channel configuration */
++struct isp_interface_mem_config {
++ u32 dst_port;
++ u32 vsize_count;
++ u32 hsize_count;
++ u32 src_ofst;
++ u32 dst_ofst;
++};
++
++/* CCP2 device */
++struct isp_ccp2_device {
++ struct v4l2_subdev subdev;
++ struct v4l2_mbus_framefmt formats[CCP2_PADS_NUM];
++ struct media_entity_pad pads[CCP2_PADS_NUM];
++ enum ccp2_input_entity input;
++ enum ccp2_output_entity output;
++ struct isp_interface_lcx_config if_cfg;
++ struct isp_interface_mem_config mem_cfg;
++ struct isp_video video_in;
++ struct isp_csiphy *phy;
++ unsigned int error;
++ enum isp_pipeline_stream_state state;
++};
++
++/* Function declarations */
++void ispccp2_save_context(struct isp_device *isp);
++void ispccp2_restore_context(struct isp_device *isp);
++int isp_ccp2_init(struct isp_device *isp);
++void isp_ccp2_cleanup(struct isp_device *isp);
++int isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
++ struct v4l2_device *vdev);
++void isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2);
++int ispccp2_isr(struct isp_device *isp);
++
++#endif
+diff --git a/drivers/media/video/isp/ispcsi2.c b/drivers/media/video/isp/ispcsi2.c
+new file mode 100644
+index 0000000..5b3e4a6
+--- /dev/null
++++ b/drivers/media/video/isp/ispcsi2.c
+@@ -0,0 +1,1232 @@
++/*
++ * ispcsi2.c
++ *
++ * Driver Library for ISP CSI2 Control module in TI's OMAP3 Camera ISP
++ * ISP CSI2 interface and IRQ related APIs are defined here.
++ *
++ * Copyright (C) 2009 Texas Instruments.
++ * Copyright (C) 2010 Nokia Corporation.
++ *
++ * Contributors:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Dominic Curran <dcurran@ti.com>
++ * Antti Koskipaa <antti.koskipaa@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/delay.h>
++#include <media/v4l2-common.h>
++#include <linux/v4l2-mediabus.h>
++#include <linux/mm.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "ispcsi2.h"
++
++/*
++ * isp_csi2_if_enable - Enable CSI2 Receiver interface.
++ * @enable: enable flag
++ *
++ */
++static void isp_csi2_if_enable(struct isp_device *isp,
++ struct isp_csi2_device *csi2, u8 enable)
++{
++ struct isp_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
++
++ isp_reg_and_or(isp, csi2->regs1, ISPCSI2_CTRL,
++ ~ISPCSI2_CTRL_IF_EN_MASK,
++ enable ? ISPCSI2_CTRL_IF_EN_ENABLE :
++ ISPCSI2_CTRL_IF_EN_DISABLE);
++
++ currctrl->if_enable = enable;
++}
++
++/*
++ * isp_csi2_recv_config - CSI2 receiver module configuration.
++ * @currctrl: isp_csi2_ctrl_cfg structure
++ *
++ */
++static void isp_csi2_recv_config(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ struct isp_csi2_ctrl_cfg *currctrl)
++{
++ u32 reg;
++
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTRL);
++
++ reg &= ~ISPCSI2_CTRL_FRAME_MASK;
++ if (currctrl->frame_mode)
++ reg |= ISPCSI2_CTRL_FRAME_DISABLE_FEC;
++ else
++ reg |= ISPCSI2_CTRL_FRAME_DISABLE_IMM;
++
++ reg &= ~ISPCSI2_CTRL_VP_CLK_EN_MASK;
++ if (currctrl->vp_clk_enable)
++ reg |= ISPCSI2_CTRL_VP_CLK_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTRL_VP_CLK_EN_DISABLE;
++
++ reg &= ~ISPCSI2_CTRL_VP_ONLY_EN_MASK;
++ if (currctrl->vp_only_enable)
++ reg |= ISPCSI2_CTRL_VP_ONLY_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTRL_VP_ONLY_EN_DISABLE;
++
++ reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK;
++ reg |= currctrl->vp_out_ctrl << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT;
++
++ reg &= ~ISPCSI2_CTRL_DBG_EN_MASK;
++ if (currctrl->debug_enable)
++ reg |= ISPCSI2_CTRL_DBG_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTRL_DBG_EN_DISABLE;
++
++ reg &= ~ISPCSI2_CTRL_ECC_EN_MASK;
++ if (currctrl->ecc_enable)
++ reg |= ISPCSI2_CTRL_ECC_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTRL_ECC_EN_DISABLE;
++
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTRL);
++}
++
++const static unsigned int csi2_input_fmts[] = {
++ V4L2_MBUS_FMT_SGRBG10_1X10,
++ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
++ V4L2_MBUS_FMT_SRGGB10_1X10,
++ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
++ V4L2_MBUS_FMT_SBGGR10_1X10,
++ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
++ V4L2_MBUS_FMT_SGBRG10_1X10,
++ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
++};
++
++/* To set the format on the CSI2 requires a mapping function that takes
++ * the following inputs:
++ * - 2 different formats (at this time)
++ * - 2 destinations (mem, vp+mem) (vp only handled separately)
++ * - 2 decompression options (on, off)
++ * - 2 isp revisions (certain format must be handled differently on OMAP3630)
++ * Output should be CSI2 frame format code
++ * Array indices as follows: [format][dest][decompr][is_3630]
++ * Not all combinations are valid. 0 means invalid.
++ */
++static const u16 __csi2_fmt_map[2][2][2][2] = {
++ /* RAW10 formats */
++ {
++ /* Output to memory */
++ {
++ /* No DPCM decompression */
++ { CSI2_PIX_FMT_RAW10_EXP16, CSI2_PIX_FMT_RAW10_EXP16 },
++ /* DPCM decompression */
++ { 0, 0 },
++ },
++ /* Output to both */
++ {
++ /* No DPCM decompression */
++ { CSI2_PIX_FMT_RAW10_EXP16_VP,
++ CSI2_PIX_FMT_RAW10_EXP16_VP },
++ /* DPCM decompression */
++ { 0, 0 },
++ },
++ },
++ /* RAW10 DPCM8 formats */
++ {
++ /* Output to memory */
++ {
++ /* No DPCM decompression */
++ { CSI2_PIX_FMT_RAW8, CSI2_USERDEF_8BIT_DATA1 },
++ /* DPCM decompression */
++ { CSI2_PIX_FMT_RAW8_DPCM10_EXP16,
++ CSI2_USERDEF_8BIT_DATA1_DPCM10 },
++ },
++ /* Output to both */
++ {
++ /* No DPCM decompression */
++ { CSI2_PIX_FMT_RAW8_VP,
++ CSI2_PIX_FMT_RAW8_VP },
++ /* DPCM decompression */
++ { CSI2_PIX_FMT_RAW8_DPCM10_VP,
++ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP },
++ },
++ },
++};
++
++/*
++ * isp_csi2_ctx_map_format - Maps v4l2 pixel format to the format ids
++ * used by CSI2.
++ * @fmt: Format to map
++ * Outputs:
++ * @format_id: CSI2 physical format id
++ */
++static int isp_csi2_ctx_map_format(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ struct v4l2_mbus_framefmt *fmt,
++ u16 *format_id)
++{
++ int fmtidx, destidx, is_3630;
++ u16 tmp;
++
++ switch (fmt->code) {
++ case V4L2_MBUS_FMT_SGRBG10_1X10:
++ case V4L2_MBUS_FMT_SRGGB10_1X10:
++ case V4L2_MBUS_FMT_SBGGR10_1X10:
++ case V4L2_MBUS_FMT_SGBRG10_1X10:
++ fmtidx = 0;
++ break;
++ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
++ case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
++ case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
++ case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
++ fmtidx = 1;
++ break;
++ default:
++ printk(KERN_ERR "CSI2: pixel format %08x unsupported!\n",
++ fmt->code);
++ return -EINVAL;
++ }
++
++ if (!(csi2->output & CSI2_OUTPUT_CCDC) &&
++ !(csi2->output & CSI2_OUTPUT_MEMORY)) {
++ *format_id = CSI2_PIX_FMT_OTHERS;
++ return 0; /* Neither output enabled is a valid combination */
++ }
++ destidx = !!(csi2->output & CSI2_OUTPUT_CCDC);
++ is_3630 = isp->revision == ISP_REVISION_15_0;
++
++ tmp = __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress][is_3630];
++ if (!tmp)
++ return -EINVAL;
++
++ *format_id = tmp;
++ return 0;
++}
++
++/*
++ * csi2_set_outaddr - Set memory address to save output image
++ * @csi2: Pointer to ISP CSI2a device.
++ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
++ *
++ * Sets the memory address where the output will be saved.
++ *
++ * Returns 0 if successful, or -EINVAL if the address is not in the 32 byte
++ * boundary.
++ */
++static void csi2_set_outaddr(struct isp_csi2_device *csi2, u32 addr)
++{
++ struct isp_device *isp = csi2->isp;
++ struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[0];
++
++ ctx->ping_addr = ctx->pong_addr = addr;
++ isp_reg_writel(isp, ctx->ping_addr,
++ csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
++ isp_reg_writel(isp, ctx->pong_addr,
++ csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
++}
++
++/*
++ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
++ * be enabled by CSI2.
++ * @format_id: mapped format id
++ *
++ */
++static inline int is_usr_def_mapping(u32 format_id)
++{
++ return (format_id & 0x40) ? 1 : 0;
++}
++
++/*
++ * isp_csi2_ctx_enable - Enable specified CSI2 context
++ * @ctxnum: Context number, valid between 0 and 7 values.
++ * @enable: enable
++ *
++ */
++static void isp_csi2_ctx_enable(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ u8 ctxnum, u8 enable)
++{
++ struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
++
++ isp_reg_and_or(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum),
++ ~ISPCSI2_CTX_CTRL1_CTX_EN_MASK,
++ enable ? ISPCSI2_CTX_CTRL1_CTX_EN_ENABLE :
++ ISPCSI2_CTX_CTRL1_CTX_EN_DISABLE);
++
++ ctx->enabled = enable;
++}
++
++/*
++ * isp_csi2_ctx_config - CSI2 context configuration.
++ * @ctx: context configuration
++ *
++ */
++static void isp_csi2_ctx_config(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ struct isp_csi2_ctx_cfg *ctx)
++{
++ u32 reg;
++
++ /* Set up CSI2_CTx_CTRL1 */
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
++
++ reg &= ~(ISPCSI2_CTX_CTRL1_COUNT_MASK);
++ reg |= ctx->frame_count << ISPCSI2_CTX_CTRL1_COUNT_SHIFT;
++
++ reg &= ~(ISPCSI2_CTX_CTRL1_EOF_EN_MASK);
++ if (ctx->eof_enabled)
++ reg |= ISPCSI2_CTX_CTRL1_EOF_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTX_CTRL1_EOF_EN_DISABLE;
++
++ reg &= ~(ISPCSI2_CTX_CTRL1_EOL_EN_MASK);
++ if (ctx->eol_enabled)
++ reg |= ISPCSI2_CTX_CTRL1_EOL_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTX_CTRL1_EOL_EN_DISABLE;
++
++ reg &= ~(ISPCSI2_CTX_CTRL1_CS_EN_MASK);
++ if (ctx->checksum_enabled)
++ reg |= ISPCSI2_CTX_CTRL1_CS_EN_ENABLE;
++ else
++ reg |= ISPCSI2_CTX_CTRL1_CS_EN_DISABLE;
++
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
++
++ /* Set up CSI2_CTx_CTRL2 */
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
++
++ reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK);
++ reg |= ctx->virtual_id << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
++
++ reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK);
++ reg |= ctx->format_id << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT;
++
++ if (ctx->dpcm_decompress) {
++ reg &= ~ISPCSI2_CTX_CTRL2_DPCM_PRED_MASK;
++ reg |= ctx->dpcm_predictor <<
++ ISPCSI2_CTX_CTRL2_DPCM_PRED_SHIFT;
++ }
++
++ if (is_usr_def_mapping(ctx->format_id)) {
++ reg &= ~ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK;
++ reg |= 2 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
++ }
++
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
++
++ /* Set up CSI2_CTx_CTRL3 */
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
++ reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK);
++ reg |= (ctx->alpha << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT);
++
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
++
++ /* Set up CSI2_CTx_DAT_OFST */
++ reg = isp_reg_readl(isp, csi2->regs1,
++ ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
++ reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK;
++ reg |= ctx->data_offset << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT;
++ isp_reg_writel(isp, reg, csi2->regs1,
++ ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
++
++ isp_reg_writel(isp, ctx->ping_addr,
++ csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
++
++ isp_reg_writel(isp, ctx->pong_addr,
++ csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
++}
++
++/*
++ * isp_csi2_timing_config - CSI2 timing configuration.
++ * @timing: isp_csi2_timing_cfg structure
++ */
++static void isp_csi2_timing_config(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ struct isp_csi2_timing_cfg *timing)
++{
++ u32 reg;
++
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_TIMING);
++
++ reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO_MASK(timing->ionum);
++ if (timing->force_rx_mode)
++ reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO_ENABLE(timing->ionum);
++ else
++ reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO_DISABLE(timing->ionum);
++
++ reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO_MASK(timing->ionum);
++ if (timing->stop_state_16x)
++ reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO_ENABLE(timing->ionum);
++ else
++ reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO_DISABLE(timing->ionum);
++
++ reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO_MASK(timing->ionum);
++ if (timing->stop_state_4x)
++ reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO_ENABLE(timing->ionum);
++ else
++ reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO_DISABLE(timing->ionum);
++
++ reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(timing->ionum);
++ reg |= timing->stop_state_counter <<
++ ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(timing->ionum);
++
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_TIMING);
++}
++
++/*
++ * isp_csi2_irq_ctx_set - Enables CSI2 Context IRQs.
++ * @enable: Enable/disable CSI2 Context interrupts
++ */
++static void isp_csi2_irq_ctx_set(struct isp_device *isp,
++ struct isp_csi2_device *csi2, int enable)
++{
++ u32 reg;
++ int i;
++
++ reg = ISPCSI2_CTX_IRQSTATUS_FE_IRQ;
++ for (i = 0; i < 8; i++) {
++ isp_reg_writel(isp, reg, csi2->regs1,
++ ISPCSI2_CTX_IRQSTATUS(i));
++ if (enable)
++ isp_reg_or(isp, csi2->regs1,
++ ISPCSI2_CTX_IRQENABLE(i), reg);
++ else
++ isp_reg_and(isp, csi2->regs1,
++ ISPCSI2_CTX_IRQENABLE(i), ~reg);
++ }
++}
++
++/*
++ * isp_csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
++ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
++ */
++static void isp_csi2_irq_complexio1_set(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ int enable)
++{
++ u32 reg;
++ reg = ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT |
++ ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER |
++ ISPCSI2_PHY_IRQENABLE_STATEULPM5 |
++ ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 |
++ ISPCSI2_PHY_IRQENABLE_ERRESC5 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 |
++ ISPCSI2_PHY_IRQENABLE_STATEULPM4 |
++ ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 |
++ ISPCSI2_PHY_IRQENABLE_ERRESC4 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 |
++ ISPCSI2_PHY_IRQENABLE_STATEULPM3 |
++ ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 |
++ ISPCSI2_PHY_IRQENABLE_ERRESC3 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 |
++ ISPCSI2_PHY_IRQENABLE_STATEULPM2 |
++ ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 |
++ ISPCSI2_PHY_IRQENABLE_ERRESC2 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 |
++ ISPCSI2_PHY_IRQENABLE_STATEULPM1 |
++ ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 |
++ ISPCSI2_PHY_IRQENABLE_ERRESC1 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 |
++ ISPCSI2_PHY_IRQENABLE_ERRSOTHS1;
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
++ if (enable)
++ reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
++ else
++ reg = 0;
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
++}
++
++/*
++ * isp_csi2_irq_status_set - Enables CSI2 Status IRQs.
++ * @enable: Enable/disable CSI2 Status interrupts
++ */
++static void isp_csi2_irq_status_set(struct isp_device *isp,
++ struct isp_csi2_device *csi2,
++ int enable)
++{
++ u32 reg;
++ reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
++ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
++ ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ |
++ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
++ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
++ ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ |
++ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ |
++ ISPCSI2_IRQSTATUS_CONTEXT(0);
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQSTATUS);
++ if (enable)
++ reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQENABLE);
++ else
++ reg = 0;
++
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQENABLE);
++}
++
++/*
++ * isp_csi2_reset - Resets the CSI2 module.
++ *
++ * Must be called with the phy lock held.
++ *
++ * Returns 0 if successful, or -EBUSY if power command didn't respond.
++ */
++int isp_csi2_reset(struct isp_csi2_device *csi2)
++{
++ struct isp_device *isp = csi2->isp;
++ u8 soft_reset_retries = 0;
++ u32 reg;
++ int i;
++
++ if (!csi2->available)
++ return -ENODEV;
++
++ if (csi2->phy->phy_in_use)
++ return -EBUSY;
++
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSCONFIG);
++ reg |= ISPCSI2_SYSCONFIG_SOFT_RESET_RESET;
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_SYSCONFIG);
++
++ do {
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSSTATUS) &
++ ISPCSI2_SYSSTATUS_RESET_DONE_MASK;
++ if (reg == ISPCSI2_SYSSTATUS_RESET_DONE_DONE)
++ break;
++ soft_reset_retries++;
++ if (soft_reset_retries < 5)
++ udelay(100);
++ } while (soft_reset_retries < 5);
++
++ if (soft_reset_retries == 5) {
++ printk(KERN_ERR "CSI2: Soft reset try count exceeded!\n");
++ return -EBUSY;
++ }
++
++ if (isp->revision == ISP_REVISION_15_0)
++ isp_reg_and_or(isp, csi2->regs1, ISPCSI2_PHY_CFG,
++ ~ISPCSI2_PHY_CFG_RESET_CTRL_MASK,
++ ISPCSI2_PHY_CFG_RESET_CTRL_DEASSERT);
++
++ i = 100;
++ do {
++ reg = isp_reg_readl(isp, csi2->phy->phy_regs, ISPCSIPHY_REG1)
++ & ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_MASK;
++ if (reg == ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_DONE)
++ break;
++ udelay(100);
++ } while (--i > 0);
++
++ if (i == 0) {
++ printk(KERN_ERR
++ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
++ return -EBUSY;
++ }
++
++ reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSCONFIG);
++ reg &= ~ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK;
++ reg |= ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO;
++ reg &= ~ISPCSI2_SYSCONFIG_AUTO_IDLE_MASK;
++ isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_SYSCONFIG);
++
++ return 0;
++}
++
++static int isp_csi2_configure(struct isp_csi2_device *csi2)
++{
++ const struct isp_v4l2_subdevs_group *pdata;
++ struct isp_device *isp = csi2->isp;
++ struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
++ struct media_entity_pad *pad;
++ struct v4l2_subdev *sensor;
++ struct v4l2_pix_format pix;
++
++ /*
++ * CSI2 fields that can be updated while the context has
++ * been enabled or the interface has been enabled are not
++ * updated dynamically currently. So we do not allow to
++ * reconfigure if either has been enabled
++ */
++ if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
++ return -EBUSY;
++
++ pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
++ sensor = media_entity_to_v4l2_subdev(pad->entity);
++ pdata = sensor->host_priv;
++
++ csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
++ csi2->ctrl.debug_enable = 0;
++ csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
++ csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
++
++ timing->ionum = 1;
++ timing->force_rx_mode = 1;
++ timing->stop_state_16x = 1;
++ timing->stop_state_4x = 1;
++ timing->stop_state_counter = 0x1FF;
++
++ /*
++ * The CSI2 receiver can't do any format conversion except DPCM
++ * decompression, so every set_format call configures both pads
++ * and enables DPCM decompression as a special case:
++ */
++ if (csi2->formats[CSI2_PAD_SINK].code !=
++ isp_video_uncompressed_code(csi2->formats[CSI2_PAD_SINK].code))
++ csi2->dpcm_decompress = true;
++ else
++ csi2->dpcm_decompress = false;
++
++ isp_csi2_ctx_map_format(isp, csi2, &csi2->formats[CSI2_PAD_SINK],
++ &csi2->contexts[0].format_id);
++
++ /*
++ * The width and height aren't actually written to any CSI2 registers.
++ * Memory writes are controlled solely by frame/line start/end codes
++ * coming from the sensor and the CSI2_CTx_DAT_OFST register.
++ * The contents of that register do not matter if not using
++ * memory output.
++ */
++ if (csi2->output & CSI2_OUTPUT_MEMORY &&
++ csi2->dpcm_decompress == true) {
++ isp_video_mbus_to_pix(&csi2->video_out,
++ &csi2->formats[CSI2_PAD_SOURCE], &pix);
++ csi2->contexts[0].data_offset = pix.bytesperline;
++ } else {
++ csi2->contexts[0].data_offset = 0;
++ }
++
++ /* Enable end of frame signal generation for context 0 */
++ csi2->contexts[0].eof_enabled = 1;
++
++ isp_csi2_irq_complexio1_set(isp, csi2, 1);
++ isp_csi2_irq_ctx_set(isp, csi2, 1);
++ isp_csi2_irq_status_set(isp, csi2, 1);
++
++ /* Set configuration (timings, format and links) */
++ isp_csi2_timing_config(isp, csi2, timing);
++ isp_csi2_recv_config(isp, csi2, &csi2->ctrl);
++ isp_csi2_ctx_config(isp, csi2, &csi2->contexts[0]);
++
++ return 0;
++}
++
++/*
++ * isp_csi2_regdump - Prints CSI2 debug information.
++ */
++#define CSI2_PRINT_REGISTER(isp, regs, name)\
++ dev_dbg(isp->dev, "###CSI2 " #name "=0x%08x\n", \
++ isp_reg_readl(isp, regs, ISPCSI2_##name))
++
++void isp_csi2_regdump(struct isp_csi2_device *csi2)
++{
++ struct isp_device *isp = csi2->isp;
++
++ if (!csi2->available)
++ return;
++
++ dev_dbg(isp->dev, "-------------CSI2 Register dump-------------\n");
++
++ dev_dbg(isp->dev, "###ISP_CTRL=0x%x\n",
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL));
++ dev_dbg(isp->dev, "###ISP_IRQ0ENABLE=0x%x\n",
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE));
++ dev_dbg(isp->dev, "###ISP_IRQ0STATUS=0x%x\n",
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS));
++
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSCONFIG);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSSTATUS);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQENABLE);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQSTATUS);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTRL);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_H);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, GNQ);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_CFG);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQSTATUS);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, SHORT_PACKET);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQENABLE);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_P);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, TIMING);
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL1(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL2(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_OFST(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PING_ADDR(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PONG_ADDR(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQENABLE(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQSTATUS(0));
++ CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL3(0));
++
++ dev_dbg(isp->dev, "--------------------------------------------\n");
++}
++
++/* -----------------------------------------------------------------------------
++ * Interrupt handling
++ */
++
++/*
++ * isp_csi2_isr_buffer - Does buffer handling at end-of-frame
++ * when writing to memory.
++ */
++static void isp_csi2_isr_buffer(struct isp_csi2_device *csi2)
++{
++ struct isp_device *isp = csi2->isp;
++ struct isp_buffer *buffer;
++
++ isp_csi2_ctx_enable(isp, csi2, 0, 0);
++
++ buffer = isp_video_buffer_next(&csi2->video_out, 0);
++ if (buffer == NULL) {
++ csi2->buffers_ready = false;
++ csi2->underrun = true;
++ return;
++ }
++ csi2_set_outaddr(csi2, buffer->isp_addr);
++ isp_csi2_ctx_enable(isp, csi2, 0, 1);
++}
++
++/*
++ * isp_csi2_isr - CSI2 interrupt handling.
++ *
++ * Return -EIO on Transmission error
++ */
++int isp_csi2_isr(struct isp_csi2_device *csi2)
++{
++ u32 csi2_irqstatus, cpxio1_irqstatus, ctxirqstatus;
++ struct isp_device *isp = csi2->isp;
++ int retval = 0;
++
++ if (!csi2->available)
++ return -ENODEV;
++
++ csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
++ isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);
++
++ /* Failure Cases */
++ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) {
++ cpxio1_irqstatus = isp_reg_readl(isp, csi2->regs1,
++ ISPCSI2_PHY_IRQSTATUS);
++ isp_reg_writel(isp, cpxio1_irqstatus,
++ csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
++ dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
++ "%x\n", cpxio1_irqstatus);
++ retval = -EIO;
++ }
++
++ if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
++ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
++ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
++ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
++ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
++ dev_dbg(isp->dev, "CSI2 Err:"
++ " OCP:%d,"
++ " Short_pack:%d,"
++ " ECC:%d,"
++ " CPXIO2:%d,"
++ " FIFO_OVF:%d,"
++ "\n",
++ (csi2_irqstatus &
++ ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
++ (csi2_irqstatus &
++ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0,
++ (csi2_irqstatus &
++ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0,
++ (csi2_irqstatus &
++ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
++ (csi2_irqstatus &
++ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0);
++ retval = -EIO;
++ }
++
++ /* Successful cases */
++ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0)) {
++ ctxirqstatus = isp_reg_readl(isp, csi2->regs1,
++ ISPCSI2_CTX_IRQSTATUS(0));
++ isp_reg_writel(isp, ctxirqstatus, csi2->regs1,
++ ISPCSI2_CTX_IRQSTATUS(0));
++ if ((ctxirqstatus & ISPCSI2_CTX_IRQSTATUS_FE_IRQ) &&
++ (csi2->output & CSI2_OUTPUT_MEMORY))
++ isp_csi2_isr_buffer(csi2);
++ }
++
++ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
++ dev_dbg(isp->dev, "CSI2: ECC correction done\n");
++
++ return retval;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP video operations
++ */
++
++/*
++ * csi2_queue - Queues the first buffer when using memory output
++ * @video: The video node
++ * @buffer: buffer to queue
++ */
++static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
++{
++ struct isp_device *isp = video->isp;
++ struct isp_csi2_device *csi2 = &isp->isp_csi2a;
++
++ csi2_set_outaddr(csi2, buffer->isp_addr);
++
++ /*
++ * If streaming was enabled before there was a buffer queued,
++ * or underrun happened in the ISR, the hardware was not enabled.
++ * Enable it now.
++ */
++ if (csi2->underrun) {
++ csi2->underrun = false;
++ /* Enable / disable context 0 and IRQs */
++ isp_csi2_if_enable(isp, csi2, 1);
++ isp_csi2_ctx_enable(isp, csi2, 0, 1);
++ }
++
++ csi2->buffers_ready = true;
++ return 0;
++}
++
++static const struct isp_video_operations csi2_ispvideo_ops = {
++ .queue = csi2_queue,
++};
++
++/* -----------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++
++static struct v4l2_mbus_framefmt *
++__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return v4l2_subdev_get_probe_format(fh, pad);
++ else
++ return &csi2->formats[pad];
++}
++
++static void
++csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ enum v4l2_mbus_pixelcode pixelcode;
++ struct v4l2_mbus_framefmt *format;
++ unsigned int i;
++
++ switch (pad) {
++ case CSI2_PAD_SINK:
++ /* Clamp the width and height to valid ranges (1-4095 and 1-8191
++ * inclusive respectively).
++ */
++ for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
++ if (fmt->code == csi2_input_fmts[i])
++ break;
++ }
++
++ /* If not found, use SGRBG10 as default */
++ if (i >= ARRAY_SIZE(csi2_input_fmts))
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++
++ fmt->width = clamp_t(u32, fmt->width, 1, 4095);
++ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
++ break;
++
++ case CSI2_PAD_SOURCE:
++ /* Source format same as sink format, except for DPCM
++ * compression.
++ */
++ pixelcode = fmt->code;
++ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
++ memcpy(fmt, format, sizeof(*fmt));
++
++ /*
++ * Only Allow DPCM decompression, and check that the
++ * pattern is preserved
++ */
++ if (isp_video_uncompressed_code(fmt->code) == pixelcode)
++ fmt->code = pixelcode;
++ break;
++ }
++
++ /* RGB, non-interlaced */
++ fmt->colorspace = V4L2_COLORSPACE_SRGB;
++ fmt->field = V4L2_FIELD_NONE;
++}
++
++/*
++ * csi2_enum_mbus_code - Handle pixel format enumeration
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
++ * return -EINVAL or zero on success
++ */
++static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ if (code->pad == CSI2_PAD_SINK) {
++ if (code->index >= ARRAY_SIZE(csi2_input_fmts))
++ return -EINVAL;
++
++ code->code = csi2_input_fmts[code->index];
++ } else {
++ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ switch (code->index) {
++ case 0:
++ /* Passthrough sink pad code */
++ code->code = format->code;
++ break;
++ case 1:
++ /* Uncompressed code */
++ code->code = isp_video_uncompressed_code(format->code);
++ if (code->code != format->code)
++ break;
++ /* Fallthrough if above is false */
++ default:
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
++
++static int csi2_enum_frame_size(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt format;
++
++ if (fse->index != 0)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = 1;
++ format.height = 1;
++ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_PROBE);
++ fse->min_width = format.width;
++ fse->min_height = format.height;
++
++ if (format.code != fse->code)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = -1;
++ format.height = -1;
++ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_PROBE);
++ fse->max_width = format.width;
++ fse->max_height = format.height;
++
++ return 0;
++}
++
++/*
++ * csi2_get_format - Handle get format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad: pad num
++ * @fmt: pointer to v4l2 format structure
++ * return -EINVAL or zero on sucess
++ */
++static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __csi2_get_format(csi2, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ memcpy(fmt, format, sizeof(*fmt));
++ return 0;
++}
++
++/*
++ * csi2_set_format - Handle set format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad: pad num
++ * @fmt: pointer to v4l2 format structure
++ * return -EINVAL or zero on success
++ */
++static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __csi2_get_format(csi2, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ csi2_try_format(csi2, fh, pad, fmt, which);
++ memcpy(format, fmt, sizeof(*format));
++
++ /* Propagate the format from sink to source */
++ if (pad == CSI2_PAD_SINK) {
++ format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE, which);
++ memcpy(format, fmt, sizeof(*format));
++ csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, which);
++ }
++
++ return 0;
++}
++
++/*
++ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
++ * @sd: ISP CSI2 V4L2 subdevice
++ * @enable: Enable/disable stream (1/0)
++ *
++ * Return 0 on success or a negative error code otherwise.
++ */
++static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = csi2->isp;
++ int ret;
++
++ if (enable) {
++ ret = isp_csiphy_acquire(csi2->phy);
++ if (ret < 0)
++ return ret;
++
++ isp_csi2_configure(csi2);
++ } else {
++ csi2->buffers_ready = false;
++ csi2->underrun = false;
++
++ isp_csi2_ctx_enable(isp, csi2, 0, 0);
++ isp_csi2_if_enable(isp, csi2, 0);
++ isp_csi2_irq_ctx_set(isp, csi2, 0);
++ isp_csiphy_release(csi2->phy);
++ return 0;
++ }
++
++ /* When not outputting to memory, or when outputting to memory with at
++ * last one buffer available on the output, start the CSI2 immediately.
++ * Otherwise flag an underrun condition, and let the buffer queue
++ * handler start the hardware.
++ */
++ if (!(csi2->output & CSI2_OUTPUT_MEMORY) || csi2->buffers_ready) {
++ /* Enable context 0 and IRQs */
++ isp_csi2_ctx_enable(isp, csi2, 0, 1);
++ isp_csi2_if_enable(isp, csi2, 1);
++ } else {
++ csi2->underrun = true;
++ }
++
++ return 0;
++}
++
++/* subdev video operations */
++static const struct v4l2_subdev_video_ops csi2_video_ops = {
++ .s_stream = csi2_set_stream,
++};
++
++/* subdev pad operations */
++static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
++ .enum_mbus_code = csi2_enum_mbus_code,
++ .enum_frame_size = csi2_enum_frame_size,
++ .get_fmt = csi2_get_format,
++ .set_fmt = csi2_set_format,
++};
++
++/* subdev operations */
++static const struct v4l2_subdev_ops csi2_ops = {
++ .video = &csi2_video_ops,
++ .pad = &csi2_pad_ops,
++};
++
++/* -----------------------------------------------------------------------------
++ * Media entity operations
++ */
++
++/*
++ * csi2_link_setup - Setup CSI2 connections.
++ * @entity : Pointer to media entity structure
++ * @local : Pointer to local pad array
++ * @remote : Pointer to remote pad array
++ * @flags : Link flags
++ * return -EINVAL or zero on success
++ */
++static int csi2_link_setup(struct media_entity *entity,
++ const struct media_entity_pad *local,
++ const struct media_entity_pad *remote, u32 flags)
++{
++ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++ struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
++ struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
++
++ switch (local->index | (remote->entity->type << 16)) {
++ case CSI2_PAD_SOURCE | (MEDIA_ENTITY_TYPE_NODE << 16):
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ csi2->output |= CSI2_OUTPUT_MEMORY;
++ else
++ csi2->output &= ~CSI2_OUTPUT_MEMORY;
++ break;
++
++ case CSI2_PAD_SOURCE | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ csi2->output |= CSI2_OUTPUT_CCDC;
++ else
++ csi2->output &= ~CSI2_OUTPUT_CCDC;
++ break;
++
++ default:
++ /* Link from camera to CSI2 is fixed... */
++ return -EINVAL;
++ }
++
++ ctrl->vp_only_enable =
++ (csi2->output & CSI2_OUTPUT_MEMORY) ? false : true;
++ ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC);
++
++ return 0;
++}
++
++/* media operations */
++static const struct media_entity_operations csi2_media_ops = {
++ .link_setup = csi2_link_setup,
++};
++
++/*
++ * ispcsi2_init_entities - Initialize subdev and media entity.
++ * @csi2: Pointer to ispcsi2 structure.
++ * return -ENOMEM or zero on success
++ */
++static int isp_csi2_init_entities(struct isp_csi2_device *csi2)
++{
++ struct v4l2_subdev *sd = &csi2->subdev;
++ struct media_entity_pad *pads = csi2->pads;
++ struct media_entity *me = &sd->entity;
++ int ret;
++
++ v4l2_subdev_init(sd, &csi2_ops);
++ strlcpy(sd->name, "OMAP3 ISP CSI2a", sizeof(sd->name));
++
++ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
++ v4l2_set_subdevdata(sd, csi2);
++ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ pads[CSI2_PAD_SOURCE].type = MEDIA_PAD_TYPE_OUTPUT;
++ pads[CSI2_PAD_SINK].type = MEDIA_PAD_TYPE_INPUT;
++
++ me->ops = &csi2_media_ops;
++ ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
++ if (ret < 0)
++ return ret;
++
++ /*
++ * Set these to some sane value here, otherwise link_setup will
++ * fail if called before set_fmt on the pads. CSI2 is braindamaged.
++ */
++ csi2->formats[CSI2_PAD_SINK].code = V4L2_MBUS_FMT_SGRBG10_1X10;
++ csi2->formats[CSI2_PAD_SOURCE].code = V4L2_MBUS_FMT_SGRBG10_1X10;
++
++ /* Video device node */
++ csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ csi2->video_out.ops = &csi2_ispvideo_ops;
++ csi2->video_out.alignment = 32;
++ csi2->video_out.isp = csi2->isp;
++ csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
++
++ ret = isp_video_init(&csi2->video_out, "CSI2a");
++ if (ret < 0)
++ return ret;
++
++ /* Connect the CSI2 subdev to the video node. */
++ ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
++ &csi2->video_out.video.entity, 0, 0);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
++void isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
++{
++ media_entity_cleanup(&csi2->subdev.entity);
++
++ v4l2_device_unregister_subdev(&csi2->subdev);
++ isp_video_unregister(&csi2->video_out);
++}
++
++int isp_csi2_register_entities(struct isp_csi2_device *csi2,
++ struct v4l2_device *vdev)
++{
++ int ret;
++
++ /* Register the subdev and video nodes. */
++ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&csi2->video_out, vdev);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++error:
++ isp_csi2_unregister_entities(csi2);
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP CSI2 initialisation and cleanup
++ */
++
++/*
++ * isp_csi2_cleanup - Routine for module driver cleanup
++ */
++void isp_csi2_cleanup(struct isp_device *isp)
++{
++}
++
++/*
++ * isp_csi2_init - Routine for module driver init
++ */
++int isp_csi2_init(struct isp_device *isp)
++{
++ struct isp_csi2_device *csi2a = &isp->isp_csi2a;
++ struct isp_csi2_device *csi2c = &isp->isp_csi2c;
++ int ret;
++
++ csi2a->isp = isp;
++ csi2a->available = 1;
++ csi2a->regs1 = OMAP3_ISP_IOMEM_CSI2A_REGS1;
++ csi2a->regs2 = OMAP3_ISP_IOMEM_CSI2A_REGS2;
++ csi2a->phy = &isp->isp_csiphy2;
++
++ ret = isp_csi2_init_entities(csi2a);
++ if (ret < 0)
++ goto fail;
++
++ if (isp->revision == ISP_REVISION_15_0) {
++ csi2c->isp = isp;
++ csi2c->available = 1;
++ csi2c->regs1 = OMAP3_ISP_IOMEM_CSI2C_REGS1;
++ csi2c->regs2 = OMAP3_ISP_IOMEM_CSI2C_REGS2;
++ csi2c->phy = &isp->isp_csiphy1;
++ }
++
++ return 0;
++fail:
++ isp_csi2_cleanup(isp);
++ return ret;
++}
++
+diff --git a/drivers/media/video/isp/ispcsi2.h b/drivers/media/video/isp/ispcsi2.h
+new file mode 100644
+index 0000000..4da667d
+--- /dev/null
++++ b/drivers/media/video/isp/ispcsi2.h
+@@ -0,0 +1,157 @@
++/*
++ * ispcsi2.h
++ *
++ * Copyright (C) 2009 Texas Instruments.
++ *
++ * Contributors:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Dominic Curran <dcurran@ti.com>
++ * Antti Koskipaa <antti.koskipaa@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_CSI2_API_H
++#define OMAP_ISP_CSI2_API_H
++
++#include <linux/videodev2.h>
++
++struct isp_csiphy;
++
++/* This is not an exhaustive list */
++enum isp_csi2_pix_formats {
++ CSI2_PIX_FMT_OTHERS = 0,
++ CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
++ CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
++ CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
++ CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
++ CSI2_PIX_FMT_RAW8 = 0x2a,
++ CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
++ CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
++ CSI2_PIX_FMT_RAW8_VP = 0x12a,
++ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
++ CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
++ CSI2_USERDEF_8BIT_DATA1 = 0x40,
++};
++
++enum isp_csi2_irqevents {
++ OCP_ERR_IRQ = 0x4000,
++ SHORT_PACKET_IRQ = 0x2000,
++ ECC_CORRECTION_IRQ = 0x1000,
++ ECC_NO_CORRECTION_IRQ = 0x800,
++ COMPLEXIO2_ERR_IRQ = 0x400,
++ COMPLEXIO1_ERR_IRQ = 0x200,
++ FIFO_OVF_IRQ = 0x100,
++ CONTEXT7 = 0x80,
++ CONTEXT6 = 0x40,
++ CONTEXT5 = 0x20,
++ CONTEXT4 = 0x10,
++ CONTEXT3 = 0x8,
++ CONTEXT2 = 0x4,
++ CONTEXT1 = 0x2,
++ CONTEXT0 = 0x1,
++};
++
++enum isp_csi2_ctx_irqevents {
++ CTX_ECC_CORRECTION = 0x100,
++ CTX_LINE_NUMBER = 0x80,
++ CTX_FRAME_NUMBER = 0x40,
++ CTX_CS = 0x20,
++ CTX_LE = 0x8,
++ CTX_LS = 0x4,
++ CTX_FE = 0x2,
++ CTX_FS = 0x1,
++};
++
++enum isp_csi2_frame_mode {
++ ISP_CSI2_FRAME_IMMEDIATE,
++ ISP_CSI2_FRAME_AFTERFEC,
++};
++
++#define ISP_CSI2_MAX_CTX_NUM 7
++
++struct isp_csi2_ctx_cfg {
++ u8 ctxnum; /* context number 0 - 7 */
++ u8 dpcm_decompress;
++
++ /* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
++ u8 virtual_id;
++ u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
++ u8 dpcm_predictor; /* 1: simple, 0: advanced */
++
++ /* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
++ u16 alpha;
++ u16 data_offset;
++ u32 ping_addr;
++ u32 pong_addr;
++ u8 frame_count;
++ u8 eof_enabled;
++ u8 eol_enabled;
++ u8 checksum_enabled;
++ u8 enabled;
++};
++
++struct isp_csi2_timing_cfg {
++ u8 ionum; /* IO1 or IO2 as in CSI2_TIMING */
++ unsigned force_rx_mode:1;
++ unsigned stop_state_16x:1;
++ unsigned stop_state_4x:1;
++ u16 stop_state_counter;
++};
++
++struct isp_csi2_ctrl_cfg {
++ bool vp_clk_enable;
++ bool vp_only_enable;
++ u8 vp_out_ctrl;
++ bool debug_enable;
++ enum isp_csi2_frame_mode frame_mode;
++ bool ecc_enable;
++ bool if_enable;
++};
++
++#define CSI2_PAD_SINK 0
++#define CSI2_PAD_SOURCE 1
++#define CSI2_PADS_NUM 2
++
++#define CSI2_OUTPUT_CCDC (1 << 0)
++#define CSI2_OUTPUT_MEMORY (1 << 1)
++
++struct isp_csi2_device {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pads[CSI2_PADS_NUM];
++ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
++ struct isp_video video_out;
++ struct isp_device *isp;
++
++ u8 available; /* Is the IP present on the silicon? */
++
++ /* mem resources - enums as defined in enum isp_mem_resources */
++ u8 regs1;
++ u8 regs2;
++
++ u32 output; /* output to CCDC, memory or both? */
++ bool dpcm_decompress;
++ bool underrun;
++ bool buffers_ready;
++
++ struct isp_csiphy *phy;
++ struct isp_csi2_ctx_cfg contexts[ISP_CSI2_MAX_CTX_NUM + 1];
++ struct isp_csi2_timing_cfg timing[2];
++ struct isp_csi2_ctrl_cfg ctrl;
++};
++
++int isp_csi2_isr(struct isp_csi2_device *csi2);
++int isp_csi2_reset(struct isp_csi2_device *csi2);
++void isp_csi2_regdump(struct isp_csi2_device *csi2);
++int isp_csi2_init(struct isp_device *isp);
++void isp_csi2_cleanup(struct isp_device *isp);
++void isp_csi2_unregister_entities(struct isp_csi2_device *csi2);
++int isp_csi2_register_entities(struct isp_csi2_device *csi2,
++ struct v4l2_device *vdev);
++#endif /* OMAP_ISP_CSI2_H */
+diff --git a/drivers/media/video/isp/ispcsiphy.c b/drivers/media/video/isp/ispcsiphy.c
+new file mode 100644
+index 0000000..47a6d29
+--- /dev/null
++++ b/drivers/media/video/isp/ispcsiphy.c
+@@ -0,0 +1,245 @@
++/*
++ * ispcsiphy.c
++ *
++ * Driver Library for ISP CSI PHY module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments.
++ * Copyright (C) 2010 Nokia Corporation.
++ *
++ * Contributors:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Dominic Curran <dcurran@ti.com>
++ * Antti Koskipaa <antti.koskipaa@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/regulator/consumer.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "ispcsiphy.h"
++
++/*
++ * csiphy_lanes_config - Configuration of CSIPHY lanes.
++ *
++ * Updates HW configuration.
++ * Called with phy->mutex taken.
++ */
++static void csiphy_lanes_config(struct isp_csiphy *phy)
++{
++ unsigned int i;
++ u32 reg;
++
++ reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
++
++ for (i = 0; i < phy->num_data_lanes; i++) {
++ reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
++ ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
++ reg |= (phy->lanes.data[i].pol <<
++ ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
++ reg |= (phy->lanes.data[i].pos <<
++ ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
++ }
++
++ reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
++ ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
++ reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
++ reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
++
++ isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
++}
++
++/*
++ * csiphy_power_autoswitch_enable
++ * @enable: Sets or clears the autoswitch function enable flag.
++ */
++static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable)
++{
++ isp_reg_and_or(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
++ ~ISPCSI2_PHY_CFG_PWR_AUTO_MASK,
++ enable ? ISPCSI2_PHY_CFG_PWR_AUTO_ENABLE :
++ ISPCSI2_PHY_CFG_PWR_AUTO_DISABLE);
++}
++
++/*
++ * csiphy_set_power
++ * @power: Power state to be set.
++ *
++ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
++ */
++static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
++{
++ u32 reg;
++ u8 retry_count;
++
++ isp_reg_and_or(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
++ ~ISPCSI2_PHY_CFG_PWR_CMD_MASK,
++ power);
++
++ retry_count = 0;
++ do {
++ udelay(50);
++ reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) &
++ ISPCSI2_PHY_CFG_PWR_STATUS_MASK;
++
++ if (reg != power >> 2)
++ retry_count++;
++
++ } while ((reg != power >> 2) && (retry_count < 100));
++
++ if (retry_count == 100) {
++ printk(KERN_ERR "CSI2 CIO set power failed!\n");
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++/*
++ * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
++ *
++ * Called with phy->mutex taken.
++ */
++static void csiphy_dphy_config(struct isp_csiphy *phy)
++{
++ u32 reg;
++
++ /* Set up ISPCSIPHY_REG0 */
++ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
++
++ reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
++ ISPCSIPHY_REG0_THS_SETTLE_MASK);
++ reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT;
++ reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
++
++ isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
++
++ /* Set up ISPCSIPHY_REG1 */
++ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
++
++ reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
++ ISPCSIPHY_REG1_TCLK_MISS_MASK |
++ ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
++ reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
++ reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
++ reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
++
++ isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
++}
++
++int isp_csiphy_config(struct isp_csiphy *phy,
++ struct isp_csiphy_dphy_cfg *dphy,
++ struct isp_csiphy_lanes_cfg *lanes)
++{
++ unsigned int used_lanes = 0;
++ unsigned int i;
++
++ /* Clock and data lanes verification */
++ for (i = 0; i < phy->num_data_lanes; i++) {
++ if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3)
++ return -EINVAL;
++
++ if (used_lanes & (1 << lanes->data[i].pos))
++ return -EINVAL;
++
++ used_lanes |= 1 << lanes->data[i].pos;
++ }
++
++ if (lanes->clk.pol > 1 || lanes->clk.pos > 3)
++ return -EINVAL;
++
++ if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
++ return -EINVAL;
++
++ mutex_lock(&phy->mutex);
++ phy->dphy = *dphy;
++ phy->lanes = *lanes;
++ mutex_unlock(&phy->mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL(isp_csiphy_config);
++
++int isp_csiphy_acquire(struct isp_csiphy *phy)
++{
++ int rval;
++
++ if (phy->vdd == NULL) {
++ dev_err(phy->isp->dev, "Power regulator for CSI PHY not "
++ "available\n");
++ return -ENODEV;
++ }
++
++ mutex_lock(&phy->mutex);
++
++ rval = regulator_enable(phy->vdd);
++ if (rval < 0)
++ goto done;
++
++ isp_csi2_reset(phy->csi2);
++
++ csiphy_dphy_config(phy);
++ csiphy_lanes_config(phy);
++
++ rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
++ if (rval) {
++ regulator_disable(phy->vdd);
++ goto done;
++ }
++
++ csiphy_power_autoswitch_enable(phy, true);
++ phy->phy_in_use = 1;
++
++done:
++ mutex_unlock(&phy->mutex);
++ return rval;
++}
++
++void isp_csiphy_release(struct isp_csiphy *phy)
++{
++ mutex_lock(&phy->mutex);
++ if (phy->phy_in_use) {
++ csiphy_power_autoswitch_enable(phy, false);
++ csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
++ regulator_disable(phy->vdd);
++ phy->phy_in_use = 0;
++ }
++ mutex_unlock(&phy->mutex);
++}
++
++/*
++ * isp_csiphy_init - Initialize the CSI PHY frontends
++ */
++int isp_csiphy_init(struct isp_device *isp)
++{
++ struct isp_csiphy *phy1 = &isp->isp_csiphy1;
++ struct isp_csiphy *phy2 = &isp->isp_csiphy2;
++
++ phy2->isp = isp;
++ phy2->csi2 = &isp->isp_csi2a;
++ phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
++ phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1;
++ phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2;
++ mutex_init(&phy2->mutex);
++
++ if (isp->revision == ISP_REVISION_15_0) {
++ phy1->isp = isp;
++ phy1->csi2 = &isp->isp_csi2c;
++ phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES;
++ phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1;
++ phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1;
++ mutex_init(&phy1->mutex);
++ }
++
++ return 0;
++}
++
+diff --git a/drivers/media/video/isp/ispcsiphy.h b/drivers/media/video/isp/ispcsiphy.h
+new file mode 100644
+index 0000000..839ebb0
+--- /dev/null
++++ b/drivers/media/video/isp/ispcsiphy.h
+@@ -0,0 +1,72 @@
++/*
++ * ispcsiphy.h
++ *
++ * Copyright (C) 2009 Texas Instruments.
++ * Copyright (C) 2010 Nokia Corporation.
++ *
++ * Contributors:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Dominic Curran <dcurran@ti.com>
++ * Antti Koskipaa <antti.koskipaa@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_CSI_PHY_H
++#define OMAP_ISP_CSI_PHY_H
++
++struct isp_csi2_device;
++struct regulator;
++
++struct csiphy_lane {
++ u8 pos;
++ u8 pol;
++};
++
++#define ISP_CSIPHY2_NUM_DATA_LANES 2
++#define ISP_CSIPHY1_NUM_DATA_LANES 1
++
++struct isp_csiphy_lanes_cfg {
++ struct csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
++ struct csiphy_lane clk;
++};
++
++struct isp_csiphy_dphy_cfg {
++ u8 ths_term;
++ u8 ths_settle;
++ u8 tclk_term;
++ unsigned tclk_miss:1;
++ u8 tclk_settle;
++};
++
++struct isp_csiphy {
++ struct isp_device *isp;
++ struct mutex mutex; /* serialize csiphy configuration */
++ u8 phy_in_use;
++ struct isp_csi2_device *csi2;
++ struct regulator *vdd;
++
++ /* mem resources - enums as defined in enum isp_mem_resources */
++ unsigned int cfg_regs;
++ unsigned int phy_regs;
++
++ u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
++ struct isp_csiphy_lanes_cfg lanes;
++ struct isp_csiphy_dphy_cfg dphy;
++};
++
++int isp_csiphy_config(struct isp_csiphy *phy, struct isp_csiphy_dphy_cfg *dphy,
++ struct isp_csiphy_lanes_cfg *lanes);
++
++int isp_csiphy_acquire(struct isp_csiphy *phy);
++void isp_csiphy_release(struct isp_csiphy *phy);
++int isp_csiphy_init(struct isp_device *isp);
++
++#endif /* OMAP_ISP_CSI_PHY_H */
++
+diff --git a/drivers/media/video/isp/isph3a.h b/drivers/media/video/isp/isph3a.h
+new file mode 100644
+index 0000000..d5feec8
+--- /dev/null
++++ b/drivers/media/video/isp/isph3a.h
+@@ -0,0 +1,111 @@
++/*
++ * isph3a.h
++ *
++ * Include file for H3A module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Troy Laramy
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_H3A_H
++#define OMAP_ISP_H3A_H
++
++#include <mach/isp_user.h>
++
++/*
++ * ----------
++ * -H3A AEWB-
++ * ----------
++ */
++
++#define AEWB_PACKET_SIZE 16
++#define AEWB_SATURATION_LIMIT 0x3ff
++
++/* Flags for changed registers */
++#define PCR_CHNG (1 << 0)
++#define AEWWIN1_CHNG (1 << 1)
++#define AEWINSTART_CHNG (1 << 2)
++#define AEWINBLK_CHNG (1 << 3)
++#define AEWSUBWIN_CHNG (1 << 4)
++#define PRV_WBDGAIN_CHNG (1 << 5)
++#define PRV_WBGAIN_CHNG (1 << 6)
++
++/* ISPH3A REGISTERS bits */
++#define ISPH3A_PCR_AF_EN (1 << 0)
++#define ISPH3A_PCR_AF_ALAW_EN (1 << 1)
++#define ISPH3A_PCR_AF_MED_EN (1 << 2)
++#define ISPH3A_PCR_AF_BUSY (1 << 15)
++#define ISPH3A_PCR_AEW_EN (1 << 16)
++#define ISPH3A_PCR_AEW_ALAW_EN (1 << 17)
++#define ISPH3A_PCR_AEW_BUSY (1 << 18)
++#define ISPH3A_PCR_AEW_MASK (ISPH3A_PCR_AEW_ALAW_EN | \
++ ISPH3A_PCR_AEW_AVE2LMT_MASK)
++
++/*
++ * --------
++ * -H3A AF-
++ * --------
++ */
++
++/* Peripheral Revision */
++#define AFPID 0x0
++
++#define AFCOEF_OFFSET 0x00000004 /* COEF base address */
++
++/* PCR fields */
++#define AF_BUSYAF (1 << 15)
++#define AF_FVMODE (1 << 14)
++#define AF_RGBPOS (0x7 << 11)
++#define AF_MED_TH (0xFF << 3)
++#define AF_MED_EN (1 << 2)
++#define AF_ALAW_EN (1 << 1)
++#define AF_EN (1 << 0)
++#define AF_PCR_MASK (AF_FVMODE | AF_RGBPOS | AF_MED_TH | \
++ AF_MED_EN | AF_ALAW_EN)
++
++/* AFPAX1 fields */
++#define AF_PAXW (0x7F << 16)
++#define AF_PAXH 0x7F
++
++/* AFPAX2 fields */
++#define AF_AFINCV (0xF << 13)
++#define AF_PAXVC (0x7F << 6)
++#define AF_PAXHC 0x3F
++
++/* AFPAXSTART fields */
++#define AF_PAXSH (0xFFF<<16)
++#define AF_PAXSV 0xFFF
++
++/* COEFFICIENT MASK */
++#define AF_COEF_MASK0 0xFFF
++#define AF_COEF_MASK1 (0xFFF<<16)
++
++/* BIT SHIFTS */
++#define AF_RGBPOS_SHIFT 11
++#define AF_MED_TH_SHIFT 3
++#define AF_PAXW_SHIFT 16
++#define AF_LINE_INCR_SHIFT 13
++#define AF_VT_COUNT_SHIFT 6
++#define AF_HZ_START_SHIFT 16
++#define AF_COEF_SHIFT 16
++
++/* Init and cleanup functions */
++int isp_h3a_aewb_init(struct isp_device *isp);
++int isp_h3a_af_init(struct isp_device *isp);
++
++void isp_h3a_aewb_cleanup(struct isp_device *isp);
++void isp_h3a_af_cleanup(struct isp_device *isp);
++
++#endif /* OMAP_ISP_H3A_H */
+diff --git a/drivers/media/video/isp/isph3a_aewb.c b/drivers/media/video/isp/isph3a_aewb.c
+new file mode 100644
+index 0000000..2b43fff
+--- /dev/null
++++ b/drivers/media/video/isp/isph3a_aewb.c
+@@ -0,0 +1,351 @@
++/*
++ * isph3a.c
++ *
++ * H3A module for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * David Cohen <david.cohen@nokia.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Troy Laramy
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include "isp.h"
++#include "isph3a.h"
++#include "ispstat.h"
++
++/*
++ * isph3a_aewb_update_regs - Helper function to update h3a registers.
++ */
++static void isph3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
++{
++ struct isph3a_aewb_config *conf = priv;
++ u32 pcr;
++ u32 win1;
++ u32 start;
++ u32 blk;
++ u32 subwin;
++
++ if (aewb->state == ISPSTAT_DISABLED)
++ return;
++
++ isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr,
++ OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
++
++ if (!aewb->update)
++ return;
++
++ /* Converting config metadata into reg values */
++ pcr = conf->saturation_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT;
++ pcr |= !!conf->alaw_enable << ISPH3A_PCR_AEW_ALAW_EN_SHIFT;
++
++ win1 = ((conf->win_height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT;
++ win1 |= ((conf->win_width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT;
++ win1 |= (conf->ver_win_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT;
++ win1 |= (conf->hor_win_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT;
++
++ start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT;
++ start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT;
++
++ blk = conf->blk_ver_win_start << ISPH3A_AEWINBLK_WINSV_SHIFT;
++ blk |= ((conf->blk_win_height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT;
++
++ subwin = ((conf->subsample_ver_inc >> 1) - 1) <<
++ ISPH3A_AEWSUBWIN_AEWINCV_SHIFT;
++ subwin |= ((conf->subsample_hor_inc >> 1) - 1) <<
++ ISPH3A_AEWSUBWIN_AEWINCH_SHIFT;
++
++ isp_reg_writel(aewb->isp, win1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1);
++ isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A,
++ ISPH3A_AEWINSTART);
++ isp_reg_writel(aewb->isp, blk, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK);
++ isp_reg_writel(aewb->isp, subwin, OMAP3_ISP_IOMEM_H3A,
++ ISPH3A_AEWSUBWIN);
++ isp_reg_and_or(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
++ ~ISPH3A_PCR_AEW_MASK, pcr);
++
++ aewb->update = 0;
++ aewb->config_counter += aewb->inc_config;
++ aewb->inc_config = 0;
++ aewb->buf_size = conf->buf_size;
++}
++
++static u32 isph3a_aewb_get_buf_size(struct isph3a_aewb_config *conf)
++{
++ /* Number of configured windows + extra row for black data */
++ u32 win_count = (conf->ver_win_count + 1) * conf->hor_win_count;
++
++ /*
++ * Unsaturated block counts for each 8 windows.
++ * 1 extra for the last (win_count % 8) windows if win_count is not
++ * divisible by 8.
++ */
++ win_count += (win_count + 7) / 8;
++
++ return win_count * AEWB_PACKET_SIZE;
++}
++
++static int isph3a_aewb_validate_params(struct ispstat *aewb, void *new_conf)
++{
++ struct isph3a_aewb_config *user_cfg = new_conf;
++ u32 buf_size;
++
++ if (unlikely(user_cfg->saturation_limit > AEWB_MAX_SATURATION_LIM))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->win_height < AEWB_MIN_WIN_H ||
++ user_cfg->win_height > AEWB_MAX_WIN_H ||
++ user_cfg->win_height & 0x01))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->win_width < AEWB_MIN_WIN_W ||
++ user_cfg->win_width > AEWB_MAX_WIN_W ||
++ user_cfg->win_width & 0x01))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->ver_win_count < AEWB_MIN_WINVC ||
++ user_cfg->ver_win_count > AEWB_MAX_WINVC))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->hor_win_count < AEWB_MIN_WINHC ||
++ user_cfg->hor_win_count > AEWB_MAX_WINHC))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->ver_win_start > AEWB_MAX_WINSTART))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->hor_win_start > AEWB_MAX_WINSTART))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->blk_ver_win_start > AEWB_MAX_WINSTART))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->blk_win_height < AEWB_MIN_WIN_H ||
++ user_cfg->blk_win_height > AEWB_MAX_WIN_H ||
++ user_cfg->blk_win_height & 0x01))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->subsample_ver_inc < AEWB_MIN_SUB_INC ||
++ user_cfg->subsample_ver_inc > AEWB_MAX_SUB_INC ||
++ user_cfg->subsample_ver_inc & 0x01))
++ return -EINVAL;
++
++ if (unlikely(user_cfg->subsample_hor_inc < AEWB_MIN_SUB_INC ||
++ user_cfg->subsample_hor_inc > AEWB_MAX_SUB_INC ||
++ user_cfg->subsample_hor_inc & 0x01))
++ return -EINVAL;
++
++ buf_size = isph3a_aewb_get_buf_size(user_cfg);
++ if (buf_size > user_cfg->buf_size)
++ user_cfg->buf_size = buf_size;
++ else if (user_cfg->buf_size > AEWB_MAX_BUF_SIZE)
++ user_cfg->buf_size = AEWB_MAX_BUF_SIZE;
++
++ return 0;
++}
++
++/*
++ * isph3a_aewb_set_params - Helper function to check & store user given params.
++ * @new_conf: Pointer to AE and AWB parameters struct.
++ *
++ * As most of them are busy-lock registers, need to wait until AEW_BUSY = 0 to
++ * program them during ISR.
++ */
++static void isph3a_aewb_set_params(struct ispstat *aewb, void *new_conf)
++{
++ struct isph3a_aewb_config *user_cfg = new_conf;
++ struct isph3a_aewb_config *cur_cfg = aewb->priv;
++ int update = 0;
++
++ if (cur_cfg->saturation_limit != user_cfg->saturation_limit) {
++ cur_cfg->saturation_limit = user_cfg->saturation_limit;
++ update = 1;
++ }
++ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
++ cur_cfg->alaw_enable = user_cfg->alaw_enable;
++ update = 1;
++ }
++ if (cur_cfg->win_height != user_cfg->win_height) {
++ cur_cfg->win_height = user_cfg->win_height;
++ update = 1;
++ }
++ if (cur_cfg->win_width != user_cfg->win_width) {
++ cur_cfg->win_width = user_cfg->win_width;
++ update = 1;
++ }
++ if (cur_cfg->ver_win_count != user_cfg->ver_win_count) {
++ cur_cfg->ver_win_count = user_cfg->ver_win_count;
++ update = 1;
++ }
++ if (cur_cfg->hor_win_count != user_cfg->hor_win_count) {
++ cur_cfg->hor_win_count = user_cfg->hor_win_count;
++ update = 1;
++ }
++ if (cur_cfg->ver_win_start != user_cfg->ver_win_start) {
++ cur_cfg->ver_win_start = user_cfg->ver_win_start;
++ update = 1;
++ }
++ if (cur_cfg->hor_win_start != user_cfg->hor_win_start) {
++ cur_cfg->hor_win_start = user_cfg->hor_win_start;
++ update = 1;
++ }
++ if (cur_cfg->blk_ver_win_start != user_cfg->blk_ver_win_start) {
++ cur_cfg->blk_ver_win_start = user_cfg->blk_ver_win_start;
++ update = 1;
++ }
++ if (cur_cfg->blk_win_height != user_cfg->blk_win_height) {
++ cur_cfg->blk_win_height = user_cfg->blk_win_height;
++ update = 1;
++ }
++ if (cur_cfg->subsample_ver_inc != user_cfg->subsample_ver_inc) {
++ cur_cfg->subsample_ver_inc = user_cfg->subsample_ver_inc;
++ update = 1;
++ }
++ if (cur_cfg->subsample_hor_inc != user_cfg->subsample_hor_inc) {
++ cur_cfg->subsample_hor_inc = user_cfg->subsample_hor_inc;
++ update = 1;
++ }
++
++ if (update || !aewb->configured) {
++ aewb->inc_config++;
++ aewb->update = 1;
++ cur_cfg->buf_size = isph3a_aewb_get_buf_size(cur_cfg);
++ }
++}
++
++static long isph3a_aewb_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
++ void *arg)
++{
++ struct ispstat *stat = v4l2_get_subdevdata(sd);
++
++ switch (cmd) {
++ case VIDIOC_PRIVATE_ISP_AEWB_CFG:
++ return ispstat_config(stat, arg);
++ case VIDIOC_PRIVATE_ISP_STAT_REQ:
++ return ispstat_request_statistics(stat, arg);
++ case VIDIOC_PRIVATE_ISP_STAT_EN: {
++ unsigned long *en = arg;
++ return ispstat_enable(stat, !!*en);
++ }
++ }
++
++ return -ENOIOCTLCMD;
++}
++
++static const struct ispstat_ops isph3a_aewb_ops = {
++ .validate_params = isph3a_aewb_validate_params,
++ .set_params = isph3a_aewb_set_params,
++ .setup_regs = isph3a_aewb_setup_regs,
++};
++
++static const struct ispstat_pcr_bits isph3a_aewb_pcr = {
++ .base = OMAP3_ISP_IOMEM_H3A,
++ .offset = ISPH3A_PCR,
++ .enable = ISPH3A_PCR_AEW_EN,
++ .busy = ISPH3A_PCR_BUSYAEAWB,
++};
++
++static const struct v4l2_subdev_core_ops isph3a_aewb_subdev_core_ops = {
++ .ioctl = isph3a_aewb_ioctl,
++ .subscribe_event = ispstat_subscribe_event,
++ .unsubscribe_event = ispstat_unsubscribe_event,
++};
++
++static const struct v4l2_subdev_video_ops isph3a_aewb_subdev_video_ops = {
++ .s_stream = ispstat_s_stream,
++};
++
++static const struct v4l2_subdev_ops isph3a_aewb_subdev_ops = {
++ .core = &isph3a_aewb_subdev_core_ops,
++ .video = &isph3a_aewb_subdev_video_ops,
++};
++
++/*
++ * isph3a_aewb_init - Module Initialisation.
++ */
++int isp_h3a_aewb_init(struct isp_device *isp)
++{
++ struct ispstat *aewb = &isp->isp_aewb;
++ struct isph3a_aewb_config *aewb_cfg;
++ struct isph3a_aewb_config *aewb_recover_cfg;
++ int ret;
++
++ aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
++ if (!aewb_cfg)
++ return -ENOMEM;
++
++ memset(aewb, 0, sizeof(*aewb));
++ aewb->ops = &isph3a_aewb_ops;
++ aewb->pcr = &isph3a_aewb_pcr;
++ aewb->priv = aewb_cfg;
++ aewb->dma_ch = -1;
++ aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
++ aewb->isp = isp;
++
++ /* Set recover state configuration */
++ aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
++ if (!aewb_recover_cfg) {
++ dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
++ "recover configuration.\n");
++ ret = -ENOMEM;
++ goto err_recover_alloc;
++ }
++
++ aewb_recover_cfg->saturation_limit = AEWB_MAX_SATURATION_LIM;
++ aewb_recover_cfg->win_height = AEWB_MIN_WIN_H;
++ aewb_recover_cfg->win_width = AEWB_MIN_WIN_W;
++ aewb_recover_cfg->ver_win_count = AEWB_MIN_WINVC;
++ aewb_recover_cfg->hor_win_count = AEWB_MIN_WINHC;
++ aewb_recover_cfg->blk_ver_win_start = aewb_recover_cfg->ver_win_start +
++ aewb_recover_cfg->win_height * aewb_recover_cfg->ver_win_count;
++ aewb_recover_cfg->blk_win_height = AEWB_MIN_WIN_H;
++ aewb_recover_cfg->subsample_ver_inc = AEWB_MIN_SUB_INC;
++ aewb_recover_cfg->subsample_hor_inc = AEWB_MIN_SUB_INC;
++
++ if (isph3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
++ dev_err(aewb->isp->dev, "AEWB: recover configuration is "
++ "invalid.\n");
++ ret = -EINVAL;
++ goto err_conf;
++ }
++
++ aewb_recover_cfg->buf_size = isph3a_aewb_get_buf_size(aewb_recover_cfg);
++ aewb->recover_priv = aewb_recover_cfg;
++
++ ret = ispstat_init(aewb, "AEWB", &isph3a_aewb_subdev_ops);
++ if (ret)
++ goto err_conf;
++
++ return 0;
++
++err_conf:
++ kfree(aewb_recover_cfg);
++err_recover_alloc:
++ kfree(aewb_cfg);
++
++ return ret;
++}
++
++/*
++ * isph3a_aewb_cleanup - Module exit.
++ */
++void isp_h3a_aewb_cleanup(struct isp_device *isp)
++{
++ kfree(isp->isp_aewb.priv);
++ kfree(isp->isp_aewb.recover_priv);
++ ispstat_free(&isp->isp_aewb);
++}
++
+diff --git a/drivers/media/video/isp/isph3a_af.c b/drivers/media/video/isp/isph3a_af.c
+new file mode 100644
+index 0000000..3880b58
+--- /dev/null
++++ b/drivers/media/video/isp/isph3a_af.c
+@@ -0,0 +1,396 @@
++/*
++ * isph3a_af.c
++ *
++ * AF module for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Troy Laramy
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* Linux specific include files */
++#include <linux/device.h>
++#include <linux/slab.h>
++
++#include "isp.h"
++#include "isph3a.h"
++#include "ispstat.h"
++
++#define IS_OUT_OF_BOUNDS(value, min, max) \
++ (((value) < (min)) || ((value) > (max)))
++
++static void isph3a_af_setup_regs(struct ispstat *af, void *priv)
++{
++ struct isph3a_af_config *conf = priv;
++ u32 pcr;
++ u32 pax1;
++ u32 pax2;
++ u32 paxstart;
++ u32 coef;
++ u32 base_coef_set0;
++ u32 base_coef_set1;
++ int index;
++
++ if (af->state == ISPSTAT_DISABLED)
++ return;
++
++ isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A,
++ ISPH3A_AFBUFST);
++
++ if (!af->update)
++ return;
++
++ /* Configure Hardware Registers */
++ pax1 = ((conf->paxel.width >> 1) - 1) << AF_PAXW_SHIFT;
++ /* Set height in AFPAX1 */
++ pax1 |= (conf->paxel.height >> 1) - 1;
++ isp_reg_writel(af->isp, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1);
++
++ /* Configure AFPAX2 Register */
++ /* Set Line Increment in AFPAX2 Register */
++ pax2 = ((conf->paxel.line_inc >> 1) - 1) << AF_LINE_INCR_SHIFT;
++ /* Set Vertical Count */
++ pax2 |= (conf->paxel.v_cnt - 1) << AF_VT_COUNT_SHIFT;
++ /* Set Horizontal Count */
++ pax2 |= (conf->paxel.h_cnt - 1);
++ isp_reg_writel(af->isp, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2);
++
++ /* Configure PAXSTART Register */
++ /*Configure Horizontal Start */
++ paxstart = conf->paxel.h_start << AF_HZ_START_SHIFT;
++ /* Configure Vertical Start */
++ paxstart |= conf->paxel.v_start;
++ isp_reg_writel(af->isp, paxstart, OMAP3_ISP_IOMEM_H3A,
++ ISPH3A_AFPAXSTART);
++
++ /*SetIIRSH Register */
++ isp_reg_writel(af->isp, conf->iir.h_start,
++ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH);
++
++ base_coef_set0 = ISPH3A_AFCOEF010;
++ base_coef_set1 = ISPH3A_AFCOEF110;
++ for (index = 0; index <= 8; index += 2) {
++ /*Set IIR Filter0 Coefficients */
++ coef = 0;
++ coef |= conf->iir.coeff_set0[index];
++ coef |= conf->iir.coeff_set0[index + 1] <<
++ AF_COEF_SHIFT;
++ isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
++ base_coef_set0);
++ base_coef_set0 += AFCOEF_OFFSET;
++
++ /*Set IIR Filter1 Coefficients */
++ coef = 0;
++ coef |= conf->iir.coeff_set1[index];
++ coef |= conf->iir.coeff_set1[index + 1] <<
++ AF_COEF_SHIFT;
++ isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
++ base_coef_set1);
++ base_coef_set1 += AFCOEF_OFFSET;
++ }
++ /* set AFCOEF0010 Register */
++ isp_reg_writel(af->isp, conf->iir.coeff_set0[10],
++ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010);
++ /* set AFCOEF1010 Register */
++ isp_reg_writel(af->isp, conf->iir.coeff_set1[10],
++ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010);
++
++ /* PCR Register */
++ /* Set RGB Position */
++ pcr = conf->rgb_pos << AF_RGBPOS_SHIFT;
++ /* Set Accumulator Mode */
++ if (conf->fvmode == AF_MODE_PEAK)
++ pcr |= AF_FVMODE;
++ /* Set A-law */
++ if (conf->alaw_enable)
++ pcr |= AF_ALAW_EN;
++ /* HMF Configurations */
++ if (conf->hmf.enable) {
++ /* Enable HMF */
++ pcr |= AF_MED_EN;
++ /* Set Median Threshold */
++ pcr |= conf->hmf.threshold << AF_MED_TH_SHIFT;
++ }
++ /* Set PCR Register */
++ isp_reg_and_or(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
++ ~AF_PCR_MASK, pcr);
++
++ af->update = 0;
++ af->config_counter += af->inc_config;
++ af->inc_config = 0;
++ af->buf_size = conf->buf_size;
++}
++
++static u32 isph3a_af_get_buf_size(struct isph3a_af_config *conf)
++{
++ return conf->paxel.h_cnt * conf->paxel.v_cnt * AF_PAXEL_SIZE;
++}
++
++/* Function to check paxel parameters */
++static int isph3a_af_validate_params(struct ispstat *af, void *new_conf)
++{
++ struct isph3a_af_config *user_cfg = new_conf;
++ struct isph3a_af_paxel *paxel_cfg = &user_cfg->paxel;
++ struct isph3a_af_iir *iir_cfg = &user_cfg->iir;
++ int index;
++ u32 buf_size;
++
++ /* Check horizontal Count */
++ if (IS_OUT_OF_BOUNDS(paxel_cfg->h_cnt, AF_PAXEL_HORIZONTAL_COUNT_MIN,
++ AF_PAXEL_HORIZONTAL_COUNT_MAX))
++ return -EINVAL;
++
++ /* Check Vertical Count */
++ if (IS_OUT_OF_BOUNDS(paxel_cfg->v_cnt, AF_PAXEL_VERTICAL_COUNT_MIN,
++ AF_PAXEL_VERTICAL_COUNT_MAX))
++ return -EINVAL;
++
++ if (IS_OUT_OF_BOUNDS(paxel_cfg->height, AF_PAXEL_HEIGHT_MIN,
++ AF_PAXEL_HEIGHT_MAX) || paxel_cfg->height % 2)
++ return -EINVAL;
++
++ /* Check width */
++ if (IS_OUT_OF_BOUNDS(paxel_cfg->width, AF_PAXEL_WIDTH_MIN,
++ AF_PAXEL_WIDTH_MAX) || paxel_cfg->width % 2)
++ return -EINVAL;
++
++ /* Check Line Increment */
++ if (IS_OUT_OF_BOUNDS(paxel_cfg->line_inc, AF_PAXEL_INCREMENT_MIN,
++ AF_PAXEL_INCREMENT_MAX) || paxel_cfg->line_inc % 2)
++ return -EINVAL;
++
++ /* Check Horizontal Start */
++ if ((paxel_cfg->h_start < iir_cfg->h_start) ||
++ IS_OUT_OF_BOUNDS(paxel_cfg->h_start,
++ AF_PAXEL_HZSTART_MIN, AF_PAXEL_HZSTART_MAX))
++ return -EINVAL;
++
++ /* Check IIR */
++ for (index = 0; index < AF_NUM_COEF; index++) {
++ if ((iir_cfg->coeff_set0[index]) > AF_COEF_MAX)
++ return -EINVAL;
++
++ if ((iir_cfg->coeff_set1[index]) > AF_COEF_MAX)
++ return -EINVAL;
++ }
++
++ if (IS_OUT_OF_BOUNDS(iir_cfg->h_start, AF_IIRSH_MIN, AF_IIRSH_MAX))
++ return -EINVAL;
++
++ /* Hack: If paxel size is 12, the 10th AF window may be corrupted */
++ if ((paxel_cfg->h_cnt * paxel_cfg->v_cnt > 9) &&
++ (paxel_cfg->width * paxel_cfg->height == 12))
++ return -EINVAL;
++
++ buf_size = isph3a_af_get_buf_size(user_cfg);
++ if (buf_size > user_cfg->buf_size)
++ /* User buf_size request wasn't enough */
++ user_cfg->buf_size = buf_size;
++ else if (user_cfg->buf_size > AF_MAX_BUF_SIZE)
++ user_cfg->buf_size = AF_MAX_BUF_SIZE;
++
++ return 0;
++}
++
++/* Update local parameters */
++static void isph3a_af_set_params(struct ispstat *af, void *new_conf)
++{
++ struct isph3a_af_config *user_cfg = new_conf;
++ struct isph3a_af_config *cur_cfg = af->priv;
++ int update = 0;
++ int index;
++
++ /* alaw */
++ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
++ update = 1;
++ goto out;
++ }
++
++ /* hmf */
++ if (cur_cfg->hmf.enable != user_cfg->hmf.enable) {
++ update = 1;
++ goto out;
++ }
++ if (cur_cfg->hmf.threshold != user_cfg->hmf.threshold) {
++ update = 1;
++ goto out;
++ }
++
++ /* rgbpos */
++ if (cur_cfg->rgb_pos != user_cfg->rgb_pos) {
++ update = 1;
++ goto out;
++ }
++
++ /* iir */
++ if (cur_cfg->iir.h_start != user_cfg->iir.h_start) {
++ update = 1;
++ goto out;
++ }
++ for (index = 0; index < AF_NUM_COEF; index++) {
++ if (cur_cfg->iir.coeff_set0[index] !=
++ user_cfg->iir.coeff_set0[index]) {
++ update = 1;
++ goto out;
++ }
++ if (cur_cfg->iir.coeff_set1[index] !=
++ user_cfg->iir.coeff_set1[index]) {
++ update = 1;
++ goto out;
++ }
++ }
++
++ /* paxel */
++ if ((cur_cfg->paxel.width != user_cfg->paxel.width) ||
++ (cur_cfg->paxel.height != user_cfg->paxel.height) ||
++ (cur_cfg->paxel.h_start != user_cfg->paxel.h_start) ||
++ (cur_cfg->paxel.v_start != user_cfg->paxel.v_start) ||
++ (cur_cfg->paxel.h_cnt != user_cfg->paxel.h_cnt) ||
++ (cur_cfg->paxel.v_cnt != user_cfg->paxel.v_cnt) ||
++ (cur_cfg->paxel.line_inc != user_cfg->paxel.line_inc)) {
++ update = 1;
++ goto out;
++ }
++
++ /* af_mode */
++ if (cur_cfg->fvmode != user_cfg->fvmode)
++ update = 1;
++
++out:
++ if (update || !af->configured) {
++ memcpy(cur_cfg, user_cfg, sizeof(*cur_cfg));
++ af->inc_config++;
++ af->update = 1;
++ /*
++ * User might be asked for a bigger buffer than necessary for
++ * this configuration. In order to return the right amount of
++ * data during buffer request, let's calculate the size here
++ * instead of stick with user_cfg->buf_size.
++ */
++ cur_cfg->buf_size = isph3a_af_get_buf_size(cur_cfg);
++ }
++}
++
++static long isph3a_af_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
++{
++ struct ispstat *stat = v4l2_get_subdevdata(sd);
++
++ switch (cmd) {
++ case VIDIOC_PRIVATE_ISP_AF_CFG:
++ return ispstat_config(stat, arg);
++ case VIDIOC_PRIVATE_ISP_STAT_REQ:
++ return ispstat_request_statistics(stat, arg);
++ case VIDIOC_PRIVATE_ISP_STAT_EN: {
++ int *en = arg;
++ return ispstat_enable(stat, !!*en);
++ }
++ }
++
++ return -ENOIOCTLCMD;
++
++}
++
++static const struct ispstat_ops isph3a_af_ops = {
++ .validate_params = isph3a_af_validate_params,
++ .set_params = isph3a_af_set_params,
++ .setup_regs = isph3a_af_setup_regs,
++};
++
++static const struct ispstat_pcr_bits isph3a_af_pcr = {
++ .base = OMAP3_ISP_IOMEM_H3A,
++ .offset = ISPH3A_PCR,
++ .enable = ISPH3A_PCR_AF_EN,
++ .busy = ISPH3A_PCR_BUSYAF,
++};
++
++static const struct v4l2_subdev_core_ops isph3a_af_subdev_core_ops = {
++ .ioctl = isph3a_af_ioctl,
++ .subscribe_event = ispstat_subscribe_event,
++ .unsubscribe_event = ispstat_unsubscribe_event,
++};
++
++static const struct v4l2_subdev_video_ops isph3a_af_subdev_video_ops = {
++ .s_stream = ispstat_s_stream,
++};
++
++static const struct v4l2_subdev_ops isph3a_af_subdev_ops = {
++ .core = &isph3a_af_subdev_core_ops,
++ .video = &isph3a_af_subdev_video_ops,
++};
++
++/* Function to register the AF character device driver. */
++int isp_h3a_af_init(struct isp_device *isp)
++{
++ struct ispstat *af = &isp->isp_af;
++ struct isph3a_af_config *af_cfg;
++ struct isph3a_af_config *af_recover_cfg;
++ int ret;
++
++ af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
++ if (af_cfg == NULL)
++ return -ENOMEM;
++
++ memset(af, 0, sizeof(*af));
++ af->ops = &isph3a_af_ops;
++ af->pcr = &isph3a_af_pcr;
++ af->priv = af_cfg;
++ af->dma_ch = -1;
++ af->event_type = V4L2_EVENT_OMAP3ISP_AF;
++ af->isp = isp;
++
++ /* Set recover state configuration */
++ af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
++ if (!af_recover_cfg) {
++ dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
++ "configuration.\n");
++ ret = -ENOMEM;
++ goto err_recover_alloc;
++ }
++
++ af_recover_cfg->paxel.h_start = AF_PAXEL_HZSTART_MIN;
++ af_recover_cfg->paxel.width = AF_PAXEL_WIDTH_MIN;
++ af_recover_cfg->paxel.height = AF_PAXEL_HEIGHT_MIN;
++ af_recover_cfg->paxel.h_cnt = AF_PAXEL_HORIZONTAL_COUNT_MIN;
++ af_recover_cfg->paxel.v_cnt = AF_PAXEL_VERTICAL_COUNT_MIN;
++ af_recover_cfg->paxel.line_inc = AF_PAXEL_INCREMENT_MIN;
++ if (isph3a_af_validate_params(af, af_recover_cfg)) {
++ dev_err(af->isp->dev, "AF: recover configuration is "
++ "invalid.\n");
++ ret = -EINVAL;
++ goto err_conf;
++ }
++
++ af_recover_cfg->buf_size = isph3a_af_get_buf_size(af_recover_cfg);
++ af->recover_priv = af_recover_cfg;
++
++ ret = ispstat_init(af, "AF", &isph3a_af_subdev_ops);
++ if (ret)
++ goto err_conf;
++
++ return 0;
++
++err_conf:
++ kfree(af_recover_cfg);
++err_recover_alloc:
++ kfree(af_cfg);
++
++ return ret;
++}
++
++void isp_h3a_af_cleanup(struct isp_device *isp)
++{
++ kfree(isp->isp_af.priv);
++ kfree(isp->isp_af.recover_priv);
++ ispstat_free(&isp->isp_af);
++}
+diff --git a/drivers/media/video/isp/isphist.c b/drivers/media/video/isp/isphist.c
+new file mode 100644
+index 0000000..cff4a5a
+--- /dev/null
++++ b/drivers/media/video/isp/isphist.c
+@@ -0,0 +1,508 @@
++/*
++ * isphist.c
++ *
++ * HISTOGRAM module for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Author:
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * Based on original version written by:
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Troy Laramy
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/device.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "isphist.h"
++
++#define HIST_CONFIG_DMA 1
++
++#define HIST_USING_DMA(hist) ((hist)->dma_ch >= 0)
++
++/*
++ * isphist_reset_mem - clear Histogram memory before start stats engine.
++ */
++static void isphist_reset_mem(struct ispstat *hist)
++{
++ struct isp_device *isp = hist->isp;
++ struct isphist_config *conf = hist->priv;
++ unsigned int i;
++
++ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
++
++ /*
++ * By setting it, the histogram internal buffer is being cleared at the
++ * same time it's being read. This bit must be cleared afterwards.
++ */
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
++
++ /*
++ * We'll clear 4 words at each iteration for optimization. It avoids
++ * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4.
++ */
++ for (i = HIST_MEM_SIZE / 4; i > 0; i--) {
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ }
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
++ ~ISPHIST_CNT_CLEAR);
++
++ hist->wait_acc_frames = conf->num_acc_frames;
++}
++
++static void isphist_dma_config(struct ispstat *hist)
++{
++ hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
++ hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
++ hist->dma_config.frame_count = 1;
++ hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
++ hist->dma_config.src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA;
++ hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
++ hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
++}
++
++/*
++ * isphist_setup_regs - Helper function to update Histogram registers.
++ */
++static void isphist_setup_regs(struct ispstat *hist, void *priv)
++{
++ struct isp_device *isp = hist->isp;
++ struct isphist_config *conf = priv;
++ int c;
++ u32 cnt;
++ u32 wb_gain;
++ u32 reg_hor[HIST_MAX_REGIONS];
++ u32 reg_ver[HIST_MAX_REGIONS];
++
++ if (!hist->update || hist->state == ISPSTAT_DISABLED ||
++ hist->state == ISPSTAT_DISABLING)
++ return;
++
++ cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;
++
++ wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
++ wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
++ wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
++ if (conf->cfa == HIST_CFA_BAYER)
++ wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;
++
++ /* Regions size and position */
++ for (c = 0; c < HIST_MAX_REGIONS; c++) {
++ if (c < conf->num_regions) {
++ reg_hor[c] = conf->region[c].h_start <<
++ ISPHIST_REG_START_SHIFT;
++ reg_hor[c] = conf->region[c].h_end <<
++ ISPHIST_REG_END_SHIFT;
++ reg_ver[c] = conf->region[c].v_start <<
++ ISPHIST_REG_START_SHIFT;
++ reg_ver[c] = conf->region[c].v_end <<
++ ISPHIST_REG_END_SHIFT;
++ } else {
++ reg_hor[c] = 0;
++ reg_ver[c] = 0;
++ }
++ }
++
++ cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
++ switch (conf->hist_bins) {
++ case HIST_BINS_256:
++ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
++ ISPHIST_CNT_SHIFT_SHIFT;
++ break;
++ case HIST_BINS_128:
++ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
++ ISPHIST_CNT_SHIFT_SHIFT;
++ break;
++ case HIST_BINS_64:
++ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
++ ISPHIST_CNT_SHIFT_SHIFT;
++ break;
++ default: /* HIST_BINS_32 */
++ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
++ ISPHIST_CNT_SHIFT_SHIFT;
++ break;
++ }
++
++ isphist_reset_mem(hist);
++
++ isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
++ isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
++ isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
++ isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
++ isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
++ isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
++ isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
++ isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
++ isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
++ isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);
++
++ hist->update = 0;
++ hist->config_counter += hist->inc_config;
++ hist->inc_config = 0;
++ hist->buf_size = conf->buf_size;
++}
++
++static void isphist_dma_cb(int lch, u16 ch_status, void *data)
++{
++ struct ispstat *hist = data;
++
++ if (ch_status & ~OMAP_DMA_BLOCK_IRQ) {
++ dev_dbg(hist->isp->dev, "hist: DMA error. status = 0x%04x\n",
++ ch_status);
++ omap_stop_dma(lch);
++ isphist_reset_mem(hist);
++ atomic_set(&hist->buf_err, 1);
++ }
++ isp_reg_and(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
++ ~ISPHIST_CNT_CLEAR);
++
++ ispstat_dma_isr(hist);
++ isphist_dma_done(hist->isp);
++}
++
++static int isphist_buf_dma(struct ispstat *hist)
++{
++ dma_addr_t dma_addr = hist->active_buf->dma_addr;
++
++ if (unlikely(!dma_addr)) {
++ dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
++ isphist_reset_mem(hist);
++ return STAT_NO_BUF;
++ }
++
++ if (hist->buf_processing) {
++ dev_dbg(hist->isp->dev, "hist: cannot start new DMA transfer "
++ "while waiting for previous one.\n");
++ return STAT_NO_BUF;
++ }
++
++ isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
++ isp_reg_or(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
++ ISPHIST_CNT_CLEAR);
++ isp_flush(hist->isp);
++ hist->dma_config.dst_start = dma_addr;
++ hist->dma_config.elem_count = hist->buf_size / sizeof(u32);
++ omap_set_dma_params(hist->dma_ch, &hist->dma_config);
++
++ omap_start_dma(hist->dma_ch);
++
++ return STAT_BUF_WAITING_DMA;
++}
++
++static int isphist_buf_pio(struct ispstat *hist)
++{
++ struct isp_device *isp = hist->isp;
++ u32 *buf = hist->active_buf->virt_addr;
++ unsigned int i;
++
++ if (!buf) {
++ dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
++ isphist_reset_mem(hist);
++ return STAT_NO_BUF;
++ }
++
++ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
++
++ /*
++ * By setting it, the histogram internal buffer is being cleared at the
++ * same time it's being read. This bit must be cleared just after all
++ * data is acquired.
++ */
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
++
++ /*
++ * We'll read 4 times a 4-bytes-word at each iteration for
++ * optimization. It avoids 3/4 of the jumps. We also know buf_size is
++ * divisible by 16.
++ */
++ for (i = hist->buf_size / 16; i > 0; i--) {
++ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
++ }
++ isp_reg_and(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
++ ~ISPHIST_CNT_CLEAR);
++
++ return STAT_BUF_DONE;
++}
++
++/*
++ * isphist_buf_process - Callback from ISP driver for HIST interrupt.
++ */
++static int isphist_buf_process(struct ispstat *hist)
++{
++ struct isphist_config *user_cfg = hist->priv;
++ int ret;
++
++ if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
++ isphist_reset_mem(hist);
++ return STAT_NO_BUF;
++ }
++
++ if (--(hist->wait_acc_frames))
++ return STAT_NO_BUF;
++
++ if (HIST_USING_DMA(hist))
++ ret = isphist_buf_dma(hist);
++ else
++ ret = isphist_buf_pio(hist);
++
++ hist->wait_acc_frames = user_cfg->num_acc_frames;
++
++ return ret;
++}
++
++static u32 isphist_get_buf_size(struct isphist_config *conf)
++{
++ return HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
++}
++
++/*
++ * isphist_validate_params - Helper function to check user given params.
++ * @user_cfg: Pointer to user configuration structure.
++ *
++ * Returns 0 on success configuration.
++ */
++static int isphist_validate_params(struct ispstat *hist, void *new_conf)
++{
++ struct isphist_config *user_cfg = new_conf;
++ int c;
++ u32 buf_size;
++
++ if (user_cfg->cfa > HIST_CFA_FOVEONX3)
++ return -EINVAL;
++
++ /* Regions size and position */
++
++ if ((user_cfg->num_regions < HIST_MIN_REGIONS) ||
++ (user_cfg->num_regions > HIST_MAX_REGIONS))
++ return -EINVAL;
++
++ /* Regions */
++ for (c = 0; c < user_cfg->num_regions; c++) {
++ if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
++ return -EINVAL;
++ if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
++ return -EINVAL;
++ if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
++ return -EINVAL;
++ if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
++ return -EINVAL;
++ if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
++ return -EINVAL;
++ if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
++ return -EINVAL;
++ }
++
++ switch (user_cfg->num_regions) {
++ case 1:
++ if (user_cfg->hist_bins > HIST_BINS_256)
++ return -EINVAL;
++ break;
++ case 2:
++ if (user_cfg->hist_bins > HIST_BINS_128)
++ return -EINVAL;
++ break;
++ default: /* 3 or 4 */
++ if (user_cfg->hist_bins > HIST_BINS_64)
++ return -EINVAL;
++ break;
++ }
++
++ buf_size = isphist_get_buf_size(user_cfg);
++ if (buf_size > user_cfg->buf_size)
++ /* User's buf_size request wasn't enoght */
++ user_cfg->buf_size = buf_size;
++ else if (user_cfg->buf_size > HIST_MAX_BUF_SIZE)
++ user_cfg->buf_size = HIST_MAX_BUF_SIZE;
++
++ return 0;
++}
++
++static int isphist_comp_params(struct ispstat *hist,
++ struct isphist_config *user_cfg)
++{
++ struct isphist_config *cur_cfg = hist->priv;
++ int c;
++
++ if (cur_cfg->cfa != user_cfg->cfa)
++ return 1;
++
++ if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
++ return 1;
++
++ if (cur_cfg->hist_bins != user_cfg->hist_bins)
++ return 1;
++
++ for (c = 0; c < HIST_MAX_WG; c++) {
++ if (c == 3 && user_cfg->cfa == HIST_CFA_FOVEONX3)
++ break;
++ else if (cur_cfg->wg[c] != user_cfg->wg[c])
++ return 1;
++ }
++
++ if (cur_cfg->num_regions != user_cfg->num_regions)
++ return 1;
++
++ /* Regions */
++ for (c = 0; c < user_cfg->num_regions; c++) {
++ if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
++ return 1;
++ if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
++ return 1;
++ if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
++ return 1;
++ if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
++ return 1;
++ }
++
++ return 0;
++}
++
++/*
++ * isphist_update_params - Helper function to check and store user given params.
++ * @new_conf: Pointer to user configuration structure.
++ */
++static void isphist_set_params(struct ispstat *hist, void *new_conf)
++{
++ struct isphist_config *user_cfg = new_conf;
++ struct isphist_config *cur_cfg = hist->priv;
++
++ if (!hist->configured || isphist_comp_params(hist, user_cfg)) {
++ memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
++ if (user_cfg->num_acc_frames == 0)
++ user_cfg->num_acc_frames = 1;
++ hist->inc_config++;
++ hist->update = 1;
++ /*
++ * User might be asked for a bigger buffer than necessary for
++ * this configuration. In order to return the right amount of
++ * data during buffer request, let's calculate the size here
++ * instead of stick with user_cfg->buf_size.
++ */
++ cur_cfg->buf_size = isphist_get_buf_size(cur_cfg);
++
++ }
++}
++
++static long isphist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
++{
++ struct ispstat *stat = v4l2_get_subdevdata(sd);
++
++ switch (cmd) {
++ case VIDIOC_PRIVATE_ISP_HIST_CFG:
++ return ispstat_config(stat, arg);
++ case VIDIOC_PRIVATE_ISP_STAT_REQ:
++ return ispstat_request_statistics(stat, arg);
++ case VIDIOC_PRIVATE_ISP_STAT_EN: {
++ int *en = arg;
++ return ispstat_enable(stat, !!*en);
++ }
++ }
++
++ return -ENOIOCTLCMD;
++
++}
++
++static const struct ispstat_ops isphist_ops = {
++ .validate_params = isphist_validate_params,
++ .set_params = isphist_set_params,
++ .setup_regs = isphist_setup_regs,
++ .buf_process = isphist_buf_process,
++};
++
++static const struct ispstat_pcr_bits isphist_pcr = {
++ .base = OMAP3_ISP_IOMEM_HIST,
++ .offset = ISPHIST_PCR,
++ .enable = ISPHIST_PCR_ENABLE,
++ .busy = ISPHIST_PCR_BUSY,
++};
++
++static const struct v4l2_subdev_core_ops isphist_subdev_core_ops = {
++ .ioctl = isphist_ioctl,
++ .subscribe_event = ispstat_subscribe_event,
++ .unsubscribe_event = ispstat_unsubscribe_event,
++};
++
++static const struct v4l2_subdev_video_ops isphist_subdev_video_ops = {
++ .s_stream = ispstat_s_stream,
++};
++
++static const struct v4l2_subdev_ops isphist_subdev_ops = {
++ .core = &isphist_subdev_core_ops,
++ .video = &isphist_subdev_video_ops,
++};
++
++/*
++ * isphist_init - Module Initialization.
++ */
++int isp_hist_init(struct isp_device *isp)
++{
++ struct ispstat *hist = &isp->isp_hist;
++ struct isphist_config *hist_cfg;
++ int ret = -1;
++
++ hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
++ if (hist_cfg == NULL)
++ return -ENOMEM;
++
++ memset(hist, 0, sizeof(*hist));
++ if (HIST_CONFIG_DMA)
++ ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
++ isphist_dma_cb, hist, &hist->dma_ch);
++ if (ret) {
++ if (HIST_CONFIG_DMA)
++ dev_warn(isp->dev, "hist: DMA request channel failed. "
++ "Using PIO only.\n");
++ hist->dma_ch = -1;
++ } else {
++ dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch);
++ isphist_dma_config(hist);
++ omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ);
++ }
++
++ hist->ops = &isphist_ops;
++ hist->pcr = &isphist_pcr;
++ hist->priv = hist_cfg;
++ hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
++ hist->isp = isp;
++
++ ret = ispstat_init(hist, "histogram", &isphist_subdev_ops);
++ if (ret) {
++ kfree(hist_cfg);
++ if (HIST_USING_DMA(hist))
++ omap_free_dma(hist->dma_ch);
++ }
++
++ return ret;
++}
++
++/*
++ * isphist_cleanup - Module cleanup.
++ */
++void isp_hist_cleanup(struct isp_device *isp)
++{
++ if (HIST_USING_DMA(&isp->isp_hist))
++ omap_free_dma(isp->isp_hist.dma_ch);
++ kfree(isp->isp_hist.priv);
++ ispstat_free(&isp->isp_hist);
++}
++
+diff --git a/drivers/media/video/isp/isphist.h b/drivers/media/video/isp/isphist.h
+new file mode 100644
+index 0000000..c3b0a1f
+--- /dev/null
++++ b/drivers/media/video/isp/isphist.h
+@@ -0,0 +1,34 @@
++/*
++ * isphist.h
++ *
++ * Header file for HISTOGRAM module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * David Cohen <david.cohen@nokia.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Troy Laramy
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_HIST_H
++#define OMAP_ISP_HIST_H
++
++#include <mach/isp_user.h>
++
++#define ISPHIST_IN_BIT_WIDTH_CCDC 10
++
++struct isp_device;
++
++int isp_hist_init(struct isp_device *isp);
++void isp_hist_cleanup(struct isp_device *isp);
++
++#endif /* OMAP_ISP_HIST */
+diff --git a/drivers/media/video/isp/isppreview.c b/drivers/media/video/isp/isppreview.c
+new file mode 100644
+index 0000000..4fb291b
+--- /dev/null
++++ b/drivers/media/video/isp/isppreview.c
+@@ -0,0 +1,2295 @@
++/*
++ * isppreview.c
++ *
++ * Driver Library for Preview module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Senthilvadivu Guruswamy <svadivu@ti.com>
++ * Pallavi Kulkarni <p-kulkarni@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/uaccess.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "isppreview.h"
++
++/* Structure for saving/restoring preview module registers */
++static struct isp_reg ispprev_reg_list[] = {
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, 0x0000}, /* See context saving. */
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RADR_OFFSET, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_DSDR_ADDR, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_DRKF_OFFSET, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WSDR_ADDR, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WADD_OFFSET, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_NF, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WB_DGAIN, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WBGAIN, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT1, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT2, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT3, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT4, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT5, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF1, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF2, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC_OFFSET, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2, 0x0000},
++ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3, 0x0000},
++ {0, ISP_TOK_TERM, 0x0000}
++};
++
++
++/* Default values in Office Flourescent Light for RGBtoRGB Blending */
++static struct ispprev_rgbtorgb flr_rgb2rgb = {
++ { /* RGB-RGB Matrix */
++ {0x01E2, 0x0F30, 0x0FEE},
++ {0x0F9B, 0x01AC, 0x0FB9},
++ {0x0FE0, 0x0EC0, 0x0260}
++ }, /* RGB Offset */
++ {0x0000, 0x0000, 0x0000}
++};
++
++/* Default values in Office Flourescent Light for RGB to YUV Conversion*/
++static struct ispprev_csc flr_prev_csc = {
++ { /* CSC Coef Matrix */
++ {66, 129, 25},
++ {-38, -75, 112},
++ {112, -94 , -18}
++ }, /* CSC Offset */
++ {0x0, 0x0, 0x0}
++};
++
++/* Default values in Office Flourescent Light for CFA Gradient*/
++#define FLR_CFA_GRADTHRS_HORZ 0x28
++#define FLR_CFA_GRADTHRS_VERT 0x28
++
++/* Default values in Office Flourescent Light for Chroma Suppression*/
++#define FLR_CSUP_GAIN 0x0D
++#define FLR_CSUP_THRES 0xEB
++
++/* Default values in Office Flourescent Light for Noise Filter*/
++#define FLR_NF_STRGTH 0x03
++
++/* Default values in Office Flourescent Light for White Balance*/
++#define FLR_WBAL_DGAIN 0x100
++#define FLR_WBAL_COEF0 0x20
++#define FLR_WBAL_COEF1 0x29
++#define FLR_WBAL_COEF2 0x2d
++#define FLR_WBAL_COEF3 0x20
++
++#define FLR_WBAL_COEF0_ES1 0x20
++#define FLR_WBAL_COEF1_ES1 0x23
++#define FLR_WBAL_COEF2_ES1 0x39
++#define FLR_WBAL_COEF3_ES1 0x20
++
++/* Default values in Office Flourescent Light for Black Adjustment*/
++#define FLR_BLKADJ_BLUE 0x0
++#define FLR_BLKADJ_GREEN 0x0
++#define FLR_BLKADJ_RED 0x0
++
++#define DEF_DETECT_CORRECT_VAL 0xe
++
++#define PREV_MIN_WIDTH 64
++#define PREV_MIN_HEIGHT 8
++#define PREV_MAX_HEIGHT 16384
++
++/*
++ * Coeficient Tables for the submodules in Preview.
++ * Array is initialised with the values from.the tables text file.
++ */
++
++/*
++ * CFA Filter Coefficient Table
++ *
++ */
++static u32 cfa_coef_table[] = {
++#include "cfa_coef_table.h"
++};
++
++/*
++ * Gamma Correction Table - Red
++ */
++static u32 redgamma_table[] = {
++#include "redgamma_table.h"
++};
++
++/*
++ * Gamma Correction Table - Green
++ */
++static u32 greengamma_table[] = {
++#include "greengamma_table.h"
++};
++
++/*
++ * Gamma Correction Table - Blue
++ */
++static u32 bluegamma_table[] = {
++#include "bluegamma_table.h"
++};
++
++/*
++ * Noise Filter Threshold table
++ */
++static u32 noise_filter_table[] = {
++#include "noise_filter_table.h"
++};
++
++/*
++ * Luminance Enhancement Table
++ */
++static u32 luma_enhance_table[] = {
++#include "luma_enhance_table.h"
++};
++
++/*
++ * isppreview_enable_invalaw - Enable/Disable Inverse A-Law module in Preview.
++ * @enable: 1 - Reverse the A-Law done in CCDC.
++ */
++static void
++isppreview_enable_invalaw(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ u32 pcr_val = 0;
++
++ pcr_val = isp_reg_readl(isp,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++
++ if (enable) {
++ isp_reg_writel(isp,
++ pcr_val | ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++ } else {
++ isp_reg_writel(isp, pcr_val &
++ ~(ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW),
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++ }
++}
++
++/*
++ * isppreview_enable_drkframe_capture - Enable/Disable of the darkframe capture.
++ * @prev -
++ * @enable: 1 - Enable, 0 - Disable
++ *
++ * NOTE: PRV_WSDR_ADDR and PRV_WADD_OFFSET must be set also
++ * The proccess is applied for each captured frame.
++ */
++static void
++isppreview_enable_drkframe_capture(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_DRKFCAP);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_DRKFCAP);
++}
++
++/*
++ * isppreview_enable_drkframe - Enable/Disable of the darkframe subtract.
++ * @enable: 1 - Acquires memory bandwidth since the pixels in each frame is
++ * subtracted with the pixels in the current frame.
++ *
++ * The proccess is applied for each captured frame.
++ */
++static void
++isppreview_enable_drkframe(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_DRKFEN);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_DRKFEN);
++}
++
++/*
++ * isppreview_config_drkf_shadcomp - Configures shift value in shading comp.
++ * @scomp_shtval: 3bit value of shift used in shading compensation.
++ */
++static void
++isppreview_config_drkf_shadcomp(struct isp_prev_device *prev,
++ const void *scomp_shtval)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const u32 *shtval = scomp_shtval;
++ u32 pcr_val = isp_reg_readl(isp,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++
++ pcr_val &= ISPPRV_PCR_SCOMP_SFT_MASK;
++ isp_reg_writel(isp,
++ pcr_val | (*shtval << ISPPRV_PCR_SCOMP_SFT_SHIFT),
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++}
++
++/*
++ * isppreview_enable_hmed - Enables/Disables of the Horizontal Median Filter.
++ * @enable: 1 - Enables Horizontal Median Filter.
++ */
++static void
++isppreview_enable_hmed(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_HMEDEN);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_HMEDEN);
++}
++
++/*
++ * isppreview_config_hmed - Configures the Horizontal Median Filter.
++ * @prev_hmed: Structure containing the odd and even distance between the
++ * pixels in the image along with the filter threshold.
++ */
++static void
++isppreview_config_hmed(struct isp_prev_device *prev, const void *prev_hmed)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_hmed *hmed = prev_hmed;
++
++ u32 odddist = 0;
++ u32 evendist = 0;
++
++ if (hmed->odddist == 1)
++ odddist = ~ISPPRV_HMED_ODDDIST;
++ else
++ odddist = ISPPRV_HMED_ODDDIST;
++
++ if (hmed->evendist == 1)
++ evendist = ~ISPPRV_HMED_EVENDIST;
++ else
++ evendist = ISPPRV_HMED_EVENDIST;
++
++ isp_reg_writel(isp, odddist | evendist | (hmed->thres <<
++ ISPPRV_HMED_THRESHOLD_SHIFT),
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED);
++
++}
++
++/*
++ * isppreview_config_noisefilter - Configures the Noise Filter.
++ * @prev_nf: Structure containing the noisefilter table, strength to be used
++ * for the noise filter and the defect correction enable flag.
++ */
++static void
++isppreview_config_noisefilter(struct isp_prev_device *prev, const void *prev_nf)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_nf *nf = prev_nf;
++ int i = 0;
++
++ isp_reg_writel(isp, nf->spread, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_NF);
++ isp_reg_writel(isp, ISPPRV_NF_TABLE_ADDR,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
++ for (i = 0; i < ISPPRV_NF_TBL_SIZE; i++) {
++ isp_reg_writel(isp, nf->table[i],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
++ }
++}
++
++/*
++ * isppreview_config_dcor - Configures the defect correction
++ * @prev_nf: Structure containing the defect correction structure
++ */
++static void
++isppreview_config_dcor(struct isp_prev_device *prev, const void *prev_dcor)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_dcor *dcor = prev_dcor;
++
++ if (dcor->couplet_mode_en) {
++ isp_reg_writel(isp, dcor->detect_correct[0],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0);
++ isp_reg_writel(isp, dcor->detect_correct[1],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1);
++ isp_reg_writel(isp, dcor->detect_correct[2],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2);
++ isp_reg_writel(isp, dcor->detect_correct[3],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3);
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_DCCOUP);
++ } else {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_DCCOUP);
++ }
++}
++
++/*
++ * isppreview_config_cfa - Configures the CFA Interpolation parameters.
++ * @prev_cfa: Structure containing the CFA interpolation table, CFA format
++ * in the image, vertical and horizontal gradient threshold.
++ */
++static void
++isppreview_config_cfa(struct isp_prev_device *prev, const void *prev_cfa)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_cfa *cfa = prev_cfa;
++ int i = 0;
++
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_CFAFMT_MASK,
++ (cfa->format << ISPPRV_PCR_CFAFMT_SHIFT));
++
++ isp_reg_writel(isp,
++ (cfa->gradthrs_vert << ISPPRV_CFA_GRADTH_VER_SHIFT) |
++ (cfa->gradthrs_horz << ISPPRV_CFA_GRADTH_HOR_SHIFT),
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA);
++
++ isp_reg_writel(isp, ISPPRV_CFA_TABLE_ADDR,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
++
++ for (i = 0; i < ISPPRV_CFA_TBL_SIZE; i++) {
++ isp_reg_writel(isp, cfa->table[i],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
++ }
++}
++
++/*
++ * isppreview_config_gammacorrn - Configures the Gamma Correction table values
++ * @gtable: Structure containing the table for red, blue, green gamma table.
++ */
++static void
++isppreview_config_gammacorrn(struct isp_prev_device *prev, const void *gtable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_gtables *gt = gtable;
++ unsigned int i;
++
++ isp_reg_writel(isp, ISPPRV_REDGAMMA_TABLE_ADDR,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
++ for (i = 0; i < ISPPRV_GAMMA_TBL_SIZE; i++)
++ isp_reg_writel(isp, gt->red[i], OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_SET_TBL_DATA);
++
++ isp_reg_writel(isp, ISPPRV_GREENGAMMA_TABLE_ADDR,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
++ for (i = 0; i < ISPPRV_GAMMA_TBL_SIZE; i++)
++ isp_reg_writel(isp, gt->green[i], OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_SET_TBL_DATA);
++
++ isp_reg_writel(isp, ISPPRV_BLUEGAMMA_TABLE_ADDR,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
++ for (i = 0; i < ISPPRV_GAMMA_TBL_SIZE; i++)
++ isp_reg_writel(isp, gt->blue[i], OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_SET_TBL_DATA);
++}
++
++/*
++ * isppreview_config_luma_enhancement - Sets the Luminance Enhancement table.
++ * @ytable: Structure containing the table for Luminance Enhancement table.
++ */
++static void
++isppreview_config_luma_enhancement(struct isp_prev_device *prev,
++ const void *ytable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_luma *yt = ytable;
++ int i = 0;
++
++ isp_reg_writel(isp, ISPPRV_YENH_TABLE_ADDR,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
++ for (i = 0; i < ISPPRV_YENH_TBL_SIZE; i++) {
++ isp_reg_writel(isp, yt->table[i],
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
++ }
++}
++
++/*
++ * isppreview_config_chroma_suppression - Configures the Chroma Suppression.
++ * @csup: Structure containing the threshold value for suppression
++ * and the hypass filter enable flag.
++ */
++static void
++isppreview_config_chroma_suppression(struct isp_prev_device *prev,
++ const void *csup)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_csup *cs = csup;
++
++ isp_reg_writel(isp,
++ cs->gain | (cs->thres << ISPPRV_CSUP_THRES_SHIFT) |
++ (cs->hypf_en << ISPPRV_CSUP_HPYF_SHIFT),
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP);
++}
++
++/*
++ * isppreview_enable_noisefilter - Enables/Disables the Noise Filter.
++ * @enable: 1 - Enables the Noise Filter.
++ */
++static void
++isppreview_enable_noisefilter(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_NFEN);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_NFEN);
++}
++
++/*
++ * isppreview_enable_dcor - Enables/Disables the defect correction.
++ * @enable: 1 - Enables the defect correction.
++ */
++static void
++isppreview_enable_dcor(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_DCOREN);
++ else {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_DCOREN);
++ }
++}
++
++/*
++ * isppreview_enable_cfa - Enable/Disable the CFA Interpolation.
++ * @enable: 1 - Enables the CFA.
++ */
++static void
++isppreview_enable_cfa(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_CFAEN);
++ else {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_CFAEN);
++ }
++}
++
++/*
++ * isppreview_enable_gammabypass - Enables/Disables the GammaByPass
++ * @enable: 1 - Bypasses Gamma - 10bit input is cropped to 8MSB.
++ * 0 - Goes through Gamma Correction. input and output is 10bit.
++ */
++static void
++isppreview_enable_gammabypass(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable) {
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_GAMMA_BYPASS);
++ } else {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_GAMMA_BYPASS);
++ }
++}
++
++/*
++ * isppreview_enable_luma_enhancement - Enables/Disables Luminance Enhancement
++ * @enable: 1 - Enable the Luminance Enhancement.
++ */
++static void
++isppreview_enable_luma_enhancement(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable) {
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_YNENHEN);
++ } else {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_YNENHEN);
++ }
++}
++
++/*
++ * isppreview_enable_chroma_suppression - Enables/Disables Chrominance Suppr.
++ * @enable: 1 - Enable the Chrominance Suppression.
++ */
++static void
++isppreview_enable_chroma_suppression(struct isp_prev_device *prev, u8 enable)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (enable)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_SUPEN);
++ else {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_SUPEN);
++ }
++}
++
++/*
++ * isppreview_config_whitebalance - Configures the White Balance parameters.
++ * @prev_wbal: Structure containing the digital gain and white balance
++ * coefficient.
++ *
++ * Coefficient matrix always with default values.
++ */
++void
++isppreview_config_whitebalance(struct isp_prev_device *prev,
++ const void *prev_wbal)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_wbal *wbal = prev_wbal;
++ u32 val;
++
++ isp_reg_writel(isp, wbal->dgain, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_WB_DGAIN);
++
++ val = wbal->coef0 << ISPPRV_WBGAIN_COEF0_SHIFT;
++ val |= wbal->coef1 << ISPPRV_WBGAIN_COEF1_SHIFT;
++ val |= wbal->coef2 << ISPPRV_WBGAIN_COEF2_SHIFT;
++ val |= wbal->coef3 << ISPPRV_WBGAIN_COEF3_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_WBGAIN);
++
++ isp_reg_writel(isp,
++ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_0_SHIFT |
++ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_1_SHIFT |
++ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_2_SHIFT |
++ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_3_SHIFT |
++ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_0_SHIFT |
++ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_1_SHIFT |
++ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_2_SHIFT |
++ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_3_SHIFT |
++ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_0_SHIFT |
++ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_1_SHIFT |
++ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_2_SHIFT |
++ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_3_SHIFT |
++ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_0_SHIFT |
++ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_1_SHIFT |
++ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_2_SHIFT |
++ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_3_SHIFT,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL);
++}
++
++/*
++ * isppreview_config_blkadj - Configures the Black Adjustment parameters.
++ * @prev_blkadj: Structure containing the black adjustment towards red, green,
++ * blue.
++ */
++static void
++isppreview_config_blkadj(struct isp_prev_device *prev, const void *prev_blkadj)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_blkadj *blkadj = prev_blkadj;
++
++ isp_reg_writel(isp, blkadj->blue |
++ (blkadj->green << ISPPRV_BLKADJOFF_G_SHIFT) |
++ (blkadj->red << ISPPRV_BLKADJOFF_R_SHIFT),
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF);
++}
++
++/*
++ * isppreview_config_rgb_blending - Configures the RGB-RGB Blending matrix.
++ * @rgb2rgb: Structure containing the rgb to rgb blending matrix and the rgb
++ * offset.
++ */
++static void
++isppreview_config_rgb_blending(struct isp_prev_device *prev,
++ const void *rgb2rgb)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_rgbtorgb *rgbrgb = rgb2rgb;
++ u32 val = 0;
++
++ val = (rgbrgb->matrix[0][0] & 0xfff) << ISPPRV_RGB_MAT1_MTX_RR_SHIFT;
++ val |= (rgbrgb->matrix[0][1] & 0xfff) << ISPPRV_RGB_MAT1_MTX_GR_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_MAT1);
++
++ val = (rgbrgb->matrix[0][2] & 0xfff) << ISPPRV_RGB_MAT2_MTX_BR_SHIFT;
++ val |= (rgbrgb->matrix[1][0] & 0xfff) << ISPPRV_RGB_MAT2_MTX_RG_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_MAT2);
++
++ val = (rgbrgb->matrix[1][1] & 0xfff) << ISPPRV_RGB_MAT3_MTX_GG_SHIFT;
++ val |= (rgbrgb->matrix[1][2] & 0xfff) << ISPPRV_RGB_MAT3_MTX_BG_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_MAT3);
++
++ val = (rgbrgb->matrix[2][0] & 0xfff) << ISPPRV_RGB_MAT4_MTX_RB_SHIFT;
++ val |= (rgbrgb->matrix[2][1] & 0xfff) << ISPPRV_RGB_MAT4_MTX_GB_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_MAT4);
++
++ val = (rgbrgb->matrix[2][2] & 0xfff) << ISPPRV_RGB_MAT5_MTX_BB_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_MAT5);
++
++ val = (rgbrgb->offset[0] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT;
++ val |= (rgbrgb->offset[1] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_OFF1);
++
++ val = (rgbrgb->offset[2] & 0x3ff) << ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RGB_OFF2);
++}
++
++/*
++ * Configures the RGB-YCbYCr conversion matrix
++ * @prev_csc: Structure containing the RGB to YCbYCr matrix and the
++ * YCbCr offset.
++ */
++static void
++isppreview_config_rgb_to_ycbcr(struct isp_prev_device *prev,
++ const void *prev_csc)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_csc *csc = prev_csc;
++ u32 val = 0;
++
++ val = (csc->matrix[0][0] & 0x3ff) << ISPPRV_CSC0_RY_SHIFT;
++ val |= (csc->matrix[0][1] & 0x3ff) << ISPPRV_CSC0_GY_SHIFT;
++ val |= (csc->matrix[0][2] & 0x3ff) << ISPPRV_CSC0_BY_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0);
++
++ val = (csc->matrix[1][0] & 0x3ff) << ISPPRV_CSC1_RCB_SHIFT;
++ val |= (csc->matrix[1][1] & 0x3ff) << ISPPRV_CSC1_GCB_SHIFT;
++ val |= (csc->matrix[1][2] & 0x3ff) << ISPPRV_CSC1_BCB_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1);
++
++ val = (csc->matrix[2][0] & 0x3ff) << ISPPRV_CSC2_RCR_SHIFT;
++ val |= (csc->matrix[2][1] & 0x3ff) << ISPPRV_CSC2_GCR_SHIFT;
++ val |= (csc->matrix[2][2] & 0x3ff) << ISPPRV_CSC2_BCR_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2);
++
++ val = (csc->offset[0] & 0xff) << ISPPRV_CSC_OFFSET_Y_SHIFT;
++ val |= (csc->offset[1] & 0xff) << ISPPRV_CSC_OFFSET_CB_SHIFT;
++ val |= (csc->offset[2] & 0xff) << ISPPRV_CSC_OFFSET_CR_SHIFT;
++ isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_CSC_OFFSET);
++}
++
++/*
++ * isppreview_query_contrast - Query the contrast.
++ */
++static u8 isppreview_query_contrast(struct isp_prev_device *prev)
++{
++ return prev->params.contrast / ISPPRV_CONTRAST_UNITS;
++}
++
++/*
++ * isppreview_update_contrast - Updates the contrast.
++ * @contrast: Pointer to hold the current programmed contrast value.
++ *
++ * Value should be programmed before enabling the module.
++ */
++static int
++isppreview_update_contrast(struct isp_prev_device *prev, u8 contrast)
++{
++ struct prev_params *params = &prev->params;
++
++ contrast = clamp_t(u8, contrast, 0, ISPPRV_CONTRAST_HIGH);
++
++ if (params->contrast != (contrast * ISPPRV_CONTRAST_UNITS)) {
++ params->contrast = contrast * ISPPRV_CONTRAST_UNITS;
++ prev->update |= PREV_CONTRAST;
++ }
++
++ return 0;
++}
++
++/*
++ * isppreview_config_contrast - Configures the Contrast.
++ * @params: Contrast value (u8 pointer, U8Q0 format).
++ *
++ * Value should be programmed before enabling the module.
++ */
++static void
++isppreview_config_contrast(struct isp_prev_device *prev, const void *params)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ u32 value;
++
++ value = isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT);
++ value &= ~(0xff << ISPPRV_CNT_BRT_CNT_SHIFT);
++ value |= *(u8 *)params << ISPPRV_CNT_BRT_CNT_SHIFT,
++ isp_reg_writel(isp, value, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT);
++}
++
++/*
++ * isppreview_update_brightness - Updates the brightness in preview module.
++ * @brightness: Pointer to hold the current programmed brightness value.
++ *
++ */
++static int
++isppreview_update_brightness(struct isp_prev_device *prev, u8 brightness)
++{
++ struct prev_params *params = &prev->params;
++
++ brightness = clamp_t(u8, brightness, 0, ISPPRV_BRIGHT_HIGH);
++
++ if (params->brightness != (brightness * ISPPRV_BRIGHT_UNITS)) {
++ params->brightness = brightness * ISPPRV_BRIGHT_UNITS;
++ prev->update |= PREV_BRIGHTNESS;
++ }
++
++ return 0;
++}
++
++/*
++ * isppreview_config_brightness - Configures the brightness.
++ * @params: Brightness value (u8 pointer, U8Q0 format).
++ */
++static void
++isppreview_config_brightness(struct isp_prev_device *prev, const void *params)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ u32 value;
++
++ value = isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT);
++ value &= ~(0xff << ISPPRV_CNT_BRT_BRT_SHIFT);
++ value |= *(u8 *)params << ISPPRV_CNT_BRT_BRT_SHIFT,
++ isp_reg_writel(isp, value, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT);
++}
++
++/*
++ * isppreview_query_brightness - Query the brightness.
++ */
++static u8 isppreview_query_brightness(struct isp_prev_device *prev)
++{
++ return prev->params.brightness / ISPPRV_BRIGHT_UNITS;
++}
++
++/*
++ * isppreview_config_yc_range - Configures the max and min Y and C values.
++ * @yclimit: Structure containing the range of Y and C values.
++ */
++static void
++isppreview_config_yc_range(struct isp_prev_device *prev, const void *yclimit)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ const struct ispprev_yclimit *yc = yclimit;
++
++ isp_reg_writel(isp,
++ yc->maxC << ISPPRV_SETUP_YC_MAXC_SHIFT |
++ yc->maxY << ISPPRV_SETUP_YC_MAXY_SHIFT |
++ yc->minC << ISPPRV_SETUP_YC_MINC_SHIFT |
++ yc->minY << ISPPRV_SETUP_YC_MINY_SHIFT,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC);
++}
++
++/* preview parameters update structure */
++struct preview_update {
++ int cfg_bit;
++ int feature_bit;
++ void (*config)(struct isp_prev_device *, const void *);
++ void (*enable)(struct isp_prev_device *, u8);
++};
++
++static struct preview_update update_attrs[] = {
++ {ISP_PREV_LUMAENH, PREV_LUMA_ENHANCE,
++ isppreview_config_luma_enhancement,
++ isppreview_enable_luma_enhancement},
++ {ISP_PREV_INVALAW, PREV_INVERSE_ALAW,
++ NULL,
++ isppreview_enable_invalaw},
++ {ISP_PREV_HRZ_MED, PREV_HORZ_MEDIAN_FILTER,
++ isppreview_config_hmed,
++ isppreview_enable_hmed},
++ {ISP_PREV_CFA, PREV_CFA,
++ isppreview_config_cfa,
++ isppreview_enable_cfa},
++ {ISP_PREV_CHROMA_SUPP, PREV_CHROMA_SUPPRESS,
++ isppreview_config_chroma_suppression,
++ isppreview_enable_chroma_suppression},
++ {ISP_PREV_WB, PREV_WB,
++ isppreview_config_whitebalance,
++ NULL},
++ {ISP_PREV_BLKADJ, PREV_BLKADJ,
++ isppreview_config_blkadj,
++ NULL},
++ {ISP_PREV_RGB2RGB, PREV_RGB2RGB,
++ isppreview_config_rgb_blending,
++ NULL},
++ {ISP_PREV_COLOR_CONV, PREV_COLOR_CONV,
++ isppreview_config_rgb_to_ycbcr,
++ NULL},
++ {ISP_PREV_YC_LIMIT, PREV_YCLIMITS,
++ isppreview_config_yc_range,
++ NULL},
++ {ISP_PREV_DEFECT_COR, PREV_DEFECT_COR,
++ isppreview_config_dcor,
++ isppreview_enable_dcor},
++ {ISP_PREV_GAMMABYPASS, PREV_GAMMA_BYPASS,
++ NULL,
++ isppreview_enable_gammabypass},
++ {ISP_PREV_DRK_FRM_CAPTURE, PREV_DARK_FRAME_CAPTURE,
++ NULL,
++ isppreview_enable_drkframe_capture},
++ {ISP_PREV_DRK_FRM_SUBTRACT, PREV_DARK_FRAME_SUBTRACT,
++ NULL,
++ isppreview_enable_drkframe},
++ {ISP_PREV_LENS_SHADING, PREV_LENS_SHADING,
++ isppreview_config_drkf_shadcomp,
++ isppreview_enable_drkframe},
++ {ISP_PREV_NF, PREV_NOISE_FILTER,
++ isppreview_config_noisefilter,
++ isppreview_enable_noisefilter},
++ {ISP_PREV_GAMMA, PREV_GAMMA,
++ isppreview_config_gammacorrn,
++ NULL},
++ {-1, PREV_CONTRAST,
++ isppreview_config_contrast,
++ NULL},
++ {-1, PREV_BRIGHTNESS,
++ isppreview_config_brightness,
++ NULL},
++};
++
++/*
++ * __isppreview_get_ptrs - helper function which return pointers to members
++ * of params and config structures.
++ * @params - pointer to preview_params structure.
++ * @param - return pointer to appropriate structure field.
++ * @configs - pointer to update config structure.
++ * @config - return pointer to appropriate structure field.
++ * @bit - for which feature to return pointers.
++ * Return size of coresponding prev_params member
++ */
++static u32
++__isppreview_get_ptrs(struct prev_params *params, void **param,
++ struct ispprv_update_config *configs,
++ void __user **config,
++ u32 bit)
++{
++#define CHKARG(cfgs, cfg, field) \
++ if (cfgs && cfg) { \
++ *(cfg) = (cfgs)->field; \
++ }
++
++ switch (bit) {
++ case PREV_HORZ_MEDIAN_FILTER:
++ *param = &params->hmed;
++ CHKARG(configs, config, hmed)
++ return sizeof(params->hmed);
++ case PREV_NOISE_FILTER:
++ *param = &params->nf;
++ CHKARG(configs, config, nf)
++ return sizeof(params->nf);
++ break;
++ case PREV_CFA:
++ *param = &params->cfa;
++ CHKARG(configs, config, cfa)
++ return sizeof(params->cfa);
++ case PREV_LUMA_ENHANCE:
++ *param = &params->luma;
++ CHKARG(configs, config, luma)
++ return sizeof(params->luma);
++ case PREV_CHROMA_SUPPRESS:
++ *param = &params->csup;
++ CHKARG(configs, config, csup)
++ return sizeof(params->csup);
++ case PREV_DEFECT_COR:
++ *param = &params->dcor;
++ CHKARG(configs, config, dcor)
++ return sizeof(params->dcor);
++ case PREV_BLKADJ:
++ *param = &params->blk_adj;
++ CHKARG(configs, config, blkadj)
++ return sizeof(params->blk_adj);
++ case PREV_YCLIMITS:
++ *param = &params->yclimit;
++ CHKARG(configs, config, yclimit)
++ return sizeof(params->yclimit);
++ case PREV_RGB2RGB:
++ *param = &params->rgb2rgb;
++ CHKARG(configs, config, rgb2rgb)
++ return sizeof(params->rgb2rgb);
++ case PREV_COLOR_CONV:
++ *param = &params->rgb2ycbcr;
++ CHKARG(configs, config, csc)
++ return sizeof(params->rgb2ycbcr);
++ case PREV_WB:
++ *param = &params->wbal;
++ CHKARG(configs, config, wbal)
++ return sizeof(params->wbal);
++ case PREV_GAMMA:
++ *param = &params->gamma;
++ CHKARG(configs, config, gamma)
++ return sizeof(params->gamma);
++ case PREV_CONTRAST:
++ *param = &params->contrast;
++ return 0;
++ case PREV_BRIGHTNESS:
++ *param = &params->brightness;
++ return 0;
++ default:
++ *param = NULL;
++ *config = NULL;
++ break;
++ }
++ return 0;
++}
++
++/*
++ * isppreview_config - Copy and update local structure with userspace preview
++ * configuration.
++ * @prev: ISP preview engine
++ * @cfg: Configuration
++ *
++ * Return zero if success or -EFAULT if the configuration can't be copied from
++ * userspace.
++ */
++static int isppreview_config(struct isp_prev_device *prev,
++ struct ispprv_update_config *cfg)
++{
++ struct prev_params *params;
++ struct preview_update *attr;
++ int i, bit, rval = 0;
++
++ params = &prev->params;
++
++ if (prev->state != ISP_PIPELINE_STREAM_STOPPED) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&prev->lock, flags);
++ prev->shadow_update = 1;
++ spin_unlock_irqrestore(&prev->lock, flags);
++ }
++
++ for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
++ attr = &update_attrs[i];
++ bit = 0;
++
++ if (!(cfg->update & attr->cfg_bit))
++ continue;
++
++ bit = cfg->flag & attr->cfg_bit;
++ if (bit) {
++ void *to = NULL, __user *from = NULL;
++ unsigned long sz = 0;
++
++ sz = __isppreview_get_ptrs(params, &to, cfg, &from,
++ bit);
++ if (to && from && sz) {
++ if (copy_from_user(to, from, sz)) {
++ rval = -EFAULT;
++ break;
++ }
++ }
++ params->features |= attr->feature_bit;
++ } else {
++ params->features &= ~attr->feature_bit;
++ }
++
++ prev->update |= attr->feature_bit;
++ }
++
++ prev->shadow_update = 0;
++ return rval;
++}
++
++/*
++ * isppreview_setup_hw - Setup preview registers and/or internal memory
++ * @prev: pointer to preview private structure
++ * Note: can be called from interrupt context
++ * Return none
++ */
++static void isppreview_setup_hw(struct isp_prev_device *prev)
++{
++ struct prev_params *params = &prev->params;
++ struct preview_update *attr;
++ int i, bit;
++ void *param_ptr;
++
++ for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
++ attr = &update_attrs[i];
++
++ if (!(prev->update & attr->feature_bit))
++ continue;
++ bit = params->features & attr->feature_bit;
++ if (bit) {
++ if (attr->config) {
++ __isppreview_get_ptrs(params, &param_ptr, NULL,
++ NULL, bit);
++ attr->config(prev, param_ptr);
++ }
++ if (attr->enable)
++ attr->enable(prev, 1);
++ } else
++ if (attr->enable)
++ attr->enable(prev, 0);
++
++ prev->update &= ~attr->feature_bit;
++ }
++}
++
++/*
++ * isppreview_config_ycpos - Configure byte layout of YUV image.
++ * @mode: Indicates the required byte layout.
++ */
++static void
++isppreview_config_ycpos(struct isp_prev_device *prev,
++ enum v4l2_mbus_pixelcode pixelcode)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ enum preview_ycpos_mode mode;
++ u32 pcr;
++
++ switch (pixelcode) {
++ case V4L2_MBUS_FMT_YUYV16_1X16:
++ mode = YCPOS_CrYCbY;
++ break;
++ case V4L2_MBUS_FMT_UYVY16_1X16:
++ mode = YCPOS_YCrYCb;
++ break;
++ default:
++ return;
++ }
++
++ pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++ pcr &= ~ISPPRV_PCR_YCPOS_CrYCbY;
++ pcr |= (mode << ISPPRV_PCR_YCPOS_SHIFT);
++ isp_reg_writel(isp, pcr, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR);
++}
++
++/*
++ * isppreview_config_averager - Enable / disable / configure averager
++ * @average: Average value to be configured.
++ */
++static void
++isppreview_config_averager(struct isp_prev_device *prev, u8 average)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ int reg = 0;
++
++ if (prev->params.cfa.format == CFAFMT_BAYER)
++ reg = ISPPRV_AVE_EVENDIST_2 << ISPPRV_AVE_EVENDIST_SHIFT |
++ ISPPRV_AVE_ODDDIST_2 << ISPPRV_AVE_ODDDIST_SHIFT |
++ average;
++ else if (prev->params.cfa.format == CFAFMT_RGBFOVEON)
++ reg = ISPPRV_AVE_EVENDIST_3 << ISPPRV_AVE_EVENDIST_SHIFT |
++ ISPPRV_AVE_ODDDIST_3 << ISPPRV_AVE_ODDDIST_SHIFT |
++ average;
++ isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE);
++}
++
++/*
++ * isppreview_set_input_sz - Set input frame size
++ * @sph: Start pixel horizontal.
++ * @eph: End pixel horizontal.
++ * @slv: Start line vertical.
++ * @elv: End line vertical.
++ */
++static void isppreview_set_input_sz(struct isp_prev_device *prev,
++ u32 sph, u32 eph, u32 slv, u32 elv)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO);
++ isp_reg_writel(isp, (slv << ISPPRV_VERT_INFO_SLV_SHIFT) | elv,
++ OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO);
++}
++
++/*
++ * isppreview_config_inlineoffset - Configures the Read address line offset.
++ * @prev: Preview module
++ * @offset: Line offset
++ *
++ * According to the TRM, the line offset must be aligned on a 32 bytes boundary.
++ * However, a hardware bug requires the memory start address to be aligned on a
++ * 64 bytes boundary, so the offset probably should be aligned on 64 bytes as
++ * well.
++ */
++static void isppreview_config_inlineoffset(struct isp_prev_device *prev,
++ u32 offset)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_RADR_OFFSET);
++}
++
++/*
++ * isppreview_set_inaddr - Sets memory address of input frame.
++ * @addr: 32bit memory address aligned on 32byte boundary.
++ *
++ * Configures the memory address from which the input frame is to be read.
++ */
++static void isppreview_set_inaddr(struct isp_prev_device *prev, u32 addr)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR);
++}
++
++/*
++ * isppreview_config_outlineoffset - Configures the Write address line offset.
++ * @offset: Line Offset for the preview output.
++ *
++ * The offset must be a multiple of 32 bytes.
++ */
++static void isppreview_config_outlineoffset(struct isp_prev_device *prev,
++ u32 offset)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
++ ISPPRV_WADD_OFFSET);
++}
++
++/*
++ * isppreview_set_outaddr - Sets the memory address to store output frame
++ * @addr: 32bit memory address aligned on 32byte boundary.
++ *
++ * Configures the memory address to which the output frame is written.
++ */
++static void isppreview_set_outaddr(struct isp_prev_device *prev, u32 addr)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_WSDR_ADDR);
++}
++
++static void isppreview_adjust_bandwidth(struct isp_prev_device *prev)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
++ struct isp_device *isp = to_isp_device(prev);
++ const struct v4l2_mbus_framefmt *ifmt = &prev->formats[PREV_PAD_SINK];
++ unsigned long l3_ick = pipe->l3_ick;
++ struct v4l2_fract *timeperframe;
++ unsigned int cycles_per_frame;
++ unsigned int requests_per_frame;
++ unsigned int cycles_per_request;
++ unsigned int minimum;
++ unsigned int maximum;
++ unsigned int value;
++
++ if (prev->input != PREVIEW_INPUT_MEMORY) {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
++ ~ISPSBL_SDR_REQ_PRV_EXP_MASK);
++ return;
++ }
++
++ /* Compute the minimum number of cycles per request, based on the
++ * pipeline maximum data rate. This is an absolute lower bound if we
++ * don't want SBL overflows, so round the value up.
++ */
++ cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
++ pipe->max_rate);
++ minimum = DIV_ROUND_UP(cycles_per_request, 32);
++
++ /* Compute the maximum number of cycles per request, based on the
++ * requested frame rate. This is a soft upper bound to achieve a frame
++ * rate equal or higher than the requested value, so round the value
++ * down.
++ */
++ timeperframe = &pipe->max_timeperframe;
++
++ requests_per_frame = DIV_ROUND_UP(ifmt->width * 2, 256) * ifmt->height;
++ cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
++ timeperframe->denominator);
++ cycles_per_request = cycles_per_frame / requests_per_frame;
++
++ maximum = cycles_per_request / 32;
++
++ value = max(minimum, maximum);
++
++ dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
++ ~ISPSBL_SDR_REQ_PRV_EXP_MASK,
++ value << ISPSBL_SDR_REQ_PRV_EXP_SHIFT);
++}
++
++/*
++ * isppreview_busy - Gets busy state of preview module.
++ */
++int isppreview_busy(struct isp_prev_device *prev)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR)
++ & ISPPRV_PCR_BUSY;
++}
++
++/*
++ * isppreview_save_context - Saves the values of the preview module registers.
++ */
++void isppreview_save_context(struct isp_device *isp)
++{
++ isp_save_context(isp, ispprev_reg_list);
++ /* Avoid unwanted enabling when restoring the context. */
++ ispprev_reg_list[0].val &= ~ISPPRV_PCR_EN;
++}
++
++/*
++ * isppreview_restore_context - Restores the values of preview module registers
++ */
++void isppreview_restore_context(struct isp_device *isp)
++{
++ isp_restore_context(isp, ispprev_reg_list);
++
++ isp->isp_prev.update = PREV_FEATURES_END - 1;
++ isppreview_setup_hw(&isp->isp_prev);
++}
++
++/*
++ * isppreview_print_status - Prints the values of the Preview Module registers.
++ *
++ * Also prints other debug information stored in the preview module.
++ */
++#define PREV_PRINT_REGISTER(isp, name)\
++ dev_dbg(isp->dev, "###PRV " #name "=0x%08x\n", \
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_##name))
++
++static void isppreview_print_status(struct isp_prev_device *prev)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ dev_dbg(isp->dev, "-------------Preview Register dump----------\n");
++
++ PREV_PRINT_REGISTER(isp, PCR);
++ PREV_PRINT_REGISTER(isp, HORZ_INFO);
++ PREV_PRINT_REGISTER(isp, VERT_INFO);
++ PREV_PRINT_REGISTER(isp, RSDR_ADDR);
++ PREV_PRINT_REGISTER(isp, RADR_OFFSET);
++ PREV_PRINT_REGISTER(isp, DSDR_ADDR);
++ PREV_PRINT_REGISTER(isp, DRKF_OFFSET);
++ PREV_PRINT_REGISTER(isp, WSDR_ADDR);
++ PREV_PRINT_REGISTER(isp, WADD_OFFSET);
++ PREV_PRINT_REGISTER(isp, AVE);
++ PREV_PRINT_REGISTER(isp, HMED);
++ PREV_PRINT_REGISTER(isp, NF);
++ PREV_PRINT_REGISTER(isp, WB_DGAIN);
++ PREV_PRINT_REGISTER(isp, WBGAIN);
++ PREV_PRINT_REGISTER(isp, WBSEL);
++ PREV_PRINT_REGISTER(isp, CFA);
++ PREV_PRINT_REGISTER(isp, BLKADJOFF);
++ PREV_PRINT_REGISTER(isp, RGB_MAT1);
++ PREV_PRINT_REGISTER(isp, RGB_MAT2);
++ PREV_PRINT_REGISTER(isp, RGB_MAT3);
++ PREV_PRINT_REGISTER(isp, RGB_MAT4);
++ PREV_PRINT_REGISTER(isp, RGB_MAT5);
++ PREV_PRINT_REGISTER(isp, RGB_OFF1);
++ PREV_PRINT_REGISTER(isp, RGB_OFF2);
++ PREV_PRINT_REGISTER(isp, CSC0);
++ PREV_PRINT_REGISTER(isp, CSC1);
++ PREV_PRINT_REGISTER(isp, CSC2);
++ PREV_PRINT_REGISTER(isp, CSC_OFFSET);
++ PREV_PRINT_REGISTER(isp, CNT_BRT);
++ PREV_PRINT_REGISTER(isp, CSUP);
++ PREV_PRINT_REGISTER(isp, SETUP_YC);
++ PREV_PRINT_REGISTER(isp, SET_TBL_ADDR);
++ PREV_PRINT_REGISTER(isp, CDC_THR0);
++ PREV_PRINT_REGISTER(isp, CDC_THR1);
++ PREV_PRINT_REGISTER(isp, CDC_THR2);
++ PREV_PRINT_REGISTER(isp, CDC_THR3);
++
++ dev_dbg(isp->dev, "--------------------------------------------\n");
++}
++
++/*
++ * isppreview_init_params - init image processing parameters.
++ * @prev: pointer to previewer private structure
++ * return none
++ */
++static void isppreview_init_params(struct isp_prev_device *prev)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ struct prev_params *params = &prev->params;
++ int i = 0;
++
++ /* Init values */
++ params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
++ params->brightness = ISPPRV_BRIGHT_DEF * ISPPRV_BRIGHT_UNITS;
++ params->average = NO_AVE;
++ params->lens_shading_shift = 0;
++ params->cfa.format = CFAFMT_BAYER;
++ memcpy(params->cfa.table, cfa_coef_table,
++ sizeof(params->cfa.table));
++ params->cfa.gradthrs_horz = FLR_CFA_GRADTHRS_HORZ;
++ params->cfa.gradthrs_vert = FLR_CFA_GRADTHRS_VERT;
++ params->csup.gain = FLR_CSUP_GAIN;
++ params->csup.thres = FLR_CSUP_THRES;
++ params->csup.hypf_en = 0;
++ memcpy(params->luma.table, luma_enhance_table,
++ sizeof(params->luma.table));
++ params->nf.spread = FLR_NF_STRGTH;
++ memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table));
++ params->dcor.couplet_mode_en = 1;
++ for (i = 0; i < ISPPRV_DETECT_CORRECT_CHANNELS; i++)
++ params->dcor.detect_correct[i] = DEF_DETECT_CORRECT_VAL;
++ memcpy(params->gamma.blue, bluegamma_table, sizeof(params->gamma.blue));
++ memcpy(params->gamma.green, greengamma_table,
++ sizeof(params->gamma.green));
++ memcpy(params->gamma.red, redgamma_table, sizeof(params->gamma.red));
++ params->wbal.dgain = FLR_WBAL_DGAIN;
++ if (isp->revision == ISP_REVISION_1_0) {
++ params->wbal.coef0 = FLR_WBAL_COEF0_ES1;
++ params->wbal.coef1 = FLR_WBAL_COEF1_ES1;
++ params->wbal.coef2 = FLR_WBAL_COEF2_ES1;
++ params->wbal.coef3 = FLR_WBAL_COEF3_ES1;
++ } else {
++ params->wbal.coef0 = FLR_WBAL_COEF0;
++ params->wbal.coef1 = FLR_WBAL_COEF1;
++ params->wbal.coef2 = FLR_WBAL_COEF2;
++ params->wbal.coef3 = FLR_WBAL_COEF3;
++ }
++ params->blk_adj.red = FLR_BLKADJ_RED;
++ params->blk_adj.green = FLR_BLKADJ_GREEN;
++ params->blk_adj.blue = FLR_BLKADJ_BLUE;
++ params->rgb2rgb = flr_rgb2rgb;
++ params->rgb2ycbcr = flr_prev_csc;
++ params->yclimit.minC = ISPPRV_YC_MIN;
++ params->yclimit.maxC = ISPPRV_YC_MAX;
++ params->yclimit.minY = ISPPRV_YC_MIN;
++ params->yclimit.maxY = ISPPRV_YC_MAX;
++
++ params->features = PREV_CFA | PREV_DEFECT_COR | PREV_NOISE_FILTER |
++ PREV_GAMMA | PREV_AVERAGER | PREV_BLKADJ |
++ PREV_YCLIMITS | PREV_RGB2RGB | PREV_COLOR_CONV |
++ PREV_WB | PREV_BRIGHTNESS | PREV_CONTRAST;
++
++ params->features &= ~(PREV_AVERAGER | PREV_INVERSE_ALAW |
++ PREV_HORZ_MEDIAN_FILTER |
++ PREV_GAMMA_BYPASS |
++ PREV_DARK_FRAME_SUBTRACT |
++ PREV_LENS_SHADING |
++ PREV_DARK_FRAME_CAPTURE |
++ PREV_CHROMA_SUPPRESS |
++ PREV_LUMA_ENHANCE);
++
++ prev->update = PREV_FEATURES_END - 1;
++
++ isppreview_setup_hw(prev);
++
++ /* Disable SDR PORT which is enabled by default after reset. */
++ isp_reg_and(to_isp_device(prev), OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_SDRPORT);
++}
++
++/*
++ * preview_max_out_width - Handle previewer hardware ouput limitations
++ * @isp_revision : ISP revision
++ * returns maximum width output for current isp revision
++ */
++static unsigned int preview_max_out_width(struct isp_prev_device *prev)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ switch (isp->revision) {
++ case ISP_REVISION_1_0:
++ return ISPPRV_MAXOUTPUT_WIDTH;
++
++ case ISP_REVISION_2_0:
++ default:
++ return ISPPRV_MAXOUTPUT_WIDTH_ES2;
++
++ case ISP_REVISION_15_0:
++ return ISPPRV_MAXOUTPUT_WIDTH_3630;
++ }
++}
++
++static void preview_configure(struct isp_prev_device *prev)
++{
++ struct isp_device *isp = to_isp_device(prev);
++ struct v4l2_mbus_framefmt *format;
++ unsigned int max_out_width;
++ unsigned int format_avg;
++
++ isppreview_setup_hw(prev);
++
++ if (prev->output & PREVIEW_OUTPUT_MEMORY)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_SDRPORT);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_SDRPORT);
++
++ if (prev->output & PREVIEW_OUTPUT_RESIZER)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_RSZPORT);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ~ISPPRV_PCR_RSZPORT);
++
++ /* PREV_PAD_SINK */
++ format = &prev->formats[PREV_PAD_SINK];
++
++ isppreview_adjust_bandwidth(prev);
++
++ if (prev->input == PREVIEW_INPUT_CCDC) {
++ isppreview_set_input_sz(prev, 2, format->width - 3,
++ 0, format->height - 1);
++ isppreview_config_inlineoffset(prev, 0);
++ } else {
++ isppreview_set_input_sz(prev, 0, format->width - 1,
++ 0, format->height - 1);
++ isppreview_config_inlineoffset(prev,
++ ALIGN(format->width, 0x20) * 2);
++ }
++
++ /* PREV_PAD_SOURCE */
++ format = &prev->formats[PREV_PAD_SOURCE];
++
++ if (prev->output & PREVIEW_OUTPUT_MEMORY)
++ isppreview_config_outlineoffset(prev,
++ ALIGN(format->width, 0x10) * 2);
++
++ max_out_width = preview_max_out_width(prev);
++
++ format_avg = fls(DIV_ROUND_UP(format->width, max_out_width) - 1);
++ isppreview_config_averager(prev, format_avg);
++ isppreview_config_ycpos(prev, format->code);
++}
++
++/* -----------------------------------------------------------------------------
++ * Interrupt handling
++ */
++
++static void preview_enable_oneshot(struct isp_prev_device *prev)
++{
++ struct isp_device *isp = to_isp_device(prev);
++
++ /* The PCR.SOURCE bit is automatically reset to 0 when the PCR.ENABLE
++ * bit is set. As the preview engine is used in single-shot mode, we
++ * need to set PCR.SOURCE before enabling the preview engine.
++ */
++ if (prev->input == PREVIEW_INPUT_MEMORY)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_SOURCE);
++
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
++ ISPPRV_PCR_EN | ISPPRV_PCR_ONESHOT);
++}
++
++void isppreview_isr_frame_sync(struct isp_prev_device *prev)
++{
++ if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS && prev->underrun) {
++ preview_enable_oneshot(prev);
++ prev->underrun = 0;
++ }
++}
++
++static void preview_isr_buffer(struct isp_prev_device *prev)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
++ struct isp_buffer *buffer;
++ int restart = 0;
++
++ if (prev->input == PREVIEW_INPUT_MEMORY) {
++ buffer = isp_video_buffer_next(&prev->video_in, prev->error);
++ if (buffer != NULL)
++ isppreview_set_inaddr(prev, buffer->isp_addr);
++ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
++ }
++
++ if (prev->output & PREVIEW_OUTPUT_MEMORY) {
++ buffer = isp_video_buffer_next(&prev->video_out, prev->error);
++ if (buffer != NULL) {
++ isppreview_set_outaddr(prev, buffer->isp_addr);
++ restart = 1;
++ }
++ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
++ }
++
++ if (prev->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
++ if (isp_pipeline_ready(pipe))
++ isp_pipeline_set_stream(pipe,
++ ISP_PIPELINE_STREAM_SINGLESHOT);
++ } else {
++ /* If an underrun occurs, the video queue operation handler will
++ * restart the preview engine. Otherwise restart it immediately.
++ */
++ if (restart)
++ preview_enable_oneshot(prev);
++ }
++
++ prev->error = 0;
++}
++
++/*
++ * isppreview_isr - ISP preview engine interrupt handler
++ *
++ * Manage the preview engine video buffers and configure shadowed registers.
++ */
++void isppreview_isr(struct isp_prev_device *prev)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&prev->lock, flags);
++ if (prev->shadow_update)
++ goto done;
++
++ isppreview_setup_hw(prev);
++
++done:
++ spin_unlock_irqrestore(&prev->lock, flags);
++
++ if (prev->state == ISP_PIPELINE_STREAM_STOPPED)
++ return;
++
++ if (prev->input == PREVIEW_INPUT_MEMORY ||
++ prev->output & PREVIEW_OUTPUT_MEMORY)
++ preview_isr_buffer(prev);
++ else if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS)
++ preview_enable_oneshot(prev);
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP video operations
++ */
++
++static int preview_video_queue(struct isp_video *video,
++ struct isp_buffer *buffer)
++{
++ struct isp_prev_device *prev = &video->isp->isp_prev;
++
++ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ isppreview_set_inaddr(prev, buffer->isp_addr);
++
++ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ isppreview_set_outaddr(prev, buffer->isp_addr);
++
++ /* We now have a buffer queued on the output, restart the
++ * pipeline on the next sync interrupt if running in continuous
++ * mode (or when the stream is started).
++ */
++ prev->underrun = 1;
++ }
++
++ return 0;
++}
++
++static const struct isp_video_operations preview_video_ops = {
++ .queue = preview_video_queue,
++};
++
++/* -----------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++
++/* Preview module controls */
++static struct v4l2_queryctrl preview_controls[] = {
++ {
++ .id = V4L2_CID_BRIGHTNESS,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Brightness",
++ .minimum = ISPPRV_BRIGHT_LOW,
++ .maximum = ISPPRV_BRIGHT_HIGH,
++ .step = ISPPRV_BRIGHT_STEP,
++ .default_value = ISPPRV_BRIGHT_DEF,
++ },
++ {
++ .id = V4L2_CID_CONTRAST,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Contrast",
++ .minimum = ISPPRV_CONTRAST_LOW,
++ .maximum = ISPPRV_CONTRAST_HIGH,
++ .step = ISPPRV_CONTRAST_STEP,
++ .default_value = ISPPRV_CONTRAST_DEF,
++ },
++};
++
++/*
++ * preview_g_ctrl - Handle get control subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @ctrl: pointer to v4l2 control structure
++ * return -EINVAL or zero on success
++ */
++static int preview_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++
++ switch (ctrl->id) {
++ case V4L2_CID_BRIGHTNESS:
++ ctrl->value = isppreview_query_brightness(prev);
++ break;
++ case V4L2_CID_CONTRAST:
++ ctrl->value = isppreview_query_contrast(prev);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/*
++ * preview_s_ctrl - Handle set control subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @ctrl: pointer to v4l2 control structure
++ * return -EINVAL or zero on success
++ */
++static int preview_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++ u8 new_value = ctrl->value;
++ int rval = 0;
++
++ switch (ctrl->id) {
++ case V4L2_CID_BRIGHTNESS:
++ rval = isppreview_update_brightness(prev, new_value);
++ break;
++ case V4L2_CID_CONTRAST:
++ rval = isppreview_update_contrast(prev, new_value);
++ break;
++ default:
++ rval = -EINVAL;
++ break;
++ }
++
++ return rval;
++}
++
++/*
++ * preview_query_ctrl - Handle query control subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @ctrl: pointer to v4l2 control structure
++ * return -EINVAL or zero on success
++ */
++static int
++preview_query_ctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *query)
++{
++ struct v4l2_queryctrl *best = NULL;
++ int next;
++ u32 id;
++ int i;
++
++ next = query->id & V4L2_CTRL_FLAG_NEXT_CTRL;
++ id = query->id & V4L2_CTRL_ID_MASK;
++
++ for (i = 0; i < ARRAY_SIZE(preview_controls); i++) {
++ struct v4l2_queryctrl *ctrl = &preview_controls[i];
++
++ if (ctrl->id == id && !next) {
++ best = ctrl;
++ break;
++ }
++
++ if ((!best || best->id > ctrl->id) && ctrl->id > id && next)
++ best = ctrl;
++ }
++
++ if (best == NULL)
++ return -EINVAL;
++
++ memcpy(query, best, sizeof(*query));
++ return 0;
++}
++
++/*
++ * preview_ioctl - Handle preview module private ioctl's
++ * @prev: pointer to preview context structure
++ * @cmd: configuration command
++ * @arg: configuration argument
++ * return -EINVAL or zero on success
++ */
++static long preview_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++
++ switch (cmd) {
++ case VIDIOC_PRIVATE_ISP_PRV_CFG:
++ return isppreview_config(prev, arg);
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++/*
++ * preview_s_power - Handle set power subdev method
++ * @sd: pointer to v4l2 subdev structure
++ * @on: power on/off
++ * return -EINVAL or zero on success
++ */
++static int preview_s_power(struct v4l2_subdev *sd, int on)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (on) {
++ if (!isp_get(isp))
++ return -EBUSY;
++ } else {
++ isp_put(isp);
++ }
++
++ return 0;
++}
++
++/*
++ * preview_set_stream - Enable/Disable streaming on preview subdev
++ * @sd : pointer to v4l2 subdev structure
++ * @enable: 1 == Enable, 0 == Disable
++ * return -EINVAL or zero on sucess
++ */
++static int preview_set_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(prev);
++
++ if (prev->state == ISP_PIPELINE_STREAM_STOPPED) {
++ if (enable == ISP_PIPELINE_STREAM_STOPPED)
++ return 0;
++
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
++ ISPCTRL_PREV_RAM_EN | ISPCTRL_PREV_CLK_EN);
++ preview_configure(prev);
++ isppreview_print_status(prev);
++ }
++
++ switch (enable) {
++ case ISP_PIPELINE_STREAM_CONTINUOUS:
++ if (prev->output & PREVIEW_OUTPUT_MEMORY)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
++
++ if (prev->underrun || !(prev->output & PREVIEW_OUTPUT_MEMORY))
++ preview_enable_oneshot(prev);
++
++ prev->underrun = 0;
++ break;
++
++ case ISP_PIPELINE_STREAM_SINGLESHOT:
++ if (prev->input == PREVIEW_INPUT_MEMORY)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
++ if (prev->output & PREVIEW_OUTPUT_MEMORY)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
++
++ preview_enable_oneshot(prev);
++ break;
++
++ case ISP_PIPELINE_STREAM_STOPPED:
++ isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
++ isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
++ ~(ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN));
++ prev->underrun = 0;
++ break;
++ }
++
++ prev->state = enable;
++ return 0;
++}
++
++static struct v4l2_mbus_framefmt *
++__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return v4l2_subdev_get_probe_format(fh, pad);
++ else
++ return &prev->formats[pad];
++}
++
++/* previewer format descriptions */
++const static unsigned int prev_input_fmts[] = {
++ V4L2_MBUS_FMT_SGRBG10_1X10,
++ V4L2_MBUS_FMT_SRGGB10_1X10,
++ V4L2_MBUS_FMT_SBGGR10_1X10,
++ V4L2_MBUS_FMT_SGBRG10_1X10,
++};
++
++const static unsigned int prev_output_fmts[] = {
++ V4L2_MBUS_FMT_UYVY16_1X16,
++ V4L2_MBUS_FMT_YUYV16_1X16,
++};
++
++/*
++ * preview_try_format - Handle try format by pad subdev method
++ * @prev: ISP preview device
++ * @fh : V4L2 subdev file handle
++ * @pad: pad num
++ * @fmt: pointer to v4l2 format structure
++ */
++static void preview_try_format(struct isp_prev_device *prev,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct v4l2_mbus_framefmt *format;
++ unsigned int max_out_width;
++ enum v4l2_mbus_pixelcode pixelcode;
++ unsigned int i;
++
++ max_out_width = preview_max_out_width(prev);
++
++ switch (pad) {
++ case PREV_PAD_SINK:
++ /* When reading data from the CCDC, the input size has already
++ * been mangled by the CCDC output pad so it can be accepted
++ * as-is.
++ *
++ * When reading data from memory, clamp the requested width and
++ * height. The TRM doesn't specify a minimum input height, make
++ * sure we got enough lines to enable the noise filter and color
++ * filter array interpolation.
++ */
++ if (prev->input == PREVIEW_INPUT_MEMORY) {
++ fmt->width = clamp_t(u32, fmt->width, PREV_MIN_WIDTH,
++ max_out_width * 8);
++ fmt->height = clamp_t(u32, fmt->height, PREV_MIN_HEIGHT,
++ PREV_MAX_HEIGHT);
++ }
++
++ fmt->colorspace = V4L2_COLORSPACE_SRGB;
++
++ for (i = 0; i < ARRAY_SIZE(prev_input_fmts); i++) {
++ if (fmt->code == prev_input_fmts[i])
++ break;
++ }
++
++ /* If not found, use SGRBG10 as default */
++ if (i >= ARRAY_SIZE(prev_input_fmts))
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++ break;
++
++ case PREV_PAD_SOURCE:
++ pixelcode = fmt->code;
++ format = __preview_get_format(prev, fh, PREV_PAD_SINK, which);
++ memcpy(fmt, format, sizeof(*fmt));
++
++ /* The preview module output size is configurable through the
++ * input interface (horizontal and vertical cropping) and the
++ * averager (horizontal scaling by 1/1, 1/2, 1/4 or 1/8). In
++ * spite of this, hardcode the output size to the biggest
++ * possible value for simplicity reasons.
++ */
++ switch (pixelcode) {
++ case V4L2_MBUS_FMT_YUYV16_1X16:
++ case V4L2_MBUS_FMT_UYVY16_1X16:
++ fmt->code = pixelcode;
++ break;
++
++ default:
++ fmt->code = V4L2_MBUS_FMT_YUYV16_1X16;
++ break;
++ }
++
++ /* The TRM states (12.1.4.7.1.2) that 2 pixels must be cropped
++ * from the left and right sides when the input source is the
++ * CCDC. This seems not to be needed in practice, investigation
++ * is required.
++ */
++ if (prev->input == PREVIEW_INPUT_CCDC)
++ fmt->width -= 4;
++
++ /* The preview module can output a maximum of 3312 pixels
++ * horizontally due to fixed memory-line sizes. Compute the
++ * horizontal averaging factor accordingly. Note that the limit
++ * applies to the noise filter and CFA interpolation blocks, so
++ * it doesn't take cropping by further blocks into account.
++ *
++ * ES 1.0 hardware revision is limited to 1280 pixels
++ * horizontally.
++ */
++ fmt->width >>= fls(DIV_ROUND_UP(fmt->width, max_out_width) - 1);
++
++ /* Assume that all blocks are enabled and crop pixels and lines
++ * accordingly.
++ *
++ * Median filter 4 pixels
++ * Noise filter 4 pixels, 4 lines
++ * CFA filter 4 pixels, 4 lines in Bayer mode
++ * 2 lines in other modes
++ * Color suppression 2 pixels
++ * or luma enhancement
++ * -------------------------------------------------------------
++ * Maximum total 14 pixels, 8 lines
++ */
++ fmt->width -= 14;
++ fmt->height -= 8;
++
++ fmt->colorspace = V4L2_COLORSPACE_JPEG;
++ break;
++ }
++
++ fmt->field = V4L2_FIELD_NONE;
++}
++
++/*
++ * preview_enum_mbus_code - Handle pixel format enumeration
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
++ * return -EINVAL or zero on success
++ */
++static int preview_enum_mbus_code(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ switch (code->pad) {
++ case PREV_PAD_SINK:
++ if (code->index >= ARRAY_SIZE(prev_input_fmts))
++ return -EINVAL;
++
++ code->code = prev_input_fmts[code->index];
++ break;
++ case PREV_PAD_SOURCE:
++ if (code->index >= ARRAY_SIZE(prev_output_fmts))
++ return -EINVAL;
++
++ code->code = prev_output_fmts[code->index];
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int preview_enum_frame_size(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt format;
++
++ if (fse->index != 0)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = 1;
++ format.height = 1;
++ preview_try_format(prev, fh, fse->pad, &format,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ fse->min_width = format.width;
++ fse->min_height = format.height;
++
++ if (format.code != fse->code)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = -1;
++ format.height = -1;
++ preview_try_format(prev, fh, fse->pad, &format,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ fse->max_width = format.width;
++ fse->max_height = format.height;
++
++ return 0;
++}
++
++/*
++ * preview_get_format - Handle get format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad: pad num
++ * @fmt: pointer to v4l2 format structure
++ * return -EINVAL or zero on sucess
++ */
++static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __preview_get_format(prev, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ memcpy(fmt, format, sizeof(*fmt));
++ return 0;
++}
++
++/*
++ * preview_set_format - Handle set format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad: pad num
++ * @fmt: pointer to v4l2 format structure
++ * return -EINVAL or zero on success
++ */
++static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __preview_get_format(prev, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ preview_try_format(prev, fh, pad, fmt, which);
++ memcpy(format, fmt, sizeof(*format));
++
++ /* Propagate the format from sink to source */
++ if (pad == PREV_PAD_SINK) {
++ format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, which);
++ memcpy(format, fmt, sizeof(*format));
++ preview_try_format(prev, fh, PREV_PAD_SOURCE, format, which);
++ }
++
++ return 0;
++}
++
++/* subdev core operations */
++static const struct v4l2_subdev_core_ops preview_v4l2_core_ops = {
++ .queryctrl = preview_query_ctrl,
++ .g_ctrl = preview_g_ctrl,
++ .s_ctrl = preview_s_ctrl,
++ .ioctl = preview_ioctl,
++ .s_power = preview_s_power,
++};
++
++/* subdev video operations */
++static const struct v4l2_subdev_video_ops preview_v4l2_video_ops = {
++ .s_stream = preview_set_stream,
++};
++
++/* subdev pad operations */
++static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
++ .enum_mbus_code = preview_enum_mbus_code,
++ .enum_frame_size = preview_enum_frame_size,
++ .get_fmt = preview_get_format,
++ .set_fmt = preview_set_format,
++};
++
++/* subdev operations */
++static const struct v4l2_subdev_ops preview_v4l2_ops = {
++ .core = &preview_v4l2_core_ops,
++ .video = &preview_v4l2_video_ops,
++ .pad = &preview_v4l2_pad_ops,
++};
++
++/* -----------------------------------------------------------------------------
++ * Media entity operations
++ */
++
++/*
++ * preview_link_setup - Setup previewer connections.
++ * @entity : Pointer to media entity structure
++ * @local : Pointer to local pad array
++ * @remote : Pointer to remote pad array
++ * @flags : Link flags
++ * return -EINVAL or zero on success
++ */
++static int preview_link_setup(struct media_entity *entity,
++ const struct media_entity_pad *local,
++ const struct media_entity_pad *remote, u32 flags)
++{
++ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++ struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
++
++ switch (local->index | (remote->entity->type << 16)) {
++ case PREV_PAD_SINK | (MEDIA_ENTITY_TYPE_NODE << 16):
++ /* read from memory */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ if (prev->input == PREVIEW_INPUT_CCDC)
++ return -EBUSY;
++ prev->input = PREVIEW_INPUT_MEMORY;
++ } else {
++ if (prev->input == PREVIEW_INPUT_MEMORY)
++ prev->input = PREVIEW_INPUT_NONE;
++ }
++ break;
++
++ case PREV_PAD_SINK | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* read from ccdc */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ if (prev->input == PREVIEW_INPUT_MEMORY)
++ return -EBUSY;
++ prev->input = PREVIEW_INPUT_CCDC;
++ } else {
++ if (prev->input == PREVIEW_INPUT_CCDC)
++ prev->input = PREVIEW_INPUT_NONE;
++ }
++ break;
++
++ case PREV_PAD_SOURCE | (MEDIA_ENTITY_TYPE_NODE << 16):
++ /* write to memory */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ prev->output |= PREVIEW_OUTPUT_MEMORY;
++ else
++ prev->output &= ~PREVIEW_OUTPUT_MEMORY;
++ break;
++
++ case PREV_PAD_SOURCE | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* write to resizer */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE)
++ prev->output |= PREVIEW_OUTPUT_RESIZER;
++ else
++ prev->output &= ~PREVIEW_OUTPUT_RESIZER;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* media operations */
++static const struct media_entity_operations preview_media_ops = {
++ .link_setup = preview_link_setup,
++ .set_power = v4l2_subdev_set_power,
++};
++
++/*
++ * isppreview_init_entities - Initialize subdev and media entity.
++ * @prev : Pointer to isppreview structure
++ * return -ENOMEM or zero on success
++ */
++static int isppreview_init_entities(struct isp_prev_device *prev)
++{
++ struct v4l2_subdev *sd = &prev->subdev;
++ struct media_entity_pad *pads = prev->pads;
++ struct media_entity *me = &sd->entity;
++ int ret;
++
++ prev->input = PREVIEW_INPUT_NONE;
++
++ v4l2_subdev_init(sd, &preview_v4l2_ops);
++ strlcpy(sd->name, "OMAP3 ISP preview", sizeof(sd->name));
++ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
++ v4l2_set_subdevdata(sd, prev);
++ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ pads[PREV_PAD_SINK].type = MEDIA_PAD_TYPE_INPUT;
++ pads[PREV_PAD_SOURCE].type = MEDIA_PAD_TYPE_OUTPUT;
++
++ me->ops = &preview_media_ops;
++ ret = media_entity_init(me, PREV_PADS_NUM, pads, 0);
++ if (ret < 0)
++ return ret;
++
++ /* According to the OMAP34xx TRM, video buffers need to be aligned on a
++ * 32 bytes boundary. However, an undocumented hardware bug requires a
++ * 64 bytes boundary at the preview engine input.
++ */
++ prev->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ prev->video_in.ops = &preview_video_ops;
++ prev->video_in.isp = to_isp_device(prev);
++ prev->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
++ prev->video_in.alignment = 64;
++ prev->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ prev->video_out.ops = &preview_video_ops;
++ prev->video_out.isp = to_isp_device(prev);
++ prev->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
++ prev->video_out.alignment = 32;
++
++ ret = isp_video_init(&prev->video_in, "preview");
++ if (ret < 0)
++ return ret;
++
++ ret = isp_video_init(&prev->video_out, "preview");
++ if (ret < 0)
++ return ret;
++
++ /* Connect the video nodes to the previewer subdev. */
++ ret = media_entity_create_link(&prev->video_in.video.entity, 0,
++ &prev->subdev.entity, PREV_PAD_SINK, 0);
++ if (ret < 0)
++ return ret;
++
++ ret = media_entity_create_link(&prev->subdev.entity, PREV_PAD_SOURCE,
++ &prev->video_out.video.entity, 0, 0);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
++void isp_preview_unregister_entities(struct isp_prev_device *prev)
++{
++ media_entity_cleanup(&prev->subdev.entity);
++
++ v4l2_device_unregister_subdev(&prev->subdev);
++ isp_video_unregister(&prev->video_in);
++ isp_video_unregister(&prev->video_out);
++}
++
++int isp_preview_register_entities(struct isp_prev_device *prev,
++ struct v4l2_device *vdev)
++{
++ int ret;
++
++ /* Register the subdev and video nodes. */
++ ret = v4l2_device_register_subdev(vdev, &prev->subdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&prev->video_in, vdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&prev->video_out, vdev);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++error:
++ isp_preview_unregister_entities(prev);
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP previewer initialisation and cleanup
++ */
++
++void isp_preview_cleanup(struct isp_device *isp)
++{
++}
++
++/*
++ * isp_preview_init - Previewer initialization.
++ * @dev : Pointer to ISP device
++ * return -ENOMEM or zero on success
++ */
++int isp_preview_init(struct isp_device *isp)
++{
++ struct isp_prev_device *prev = &isp->isp_prev;
++ int ret;
++
++ spin_lock_init(&prev->lock);
++ isppreview_init_params(prev);
++
++ ret = isppreview_init_entities(prev);
++ if (ret < 0)
++ goto out;
++
++out:
++ if (ret)
++ isp_preview_cleanup(isp);
++
++ return ret;
++}
++
+diff --git a/drivers/media/video/isp/isppreview.h b/drivers/media/video/isp/isppreview.h
+new file mode 100644
+index 0000000..4ecee66
+--- /dev/null
++++ b/drivers/media/video/isp/isppreview.h
+@@ -0,0 +1,257 @@
++/*
++ * isppreview.h
++ *
++ * Driver header file for Preview module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Senthilvadivu Guruswamy <svadivu@ti.com>
++ * Pallavi Kulkarni <p-kulkarni@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_PREVIEW_H
++#define OMAP_ISP_PREVIEW_H
++
++#include <mach/isp_user.h>
++#include "ispvideo.h"
++/* Isp query control structure */
++
++#define ISPPRV_BRIGHT_STEP 0x1
++#define ISPPRV_BRIGHT_DEF 0x1
++#define ISPPRV_BRIGHT_LOW 0x0
++#define ISPPRV_BRIGHT_HIGH 0xFF
++#define ISPPRV_BRIGHT_UNITS 0x1
++
++#define ISPPRV_CONTRAST_STEP 0x1
++#define ISPPRV_CONTRAST_DEF 0x10
++#define ISPPRV_CONTRAST_LOW 0x0
++#define ISPPRV_CONTRAST_HIGH 0xFF
++#define ISPPRV_CONTRAST_UNITS 0x1
++
++#define NO_AVE 0x0
++#define AVE_2_PIX 0x1
++#define AVE_4_PIX 0x2
++#define AVE_8_PIX 0x3
++
++/* Features list */
++#define PREV_LUMA_ENHANCE ISP_PREV_LUMAENH
++#define PREV_INVERSE_ALAW ISP_PREV_INVALAW
++#define PREV_HORZ_MEDIAN_FILTER ISP_PREV_HRZ_MED
++#define PREV_CFA ISP_PREV_CFA
++#define PREV_CHROMA_SUPPRESS ISP_PREV_CHROMA_SUPP
++#define PREV_WB ISP_PREV_WB
++#define PREV_BLKADJ ISP_PREV_BLKADJ
++#define PREV_RGB2RGB ISP_PREV_RGB2RGB
++#define PREV_COLOR_CONV ISP_PREV_COLOR_CONV
++#define PREV_YCLIMITS ISP_PREV_YC_LIMIT
++#define PREV_DEFECT_COR ISP_PREV_DEFECT_COR
++#define PREV_GAMMA_BYPASS ISP_PREV_GAMMABYPASS
++#define PREV_DARK_FRAME_CAPTURE ISP_PREV_DRK_FRM_CAPTURE
++#define PREV_DARK_FRAME_SUBTRACT ISP_PREV_DRK_FRM_SUBTRACT
++#define PREV_LENS_SHADING ISP_PREV_LENS_SHADING
++#define PREV_NOISE_FILTER ISP_PREV_NF
++#define PREV_GAMMA ISP_PREV_GAMMA
++
++#define PREV_CONTRAST (1 << 17)
++#define PREV_BRIGHTNESS (1 << 18)
++#define PREV_AVERAGER (1 << 19)
++#define PREV_FEATURES_END (1 << 20)
++
++enum preview_input_entity {
++ PREVIEW_INPUT_NONE,
++ PREVIEW_INPUT_CCDC,
++ PREVIEW_INPUT_MEMORY,
++};
++
++#define PREVIEW_OUTPUT_RESIZER (1 << 1)
++#define PREVIEW_OUTPUT_MEMORY (1 << 2)
++
++/*
++ * Configure byte layout of YUV image
++ */
++enum preview_ycpos_mode {
++ YCPOS_YCrYCb = 0,
++ YCPOS_YCbYCr = 1,
++ YCPOS_CbYCrY = 2,
++ YCPOS_CrYCbY = 3
++};
++
++/*
++ * struct prev_size_params - Structure for size parameters.
++ * @hstart: Starting pixel.
++ * @vstart: Starting line.
++ * @hsize: Width of input image.
++ * @vsize: Height of input image.
++ * @pixsize: Pixel size of the image in terms of bits.
++ * @in_pitch: Line offset of input image.
++ * @out_pitch: Line offset of output image.
++ */
++struct prev_size_params {
++ unsigned int hstart;
++ unsigned int vstart;
++ unsigned int hsize;
++ unsigned int vsize;
++ unsigned char pixsize;
++ unsigned short in_pitch;
++ unsigned short out_pitch;
++};
++
++/*
++ * struct prev_rgb2ycbcr_coeffs - Structure RGB2YCbCr parameters.
++ * @coeff: Color conversion gains in 3x3 matrix.
++ * @offset: Color conversion offsets.
++ */
++struct prev_rgb2ycbcr_coeffs {
++ short coeff[RGB_MAX][RGB_MAX];
++ short offset[RGB_MAX];
++};
++
++/*
++ * struct prev_darkfrm_params - Structure for Dark frame suppression.
++ * @addr: Memory start address.
++ * @offset: Line offset.
++ */
++struct prev_darkfrm_params {
++ u32 addr;
++ u32 offset;
++};
++
++/*
++ * struct prev_params - Structure for all configuration
++ * @features: Set of features enabled.
++ * @cfa: CFA coefficients.
++ * @csup: Chroma suppression coefficients.
++ * @ytable: Pointer to Luma enhancement coefficients.
++ * @nf: Noise filter coefficients.
++ * @dcor: Noise filter coefficients.
++ * @gtable: Gamma coefficients.
++ * @wbal: White Balance parameters.
++ * @blk_adj: Black adjustment parameters.
++ * @rgb2rgb: RGB blending parameters.
++ * @rgb2ycbcr: RGB to ycbcr parameters.
++ * @hmf_params: Horizontal median filter.
++ * @size_params: Size parameters.
++ * @drkf_params: Darkframe parameters.
++ * @yclimit: YC limits parameters.
++ * @lens_shading_shift:
++ * @average: Downsampling rate for averager.
++ * @contrast: Contrast.
++ * @brightness: Brightness.
++ */
++struct prev_params {
++ u32 features;
++ enum preview_ycpos_mode pix_fmt;
++ struct ispprev_cfa cfa;
++ struct ispprev_csup csup;
++ struct ispprev_luma luma;
++ struct ispprev_nf nf;
++ struct ispprev_dcor dcor;
++ struct ispprev_gtables gamma;
++ struct ispprev_wbal wbal;
++ struct ispprev_blkadj blk_adj;
++ struct ispprev_rgbtorgb rgb2rgb;
++ struct ispprev_csc rgb2ycbcr;
++ struct ispprev_hmed hmed;
++ struct prev_size_params size_params;
++ struct prev_darkfrm_params drkf_params;
++ struct ispprev_yclimit yclimit;
++ u32 lens_shading_shift;
++ u8 average;
++ u8 contrast;
++ u8 brightness;
++};
++
++/*
++ * struct isptables_update - Structure for Table Configuration.
++ * @update: Specifies which tables should be updated.
++ * @flag: Specifies which tables should be enabled.
++ * @prev_nf: Pointer to structure for Noise Filter
++ * @lsc: Pointer to LSC gain table. (currently not used)
++ * @red_gamma: Pointer to red gamma correction table.
++ * @green_gamma: Pointer to green gamma correction table.
++ * @blue_gamma: Pointer to blue gamma correction table.
++ * @prev_cfa: Pointer to color filter array configuration.
++ * @prev_wbal: Pointer to colour and digital gain configuration.
++ */
++struct isptables_update {
++ u32 update;
++ u32 flag;
++ struct ispprev_nf *nf;
++ u32 *lsc;
++ struct ispprev_gtables *gamma;
++ struct ispprev_cfa *cfa;
++ struct ispprev_wbal *wbal;
++};
++
++/* Sink and source previewer pads */
++#define PREV_PAD_SINK 0
++#define PREV_PAD_SOURCE 1
++#define PREV_PADS_NUM 2
++
++/*
++ * struct isp_prev_device - Structure for storing ISP Preview module information
++ * @subdev: V4L2 subdevice
++ * @pads: Media entity pads
++ * @formats: Active formats at the subdev pad
++ * @input: Module currently connected to the input pad
++ * @output: Bitmask of the active output
++ * @params: Module configuration data
++ * @shadow_update: If set, update the hardware configured in the next interrupt
++ * @enabled: Whether the preview engine is enabled
++ * @lock: Shadow update lock
++ * @update: Bitmask of the parameters to be updated
++ * @error: A hardware error occured during capture
++ *
++ * This structure is used to store the OMAP ISP Preview module Information.
++ */
++struct isp_prev_device {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pads[PREV_PADS_NUM];
++ struct v4l2_mbus_framefmt formats[PREV_PADS_NUM];
++
++ enum preview_input_entity input;
++ unsigned int output;
++ struct isp_video video_in;
++ struct isp_video video_out;
++ unsigned int error;
++
++ struct prev_params params;
++ unsigned int shadow_update:1,
++ underrun:1;
++ enum isp_pipeline_stream_state state;
++ spinlock_t lock;
++ u32 update;
++};
++
++struct isp_device;
++
++int isp_preview_init(struct isp_device *isp);
++void isp_preview_cleanup(struct isp_device *isp);
++
++int isp_preview_register_entities(struct isp_prev_device *prv,
++ struct v4l2_device *vdev);
++void isp_preview_unregister_entities(struct isp_prev_device *prv);
++
++void isppreview_isr_frame_sync(struct isp_prev_device *prev);
++void isppreview_isr(struct isp_prev_device *prev);
++
++void isppreview_config_whitebalance(struct isp_prev_device *isp_prev,
++ const void *prev_wbal);
++
++int isppreview_busy(struct isp_prev_device *isp_prev);
++
++void isppreview_save_context(struct isp_device *isp);
++
++void isppreview_restore_context(struct isp_device *isp);
++
++#endif/* OMAP_ISP_PREVIEW_H */
+diff --git a/drivers/media/video/isp/ispqueue.c b/drivers/media/video/isp/ispqueue.c
+new file mode 100644
+index 0000000..e9c17f8
+--- /dev/null
++++ b/drivers/media/video/isp/ispqueue.c
+@@ -0,0 +1,1077 @@
++/*
++ * ispqueue.c - ISP video buffers queue handling
++ *
++ * Copyright (C) 2010 Nokia.
++ *
++ * Contributors:
++ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/poll.h>
++#include <linux/scatterlist.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++
++#include "ispqueue.h"
++
++/* -----------------------------------------------------------------------------
++ * Video buffers management
++ */
++
++/*
++ * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
++ *
++ * The typical operation required here is Cache Invalidation across
++ * the (user space) buffer address range. And this _must_ be done
++ * at QBUF stage (and *only* at QBUF).
++ *
++ * We try to use optimal cache invalidation function:
++ * - dmac_map_area:
++ * - used when the number of pages are _low_.
++ * - it becomes quite slow as the number of pages increase.
++ * - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
++ * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
++ *
++ * - flush_cache_all:
++ * - used when the number of pages are _high_.
++ * - time taken in the range of 500-900 us.
++ * - has a higher penalty but, as whole dcache + icache is invalidated
++ */
++/*
++ * FIXME: dmac_inv_range crashes randomly on the user space buffer
++ * address. Fall back to flush_cache_all for now.
++ */
++#define ISP_CACHE_FLUSH_PAGES_MAX 0
++
++static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
++{
++ if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
++ buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
++ flush_cache_all();
++ else {
++ dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
++ DMA_FROM_DEVICE);
++ outer_inv_range(buf->vbuf.m.userptr,
++ buf->vbuf.m.userptr + buf->vbuf.length);
++ }
++}
++
++/*
++ * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
++ *
++ * Lock the VMA underlying the given buffer into memory. This avoids the
++ * userspace buffer mapping from being swapped out, making VIPT cache handling
++ * easier.
++ *
++ * Note that the pages will not be freed as the buffers have been locked to
++ * memory using by a call to get_user_pages(), but the userspace mapping could
++ * still disappear if the VMAs are not locked. This is caused by the memory
++ * management code trying to be as lock-less as possible, which results in the
++ * userspace mapping manager not finding out that the pages are locked under
++ * some conditions.
++ */
++static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
++{
++ struct vm_area_struct *vma;
++ unsigned int newflags;
++ unsigned long start;
++ unsigned long end;
++ int ret = 0;
++
++ if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
++ return 0;
++
++ /* We can be called from workqueue context if the current task dies to
++ * unlock the VMA. In that case there's no current memory management
++ * context so unlocking can't be performed, but the VMA has been or is
++ * getting destroyed anyway so it doesn't really matter.
++ */
++ if (!current || !current->mm)
++ return lock ? -EINVAL : 0;
++
++ start = buf->vbuf.m.userptr;
++ end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
++
++ down_write(&current->mm->mmap_sem);
++ spin_lock(&current->mm->page_table_lock);
++
++ vma = find_vma(current->mm, start);
++ if (vma == NULL || vma->vm_end < end) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ newflags = vma->vm_flags | VM_LOCKED;
++ if (!lock)
++ newflags &= ~VM_LOCKED;
++
++ vma->vm_flags = newflags;
++ buf->vm_flags = newflags;
++
++out:
++ spin_unlock(&current->mm->page_table_lock);
++ up_write(&current->mm->mmap_sem);
++ return ret;
++}
++
++/*
++ * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
++ *
++ * Iterate over the vmalloc'ed area and create a scatter list entry for every
++ * page.
++ */
++static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
++{
++ struct scatterlist *sglist;
++ unsigned int npages;
++ unsigned int i;
++ void *addr;
++
++ addr = buf->vaddr;
++ npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
++
++ sglist = vmalloc(npages * sizeof(*sglist));
++ if (sglist == NULL)
++ return -ENOMEM;
++
++ sg_init_table(sglist, npages);
++
++ for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
++ struct page *page = vmalloc_to_page(addr);
++
++ if (page == NULL || PageHighMem(page)) {
++ vfree(sglist);
++ return -EINVAL;
++ }
++
++ sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
++ }
++
++ buf->sglen = npages;
++ buf->sglist = sglist;
++
++ return 0;
++}
++
++/*
++ * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
++ *
++ * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
++ */
++static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
++{
++ struct scatterlist *sglist;
++ unsigned int offset = buf->offset;
++ unsigned int i;
++
++ sglist = vmalloc(buf->npages * sizeof(*sglist));
++ if (sglist == NULL)
++ return -ENOMEM;
++
++ sg_init_table(sglist, buf->npages);
++
++ for (i = 0; i < buf->npages; ++i) {
++ if (PageHighMem(buf->pages[i])) {
++ vfree(sglist);
++ return -EINVAL;
++ }
++
++ sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
++ offset);
++ offset = 0;
++ }
++
++ buf->sglen = buf->npages;
++ buf->sglist = sglist;
++
++ return 0;
++}
++
++/*
++ * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
++ *
++ * Create a scatter list of physically contiguous pages starting at the buffer
++ * memory physical address.
++ */
++static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
++{
++ struct scatterlist *sglist;
++ unsigned int offset = buf->offset;
++ unsigned long pfn = buf->paddr >> PAGE_SHIFT;
++ unsigned int i;
++
++ sglist = vmalloc(buf->npages * sizeof(*sglist));
++ if (sglist == NULL)
++ return -ENOMEM;
++
++ sg_init_table(sglist, buf->npages);
++
++ for (i = 0; i < buf->npages; ++i, ++pfn) {
++ sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
++ offset);
++ /* PFNMAP buffers will not get DMA-mapped, set the DMA address
++ * manually.
++ */
++ sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
++ offset = 0;
++ }
++
++ buf->sglen = buf->npages;
++ buf->sglist = sglist;
++
++ return 0;
++}
++
++/*
++ * isp_video_buffer_cleanup - Release pages for a userspace VMA.
++ *
++ * Release pages locked by a call isp_video_buffer_prepare_user and free the
++ * pages table.
++ */
++static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
++{
++ enum dma_data_direction direction;
++ unsigned int i;
++
++ if (buf->queue->ops->buffer_cleanup)
++ buf->queue->ops->buffer_cleanup(buf);
++
++ if (!(buf->vm_flags & VM_PFNMAP)) {
++ direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
++ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++ dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
++ direction);
++ }
++
++ vfree(buf->sglist);
++ buf->sglist = NULL;
++ buf->sglen = 0;
++
++ if (buf->pages != NULL) {
++ isp_video_buffer_lock_vma(buf, 0);
++
++ for (i = 0; i < buf->npages; ++i)
++ page_cache_release(buf->pages[i]);
++
++ vfree(buf->pages);
++ buf->pages = NULL;
++ }
++
++ buf->npages = 0;
++}
++
++/*
++ * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
++ *
++ * This function creates a list of pages for a userspace VMA. The number of
++ * pages is first computed based on the buffer size, and pages are then
++ * retrieved by a call to get_user_pages.
++ *
++ * Pages are pinned to memory by get_user_pages, making them available for DMA
++ * transfers. However, due to memory management optimization, it seems the
++ * get_user_pages doesn't guarantee that the pinned pages will not be written
++ * to swap and removed from the userspace mapping(s). When this happens, a page
++ * fault can be generated when accessing those unmapped pages.
++ *
++ * If the fault is triggered by a page table walk caused by VIPT cache
++ * management operations, the page fault handler might oops if the MM semaphore
++ * is held, as it can't handle kernel page faults in that case. To fix that, a
++ * fixup entry needs to be added to the cache management code, or the userspace
++ * VMA must be locked to avoid removing pages from the userspace mapping in the
++ * first place.
++ *
++ * If the number of pages retrieved is smaller than the number required by the
++ * buffer size, the function returns -EFAULT.
++ */
++static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
++{
++ unsigned long data;
++ unsigned int first;
++ unsigned int last;
++ int ret;
++
++ data = buf->vbuf.m.userptr;
++ first = (data & PAGE_MASK) >> PAGE_SHIFT;
++ last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
++
++ buf->offset = data & ~PAGE_MASK;
++ buf->npages = last - first + 1;
++ buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
++ if (buf->pages == NULL)
++ return -ENOMEM;
++
++ down_read(&current->mm->mmap_sem);
++ ret = get_user_pages(current, current->mm, data & PAGE_MASK,
++ buf->npages,
++ buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
++ buf->pages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (ret != buf->npages) {
++ buf->npages = ret;
++ isp_video_buffer_cleanup(buf);
++ return -EFAULT;
++ }
++
++ ret = isp_video_buffer_lock_vma(buf, 1);
++ if (ret < 0)
++ isp_video_buffer_cleanup(buf);
++
++ return ret;
++}
++
++/*
++ * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
++ *
++ * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
++ * memory and if they span a single VMA. The single VMA requirement has already
++ * been checked by isp_video_buffer_prepare_vm_flags, so it's safe to just
++ * follow the page frame numbers.
++ *
++ * Return 0 if the buffer is valid, or -EFAULT otherwise.
++ */
++static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
++{
++ struct vm_area_struct *vma;
++ unsigned long prev_pfn;
++ unsigned long this_pfn;
++ unsigned long start;
++ unsigned long end;
++ dma_addr_t pa;
++ int ret = -EFAULT;
++
++ start = buf->vbuf.m.userptr;
++ end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
++
++ buf->offset = start & ~PAGE_MASK;
++ buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
++ buf->pages = NULL;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, start);
++ if (vma == NULL)
++ goto done;
++
++ for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
++ ret = follow_pfn(vma, start, &this_pfn);
++ if (ret)
++ goto done;
++
++ if (prev_pfn == 0)
++ pa = this_pfn << PAGE_SHIFT;
++ else if (this_pfn != prev_pfn + 1) {
++ ret = -EFAULT;
++ goto done;
++ }
++
++ prev_pfn = this_pfn;
++ }
++
++ buf->paddr = pa + buf->offset;
++ ret = 0;
++
++done:
++ up_read(&current->mm->mmap_sem);
++ return ret;
++}
++
++/*
++ * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
++ *
++ * This function locates the VMA for the buffer's userspace address and updates
++ * the buffer structure with its flags. If the buffer spans several VMAs, or if
++ * no VMA can be found, return -EFAULT.
++ */
++static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
++{
++ struct vm_area_struct *vma;
++ unsigned long start;
++ unsigned long end;
++ int ret = -EFAULT;
++
++ start = buf->vbuf.m.userptr;
++ end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, start);
++ if (vma == NULL || vma->vm_end < end)
++ goto done;
++
++ buf->vm_flags = vma->vm_flags;
++ ret = 0;
++
++done:
++ up_read(&current->mm->mmap_sem);
++ return ret;
++}
++
++/*
++ * isp_video_buffer_prepare - Make a buffer ready for operation
++ *
++ * Preparing a buffer involves:
++ *
++ * - validating VMAs (userspace buffers only)
++ * - locking pages and VMAs into memory (userspace buffers only)
++ * - building page and scatter-gather lists
++ * - mapping buffers for DMA operation
++ * - performing driver-specific preparation
++ *
++ * The function must be called in userspace context with a valid mm context
++ * (this excludes cleanup paths such as sys_close when the userspace process
++ * segfaults).
++ */
++static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
++{
++ enum dma_data_direction direction;
++ int ret;
++
++ switch (buf->vbuf.memory) {
++ case V4L2_MEMORY_MMAP:
++ ret = isp_video_buffer_sglist_kernel(buf);
++ break;
++
++ case V4L2_MEMORY_USERPTR:
++ ret = isp_video_buffer_prepare_vm_flags(buf);
++ if (ret < 0)
++ return ret;
++
++ if (buf->vm_flags & VM_PFNMAP) {
++ ret = isp_video_buffer_prepare_pfnmap(buf);
++ if (ret < 0)
++ return ret;
++
++ ret = isp_video_buffer_sglist_pfnmap(buf);
++ } else {
++ ret = isp_video_buffer_prepare_user(buf);
++ if (ret < 0)
++ return ret;
++
++ ret = isp_video_buffer_sglist_user(buf);
++ }
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ if (ret < 0)
++ goto done;
++
++ if (!(buf->vm_flags & VM_PFNMAP)) {
++ direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
++ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++ ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
++ direction);
++ if (ret != buf->sglen) {
++ ret = -EFAULT;
++ goto done;
++ }
++ }
++
++ if (buf->queue->ops->buffer_prepare)
++ ret = buf->queue->ops->buffer_prepare(buf);
++
++done:
++ if (ret < 0) {
++ isp_video_buffer_cleanup(buf);
++ return ret;
++ }
++
++ return ret;
++}
++
++/*
++ * isp_video_queue_query - Query the status of a given buffer
++ *
++ * Locking: must be called with the queue lock held.
++ */
++static void isp_video_buffer_query(struct isp_video_buffer *buf,
++ struct v4l2_buffer *vbuf)
++{
++ memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
++
++ if (buf->vma_use_count)
++ vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ switch (buf->state) {
++ case ISP_BUF_STATE_ERROR:
++ vbuf->flags |= V4L2_BUF_FLAG_ERROR;
++ case ISP_BUF_STATE_DONE:
++ vbuf->flags |= V4L2_BUF_FLAG_DONE;
++ case ISP_BUF_STATE_QUEUED:
++ case ISP_BUF_STATE_ACTIVE:
++ vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
++ break;
++ case ISP_BUF_STATE_IDLE:
++ default:
++ break;
++ }
++}
++
++/*
++ * isp_video_buffer_wait - Wait for a buffer to be ready
++ *
++ * In non-blocking mode, return immediately with 0 if the buffer is ready or
++ * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
++ *
++ * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
++ * queue using the same condition.
++ */
++static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
++{
++ if (nonblocking) {
++ return (buf->state != ISP_BUF_STATE_QUEUED &&
++ buf->state != ISP_BUF_STATE_ACTIVE)
++ ? 0 : -EAGAIN;
++ }
++
++ return wait_event_interruptible(buf->wait,
++ buf->state != ISP_BUF_STATE_QUEUED &&
++ buf->state != ISP_BUF_STATE_ACTIVE);
++}
++
++/* -----------------------------------------------------------------------------
++ * Queue management
++ */
++
++/*
++ * isp_video_queue_free - Free video buffers memory
++ *
++ * Buffers can only be freed if the queue isn't streaming and if no buffer is
++ * mapped to userspace. Return -EBUSY if those conditions aren't statisfied.
++ *
++ * This function must be called with the queue lock held.
++ */
++static int isp_video_queue_free(struct isp_video_queue *queue)
++{
++ unsigned int i;
++
++ if (queue->streaming)
++ return -EBUSY;
++
++ for (i = 0; i < queue->count; ++i) {
++ if (queue->buffers[i]->vma_use_count != 0)
++ return -EBUSY;
++ }
++
++ for (i = 0; i < queue->count; ++i) {
++ struct isp_video_buffer *buf = queue->buffers[i];
++
++ isp_video_buffer_cleanup(buf);
++
++ vfree(buf->vaddr);
++ buf->vaddr = NULL;
++
++ kfree(buf);
++ queue->buffers[i] = NULL;
++ }
++
++ INIT_LIST_HEAD(&queue->queue);
++ queue->count = 0;
++ return 0;
++}
++
++/*
++ * isp_video_queue_alloc - Allocate video buffers memory
++ *
++ * This function must be called with the queue lock held.
++ */
++static int isp_video_queue_alloc(struct isp_video_queue *queue,
++ unsigned int nbuffers,
++ unsigned int size, enum v4l2_memory memory)
++{
++ struct isp_video_buffer *buf;
++ unsigned int i;
++ void *mem;
++ int ret;
++
++ /* Start by freeing the buffers. */
++ ret = isp_video_queue_free(queue);
++ if (ret < 0)
++ return ret;
++
++ /* Bail out of no buffers should be allocated. */
++ if (nbuffers == 0)
++ return 0;
++
++ /* Initialize the allocated buffers. */
++ for (i = 0; i < nbuffers; ++i) {
++ buf = kzalloc(queue->bufsize, GFP_KERNEL);
++ if (buf == NULL)
++ break;
++
++ if (memory == V4L2_MEMORY_MMAP) {
++ /* Allocate video buffers memory for mmap mode. Align
++ * the size to the page size.
++ */
++ mem = vmalloc_32_user(PAGE_ALIGN(size));
++ if (mem == NULL) {
++ kfree(buf);
++ break;
++ }
++
++ buf->vbuf.m.offset = i * PAGE_ALIGN(size);
++ buf->vaddr = mem;
++ }
++
++ buf->vbuf.index = i;
++ buf->vbuf.length = size;
++ buf->vbuf.type = queue->type;
++ buf->vbuf.field = V4L2_FIELD_NONE;
++ buf->vbuf.memory = memory;
++
++ buf->queue = queue;
++ init_waitqueue_head(&buf->wait);
++
++ queue->buffers[i] = buf;
++ }
++
++ if (i == 0)
++ return -ENOMEM;
++
++ queue->count = i;
++ return nbuffers;
++}
++
++/**
++ * isp_video_queue_cleanup - Clean up the video buffers queue
++ * @queue: Video buffers queue
++ *
++ * Free all allocated resources and clean up the video buffers queue. The queue
++ * must not be busy (no ongoing video stream) and buffers must have been
++ * unmapped.
++ *
++ * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
++ * unmapped.
++ */
++int isp_video_queue_cleanup(struct isp_video_queue *queue)
++{
++ return isp_video_queue_free(queue);
++}
++
++/**
++ * isp_video_queue_init - Initialize the video buffers queue
++ * @queue: Video buffers queue
++ * @type: V4L2 buffer type (capture or output)
++ * @ops: Driver-specific queue operations
++ * @dev: Device used for DMA operations
++ * @bufsize: Size of the driver-specific buffer structure
++ *
++ * Initialize the video buffers queue with the supplied parameters.
++ *
++ * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
++ * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
++ *
++ * Buffer objects will be allocated using the given buffer size to allow room
++ * for driver-specific fields. Driver-specific buffer structures must start
++ * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
++ * structure must pass the size of the isp_video_buffer structure in the bufsize
++ * parameter.
++ *
++ * Return 0 on success.
++ */
++int isp_video_queue_init(struct isp_video_queue *queue, enum v4l2_buf_type type,
++ const struct isp_video_queue_operations *ops,
++ struct device *dev, unsigned int bufsize)
++{
++ INIT_LIST_HEAD(&queue->queue);
++ mutex_init(&queue->lock);
++ spin_lock_init(&queue->irqlock);
++
++ queue->type = type;
++ queue->ops = ops;
++ queue->dev = dev;
++ queue->bufsize = bufsize;
++
++ return 0;
++}
++
++/* -----------------------------------------------------------------------------
++ * V4L2 operations
++ */
++
++/**
++ * isp_video_queue_reqbufs - Allocate video buffers memory
++ *
++ * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
++ * allocated video buffer objects and, for MMAP buffers, buffer memory.
++ *
++ * If the number of buffers is 0, all buffers are freed and the function returns
++ * without performing any allocation.
++ *
++ * If the number of buffers is not 0, currently allocated buffers (if any) are
++ * freed and the requested number of buffers are allocated. Depending on
++ * driver-specific requirements and on memory availability, a number of buffer
++ * smaller or bigger than requested can be allocated. This isn't considered as
++ * an error.
++ *
++ * Return 0 on success or one of the following error codes:
++ *
++ * -EINVAL if the buffer type or index are invalid
++ * -EBUSY if the queue is busy (streaming or buffers mapped)
++ * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
++ */
++int isp_video_queue_reqbufs(struct isp_video_queue *queue,
++ struct v4l2_requestbuffers *rb)
++{
++ unsigned int nbuffers = rb->count;
++ unsigned int size;
++ int ret;
++
++ if (rb->type != queue->type)
++ return -EINVAL;
++
++ queue->ops->queue_prepare(queue, &nbuffers, &size);
++ if (size == 0)
++ return -EINVAL;
++
++ nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
++
++ mutex_lock(&queue->lock);
++
++ ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
++ if (ret < 0)
++ goto done;
++
++ rb->count = ret;
++ ret = 0;
++
++done:
++ mutex_unlock(&queue->lock);
++ return ret;
++}
++
++/**
++ * isp_video_queue_querybuf - Query the status of a buffer in a queue
++ *
++ * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
++ * returns the status of a given video buffer.
++ *
++ * Return 0 on success or -EINVAL if the buffer type or index are invalid.
++ */
++int isp_video_queue_querybuf(struct isp_video_queue *queue,
++ struct v4l2_buffer *vbuf)
++{
++ struct isp_video_buffer *buf;
++ int ret = 0;
++
++ if (vbuf->type != queue->type)
++ return -EINVAL;
++
++ mutex_lock(&queue->lock);
++
++ if (vbuf->index >= queue->count) {
++ ret = -EINVAL;
++ goto done;
++ }
++
++ buf = queue->buffers[vbuf->index];
++ isp_video_buffer_query(buf, vbuf);
++
++done:
++ mutex_unlock(&queue->lock);
++ return ret;
++}
++
++/**
++ * isp_video_queue_qbuf - Queue a buffer
++ *
++ * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
++ *
++ * The v4l2_buffer structure passed from userspace is first sanity tested. If
++ * sane, the buffer is then processed and added to the main queue and, if the
++ * queue is streaming, to the IRQ queue.
++ *
++ * Before being enqueued, USERPTR buffers are checked for address changes. If
++ * the buffer has a different userspace address, the old memory area is unlocked
++ * and the new memory area is locked.
++ */
++int isp_video_queue_qbuf(struct isp_video_queue *queue,
++ struct v4l2_buffer *vbuf)
++{
++ struct isp_video_buffer *buf;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (vbuf->type != queue->type)
++ goto done;
++
++ mutex_lock(&queue->lock);
++
++ if (vbuf->index >= queue->count)
++ goto done;
++
++ buf = queue->buffers[vbuf->index];
++
++ if (vbuf->memory != buf->vbuf.memory)
++ goto done;
++
++ if (buf->state != ISP_BUF_STATE_IDLE)
++ goto done;
++
++ if (vbuf->memory == V4L2_MEMORY_USERPTR &&
++ vbuf->m.userptr != buf->vbuf.m.userptr) {
++ isp_video_buffer_cleanup(buf);
++ buf->vbuf.m.userptr = vbuf->m.userptr;
++ buf->prepared = 0;
++ }
++
++ if (!buf->prepared) {
++ ret = isp_video_buffer_prepare(buf);
++ if (ret < 0)
++ goto done;
++ buf->prepared = 1;
++ }
++
++ isp_video_buffer_cache_sync(buf);
++
++ buf->state = ISP_BUF_STATE_QUEUED;
++ list_add_tail(&buf->stream, &queue->queue);
++
++ if (queue->streaming) {
++ spin_lock_irqsave(&queue->irqlock, flags);
++ queue->ops->buffer_queue(buf);
++ spin_unlock_irqrestore(&queue->irqlock, flags);
++ }
++
++ ret = 0;
++
++done:
++ mutex_unlock(&queue->lock);
++ return ret;
++}
++
++/**
++ * isp_video_queue_dqbuf - Dequeue a buffer
++ *
++ * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
++ *
++ * The v4l2_buffer structure passed from userspace is first sanity tested. If
++ * sane, the buffer is then processed and added to the main queue and, if the
++ * queue is streaming, to the IRQ queue.
++ *
++ * Before being enqueued, USERPTR buffers are checked for address changes. If
++ * the buffer has a different userspace address, the old memory area is unlocked
++ * and the new memory area is locked.
++ */
++int isp_video_queue_dqbuf(struct isp_video_queue *queue,
++ struct v4l2_buffer *vbuf, int nonblocking)
++{
++ struct isp_video_buffer *buf;
++ int ret;
++
++ if (vbuf->type != queue->type)
++ return -EINVAL;
++
++ mutex_lock(&queue->lock);
++
++ if (list_empty(&queue->queue)) {
++ ret = -EINVAL;
++ goto done;
++ }
++
++ buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
++ ret = isp_video_buffer_wait(buf, nonblocking);
++ if (ret < 0)
++ goto done;
++
++ list_del(&buf->stream);
++
++ isp_video_buffer_query(buf, vbuf);
++ buf->state = ISP_BUF_STATE_IDLE;
++ vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
++
++done:
++ mutex_unlock(&queue->lock);
++ return ret;
++}
++
++/**
++ * isp_video_queue_streamon - Start streaming
++ *
++ * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
++ * starts streaming on the queue and calls the buffer_queue operation for all
++ * queued buffers.
++ *
++ * Return 0 on success.
++ */
++int isp_video_queue_streamon(struct isp_video_queue *queue)
++{
++ struct isp_video_buffer *buf;
++ unsigned long flags;
++
++ mutex_lock(&queue->lock);
++
++ if (queue->streaming)
++ goto done;
++
++ queue->streaming = 1;
++
++ spin_lock_irqsave(&queue->irqlock, flags);
++ list_for_each_entry(buf, &queue->queue, stream)
++ queue->ops->buffer_queue(buf);
++ spin_unlock_irqrestore(&queue->irqlock, flags);
++
++done:
++ mutex_unlock(&queue->lock);
++ return 0;
++}
++
++/**
++ * isp_video_queue_streamoff - Stop streaming
++ *
++ * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
++ * stops streaming on the queue and wakes up all the buffers.
++ *
++ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
++ * delayed works before calling this function to make sure no buffer will be
++ * touched by the driver and/or hardware.
++ */
++void isp_video_queue_streamoff(struct isp_video_queue *queue)
++{
++ struct isp_video_buffer *buf;
++ unsigned long flags;
++ unsigned int i;
++
++ mutex_lock(&queue->lock);
++
++ if (!queue->streaming)
++ goto done;
++
++ queue->streaming = 0;
++
++ spin_lock_irqsave(&queue->irqlock, flags);
++ for (i = 0; i < queue->count; ++i) {
++ buf = queue->buffers[i];
++
++ if (buf->state == ISP_BUF_STATE_ACTIVE)
++ wake_up(&buf->wait);
++
++ buf->state = ISP_BUF_STATE_IDLE;
++ }
++ spin_unlock_irqrestore(&queue->irqlock, flags);
++
++ INIT_LIST_HEAD(&queue->queue);
++
++done:
++ mutex_unlock(&queue->lock);
++}
++
++/**
++ * isp_video_queue_mmap - Map buffers to userspace
++ *
++ * This function is intended to be used as an mmap() file operation handler. It
++ * maps a buffer to userspace based on the VMA offset.
++ *
++ * Only buffers of memory type MMAP are supported.
++ */
++static void isp_video_queue_vm_open(struct vm_area_struct *vma)
++{
++ struct isp_video_buffer *buf = vma->vm_private_data;
++
++ buf->vma_use_count++;
++}
++
++static void isp_video_queue_vm_close(struct vm_area_struct *vma)
++{
++ struct isp_video_buffer *buf = vma->vm_private_data;
++
++ buf->vma_use_count--;
++}
++
++static const struct vm_operations_struct isp_video_queue_vm_ops = {
++ .open = isp_video_queue_vm_open,
++ .close = isp_video_queue_vm_close,
++};
++
++int isp_video_queue_mmap(struct isp_video_queue *queue,
++ struct vm_area_struct *vma)
++{
++ struct isp_video_buffer *uninitialized_var(buf);
++ unsigned long size;
++ unsigned int i;
++ int ret = 0;
++
++ mutex_lock(&queue->lock);
++
++ for (i = 0; i < queue->count; ++i) {
++ buf = queue->buffers[i];
++ if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
++ break;
++ }
++
++ if (i == queue->count) {
++ ret = -EINVAL;
++ goto done;
++ }
++
++ size = vma->vm_end - vma->vm_start;
++
++ if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
++ size != PAGE_ALIGN(buf->vbuf.length)) {
++ ret = -EINVAL;
++ goto done;
++ }
++
++ ret = remap_vmalloc_range(vma, buf->vaddr, 0);
++ if (ret < 0)
++ goto done;
++
++ vma->vm_ops = &isp_video_queue_vm_ops;
++ vma->vm_private_data = buf;
++ isp_video_queue_vm_open(vma);
++
++done:
++ mutex_unlock(&queue->lock);
++ return ret;
++}
++
++/**
++ * isp_video_queue_poll - Poll video queue state
++ *
++ * This function is intended to be used as a poll() file operation handler. It
++ * polls the state of the video buffer at the front of the queue and returns an
++ * events mask.
++ *
++ * If no buffer is present at the front of the queue, POLLERR is returned.
++ */
++unsigned int isp_video_queue_poll(struct isp_video_queue *queue,
++ struct file *file, poll_table *wait)
++{
++ struct isp_video_buffer *buf;
++ unsigned int mask = 0;
++
++ mutex_lock(&queue->lock);
++ if (list_empty(&queue->queue)) {
++ mask |= POLLERR;
++ goto done;
++ }
++ buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
++
++ poll_wait(file, &buf->wait, wait);
++ if (buf->state == ISP_BUF_STATE_DONE ||
++ buf->state == ISP_BUF_STATE_ERROR) {
++ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ mask |= POLLIN | POLLRDNORM;
++ else
++ mask |= POLLOUT | POLLWRNORM;
++ }
++
++done:
++ mutex_unlock(&queue->lock);
++ return mask;
++}
++
+diff --git a/drivers/media/video/isp/ispqueue.h b/drivers/media/video/isp/ispqueue.h
+new file mode 100644
+index 0000000..3f96adc
+--- /dev/null
++++ b/drivers/media/video/isp/ispqueue.h
+@@ -0,0 +1,175 @@
++/*
++ * ispqueue.h - ISP video buffers queue handling
++ *
++ * Copyright (C) 2010 Nokia.
++ *
++ * Contributors:
++ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#ifndef __ISP_QUEUE_H
++#define __ISP_QUEUE_H
++
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/videodev2.h>
++#include <linux/wait.h>
++
++struct isp_video_queue;
++struct page;
++struct scatterlist;
++
++#define ISP_VIDEO_MAX_BUFFERS 16
++
++/**
++ * enum isp_video_buffer_state - ISP video buffer state
++ * @ISP_BUF_STATE_IDLE: The buffer is under userspace control (dequeued
++ * or not queued yet).
++ * @ISP_BUF_STATE_QUEUED: The buffer has been queued but isn't used by the
++ * device yet.
++ * @ISP_BUF_STATE_ACTIVE: The buffer is in use for an active video transfer.
++ * @ISP_BUF_STATE_ERROR: The device is done with the buffer and an error
++ * occured. For capture device the buffer likely contains corrupted data or
++ * no data at all.
++ * @ISP_BUF_STATE_DONE: The device is done with the buffer and no error occured.
++ * For capture devices the buffer contains valid data.
++ */
++enum isp_video_buffer_state {
++ ISP_BUF_STATE_IDLE,
++ ISP_BUF_STATE_QUEUED,
++ ISP_BUF_STATE_ACTIVE,
++ ISP_BUF_STATE_ERROR,
++ ISP_BUF_STATE_DONE,
++};
++
++/**
++ * struct isp_video_buffer - ISP video buffer
++ * @vma_use_count: Number of times the buffer is mmap'ed to userspace
++ * @stream: List head for insertion into main queue
++ * @queue: ISP buffers queue this buffer belongs to
++ * @prepared: Whether the buffer has been prepared
++ * @vaddr: Memory virtual address (for kernel buffers)
++ * @vm_flags: Buffer VMA flags (for userspace buffers)
++ * @offset: Offset inside the first page (for userspace buffers)
++ * @npages: Number of pages (for userspace buffers)
++ * @pages: Pages table (for userspace non-VM_PFNMAP buffers)
++ * @paddr: Memory physical address (for userspace VM_PFNMAP buffers)
++ * @sglen: Number of elements in the scatter list (for non-VM_PFNMAP buffers)
++ * @sglist: Scatter list (for non-VM_PFNMAP buffers)
++ * @vbuf: V4L2 buffer
++ * @irqlist: List head for insertion into IRQ queue
++ * @state: Current buffer state
++ * @wait: Wait queue to signal buffer completion
++ */
++struct isp_video_buffer {
++ unsigned long vma_use_count;
++ struct list_head stream;
++ struct isp_video_queue *queue;
++ unsigned int prepared:1;
++
++ /* For kernel buffers. */
++ void *vaddr;
++
++ /* For userspace buffers. */
++ unsigned long vm_flags;
++ unsigned long offset;
++ unsigned int npages;
++ struct page **pages;
++ dma_addr_t paddr;
++
++ /* For all buffers except VM_PFNMAP. */
++ unsigned int sglen;
++ struct scatterlist *sglist;
++
++ /* Touched by the interrupt handler. */
++ struct v4l2_buffer vbuf;
++ struct list_head irqlist;
++ enum isp_video_buffer_state state;
++ wait_queue_head_t wait;
++};
++
++#define to_isp_video_buffer(vb) container_of(vb, struct isp_video_buffer, vb)
++
++/**
++ * struct isp_video_queue_operations - Driver-specific operations
++ * @queue_prepare: Called before allocating buffers. Drivers should clamp the
++ * number of buffers according to their requirements, and must return the
++ * buffer size in bytes.
++ * @buffer_prepare: Called the first time a buffer is queued, or after changing
++ * the userspace memory address for a USERPTR buffer, with the queue lock
++ * held. Drivers should perform device-specific buffer preparation (such as
++ * mapping the buffer memory in an IOMMU). This operation is optional.
++ * @buffer_queue: Called when a buffer is being added to the queue with the
++ * queue irqlock spinlock held.
++ * @buffer_cleanup: Called before freeing buffers, or before changing the
++ * userspace memory address for a USERPTR buffer, with the queue lock held.
++ * Drivers must perform cleanup operations required to undo the
++ * buffer_prepare call. This operation is optional.
++ */
++struct isp_video_queue_operations {
++ void (*queue_prepare)(struct isp_video_queue *queue,
++ unsigned int *nbuffers, unsigned int *size);
++ int (*buffer_prepare)(struct isp_video_buffer *buf);
++ void (*buffer_queue)(struct isp_video_buffer *buf);
++ void (*buffer_cleanup)(struct isp_video_buffer *buf);
++};
++
++/**
++ * struct isp_video_queue - ISP video buffers queue
++ * @type: Type of video buffers handled by this queue
++ * @ops: Queue operations
++ * @dev: Device used for DMA operations
++ * @bufsize: Size of a driver-specific buffer object
++ * @count: Number of currently allocated buffers
++ * @buffers: ISP video buffers
++ * @lock: Mutex to protect access to the buffers, main queue and state
++ * @irqlock: Spinlock to protect access to the IRQ queue
++ * @streaming: Queue state, indicates whether the queue is streaming
++ * @queue: List of all queued buffers
++ */
++struct isp_video_queue {
++ enum v4l2_buf_type type;
++ const struct isp_video_queue_operations *ops;
++ struct device *dev;
++ unsigned int bufsize;
++
++ unsigned int count;
++ struct isp_video_buffer *buffers[ISP_VIDEO_MAX_BUFFERS];
++ struct mutex lock;
++ spinlock_t irqlock;
++
++ unsigned int streaming:1;
++
++ struct list_head queue;
++};
++
++int isp_video_queue_cleanup(struct isp_video_queue *queue);
++int isp_video_queue_init(struct isp_video_queue *queue, enum v4l2_buf_type type,
++ const struct isp_video_queue_operations *ops,
++ struct device *dev, unsigned int bufsize);
++
++int isp_video_queue_reqbufs(struct isp_video_queue *queue,
++ struct v4l2_requestbuffers *rb);
++int isp_video_queue_querybuf(struct isp_video_queue *queue,
++ struct v4l2_buffer *vbuf);
++int isp_video_queue_qbuf(struct isp_video_queue *queue,
++ struct v4l2_buffer *vbuf);
++int isp_video_queue_dqbuf(struct isp_video_queue *queue,
++ struct v4l2_buffer *vbuf, int nonblocking);
++int isp_video_queue_streamon(struct isp_video_queue *queue);
++void isp_video_queue_streamoff(struct isp_video_queue *queue);
++int isp_video_queue_mmap(struct isp_video_queue *queue,
++ struct vm_area_struct *vma);
++unsigned int isp_video_queue_poll(struct isp_video_queue *queue,
++ struct file *file, poll_table *wait);
++
++#endif /* __ISP_QUEUE_H */
++
+diff --git a/drivers/media/video/isp/ispreg.h b/drivers/media/video/isp/ispreg.h
+new file mode 100644
+index 0000000..91256ad
+--- /dev/null
++++ b/drivers/media/video/isp/ispreg.h
+@@ -0,0 +1,1803 @@
++/*
++ * ispreg.h
++ *
++ * Header file for all the ISP module in TI's OMAP3 Camera ISP.
++ * It has the OMAP HW register definitions.
++ *
++ * Copyright (C) 2009 Texas Instruments.
++ * Copyright (C) 2009 Nokia.
++ *
++ * Contributors:
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ * Thara Gopinath <thara@ti.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef __ISPREG_H__
++#define __ISPREG_H__
++
++#include <plat/omap34xx.h>
++
++
++#define CM_CAM_MCLK_HZ 172800000 /* Hz */
++
++/* ISP Submodules offset */
++
++#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE
++#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset))
++
++#define OMAP3ISP_CBUFF_REG_OFFSET 0x0100
++#define OMAP3ISP_CBUFF_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CBUFF_REG_OFFSET)
++#define OMAP3ISP_CBUFF_REG(offset) (OMAP3ISP_CBUFF_REG_BASE + (offset))
++
++#define OMAP3ISP_CCP2_REG_OFFSET 0x0400
++#define OMAP3ISP_CCP2_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CCP2_REG_OFFSET)
++#define OMAP3ISP_CCP2_REG(offset) (OMAP3ISP_CCP2_REG_BASE + (offset))
++
++#define OMAP3ISP_CCDC_REG_OFFSET 0x0600
++#define OMAP3ISP_CCDC_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CCDC_REG_OFFSET)
++#define OMAP3ISP_CCDC_REG(offset) (OMAP3ISP_CCDC_REG_BASE + (offset))
++
++#define OMAP3ISP_HIST_REG_OFFSET 0x0A00
++#define OMAP3ISP_HIST_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_HIST_REG_OFFSET)
++#define OMAP3ISP_HIST_REG(offset) (OMAP3ISP_HIST_REG_BASE + (offset))
++
++#define OMAP3ISP_H3A_REG_OFFSET 0x0C00
++#define OMAP3ISP_H3A_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_H3A_REG_OFFSET)
++#define OMAP3ISP_H3A_REG(offset) (OMAP3ISP_H3A_REG_BASE + (offset))
++
++#define OMAP3ISP_PREV_REG_OFFSET 0x0E00
++#define OMAP3ISP_PREV_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_PREV_REG_OFFSET)
++#define OMAP3ISP_PREV_REG(offset) (OMAP3ISP_PREV_REG_BASE + (offset))
++
++#define OMAP3ISP_RESZ_REG_OFFSET 0x1000
++#define OMAP3ISP_RESZ_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_RESZ_REG_OFFSET)
++#define OMAP3ISP_RESZ_REG(offset) (OMAP3ISP_RESZ_REG_BASE + (offset))
++
++#define OMAP3ISP_SBL_REG_OFFSET 0x1200
++#define OMAP3ISP_SBL_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_SBL_REG_OFFSET)
++#define OMAP3ISP_SBL_REG(offset) (OMAP3ISP_SBL_REG_BASE + (offset))
++
++#define OMAP3ISP_MMU_REG_OFFSET 0x1400
++#define OMAP3ISP_MMU_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_MMU_REG_OFFSET)
++#define OMAP3ISP_MMU_REG(offset) (OMAP3ISP_MMU_REG_BASE + (offset))
++
++#define OMAP3ISP_CSI2A_REGS1_REG_OFFSET 0x1800
++#define OMAP3ISP_CSI2A_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CSI2A_REGS1_REG_OFFSET)
++#define OMAP3ISP_CSI2A_REGS1_REG(offset) \
++ (OMAP3ISP_CSI2A_REGS1_REG_BASE + (offset))
++
++#define OMAP3ISP_CSIPHY2_REG_OFFSET 0x1970
++#define OMAP3ISP_CSIPHY2_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CSIPHY2_REG_OFFSET)
++#define OMAP3ISP_CSIPHY2_REG(offset) (OMAP3ISP_CSIPHY2_REG_BASE + (offset))
++
++#define OMAP3ISP_CSI2A_REGS2_REG_OFFSET 0x19C0
++#define OMAP3ISP_CSI2A_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CSI2A_REGS2_REG_OFFSET)
++#define OMAP3ISP_CSI2A_REGS2_REG(offset) \
++ (OMAP3ISP_CSI2A_REGS2_REG_BASE + (offset))
++
++#define OMAP3ISP_CSI2C_REGS1_REG_OFFSET 0x1C00
++#define OMAP3ISP_CSI2C_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CSI2C_REGS1_REG_OFFSET)
++#define OMAP3ISP_CSI2C_REGS1_REG(offset) \
++ (OMAP3ISP_CSI2C_REGS1_REG_BASE + (offset))
++
++#define OMAP3ISP_CSIPHY1_REG_OFFSET 0x1D70
++#define OMAP3ISP_CSIPHY1_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CSIPHY1_REG_OFFSET)
++#define OMAP3ISP_CSIPHY1_REG(offset) (OMAP3ISP_CSIPHY1_REG_BASE + (offset))
++
++#define OMAP3ISP_CSI2C_REGS2_REG_OFFSET 0x1DC0
++#define OMAP3ISP_CSI2C_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
++ OMAP3ISP_CSI2C_REGS2_REG_OFFSET)
++#define OMAP3ISP_CSI2C_REGS2_REG(offset) \
++ (OMAP3ISP_CSI2C_REGS2_REG_BASE + (offset))
++
++/* ISP module register offset */
++
++#define ISP_REVISION (0x000)
++#define ISP_SYSCONFIG (0x004)
++#define ISP_SYSSTATUS (0x008)
++#define ISP_IRQ0ENABLE (0x00C)
++#define ISP_IRQ0STATUS (0x010)
++#define ISP_IRQ1ENABLE (0x014)
++#define ISP_IRQ1STATUS (0x018)
++#define ISP_TCTRL_GRESET_LENGTH (0x030)
++#define ISP_TCTRL_PSTRB_REPLAY (0x034)
++#define ISP_CTRL (0x040)
++#define ISP_SECURE (0x044)
++#define ISP_TCTRL_CTRL (0x050)
++#define ISP_TCTRL_FRAME (0x054)
++#define ISP_TCTRL_PSTRB_DELAY (0x058)
++#define ISP_TCTRL_STRB_DELAY (0x05C)
++#define ISP_TCTRL_SHUT_DELAY (0x060)
++#define ISP_TCTRL_PSTRB_LENGTH (0x064)
++#define ISP_TCTRL_STRB_LENGTH (0x068)
++#define ISP_TCTRL_SHUT_LENGTH (0x06C)
++#define ISP_PING_PONG_ADDR (0x070)
++#define ISP_PING_PONG_MEM_RANGE (0x074)
++#define ISP_PING_PONG_BUF_SIZE (0x078)
++
++/* CCP2 receiver registers */
++
++#define ISPCCP2_REVISION (0x000)
++#define ISPCCP2_SYSCONFIG (0x004)
++#define ISPCCP2_SYSCONFIG_SOFT_RESET (1 << 1)
++#define ISPCCP2_SYSSTATUS (0x008)
++#define ISPCCP2_SYSSTATUS_RESET_DONE (1 << 0)
++#define ISPCCP2_LC01_IRQENABLE (0x00C)
++#define ISPCCP2_LC01_IRQSTATUS (0x010)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ (1 << 11)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_LE_IRQ (1 << 10)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_LS_IRQ (1 << 9)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_FE_IRQ (1 << 8)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_COUNT_IRQ (1 << 7)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ (1 << 5)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ (1 << 4)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ (1 << 3)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ (1 << 2)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ (1 << 1)
++#define ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ (1 << 0)
++
++#define ISPCCP2_LC23_IRQENABLE (0x014)
++#define ISPCCP2_LC23_IRQSTATUS (0x018)
++#define ISPCCP2_LCM_IRQENABLE (0x02C)
++#define ISPCCP2_LCM_IRQSTATUS_EOF_IRQ (1 << 0)
++#define ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ (1 << 1)
++#define ISPCCP2_LCM_IRQSTATUS (0x030)
++#define ISPCCP2_CTRL (0x040)
++#define ISPCCP2_CTRL_IF_EN (1 << 0)
++#define ISPCCP2_CTRL_PHY_SEL (1 << 1)
++#define ISPCCP2_CTRL_PHY_SEL_CLOCK (0 << 1)
++#define ISPCCP2_CTRL_PHY_SEL_STROBE (1 << 1)
++#define ISPCCP2_CTRL_PHY_SEL_MASK 0x1
++#define ISPCCP2_CTRL_PHY_SEL_SHIFT 1
++#define ISPCCP2_CTRL_IO_OUT_SEL (1 << 2)
++#define ISPCCP2_CTRL_MODE (1 << 4)
++#define ISPCCP2_CTRL_VP_CLK_FORCE_ON (1 << 9)
++#define ISPCCP2_CTRL_INV (1 << 10)
++#define ISPCCP2_CTRL_INV_MASK 0x1
++#define ISPCCP2_CTRL_INV_SHIFT 10
++#define ISPCCP2_CTRL_VP_ONLY_EN (1 << 11)
++#define ISPCCP2_CTRL_VP_CLK_POL (1 << 12)
++#define ISPCCP2_CTRL_VPCLK_DIV_SHIFT 15
++#define ISPCCP2_CTRL_VPCLK_DIV_MASK 0x1ffff /* [31:15] */
++#define ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT 8 /* 3430 bits */
++#define ISPCCP2_CTRL_VP_OUT_CTRL_MASK 0x3 /* 3430 bits */
++#define ISPCCP2_DBG (0x044)
++#define ISPCCP2_GNQ (0x048)
++#define ISPCCP2_LCx_CTRL(x) ((0x050)+0x30*(x))
++#define ISPCCP2_LCx_CTRL_CHAN_EN (1 << 0)
++#define ISPCCP2_LCx_CTRL_CRC_EN (1 << 19)
++#define ISPCCP2_LCx_CTRL_CRC_MASK 0x1
++#define ISPCCP2_LCx_CTRL_CRC_SHIFT 2
++#define ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0 19
++#define ISPCCP2_LCx_CTRL_REGION_EN (1 << 1)
++#define ISPCCP2_LCx_CTRL_REGION_MASK 0x1
++#define ISPCCP2_LCx_CTRL_REGION_SHIFT 1
++#define ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0 0x3f
++#define ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0 0x2
++#define ISPCCP2_LCx_CTRL_FORMAT_MASK 0x1f
++#define ISPCCP2_LCx_CTRL_FORMAT_SHIFT 0x3
++#define ISPCCP2_LCx_CODE(x) ((0x054)+0x30*(x))
++#define ISPCCP2_LCx_STAT_START(x) ((0x058)+0x30*(x))
++#define ISPCCP2_LCx_STAT_SIZE(x) ((0x05C)+0x30*(x))
++#define ISPCCP2_LCx_SOF_ADDR(x) ((0x060)+0x30*(x))
++#define ISPCCP2_LCx_EOF_ADDR(x) ((0x064)+0x30*(x))
++#define ISPCCP2_LCx_DAT_START(x) ((0x068)+0x30*(x))
++#define ISPCCP2_LCx_DAT_SIZE(x) ((0x06C)+0x30*(x))
++#define ISPCCP2_LCx_DAT_MASK 0xFFF
++#define ISPCCP2_LCx_DAT_SHIFT 16
++#define ISPCCP2_LCx_DAT_PING_ADDR(x) ((0x070)+0x30*(x))
++#define ISPCCP2_LCx_DAT_PONG_ADDR(x) ((0x074)+0x30*(x))
++#define ISPCCP2_LCx_DAT_OFST(x) ((0x078)+0x30*(x))
++#define ISPCCP2_LCM_CTRL (0x1D0)
++#define ISPCCP2_LCM_CTRL_CHAN_EN (1 << 0)
++#define ISPCCP2_LCM_CTRL_DST_PORT (1 << 2)
++#define ISPCCP2_LCM_CTRL_DST_PORT_SHIFT 2
++#define ISPCCP2_LCM_CTRL_READ_THROTTLE_SHIFT 3
++#define ISPCCP2_LCM_CTRL_READ_THROTTLE_MASK 0x11
++#define ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT 5
++#define ISPCCP2_LCM_CTRL_BURST_SIZE_MASK 0x7
++#define ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT 16
++#define ISPCCP2_LCM_CTRL_SRC_FORMAT_MASK 0x7
++#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT 20
++#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_MASK 0x3
++#define ISPCCP2_LCM_CTRL_SRC_DPCM_PRED (1 << 22)
++#define ISPCCP2_LCM_CTRL_SRC_PACK (1 << 23)
++#define ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT 24
++#define ISPCCP2_LCM_CTRL_DST_FORMAT_MASK 0x7
++#define ISPCCP2_LCM_VSIZE (0x1D4)
++#define ISPCCP2_LCM_VSIZE_SHIFT 16
++#define ISPCCP2_LCM_HSIZE (0x1D8)
++#define ISPCCP2_LCM_HSIZE_SHIFT 16
++#define ISPCCP2_LCM_PREFETCH (0x1DC)
++#define ISPCCP2_LCM_PREFETCH_SHIFT 3
++#define ISPCCP2_LCM_SRC_ADDR (0x1E0)
++#define ISPCCP2_LCM_SRC_OFST (0x1E4)
++#define ISPCCP2_LCM_DST_ADDR (0x1E8)
++#define ISPCCP2_LCM_DST_OFST (0x1EC)
++
++#define ISP_CSIB_SYSCONFIG ISPCCP2_SYSCONFIG
++#define ISP_CSIA_SYSCONFIG ISPCSI2_SYSCONFIG
++
++/* ISP_CBUFF Registers */
++
++#define ISP_CBUFF_SYSCONFIG (0x010)
++#define ISP_CBUFF_IRQENABLE (0x01C)
++
++#define ISP_CBUFF0_CTRL (0x020)
++#define ISP_CBUFF1_CTRL (0x024)
++
++#define ISP_CBUFF0_START (0x040)
++#define ISP_CBUFF1_START (0x044)
++
++#define ISP_CBUFF0_END (0x050)
++#define ISP_CBUFF1_END (0x054)
++
++#define ISP_CBUFF0_WINDOWSIZE (0x060)
++#define ISP_CBUFF1_WINDOWSIZE (0x064)
++
++#define ISP_CBUFF0_THRESHOLD (0x070)
++#define ISP_CBUFF1_THRESHOLD (0x074)
++
++/* CCDC module register offset */
++
++#define ISPCCDC_PID (0x000)
++#define ISPCCDC_PCR (0x004)
++#define ISPCCDC_SYN_MODE (0x008)
++#define ISPCCDC_HD_VD_WID (0x00C)
++#define ISPCCDC_PIX_LINES (0x010)
++#define ISPCCDC_HORZ_INFO (0x014)
++#define ISPCCDC_VERT_START (0x018)
++#define ISPCCDC_VERT_LINES (0x01C)
++#define ISPCCDC_CULLING (0x020)
++#define ISPCCDC_HSIZE_OFF (0x024)
++#define ISPCCDC_SDOFST (0x028)
++#define ISPCCDC_SDR_ADDR (0x02C)
++#define ISPCCDC_CLAMP (0x030)
++#define ISPCCDC_DCSUB (0x034)
++#define ISPCCDC_COLPTN (0x038)
++#define ISPCCDC_BLKCMP (0x03C)
++#define ISPCCDC_FPC (0x040)
++#define ISPCCDC_FPC_ADDR (0x044)
++#define ISPCCDC_VDINT (0x048)
++#define ISPCCDC_ALAW (0x04C)
++#define ISPCCDC_REC656IF (0x050)
++#define ISPCCDC_CFG (0x054)
++#define ISPCCDC_FMTCFG (0x058)
++#define ISPCCDC_FMT_HORZ (0x05C)
++#define ISPCCDC_FMT_VERT (0x060)
++#define ISPCCDC_FMT_ADDR0 (0x064)
++#define ISPCCDC_FMT_ADDR1 (0x068)
++#define ISPCCDC_FMT_ADDR2 (0x06C)
++#define ISPCCDC_FMT_ADDR3 (0x070)
++#define ISPCCDC_FMT_ADDR4 (0x074)
++#define ISPCCDC_FMT_ADDR5 (0x078)
++#define ISPCCDC_FMT_ADDR6 (0x07C)
++#define ISPCCDC_FMT_ADDR7 (0x080)
++#define ISPCCDC_PRGEVEN0 (0x084)
++#define ISPCCDC_PRGEVEN1 (0x088)
++#define ISPCCDC_PRGODD0 (0x08C)
++#define ISPCCDC_PRGODD1 (0x090)
++#define ISPCCDC_VP_OUT (0x094)
++
++#define ISPCCDC_LSC_CONFIG (0x098)
++#define ISPCCDC_LSC_INITIAL (0x09C)
++#define ISPCCDC_LSC_TABLE_BASE (0x0A0)
++#define ISPCCDC_LSC_TABLE_OFFSET (0x0A4)
++
++/* SBL */
++#define ISPSBL_PCR 0x4
++#define ISPSBL_PCR_H3A_AEAWB_WBL_OVF (1 << 16)
++#define ISPSBL_PCR_H3A_AF_WBL_OVF (1 << 17)
++#define ISPSBL_PCR_RSZ4_WBL_OVF (1 << 18)
++#define ISPSBL_PCR_RSZ3_WBL_OVF (1 << 19)
++#define ISPSBL_PCR_RSZ2_WBL_OVF (1 << 20)
++#define ISPSBL_PCR_RSZ1_WBL_OVF (1 << 21)
++#define ISPSBL_PCR_PRV_WBL_OVF (1 << 22)
++#define ISPSBL_PCR_CCDC_WBL_OVF (1 << 23)
++#define ISPSBL_PCR_CCDCPRV_2_RSZ_OVF (1 << 24)
++#define ISPSBL_PCR_CSIA_WBL_OVF (1 << 25)
++#define ISPSBL_PCR_CSIB_WBL_OVF (1 << 26)
++#define ISPSBL_CCDC_WR_0 (0x028)
++#define ISPSBL_CCDC_WR_0_DATA_READY (1 << 21)
++#define ISPSBL_CCDC_WR_1 (0x02C)
++#define ISPSBL_CCDC_WR_2 (0x030)
++#define ISPSBL_CCDC_WR_3 (0x034)
++
++#define ISPSBL_SDR_REQ_EXP 0xF8
++#define ISPSBL_SDR_REQ_HIST_EXP_SHIFT 0
++#define ISPSBL_SDR_REQ_HIST_EXP_MASK (0x3FF)
++#define ISPSBL_SDR_REQ_RSZ_EXP_SHIFT 10
++#define ISPSBL_SDR_REQ_RSZ_EXP_MASK (0x3FF << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT)
++#define ISPSBL_SDR_REQ_PRV_EXP_SHIFT 20
++#define ISPSBL_SDR_REQ_PRV_EXP_MASK (0x3FF << ISPSBL_SDR_REQ_PRV_EXP_SHIFT)
++
++/* Histogram registers */
++#define ISPHIST_PID (0x000)
++#define ISPHIST_PCR (0x004)
++#define ISPHIST_CNT (0x008)
++#define ISPHIST_WB_GAIN (0x00C)
++#define ISPHIST_R0_HORZ (0x010)
++#define ISPHIST_R0_VERT (0x014)
++#define ISPHIST_R1_HORZ (0x018)
++#define ISPHIST_R1_VERT (0x01C)
++#define ISPHIST_R2_HORZ (0x020)
++#define ISPHIST_R2_VERT (0x024)
++#define ISPHIST_R3_HORZ (0x028)
++#define ISPHIST_R3_VERT (0x02C)
++#define ISPHIST_ADDR (0x030)
++#define ISPHIST_DATA (0x034)
++#define ISPHIST_RADD (0x038)
++#define ISPHIST_RADD_OFF (0x03C)
++#define ISPHIST_H_V_INFO (0x040)
++
++/* H3A module registers */
++#define ISPH3A_PID (0x000)
++#define ISPH3A_PCR (0x004)
++#define ISPH3A_AEWWIN1 (0x04C)
++#define ISPH3A_AEWINSTART (0x050)
++#define ISPH3A_AEWINBLK (0x054)
++#define ISPH3A_AEWSUBWIN (0x058)
++#define ISPH3A_AEWBUFST (0x05C)
++#define ISPH3A_AFPAX1 (0x008)
++#define ISPH3A_AFPAX2 (0x00C)
++#define ISPH3A_AFPAXSTART (0x010)
++#define ISPH3A_AFIIRSH (0x014)
++#define ISPH3A_AFBUFST (0x018)
++#define ISPH3A_AFCOEF010 (0x01C)
++#define ISPH3A_AFCOEF032 (0x020)
++#define ISPH3A_AFCOEF054 (0x024)
++#define ISPH3A_AFCOEF076 (0x028)
++#define ISPH3A_AFCOEF098 (0x02C)
++#define ISPH3A_AFCOEF0010 (0x030)
++#define ISPH3A_AFCOEF110 (0x034)
++#define ISPH3A_AFCOEF132 (0x038)
++#define ISPH3A_AFCOEF154 (0x03C)
++#define ISPH3A_AFCOEF176 (0x040)
++#define ISPH3A_AFCOEF198 (0x044)
++#define ISPH3A_AFCOEF1010 (0x048)
++
++#define ISPPRV_PCR (0x004)
++#define ISPPRV_HORZ_INFO (0x008)
++#define ISPPRV_VERT_INFO (0x00C)
++#define ISPPRV_RSDR_ADDR (0x010)
++#define ISPPRV_RADR_OFFSET (0x014)
++#define ISPPRV_DSDR_ADDR (0x018)
++#define ISPPRV_DRKF_OFFSET (0x01C)
++#define ISPPRV_WSDR_ADDR (0x020)
++#define ISPPRV_WADD_OFFSET (0x024)
++#define ISPPRV_AVE (0x028)
++#define ISPPRV_HMED (0x02C)
++#define ISPPRV_NF (0x030)
++#define ISPPRV_WB_DGAIN (0x034)
++#define ISPPRV_WBGAIN (0x038)
++#define ISPPRV_WBSEL (0x03C)
++#define ISPPRV_CFA (0x040)
++#define ISPPRV_BLKADJOFF (0x044)
++#define ISPPRV_RGB_MAT1 (0x048)
++#define ISPPRV_RGB_MAT2 (0x04C)
++#define ISPPRV_RGB_MAT3 (0x050)
++#define ISPPRV_RGB_MAT4 (0x054)
++#define ISPPRV_RGB_MAT5 (0x058)
++#define ISPPRV_RGB_OFF1 (0x05C)
++#define ISPPRV_RGB_OFF2 (0x060)
++#define ISPPRV_CSC0 (0x064)
++#define ISPPRV_CSC1 (0x068)
++#define ISPPRV_CSC2 (0x06C)
++#define ISPPRV_CSC_OFFSET (0x070)
++#define ISPPRV_CNT_BRT (0x074)
++#define ISPPRV_CSUP (0x078)
++#define ISPPRV_SETUP_YC (0x07C)
++#define ISPPRV_SET_TBL_ADDR (0x080)
++#define ISPPRV_SET_TBL_DATA (0x084)
++#define ISPPRV_CDC_THR0 (0x090)
++#define ISPPRV_CDC_THR1 (ISPPRV_CDC_THR0 + (0x4))
++#define ISPPRV_CDC_THR2 (ISPPRV_CDC_THR0 + (0x4) * 2)
++#define ISPPRV_CDC_THR3 (ISPPRV_CDC_THR0 + (0x4) * 3)
++
++#define ISPPRV_REDGAMMA_TABLE_ADDR 0x0000
++#define ISPPRV_GREENGAMMA_TABLE_ADDR 0x0400
++#define ISPPRV_BLUEGAMMA_TABLE_ADDR 0x0800
++#define ISPPRV_NF_TABLE_ADDR 0x0C00
++#define ISPPRV_YENH_TABLE_ADDR 0x1000
++#define ISPPRV_CFA_TABLE_ADDR 0x1400
++
++#define ISPPRV_MAXOUTPUT_WIDTH 1280
++#define ISPPRV_MAXOUTPUT_WIDTH_ES2 3300
++#define ISPPRV_MAXOUTPUT_WIDTH_3630 4096
++#define ISPRSZ_MIN_OUTPUT 64
++#define ISPRSZ_MAX_OUTPUT 3312
++
++/* Resizer module register offset */
++#define ISPRSZ_PID (0x000)
++#define ISPRSZ_PCR (0x004)
++#define ISPRSZ_CNT (0x008)
++#define ISPRSZ_OUT_SIZE (0x00C)
++#define ISPRSZ_IN_START (0x010)
++#define ISPRSZ_IN_SIZE (0x014)
++#define ISPRSZ_SDR_INADD (0x018)
++#define ISPRSZ_SDR_INOFF (0x01C)
++#define ISPRSZ_SDR_OUTADD (0x020)
++#define ISPRSZ_SDR_OUTOFF (0x024)
++#define ISPRSZ_HFILT10 (0x028)
++#define ISPRSZ_HFILT32 (0x02C)
++#define ISPRSZ_HFILT54 (0x030)
++#define ISPRSZ_HFILT76 (0x034)
++#define ISPRSZ_HFILT98 (0x038)
++#define ISPRSZ_HFILT1110 (0x03C)
++#define ISPRSZ_HFILT1312 (0x040)
++#define ISPRSZ_HFILT1514 (0x044)
++#define ISPRSZ_HFILT1716 (0x048)
++#define ISPRSZ_HFILT1918 (0x04C)
++#define ISPRSZ_HFILT2120 (0x050)
++#define ISPRSZ_HFILT2322 (0x054)
++#define ISPRSZ_HFILT2524 (0x058)
++#define ISPRSZ_HFILT2726 (0x05C)
++#define ISPRSZ_HFILT2928 (0x060)
++#define ISPRSZ_HFILT3130 (0x064)
++#define ISPRSZ_VFILT10 (0x068)
++#define ISPRSZ_VFILT32 (0x06C)
++#define ISPRSZ_VFILT54 (0x070)
++#define ISPRSZ_VFILT76 (0x074)
++#define ISPRSZ_VFILT98 (0x078)
++#define ISPRSZ_VFILT1110 (0x07C)
++#define ISPRSZ_VFILT1312 (0x080)
++#define ISPRSZ_VFILT1514 (0x084)
++#define ISPRSZ_VFILT1716 (0x088)
++#define ISPRSZ_VFILT1918 (0x08C)
++#define ISPRSZ_VFILT2120 (0x090)
++#define ISPRSZ_VFILT2322 (0x094)
++#define ISPRSZ_VFILT2524 (0x098)
++#define ISPRSZ_VFILT2726 (0x09C)
++#define ISPRSZ_VFILT2928 (0x0A0)
++#define ISPRSZ_VFILT3130 (0x0A4)
++#define ISPRSZ_YENH (0x0A8)
++
++/* MMU module registers */
++#define ISPMMU_REVISION (0x000)
++#define ISPMMU_SYSCONFIG (0x010)
++#define ISPMMU_SYSSTATUS (0x014)
++#define ISPMMU_IRQSTATUS (0x018)
++#define ISPMMU_IRQENABLE (0x01C)
++#define ISPMMU_WALKING_ST (0x040)
++#define ISPMMU_CNTL (0x044)
++#define ISPMMU_FAULT_AD (0x048)
++#define ISPMMU_TTB (0x04C)
++#define ISPMMU_LOCK (0x050)
++#define ISPMMU_LD_TLB (0x054)
++#define ISPMMU_CAM (0x058)
++#define ISPMMU_RAM (0x05C)
++#define ISPMMU_GFLUSH (0x060)
++#define ISPMMU_FLUSH_ENTRY (0x064)
++#define ISPMMU_READ_CAM (0x068)
++#define ISPMMU_READ_RAM (0x06c)
++#define ISPMMU_EMU_FAULT_AD (0x070)
++
++#define ISP_INT_CLR 0xFF113F11
++#define ISPPRV_PCR_EN 1
++#define ISPPRV_PCR_BUSY (1 << 1)
++#define ISPPRV_PCR_SOURCE (1 << 2)
++#define ISPPRV_PCR_ONESHOT (1 << 3)
++#define ISPPRV_PCR_WIDTH (1 << 4)
++#define ISPPRV_PCR_INVALAW (1 << 5)
++#define ISPPRV_PCR_DRKFEN (1 << 6)
++#define ISPPRV_PCR_DRKFCAP (1 << 7)
++#define ISPPRV_PCR_HMEDEN (1 << 8)
++#define ISPPRV_PCR_NFEN (1 << 9)
++#define ISPPRV_PCR_CFAEN (1 << 10)
++#define ISPPRV_PCR_CFAFMT_SHIFT 11
++#define ISPPRV_PCR_CFAFMT_MASK 0x7800
++#define ISPPRV_PCR_CFAFMT_BAYER (0 << 11)
++#define ISPPRV_PCR_CFAFMT_SONYVGA (1 << 11)
++#define ISPPRV_PCR_CFAFMT_RGBFOVEON (2 << 11)
++#define ISPPRV_PCR_CFAFMT_DNSPL (3 << 11)
++#define ISPPRV_PCR_CFAFMT_HONEYCOMB (4 << 11)
++#define ISPPRV_PCR_CFAFMT_RRGGBBFOVEON (5 << 11)
++#define ISPPRV_PCR_YNENHEN (1 << 15)
++#define ISPPRV_PCR_SUPEN (1 << 16)
++#define ISPPRV_PCR_YCPOS_SHIFT 17
++#define ISPPRV_PCR_YCPOS_YCrYCb (0 << 17)
++#define ISPPRV_PCR_YCPOS_YCbYCr (1 << 17)
++#define ISPPRV_PCR_YCPOS_CbYCrY (2 << 17)
++#define ISPPRV_PCR_YCPOS_CrYCbY (3 << 17)
++#define ISPPRV_PCR_RSZPORT (1 << 19)
++#define ISPPRV_PCR_SDRPORT (1 << 20)
++#define ISPPRV_PCR_SCOMP_EN (1 << 21)
++#define ISPPRV_PCR_SCOMP_SFT_SHIFT (22)
++#define ISPPRV_PCR_SCOMP_SFT_MASK (~(7 << 22))
++#define ISPPRV_PCR_GAMMA_BYPASS (1 << 26)
++#define ISPPRV_PCR_DCOREN (1 << 27)
++#define ISPPRV_PCR_DCCOUP (1 << 28)
++#define ISPPRV_PCR_DRK_FAIL (1 << 31)
++
++#define ISPPRV_HORZ_INFO_EPH_SHIFT 0
++#define ISPPRV_HORZ_INFO_EPH_MASK 0x3fff
++#define ISPPRV_HORZ_INFO_SPH_SHIFT 16
++#define ISPPRV_HORZ_INFO_SPH_MASK 0x3fff0
++
++#define ISPPRV_VERT_INFO_ELV_SHIFT 0
++#define ISPPRV_VERT_INFO_ELV_MASK 0x3fff
++#define ISPPRV_VERT_INFO_SLV_SHIFT 16
++#define ISPPRV_VERT_INFO_SLV_MASK 0x3fff0
++
++#define ISPPRV_AVE_EVENDIST_SHIFT 2
++#define ISPPRV_AVE_EVENDIST_1 0x0
++#define ISPPRV_AVE_EVENDIST_2 0x1
++#define ISPPRV_AVE_EVENDIST_3 0x2
++#define ISPPRV_AVE_EVENDIST_4 0x3
++#define ISPPRV_AVE_ODDDIST_SHIFT 4
++#define ISPPRV_AVE_ODDDIST_1 0x0
++#define ISPPRV_AVE_ODDDIST_2 0x1
++#define ISPPRV_AVE_ODDDIST_3 0x2
++#define ISPPRV_AVE_ODDDIST_4 0x3
++
++#define ISPPRV_HMED_THRESHOLD_SHIFT 0
++#define ISPPRV_HMED_EVENDIST (1 << 8)
++#define ISPPRV_HMED_ODDDIST (1 << 9)
++
++#define ISPPRV_WBGAIN_COEF0_SHIFT 0
++#define ISPPRV_WBGAIN_COEF1_SHIFT 8
++#define ISPPRV_WBGAIN_COEF2_SHIFT 16
++#define ISPPRV_WBGAIN_COEF3_SHIFT 24
++
++#define ISPPRV_WBSEL_COEF0 0x0
++#define ISPPRV_WBSEL_COEF1 0x1
++#define ISPPRV_WBSEL_COEF2 0x2
++#define ISPPRV_WBSEL_COEF3 0x3
++
++#define ISPPRV_WBSEL_N0_0_SHIFT 0
++#define ISPPRV_WBSEL_N0_1_SHIFT 2
++#define ISPPRV_WBSEL_N0_2_SHIFT 4
++#define ISPPRV_WBSEL_N0_3_SHIFT 6
++#define ISPPRV_WBSEL_N1_0_SHIFT 8
++#define ISPPRV_WBSEL_N1_1_SHIFT 10
++#define ISPPRV_WBSEL_N1_2_SHIFT 12
++#define ISPPRV_WBSEL_N1_3_SHIFT 14
++#define ISPPRV_WBSEL_N2_0_SHIFT 16
++#define ISPPRV_WBSEL_N2_1_SHIFT 18
++#define ISPPRV_WBSEL_N2_2_SHIFT 20
++#define ISPPRV_WBSEL_N2_3_SHIFT 22
++#define ISPPRV_WBSEL_N3_0_SHIFT 24
++#define ISPPRV_WBSEL_N3_1_SHIFT 26
++#define ISPPRV_WBSEL_N3_2_SHIFT 28
++#define ISPPRV_WBSEL_N3_3_SHIFT 30
++
++#define ISPPRV_CFA_GRADTH_HOR_SHIFT 0
++#define ISPPRV_CFA_GRADTH_VER_SHIFT 8
++
++#define ISPPRV_BLKADJOFF_B_SHIFT 0
++#define ISPPRV_BLKADJOFF_G_SHIFT 8
++#define ISPPRV_BLKADJOFF_R_SHIFT 16
++
++#define ISPPRV_RGB_MAT1_MTX_RR_SHIFT 0
++#define ISPPRV_RGB_MAT1_MTX_GR_SHIFT 16
++
++#define ISPPRV_RGB_MAT2_MTX_BR_SHIFT 0
++#define ISPPRV_RGB_MAT2_MTX_RG_SHIFT 16
++
++#define ISPPRV_RGB_MAT3_MTX_GG_SHIFT 0
++#define ISPPRV_RGB_MAT3_MTX_BG_SHIFT 16
++
++#define ISPPRV_RGB_MAT4_MTX_RB_SHIFT 0
++#define ISPPRV_RGB_MAT4_MTX_GB_SHIFT 16
++
++#define ISPPRV_RGB_MAT5_MTX_BB_SHIFT 0
++
++#define ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT 0
++#define ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT 16
++
++#define ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT 0
++
++#define ISPPRV_CSC0_RY_SHIFT 0
++#define ISPPRV_CSC0_GY_SHIFT 10
++#define ISPPRV_CSC0_BY_SHIFT 20
++
++#define ISPPRV_CSC1_RCB_SHIFT 0
++#define ISPPRV_CSC1_GCB_SHIFT 10
++#define ISPPRV_CSC1_BCB_SHIFT 20
++
++#define ISPPRV_CSC2_RCR_SHIFT 0
++#define ISPPRV_CSC2_GCR_SHIFT 10
++#define ISPPRV_CSC2_BCR_SHIFT 20
++
++#define ISPPRV_CSC_OFFSET_CR_SHIFT 0
++#define ISPPRV_CSC_OFFSET_CB_SHIFT 8
++#define ISPPRV_CSC_OFFSET_Y_SHIFT 16
++
++#define ISPPRV_CNT_BRT_BRT_SHIFT 0
++#define ISPPRV_CNT_BRT_CNT_SHIFT 8
++
++#define ISPPRV_CONTRAST_MAX 0x10
++#define ISPPRV_CONTRAST_MIN 0xFF
++#define ISPPRV_BRIGHT_MIN 0x00
++#define ISPPRV_BRIGHT_MAX 0xFF
++
++#define ISPPRV_CSUP_CSUPG_SHIFT 0
++#define ISPPRV_CSUP_THRES_SHIFT 8
++#define ISPPRV_CSUP_HPYF_SHIFT 16
++
++#define ISPPRV_SETUP_YC_MINC_SHIFT 0
++#define ISPPRV_SETUP_YC_MAXC_SHIFT 8
++#define ISPPRV_SETUP_YC_MINY_SHIFT 16
++#define ISPPRV_SETUP_YC_MAXY_SHIFT 24
++#define ISPPRV_YC_MAX 0xFF
++#define ISPPRV_YC_MIN 0x0
++
++/* Define bit fields within selected registers */
++#define ISP_REVISION_SHIFT 0
++
++#define ISP_SYSCONFIG_AUTOIDLE (1 << 0)
++#define ISP_SYSCONFIG_SOFTRESET (1 << 1)
++#define ISP_SYSCONFIG_MIDLEMODE_SHIFT 12
++#define ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY 0x0
++#define ISP_SYSCONFIG_MIDLEMODE_NOSTANBY 0x1
++#define ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY 0x2
++
++#define ISP_SYSSTATUS_RESETDONE 0
++
++#define IRQ0ENABLE_CSIA_IRQ (1 << 0)
++#define IRQ0ENABLE_CSIC_IRQ (1 << 1)
++#define IRQ0ENABLE_CCP2_LCM_IRQ (1 << 3)
++#define IRQ0ENABLE_CCP2_LC0_IRQ (1 << 4)
++#define IRQ0ENABLE_CCP2_LC1_IRQ (1 << 5)
++#define IRQ0ENABLE_CCP2_LC2_IRQ (1 << 6)
++#define IRQ0ENABLE_CCP2_LC3_IRQ (1 << 7)
++#define IRQ0ENABLE_CSIB_IRQ (IRQ0ENABLE_CCP2_LCM_IRQ | \
++ IRQ0ENABLE_CCP2_LC0_IRQ | \
++ IRQ0ENABLE_CCP2_LC1_IRQ | \
++ IRQ0ENABLE_CCP2_LC2_IRQ | \
++ IRQ0ENABLE_CCP2_LC3_IRQ)
++
++#define IRQ0ENABLE_CCDC_VD0_IRQ (1 << 8)
++#define IRQ0ENABLE_CCDC_VD1_IRQ (1 << 9)
++#define IRQ0ENABLE_CCDC_VD2_IRQ (1 << 10)
++#define IRQ0ENABLE_CCDC_ERR_IRQ (1 << 11)
++#define IRQ0ENABLE_H3A_AF_DONE_IRQ (1 << 12)
++#define IRQ0ENABLE_H3A_AWB_DONE_IRQ (1 << 13)
++#define IRQ0ENABLE_HIST_DONE_IRQ (1 << 16)
++#define IRQ0ENABLE_CCDC_LSC_DONE_IRQ (1 << 17)
++#define IRQ0ENABLE_CCDC_LSC_PREF_COMP_IRQ (1 << 18)
++#define IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ (1 << 19)
++#define IRQ0ENABLE_PRV_DONE_IRQ (1 << 20)
++#define IRQ0ENABLE_RSZ_DONE_IRQ (1 << 24)
++#define IRQ0ENABLE_OVF_IRQ (1 << 25)
++#define IRQ0ENABLE_PING_IRQ (1 << 26)
++#define IRQ0ENABLE_PONG_IRQ (1 << 27)
++#define IRQ0ENABLE_MMU_ERR_IRQ (1 << 28)
++#define IRQ0ENABLE_OCP_ERR_IRQ (1 << 29)
++#define IRQ0ENABLE_SEC_ERR_IRQ (1 << 30)
++#define IRQ0ENABLE_HS_VS_IRQ (1 << 31)
++
++#define IRQ0STATUS_CSIA_IRQ (1 << 0)
++#define IRQ0STATUS_CSI2C_IRQ (1 << 1)
++#define IRQ0STATUS_CCP2_LCM_IRQ (1 << 3)
++#define IRQ0STATUS_CCP2_LC0_IRQ (1 << 4)
++#define IRQ0STATUS_CSIB_IRQ (IRQ0STATUS_CCP2_LCM_IRQ | \
++ IRQ0STATUS_CCP2_LC0_IRQ)
++
++#define IRQ0STATUS_CSIB_LC1_IRQ (1 << 5)
++#define IRQ0STATUS_CSIB_LC2_IRQ (1 << 6)
++#define IRQ0STATUS_CSIB_LC3_IRQ (1 << 7)
++#define IRQ0STATUS_CCDC_VD0_IRQ (1 << 8)
++#define IRQ0STATUS_CCDC_VD1_IRQ (1 << 9)
++#define IRQ0STATUS_CCDC_VD2_IRQ (1 << 10)
++#define IRQ0STATUS_CCDC_ERR_IRQ (1 << 11)
++#define IRQ0STATUS_H3A_AF_DONE_IRQ (1 << 12)
++#define IRQ0STATUS_H3A_AWB_DONE_IRQ (1 << 13)
++#define IRQ0STATUS_HIST_DONE_IRQ (1 << 16)
++#define IRQ0STATUS_CCDC_LSC_DONE_IRQ (1 << 17)
++#define IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ (1 << 18)
++#define IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ (1 << 19)
++#define IRQ0STATUS_PRV_DONE_IRQ (1 << 20)
++#define IRQ0STATUS_RSZ_DONE_IRQ (1 << 24)
++#define IRQ0STATUS_OVF_IRQ (1 << 25)
++#define IRQ0STATUS_PING_IRQ (1 << 26)
++#define IRQ0STATUS_PONG_IRQ (1 << 27)
++#define IRQ0STATUS_MMU_ERR_IRQ (1 << 28)
++#define IRQ0STATUS_OCP_ERR_IRQ (1 << 29)
++#define IRQ0STATUS_SEC_ERR_IRQ (1 << 30)
++#define IRQ0STATUS_HS_VS_IRQ (1 << 31)
++
++#define TCTRL_GRESET_LEN 0
++
++#define TCTRL_PSTRB_REPLAY_DELAY 0
++#define TCTRL_PSTRB_REPLAY_COUNTER_SHIFT 25
++
++#define ISPCTRL_PAR_SER_CLK_SEL_PARALLEL 0x0
++#define ISPCTRL_PAR_SER_CLK_SEL_CSIA 0x1
++#define ISPCTRL_PAR_SER_CLK_SEL_CSIB 0x2
++#define ISPCTRL_PAR_SER_CLK_SEL_CSIC 0x3
++#define ISPCTRL_PAR_SER_CLK_SEL_MASK 0xFFFFFFFC
++
++#define ISPCTRL_PAR_BRIDGE_SHIFT 2
++#define ISPCTRL_PAR_BRIDGE_DISABLE (0x0 << 2)
++#define ISPCTRL_PAR_BRIDGE_LENDIAN (0x2 << 2)
++#define ISPCTRL_PAR_BRIDGE_BENDIAN (0x3 << 2)
++#define ISPCTRL_PAR_BRIDGE_MASK (0x3 << 2)
++
++#define ISPCTRL_PAR_CLK_POL_SHIFT 4
++#define ISPCTRL_PAR_CLK_POL_INV (1 << 4)
++#define ISPCTRL_PING_PONG_EN (1 << 5)
++#define ISPCTRL_SHIFT_SHIFT 6
++#define ISPCTRL_SHIFT_0 (0x0 << 6)
++#define ISPCTRL_SHIFT_2 (0x1 << 6)
++#define ISPCTRL_SHIFT_4 (0x2 << 6)
++#define ISPCTRL_SHIFT_MASK (~(0x3 << 6))
++
++#define ISPCTRL_CCDC_CLK_EN (1 << 8)
++#define ISPCTRL_SCMP_CLK_EN (1 << 9)
++#define ISPCTRL_H3A_CLK_EN (1 << 10)
++#define ISPCTRL_HIST_CLK_EN (1 << 11)
++#define ISPCTRL_PREV_CLK_EN (1 << 12)
++#define ISPCTRL_RSZ_CLK_EN (1 << 13)
++#define ISPCTRL_SYNC_DETECT_SHIFT 14
++#define ISPCTRL_SYNC_DETECT_HSFALL (0x0 << ISPCTRL_SYNC_DETECT_SHIFT)
++#define ISPCTRL_SYNC_DETECT_HSRISE (0x1 << ISPCTRL_SYNC_DETECT_SHIFT)
++#define ISPCTRL_SYNC_DETECT_VSFALL (0x2 << ISPCTRL_SYNC_DETECT_SHIFT)
++#define ISPCTRL_SYNC_DETECT_VSRISE (0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
++#define ISPCTRL_SYNC_DETECT_MASK (0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
++
++#define ISPCTRL_CCDC_RAM_EN (1 << 16)
++#define ISPCTRL_PREV_RAM_EN (1 << 17)
++#define ISPCTRL_SBL_RD_RAM_EN (1 << 18)
++#define ISPCTRL_SBL_WR1_RAM_EN (1 << 19)
++#define ISPCTRL_SBL_WR0_RAM_EN (1 << 20)
++#define ISPCTRL_SBL_AUTOIDLE (1 << 21)
++#define ISPCTRL_SBL_SHARED_WPORTC (1 << 26)
++#define ISPCTRL_SBL_SHARED_RPORTA (1 << 27)
++#define ISPCTRL_SBL_SHARED_RPORTB (1 << 28)
++#define ISPCTRL_JPEG_FLUSH (1 << 30)
++#define ISPCTRL_CCDC_FLUSH (1 << 31)
++
++#define ISPSECURE_SECUREMODE 0
++
++#define ISPTCTRL_CTRL_DIV_LOW 0x0
++#define ISPTCTRL_CTRL_DIV_HIGH 0x1
++#define ISPTCTRL_CTRL_DIV_BYPASS 0x1F
++
++#define ISPTCTRL_CTRL_DIVA_SHIFT 0
++#define ISPTCTRL_CTRL_DIVA_MASK (0x1F << ISPTCTRL_CTRL_DIVA_SHIFT)
++
++#define ISPTCTRL_CTRL_DIVB_SHIFT 5
++#define ISPTCTRL_CTRL_DIVB_MASK (0x1F << ISPTCTRL_CTRL_DIVB_SHIFT)
++
++#define ISPTCTRL_CTRL_DIVC_SHIFT 10
++#define ISPTCTRL_CTRL_DIVC_NOCLOCK (0x0 << 10)
++
++#define ISPTCTRL_CTRL_SHUTEN (1 << 21)
++#define ISPTCTRL_CTRL_PSTRBEN (1 << 22)
++#define ISPTCTRL_CTRL_STRBEN (1 << 23)
++#define ISPTCTRL_CTRL_SHUTPOL (1 << 24)
++#define ISPTCTRL_CTRL_STRBPSTRBPOL (1 << 26)
++
++#define ISPTCTRL_CTRL_INSEL_SHIFT 27
++#define ISPTCTRL_CTRL_INSEL_PARALLEL (0x0 << 27)
++#define ISPTCTRL_CTRL_INSEL_CSIA (0x1 << 27)
++#define ISPTCTRL_CTRL_INSEL_CSIB (0x2 << 27)
++
++#define ISPTCTRL_CTRL_GRESETEn (1 << 29)
++#define ISPTCTRL_CTRL_GRESETPOL (1 << 30)
++#define ISPTCTRL_CTRL_GRESETDIR (1 << 31)
++
++#define ISPTCTRL_FRAME_SHUT_SHIFT 0
++#define ISPTCTRL_FRAME_PSTRB_SHIFT 6
++#define ISPTCTRL_FRAME_STRB_SHIFT 12
++
++#define ISPCCDC_PID_PREV_SHIFT 0
++#define ISPCCDC_PID_CID_SHIFT 8
++#define ISPCCDC_PID_TID_SHIFT 16
++
++#define ISPCCDC_PCR_EN 1
++#define ISPCCDC_PCR_BUSY (1 << 1)
++
++#define ISPCCDC_SYN_MODE_VDHDOUT 0x1
++#define ISPCCDC_SYN_MODE_FLDOUT (1 << 1)
++#define ISPCCDC_SYN_MODE_VDPOL (1 << 2)
++#define ISPCCDC_SYN_MODE_HDPOL (1 << 3)
++#define ISPCCDC_SYN_MODE_FLDPOL (1 << 4)
++#define ISPCCDC_SYN_MODE_EXWEN (1 << 5)
++#define ISPCCDC_SYN_MODE_DATAPOL (1 << 6)
++#define ISPCCDC_SYN_MODE_FLDMODE (1 << 7)
++#define ISPCCDC_SYN_MODE_DATSIZ_MASK 0xFFFFF8FF
++#define ISPCCDC_SYN_MODE_DATSIZ_8_16 (0x0 << 8)
++#define ISPCCDC_SYN_MODE_DATSIZ_12 (0x4 << 8)
++#define ISPCCDC_SYN_MODE_DATSIZ_11 (0x5 << 8)
++#define ISPCCDC_SYN_MODE_DATSIZ_10 (0x6 << 8)
++#define ISPCCDC_SYN_MODE_DATSIZ_8 (0x7 << 8)
++#define ISPCCDC_SYN_MODE_PACK8 (1 << 11)
++#define ISPCCDC_SYN_MODE_INPMOD_MASK 0xFFFFCFFF
++#define ISPCCDC_SYN_MODE_INPMOD_RAW (0 << 12)
++#define ISPCCDC_SYN_MODE_INPMOD_YCBCR16 (1 << 12)
++#define ISPCCDC_SYN_MODE_INPMOD_YCBCR8 (2 << 12)
++#define ISPCCDC_SYN_MODE_LPF (1 << 14)
++#define ISPCCDC_SYN_MODE_FLDSTAT (1 << 15)
++#define ISPCCDC_SYN_MODE_VDHDEN (1 << 16)
++#define ISPCCDC_SYN_MODE_WEN (1 << 17)
++#define ISPCCDC_SYN_MODE_VP2SDR (1 << 18)
++#define ISPCCDC_SYN_MODE_SDR2RSZ (1 << 19)
++
++#define ISPCCDC_HD_VD_WID_VDW_SHIFT 0
++#define ISPCCDC_HD_VD_WID_HDW_SHIFT 16
++
++#define ISPCCDC_PIX_LINES_HLPRF_SHIFT 0
++#define ISPCCDC_PIX_LINES_PPLN_SHIFT 16
++
++#define ISPCCDC_HORZ_INFO_NPH_SHIFT 0
++#define ISPCCDC_HORZ_INFO_NPH_MASK 0xFFFF8000
++#define ISPCCDC_HORZ_INFO_SPH_MASK 0x1000FFFF
++#define ISPCCDC_HORZ_INFO_SPH_SHIFT 16
++
++#define ISPCCDC_VERT_START_SLV0_SHIFT 16
++#define ISPCCDC_VERT_START_SLV0_MASK 0x1000FFFF
++#define ISPCCDC_VERT_START_SLV1_SHIFT 0
++
++#define ISPCCDC_VERT_LINES_NLV_MASK 0xFFFF8000
++#define ISPCCDC_VERT_LINES_NLV_SHIFT 0
++
++#define ISPCCDC_CULLING_CULV_SHIFT 0
++#define ISPCCDC_CULLING_CULHODD_SHIFT 16
++#define ISPCCDC_CULLING_CULHEVN_SHIFT 24
++
++#define ISPCCDC_HSIZE_OFF_SHIFT 0
++
++#define ISPCCDC_SDOFST_FINV (1 << 14)
++#define ISPCCDC_SDOFST_FOFST_1L 0
++#define ISPCCDC_SDOFST_FOFST_4L (3 << 12)
++#define ISPCCDC_SDOFST_LOFST3_SHIFT 0
++#define ISPCCDC_SDOFST_LOFST2_SHIFT 3
++#define ISPCCDC_SDOFST_LOFST1_SHIFT 6
++#define ISPCCDC_SDOFST_LOFST0_SHIFT 9
++#define EVENEVEN 1
++#define ODDEVEN 2
++#define EVENODD 3
++#define ODDODD 4
++
++#define ISPCCDC_CLAMP_OBGAIN_SHIFT 0
++#define ISPCCDC_CLAMP_OBST_SHIFT 10
++#define ISPCCDC_CLAMP_OBSLN_SHIFT 25
++#define ISPCCDC_CLAMP_OBSLEN_SHIFT 28
++#define ISPCCDC_CLAMP_CLAMPEN (1 << 31)
++
++#define ISPCCDC_COLPTN_R_Ye 0x0
++#define ISPCCDC_COLPTN_Gr_Cy 0x1
++#define ISPCCDC_COLPTN_Gb_G 0x2
++#define ISPCCDC_COLPTN_B_Mg 0x3
++#define ISPCCDC_COLPTN_CP0PLC0_SHIFT 0
++#define ISPCCDC_COLPTN_CP0PLC1_SHIFT 2
++#define ISPCCDC_COLPTN_CP0PLC2_SHIFT 4
++#define ISPCCDC_COLPTN_CP0PLC3_SHIFT 6
++#define ISPCCDC_COLPTN_CP1PLC0_SHIFT 8
++#define ISPCCDC_COLPTN_CP1PLC1_SHIFT 10
++#define ISPCCDC_COLPTN_CP1PLC2_SHIFT 12
++#define ISPCCDC_COLPTN_CP1PLC3_SHIFT 14
++#define ISPCCDC_COLPTN_CP2PLC0_SHIFT 16
++#define ISPCCDC_COLPTN_CP2PLC1_SHIFT 18
++#define ISPCCDC_COLPTN_CP2PLC2_SHIFT 20
++#define ISPCCDC_COLPTN_CP2PLC3_SHIFT 22
++#define ISPCCDC_COLPTN_CP3PLC0_SHIFT 24
++#define ISPCCDC_COLPTN_CP3PLC1_SHIFT 26
++#define ISPCCDC_COLPTN_CP3PLC2_SHIFT 28
++#define ISPCCDC_COLPTN_CP3PLC3_SHIFT 30
++
++#define ISPCCDC_BLKCMP_B_MG_SHIFT 0
++#define ISPCCDC_BLKCMP_GB_G_SHIFT 8
++#define ISPCCDC_BLKCMP_GR_CY_SHIFT 16
++#define ISPCCDC_BLKCMP_R_YE_SHIFT 24
++
++#define ISPCCDC_FPC_FPNUM_SHIFT 0
++#define ISPCCDC_FPC_FPCEN (1 << 15)
++#define ISPCCDC_FPC_FPERR (1 << 16)
++
++#define ISPCCDC_VDINT_1_SHIFT 0
++#define ISPCCDC_VDINT_0_SHIFT 16
++#define ISPCCDC_VDINT_0_MASK 0x7FFF
++#define ISPCCDC_VDINT_1_MASK 0x7FFF
++
++#define ISPCCDC_ALAW_GWDI_SHIFT 0
++#define ISPCCDC_ALAW_CCDTBL (1 << 3)
++
++#define ISPCCDC_REC656IF_R656ON 1
++#define ISPCCDC_REC656IF_ECCFVH (1 << 1)
++
++#define ISPCCDC_CFG_BW656 (1 << 5)
++#define ISPCCDC_CFG_FIDMD_SHIFT 6
++#define ISPCCDC_CFG_WENLOG (1 << 8)
++#define ISPCCDC_CFG_WENLOG_AND (0 << 8)
++#define ISPCCDC_CFG_WENLOG_OR (1 << 8)
++#define ISPCCDC_CFG_Y8POS (1 << 11)
++#define ISPCCDC_CFG_BSWD (1 << 12)
++#define ISPCCDC_CFG_MSBINVI (1 << 13)
++#define ISPCCDC_CFG_VDLC (1 << 15)
++
++#define ISPCCDC_FMTCFG_FMTEN 0x1
++#define ISPCCDC_FMTCFG_LNALT (1 << 1)
++#define ISPCCDC_FMTCFG_LNUM_SHIFT 2
++#define ISPCCDC_FMTCFG_PLEN_ODD_SHIFT 4
++#define ISPCCDC_FMTCFG_PLEN_EVEN_SHIFT 8
++#define ISPCCDC_FMTCFG_VPIN_MASK 0xFFFF8000
++#define ISPCCDC_FMTCFG_VPIN_12_3 (0x3 << 12)
++#define ISPCCDC_FMTCFG_VPIN_11_2 (0x4 << 12)
++#define ISPCCDC_FMTCFG_VPIN_10_1 (0x5 << 12)
++#define ISPCCDC_FMTCFG_VPIN_9_0 (0x6 << 12)
++#define ISPCCDC_FMTCFG_VPEN (1 << 15)
++
++#define ISPCCDC_FMTCFG_VPIF_FRQ_MASK 0xFFF8FFFF
++#define ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT 16
++#define ISPCCDC_FMTCFG_VPIF_FRQ_BY2 (0x0 << 16)
++#define ISPCCDC_FMTCFG_VPIF_FRQ_BY3 (0x1 << 16)
++#define ISPCCDC_FMTCFG_VPIF_FRQ_BY4 (0x2 << 16)
++#define ISPCCDC_FMTCFG_VPIF_FRQ_BY5 (0x3 << 16)
++#define ISPCCDC_FMTCFG_VPIF_FRQ_BY6 (0x4 << 16)
++
++#define ISPCCDC_FMT_HORZ_FMTLNH_SHIFT 0
++#define ISPCCDC_FMT_HORZ_FMTSPH_SHIFT 16
++
++#define ISPCCDC_FMT_VERT_FMTLNV_SHIFT 0
++#define ISPCCDC_FMT_VERT_FMTSLV_SHIFT 16
++
++#define ISPCCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF0000
++#define ISPCCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
++
++#define ISPCCDC_FMT_VERT_FMTSLV_MASK 0x1FFF0000
++#define ISPCCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
++
++#define ISPCCDC_VP_OUT_HORZ_ST_SHIFT 0
++#define ISPCCDC_VP_OUT_HORZ_NUM_SHIFT 4
++#define ISPCCDC_VP_OUT_VERT_NUM_SHIFT 17
++
++#define ISPRSZ_PID_PREV_SHIFT 0
++#define ISPRSZ_PID_CID_SHIFT 8
++#define ISPRSZ_PID_TID_SHIFT 16
++
++#define ISPRSZ_PCR_ENABLE (1 << 0)
++#define ISPRSZ_PCR_BUSY (1 << 1)
++#define ISPRSZ_PCR_ONESHOT (1 << 2)
++
++#define ISPRSZ_CNT_HRSZ_SHIFT 0
++#define ISPRSZ_CNT_HRSZ_MASK \
++ (0x3FF << ISPRSZ_CNT_HRSZ_SHIFT)
++#define ISPRSZ_CNT_VRSZ_SHIFT 10
++#define ISPRSZ_CNT_VRSZ_MASK \
++ (0x3FF << ISPRSZ_CNT_VRSZ_SHIFT)
++#define ISPRSZ_CNT_HSTPH_SHIFT 20
++#define ISPRSZ_CNT_HSTPH_MASK (0x7 << ISPRSZ_CNT_HSTPH_SHIFT)
++#define ISPRSZ_CNT_VSTPH_SHIFT 23
++#define ISPRSZ_CNT_VSTPH_MASK (0x7 << ISPRSZ_CNT_VSTPH_SHIFT)
++#define ISPRSZ_CNT_YCPOS (1 << 26)
++#define ISPRSZ_CNT_INPTYP (1 << 27)
++#define ISPRSZ_CNT_INPSRC (1 << 28)
++#define ISPRSZ_CNT_CBILIN (1 << 29)
++
++#define ISPRSZ_OUT_SIZE_HORZ_SHIFT 0
++#define ISPRSZ_OUT_SIZE_HORZ_MASK \
++ (0xFFF << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
++#define ISPRSZ_OUT_SIZE_VERT_SHIFT 16
++#define ISPRSZ_OUT_SIZE_VERT_MASK \
++ (0xFFF << ISPRSZ_OUT_SIZE_VERT_SHIFT)
++
++#define ISPRSZ_IN_START_HORZ_ST_SHIFT 0
++#define ISPRSZ_IN_START_HORZ_ST_MASK \
++ (0x1FFF << ISPRSZ_IN_START_HORZ_ST_SHIFT)
++#define ISPRSZ_IN_START_VERT_ST_SHIFT 16
++#define ISPRSZ_IN_START_VERT_ST_MASK \
++ (0x1FFF << ISPRSZ_IN_START_VERT_ST_SHIFT)
++
++#define ISPRSZ_IN_SIZE_HORZ_SHIFT 0
++#define ISPRSZ_IN_SIZE_HORZ_MASK \
++ (0x1FFF << ISPRSZ_IN_SIZE_HORZ_SHIFT)
++#define ISPRSZ_IN_SIZE_VERT_SHIFT 16
++#define ISPRSZ_IN_SIZE_VERT_MASK \
++ (0x1FFF << ISPRSZ_IN_SIZE_VERT_SHIFT)
++
++#define ISPRSZ_SDR_INADD_ADDR_SHIFT 0
++#define ISPRSZ_SDR_INADD_ADDR_MASK 0xFFFFFFFF
++
++#define ISPRSZ_SDR_INOFF_OFFSET_SHIFT 0
++#define ISPRSZ_SDR_INOFF_OFFSET_MASK \
++ (0xFFFF << ISPRSZ_SDR_INOFF_OFFSET_SHIFT)
++
++#define ISPRSZ_SDR_OUTADD_ADDR_SHIFT 0
++#define ISPRSZ_SDR_OUTADD_ADDR_MASK 0xFFFFFFFF
++
++
++#define ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT 0
++#define ISPRSZ_SDR_OUTOFF_OFFSET_MASK \
++ (0xFFFF << ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT)
++
++#define ISPRSZ_HFILT_COEF0_SHIFT 0
++#define ISPRSZ_HFILT_COEF0_MASK \
++ (0x3FF << ISPRSZ_HFILT_COEF0_SHIFT)
++#define ISPRSZ_HFILT_COEF1_SHIFT 16
++#define ISPRSZ_HFILT_COEF1_MASK \
++ (0x3FF << ISPRSZ_HFILT_COEF1_SHIFT)
++
++#define ISPRSZ_HFILT32_COEF2_SHIFT 0
++#define ISPRSZ_HFILT32_COEF2_MASK 0x3FF
++#define ISPRSZ_HFILT32_COEF3_SHIFT 16
++#define ISPRSZ_HFILT32_COEF3_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT54_COEF4_SHIFT 0
++#define ISPRSZ_HFILT54_COEF4_MASK 0x3FF
++#define ISPRSZ_HFILT54_COEF5_SHIFT 16
++#define ISPRSZ_HFILT54_COEF5_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT76_COEFF6_SHIFT 0
++#define ISPRSZ_HFILT76_COEFF6_MASK 0x3FF
++#define ISPRSZ_HFILT76_COEFF7_SHIFT 16
++#define ISPRSZ_HFILT76_COEFF7_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT98_COEFF8_SHIFT 0
++#define ISPRSZ_HFILT98_COEFF8_MASK 0x3FF
++#define ISPRSZ_HFILT98_COEFF9_SHIFT 16
++#define ISPRSZ_HFILT98_COEFF9_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT1110_COEF10_SHIFT 0
++#define ISPRSZ_HFILT1110_COEF10_MASK 0x3FF
++#define ISPRSZ_HFILT1110_COEF11_SHIFT 16
++#define ISPRSZ_HFILT1110_COEF11_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT1312_COEFF12_SHIFT 0
++#define ISPRSZ_HFILT1312_COEFF12_MASK 0x3FF
++#define ISPRSZ_HFILT1312_COEFF13_SHIFT 16
++#define ISPRSZ_HFILT1312_COEFF13_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT1514_COEFF14_SHIFT 0
++#define ISPRSZ_HFILT1514_COEFF14_MASK 0x3FF
++#define ISPRSZ_HFILT1514_COEFF15_SHIFT 16
++#define ISPRSZ_HFILT1514_COEFF15_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT1716_COEF16_SHIFT 0
++#define ISPRSZ_HFILT1716_COEF16_MASK 0x3FF
++#define ISPRSZ_HFILT1716_COEF17_SHIFT 16
++#define ISPRSZ_HFILT1716_COEF17_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT1918_COEF18_SHIFT 0
++#define ISPRSZ_HFILT1918_COEF18_MASK 0x3FF
++#define ISPRSZ_HFILT1918_COEF19_SHIFT 16
++#define ISPRSZ_HFILT1918_COEF19_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT2120_COEF20_SHIFT 0
++#define ISPRSZ_HFILT2120_COEF20_MASK 0x3FF
++#define ISPRSZ_HFILT2120_COEF21_SHIFT 16
++#define ISPRSZ_HFILT2120_COEF21_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT2322_COEF22_SHIFT 0
++#define ISPRSZ_HFILT2322_COEF22_MASK 0x3FF
++#define ISPRSZ_HFILT2322_COEF23_SHIFT 16
++#define ISPRSZ_HFILT2322_COEF23_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT2524_COEF24_SHIFT 0
++#define ISPRSZ_HFILT2524_COEF24_MASK 0x3FF
++#define ISPRSZ_HFILT2524_COEF25_SHIFT 16
++#define ISPRSZ_HFILT2524_COEF25_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT2726_COEF26_SHIFT 0
++#define ISPRSZ_HFILT2726_COEF26_MASK 0x3FF
++#define ISPRSZ_HFILT2726_COEF27_SHIFT 16
++#define ISPRSZ_HFILT2726_COEF27_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT2928_COEF28_SHIFT 0
++#define ISPRSZ_HFILT2928_COEF28_MASK 0x3FF
++#define ISPRSZ_HFILT2928_COEF29_SHIFT 16
++#define ISPRSZ_HFILT2928_COEF29_MASK 0x3FF0000
++
++#define ISPRSZ_HFILT3130_COEF30_SHIFT 0
++#define ISPRSZ_HFILT3130_COEF30_MASK 0x3FF
++#define ISPRSZ_HFILT3130_COEF31_SHIFT 16
++#define ISPRSZ_HFILT3130_COEF31_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT_COEF0_SHIFT 0
++#define ISPRSZ_VFILT_COEF0_MASK \
++ (0x3FF << ISPRSZ_VFILT_COEF0_SHIFT)
++#define ISPRSZ_VFILT_COEF1_SHIFT 16
++#define ISPRSZ_VFILT_COEF1_MASK \
++ (0x3FF << ISPRSZ_VFILT_COEF1_SHIFT)
++
++#define ISPRSZ_VFILT10_COEF0_SHIFT 0
++#define ISPRSZ_VFILT10_COEF0_MASK 0x3FF
++#define ISPRSZ_VFILT10_COEF1_SHIFT 16
++#define ISPRSZ_VFILT10_COEF1_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT32_COEF2_SHIFT 0
++#define ISPRSZ_VFILT32_COEF2_MASK 0x3FF
++#define ISPRSZ_VFILT32_COEF3_SHIFT 16
++#define ISPRSZ_VFILT32_COEF3_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT54_COEF4_SHIFT 0
++#define ISPRSZ_VFILT54_COEF4_MASK 0x3FF
++#define ISPRSZ_VFILT54_COEF5_SHIFT 16
++#define ISPRSZ_VFILT54_COEF5_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT76_COEFF6_SHIFT 0
++#define ISPRSZ_VFILT76_COEFF6_MASK 0x3FF
++#define ISPRSZ_VFILT76_COEFF7_SHIFT 16
++#define ISPRSZ_VFILT76_COEFF7_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT98_COEFF8_SHIFT 0
++#define ISPRSZ_VFILT98_COEFF8_MASK 0x3FF
++#define ISPRSZ_VFILT98_COEFF9_SHIFT 16
++#define ISPRSZ_VFILT98_COEFF9_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT1110_COEF10_SHIFT 0
++#define ISPRSZ_VFILT1110_COEF10_MASK 0x3FF
++#define ISPRSZ_VFILT1110_COEF11_SHIFT 16
++#define ISPRSZ_VFILT1110_COEF11_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT1312_COEFF12_SHIFT 0
++#define ISPRSZ_VFILT1312_COEFF12_MASK 0x3FF
++#define ISPRSZ_VFILT1312_COEFF13_SHIFT 16
++#define ISPRSZ_VFILT1312_COEFF13_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT1514_COEFF14_SHIFT 0
++#define ISPRSZ_VFILT1514_COEFF14_MASK 0x3FF
++#define ISPRSZ_VFILT1514_COEFF15_SHIFT 16
++#define ISPRSZ_VFILT1514_COEFF15_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT1716_COEF16_SHIFT 0
++#define ISPRSZ_VFILT1716_COEF16_MASK 0x3FF
++#define ISPRSZ_VFILT1716_COEF17_SHIFT 16
++#define ISPRSZ_VFILT1716_COEF17_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT1918_COEF18_SHIFT 0
++#define ISPRSZ_VFILT1918_COEF18_MASK 0x3FF
++#define ISPRSZ_VFILT1918_COEF19_SHIFT 16
++#define ISPRSZ_VFILT1918_COEF19_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT2120_COEF20_SHIFT 0
++#define ISPRSZ_VFILT2120_COEF20_MASK 0x3FF
++#define ISPRSZ_VFILT2120_COEF21_SHIFT 16
++#define ISPRSZ_VFILT2120_COEF21_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT2322_COEF22_SHIFT 0
++#define ISPRSZ_VFILT2322_COEF22_MASK 0x3FF
++#define ISPRSZ_VFILT2322_COEF23_SHIFT 16
++#define ISPRSZ_VFILT2322_COEF23_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT2524_COEF24_SHIFT 0
++#define ISPRSZ_VFILT2524_COEF24_MASK 0x3FF
++#define ISPRSZ_VFILT2524_COEF25_SHIFT 16
++#define ISPRSZ_VFILT2524_COEF25_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT2726_COEF26_SHIFT 0
++#define ISPRSZ_VFILT2726_COEF26_MASK 0x3FF
++#define ISPRSZ_VFILT2726_COEF27_SHIFT 16
++#define ISPRSZ_VFILT2726_COEF27_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT2928_COEF28_SHIFT 0
++#define ISPRSZ_VFILT2928_COEF28_MASK 0x3FF
++#define ISPRSZ_VFILT2928_COEF29_SHIFT 16
++#define ISPRSZ_VFILT2928_COEF29_MASK 0x3FF0000
++
++#define ISPRSZ_VFILT3130_COEF30_SHIFT 0
++#define ISPRSZ_VFILT3130_COEF30_MASK 0x3FF
++#define ISPRSZ_VFILT3130_COEF31_SHIFT 16
++#define ISPRSZ_VFILT3130_COEF31_MASK 0x3FF0000
++
++#define ISPRSZ_YENH_CORE_SHIFT 0
++#define ISPRSZ_YENH_CORE_MASK \
++ (0xFF << ISPRSZ_YENH_CORE_SHIFT)
++#define ISPRSZ_YENH_SLOP_SHIFT 8
++#define ISPRSZ_YENH_SLOP_MASK \
++ (0xF << ISPRSZ_YENH_SLOP_SHIFT)
++#define ISPRSZ_YENH_GAIN_SHIFT 12
++#define ISPRSZ_YENH_GAIN_MASK \
++ (0xF << ISPRSZ_YENH_GAIN_SHIFT)
++#define ISPRSZ_YENH_ALGO_SHIFT 16
++#define ISPRSZ_YENH_ALGO_MASK \
++ (0x3 << ISPRSZ_YENH_ALGO_SHIFT)
++
++#define ISPH3A_PCR_AEW_ALAW_EN_SHIFT 1
++#define ISPH3A_PCR_AF_MED_TH_SHIFT 3
++#define ISPH3A_PCR_AF_RGBPOS_SHIFT 11
++#define ISPH3A_PCR_AEW_AVE2LMT_SHIFT 22
++#define ISPH3A_PCR_AEW_AVE2LMT_MASK 0xFFC00000
++#define ISPH3A_PCR_BUSYAF (1 << 15)
++#define ISPH3A_PCR_BUSYAEAWB (1 << 18)
++
++#define ISPH3A_AEWWIN1_WINHC_SHIFT 0
++#define ISPH3A_AEWWIN1_WINHC_MASK 0x3F
++#define ISPH3A_AEWWIN1_WINVC_SHIFT 6
++#define ISPH3A_AEWWIN1_WINVC_MASK 0x1FC0
++#define ISPH3A_AEWWIN1_WINW_SHIFT 13
++#define ISPH3A_AEWWIN1_WINW_MASK 0xFE000
++#define ISPH3A_AEWWIN1_WINH_SHIFT 24
++#define ISPH3A_AEWWIN1_WINH_MASK 0x7F000000
++
++#define ISPH3A_AEWINSTART_WINSH_SHIFT 0
++#define ISPH3A_AEWINSTART_WINSH_MASK 0x0FFF
++#define ISPH3A_AEWINSTART_WINSV_SHIFT 16
++#define ISPH3A_AEWINSTART_WINSV_MASK 0x0FFF0000
++
++#define ISPH3A_AEWINBLK_WINH_SHIFT 0
++#define ISPH3A_AEWINBLK_WINH_MASK 0x7F
++#define ISPH3A_AEWINBLK_WINSV_SHIFT 16
++#define ISPH3A_AEWINBLK_WINSV_MASK 0x0FFF0000
++
++#define ISPH3A_AEWSUBWIN_AEWINCH_SHIFT 0
++#define ISPH3A_AEWSUBWIN_AEWINCH_MASK 0x0F
++#define ISPH3A_AEWSUBWIN_AEWINCV_SHIFT 8
++#define ISPH3A_AEWSUBWIN_AEWINCV_MASK 0x0F00
++
++#define ISPHIST_PCR_ENABLE_SHIFT 0
++#define ISPHIST_PCR_ENABLE_MASK 0x01
++#define ISPHIST_PCR_ENABLE (1 << ISPHIST_PCR_ENABLE_SHIFT)
++#define ISPHIST_PCR_BUSY 0x02
++
++#define ISPHIST_CNT_DATASIZE_SHIFT 8
++#define ISPHIST_CNT_DATASIZE_MASK 0x0100
++#define ISPHIST_CNT_CLEAR_SHIFT 7
++#define ISPHIST_CNT_CLEAR_MASK 0x080
++#define ISPHIST_CNT_CLEAR (1 << ISPHIST_CNT_CLEAR_SHIFT)
++#define ISPHIST_CNT_CFA_SHIFT 6
++#define ISPHIST_CNT_CFA_MASK 0x040
++#define ISPHIST_CNT_BINS_SHIFT 4
++#define ISPHIST_CNT_BINS_MASK 0x030
++#define ISPHIST_CNT_SOURCE_SHIFT 3
++#define ISPHIST_CNT_SOURCE_MASK 0x08
++#define ISPHIST_CNT_SHIFT_SHIFT 0
++#define ISPHIST_CNT_SHIFT_MASK 0x07
++
++#define ISPHIST_WB_GAIN_WG00_SHIFT 24
++#define ISPHIST_WB_GAIN_WG00_MASK 0xFF000000
++#define ISPHIST_WB_GAIN_WG01_SHIFT 16
++#define ISPHIST_WB_GAIN_WG01_MASK 0xFF0000
++#define ISPHIST_WB_GAIN_WG02_SHIFT 8
++#define ISPHIST_WB_GAIN_WG02_MASK 0xFF00
++#define ISPHIST_WB_GAIN_WG03_SHIFT 0
++#define ISPHIST_WB_GAIN_WG03_MASK 0xFF
++
++#define ISPHIST_REG_START_END_MASK 0x3FFF
++#define ISPHIST_REG_START_SHIFT 16
++#define ISPHIST_REG_END_SHIFT 0
++#define ISPHIST_REG_START_MASK (ISPHIST_REG_START_END_MASK << \
++ ISPHIST_REG_START_SHIFT)
++#define ISPHIST_REG_END_MASK (ISPHIST_REG_START_END_MASK << \
++ ISPHIST_REG_END_SHIFT)
++
++#define ISPHIST_REG_MASK (ISPHIST_REG_START_MASK | \
++ ISPHIST_REG_END_MASK)
++
++#define ISPHIST_ADDR_SHIFT 0
++#define ISPHIST_ADDR_MASK 0x3FF
++
++#define ISPHIST_DATA_SHIFT 0
++#define ISPHIST_DATA_MASK 0xFFFFF
++
++#define ISPHIST_RADD_SHIFT 0
++#define ISPHIST_RADD_MASK 0xFFFFFFFF
++
++#define ISPHIST_RADD_OFF_SHIFT 0
++#define ISPHIST_RADD_OFF_MASK 0xFFFF
++
++#define ISPHIST_HV_INFO_HSIZE_SHIFT 16
++#define ISPHIST_HV_INFO_HSIZE_MASK 0x3FFF0000
++#define ISPHIST_HV_INFO_VSIZE_SHIFT 0
++#define ISPHIST_HV_INFO_VSIZE_MASK 0x3FFF
++
++#define ISPHIST_HV_INFO_MASK 0x3FFF3FFF
++
++#define ISPCCDC_LSC_ENABLE 1
++#define ISPCCDC_LSC_BUSY (1 << 7)
++#define ISPCCDC_LSC_GAIN_MODE_N_MASK 0x700
++#define ISPCCDC_LSC_GAIN_MODE_N_SHIFT 8
++#define ISPCCDC_LSC_GAIN_MODE_M_MASK 0x3800
++#define ISPCCDC_LSC_GAIN_MODE_M_SHIFT 12
++#define ISPCCDC_LSC_GAIN_FORMAT_MASK 0xE
++#define ISPCCDC_LSC_GAIN_FORMAT_SHIFT 1
++#define ISPCCDC_LSC_AFTER_REFORMATTER_MASK (1<<6)
++
++#define ISPCCDC_LSC_INITIAL_X_MASK 0x3F
++#define ISPCCDC_LSC_INITIAL_X_SHIFT 0
++#define ISPCCDC_LSC_INITIAL_Y_MASK 0x3F0000
++#define ISPCCDC_LSC_INITIAL_Y_SHIFT 16
++
++#define ISPMMU_REVISION_REV_MINOR_MASK 0xF
++#define ISPMMU_REVISION_REV_MAJOR_SHIFT 0x4
++
++#define IRQENABLE_MULTIHITFAULT (1<<4)
++#define IRQENABLE_TWFAULT (1<<3)
++#define IRQENABLE_EMUMISS (1<<2)
++#define IRQENABLE_TRANSLNFAULT (1<<1)
++#define IRQENABLE_TLBMISS (1)
++
++#define ISPMMU_MMUCNTL_MMU_EN (1<<1)
++#define ISPMMU_MMUCNTL_TWL_EN (1<<2)
++#define ISPMMU_MMUCNTL_EMUTLBUPDATE (1<<3)
++#define ISPMMU_AUTOIDLE 0x1
++#define ISPMMU_SIDLEMODE_FORCEIDLE 0
++#define ISPMMU_SIDLEMODE_NOIDLE 1
++#define ISPMMU_SIDLEMODE_SMARTIDLE 2
++#define ISPMMU_SIDLEMODE_SHIFT 3
++
++#define ISPCSI1_AUTOIDLE 0x1
++#define ISPCSI1_MIDLEMODE_SHIFT 12
++#define ISPCSI1_MIDLEMODE_FORCESTANDBY 0x0
++#define ISPCSI1_MIDLEMODE_NOSTANDBY 0x1
++#define ISPCSI1_MIDLEMODE_SMARTSTANDBY 0x2
++
++/* CSI2 receiver registers (ES2.0) */
++#define ISPCSI2_REVISION (0x000)
++#define ISPCSI2_SYSCONFIG (0x010)
++#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT 12
++#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK \
++ (0x3 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
++#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_FORCE \
++ (0x0 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
++#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO \
++ (0x1 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
++#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART \
++ (0x2 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
++#define ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT 1
++#define ISPCSI2_SYSCONFIG_SOFT_RESET_MASK \
++ (0x1 << ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT)
++#define ISPCSI2_SYSCONFIG_SOFT_RESET_NORMAL \
++ (0x0 << ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT)
++#define ISPCSI2_SYSCONFIG_SOFT_RESET_RESET \
++ (0x1 << ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT)
++#define ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT 0
++#define ISPCSI2_SYSCONFIG_AUTO_IDLE_MASK \
++ (0x1 << ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT)
++#define ISPCSI2_SYSCONFIG_AUTO_IDLE_FREE \
++ (0x0 << ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT)
++#define ISPCSI2_SYSCONFIG_AUTO_IDLE_AUTO \
++ (0x1 << ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT)
++#define ISPCSI2_SYSSTATUS (0x014)
++#define ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT 0
++#define ISPCSI2_SYSSTATUS_RESET_DONE_MASK \
++ (0x1 << ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT)
++#define ISPCSI2_SYSSTATUS_RESET_DONE_ONGOING \
++ (0x0 << ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT)
++#define ISPCSI2_SYSSTATUS_RESET_DONE_DONE \
++ (0x1 << ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT)
++#define ISPCSI2_IRQSTATUS (0x018)
++#define ISPCSI2_IRQSTATUS_OCP_ERR_IRQ (1 << 14)
++#define ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ (1 << 13)
++#define ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 12)
++#define ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ (1 << 11)
++#define ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ (1 << 10)
++#define ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ (1 << 9)
++#define ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ (1 << 8)
++#define ISPCSI2_IRQSTATUS_CONTEXT(n) (1 << (n))
++
++#define ISPCSI2_IRQENABLE (0x01C)
++#define ISPCSI2_CTRL (0x040)
++#define ISPCSI2_CTRL_VP_CLK_EN_SHIFT 15
++#define ISPCSI2_CTRL_VP_CLK_EN_MASK (0x1 << ISPCSI2_CTRL_VP_CLK_EN_SHIFT)
++#define ISPCSI2_CTRL_VP_CLK_EN_DISABLE (0x0 << ISPCSI2_CTRL_VP_CLK_EN_SHIFT)
++#define ISPCSI2_CTRL_VP_CLK_EN_ENABLE (0x1 << ISPCSI2_CTRL_VP_CLK_EN_SHIFT)
++
++#define ISPCSI2_CTRL_VP_ONLY_EN_SHIFT 11
++#define ISPCSI2_CTRL_VP_ONLY_EN_MASK (0x1 << ISPCSI2_CTRL_VP_ONLY_EN_SHIFT)
++#define ISPCSI2_CTRL_VP_ONLY_EN_DISABLE (0x0 << ISPCSI2_CTRL_VP_ONLY_EN_SHIFT)
++#define ISPCSI2_CTRL_VP_ONLY_EN_ENABLE (0x1 << ISPCSI2_CTRL_VP_ONLY_EN_SHIFT)
++
++#define ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT 8
++#define ISPCSI2_CTRL_VP_OUT_CTRL_MASK (0x3 << \
++ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
++#define ISPCSI2_CTRL_VP_OUT_CTRL_DISABLE (0x0 << \
++ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
++#define ISPCSI2_CTRL_VP_OUT_CTRL_DIV2 (0x1 << \
++ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
++#define ISPCSI2_CTRL_VP_OUT_CTRL_DIV3 (0x2 << \
++ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
++#define ISPCSI2_CTRL_VP_OUT_CTRL_DIV4 (0x3 << \
++ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
++
++#define ISPCSI2_CTRL_DBG_EN_SHIFT 7
++#define ISPCSI2_CTRL_DBG_EN_MASK (0x1 << ISPCSI2_CTRL_DBG_EN_SHIFT)
++#define ISPCSI2_CTRL_DBG_EN_DISABLE (0x0 << ISPCSI2_CTRL_DBG_EN_SHIFT)
++#define ISPCSI2_CTRL_DBG_EN_ENABLE (0x1 << ISPCSI2_CTRL_DBG_EN_SHIFT)
++
++#define ISPCSI2_CTRL_BURST_SIZE_SHIFT 5
++#define ISPCSI2_CTRL_BURST_SIZE_MASK (0x3 << \
++ ISPCSI2_CTRL_BURST_SIZE_SHIFT)
++#define ISPCSI2_CTRL_BURST_SIZE_MYSTERY_VAL (0x2 << \
++ ISPCSI2_CTRL_BURST_SIZE_SHIFT)
++
++#define ISPCSI2_CTRL_FRAME_SHIFT 3
++#define ISPCSI2_CTRL_FRAME_MASK (0x1 << ISPCSI2_CTRL_FRAME_SHIFT)
++#define ISPCSI2_CTRL_FRAME_DISABLE_IMM (0x0 << ISPCSI2_CTRL_FRAME_SHIFT)
++#define ISPCSI2_CTRL_FRAME_DISABLE_FEC (0x1 << ISPCSI2_CTRL_FRAME_SHIFT)
++
++#define ISPCSI2_CTRL_ECC_EN_SHIFT 2
++#define ISPCSI2_CTRL_ECC_EN_MASK (0x1 << ISPCSI2_CTRL_ECC_EN_SHIFT)
++#define ISPCSI2_CTRL_ECC_EN_DISABLE (0x0 << ISPCSI2_CTRL_ECC_EN_SHIFT)
++#define ISPCSI2_CTRL_ECC_EN_ENABLE (0x1 << ISPCSI2_CTRL_ECC_EN_SHIFT)
++
++#define ISPCSI2_CTRL_SECURE_SHIFT 1
++#define ISPCSI2_CTRL_SECURE_MASK (0x1 << ISPCSI2_CTRL_SECURE_SHIFT)
++#define ISPCSI2_CTRL_SECURE_DISABLE (0x0 << ISPCSI2_CTRL_SECURE_SHIFT)
++#define ISPCSI2_CTRL_SECURE_ENABLE (0x1 << ISPCSI2_CTRL_SECURE_SHIFT)
++
++#define ISPCSI2_CTRL_IF_EN_SHIFT 0
++#define ISPCSI2_CTRL_IF_EN_MASK (0x1 << ISPCSI2_CTRL_IF_EN_SHIFT)
++#define ISPCSI2_CTRL_IF_EN_DISABLE (0x0 << ISPCSI2_CTRL_IF_EN_SHIFT)
++#define ISPCSI2_CTRL_IF_EN_ENABLE (0x1 << ISPCSI2_CTRL_IF_EN_SHIFT)
++
++#define ISPCSI2_DBG_H (0x044)
++#define ISPCSI2_GNQ (0x048)
++#define ISPCSI2_PHY_CFG (0x050)
++#define ISPCSI2_PHY_CFG_RESET_CTRL_SHIFT 30
++#define ISPCSI2_PHY_CFG_RESET_CTRL_MASK \
++ (0x1 << ISPCSI2_PHY_CFG_RESET_CTRL_SHIFT)
++#define ISPCSI2_PHY_CFG_RESET_CTRL_ACTIVE \
++ (0x0 << ISPCSI2_PHY_CFG_RESET_CTRL_SHIFT)
++#define ISPCSI2_PHY_CFG_RESET_CTRL_DEASSERT \
++ (0x1 << ISPCSI2_PHY_CFG_RESET_CTRL_SHIFT)
++#define ISPCSI2_PHY_CFG_RESET_DONE_SHIFT 29
++#define ISPCSI2_PHY_CFG_RESET_DONE_MASK \
++ (0x1 << ISPCSI2_PHY_CFG_RESET_DONE_SHIFT)
++#define ISPCSI2_PHY_CFG_RESET_DONE_ONGOING \
++ (0x0 << ISPCSI2_PHY_CFG_RESET_DONE_SHIFT)
++#define ISPCSI2_PHY_CFG_RESET_DONE_DONE \
++ (0x1 << ISPCSI2_PHY_CFG_RESET_DONE_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_CMD_SHIFT 27
++#define ISPCSI2_PHY_CFG_PWR_CMD_MASK \
++ (0x3 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_CMD_OFF \
++ (0x0 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_CMD_ON \
++ (0x1 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_CMD_ULPW \
++ (0x2 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT 25
++#define ISPCSI2_PHY_CFG_PWR_STATUS_MASK \
++ (0x3 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_STATUS_OFF \
++ (0x0 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_STATUS_ON \
++ (0x1 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_STATUS_ULPW \
++ (0x2 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_AUTO_SHIFT 24
++#define ISPCSI2_PHY_CFG_PWR_AUTO_MASK \
++ (0x1 << ISPCSI2_PHY_CFG_PWR_AUTO_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_AUTO_DISABLE \
++ (0x0 << ISPCSI2_PHY_CFG_PWR_AUTO_SHIFT)
++#define ISPCSI2_PHY_CFG_PWR_AUTO_ENABLE \
++ (0x1 << ISPCSI2_PHY_CFG_PWR_AUTO_SHIFT)
++
++#define ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n) (3 + ((n) * 4))
++#define ISPCSI2_PHY_CFG_DATA_POL_MASK(n) \
++ (0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POL_PN(n) \
++ (0x0 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POL_NP(n) \
++ (0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
++
++#define ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n) ((n) * 4)
++#define ISPCSI2_PHY_CFG_DATA_POSITION_MASK(n) \
++ (0x7 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POSITION_NC(n) \
++ (0x0 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POSITION_1(n) \
++ (0x1 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POSITION_2(n) \
++ (0x2 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POSITION_3(n) \
++ (0x3 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POSITION_4(n) \
++ (0x4 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++#define ISPCSI2_PHY_CFG_DATA_POSITION_5(n) \
++ (0x5 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
++
++#define ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT 3
++#define ISPCSI2_PHY_CFG_CLOCK_POL_MASK \
++ (0x1 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POL_PN \
++ (0x0 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POL_NP \
++ (0x1 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
++
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT 0
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK \
++ (0x7 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_1 \
++ (0x1 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_2 \
++ (0x2 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_3 \
++ (0x3 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_4 \
++ (0x4 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
++#define ISPCSI2_PHY_CFG_CLOCK_POSITION_5 \
++ (0x5 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
++
++#define ISPCSI2_PHY_IRQSTATUS (0x054)
++#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMEXIT (1 << 26)
++#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMENTER (1 << 25)
++#define ISPCSI2_PHY_IRQSTATUS_STATEULPM5 (1 << 24)
++#define ISPCSI2_PHY_IRQSTATUS_STATEULPM4 (1 << 23)
++#define ISPCSI2_PHY_IRQSTATUS_STATEULPM3 (1 << 22)
++#define ISPCSI2_PHY_IRQSTATUS_STATEULPM2 (1 << 21)
++#define ISPCSI2_PHY_IRQSTATUS_STATEULPM1 (1 << 20)
++#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL5 (1 << 19)
++#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL4 (1 << 18)
++#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL3 (1 << 17)
++#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL2 (1 << 16)
++#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL1 (1 << 15)
++#define ISPCSI2_PHY_IRQSTATUS_ERRESC5 (1 << 14)
++#define ISPCSI2_PHY_IRQSTATUS_ERRESC4 (1 << 13)
++#define ISPCSI2_PHY_IRQSTATUS_ERRESC3 (1 << 12)
++#define ISPCSI2_PHY_IRQSTATUS_ERRESC2 (1 << 11)
++#define ISPCSI2_PHY_IRQSTATUS_ERRESC1 (1 << 10)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS5 (1 << 9)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS4 (1 << 8)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS3 (1 << 7)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS2 (1 << 6)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS1 (1 << 5)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS5 (1 << 4)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS4 (1 << 3)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS3 (1 << 2)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS2 (1 << 1)
++#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS1 1
++
++#define ISPCSI2_SHORT_PACKET (0x05C)
++#define ISPCSI2_PHY_IRQENABLE (0x060)
++#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT (1 << 26)
++#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER (1 << 25)
++#define ISPCSI2_PHY_IRQENABLE_STATEULPM5 (1 << 24)
++#define ISPCSI2_PHY_IRQENABLE_STATEULPM4 (1 << 23)
++#define ISPCSI2_PHY_IRQENABLE_STATEULPM3 (1 << 22)
++#define ISPCSI2_PHY_IRQENABLE_STATEULPM2 (1 << 21)
++#define ISPCSI2_PHY_IRQENABLE_STATEULPM1 (1 << 20)
++#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 (1 << 19)
++#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 (1 << 18)
++#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 (1 << 17)
++#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 (1 << 16)
++#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 (1 << 15)
++#define ISPCSI2_PHY_IRQENABLE_ERRESC5 (1 << 14)
++#define ISPCSI2_PHY_IRQENABLE_ERRESC4 (1 << 13)
++#define ISPCSI2_PHY_IRQENABLE_ERRESC3 (1 << 12)
++#define ISPCSI2_PHY_IRQENABLE_ERRESC2 (1 << 11)
++#define ISPCSI2_PHY_IRQENABLE_ERRESC1 (1 << 10)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 (1 << 9)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 (1 << 8)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 (1 << 7)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 (1 << 6)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 (1 << 5)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 (1 << 4)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 (1 << 3)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 (1 << 2)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 (1 << 1)
++#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS1 1
++#define ISPCSI2_DBG_P (0x068)
++#define ISPCSI2_TIMING (0x06C)
++
++
++#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n) \
++ ((16 * ((n) - 1)) + 15)
++#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_MASK(n) \
++ (0x1 << ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n))
++#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_DISABLE(n) \
++ (0x0 << ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n))
++#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_ENABLE(n) \
++ (0x1 << ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n) ((16 * ((n) - 1)) + 14)
++#define ISPCSI2_TIMING_STOP_STATE_X16_IO_MASK(n) \
++ (0x1 << ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_X16_IO_DISABLE(n) \
++ (0x0 << ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_X16_IO_ENABLE(n) \
++ (0x1 << ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n) ((16 * ((n) - 1)) + 13)
++#define ISPCSI2_TIMING_STOP_STATE_X4_IO_MASK(n) \
++ (0x1 << ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_X4_IO_DISABLE(n) \
++ (0x0 << ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_X4_IO_ENABLE(n) \
++ (0x1 << ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n))
++#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n) (16 * ((n) - 1))
++#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(n) \
++ (0x1fff << ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n))
++
++#define ISPCSI2_CTX_CTRL1(n) ((0x070) + 0x20 * (n))
++#define ISPCSI2_CTX_CTRL1_COUNT_SHIFT 8
++#define ISPCSI2_CTX_CTRL1_COUNT_MASK (0xFF << \
++ ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
++#define ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT 7
++#define ISPCSI2_CTX_CTRL1_EOF_EN_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_EOF_EN_DISABLE \
++ (0x0 << ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_EOF_EN_ENABLE \
++ (0x1 << ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT 6
++#define ISPCSI2_CTX_CTRL1_EOL_EN_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_EOL_EN_DISABLE \
++ (0x0 << ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_EOL_EN_ENABLE \
++ (0x1 << ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_CS_EN_SHIFT 5
++#define ISPCSI2_CTX_CTRL1_CS_EN_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL1_CS_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_CS_EN_DISABLE \
++ (0x0 << ISPCSI2_CTX_CTRL1_CS_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_CS_EN_ENABLE \
++ (0x1 << ISPCSI2_CTX_CTRL1_CS_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT 4
++#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_DISABLE \
++ (0x0 << ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_ENABLE \
++ (0x1 << ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_PING_PONG_SHIFT 3
++#define ISPCSI2_CTX_CTRL1_PING_PONG_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL1_PING_PONG_SHIFT)
++#define ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT 0
++#define ISPCSI2_CTX_CTRL1_CTX_EN_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_CTX_EN_DISABLE \
++ (0x0 << ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT)
++#define ISPCSI2_CTX_CTRL1_CTX_EN_ENABLE \
++ (0x1 << ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT)
++
++#define ISPCSI2_CTX_CTRL2(n) ((0x074) + 0x20 * (n))
++#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
++#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK \
++ (0x3 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
++#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11
++#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK \
++ (0x3 << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT)
++#define ISPCSI2_CTX_CTRL2_DPCM_PRED_SHIFT 10
++#define ISPCSI2_CTX_CTRL2_DPCM_PRED_MASK \
++ (0x1 << ISPCSI2_CTX_CTRL2_DPCM_PRED_SHIFT)
++
++#define ISPCSI2_CTX_CTRL2_FORMAT_SHIFT 0
++#define ISPCSI2_CTX_CTRL2_FORMAT_MASK (0x3FF << \
++ ISPCSI2_CTX_CTRL2_FORMAT_SHIFT)
++
++#define ISPCSI2_CTX_DAT_OFST(n) ((0x078) + 0x20 * (n))
++#define ISPCSI2_CTX_DAT_OFST_OFST_SHIFT 0
++#define ISPCSI2_CTX_DAT_OFST_OFST_MASK (0x1FFE0 << \
++ ISPCSI2_CTX_DAT_OFST_OFST_SHIFT)
++
++#define ISPCSI2_CTX_DAT_PING_ADDR(n) ((0x07C) + 0x20 * (n))
++#define ISPCSI2_CTX_DAT_PONG_ADDR(n) ((0x080) + 0x20 * (n))
++#define ISPCSI2_CTX_IRQENABLE(n) ((0x084) + 0x20 * (n))
++#define ISPCSI2_CTX_IRQENABLE_ECC_CORRECTION_IRQ (1 << 8)
++#define ISPCSI2_CTX_IRQENABLE_LINE_NUMBER_IRQ (1 << 7)
++#define ISPCSI2_CTX_IRQENABLE_FRAME_NUMBER_IRQ (1 << 6)
++#define ISPCSI2_CTX_IRQENABLE_CS_IRQ (1 << 5)
++#define ISPCSI2_CTX_IRQENABLE_LE_IRQ (1 << 3)
++#define ISPCSI2_CTX_IRQENABLE_LS_IRQ (1 << 2)
++#define ISPCSI2_CTX_IRQENABLE_FE_IRQ (1 << 1)
++#define ISPCSI2_CTX_IRQENABLE_FS_IRQ 1
++#define ISPCSI2_CTX_IRQSTATUS(n) ((0x088) + 0x20 * (n))
++#define ISPCSI2_CTX_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 8)
++#define ISPCSI2_CTX_IRQSTATUS_LINE_NUMBER_IRQ (1 << 7)
++#define ISPCSI2_CTX_IRQSTATUS_FRAME_NUMBER_IRQ (1 << 6)
++#define ISPCSI2_CTX_IRQSTATUS_CS_IRQ (1 << 5)
++#define ISPCSI2_CTX_IRQSTATUS_LE_IRQ (1 << 3)
++#define ISPCSI2_CTX_IRQSTATUS_LS_IRQ (1 << 2)
++#define ISPCSI2_CTX_IRQSTATUS_FE_IRQ (1 << 1)
++#define ISPCSI2_CTX_IRQSTATUS_FS_IRQ 1
++
++#define ISPCSI2_CTX_CTRL3(n) ((0x08C) + 0x20 * (n))
++#define ISPCSI2_CTX_CTRL3_ALPHA_SHIFT 5
++#define ISPCSI2_CTX_CTRL3_ALPHA_MASK (0x3FFF << \
++ ISPCSI2_CTX_CTRL3_ALPHA_SHIFT)
++
++#define ISPCSIPHY_REG0 (0x000)
++#define ISPCSIPHY_REG0_THS_TERM_SHIFT 8
++#define ISPCSIPHY_REG0_THS_TERM_MASK \
++ (0xFF << ISPCSIPHY_REG0_THS_TERM_SHIFT)
++#define ISPCSIPHY_REG0_THS_TERM_RESETVAL \
++ (0x04 << ISPCSIPHY_REG0_THS_TERM_SHIFT)
++#define ISPCSIPHY_REG0_THS_SETTLE_SHIFT 0
++#define ISPCSIPHY_REG0_THS_SETTLE_MASK \
++ (0xFF << ISPCSIPHY_REG0_THS_SETTLE_SHIFT)
++#define ISPCSIPHY_REG0_THS_SETTLE_RESETVAL \
++ (0x27 << ISPCSIPHY_REG0_THS_SETTLE_SHIFT)
++
++#define ISPCSIPHY_REG1 (0x004)
++#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_SHIFT 29
++#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_MASK \
++ (0x1 << ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_SHIFT)
++#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_ONGOING \
++ (0x0 << ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_SHIFT)
++#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_DONE \
++ (0x1 << ISPCSIPHY_REG1_RESET_DONE_CTRLCLK_SHIFT)
++/* This field is for OMAP3630 only */
++#define ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SHIFT 25
++#define ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK \
++ (0x1 << ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SHIFT)
++#define ISPCSIPHY_REG1_TCLK_TERM_SHIFT 18
++#define ISPCSIPHY_REG1_TCLK_TERM_MASK \
++ (0x7F << ISPCSIPHY_REG1_TCLK_TERM_SHIFT)
++#define ISPCSIPHY_REG1_TCLK_TERM_RESETVAL \
++ (0x00 << ISPCSIPHY_REG1_TCLK_TERM_SHIFT)
++#define ISPCSIPHY_REG1_RESERVED1_SHIFT 10
++#define ISPCSIPHY_REG1_RESERVED1_MASK \
++ (0xFF << ISPCSIPHY_REG1_RESERVED1_SHIFT)
++#define ISPCSIPHY_REG1_RESERVED1_RESETVAL \
++ (0xB8 << ISPCSIPHY_REG1_RESERVED1_SHIFT)
++/* This field is for OMAP3430 only */
++#define ISPCSIPHY_REG1_TCLK_MISS_SHIFT 8
++#define ISPCSIPHY_REG1_TCLK_MISS_MASK \
++ (0x3 << ISPCSIPHY_REG1_TCLK_MISS_SHIFT)
++#define ISPCSIPHY_REG1_TCLK_MISS_RESETVAL \
++ (0x1 << ISPCSIPHY_REG1_TCLK_MISS_SHIFT)
++/* This field is for OMAP3630 only */
++#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT 8
++#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_MASK \
++ (0x3 << ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT)
++#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_RESETVAL \
++ (0x1 << ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT)
++#define ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT 0
++#define ISPCSIPHY_REG1_TCLK_SETTLE_MASK \
++ (0xFF << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT)
++#define ISPCSIPHY_REG1_TCLK_SETTLE_RESETVAL \
++ (0x0E << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT)
++#define ISPCSIPHY_REG1_RESETVAL \
++ (ISPCSIPHY_REG1_TCLK_TERM_RESETVAL | \
++ ISPCSIPHY_REG1_RESERVED1_RESETVAL | \
++ (cpu_is_omap3630() ? \
++ ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_RESETVAL : \
++ ISPCSIPHY_REG1_TCLK_MISS_RESETVAL) | \
++ ISPCSIPHY_REG1_TCLK_SETTLE_RESETVAL)
++#define ISPCSIPHY_REG1_EDITABLE_MASK \
++ (ISPCSIPHY_REG1_TCLK_TERM_MASK | \
++ ISPCSIPHY_REG1_RESERVED1_MASK | \
++ ISPCSIPHY_REG1_TCLK_MISS_MASK | \
++ ISPCSIPHY_REG1_TCLK_SETTLE_MASK)
++
++/* This register is for OMAP3630 only */
++#define ISPCSIPHY_REG2 (0x008)
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_SHIFT 30
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK \
++ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_SHIFT)
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_SHIFT 28
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK \
++ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_SHIFT)
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_SHIFT 26
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK \
++ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_SHIFT)
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_SHIFT 24
++#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK \
++ (0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_SHIFT)
++#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT 0
++#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_MASK \
++ (0x7FFFFF << ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT)
++
++/* This instance is for OMAP3630 only */
++#define ISPCSI2_CTX_TRANSCODEH(n) (0x000 + 0x8 * (n))
++#define ISPCSI2_CTX_TRANSCODEH_HCOUNT_SHIFT 16
++#define ISPCSI2_CTX_TRANSCODEH_HCOUNT_MASK \
++ (0x1FFF << ISPCSI2_CTX_TRANSCODEH_HCOUNT_SHIFT)
++#define ISPCSI2_CTX_TRANSCODEH_HSKIP_SHIFT 0
++#define ISPCSI2_CTX_TRANSCODEH_HSKIP_MASK \
++ (0x1FFF << ISPCSI2_CTX_TRANSCODEH_HSKIP_SHIFT)
++#define ISPCSI2_CTX_TRANSCODEV(n) (0x004 + 0x8 * (n))
++#define ISPCSI2_CTX_TRANSCODEV_VCOUNT_SHIFT 16
++#define ISPCSI2_CTX_TRANSCODEV_VCOUNT_MASK \
++ (0x1FFF << ISPCSI2_CTX_TRANSCODEV_VCOUNT_SHIFT)
++#define ISPCSI2_CTX_TRANSCODEV_VSKIP_SHIFT 0
++#define ISPCSI2_CTX_TRANSCODEV_VSKIP_MASK \
++ (0x1FFF << ISPCSI2_CTX_TRANSCODEV_VSKIP_SHIFT)
++
++#endif /* __ISPREG_H__ */
+diff --git a/drivers/media/video/isp/ispresizer.c b/drivers/media/video/isp/ispresizer.c
+new file mode 100644
+index 0000000..2c3879b
+--- /dev/null
++++ b/drivers/media/video/isp/ispresizer.c
+@@ -0,0 +1,1732 @@
++/*
++ * ispresizer.c
++ *
++ * Driver Library for Resizer module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C)2009 Texas Instruments, Inc.
++ *
++ * Rewritten by: Antti Koskipaa <antti.koskipaa@nokia.com>
++ *
++ * Based on code by:
++ * Sameer Venkatraman <sameerv@ti.com>
++ * Mohit Jalori
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++
++#include "isp.h"
++#include "ispreg.h"
++#include "ispresizer.h"
++
++/*
++ * Resizer Constants
++ */
++#define MIN_RESIZE_VALUE 64
++#define MID_RESIZE_VALUE 512
++#define MAX_RESIZE_VALUE 1024
++
++#define MIN_IN_WIDTH 32
++#define MIN_IN_HEIGHT 32
++#define MAX_IN_WIDTH_MEMORY_MODE 4095
++#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES1 1280
++#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES2 4095
++#define MAX_IN_HEIGHT 4095
++
++#define MIN_OUT_WIDTH 16
++#define MIN_OUT_HEIGHT 2
++#define MAX_OUT_HEIGHT 4095
++
++/*
++ * Resizer Use Constraints
++ * "TRM ES3.1, table 12-46"
++ */
++#define MAX_4TAP_OUT_WIDTH_ES1 1280
++#define MAX_7TAP_OUT_WIDTH_ES1 640
++#define MAX_4TAP_OUT_WIDTH_ES2 3312
++#define MAX_7TAP_OUT_WIDTH_ES2 1650
++#define MAX_4TAP_OUT_WIDTH_3630 4096
++#define MAX_7TAP_OUT_WIDTH_3630 2048
++
++/*
++ * Constants for ratio calculation
++ */
++#define RESIZE_DIVISOR 256
++#define DEFAULT_PHASE 1
++
++/*
++ * Default (and only) configuration of filter coefficients.
++ * 7-tap mode is for scale factors 0.25x to 0.5x.
++ * 4-tap mode is for scale factors 0.5x to 4.0x.
++ * There shouldn't be any reason to recalculate these, EVER.
++ */
++static const struct isprsz_coef filter_coefs = {
++ /* For 8-phase 4-tap horizontal filter: */
++ {
++ 0x0000, 0x0100, 0x0000, 0x0000,
++ 0x03FA, 0x00F6, 0x0010, 0x0000,
++ 0x03F9, 0x00DB, 0x002C, 0x0000,
++ 0x03FB, 0x00B3, 0x0053, 0x03FF,
++ 0x03FD, 0x0082, 0x0084, 0x03FD,
++ 0x03FF, 0x0053, 0x00B3, 0x03FB,
++ 0x0000, 0x002C, 0x00DB, 0x03F9,
++ 0x0000, 0x0010, 0x00F6, 0x03FA
++ },
++ /* For 8-phase 4-tap vertical filter: */
++ {
++ 0x0000, 0x0100, 0x0000, 0x0000,
++ 0x03FA, 0x00F6, 0x0010, 0x0000,
++ 0x03F9, 0x00DB, 0x002C, 0x0000,
++ 0x03FB, 0x00B3, 0x0053, 0x03FF,
++ 0x03FD, 0x0082, 0x0084, 0x03FD,
++ 0x03FF, 0x0053, 0x00B3, 0x03FB,
++ 0x0000, 0x002C, 0x00DB, 0x03F9,
++ 0x0000, 0x0010, 0x00F6, 0x03FA
++ },
++ /* For 4-phase 7-tap horizontal filter: */
++ #define DUMMY 0
++ {
++ 0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY,
++ 0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY,
++ 0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY,
++ 0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY
++ },
++ /* For 4-phase 7-tap vertical filter: */
++ {
++ 0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY,
++ 0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY,
++ 0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY,
++ 0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY
++ }
++ /*
++ * The dummy padding is required in 7-tap mode because of how the
++ * registers are arranged physically.
++ */
++ #undef DUMMY
++};
++
++/* Structure for saving/restoring resizer module registers */
++static struct isp_reg isprsz_reg_list[] = {
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT10, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT32, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT54, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT76, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT98, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1110, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1312, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1514, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1716, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1918, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2120, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2322, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2524, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2726, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2928, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT3130, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT10, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT32, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT54, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT76, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT98, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1110, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1312, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1514, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1716, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1918, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2120, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2322, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2524, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2726, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2928, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT3130, 0x0000},
++ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH, 0x0000},
++ {OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP, 0x0000},
++ {0, ISP_TOK_TERM, 0x0000}
++};
++
++/*
++ * __resizer_get_format - helper function for getting resizer format
++ * @res : pointer to resizer private structure
++ * @pad : pad number
++ * @fh : V4L2 subdev file handle
++ * @which : wanted subdev format
++ * return a pointer to the requested mbus frame format
++ */
++static struct v4l2_mbus_framefmt *
++__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return v4l2_subdev_get_probe_format(fh, pad);
++ else
++ return &res->formats[pad];
++}
++
++/*
++ * __resizer_get_crop - helper function for getting resizer crop rectangle
++ * @res : pointer to resizer private structure
++ * @fh : V4L2 subdev file handle
++ * @which : wanted subdev crop rectangle
++ */
++static struct v4l2_rect *
++__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
++ enum v4l2_subdev_format which)
++{
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return v4l2_subdev_get_probe_crop(fh, RESZ_PAD_SINK);
++ else
++ return &res->crop;
++}
++
++/*
++ * ispresizer_set_filters - Set resizer filters
++ * @isp_res: Device context.
++ * @h_coeff: horizontal coefficient
++ * @v_coeff: vertical coefficient
++ * Return none
++ */
++static void ispresizer_set_filters(struct isp_res_device *res,
++ const u16 *h_coeff,
++ const u16 *v_coeff)
++{
++ struct isp_device *isp = to_isp_device(res);
++ u32 startaddr_h, startaddr_v, tmp_h, tmp_v;
++ int i;
++
++ startaddr_h = ISPRSZ_HFILT10;
++ startaddr_v = ISPRSZ_VFILT10;
++
++ for (i = 0; i < COEFF_CNT; i += 2) {
++ tmp_h = h_coeff[i] |
++ (h_coeff[i + 1] << ISPRSZ_HFILT_COEF1_SHIFT);
++ tmp_v = v_coeff[i] |
++ (v_coeff[i + 1] << ISPRSZ_VFILT_COEF1_SHIFT);
++ isp_reg_writel(isp, tmp_h, OMAP3_ISP_IOMEM_RESZ, startaddr_h);
++ isp_reg_writel(isp, tmp_v, OMAP3_ISP_IOMEM_RESZ, startaddr_v);
++ startaddr_h += 4;
++ startaddr_v += 4;
++ }
++}
++
++/*
++ * ispresizer_set_bilinear - Chrominance horizontal algorithm select
++ * @isp_res: Device context.
++ * @type: Filtering interpolation type.
++ *
++ * Filtering that is same as luminance processing is
++ * intended only for downsampling, and bilinear interpolation
++ * is intended only for upsampling.
++ */
++static void ispresizer_set_bilinear(struct isp_res_device *res,
++ enum resizer_chroma_algo type)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ if (type == RSZ_BILINEAR)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ISPRSZ_CNT_CBILIN);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ~ISPRSZ_CNT_CBILIN);
++}
++
++/*
++ * ispresizer_set_ycpos - Luminance and chrominance order
++ * @isp_res: Device context.
++ * @order: order type.
++ */
++static void ispresizer_set_ycpos(struct isp_res_device *res,
++ enum v4l2_mbus_pixelcode pixelcode)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ switch (pixelcode) {
++ case V4L2_MBUS_FMT_YUYV16_1X16:
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ISPRSZ_CNT_YCPOS);
++ break;
++ case V4L2_MBUS_FMT_UYVY16_1X16:
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ~ISPRSZ_CNT_YCPOS);
++ break;
++ default:
++ return;
++ }
++}
++
++/*
++ * ispresizer_set_phase - Setup horizontal and vertical starting phase
++ * @isp_res: Device context.
++ * @h_phase: horizontal phase parameters.
++ * @v_phase: vertical phase parameters.
++ *
++ * Horizontal and vertical phase range is 0 to 7
++ */
++static void ispresizer_set_phase(struct isp_res_device *res, u32 h_phase,
++ u32 v_phase)
++{
++ struct isp_device *isp = to_isp_device(res);
++ u32 rgval = 0;
++
++ rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
++ ~(ISPRSZ_CNT_HSTPH_MASK | ISPRSZ_CNT_VSTPH_MASK);
++ rgval |= (h_phase << ISPRSZ_CNT_HSTPH_SHIFT) & ISPRSZ_CNT_HSTPH_MASK;
++ rgval |= (v_phase << ISPRSZ_CNT_VSTPH_SHIFT) & ISPRSZ_CNT_VSTPH_MASK;
++
++ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
++}
++
++/*
++ * ispresizer_set_luma - Setup luminance enhancer parameters
++ * @isp_res: Device context.
++ * @luma: Structure for luminance enhancer parameters.
++ *
++ * Algorithm select:
++ * 0x0: Disable
++ * 0x1: [-1 2 -1]/2 high-pass filter
++ * 0x2: [-1 -2 6 -2 -1]/4 high-pass filter
++ *
++ * Maximum gain:
++ * The data is coded in U4Q4 representation.
++ *
++ * Slope:
++ * The data is coded in U4Q4 representation.
++ *
++ * Coring offset:
++ * The data is coded in U8Q0 representation.
++ *
++ * The new luminance value is computed as:
++ * Y += HPF(Y) x max(GAIN, (HPF(Y) - CORE) x SLOP + 8) >> 4.
++ */
++static void ispresizer_set_luma(struct isp_res_device *res,
++ struct resizer_luma_yenh *luma)
++{
++ struct isp_device *isp = to_isp_device(res);
++ u32 rgval = 0;
++
++ rgval = (luma->algo << ISPRSZ_YENH_ALGO_SHIFT)
++ & ISPRSZ_YENH_ALGO_MASK;
++ rgval |= (luma->gain << ISPRSZ_YENH_GAIN_SHIFT)
++ & ISPRSZ_YENH_GAIN_MASK;
++ rgval |= (luma->slope << ISPRSZ_YENH_SLOP_SHIFT)
++ & ISPRSZ_YENH_SLOP_MASK;
++ rgval |= (luma->core << ISPRSZ_YENH_CORE_SHIFT)
++ & ISPRSZ_YENH_CORE_MASK;
++
++ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH);
++}
++
++/*
++ * ispresizer_set_source - Input source select
++ * @isp_res: Device context.
++ * @source: Input source type
++ *
++ * If this field is set to RESIZER_INPUT_VP, the resizer input is fed from
++ * Preview/CCDC engine, otherwise from memory.
++ */
++static void ispresizer_set_source(struct isp_res_device *res,
++ enum resizer_input_entity source)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ if (source == RESIZER_INPUT_MEMORY)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ISPRSZ_CNT_INPSRC);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ~ISPRSZ_CNT_INPSRC);
++}
++
++/*
++ * ispresizer_set_ratio - Setup horizontal and vertical resizing value
++ * @isp_res: Device context.
++ * @ratio: Structure for ratio parameters.
++ *
++ * Resizing range from 64 to 1024
++ */
++static void ispresizer_set_ratio(struct isp_res_device *res,
++ const struct resizer_ratio *ratio)
++{
++ struct isp_device *isp = to_isp_device(res);
++ const u16 *h_filter, *v_filter;
++ u32 rgval = 0;
++
++ rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
++ ~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK);
++ rgval |= ((ratio->horz - 1) << ISPRSZ_CNT_HRSZ_SHIFT)
++ & ISPRSZ_CNT_HRSZ_MASK;
++ rgval |= ((ratio->vert - 1) << ISPRSZ_CNT_VRSZ_SHIFT)
++ & ISPRSZ_CNT_VRSZ_MASK;
++ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
++
++ /* prepare horizontal filter coefficients */
++ if (ratio->horz > MID_RESIZE_VALUE)
++ h_filter = &filter_coefs.h_filter_coef_7tap[0];
++ else
++ h_filter = &filter_coefs.h_filter_coef_4tap[0];
++
++ /* prepare vertical filter coefficients */
++ if (ratio->vert > MID_RESIZE_VALUE)
++ v_filter = &filter_coefs.v_filter_coef_7tap[0];
++ else
++ v_filter = &filter_coefs.v_filter_coef_4tap[0];
++
++ ispresizer_set_filters(res, h_filter, v_filter);
++}
++
++/*
++ * ispresizer_set_output_size - Setup the output height and width
++ * @isp_res: Device context.
++ * @width: Output width.
++ * @height: Output height.
++ *
++ * Width :
++ * The value must be EVEN.
++ *
++ * Height:
++ * The number of bytes written to SDRAM must be
++ * a multiple of 16-bytes if the vertical resizing factor
++ * is greater than 1x (upsizing)
++ */
++static void ispresizer_set_output_size(struct isp_res_device *res,
++ u32 width, u32 height)
++{
++ struct isp_device *isp = to_isp_device(res);
++ u32 rgval = 0;
++
++ dev_dbg(isp->dev, "Output size[w/h]: %dx%d\n", width, height);
++ rgval = (width << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
++ & ISPRSZ_OUT_SIZE_HORZ_MASK;
++ rgval |= (height << ISPRSZ_OUT_SIZE_VERT_SHIFT)
++ & ISPRSZ_OUT_SIZE_VERT_MASK;
++ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE);
++}
++
++/*
++ * ispresizer_set_output_offset - Setup memory offset for the output lines.
++ * @isp_res: Device context.
++ * @offset: Memory offset.
++ *
++ * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte
++ * boundary; the 5 LSBs are read-only. For optimal use of SDRAM bandwidth,
++ * the SDRAM line offset must be set on a 256-byte boundary
++ */
++static void ispresizer_set_output_offset(struct isp_res_device *res,
++ u32 offset)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF);
++}
++
++/*
++ * ispresizer_set_start - Setup vertical and horizontal start position
++ * @isp_res: Device context.
++ * @left: Horizontal start position.
++ * @top: Vertical start position.
++ *
++ * Vertical start line:
++ * This field makes sense only when the resizer obtains its input
++ * from the preview engine/CCDC
++ *
++ * Horizontal start pixel:
++ * Pixels are coded on 16 bits for YUV and 8 bits for color separate data.
++ * When the resizer gets its input from SDRAM, this field must be set
++ * to <= 15 for YUV 16-bit data and <= 31 for 8-bit color separate data
++ */
++static void ispresizer_set_start(struct isp_res_device *res, u32 left,
++ u32 top)
++{
++ struct isp_device *isp = to_isp_device(res);
++ u32 rgval = 0;
++
++ rgval = (left << ISPRSZ_IN_START_HORZ_ST_SHIFT)
++ & ISPRSZ_IN_START_HORZ_ST_MASK;
++ rgval |= (top << ISPRSZ_IN_START_VERT_ST_SHIFT)
++ & ISPRSZ_IN_START_VERT_ST_MASK;
++
++ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START);
++}
++
++/*
++ * ispresizer_set_input_size - Setup the input size
++ * @isp_res: Device context.
++ * @width: The range is 0 to 4095 pixels
++ * @height: The range is 0 to 4095 lines
++ */
++static void ispresizer_set_input_size(struct isp_res_device *res,
++ u32 width, u32 height)
++{
++ struct isp_device *isp = to_isp_device(res);
++ u32 rgval = 0;
++
++ dev_dbg(isp->dev, "Input size[w/h]: %dx%d\t", width, height);
++
++ rgval = (width << ISPRSZ_IN_SIZE_HORZ_SHIFT)
++ & ISPRSZ_IN_SIZE_HORZ_MASK;
++ rgval |= (height << ISPRSZ_IN_SIZE_VERT_SHIFT)
++ & ISPRSZ_IN_SIZE_VERT_MASK;
++
++ isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE);
++}
++
++/*
++ * ispresizer_set_input_offset - Setup the memory offset for the input lines
++ * @isp_res: Device context.
++ * @offset: Memory offset.
++ *
++ * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte
++ * boundary; the 5 LSBs are read-only. This field must be programmed to be
++ * 0x0 if the resizer input is from preview engine/CCDC.
++ */
++static void ispresizer_set_input_offset(struct isp_res_device *res,
++ u32 offset)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF);
++}
++
++/*
++ * ispresizer_set_intype - Input type select
++ * @isp_res: Device context.
++ * @type: Pixel format type.
++ */
++static void ispresizer_set_intype(struct isp_res_device *res,
++ enum resizer_colors_type type)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ if (type == RSZ_COLOR8)
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ISPRSZ_CNT_INPTYP);
++ else
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
++ ~ISPRSZ_CNT_INPTYP);
++}
++
++/*
++ * __ispresizer_set_inaddr - Helper function for set input address
++ * @res : pointer to resizer private data structure
++ * @addr: input address
++ * return none
++ */
++static void __ispresizer_set_inaddr(struct isp_res_device *res, u32 addr)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD);
++}
++
++/*
++ * The data rate at the horizontal resizer output must not exceed half the
++ * functional clock or 100 MP/s, whichever is lower. According to the TRM
++ * there's no similar requirement for the vertical resizer output. However
++ * experience showed that vertical upscaling by 4 leads to SBL overflows (with
++ * data rates at the resizer output exceeding 300 MP/s). Limiting the resizer
++ * output data rate to the functional clock or 200 MP/s, whichever is lower,
++ * seems to get rid of SBL overflows.
++ *
++ * The maximum data rate at the output of the horizontal resizer can thus be
++ * computed with
++ *
++ * max intermediate rate <= L3 clock * input height / output height
++ * max intermediate rate <= L3 clock / 2
++ *
++ * The maximum data rate at the resizer input is then
++ *
++ * max input rate <= max intermediate rate * input width / output width
++ *
++ * where the input width and height are the resizer input crop rectangle size.
++ * The TRM doesn't clearly explain if that's a maximum instant data rate or a
++ * maximum average data rate.
++ */
++void ispresizer_max_rate(struct isp_res_device *res, unsigned int *max_rate)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
++ const struct v4l2_mbus_framefmt *ofmt = &res->formats[RESZ_PAD_SOURCE];
++ unsigned long limit = min(pipe->l3_ick, 200000000UL);
++ unsigned long clock;
++
++ clock = div_u64((u64)limit * res->crop.height, ofmt->height);
++ clock = min(clock, limit / 2);
++ *max_rate = div_u64((u64)clock * res->crop.width, ofmt->width);
++}
++
++/*
++ * When the resizer processes images from memory, the driver must slow down read
++ * requests on the input to at least comply with the internal data rate
++ * requirements. If the application real-time requirements can cope with slower
++ * processing, the resizer can be slowed down even more to put less pressure on
++ * the overall system.
++ *
++ * When the resizer processes images on the fly (either from the CCDC or the
++ * preview module), the same data rate requirements apply but they can't be
++ * enforced at the resizer level. The image input module (sensor, CCP2 or
++ * preview module) must not provide image data faster than the resizer can
++ * process.
++ *
++ * For live image pipelines, the data rate is set by the frame format, size and
++ * rate. The sensor output frame rate must not exceed the maximum resizer data
++ * rate.
++ *
++ * The resizer slows down read requests by inserting wait cycles in the SBL
++ * requests. The maximum number of 256-byte requests per second can be computed
++ * as (the data rate is multiplied by 2 to convert from pixels per second to
++ * bytes per second)
++ *
++ * request per second = data rate * 2 / 256
++ * cycles per request = cycles per second / requests per second
++ *
++ * The number of cycles per second is controlled by the L3 clock, leading to
++ *
++ * cycles per request = L3 frequency / 2 * 256 / data rate
++ */
++static void ispresizer_adjust_bandwidth(struct isp_res_device *res)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
++ struct isp_device *isp = to_isp_device(res);
++ unsigned long l3_ick = pipe->l3_ick;
++ struct v4l2_fract *timeperframe;
++ unsigned int cycles_per_frame;
++ unsigned int requests_per_frame;
++ unsigned int cycles_per_request;
++ unsigned int granularity;
++ unsigned int minimum;
++ unsigned int maximum;
++ unsigned int value;
++
++ if (res->input != RESIZER_INPUT_MEMORY) {
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
++ ~ISPSBL_SDR_REQ_RSZ_EXP_MASK);
++ return;
++ }
++
++ switch (isp->revision) {
++ case ISP_REVISION_1_0:
++ case ISP_REVISION_2_0:
++ default:
++ granularity = 1024;
++ break;
++
++ case ISP_REVISION_15_0:
++ granularity = 32;
++ break;
++ }
++
++ /* Compute the minimum number of cycles per request, based on the
++ * pipeline maximum data rate. This is an absolute lower bound if we
++ * don't want SBL overflows, so round the value up.
++ */
++ cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
++ pipe->max_rate);
++ minimum = DIV_ROUND_UP(cycles_per_request, granularity);
++
++ /* Compute the maximum number of cycles per request, based on the
++ * requested frame rate. This is a soft upper bound to achieve a frame
++ * rate equal or higher than the requested value, so round the value
++ * down.
++ */
++ timeperframe = &pipe->max_timeperframe;
++
++ requests_per_frame = DIV_ROUND_UP(res->crop.width * 2, 256)
++ * res->crop.height;
++ cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
++ timeperframe->denominator);
++ cycles_per_request = cycles_per_frame / requests_per_frame;
++
++ maximum = cycles_per_request / granularity;
++
++ value = max(minimum, maximum);
++
++ dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
++ isp_reg_and_or(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
++ ~ISPSBL_SDR_REQ_RSZ_EXP_MASK,
++ value << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT);
++}
++
++/*
++ * ispresizer_busy - Checks if ISP resizer is busy.
++ *
++ * Returns busy field from ISPRSZ_PCR register.
++ */
++int ispresizer_busy(struct isp_res_device *res)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ return isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) &
++ ISPRSZ_PCR_BUSY;
++}
++
++/*
++ * ispresizer_set_inaddr - Sets the memory address of the input frame.
++ * @addr: 32bit memory address aligned on 32byte boundary.
++ */
++static void ispresizer_set_inaddr(struct isp_res_device *res, u32 addr)
++{
++ res->addr_base = addr;
++
++ /* This will handle crop settings in stream off state */
++ if (res->crop_offset)
++ addr += res->crop_offset & ~0x1f;
++
++ __ispresizer_set_inaddr(res, addr);
++}
++
++/*
++ * Configures the memory address to which the output frame is written.
++ * @addr: 32bit memory address aligned on 32byte boundary.
++ * Note: For SBL efficiency reasons the address should be on a 256-byte
++ * boundary.
++ */
++static void ispresizer_set_outaddr(struct isp_res_device *res, u32 addr)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ /*
++ * Set output address. This needs to be in its own function
++ * because it changes often.
++ */
++ isp_reg_writel(isp, addr << ISPRSZ_SDR_OUTADD_ADDR_SHIFT,
++ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD);
++}
++
++/*
++ * ispresizer_save_context - Saves the values of the resizer module registers.
++ */
++void ispresizer_save_context(struct isp_device *isp)
++{
++ isp_save_context(isp, isprsz_reg_list);
++}
++
++/*
++ * ispresizer_restore_context - Restores resizer module register values.
++ */
++void ispresizer_restore_context(struct isp_device *isp)
++{
++ isp_restore_context(isp, isprsz_reg_list);
++}
++
++/*
++ * ispresizer_print_status - Prints the values of the resizer module registers.
++ */
++#define RSZ_PRINT_REGISTER(isp, name)\
++ dev_dbg(isp->dev, "###RSZ " #name "=0x%08x\n", \
++ isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_##name))
++
++static void ispresizer_print_status(struct isp_res_device *res)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ dev_dbg(isp->dev, "-------------Resizer Register dump----------\n");
++
++ RSZ_PRINT_REGISTER(isp, PCR);
++ RSZ_PRINT_REGISTER(isp, CNT);
++ RSZ_PRINT_REGISTER(isp, OUT_SIZE);
++ RSZ_PRINT_REGISTER(isp, IN_START);
++ RSZ_PRINT_REGISTER(isp, IN_SIZE);
++ RSZ_PRINT_REGISTER(isp, SDR_INADD);
++ RSZ_PRINT_REGISTER(isp, SDR_INOFF);
++ RSZ_PRINT_REGISTER(isp, SDR_OUTADD);
++ RSZ_PRINT_REGISTER(isp, SDR_OUTOFF);
++ RSZ_PRINT_REGISTER(isp, YENH);
++
++ dev_dbg(isp->dev, "--------------------------------------------\n");
++}
++
++/*
++ * ispresizer_calc_ratios - Helper function for calculate resizer ratios
++ * @res: pointer to resizer private data structure
++ * @input: input frame size
++ * @output: output frame size
++ * @ratio : return calculated ratios
++ * return none
++ *
++ * The resizer uses a polyphase sample rate converter. The upsampling filter
++ * has a fixed number of phases that depend on the resizing ratio. As the ratio
++ * computation depends on the number of phases, we need to compute a first
++ * approximation and then refine it.
++ *
++ * The input/output/ratio relationship is given by the OMAP34xx TRM:
++ *
++ * - 8-phase, 4-tap mode (RSZ = 64 ~ 512)
++ * iw = (32 * sph + (ow - 1) * hrsz + 16) >> 8 + 7
++ * ih = (32 * spv + (oh - 1) * vrsz + 16) >> 8 + 4
++ * - 4-phase, 7-tap mode (RSZ = 513 ~ 1024)
++ * iw = (64 * sph + (ow - 1) * hrsz + 32) >> 8 + 7
++ * ih = (64 * spv + (oh - 1) * vrsz + 32) >> 8 + 7
++ *
++ * iw and ih are the input width and height after cropping. Those equations need
++ * to be satisfied exactly for the resizer to work correctly.
++ *
++ * Reverting the equations, we can compute the resizing ratios with
++ *
++ * - 8-phase, 4-tap mode
++ * hrsz = ((iw - 7) * 256 - 16 - 32 * sph) / (ow - 1)
++ * vrsz = ((ih - 4) * 256 - 16 - 32 * spv) / (oh - 1)
++ * - 4-phase, 7-tap mode
++ * hrsz = ((iw - 7) * 256 - 32 - 64 * sph) / (ow - 1)
++ * vrsz = ((ih - 7) * 256 - 32 - 64 * spv) / (oh - 1)
++ *
++ * The ratios are integer values, and must be rounded down to ensure that the
++ * cropped input size is not bigger than the uncropped input size. As the ratio
++ * in 7-tap mode is always smaller than the ratio in 4-tap mode, we can use the
++ * 7-tap mode equations to compute a ratio approximation.
++ *
++ * We first clamp the output size according to the hardware capabilities to avoid
++ * auto-cropping the input more than required to satisfy the TRM equations. The
++ * worst case is 7-tap mode, which will lead to the smallest output width. We
++ * can thus compute the minimum and maximum output sizes with
++ *
++ * - 4-phase, 7-tap mode
++ * min ow = ((iw - 7) * 256 - 32 - 64 * sph) / 1024 + 1
++ * min oh = ((ih - 7) * 256 - 32 - 64 * spv) / 1024 + 1
++ * max ow = ((iw - 7) * 256 - 32 - 64 * sph) / 64 + 1
++ * max oh = ((ih - 7) * 256 - 32 - 64 * spv) / 64 + 1
++ *
++ * We then compute and clamp the ratios (x1/4 ~ x4). Clamping the output size to
++ * the maximum value guarantees that the ratio value will never be smaller than
++ * the minimum, but it could still slightly exceed the maximum. Clamping the
++ * ratio will thus result in a resizing factor slightly larger than the
++ * requested value.
++ *
++ * To accommodate that, and make sure the TRM equations are satisfied exactly, we
++ * compute the input crop rectangle as the last step.
++ *
++ * As if the situation wasn't complex enough, the maximum output width depends
++ * on the vertical resizing ratio. Fortunately, the output height doesn't
++ * depend on the horizontal resizing ratio. We can then start by computing the
++ * output height and the vertical ratio, and then move to computing the output
++ * width and the horizontal ratio.
++ */
++static void ispresizer_calc_ratios(struct isp_res_device *res,
++ struct v4l2_rect *input,
++ struct v4l2_mbus_framefmt *output,
++ struct resizer_ratio *ratio)
++{
++ struct isp_device *isp = to_isp_device(res);
++ const unsigned int spv = DEFAULT_PHASE;
++ const unsigned int sph = DEFAULT_PHASE;
++ unsigned int upscaled_width;
++ unsigned int upscaled_height;
++ unsigned int min_width;
++ unsigned int min_height;
++ unsigned int max_width;
++ unsigned int max_height;
++ unsigned int width_alignment;
++
++ /*
++ * Clamp the output height based on the hardware capabilities and
++ * compute the vertical resizing ratio.
++ */
++ min_height = ((input->height - 7) * 256 - 32 - 64 * spv) / 1024 + 1;
++ min_height = max_t(unsigned int, min_height, MIN_OUT_HEIGHT);
++ max_height = ((input->height - 7) * 256 - 32 - 64 * spv) / 64 + 1;
++ max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT);
++ output->height = clamp(output->height, min_height, max_height);
++
++ ratio->vert = ((input->height - 7) * 256 - 32 - 64 * spv)
++ / (output->height - 1);
++ ratio->vert = clamp_t(unsigned int, ratio->vert,
++ MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
++
++ if (ratio->vert <= MID_RESIZE_VALUE) {
++ upscaled_height = (output->height - 1) * ratio->vert
++ + 32 * spv + 16;
++ input->height = (upscaled_height >> 8) + 4;
++ } else {
++ upscaled_height = (output->height - 1) * ratio->vert
++ + 64 * spv + 32;
++ input->height = (upscaled_height >> 8) + 7;
++ }
++
++ /*
++ * Compute the minimum and maximum output widths based on the hardware
++ * capabilities. The maximum depends on the vertical resizing ratio.
++ */
++ min_width = ((input->width - 7) * 256 - 32 - 64 * sph) / 1024 + 1;
++ min_width = max_t(unsigned int, min_width, MIN_OUT_WIDTH);
++
++ if (ratio->vert <= MID_RESIZE_VALUE) {
++ switch (isp->revision) {
++ case ISP_REVISION_1_0:
++ max_width = MAX_4TAP_OUT_WIDTH_ES1;
++ break;
++
++ case ISP_REVISION_2_0:
++ default:
++ max_width = MAX_4TAP_OUT_WIDTH_ES2;
++ break;
++
++ case ISP_REVISION_15_0:
++ max_width = MAX_4TAP_OUT_WIDTH_3630;
++ break;
++ }
++ } else {
++ switch (isp->revision) {
++ case ISP_REVISION_1_0:
++ max_width = MAX_7TAP_OUT_WIDTH_ES1;
++ break;
++
++ case ISP_REVISION_2_0:
++ default:
++ max_width = MAX_7TAP_OUT_WIDTH_ES2;
++ break;
++
++ case ISP_REVISION_15_0:
++ max_width = MAX_7TAP_OUT_WIDTH_3630;
++ break;
++ }
++ }
++ max_width = min(((input->width - 7) * 256 - 32 - 64 * sph) / 64 + 1,
++ max_width);
++
++ /*
++ * The output width must be even, and must be a multiple of 16 bytes
++ * when upscaling vertically. Clamp the output width to the valid range.
++ * Take the alignment into account (the maximum width in 7-tap mode on
++ * ES2 isn't a multiple of 8) and align the result up to make sure it
++ * won't be smaller than the minimum.
++ */
++ width_alignment = ratio->vert < 256 ? 8 : 2;
++ output->width = clamp(output->width, min_width,
++ max_width & ~(width_alignment - 1));
++ output->width = ALIGN(output->width, width_alignment);
++
++ ratio->horz = ((input->width - 7) * 256 - 32 - 64 * sph)
++ / (output->width - 1);
++ ratio->horz = clamp_t(unsigned int, ratio->horz,
++ MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
++
++ if (ratio->horz <= MID_RESIZE_VALUE) {
++ upscaled_width = (output->width - 1) * ratio->horz
++ + 32 * sph + 16;
++ input->width = (upscaled_width >> 8) + 7;
++ } else {
++ upscaled_width = (output->width - 1) * ratio->horz
++ + 64 * sph + 32;
++ input->width = (upscaled_width >> 8) + 7;
++ }
++}
++
++/*
++ * ispresizer_set_crop_params - Setup hardware with cropping parameters
++ * @res : ispresizer private structure
++ * @input : input mbus frame format
++ * @output : output mbus frame format
++ * return none
++ */
++static void ispresizer_set_crop_params(struct isp_res_device *res,
++ const struct v4l2_mbus_framefmt *input,
++ const struct v4l2_mbus_framefmt *output)
++{
++ ispresizer_set_ratio(res, &res->ratio);
++
++ /* Set chrominance horizontal algorithm */
++ if (res->ratio.horz >= RESIZE_DIVISOR)
++ ispresizer_set_bilinear(res, RSZ_THE_SAME);
++ else
++ ispresizer_set_bilinear(res, RSZ_BILINEAR);
++
++ ispresizer_adjust_bandwidth(res);
++
++ if (res->input == RESIZER_INPUT_MEMORY) {
++ /* Calculate additional offset for crop */
++ res->crop_offset = (res->crop.top * input->width +
++ res->crop.left) * 2;
++ /*
++ * Write lowest 4 bits of horizontal pixel offset (in pixels),
++ * vertical start must be 0.
++ */
++ ispresizer_set_start(res, (res->crop_offset / 2) & 0xf, 0);
++
++ /*
++ * Set start (read) address for cropping, in bytes.
++ * Lowest 5 bits must be zero.
++ */
++ __ispresizer_set_inaddr(res,
++ res->addr_base + (res->crop_offset & ~0x1f));
++ } else {
++ /*
++ * Set vertical start line and horizontal starting pixel.
++ * If the input is from CCDC/PREV, horizontal start field is
++ * in bytes (twice number of pixels).
++ */
++ ispresizer_set_start(res, res->crop.left * 2, res->crop.top);
++ /* Input address and offset must be 0 for preview/ccdc input */
++ __ispresizer_set_inaddr(res, 0);
++ ispresizer_set_input_offset(res, 0);
++ }
++
++ /* Set the input size */
++ ispresizer_set_input_size(res, res->crop.width, res->crop.height);
++}
++
++static void resizer_configure(struct isp_res_device *res)
++{
++ struct v4l2_mbus_framefmt *informat, *outformat;
++ struct resizer_luma_yenh luma = {0, 0, 0, 0};
++
++ ispresizer_set_source(res, res->input);
++
++ informat = &res->formats[RESZ_PAD_SINK];
++ outformat = &res->formats[RESZ_PAD_SOURCE];
++
++ /* RESZ_PAD_SINK */
++ if (res->input == RESIZER_INPUT_VP)
++ ispresizer_set_input_offset(res, 0);
++ else
++ ispresizer_set_input_offset(res, informat->width * 2);
++
++ /* YUV422 interleaved, default phase, no luma enhancement */
++ ispresizer_set_intype(res, RSZ_YUV422);
++ ispresizer_set_ycpos(res, informat->code);
++ ispresizer_set_phase(res, DEFAULT_PHASE, DEFAULT_PHASE);
++ ispresizer_set_luma(res, &luma);
++
++ /* RESZ_PAD_SOURCE */
++ ispresizer_set_output_offset(res, ALIGN(outformat->width * 2, 32));
++ ispresizer_set_output_size(res, outformat->width, outformat->height);
++
++ ispresizer_set_crop_params(res, informat, outformat);
++}
++
++/* -----------------------------------------------------------------------------
++ * Interrupt handling
++ */
++
++static void resizer_enable_oneshot(struct isp_res_device *res)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR,
++ ISPRSZ_PCR_ENABLE | ISPRSZ_PCR_ONESHOT);
++}
++
++void ispresizer_isr_frame_sync(struct isp_res_device *res)
++{
++ if (res->underrun && res->state == ISP_PIPELINE_STREAM_CONTINUOUS) {
++ resizer_enable_oneshot(res);
++ res->underrun = 0;
++ }
++}
++
++static void ispresizer_isr_buffer(struct isp_res_device *res)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
++ struct isp_buffer *buffer;
++ int restart = 0;
++
++ if (res->state == ISP_PIPELINE_STREAM_STOPPED)
++ return;
++
++ /* Complete the output buffer and, if reading from memory, the input
++ * buffer.
++ */
++ buffer = isp_video_buffer_next(&res->video_out, res->error);
++ if (buffer != NULL) {
++ ispresizer_set_outaddr(res, buffer->isp_addr);
++ restart = 1;
++ }
++
++ pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
++
++ if (res->input == RESIZER_INPUT_MEMORY) {
++ buffer = isp_video_buffer_next(&res->video_in, 0);
++ if (buffer != NULL)
++ ispresizer_set_inaddr(res, buffer->isp_addr);
++ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
++ }
++
++ if (res->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
++ if (isp_pipeline_ready(pipe))
++ isp_pipeline_set_stream(pipe,
++ ISP_PIPELINE_STREAM_SINGLESHOT);
++ } else {
++ /* If an underrun occurs, the video queue operation handler will
++ * restart the resizer. Otherwise restart it immediately.
++ */
++ if (restart)
++ resizer_enable_oneshot(res);
++ }
++
++ res->error = 0;
++}
++
++/*
++ * ispresizer_isr - ISP resizer interrupt handler
++ *
++ * Manage the resizer video buffers and configure shadowed and busy-locked
++ * registers.
++ */
++void ispresizer_isr(struct isp_res_device *res)
++{
++ struct v4l2_mbus_framefmt *informat, *outformat;
++
++ if (res->applycrop) {
++ outformat = __resizer_get_format(res, NULL, RESZ_PAD_SOURCE,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ informat = __resizer_get_format(res, NULL, RESZ_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ ispresizer_set_crop_params(res, informat, outformat);
++ res->applycrop = 0;
++ }
++
++ ispresizer_isr_buffer(res);
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP video operations
++ */
++
++static int resizer_video_queue(struct isp_video *video,
++ struct isp_buffer *buffer)
++{
++ struct isp_res_device *res = &video->isp->isp_res;
++
++ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ ispresizer_set_inaddr(res, buffer->isp_addr);
++
++ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ ispresizer_set_outaddr(res, buffer->isp_addr);
++
++ /* We now have a buffer queued on the output. Despite what the
++ * TRM says, the resizer can't be restarted immediately.
++ * Enabling it in one shot mode in the middle of a frame (or at
++ * least asynchronously to the frame) results in the output
++ * being shifted randomly left/right and up/down, as if the
++ * hardware didn't synchronize itself to the beginning of the
++ * frame correctly.
++ *
++ * Restart the resizer on the next sync interrupt if running in
++ * continuous mode or when starting the stream.
++ */
++ res->underrun = 1;
++ }
++
++ return 0;
++}
++
++static const struct isp_video_operations resizer_video_ops = {
++ .queue = resizer_video_queue,
++};
++
++/* -----------------------------------------------------------------------------
++ * V4L2 subdev operations
++ */
++
++/*
++ * resizer_s_power - Handle set power subdev method
++ * @sd: pointer to v4l2 subdev structure
++ * @on: power on/off
++ * return -EINVAL or zero on success
++ */
++static int resizer_s_power(struct v4l2_subdev *sd, int on)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(res);
++
++ if (on) {
++ if (!isp_get(isp))
++ return -EBUSY;
++ } else {
++ isp_put(isp);
++ }
++
++ return 0;
++}
++
++/*
++ * resizer_set_stream - Enable/Disable streaming on resizer subdev
++ * @sd: ISP resizer V4L2 subdev
++ * @enable: 1 == Enable, 0 == Disable
++ *
++ * The resizer hardware can't be enabled without a memory buffer to write to.
++ * As the s_stream operation is called in response to a STREAMON call without
++ * any buffer queued yet, just update the state field and return immediately.
++ * The resizer will be enabled in resizer_video_queue().
++ */
++static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(res);
++
++ if (enable != ISP_PIPELINE_STREAM_STOPPED &&
++ res->state == ISP_PIPELINE_STREAM_STOPPED) {
++ isp_reg_or(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
++ ISPCTRL_RSZ_CLK_EN);
++ resizer_configure(res);
++ ispresizer_print_status(res);
++ }
++
++ switch (enable) {
++ case ISP_PIPELINE_STREAM_CONTINUOUS:
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
++ if (res->underrun) {
++ resizer_enable_oneshot(res);
++ res->underrun = 0;
++ }
++ break;
++
++ case ISP_PIPELINE_STREAM_SINGLESHOT:
++ if (res->input == RESIZER_INPUT_MEMORY)
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_READ);
++ isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
++
++ resizer_enable_oneshot(res);
++ break;
++
++ case ISP_PIPELINE_STREAM_STOPPED:
++ isp_sbl_disable(isp, OMAP3_ISP_SBL_RESIZER_READ |
++ OMAP3_ISP_SBL_RESIZER_WRITE);
++ isp_reg_and(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
++ ~ISPCTRL_RSZ_CLK_EN);
++ res->underrun = 0;
++ break;
++ }
++
++ res->state = enable;
++ return 0;
++}
++
++/*
++ * resizer_g_crop - handle get crop subdev operation
++ * @sd : pointer to v4l2 subdev structure
++ * @pad : subdev pad
++ * @crop : pointer to crop structure
++ * @which : active or probe format
++ * return zero
++ */
++static int resizer_g_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_crop *crop)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++
++ /* Only sink pad has crop capability */
++ if (crop->pad != RESZ_PAD_SINK)
++ return -EINVAL;
++
++ crop->rect = *__resizer_get_crop(res, fh, crop->which);
++ return 0;
++}
++
++/*
++ * resizer_try_crop - mangles crop parameters.
++ */
++static void resizer_try_crop(struct v4l2_mbus_framefmt *format_sink,
++ struct v4l2_mbus_framefmt *format_source,
++ struct v4l2_rect *crop_rect,
++ enum isp_pipeline_stream_state state)
++{
++ /* Is streaming on? Crop mangling is handled differently */
++ if (state != ISP_PIPELINE_STREAM_STOPPED) {
++ /*
++ * Crop rectangle is constrained to the output size so
++ * that zoom ratio cannot exceed +/-4.0.
++ */
++ int minwidth = (format_source->width + 3) / 4;
++ int minheight = (format_source->height + 3) / 4;
++ int maxwidth = format_source->width * 4;
++ int maxheight = format_source->height * 4;
++
++ crop_rect->width = clamp_t(u32, crop_rect->width,
++ minwidth, maxwidth);
++ crop_rect->height = clamp_t(u32, crop_rect->height,
++ minheight, maxheight);
++ } else {
++ /* Setting crop resets the output size to zoom=1.0 */
++ format_source->width = crop_rect->width;
++ format_source->height = crop_rect->height;
++ }
++
++ /* Crop can not go beyond of the input rectangle */
++ crop_rect->left = clamp_t(u32, crop_rect->left, 0,
++ format_sink->width - MIN_IN_WIDTH);
++ crop_rect->width = clamp_t(u32, crop_rect->width, MIN_IN_WIDTH,
++ format_sink->width - crop_rect->left);
++ crop_rect->top = clamp_t(u32, crop_rect->top, 0,
++ format_sink->height - MIN_IN_HEIGHT);
++ crop_rect->height = clamp_t(u32, crop_rect->height, MIN_IN_HEIGHT,
++ format_sink->height - crop_rect->top);
++}
++
++/*
++ * resizer_s_crop - handle set crop subdev operation
++ * @sd : pointer to v4l2 subdev structure
++ * @pad : subdev pad
++ * @crop : pointer to crop structure
++ * @which : active or probe format
++ * return -EINVAL or zero when succeed
++ */
++static int resizer_s_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_crop *crop)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct isp_device *isp = to_isp_device(res);
++ struct v4l2_rect *crop_rect = &crop->rect;
++ struct v4l2_mbus_framefmt *format_sink, *format_source;
++
++ /* Only sink pad has crop capability */
++ if (crop->pad != RESZ_PAD_SINK)
++ return -EINVAL;
++
++ format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
++ crop->which);
++ format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
++ crop->which);
++
++ dev_dbg(isp->dev, "%s: L=%d,T=%d,W=%d,H=%d,which=%d\n", __func__,
++ crop_rect->left, crop_rect->top, crop_rect->width,
++ crop_rect->height, crop->which);
++
++ dev_dbg(isp->dev, "%s: input=%dx%d, output=%dx%d\n", __func__,
++ format_sink->width, format_sink->height,
++ format_source->width, format_source->height);
++
++ resizer_try_crop(format_sink, format_source, crop_rect, res->state);
++
++ if (crop->which == V4L2_SUBDEV_FORMAT_PROBE) {
++ *v4l2_subdev_get_probe_crop(fh, RESZ_PAD_SINK) = *crop_rect;
++ return 0;
++ }
++
++ ispresizer_calc_ratios(res, crop_rect, format_source, &res->ratio);
++
++ res->crop = *crop_rect;
++
++ /*
++ * s_crop can be called while streaming is on. In this case
++ * the crop values will be set in the next IRQ.
++ */
++ if (res->state != ISP_PIPELINE_STREAM_STOPPED)
++ res->applycrop = 1;
++
++ return 0;
++}
++
++/* resizer pixel formats */
++const static unsigned int resz_fmts[] = {
++ V4L2_MBUS_FMT_UYVY16_1X16,
++ V4L2_MBUS_FMT_YUYV16_1X16,
++};
++
++static unsigned int resizer_max_in_width(struct isp_res_device *res)
++{
++ struct isp_device *isp = to_isp_device(res);
++
++ if (res->input == RESIZER_INPUT_MEMORY) {
++ return MAX_IN_WIDTH_MEMORY_MODE;
++ } else {
++ if (isp->revision == ISP_REVISION_1_0)
++ return MAX_IN_WIDTH_ONTHEFLY_MODE_ES1;
++ else
++ return MAX_IN_WIDTH_ONTHEFLY_MODE_ES2;
++ }
++}
++
++/*
++ * resizer_try_format - Handle try format by pad subdev method
++ * @res : ISP resizer device
++ * @fh : V4L2 subdev file handle
++ * @pad : pad num
++ * @fmt : pointer to v4l2 format structure
++ * @which : wanted subdev format
++ */
++static void resizer_try_format(struct isp_res_device *res,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct v4l2_mbus_framefmt *format;
++ struct v4l2_rect crop_rect;
++ struct resizer_ratio ratio;
++
++ switch (pad) {
++ case RESZ_PAD_SINK:
++ if (fmt->code != V4L2_MBUS_FMT_YUYV16_1X16 &&
++ fmt->code != V4L2_MBUS_FMT_UYVY16_1X16)
++ fmt->code = V4L2_MBUS_FMT_YUYV16_1X16;
++
++ fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
++ resizer_max_in_width(res));
++ fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
++ MAX_IN_HEIGHT);
++ break;
++
++ case RESZ_PAD_SOURCE:
++ format = __resizer_get_format(res, fh, RESZ_PAD_SINK, which);
++ fmt->code = format->code;
++
++ crop_rect = *__resizer_get_crop(res, fh, which);
++ ispresizer_calc_ratios(res, &crop_rect, fmt, &ratio);
++ break;
++ }
++
++ fmt->colorspace = V4L2_COLORSPACE_JPEG;
++ fmt->field = V4L2_FIELD_NONE;
++}
++
++/*
++ * resizer_enum_mbus_code - Handle pixel format enumeration
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
++ * return -EINVAL or zero on success
++ */
++static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ if (code->pad == RESZ_PAD_SINK) {
++ if (code->index >= ARRAY_SIZE(resz_fmts))
++ return -EINVAL;
++
++ code->code = resz_fmts[code->index];
++ } else {
++ if (code->index != 0)
++ return -EINVAL;
++
++ format = __resizer_get_format(res, fh, RESZ_PAD_SINK,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ code->code = format->code;
++ }
++
++ return 0;
++}
++
++static int resizer_enum_frame_size(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt format;
++
++ if (fse->index != 0)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = 1;
++ format.height = 1;
++ resizer_try_format(res, fh, fse->pad, &format,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ fse->min_width = format.width;
++ fse->min_height = format.height;
++
++ if (format.code != fse->code)
++ return -EINVAL;
++
++ format.code = fse->code;
++ format.width = -1;
++ format.height = -1;
++ resizer_try_format(res, fh, fse->pad, &format,
++ V4L2_SUBDEV_FORMAT_PROBE);
++ fse->max_width = format.width;
++ fse->max_height = format.height;
++
++ return 0;
++}
++
++/*
++ * resizer_get_format - Handle get format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad : pad num
++ * @fmt : pointer to v4l2 format structure
++ * @which : wanted subdev format
++ * return -EINVAL or zero on sucess
++ */
++static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __resizer_get_format(res, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ *fmt = *format;
++ return 0;
++}
++
++/*
++ * resizer_set_format - Handle set format by pads subdev method
++ * @sd : pointer to v4l2 subdev structure
++ * @fh : V4L2 subdev file handle
++ * @pad : pad num
++ * @fmt : pointer to v4l2 format structure
++ * @which : wanted subdev format
++ * return -EINVAL or zero on success
++ */
++static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++ struct v4l2_mbus_framefmt *format;
++ struct v4l2_rect *crop;
++
++ format = __resizer_get_format(res, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ resizer_try_format(res, fh, pad, fmt, which);
++ *format = *fmt;
++
++ if (pad == RESZ_PAD_SINK) {
++ /* reset crop rectangle */
++ crop = __resizer_get_crop(res, fh, which);
++ crop->left = 0;
++ crop->top = 0;
++ crop->width = fmt->width;
++ crop->height = fmt->height;
++
++ /* Propagate the format from sink to source */
++ format = __resizer_get_format(res, fh, RESZ_PAD_SOURCE, which);
++ memcpy(format, fmt, sizeof(*format));
++ resizer_try_format(res, fh, RESZ_PAD_SOURCE, format, which);
++ }
++
++ if (which == V4L2_SUBDEV_FORMAT_PROBE)
++ return 0;
++
++ ispresizer_calc_ratios(res, &res->crop, format, &res->ratio);
++
++ return 0;
++}
++
++/* subdev core operations */
++static const struct v4l2_subdev_core_ops resizer_v4l2_core_ops = {
++ .s_power = resizer_s_power,
++};
++
++/* subdev video operations */
++static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
++ .s_stream = resizer_set_stream,
++};
++
++/* subdev pad operations */
++static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
++ .enum_mbus_code = resizer_enum_mbus_code,
++ .enum_frame_size = resizer_enum_frame_size,
++ .get_fmt = resizer_get_format,
++ .set_fmt = resizer_set_format,
++ .get_crop = resizer_g_crop,
++ .set_crop = resizer_s_crop,
++};
++
++/* subdev operations */
++static const struct v4l2_subdev_ops resizer_v4l2_ops = {
++ .core = &resizer_v4l2_core_ops,
++ .video = &resizer_v4l2_video_ops,
++ .pad = &resizer_v4l2_pad_ops,
++};
++
++
++/* -----------------------------------------------------------------------------
++ * Media entity operations
++ */
++
++/*
++ * resizer_link_setup - Setup resizer connections.
++ * @entity : Pointer to media entity structure
++ * @local : Pointer to local pad array
++ * @remote : Pointer to remote pad array
++ * @flags : Link flags
++ * return -EINVAL or zero on success
++ */
++static int resizer_link_setup(struct media_entity *entity,
++ const struct media_entity_pad *local,
++ const struct media_entity_pad *remote, u32 flags)
++{
++ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++ struct isp_res_device *res = v4l2_get_subdevdata(sd);
++
++ switch (local->index | (remote->entity->type << 16)) {
++ case RESZ_PAD_SINK | (MEDIA_ENTITY_TYPE_NODE << 16):
++ /* read from memory */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ if (res->input == RESIZER_INPUT_VP)
++ return -EBUSY;
++ res->input = RESIZER_INPUT_MEMORY;
++ } else {
++ if (res->input == RESIZER_INPUT_MEMORY)
++ res->input = RESIZER_INPUT_NONE;
++ }
++ break;
++
++ case RESZ_PAD_SINK | (MEDIA_ENTITY_TYPE_SUBDEV << 16):
++ /* read from ccdc or previewer */
++ if (flags & MEDIA_LINK_FLAG_ACTIVE) {
++ if (res->input == RESIZER_INPUT_MEMORY)
++ return -EBUSY;
++ res->input = RESIZER_INPUT_VP;
++ } else {
++ if (res->input == RESIZER_INPUT_VP)
++ res->input = RESIZER_INPUT_NONE;
++ }
++ break;
++
++ case RESZ_PAD_SOURCE | (MEDIA_ENTITY_TYPE_NODE << 16):
++ /* resizer always write to memory */
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* media operations */
++static const struct media_entity_operations resizer_media_ops = {
++ .link_setup = resizer_link_setup,
++ .set_power = v4l2_subdev_set_power,
++};
++
++/*
++ * ispresizer_init_entities - Initialize resizer subdev and media entity.
++ * @res : Pointer to resizer device structure
++ * return -ENOMEM or zero on success
++ */
++static int ispresizer_init_entities(struct isp_res_device *res)
++{
++ struct v4l2_subdev *sd = &res->subdev;
++ struct media_entity_pad *pads = res->pads;
++ struct media_entity *me = &sd->entity;
++ int ret;
++
++ res->input = RESIZER_INPUT_NONE;
++
++ v4l2_subdev_init(sd, &resizer_v4l2_ops);
++ strlcpy(sd->name, "OMAP3 ISP resizer", sizeof(sd->name));
++ sd->grp_id = 1 << 16; /* group ID for isp subdevs */
++ v4l2_set_subdevdata(sd, res);
++ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ pads[RESZ_PAD_SINK].type = MEDIA_PAD_TYPE_INPUT;
++ pads[RESZ_PAD_SOURCE].type = MEDIA_PAD_TYPE_OUTPUT;
++
++ me->ops = &resizer_media_ops;
++ ret = media_entity_init(me, RESZ_PADS_NUM, pads, 0);
++ if (ret < 0)
++ return ret;
++
++ res->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ res->video_in.ops = &resizer_video_ops;
++ res->video_in.isp = to_isp_device(res);
++ res->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
++ res->video_in.alignment = 32;
++ res->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ res->video_out.ops = &resizer_video_ops;
++ res->video_out.isp = to_isp_device(res);
++ res->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
++ res->video_out.alignment = 32;
++
++ ret = isp_video_init(&res->video_in, "resizer");
++ if (ret < 0)
++ return ret;
++
++ ret = isp_video_init(&res->video_out, "resizer");
++ if (ret < 0)
++ return ret;
++
++ /* Connect the video nodes to the resizer subdev. */
++ ret = media_entity_create_link(&res->video_in.video.entity, 0,
++ &res->subdev.entity, RESZ_PAD_SINK, 0);
++ if (ret < 0)
++ return ret;
++
++ ret = media_entity_create_link(&res->subdev.entity, RESZ_PAD_SOURCE,
++ &res->video_out.video.entity, 0, 0);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
++void isp_resizer_unregister_entities(struct isp_res_device *res)
++{
++ media_entity_cleanup(&res->subdev.entity);
++
++ v4l2_device_unregister_subdev(&res->subdev);
++ isp_video_unregister(&res->video_in);
++ isp_video_unregister(&res->video_out);
++}
++
++int isp_resizer_register_entities(struct isp_res_device *res,
++ struct v4l2_device *vdev)
++{
++ int ret;
++
++ /* Register the subdev and video nodes. */
++ ret = v4l2_device_register_subdev(vdev, &res->subdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&res->video_in, vdev);
++ if (ret < 0)
++ goto error;
++
++ ret = isp_video_register(&res->video_out, vdev);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++error:
++ isp_resizer_unregister_entities(res);
++ return ret;
++}
++
++/* -----------------------------------------------------------------------------
++ * ISP resizer initialization and cleanup
++ */
++
++void isp_resizer_cleanup(struct isp_device *isp)
++{
++}
++
++/*
++ * isp_resizer_init - Resizer initialization.
++ * @isp : Pointer to ISP device
++ * return -ENOMEM or zero on success
++ */
++int isp_resizer_init(struct isp_device *isp)
++{
++ struct isp_res_device *res = &isp->isp_res;
++ int ret;
++
++ ret = ispresizer_init_entities(res);
++ if (ret < 0)
++ goto out;
++
++out:
++ if (ret)
++ isp_resizer_cleanup(isp);
++
++ return ret;
++}
++
+diff --git a/drivers/media/video/isp/ispresizer.h b/drivers/media/video/isp/ispresizer.h
+new file mode 100644
+index 0000000..b7f3554
+--- /dev/null
++++ b/drivers/media/video/isp/ispresizer.h
+@@ -0,0 +1,137 @@
++/*
++ * ispresizer.h
++ *
++ * Driver header file for Resizer module in TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * Sameer Venkatraman <sameerv@ti.com>
++ * Mohit Jalori
++ * Sergio Aguirre <saaguirre@ti.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef OMAP_ISP_RESIZER_H
++#define OMAP_ISP_RESIZER_H
++
++/*
++ * Constants for filter coefficents count
++ */
++#define COEFF_CNT 32
++
++/*
++ * struct isprsz_coef - Structure for resizer filter coeffcients.
++ * @h_filter_coef_4tap: Horizontal filter coefficients for 8-phase/4-tap
++ * mode (.5x-4x)
++ * @v_filter_coef_4tap: Vertical filter coefficients for 8-phase/4-tap
++ * mode (.5x-4x)
++ * @h_filter_coef_7tap: Horizontal filter coefficients for 4-phase/7-tap
++ * mode (.25x-.5x)
++ * @v_filter_coef_7tap: Vertical filter coefficients for 4-phase/7-tap
++ * mode (.25x-.5x)
++ */
++struct isprsz_coef {
++ u16 h_filter_coef_4tap[32];
++ u16 v_filter_coef_4tap[32];
++ /* Every 8th value is a dummy value in the following arrays: */
++ u16 h_filter_coef_7tap[32];
++ u16 v_filter_coef_7tap[32];
++};
++
++/* Chrominance horizontal algorithm */
++enum resizer_chroma_algo {
++ RSZ_THE_SAME = 0, /* Chrominance the same as Luminance */
++ RSZ_BILINEAR = 1, /* Chrominance uses bilinear interpolation */
++};
++
++/* Resizer input type select */
++enum resizer_colors_type {
++ RSZ_YUV422 = 0, /* YUV422 color is interleaved */
++ RSZ_COLOR8 = 1, /* Color separate data on 8 bits */
++};
++
++/*
++ * Structure for horizontal and vertical resizing value
++ */
++struct resizer_ratio {
++ u32 horz;
++ u32 vert;
++};
++
++/*
++ * Structure for luminance enhancer parameters.
++ */
++struct resizer_luma_yenh {
++ u8 algo; /* algorithm select. */
++ u8 gain; /* maximum gain. */
++ u8 slope; /* slope. */
++ u8 core; /* core offset. */
++};
++
++enum resizer_input_entity {
++ RESIZER_INPUT_NONE,
++ RESIZER_INPUT_VP, /* input video port - prev or ccdc */
++ RESIZER_INPUT_MEMORY,
++};
++
++/* Sink and source resizer pads */
++#define RESZ_PAD_SINK 0
++#define RESZ_PAD_SOURCE 1
++#define RESZ_PADS_NUM 2
++
++/*
++ * struct isp_res_device - Structure for the resizer module to store its
++ * information.
++ */
++struct isp_res_device {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pads[RESZ_PADS_NUM];
++ struct v4l2_mbus_framefmt formats[RESZ_PADS_NUM];
++
++ enum resizer_input_entity input;
++ struct isp_video video_in;
++ struct isp_video video_out;
++ unsigned int error;
++
++ u32 addr_base; /* stored source buffer address in memory mode */
++ u32 crop_offset; /* additional offset for crop in memory mode */
++ struct resizer_ratio ratio;
++ int pm_state;
++ unsigned int applycrop:1,
++ underrun:1;
++ enum isp_pipeline_stream_state state;
++ struct v4l2_rect crop; /* current crop requested by user */
++};
++
++struct isp_device;
++
++int isp_resizer_init(struct isp_device *isp);
++void isp_resizer_cleanup(struct isp_device *isp);
++
++int isp_resizer_register_entities(struct isp_res_device *res,
++ struct v4l2_device *vdev);
++void isp_resizer_unregister_entities(struct isp_res_device *res);
++void ispresizer_isr_frame_sync(struct isp_res_device *res);
++void ispresizer_isr(struct isp_res_device *isp_res);
++
++void ispresizer_max_rate(struct isp_res_device *res, unsigned int *max_rate);
++
++void ispresizer_suspend(struct isp_res_device *isp_res);
++
++void ispresizer_resume(struct isp_res_device *isp_res);
++
++int ispresizer_busy(struct isp_res_device *isp_res);
++
++void ispresizer_save_context(struct isp_device *isp);
++
++void ispresizer_restore_context(struct isp_device *isp);
++
++#endif /* OMAP_ISP_RESIZER_H */
+diff --git a/drivers/media/video/isp/ispstat.c b/drivers/media/video/isp/ispstat.c
+new file mode 100644
+index 0000000..953f461
+--- /dev/null
++++ b/drivers/media/video/isp/ispstat.c
+@@ -0,0 +1,1036 @@
++/*
++ * ispstat.c
++ *
++ * STAT module for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * Contributors:
++ * David Cohen <david.cohen@nokia.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ * Sergio Aguirre <saaguirre@ti.com>
++ * Troy Laramy
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include "isp.h"
++
++#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0)
++
++/*
++ * MAGIC_SIZE must always be the greatest common divisor of
++ * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
++ */
++#define MAGIC_SIZE 16
++#define MAGIC_NUM 0x55
++
++/* HACK: AF module seems to be writing one more paxel data than it should. */
++#define AF_EXTRA_DATA AF_PAXEL_SIZE
++
++/*
++ * HACK: H3A modules go to an invalid state after have a SBL overflow. It makes
++ * the next buffer to start to be written in the same point where the overflow
++ * occurred instead of the configured address. The only known way to make it to
++ * go back to a valid state is having a valid buffer processing. Of course it
++ * requires at least a doubled buffer size to avoid an access to invalid memory
++ * region. But it does not fix everything. It may happen more than one
++ * consecutive SBL overflows. In that case, it might be unpredictable how many
++ * buffers the allocated memory should fit. For that case, a recover
++ * configuration was created. It produces the minimum buffer size for each H3A
++ * module and decrease the change for more SBL overflows. This recover state
++ * will be enabled every time a SBL overflow occur. As the output buffer size
++ * isn't big, it's possible to have an extra size able to fit many recover
++ * buffers making it extreamily unlikely to have an access to invalid memory
++ * region.
++ */
++#define NUM_H3A_RECOVER_BUFS 10
++
++/*
++ * HACK: Because of HW issues the generic layer sometimes need to have
++ * different behaviour for different statistic modules.
++ */
++#define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af)
++#define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb)
++#define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
++
++static void __ispstat_buf_sync_magic(struct ispstat *stat,
++ struct ispstat_buffer *buf,
++ u32 buf_size,
++ enum dma_data_direction dir,
++ void (*dma_sync)(struct device *,
++ dma_addr_t, unsigned long, size_t,
++ enum dma_data_direction))
++{
++ struct device *dev = stat->isp->dev;
++ struct page *pg;
++ dma_addr_t dma_addr;
++ u32 offset;
++
++ /* Initial magic words */
++ pg = vmalloc_to_page(buf->virt_addr);
++ dma_addr = page_to_dma(dev, pg);
++ dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
++
++ /* Final magic words */
++ pg = vmalloc_to_page(buf->virt_addr + buf_size);
++ dma_addr = page_to_dma(dev, pg);
++ offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
++ dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
++}
++
++static void ispstat_buf_sync_magic_for_device(struct ispstat *stat,
++ struct ispstat_buffer *buf,
++ u32 buf_size,
++ enum dma_data_direction dir)
++{
++ if (IS_COHERENT_BUF(stat))
++ return;
++
++ __ispstat_buf_sync_magic(stat, buf, buf_size, dir,
++ dma_sync_single_range_for_device);
++}
++
++static void ispstat_buf_sync_magic_for_cpu(struct ispstat *stat,
++ struct ispstat_buffer *buf,
++ u32 buf_size,
++ enum dma_data_direction dir)
++{
++ if (IS_COHERENT_BUF(stat))
++ return;
++
++ __ispstat_buf_sync_magic(stat, buf, buf_size, dir,
++ dma_sync_single_range_for_cpu);
++}
++
++static int ispstat_buf_check_magic(struct ispstat *stat,
++ struct ispstat_buffer *buf)
++{
++ const u32 buf_size = IS_H3A_AF(stat) ?
++ buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
++ u8 *w;
++ u8 *end;
++ int ret = -EINVAL;
++
++ ispstat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
++
++ /* Checking initial magic numbers. They shouldn't be here anymore. */
++ for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
++ if (likely(*w != MAGIC_NUM))
++ ret = 0;
++
++ if (ret) {
++ dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
++ "match.\n", stat->subdev.name);
++ return ret;
++ }
++
++ /* Checking magic numbers at the end. They must be still here. */
++ for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
++ w < end; w++) {
++ if (unlikely(*w != MAGIC_NUM)) {
++ dev_dbg(stat->isp->dev, "%s: endding magic check does "
++ "not match.\n", stat->subdev.name);
++ return -EINVAL;
++ }
++ }
++
++ ispstat_buf_sync_magic_for_device(stat, buf, buf_size, DMA_FROM_DEVICE);
++
++ return 0;
++}
++
++static void ispstat_buf_insert_magic(struct ispstat *stat,
++ struct ispstat_buffer *buf)
++{
++ const u32 buf_size = IS_H3A_AF(stat) ?
++ stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
++
++ ispstat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
++
++ /*
++ * Inserting MAGIC_NUM at the beginning and end of the buffer.
++ * buf->buf_size is set only after the buffer is queued. For now the
++ * right buf_size for the current configuration is pointed by
++ * stat->buf_size.
++ */
++ memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
++ memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
++
++ ispstat_buf_sync_magic_for_device(stat, buf, buf_size,
++ DMA_BIDIRECTIONAL);
++}
++
++static void ispstat_buf_sync_for_device(struct ispstat *stat,
++ struct ispstat_buffer *buf)
++{
++ if (IS_COHERENT_BUF(stat))
++ return;
++
++ dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
++ buf->iovm->sgt->nents, DMA_FROM_DEVICE);
++}
++
++static void ispstat_buf_sync_for_cpu(struct ispstat *stat,
++ struct ispstat_buffer *buf)
++{
++ if (IS_COHERENT_BUF(stat))
++ return;
++
++ dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
++ buf->iovm->sgt->nents, DMA_FROM_DEVICE);
++}
++
++static void ispstat_buf_clear(struct ispstat *stat)
++{
++ int i;
++
++ for (i = 0; i < STAT_MAX_BUFS; i++)
++ stat->buf[i].empty = 1;
++}
++
++static struct ispstat_buffer *__ispstat_buf_find(struct ispstat *stat,
++ int look_empty)
++{
++ struct ispstat_buffer *found = NULL;
++ int i;
++
++ for (i = 0; i < STAT_MAX_BUFS; i++) {
++ struct ispstat_buffer *curr = &stat->buf[i];
++
++ /*
++ * Don't select the buffer which is being copied to
++ * userspace or used by the module.
++ */
++ if (curr == stat->locked_buf || curr == stat->active_buf)
++ continue;
++
++ /* Don't select uninitialised buffers if it's not required */
++ if (!look_empty && curr->empty)
++ continue;
++
++ /* Pick uninitialised buffer over anything else if look_empty */
++ if (curr->empty) {
++ found = curr;
++ break;
++ }
++
++ /* Choose the oldest buffer */
++ if (!found ||
++ (s32)curr->frame_number - (s32)found->frame_number < 0)
++ found = curr;
++ }
++
++ return found;
++}
++
++static inline struct ispstat_buffer *
++ispstat_buf_find_oldest(struct ispstat *stat)
++{
++ return __ispstat_buf_find(stat, 0);
++}
++
++static inline struct ispstat_buffer *
++ispstat_buf_find_oldest_or_empty(struct ispstat *stat)
++{
++ return __ispstat_buf_find(stat, 1);
++}
++
++static int ispstat_buf_queue(struct ispstat *stat)
++{
++ if (!stat->active_buf)
++ return STAT_NO_BUF;
++
++ do_gettimeofday(&stat->active_buf->ts);
++
++ stat->active_buf->buf_size = stat->buf_size;
++ if (ispstat_buf_check_magic(stat, stat->active_buf)) {
++ dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
++ stat->subdev.name);
++ return STAT_NO_BUF;
++ }
++ stat->active_buf->config_counter = stat->config_counter;
++ stat->active_buf->frame_number = stat->frame_number;
++ stat->active_buf->empty = 0;
++ stat->active_buf = NULL;
++ stat->frame_number++;
++
++ return STAT_BUF_DONE;
++}
++
++/* Get next free buffer to write the statistics to and mark it active. */
++static void ispstat_buf_next(struct ispstat *stat)
++{
++
++ if (unlikely(stat->active_buf))
++ /* Overwriting unused active buffer */
++ dev_dbg(stat->isp->dev, "%s: new buffer requested without "
++ "queuing active one.\n",
++ stat->subdev.name);
++ else
++ stat->active_buf = ispstat_buf_find_oldest_or_empty(stat);
++}
++
++static void ispstat_buf_release(struct ispstat *stat)
++{
++ unsigned long flags;
++
++ ispstat_buf_sync_for_device(stat, stat->locked_buf);
++ spin_lock_irqsave(&stat->isp->stat_lock, flags);
++ stat->locked_buf = NULL;
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++}
++
++/* Get buffer to userspace. */
++static struct ispstat_buffer *ispstat_buf_get(struct ispstat *stat,
++ struct ispstat_data *data)
++{
++ int rval = 0;
++ unsigned long flags;
++ struct ispstat_buffer *buf;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, flags);
++
++ while (1) {
++ buf = ispstat_buf_find_oldest(stat);
++ if (!buf) {
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++ dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
++ stat->subdev.name);
++ return ERR_PTR(-EBUSY);
++ }
++ if (ispstat_buf_check_magic(stat, buf)) {
++ dev_dbg(stat->isp->dev, "%s: current buffer has "
++ "corrupted data\n.", stat->subdev.name);
++ /* Mark empty because it doesn't have valid data. */
++ buf->empty = 1;
++ } else {
++ /* Buffer isn't corrupted. */
++ break;
++ }
++ }
++
++ stat->locked_buf = buf;
++
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++
++ if (buf->buf_size > data->buf_size) {
++ dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
++ "not enough.\n", stat->subdev.name);
++ ispstat_buf_release(stat);
++ return ERR_PTR(-EINVAL);
++ }
++
++ ispstat_buf_sync_for_cpu(stat, buf);
++
++ rval = copy_to_user(data->buf,
++ buf->virt_addr,
++ buf->buf_size);
++
++ if (rval) {
++ dev_info(stat->isp->dev,
++ "%s: failed copying %d bytes of stat data\n",
++ stat->subdev.name, rval);
++ buf = ERR_PTR(-EFAULT);
++ ispstat_buf_release(stat);
++ }
++
++ return buf;
++}
++
++static void ispstat_bufs_free(struct ispstat *stat)
++{
++ struct isp_device *isp = stat->isp;
++ int i;
++
++ for (i = 0; i < STAT_MAX_BUFS; i++) {
++ struct ispstat_buffer *buf = &stat->buf[i];
++
++ if (!IS_COHERENT_BUF(stat)) {
++ if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
++ continue;
++ if (buf->iovm)
++ dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
++ buf->iovm->sgt->nents,
++ DMA_FROM_DEVICE);
++ iommu_vfree(isp->iommu, buf->iommu_addr);
++ } else {
++ if (!buf->virt_addr)
++ continue;
++ dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
++ buf->virt_addr, buf->dma_addr);
++ }
++ buf->iommu_addr = 0;
++ buf->iovm = NULL;
++ buf->dma_addr = 0;
++ buf->virt_addr = NULL;
++ buf->empty = 1;
++ }
++
++ dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
++ stat->subdev.name);
++
++ stat->buf_alloc_size = 0;
++ stat->active_buf = NULL;
++}
++
++static int ispstat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
++{
++ struct isp_device *isp = stat->isp;
++ int i;
++
++ stat->buf_alloc_size = size;
++
++ for (i = 0; i < STAT_MAX_BUFS; i++) {
++ struct ispstat_buffer *buf = &stat->buf[i];
++ struct iovm_struct *iovm;
++
++ WARN_ON(buf->dma_addr);
++ buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size,
++ IOMMU_FLAG);
++ if (IS_ERR((void *)buf->iommu_addr)) {
++ dev_err(stat->isp->dev,
++ "%s: Can't acquire memory for "
++ "buffer %d\n", stat->subdev.name, i);
++ ispstat_bufs_free(stat);
++ return -ENOMEM;
++ }
++
++ iovm = find_iovm_area(isp->iommu, buf->iommu_addr);
++ if (!iovm ||
++ !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
++ DMA_FROM_DEVICE)) {
++ ispstat_bufs_free(stat);
++ return -ENOMEM;
++ }
++ buf->iovm = iovm;
++
++ buf->virt_addr = da_to_va(stat->isp->iommu,
++ (u32)buf->iommu_addr);
++ buf->empty = 1;
++ dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
++ "iommu_addr=0x%08lx virt_addr=0x%08lx",
++ stat->subdev.name, i, buf->iommu_addr,
++ (unsigned long)buf->virt_addr);
++ }
++
++ return 0;
++}
++
++static int ispstat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
++{
++ int i;
++
++ stat->buf_alloc_size = size;
++
++ for (i = 0; i < STAT_MAX_BUFS; i++) {
++ struct ispstat_buffer *buf = &stat->buf[i];
++
++ WARN_ON(buf->iommu_addr);
++ buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
++ &buf->dma_addr, GFP_KERNEL | GFP_DMA);
++
++ if (!buf->virt_addr || !buf->dma_addr) {
++ dev_info(stat->isp->dev,
++ "%s: Can't acquire memory for "
++ "DMA buffer %d\n", stat->subdev.name, i);
++ ispstat_bufs_free(stat);
++ return -ENOMEM;
++ }
++ buf->empty = 1;
++
++ dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
++ "dma_addr=0x%08lx virt_addr=0x%08lx\n",
++ stat->subdev.name, i, (unsigned long)buf->dma_addr,
++ (unsigned long)buf->virt_addr);
++ }
++
++ return 0;
++}
++
++static int ispstat_bufs_alloc(struct ispstat *stat, u32 size)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, flags);
++
++ BUG_ON(stat->locked_buf != NULL);
++
++ /* Are the old buffers big enough? */
++ if (stat->buf_alloc_size >= size) {
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++ return 0;
++ }
++
++ if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
++ dev_info(stat->isp->dev,
++ "%s: trying to allocate memory when busy\n",
++ stat->subdev.name);
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++ return -EBUSY;
++ }
++
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++
++ ispstat_bufs_free(stat);
++
++ if (IS_COHERENT_BUF(stat))
++ return ispstat_bufs_alloc_dma(stat, size);
++ else
++ return ispstat_bufs_alloc_iommu(stat, size);
++}
++
++static void ispstat_queue_event(struct ispstat *stat, int err)
++{
++ struct video_device *vdev = &stat->subdev.devnode;
++ struct v4l2_event event;
++ struct ispstat_event_status *status = (void *)event.u.data;
++
++ memset(&event, 0, sizeof(event));
++ if (!err) {
++ status->frame_number = stat->frame_number;
++ status->config_counter = stat->config_counter;
++ } else {
++ status->buf_err = 1;
++ }
++ event.type = stat->event_type;
++ v4l2_event_queue(vdev, &event);
++}
++
++
++/*
++ * ispstat_request_statistics - Request statistics.
++ * @data: Pointer to return statistics data.
++ *
++ * Returns 0 if successful.
++ */
++int ispstat_request_statistics(struct ispstat *stat,
++ struct ispstat_data *data)
++{
++ struct ispstat_buffer *buf;
++
++ if (stat->state != ISPSTAT_ENABLED) {
++ dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
++ stat->subdev.name);
++ return -EINVAL;
++ }
++
++ mutex_lock(&stat->ioctl_lock);
++ buf = ispstat_buf_get(stat, data);
++ if (IS_ERR(buf)) {
++ mutex_unlock(&stat->ioctl_lock);
++ return PTR_ERR(buf);
++ }
++
++ data->ts = buf->ts;
++ data->config_counter = buf->config_counter;
++ data->frame_number = buf->frame_number;
++ data->buf_size = buf->buf_size;
++
++ /*
++ * Deprecated. Number of new buffers is always equal to number of
++ * queued events without error flag. By setting it to 0, userspace
++ * won't try to request new buffer without receiving new event.
++ * This field must go away in future.
++ */
++ data->new_bufs = 0;
++
++ buf->empty = 1;
++ ispstat_buf_release(stat);
++ mutex_unlock(&stat->ioctl_lock);
++
++ return 0;
++}
++
++/*
++ * ispstat_config - Receives new statistic engine configuration.
++ * @new_conf: Pointer to config structure.
++ *
++ * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if
++ * was unable to allocate memory for the buffer, or other errors if parameters
++ * are invalid.
++ */
++int ispstat_config(struct ispstat *stat, void *new_conf)
++{
++ int ret;
++ unsigned long irqflags;
++ struct ispstat_generic_config *user_cfg = new_conf;
++ u32 buf_size = user_cfg->buf_size;
++
++ if (!new_conf) {
++ dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
++ stat->subdev.name);
++ return -EINVAL;
++ }
++
++ mutex_lock(&stat->ioctl_lock);
++
++ dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
++ "size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
++
++ ret = stat->ops->validate_params(stat, new_conf);
++ if (ret) {
++ mutex_unlock(&stat->ioctl_lock);
++ dev_dbg(stat->isp->dev, "%s: configuration values are "
++ "invalid.\n", stat->subdev.name);
++ return ret;
++ }
++
++ if (buf_size != user_cfg->buf_size)
++ dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
++ "request to 0x%08lx\n", stat->subdev.name,
++ (unsigned long)user_cfg->buf_size);
++
++ /*
++ * Hack: H3A modules may need a doubled buffer size to avoid access
++ * to a invalid memory address after a SBL overflow.
++ * The buffer size is always PAGE_ALIGNED.
++ * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
++ * inserted at the end to data integrity check purpose.
++ * Hack 3: AF module writes one paxel data more than it should, so
++ * the buffer allocation must consider it to avoid invalid memory
++ * access.
++ * Hack 4: H3A need to allocate extra space for the recover state.
++ */
++ if (IS_H3A(stat)) {
++ buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
++ if (IS_H3A_AF(stat))
++ /*
++ * Adding one extra paxel data size for each recover
++ * buffer + 2 regular ones.
++ */
++ buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
++ if (stat->recover_priv) {
++ struct ispstat_generic_config *recover_cfg =
++ stat->recover_priv;
++ buf_size += recover_cfg->buf_size *
++ NUM_H3A_RECOVER_BUFS;
++ }
++ buf_size = PAGE_ALIGN(buf_size);
++ } else { /* Histogram */
++ buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
++ }
++
++ ret = ispstat_bufs_alloc(stat, buf_size);
++ if (ret) {
++ mutex_unlock(&stat->ioctl_lock);
++ return ret;
++ }
++
++ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
++ stat->ops->set_params(stat, new_conf);
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++
++ /*
++ * Returning the right future config_counter for this setup, so
++ * userspace can *know* when it has been applied.
++ */
++ user_cfg->config_counter = stat->config_counter + stat->inc_config;
++
++ /* Module has a valid configuration. */
++ stat->configured = 1;
++ dev_dbg(stat->isp->dev, "%s: module has been successfully "
++ "configured.\n", stat->subdev.name);
++
++ mutex_unlock(&stat->ioctl_lock);
++
++ return 0;
++}
++
++/*
++ * ispstat_buf_process - Process statistic buffers.
++ * @buf_state: points out if buffer is ready to be processed. It's necessary
++ * because histogram needs to copy the data from internal memory
++ * before be able to process the buffer.
++ */
++static int ispstat_buf_process(struct ispstat *stat, int buf_state)
++{
++ int ret = STAT_NO_BUF;
++
++ if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
++ buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
++ ret = ispstat_buf_queue(stat);
++ ispstat_buf_next(stat);
++ }
++
++ return ret;
++}
++
++int ispstat_busy(struct ispstat *stat)
++{
++ return isp_reg_readl(stat->isp, stat->pcr->base, stat->pcr->offset)
++ & stat->pcr->busy;
++}
++
++static void __ispstat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
++{
++ u32 pcr = isp_reg_readl(stat->isp, stat->pcr->base,
++ stat->pcr->offset);
++
++ if (pcr_enable)
++ pcr |= stat->pcr->enable;
++ else
++ pcr &= ~stat->pcr->enable;
++ isp_reg_writel(stat->isp, pcr, stat->pcr->base, stat->pcr->offset);
++}
++
++/*
++ * ispstat_pcr_enable - Disables/Enables statistic engines.
++ * @pcr_enable: 0/1 - Disables/Enables the engine.
++ *
++ * Must be called from ISP driver only and not from a userspace request.
++ */
++void ispstat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
++{
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
++
++ if ((stat->state == ISPSTAT_DISABLING ||
++ stat->state == ISPSTAT_DISABLED) && pcr_enable) {
++ /* Userspace has disabled the module. Aborting. */
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++ return;
++ }
++
++ __ispstat_pcr_enable(stat, pcr_enable);
++ if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
++ stat->state = ISPSTAT_DISABLED;
++ else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
++ stat->state = ISPSTAT_ENABLED;
++
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++}
++
++void ispstat_suspend(struct ispstat *stat)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, flags);
++
++ if (stat->state != ISPSTAT_DISABLED)
++ __ispstat_pcr_enable(stat, 0);
++
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++}
++
++void ispstat_resume(struct ispstat *stat)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, flags);
++
++ if (stat->state == ISPSTAT_ENABLED) {
++ stat->update = 1;
++ stat->ops->setup_regs(stat, stat->priv);
++ __ispstat_pcr_enable(stat, 1);
++ }
++
++ spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
++}
++
++static void ispstat_try_enable(struct ispstat *stat)
++{
++ unsigned long irqflags;
++
++ if (stat->priv == NULL)
++ /* driver wasn't initialised */
++ return;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
++ if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
++ stat->buf_alloc_size) {
++ /*
++ * Userspace's requested to enable the engine but it wasn't yet.
++ * Let's do that now.
++ */
++ stat->update = 1;
++ ispstat_buf_next(stat);
++ stat->ops->setup_regs(stat, stat->priv);
++ ispstat_buf_insert_magic(stat, stat->active_buf);
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++ ispstat_pcr_enable(stat, 1);
++ dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
++ stat->subdev.name);
++ } else {
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++ }
++}
++
++void ispstat_isr_frame_sync(struct ispstat *stat)
++{
++ ispstat_try_enable(stat);
++}
++
++void ispstat_sbl_overflow(struct ispstat *stat)
++{
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
++ /*
++ * Due to a H3A hw issue which prevents the next buffer to start from
++ * the correct memory address, 2 buffers must be ignored.
++ */
++ atomic_set(&stat->buf_err, 2);
++
++ /*
++ * If more than one SBL overflow happen in a row, H3A module may access
++ * invalid memory region.
++ * stat->sbl_ovl_recover is set to tell to the driver to temporarily use
++ * a soft configuration which helps to avoid consecutive overflows.
++ */
++ if (stat->recover_priv)
++ stat->sbl_ovl_recover = 1;
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++}
++
++/*
++ * ispstat_enable - Disables/Enables statistic engine as soon as it's possible.
++ * @enable: 0/1 - Disables/Enables the engine.
++ *
++ * Client should configure all the module registers before this.
++ * This function can be called from a userspace request.
++ */
++int ispstat_enable(struct ispstat *stat, u8 enable)
++{
++ unsigned long irqflags;
++
++ dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
++ stat->subdev.name, enable ? "enable" : "disable");
++
++ /* Prevent enabling while configuring */
++ mutex_lock(&stat->ioctl_lock);
++
++ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
++
++ if (!stat->configured && enable) {
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++ mutex_unlock(&stat->ioctl_lock);
++ dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
++ "never been successfully configured so far.\n",
++ stat->subdev.name);
++ return -EINVAL;
++ }
++
++ if (enable) {
++ if (stat->state == ISPSTAT_DISABLING)
++ /* Previous disabling request wasn't done yet */
++ stat->state = ISPSTAT_ENABLED;
++ else if (stat->state == ISPSTAT_DISABLED)
++ /* Module is now being enabled */
++ stat->state = ISPSTAT_ENABLING;
++ } else {
++ if (stat->state == ISPSTAT_ENABLING) {
++ /* Previous enabling request wasn't done yet */
++ stat->state = ISPSTAT_DISABLED;
++ } else if (stat->state == ISPSTAT_ENABLED) {
++ /* Module is now being disabled */
++ stat->state = ISPSTAT_DISABLING;
++ ispstat_buf_clear(stat);
++ }
++ }
++
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++ mutex_unlock(&stat->ioctl_lock);
++
++ return 0;
++}
++
++int ispstat_s_stream(struct v4l2_subdev *subdev, int enable)
++{
++ struct ispstat *stat = v4l2_get_subdevdata(subdev);
++
++ if (enable) {
++ /*
++ * Only set enable PCR bit if the module was previously
++ * enabled through ioct.
++ */
++ ispstat_try_enable(stat);
++ } else {
++ /* Disable PCR bit and config enable field */
++ ispstat_enable(stat, 0);
++ ispstat_pcr_enable(stat, 0);
++ dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
++ stat->subdev.name);
++ }
++
++ return 0;
++}
++
++/*
++ * __ispstat_isr - Interrupt handler for statistic drivers
++ */
++static void __ispstat_isr(struct ispstat *stat, int from_dma)
++{
++ int ret = STAT_BUF_DONE;
++ unsigned long irqflags;
++
++ ispstat_pcr_enable(stat, 0);
++ /* If it's busy we can't process this buffer anymore */
++ if (!ispstat_busy(stat)) {
++ if (!from_dma && stat->ops->buf_process &&
++ !stat->buf_processing) {
++ /* Module still need to copy data to buffer. */
++ ret = stat->ops->buf_process(stat);
++ }
++ stat->buf_processing = 1;
++ if (ret == STAT_BUF_WAITING_DMA)
++ /* Buffer is not ready yet */
++ return;
++
++ spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
++ /*
++ * Before this point, 'ret' stores the buffer's status if it's
++ * ready to be processed. Afterwards, it holds the status if
++ * it was processed successfully.
++ */
++ ret = ispstat_buf_process(stat, ret);
++
++ if (likely(!stat->sbl_ovl_recover)) {
++ stat->ops->setup_regs(stat, stat->priv);
++ } else {
++ /*
++ * Using recover config to increase the chance to have
++ * a good buffer processing and make the H3A module to
++ * go back to a valid state.
++ */
++ stat->update = 1;
++ stat->ops->setup_regs(stat, stat->recover_priv);
++ stat->sbl_ovl_recover = 0;
++
++ /*
++ * Set 'update' in case of the module needs to use
++ * regular configuration after next buffer.
++ */
++ stat->update = 1;
++ }
++
++ ispstat_buf_insert_magic(stat, stat->active_buf);
++ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
++
++ /*
++ * Hack: H3A modules may access invalid memory address or send
++ * corrupted data to userspace if more than 1 SBL overflow
++ * happens in a row without re-writing its buffer's start memory
++ * address in the meantime. Such situation is avoided if the
++ * module is not immediately re-enabled when the ISR misses the
++ * timing to process the buffer and to setup the registers.
++ * Because of that, pcr_enable(1) was moved to inside this 'if'
++ * block. But the next interruption will still happen as during
++ * pcr_enable(0) the module was busy.
++ */
++ ispstat_pcr_enable(stat, 1);
++ } else {
++ /*
++ * If a SBL overflow occurs and the H3A driver misses the timing
++ * to process the buffer, stat->buf_err is set and won't be
++ * cleared now. So the next buffer will be correctly ignored.
++ * It's necessary due to a hw issue which makes the next H3A
++ * buffer to start from the memory address where the previous
++ * one stopped, instead of start where it was configured to.
++ * Do not "stat->buf_err = 0" here.
++ */
++
++ if (stat->ops->buf_process)
++ /*
++ * Driver may need to erase current data prior to
++ * process a new buffer. If it misses the timing, the
++ * next buffer might be wrong. So should be ignored.
++ * It happens only for Histogram.
++ */
++ atomic_set(&stat->buf_err, 1);
++
++ ret = STAT_NO_BUF;
++ dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
++ "device is busy.\n", stat->subdev.name);
++ }
++ stat->buf_processing = 0;
++ ispstat_queue_event(stat, ret != STAT_BUF_DONE);
++}
++
++void ispstat_isr(struct ispstat *stat)
++{
++ __ispstat_isr(stat, 0);
++}
++
++void ispstat_dma_isr(struct ispstat *stat)
++{
++ __ispstat_isr(stat, 1);
++}
++
++static int ispstat_init_entities(struct ispstat *stat, const char *name,
++ const struct v4l2_subdev_ops *sd_ops)
++{
++ struct v4l2_subdev *subdev = &stat->subdev;
++ struct media_entity *me = &subdev->entity;
++
++ v4l2_subdev_init(subdev, sd_ops);
++ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
++ subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
++ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
++ subdev->nevents = STAT_NEVENTS;
++ v4l2_set_subdevdata(subdev, stat);
++
++ stat->pad.type = MEDIA_PAD_TYPE_INPUT;
++ me->ops = NULL;
++
++ return media_entity_init(me, 1, &stat->pad, 0);
++}
++
++int ispstat_subscribe_event(struct v4l2_subdev *subdev, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub)
++{
++ struct ispstat *stat = v4l2_get_subdevdata(subdev);
++
++ if (sub->type != stat->event_type)
++ return -EINVAL;
++
++ return v4l2_event_subscribe(fh, sub);
++}
++
++int ispstat_unsubscribe_event(struct v4l2_subdev *subdev, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub)
++{
++ return v4l2_event_unsubscribe(fh, sub);
++}
++
++void ispstat_unregister_entities(struct ispstat *stat)
++{
++ media_entity_cleanup(&stat->subdev.entity);
++ v4l2_device_unregister_subdev(&stat->subdev);
++}
++
++int ispstat_register_entities(struct ispstat *stat, struct v4l2_device *vdev)
++{
++ return v4l2_device_register_subdev(vdev, &stat->subdev);
++}
++
++int ispstat_init(struct ispstat *stat, const char *name,
++ const struct v4l2_subdev_ops *sd_ops)
++{
++ stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
++ if (!stat->buf)
++ return -ENOMEM;
++ ispstat_buf_clear(stat);
++ mutex_init(&stat->ioctl_lock);
++ atomic_set(&stat->buf_err, 0);
++
++ return ispstat_init_entities(stat, name, sd_ops);
++}
++
++void ispstat_free(struct ispstat *stat)
++{
++ ispstat_bufs_free(stat);
++ kfree(stat->buf);
++}
++
+diff --git a/drivers/media/video/isp/ispstat.h b/drivers/media/video/isp/ispstat.h
+new file mode 100644
+index 0000000..8b6c95b
+--- /dev/null
++++ b/drivers/media/video/isp/ispstat.h
+@@ -0,0 +1,163 @@
++/*
++ * ispstat.h
++ *
++ * Copyright (C) 2009 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * David Cohen <david.cohen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef ISPSTAT_H
++#define ISPSTAT_H
++
++#include <linux/types.h>
++#include <mach/isp_user.h>
++#include <plat/dma.h>
++#include <media/v4l2-event.h>
++
++#include "isp.h"
++#include "ispvideo.h"
++
++#define STAT_MAX_BUFS 5
++#define STAT_NEVENTS 8
++
++#define STAT_BUF_DONE 0 /* Buffer is ready */
++#define STAT_NO_BUF 1 /* An error has occurred */
++#define STAT_BUF_WAITING_DMA 2 /* Histogram only: DMA is running */
++
++struct ispstat;
++
++struct ispstat_buffer {
++ unsigned long iommu_addr;
++ struct iovm_struct *iovm;
++ void *virt_addr;
++ dma_addr_t dma_addr;
++ struct timeval ts;
++ u32 buf_size;
++ u32 frame_number;
++ u16 config_counter;
++ u8 empty;
++};
++
++struct ispstat_ops {
++ /*
++ * Validate new params configuration.
++ * new_conf->buf_size value must be changed to the exact buffer size
++ * necessary for the new configuration if it's smaller.
++ */
++ int (*validate_params)(struct ispstat *stat, void *new_conf);
++
++ /*
++ * Save new params configuration.
++ * stat->priv->buf_size value must be set to the exact buffer size for
++ * the new configuration.
++ * stat->update is set to 1 if new configuration is different than
++ * current one.
++ */
++ void (*set_params)(struct ispstat *stat, void *new_conf);
++
++ /* Apply stored configuration. */
++ void (*setup_regs)(struct ispstat *stat, void *priv);
++
++ /* Used for specific operations during generic buf process task. */
++ int (*buf_process)(struct ispstat *stat);
++};
++
++struct ispstat_pcr_bits {
++ u32 base;
++ u32 offset;
++ u32 enable;
++ u32 busy;
++};
++
++enum ispstat_state_t {
++ ISPSTAT_DISABLED = 0,
++ ISPSTAT_DISABLING,
++ ISPSTAT_ENABLED,
++ ISPSTAT_ENABLING,
++};
++
++struct ispstat {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pad; /* sink pad */
++
++ /* Control */
++ unsigned configured:1;
++ unsigned update:1;
++ unsigned buf_processing:1;
++ unsigned sbl_ovl_recover:1;
++ u8 inc_config;
++ atomic_t buf_err;
++ enum ispstat_state_t state; /* enabling/disabling state */
++ struct omap_dma_channel_params dma_config;
++ struct isp_device *isp;
++ void *priv; /* pointer to priv config struct */
++ void *recover_priv; /* pointer to recover priv configuration */
++ struct mutex ioctl_lock; /* serialize private ioctl */
++
++ const struct ispstat_ops *ops;
++ const struct ispstat_pcr_bits *pcr;
++
++ /* Buffer */
++ u8 wait_acc_frames;
++ u16 config_counter;
++ u32 frame_number;
++ u32 buf_size;
++ u32 buf_alloc_size;
++ int dma_ch;
++ unsigned long event_type;
++ struct ispstat_buffer *buf;
++ struct ispstat_buffer *active_buf;
++ struct ispstat_buffer *locked_buf;
++};
++
++struct ispstat_generic_config {
++ /*
++ * Fields must be in the same order as in:
++ * - isph3a_aewb_config
++ * - isph3a_af_config
++ * - isphist_config
++ */
++ u32 buf_size;
++ u16 config_counter;
++};
++
++int ispstat_config(struct ispstat *stat, void *new_conf);
++int ispstat_request_statistics(struct ispstat *stat, struct ispstat_data *data);
++int ispstat_init(struct ispstat *stat, const char *name,
++ const struct v4l2_subdev_ops *sd_ops);
++void ispstat_free(struct ispstat *stat);
++int ispstat_subscribe_event(struct v4l2_subdev *subdev, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub);
++int ispstat_unsubscribe_event(struct v4l2_subdev *subdev, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub);
++int ispstat_s_stream(struct v4l2_subdev *subdev, int enable);
++void ispstat_pcr_enable(struct ispstat *stat, u8 enable);
++
++int ispstat_busy(struct ispstat *stat);
++void ispstat_suspend(struct ispstat *stat);
++void ispstat_resume(struct ispstat *stat);
++int ispstat_enable(struct ispstat *stat, u8 enable);
++void ispstat_sbl_overflow(struct ispstat *stat);
++void ispstat_isr(struct ispstat *stat);
++void ispstat_isr_frame_sync(struct ispstat *stat);
++void ispstat_dma_isr(struct ispstat *stat);
++int ispstat_register_entities(struct ispstat *stat, struct v4l2_device *vdev);
++void ispstat_unregister_entities(struct ispstat *stat);
++
++#endif /* ISPSTAT_H */
+diff --git a/drivers/media/video/isp/ispvideo.c b/drivers/media/video/isp/ispvideo.c
+new file mode 100644
+index 0000000..e9d2be3
+--- /dev/null
++++ b/drivers/media/video/isp/ispvideo.c
+@@ -0,0 +1,1150 @@
++/*
++ * ispvideo.c - ISP generic video node
++ *
++ * Copyright (C) 2009-2010 Nokia.
++ *
++ * Contributors:
++ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/clk.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/scatterlist.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <media/v4l2-dev.h>
++#include <media/v4l2-ioctl.h>
++#include <plat/iommu.h>
++#include <plat/iovmm.h>
++#include <plat/omap-pm.h>
++
++#include "ispvideo.h"
++#include "isp.h"
++
++
++/* -----------------------------------------------------------------------------
++ * Helper functions
++ */
++
++static struct v4l2_subdev *
++isp_video_remote_subdev(struct isp_video *video, u32 *pad)
++{
++ struct media_entity_pad *remote;
++
++ remote = media_entity_remote_pad(&video->pad);
++
++ if (remote == NULL || remote->entity->type != MEDIA_ENTITY_TYPE_SUBDEV)
++ return NULL;
++
++ if (pad)
++ *pad = remote->index;
++
++ return media_entity_to_v4l2_subdev(remote->entity);
++}
++
++/* Return a pointer to the ISP video instance at the far end of the pipeline. */
++static struct isp_video *
++isp_video_far_end(struct isp_video *video)
++{
++ struct media_entity_graph graph;
++ struct media_entity *entity = &video->video.entity;
++ struct media_device *mdev = entity->parent;
++ struct isp_video *far_end = NULL;
++
++ mutex_lock(&mdev->graph_mutex);
++ media_entity_graph_walk_start(&graph, entity);
++
++ while ((entity = media_entity_graph_walk_next(&graph))) {
++ if (entity == &video->video.entity)
++ continue;
++
++ if (entity->type != MEDIA_ENTITY_TYPE_NODE)
++ continue;
++
++ far_end = to_isp_video(media_entity_to_video_device(entity));
++ if (far_end->type != video->type)
++ break;
++
++ far_end = NULL;
++ }
++
++ mutex_unlock(&mdev->graph_mutex);
++ return far_end;
++}
++
++/*
++ * Validate a pipeline by checking both ends of all links for format
++ * discrepancies.
++ *
++ * Compute the minimum time per frame value as the maximum of time per frame
++ * limits reported by every block in the pipeline.
++ *
++ * Return 0 if all formats match, or -EPIPE if at least one link is found with
++ * different formats on its two ends.
++ */
++static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
++{
++ struct isp_device *isp = pipe->output->isp;
++ struct v4l2_mbus_framefmt fmt_source;
++ struct v4l2_mbus_framefmt fmt_sink;
++ struct media_entity_pad *pad;
++ struct v4l2_subdev *subdev;
++ int ret;
++
++ pipe->max_rate = pipe->l3_ick;
++
++ subdev = isp_video_remote_subdev(pipe->output, NULL);
++ if (subdev == NULL)
++ return -EPIPE;
++
++ while (1) {
++ /* Retrieve the sink format */
++ pad = &subdev->entity.pads[0];
++ if (pad->type != MEDIA_PAD_TYPE_INPUT)
++ break;
++
++ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, pad->index,
++ &fmt_sink, V4L2_SUBDEV_FORMAT_ACTIVE);
++ if (ret < 0 && ret != -ENOIOCTLCMD)
++ return -EPIPE;
++
++ /* Update the maximum frame rate */
++ if (subdev == &isp->isp_res.subdev)
++ ispresizer_max_rate(&isp->isp_res, &pipe->max_rate);
++
++ /* Retrieve the source format */
++ pad = media_entity_remote_pad(pad);
++ if (pad == NULL ||
++ pad->entity->type != MEDIA_ENTITY_TYPE_SUBDEV)
++ break;
++
++ subdev = media_entity_to_v4l2_subdev(pad->entity);
++
++ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, pad->index,
++ &fmt_source, V4L2_SUBDEV_FORMAT_ACTIVE);
++ if (ret < 0 && ret != -ENOIOCTLCMD)
++ return -EPIPE;
++
++ /* Check if the two ends match */
++ if (fmt_source.code != fmt_sink.code ||
++ fmt_source.width != fmt_sink.width ||
++ fmt_source.height != fmt_sink.height)
++ return -EPIPE;
++ }
++
++ return 0;
++}
++
++static int
++__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
++{
++ struct v4l2_mbus_framefmt fmt;
++ struct v4l2_subdev *subdev;
++ u32 pad;
++ int ret;
++
++ subdev = isp_video_remote_subdev(video, &pad);
++ if (subdev == NULL)
++ return -EINVAL;
++
++ mutex_lock(&video->mutex);
++ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, pad, &fmt,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ if (ret == -ENOIOCTLCMD)
++ ret = -EINVAL;
++
++ mutex_unlock(&video->mutex);
++
++ if (ret)
++ return ret;
++
++ format->type = video->type;
++ isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
++ return 0;
++}
++
++static int
++isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
++{
++ struct v4l2_format format;
++ int ret;
++
++ ret = __isp_video_get_format(video, &format);
++ if (ret < 0)
++ return ret;
++
++ if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
++ vfh->format.fmt.pix.height != format.fmt.pix.height ||
++ vfh->format.fmt.pix.width != format.fmt.pix.width ||
++ vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
++ vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
++ return -EINVAL;
++
++ return 0;
++}
++
++void isp_video_mbus_to_pix(const struct isp_video *video,
++ const struct v4l2_mbus_framefmt *mbus,
++ struct v4l2_pix_format *pix)
++{
++ memset(pix, 0, sizeof(*pix));
++ pix->width = mbus->width;
++ pix->height = mbus->height;
++
++ switch (mbus->code) {
++ case V4L2_MBUS_FMT_SGRBG10_1X10:
++ pix->pixelformat = V4L2_PIX_FMT_SGRBG10;
++ pix->bytesperline = pix->width * 2;
++ break;
++ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
++ pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
++ pix->bytesperline = pix->width;
++ break;
++ case V4L2_MBUS_FMT_YUYV16_1X16:
++ pix->pixelformat = V4L2_PIX_FMT_YUYV;
++ pix->bytesperline = pix->width * 2;
++ break;
++ case V4L2_MBUS_FMT_UYVY16_1X16:
++ default:
++ pix->pixelformat = V4L2_PIX_FMT_UYVY;
++ pix->bytesperline = pix->width * 2;
++ break;
++ }
++
++ if (video->alignment)
++ pix->bytesperline = ALIGN(pix->bytesperline, video->alignment);
++
++ pix->sizeimage = pix->bytesperline * pix->height;
++ pix->colorspace = mbus->colorspace;
++ pix->field = mbus->field;
++}
++EXPORT_SYMBOL_GPL(isp_video_mbus_to_pix);
++
++void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
++ struct v4l2_mbus_framefmt *mbus)
++{
++ memset(mbus, 0, sizeof(*mbus));
++ mbus->width = pix->width;
++ mbus->height = pix->height;
++
++ switch (pix->pixelformat) {
++ case V4L2_PIX_FMT_SGRBG10:
++ mbus->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++ break;
++ case V4L2_PIX_FMT_SGRBG10DPCM8:
++ mbus->code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8;
++ break;
++ case V4L2_PIX_FMT_YUYV:
++ mbus->code = V4L2_MBUS_FMT_YUYV16_1X16;
++ break;
++ case V4L2_PIX_FMT_UYVY:
++ default:
++ mbus->code = V4L2_MBUS_FMT_UYVY16_1X16;
++ break;
++ }
++
++ mbus->colorspace = pix->colorspace;
++ mbus->field = pix->field;
++}
++EXPORT_SYMBOL_GPL(isp_video_pix_to_mbus);
++
++/*
++ * Returns uncompressed mediabus pixelcode
++ *
++ * NOTE: In case the Format is not a known DPCM, It'll just return the
++ * exact same code
++ */
++enum v4l2_mbus_pixelcode
++isp_video_uncompressed_code(enum v4l2_mbus_pixelcode code)
++{
++ switch (code) {
++ case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
++ return V4L2_MBUS_FMT_SBGGR10_1X10;
++ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
++ return V4L2_MBUS_FMT_SGRBG10_1X10;
++ case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
++ return V4L2_MBUS_FMT_SRGGB10_1X10;
++ case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
++ return V4L2_MBUS_FMT_SGBRG10_1X10;
++ default:
++ return code;
++ }
++}
++
++/* -----------------------------------------------------------------------------
++ * IOMMU management
++ */
++
++#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
++
++/*
++ * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
++ * @isp: Pointer to the OMAP3 ISP device.
++ * @sglist: Pointer to source Scatter gather list to allocate.
++ * @sglen: Number of elements of the scatter-gather list.
++ *
++ * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if
++ * we ran out of memory.
++ */
++static dma_addr_t
++ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
++{
++ struct sg_table *sgt;
++ u32 da;
++
++ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
++ if (sgt == NULL)
++ return -ENOMEM;
++
++ sgt->sgl = (struct scatterlist *)sglist;
++ sgt->nents = sglen;
++ sgt->orig_nents = sglen;
++
++ da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
++ if (IS_ERR_VALUE(da))
++ kfree(sgt);
++
++ return da;
++}
++
++/*
++ * ispmmu_vunmap - Unmap a device address from the ISP MMU
++ * @isp: Pointer to the OMAP3 ISP device.
++ * @da: Device address generated from a ispmmu_vmap call.
++ */
++static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
++{
++ struct sg_table *sgt;
++
++ sgt = iommu_vunmap(isp->iommu, (u32)da);
++ kfree(sgt);
++}
++
++/* -----------------------------------------------------------------------------
++ * Video queue operations
++ */
++
++static void isp_video_queue_prepare(struct isp_video_queue *queue,
++ unsigned int *nbuffers, unsigned int *size)
++{
++ struct isp_video_fh *vfh =
++ container_of(queue, struct isp_video_fh, queue);
++ struct isp_video *video = vfh->video;
++
++ *size = vfh->format.fmt.pix.sizeimage;
++ if (*size == 0)
++ return;
++
++ *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
++}
++
++static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
++{
++ struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
++ struct isp_buffer *buffer = to_isp_buffer(buf);
++ struct isp_video *video = vfh->video;
++
++ if (buffer->isp_addr) {
++ ispmmu_vunmap(video->isp, buffer->isp_addr);
++ buffer->isp_addr = 0;
++ }
++}
++
++static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
++{
++ struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
++ struct isp_buffer *buffer = to_isp_buffer(buf);
++ struct isp_video *video = vfh->video;
++ unsigned long addr;
++
++ addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
++ if (IS_ERR_VALUE(addr))
++ return -EIO;
++
++ if (!IS_ALIGNED(addr, 32)) {
++ dev_dbg(video->isp->dev, "Buffer address must be "
++ "aligned to 32 bytes boundary.\n");
++ ispmmu_vunmap(video->isp, buffer->isp_addr);
++ return -EINVAL;
++ }
++
++ buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
++ buffer->isp_addr = addr;
++ return 0;
++}
++
++/*
++ * isp_video_buffer_queue - Add buffer to streaming queue
++ * @buf: Video buffer
++ *
++ * In memory-to-memory mode, start streaming on the pipeline if buffers are
++ * queued on both the input and the output, if the pipeline isn't already busy.
++ * If the pipeline is busy, it will be restarted in the output module interrupt
++ * handler.
++ */
++static void isp_video_buffer_queue(struct isp_video_buffer *buf)
++{
++ struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
++ struct isp_buffer *buffer = to_isp_buffer(buf);
++ struct isp_video *video = vfh->video;
++ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
++ enum isp_pipeline_state state;
++ unsigned long flags;
++ unsigned int empty;
++ unsigned int start;
++
++ empty = list_empty(&video->dmaqueue);
++ list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
++
++ if (empty) {
++ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ state = ISP_PIPELINE_QUEUE_OUTPUT;
++ else
++ state = ISP_PIPELINE_QUEUE_INPUT;
++
++ spin_lock_irqsave(&pipe->lock, flags);
++ pipe->state |= state;
++ video->ops->queue(video, buffer);
++
++ start = isp_pipeline_ready(pipe);
++ if (start)
++ pipe->state |= ISP_PIPELINE_STREAM;
++ spin_unlock_irqrestore(&pipe->lock, flags);
++
++ if (start)
++ isp_pipeline_set_stream(pipe,
++ ISP_PIPELINE_STREAM_SINGLESHOT);
++ }
++}
++
++static const struct isp_video_queue_operations isp_video_queue_ops = {
++ .queue_prepare = &isp_video_queue_prepare,
++ .buffer_prepare = &isp_video_buffer_prepare,
++ .buffer_queue = &isp_video_buffer_queue,
++ .buffer_cleanup = &isp_video_buffer_cleanup,
++};
++
++/*
++ * isp_video_buffer_next - Complete the current buffer and return the next one
++ * @video: ISP video object
++ * @error: Whether an error occurred during capture
++ *
++ * Remove the current video buffer from the DMA queue and fill its timestamp,
++ * field count and state fields before waking up its completion handler.
++ *
++ * The buffer state is set to ISP_BUF_STATE_DONE if no error occurred
++ * (@error is 0) or ISP_BUF_STATE_ERROR otherwise (@error is non-zero).
++ *
++ * The DMA queue is expected to contain at least one buffer.
++ *
++ * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
++ * empty.
++ */
++struct isp_buffer *isp_video_buffer_next(struct isp_video *video,
++ unsigned int error)
++{
++ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
++ struct isp_video_queue *queue = video->queue;
++ enum isp_pipeline_state state;
++ struct isp_video_buffer *buf;
++ unsigned long flags;
++ struct timespec ts;
++
++ spin_lock_irqsave(&queue->irqlock, flags);
++ BUG_ON(list_empty(&video->dmaqueue));
++ buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
++ irqlist);
++ list_del(&buf->irqlist);
++ spin_unlock_irqrestore(&queue->irqlock, flags);
++
++ ktime_get_ts(&ts);
++ buf->vbuf.timestamp.tv_sec = ts.tv_sec;
++ buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
++
++ buf->vbuf.sequence = atomic_inc_return(&video->sequence);
++ buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;
++
++ wake_up(&buf->wait);
++
++ if (list_empty(&video->dmaqueue)) {
++ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ state = ISP_PIPELINE_QUEUE_OUTPUT
++ | ISP_PIPELINE_STREAM;
++ else
++ state = ISP_PIPELINE_QUEUE_INPUT
++ | ISP_PIPELINE_STREAM;
++
++ spin_lock_irqsave(&pipe->lock, flags);
++ pipe->state &= ~state;
++ spin_unlock_irqrestore(&pipe->lock, flags);
++ return NULL;
++ }
++
++ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
++ spin_lock_irqsave(&pipe->lock, flags);
++ pipe->state &= ~ISP_PIPELINE_STREAM;
++ spin_unlock_irqrestore(&pipe->lock, flags);
++ }
++
++ buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
++ irqlist);
++ buf->state = ISP_BUF_STATE_ACTIVE;
++ return to_isp_buffer(buf);
++}
++
++/* -----------------------------------------------------------------------------
++ * V4L2 ioctls
++ */
++
++static int
++isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
++{
++ struct isp_video *video = video_drvdata(file);
++
++ strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
++ strlcpy(cap->card, video->video.name, sizeof(cap->card));
++ strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
++ cap->version = ISP_VIDEO_DRIVER_VERSION;
++
++ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
++ else
++ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
++
++ return 0;
++}
++
++static int
++isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++ struct isp_video *video = video_drvdata(file);
++
++ if (format->type != video->type)
++ return -EINVAL;
++
++ mutex_lock(&video->mutex);
++ *format = vfh->format;
++ mutex_unlock(&video->mutex);
++
++ return 0;
++}
++
++static int
++isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++ struct isp_video *video = video_drvdata(file);
++ struct v4l2_mbus_framefmt fmt;
++
++ if (format->type != video->type)
++ return -EINVAL;
++
++ mutex_lock(&video->mutex);
++
++ /* Fill the bytesperline and sizeimage fields by converting to media bus
++ * format and back to pixel format.
++ */
++ isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
++ isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
++
++ vfh->format = *format;
++
++ mutex_unlock(&video->mutex);
++ return 0;
++}
++
++static int
++isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
++{
++ struct isp_video *video = video_drvdata(file);
++ struct v4l2_mbus_framefmt fmt;
++ struct v4l2_subdev *subdev;
++ u32 pad;
++ int ret;
++
++ if (format->type != video->type)
++ return -EINVAL;
++
++ subdev = isp_video_remote_subdev(video, &pad);
++ if (subdev == NULL)
++ return -EINVAL;
++
++ isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
++
++ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, pad, &fmt,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ if (ret)
++ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
++
++ isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
++ return 0;
++}
++
++static int
++isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
++{
++ struct isp_video *video = video_drvdata(file);
++ struct v4l2_subdev *subdev;
++ int ret;
++
++ subdev = isp_video_remote_subdev(video, NULL);
++ if (subdev == NULL)
++ return -EINVAL;
++
++ mutex_lock(&video->mutex);
++ ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
++ mutex_unlock(&video->mutex);
++
++ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
++}
++
++static int
++isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
++{
++ struct isp_video *video = video_drvdata(file);
++ struct v4l2_subdev *subdev;
++ struct v4l2_mbus_framefmt format;
++ u32 pad;
++ int ret;
++
++ subdev = isp_video_remote_subdev(video, &pad);
++ if (subdev == NULL)
++ return -EINVAL;
++
++ /* Try the get crop operation first and fallback to get format if not
++ * implemented.
++ */
++ ret = v4l2_subdev_call(subdev, video, g_crop, crop);
++ if (ret != -ENOIOCTLCMD)
++ return ret;
++
++ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, pad, &format,
++ V4L2_SUBDEV_FORMAT_ACTIVE);
++ if (ret < 0)
++ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
++
++ crop->c.left = 0;
++ crop->c.top = 0;
++ crop->c.width = format.width;
++ crop->c.height = format.height;
++
++ return 0;
++}
++
++static int
++isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
++{
++ struct isp_video *video = video_drvdata(file);
++ struct v4l2_subdev *subdev;
++ int ret;
++
++ subdev = isp_video_remote_subdev(video, NULL);
++ if (subdev == NULL)
++ return -EINVAL;
++
++ mutex_lock(&video->mutex);
++ ret = v4l2_subdev_call(subdev, video, s_crop, crop);
++ mutex_unlock(&video->mutex);
++
++ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
++}
++
++static int
++isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++ struct isp_video *video = video_drvdata(file);
++
++ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
++ video->type != a->type)
++ return -EINVAL;
++
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
++ a->parm.output.timeperframe = vfh->timeperframe;
++
++ return 0;
++}
++
++static int
++isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++ struct isp_video *video = video_drvdata(file);
++
++ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
++ video->type != a->type)
++ return -EINVAL;
++
++ if (a->parm.output.timeperframe.denominator == 0)
++ a->parm.output.timeperframe.denominator = 1;
++
++ vfh->timeperframe = a->parm.output.timeperframe;
++
++ return 0;
++}
++
++static int
++isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++
++ return isp_video_queue_reqbufs(&vfh->queue, rb);
++}
++
++static int
++isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++
++ return isp_video_queue_querybuf(&vfh->queue, b);
++}
++
++static int
++isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++
++ return isp_video_queue_qbuf(&vfh->queue, b);
++}
++
++static int
++isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++
++ return isp_video_queue_dqbuf(&vfh->queue, b,
++ file->f_flags & O_NONBLOCK);
++}
++
++/*
++ * Stream management
++ *
++ * Every ISP pipeline has a single input and a single output. The input can be
++ * either a sensor or a video node. The output is always a video node.
++ *
++ * As every pipeline has an output video node, the ISP video objects at the
++ * pipeline output stores the pipeline state. It tracks the streaming state of
++ * both the input and output, as well as the availability of buffers.
++ *
++ * In sensor-to-memory mode, frames are always available at the pipeline input.
++ * Starting the sensor usually requires I2C transfers and must be done in
++ * interruptible context. The pipeline is started and stopped synchronously
++ * to the stream on/off commands. All modules in the pipeline will get their
++ * subdev set stream handler called. The module at the end of the pipeline must
++ * delay starting the hardware until buffers are available at its output.
++ *
++ * In memory-to-memory mode, starting/stopping the stream requires
++ * synchronization between the input and output. ISP modules can't be stopped
++ * in the middle of a frame, and at least some of the modules seem to become
++ * busy as soon as they're started, even if they don't receive a frame start
++ * event. For that reason frames need to be processed in single-shot mode. The
++ * driver needs to wait until a frame is completely processed and written to
++ * memory before restarting the pipeline for the next frame. Pipelined
++ * processing might be possible but requires more testing.
++ *
++ * Stream start must be delayed until buffers are available at both the input
++ * and output. The pipeline must be started in the videobuf queue callback with
++ * the buffers queue spinlock held. The modules subdev set stream operation must
++ * not sleep.
++ */
++static int
++isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++ struct isp_video *video = video_drvdata(file);
++ enum isp_pipeline_state state;
++ struct isp_pipeline *pipe;
++ struct isp_video *far_end;
++ unsigned int streaming;
++ unsigned long flags;
++ int ret;
++
++ if (type != video->type)
++ return -EINVAL;
++
++ mutex_lock(&video->stream_lock);
++
++ mutex_lock(&vfh->queue.lock);
++ streaming = vfh->queue.streaming;
++ mutex_unlock(&vfh->queue.lock);
++
++ if (streaming) {
++ mutex_unlock(&video->stream_lock);
++ return -EBUSY;
++ }
++
++ /* Lock the pipeline. No link touching an entity in the pipeline can
++ * be activated or deactivated once the pipeline is locked.
++ */
++ pipe = video->video.entity.pipe
++ ? to_isp_pipeline(&video->video.entity) : &video->pipe;
++ media_entity_graph_lock(&video->video.entity, &pipe->pipe);
++
++ /* Verify that the currently configured format matches the output of
++ * the connected subdev.
++ */
++ ret = isp_video_check_format(video, vfh);
++ if (ret < 0)
++ goto error;
++
++ /* Find the ISP video node connected at the far end of the pipeline and
++ * update the pipeline.
++ */
++ far_end = isp_video_far_end(video);
++
++ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
++ pipe->input = far_end;
++ pipe->output = video;
++ } else {
++ if (far_end == NULL) {
++ ret = -EPIPE;
++ goto error;
++ }
++
++ state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
++ pipe->input = video;
++ pipe->output = far_end;
++ }
++
++ /* Make sure the interconnect clock runs fast enough.
++ *
++ * Formula from: resource34xx.c set_opp()
++ * If MPU freq is above 500MHz, make sure the interconnect
++ * is at 100Mhz or above.
++ * throughput in KiB/s for 100 Mhz = 100 * 1000 * 4.
++ *
++ * We want to be fast enough then set OCP clock to be max as
++ * possible, in that case 185Mhz then:
++ * throughput in KiB/s for 185Mhz = 185 * 1000 * 4 = 740000 KiB/s
++ */
++ omap_pm_set_min_bus_tput(video->isp->dev, OCP_INITIATOR_AGENT, 740000);
++ pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
++
++ /* Validate the pipeline and update its state. */
++ ret = isp_video_validate_pipeline(pipe);
++ if (ret < 0)
++ goto error;
++
++ spin_lock_irqsave(&pipe->lock, flags);
++ pipe->state &= ~ISP_PIPELINE_STREAM;
++ pipe->state |= state;
++ spin_unlock_irqrestore(&pipe->lock, flags);
++
++ /* Set the maximum time per frame as the value requested by userspace.
++ * This is a soft limit that can be overridden if the hardware doesn't
++ * support the requested limit.
++ */
++ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ pipe->max_timeperframe = vfh->timeperframe;
++
++ video->queue = &vfh->queue;
++ INIT_LIST_HEAD(&video->dmaqueue);
++ atomic_set(&video->sequence, -1);
++
++ ret = isp_video_queue_streamon(&vfh->queue);
++ if (ret < 0)
++ goto error;
++
++ /* In sensor-to-memory mode, the stream can be started synchronously
++ * to the stream on command. In memory-to-memory mode, it will be
++ * started when buffers are queued on both the input and output.
++ */
++ if (pipe->input == NULL) {
++ ret = isp_pipeline_set_stream(pipe,
++ ISP_PIPELINE_STREAM_CONTINUOUS);
++ if (ret < 0)
++ goto error;
++ }
++
++error:
++ if (ret < 0) {
++ isp_video_queue_streamoff(&vfh->queue);
++ omap_pm_set_min_bus_tput(video->isp->dev,
++ OCP_INITIATOR_AGENT, 0);
++ media_entity_graph_unlock(&video->video.entity);
++ video->queue = NULL;
++ }
++
++ mutex_unlock(&video->stream_lock);
++ return ret;
++}
++
++static int
++isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(fh);
++ struct isp_video *video = video_drvdata(file);
++ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
++ enum isp_pipeline_state state;
++ unsigned int streaming;
++ unsigned long flags;
++
++ if (type != video->type)
++ return -EINVAL;
++
++ mutex_lock(&video->stream_lock);
++
++ /* Bail out if no stream is running on this video node. */
++ mutex_lock(&vfh->queue.lock);
++ streaming = vfh->queue.streaming;
++ mutex_unlock(&vfh->queue.lock);
++
++ if (!streaming)
++ goto done;
++
++ /* Update the pipeline state. */
++ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ state = ISP_PIPELINE_STREAM_OUTPUT
++ | ISP_PIPELINE_QUEUE_OUTPUT;
++ else
++ state = ISP_PIPELINE_STREAM_INPUT
++ | ISP_PIPELINE_QUEUE_INPUT;
++
++ spin_lock_irqsave(&pipe->lock, flags);
++ pipe->state &= ~state;
++ spin_unlock_irqrestore(&pipe->lock, flags);
++
++ /* Stop the stream. */
++ isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
++ isp_video_queue_streamoff(&vfh->queue);
++ video->queue = NULL;
++
++ omap_pm_set_min_bus_tput(video->isp->dev, OCP_INITIATOR_AGENT, 0);
++ media_entity_graph_unlock(&video->video.entity);
++
++done:
++ mutex_unlock(&video->stream_lock);
++ return 0;
++}
++
++static int
++isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
++{
++ if (input->index > 0)
++ return -EINVAL;
++
++ strlcpy(input->name, "camera", sizeof(input->name));
++ input->type = V4L2_INPUT_TYPE_CAMERA;
++
++ return 0;
++}
++
++static int
++isp_video_g_input(struct file *file, void *fh, unsigned int *input)
++{
++ *input = 0;
++
++ return 0;
++}
++
++static int
++isp_video_s_input(struct file *file, void *fh, unsigned int input)
++{
++ return input == 0 ? 0 : -EINVAL;
++}
++
++static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
++ .vidioc_querycap = isp_video_querycap,
++ .vidioc_g_fmt_vid_cap = isp_video_get_format,
++ .vidioc_s_fmt_vid_cap = isp_video_set_format,
++ .vidioc_try_fmt_vid_cap = isp_video_try_format,
++ .vidioc_g_fmt_vid_out = isp_video_get_format,
++ .vidioc_s_fmt_vid_out = isp_video_set_format,
++ .vidioc_try_fmt_vid_out = isp_video_try_format,
++ .vidioc_cropcap = isp_video_cropcap,
++ .vidioc_g_crop = isp_video_get_crop,
++ .vidioc_s_crop = isp_video_set_crop,
++ .vidioc_g_parm = isp_video_get_param,
++ .vidioc_s_parm = isp_video_set_param,
++ .vidioc_reqbufs = isp_video_reqbufs,
++ .vidioc_querybuf = isp_video_querybuf,
++ .vidioc_qbuf = isp_video_qbuf,
++ .vidioc_dqbuf = isp_video_dqbuf,
++ .vidioc_streamon = isp_video_streamon,
++ .vidioc_streamoff = isp_video_streamoff,
++ .vidioc_enum_input = isp_video_enum_input,
++ .vidioc_g_input = isp_video_g_input,
++ .vidioc_s_input = isp_video_s_input,
++};
++
++/* -----------------------------------------------------------------------------
++ * V4L2 file operations
++ */
++
++static int isp_video_open(struct file *file)
++{
++ struct isp_video *video = video_drvdata(file);
++ struct isp_video_fh *handle;
++ int ret = 0;
++
++ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
++ if (handle == NULL)
++ return -ENOMEM;
++
++ v4l2_fh_init(&handle->vfh, &video->video);
++ v4l2_fh_add(&handle->vfh);
++
++ /* If this is the first user, initialise the pipeline. */
++ if (isp_get(video->isp) == NULL) {
++ ret = -EBUSY;
++ goto done;
++ }
++
++ isp_video_queue_init(&handle->queue, video->type, &isp_video_queue_ops,
++ video->isp->dev, sizeof(struct isp_buffer));
++
++ memset(&handle->format, 0, sizeof(handle->format));
++ handle->format.type = video->type;
++ handle->timeperframe.denominator = 1;
++
++ handle->video = video;
++ file->private_data = &handle->vfh;
++
++done:
++ if (ret < 0) {
++ v4l2_fh_del(&handle->vfh);
++ kfree(handle);
++ }
++
++ return ret;
++}
++
++static int isp_video_release(struct file *file)
++{
++ struct isp_video *video = video_drvdata(file);
++ struct v4l2_fh *vfh = file->private_data;
++ struct isp_video_fh *handle = to_isp_video_fh(vfh);
++
++ /* Disable streaming and free the buffers queue resources. */
++ isp_video_streamoff(file, vfh, video->type);
++
++ mutex_lock(&handle->queue.lock);
++ isp_video_queue_cleanup(&handle->queue);
++ mutex_unlock(&handle->queue.lock);
++
++ /* Release the file handle. */
++ v4l2_fh_del(vfh);
++ kfree(handle);
++ file->private_data = NULL;
++
++ isp_put(video->isp);
++
++ return 0;
++}
++
++static unsigned int isp_video_poll(struct file *file, poll_table *wait)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
++ struct isp_video_queue *queue = &vfh->queue;
++
++ return isp_video_queue_poll(queue, file, wait);
++}
++
++static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
++
++ return isp_video_queue_mmap(&vfh->queue, vma);
++}
++
++static struct v4l2_file_operations isp_video_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = video_ioctl2,
++ .open = isp_video_open,
++ .release = isp_video_release,
++ .poll = isp_video_poll,
++ .mmap = isp_video_mmap,
++};
++
++/* -----------------------------------------------------------------------------
++ * ISP video core
++ */
++
++static const struct isp_video_operations isp_video_dummy_ops = {
++};
++
++int isp_video_init(struct isp_video *video, const char *name)
++{
++ const char *direction;
++ int ret;
++
++ switch (video->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ direction = "output";
++ video->pad.type = MEDIA_PAD_TYPE_INPUT;
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ direction = "input";
++ video->pad.type = MEDIA_PAD_TYPE_OUTPUT;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
++ if (ret < 0)
++ return ret;
++
++ mutex_init(&video->mutex);
++ atomic_set(&video->active, 0);
++
++ spin_lock_init(&video->pipe.lock);
++ mutex_init(&video->stream_lock);
++
++ /* Initialize the video device. */
++ if (video->ops == NULL)
++ video->ops = &isp_video_dummy_ops;
++
++ video->video.fops = &isp_video_fops;
++ snprintf(video->video.name, sizeof(video->video.name),
++ "OMAP3 ISP %s %s", name, direction);
++ video->video.vfl_type = VFL_TYPE_GRABBER;
++ video->video.release = video_device_release_empty;
++ video->video.ioctl_ops = &isp_video_ioctl_ops;
++
++ video_set_drvdata(&video->video, video);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(isp_video_init);
++
++int isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
++{
++ int ret;
++
++ video->video.v4l2_dev = vdev;
++
++ ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
++ if (ret < 0)
++ printk(KERN_ERR "%s: could not register video device (%d)\n",
++ __func__, ret);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(isp_video_register);
++
++void isp_video_unregister(struct isp_video *video)
++{
++ if (video_is_registered(&video->video)) {
++ media_entity_cleanup(&video->video.entity);
++ video_unregister_device(&video->video);
++ }
++}
++EXPORT_SYMBOL_GPL(isp_video_unregister);
++
+diff --git a/drivers/media/video/isp/ispvideo.h b/drivers/media/video/isp/ispvideo.h
+new file mode 100644
+index 0000000..97ad52f
+--- /dev/null
++++ b/drivers/media/video/isp/ispvideo.h
+@@ -0,0 +1,144 @@
++#ifndef __ISP_VIDEO_H
++#define __ISP_VIDEO_H
++
++#include <linux/version.h>
++#include <media/media-entity.h>
++#include <media/v4l2-dev.h>
++#include <media/v4l2-fh.h>
++
++#include "ispqueue.h"
++
++#define ISP_VIDEO_DRIVER_NAME "ispvideo"
++#define ISP_VIDEO_DRIVER_VERSION KERNEL_VERSION(0, 0, 1)
++
++struct isp_device;
++struct isp_video;
++struct v4l2_mbus_framefmt;
++struct v4l2_pix_format;
++
++enum isp_pipeline_stream_state {
++ ISP_PIPELINE_STREAM_STOPPED = 0,
++ ISP_PIPELINE_STREAM_CONTINUOUS = 1,
++ ISP_PIPELINE_STREAM_SINGLESHOT = 2,
++};
++
++enum isp_pipeline_state {
++ /* The stream has been started on the input video node. */
++ ISP_PIPELINE_STREAM_INPUT = 1,
++ /* The stream has been started on the output video node. */
++ ISP_PIPELINE_STREAM_OUTPUT = 2,
++ /* At least one buffer is queued on the input video node. */
++ ISP_PIPELINE_QUEUE_INPUT = 4,
++ /* At least one buffer is queued on the output video node. */
++ ISP_PIPELINE_QUEUE_OUTPUT = 8,
++ /* The input entity is idle, ready to be started. */
++ ISP_PIPELINE_IDLE_INPUT = 16,
++ /* The output entity is idle, ready to be started. */
++ ISP_PIPELINE_IDLE_OUTPUT = 32,
++ /* The pipeline is currently streaming. */
++ ISP_PIPELINE_STREAM = 64,
++};
++
++struct isp_pipeline {
++ struct media_pipeline pipe;
++ spinlock_t lock;
++ unsigned int state;
++ struct isp_video *input;
++ struct isp_video *output;
++ unsigned long l3_ick;
++ unsigned int max_rate;
++ struct v4l2_fract max_timeperframe;
++};
++
++#define to_isp_pipeline(__e) \
++ container_of((__e)->pipe, struct isp_pipeline, pipe)
++
++static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
++{
++ return pipe->state == (ISP_PIPELINE_STREAM_INPUT |
++ ISP_PIPELINE_STREAM_OUTPUT |
++ ISP_PIPELINE_QUEUE_INPUT |
++ ISP_PIPELINE_QUEUE_OUTPUT |
++ ISP_PIPELINE_IDLE_INPUT |
++ ISP_PIPELINE_IDLE_OUTPUT);
++}
++
++/*
++ * struct isp_buffer - ISP buffer
++ * @buffer: ISP video buffer
++ * @isp_addr: MMU mapped address (a.k.a. device address) of the buffer.
++ */
++struct isp_buffer {
++ struct isp_video_buffer buffer;
++ dma_addr_t isp_addr;
++};
++
++#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, buffer)
++
++/*
++ * struct isp_video_operations - ISP video operations
++ * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
++ * if there was no buffer previously queued.
++ */
++struct isp_video_operations {
++ int(*queue)(struct isp_video *video, struct isp_buffer *buffer);
++};
++
++struct isp_video {
++ struct video_device video;
++ enum v4l2_buf_type type;
++ struct media_entity_pad pad;
++
++ struct mutex mutex;
++ atomic_t active;
++
++ struct isp_device *isp;
++
++ unsigned int capture_mem;
++ unsigned int alignment;
++
++ /* Pipeline state */
++ struct isp_pipeline pipe;
++ struct mutex stream_lock;
++
++ /* Video buffers queue */
++ struct isp_video_queue *queue;
++ struct list_head dmaqueue;
++ atomic_t sequence;
++
++ const struct isp_video_operations *ops;
++};
++
++#define to_isp_video(vdev) container_of(vdev, struct isp_video, video)
++
++struct isp_video_fh {
++ struct v4l2_fh vfh;
++ struct isp_video *video;
++ struct isp_video_queue queue;
++ struct v4l2_format format;
++ struct v4l2_fract timeperframe;
++};
++
++#define to_isp_video_fh(fh) container_of(fh, struct isp_video_fh, vfh)
++#define isp_video_queue_to_isp_video_fh(q) \
++ container_of(q, struct isp_video_fh, queue)
++
++extern int isp_video_init(struct isp_video *video, const char *name);
++extern int isp_video_register(struct isp_video *video,
++ struct v4l2_device *vdev);
++extern void isp_video_unregister(struct isp_video *video);
++extern struct isp_buffer *isp_video_buffer_next(struct isp_video *video,
++ unsigned int error);
++
++extern struct media_entity_pad *isp_video_remote_pad(struct isp_video *video);
++extern void isp_video_mbus_to_pix(const struct isp_video *video,
++ const struct v4l2_mbus_framefmt *mbus,
++ struct v4l2_pix_format *pix);
++extern void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
++ struct v4l2_mbus_framefmt *mbus);
++
++extern enum v4l2_mbus_pixelcode
++isp_video_uncompressed_code(enum v4l2_mbus_pixelcode code);
++
++#endif /* __ISP_VIDEO_H */
++
+diff --git a/drivers/media/video/isp/luma_enhance_table.h b/drivers/media/video/isp/luma_enhance_table.h
+new file mode 100644
+index 0000000..99c8b05
+--- /dev/null
++++ b/drivers/media/video/isp/luma_enhance_table.h
+@@ -0,0 +1,144 @@
++/*
++ * luma_enhance_table.h
++ *
++ * Luminance Enhancement table values for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1047552,
++1048575,
++1047551,
++1046527,
++1045503,
++1044479,
++1043455,
++1042431,
++1041407,
++1040383,
++1039359,
++1038335,
++1037311,
++1036287,
++1035263,
++1034239,
++1033215,
++1032191,
++1031167,
++1030143,
++1028096,
++1028096,
++1028096,
++1028096,
++1028096,
++1028096,
++1028096,
++1028096,
++1028096,
++1028096,
++1028100,
++1032196,
++1036292,
++1040388,
++1044484,
++0,
++0,
++0,
++5,
++5125,
++10245,
++15365,
++20485,
++25605,
++30720,
++30720,
++30720,
++30720,
++30720,
++30720,
++30720,
++30720,
++30720,
++30720,
++30720,
++31743,
++30719,
++29695,
++28671,
++27647,
++26623,
++25599,
++24575,
++23551,
++22527,
++21503,
++20479,
++19455,
++18431,
++17407,
++16383,
++15359,
++14335,
++13311,
++12287,
++11263,
++10239,
++9215,
++8191,
++7167,
++6143,
++5119,
++4095,
++3071,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024,
++1024
+diff --git a/drivers/media/video/isp/noise_filter_table.h b/drivers/media/video/isp/noise_filter_table.h
+new file mode 100644
+index 0000000..7345f90
+--- /dev/null
++++ b/drivers/media/video/isp/noise_filter_table.h
+@@ -0,0 +1,79 @@
++/*
++ * noise_filter_table.h
++ *
++ * Noise Filter Table values for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++16,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31,
++31
+diff --git a/drivers/media/video/isp/redgamma_table.h b/drivers/media/video/isp/redgamma_table.h
+new file mode 100644
+index 0000000..ad0232a
+--- /dev/null
++++ b/drivers/media/video/isp/redgamma_table.h
+@@ -0,0 +1,1040 @@
++/*
++ * redgamma_table.h
++ *
++ * Gamma Table values for RED for TI's OMAP3 Camera ISP
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++0,
++0,
++1,
++2,
++3,
++3,
++4,
++5,
++6,
++8,
++10,
++12,
++14,
++16,
++18,
++20,
++22,
++23,
++25,
++26,
++28,
++29,
++31,
++32,
++34,
++35,
++36,
++37,
++39,
++40,
++41,
++42,
++43,
++44,
++45,
++46,
++47,
++48,
++49,
++50,
++51,
++52,
++52,
++53,
++54,
++55,
++56,
++57,
++58,
++59,
++60,
++61,
++62,
++63,
++63,
++64,
++65,
++66,
++66,
++67,
++68,
++69,
++69,
++70,
++71,
++72,
++72,
++73,
++74,
++75,
++75,
++76,
++77,
++78,
++78,
++79,
++80,
++81,
++81,
++82,
++83,
++84,
++84,
++85,
++86,
++87,
++88,
++88,
++89,
++90,
++91,
++91,
++92,
++93,
++94,
++94,
++95,
++96,
++97,
++97,
++98,
++98,
++99,
++99,
++100,
++100,
++101,
++101,
++102,
++103,
++104,
++104,
++105,
++106,
++107,
++108,
++108,
++109,
++110,
++111,
++111,
++112,
++113,
++114,
++114,
++115,
++116,
++117,
++117,
++118,
++119,
++119,
++120,
++120,
++121,
++121,
++122,
++122,
++123,
++123,
++124,
++124,
++125,
++125,
++126,
++126,
++127,
++127,
++128,
++128,
++129,
++129,
++130,
++130,
++131,
++131,
++132,
++132,
++133,
++133,
++134,
++134,
++135,
++135,
++136,
++136,
++137,
++137,
++138,
++138,
++139,
++139,
++140,
++140,
++141,
++141,
++142,
++142,
++143,
++143,
++144,
++144,
++145,
++145,
++146,
++146,
++147,
++147,
++148,
++148,
++149,
++149,
++150,
++150,
++151,
++151,
++152,
++152,
++153,
++153,
++153,
++153,
++154,
++154,
++154,
++154,
++155,
++155,
++156,
++156,
++157,
++157,
++158,
++158,
++158,
++159,
++159,
++159,
++160,
++160,
++160,
++161,
++161,
++162,
++162,
++163,
++163,
++164,
++164,
++164,
++164,
++165,
++165,
++165,
++165,
++166,
++166,
++167,
++167,
++168,
++168,
++169,
++169,
++170,
++170,
++170,
++170,
++171,
++171,
++171,
++171,
++172,
++172,
++173,
++173,
++174,
++174,
++175,
++175,
++176,
++176,
++176,
++176,
++177,
++177,
++177,
++177,
++178,
++178,
++178,
++178,
++179,
++179,
++179,
++179,
++180,
++180,
++180,
++180,
++181,
++181,
++181,
++181,
++182,
++182,
++182,
++182,
++183,
++183,
++183,
++183,
++184,
++184,
++184,
++184,
++185,
++185,
++185,
++185,
++186,
++186,
++186,
++186,
++187,
++187,
++187,
++187,
++188,
++188,
++188,
++188,
++189,
++189,
++189,
++189,
++190,
++190,
++190,
++190,
++191,
++191,
++191,
++191,
++192,
++192,
++192,
++192,
++193,
++193,
++193,
++193,
++194,
++194,
++194,
++194,
++195,
++195,
++195,
++195,
++196,
++196,
++196,
++196,
++197,
++197,
++197,
++197,
++198,
++198,
++198,
++198,
++199,
++199,
++199,
++199,
++200,
++200,
++200,
++200,
++201,
++201,
++201,
++201,
++202,
++202,
++202,
++203,
++203,
++203,
++203,
++204,
++204,
++204,
++204,
++205,
++205,
++205,
++205,
++206,
++206,
++206,
++206,
++207,
++207,
++207,
++207,
++208,
++208,
++208,
++208,
++209,
++209,
++209,
++209,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++210,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++211,
++212,
++212,
++212,
++212,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++213,
++214,
++214,
++214,
++214,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++215,
++216,
++216,
++216,
++216,
++217,
++217,
++217,
++217,
++218,
++218,
++218,
++218,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++219,
++220,
++220,
++220,
++220,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++221,
++222,
++222,
++222,
++222,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++223,
++224,
++224,
++224,
++224,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++225,
++226,
++226,
++226,
++226,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++227,
++228,
++228,
++228,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++229,
++230,
++230,
++230,
++230,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++231,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++232,
++233,
++233,
++233,
++233,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++234,
++235,
++235,
++235,
++235,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++236,
++237,
++237,
++237,
++237,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++238,
++239,
++239,
++239,
++239,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++240,
++241,
++241,
++241,
++241,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++242,
++243,
++243,
++243,
++243,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++244,
++245,
++245,
++245,
++245,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++246,
++247,
++247,
++247,
++247,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++248,
++249,
++249,
++249,
++249,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++250,
++251,
++251,
++251,
++251,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++252,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++253,
++254,
++254,
++254,
++254,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255,
++255
+diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
+index 79f096d..fcb4cd9 100644
+--- a/drivers/media/video/mt9m001.c
++++ b/drivers/media/video/mt9m001.c
+@@ -157,7 +157,7 @@ static int mt9m001_init(struct i2c_client *client)
+
+ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* Switch to master "normal" mode or stop sensor readout */
+ if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0)
+@@ -206,7 +206,7 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
+
+ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ struct v4l2_rect rect = a->c;
+ struct soc_camera_device *icd = client->dev.platform_data;
+@@ -271,7 +271,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+
+ static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ a->c = mt9m001->rect;
+@@ -297,7 +297,7 @@ static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int mt9m001_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ mf->width = mt9m001->rect.width;
+@@ -312,7 +312,7 @@ static int mt9m001_g_fmt(struct v4l2_subdev *sd,
+ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ struct v4l2_crop a = {
+ .c = {
+@@ -340,7 +340,7 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
+ static int mt9m001_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ const struct mt9m001_datafmt *fmt;
+
+@@ -367,7 +367,7 @@ static int mt9m001_try_fmt(struct v4l2_subdev *sd,
+ static int mt9m001_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+@@ -386,7 +386,7 @@ static int mt9m001_g_chip_ident(struct v4l2_subdev *sd,
+ static int mt9m001_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
+ return -EINVAL;
+@@ -406,7 +406,7 @@ static int mt9m001_g_register(struct v4l2_subdev *sd,
+ static int mt9m001_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
+ return -EINVAL;
+@@ -468,7 +468,7 @@ static struct soc_camera_ops mt9m001_ops = {
+
+ static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ int data;
+
+@@ -494,7 +494,7 @@ static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+
+ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ const struct v4l2_queryctrl *qctrl;
+@@ -683,7 +683,7 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
+
+ static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ *lines = mt9m001->y_skip_top;
+@@ -704,7 +704,7 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
+ static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ if (index >= mt9m001->num_fmts)
+diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
+index fbd0fc7..a30fe35 100644
+--- a/drivers/media/video/mt9m111.c
++++ b/drivers/media/video/mt9m111.c
+@@ -434,7 +434,7 @@ static int mt9m111_make_rect(struct i2c_client *client,
+ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+ struct v4l2_rect rect = a->c;
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ int ret;
+
+@@ -449,7 +449,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+
+ static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+
+ a->c = mt9m111->rect;
+@@ -475,7 +475,7 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+
+ mf->width = mt9m111->rect.width;
+@@ -537,7 +537,7 @@ static int mt9m111_set_pixfmt(struct i2c_client *client,
+ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct mt9m111_datafmt *fmt;
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct v4l2_rect rect = {
+@@ -572,7 +572,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
+ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ const struct mt9m111_datafmt *fmt;
+ bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+@@ -612,7 +612,7 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
+ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+@@ -631,7 +631,7 @@ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
+ static int mt9m111_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int val;
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
+@@ -652,7 +652,7 @@ static int mt9m111_g_register(struct v4l2_subdev *sd,
+ static int mt9m111_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
+ return -EINVAL;
+@@ -800,7 +800,7 @@ static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
+
+ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ int data;
+
+@@ -843,7 +843,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+
+ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ const struct v4l2_queryctrl *qctrl;
+ int ret;
+diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
+index a9a28b2..9bd44a8 100644
+--- a/drivers/media/video/mt9t031.c
++++ b/drivers/media/video/mt9t031.c
+@@ -163,7 +163,7 @@ static int mt9t031_disable(struct i2c_client *client)
+
+ static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (enable)
+@@ -393,7 +393,7 @@ static int mt9t031_set_params(struct i2c_client *client,
+ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+ struct v4l2_rect rect = a->c;
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ rect.width = ALIGN(rect.width, 2);
+@@ -410,7 +410,7 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+
+ static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ a->c = mt9t031->rect;
+@@ -436,7 +436,7 @@ static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int mt9t031_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ mf->width = mt9t031->rect.width / mt9t031->xskip;
+@@ -451,7 +451,7 @@ static int mt9t031_g_fmt(struct v4l2_subdev *sd,
+ static int mt9t031_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+ u16 xskip, yskip;
+ struct v4l2_rect rect = mt9t031->rect;
+@@ -490,7 +490,7 @@ static int mt9t031_try_fmt(struct v4l2_subdev *sd,
+ static int mt9t031_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+@@ -509,7 +509,7 @@ static int mt9t031_g_chip_ident(struct v4l2_subdev *sd,
+ static int mt9t031_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
+ return -EINVAL;
+@@ -528,7 +528,7 @@ static int mt9t031_g_register(struct v4l2_subdev *sd,
+ static int mt9t031_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
+ return -EINVAL;
+@@ -545,7 +545,7 @@ static int mt9t031_s_register(struct v4l2_subdev *sd,
+
+ static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+ int data;
+
+@@ -577,7 +577,7 @@ static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+
+ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+ const struct v4l2_queryctrl *qctrl;
+ int data;
+@@ -703,7 +703,7 @@ static int mt9t031_runtime_resume(struct device *dev)
+ struct soc_camera_device *icd = container_of(vdev->parent,
+ struct soc_camera_device, dev);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ int ret;
+@@ -780,7 +780,7 @@ static int mt9t031_video_probe(struct i2c_client *client)
+
+ static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ *lines = mt9t031->y_skip_top;
+diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
+index e4bf1db..74d8dd4 100644
+--- a/drivers/media/video/mt9t112.c
++++ b/drivers/media/video/mt9t112.c
+@@ -804,7 +804,7 @@ static struct soc_camera_ops mt9t112_ops = {
+ static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ id->ident = priv->model;
+@@ -817,7 +817,7 @@ static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
+ static int mt9t112_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ reg->size = 2;
+@@ -831,7 +831,7 @@ static int mt9t112_g_register(struct v4l2_subdev *sd,
+ static int mt9t112_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mt9t112_reg_write(ret, client, reg->reg, reg->val);
+@@ -858,7 +858,7 @@ static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
+ ************************************************************************/
+ static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ int ret = 0;
+
+@@ -968,7 +968,7 @@ static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+
+ static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *rect = &a->c;
+
+ return mt9t112_set_params(client, rect->width, rect->height,
+@@ -978,7 +978,7 @@ static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ static int mt9t112_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ if (!priv->format) {
+@@ -1000,7 +1000,7 @@ static int mt9t112_g_fmt(struct v4l2_subdev *sd,
+ static int mt9t112_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* TODO: set colorspace */
+ return mt9t112_set_params(client, mf->width, mf->height, mf->code);
+diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
+index e7cd23c..1a02f67 100644
+--- a/drivers/media/video/mt9v022.c
++++ b/drivers/media/video/mt9v022.c
+@@ -184,7 +184,7 @@ static int mt9v022_init(struct i2c_client *client)
+
+ static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ if (enable)
+@@ -273,7 +273,7 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
+
+ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+ struct v4l2_rect rect = a->c;
+ int ret;
+@@ -334,7 +334,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+
+ static int mt9v022_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ a->c = mt9v022->rect;
+@@ -360,7 +360,7 @@ static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int mt9v022_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ mf->width = mt9v022->rect.width;
+@@ -375,7 +375,7 @@ static int mt9v022_g_fmt(struct v4l2_subdev *sd,
+ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+ struct v4l2_crop a = {
+ .c = {
+@@ -425,7 +425,7 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
+ static int mt9v022_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+ const struct mt9v022_datafmt *fmt;
+ int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+@@ -451,7 +451,7 @@ static int mt9v022_try_fmt(struct v4l2_subdev *sd,
+ static int mt9v022_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+@@ -470,7 +470,7 @@ static int mt9v022_g_chip_ident(struct v4l2_subdev *sd,
+ static int mt9v022_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
+ return -EINVAL;
+@@ -490,7 +490,7 @@ static int mt9v022_g_register(struct v4l2_subdev *sd,
+ static int mt9v022_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
+ return -EINVAL;
+@@ -568,7 +568,7 @@ static struct soc_camera_ops mt9v022_ops = {
+
+ static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct v4l2_queryctrl *qctrl;
+ unsigned long range;
+ int data;
+@@ -625,7 +625,7 @@ static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+ int data;
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct v4l2_queryctrl *qctrl;
+
+ qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
+@@ -820,7 +820,7 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
+
+ static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ *lines = mt9v022->y_skip_top;
+@@ -841,7 +841,7 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
+ static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ if (index >= mt9v022->num_fmts)
+diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
+index 34034a7..4330c1f 100644
+--- a/drivers/media/video/ov772x.c
++++ b/drivers/media/video/ov772x.c
+@@ -599,7 +599,7 @@ static int ov772x_reset(struct i2c_client *client)
+
+ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+
+ if (!enable) {
+@@ -645,7 +645,7 @@ static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd)
+
+ static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+
+ switch (ctrl->id) {
+@@ -664,7 +664,7 @@ static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+
+ static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+ int ret = 0;
+ u8 val;
+@@ -715,7 +715,7 @@ static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+
+ id->ident = priv->model;
+@@ -728,7 +728,7 @@ static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
+ static int ov772x_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ reg->size = 1;
+@@ -747,7 +747,7 @@ static int ov772x_g_register(struct v4l2_subdev *sd,
+ static int ov772x_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg > 0xff ||
+ reg->val > 0xff)
+@@ -954,7 +954,7 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int ov772x_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+
+ if (!priv->win || !priv->cfmt) {
+@@ -977,7 +977,7 @@ static int ov772x_g_fmt(struct v4l2_subdev *sd,
+ static int ov772x_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+ int ret = ov772x_set_params(client, &mf->width, &mf->height,
+ mf->code);
+@@ -991,7 +991,7 @@ static int ov772x_s_fmt(struct v4l2_subdev *sd,
+ static int ov772x_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov772x_priv *priv = to_ov772x(client);
+ const struct ov772x_win_size *win;
+ int i;
+diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
+index 7ce9e05..faa71f3 100644
+--- a/drivers/media/video/ov9640.c
++++ b/drivers/media/video/ov9640.c
+@@ -308,7 +308,7 @@ static unsigned long ov9640_query_bus_param(struct soc_camera_device *icd)
+ /* Get status of additional camera capabilities */
+ static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
+ struct ov9640_priv, subdev);
+
+@@ -326,7 +326,7 @@ static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ /* Set status of additional camera capabilities */
+ static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
+ struct ov9640_priv, subdev);
+
+@@ -360,7 +360,7 @@ static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ static int ov9640_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
+ struct ov9640_priv, subdev);
+
+@@ -374,7 +374,7 @@ static int ov9640_g_chip_ident(struct v4l2_subdev *sd,
+ static int ov9640_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+@@ -395,7 +395,7 @@ static int ov9640_get_register(struct v4l2_subdev *sd,
+ static int ov9640_set_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xff || reg->val & ~0xff)
+ return -EINVAL;
+@@ -558,7 +558,7 @@ static int ov9640_prog_dflt(struct i2c_client *client)
+ static int ov9640_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov9640_reg_alt alts = {0};
+ enum v4l2_colorspace cspace;
+ enum v4l2_mbus_pixelcode code = mf->code;
+diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
+index 47fd207..a626a2a 100644
+--- a/drivers/media/video/rj54n1cb0c.c
++++ b/drivers/media/video/rj54n1cb0c.c
+@@ -493,7 +493,7 @@ static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+
+ static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* Switch between preview and still shot modes */
+ return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
+@@ -503,7 +503,7 @@ static int rj54n1_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+ {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
+
+ if (flags & SOCAM_PCLK_SAMPLE_RISING)
+@@ -560,7 +560,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+
+ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ struct v4l2_rect *rect = &a->c;
+ int dummy = 0, output_w, output_h,
+@@ -595,7 +595,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+
+ static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ a->c = rj54n1->rect;
+@@ -621,7 +621,7 @@ static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ mf->code = rj54n1->fmt->code;
+@@ -641,7 +641,7 @@ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
+ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
+ output_w = *out_w, output_h = *out_h;
+@@ -983,7 +983,7 @@ static int rj54n1_reg_init(struct i2c_client *client)
+ static int rj54n1_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct rj54n1_datafmt *fmt;
+ int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
+@@ -1014,7 +1014,7 @@ static int rj54n1_try_fmt(struct v4l2_subdev *sd,
+ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct rj54n1_datafmt *fmt;
+ int output_w, output_h, max_w, max_h,
+@@ -1145,7 +1145,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
+ static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+@@ -1163,7 +1163,7 @@ static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
+ static int rj54n1_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
+ reg->reg < 0x400 || reg->reg > 0x1fff)
+@@ -1185,7 +1185,7 @@ static int rj54n1_g_register(struct v4l2_subdev *sd,
+ static int rj54n1_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
+ reg->reg < 0x400 || reg->reg > 0x1fff)
+@@ -1248,7 +1248,7 @@ static struct soc_camera_ops rj54n1_ops = {
+
+ static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int data;
+
+@@ -1283,7 +1283,7 @@ static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+ {
+ int data;
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct v4l2_queryctrl *qctrl;
+
+diff --git a/drivers/media/video/smia-sensor.c b/drivers/media/video/smia-sensor.c
+new file mode 100644
+index 0000000..2d256fe
+--- /dev/null
++++ b/drivers/media/video/smia-sensor.c
+@@ -0,0 +1,942 @@
++/*
++ * drivers/media/video/smia-sensor.c
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * Based on code from Toni Leinonen <toni.leinonen@offcode.fi>
++ * and Sakari Ailus <sakari.ailus@nokia.com>.
++ *
++ * This driver is based on the Micron MT9T012 camera imager driver
++ * (C) Texas Instruments and Toshiba ET8EK8 driver (C) Nokia.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/firmware.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>
++#include <linux/version.h>
++#include <linux/v4l2-mediabus.h>
++
++#include <media/media-entity.h>
++#include <media/smiaregs.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-subdev.h>
++
++#include "smia-sensor.h"
++
++#define DEFAULT_XCLK 9600000 /* [Hz] */
++
++#define SMIA_CTRL_GAIN 0
++#define SMIA_CTRL_EXPOSURE 1
++#define SMIA_NCTRLS 2
++
++#define CID_TO_CTRL(id) ((id) == V4L2_CID_GAIN ? SMIA_CTRL_GAIN : \
++ (id) == V4L2_CID_EXPOSURE ? \
++ SMIA_CTRL_EXPOSURE : \
++ -EINVAL)
++
++/* Register definitions */
++
++/* Status registers */
++#define REG_MODEL_ID 0x0000
++#define REG_REVISION_NUMBER 0x0002
++#define REG_MANUFACTURER_ID 0x0003
++#define REG_SMIA_VERSION 0x0004
++
++/* Exposure time and gain registers */
++#define REG_FINE_EXPOSURE 0x0200
++#define REG_COARSE_EXPOSURE 0x0202
++#define REG_ANALOG_GAIN 0x0204
++#define REG_ANALOG_GAIN_MIN 0x0084
++#define REG_ANALOG_GAIN_MAX 0x0086
++#define REG_ANALOG_GAIN_STEP 0x0088
++
++/* Frame Format Description registers */
++#define REG_FFMT_MTYPE 0x0040
++#define REG_FFMT_MSTYPE 0x0041
++#define REG_FFMT_DESC_ZERO 0x0042
++
++struct smia_sensor_type {
++ u8 manufacturer_id;
++ u16 model_id;
++ char *name;
++};
++
++/* Current values for V4L2 controls */
++struct smia_control {
++ s32 minimum;
++ s32 maximum;
++ s32 step;
++ s32 default_value;
++ s32 value;
++ int (*set)(struct v4l2_subdev *, s32 value);
++};
++
++#define to_smia_sensor(sd) container_of(sd, struct smia_sensor, subdev)
++
++struct smia_sensor {
++ struct v4l2_subdev subdev;
++ struct media_entity_pad pad;
++ struct v4l2_mbus_framefmt format;
++
++ /* Sensor information */
++ char name[32];
++ struct smia_sensor_type *type;
++ u8 revision_number;
++ u8 smia_version;
++ u16 sof_rows; /* Additional rows from the sensor @ Start-Of-Frame */
++
++ /* V4L2 current control values */
++ struct smia_control controls[SMIA_NCTRLS];
++
++ struct smia_reglist *current_reglist;
++ struct v4l2_fract timeperframe;
++
++ struct smia_sensor_platform_data *platform_data;
++
++ const struct firmware *fw;
++ struct smia_meta_reglist *meta_reglist;
++
++ int power;
++};
++
++static struct smia_sensor_type smia_sensors[] = {
++ { 0, 0, "unknown" },
++ { 0x01, 0x022b, "vs6555" },
++ { 0x0c, 0x208a, "tcm8330md" },
++};
++
++static const __u32 smia_mode_ctrls[] = {
++ V4L2_CID_MODE_FRAME_WIDTH,
++ V4L2_CID_MODE_FRAME_HEIGHT,
++ V4L2_CID_MODE_VISIBLE_WIDTH,
++ V4L2_CID_MODE_VISIBLE_HEIGHT,
++ V4L2_CID_MODE_PIXELCLOCK,
++ V4L2_CID_MODE_SENSITIVITY,
++};
++
++static int smia_read_frame_fmt(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
++ int i, ncol_desc, nrow_desc;
++ u32 val;
++ int rval;
++
++ sensor->sof_rows = 0;
++
++ rval = smia_i2c_read_reg(client, SMIA_REG_8BIT, REG_FFMT_MTYPE,
++ &val);
++ if (rval)
++ return rval;
++
++ /* We support only 2-byte Generic Frame Format Description */
++ if (val != 0x01)
++ return val;
++
++ rval = smia_i2c_read_reg(client, SMIA_REG_8BIT, REG_FFMT_MSTYPE,
++ &val);
++ if (rval)
++ return rval;
++
++ ncol_desc = (val & 0xF0) >> 4;
++ nrow_desc = (val & 0x0F);
++
++ for (i = ncol_desc; i < ncol_desc + nrow_desc; i++) {
++ rval = smia_i2c_read_reg(client, SMIA_REG_16BIT,
++ REG_FFMT_DESC_ZERO + (i * 2),
++ &val);
++ if (rval)
++ return rval;
++
++ if ((val & 0xF000) >> 12 == 5)
++ continue; /* Image Data */
++
++ sensor->sof_rows += val & 0x0FFF;
++ }
++
++ return 0;
++}
++
++/* Return time of one row in microseconds, .8 fixed point format.
++ * If the sensor is not set to any mode, return zero. */
++static int smia_get_row_time(struct smia_sensor *sensor)
++{
++ unsigned int clock; /* Pixel clock in Hz>>10 fixed point */
++ unsigned int rt; /* Row time in .8 fixed point */
++
++ if (!sensor->current_reglist)
++ return 0;
++
++ clock = sensor->current_reglist->mode.pixel_clock;
++ clock = (clock + (1 << 9)) >> 10;
++ rt = sensor->current_reglist->mode.width * (1000000 >> 2);
++ rt = (rt + (clock >> 1)) / clock;
++
++ return rt;
++}
++
++/* Convert exposure time `us' to rows. Modify `us' to make it to
++ * correspond to the actual exposure time.
++ */
++static int smia_exposure_us_to_rows(struct smia_sensor *sensor, s32 *us)
++{
++ unsigned int rows; /* Exposure value as written to HW (ie. rows) */
++ unsigned int rt; /* Row time in .8 fixed point */
++
++ if (*us < 0)
++ *us = 0;
++
++ /* Assume that the maximum exposure time is at most ~8 s,
++ * and the maximum width (with blanking) ~8000 pixels.
++ * The formula here is in principle as simple as
++ * rows = exptime / 1e6 / width * pixel_clock
++ * but to get accurate results while coping with value ranges,
++ * have to do some fixed point math.
++ */
++
++ rt = smia_get_row_time(sensor);
++ rows = ((*us << 8) + (rt >> 1)) / rt;
++
++ if (rows > sensor->current_reglist->mode.max_exp)
++ rows = sensor->current_reglist->mode.max_exp;
++
++ /* Set the exposure time to the rounded value */
++ *us = (rt * rows + (1 << 7)) >> 8;
++
++ return rows;
++}
++
++/* Convert exposure time in rows to microseconds */
++static int smia_exposure_rows_to_us(struct smia_sensor *sensor, int rows)
++{
++ return (smia_get_row_time(sensor) * rows + (1 << 7)) >> 8;
++}
++
++/* Called to change the V4L2 gain control value. This function
++ * rounds and clamps the given value and updates the V4L2 control value.
++ * If power is on, also updates the sensor analog gain.
++ */
++static int smia_set_gain(struct v4l2_subdev *subdev, s32 gain)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
++
++ sensor->controls[SMIA_CTRL_GAIN].value = clamp(gain,
++ sensor->controls[SMIA_CTRL_GAIN].minimum,
++ sensor->controls[SMIA_CTRL_GAIN].maximum);
++
++ if (!sensor->power)
++ return 0;
++
++ return smia_i2c_write_reg(client,
++ SMIA_REG_16BIT, REG_ANALOG_GAIN,
++ sensor->controls[SMIA_CTRL_GAIN].value);
++}
++
++/* Called to change the V4L2 exposure control value. This function
++ * rounds and clamps the given value and updates the V4L2 control value.
++ * If power is on, also update the sensor exposure time.
++ * exptime is in microseconds.
++ */
++static int smia_set_exposure(struct v4l2_subdev *subdev, s32 exptime)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ int exposure_rows;
++
++ exptime = clamp(exptime, sensor->controls[SMIA_CTRL_EXPOSURE].minimum,
++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum);
++
++ exposure_rows = smia_exposure_us_to_rows(sensor, &exptime);
++ sensor->controls[SMIA_CTRL_EXPOSURE].value = exptime;
++
++ if (!sensor->power)
++ return 0;
++
++ return smia_i2c_write_reg(client, SMIA_REG_16BIT,
++ REG_COARSE_EXPOSURE, exposure_rows);
++}
++
++static void smia_init_controls(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
++
++ smia_i2c_read_reg(client, SMIA_REG_16BIT, REG_ANALOG_GAIN_MIN,
++ (u32 *)&sensor->controls[SMIA_CTRL_GAIN].minimum);
++ smia_i2c_read_reg(client, SMIA_REG_16BIT, REG_ANALOG_GAIN_MAX,
++ (u32 *)&sensor->controls[SMIA_CTRL_GAIN].maximum);
++ smia_i2c_read_reg(client, SMIA_REG_16BIT, REG_ANALOG_GAIN_STEP,
++ (u32 *)&sensor->controls[SMIA_CTRL_GAIN].step);
++ sensor->controls[SMIA_CTRL_GAIN].default_value =
++ sensor->controls[SMIA_CTRL_GAIN].minimum;
++ sensor->controls[SMIA_CTRL_GAIN].value = 0;
++ sensor->controls[SMIA_CTRL_GAIN].set = smia_set_gain;
++
++ /* Exposure parameters may change at each mode change, just zero here */
++ sensor->controls[SMIA_CTRL_EXPOSURE].minimum = 0;
++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum = 0;
++ sensor->controls[SMIA_CTRL_EXPOSURE].step = 0;
++ sensor->controls[SMIA_CTRL_EXPOSURE].default_value = 0;
++ sensor->controls[SMIA_CTRL_EXPOSURE].value = 0;
++ sensor->controls[SMIA_CTRL_EXPOSURE].set = smia_set_exposure;
++}
++
++static int smia_update_controls(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ /* Adjust V4L2 control values due to sensor mode change */
++
++ sensor->controls[SMIA_CTRL_EXPOSURE].minimum = 0;
++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum =
++ smia_exposure_rows_to_us(sensor,
++ sensor->current_reglist->mode.max_exp);
++ sensor->controls[SMIA_CTRL_EXPOSURE].step =
++ smia_exposure_rows_to_us(sensor, 1);
++ sensor->controls[SMIA_CTRL_EXPOSURE].default_value =
++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum;
++ if (sensor->controls[SMIA_CTRL_EXPOSURE].value == 0)
++ sensor->controls[SMIA_CTRL_EXPOSURE].value =
++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum;
++
++ return 0;
++}
++
++static int smia_set_controls(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ int i;
++
++ /* Write the cntrols to the sensorr */
++
++ for (i = 0; i < ARRAY_SIZE(sensor->controls); i++) {
++ int rval = sensor->controls[i].set(subdev,
++ sensor->controls[i].value);
++ if (rval)
++ return rval;
++ }
++
++ return 0;
++}
++
++/* Must be called with power already enabled on the sensor */
++static int smia_configure(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ int rval;
++
++ rval = smia_i2c_write_regs(client,
++ sensor->current_reglist->regs);
++ if (rval)
++ goto fail;
++
++ rval = smia_set_controls(subdev);
++ if (rval)
++ goto fail;
++
++ rval = sensor->platform_data->configure_interface(
++ subdev,
++ sensor->sof_rows,
++ sensor->current_reglist->mode.window_height);
++ if (rval)
++ goto fail;
++
++ return 0;
++
++fail:
++ dev_err(&client->dev, "sensor configuration failed\n");
++ return rval;
++
++}
++
++static int smia_s_stream(struct v4l2_subdev *subdev, int streaming)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ int rval;
++
++ if (streaming) {
++ rval = smia_configure(subdev);
++ if (!rval)
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT,
++ 0x0100, 0x01);
++ } else {
++ rval = smia_i2c_write_reg(client, SMIA_REG_8BIT, 0x0100, 0x00);
++ }
++
++ return rval;
++}
++
++static int smia_power_off(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ int rval;
++
++ rval = sensor->platform_data->set_xclk(subdev, 0);
++ if (rval)
++ return rval;
++
++ return sensor->platform_data->set_power(subdev, 0);
++}
++
++static int smia_power_on(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ struct smia_reglist *reglist = NULL;
++ int rval;
++ unsigned int hz = DEFAULT_XCLK;
++
++ if (sensor->meta_reglist) {
++ reglist = smia_reglist_find_type(sensor->meta_reglist,
++ SMIA_REGLIST_POWERON);
++ hz = reglist->mode.ext_clock;
++ }
++
++ rval = sensor->platform_data->set_power(subdev, 1);
++ if (rval)
++ goto out;
++
++ sensor->platform_data->set_xclk(subdev, hz);
++
++ /*
++ * At least 10 ms is required between xshutdown up and first
++ * i2c transaction. Clock must start at least 2400 cycles
++ * before first i2c transaction.
++ */
++ msleep(10);
++
++ if (reglist) {
++ rval = smia_i2c_write_regs(client, reglist->regs);
++ if (rval)
++ goto out;
++ }
++
++out:
++ if (rval)
++ smia_power_off(subdev);
++
++ return rval;
++}
++
++static int smia_dev_init(struct v4l2_subdev *subdev)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++ char name[SMIA_MAX_LEN];
++ u32 model_id, revision_number, manufacturer_id, smia_version;
++ int i, rval;
++
++ rval = smia_power_on(subdev);
++ if (rval)
++ return -ENODEV;
++
++ /* Read and check sensor identification registers */
++ if (smia_i2c_read_reg(client, SMIA_REG_16BIT, REG_MODEL_ID, &model_id)
++ || smia_i2c_read_reg(client, SMIA_REG_8BIT,
++ REG_REVISION_NUMBER, &revision_number)
++ || smia_i2c_read_reg(client, SMIA_REG_8BIT,
++ REG_MANUFACTURER_ID, &manufacturer_id)
++ || smia_i2c_read_reg(client, SMIA_REG_8BIT,
++ REG_SMIA_VERSION, &smia_version)) {
++ rval = -ENODEV;
++ goto out_poweroff;
++ }
++
++ sensor->revision_number = revision_number;
++ sensor->smia_version = smia_version;
++
++ if (smia_version != 10) {
++ /* We support only SMIA version 1.0 at the moment */
++ dev_err(&client->dev,
++ "unknown sensor 0x%04x detected (smia ver %i.%i)\n",
++ model_id, smia_version / 10, smia_version % 10);
++ rval = -ENODEV;
++ goto out_poweroff;
++ }
++
++ /* Detect which sensor we have */
++ for (i = 1; i < ARRAY_SIZE(smia_sensors); i++) {
++ if (smia_sensors[i].manufacturer_id == manufacturer_id
++ && smia_sensors[i].model_id == model_id)
++ break;
++ }
++
++ /* This will be exported go the the v4l2_subdev description (through a
++ * string control) when we'll have one.
++ */
++ if (i >= ARRAY_SIZE(smia_sensors))
++ i = 0; /* Unknown sensor */
++ sensor->type = &smia_sensors[i];
++ strlcpy(sensor->name, smia_sensors[i].name, sizeof(sensor->name));
++
++
++ /* Read sensor frame format */
++ smia_read_frame_fmt(subdev);
++
++ /* Initialize V4L2 controls */
++ smia_init_controls(subdev);
++
++ /* Import firmware */
++ snprintf(name, sizeof(name), "%s-%02x-%04x-%02x.bin",
++ SMIA_SENSOR_NAME, sensor->type->manufacturer_id,
++ sensor->type->model_id, sensor->revision_number);
++
++ if (request_firmware(&sensor->fw, name, &client->dev)) {
++ dev_err(&client->dev, "can't load firmware %s\n", name);
++ rval = -ENODEV;
++ goto out_poweroff;
++ }
++
++ sensor->meta_reglist = (struct smia_meta_reglist *)sensor->fw->data;
++
++ rval = smia_reglist_import(sensor->meta_reglist);
++ if (rval) {
++ dev_err(&client->dev,
++ "invalid register list %s, import failed\n",
++ name);
++ goto out_release;
++ }
++
++ /* Select initial mode */
++ sensor->current_reglist =
++ smia_reglist_find_type(sensor->meta_reglist,
++ SMIA_REGLIST_MODE);
++ if (!sensor->current_reglist) {
++ dev_err(&client->dev,
++ "invalid register list %s, no mode found\n",
++ name);
++ rval = -ENODEV;
++ goto out_release;
++ }
++
++ rval = smia_power_off(subdev);
++ if (rval)
++ goto out_release;
++
++ return 0;
++
++out_release:
++ release_firmware(sensor->fw);
++out_poweroff:
++ sensor->meta_reglist = NULL;
++ sensor->fw = NULL;
++ smia_power_off(subdev);
++
++ return rval;
++}
++
++static struct v4l2_queryctrl smia_ctrls[] = {
++ {
++ .id = V4L2_CID_GAIN,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Analog gain",
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++ {
++ .id = V4L2_CID_EXPOSURE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Exposure time [us]",
++ .flags = V4L2_CTRL_FLAG_SLIDER,
++ },
++};
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev video operations
++ */
++static int smia_enum_mbus_code(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ return smia_reglist_enum_mbus_code(sensor->meta_reglist, code);
++}
++
++static int smia_enum_frame_size(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ return smia_reglist_enum_frame_size(sensor->meta_reglist, fse);
++}
++
++static int smia_enum_frame_ival(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_interval_enum *fie)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ return smia_reglist_enum_frame_ival(sensor->meta_reglist, fie);
++}
++
++static struct v4l2_mbus_framefmt *
++__smia_get_pad_format(struct smia_sensor *sensor, struct v4l2_subdev_fh *fh,
++ unsigned int pad, enum v4l2_subdev_format which)
++{
++ if (pad != 0)
++ return NULL;
++
++ switch (which) {
++ case V4L2_SUBDEV_FORMAT_PROBE:
++ return v4l2_subdev_get_probe_format(fh, pad);
++ case V4L2_SUBDEV_FORMAT_ACTIVE:
++ return &sensor->format;
++ default:
++ return NULL;
++ }
++}
++
++static int smia_get_pad_format(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct v4l2_mbus_framefmt *format;
++
++ format = __smia_get_pad_format(sensor, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ *fmt = *format;
++ return 0;
++}
++
++static int smia_set_pad_format(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_fh *fh, unsigned int pad,
++ struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct v4l2_mbus_framefmt *format;
++ struct smia_reglist *reglist;
++
++ format = __smia_get_pad_format(sensor, fh, pad, which);
++ if (format == NULL)
++ return -EINVAL;
++
++ reglist = smia_reglist_find_mode_fmt(sensor->meta_reglist, fmt);
++ smia_reglist_to_mbus(reglist, fmt);
++ *format = *fmt;
++
++ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
++ sensor->current_reglist = reglist;
++
++ return 0;
++}
++
++static int smia_get_frame_interval(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_frame_interval *fi)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ memset(fi, 0, sizeof(*fi));
++ fi->interval = sensor->current_reglist->mode.timeperframe;
++
++ return 0;
++}
++
++static int smia_set_frame_interval(struct v4l2_subdev *subdev,
++ struct v4l2_subdev_frame_interval *fi)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ struct smia_reglist *reglist;
++
++ reglist = smia_reglist_find_mode_ival(sensor->meta_reglist,
++ sensor->current_reglist,
++ &fi->interval);
++ if (!reglist)
++ return -EINVAL;
++
++ sensor->current_reglist = reglist;
++ return smia_update_controls(subdev);
++}
++
++static int
++smia_get_skip_top_lines(struct v4l2_subdev *subdev, u32 *lines)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ *lines = sensor->sof_rows;
++ return 0;
++}
++
++/* --------------------------------------------------------------------------
++ * V4L2 subdev core operations
++ */
++static int
++smia_get_chip_ident(struct v4l2_subdev *subdev,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(subdev);
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SMIA, 0);
++}
++
++static int
++smia_set_config(struct v4l2_subdev *subdev, int irq, void *platform_data)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ if (platform_data == NULL)
++ return -ENODEV;
++
++ sensor->platform_data = platform_data;
++
++ return smia_dev_init(subdev);
++}
++
++static int smia_query_ctrl(struct v4l2_subdev *subdev, struct v4l2_queryctrl *a)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ int rval, ctrl;
++
++ rval = smia_ctrl_query(smia_ctrls, ARRAY_SIZE(smia_ctrls), a);
++ if (rval) {
++ return smia_mode_query(smia_mode_ctrls,
++ ARRAY_SIZE(smia_mode_ctrls), a);
++ }
++
++ ctrl = CID_TO_CTRL(a->id);
++ if (ctrl < 0)
++ return ctrl;
++
++ a->minimum = sensor->controls[ctrl].minimum;
++ a->maximum = sensor->controls[ctrl].maximum;
++ a->step = sensor->controls[ctrl].step;
++ a->default_value = sensor->controls[ctrl].default_value;
++
++ return 0;
++}
++
++static int smia_get_ctrl(struct v4l2_subdev *subdev, struct v4l2_control *vc)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ int ctrl;
++
++ int rval = smia_mode_g_ctrl(smia_mode_ctrls,
++ ARRAY_SIZE(smia_mode_ctrls),
++ vc, &sensor->current_reglist->mode);
++ if (rval == 0)
++ return 0;
++
++ ctrl = CID_TO_CTRL(vc->id);
++ if (ctrl < 0)
++ return ctrl;
++ vc->value = sensor->controls[ctrl].value;
++
++ return 0;
++}
++
++static int smia_set_ctrl(struct v4l2_subdev *subdev, struct v4l2_control *vc)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ int ctrl = CID_TO_CTRL(vc->id);
++ if (ctrl < 0)
++ return ctrl;
++
++ return sensor->controls[ctrl].set(subdev, vc->value);
++
++}
++
++static int
++smia_set_power(struct v4l2_subdev *subdev, int on)
++{
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ int rval = 0;
++
++ /* If we are already in this mode, do nothing */
++ if (sensor->power == on)
++ return 0;
++
++ if (on)
++ rval = smia_power_on(subdev);
++ else
++ rval = smia_power_off(subdev);
++
++ if (rval == 0)
++ sensor->power = on;
++
++ return rval;
++}
++
++static const struct v4l2_subdev_video_ops smia_video_ops = {
++ .s_stream = smia_s_stream,
++ .g_frame_interval = smia_get_frame_interval,
++ .s_frame_interval = smia_set_frame_interval,
++};
++
++static const struct v4l2_subdev_core_ops smia_core_ops = {
++ .g_chip_ident = smia_get_chip_ident,
++ .s_config = smia_set_config,
++ .queryctrl = smia_query_ctrl,
++ .g_ctrl = smia_get_ctrl,
++ .s_ctrl = smia_set_ctrl,
++ .s_power = smia_set_power,
++};
++
++static const struct v4l2_subdev_pad_ops smia_pad_ops = {
++ .enum_mbus_code = smia_enum_mbus_code,
++ .enum_frame_size = smia_enum_frame_size,
++ .enum_frame_interval = smia_enum_frame_ival,
++ .get_fmt = smia_get_pad_format,
++ .set_fmt = smia_set_pad_format,
++};
++
++static const struct v4l2_subdev_sensor_ops smia_sensor_ops = {
++ .g_skip_top_lines = smia_get_skip_top_lines,
++};
++
++static const struct v4l2_subdev_ops smia_ops = {
++ .core = &smia_core_ops,
++ .video = &smia_video_ops,
++ .pad = &smia_pad_ops,
++ .sensor = &smia_sensor_ops,
++};
++
++/* --------------------------------------------------------------------------
++ * I2C driver
++ */
++#ifdef CONFIG_PM
++
++static int smia_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++ int ret;
++
++ if (!sensor->power)
++ return 0;
++
++ ret = smia_set_power(subdev, 0);
++ if (ret < 0)
++ return ret;
++
++ sensor->power = 1;
++ return 0;
++}
++
++static int smia_resume(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ if (!sensor->power)
++ return 0;
++
++ sensor->power = 0;
++ return smia_set_power(subdev, 1);
++}
++
++#else
++
++#define smia_suspend NULL
++#define smia_resume NULL
++
++#endif /* CONFIG_PM */
++
++static const struct media_entity_operations smia_entity_ops = {
++ .set_power = v4l2_subdev_set_power,
++};
++
++static int smia_probe(struct i2c_client *client,
++ const struct i2c_device_id *devid)
++{
++ struct smia_sensor *sensor;
++ int ret;
++
++ sensor = kzalloc(sizeof(*sensor), GFP_KERNEL);
++ if (sensor == NULL)
++ return -ENOMEM;
++
++ v4l2_i2c_subdev_init(&sensor->subdev, client, &smia_ops);
++ sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++
++ sensor->pad.type = MEDIA_PAD_TYPE_OUTPUT;
++ sensor->subdev.entity.ops = &smia_entity_ops;
++ ret = media_entity_init(&sensor->subdev.entity, 1, &sensor->pad, 0);
++ if (ret < 0)
++ kfree(sensor);
++
++ return ret;
++}
++
++static int __exit smia_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
++ struct smia_sensor *sensor = to_smia_sensor(subdev);
++
++ v4l2_device_unregister_subdev(&sensor->subdev);
++ media_entity_cleanup(&sensor->subdev.entity);
++ release_firmware(sensor->fw);
++ kfree(sensor);
++ return 0;
++}
++
++static const struct i2c_device_id smia_id_table[] = {
++ { SMIA_SENSOR_NAME, 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, smia_id_table);
++
++static struct i2c_driver smia_i2c_driver = {
++ .driver = {
++ .name = SMIA_SENSOR_NAME,
++ },
++ .probe = smia_probe,
++ .remove = __exit_p(smia_remove),
++ .suspend = smia_suspend,
++ .resume = smia_resume,
++ .id_table = smia_id_table,
++};
++
++static int __init smia_init(void)
++{
++ int rval;
++
++ rval = i2c_add_driver(&smia_i2c_driver);
++ if (rval)
++ printk(KERN_INFO "%s: failed registering " SMIA_SENSOR_NAME
++ "\n", __func__);
++
++ return rval;
++}
++
++static void __exit smia_exit(void)
++{
++ i2c_del_driver(&smia_i2c_driver);
++}
++
++/*
++ * FIXME: Menelaus isn't ready (?) at module_init stage, so use
++ * late_initcall for now.
++ */
++module_init(smia_init);
++module_exit(smia_exit);
++
++MODULE_AUTHOR("Tuukka Toivonen <tuukka.o.toivonen@nokia.com>");
++MODULE_DESCRIPTION("Generic SMIA-compatible camera sensor driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/smia-sensor.h b/drivers/media/video/smia-sensor.h
+new file mode 100644
+index 0000000..ed9600f
+--- /dev/null
++++ b/drivers/media/video/smia-sensor.h
+@@ -0,0 +1,39 @@
++/*
++ * drivers/media/video/smia-sensor.h
++ *
++ * Copyright (C) 2008,2009 Nokia Corporation
++ *
++ * Contact: Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef SMIA_SENSOR_H
++#define SMIA_SENSOR_H
++
++#include <media/v4l2-subdev.h>
++
++#define SMIA_SENSOR_NAME "smia-sensor"
++#define SMIA_SENSOR_I2C_ADDR (0x20 >> 1)
++
++struct smia_sensor_platform_data {
++ int (*configure_interface)(struct v4l2_subdev *subdev,
++ int sof_rows, int height);
++ int (*set_xclk)(struct v4l2_subdev *subdev, int hz);
++ int (*set_power)(struct v4l2_subdev *subdev, int on);
++};
++
++#endif /* SMIA_SENSOR_H */
+diff --git a/drivers/media/video/smiaregs.c b/drivers/media/video/smiaregs.c
+new file mode 100644
+index 0000000..3dadea5
+--- /dev/null
++++ b/drivers/media/video/smiaregs.c
+@@ -0,0 +1,734 @@
++/*
++ * drivers/media/video/smiaregs.c
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/i2c.h>
++#include <linux/sort.h>
++#include <linux/v4l2-subdev.h>
++#include <media/smiaregs.h>
++
++/*
++ *
++ * Video control helpers
++ *
++ */
++
++int smia_ctrl_find(const struct v4l2_queryctrl *ctrls, size_t nctrls, int id)
++{
++ size_t i;
++
++ for (i = 0; i < nctrls; i++)
++ if (ctrls[i].id == id)
++ break;
++
++ if (i == nctrls)
++ i = -EINVAL;
++
++ return i;
++}
++EXPORT_SYMBOL_GPL(smia_ctrl_find);
++
++int smia_ctrl_find_next(const struct v4l2_queryctrl *ctrls, size_t nctrls,
++ int id)
++{
++ int i;
++ u32 best = (u32)-1;
++
++ for (i = 0; i < nctrls; i++)
++ if (ctrls[i].id > id
++ && (best == (u32)-1 || ctrls[i].id < ctrls[best].id))
++ best = i;
++
++ if (best == (u32)-1)
++ return -EINVAL;
++
++ return best;
++}
++EXPORT_SYMBOL_GPL(smia_ctrl_find_next);
++
++int smia_ctrl_query(const struct v4l2_queryctrl *ctrls, size_t nctrls,
++ struct v4l2_queryctrl *a)
++{
++ int id, i;
++
++ id = a->id;
++ if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
++ id &= ~V4L2_CTRL_FLAG_NEXT_CTRL;
++ i = smia_ctrl_find_next(ctrls, nctrls, id);
++ } else {
++ i = smia_ctrl_find(ctrls, nctrls, id);
++ }
++
++ if (i < 0)
++ return -EINVAL;
++
++ *a = ctrls[i];
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(smia_ctrl_query);
++
++int smia_mode_query(const __u32 *ctrls, size_t nctrls, struct v4l2_queryctrl *a)
++{
++ static const struct {
++ __u32 id;
++ char *name;
++ } ctrl[] = {
++ { .id = V4L2_CID_MODE_FRAME_WIDTH, .name = "Frame width" },
++ { .id = V4L2_CID_MODE_FRAME_HEIGHT, .name = "Frame height" },
++ { .id = V4L2_CID_MODE_VISIBLE_WIDTH, .name = "Visible width" },
++ { .id = V4L2_CID_MODE_VISIBLE_HEIGHT,
++ .name = "Visible height" },
++ { .id = V4L2_CID_MODE_PIXELCLOCK,
++ .name = "Pixel clock [Hz]" },
++ { .id = V4L2_CID_MODE_SENSITIVITY, .name = "Sensitivity" },
++ };
++ int id, next = 0, i;
++
++ id = a->id;
++ if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
++ id &= ~V4L2_CTRL_FLAG_NEXT_CTRL;
++ next = 1;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(ctrl); i++) {
++ if ((!next && ctrl[i].id == id) ||
++ (next && ctrl[i].id > id)) {
++ int j;
++ for (j = 0; j < nctrls; j++)
++ if (ctrl[i].id == ctrls[j])
++ goto found;
++ }
++ }
++ return -EINVAL;
++
++found:
++ a->id = ctrl[i].id;
++ strcpy(a->name, ctrl[i].name);
++ a->type = V4L2_CTRL_TYPE_INTEGER;
++ a->minimum = 0;
++ a->maximum = 0;
++ a->step = 0;
++ a->default_value = 0;
++ a->flags = V4L2_CTRL_FLAG_READ_ONLY;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(smia_mode_query);
++
++int smia_mode_g_ctrl(const __u32 *ctrls, size_t nctrls, struct v4l2_control *vc,
++ const struct smia_mode *sm)
++{
++ int i;
++
++ for (i = 0; i < nctrls; i++)
++ if (ctrls[i] == vc->id)
++ break;
++ if (i >= nctrls)
++ return -EINVAL;
++
++ switch (vc->id) {
++ case V4L2_CID_MODE_FRAME_WIDTH:
++ vc->value = sm->width;
++ break;
++ case V4L2_CID_MODE_FRAME_HEIGHT:
++ vc->value = sm->height;
++ break;
++ case V4L2_CID_MODE_VISIBLE_WIDTH:
++ vc->value = sm->window_width;
++ break;
++ case V4L2_CID_MODE_VISIBLE_HEIGHT:
++ vc->value = sm->window_height;
++ break;
++ case V4L2_CID_MODE_PIXELCLOCK:
++ vc->value = sm->pixel_clock;
++ break;
++ case V4L2_CID_MODE_SENSITIVITY:
++ vc->value = sm->sensitivity;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(smia_mode_g_ctrl);
++
++/*
++ *
++ * Reglist helpers
++ *
++ */
++
++static int smia_reglist_cmp(const void *a, const void *b)
++{
++ const struct smia_reglist **list1 = (const struct smia_reglist **)a,
++ **list2 = (const struct smia_reglist **)b;
++
++ /* Put real modes in the beginning. */
++ if ((*list1)->type == SMIA_REGLIST_MODE &&
++ (*list2)->type != SMIA_REGLIST_MODE)
++ return -1;
++ else if ((*list1)->type != SMIA_REGLIST_MODE &&
++ (*list2)->type == SMIA_REGLIST_MODE)
++ return 1;
++
++ /* Descending width. */
++ if ((*list1)->mode.window_width > (*list2)->mode.window_width)
++ return -1;
++ else if ((*list1)->mode.window_width < (*list2)->mode.window_width)
++ return 1;
++
++ if ((*list1)->mode.window_height > (*list2)->mode.window_height)
++ return -1;
++ else if ((*list1)->mode.window_height < (*list2)->mode.window_height)
++ return 1;
++ else
++ return 0;
++}
++
++/*
++ * Prepare register list created by dcc-pulautin for use in kernel.
++ * The pointers in the list are actually offsets from the beginning of
++ * the blob.
++ */
++int smia_reglist_import(struct smia_meta_reglist *meta)
++{
++ uintptr_t nlists = 0;
++
++ if (meta->magic != SMIA_MAGIC) {
++ printk(KERN_ERR "invalid camera sensor firmware (0x%08X)\n",
++ meta->magic);
++ return -EILSEQ;
++ }
++
++ printk(KERN_ALERT "%s: meta_reglist version %s\n",
++ __func__, meta->version);
++
++ while (meta->reglist[nlists].offset != 0) {
++ struct smia_reglist *list;
++
++ meta->reglist[nlists].offset =
++ (uintptr_t)meta + meta->reglist[nlists].offset;
++
++ list = meta->reglist[nlists].ptr;
++
++ nlists++;
++ }
++
++ if (!nlists)
++ return -EINVAL;
++
++ sort(&meta->reglist[0].offset, nlists, sizeof(meta->reglist[0].offset),
++ smia_reglist_cmp, NULL);
++
++ nlists = 0;
++ while (meta->reglist[nlists].offset != 0) {
++ struct smia_reglist *list;
++
++ list = meta->reglist[nlists].ptr;
++
++ printk(KERN_DEBUG
++ "%s: type %d\tw %d\th %d\tfmt %x\tival %d/%d\tptr %p\n",
++ __func__,
++ list->type,
++ list->mode.window_width, list->mode.window_height,
++ list->mode.pixel_format,
++ list->mode.timeperframe.numerator,
++ list->mode.timeperframe.denominator,
++ (void *)meta->reglist[nlists].offset);
++
++ nlists++;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_import);
++
++struct smia_reglist *smia_reglist_find_type(struct smia_meta_reglist *meta,
++ u16 type)
++{
++ struct smia_reglist **next = &meta->reglist[0].ptr;
++
++ while (*next) {
++ if ((*next)->type == type)
++ return *next;
++
++ next++;
++ }
++
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_find_type);
++
++struct smia_reglist **smia_reglist_first(struct smia_meta_reglist *meta)
++{
++ return &meta->reglist[0].ptr;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_first);
++
++struct smia_reglist *smia_reglist_find_mode_fmt(struct smia_meta_reglist *meta,
++ struct v4l2_mbus_framefmt *fmt)
++{
++ struct smia_reglist **list = smia_reglist_first(meta);
++ struct smia_reglist *best_match = NULL;
++ struct smia_reglist *best_other = NULL;
++ struct v4l2_mbus_framefmt format;
++ unsigned int max_dist_match = (unsigned int)-1;
++ unsigned int max_dist_other = (unsigned int)-1;
++
++ /* Find the mode with the closest image size. The distance between
++ * image sizes is the size in pixels of the non-overlapping regions
++ * between the requested size and the frame-specified size.
++ *
++ * Store both the closest mode that matches the requested format, and
++ * the closest mode for all other formats. The best match is returned
++ * if found, otherwise the best mode with a non-matching format is
++ * returned.
++ */
++ for (; *list; list++) {
++ unsigned int dist;
++
++ if ((*list)->type != SMIA_REGLIST_MODE)
++ continue;
++
++ smia_reglist_to_mbus(*list, &format);
++
++ dist = min(fmt->width, format.width)
++ * min(fmt->height, format.height);
++ dist = format.width * format.height
++ + fmt->width * fmt->height - 2 * dist;
++
++
++ if (fmt->code == format.code) {
++ if (dist < max_dist_match || best_match == NULL) {
++ best_match = *list;
++ max_dist_match = dist;
++ }
++ } else {
++ if (dist < max_dist_other || best_other == NULL) {
++ best_other = *list;
++ max_dist_other = dist;
++ }
++ }
++ }
++
++ return best_match ? best_match : best_other;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_find_mode_fmt);
++
++#define TIMEPERFRAME_AVG_FPS(t) \
++ (((t).denominator + ((t).numerator >> 1)) / (t).numerator)
++struct smia_reglist *smia_reglist_find_mode_ival(
++ struct smia_meta_reglist *meta,
++ struct smia_reglist *current_reglist,
++ struct v4l2_fract *timeperframe)
++{
++ int fps = TIMEPERFRAME_AVG_FPS(*timeperframe);
++ struct smia_reglist **list = smia_reglist_first(meta);
++ struct smia_mode *current_mode = &current_reglist->mode;
++
++ for (; *list; list++) {
++ struct smia_mode *mode = &(*list)->mode;
++
++ if ((*list)->type != SMIA_REGLIST_MODE)
++ continue;
++
++ if (mode->window_width != current_mode->window_width
++ || mode->window_height != current_mode->window_height)
++ continue;
++
++ if (TIMEPERFRAME_AVG_FPS(mode->timeperframe) == fps)
++ return *list;
++ }
++
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_find_mode_ival);
++
++#define MAX_FMTS 4
++int smia_reglist_enum_mbus_code(struct smia_meta_reglist *meta,
++ struct v4l2_subdev_pad_mbus_code_enum *code)
++{
++ struct smia_reglist **list = smia_reglist_first(meta);
++ u32 pixelformat[MAX_FMTS];
++ int npixelformat = 0;
++
++ if (code->index >= MAX_FMTS)
++ return -EINVAL;
++
++ for (; *list; list++) {
++ struct smia_mode *mode = &(*list)->mode;
++ int i;
++
++ if ((*list)->type != SMIA_REGLIST_MODE)
++ continue;
++
++ for (i = 0; i < npixelformat; i++) {
++ if (pixelformat[i] == mode->pixel_format)
++ break;
++ }
++ if (i != npixelformat)
++ continue;
++
++ if (code->index == npixelformat) {
++ if (mode->pixel_format == V4L2_PIX_FMT_SGRBG10DPCM8)
++ code->code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8;
++ else
++ code->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++ return 0;
++ }
++
++ pixelformat[npixelformat] = mode->pixel_format;
++ npixelformat++;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_enum_mbus_code);
++
++int smia_reglist_enum_frame_size(struct smia_meta_reglist *meta,
++ struct v4l2_subdev_frame_size_enum *fse)
++{
++ struct smia_reglist **list = smia_reglist_first(meta);
++ struct v4l2_mbus_framefmt format;
++ int cmp_width = INT_MAX;
++ int cmp_height = INT_MAX;
++ int index = fse->index;
++
++ for (; *list; list++) {
++ if ((*list)->type != SMIA_REGLIST_MODE)
++ continue;
++
++ smia_reglist_to_mbus(*list, &format);
++ if (fse->code != format.code)
++ continue;
++
++ /* Assume that the modes are grouped by frame size. */
++ if (format.width == cmp_width && format.height == cmp_height)
++ continue;
++
++ cmp_width = format.width;
++ cmp_height = format.height;
++
++ if (index-- == 0) {
++ fse->min_width = format.width;
++ fse->min_height = format.height;
++ fse->max_width = format.width;
++ fse->max_height = format.height;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_enum_frame_size);
++
++int smia_reglist_enum_frame_ival(struct smia_meta_reglist *meta,
++ struct v4l2_subdev_frame_interval_enum *fie)
++{
++ struct smia_reglist **list = smia_reglist_first(meta);
++ struct v4l2_mbus_framefmt format;
++ int index = fie->index;
++
++ for (; *list; list++) {
++ struct smia_mode *mode = &(*list)->mode;
++
++ if ((*list)->type != SMIA_REGLIST_MODE)
++ continue;
++
++ smia_reglist_to_mbus(*list, &format);
++ if (fie->code != format.code)
++ continue;
++
++ if (fie->width != format.width || fie->height != format.height)
++ continue;
++
++ if (index-- == 0) {
++ fie->interval = mode->timeperframe;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_enum_frame_ival);
++
++void smia_reglist_to_mbus(const struct smia_reglist *reglist,
++ struct v4l2_mbus_framefmt *fmt)
++{
++ fmt->width = reglist->mode.window_width;
++ fmt->height = reglist->mode.window_height;
++
++ if (reglist->mode.pixel_format == V4L2_PIX_FMT_SGRBG10DPCM8)
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8;
++ else
++ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
++}
++EXPORT_SYMBOL_GPL(smia_reglist_to_mbus);
++
++/*
++ *
++ * Register access helpers
++ *
++ */
++
++/*
++ * Read a 8/16/32-bit i2c register. The value is returned in 'val'.
++ * Returns zero if successful, or non-zero otherwise.
++ */
++int smia_i2c_read_reg(struct i2c_client *client, u16 data_length,
++ u16 reg, u32 *val)
++{
++ int r;
++ struct i2c_msg msg[1];
++ unsigned char data[4];
++
++ if (!client->adapter)
++ return -ENODEV;
++ if (data_length != SMIA_REG_8BIT && data_length != SMIA_REG_16BIT)
++ return -EINVAL;
++
++ msg->addr = client->addr;
++ msg->flags = 0;
++ msg->len = 2;
++ msg->buf = data;
++
++ /* high byte goes out first */
++ data[0] = (u8) (reg >> 8);
++ data[1] = (u8) (reg & 0xff);
++ r = i2c_transfer(client->adapter, msg, 1);
++ if (r < 0)
++ goto err;
++
++ msg->len = data_length;
++ msg->flags = I2C_M_RD;
++ r = i2c_transfer(client->adapter, msg, 1);
++ if (r < 0)
++ goto err;
++
++ *val = 0;
++ /* high byte comes first */
++ if (data_length == SMIA_REG_8BIT)
++ *val = data[0];
++ else
++ *val = (data[0] << 8) + data[1];
++
++ return 0;
++
++err:
++ dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r);
++
++ return r;
++}
++EXPORT_SYMBOL_GPL(smia_i2c_read_reg);
++
++static void smia_i2c_create_msg(struct i2c_client *client, u16 len, u16 reg,
++ u32 val, struct i2c_msg *msg,
++ unsigned char *buf)
++{
++ msg->addr = client->addr;
++ msg->flags = 0; /* Write */
++ msg->len = 2 + len;
++ msg->buf = buf;
++
++ /* high byte goes out first */
++ buf[0] = (u8) (reg >> 8);
++ buf[1] = (u8) (reg & 0xff);
++
++ switch (len) {
++ case SMIA_REG_8BIT:
++ buf[2] = (u8) (val) & 0xff;
++ break;
++ case SMIA_REG_16BIT:
++ buf[2] = (u8) (val >> 8) & 0xff;
++ buf[3] = (u8) (val & 0xff);
++ break;
++ case SMIA_REG_32BIT:
++ buf[2] = (u8) (val >> 24) & 0xff;
++ buf[3] = (u8) (val >> 16) & 0xff;
++ buf[4] = (u8) (val >> 8) & 0xff;
++ buf[5] = (u8) (val & 0xff);
++ break;
++ default:
++ BUG();
++ }
++}
++
++/*
++ * Write to a 8/16-bit register.
++ * Returns zero if successful, or non-zero otherwise.
++ */
++int smia_i2c_write_reg(struct i2c_client *client, u16 data_length, u16 reg,
++ u32 val)
++{
++ int r;
++ struct i2c_msg msg[1];
++ unsigned char data[6];
++
++ if (!client->adapter)
++ return -ENODEV;
++ if (data_length != SMIA_REG_8BIT && data_length != SMIA_REG_16BIT)
++ return -EINVAL;
++
++ smia_i2c_create_msg(client, data_length, reg, val, msg, data);
++
++ r = i2c_transfer(client->adapter, msg, 1);
++ if (r < 0)
++ dev_err(&client->dev,
++ "wrote 0x%x to offset 0x%x error %d\n", val, reg, r);
++ else
++ r = 0; /* on success i2c_transfer() returns the number of messages transferred */
++
++ return r;
++}
++EXPORT_SYMBOL_GPL(smia_i2c_write_reg);
++
++/*
++ * A buffered write method that puts the wanted register write
++ * commands in a message list and passes the list to the i2c framework
++ */
++static int smia_i2c_buffered_write_regs(struct i2c_client *client,
++ const struct smia_reg *wnext, int cnt)
++{
++ /* FIXME: check how big cnt is */
++ struct i2c_msg msg[cnt];
++ unsigned char data[cnt][6];
++ int wcnt = 0;
++ u16 reg, data_length;
++ u32 val;
++
++ /* Create new write messages for all writes */
++ while (wcnt < cnt) {
++ data_length = wnext->type;
++ reg = wnext->reg;
++ val = wnext->val;
++ wnext++;
++
++ smia_i2c_create_msg(client, data_length, reg,
++ val, &msg[wcnt], &data[wcnt][0]);
++
++ /* Update write count */
++ wcnt++;
++ }
++
++ /* Now we send everything ... */
++ return i2c_transfer(client->adapter, msg, wcnt);
++}
++
++/*
++ * Write a list of registers to i2c device.
++ *
++ * The list of registers is terminated by SMIA_REG_TERM.
++ * Returns zero if successful, or non-zero otherwise.
++ */
++int smia_i2c_write_regs(struct i2c_client *client,
++ const struct smia_reg reglist[])
++{
++ int r, cnt = 0;
++ const struct smia_reg *next, *wnext;
++
++ if (!client->adapter)
++ return -ENODEV;
++
++ if (reglist == NULL)
++ return -EINVAL;
++
++ /* Initialize list pointers to the start of the list */
++ next = wnext = reglist;
++
++ do {
++ /*
++ * We have to go through the list to figure out how
++ * many regular writes we have in a row
++ */
++ while (next->type != SMIA_REG_TERM
++ && next->type != SMIA_REG_DELAY) {
++ /*
++ * Here we check that the actual length fields
++ * are valid
++ */
++ if (next->type != SMIA_REG_8BIT
++ && next->type != SMIA_REG_16BIT) {
++ dev_err(&client->dev,
++ "Invalid value on entry %d 0x%x\n",
++ cnt, next->type);
++ return -EINVAL;
++ }
++
++ /*
++ * Increment count of successive writes and
++ * read pointer
++ */
++ cnt++;
++ next++;
++ }
++
++ /* Now we start writing ... */
++ r = smia_i2c_buffered_write_regs(client, wnext, cnt);
++
++ /* ... and then check that everything was OK */
++ if (r < 0) {
++ dev_err(&client->dev, "i2c transfer error !!!\n");
++ return r;
++ }
++
++ /*
++ * If we ran into a sleep statement when going through
++ * the list, this is where we snooze for the required time
++ */
++ if (next->type == SMIA_REG_DELAY) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_timeout(msecs_to_jiffies(next->val));
++ /*
++ * ZZZ ...
++ * Update list pointers and cnt and start over ...
++ */
++ next++;
++ wnext = next;
++ cnt = 0;
++ }
++ } while (next->type != SMIA_REG_TERM);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(smia_i2c_write_regs);
++
++int smia_i2c_reglist_find_write(struct i2c_client *client,
++ struct smia_meta_reglist *meta, u16 type)
++{
++ struct smia_reglist *reglist;
++
++ reglist = smia_reglist_find_type(meta, type);
++ if (IS_ERR(reglist))
++ return PTR_ERR(reglist);
++
++ return smia_i2c_write_regs(client, reglist->regs);
++}
++EXPORT_SYMBOL_GPL(smia_i2c_reglist_find_write);
++
++MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
++MODULE_DESCRIPTION("Generic SMIA configuration and i2c register access");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
+index 475757b..3bbd9cd 100644
+--- a/drivers/media/video/soc_camera.c
++++ b/drivers/media/video/soc_camera.c
+@@ -899,7 +899,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
+ if (!subdev)
+ goto ei2cnd;
+
+- client = subdev->priv;
++ client = v4l2_get_subdevdata(subdev);
+
+ /* Use to_i2c_client(dev) to recover the i2c client */
+ dev_set_drvdata(&icd->dev, &client->dev);
+diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
+index 8b63b65..e8fcfaf 100644
+--- a/drivers/media/video/soc_mediabus.c
++++ b/drivers/media/video/soc_mediabus.c
+@@ -10,9 +10,9 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/v4l2-mediabus.h>
+
+ #include <media/v4l2-device.h>
+-#include <media/v4l2-mediabus.h>
+ #include <media/soc_mediabus.h>
+
+ #define MBUS_IDX(f) (V4L2_MBUS_FMT_ ## f - V4L2_MBUS_FMT_FIXED - 1)
+diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
+index 445dc93..d5e4949 100644
+--- a/drivers/media/video/tw9910.c
++++ b/drivers/media/video/tw9910.c
+@@ -469,7 +469,7 @@ tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height)
+ */
+ static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+ u8 val;
+ int ret;
+@@ -511,7 +511,7 @@ static int tw9910_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+ {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 val = VSSL_VVALID | HSSL_DVALID;
+
+ /*
+@@ -565,7 +565,7 @@ static int tw9910_enum_input(struct soc_camera_device *icd,
+ static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+
+ id->ident = V4L2_IDENT_TW9910;
+@@ -578,7 +578,7 @@ static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
+ static int tw9910_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (reg->reg > 0xff)
+@@ -600,7 +600,7 @@ static int tw9910_g_register(struct v4l2_subdev *sd,
+ static int tw9910_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg > 0xff ||
+ reg->val > 0xff)
+@@ -613,7 +613,7 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
+ static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+ struct v4l2_rect *rect = &a->c;
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ int ret = -EINVAL;
+@@ -701,7 +701,7 @@ tw9910_set_fmt_error:
+
+ static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+
+ if (!priv->scale) {
+@@ -748,7 +748,7 @@ static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+ static int tw9910_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+
+ if (!priv->scale) {
+@@ -778,7 +778,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
+ static int tw9910_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct tw9910_priv *priv = to_tw9910(client);
+ /* See tw9910_s_crop() - no proper cropping support */
+ struct v4l2_crop a = {
+@@ -813,7 +813,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
+ static int tw9910_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+ {
+- struct i2c_client *client = sd->priv;
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ const struct tw9910_scale_ctrl *scale;
+
+diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
+index 4e53b0b..b9a5e65 100644
+--- a/drivers/media/video/v4l2-common.c
++++ b/drivers/media/video/v4l2-common.c
+@@ -838,7 +838,8 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
+ /* Load an i2c sub-device. */
+ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+- struct i2c_board_info *info, const unsigned short *probe_addrs)
++ struct i2c_board_info *info, const unsigned short *probe_addrs,
++ int enable_devnode)
+ {
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+@@ -868,9 +869,12 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ if (!try_module_get(client->driver->driver.owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
++ if (!enable_devnode)
++ sd->flags &= ~V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
++ sd->initialized = 0;
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+@@ -885,6 +889,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
++ } else {
++ sd->initialized = 1;
+ }
+ }
+
+@@ -897,10 +903,9 @@ error:
+ }
+ EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
++struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+- int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs)
+ {
+ struct i2c_board_info info;
+@@ -910,13 +915,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+- info.irq = irq;
+- info.platform_data = platform_data;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name,
+- &info, probe_addrs);
++ &info, probe_addrs, 0);
+ }
+-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
++EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
+
+ /* Return i2c client address of v4l2_subdev. */
+ unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
+diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
+index 0ca7ec9..3b1d828 100644
+--- a/drivers/media/video/v4l2-dev.c
++++ b/drivers/media/video/v4l2-dev.c
+@@ -269,6 +269,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
+ static int v4l2_open(struct inode *inode, struct file *filp)
+ {
+ struct video_device *vdev;
++ struct media_entity *entity = NULL;
+ int ret = 0;
+
+ /* Check if the video device is available */
+@@ -283,12 +284,22 @@ static int v4l2_open(struct inode *inode, struct file *filp)
+ /* and increase the device refcount */
+ video_get(vdev);
+ mutex_unlock(&videodev_lock);
++ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) {
++ entity = media_entity_get(&vdev->entity);
++ if (!entity) {
++ ret = -EBUSY;
++ video_put(vdev);
++ return ret;
++ }
++ }
+ if (vdev->fops->open)
+ ret = vdev->fops->open(filp);
+
+ /* decrease the refcount in case of an error */
+- if (ret)
++ if (ret) {
++ media_entity_put(entity);
+ video_put(vdev);
++ }
+ return ret;
+ }
+
+@@ -301,6 +312,9 @@ static int v4l2_release(struct inode *inode, struct file *filp)
+ if (vdev->fops->release)
+ vdev->fops->release(filp);
+
++ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev)
++ media_entity_put(&vdev->entity);
++
+ /* decrease the refcount unconditionally since the release()
+ return value is ignored. */
+ video_put(vdev);
+@@ -376,13 +390,14 @@ static int get_index(struct video_device *vdev)
+ }
+
+ /**
+- * video_register_device - register video4linux devices
++ * __video_register_device - register video4linux devices
+ * @vdev: video device structure we want to register
+ * @type: type of device to register
+ * @nr: which device node number (0 == /dev/video0, 1 == /dev/video1, ...
+ * -1 == first free)
+ * @warn_if_nr_in_use: warn if the desired device node number
+ * was already in use and another number was chosen instead.
++ * @owner: module that owns the video device node
+ *
+ * The registration code assigns minor numbers and device node numbers
+ * based on the requested type and registers the new device node with
+@@ -401,9 +416,11 @@ static int get_index(struct video_device *vdev)
+ * %VFL_TYPE_VBI - Vertical blank data (undecoded)
+ *
+ * %VFL_TYPE_RADIO - A radio card
++ *
++ * %VFL_TYPE_SUBDEV - A subdevice
+ */
+-static int __video_register_device(struct video_device *vdev, int type, int nr,
+- int warn_if_nr_in_use)
++int __video_register_device(struct video_device *vdev, int type, int nr,
++ int warn_if_nr_in_use, struct module *owner)
+ {
+ int i = 0;
+ int ret;
+@@ -439,6 +456,9 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
+ case VFL_TYPE_RADIO:
+ name_base = "radio";
+ break;
++ case VFL_TYPE_SUBDEV:
++ name_base = "v4l-subdev";
++ break;
+ default:
+ printk(KERN_ERR "%s called with unknown type: %d\n",
+ __func__, type);
+@@ -525,7 +545,7 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
+ vdev->cdev->ops = &v4l2_unlocked_fops;
+ else
+ vdev->cdev->ops = &v4l2_fops;
+- vdev->cdev->owner = vdev->fops->owner;
++ vdev->cdev->owner = owner;
+ ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: cdev_add failed\n", __func__);
+@@ -557,11 +577,25 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
+ printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__,
+ name_base, nr, video_device_node_name(vdev));
+
+- /* Part 5: Activate this minor. The char device can now be used. */
++ /* Part 5: Register the entity. */
++ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) {
++ vdev->entity.type = MEDIA_ENTITY_TYPE_NODE;
++ vdev->entity.subtype = MEDIA_NODE_TYPE_V4L;
++ vdev->entity.name = vdev->name;
++ vdev->entity.v4l.major = VIDEO_MAJOR;
++ vdev->entity.v4l.minor = vdev->minor;
++ ret = media_device_register_entity(vdev->v4l2_dev->mdev,
++ &vdev->entity);
++ if (ret < 0)
++ printk(KERN_ERR "error\n"); /* TODO */
++ }
++
++ /* Part 6: Activate this minor. The char device can now be used. */
+ set_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ mutex_lock(&videodev_lock);
+ video_device[vdev->minor] = vdev;
+ mutex_unlock(&videodev_lock);
++
+ return 0;
+
+ cleanup:
+@@ -574,18 +608,7 @@ cleanup:
+ vdev->minor = -1;
+ return ret;
+ }
+-
+-int video_register_device(struct video_device *vdev, int type, int nr)
+-{
+- return __video_register_device(vdev, type, nr, 1);
+-}
+-EXPORT_SYMBOL(video_register_device);
+-
+-int video_register_device_no_warn(struct video_device *vdev, int type, int nr)
+-{
+- return __video_register_device(vdev, type, nr, 0);
+-}
+-EXPORT_SYMBOL(video_register_device_no_warn);
++EXPORT_SYMBOL(__video_register_device);
+
+ /**
+ * video_unregister_device - unregister a video4linux device
+@@ -600,6 +623,9 @@ void video_unregister_device(struct video_device *vdev)
+ if (!vdev || !video_is_registered(vdev))
+ return;
+
++ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev)
++ media_device_unregister_entity(&vdev->entity);
++
+ clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ device_unregister(&vdev->dev);
+ }
+diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
+index 5a7dc4a..beff03a 100644
+--- a/drivers/media/video/v4l2-device.c
++++ b/drivers/media/video/v4l2-device.c
+@@ -45,9 +45,8 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
+ if (!v4l2_dev->name[0])
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
+ dev->driver->name, dev_name(dev));
+- if (dev_get_drvdata(dev))
+- v4l2_warn(v4l2_dev, "Non-NULL drvdata on register\n");
+- dev_set_drvdata(dev, v4l2_dev);
++ if (!dev_get_drvdata(dev))
++ dev_set_drvdata(dev, v4l2_dev);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_register);
+@@ -70,10 +69,12 @@ EXPORT_SYMBOL_GPL(v4l2_device_set_name);
+
+ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
+ {
+- if (v4l2_dev->dev) {
++ if (v4l2_dev->dev == NULL)
++ return;
++
++ if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
+ dev_set_drvdata(v4l2_dev->dev, NULL);
+- v4l2_dev->dev = NULL;
+- }
++ v4l2_dev->dev = NULL;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
+
+@@ -113,32 +114,76 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
+ EXPORT_SYMBOL_GPL(v4l2_device_unregister);
+
+ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+- struct v4l2_subdev *sd)
++ struct v4l2_subdev *sd)
+ {
++ struct media_entity *entity = &sd->entity;
++ struct video_device *vdev;
++ int ret;
++
+ /* Check for valid input */
+ if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
+ return -EINVAL;
++
+ /* Warn if we apparently re-register a subdev */
+ WARN_ON(sd->v4l2_dev != NULL);
++
+ if (!try_module_get(sd->owner))
+ return -ENODEV;
++
++ /* Register the entity. */
++ if (v4l2_dev->mdev) {
++ ret = media_device_register_entity(v4l2_dev->mdev, entity);
++ if (ret < 0) {
++ module_put(sd->owner);
++ return ret;
++ }
++ }
++
+ sd->v4l2_dev = v4l2_dev;
+ spin_lock(&v4l2_dev->lock);
+ list_add_tail(&sd->list, &v4l2_dev->subdevs);
+ spin_unlock(&v4l2_dev->lock);
++
++ /* Register the device node. */
++ vdev = &sd->devnode;
++ strlcpy(vdev->name, sd->name, sizeof(vdev->name));
++ vdev->parent = v4l2_dev->dev;
++ vdev->fops = &v4l2_subdev_fops;
++ vdev->release = video_device_release_empty;
++ if (sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE) {
++ ret = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
++ sd->owner);
++ if (ret < 0) {
++ v4l2_device_unregister_subdev(sd);
++ return ret;
++ }
++ }
++
++ entity->v4l.major = VIDEO_MAJOR;
++ entity->v4l.minor = vdev->minor;
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+
+ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
+ {
++ struct v4l2_device *v4l2_dev;
++
+ /* return if it isn't registered */
+ if (sd == NULL || sd->v4l2_dev == NULL)
+ return;
+- spin_lock(&sd->v4l2_dev->lock);
++
++ v4l2_dev = sd->v4l2_dev;
++
++ spin_lock(&v4l2_dev->lock);
+ list_del(&sd->list);
+- spin_unlock(&sd->v4l2_dev->lock);
++ spin_unlock(&v4l2_dev->lock);
+ sd->v4l2_dev = NULL;
++
+ module_put(sd->owner);
++ if (v4l2_dev->mdev)
++ media_device_unregister_entity(&sd->entity);
++ video_unregister_device(&sd->devnode);
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
++
+diff --git a/drivers/media/video/v4l2-int-device.c b/drivers/media/video/v4l2-int-device.c
+index a935bae..483ee2e 100644
+--- a/drivers/media/video/v4l2-int-device.c
++++ b/drivers/media/video/v4l2-int-device.c
+@@ -32,7 +32,7 @@
+ static DEFINE_MUTEX(mutex);
+ static LIST_HEAD(int_list);
+
+-void v4l2_int_device_try_attach_all(void)
++static void __v4l2_int_device_try_attach_all(void)
+ {
+ struct v4l2_int_device *m, *s;
+
+@@ -66,6 +66,33 @@ void v4l2_int_device_try_attach_all(void)
+ }
+ }
+ }
++
++static struct v4l2_int_slave dummy_slave = {
++ /* Dummy pointer to avoid underflow in find_ioctl. */
++ .ioctls = (void *)sizeof(struct v4l2_int_ioctl_desc),
++ .num_ioctls = 0,
++};
++
++static struct v4l2_int_device dummy = {
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &dummy_slave,
++ },
++};
++
++struct v4l2_int_device *v4l2_int_device_dummy()
++{
++ return &dummy;
++}
++EXPORT_SYMBOL_GPL(v4l2_int_device_dummy);
++
++void v4l2_int_device_try_attach_all(void)
++{
++ mutex_lock(&mutex);
++ __v4l2_int_device_try_attach_all();
++ mutex_unlock(&mutex);
++}
++
+ EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
+
+ static int ioctl_sort_cmp(const void *a, const void *b)
+@@ -89,7 +116,7 @@ int v4l2_int_device_register(struct v4l2_int_device *d)
+ &ioctl_sort_cmp, NULL);
+ mutex_lock(&mutex);
+ list_add(&d->head, &int_list);
+- v4l2_int_device_try_attach_all();
++ __v4l2_int_device_try_attach_all();
+ mutex_unlock(&mutex);
+
+ return 0;
+diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
+new file mode 100644
+index 0000000..a8b9de1
+--- /dev/null
++++ b/drivers/media/video/v4l2-subdev.c
+@@ -0,0 +1,323 @@
++/*
++ * V4L2 subdevice support.
++ *
++ * Copyright (C) 2010 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/types.h>
++#include <linux/ioctl.h>
++#include <linux/slab.h>
++#include <linux/videodev2.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-fh.h>
++#include <media/v4l2-event.h>
++
++static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
++{
++ /* Allocate probe format and crop in the same memory block */
++ fh->probe_fmt = kzalloc((sizeof(*fh->probe_fmt) +
++ sizeof(*fh->probe_crop)) * sd->entity.num_pads,
++ GFP_KERNEL);
++ if (fh->probe_fmt == NULL)
++ return -ENOMEM;
++
++ fh->probe_crop = (struct v4l2_rect *)
++ (fh->probe_fmt + sd->entity.num_pads);
++
++ return 0;
++}
++
++static void subdev_fh_free(struct v4l2_subdev_fh *fh)
++{
++ kfree(fh->probe_fmt);
++ fh->probe_fmt = NULL;
++ fh->probe_crop = NULL;
++}
++
++static int subdev_open(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
++ struct media_entity *entity;
++ struct v4l2_subdev_fh *subdev_fh;
++ int ret;
++
++ if (!sd->initialized)
++ return -EAGAIN;
++
++ subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
++ if (subdev_fh == NULL)
++ return -ENOMEM;
++
++ ret = subdev_fh_init(subdev_fh, sd);
++ if (ret) {
++ kfree(subdev_fh);
++ return ret;
++ }
++
++ ret = v4l2_fh_init(&subdev_fh->vfh, vdev);
++ if (ret)
++ goto err;
++
++ if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) {
++ ret = v4l2_event_init(&subdev_fh->vfh);
++ if (ret)
++ goto err;
++
++ ret = v4l2_event_alloc(&subdev_fh->vfh, sd->nevents);
++ if (ret)
++ goto err;
++ }
++
++ v4l2_fh_add(&subdev_fh->vfh);
++ file->private_data = &subdev_fh->vfh;
++
++ entity = media_entity_get(&sd->entity);
++ if (!entity) {
++ ret = -EBUSY;
++ goto err;
++ }
++
++ return 0;
++
++err:
++ v4l2_fh_del(&subdev_fh->vfh);
++ v4l2_fh_exit(&subdev_fh->vfh);
++ subdev_fh_free(subdev_fh);
++ kfree(subdev_fh);
++
++ return ret;
++}
++
++static int subdev_close(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
++ struct v4l2_fh *vfh = file->private_data;
++ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
++
++ media_entity_put(&sd->entity);
++
++ v4l2_fh_del(vfh);
++ v4l2_fh_exit(vfh);
++ subdev_fh_free(subdev_fh);
++ kfree(subdev_fh);
++ file->private_data = NULL;
++
++ return 0;
++}
++
++static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
++ struct v4l2_fh *vfh = file->private_data;
++ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
++
++ switch (cmd) {
++ case VIDIOC_QUERYCTRL:
++ return v4l2_subdev_call(sd, core, queryctrl, arg);
++
++ case VIDIOC_QUERYMENU:
++ return v4l2_subdev_call(sd, core, querymenu, arg);
++
++ case VIDIOC_G_CTRL:
++ return v4l2_subdev_call(sd, core, g_ctrl, arg);
++
++ case VIDIOC_S_CTRL:
++ return v4l2_subdev_call(sd, core, s_ctrl, arg);
++
++ case VIDIOC_G_EXT_CTRLS:
++ return v4l2_subdev_call(sd, core, g_ext_ctrls, arg);
++
++ case VIDIOC_S_EXT_CTRLS:
++ return v4l2_subdev_call(sd, core, s_ext_ctrls, arg);
++
++ case VIDIOC_TRY_EXT_CTRLS:
++ return v4l2_subdev_call(sd, core, try_ext_ctrls, arg);
++
++ case VIDIOC_DQEVENT:
++ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
++ return -ENOIOCTLCMD;
++
++ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
++
++ case VIDIOC_SUBSCRIBE_EVENT:
++ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
++
++ case VIDIOC_UNSUBSCRIBE_EVENT:
++ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
++
++ case VIDIOC_SUBDEV_G_FMT: {
++ struct v4l2_subdev_pad_format *format = arg;
++
++ if (format->which != V4L2_SUBDEV_FORMAT_PROBE &&
++ format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
++ return -EINVAL;
++
++ if (format->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh,
++ format->pad, &format->format,
++ format->which);
++ }
++
++ case VIDIOC_SUBDEV_S_FMT: {
++ struct v4l2_subdev_pad_format *format = arg;
++
++ if (format->which != V4L2_SUBDEV_FORMAT_PROBE &&
++ format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
++ return -EINVAL;
++
++ if (format->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh,
++ format->pad, &format->format,
++ format->which);
++ }
++
++ case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
++ return v4l2_subdev_call(sd, video, g_frame_interval, arg);
++
++ case VIDIOC_SUBDEV_S_FRAME_INTERVAL:
++ return v4l2_subdev_call(sd, video, s_frame_interval, arg);
++
++ case VIDIOC_SUBDEV_G_CROP: {
++ struct v4l2_subdev_pad_crop *crop = arg;
++
++ if (crop->which != V4L2_SUBDEV_FORMAT_PROBE &&
++ crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
++ return -EINVAL;
++
++ if (crop->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
++ }
++
++ case VIDIOC_SUBDEV_S_CROP: {
++ struct v4l2_subdev_pad_crop *crop = arg;
++
++ if (crop->which != V4L2_SUBDEV_FORMAT_PROBE &&
++ crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
++ return -EINVAL;
++
++ if (crop->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
++ }
++
++ case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
++ struct v4l2_subdev_pad_mbus_code_enum *code = arg;
++
++ if (code->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh,
++ code);
++ }
++
++ case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
++ struct v4l2_subdev_frame_size_enum *fse = arg;
++
++ if (fse->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh,
++ fse);
++ }
++
++ case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
++ struct v4l2_subdev_frame_interval_enum *fie = arg;
++
++ if (fie->pad >= sd->entity.num_pads)
++ return -EINVAL;
++
++ return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
++ fie);
++ }
++
++ default:
++ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
++ }
++
++ return 0;
++}
++
++static long subdev_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ return video_usercopy(file, cmd, arg, subdev_do_ioctl);
++}
++
++static unsigned int subdev_poll(struct file *file, poll_table *wait)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
++ struct v4l2_fh *fh = file->private_data;
++
++ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
++ return POLLERR;
++
++ poll_wait(file, &fh->events->wait, wait);
++
++ if (v4l2_event_pending(fh))
++ return POLLPRI;
++
++ return 0;
++}
++
++const struct v4l2_file_operations v4l2_subdev_fops = {
++ .owner = THIS_MODULE,
++ .open = subdev_open,
++ .unlocked_ioctl = subdev_ioctl,
++ .release = subdev_close,
++ .poll = subdev_poll,
++};
++
++void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
++{
++ INIT_LIST_HEAD(&sd->list);
++ BUG_ON(!ops);
++ sd->ops = ops;
++ sd->v4l2_dev = NULL;
++ sd->flags = 0;
++ sd->name[0] = '\0';
++ sd->grp_id = 0;
++ sd->dev_priv = NULL;
++ sd->host_priv = NULL;
++ sd->entity.name = sd->name;
++ sd->entity.type = MEDIA_ENTITY_TYPE_SUBDEV;
++ sd->initialized = 1;
++}
++EXPORT_SYMBOL(v4l2_subdev_init);
++
++int v4l2_subdev_set_power(struct media_entity *entity, int power)
++{
++ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++
++ dev_dbg(entity->parent->dev,
++ "%s power%s\n", entity->name, power ? "on" : "off");
++
++ return v4l2_subdev_call(sd, core, s_power, power);
++}
++EXPORT_SYMBOL_GPL(v4l2_subdev_set_power);
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index 624b84c..d34d1eb 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -113,6 +113,7 @@ header-y += magic.h
+ header-y += major.h
+ header-y += map_to_7segment.h
+ header-y += matroxfb.h
++header-y += media.h
+ header-y += meye.h
+ header-y += minix_fs.h
+ header-y += mmtimer.h
+@@ -167,6 +168,8 @@ header-y += udf_fs_i.h
+ header-y += ultrasound.h
+ header-y += un.h
+ header-y += utime.h
++header-y += v4l2-mediabus.h
++header-y += v4l2-subdev.h
+ header-y += veth.h
+ header-y += videotext.h
+ header-y += x25.h
+diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
+index 6de90bf..72bdb30 100644
+--- a/include/linux/i2c/twl.h
++++ b/include/linux/i2c/twl.h
+@@ -620,11 +620,21 @@ int twl4030_sih_setup(int module);
+ #define TWL4030_VDAC_DEV_GRP 0x3B
+ #define TWL4030_VDAC_DEDICATED 0x3E
+ #define TWL4030_VAUX1_DEV_GRP 0x17
++#define TWL4030_VAUX1_TYPE 0x18
++#define TWL4030_VAUX1_REMAP 0x19
+ #define TWL4030_VAUX1_DEDICATED 0x1A
+ #define TWL4030_VAUX2_DEV_GRP 0x1B
++#define TWL4030_VAUX2_TYPE 0x1C
++#define TWL4030_VAUX2_REMAP 0x1D
+ #define TWL4030_VAUX2_DEDICATED 0x1E
+ #define TWL4030_VAUX3_DEV_GRP 0x1F
++#define TWL4030_VAUX3_TYPE 0x20
++#define TWL4030_VAUX3_REMAP 0x21
+ #define TWL4030_VAUX3_DEDICATED 0x22
++#define TWL4030_VAUX4_DEV_GRP 0x23
++#define TWL4030_VAUX4_TYPE 0x24
++#define TWL4030_VAUX4_REMAP 0x25
++#define TWL4030_VAUX4_DEDICATED 0x26
+
+ static inline int twl4030charger_usb_en(int enable) { return 0; }
+
+diff --git a/include/linux/media.h b/include/linux/media.h
+new file mode 100644
+index 0000000..4f39639
+--- /dev/null
++++ b/include/linux/media.h
+@@ -0,0 +1,77 @@
++#ifndef __LINUX_MEDIA_H
++#define __LINUX_MEDIA_H
++
++#include <linux/types.h>
++
++#define MEDIA_ENTITY_TYPE_NODE 1
++#define MEDIA_ENTITY_TYPE_SUBDEV 2
++
++#define MEDIA_NODE_TYPE_V4L 1
++#define MEDIA_NODE_TYPE_FB 2
++#define MEDIA_NODE_TYPE_ALSA 3
++#define MEDIA_NODE_TYPE_DVB 4
++
++#define MEDIA_SUBDEV_TYPE_VID_DECODER 1
++#define MEDIA_SUBDEV_TYPE_VID_ENCODER 2
++#define MEDIA_SUBDEV_TYPE_MISC 3
++
++#define MEDIA_PAD_TYPE_INPUT 1
++#define MEDIA_PAD_TYPE_OUTPUT 2
++
++#define MEDIA_LINK_FLAG_ACTIVE (1 << 0)
++#define MEDIA_LINK_FLAG_IMMUTABLE (1 << 1)
++
++#define MEDIA_ENTITY_ID_FLAG_NEXT (1 << 31)
++
++struct media_user_pad {
++ __u32 entity; /* entity ID */
++ __u32 index; /* pad index */
++ __u32 type; /* pad type */
++};
++
++struct media_user_entity {
++ __u32 id;
++ char name[32];
++ __u32 type;
++ __u32 subtype;
++ __u8 pads;
++ __u32 links;
++
++ union {
++ /* Node specifications */
++ struct {
++ __u32 major;
++ __u32 minor;
++ } v4l;
++ struct {
++ __u32 major;
++ __u32 minor;
++ } fb;
++ int alsa;
++ int dvb;
++
++ /* Sub-device specifications */
++ /* Nothing needed yet */
++ };
++};
++
++struct media_user_link {
++ struct media_user_pad source;
++ struct media_user_pad sink;
++ __u32 flags;
++};
++
++struct media_user_links {
++ __u32 entity;
++ /* Should have enough room for pads elements */
++ struct media_user_pad __user *pads;
++ /* Should have enough room for links elements */
++ struct media_user_link __user *links;
++};
++
++#define MEDIA_IOC_ENUM_ENTITIES _IOWR('M', 1, struct media_user_entity)
++#define MEDIA_IOC_ENUM_LINKS _IOWR('M', 2, struct media_user_links)
++#define MEDIA_IOC_SETUP_LINK _IOWR('M', 3, struct media_user_link)
++
++#endif /* __LINUX_MEDIA_H */
++
+diff --git a/include/linux/v4l2-mediabus.h b/include/linux/v4l2-mediabus.h
+new file mode 100644
+index 0000000..7f5d92c
+--- /dev/null
++++ b/include/linux/v4l2-mediabus.h
+@@ -0,0 +1,96 @@
++/*
++ * Media Bus API header
++ *
++ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __LINUX_V4L2_MEDIABUS_H
++#define __LINUX_V4L2_MEDIABUS_H
++
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++/*
++ * These pixel codes uniquely identify data formats on the media bus. Mostly
++ * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is
++ * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the
++ * data format is fixed. Additionally, "2X8" means that one pixel is transferred
++ * in two 8-bit samples, "BE" or "LE" specify in which order those samples are
++ * transferred over the bus: "LE" means that the least significant bits are
++ * transferred first, "BE" means that the most significant bits are transferred
++ * first, and "PADHI" and "PADLO" define which bits - low or high, in the
++ * incomplete high byte, are filled with padding bits.
++ */
++enum v4l2_mbus_pixelcode {
++ V4L2_MBUS_FMT_FIXED = 1,
++ V4L2_MBUS_FMT_YUYV8_2X8_LE,
++ V4L2_MBUS_FMT_YVYU8_2X8_LE,
++ V4L2_MBUS_FMT_YUYV8_2X8_BE,
++ V4L2_MBUS_FMT_YVYU8_2X8_BE,
++ V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
++ V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
++ V4L2_MBUS_FMT_RGB565_2X8_LE,
++ V4L2_MBUS_FMT_RGB565_2X8_BE,
++ V4L2_MBUS_FMT_SBGGR8_1X8,
++ V4L2_MBUS_FMT_SBGGR10_1X10,
++ V4L2_MBUS_FMT_GREY8_1X8,
++ V4L2_MBUS_FMT_Y10_1X10,
++ V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
++ V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
++ V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
++ V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
++ V4L2_MBUS_FMT_SGRBG8_1X8,
++ V4L2_MBUS_FMT_SGRBG10_1X10,
++ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
++ V4L2_MBUS_FMT_YUYV16_1X16,
++ V4L2_MBUS_FMT_UYVY16_1X16,
++ V4L2_MBUS_FMT_YVYU16_1X16,
++ V4L2_MBUS_FMT_VYUY16_1X16,
++ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
++ V4L2_MBUS_FMT_SRGGB10_1X10,
++ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
++ V4L2_MBUS_FMT_SGBRG10_1X10,
++ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
++};
++
++/**
++ * struct v4l2_mbus_framefmt - frame format on the media bus
++ * @width: frame width
++ * @height: frame height
++ * @code: data format code
++ * @field: used interlacing type
++ * @colorspace: colorspace of the data
++ */
++struct v4l2_mbus_framefmt {
++ __u32 width;
++ __u32 height;
++ __u32 code;
++ enum v4l2_field field;
++ enum v4l2_colorspace colorspace;
++};
++
++static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
++ const struct v4l2_mbus_framefmt *mbus_fmt)
++{
++ pix_fmt->width = mbus_fmt->width;
++ pix_fmt->height = mbus_fmt->height;
++ pix_fmt->field = mbus_fmt->field;
++ pix_fmt->colorspace = mbus_fmt->colorspace;
++}
++
++static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
++ const struct v4l2_pix_format *pix_fmt,
++ enum v4l2_mbus_pixelcode code)
++{
++ mbus_fmt->width = pix_fmt->width;
++ mbus_fmt->height = pix_fmt->height;
++ mbus_fmt->field = pix_fmt->field;
++ mbus_fmt->colorspace = pix_fmt->colorspace;
++ mbus_fmt->code = code;
++}
++
++#endif
+diff --git a/include/linux/v4l2-subdev.h b/include/linux/v4l2-subdev.h
+new file mode 100644
+index 0000000..e2e2c8d
+--- /dev/null
++++ b/include/linux/v4l2-subdev.h
+@@ -0,0 +1,104 @@
++/*
++ * V4L2 subdev userspace API
++ *
++ * Copyright (C) 2010 Nokia
++ *
++ * Contributors:
++ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef __LINUX_V4L2_SUBDEV_H
++#define __LINUX_V4L2_SUBDEV_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++#include <linux/v4l2-mediabus.h>
++
++enum v4l2_subdev_format {
++ V4L2_SUBDEV_FORMAT_PROBE = 0,
++ V4L2_SUBDEV_FORMAT_ACTIVE = 1,
++};
++
++/**
++ * struct v4l2_subdev_pad_format
++ */
++struct v4l2_subdev_pad_format {
++ __u32 which;
++ __u32 pad;
++ struct v4l2_mbus_framefmt format;
++};
++
++/**
++ * struct v4l2_subdev_pad_crop
++ */
++struct v4l2_subdev_pad_crop {
++ __u32 pad;
++ __u32 which;
++ struct v4l2_rect rect;
++ __u32 reserved[10];
++};
++
++/**
++ * struct v4l2_subdev_pad_frame_rate
++ */
++struct v4l2_subdev_frame_interval {
++ struct v4l2_fract interval;
++ __u32 reserved[6];
++};
++
++/**
++ * struct v4l2_subdev_pad_mbus_code_enum
++ */
++struct v4l2_subdev_pad_mbus_code_enum {
++ __u32 pad;
++ __u32 index;
++ __u32 code;
++ __u32 reserved[5];
++};
++
++struct v4l2_subdev_frame_size_enum {
++ __u32 index;
++ __u32 pad;
++ __u32 code;
++ __u32 min_width;
++ __u32 max_width;
++ __u32 min_height;
++ __u32 max_height;
++ __u32 reserved[9];
++};
++
++struct v4l2_subdev_frame_interval_enum {
++ __u32 index;
++ __u32 pad;
++ __u32 code;
++ __u32 width;
++ __u32 height;
++ struct v4l2_fract interval;
++ __u32 reserved[9];
++};
++
++#define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_pad_format)
++#define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_pad_format)
++#define VIDIOC_SUBDEV_G_FRAME_INTERVAL \
++ _IOWR('V', 6, struct v4l2_subdev_frame_interval)
++#define VIDIOC_SUBDEV_S_FRAME_INTERVAL \
++ _IOWR('V', 7, struct v4l2_subdev_frame_interval)
++#define VIDIOC_SUBDEV_ENUM_MBUS_CODE \
++ _IOWR('V', 8, struct v4l2_subdev_pad_mbus_code_enum)
++#define VIDIOC_SUBDEV_ENUM_FRAME_SIZE \
++ _IOWR('V', 9, struct v4l2_subdev_frame_size_enum)
++#define VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL \
++ _IOWR('V', 10, struct v4l2_subdev_frame_interval_enum)
++#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 11, struct v4l2_subdev_pad_crop)
++#define VIDIOC_SUBDEV_G_CROP _IOWR('V', 12, struct v4l2_subdev_pad_crop)
++
++#endif
++
+diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
+index 047f7e6..c9a8d99 100644
+--- a/include/linux/videodev2.h
++++ b/include/linux/videodev2.h
+@@ -929,6 +929,7 @@ struct v4l2_ext_controls {
+ #define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
+ #define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */
+ #define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator control class */
++#define V4L2_CTRL_CLASS_MODE 0x009c0000 /* Sensor mode information */
+
+ #define V4L2_CTRL_ID_MASK (0x0fffffff)
+ #define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
+@@ -1327,6 +1328,38 @@ enum v4l2_preemphasis {
+ #define V4L2_CID_TUNE_POWER_LEVEL (V4L2_CID_FM_TX_CLASS_BASE + 113)
+ #define V4L2_CID_TUNE_ANTENNA_CAPACITOR (V4L2_CID_FM_TX_CLASS_BASE + 114)
+
++/* Flash and privacy (indicator) light controls */
++#define V4L2_CID_FLASH_STROBE (V4L2_CID_CAMERA_CLASS_BASE+17)
++#define V4L2_CID_FLASH_TIMEOUT (V4L2_CID_CAMERA_CLASS_BASE+18)
++#define V4L2_CID_FLASH_INTENSITY (V4L2_CID_CAMERA_CLASS_BASE+19)
++#define V4L2_CID_TORCH_INTENSITY (V4L2_CID_CAMERA_CLASS_BASE+20)
++#define V4L2_CID_INDICATOR_INTENSITY (V4L2_CID_CAMERA_CLASS_BASE+21)
++
++#define V4L2_CID_TEST_PATTERN (V4L2_CTRL_CLASS_CAMERA | 0x107e)
++
++/* SMIA-type sensor information */
++#define V4L2_CID_MODE_CLASS_BASE (V4L2_CTRL_CLASS_MODE | 0x900)
++#define V4L2_CID_MODE_CLASS (V4L2_CTRL_CLASS_MODE | 1)
++#define V4L2_CID_MODE_FRAME_WIDTH (V4L2_CID_MODE_CLASS_BASE+1)
++#define V4L2_CID_MODE_FRAME_HEIGHT (V4L2_CID_MODE_CLASS_BASE+2)
++#define V4L2_CID_MODE_VISIBLE_WIDTH (V4L2_CID_MODE_CLASS_BASE+3)
++#define V4L2_CID_MODE_VISIBLE_HEIGHT (V4L2_CID_MODE_CLASS_BASE+4)
++#define V4L2_CID_MODE_PIXELCLOCK (V4L2_CID_MODE_CLASS_BASE+5)
++#define V4L2_CID_MODE_SENSITIVITY (V4L2_CID_MODE_CLASS_BASE+6)
++#define V4L2_CID_MODE_OPSYSCLOCK (V4L2_CID_MODE_CLASS_BASE+7)
++
++/* Control IDs specific to the AD5820 driver as defined by V4L2 */
++#define V4L2_CID_FOCUS_AD5820_BASE (V4L2_CTRL_CLASS_CAMERA | 0x10af)
++#define V4L2_CID_FOCUS_AD5820_RAMP_TIME (V4L2_CID_FOCUS_AD5820_BASE+0)
++#define V4L2_CID_FOCUS_AD5820_RAMP_MODE (V4L2_CID_FOCUS_AD5820_BASE+1)
++
++/* Control IDs specific to the ADP1653 flash driver as defined by V4L2 */
++#define V4L2_CID_FLASH_ADP1653_BASE (V4L2_CTRL_CLASS_CAMERA | 0x10f1)
++#define V4L2_CID_FLASH_ADP1653_FAULT_SCP (V4L2_CID_FLASH_ADP1653_BASE+0)
++#define V4L2_CID_FLASH_ADP1653_FAULT_OT (V4L2_CID_FLASH_ADP1653_BASE+1)
++#define V4L2_CID_FLASH_ADP1653_FAULT_TMR (V4L2_CID_FLASH_ADP1653_BASE+2)
++#define V4L2_CID_FLASH_ADP1653_FAULT_OV (V4L2_CID_FLASH_ADP1653_BASE+3)
++
+ /*
+ * T U N I N G
+ */
+diff --git a/include/media/ad5820.h b/include/media/ad5820.h
+new file mode 100644
+index 0000000..a0e0746
+--- /dev/null
++++ b/include/media/ad5820.h
+@@ -0,0 +1,63 @@
++/*
++ * include/media/ad5820.h
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ * Copyright (C) 2007 Texas Instruments
++ *
++ * Contact: Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ * Sakari Ailus <sakari.ailus@nokia.com>
++ *
++ * Based on af_d88.c by Texas Instruments.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#ifndef AD5820_H
++#define AD5820_H
++
++#include <linux/videodev2.h>
++
++#include <linux/i2c.h>
++
++#include <media/v4l2-subdev.h>
++
++#define AD5820_NAME "ad5820"
++#define AD5820_I2C_ADDR (0x18 >> 1)
++
++/* Register definitions */
++#define AD5820_POWER_DOWN (1 << 15)
++#define AD5820_DAC_SHIFT 4
++#define AD5820_RAMP_MODE_LINEAR (0 << 3)
++#define AD5820_RAMP_MODE_64_16 (1 << 3)
++
++struct ad5820_platform_data {
++ int (*s_power)(struct v4l2_subdev *subdev, int on);
++};
++
++#define to_ad5820_device(sd) container_of(sd, struct ad5820_device, subdev)
++
++struct ad5820_device {
++ struct v4l2_subdev subdev;
++ struct ad5820_platform_data *platform_data;
++
++ s32 focus_absolute; /* Current values of V4L2 controls */
++ s32 focus_ramp_time;
++ s32 focus_ramp_mode;
++
++ int power : 1;
++ int standby : 1;
++};
++
++#endif /* AD5820_H */
+diff --git a/include/media/adp1653.h b/include/media/adp1653.h
+new file mode 100644
+index 0000000..1b9deba
+--- /dev/null
++++ b/include/media/adp1653.h
+@@ -0,0 +1,87 @@
++/*
++ * include/media/adp1653.h
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef ADP1653_H
++#define ADP1653_H
++
++#include <linux/videodev2.h>
++#include <linux/i2c.h>
++#include <media/v4l2-subdev.h>
++
++#define ADP1653_NAME "adp1653"
++#define ADP1653_I2C_ADDR (0x60 >> 1)
++
++/* Register definitions */
++#define ADP1653_REG_OUT_SEL 0x00
++#define ADP1653_REG_OUT_SEL_HPLED_MAX 0x1f
++#define ADP1653_REG_OUT_SEL_HPLED_SHIFT 3
++#define ADP1653_REG_OUT_SEL_ILED_MAX 0x07
++#define ADP1653_REG_OUT_SEL_ILED_SHIFT 0
++
++#define ADP1653_REG_CONFIG 0x01
++#define ADP1653_REG_CONFIG_TMR_CFG (1 << 4)
++#define ADP1653_REG_CONFIG_TMR_SET_MAX 0x0f
++#define ADP1653_REG_CONFIG_TMR_SET_SHIFT 0
++
++#define ADP1653_REG_SW_STROBE 0x02
++#define ADP1653_REG_SW_STROBE_SW_STROBE (1 << 0)
++
++#define ADP1653_REG_FAULT 0x03
++#define ADP1653_REG_FAULT_FLT_SCP (1 << 3)
++#define ADP1653_REG_FAULT_FLT_OT (1 << 2)
++#define ADP1653_REG_FAULT_FLT_TMR (1 << 1)
++#define ADP1653_REG_FAULT_FLT_OV (1 << 0)
++
++#define ADP1653_INDICATOR_INTENSITY_MIN 0
++#define ADP1653_INDICATOR_INTENSITY_MAX ADP1653_REG_OUT_SEL_ILED_MAX
++#define ADP1653_TORCH_INTENSITY_MIN 0
++#define ADP1653_TORCH_INTENSITY_MAX 11
++#define ADP1653_FLASH_INTENSITY_MIN 12
++#define ADP1653_FLASH_INTENSITY_MAX ADP1653_REG_OUT_SEL_HPLED_MAX
++
++struct adp1653_platform_data {
++ int (*power)(struct v4l2_subdev *sd, int on);
++ int (*strobe)(struct v4l2_subdev *sd); /* If NULL, use SW strobe */
++
++ u32 max_flash_timeout; /* flash light timeout in us */
++ u32 max_flash_intensity; /* led intensity, flash mode */
++ u32 max_torch_intensity; /* led intensity, torch mode */
++ u32 max_indicator_intensity; /* indicator led intensity */
++};
++
++#define to_adp1653_flash(sd) container_of(sd, struct adp1653_flash, subdev)
++
++struct adp1653_flash {
++ struct v4l2_subdev subdev;
++ struct adp1653_platform_data *platform_data;
++
++ u32 flash_timeout;
++ u32 flash_intensity;
++ u32 torch_intensity;
++ u32 indicator_intensity;
++
++ int power; /* Requested power state */
++};
++
++#endif /* ADP1653_H */
+diff --git a/include/media/media-device.h b/include/media/media-device.h
+new file mode 100644
+index 0000000..44559e3
+--- /dev/null
++++ b/include/media/media-device.h
+@@ -0,0 +1,74 @@
++/*
++ * Media device support header.
++ *
++ * Copyright (C) 2009 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _MEDIA_DEVICE_H
++#define _MEDIA_DEVICE_H
++
++#include <linux/device.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/spinlock.h>
++
++#include <media/media-devnode.h>
++#include <media/media-entity.h>
++
++/* Each instance of a media device should create the media_device struct,
++ either stand-alone or embedded in a larger struct.
++
++ It allows easy access to sub-devices (see v4l2-subdev.h) and provides
++ basic media device-level support.
++ */
++
++#define MEDIA_DEVICE_NAME_SIZE (20 + 16)
++
++struct media_device {
++ /* dev->driver_data points to this struct.
++ Note: dev might be NULL if there is no parent device
++ as is the case with e.g. ISA devices. */
++ struct device *dev;
++ struct media_devnode devnode;
++
++ u32 entity_id;
++ struct list_head entities;
++
++ /* Spinlock is for the entities list. */
++ spinlock_t lock;
++ /* Mutex for graph changes. */
++ struct mutex graph_mutex;
++
++ /* unique device name, by default the driver name + bus ID */
++ char name[MEDIA_DEVICE_NAME_SIZE];
++};
++
++/* media_devnode to media_device */
++#define to_media_device(node) container_of(node, struct media_device, devnode)
++
++int __must_check media_device_register(struct media_device *mdev);
++void media_device_unregister(struct media_device *mdev);
++
++int __must_check media_device_register_entity(struct media_device *mdev,
++ struct media_entity *entity);
++void media_device_unregister_entity(struct media_entity *entity);
++
++/* Iterate over all entities. */
++#define media_device_for_each_entity(entity, mdev) \
++ list_for_each_entry(entity, &(mdev)->entities, list)
++
++#endif
+diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
+new file mode 100644
+index 0000000..48f2ac0
+--- /dev/null
++++ b/include/media/media-devnode.h
+@@ -0,0 +1,97 @@
++/*
++ * Media device node handling
++ *
++ * Copyright (C) 2009 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
++ *
++ * Common functions for media-related drivers to register and unregister media
++ * device nodes.
++ */
++#ifndef _MEDIA_DEVNODE_H
++#define _MEDIA_DEVNODE_H
++
++#include <linux/poll.h>
++#include <linux/fs.h>
++#include <linux/device.h>
++#include <linux/cdev.h>
++
++/* Media device node type. */
++#define MEDIA_TYPE_DEVICE 0
++#define MEDIA_TYPE_MAX 1
++
++/*
++ * Flag to mark the media_devnode struct as registered. Drivers must not touch
++ * this flag directly, it will be set and cleared by media_devnode_register and
++ * media_devnode_unregister.
++ */
++#define MEDIA_FLAG_REGISTERED 0
++
++struct media_file_operations {
++ struct module *owner;
++ ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
++ ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
++ unsigned int (*poll) (struct file *, struct poll_table_struct *);
++ long (*ioctl) (struct file *, unsigned int, unsigned long);
++ long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
++ unsigned long (*get_unmapped_area) (struct file *, unsigned long,
++ unsigned long, unsigned long, unsigned long);
++ int (*mmap) (struct file *, struct vm_area_struct *);
++ int (*open) (struct file *);
++ int (*release) (struct file *);
++};
++
++/**
++ * struct media_devnode - Media device node
++ * @parent: parent device
++ * @name: media device node name
++ * @type: node type, one of the MEDIA_TYPE_* constants
++ * @minor: device node minor number
++ * @num: device node number
++ * @flags: flags, combination of the MEDIA_FLAG_* constants
++ *
++ * This structure represents a media-related device node.
++ *
++ * The @parent is a physical device. It must be set by core or device drivers
++ * before registering the node.
++ *
++ * @name is a descriptive name exported through sysfs. It doesn't have to be
++ * unique.
++ *
++ * The device node number @num is used to create the kobject name and thus
++ * serves as a hint to udev when creating the device node.
++ */
++struct media_devnode {
++ /* device ops */
++ const struct media_file_operations *fops;
++
++ /* sysfs */
++ struct device dev; /* v4l device */
++ struct cdev *cdev; /* character device */
++ struct device *parent; /* device parent */
++
++ /* device info */
++ char name[32];
++ int type;
++
++ int minor;
++ u16 num;
++ unsigned long flags; /* Use bitops to access flags */
++
++ /* callbacks */
++ void (*release)(struct media_devnode *mdev);
++};
++
++/* dev to media_devnode */
++#define to_media_devnode(cd) container_of(cd, struct media_devnode, dev)
++
++int __must_check media_devnode_register(struct media_devnode *mdev, int type);
++void media_devnode_unregister(struct media_devnode *mdev);
++
++const char *media_devnode_type_name(int type);
++struct media_devnode *media_devnode_data(struct file *file);
++
++static inline int media_devnode_is_registered(struct media_devnode *mdev)
++{
++ return test_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
++}
++
++#endif /* _MEDIA_DEVNODE_H */
+diff --git a/include/media/media-entity.h b/include/media/media-entity.h
+new file mode 100644
+index 0000000..c4c3124
+--- /dev/null
++++ b/include/media/media-entity.h
+@@ -0,0 +1,112 @@
++#ifndef _MEDIA_ENTITY_H
++#define _MEDIA_ENTITY_H
++
++#include <linux/list.h>
++#include <linux/media.h>
++
++struct media_pipeline {
++};
++
++struct media_entity_link {
++ struct media_entity_pad *source;/* Source pad */
++ struct media_entity_pad *sink; /* Sink pad */
++ struct media_entity_link *other;/* Link in the reverse direction */
++ u32 flags; /* Link flags (MEDIA_LINK_FLAG_*) */
++};
++
++struct media_entity_pad {
++ struct media_entity *entity; /* Entity this pad belongs to */
++ u32 type; /* Pad type (MEDIA_PAD_TYPE_*) */
++ u32 index; /* Pad index in the entity pads array */
++};
++
++struct media_entity_operations {
++ int (*link_setup)(struct media_entity *entity,
++ const struct media_entity_pad *local,
++ const struct media_entity_pad *remote, u32 flags);
++ int (*set_power)(struct media_entity *entity, int power);
++};
++
++struct media_entity {
++ struct list_head list;
++ struct media_device *parent; /* Media device this entity belongs to*/
++ u32 id; /* Entity ID, unique in the parent media
++ * device context */
++ const char *name; /* Entity name */
++ u32 type; /* Entity type (MEDIA_ENTITY_TYPE_*) */
++ u32 subtype; /* Entity subtype (type-specific) */
++
++ u8 num_pads; /* Number of input and output pads */
++ u8 num_links; /* Number of existing links, both active
++ * and inactive */
++ u8 num_backlinks; /* Number of backlinks */
++ u8 max_links; /* Maximum number of links */
++
++ struct media_entity_pad *pads; /* Array of pads (num_pads elements) */
++ struct media_entity_link *links;/* Array of links (max_links elements)*/
++
++ const struct media_entity_operations *ops; /* Entity operations */
++
++ int lock_count; /* Lock count for the entity. */
++ int use_count; /* Use count for the entity. */
++
++ struct media_pipeline *pipe; /* Pipeline this entity belongs to. */
++
++ union {
++ /* Node specifications */
++ struct {
++ u32 major;
++ u32 minor;
++ } v4l;
++ struct {
++ u32 major;
++ u32 minor;
++ } fb;
++ int alsa;
++ int dvb;
++
++ /* Sub-device specifications */
++ /* Nothing needed yet */
++ };
++};
++
++#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16
++
++struct media_entity_graph {
++ struct {
++ struct media_entity *entity;
++ int link;
++ } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH];
++ int top;
++};
++
++extern int media_entity_init(struct media_entity *entity, u8 num_pads,
++ struct media_entity_pad *pads, u8 extra_links);
++extern void media_entity_cleanup(struct media_entity *entity);
++
++extern int media_entity_create_link(struct media_entity *source, u8 source_pad,
++ struct media_entity *sink, u8 sink_pad, u32 flags);
++extern int __media_entity_setup_link(struct media_entity_link *link, u32 flags);
++extern int media_entity_setup_link(struct media_entity_link *link, u32 flags);
++extern struct media_entity_link *media_entity_find_link(
++ struct media_entity_pad *source, struct media_entity_pad *sink);
++extern struct media_entity_pad *media_entity_remote_pad(
++ struct media_entity_pad *pad);
++
++struct media_entity *media_entity_get(struct media_entity *entity);
++void media_entity_put(struct media_entity *entity);
++
++void media_entity_graph_walk_start(struct media_entity_graph *graph,
++ struct media_entity *entity);
++struct media_entity *
++media_entity_graph_walk_next(struct media_entity_graph *graph);
++void media_entity_graph_lock(struct media_entity *entity,
++ struct media_pipeline *pipe);
++void media_entity_graph_unlock(struct media_entity *entity);
++
++#define media_entity_call(entity, operation, args...) \
++ (((entity)->ops && (entity)->ops->operation) ? \
++ (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD)
++
++#endif
++
+diff --git a/include/media/smiaregs.h b/include/media/smiaregs.h
+new file mode 100644
+index 0000000..9bfd7b2
+--- /dev/null
++++ b/include/media/smiaregs.h
+@@ -0,0 +1,155 @@
++/*
++ * include/media/smiaregs.h
++ *
++ * Copyright (C) 2008 Nokia Corporation
++ *
++ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
++ * Tuukka Toivonen <tuukka.o.toivonen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef SMIAREGS_H
++#define SMIAREGS_H
++
++#include <linux/i2c.h>
++#include <linux/types.h>
++#include <linux/videodev2.h>
++#include <linux/v4l2-subdev.h>
++
++struct v4l2_mbus_framefmt;
++struct v4l2_subdev_pad_mbus_code_enum;
++
++#define SMIA_MAGIC 0x531A0002
++
++struct smia_mode {
++ /* Physical sensor resolution and current image window */
++ __u16 sensor_width;
++ __u16 sensor_height;
++ __u16 sensor_window_origin_x;
++ __u16 sensor_window_origin_y;
++ __u16 sensor_window_width;
++ __u16 sensor_window_height;
++
++ /* Image data coming from sensor (after scaling) */
++ __u16 width;
++ __u16 height;
++ __u16 window_origin_x;
++ __u16 window_origin_y;
++ __u16 window_width;
++ __u16 window_height;
++
++ __u32 pixel_clock; /* in Hz */
++ __u32 opsys_clock; /* in Hz */
++ __u32 ext_clock; /* in Hz */
++ struct v4l2_fract timeperframe;
++ __u32 max_exp; /* Maximum exposure value */
++ __u32 pixel_format; /* V4L2_PIX_FMT_xxx */
++ __u32 sensitivity; /* 16.16 fixed point */
++};
++
++#define SMIA_REG_8BIT 1
++#define SMIA_REG_16BIT 2
++#define SMIA_REG_32BIT 4
++#define SMIA_REG_DELAY 100
++#define SMIA_REG_TERM 0xff
++struct smia_reg {
++ u16 type;
++ u16 reg; /* 16-bit offset */
++ u32 val; /* 8/16/32-bit value */
++};
++
++/* Possible struct smia_reglist types. */
++#define SMIA_REGLIST_STANDBY 0
++#define SMIA_REGLIST_POWERON 1
++#define SMIA_REGLIST_RESUME 2
++#define SMIA_REGLIST_STREAMON 3
++#define SMIA_REGLIST_STREAMOFF 4
++#define SMIA_REGLIST_DISABLED 5
++
++#define SMIA_REGLIST_MODE 10
++
++#define SMIA_REGLIST_LSC_ENABLE 100
++#define SMIA_REGLIST_LSC_DISABLE 101
++#define SMIA_REGLIST_ANR_ENABLE 102
++#define SMIA_REGLIST_ANR_DISABLE 103
++
++struct smia_reglist {
++ u32 type;
++ struct smia_mode mode;
++ struct smia_reg regs[];
++};
++
++#define SMIA_MAX_LEN 32
++struct smia_meta_reglist {
++ u32 magic;
++ char version[SMIA_MAX_LEN];
++ /*
++ * When we generate a reglist, the objcopy program will put
++ * here the list of addresses to reglists local to that object
++ * file.
++ *
++ * In the kernel they serve as offsets inside the the register
++ * list binary.
++ *
++ * The list must be NULL-terminated. That is expected by the
++ * drivers.
++ */
++ union {
++ uintptr_t offset;
++ struct smia_reglist *ptr;
++ } reglist[];
++};
++
++int smia_ctrl_find(const struct v4l2_queryctrl *ctrls, size_t nctrls, int id);
++int smia_ctrl_find_next(const struct v4l2_queryctrl *ctrls, size_t nctrls,
++ int id);
++int smia_ctrl_query(const struct v4l2_queryctrl *ctrls, size_t nctrls,
++ struct v4l2_queryctrl *a);
++int smia_mode_query(const __u32 *ctrls, size_t nctrls,
++ struct v4l2_queryctrl *a);
++int smia_mode_g_ctrl(const __u32 *ctrls, size_t nctrls, struct v4l2_control *vc,
++ const struct smia_mode *sm);
++
++int smia_reglist_import(struct smia_meta_reglist *meta);
++struct smia_reglist *smia_reglist_find_type(struct smia_meta_reglist *meta,
++ u16 type);
++struct smia_reglist **smia_reglist_first(struct smia_meta_reglist *meta);
++struct smia_reglist *smia_reglist_find_mode_fmt(struct smia_meta_reglist *meta,
++ struct v4l2_mbus_framefmt *fmt);
++struct smia_reglist *smia_reglist_find_mode_ival(
++ struct smia_meta_reglist *meta,
++ struct smia_reglist *current_reglist,
++ struct v4l2_fract *timeperframe);
++int smia_reglist_enum_mbus_code(struct smia_meta_reglist *meta,
++ struct v4l2_subdev_pad_mbus_code_enum *code);
++int smia_reglist_enum_frame_size(struct smia_meta_reglist *meta,
++ struct v4l2_subdev_frame_size_enum *fse);
++int smia_reglist_enum_frame_ival(struct smia_meta_reglist *meta,
++ struct v4l2_subdev_frame_interval_enum *fie);
++void smia_reglist_to_mbus(const struct smia_reglist *reglist,
++ struct v4l2_mbus_framefmt *fmt);
++
++int smia_i2c_read_reg(struct i2c_client *client, u16 data_length,
++ u16 reg, u32 *val);
++int smia_i2c_write_reg(struct i2c_client *client, u16 data_length, u16 reg,
++ u32 val);
++int smia_i2c_write_regs(struct i2c_client *client,
++ const struct smia_reg reglist[]);
++int smia_i2c_reglist_find_write(struct i2c_client *client,
++ struct smia_meta_reglist *meta, u16 type);
++
++#endif
+diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h
+index 037cd7b..6243147 100644
+--- a/include/media/soc_mediabus.h
++++ b/include/media/soc_mediabus.h
+@@ -12,8 +12,7 @@
+ #define SOC_MEDIABUS_H
+
+ #include <linux/videodev2.h>
+-
+-#include <media/v4l2-mediabus.h>
++#include <linux/v4l2-mediabus.h>
+
+ /**
+ * enum soc_mbus_packing - data packing types on the media-bus
+diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
+index 21b4428..66b2e7f 100644
+--- a/include/media/v4l2-chip-ident.h
++++ b/include/media/v4l2-chip-ident.h
+@@ -100,6 +100,9 @@ enum {
+ V4L2_IDENT_KS0127 = 1127,
+ V4L2_IDENT_KS0127B = 1128,
+
++ /* module adp1653: just ident 1653 */
++ V4L2_IDENT_ADP1653 = 1653,
++
+ /* module indycam: just ident 2000 */
+ V4L2_IDENT_INDYCAM = 2000,
+
+@@ -123,6 +126,9 @@ enum {
+ /* module cs5345: just ident 5345 */
+ V4L2_IDENT_CS5345 = 5345,
+
++ /* module ad5820: just ident 5820 */
++ V4L2_IDENT_AD5820 = 5820,
++
+ /* module tea6415c: just ident 6415 */
+ V4L2_IDENT_TEA6415C = 6415,
+
+@@ -288,6 +294,12 @@ enum {
+ /* HV7131R CMOS sensor: just ident 46000 */
+ V4L2_IDENT_HV7131R = 46000,
+
++ /* Toshiba ET8EK8 sensor; just ident 46001 */
++ V4L2_IDENT_ET8EK8 = 46001,
++
++ /* SMIA sensor; just ident 46002 */
++ V4L2_IDENT_SMIA = 46002,
++
+ /* Sharp RJ54N1CB0C, 0xCB0C = 51980 */
+ V4L2_IDENT_RJ54N1CB0C = 51980,
+
+diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
+index 98b3264..5cd3d61 100644
+--- a/include/media/v4l2-common.h
++++ b/include/media/v4l2-common.h
+@@ -139,29 +139,24 @@ struct v4l2_subdev_ops;
+ /* Load an i2c module and return an initialized v4l2_subdev struct.
+ Only call request_module if module_name != NULL.
+ The client_type argument is the name of the chip that's on the adapter. */
+-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
++struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+- int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs);
+
+-/* Load an i2c module and return an initialized v4l2_subdev struct.
+- Only call request_module if module_name != NULL.
+- The client_type argument is the name of the chip that's on the adapter. */
+-static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+- struct i2c_adapter *adapter,
+- const char *module_name, const char *client_type,
+- u8 addr, const unsigned short *probe_addrs)
+-{
+- return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, module_name,
+- client_type, 0, NULL, addr, probe_addrs);
+-}
+-
+ struct i2c_board_info;
+
++struct v4l2_subdev_i2c_board_info
++{
++ struct i2c_board_info *board_info;
++ int i2c_adapter_id;
++ char *module_name;
++};
++
+ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+- struct i2c_board_info *info, const unsigned short *probe_addrs);
++ struct i2c_board_info *info, const unsigned short *probe_addrs,
++ int enable_devnode);
+
+ /* Initialize an v4l2_subdev with data from an i2c_client struct */
+ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
+diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
+index bebe44b..447b154 100644
+--- a/include/media/v4l2-dev.h
++++ b/include/media/v4l2-dev.h
+@@ -16,13 +16,16 @@
+ #include <linux/mutex.h>
+ #include <linux/videodev2.h>
+
++#include <media/media-entity.h>
++
+ #define VIDEO_MAJOR 81
+
+ #define VFL_TYPE_GRABBER 0
+ #define VFL_TYPE_VBI 1
+ #define VFL_TYPE_RADIO 2
+ #define VFL_TYPE_VTX 3
+-#define VFL_TYPE_MAX 4
++#define VFL_TYPE_SUBDEV 4
++#define VFL_TYPE_MAX 5
+
+ struct v4l2_ioctl_callbacks;
+ struct video_device;
+@@ -56,6 +59,8 @@ struct v4l2_file_operations {
+
+ struct video_device
+ {
++ struct media_entity entity;
++
+ /* device ops */
+ const struct v4l2_file_operations *fops;
+
+@@ -95,18 +100,31 @@ struct video_device
+ const struct v4l2_ioctl_ops *ioctl_ops;
+ };
+
++#define media_entity_to_video_device(entity) \
++ container_of(entity, struct video_device, entity)
+ /* dev to video-device */
+ #define to_video_device(cd) container_of(cd, struct video_device, dev)
+
++int __must_check __video_register_device(struct video_device *vdev, int type,
++ int nr, int warn_if_nr_in_use, struct module *owner);
++
+ /* Register video devices. Note that if video_register_device fails,
+ the release() callback of the video_device structure is *not* called, so
+ the caller is responsible for freeing any data. Usually that means that
+ you call video_device_release() on failure. */
+-int __must_check video_register_device(struct video_device *vdev, int type, int nr);
++static inline int __must_check video_register_device(struct video_device *vdev,
++ int type, int nr)
++{
++ return __video_register_device(vdev, type, nr, 1, vdev->fops->owner);
++}
+
+ /* Same as video_register_device, but no warning is issued if the desired
+ device node number was already in use. */
+-int __must_check video_register_device_no_warn(struct video_device *vdev, int type, int nr);
++static inline int __must_check video_register_device_no_warn(
++ struct video_device *vdev, int type, int nr)
++{
++ return __video_register_device(vdev, type, nr, 0, vdev->fops->owner);
++}
+
+ /* Unregister video devices. Will do nothing if vdev == NULL or
+ video_is_registered() returns false. */
+diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
+index 5d5d550..83b5966 100644
+--- a/include/media/v4l2-device.h
++++ b/include/media/v4l2-device.h
+@@ -21,6 +21,7 @@
+ #ifndef _V4L2_DEVICE_H
+ #define _V4L2_DEVICE_H
+
++#include <media/media-device.h>
+ #include <media/v4l2-subdev.h>
+
+ /* Each instance of a V4L2 device should create the v4l2_device struct,
+@@ -37,6 +38,7 @@ struct v4l2_device {
+ Note: dev might be NULL if there is no parent device
+ as is the case with e.g. ISA devices. */
+ struct device *dev;
++ struct media_device *mdev;
+ /* used to keep track of the registered subdevs */
+ struct list_head subdevs;
+ /* lock this struct; can be used by the driver as well if this
+diff --git a/include/media/v4l2-int-device.h b/include/media/v4l2-int-device.h
+index fbf5855..2830ae1 100644
+--- a/include/media/v4l2-int-device.h
++++ b/include/media/v4l2-int-device.h
+@@ -84,6 +84,8 @@ struct v4l2_int_device {
+ void *priv;
+ };
+
++struct v4l2_int_device *v4l2_int_device_dummy(void);
++
+ void v4l2_int_device_try_attach_all(void);
+
+ int v4l2_int_device_register(struct v4l2_int_device *d);
+@@ -171,11 +173,13 @@ enum v4l2_int_ioctl_num {
+ * "Proper" V4L ioctls, as in struct video_device.
+ *
+ */
+- vidioc_int_enum_fmt_cap_num = 1,
++ vidioc_int_querycap_num = 1,
++ vidioc_int_enum_fmt_cap_num,
+ vidioc_int_g_fmt_cap_num,
+ vidioc_int_s_fmt_cap_num,
+ vidioc_int_try_fmt_cap_num,
+ vidioc_int_queryctrl_num,
++ vidioc_int_querymenu_num,
+ vidioc_int_g_ctrl_num,
+ vidioc_int_s_ctrl_num,
+ vidioc_int_cropcap_num,
+@@ -275,11 +279,13 @@ enum v4l2_int_ioctl_num {
+ return desc; \
+ }
+
++V4L2_INT_WRAPPER_1(querycap, struct v4l2_capability, *);
+ V4L2_INT_WRAPPER_1(enum_fmt_cap, struct v4l2_fmtdesc, *);
+ V4L2_INT_WRAPPER_1(g_fmt_cap, struct v4l2_format, *);
+ V4L2_INT_WRAPPER_1(s_fmt_cap, struct v4l2_format, *);
+ V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *);
+ V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
++V4L2_INT_WRAPPER_1(querymenu, struct v4l2_querymenu, *);
+ V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
+ V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
+ V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
+diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
+deleted file mode 100644
+index 865cda7..0000000
+--- a/include/media/v4l2-mediabus.h
++++ /dev/null
+@@ -1,82 +0,0 @@
+-/*
+- * Media Bus API header
+- *
+- * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#ifndef V4L2_MEDIABUS_H
+-#define V4L2_MEDIABUS_H
+-
+-/*
+- * These pixel codes uniquely identify data formats on the media bus. Mostly
+- * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is
+- * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the
+- * data format is fixed. Additionally, "2X8" means that one pixel is transferred
+- * in two 8-bit samples, "BE" or "LE" specify in which order those samples are
+- * transferred over the bus: "LE" means that the least significant bits are
+- * transferred first, "BE" means that the most significant bits are transferred
+- * first, and "PADHI" and "PADLO" define which bits - low or high, in the
+- * incomplete high byte, are filled with padding bits.
+- */
+-enum v4l2_mbus_pixelcode {
+- V4L2_MBUS_FMT_FIXED = 1,
+- V4L2_MBUS_FMT_YUYV8_2X8_LE,
+- V4L2_MBUS_FMT_YVYU8_2X8_LE,
+- V4L2_MBUS_FMT_YUYV8_2X8_BE,
+- V4L2_MBUS_FMT_YVYU8_2X8_BE,
+- V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+- V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+- V4L2_MBUS_FMT_RGB565_2X8_LE,
+- V4L2_MBUS_FMT_RGB565_2X8_BE,
+- V4L2_MBUS_FMT_SBGGR8_1X8,
+- V4L2_MBUS_FMT_SBGGR10_1X10,
+- V4L2_MBUS_FMT_GREY8_1X8,
+- V4L2_MBUS_FMT_Y10_1X10,
+- V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
+- V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
+- V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
+- V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
+- V4L2_MBUS_FMT_SGRBG8_1X8,
+-};
+-
+-/**
+- * struct v4l2_mbus_framefmt - frame format on the media bus
+- * @width: frame width
+- * @height: frame height
+- * @code: data format code
+- * @field: used interlacing type
+- * @colorspace: colorspace of the data
+- */
+-struct v4l2_mbus_framefmt {
+- __u32 width;
+- __u32 height;
+- enum v4l2_mbus_pixelcode code;
+- enum v4l2_field field;
+- enum v4l2_colorspace colorspace;
+-};
+-
+-static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
+- const struct v4l2_mbus_framefmt *mbus_fmt)
+-{
+- pix_fmt->width = mbus_fmt->width;
+- pix_fmt->height = mbus_fmt->height;
+- pix_fmt->field = mbus_fmt->field;
+- pix_fmt->colorspace = mbus_fmt->colorspace;
+-}
+-
+-static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
+- const struct v4l2_pix_format *pix_fmt,
+- enum v4l2_mbus_pixelcode code)
+-{
+- mbus_fmt->width = pix_fmt->width;
+- mbus_fmt->height = pix_fmt->height;
+- mbus_fmt->field = pix_fmt->field;
+- mbus_fmt->colorspace = pix_fmt->colorspace;
+- mbus_fmt->code = code;
+-}
+-
+-#endif
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index 02c6f4d..f554be7 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -21,8 +21,12 @@
+ #ifndef _V4L2_SUBDEV_H
+ #define _V4L2_SUBDEV_H
+
++#include <linux/v4l2-mediabus.h>
++#include <linux/v4l2-subdev.h>
++#include <media/media-entity.h>
+ #include <media/v4l2-common.h>
+-#include <media/v4l2-mediabus.h>
++#include <media/v4l2-dev.h>
++#include <media/v4l2-fh.h>
+
+ /* generic v4l2_device notify callback notification values */
+ #define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32)
+@@ -36,7 +40,10 @@
+
+ struct v4l2_device;
+ struct v4l2_subdev;
++struct v4l2_subdev_fh;
+ struct tuner_setup;
++struct v4l2_fh;
++struct v4l2_event_subscription;
+
+ /* decode_vbi_line */
+ struct v4l2_decode_vbi_line {
+@@ -133,6 +140,10 @@ struct v4l2_subdev_core_ops {
+ int (*s_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg);
+ #endif
+ int (*s_power)(struct v4l2_subdev *sd, int on);
++ int (*subscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub);
++ int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh,
++ struct v4l2_event_subscription *sub);
+ };
+
+ /* s_mode: switch the tuner to a specific tuner mode. Replacement of s_radio.
+@@ -234,6 +245,10 @@ struct v4l2_subdev_video_ops {
+ int (*s_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
+ int (*g_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
+ int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
++ int (*g_frame_interval)(struct v4l2_subdev *sd,
++ struct v4l2_subdev_frame_interval *interval);
++ int (*s_frame_interval)(struct v4l2_subdev *sd,
++ struct v4l2_subdev_frame_interval *interval);
+ int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
+ int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
+ int (*enum_dv_presets) (struct v4l2_subdev *sd,
+@@ -389,6 +404,27 @@ struct v4l2_subdev_ir_ops {
+ struct v4l2_subdev_ir_parameters *params);
+ };
+
++struct v4l2_subdev_pad_ops {
++ int (*enum_mbus_code)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_mbus_code_enum *code);
++ int (*enum_frame_size)(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_size_enum *fse);
++ int (*enum_frame_interval)(struct v4l2_subdev *sd,
++ struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_frame_interval_enum *fie);
++ int (*get_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which);
++ int (*set_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
++ enum v4l2_subdev_format which);
++ int (*set_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_crop *crop);
++ int (*get_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
++ struct v4l2_subdev_pad_crop *crop);
++};
++
+ struct v4l2_subdev_ops {
+ const struct v4l2_subdev_core_ops *core;
+ const struct v4l2_subdev_tuner_ops *tuner;
+@@ -397,19 +433,25 @@ struct v4l2_subdev_ops {
+ const struct v4l2_subdev_vbi_ops *vbi;
+ const struct v4l2_subdev_ir_ops *ir;
+ const struct v4l2_subdev_sensor_ops *sensor;
++ const struct v4l2_subdev_pad_ops *pad;
+ };
+
+ #define V4L2_SUBDEV_NAME_SIZE 32
+
+ /* Set this flag if this subdev is a i2c device. */
+-#define V4L2_SUBDEV_FL_IS_I2C (1U << 0)
++#define V4L2_SUBDEV_FL_IS_I2C (1U << 0)
+ /* Set this flag if this subdev is a spi device. */
+-#define V4L2_SUBDEV_FL_IS_SPI (1U << 1)
++#define V4L2_SUBDEV_FL_IS_SPI (1U << 1)
++#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2)
++/* Set this flag if this subdev generates events. */
++#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
+
+ /* Each instance of a subdev driver should create this struct, either
+ stand-alone or embedded in a larger struct.
+ */
+ struct v4l2_subdev {
++ struct media_entity entity;
++
+ struct list_head list;
+ struct module *owner;
+ u32 flags;
+@@ -420,33 +462,71 @@ struct v4l2_subdev {
+ /* can be used to group similar subdevs, value is driver-specific */
+ u32 grp_id;
+ /* pointer to private data */
+- void *priv;
++ void *dev_priv;
++ void *host_priv;
++ /* subdev device node */
++ struct video_device devnode;
++ unsigned int initialized;
++ /* number of events to be allocated on open */
++ unsigned int nevents;
+ };
+
++#define media_entity_to_v4l2_subdev(ent) \
++ container_of(ent, struct v4l2_subdev, entity)
++#define vdev_to_v4l2_subdev(vdev) \
++ container_of(vdev, struct v4l2_subdev, devnode)
++
++/*
++ * Used for storing subdev information per file handle
++ */
++struct v4l2_subdev_fh {
++ struct v4l2_fh vfh;
++ struct v4l2_mbus_framefmt *probe_fmt;
++ struct v4l2_rect *probe_crop;
++};
++
++#define to_v4l2_subdev_fh(fh) \
++ container_of(fh, struct v4l2_subdev_fh, vfh)
++
++static inline struct v4l2_mbus_framefmt *
++v4l2_subdev_get_probe_format(struct v4l2_subdev_fh *fh, unsigned int pad)
++{
++ return &fh->probe_fmt[pad];
++}
++
++static inline struct v4l2_rect *
++v4l2_subdev_get_probe_crop(struct v4l2_subdev_fh *fh, unsigned int pad)
++{
++ return &fh->probe_crop[pad];
++}
++
++extern const struct v4l2_file_operations v4l2_subdev_fops;
++
+ static inline void v4l2_set_subdevdata(struct v4l2_subdev *sd, void *p)
+ {
+- sd->priv = p;
++ sd->dev_priv = p;
+ }
+
+ static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd)
+ {
+- return sd->priv;
++ return sd->dev_priv;
+ }
+
+-static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
+- const struct v4l2_subdev_ops *ops)
++static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p)
+ {
+- INIT_LIST_HEAD(&sd->list);
+- /* ops->core MUST be set */
+- BUG_ON(!ops || !ops->core);
+- sd->ops = ops;
+- sd->v4l2_dev = NULL;
+- sd->flags = 0;
+- sd->name[0] = '\0';
+- sd->grp_id = 0;
+- sd->priv = NULL;
++ sd->host_priv = p;
+ }
+
++static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd)
++{
++ return sd->host_priv;
++}
++
++void v4l2_subdev_init(struct v4l2_subdev *sd,
++ const struct v4l2_subdev_ops *ops);
++
++int v4l2_subdev_set_power(struct media_entity *entity, int power);
++
+ /* Call an ops of a v4l2_subdev, doing the right checks against
+ NULL pointers.
+
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-bt-autosuspend.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-bt-autosuspend.patch
new file mode 100644
index 0000000000..68a0ecc19d
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-bt-autosuspend.patch
@@ -0,0 +1,13 @@
+Index: linux-2.6.34/drivers/bluetooth/btusb.c
+===================================================================
+--- linux-2.6.34.orig/drivers/bluetooth/btusb.c
++++ linux-2.6.34/drivers/bluetooth/btusb.c
+@@ -1020,6 +1020,8 @@ static int btusb_probe(struct usb_interf
+ return err;
+ }
+
++ usb_enable_autosuspend(data->udev);
++
+ usb_set_intfdata(intf, data);
+
+ return 0;
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-musb-ignore-spurious-SESSREQ-interrupts.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-musb-ignore-spurious-SESSREQ-interrupts.patch
new file mode 100644
index 0000000000..48fd261d9d
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-musb-ignore-spurious-SESSREQ-interrupts.patch
@@ -0,0 +1,42 @@
+From 5a73bcd7e510891edf7964cd8ea7e1c0d1a7ec8a Mon Sep 17 00:00:00 2001
+From: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+Date: Thu, 9 Sep 2010 16:55:21 +0300
+Subject: [PATCH 3/5] usb: musb: ignore spurious SESSREQ interrupts
+
+The charger detection may cause spurious SESSREQ interrupts.
+This will ignore any SESSREQ interrupt if musb is B-device.
+
+Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+---
+ drivers/usb/musb/musb_core.c | 15 ++++++++++-----
+ 1 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 9504484..e4d0582 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -559,11 +559,16 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ * - ... to A_WAIT_BCON.
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+ */
+- musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+- musb->ep0_stage = MUSB_EP0_START;
+- musb->xceiv->state = OTG_STATE_A_IDLE;
+- MUSB_HST_MODE(musb);
+- musb_set_vbus(musb, 1);
++ if ((devctl & MUSB_DEVCTL_VBUS)
++ && !(devctl & MUSB_DEVCTL_BDEVICE)) {
++ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
++ musb->ep0_stage = MUSB_EP0_START;
++ musb->xceiv->state = OTG_STATE_A_IDLE;
++ MUSB_HST_MODE(musb);
++ musb_set_vbus(musb, 1);
++ } else {
++ DBG(5, "discarding SESSREQ INT\n");
++ }
+
+ handled = IRQ_HANDLED;
+ }
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-uvc-autosuspend.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-uvc-autosuspend.patch
new file mode 100644
index 0000000000..34870e56f9
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6-usb-uvc-autosuspend.patch
@@ -0,0 +1,19 @@
+commit 9d4c919bcfa794c054cc33155c7e3c53ac2c5684
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Sun Jul 19 02:24:49 2009 +0100
+
+ Enable autosuspend on UVC by default
+
+Index: linux-2.6.34/drivers/media/video/uvc/uvc_driver.c
+===================================================================
+--- linux-2.6.34.orig/drivers/media/video/uvc/uvc_driver.c
++++ linux-2.6.34/drivers/media/video/uvc/uvc_driver.c
+@@ -1814,6 +1814,8 @@ static int uvc_probe(struct usb_interfac
+ "supported.\n", ret);
+ }
+
++ usb_enable_autosuspend(udev);
++
+ uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ return 0;
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.29-dont-wait-for-mouse.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.29-dont-wait-for-mouse.patch
new file mode 100644
index 0000000000..eb6dbae0b3
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.29-dont-wait-for-mouse.patch
@@ -0,0 +1,47 @@
+From dce8113d033975f56630cf6d2a6a908cfb66059d Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 13:12:16 -0700
+Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
+
+In the non-initrd case, we wait for all devices to finish their
+probing before we try to mount the rootfs.
+In practice, this means that we end up waiting 2 extra seconds for
+the PS/2 mouse probing even though the root holding device has been
+ready since a long time.
+
+The previous two patches in this series made the RAID autodetect code
+do it's own "wait for probing to be done" code, and added
+"wait and retry" functionality in case the root device isn't actually
+available.
+
+These two changes should make it safe to remove the delay itself,
+and this patch does this. On my test laptop, this reduces the boot time
+by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+---
+ init/do_mounts.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+Index: linux-2.6.34/init/do_mounts.c
+===================================================================
+--- linux-2.6.34.orig/init/do_mounts.c
++++ linux-2.6.34/init/do_mounts.c
+@@ -373,6 +373,7 @@ void __init prepare_namespace(void)
+ ssleep(root_delay);
+ }
+
++#if 0
+ /*
+ * wait for the known devices to complete their probing
+ *
+@@ -381,6 +382,8 @@ void __init prepare_namespace(void)
+ * for the touchpad of a laptop to initialize.
+ */
+ wait_for_device_probe();
++#endif
++ async_synchronize_full();
+
+ md_run_setup();
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-ahci-alpm-accounting.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-ahci-alpm-accounting.patch
new file mode 100644
index 0000000000..287b6794b4
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-ahci-alpm-accounting.patch
@@ -0,0 +1,300 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Subject: [PATCH] libata: Add ALPM power state accounting to the AHCI driver
+
+PowerTOP wants to be able to show the user how effective the ALPM link
+power management is for the user. ALPM is worth around 0.5W on a quiet
+link; PowerTOP wants to be able to find cases where the "quiet link" isn't
+actually quiet.
+
+This patch adds state accounting functionality to the AHCI driver for
+PowerTOP to use.
+The parts of the patch are
+1) the sysfs logic of exposing the stats for each state in sysfs
+2) the basic accounting logic that gets update on link change interrupts
+ (or when the user accesses the info from sysfs)
+3) a "accounting enable" flag; in order to get the accounting to work,
+ the driver needs to get phyrdy interrupts on link status changes.
+ Normally and currently this is disabled by the driver when ALPM is
+ on (to reduce overhead); when PowerTOP is running this will need
+ to be on to get usable statistics... hence the sysfs tunable.
+
+The PowerTOP output currently looks like this:
+
+Recent SATA AHCI link activity statistics
+Active Partial Slumber Device name
+ 0.5% 99.5% 0.0% host0
+
+(work to resolve "host0" to a more human readable name is in progress)
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 7113c57..6a3a291 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -261,6 +261,13 @@ struct ahci_em_priv {
+ unsigned long led_state;
+ };
+
++enum ahci_port_states {
++ AHCI_PORT_NOLINK = 0,
++ AHCI_PORT_ACTIVE = 1,
++ AHCI_PORT_PARTIAL = 2,
++ AHCI_PORT_SLUMBER = 3
++};
++
+ struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+@@ -279,6 +286,14 @@ struct ahci_port_priv {
+ int fbs_last_dev; /* save FBS.DEV of last FIS */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
++
++ /* ALPM accounting state and stats */
++ unsigned int accounting_active:1;
++ u64 active_jiffies;
++ u64 partial_jiffies;
++ u64 slumber_jiffies;
++ int previous_state;
++ int previous_jiffies;
+ };
+
+ struct ahci_host_priv {
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 81e772a..c3250ee 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -59,6 +59,20 @@ MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ig
+ static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+ static void ahci_disable_alpm(struct ata_port *ap);
++static ssize_t ahci_alpm_show_active(struct device *dev,
++ struct device_attribute *attr, char *buf);
++static ssize_t ahci_alpm_show_slumber(struct device *dev,
++ struct device_attribute *attr, char *buf);
++static ssize_t ahci_alpm_show_partial(struct device *dev,
++ struct device_attribute *attr, char *buf);
++
++static ssize_t ahci_alpm_show_accounting(struct device *dev,
++ struct device_attribute *attr, char *buf);
++
++static ssize_t ahci_alpm_set_accounting(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count);
++
+ static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+@@ -118,6 +132,12 @@ static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+ static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+ static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
++static DEVICE_ATTR(ahci_alpm_active, S_IRUGO, ahci_alpm_show_active, NULL);
++static DEVICE_ATTR(ahci_alpm_partial, S_IRUGO, ahci_alpm_show_partial, NULL);
++static DEVICE_ATTR(ahci_alpm_slumber, S_IRUGO, ahci_alpm_show_slumber, NULL);
++static DEVICE_ATTR(ahci_alpm_accounting, S_IRUGO | S_IWUSR,
++ ahci_alpm_show_accounting, ahci_alpm_set_accounting);
++
+ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
+ ahci_read_em_buffer, ahci_store_em_buffer);
+
+@@ -129,6 +149,10 @@ static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_ahci_host_cap2,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
++ &dev_attr_ahci_alpm_active,
++ &dev_attr_ahci_alpm_partial,
++ &dev_attr_ahci_alpm_slumber,
++ &dev_attr_ahci_alpm_accounting,
+ &dev_attr_em_buffer,
+ NULL
+ };
+@@ -734,9 +758,14 @@ static int ahci_enable_alpm(struct ata_port *ap,
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
++ *
++ * However, when accounting_active is set, we do want
++ * the interrupts for accounting purposes.
+ */
+- pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
++ if (!pp->accounting_active) {
++ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
++ }
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+@@ -1645,6 +1674,162 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+ ata_port_abort(ap);
+ }
+
++static int get_current_alpm_state(struct ata_port *ap)
++{
++ u32 status = 0;
++
++ ahci_scr_read(&ap->link, SCR_STATUS, &status);
++
++ /* link status is in bits 11-8 */
++ status = status >> 8;
++ status = status & 0x7;
++
++ if (status == 6)
++ return AHCI_PORT_SLUMBER;
++ if (status == 2)
++ return AHCI_PORT_PARTIAL;
++ if (status == 1)
++ return AHCI_PORT_ACTIVE;
++ return AHCI_PORT_NOLINK;
++}
++
++static void account_alpm_stats(struct ata_port *ap)
++{
++ struct ahci_port_priv *pp;
++
++ int new_state;
++ u64 new_jiffies, jiffies_delta;
++
++ if (ap == NULL)
++ return;
++ pp = ap->private_data;
++
++ if (!pp) return;
++
++ new_state = get_current_alpm_state(ap);
++ new_jiffies = jiffies;
++
++ jiffies_delta = new_jiffies - pp->previous_jiffies;
++
++ switch (pp->previous_state) {
++ case AHCI_PORT_NOLINK:
++ pp->active_jiffies = 0;
++ pp->partial_jiffies = 0;
++ pp->slumber_jiffies = 0;
++ break;
++ case AHCI_PORT_ACTIVE:
++ pp->active_jiffies += jiffies_delta;
++ break;
++ case AHCI_PORT_PARTIAL:
++ pp->partial_jiffies += jiffies_delta;
++ break;
++ case AHCI_PORT_SLUMBER:
++ pp->slumber_jiffies += jiffies_delta;
++ break;
++ default:
++ break;
++ }
++ pp->previous_state = new_state;
++ pp->previous_jiffies = new_jiffies;
++}
++
++static ssize_t ahci_alpm_show_active(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp;
++
++ if (!ap || ata_port_is_dummy(ap))
++ return -EINVAL;
++ pp = ap->private_data;
++ account_alpm_stats(ap);
++
++ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->active_jiffies));
++}
++
++static ssize_t ahci_alpm_show_partial(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp;
++
++ if (!ap || ata_port_is_dummy(ap))
++ return -EINVAL;
++
++ pp = ap->private_data;
++ account_alpm_stats(ap);
++
++ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->partial_jiffies));
++}
++
++static ssize_t ahci_alpm_show_slumber(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp;
++
++ if (!ap || ata_port_is_dummy(ap))
++ return -EINVAL;
++
++ pp = ap->private_data;
++
++ account_alpm_stats(ap);
++
++ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->slumber_jiffies));
++}
++
++static ssize_t ahci_alpm_show_accounting(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp;
++
++ if (!ap || ata_port_is_dummy(ap))
++ return -EINVAL;
++
++ pp = ap->private_data;
++
++ return sprintf(buf, "%u\n", pp->accounting_active);
++}
++
++static ssize_t ahci_alpm_set_accounting(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long flags;
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp;
++ void __iomem *port_mmio;
++
++ if (!ap || ata_port_is_dummy(ap))
++ return 1;
++
++ pp = ap->private_data;
++ port_mmio = ahci_port_base(ap);
++
++ if (!pp)
++ return 1;
++ if (buf[0] == '0')
++ pp->accounting_active = 0;
++ if (buf[0] == '1')
++ pp->accounting_active = 1;
++
++ /* we need to enable the PHYRDY interrupt when we want accounting */
++ if (pp->accounting_active) {
++ spin_lock_irqsave(ap->lock, flags);
++ pp->intr_mask |= PORT_IRQ_PHYRDY;
++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
++ spin_unlock_irqrestore(ap->lock, flags);
++ }
++ return count;
++}
++
++
+ static void ahci_port_intr(struct ata_port *ap)
+ {
+ void __iomem *port_mmio = ahci_port_base(ap);
+@@ -1670,6 +1855,7 @@ static void ahci_port_intr(struct ata_port *ap)
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
++ account_alpm_stats(ap);
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-vfs-tracepoints.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-vfs-tracepoints.patch
new file mode 100644
index 0000000000..b1509f0df3
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.33-vfs-tracepoints.patch
@@ -0,0 +1,116 @@
+From f56c995174cf42d84fdad06beebacd56e700b05d Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 25 Oct 2009 15:37:04 -0700
+Subject: [PATCH] vfs: Add a trace point in the mark_inode_dirty function
+
+PowerTOP would like to be able to show who is keeping the disk
+busy by dirtying data. The most logical spot for this is in the vfs
+in the mark_inode_dirty() function, doing this on the block level
+is not possible because by the time the IO hits the block layer the
+guilty party can no longer be found ("kjournald" and "pdflush" are not
+useful answers to "who caused this file to be dirty).
+
+The trace point follows the same logic/style as the block_dump code
+and pretty much dumps the same data, just not to dmesg (and thus to
+/var/log/messages) but via the trace events streams.
+
+Signed-of-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ fs/fs-writeback.c | 4 +++
+ fs/inode.c | 4 +++
+ include/trace/events/vfs.h | 53 ++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 61 insertions(+), 0 deletions(-)
+ create mode 100644 include/trace/events/vfs.h
+
+Index: linux-2.6.34/fs/fs-writeback.c
+===================================================================
+--- linux-2.6.34.orig/fs/fs-writeback.c
++++ linux-2.6.34/fs/fs-writeback.c
+@@ -26,6 +26,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
++#include <trace/events/vfs.h>
+ #include "internal.h"
+
+ #define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
+@@ -1076,6 +1077,9 @@ void __mark_inode_dirty(struct inode *in
+ sb->s_op->dirty_inode(inode);
+ }
+
++ if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES))
++ trace_dirty_inode(inode, current);
++
+ /*
+ * make sure that changes are seen by all cpus before we test i_state
+ * -- mikulas
+Index: linux-2.6.34/fs/inode.c
+===================================================================
+--- linux-2.6.34.orig/fs/inode.c
++++ linux-2.6.34/fs/inode.c
+@@ -1626,3 +1626,7 @@ void inode_init_owner(struct inode *inod
+ inode->i_mode = mode;
+ }
+ EXPORT_SYMBOL(inode_init_owner);
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/vfs.h>
++
+Index: linux-2.6.34/include/trace/events/vfs.h
+===================================================================
+--- /dev/null
++++ linux-2.6.34/include/trace/events/vfs.h
+@@ -0,0 +1,53 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM vfs
++
++#if !defined(_TRACE_VFS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_VFS_H
++
++/*
++ * Tracepoint for dirtying an inode:
++ */
++TRACE_EVENT(dirty_inode,
++
++ TP_PROTO(struct inode *inode, struct task_struct *task),
++
++ TP_ARGS(inode, task),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __array( char, dev, 16 )
++ __array( char, file, 32 )
++ ),
++
++ TP_fast_assign(
++ if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
++ struct dentry *dentry;
++ const char *name = "?";
++
++ dentry = d_find_alias(inode);
++ if (dentry) {
++ spin_lock(&dentry->d_lock);
++ name = (const char *) dentry->d_name.name;
++ }
++
++ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
++ __entry->pid = task->pid;
++ strlcpy(__entry->file, name, 32);
++ strlcpy(__entry->dev, inode->i_sb->s_id, 16);
++
++ if (dentry) {
++ spin_unlock(&dentry->d_lock);
++ dput(dentry);
++ }
++ }
++ ),
++
++ TP_printk("task=%i (%s) file=%s dev=%s",
++ __entry->pid, __entry->comm, __entry->file, __entry->dev)
++);
++
++#endif /* _TRACE_VFS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround-wifi.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround-wifi.patch
new file mode 100644
index 0000000000..68b6b84765
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround-wifi.patch
@@ -0,0 +1,66 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Subject: workaround for an initialization bug in the Aava firmware
+
+based on code from James Ausmus <james.ausmus@intel.com> and
+Chao Jiang <chao.jiang@intel.com>; wiggle the power lines on the wifi
+on aava devices because the firmware forgot to do that during system power on.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+
+
+
+diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
+index 7263cd9..e6999b3 100644
+--- a/arch/x86/kernel/mrst.c
++++ b/arch/x86/kernel/mrst.c
+@@ -1074,6 +1074,41 @@ static struct pca953x_platform_data max7315_pdata;
+ static struct pca953x_platform_data max7315_pdata_2;
+ static struct lis3lv02d_platform_data lis3lv02d_pdata;
+
++static int intel_mrst_sdio_8688_power_up(void)
++{
++ u8 temp = 0;
++
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ /* Set GYMXIOCNT for Marvell 8688 */
++ intel_scu_ipc_iowrite8(0x4a, 0x3f);
++ /* Set GYMX33CNT for Marvell 8688 */
++ intel_scu_ipc_iowrite8(0x4e, 0x3f);
++
++ /* WLAN/BT power-up sequence: */
++ /* 1. power (GPO4) & reset (GPO3) low */
++ /* 2. power (GPO4) high */
++ /* 3. reset (GPO3) high */
++
++ /* WLAN POWER and RESET low */
++ intel_scu_ipc_ioread8(0xf4, &temp);
++ temp &= ~0x18;
++ intel_scu_ipc_iowrite8(0xf4, temp);
++
++ /* Enable V1p8_VWYMXARF for MRVL8688 */
++ intel_scu_ipc_iowrite8(0x4c, 0x27);
++
++ /* WLAN POWER high */
++ temp |= 0x10;
++ intel_scu_ipc_iowrite8(0xf4, temp);
++
++ /* WLAN RESET high */
++ temp |= 0x8;
++ intel_scu_ipc_iowrite8(0xf4, temp);
++ }
++ return 0;
++}
++
++
+ static int __init sfi_parse_i2cb(struct sfi_table_header *table)
+ {
+ struct sfi_table_simple *sb;
+@@ -1139,6 +1174,8 @@ static int __init sfi_parse_i2cb(struct sfi_table_header *table)
+ info.platform_data = NULL;
+ strcpy(info.type, "cy8ctmg110");
+ i2c_register_board_info(0, &info, 1);
++
++ intel_mrst_sdio_8688_power_up();
+ }
+
+ return 0;
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround.patch
new file mode 100644
index 0000000000..c44e35b867
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-aava-firmware-workaround.patch
@@ -0,0 +1,39 @@
+From 70d63e7fbc48b7f93b605fcbf9096eb4b8fa441c Mon Sep 17 00:00:00 2001
+From: Jiang, Chao <chao.jiang@intel.com>
+Date: Thu, 29 Jul 2010 11:17:28 +0800
+Subject: [PATCH] add cy8ctmg110 board info
+
+The current batch of Aava devices have a firmware bug where the
+touch screen is not present in the SFI tables.
+This patch manually fake-inserts this entry from the mrst
+code; and will be dropped once the fixed firmware becomes available.
+
+
+Signed-off-by: Jiang, Chao <chao.jiang@intel.com>
+---
+ arch/x86/kernel/mrst.c | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
+index 88ca4cf..211b134 100644
+--- a/arch/x86/kernel/mrst.c
++++ b/arch/x86/kernel/mrst.c
+@@ -1121,6 +1121,15 @@ static int __init sfi_parse_i2cb(struct sfi_table_header *table)
+ i2c_register_board_info(busnum, &info, 1);
+ }
+
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ /*Add AAVA SC touch screen*/
++ info.irq = 59;
++ info.addr = 0x38;
++ info.platform_data = NULL;
++ strcpy(info.type, "cy8ctmg110");
++ i2c_register_board_info(0, &info, 1);
++ }
++
+ return 0;
+ }
+
+--
+1.6.2.5
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-2010-08-24.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-2010-08-24.patch
new file mode 100644
index 0000000000..5e815cd6b9
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-2010-08-24.patch
@@ -0,0 +1,301256 @@
+---
+ Documentation/ABI/testing/sysfs-bus-devices-i2c-isl29020 | 29
+ Documentation/ABI/testing/sysfs-bus-i2c-devices-hm6352 | 21
+ Documentation/leds-lp5523.txt | 72
+ Documentation/networking/caif/spi_porting.txt | 208
+ MAINTAINERS | 13
+ arch/x86/Kconfig | 18
+ arch/x86/Kconfig.debug | 4
+ arch/x86/include/asm/apb_timer.h | 1
+ arch/x86/include/asm/apic.h | 1
+ arch/x86/include/asm/fixmap.h | 4
+ arch/x86/include/asm/gpio.h | 5
+ arch/x86/include/asm/intel_scu_ipc.h | 20
+ arch/x86/include/asm/mrst.h | 53
+ arch/x86/include/asm/vrtc.h | 27
+ arch/x86/include/asm/x86_init.h | 2
+ arch/x86/kernel/Makefile | 3
+ arch/x86/kernel/apb_timer.c | 37
+ arch/x86/kernel/apic/apic.c | 33
+ arch/x86/kernel/apic/io_apic.c | 4
+ arch/x86/kernel/cpu/common.c | 2
+ arch/x86/kernel/cpu/cpufreq/Kconfig | 16
+ arch/x86/kernel/cpu/cpufreq/Makefile | 1
+ arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c | 607 +
+ arch/x86/kernel/early_printk.c | 8
+ arch/x86/kernel/mrst.c | 1407 ++
+ arch/x86/kernel/mrst_earlyprintk.c | 391
+ arch/x86/kernel/setup.c | 2
+ arch/x86/kernel/vrtc.c | 100
+ arch/x86/kernel/x86_init.c | 2
+ arch/x86/pci/mrst.c | 7
+ drivers/char/Kconfig | 13
+ drivers/char/Makefile | 1
+ drivers/char/ptirouter_ldisc.c | 202
+ drivers/dma/Kconfig | 13
+ drivers/dma/Makefile | 1
+ drivers/dma/intel_mid_dma.c | 1143 ++
+ drivers/dma/intel_mid_dma_regs.h | 260
+ drivers/gpio/max7315.h | 82
+ drivers/gpio/pca953x.c | 215
+ drivers/gpu/drm/drm_crtc.c | 2
+ drivers/hwmon/Kconfig | 14
+ drivers/hwmon/Makefile | 2
+ drivers/hwmon/emc1403.c | 33
+ drivers/hwmon/intel_mid_thermal.c | 808 +
+ drivers/hwmon/lis3lv02d.c | 126
+ drivers/hwmon/lis3lv02d.h | 10
+ drivers/hwmon/lis3lv02d_i2c.c | 56
+ drivers/hwmon/lis3lv02d_spi.c | 33
+ drivers/hwmon/mrst_analog_accel.c | 231
+ drivers/i2c/busses/Kconfig | 9
+ drivers/i2c/busses/Makefile | 1
+ drivers/i2c/busses/i2c-mrst.c | 979 ++
+ drivers/i2c/busses/i2c-mrst.h | 267
+ drivers/idle/intel_idle.c | 1
+ drivers/input/keyboard/Kconfig | 16
+ drivers/input/keyboard/Makefile | 2
+ drivers/input/keyboard/intel_mid_keypad.c | 843 +
+ drivers/input/keyboard/tc35894xbg.c | 722 +
+ drivers/input/keyboard/tc35894xbg_regs.h | 1528 +++
+ drivers/input/touchscreen/Kconfig | 37
+ drivers/input/touchscreen/Makefile | 2
+ drivers/input/touchscreen/clearpad_tm1217.c | 674 +
+ drivers/input/touchscreen/cy8ctmg110_ts.c | 558 +
+ drivers/leds/Kconfig | 8
+ drivers/leds/Makefile | 1
+ drivers/leds/leds-lp5523.c | 1008 ++
+ drivers/misc/Kconfig | 61
+ drivers/misc/Makefile | 9
+ drivers/misc/apds9802als.c | 303
+ drivers/misc/bh1770glc.h | 169
+ drivers/misc/bh1770glc_als.c | 424
+ drivers/misc/bh1770glc_core.c | 301
+ drivers/misc/bh1770glc_ps.c | 585 +
+ drivers/misc/hmc6352.c | 199
+ drivers/misc/isl29015.c | 554 +
+ drivers/misc/isl29020.c | 236
+ drivers/misc/koski_hwid.c | 255
+ drivers/misc/pti.c | 754 +
+ drivers/mmc/card/block.c | 3
+ drivers/mmc/card/queue.c | 27
+ drivers/mmc/core/core.c | 22
+ drivers/mmc/core/mmc.c | 28
+ drivers/mmc/host/sdhci-pci.c | 167
+ drivers/mmc/host/sdhci.c | 299
+ drivers/mmc/host/sdhci.h | 78
+ drivers/net/caif/Kconfig | 20
+ drivers/net/caif/Makefile | 4
+ drivers/net/caif/caif_spi.c | 847 +
+ drivers/net/caif/caif_spi_slave.c | 252
+ drivers/pci/probe.c | 10
+ drivers/pci/quirks.c | 13
+ drivers/platform/x86/Kconfig | 24
+ drivers/platform/x86/Makefile | 3
+ drivers/platform/x86/intel_mid_vibrator.c | 88
+ drivers/platform/x86/intel_pmic_gpio.c | 342
+ drivers/platform/x86/intel_scu_ipc.c | 186
+ drivers/platform/x86/intel_scu_ipcutil.c | 113
+ drivers/power/Kconfig | 7
+ drivers/power/Makefile | 1
+ drivers/power/intel_mid_battery.c | 841 +
+ drivers/rtc/Kconfig | 13
+ drivers/rtc/Makefile | 1
+ drivers/rtc/rtc-mrst.c | 619 +
+ drivers/serial/8250.c | 45
+ drivers/serial/Kconfig | 50
+ drivers/serial/Makefile | 5
+ drivers/serial/ifx6x60.c | 1482 +++
+ drivers/serial/ifx6x60.h | 126
+ drivers/serial/max3107-aava.c | 339
+ drivers/serial/max3107.c | 1192 ++
+ drivers/serial/max3107.h | 439
+ drivers/serial/mfd.c | 1507 +++
+ drivers/serial/mrst_max3110.c | 917 +
+ drivers/serial/mrst_max3110.h | 60
+ drivers/sfi/Kconfig | 11
+ drivers/sfi/Makefile | 6
+ drivers/sfi/sfi_processor_core.c | 205
+ drivers/sfi/sfi_processor_idle.c | 262
+ drivers/sfi/sfi_processor_perflib.c | 189
+ drivers/spi/Kconfig | 19
+ drivers/spi/Makefile | 5
+ drivers/spi/dw_spi.c | 48
+ drivers/spi/dw_spi_mid.c | 243
+ drivers/spi/dw_spi_pci.c | 14
+ drivers/spi/intel_mid_ssp_spi.c | 1251 ++
+ drivers/spi/intel_mid_ssp_spi_def.h | 247
+ drivers/spi/pw_spi3.c | 1162 ++
+ drivers/spi/spi.c | 86
+ drivers/staging/Kconfig | 14
+ drivers/staging/Makefile | 9
+ drivers/staging/ice4100/Kconfig | 44
+ drivers/staging/ice4100/Makefile | 12
+ drivers/staging/ice4100/sgx535/Makefile | 147
+ drivers/staging/ice4100/sgx535/bridged/bridged_pvr_bridge.c | 3419 +++++++
+ drivers/staging/ice4100/sgx535/bridged/bridged_pvr_bridge.h | 218
+ drivers/staging/ice4100/sgx535/bridged/bridged_support.c | 84
+ drivers/staging/ice4100/sgx535/bridged/bridged_support.h | 43
+ drivers/staging/ice4100/sgx535/bridged/sgx/bridged_sgx_bridge.c | 2497 +++++
+ drivers/staging/ice4100/sgx535/bridged/sgx/bridged_sgx_bridge.h | 42
+ drivers/staging/ice4100/sgx535/common/buffer_manager.c | 1803 +++
+ drivers/staging/ice4100/sgx535/common/deviceclass.c | 1732 +++
+ drivers/staging/ice4100/sgx535/common/devicemem.c | 1253 ++
+ drivers/staging/ice4100/sgx535/common/handle.c | 1521 +++
+ drivers/staging/ice4100/sgx535/common/hash.c | 434
+ drivers/staging/ice4100/sgx535/common/lists.c | 92
+ drivers/staging/ice4100/sgx535/common/mem.c | 131
+ drivers/staging/ice4100/sgx535/common/mem_debug.c | 228
+ drivers/staging/ice4100/sgx535/common/metrics.c | 149
+ drivers/staging/ice4100/sgx535/common/pdump_common.c | 1558 +++
+ drivers/staging/ice4100/sgx535/common/perproc.c | 276
+ drivers/staging/ice4100/sgx535/common/power.c | 686 +
+ drivers/staging/ice4100/sgx535/common/pvrsrv.c | 1105 ++
+ drivers/staging/ice4100/sgx535/common/queue.c | 1046 ++
+ drivers/staging/ice4100/sgx535/common/ra.c | 1889 +++
+ drivers/staging/ice4100/sgx535/common/resman.c | 630 +
+ drivers/staging/ice4100/sgx535/devices/sgx/mmu.c | 2501 +++++
+ drivers/staging/ice4100/sgx535/devices/sgx/mmu.h | 139
+ drivers/staging/ice4100/sgx535/devices/sgx/pb.c | 420
+ drivers/staging/ice4100/sgx535/devices/sgx/sgx_bridge_km.h | 147
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxconfig.h | 158
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxinfokm.h | 346
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxinit.c | 2136 ++++
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxkick.c | 901 +
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxpower.c | 424
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxreset.c | 474 +
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxtransfer.c | 598 +
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxutils.c | 845 +
+ drivers/staging/ice4100/sgx535/devices/sgx/sgxutils.h | 107
+ drivers/staging/ice4100/sgx535/env_data.h | 66
+ drivers/staging/ice4100/sgx535/env_perproc.h | 56
+ drivers/staging/ice4100/sgx535/event.c | 278
+ drivers/staging/ice4100/sgx535/event.h | 32
+ drivers/staging/ice4100/sgx535/hwdefs/sgx535defs.h | 637 +
+ drivers/staging/ice4100/sgx535/hwdefs/sgxdefs.h | 82
+ drivers/staging/ice4100/sgx535/hwdefs/sgxerrata.h | 306
+ drivers/staging/ice4100/sgx535/hwdefs/sgxfeaturedefs.h | 166
+ drivers/staging/ice4100/sgx535/hwdefs/sgxmmu.h | 79
+ drivers/staging/ice4100/sgx535/include/buffer_manager.h | 213
+ drivers/staging/ice4100/sgx535/include/dbgdrvif.h | 267
+ drivers/staging/ice4100/sgx535/include/device.h | 271
+ drivers/staging/ice4100/sgx535/include/env/linux/pvr_drm_shared.h | 63
+ drivers/staging/ice4100/sgx535/include/handle.h | 272
+ drivers/staging/ice4100/sgx535/include/hash.h | 73
+ drivers/staging/ice4100/sgx535/include/img_types.h | 78
+ drivers/staging/ice4100/sgx535/include/ioctldef.h | 98
+ drivers/staging/ice4100/sgx535/include/kernelbuffer.h | 60
+ drivers/staging/ice4100/sgx535/include/kerneldisplay.h | 153
+ drivers/staging/ice4100/sgx535/include/lists.h | 176
+ drivers/staging/ice4100/sgx535/include/metrics.h | 130
+ drivers/staging/ice4100/sgx535/include/osfunc.h | 321
+ drivers/staging/ice4100/sgx535/include/osperproc.h | 37
+ drivers/staging/ice4100/sgx535/include/pdump_km.h | 439
+ drivers/staging/ice4100/sgx535/include/pdump_osfunc.h | 137
+ drivers/staging/ice4100/sgx535/include/pdumpdefs.h | 99
+ drivers/staging/ice4100/sgx535/include/perproc.h | 91
+ drivers/staging/ice4100/sgx535/include/power.h | 120
+ drivers/staging/ice4100/sgx535/include/pvr_bridge.h | 1405 ++
+ drivers/staging/ice4100/sgx535/include/pvr_bridge_km.h | 295
+ drivers/staging/ice4100/sgx535/include/pvr_debug.h | 119
+ drivers/staging/ice4100/sgx535/include/pvrmmap.h | 36
+ drivers/staging/ice4100/sgx535/include/pvrmodule.h | 31
+ drivers/staging/ice4100/sgx535/include/pvrversion.h | 38
+ drivers/staging/ice4100/sgx535/include/queue.h | 119
+ drivers/staging/ice4100/sgx535/include/ra.h | 155
+ drivers/staging/ice4100/sgx535/include/regpaths.h | 43
+ drivers/staging/ice4100/sgx535/include/resman.h | 113
+ drivers/staging/ice4100/sgx535/include/services.h | 864 +
+ drivers/staging/ice4100/sgx535/include/services_headers.h | 49
+ drivers/staging/ice4100/sgx535/include/servicesext.h | 659 +
+ drivers/staging/ice4100/sgx535/include/servicesint.h | 254
+ drivers/staging/ice4100/sgx535/include/sgx_bridge.h | 477 +
+ drivers/staging/ice4100/sgx535/include/sgx_mkif_km.h | 388
+ drivers/staging/ice4100/sgx535/include/sgx_options.h | 224
+ drivers/staging/ice4100/sgx535/include/sgxapi_km.h | 329
+ drivers/staging/ice4100/sgx535/include/sgxinfo.h | 288
+ drivers/staging/ice4100/sgx535/include/sgxscript.h | 81
+ drivers/staging/ice4100/sgx535/include/srvkm.h | 69
+ drivers/staging/ice4100/sgx535/linkage.h | 61
+ drivers/staging/ice4100/sgx535/lock.h | 32
+ drivers/staging/ice4100/sgx535/mm.c | 2150 ++++
+ drivers/staging/ice4100/sgx535/mm.h | 331
+ drivers/staging/ice4100/sgx535/mmap.c | 1104 ++
+ drivers/staging/ice4100/sgx535/mmap.h | 107
+ drivers/staging/ice4100/sgx535/module.c | 734 +
+ drivers/staging/ice4100/sgx535/mutils.c | 129
+ drivers/staging/ice4100/sgx535/mutils.h | 101
+ drivers/staging/ice4100/sgx535/osfunc.c | 2369 +++++
+ drivers/staging/ice4100/sgx535/osperproc.c | 106
+ drivers/staging/ice4100/sgx535/pdump.c | 610 +
+ drivers/staging/ice4100/sgx535/private_data.h | 67
+ drivers/staging/ice4100/sgx535/proc.c | 933 +
+ drivers/staging/ice4100/sgx535/proc.h | 115
+ drivers/staging/ice4100/sgx535/pvr_bridge_k.c | 667 +
+ drivers/staging/ice4100/sgx535/pvr_debug.c | 413
+ drivers/staging/ice4100/sgx535/pvr_drm.c | 300
+ drivers/staging/ice4100/sgx535/pvr_drm.h | 87
+ drivers/staging/ice4100/sgx535/system/include/syscommon.h | 201
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/graphics_pm.c | 262
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/graphics_pm.h | 51
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/oemfuncs.h | 79
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysconfig.c | 884 +
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysconfig.h | 88
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysinfo.h | 65
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/syslocal.h | 85
+ drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysutils.c | 118
+ drivers/staging/ifx-mux/Kconfig | 4
+ drivers/staging/ifx-mux/Makefile | 3
+ drivers/staging/ifx-mux/crc8.c | 63
+ drivers/staging/ifx-mux/crc8.h | 7
+ drivers/staging/ifx-mux/gsm0710.c | 723 +
+ drivers/staging/ifx-mux/gsm0710.h | 168
+ drivers/staging/ifx-mux/ifx_spi_mux.c | 2385 +++++
+ drivers/staging/ifx-mux/ifx_spi_mux.h | 219
+ drivers/staging/ifx-mux/ifx_spi_mux_ioctl.h | 57
+ drivers/staging/memrar/TODO | 2
+ drivers/staging/memrar/memrar-abi | 4
+ drivers/staging/memrar/memrar_handler.c | 28
+ drivers/staging/mfld-sensors/Kconfig | 24
+ drivers/staging/mfld-sensors/Makefile | 6
+ drivers/staging/mfld-sensors/ak8974.c | 250
+ drivers/staging/mfld-sensors/apds9802ps.c | 248
+ drivers/staging/mfld_ledflash/Kconfig | 10
+ drivers/staging/mfld_ledflash/Makefile | 1
+ drivers/staging/mfld_ledflash/mfld_ledflash.c | 138
+ drivers/staging/mrst-touchscreen/Makefile | 2
+ drivers/staging/mrst-touchscreen/intel-mid-touch.c | 939 -
+ drivers/staging/mrst/Kconfig | 93
+ drivers/staging/mrst/Makefile | 4
+ drivers/staging/mrst/drm_global.c | 113
+ drivers/staging/mrst/drm_global.h | 25
+ drivers/staging/mrst/drv/lnc_topaz.c | 751 +
+ drivers/staging/mrst/drv/lnc_topaz.h | 154
+ drivers/staging/mrst/drv/lnc_topaz_hw_reg.h | 787 +
+ drivers/staging/mrst/drv/lnc_topazinit.c | 2062 ++++
+ drivers/staging/mrst/drv/mdfld_dsi_dbi.c | 1895 ++++
+ drivers/staging/mrst/drv/mdfld_dsi_dbi.h | 185
+ drivers/staging/mrst/drv/mdfld_dsi_dbi_dpu.c | 703 +
+ drivers/staging/mrst/drv/mdfld_dsi_dbi_dpu.h | 130
+ drivers/staging/mrst/drv/mdfld_hdcp.h | 232
+ drivers/staging/mrst/drv/mdfld_hdcp_if.h | 77
+ drivers/staging/mrst/drv/mdfld_hdcp_reg.h | 231
+ drivers/staging/mrst/drv/mdfld_hdmi_audio.c | 199
+ drivers/staging/mrst/drv/mdfld_hdmi_audio_if.h | 68
+ drivers/staging/mrst/drv/mdfld_intel_hdcp.c | 1350 ++
+ drivers/staging/mrst/drv/msvdx_power.c | 164
+ drivers/staging/mrst/drv/msvdx_power.h | 48
+ drivers/staging/mrst/drv/pnw_topaz.c | 889 +
+ drivers/staging/mrst/drv/pnw_topaz.h | 150
+ drivers/staging/mrst/drv/pnw_topaz_hw_reg.h | 1133 ++
+ drivers/staging/mrst/drv/pnw_topazinit.c | 2346 ++++
+ drivers/staging/mrst/drv/psb_bl.c | 270
+ drivers/staging/mrst/drv/psb_bl2.c | 165
+ drivers/staging/mrst/drv/psb_buffer.c | 379
+ drivers/staging/mrst/drv/psb_dpst.c | 254
+ drivers/staging/mrst/drv/psb_dpst.h | 98
+ drivers/staging/mrst/drv/psb_drm.h | 661 +
+ drivers/staging/mrst/drv/psb_drv.c | 2557 +++++
+ drivers/staging/mrst/drv/psb_drv.h | 1322 ++
+ drivers/staging/mrst/drv/psb_fb.c | 1822 +++
+ drivers/staging/mrst/drv/psb_fb.h | 66
+ drivers/staging/mrst/drv/psb_fence.c | 191
+ drivers/staging/mrst/drv/psb_gtt.c | 1040 ++
+ drivers/staging/mrst/drv/psb_gtt.h | 111
+ drivers/staging/mrst/drv/psb_hotplug.c | 425
+ drivers/staging/mrst/drv/psb_hotplug.h | 90
+ drivers/staging/mrst/drv/psb_intel_bios.c | 305
+ drivers/staging/mrst/drv/psb_intel_bios.h | 430
+ drivers/staging/mrst/drv/psb_intel_display.c | 2564 +++++
+ drivers/staging/mrst/drv/psb_intel_display.h | 25
+ drivers/staging/mrst/drv/psb_intel_display2.c | 1304 ++
+ drivers/staging/mrst/drv/psb_intel_drv.h | 365
+ drivers/staging/mrst/drv/psb_intel_dsi.c | 2361 ++++
+ drivers/staging/mrst/drv/psb_intel_dsi2.c | 3583 +++++++
+ drivers/staging/mrst/drv/psb_intel_dsi_aava.c | 930 +
+ drivers/staging/mrst/drv/psb_intel_hdmi.c | 989 ++
+ drivers/staging/mrst/drv/psb_intel_hdmi.h | 883 +
+ drivers/staging/mrst/drv/psb_intel_hdmi_edid.h | 1057 ++
+ drivers/staging/mrst/drv/psb_intel_hdmi_i2c.c | 213
+ drivers/staging/mrst/drv/psb_intel_hdmi_i2c.h | 21
+ drivers/staging/mrst/drv/psb_intel_hdmi_reg.h | 130
+ drivers/staging/mrst/drv/psb_intel_i2c.c | 172
+ drivers/staging/mrst/drv/psb_intel_lvds.c | 1390 ++
+ drivers/staging/mrst/drv/psb_intel_modes.c | 77
+ drivers/staging/mrst/drv/psb_intel_reg.h | 1232 ++
+ drivers/staging/mrst/drv/psb_intel_sdvo.c | 1408 ++
+ drivers/staging/mrst/drv/psb_intel_sdvo_regs.h | 338
+ drivers/staging/mrst/drv/psb_irq.c | 675 +
+ drivers/staging/mrst/drv/psb_irq.h | 49
+ drivers/staging/mrst/drv/psb_mmu.c | 1010 ++
+ drivers/staging/mrst/drv/psb_msvdx.c | 1099 ++
+ drivers/staging/mrst/drv/psb_msvdx.h | 785 +
+ drivers/staging/mrst/drv/psb_msvdxinit.c | 1063 ++
+ drivers/staging/mrst/drv/psb_powermgmt.c | 890 +
+ drivers/staging/mrst/drv/psb_powermgmt.h | 85
+ drivers/staging/mrst/drv/psb_pvr_glue.c | 74
+ drivers/staging/mrst/drv/psb_pvr_glue.h | 26
+ drivers/staging/mrst/drv/psb_reg.h | 570 +
+ drivers/staging/mrst/drv/psb_reset.c | 330
+ drivers/staging/mrst/drv/psb_schedule.c | 70
+ drivers/staging/mrst/drv/psb_schedule.h | 81
+ drivers/staging/mrst/drv/psb_setup.c | 36
+ drivers/staging/mrst/drv/psb_sgx.c | 936 +
+ drivers/staging/mrst/drv/psb_sgx.h | 32
+ drivers/staging/mrst/drv/psb_socket.c | 379
+ drivers/staging/mrst/drv/psb_ttm_glue.c | 353
+ drivers/staging/mrst/drv/psb_umevents.c | 485 +
+ drivers/staging/mrst/drv/psb_umevents.h | 159
+ drivers/staging/mrst/drv/topaz_power.c | 229
+ drivers/staging/mrst/drv/topaz_power.h | 53
+ drivers/staging/mrst/drv/ttm/ttm_agp_backend.c | 144
+ drivers/staging/mrst/drv/ttm/ttm_bo.c | 1730 +++
+ drivers/staging/mrst/drv/ttm/ttm_bo_api.h | 573 +
+ drivers/staging/mrst/drv/ttm/ttm_bo_driver.h | 864 +
+ drivers/staging/mrst/drv/ttm/ttm_bo_util.c | 546 +
+ drivers/staging/mrst/drv/ttm/ttm_bo_vm.c | 429
+ drivers/staging/mrst/drv/ttm/ttm_execbuf_util.c | 108
+ drivers/staging/mrst/drv/ttm/ttm_execbuf_util.h | 103
+ drivers/staging/mrst/drv/ttm/ttm_fence.c | 607 +
+ drivers/staging/mrst/drv/ttm/ttm_fence_api.h | 272
+ drivers/staging/mrst/drv/ttm/ttm_fence_driver.h | 302
+ drivers/staging/mrst/drv/ttm/ttm_fence_user.c | 238
+ drivers/staging/mrst/drv/ttm/ttm_fence_user.h | 140
+ drivers/staging/mrst/drv/ttm/ttm_lock.c | 155
+ drivers/staging/mrst/drv/ttm/ttm_lock.h | 176
+ drivers/staging/mrst/drv/ttm/ttm_memory.c | 228
+ drivers/staging/mrst/drv/ttm/ttm_memory.h | 147
+ drivers/staging/mrst/drv/ttm/ttm_object.c | 440
+ drivers/staging/mrst/drv/ttm/ttm_object.h | 262
+ drivers/staging/mrst/drv/ttm/ttm_pat_compat.c | 164
+ drivers/staging/mrst/drv/ttm/ttm_pat_compat.h | 34
+ drivers/staging/mrst/drv/ttm/ttm_placement_common.h | 91
+ drivers/staging/mrst/drv/ttm/ttm_placement_user.c | 468
+ drivers/staging/mrst/drv/ttm/ttm_placement_user.h | 252
+ drivers/staging/mrst/drv/ttm/ttm_regman.h | 67
+ drivers/staging/mrst/drv/ttm/ttm_tt.c | 656 +
+ drivers/staging/mrst/drv/ttm/ttm_userobj_api.h | 72
+ drivers/staging/mrst/medfield/Makefile | 176
+ drivers/staging/mrst/moorestown/Makefile | 178
+ drivers/staging/mrst/pvr/COPYING | 351
+ drivers/staging/mrst/pvr/INSTALL | 76
+ drivers/staging/mrst/pvr/README | 48
+ drivers/staging/mrst/pvr/eurasiacon/.gitignore | 6
+ drivers/staging/mrst/pvr/include4/dbgdrvif.h | 298
+ drivers/staging/mrst/pvr/include4/img_defs.h | 108
+ drivers/staging/mrst/pvr/include4/img_types.h | 128
+ drivers/staging/mrst/pvr/include4/ioctldef.h | 98
+ drivers/staging/mrst/pvr/include4/pdumpdefs.h | 99
+ drivers/staging/mrst/pvr/include4/pvr_debug.h | 127
+ drivers/staging/mrst/pvr/include4/pvrmodule.h | 31
+ drivers/staging/mrst/pvr/include4/pvrversion.h | 38
+ drivers/staging/mrst/pvr/include4/regpaths.h | 43
+ drivers/staging/mrst/pvr/include4/services.h | 872 +
+ drivers/staging/mrst/pvr/include4/servicesext.h | 648 +
+ drivers/staging/mrst/pvr/include4/sgx_options.h | 224
+ drivers/staging/mrst/pvr/include4/sgxapi_km.h | 323
+ drivers/staging/mrst/pvr/include4/sgxscript.h | 81
+ drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore | 6
+ drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common | 41
+ drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h | 295
+ drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c | 2092 ++++
+ drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c | 206
+ drivers/staging/mrst/pvr/services4/include/env/linux-intel/pvr_drm_shared.h | 54
+ drivers/staging/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h | 54
+ drivers/staging/mrst/pvr/services4/include/kernelbuffer.h | 60
+ drivers/staging/mrst/pvr/services4/include/kerneldisplay.h | 156
+ drivers/staging/mrst/pvr/services4/include/pvr_bridge.h | 1383 ++
+ drivers/staging/mrst/pvr/services4/include/pvr_bridge_km.h | 288
+ drivers/staging/mrst/pvr/services4/include/pvrmmap.h | 36
+ drivers/staging/mrst/pvr/services4/include/servicesint.h | 276
+ drivers/staging/mrst/pvr/services4/include/sgx_bridge.h | 477 +
+ drivers/staging/mrst/pvr/services4/include/sgx_mkif_km.h | 334
+ drivers/staging/mrst/pvr/services4/include/sgxinfo.h | 288
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/.gitignore | 5
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c | 3409 +++++++
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h | 231
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_support.c | 85
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_support.h | 43
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c | 2511 +++++
+ drivers/staging/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h | 42
+ drivers/staging/mrst/pvr/services4/srvkm/common/.gitignore | 5
+ drivers/staging/mrst/pvr/services4/srvkm/common/buffer_manager.c | 2072 ++++
+ drivers/staging/mrst/pvr/services4/srvkm/common/deviceclass.c | 1977 ++++
+ drivers/staging/mrst/pvr/services4/srvkm/common/devicemem.c | 1459 +++
+ drivers/staging/mrst/pvr/services4/srvkm/common/handle.c | 1547 +++
+ drivers/staging/mrst/pvr/services4/srvkm/common/hash.c | 463
+ drivers/staging/mrst/pvr/services4/srvkm/common/lists.c | 99
+ drivers/staging/mrst/pvr/services4/srvkm/common/mem.c | 151
+ drivers/staging/mrst/pvr/services4/srvkm/common/mem_debug.c | 250
+ drivers/staging/mrst/pvr/services4/srvkm/common/metrics.c | 160
+ drivers/staging/mrst/pvr/services4/srvkm/common/pdump_common.c | 1723 +++
+ drivers/staging/mrst/pvr/services4/srvkm/common/perproc.c | 283
+ drivers/staging/mrst/pvr/services4/srvkm/common/power.c | 820 +
+ drivers/staging/mrst/pvr/services4/srvkm/common/pvrsrv.c | 1195 ++
+ drivers/staging/mrst/pvr/services4/srvkm/common/queue.c | 1166 ++
+ drivers/staging/mrst/pvr/services4/srvkm/common/ra.c | 1871 +++
+ drivers/staging/mrst/pvr/services4/srvkm/common/resman.c | 717 +
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/.gitignore | 5
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/mmu.c | 2776 +++++
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/mmu.h | 139
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/pb.c | 458
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h | 147
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h | 134
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h | 352
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c | 2228 ++++
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c | 744 +
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c | 453
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c | 489 +
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c | 543 +
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c | 934 +
+ drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h | 99
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/env_data.h | 66
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/env_perproc.h | 56
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/event.c | 270
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/event.h | 32
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/linkage.h | 61
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/lock.h | 34
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mm.c | 570 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mm.h | 198
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mmap.c | 844 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mmap.h | 102
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/module.c | 747 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mutils.c | 133
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mutils.h | 89
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/osfunc.c | 2461 +++++
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/osperproc.c | 113
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pdump.c | 662 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/private_data.h | 67
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/proc.c | 958 ++
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/proc.h | 115
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_bridge_k.c | 652 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_debug.c | 428
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_drm.c | 306
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_drm.h | 80
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/.gitignore | 5
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/env_data.h | 66
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/env_perproc.h | 56
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/event.c | 273
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/event.h | 32
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/linkage.h | 61
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/lock.h | 32
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mm.c | 2360 ++++
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mm.h | 331
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mmap.c | 1148 ++
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mmap.h | 107
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/module.c | 767 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutex.c | 131
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutex.h | 77
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutils.c | 133
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutils.h | 101
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/osfunc.c | 2564 +++++
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/osperproc.c | 113
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/pdump.c | 662 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/private_data.h | 67
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/proc.c | 970 ++
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/proc.h | 115
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c | 651 +
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c | 426
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c | 310
+ drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h | 80
+ drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h | 637 +
+ drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgx540defs.h | 620 +
+ drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h | 82
+ drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h | 309
+ drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h | 163
+ drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h | 79
+ drivers/staging/mrst/pvr/services4/srvkm/include/buffer_manager.h | 218
+ drivers/staging/mrst/pvr/services4/srvkm/include/device.h | 278
+ drivers/staging/mrst/pvr/services4/srvkm/include/handle.h | 382
+ drivers/staging/mrst/pvr/services4/srvkm/include/hash.h | 73
+ drivers/staging/mrst/pvr/services4/srvkm/include/lists.h | 176
+ drivers/staging/mrst/pvr/services4/srvkm/include/metrics.h | 130
+ drivers/staging/mrst/pvr/services4/srvkm/include/osfunc.h | 487 +
+ drivers/staging/mrst/pvr/services4/srvkm/include/osperproc.h | 76
+ drivers/staging/mrst/pvr/services4/srvkm/include/pdump_km.h | 451
+ drivers/staging/mrst/pvr/services4/srvkm/include/pdump_osfunc.h | 137
+ drivers/staging/mrst/pvr/services4/srvkm/include/perproc.h | 110
+ drivers/staging/mrst/pvr/services4/srvkm/include/power.h | 133
+ drivers/staging/mrst/pvr/services4/srvkm/include/queue.h | 119
+ drivers/staging/mrst/pvr/services4/srvkm/include/ra.h | 155
+ drivers/staging/mrst/pvr/services4/srvkm/include/resman.h | 113
+ drivers/staging/mrst/pvr/services4/srvkm/include/services_headers.h | 49
+ drivers/staging/mrst/pvr/services4/srvkm/include/srvkm.h | 69
+ drivers/staging/mrst/pvr/services4/system/include/syscommon.h | 217
+ drivers/staging/mrst/pvr/services4/system/medfield/.gitignore | 5
+ drivers/staging/mrst/pvr/services4/system/medfield/oemfuncs.h | 64
+ drivers/staging/mrst/pvr/services4/system/medfield/ospm_power.c | 517 +
+ drivers/staging/mrst/pvr/services4/system/medfield/ospm_power.h | 79
+ drivers/staging/mrst/pvr/services4/system/medfield/psb_powermgmt.h | 85
+ drivers/staging/mrst/pvr/services4/system/medfield/sys_pvr_drm_export.c | 135
+ drivers/staging/mrst/pvr/services4/system/medfield/sys_pvr_drm_export.h | 87
+ drivers/staging/mrst/pvr/services4/system/medfield/sys_pvr_drm_import.h | 44
+ drivers/staging/mrst/pvr/services4/system/medfield/sysconfig.c | 1274 ++
+ drivers/staging/mrst/pvr/services4/system/medfield/sysconfig.h | 147
+ drivers/staging/mrst/pvr/services4/system/medfield/sysinfo.h | 43
+ drivers/staging/mrst/pvr/services4/system/medfield/sysirq.h | 49
+ drivers/staging/mrst/pvr/services4/system/medfield/syslocal.h | 82
+ drivers/staging/mrst/pvr/services4/system/medfield/sysutils.c | 30
+ drivers/staging/mrst/pvr/services4/system/moorestown/.gitignore | 5
+ drivers/staging/mrst/pvr/services4/system/moorestown/oemfuncs.h | 72
+ drivers/staging/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c | 135
+ drivers/staging/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h | 87
+ drivers/staging/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h | 45
+ drivers/staging/mrst/pvr/services4/system/moorestown/sysconfig.c | 1203 ++
+ drivers/staging/mrst/pvr/services4/system/moorestown/sysconfig.h | 139
+ drivers/staging/mrst/pvr/services4/system/moorestown/sysinfo.h | 43
+ drivers/staging/mrst/pvr/services4/system/moorestown/syslocal.h | 83
+ drivers/staging/mrst/pvr/services4/system/moorestown/sysutils.c | 30
+ drivers/staging/mrst/pvr/services4/system/unified/oemfuncs.h | 74
+ drivers/staging/mrst/pvr/services4/system/unified/ospm_power.c | 517 +
+ drivers/staging/mrst/pvr/services4/system/unified/ospm_power.h | 79
+ drivers/staging/mrst/pvr/services4/system/unified/psb_powermgmt.h | 85
+ drivers/staging/mrst/pvr/services4/system/unified/sys_pvr_drm_export.c | 135
+ drivers/staging/mrst/pvr/services4/system/unified/sys_pvr_drm_export.h | 87
+ drivers/staging/mrst/pvr/services4/system/unified/sys_pvr_drm_import.h | 44
+ drivers/staging/mrst/pvr/services4/system/unified/sysconfig-medfield.c | 1279 ++
+ drivers/staging/mrst/pvr/services4/system/unified/sysconfig-moorestown.c | 1219 ++
+ drivers/staging/mrst/pvr/services4/system/unified/sysconfig.h | 151
+ drivers/staging/mrst/pvr/services4/system/unified/sysinfo.h | 43
+ drivers/staging/mrst/pvr/services4/system/unified/sysirq.h | 49
+ drivers/staging/mrst/pvr/services4/system/unified/syslocal.h | 82
+ drivers/staging/mrst/pvr/services4/system/unified/sysutils.c | 30
+ drivers/staging/mrst/pvr/tools/intern/debug/client/linuxsrv.h | 48
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c | 2075 ++++
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h | 116
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h | 58
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c | 135
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h | 60
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c | 371
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h | 87
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c | 302
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile | 35
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c | 298
+ drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common | 40
+ drivers/staging/mrstci/Kconfig | 28
+ drivers/staging/mrstci/Makefile | 9
+ drivers/staging/mrstci/include/ci_isp_common.h | 1416 ++
+ drivers/staging/mrstci/include/ci_isp_fmts_common.h | 128
+ drivers/staging/mrstci/include/ci_sensor_common.h | 1233 ++
+ drivers/staging/mrstci/include/ci_va.h | 42
+ drivers/staging/mrstci/include/v4l2_jpg_review.h | 48
+ drivers/staging/mrstci/mrstflash/Kconfig | 9
+ drivers/staging/mrstci/mrstflash/Makefile | 3
+ drivers/staging/mrstci/mrstflash/mrstflash.c | 168
+ drivers/staging/mrstci/mrstisp/Kconfig | 10
+ drivers/staging/mrstci/mrstisp/Makefile | 7
+ drivers/staging/mrstci/mrstisp/__mrstisp_private_ioctl.c | 324
+ drivers/staging/mrstci/mrstisp/include/def.h | 122
+ drivers/staging/mrstci/mrstisp/include/mrstisp.h | 279
+ drivers/staging/mrstci/mrstisp/include/mrstisp_dp.h | 317
+ drivers/staging/mrstci/mrstisp/include/mrstisp_hw.h | 245
+ drivers/staging/mrstci/mrstisp/include/mrstisp_isp.h | 42
+ drivers/staging/mrstci/mrstisp/include/mrstisp_jpe.h | 426
+ drivers/staging/mrstci/mrstisp/include/mrstisp_reg.h | 4700 +++++++++
+ drivers/staging/mrstci/mrstisp/include/mrstisp_stdinc.h | 118
+ drivers/staging/mrstci/mrstisp/include/project_settings_mrv.h | 622 +
+ drivers/staging/mrstci/mrstisp/include/reg_access.h | 233
+ drivers/staging/mrstci/mrstisp/mrstisp_dp.c | 1303 ++
+ drivers/staging/mrstci/mrstisp/mrstisp_hw.c | 1640 +++
+ drivers/staging/mrstci/mrstisp/mrstisp_isp.c | 1993 ++++
+ drivers/staging/mrstci/mrstisp/mrstisp_jpe.c | 577 +
+ drivers/staging/mrstci/mrstisp/mrstisp_main.c | 3138 ++++++
+ drivers/staging/mrstci/mrstisp/mrstisp_mif.c | 763 +
+ drivers/staging/mrstci/mrstmt9d113/Kconfig | 9
+ drivers/staging/mrstci/mrstmt9d113/Makefile | 3
+ drivers/staging/mrstci/mrstmt9d113/mrstmt9d113.c | 1188 ++
+ drivers/staging/mrstci/mrstmt9d113/mrstmt9d113.h | 107
+ drivers/staging/mrstci/mrstov2650/Kconfig | 9
+ drivers/staging/mrstci/mrstov2650/Makefile | 3
+ drivers/staging/mrstci/mrstov2650/mrstov2650.c | 1190 ++
+ drivers/staging/mrstci/mrstov2650/ov2650.h | 766 +
+ drivers/staging/mrstci/mrstov5630/Kconfig | 9
+ drivers/staging/mrstci/mrstov5630/Makefile | 4
+ drivers/staging/mrstci/mrstov5630/ov5630.c | 1153 ++
+ drivers/staging/mrstci/mrstov5630/ov5630.h | 672 +
+ drivers/staging/mrstci/mrstov5630_motor/Kconfig | 9
+ drivers/staging/mrstci/mrstov5630_motor/Makefile | 3
+ drivers/staging/mrstci/mrstov5630_motor/mrstov5630_motor.c | 414
+ drivers/staging/mrstci/mrstov5630_motor/ov5630_motor.h | 86
+ drivers/staging/mrstci/mrstov9665/Kconfig | 9
+ drivers/staging/mrstci/mrstov9665/Makefile | 3
+ drivers/staging/mrstci/mrstov9665/mrstov9665.c | 972 ++
+ drivers/staging/mrstci/mrstov9665/ov9665.h | 263
+ drivers/staging/mrstci/mrsts5k4e1/Kconfig | 9
+ drivers/staging/mrstci/mrsts5k4e1/Makefile | 3
+ drivers/staging/mrstci/mrsts5k4e1/mrsts5k4e1.c | 1034 ++
+ drivers/staging/mrstci/mrsts5k4e1/mrsts5k4e1.h | 676 +
+ drivers/staging/mrstci/mrsts5k4e1_motor/Kconfig | 9
+ drivers/staging/mrstci/mrsts5k4e1_motor/Makefile | 3
+ drivers/staging/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c | 430
+ drivers/staging/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h | 102
+ drivers/staging/rar_register/rar_register.c | 2
+ drivers/staging/spectra/Kconfig | 40
+ drivers/staging/spectra/Makefile | 11
+ drivers/staging/spectra/README | 29
+ drivers/staging/spectra/ffsdefs.h | 58
+ drivers/staging/spectra/ffsport.c | 830 +
+ drivers/staging/spectra/ffsport.h | 85
+ drivers/staging/spectra/flash.c | 4729 ++++++++++
+ drivers/staging/spectra/flash.h | 198
+ drivers/staging/spectra/lld.c | 339
+ drivers/staging/spectra/lld.h | 111
+ drivers/staging/spectra/lld_cdma.c | 910 +
+ drivers/staging/spectra/lld_cdma.h | 123
+ drivers/staging/spectra/lld_emu.c | 780 +
+ drivers/staging/spectra/lld_emu.h | 51
+ drivers/staging/spectra/lld_mtd.c | 687 +
+ drivers/staging/spectra/lld_mtd.h | 51
+ drivers/staging/spectra/lld_nand.c | 2616 +++++
+ drivers/staging/spectra/lld_nand.h | 131
+ drivers/staging/spectra/nand_regs.h | 619 +
+ drivers/staging/spectra/spectraswconfig.h | 82
+ drivers/usb/core/buffer.c | 10
+ drivers/usb/core/hub.c | 25
+ drivers/usb/core/usb.h | 1
+ drivers/usb/gadget/Kconfig | 4
+ drivers/usb/gadget/f_acm.c | 32
+ drivers/usb/gadget/langwell_udc.c | 1293 +-
+ drivers/usb/gadget/langwell_udc.h | 18
+ drivers/usb/gadget/serial.c | 31
+ drivers/usb/host/ehci-dbg.c | 144
+ drivers/usb/host/ehci-hcd.c | 57
+ drivers/usb/host/ehci-hub.c | 30
+ drivers/usb/host/ehci-langwell-pci.c | 269
+ drivers/usb/host/ehci-lpm.c | 83
+ drivers/usb/host/ehci-pci.c | 86
+ drivers/usb/host/ehci.h | 17
+ drivers/usb/otg/Kconfig | 28
+ drivers/usb/otg/Makefile | 2
+ drivers/usb/otg/langwell_otg.c | 2382 +++++
+ drivers/usb/otg/penwell_otg.c | 2321 ++++
+ drivers/watchdog/Kconfig | 10
+ drivers/watchdog/Makefile | 1
+ drivers/watchdog/intel_scu_watchdog.c | 633 +
+ drivers/watchdog/intel_scu_watchdog.h | 66
+ include/drm/drm_mode.h | 2
+ include/linux/bh1770glc.h | 39
+ include/linux/cy8ctmg110_pdata.h | 10
+ include/linux/i2c/cp_tm1217.h | 8
+ include/linux/i2c/tc35894xbg.h | 72
+ include/linux/intel_mid_dma.h | 86
+ include/linux/koski_hwid.h | 25
+ include/linux/leds-lp5523.h | 22
+ include/linux/mmc/host.h | 6
+ include/linux/pci.h | 2
+ include/linux/pci_ids.h | 7
+ include/linux/pti.h | 38
+ include/linux/serial_core.h | 7
+ include/linux/serial_mfd.h | 47
+ include/linux/serial_reg.h | 16
+ include/linux/sfi_processor.h | 74
+ include/linux/spi/dw_spi.h | 15
+ include/linux/spi/emp_modem.h | 13
+ include/linux/spi/ifx_gps.h | 9
+ include/linux/spi/ifx_modem.h | 13
+ include/linux/spi/intel_mid_ssp_spi.h | 51
+ include/linux/spi/intel_pmic_gpio.h | 15
+ include/linux/spi/opt_modem.h | 11
+ include/linux/spi/pw_spi3.h | 135
+ include/linux/spi/spi.h | 3
+ include/linux/tty.h | 3
+ include/linux/usb.h | 1
+ include/linux/usb/ehci_def.h | 23
+ include/linux/usb/hcd.h | 13
+ include/linux/usb/intel_mid_otg.h | 180
+ include/linux/usb/langwell_otg.h | 139
+ include/linux/usb/langwell_udc.h | 8
+ include/linux/usb/penwell_otg.h | 277
+ include/net/caif/caif_spi.h | 153
+ include/sound/intel_sst.h | 131
+ include/sound/intel_sst_ioctl.h | 435
+ include/sound/jack.h | 2
+ sound/pci/Kconfig | 19
+ sound/pci/Makefile | 3
+ sound/pci/sst/Makefile | 8
+ sound/pci/sst/intel_sst.c | 512 +
+ sound/pci/sst/intel_sst_app_interface.c | 1232 ++
+ sound/pci/sst/intel_sst_common.h | 618 +
+ sound/pci/sst/intel_sst_drv_interface.c | 492 +
+ sound/pci/sst/intel_sst_dsp.c | 486 +
+ sound/pci/sst/intel_sst_fw_ipc.h | 393
+ sound/pci/sst/intel_sst_ipc.c | 656 +
+ sound/pci/sst/intel_sst_pvt.c | 311
+ sound/pci/sst/intel_sst_stream.c | 575 +
+ sound/pci/sst/intel_sst_stream_encoded.c | 1273 ++
+ sound/pci/sst/intelmid.c | 1233 ++
+ sound/pci/sst/intelmid.h | 181
+ sound/pci/sst/intelmid_ctrl.c | 629 +
+ sound/pci/sst/intelmid_msic_control.c | 410
+ sound/pci/sst/intelmid_pvt.c | 174
+ sound/pci/sst/intelmid_snd_control.h | 114
+ sound/pci/sst/intelmid_v0_control.c | 771 +
+ sound/pci/sst/intelmid_v1_control.c | 1066 ++
+ sound/pci/sst/intelmid_v2_control.c | 1001 ++
+ 733 files changed, 292688 insertions(+), 1522 deletions(-)
+
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-bus-devices-i2c-isl29020
+@@ -0,0 +1,29 @@
++
++Where: /sys/bus/i2c/devices/.../sensing_range
++Date: April 2010
++Kernel Version: 2.6.36?
++Contact: alan.cox@intel.com
++Description: Reports the sensing range configured on the ISL29020
++ hardware (1/4/16/64000). Writing a value between 1 and 4
++ sets the sensing range to 1/4/16/64000.
++
++
++Where: /sys/bus/i2c/devices/.../power_state
++Date: April 2010
++Kernel Version: 2.6.36?
++Contact: alan.cox@intel.com
++Description: Write 1 to bring the device out of power saving mode and 0
++ to return it to power saving. When read returns 0 or 1
++ indicating the current power state.
++
++Where: /sys/bus/i2c/devices/.../lux_output
++Date: April 2010
++Kernel Version: 2.6.36?
++Contact: alan.cox@intel.com
++Description: Report the lux level being read by the sensor.
++
++
++
++
++
++
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-hm6352
+@@ -0,0 +1,21 @@
++Where: /sys/bus/i2c/devices/.../heading
++Date: April 2010
++Kernel Version: 2.6.36?
++Contact: alan.cox@intel.com
++Description: Reports the current heading from the compass as a floating
++ point value in degrees.
++
++Where: /sys/bus/i2c/devices/.../power_state
++Date: April 2010
++Kernel Version: 2.6.36?
++Contact: alan.cox@intel.com
++Description: Sets the power state of the device. 0 sets the device into
++ sleep mode, 1 wakes it up.
++
++Where: /sys/bus/i2c/devices/.../calibration
++Date: April 2010
++Kernel Version: 2.6.36?
++Contact: alan.cox@intel.com
++Description: Sets the calibration on or off (1 = on, 0 = off). See the
++ chip data sheet.
++
+--- /dev/null
++++ b/Documentation/leds-lp5523.txt
+@@ -0,0 +1,72 @@
++Kernel driver for lp5523
++========================
++
++* National Semiconductor LP5523 led driver chip
++* Datasheet: http://www.national.com/pf/LP/LP5523.html
++
++Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
++Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
++
++Description
++-----------
++LP5523 can drive up to 9 channels. Leds can be controlled directly via
++led class control interface. Channels have a generic names:
++lp5523:channelx where x is 0...8
++
++Chip provides 3 engines which can be control channels wihtout main CPU
++interaction. Details of the micro engine code can be found from the
++public data sheet. Leds can be muxed to different channels.
++
++Control interface for engines:
++x is 1 .. 3
++enginex_mode : disabled, load, run
++enginex_load : microcode load (visible only in load mode)
++enginex_leds : led mux control (visible only in load mode)
++
++cd /sys/class/leds/lp5523:channel2/device
++echo "load" > engine3_mode
++echo "9d80400004ff05ff437f0000" > engine3_load
++echo "111111111" > engine3_leds
++echo "run" > engine3_mode
++
++sysfs contains also selftest entry. It measures each channel
++voltage level and checks if it looks reasonable. Too high
++level means missing led and to low value means short circuit.
++
++Example platform data:
++
++static struct lp5523_led_config lp5523_led_config[] = {
++ {
++ .chan_nr = 0,
++ .led_current = 50,
++ },
++...
++ }, {
++ .chan_nr = 8,
++ .led_current = 50,
++ }
++};
++
++static int lp5523_setup(void)
++{
++ /* Setup HW resources */
++}
++
++static void lp5523_release(void)
++{
++ /* Release HW resources */
++}
++
++static void lp5523_enable(bool state)
++{
++ /* Control chip enable signal */
++}
++
++static struct lp5523_platform_data lp5523_platform_data = {
++ .led_config = lp5523_led_config,
++ .num_channels = ARRAY_SIZE(lp5523_led_config),
++ .clock_mode = LP5523_CLOCK_EXT,
++ .setup_resources = lp5523_setup,
++ .release_resources = lp5523_release,
++ .enable = lp5523_enable,
++};
+--- /dev/null
++++ b/Documentation/networking/caif/spi_porting.txt
+@@ -0,0 +1,208 @@
++- CAIF SPI porting -
++
++- CAIF SPI basics:
++
++Running CAIF over SPI needs some extra setup, owing to the nature of SPI.
++Two extra GPIOs have been added in order to negotiate the transfers
++ between the master and the slave. The minimum requirement for running
++CAIF over SPI is a SPI slave chip and two GPIOs (more details below).
++Please note that running as a slave implies that you need to keep up
++with the master clock. An overrun or underrun event is fatal.
++
++- CAIF SPI framework:
++
++To make porting as easy as possible, the CAIF SPI has been divided in
++two parts. The first part (called the interface part) deals with all
++generic functionality such as length framing, SPI frame negotiation
++and SPI frame delivery and transmission. The other part is the CAIF
++SPI slave device part, which is the module that you have to write if
++you want to run SPI CAIF on a new hardware. This part takes care of
++the physical hardware, both with regard to SPI and to GPIOs.
++
++- Implementing a CAIF SPI device:
++
++ - Functionality provided by the CAIF SPI slave device:
++
++ In order to implement a SPI device you will, as a minimum,
++ need to implement the following
++ functions:
++
++ int (*init_xfer) (struct cfspi_xfer * xfer, struct cfspi_dev *dev):
++
++ This function is called by the CAIF SPI interface to give
++ you a chance to set up your hardware to be ready to receive
++ a stream of data from the master. The xfer structure contains
++ both physical and logical adresses, as well as the total length
++ of the transfer in both directions.The dev parameter can be used
++ to map to different CAIF SPI slave devices.
++
++ void (*sig_xfer) (bool xfer, struct cfspi_dev *dev):
++
++ This function is called by the CAIF SPI interface when the output
++ (SPI_INT) GPIO needs to change state. The boolean value of the xfer
++ variable indicates whether the GPIO should be asserted (HIGH) or
++ deasserted (LOW). The dev parameter can be used to map to different CAIF
++ SPI slave devices.
++
++ - Functionality provided by the CAIF SPI interface:
++
++ void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
++
++ This function is called by the CAIF SPI slave device in order to
++ signal a change of state of the input GPIO (SS) to the interface.
++ Only active edges are mandatory to be reported.
++ This function can be called from IRQ context (recommended in order
++ not to introduce latency). The ifc parameter should be the pointer
++ returned from the platform probe function in the SPI device structure.
++
++ void (*xfer_done_cb) (struct cfspi_ifc *ifc);
++
++ This function is called by the CAIF SPI slave device in order to
++ report that a transfer is completed. This function should only be
++ called once both the transmission and the reception are completed.
++ This function can be called from IRQ context (recommended in order
++ not to introduce latency). The ifc parameter should be the pointer
++ returned from the platform probe function in the SPI device structure.
++
++ - Connecting the bits and pieces:
++
++ - Filling in the SPI slave device structure:
++
++ Connect the necessary callback functions.
++ Indicate clock speed (used to calculate toggle delays).
++ Chose a suitable name (helps debugging if you use several CAIF
++ SPI slave devices).
++ Assign your private data (can be used to map to your structure).
++
++ - Filling in the SPI slave platform device structure:
++ Add name of driver to connect to ("cfspi_sspi").
++ Assign the SPI slave device structure as platform data.
++
++- Padding:
++
++In order to optimize throughput, a number of SPI padding options are provided.
++Padding can be enabled independently for uplink and downlink transfers.
++Padding can be enabled for the head, the tail and for the total frame size.
++The padding needs to be correctly configured on both sides of the link.
++The padding can be changed via module parameters in cfspi_sspi.c or via
++the sysfs directory of the cfspi_sspi driver (before device registration).
++
++- CAIF SPI device template:
++
++/*
++ * Copyright (C) ST-Ericsson AB 2010
++ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
++ * License terms: GNU General Public License (GPL), version 2.
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <net/caif/caif_spi.h>
++
++MODULE_LICENSE("GPL");
++
++struct sspi_struct {
++ struct cfspi_dev sdev;
++ struct cfspi_xfer *xfer;
++};
++
++static struct sspi_struct slave;
++static struct platform_device slave_device;
++
++static irqreturn_t sspi_irq(int irq, void *arg)
++{
++ /* You only need to trigger on an edge to the active state of the
++ * SS signal. Once a edge is detected, the ss_cb() function should be
++ * called with the parameter assert set to true. It is OK
++ * (and even advised) to call the ss_cb() function in IRQ context in
++ * order not to add any delay. */
++
++ return IRQ_HANDLED;
++}
++
++static void sspi_complete(void *context)
++{
++ /* Normally the DMA or the SPI framework will call you back
++ * in something similar to this. The only thing you need to
++ * do is to call the xfer_done_cb() function, providing the pointer
++ * to the CAIF SPI interface. It is OK to call this function
++ * from IRQ context. */
++}
++
++static int sspi_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev)
++{
++ /* Store transfer info. For a normal implementation you should
++ * set up your DMA here and make sure that you are ready to
++ * receive the data from the master SPI. */
++
++ struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
++
++ sspi->xfer = xfer;
++
++ return 0;
++}
++
++void sspi_sig_xfer(bool xfer, struct cfspi_dev *dev)
++{
++ /* If xfer is true then you should assert the SPI_INT to indicate to
++ * the master that you are ready to recieve the data from the master
++ * SPI. If xfer is false then you should de-assert SPI_INT to indicate
++ * that the transfer is done.
++ */
++
++ struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
++}
++
++static void sspi_release(struct device *dev)
++{
++ /*
++ * Here you should release your SPI device resources.
++ */
++}
++
++static int __init sspi_init(void)
++{
++ /* Here you should initialize your SPI device by providing the
++ * necessary functions, clock speed, name and private data. Once
++ * done, you can register your device with the
++ * platform_device_register() function. This function will return
++ * with the CAIF SPI interface initialized. This is probably also
++ * the place where you should set up your GPIOs, interrupts and SPI
++ * resources. */
++
++ int res = 0;
++
++ /* Initialize slave device. */
++ slave.sdev.init_xfer = sspi_init_xfer;
++ slave.sdev.sig_xfer = sspi_sig_xfer;
++ slave.sdev.clk_mhz = 13;
++ slave.sdev.priv = &slave;
++ slave.sdev.name = "spi_sspi";
++ slave_device.dev.release = sspi_release;
++
++ /* Initialize platform device. */
++ slave_device.name = "cfspi_sspi";
++ slave_device.dev.platform_data = &slave.sdev;
++
++ /* Register platform device. */
++ res = platform_device_register(&slave_device);
++ if (res) {
++ printk(KERN_WARNING "sspi_init: failed to register dev.\n");
++ return -ENODEV;
++ }
++
++ return res;
++}
++
++static void __exit sspi_exit(void)
++{
++ platform_device_del(&slave_device);
++}
++
++module_init(sspi_init);
++module_exit(sspi_exit);
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5147,6 +5147,19 @@
+ F: drivers/sfi/
+ F: include/linux/sfi*.h
+
++SFI C STATE DRIVER
++M: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++M: Sujith Thomas <sujith.thomas@intel.com>
++S: Supported
++F: drivers/sfi/sfi_processor_idle.c
++F: drivers/sfi/sfi_processor_core.c
++
++SFI P STATE DRIVER
++M: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++S: Supported
++F: drivers/sfi/sfi_processor_perflib.c
++F: arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
++
+ SIMTEC EB110ATX (Chalice CATS)
+ P: Ben Dooks
+ M: Vincent Sanders <support@simtec.co.uk>
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -417,6 +417,14 @@
+ nor standard legacy replacement devices/features. e.g. Moorestown does
+ not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+
++config MRST_SPI_UART_BOOT_MSG
++ def_bool y
++ prompt "Moorestown SPI UART boot message"
++ depends on (X86_MRST && X86_32)
++ help
++ Enable this to see boot message during protected mode boot phase, such as
++ kernel decompression, BAUD rate is set at 115200 8n1
++
+ config X86_RDC321X
+ bool "RDC R-321x SoC"
+ depends on X86_32
+@@ -646,11 +654,11 @@
+ def_bool y if MRST
+ prompt "Langwell APB Timer Support" if X86_MRST
+ help
+- APB timer is the replacement for 8254, HPET on X86 MID platforms.
+- The APBT provides a stable time base on SMP
+- systems, unlike the TSC, but it is more expensive to access,
+- as it is off-chip. APB timers are always running regardless of CPU
+- C states, they are used as per CPU clockevent device when possible.
++ APB timer is the replacement for 8254, HPET on X86 MID platforms.
++ The APBT provides a stable time base on SMP
++ systems, unlike the TSC, but it is more expensive to access,
++ as it is off-chip. APB timers are always running regardless of CPU
++ C states, they are used as per CPU clockevent device when possible.
+
+ # Mark as embedded because too many people got it wrong.
+ # The code disables itself when not needed.
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -43,6 +43,10 @@
+ with klogd/syslogd or the X server. You should normally N here,
+ unless you want to debug such a crash.
+
++config X86_MRST_EARLY_PRINTK
++ bool "Early printk for MRST platform support"
++ depends on EARLY_PRINTK && X86_MRST
++
+ config EARLY_PRINTK_DBGP
+ bool "Early printk via EHCI debug port"
+ depends on EARLY_PRINTK && PCI
+--- a/arch/x86/include/asm/apb_timer.h
++++ b/arch/x86/include/asm/apb_timer.h
+@@ -55,7 +55,6 @@
+ extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
+ extern void apbt_setup_secondary_clock(void);
+ extern unsigned int boot_cpu_id;
+-extern int disable_apbt_percpu;
+
+ extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
+ extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -50,6 +50,7 @@
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
++extern unsigned int lapic_timer_frequency;
+
+ #ifdef CONFIG_SMP
+ extern void __inquire_remote_apic(int apicid);
+--- a/arch/x86/include/asm/fixmap.h
++++ b/arch/x86/include/asm/fixmap.h
+@@ -117,6 +117,10 @@
+ FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
+ FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
+ __end_of_permanent_fixed_addresses,
++
++#ifdef CONFIG_X86_MRST
++ FIX_LNW_VRTC,
++#endif
+ /*
+ * 256 temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+--- a/arch/x86/include/asm/gpio.h
++++ b/arch/x86/include/asm/gpio.h
+@@ -38,12 +38,9 @@
+ return __gpio_cansleep(gpio);
+ }
+
+-/*
+- * Not implemented, yet.
+- */
+ static inline int gpio_to_irq(unsigned int gpio)
+ {
+- return -ENOSYS;
++ return __gpio_to_irq(gpio);
+ }
+
+ static inline int irq_to_gpio(unsigned int irq)
+--- a/arch/x86/include/asm/intel_scu_ipc.h
++++ b/arch/x86/include/asm/intel_scu_ipc.h
+@@ -1,6 +1,12 @@
+ #ifndef _ASM_X86_INTEL_SCU_IPC_H_
+ #define _ASM_X86_INTEL_SCU_IPC_H_
+
++#define IPCMSG_VRTC 0xFA /* Set vRTC device */
++
++/* Command id associated with message IPCMSG_VRTC */
++#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
++#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
++
+ /* Read single register */
+ int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+
+@@ -28,20 +34,6 @@
+ /* Update single register based on the mask */
+ int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+-/*
+- * Indirect register read
+- * Can be used when SCCB(System Controller Configuration Block) register
+- * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+- */
+-int intel_scu_ipc_register_read(u32 addr, u32 *data);
+-
+-/*
+- * Indirect register write
+- * Can be used when SCCB(System Controller Configuration Block) register
+- * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+- */
+-int intel_scu_ipc_register_write(u32 addr, u32 data);
+-
+ /* Issue commands to the SCU with or without data */
+ int intel_scu_ipc_simple_command(int cmd, int sub);
+ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
+--- a/arch/x86/include/asm/mrst.h
++++ b/arch/x86/include/asm/mrst.h
+@@ -10,10 +10,63 @@
+ */
+ #ifndef _ASM_X86_MRST_H
+ #define _ASM_X86_MRST_H
++
++#include <linux/sfi.h>
++
+ extern int pci_mrst_init(void);
+ int __init sfi_parse_mrtc(struct sfi_table_header *table);
++extern struct sfi_rtc_table_entry sfi_mrtc_array[];
++
++/*
++ * Medfield is the follow-up of Moorestown, it combines two chip solution into
++ * one. Other than that it also added always-on and constant tsc and lapic
++ * timers. Medfield is the platform name, and the chip name is called Penwell
++ * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
++ * identified via MSRs.
++ */
++enum mrst_cpu_type {
++ MRST_CPU_CHIP_LINCROFT = 1,
++ MRST_CPU_CHIP_PENWELL,
++};
++
++extern enum mrst_cpu_type __mrst_cpu_chip;
++static enum mrst_cpu_type inline mrst_identify_cpu(void)
++{
++ return __mrst_cpu_chip;
++}
++
++enum mrst_timer_options {
++ MRST_TIMER_DEFAULT,
++ MRST_TIMER_APBT_ONLY,
++ MRST_TIMER_LAPIC_APBT,
++};
++
++enum {
++ /* 0 is "unknown" so that you can write if (!mrst_platform_id()) */
++ MRST_PLATFORM_LANFORD = 1,
++ MRST_PLATFORM_SHCDK,
++ MRST_PLATFORM_AAVA_SC,
++};
++extern int mrst_platform_id(void);
++
++extern enum mrst_timer_options mrst_timer_options;
++
++/*
++ * Penwell uses spread spectrum clock, so the freq number is not exactly
++ * the same as reported by MSR based on SDM.
++ */
++#define PENWELL_FSB_FREQ_83SKU 83200
++#define PENWELL_FSB_FREQ_100SKU 99840
+
+ #define SFI_MTMR_MAX_NUM 8
+ #define SFI_MRTC_MAX 8
+
++extern unsigned char hsu_dma_enable;
++
++extern struct console early_mrst_console;
++extern struct console early_hsu_console;
++extern void mrst_early_printk(const char *fmt, ...);
++
++extern void intel_scu_devices_create(void);
++extern void intel_scu_devices_destroy(void);
+ #endif /* _ASM_X86_MRST_H */
+--- /dev/null
++++ b/arch/x86/include/asm/vrtc.h
+@@ -0,0 +1,27 @@
++#ifndef _MRST_VRTC_H
++#define _MRST_VRTC_H
++
++#ifdef CONFIG_X86_MRST
++extern unsigned char vrtc_cmos_read(unsigned char reg);
++extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
++extern unsigned long vrtc_get_time(void);
++extern int vrtc_set_mmss(unsigned long nowtime);
++extern void vrtc_set_base(void __iomem *base);
++
++#define MRST_VRTC_PGOFFSET (0xc00)
++
++#else
++static inline unsigned char vrtc_cmos_read(unsigned char reg)
++{
++ return 0xff;
++}
++
++static inline void vrtc_cmos_write(unsigned char val, unsigned char reg)
++{
++ return;
++}
++#endif
++
++#define MRST_VRTC_MAP_SZ (1024)
++
++#endif
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -138,6 +138,7 @@
+ /**
+ * struct x86_platform_ops - platform specific runtime functions
+ * @calibrate_tsc: calibrate TSC
++ * @wallclock_init: init the wallclock device
+ * @get_wallclock: get time from HW clock like RTC etc.
+ * @set_wallclock: set time back to HW clock
+ * @is_untracked_pat_range exclude from PAT logic
+@@ -146,6 +147,7 @@
+ */
+ struct x86_platform_ops {
+ unsigned long (*calibrate_tsc)(void);
++ void (*wallclock_init)(void);
+ unsigned long (*get_wallclock)(void);
+ int (*set_wallclock)(unsigned long nowtime);
+ void (*iommu_shutdown)(void);
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -83,6 +83,7 @@
+ obj-$(CONFIG_KGDB) += kgdb.o
+ obj-$(CONFIG_VM86) += vm86_32.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
++obj-$(CONFIG_X86_MRST_EARLY_PRINTK) += mrst_earlyprintk.o
+
+ obj-$(CONFIG_HPET_TIMER) += hpet.o
+ obj-$(CONFIG_APB_TIMER) += apb_timer.o
+@@ -104,7 +105,7 @@
+ scx200-y += scx200_32.o
+
+ obj-$(CONFIG_OLPC) += olpc.o
+-obj-$(CONFIG_X86_MRST) += mrst.o
++obj-$(CONFIG_X86_MRST) += mrst.o vrtc.o
+
+ microcode-y := microcode_core.o
+ microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
+--- a/arch/x86/kernel/apb_timer.c
++++ b/arch/x86/kernel/apb_timer.c
+@@ -43,10 +43,11 @@
+
+ #include <asm/fixmap.h>
+ #include <asm/apb_timer.h>
++#include <asm/mrst.h>
+
+ #define APBT_MASK CLOCKSOURCE_MASK(32)
+ #define APBT_SHIFT 22
+-#define APBT_CLOCKEVENT_RATING 150
++#define APBT_CLOCKEVENT_RATING 110
+ #define APBT_CLOCKSOURCE_RATING 250
+ #define APBT_MIN_DELTA_USEC 200
+
+@@ -83,8 +84,6 @@
+ char name[10];
+ };
+
+-int disable_apbt_percpu __cpuinitdata;
+-
+ static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
+
+ #ifdef CONFIG_SMP
+@@ -195,29 +194,6 @@
+ };
+
+ /*
+- * if user does not want to use per CPU apb timer, just give it a lower rating
+- * than local apic timer and skip the late per cpu timer init.
+- */
+-static inline int __init setup_x86_mrst_timer(char *arg)
+-{
+- if (!arg)
+- return -EINVAL;
+-
+- if (strcmp("apbt_only", arg) == 0)
+- disable_apbt_percpu = 0;
+- else if (strcmp("lapic_and_apbt", arg) == 0)
+- disable_apbt_percpu = 1;
+- else {
+- pr_warning("X86 MRST timer option %s not recognised"
+- " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
+- arg);
+- return -EINVAL;
+- }
+- return 0;
+-}
+-__setup("x86_mrst_timer=", setup_x86_mrst_timer);
+-
+-/*
+ * start count down from 0xffff_ffff. this is done by toggling the enable bit
+ * then load initial load count to ~0.
+ */
+@@ -335,7 +311,7 @@
+ adev->num = smp_processor_id();
+ memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
+
+- if (disable_apbt_percpu) {
++ if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+ apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+ global_clock_event = &adev->evt;
+ printk(KERN_DEBUG "%s clockevent registered as global\n",
+@@ -429,7 +405,8 @@
+
+ static __init int apbt_late_init(void)
+ {
+- if (disable_apbt_percpu || !apb_timer_block_enabled)
++ if (mrst_timer_options == MRST_TIMER_LAPIC_APBT ||
++ !apb_timer_block_enabled)
+ return 0;
+ /* This notifier should be called after workqueue is ready */
+ hotcpu_notifier(apbt_cpuhp_notify, -20);
+@@ -450,6 +427,8 @@
+ int timer_num;
+ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
+
++ BUG_ON(!apbt_virt_address);
++
+ timer_num = adev->num;
+ pr_debug("%s CPU %d timer %d mode=%d\n",
+ __func__, first_cpu(*evt->cpumask), timer_num, mode);
+@@ -676,7 +655,7 @@
+ }
+ #ifdef CONFIG_SMP
+ /* kernel cmdline disable apb timer, so we will use lapic timers */
+- if (disable_apbt_percpu) {
++ if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+ printk(KERN_INFO "apbt: disabled per cpu timer\n");
+ return;
+ }
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -176,7 +176,7 @@
+ .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+ };
+
+-static unsigned int calibration_result;
++unsigned int lapic_timer_frequency;
+
+ static int lapic_next_event(unsigned long delta,
+ struct clock_event_device *evt);
+@@ -431,7 +431,7 @@
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+- __setup_APIC_LVTT(calibration_result,
++ __setup_APIC_LVTT(lapic_timer_frequency,
+ mode != CLOCK_EVT_MODE_PERIODIC, 1);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+@@ -598,6 +598,25 @@
+ long delta, deltatsc;
+ int pm_referenced = 0;
+
++ /**
++ * check if lapic timer has already been calibrated by platform
++ * specific routine, such as tsc calibration code. if so, we just fill
++ * in the clockevent structure and return.
++ */
++
++ if (lapic_timer_frequency) {
++ apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
++ lapic_timer_frequency);
++ lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
++ TICK_NSEC, lapic_clockevent.shift);
++ lapic_clockevent.max_delta_ns =
++ clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
++ lapic_clockevent.min_delta_ns =
++ clockevent_delta2ns(0xF, &lapic_clockevent);
++ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
++ return 0;
++ }
++
+ local_irq_disable();
+
+ /* Replace the global interrupt handler */
+@@ -639,12 +658,12 @@
+ lapic_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &lapic_clockevent);
+
+- calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
++ lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
+
+ apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
+ apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
+ apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
+- calibration_result);
++ lapic_timer_frequency);
+
+ if (cpu_has_tsc) {
+ apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
+@@ -655,13 +674,13 @@
+
+ apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
+ "%u.%04u MHz.\n",
+- calibration_result / (1000000 / HZ),
+- calibration_result % (1000000 / HZ));
++ lapic_timer_frequency / (1000000 / HZ),
++ lapic_timer_frequency % (1000000 / HZ));
+
+ /*
+ * Do a sanity check on the APIC calibration result
+ */
+- if (calibration_result < (1000000 / HZ)) {
++ if (lapic_timer_frequency < (1000000 / HZ)) {
+ local_irq_enable();
+ pr_warning("APIC frequency too slow, disabling apic timer\n");
+ return -1;
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -155,10 +155,8 @@
+ int node;
+ int i;
+
+- if (!legacy_pic->nr_legacy_irqs) {
+- nr_irqs_gsi = 0;
++ if (!legacy_pic->nr_legacy_irqs)
+ io_apic_irqs = ~0UL;
+- }
+
+ cfg = irq_cfgx;
+ count = ARRAY_SIZE(irq_cfgx);
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -576,6 +576,7 @@
+ if (c->extended_cpuid_level >= 0x80000007)
+ c->x86_power = cpuid_edx(0x80000007);
+
++ init_scattered_cpuid_features(c);
+ }
+
+ static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+@@ -731,7 +732,6 @@
+
+ get_model_name(c); /* Default name */
+
+- init_scattered_cpuid_features(c);
+ detect_nopl(c);
+ }
+
+--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
++++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
+@@ -10,6 +10,22 @@
+
+ comment "CPUFreq processor drivers"
+
++config X86_SFI_CPUFREQ
++ tristate "SFI Processor P-States driver"
++ depends on SFI_PROCESSOR_PM
++ select CPU_FREQ_TABLE
++ help
++ This driver adds a CPUFreq driver which utilizes the SFI
++ Processor Performance States.
++ This driver also supports Intel Enhanced Speedstep.
++
++ To compile this driver as a module, choose M here: the
++ module will be called sfi-cpufreq.
++
++ For details, take a look at <file:Documentation/cpu-freq/>.
++
++ If in doubt, say N.
++
+ config X86_PCC_CPUFREQ
+ tristate "Processor Clocking Control interface driver"
+ depends on ACPI && ACPI_PROCESSOR
+--- a/arch/x86/kernel/cpu/cpufreq/Makefile
++++ b/arch/x86/kernel/cpu/cpufreq/Makefile
+@@ -16,6 +16,7 @@
+ obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
+ obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
+ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
++obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
+ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
+ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
+ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+--- /dev/null
++++ b/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
+@@ -0,0 +1,607 @@
++/*
++ * sfi_cpufreq.c - sfi Processor P-States Driver
++ *
++ *
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Vishwesh M Rudramuni
++ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++/*
++ * This sfi Processor P-States Driver re-uses most part of the code available
++ * in acpi cpufreq driver.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/sched.h>
++#include <linux/cpufreq.h>
++#include <linux/compiler.h>
++#include <linux/dmi.h>
++#include <trace/events/power.h>
++#include <linux/slab.h>
++
++#include <linux/sfi.h>
++#include <linux/sfi_processor.h>
++
++#include <linux/io.h>
++#include <asm/msr.h>
++#include <asm/processor.h>
++#include <asm/cpufeature.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++
++#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
++ "sfi-cpufreq", msg)
++
++MODULE_AUTHOR("Vishwesh Rudramuni");
++MODULE_DESCRIPTION("SFI Processor P-States Driver");
++MODULE_LICENSE("GPL");
++#define SYSTEM_INTEL_MSR_CAPABLE 0x1
++#define INTEL_MSR_RANGE (0xffff)
++#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
++
++struct sfi_cpufreq_data {
++ struct sfi_processor_performance *sfi_data;
++ struct cpufreq_frequency_table *freq_table;
++ unsigned int max_freq;
++ unsigned int resume;
++ unsigned int cpu_feature;
++};
++
++static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
++static DEFINE_PER_CPU(struct aperfmperf, old_perf);
++
++/* sfi_perf_data is a pointer to percpu data. */
++static struct sfi_processor_performance *sfi_perf_data;
++
++static struct cpufreq_driver sfi_cpufreq_driver;
++
++static unsigned int sfi_pstate_strict;
++
++static int check_est_cpu(unsigned int cpuid)
++{
++ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
++
++ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
++ !cpu_has(cpu, X86_FEATURE_EST))
++ return 0;
++
++ return 1;
++}
++
++static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
++{
++ int i;
++ struct sfi_processor_performance *perf;
++
++ msr &= INTEL_MSR_RANGE;
++ perf = data->sfi_data;
++
++ for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
++ if (msr == perf->states[data->freq_table[i].index].control)
++ return data->freq_table[i].frequency;
++ }
++ return data->freq_table[0].frequency;
++}
++
++
++struct msr_addr {
++ u32 reg;
++};
++
++struct drv_cmd {
++ unsigned int type;
++ const struct cpumask *mask;
++ struct msr_addr msr;
++ u32 val;
++};
++
++/* Called via smp_call_function_single(), on the target CPU */
++static void do_drv_read(void *_cmd)
++{
++ struct drv_cmd *cmd = _cmd;
++ u32 h;
++
++ rdmsr(cmd->msr.reg, cmd->val, h);
++}
++
++/* Called via smp_call_function_many(), on the target CPUs */
++static void do_drv_write(void *_cmd)
++{
++ struct drv_cmd *cmd = _cmd;
++ u32 lo, hi;
++
++ rdmsr(cmd->msr.reg, lo, hi);
++ lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
++ wrmsr(cmd->msr.reg, lo, hi);
++}
++
++static void drv_read(struct drv_cmd *cmd)
++{
++ cmd->val = 0;
++
++ smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
++}
++
++static void drv_write(struct drv_cmd *cmd)
++{
++ int this_cpu;
++
++ this_cpu = get_cpu();
++ if (cpumask_test_cpu(this_cpu, cmd->mask))
++ do_drv_write(cmd);
++ smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
++ put_cpu();
++}
++
++static u32 get_cur_val(const struct cpumask *mask)
++{
++ struct drv_cmd cmd;
++
++ if (unlikely(cpumask_empty(mask)))
++ return 0;
++
++ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
++ cmd.msr.reg = MSR_IA32_PERF_STATUS;
++ cmd.mask = mask;
++ drv_read(&cmd);
++
++ dprintk("get_cur_val = %u\n", cmd.val);
++
++ return cmd.val;
++}
++
++/* Called via smp_call_function_single(), on the target CPU */
++static void read_measured_perf_ctrs(void *_cur)
++{
++ struct aperfmperf *am = _cur;
++
++ get_aperfmperf(am);
++}
++
++/*
++ * Return the measured active (C0) frequency on this CPU since last call
++ * to this function.
++ * Input: cpu number
++ * Return: Average CPU frequency in terms of max frequency (zero on error)
++ *
++ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
++ * over a period of time, while CPU is in C0 state.
++ * IA32_MPERF counts at the rate of max advertised frequency
++ * IA32_APERF counts at the rate of actual CPU frequency
++ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
++ * no meaning should be associated with absolute values of these MSRs.
++ */
++static unsigned int get_measured_perf(struct cpufreq_policy *policy,
++ unsigned int cpu)
++{
++ struct aperfmperf perf;
++ unsigned long ratio;
++ unsigned int retval;
++
++ if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
++ return 0;
++
++ ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
++ per_cpu(old_perf, cpu) = perf;
++
++ retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
++
++ return retval;
++}
++
++static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
++ unsigned int freq;
++ unsigned int cached_freq;
++
++ dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
++
++ if (unlikely(data == NULL ||
++ data->sfi_data == NULL || data->freq_table == NULL)) {
++ return 0;
++ }
++
++ cached_freq = data->freq_table[data->sfi_data->state].frequency;
++ freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
++ if (freq != cached_freq) {
++ /*
++ * The dreaded BIOS frequency change behind our back.
++ * Force set the frequency on next target call.
++ */
++ data->resume = 1;
++ }
++
++ dprintk("cur freq = %u\n", freq);
++
++ return freq;
++}
++
++static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
++ struct sfi_cpufreq_data *data)
++{
++ unsigned int cur_freq;
++ unsigned int i;
++
++ for (i = 0; i < 100; i++) {
++ cur_freq = extract_freq(get_cur_val(mask), data);
++ if (cur_freq == freq)
++ return 1;
++ udelay(10);
++ }
++ return 0;
++}
++
++static int sfi_cpufreq_target(struct cpufreq_policy *policy,
++ unsigned int target_freq, unsigned int relation)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++ struct sfi_processor_performance *perf;
++ struct cpufreq_freqs freqs;
++ struct drv_cmd cmd;
++ unsigned int next_state = 0; /* Index into freq_table */
++ unsigned int next_perf_state = 0; /* Index into perf table */
++ unsigned int i;
++ int result = 0;
++
++ dprintk("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
++
++ if (unlikely(data == NULL ||
++ data->sfi_data == NULL || data->freq_table == NULL)) {
++ return -ENODEV;
++ }
++
++ perf = data->sfi_data;
++ result = cpufreq_frequency_table_target(policy,
++ data->freq_table,
++ target_freq,
++ relation, &next_state);
++ if (unlikely(result))
++ return -ENODEV;
++
++ next_perf_state = data->freq_table[next_state].index;
++ if (perf->state == next_perf_state) {
++ if (unlikely(data->resume)) {
++ dprintk("Called after resume, resetting to P%d\n",
++ next_perf_state);
++ data->resume = 0;
++ } else {
++ dprintk("Already at target state (P%d)\n",
++ next_perf_state);
++ return 0;
++ }
++ }
++
++ trace_power_frequency(POWER_PSTATE,
++ data->freq_table[next_state].frequency);
++
++ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
++ cmd.msr.reg = MSR_IA32_PERF_CTL;
++ cmd.val = (u32) perf->states[next_perf_state].control;
++
++ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
++ cmd.mask = policy->cpus;
++ else
++ cmd.mask = cpumask_of(policy->cpu);
++
++ freqs.old = perf->states[perf->state].core_frequency * 1000;
++ freqs.new = data->freq_table[next_state].frequency;
++ for_each_cpu(i, cmd.mask) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
++ }
++
++ drv_write(&cmd);
++
++ if (sfi_pstate_strict) {
++ if (!check_freqs(cmd.mask, freqs.new, data)) {
++ dprintk("sfi_cpufreq_target failed (%d)\n",
++ policy->cpu);
++ return -EAGAIN;
++ }
++ }
++
++ for_each_cpu(i, cmd.mask) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
++ }
++ perf->state = next_perf_state;
++
++ return result;
++}
++
++static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_verify\n");
++
++ return cpufreq_frequency_table_verify(policy, data->freq_table);
++}
++
++static void free_sfi_perf_data(void)
++{
++ unsigned int i;
++
++ /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
++ for_each_possible_cpu(i)
++ free_cpumask_var(per_cpu_ptr(sfi_perf_data, i)
++ ->shared_cpu_map);
++ free_percpu(sfi_perf_data);
++}
++
++/*
++ * sfi_cpufreq_early_init - initialize SFI P-States library
++ *
++ * Initialize the SFI P-States library (drivers/sfi/processor_perflib.c)
++ * in order to determine correct frequency and voltage pairings. We can
++ * do _PDC and _PSD and find out the processor dependency for the
++ * actual init that will happen later...
++ */
++static int __init sfi_cpufreq_early_init(void)
++{
++ int i, j;
++ struct sfi_processor *pr;
++
++ sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
++ if (!sfi_perf_data) {
++ dprintk("Memory allocation error for sfi_perf_data.\n");
++ return -ENOMEM;
++ }
++
++ for_each_possible_cpu(i) {
++ if (!zalloc_cpumask_var_node(
++ &per_cpu_ptr(sfi_perf_data, i)->shared_cpu_map,
++ GFP_KERNEL, cpu_to_node(i))) {
++
++ /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
++ free_sfi_perf_data();
++ return -ENOMEM;
++ }
++ }
++
++ /* _PSD & _PDC is not supported in SFI.Its just a placeholder.
++ * sfi_processor_preregister_performance(sfi_perf_data);
++ * TBD: We need to study what we need to do here
++ */
++ for_each_possible_cpu(i) {
++ pr = per_cpu(sfi_processors, i);
++ if (!pr /*|| !pr->performance*/)
++ continue;
++ for_each_possible_cpu(j) {
++ cpumask_set_cpu(j,
++ per_cpu_ptr(sfi_perf_data, i)->shared_cpu_map);
++ }
++ per_cpu_ptr(sfi_perf_data, i)->shared_type =
++ CPUFREQ_SHARED_TYPE_ALL;
++ }
++
++ return 0;
++}
++
++
++static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
++{
++ unsigned int i;
++ unsigned int valid_states = 0;
++ unsigned int cpu = policy->cpu;
++ struct sfi_cpufreq_data *data;
++ unsigned int result = 0;
++ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
++ struct sfi_processor_performance *perf;
++
++ dprintk("sfi_cpufreq_cpu_init CPU:%d\n", policy->cpu);
++
++ data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
++ per_cpu(drv_data, cpu) = data;
++
++ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
++ sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++
++
++ result = sfi_processor_register_performance(data->sfi_data, cpu);
++ if (result)
++ goto err_free;
++
++ perf = data->sfi_data;
++ policy->shared_type = perf->shared_type;
++
++ /*
++ * Will let policy->cpus know about dependency only when software
++ * coordination is required.
++ */
++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
++ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
++ cpumask_copy(policy->cpus, perf->shared_cpu_map);
++ }
++ cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
++
++ /* capability check */
++ if (perf->state_count <= 1) {
++ dprintk("No P-States\n");
++ result = -ENODEV;
++ goto err_unreg;
++ }
++
++ dprintk("HARDWARE addr space\n");
++ if (!check_est_cpu(cpu)) {
++ result = -ENODEV;
++ goto err_unreg;
++ }
++
++ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
++ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
++ (perf->state_count+1), GFP_KERNEL);
++ if (!data->freq_table) {
++ result = -ENOMEM;
++ goto err_unreg;
++ }
++
++ /* detect transition latency */
++ policy->cpuinfo.transition_latency = 0;
++ for (i = 0; i < perf->state_count; i++) {
++ if ((perf->states[i].transition_latency * 1000) >
++ policy->cpuinfo.transition_latency)
++ policy->cpuinfo.transition_latency =
++ perf->states[i].transition_latency * 1000;
++ }
++
++ data->max_freq = perf->states[0].core_frequency * 1000;
++ /* table init */
++ for (i = 0; i < perf->state_count; i++) {
++ if (i > 0 && perf->states[i].core_frequency >=
++ data->freq_table[valid_states-1].frequency / 1000)
++ continue;
++
++ data->freq_table[valid_states].index = i;
++ data->freq_table[valid_states].frequency =
++ perf->states[i].core_frequency * 1000;
++ valid_states++;
++ }
++ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
++ perf->state = 0;
++
++ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
++ if (result)
++ goto err_freqfree;
++
++ sfi_cpufreq_driver.get = get_cur_freq_on_cpu;
++ policy->cur = get_cur_freq_on_cpu(cpu);
++
++ /* Check for APERF/MPERF support in hardware */
++ if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
++ unsigned int ecx;
++ ecx = cpuid_ecx(6);
++ if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
++ sfi_cpufreq_driver.getavg = get_measured_perf;
++ }
++
++ dprintk("CPU%u - SFI performance management activated.\n", cpu);
++ for (i = 0; i < perf->state_count; i++)
++ dprintk(" %cP%d: %d MHz, %d uS\n",
++ (i == perf->state ? '*' : ' '), i,
++ (u32) perf->states[i].core_frequency,
++ (u32) perf->states[i].transition_latency);
++
++ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
++
++ /*
++ * the first call to ->target() should result in us actually
++ * writing something to the appropriate registers.
++ */
++ data->resume = 1;
++
++ return result;
++
++err_freqfree:
++ kfree(data->freq_table);
++err_unreg:
++ sfi_processor_unregister_performance(perf, cpu);
++err_free:
++ kfree(data);
++ per_cpu(drv_data, cpu) = NULL;
++
++ return result;
++}
++
++static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_cpu_exit\n");
++
++ if (data) {
++ cpufreq_frequency_table_put_attr(policy->cpu);
++ per_cpu(drv_data, policy->cpu) = NULL;
++ sfi_processor_unregister_performance(data->sfi_data,
++ policy->cpu);
++ kfree(data);
++ }
++
++ return 0;
++}
++
++static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_resume\n");
++
++ data->resume = 1;
++
++ return 0;
++}
++
++static struct freq_attr *sfi_cpufreq_attr[] = {
++ &cpufreq_freq_attr_scaling_available_freqs,
++ NULL,
++};
++
++static struct cpufreq_driver sfi_cpufreq_driver = {
++ .verify = sfi_cpufreq_verify,
++ .target = sfi_cpufreq_target,
++ .init = sfi_cpufreq_cpu_init,
++ .exit = sfi_cpufreq_cpu_exit,
++ .resume = sfi_cpufreq_resume,
++ .name = "sfi-cpufreq",
++ .owner = THIS_MODULE,
++ .attr = sfi_cpufreq_attr,
++};
++
++static int __init sfi_cpufreq_init(void)
++{
++ int ret;
++
++ dprintk("sfi_cpufreq_init\n");
++
++ ret = sfi_cpufreq_early_init();
++ if (ret)
++ return ret;
++
++ return cpufreq_register_driver(&sfi_cpufreq_driver);
++}
++
++static void __exit sfi_cpufreq_exit(void)
++{
++ dprintk("sfi_cpufreq_exit\n");
++
++ cpufreq_unregister_driver(&sfi_cpufreq_driver);
++
++ free_percpu(sfi_perf_data);
++
++ return;
++}
++
++module_param(sfi_pstate_strict, uint, 0644);
++MODULE_PARM_DESC(sfi_pstate_strict,
++ "value 0 or non-zero. non-zero -> strict sfi checks are "
++ "performed during frequency changes.");
++
++late_initcall(sfi_cpufreq_init);
++module_exit(sfi_cpufreq_exit);
++
++MODULE_ALIAS("sfi");
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -14,6 +14,7 @@
+ #include <xen/hvc-console.h>
+ #include <asm/pci-direct.h>
+ #include <asm/fixmap.h>
++#include <asm/mrst.h>
+ #include <asm/pgtable.h>
+ #include <linux/usb/ehci_def.h>
+
+@@ -239,6 +240,13 @@
+ if (!strncmp(buf, "xen", 3))
+ early_console_register(&xenboot_console, keep);
+ #endif
++#ifdef CONFIG_X86_MRST_EARLY_PRINTK
++ if (!strncmp(buf, "mrst", 4))
++ early_console_register(&early_mrst_console, keep);
++
++ if (!strncmp(buf, "hsu", 3))
++ early_console_register(&early_hsu_console, keep);
++#endif
+ buf++;
+ }
+ return 0;
+--- a/arch/x86/kernel/mrst.c
++++ b/arch/x86/kernel/mrst.c
+@@ -11,9 +11,29 @@
+ */
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/sfi.h>
+-#include <linux/irq.h>
++#include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/gpio.h>
++#include <linux/gpio_keys.h>
++#include <linux/input.h>
++#include <linux/platform_device.h>
++#include <linux/sfi.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/intel_pmic_gpio.h>
++#include <linux/spi/opt_modem.h>
++#include <linux/spi/emp_modem.h>
++#include <linux/spi/ifx_gps.h>
++#include <linux/spi/ifx_modem.h>
++#include <linux/spi/dw_spi.h>
++#include <linux/i2c/cp_tm1217.h>
++#include <linux/cy8ctmg110_pdata.h>
++#include <linux/i2c.h>
++#include <linux/i2c/pca953x.h>
++#include <linux/lis3lv02d.h>
++#include <linux/i2c/tc35894xbg.h>
++#include <linux/bh1770glc.h>
++#include <linux/leds-lp5523.h>
+
+ #include <asm/setup.h>
+ #include <asm/mpspec_def.h>
+@@ -23,16 +43,70 @@
+ #include <asm/mrst.h>
+ #include <asm/io.h>
+ #include <asm/i8259.h>
++#include <asm/intel_scu_ipc.h>
++#include <asm/reboot.h>
++#include <asm/intel_scu_ipc.h>
+ #include <asm/apb_timer.h>
++#include <asm/vrtc.h>
++
++/*
++ * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
++ * cmdline option x86_mrst_timer can be used to override the configuration
++ * to prefer one or the other.
++ * at runtime, there are basically three timer configurations:
++ * 1. per cpu apbt clock only
++ * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
++ * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
++ *
++ * by default (without cmdline option), platform code first detects cpu type
++ * to see if we are on lincroft or penwell, then set up both lapic or apbt
++ * clocks accordingly.
++ * i.e. by default, medfield uses configuration #2, moorestown uses #1.
++ * config #3 is supported but not recommended on medfield.
++ *
++ * rating and feature summary:
++ * lapic (with C3STOP) --------- 100
++ * apbt (always-on) ------------ 110
++ * lapic (always-on,ARAT) ------ 150
++ */
++
++__cpuinitdata enum mrst_timer_options mrst_timer_options;
+
+ static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
+ static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
++enum mrst_cpu_type __mrst_cpu_chip;
++EXPORT_SYMBOL_GPL(__mrst_cpu_chip);
++
+ int sfi_mtimer_num;
++EXPORT_SYMBOL_GPL(sfi_mtimer_num);
+
+ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+ EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+ int sfi_mrtc_num;
+
++static void sfi_handle_ipc_dev(struct platform_device *pdev);
++
++static int platformid = MRST_PLATFORM_SHCDK;
++
++int mrst_platform_id(void)
++{
++ /*TODO:read fid from SFI to override*/
++ return platformid;
++}
++EXPORT_SYMBOL_GPL(mrst_platform_id);
++
++static int __init parse_platformid(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++ if (!strcasecmp(arg, "aava"))
++ platformid = MRST_PLATFORM_AAVA_SC;
++ else if (!strcasecmp(arg, "cdk"))
++ platformid = MRST_PLATFORM_SHCDK;
++ return 0;
++}
++early_param("platformid", parse_platformid);
++
+ static inline void assign_to_mp_irq(struct mpc_intsrc *m,
+ struct mpc_intsrc *mp_irq)
+ {
+@@ -59,12 +133,17 @@
+ panic("Max # of irq sources exceeded!!\n");
+ }
+
++/* Temp use for registering watchdog_timer device as IPC dev, this
++ * declaration should be removed when watchdog is put into FW table */
++static void install_irq_resource(struct platform_device *pdev, int irq);
++
+ /* parse all the mtimer info to a static mtimer array */
+ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
+ {
+ struct sfi_table_simple *sb;
+ struct sfi_timer_table_entry *pentry;
+ struct mpc_intsrc mp_irq;
++ struct platform_device *pdev;
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+@@ -95,6 +174,20 @@
+ save_mp_irq(&mp_irq);
+ }
+
++ /*
++ * The last timer is used as watchdog timer, so register it with platform
++ * device IPC bus type.
++ *
++ * FIXME: IA FW will move watchdog timer to platform device table, we can
++ * remove this code after switching to the new IA FW.
++ */
++ pdev = platform_device_alloc("watchdog_timer", pentry->irq);
++ if (pdev == NULL) {
++ pr_err("out of memory for registering watchdog_timer\n");
++ return -1;
++ }
++ install_irq_resource(pdev, pentry->irq);
++ sfi_handle_ipc_dev(pdev);
+ return 0;
+ }
+
+@@ -119,6 +212,7 @@
+ }
+ return NULL;
+ }
++EXPORT_SYMBOL_GPL(sfi_get_mtmr);
+
+ void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
+ {
+@@ -167,25 +261,49 @@
+ return 0;
+ }
+
+-/*
+- * the secondary clock in Moorestown can be APBT or LAPIC clock, default to
+- * APBT but cmdline option can also override it.
+- */
+-static void __cpuinit mrst_setup_secondary_clock(void)
+-{
+- /* restore default lapic clock if disabled by cmdline */
+- if (disable_apbt_percpu)
+- return setup_secondary_APIC_clock();
+- apbt_setup_secondary_clock();
+-}
+-
+ static unsigned long __init mrst_calibrate_tsc(void)
+ {
+ unsigned long flags, fast_calibrate;
+
+- local_irq_save(flags);
+- fast_calibrate = apbt_quick_calibrate();
+- local_irq_restore(flags);
++ if (__mrst_cpu_chip == MRST_CPU_CHIP_PENWELL) {
++ u32 lo, hi, ratio, fsb;
++
++ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
++ pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
++ ratio = (hi >> 8) & 0x1f;
++ pr_debug("ratio is %d\n", ratio);
++ if (!ratio) {
++ pr_err("read a zero ratio, should be incorrect!\n");
++ pr_err("force tsc ratio to 16 ...\n");
++ ratio = 16;
++ }
++ rdmsr(MSR_FSB_FREQ, lo, hi);
++ if ((lo & 0x7) == 0x7)
++ fsb = PENWELL_FSB_FREQ_83SKU;
++ else
++ fsb = PENWELL_FSB_FREQ_100SKU;
++ fast_calibrate = ratio * fsb;
++ pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
++ lapic_timer_frequency = fsb * 1000 / HZ;
++ /* mark tsc clocksource as reliable */
++ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
++ } else {
++ /*
++ * TODO: calibrate lapic timer with apbt, if we use apbt only,
++ * there is no need to calibrate lapic timer, since they are
++ * not used.
++ * if we use lapic timers and apbt, the default calibration
++ * should work, since we have the global clockevent setup.
++ * but it would be more efficient if we combine the lapic timer
++ * with tsc calibration.
++ */
++ local_irq_save(flags);
++ fast_calibrate = apbt_quick_calibrate();
++ local_irq_restore(flags);
++ }
++
++ pr_info("tsc lapic calibration results %lu %d\n",
++ fast_calibrate, lapic_timer_frequency);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+@@ -196,30 +314,81 @@
+ void __init mrst_time_init(void)
+ {
+ sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
++ switch (mrst_timer_options) {
++ case MRST_TIMER_APBT_ONLY:
++ break;
++ case MRST_TIMER_LAPIC_APBT:
++ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
++ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
++ break;
++ default:
++ if (!boot_cpu_has(X86_FEATURE_ARAT))
++ break;
++ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
++ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
++ return;
++ }
++ /* we need at least one APB timer */
+ pre_init_apic_IRQ0();
+ apbt_time_init();
+ }
+
+ void __init mrst_rtc_init(void)
+ {
++ unsigned long rtc_paddr;
++ void __iomem *virt_base;
++
+ sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
++ if (!sfi_mrtc_num)
++ return;
++
++ rtc_paddr = sfi_mrtc_array[0].phys_addr;
++
++ /* vRTC's register address may not be page aligned */
++ set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr);
++
++ virt_base = (void __iomem *)__fix_to_virt(FIX_LNW_VRTC);
++ virt_base += rtc_paddr & ~PAGE_MASK;
++ vrtc_set_base(virt_base);
++
++ x86_platform.get_wallclock = vrtc_get_time;
++ x86_platform.set_wallclock = vrtc_set_mmss;
++}
++
++/* MID systems don't have i8042 controller */
++static int mrst_i8042_detect(void)
++{
++ return 0;
++}
++
++static void mrst_power_off(void)
++{
++ intel_scu_ipc_simple_command(0xf1, 1);
++}
++
++static void mrst_reboot(void)
++{
++ intel_scu_ipc_simple_command(0xf1, 0);
+ }
+
+ /*
+ * if we use per cpu apb timer, the bootclock already setup. if we use lapic
+ * timer and one apbt timer for broadcast, we need to set up lapic boot clock.
+ */
+-static void __init mrst_setup_boot_clock(void)
+-{
+- pr_info("%s: per cpu apbt flag %d \n", __func__, disable_apbt_percpu);
+- if (disable_apbt_percpu)
+- setup_boot_APIC_clock();
+-};
+-
+-/* MID systems don't have i8042 controller */
+-static int mrst_i8042_detect(void)
++void __cpuinit mrst_arch_setup(void)
+ {
+- return 0;
++ if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
++ __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
++ else if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x26)
++ __mrst_cpu_chip = MRST_CPU_CHIP_LINCROFT;
++ else {
++ pr_err("Unknown Moorestown CPU (%d:%d), default to Lincroft\n",
++ boot_cpu_data.x86, boot_cpu_data.x86_model);
++ __mrst_cpu_chip = MRST_CPU_CHIP_LINCROFT;
++ }
++ pr_debug("Moorestown CPU %s identified\n",
++ (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) ?
++ "Lincroft" : "Penwell");
+ }
+
+ /*
+@@ -232,21 +401,1201 @@
+ x86_init.resources.reserve_resources = x86_init_noop;
+
+ x86_init.timers.timer_init = mrst_time_init;
+- x86_init.timers.setup_percpu_clockev = mrst_setup_boot_clock;
++ x86_init.timers.setup_percpu_clockev = x86_init_noop;
+
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+
+- x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
++ x86_init.oem.arch_setup = mrst_arch_setup;
++
++ x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
+
+ x86_platform.calibrate_tsc = mrst_calibrate_tsc;
+ x86_platform.i8042_detect = mrst_i8042_detect;
++ x86_platform.wallclock_init = mrst_rtc_init;
++
+ x86_init.pci.init = pci_mrst_init;
+ x86_init.pci.fixup_irqs = x86_init_noop;
+
+ legacy_pic = &null_legacy_pic;
+
++ /* Moorestown specific power_off/restart method */
++ pm_power_off = mrst_power_off;
++ machine_ops.emergency_restart = mrst_reboot;
++
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
++ /*
++ * Give an estimated CPU frequency which will be used by early local
++ * APIC setup code prior to calibration. The true value will be set
++ * in tsc calibration later.
++ */
++ cpu_khz = 1500000;
++ set_bit(0, mp_bus_not_pci);
++}
++
++/*
++ * if user does not want to use per CPU apb timer, just give it a lower rating
++ * than local apic timer and skip the late per cpu timer init.
++ */
++static inline int __init setup_x86_mrst_timer(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ if (strcmp("apbt_only", arg) == 0)
++ mrst_timer_options = MRST_TIMER_APBT_ONLY;
++ else if (strcmp("lapic_and_apbt", arg) == 0)
++ mrst_timer_options = MRST_TIMER_LAPIC_APBT;
++ else {
++ pr_warning("X86 MRST timer option %s not recognised"
++ " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
++ arg);
++ return -EINVAL;
++ }
++ return 0;
++}
++__setup("x86_mrst_timer=", setup_x86_mrst_timer);
++
++/*
++ * Parsing GPIO table first, since the DEVS table will need this table
++ * to map the pin name to the actual pin.
++ */
++static struct sfi_gpio_table_entry *gpio_table;
++static int gpio_num_entry;
++
++static int __init sfi_parse_gpio(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_gpio_table_entry *pentry;
++ int num, i;
++
++ if (gpio_table)
++ return 0;
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
++ pentry = (struct sfi_gpio_table_entry *)sb->pentry;
++
++ gpio_table = (struct sfi_gpio_table_entry *)
++ kmalloc(num * sizeof(*pentry), GFP_KERNEL);
++ if (!gpio_table)
++ return -1;
++ memcpy(gpio_table, pentry, num * sizeof(*pentry));
++ gpio_num_entry = num;
++
++ pr_info("Moorestown GPIO pin info:\n");
++ for (i = 0; i < num; i++, pentry++)
++ pr_info("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
++ " pin = %d\n", i,
++ pentry->controller_name,
++ pentry->pin_name,
++ pentry->pin_no);
++ return 0;
++}
++
++static int get_gpio_by_name(const char *name)
++{
++ struct sfi_gpio_table_entry *pentry = gpio_table;
++ int i;
++
++ if (!pentry)
++ return -1;
++ for (i = 0; i < gpio_num_entry; i++, pentry++) {
++ if (!strncmp(name, pentry->pin_name, 16))
++ return pentry->pin_no;
++ }
++ return -1;
++}
++
++/*
++ * Here defines the array of devices platform data that IAFW would export
++ * through SFI "DEVS" table, we use name and type to match the device and
++ * its platform data.
++ */
++struct devs_id {
++ char name[17];
++ u8 type;
++ u8 delay;
++ void *(*get_platform_data)(void *info);
++};
++
++/* the offset for the mapping of global gpio pin to irq */
++#define MRST_IRQ_OFFSET 0x100
++
++static void *pmic_gpio_platform_data(void *info)
++{
++ static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
++ int gpio_base = get_gpio_by_name("pmic_gpio_base");
++
++ if (gpio_base == -1)
++ return NULL;
++ pmic_gpio_pdata.gpio_base = gpio_base;
++ pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET;
++ pmic_gpio_pdata.gpiointr = 0xffffeff8;
++
++ return &pmic_gpio_pdata;
++}
++
++static void *opt_modem_platform_data(void *info)
++{
++ struct spi_board_info *spi_info = (struct spi_board_info *)info;
++ static struct opt_modem_platform_data opt_modem_pdata;
++ int intr = get_gpio_by_name("opt_modem_int");
++ int wake = get_gpio_by_name("opt_modem_wak");
++ int dis = get_gpio_by_name("opt_modem_dis");
++ int rst = get_gpio_by_name("opt_modem_rst");
++
++ if (intr == -1 || wake == -1 || dis == -1 || rst == -1)
++ return NULL;
++ opt_modem_pdata.intr = intr;
++ spi_info->irq = intr + MRST_IRQ_OFFSET;
++ opt_modem_pdata.wake = wake;
++ opt_modem_pdata.dis = dis;
++ opt_modem_pdata.rst = rst;
++
++ return &opt_modem_pdata;
++}
++
++static void *emp_modem_platform_data(void *info)
++{
++ struct spi_board_info *spi_info = (struct spi_board_info *)info;
++ static struct emp_modem_platform_data emp_modem_pdata;
++ int intr = get_gpio_by_name("emp_modem_int");
++ int cwr = get_gpio_by_name("emp_modem_cwr");
++ int awr = get_gpio_by_name("emp_modem_awr");
++ int serven = get_gpio_by_name("emp_modem_serven");
++ int resout = get_gpio_by_name("emp_modem_resout");
++ int rst = get_gpio_by_name("emp_modem_rst");
++
++ if (intr == -1 || cwr == -1 || awr == -1 || serven == -1
++ || resout == -1 || rst == -1)
++ return NULL;
++ emp_modem_pdata.intr = intr;
++ spi_info->irq = intr + MRST_IRQ_OFFSET;
++ emp_modem_pdata.cwr = cwr;
++ emp_modem_pdata.awr = awr;
++ emp_modem_pdata.serven = serven;
++ emp_modem_pdata.resout = resout;
++ emp_modem_pdata.rst = rst;
++
++ return &emp_modem_pdata;
++}
++
++static void *ifx_mdm_platform_data(void *info)
++{
++ static struct ifx_modem_platform_data ifx_mdm_pdata;
++ int rst_out = get_gpio_by_name("ifx_mdm_rst_out");
++ int pwr_on = get_gpio_by_name("ifx_mdm_pwr_on");
++ int rst_pmu = get_gpio_by_name("ifx_mdm_rst_pmu");
++ int tx_pwr = get_gpio_by_name("ifx_mdm_tx_pwr");
++ int srdy = get_gpio_by_name("ifx_mdm_srdy");
++ int mrdy = get_gpio_by_name("ifx_mdm_mrdy");
++
++ if (rst_out == -1 || pwr_on == -1 || rst_pmu == -1 || tx_pwr == -1
++ || srdy == -1 || mrdy == -1)
++ return NULL;
++ ifx_mdm_pdata.rst_out = rst_out;
++ ifx_mdm_pdata.pwr_on = pwr_on;
++ ifx_mdm_pdata.rst_pmu = rst_pmu;
++ ifx_mdm_pdata.tx_pwr = tx_pwr;
++ ifx_mdm_pdata.srdy = srdy;
++ ifx_mdm_pdata.mrdy = mrdy;
++ return &ifx_mdm_pdata;
++}
++
++static void *ifx_gps_platform_data(void *info)
++{
++ static struct ifx_gps_platform_data ifx_gps_pdata;
++ int pd = get_gpio_by_name("ifx_gps_pd");
++ int rst = get_gpio_by_name("ifx_gps_rst");
++
++ if (pd == -1 || rst == -1)
++ return NULL;
++ ifx_gps_pdata.pd = pd;
++ ifx_gps_pdata.rst = rst;
++
++ return &ifx_gps_pdata;
++}
++
++static void *cp_tm1217_platform_data(void *info)
++{
++#define CPTM1217_GPIO_NO 0x3E
++ static struct cp_tm1217_platform_data cp_tm1217_pdata;
++ cp_tm1217_pdata.gpio = CPTM1217_GPIO_NO;
++ return &cp_tm1217_pdata;
++}
++
++static struct dw_spi_chip max3111_uart = {
++ .poll_mode = 1,
++ .enable_dma = 0,
++ .type = SPI_FRF_SPI,
++};
++
++static void *max3111_platform_data(void *info)
++{
++ static int dummy;
++ struct spi_board_info *spi_info = (struct spi_board_info *)info;
++ int intr = get_gpio_by_name("max3111_int");
++
++ if (intr == -1)
++ return NULL;
++ spi_info->irq = intr + MRST_IRQ_OFFSET;
++ spi_info->mode = SPI_MODE_0;
++ spi_info->controller_data = &max3111_uart;
++
++ /* we return a dummy pdata */
++ return &dummy;
++}
++
++/* we have multiple max7315 on the board ... */
++#define MAX7315_NUM 2
++static void *max7315_platform_data(void *info)
++{
++ static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
++ static int nr;
++ struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
++ struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
++ int gpio_base;
++ int intr;
++ char base_pin_name[17];
++ char intr_pin_name[17];
++
++ if (nr == MAX7315_NUM) {
++ printk(KERN_ERR "too many max7315s, we only support %d\n",
++ MAX7315_NUM);
++ return NULL;
++ }
++ /* we have several max7315 on the board, we only need load several
++ * instances of the same pca953x driver to cover them
++ */
++ strcpy(i2c_info->type, "max7315");
++ if (nr++) {
++ sprintf(base_pin_name, "max7315_%d_base", nr);
++ sprintf(intr_pin_name, "max7315_%d_int", nr);
++ } else {
++ strcpy(base_pin_name, "max7315_base");
++ strcpy(intr_pin_name, "max7315_int");
++ }
++
++ gpio_base = get_gpio_by_name(base_pin_name);
++ intr = get_gpio_by_name(intr_pin_name);
++
++ if (gpio_base == -1)
++ return NULL;
++ max7315->gpio_base = gpio_base;
++ if (intr != -1) {
++ i2c_info->irq = intr + MRST_IRQ_OFFSET;
++ max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
++ } else {
++ i2c_info->irq = -1;
++ max7315->irq_base = -1;
++ }
++ return max7315;
++}
++
++void *emc1403_platform_data(void *info)
++{
++ static short intr2nd_pdata;
++ struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
++ int intr = get_gpio_by_name("thermal_int");
++ int intr2nd = get_gpio_by_name("thermal_alert");
++
++ if (intr == -1 || intr2nd == -1)
++ return NULL;
++
++ i2c_info->irq = intr + MRST_IRQ_OFFSET;
++ intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
++
++ return &intr2nd_pdata;
++}
++
++static void *lis331dl_platform_data(void *info)
++{
++ static short intr2nd_pdata;
++ struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
++ int intr = get_gpio_by_name("accel_int");
++ int intr2nd = get_gpio_by_name("accel_2");
++
++ if (intr == -1 || intr2nd == -1)
++ return NULL;
++
++ i2c_info->irq = intr + MRST_IRQ_OFFSET;
++ intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
++
++ return &intr2nd_pdata;
++}
++
++#define BH1770GLC_GPIO_INT "bh1770glc_int"
++
++void *bh1770glc_platform_data_init(void *info)
++{
++ int intr;
++
++ static struct bh1770glc_platform_data prox_pdata = {
++ .leds = BH1770GLC_LED1,
++ .led_max_curr = BH1770GLC_LED_50mA,
++ .led_def_curr = {BH1770GLC_LED_50mA},
++ .setup_resources = NULL,
++ .release_resources = NULL,
++ };
++ struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
++
++ printk(KERN_DEBUG "Intialize BH1770GLC platform data\n");
++ intr = get_gpio_by_name(BH1770GLC_GPIO_INT);
++ if (intr < 0) {
++ printk(KERN_ERR "ERROR: No IRQ pin for BH1770GLC given in SFI\n");
++ return NULL;
++ }
++ i2c_info->irq = intr + MRST_IRQ_OFFSET;
++ return &prox_pdata;
++}
++
++#define CY8CTMG110_RESET_PIN_GPIO 43
++#define CY8CTMG110_IRQ_PIN_GPIO 59
++#define CY8CTMG110_TOUCH_IRQ 21
++
++void *cy8ctmg110_platform_data(void *info)
++{
++ static struct cy8ctmg110_pdata pdata;
++ struct i2c_board_info *i2c_info = info;
++ i2c_info->irq = CY8CTMG110_TOUCH_IRQ;
++ pdata.reset_pin = CY8CTMG110_RESET_PIN_GPIO;
++ pdata.irq_pin = CY8CTMG110_IRQ_PIN_GPIO;
++ return &pdata;
++}
++
++static void *wdt_platform_data(void *info)
++{
++ if (!sfi_mtimer_num) {
++ pr_err("No mtimer found as watchdog\n");
++ return NULL;
++ }
++ return &sfi_mtimer_array[sfi_mtimer_num - 1];
++}
++
++static void *no_platform_data(void *info)
++{
++ return NULL;
++}
++
++#define LP5523_GPIO_ENABLE "lp5523_enable"
++#define LP5523_NUM_CHANNELS (9)
++
++#define LP5523_CURRENT_CONTROL_5MA (0x32) /* 5mA - each value = 100uA */
++#define LP5523_CURRENT_CONTROL_NOT_IMPL (0x00)
++
++static struct lp5523_led_config led_config[LP5523_NUM_CHANNELS] = {
++ {1, LP5523_CURRENT_CONTROL_5MA},
++ {2, LP5523_CURRENT_CONTROL_5MA},
++ {3, LP5523_CURRENT_CONTROL_5MA},
++ {4, LP5523_CURRENT_CONTROL_5MA},
++ {5, LP5523_CURRENT_CONTROL_5MA},
++ {6, LP5523_CURRENT_CONTROL_5MA},
++ {7, LP5523_CURRENT_CONTROL_NOT_IMPL},
++ {8, LP5523_CURRENT_CONTROL_NOT_IMPL},
++ {9, LP5523_CURRENT_CONTROL_5MA}
++};
++static int lp5523_enable_gpio;
++
++static int lp5523_backlight_setup_resources(void)
++{
++ int err = 0;
++
++ /* Request GPIO for LP5523 ENABLE */
++ err = gpio_request(lp5523_enable_gpio, LP5523_GPIO_ENABLE);
++ if (err < 0) {
++ pr_info("Error: Failed to request LP5523 Enable Pin\n");
++ return err;
++ }
++
++ err = gpio_direction_output(lp5523_enable_gpio, 0);
++ if (err)
++ pr_info("Error: Failed to change direction of LP5523 pin\n");
++ return err;
++}
++
++static void lb5523_backlight_release_resources(void)
++{
++
++ gpio_free(lp5523_enable_gpio);
++ return;
++}
++
++static void lb5523_backlight_enable(bool state)
++{
++ if (state)
++ gpio_set_value(lp5523_enable_gpio, 1);
++ else
++ gpio_set_value(lp5523_enable_gpio, 0);
++ return;
++
++}
++
++void *lp5523_platform_data_init(void *info)
++{
++ static struct lp5523_platform_data pdata;
++
++ lp5523_enable_gpio = get_gpio_by_name(LP5523_GPIO_ENABLE);
++
++ pdata.led_config = led_config;
++ pdata.num_channels = LP5523_NUM_CHANNELS;
++ pdata.clock_mode = LP5523_CLOCK_AUTO;
++ pdata.enable = lb5523_backlight_enable;
++ pdata.release_resources = lb5523_backlight_release_resources;
++ pdata.setup_resources = lp5523_backlight_setup_resources;
++ return &pdata;
++}
++
++static void tc35894xbg_reset_ctrl(struct i2c_client *client, int value)
++{
++ struct tc35894xbg_platform_data *pdata = client->dev.platform_data;
++ if (pdata == NULL) {
++ printk(KERN_ERR "Missing platform data for TC35894XBG\n");
++ return;
++ }
+
++ printk(KERN_INFO "tc35894xbg_reset_ctrl: Set GPIO %d to %d\n",
++ pdata->gpio_reset, value);
++ if (pdata->gpio_reset != -1)
++ gpio_set_value(pdata->gpio_reset, value);
+ }
++
++static const unsigned short tc35894xbg_icdk_keymap[] = {
++ /* Row 0 */
++ KEY_1, KEY_2, KEY_3, KEY_4,
++ KEY_5, KEY_6, KEY_7, KEY_8,
++ /* Row 1 */
++ KEY_Q, KEY_W, KEY_E, KEY_R,
++ KEY_T, KEY_Y, KEY_U, KEY_I,
++ /* Row 2 */
++ KEY_A, KEY_S, KEY_D, KEY_F,
++ KEY_G, KEY_H, KEY_J, KEY_K,
++ /* Row 3 */
++ KEY_LEFTSHIFT, KEY_Z, KEY_X, KEY_C,
++ KEY_V, KEY_B, KEY_N, KEY_M,
++ /* Row 4 */
++ KEY_9, KEY_FN, KEY_LEFTALT, KEY_SPACE,
++ KEY_DELETE, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
++ /* Row 5 */
++ KEY_LEFTCTRL, KEY_O, KEY_L, KEY_UP,
++ KEY_0, KEY_P, KEY_BACKSPACE, KEY_ENTER,
++ KEY_RIGHTSHIFT,
++};
++
++static const unsigned short tc35894xbg_icdk_fnkeymap[] = {
++ /* Row 0 */
++ KEY_GRAVE, KEY_AT, KEY_NUMBER_SIGN, KEY_DOLLAR_SIGN,
++ KEY_PERCENT, KEY_NOR, KEY_AMPERSAND, KEY_KPASTERISK,
++ /* Row 1 */
++ KEY_BAR, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_BACKSLASH, KEY_SLASH, KEY_PLUS,
++ /* Row 2 */
++ KEY_TAB, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_APOSTROPHE, KEY_SEMICOLON, KEY_COLON, KEY_COMMA,
++ /* Row 3 */
++ KEY_CAPSLOCK, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_DOT, KEY_UNDERSCORE, KEY_EXCLAM, KEY_QUESTION,
++ /* Row 4 */
++ KEY_LEFTBRACE, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_PAGEDOWN, KEY_RESERVED,
++ /* Row 5 */
++ KEY_RESERVED, KEY_MINUS, KEY_QUOTE_DBL, KEY_PAGEUP,
++ KEY_RIGHTBRACE, KEY_EQUAL, KEY_RESERVED, KEY_RESERVED,
++ KEY_RIGHTSHIFT,
++ };
++
++/*
++ * Since there is no SYM key, we use a value that is not part
++ * of the key input file.
++ */
++#define KEY_SYM (0xFE)
++static const unsigned short tc35894xbg_ncdk_keymap[] = {
++ /* Row 0 */
++ KEY_RIGHTSHIFT, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_SPACE, KEY_RESERVED,
++ /* Row 1 */
++ KEY_LEFTSHIFT, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ /* Row 2 */
++ KEY_SYM, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ /* Row 3 */
++ KEY_FN, KEY_Z, KEY_X, KEY_O,
++ KEY_Y, KEY_B, KEY_T, KEY_P,
++ /* Row 4 */
++ KEY_LEFTCTRL, KEY_A, KEY_S, KEY_I,
++ KEY_K, KEY_COMMA, KEY_PAGEUP, KEY_ENTER,
++ /* Row 5 */
++ KEY_BACKSPACE, KEY_Q, KEY_D, KEY_U,
++ KEY_J, KEY_M, KEY_LEFT, KEY_AT,
++ /* Row 6 */
++ KEY_RESERVED, KEY_W, KEY_C, KEY_L,
++ KEY_H, KEY_N, KEY_RIGHT, KEY_F,
++ /* Row 7 */
++ KEY_RESERVED, KEY_E, KEY_V, KEY_QUESTION,
++ KEY_G, KEY_DOT, KEY_PAGEDOWN, KEY_R,
++
++};
++
++static const unsigned short tc35894xbg_ncdk_symkeymap[] = {
++ /* Row 0 */
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ /* Row 1 */
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ /* Row 2 */
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
++ /* Row 3 */
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_9,
++ KEY_6, KEY_RESERVED, KEY_5, KEY_0,
++ /* Row 4 */
++ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_8,
++ KEY_NUMBER_SIGN, KEY_SEMICOLON, KEY_RESERVED, KEY_RESERVED,
++ /* Row 5 */
++ KEY_RESERVED, KEY_1, KEY_RESERVED, KEY_7,
++ KEY_PLUS, KEY_RIGHTBRACE, KEY_RESERVED, KEY_SLASH,
++ /* Row 6 */
++ KEY_RESERVED, KEY_2, KEY_RESERVED, KEY_KPASTERISK,
++ KEY_MINUS, KEY_LEFTBRACE, KEY_RESERVED, KEY_RESERVED,
++ /* Row 7 */
++ KEY_RESERVED, KEY_3, KEY_RESERVED, KEY_EXCLAM,
++ KEY_AMPERSAND, KEY_COLON, KEY_RESERVED, KEY_4,
++};
++
++static struct tc35894xbg_platform_data tc35894xbg_ncdk_data = {
++ .debounce_time = 0xA3, /* Set to 9.68ms */
++ .settle_time = 0xA3, /* Set to 9.68ms */
++ .col_setting = 0x7F, /* col 8:0 */
++ .rowcol_setting = 0xFE, /* row 7:0 */
++ .keymap_size = 64,
++ .size_x = 8,
++ .size_y = 8,
++ .function_key = 24,
++ .right_shift_key = 0,
++ .n_keymaps = 2
++};
++
++static struct tc35894xbg_platform_data tc35894xbg_icdk_data = {
++ .debounce_time = 0xA3, /* Set to 9.68ms */
++ .settle_time = 0xA3, /* Set to 9.68ms */
++ .col_setting = 0xFF, /* col 7:0 */
++ .rowcol_setting = 0xFF, /* row 5:0 */
++ .keymap_size = 49,
++ .size_x = 6,
++ .size_y = 8,
++ .function_key = 33,
++ .right_shift_key = 48
++};
++
++static void *tc35894xbg_platform_data(struct i2c_board_info *i2c_info,
++ const unsigned short *keymap0, int keymap0sz,
++ const unsigned short *keymap1, int keymap1sz,
++ struct tc35894xbg_platform_data* tc_data)
++
++{
++ tc_data->gpio_irq = get_gpio_by_name("keypad-intr");
++ tc_data->gpio_reset = get_gpio_by_name("keypad-reset");
++
++ printk(KERN_INFO "Nokia: keypad-intr on %d, keypad-reset on %d\n",
++ tc_data->gpio_irq, tc_data->gpio_reset);
++
++ if ((tc_data->gpio_irq == -1 || tc_data->gpio_reset == -1)) {
++ printk(KERN_ERR "Missing GPIOs for TC35894XBG\n");
++ return NULL;
++ }
++
++ i2c_info->irq = tc_data->gpio_irq + MRST_IRQ_OFFSET;
++
++ memcpy(&tc_data->keymap[0], keymap0, keymap0sz);
++ memcpy(&tc_data->keymap[1], keymap1, keymap1sz);
++
++ tc_data->reset_ctrl = tc35894xbg_reset_ctrl;
++
++ return tc_data;
++}
++
++void *tc35894xbg_i_platform_data(void *info)
++{
++ return tc35894xbg_platform_data((struct i2c_board_info *)info,
++ tc35894xbg_icdk_keymap, sizeof(tc35894xbg_icdk_keymap),
++ tc35894xbg_icdk_fnkeymap, sizeof(tc35894xbg_icdk_fnkeymap),
++ &tc35894xbg_icdk_data);
++}
++
++void *tc35894xbg_n_platform_data(void *info)
++{
++ return tc35894xbg_platform_data((struct i2c_board_info*)info,
++ tc35894xbg_ncdk_keymap, sizeof(tc35894xbg_ncdk_keymap),
++ tc35894xbg_ncdk_symkeymap, sizeof(tc35894xbg_ncdk_symkeymap),
++ &tc35894xbg_ncdk_data);
++}
++
++static const struct devs_id device_ids[] = {
++ {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
++ {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
++ {"watchdog_timer", SFI_DEV_TYPE_IPC, 1, &wdt_platform_data},
++ {"spi_opt_modem", SFI_DEV_TYPE_SPI, 0, &opt_modem_platform_data},
++ {"spi_emp_modem", SFI_DEV_TYPE_SPI, 0, &emp_modem_platform_data},
++ {"spi_ifx_modem", SFI_DEV_TYPE_SPI, 0, &ifx_mdm_platform_data},
++ {"spi_ifx_gps", SFI_DEV_TYPE_SPI, 0, &ifx_gps_platform_data},
++ {"cptm1217", SFI_DEV_TYPE_I2C, 0, &cp_tm1217_platform_data},
++ {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
++ {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
++ {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
++ {"i2c_thermal", SFI_DEV_TYPE_I2C, 0, &emc1403_platform_data},
++ {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
++ {"cy8ctmg110", SFI_DEV_TYPE_I2C, 0, &cy8ctmg110_platform_data},
++ {"aava-max3107", SFI_DEV_TYPE_SPI, 1, &no_platform_data},
++ {"pmic_audio", SFI_DEV_TYPE_SPI, 1, &no_platform_data},
++ {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
++ {"msic_audio", SFI_DEV_TYPE_SPI, 1, &no_platform_data},
++ {"msic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
++ {"i2c_TC35894-nEB1", SFI_DEV_TYPE_I2C, 0, &tc35894xbg_n_platform_data},
++ {"i2c_TC35894-i", SFI_DEV_TYPE_I2C, 0, &tc35894xbg_i_platform_data},
++ {"bh1770glc", SFI_DEV_TYPE_I2C, 0, &bh1770glc_platform_data_init},
++ {"lp5523", SFI_DEV_TYPE_I2C, 0, &lp5523_platform_data_init},
++ {},
++};
++
++#define MAX_IPCDEVS 20
++static struct platform_device *ipc_devs[MAX_IPCDEVS];
++static int ipc_next_dev;
++
++#define MAX_DELAYED_SPI 20
++static struct spi_board_info *spi_devs[MAX_DELAYED_SPI];
++static int spi_next_dev;
++
++#define MAX_DELAYED_I2C 20
++static struct i2c_board_info *i2c_devs[MAX_DELAYED_SPI];
++static int i2c_bus[MAX_DELAYED_I2C];
++static int i2c_next_dev;
++
++static void intel_scu_device_register(struct platform_device *pdev)
++{
++ BUG_ON(ipc_next_dev == MAX_IPCDEVS);
++ ipc_devs[ipc_next_dev++] = pdev;
++}
++
++static void intel_delayed_spi_device_register(struct spi_board_info *sdev)
++{
++ struct spi_board_info *new_dev;
++
++ BUG_ON(spi_next_dev == MAX_DELAYED_SPI);
++
++ new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
++ if (!new_dev) {
++ pr_err("MRST: fail to alloc mem for delayed spi dev %s\n",
++ sdev->modalias);
++ return;
++ }
++ memcpy(new_dev, sdev, sizeof(*sdev));
++
++ spi_devs[spi_next_dev++] = new_dev;
++}
++
++static void intel_delayed_i2c_device_register(int bus,
++ struct i2c_board_info *idev)
++{
++ struct i2c_board_info *new_dev;
++
++ BUG_ON(i2c_next_dev == MAX_DELAYED_I2C);
++
++ new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
++ if (!new_dev) {
++ pr_err("MRST: fail to alloc mem for delayed i2c dev %s\n",
++ idev->type);
++ return;
++ }
++ memcpy(new_dev, idev, sizeof(*idev));
++
++ i2c_bus[i2c_next_dev] = bus;
++ i2c_devs[i2c_next_dev++] = new_dev;
++}
++
++/* Called by IPC driver */
++void intel_scu_devices_create(void)
++{
++ int i;
++
++ for (i = 0; i < ipc_next_dev; i++)
++ platform_device_add(ipc_devs[i]);
++
++ for (i = 0; i < spi_next_dev; i++) {
++ spi_register_board_info(spi_devs[i], 1);
++ kfree(spi_devs[i]);
++ }
++
++ for (i = 0; i < i2c_next_dev; i++) {
++ struct i2c_adapter *adapter;
++ struct i2c_client *client;
++
++ adapter = i2c_get_adapter(i2c_bus[i]);
++ if (adapter) {
++ client = i2c_new_device(adapter, i2c_devs[i]);
++ if (!client)
++ pr_err("mrst: can't create i2c device %s\n",
++ i2c_devs[i]->type);
++ } else
++ i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
++ kfree(i2c_devs[i]);
++ }
++}
++EXPORT_SYMBOL_GPL(intel_scu_devices_create);
++
++/* Called by IPC driver */
++void intel_scu_devices_destroy(void)
++{
++ int i;
++
++ for (i = 0; i < ipc_next_dev; i++)
++ platform_device_del(ipc_devs[i]);
++}
++EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
++
++static void install_irq_resource(struct platform_device *pdev, int irq)
++{
++ /* Single threaded */
++ static struct resource res = {
++ .name = "IRQ",
++ .flags = IORESOURCE_IRQ,
++ };
++ res.start = irq;
++ platform_device_add_resources(pdev, &res, 1);
++}
++
++static void sfi_handle_ipc_dev(struct platform_device *pdev)
++{
++ const struct devs_id *dev = device_ids;
++ void *pdata = NULL;
++
++ while (dev->name[0]) {
++ if (dev->type == SFI_DEV_TYPE_IPC &&
++ !strncmp(dev->name, pdev->name, 16)) {
++ pdata = dev->get_platform_data(pdev);
++ break;
++ }
++ dev++;
++ }
++ pdev->dev.platform_data = pdata;
++ intel_scu_device_register(pdev);
++}
++
++static int sfi_force_ipc(const char *name)
++{
++ static const char *to_force[] = {
++ "pmic_gpio",
++ "pmic_battery",
++ "pmic_touch",
++ "pmic_audio",
++ "msic_audio",
++ NULL
++ };
++ const char **p = &to_force[0];
++ while (*p != NULL) {
++ if (strcmp(*p, name) == 0)
++ return 1;
++ p++;
++ }
++ return 0;
++}
++
++static void sfi_handle_spi_dev(struct spi_board_info *spi_info)
++{
++ const struct devs_id *dev = device_ids;
++ void *pdata = NULL;
++
++ /* Older firmware lists some IPC devices as SPI. We want to force
++ these to be the correct type for Linux */
++ if (sfi_force_ipc(spi_info->modalias)) {
++ /* Allocate a platform device, and translate the device
++ configuration, then use the ipc helper. */
++ struct platform_device *pdev;
++
++ pdev = platform_device_alloc(spi_info->modalias,
++ spi_info->irq);
++ if (pdev == NULL) {
++ pr_err("out of memory for SFI platform device '%s'.\n",
++ spi_info->modalias);
++ return;
++ }
++ install_irq_resource(pdev, spi_info->irq);
++ sfi_handle_ipc_dev(pdev);
++ return;
++ }
++
++
++ while (dev->name[0]) {
++ if (dev->type == SFI_DEV_TYPE_SPI &&
++ !strncmp(dev->name, spi_info->modalias, 16)) {
++ pdata = dev->get_platform_data(spi_info);
++ break;
++ }
++ dev++;
++ }
++ spi_info->platform_data = pdata;
++ if (dev->delay)
++ intel_delayed_spi_device_register(spi_info);
++ else
++ spi_register_board_info(spi_info, 1);
++}
++
++static void sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info)
++{
++ const struct devs_id *dev = device_ids;
++ void *pdata = NULL;
++
++ while (dev->name[0]) {
++ if (dev->type == SFI_DEV_TYPE_I2C &&
++ !strncmp(dev->name, i2c_info->type, 16)) {
++ pdata = dev->get_platform_data(i2c_info);
++ break;
++ }
++ dev++;
++ }
++ i2c_info->platform_data = pdata;
++
++ if (dev->delay)
++ intel_delayed_i2c_device_register(bus, i2c_info);
++ else
++ i2c_register_board_info(bus, i2c_info, 1);
++ }
++
++
++static int __init sfi_parse_devs(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_device_table_entry *pentry;
++ struct spi_board_info spi_info;
++ struct i2c_board_info i2c_info;
++ struct platform_device *pdev;
++ int num, i, bus;
++ int ioapic;
++ struct io_apic_irq_attr irq_attr;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
++ pentry = (struct sfi_device_table_entry *)sb->pentry;
++
++ for (i = 0; i < num; i++, pentry++) {
++ if (pentry->irq != (u8)0xff) { /* native RTE case */
++ /* these SPI2 devices are not exposed to system as PCI
++ * devices, but they have separate RTE entry in IOAPIC
++ * so we have to enable them one by one here
++ */
++ ioapic = mp_find_ioapic(pentry->irq);
++ irq_attr.ioapic = ioapic;
++ irq_attr.ioapic_pin = pentry->irq;
++ irq_attr.trigger = 1;
++ irq_attr.polarity = 1;
++ io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
++ }
++ switch (pentry->type) {
++ case SFI_DEV_TYPE_IPC:
++ /* ID as IRQ is a hack that will go away */
++ pdev = platform_device_alloc(pentry->name, pentry->irq);
++ if (pdev == NULL) {
++ pr_err("out of memory for SFI platform device '%s'.\n",
++ pentry->name);
++ continue;
++ }
++ install_irq_resource(pdev, pentry->irq);
++ pr_info("info[%2d]: IPC bus, name = %16.16s, "
++ "irq = 0x%2x\n", i, pentry->name, pentry->irq);
++ sfi_handle_ipc_dev(pdev);
++ break;
++ case SFI_DEV_TYPE_SPI:
++ memset(&spi_info, 0, sizeof(spi_info));
++ strncpy(spi_info.modalias, pentry->name, 16);
++ spi_info.irq = pentry->irq;
++ spi_info.bus_num = pentry->host_num;
++ spi_info.chip_select = pentry->addr;
++ spi_info.max_speed_hz = pentry->max_freq;
++ pr_info("info[%2d]: SPI bus = %d, name = %16.16s, "
++ "irq = 0x%2x, max_freq = %d, cs = %d\n", i,
++ spi_info.bus_num,
++ spi_info.modalias,
++ spi_info.irq,
++ spi_info.max_speed_hz,
++ spi_info.chip_select);
++ sfi_handle_spi_dev(&spi_info);
++ break;
++ case SFI_DEV_TYPE_I2C:
++ memset(&i2c_info, 0, sizeof(i2c_info));
++ bus = pentry->host_num;
++ strncpy(i2c_info.type, pentry->name, 16);
++ i2c_info.irq = pentry->irq;
++ i2c_info.addr = pentry->addr;
++ pr_info("info[%2d]: I2C bus = %d, name = %16.16s, "
++ "irq = 0x%2x, addr = 0x%x\n", i, bus,
++ i2c_info.type,
++ i2c_info.irq,
++ i2c_info.addr);
++ sfi_handle_i2c_dev(bus, &i2c_info);
++ break;
++ case SFI_DEV_TYPE_UART:
++ case SFI_DEV_TYPE_HSI:
++ default:
++ ;
++ }
++ }
++ return 0;
++}
++
++#define MRST_SPI2_CS_START 4
++static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
++
++static int __init sfi_parse_spib(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_spi_table_entry *pentry;
++ struct spi_board_info *info;
++ int num, i, j;
++ int ioapic;
++ struct io_apic_irq_attr irq_attr;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_spi_table_entry);
++ pentry = (struct sfi_spi_table_entry *) sb->pentry;
++
++ info = kzalloc(num * sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ pr_info("%s(): Error in kzalloc\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (num)
++ pr_info("Moorestown SPI devices info:\n");
++
++ for (i = 0, j = 0; i < num; i++, pentry++) {
++ strncpy(info[j].modalias, pentry->name, 16);
++ info[j].irq = pentry->irq_info;
++ info[j].bus_num = pentry->host_num;
++ info[j].chip_select = pentry->cs;
++ info[j].max_speed_hz = 3125000; /* hard coded */
++ if (info[i].chip_select >= MRST_SPI2_CS_START) {
++ /* these SPI2 devices are not exposed to system as PCI
++ * devices, but they have separate RTE entry in IOAPIC
++ * so we have to enable them one by one here
++ */
++ ioapic = mp_find_ioapic(info[j].irq);
++ irq_attr.ioapic = ioapic;
++ irq_attr.ioapic_pin = info[j].irq;
++ irq_attr.trigger = 1;
++ irq_attr.polarity = 1;
++ io_apic_set_pci_routing(NULL, info[j].irq,
++ &irq_attr);
++ }
++ info[j].platform_data = pentry->dev_info;
++
++ if (!strcmp(pentry->name, "pmic_gpio")) {
++ memcpy(&pmic_gpio_pdata, pentry->dev_info, 8);
++ pmic_gpio_pdata.gpiointr = 0xffffeff8;
++ info[j].platform_data = &pmic_gpio_pdata;
++ }
++ pr_info("info[%d]: name = %16.16s, irq = 0x%04x, bus = %d, "
++ "cs = %d\n", j, info[j].modalias, info[j].irq,
++ info[j].bus_num, info[j].chip_select);
++ sfi_handle_spi_dev(&info[j]);
++ j++;
++ }
++ kfree(info);
++ return 0;
++}
++
++#define MRST_I2C_BUSNUM 3
++static struct pca953x_platform_data max7315_pdata;
++static struct pca953x_platform_data max7315_pdata_2;
++static struct lis3lv02d_platform_data lis3lv02d_pdata;
++
++static int __init sfi_parse_i2cb(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_i2c_table_entry *pentry;
++ struct i2c_board_info info;
++ int num, i, busnum;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_i2c_table_entry);
++ pentry = (struct sfi_i2c_table_entry *) sb->pentry;
++
++ if (num <= 0)
++ return -ENODEV;
++
++ for (i = 0; i < num; i++, pentry++) {
++ busnum = pentry->host_num;
++ if (busnum >= MRST_I2C_BUSNUM || busnum < 0)
++ continue;
++
++ memset(&info, 0, sizeof(info));
++ strncpy(info.type, pentry->name, 16);
++ info.irq = pentry->irq_info;
++ info.addr = pentry->addr;
++ info.platform_data = pentry->dev_info;
++
++ if (!strcmp(pentry->name, "i2c_max7315")) {
++ strcpy(info.type, "max7315");
++ max7315_pdata.irq_base = *(int *)pentry->dev_info;
++ max7315_pdata.gpio_base =
++ *((u32 *)pentry->dev_info + 1);
++ info.platform_data = &max7315_pdata;
++ } else if (!strcmp(pentry->name, "i2c_max7315_2")) {
++ strcpy(info.type, "max7315");
++ max7315_pdata_2.irq_base = *(int *)pentry->dev_info;
++ max7315_pdata_2.gpio_base =
++ *((u32 *)pentry->dev_info + 1);
++ info.platform_data = &max7315_pdata_2;
++ } else if (!strcmp(pentry->name, "i2c_accel")) {
++ strcpy(info.type, "lis3lv02d");
++ info.platform_data = &lis3lv02d_pdata;
++ } else if (!strcmp(pentry->name, "i2c_als")) {
++ strcpy(info.type, "isl29020");
++ info.platform_data = NULL;
++ } else if (!strcmp(pentry->name, "i2c_thermal")) {
++ strcpy(info.type, "emc1403");
++ info.platform_data = NULL;
++ } else if (!strcmp(pentry->name, "i2c_compass")) {
++ strcpy(info.type, "hmc6352");
++ info.platform_data = NULL;
++ }
++
++ pr_info("info[%d]: bus = %d, name = %16.16s, irq = 0x%04x, "
++ "addr = 0x%x\n", i, busnum, info.type,
++ info.irq, info.addr);
++
++ i2c_register_board_info(busnum, &info, 1);
++ }
++
++ return 0;
++}
++
++static int __init mrst_platform_init(void)
++{
++ /* Keep for back compatibility for SFI 0.7 and before */
++ sfi_table_parse(SFI_SIG_SPIB, NULL, NULL, sfi_parse_spib);
++ sfi_table_parse(SFI_SIG_I2CB, NULL, NULL, sfi_parse_i2cb);
++
++ /* For SFi 0.8 version */
++ sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
++ sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
++ return 0;
++}
++arch_initcall(mrst_platform_init);
++
++/*
++ * we will search these buttons in SFI GPIO table (by name)
++ * and register them dynamically. Please add all possible
++ * buttons here, we will shrink them if no GPIO found.
++ */
++static struct gpio_keys_button gpio_button[] = {
++ {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
++ {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
++ {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
++ {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
++ {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
++ {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
++ {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
++ {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
++ {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
++ {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
++ {-1},/* must be ended with code = -1 */
++};
++
++static struct gpio_keys_platform_data mrst_gpio_keys = {
++ .buttons = gpio_button,
++ .rep = 1,
++ .nbuttons = -1, /* will fill it after search */
++};
++
++static struct platform_device pb_device = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &mrst_gpio_keys,
++ },
++};
++
++static void __init pb_match(const char *name, int gpio)
++{
++ struct gpio_keys_button *gb = gpio_button;
++
++ while (gb->code != -1) {
++ if (!strcmp(name, gb->desc)) {
++ gb->gpio = gpio;
++ break;
++ }
++ gb++;
++ }
++}
++
++/* shrink non-existent buttons and return total available number */
++static int __init pb_shrink(void)
++{
++ struct gpio_keys_button *gb = gpio_button;
++ struct gpio_keys_button *next = NULL;
++ int num = 0;
++
++ while (gb->code != -1) {
++ if (gb->gpio == -1) {
++ if (!next)
++ next = gb + 1;
++ while (next->code != -1 && next->gpio == -1)
++ next++;
++ if (next->code == -1)/* end */
++ break;
++ *gb = *next;
++ next->gpio = -1;
++ next++;
++ }
++ num++;
++ gb++;
++ }
++ return num;
++}
++
++static int __init pb_keys_init(void)
++{
++ int num;
++ /* for SFI 0.7, we have to claim static gpio buttons */
++ if (!gpio_table) {
++ pb_match("power_btn", 65);
++ pb_match("prog_btn1", 66);
++ pb_match("prog_btn2", 69);
++ pb_match("lid_switch", 101);
++ } else {/* SFI 0.8 */
++ struct gpio_keys_button *gb = gpio_button;
++
++ while (gb->code != -1) {
++ gb->gpio = get_gpio_by_name(gb->desc);
++ gb++;
++ }
++ }
++ num = pb_shrink();
++ if (num) {
++ mrst_gpio_keys.nbuttons = num;
++ return platform_device_register(&pb_device);
++ }
++ return 0;
++}
++
++late_initcall(pb_keys_init);
++
++/* a 3 bits bit-map, from 0 to 7, default 0 */
++unsigned char hsu_dma_enable;
++EXPORT_SYMBOL_GPL(hsu_dma_enable);
++
++static int __init setup_hsu_dma_enable_flag(char *p)
++{
++ if (!p)
++ return -EINVAL;
++
++ hsu_dma_enable = (unsigned char)memparse(p, &p);
++ if (hsu_dma_enable & (~0x7))
++ return -EINVAL;
++
++ return 0;
++}
++early_param("hsu_dma", setup_hsu_dma_enable_flag);
++
++
+--- /dev/null
++++ b/arch/x86/kernel/mrst_earlyprintk.c
+@@ -0,0 +1,391 @@
++/*
++ * mrst_earlyprintk.c - spi-uart early printk for Intel Moorestown platform
++ *
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++
++#include <linux/serial_reg.h>
++#include <linux/serial_mfd.h>
++#include <linux/kmsg_dump.h>
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/io.h>
++
++#include <asm/fixmap.h>
++#include <asm/pgtable.h>
++#include <asm/mrst.h>
++
++#define MRST_SPI_TIMEOUT 0x200000
++#define MRST_REGBASE_SPI0 0xff128000
++#define MRST_REGBASE_SPI1 0xff128400
++#define MRST_CLK_SPI0_REG 0xff11d86c
++
++/* Bit fields in CTRLR0 */
++#define SPI_DFS_OFFSET 0
++
++#define SPI_FRF_OFFSET 4
++#define SPI_FRF_SPI 0x0
++#define SPI_FRF_SSP 0x1
++#define SPI_FRF_MICROWIRE 0x2
++#define SPI_FRF_RESV 0x3
++
++#define SPI_MODE_OFFSET 6
++#define SPI_SCPH_OFFSET 6
++#define SPI_SCOL_OFFSET 7
++#define SPI_TMOD_OFFSET 8
++#define SPI_TMOD_TR 0x0 /* xmit & recv */
++#define SPI_TMOD_TO 0x1 /* xmit only */
++#define SPI_TMOD_RO 0x2 /* recv only */
++#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
++
++#define SPI_SLVOE_OFFSET 10
++#define SPI_SRL_OFFSET 11
++#define SPI_CFS_OFFSET 12
++
++/* Bit fields in SR, 7 bits */
++#define SR_MASK 0x7f /* cover 7 bits */
++#define SR_BUSY (1 << 0)
++#define SR_TF_NOT_FULL (1 << 1)
++#define SR_TF_EMPT (1 << 2)
++#define SR_RF_NOT_EMPT (1 << 3)
++#define SR_RF_FULL (1 << 4)
++#define SR_TX_ERR (1 << 5)
++#define SR_DCOL (1 << 6)
++
++
++struct dw_spi_reg {
++ u32 ctrl0;
++ u32 ctrl1;
++ u32 ssienr;
++ u32 mwcr;
++ u32 ser;
++ u32 baudr;
++ u32 txfltr;
++ u32 rxfltr;
++ u32 txflr;
++ u32 rxflr;
++ u32 sr;
++ u32 imr;
++ u32 isr;
++ u32 risr;
++ u32 txoicr;
++ u32 rxoicr;
++ u32 rxuicr;
++ u32 msticr;
++ u32 icr;
++ u32 dmacr;
++ u32 dmatdlr;
++ u32 dmardlr;
++ u32 idr;
++ u32 version;
++ u32 dr; /* Currently oper as 32 bits,
++ though only low 16 bits matters */
++} __packed;
++
++#define dw_readl(dw, name) \
++ __raw_readl(&dw->name)
++#define dw_writel(dw, name, val) \
++ __raw_writel((val), &dw->name)
++
++/* default use SPI0 register for mrst, we will detect Penwell and use SPI1*/
++static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
++
++/* Always contains an accessible address, start with 0 */
++static u32 *pclk_spi0;
++static struct dw_spi_reg *pspi;
++static int mrst_spi_inited;
++static spinlock_t dw_lock;
++static int real_pgt_is_up;
++
++static struct kmsg_dumper dw_dumper;
++static int dumper_registered;
++
++static void dw_kmsg_dump(struct kmsg_dumper *dumper,
++ enum kmsg_dump_reason reason,
++ const char *s1, unsigned long l1,
++ const char *s2, unsigned long l2)
++{
++ int i;
++
++ /* When run to this, we'd better re-init the HW */
++ mrst_spi_inited = 0;
++
++ for (i = 0; i < l1; i++)
++ early_mrst_console.write(&early_mrst_console, s1 + i, 1);
++ for (i = 0; i < l2; i++)
++ early_mrst_console.write(&early_mrst_console, s2 + i, 1);
++}
++
++/*
++ * One trick for the early printk is that it could be called
++ * before and after the real page table is enabled for kernel,
++ * so the PHY IO registers should be mapped twice. And a flag
++ * "real_pgt_is_up" is used as an indicator
++ */
++static void early_mrst_spi_init(void)
++{
++ u32 ctrlr0 = 0;
++ u32 spi0_cdiv;
++ static u32 freq; /* freq info only need be searched once */
++
++ if (pspi && mrst_spi_inited)
++ return;
++
++ spin_lock_init(&dw_lock);
++
++ if (!freq) {
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRST_CLK_SPI0_REG);
++ pclk_spi0 = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (MRST_CLK_SPI0_REG & (PAGE_SIZE - 1)));
++
++ spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
++ freq = 100000000 / (spi0_cdiv + 1);
++ }
++
++ if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
++ mrst_spi_paddr = MRST_REGBASE_SPI1;
++
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, mrst_spi_paddr);
++ pspi = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (mrst_spi_paddr & (PAGE_SIZE - 1)));
++
++
++ /* disable SPI controller */
++ dw_writel(pspi, ssienr, 0);
++
++ /* set control param, 8 bits, transmit only mode */
++ ctrlr0 = dw_readl(pspi, ctrl0);
++
++ ctrlr0 &= 0xfcc0;
++ ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
++ | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
++ dw_writel(pspi, ctrl0, ctrlr0);
++
++	/* change the spi0 clk to comply with 115200 bps,
++	 * use 100000 as divisor to make the clock a little
++	 * slower than baud rate */
++ dw_writel(pspi, baudr, freq/100000);
++
++ /* disable all INT for early phase */
++ dw_writel(pspi, imr, 0x0);
++
++ /* set the cs to max3110 */
++ dw_writel(pspi, ser, 0x2);
++
++ /* enable the HW, the last step for HW init */
++ dw_writel(pspi, ssienr, 0x1);
++
++ mrst_spi_inited = 1;
++
++ /* register the kmsg dumper */
++ if (!dumper_registered) {
++ dw_dumper.dump = dw_kmsg_dump;
++ kmsg_dump_register(&dw_dumper);
++ dumper_registered = 1;
++ }
++}
++
++/* set the ratio rate, INT */
++static void max3110_write_config(void)
++{
++ u16 config;
++
++ /* 115200, TM not set, no parity, 8bit word */
++ config = 0xc001;
++ dw_writel(pspi, dr, config);
++}
++
++/* transfer char to an eligible word and send to max3110 */
++static void max3110_write_data(char c)
++{
++ u16 data;
++
++ data = 0x8000 | c;
++ dw_writel(pspi, dr, data);
++}
++
++/* slave select should be called in the read/write function */
++static int early_mrst_spi_putc(char c)
++{
++ unsigned int timeout;
++ u32 sr;
++
++ timeout = MRST_SPI_TIMEOUT;
++ /* early putc need make sure the TX FIFO is not full*/
++ while (timeout--) {
++ sr = dw_readl(pspi, sr);
++ if (!(sr & SR_TF_NOT_FULL))
++ cpu_relax();
++ else
++ break;
++ }
++
++ if (timeout == 0xffffffff) {
++ printk(KERN_INFO "SPI: waiting timeout\n");
++ return -1;
++ }
++
++ max3110_write_data(c);
++ return 0;
++}
++
++/* early SPI only use polling mode */
++static void early_mrst_spi_write(struct console *con,
++ const char *str, unsigned n)
++{
++ int i;
++ unsigned long flags;
++
++ if ((read_cr3() == __pa(swapper_pg_dir)) && !real_pgt_is_up) {
++ mrst_spi_inited = 0;
++ real_pgt_is_up = 1;
++ }
++
++ if (unlikely(!mrst_spi_inited)) {
++ early_mrst_spi_init();
++ max3110_write_config();
++ }
++
++ spin_lock_irqsave(&dw_lock, flags);
++ for (i = 0; i < n && *str; i++) {
++ if (*str == '\n')
++ early_mrst_spi_putc('\r');
++ early_mrst_spi_putc(*str);
++
++ str++;
++ }
++ spin_unlock_irqrestore(&dw_lock, flags);
++}
++
++struct console early_mrst_console = {
++ .name = "earlymrst",
++ .write = early_mrst_spi_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* a debug function */
++void mrst_early_printk(const char *fmt, ...)
++{
++ char buf[512];
++ int n;
++ va_list ap;
++
++ va_start(ap, fmt);
++ n = vscnprintf(buf, 512, fmt, ap);
++ va_end(ap);
++
++ early_mrst_console.write(&early_mrst_console, buf, n);
++}
++
++
++/* Will use HSU port2 for early console */
++static spinlock_t hsu_lock;
++static int hsu_inited;
++static void __iomem *phsu;
++#define HSU_PORT2_PADDR 0xffa28180
++
++static void early_hsu_init(void)
++{
++ u8 lcr;
++
++ if (phsu && hsu_inited)
++ return;
++
++ spin_lock_init(&hsu_lock);
++
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, HSU_PORT2_PADDR);
++ phsu = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (HSU_PORT2_PADDR & (PAGE_SIZE - 1)));
++
++ writeb(0x0, phsu + UART_FCR);
++
++ /* set to default 115200 */
++ lcr = readb(phsu + UART_LCR);
++ writeb((0x80 | lcr), phsu + UART_LCR);
++ writeb(0x18, phsu + UART_DLL);
++ writeb(lcr, phsu + UART_LCR);
++ writel(0x3600, phsu + UART_MUL*4);
++
++ writeb(0x8, phsu + UART_MCR);
++ writeb(0x7, phsu + UART_FCR);
++ writeb(0x3, phsu + UART_LCR);
++
++ /* clear IRQ status */
++ readb(phsu + UART_LSR);
++ readb(phsu + UART_RX);
++ readb(phsu + UART_IIR);
++ readb(phsu + UART_MSR);
++
++ writeb(0x7, phsu + UART_FCR);
++
++ hsu_inited = 1;
++}
++
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
++static void early_hsu_putc(char ch)
++{
++ unsigned int timeout = 10000; /* 10ms*/
++ u8 status;
++
++ while (timeout--) {
++ status = readb(phsu + UART_LSR);
++ if (status & BOTH_EMPTY)
++ break;
++
++ udelay(1);
++ }
++
++ if (timeout == 0xffffffff)
++ return;
++
++ writeb(ch, phsu + UART_TX);
++}
++
++static void early_hsu_write(struct console *con,
++ const char *str, unsigned n)
++{
++ int i;
++ unsigned long flags;
++
++ if (unlikely(!hsu_inited))
++ early_hsu_init();
++
++ spin_lock_irqsave(&hsu_lock, flags);
++ for (i = 0; i < n && *str; i++) {
++ if (*str == '\n')
++ early_hsu_putc('\r');
++ early_hsu_putc(*str);
++
++ str++;
++ }
++ spin_unlock_irqrestore(&hsu_lock, flags);
++}
++
++
++struct console early_hsu_console = {
++ .name = "earlyhsu",
++ .write = early_hsu_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++void hsu_early_printk(const char *fmt, ...)
++{
++	char buf[512];
++	int n;
++	va_list ap;
++
++	va_start(ap, fmt);
++	n = vscnprintf(buf, 512, fmt, ap);
++	va_end(ap);
++
++	early_hsu_console.write(&early_hsu_console, buf, n);
++}
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1062,6 +1062,8 @@
+ #endif
+ x86_init.oem.banner();
+
++ x86_platform.wallclock_init();
++
+ mcheck_init();
+ }
+
+--- /dev/null
++++ b/arch/x86/kernel/vrtc.c
+@@ -0,0 +1,100 @@
++/*
++ * vrtc.c: Driver for virtual RTC device on Intel MID platform
++ *
++ * (C) Copyright 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * VRTC is emulated by system controller firmware, the real HW
++ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
++ * in a memory mapped IO space that is visible to the host IA
++ * processor.
++ *
++ * This driver is based on RTC CMOS driver.
++ */
++
++#include <linux/kernel.h>
++
++#include <asm/vrtc.h>
++#include <asm/time.h>
++#include <asm/fixmap.h>
++
++static unsigned char __iomem *vrtc_virt_base;
++
++void vrtc_set_base(void __iomem *base)
++{
++ vrtc_virt_base = base;
++}
++
++unsigned char vrtc_cmos_read(unsigned char reg)
++{
++ unsigned char retval;
++
++ /* vRTC's registers range from 0x0 to 0xD */
++ if (reg > 0xd || !vrtc_virt_base)
++ return 0xff;
++
++ lock_cmos_prefix(reg);
++ retval = __raw_readb(vrtc_virt_base + (reg << 2));
++ lock_cmos_suffix(reg);
++ return retval;
++}
++EXPORT_SYMBOL(vrtc_cmos_read);
++
++void vrtc_cmos_write(unsigned char val, unsigned char reg)
++{
++ if (reg > 0xd || !vrtc_virt_base)
++ return;
++
++ lock_cmos_prefix(reg);
++ __raw_writeb(val, vrtc_virt_base + (reg << 2));
++ lock_cmos_suffix(reg);
++}
++EXPORT_SYMBOL(vrtc_cmos_write);
++
++unsigned long vrtc_get_time(void)
++{
++ u8 sec, min, hour, mday, mon;
++ u32 year;
++
++ while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
++ cpu_relax();
++
++ sec = vrtc_cmos_read(RTC_SECONDS);
++ min = vrtc_cmos_read(RTC_MINUTES);
++ hour = vrtc_cmos_read(RTC_HOURS);
++ mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
++ mon = vrtc_cmos_read(RTC_MONTH);
++ year = vrtc_cmos_read(RTC_YEAR);
++
++ /* vRTC YEAR reg contains the offset to 1960 */
++ year += 1960;
++
++ printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
++ "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
++
++ return mktime(year, mon, mday, hour, min, sec);
++}
++
++/* Only care about the minutes and seconds */
++int vrtc_set_mmss(unsigned long nowtime)
++{
++ int real_sec, real_min;
++ int vrtc_min;
++
++ vrtc_min = vrtc_cmos_read(RTC_MINUTES);
++
++ real_sec = nowtime % 60;
++ real_min = nowtime / 60;
++ if (((abs(real_min - vrtc_min) + 15)/30) & 1)
++ real_min += 30;
++ real_min %= 60;
++
++ vrtc_cmos_write(real_sec, RTC_SECONDS);
++ vrtc_cmos_write(real_min, RTC_MINUTES);
++ return 0;
++}
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -25,6 +25,7 @@
+ void __init x86_init_pgd_noop(pgd_t *unused) { }
+ int __init iommu_init_noop(void) { return 0; }
+ void iommu_shutdown_noop(void) { }
++void wallclock_init_noop(void) { }
+
+ /*
+ * The platform setup functions are preset with the default functions
+@@ -90,6 +91,7 @@
+
+ struct x86_platform_ops x86_platform = {
+ .calibrate_tsc = native_calibrate_tsc,
++ .wallclock_init = wallclock_init_noop,
+ .get_wallclock = mach_get_cmos_time,
+ .set_wallclock = mach_set_rtc_mmss,
+ .iommu_shutdown = iommu_shutdown_noop,
+--- a/arch/x86/pci/mrst.c
++++ b/arch/x86/pci/mrst.c
+@@ -204,6 +204,9 @@
+ u8 pin;
+ struct io_apic_irq_attr irq_attr;
+
++ if (!dev->irq)
++ return 0;
++
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+
+ /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
+@@ -248,10 +251,6 @@
+ u32 size;
+ int i;
+
+- /* Must have extended configuration space */
+- if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
+- return;
+-
+ /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
+ offset = fixed_bar_cap(dev->bus, dev->devfn);
+ if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -110,6 +110,19 @@
+ bool "Console on Blackfin JTAG"
+ depends on BFIN_JTAG_COMM=y
+
++config PTI_ROUTER
++ tristate "Trace data router for MIPI P1149.7 cJTAG standard"
++ depends on INTEL_MID_PTI
++ ---help---
++ The PTI router uses the Linux tty line discipline framework to route
+	  trace data coming from a HW Modem to a PTI (Parallel Trace Interface)
++ port. This is part of a solution for the MIPI P1149.7, compact JTAG,
++ standard, which is for debugging mobile devices. A PTI
++ driver is also needed for this solution.
++
++ You should select this driver if the target kernel is meant for
++ a mobile device containing a MIPI P1149.7 standard implementation.
++
+ config SERIAL_NONSTANDARD
+ bool "Non-standard serial port support"
+ depends on HAS_IOMEM
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -14,6 +14,7 @@
+ obj-y += misc.o
+ obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
+ obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
++obj-$(CONFIG_PTI_ROUTER) += ptirouter_ldisc.o
+ obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
+ obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
+ obj-$(CONFIG_AUDIT) += tty_audit.o
+--- /dev/null
++++ b/drivers/char/ptirouter_ldisc.c
+@@ -0,0 +1,202 @@
++/*
++ *  ptirouter_ldisc.c - PTI data router for JTAG data extraction
++ *
++ * Copyright (C) Intel 2010
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * The PTI router uses the Linux line discipline framework to route
++ * trace data coming from a HW Modem to a PTI (Parallel Trace Module) port.
++ * This is part of a solution for the P1149.7, compact JTAG, standard.
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/ioctl.h>
++#include <linux/tty.h>
++#include <linux/tty_ldisc.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/signal.h>
++#include <linux/slab.h>
++#include <asm-generic/bug.h>
++#include <linux/pti.h>
++
++/* Other ldisc drivers use 65536 which basically means,
++ * 'I can always accept 64k' and flow control is off.
++ * This number is deemed appropriate for this driver.
++ */
++
++#define RECEIVE_ROOM 65536
++#define DRIVERNAME "ptirouter_ldisc"
++
++/**
++ * ptirouter_ldisc_open() - Called when a tty is opened by a SW entity.
++ * @tty: terminal device to the ldisc.
++ *
++ * Return:
++ * 0 for success.
++ */
++
++static int ptirouter_ldisc_open(struct tty_struct *tty)
++{
++ tty->receive_room = RECEIVE_ROOM;
++ tty_driver_flush_buffer(tty);
++ return 0;
++}
++
++/**
++ * ptirouter_ldisc_close() - close connection
++ * @tty: terminal device to the ldisc.
++ *
++ * Called when a software entity wants to close a connection.
++ */
++static void ptirouter_ldisc_close(struct tty_struct *tty)
++{
++ tty_driver_flush_buffer(tty);
++}
++
++/**
++ * ptirouter_ldisc_read() - read request from user space
++ * @tty: terminal device passed into the ldisc.
++ * @file: pointer to open file object.
++ * @buf: pointer to the data buffer that gets eventually returned.
++ * @nr: number of bytes of the data buffer that is returned.
++ *
++ * function that allows read() functionality in userspace. By default if this
++ * is not implemented it returns -EIO. This module is functioning like a
++ * router via ptirouter_ldisc_receivebuf(), and there is no real requirement
++ * to implement this function. However, an error return value other than
++ * -EIO should be used just to show that there was an intent not to have
++ * this function implemented. Return value based on read() man pages.
++ *
++ * Return:
++ * -EINVAL
++ */
++ssize_t ptirouter_ldisc_read(struct tty_struct *tty, struct file *file,
++ unsigned char *buf, size_t nr) {
++ return -EINVAL;
++}
++
++/**
++ * ptirouter_ldisc_write() - Function that allows write() in userspace.
++ * @tty: terminal device passed into the ldisc.
++ * @file: pointer to open file object.
++ * @buf: pointer to the data buffer that gets eventually returned.
++ * @nr: number of bytes of the data buffer that is returned.
++ *
++ * By default if this is not implemented, it returns -EIO.
++ * This should not be implemented, ever, because
++ * 1. this driver is functioning like a router via
++ * ptirouter_ldisc_receivebuf()
++ * 2. No writes to HW will ever go through this line discpline driver.
++ * However, an error return value other than -EIO should be used
++ * just to show that there was an intent not to have this function
++ * implemented. Return value based on write() man pages.
++ *
++ * Return:
++ * -EINVAL
++ */
++ssize_t ptirouter_ldisc_write(struct tty_struct *tty, struct file *file,
++ const unsigned char *buf, size_t nr) {
++ return -EINVAL;
++}
++
++/**
++ * ptirouter_ldisc_receivebuf() - Routing function for driver.
++ * @tty: terminal device passed into the ldisc. It's assumed
++ * tty will never be NULL.
++ * @cp: buffer, block of characters to be eventually read by
++ * someone, somewhere (user read() call or some kernel function).
++ * @fp: flag buffer.
++ * @count: number of characters (aka, bytes) in cp.
++ *
++ * This function takes the input buffer, cp, and passes it to
++ * an external API function for processing.
++ */
++static void ptirouter_ldisc_receivebuf(struct tty_struct *tty,
++ const unsigned char *cp,
++ char *fp, int count)
++{
++ /* 71 is the master ID for modem messages */
++ /* Only channel 0 for now */
++ static struct masterchannel mc = {.master = 71, .channel = 0 };
++ mipi_pti_writedata((void *) &mc, (u8 *)cp, count);
++}
++
++/* Flush buffer is not implemented as the ldisc has no internal buffering
++ * so the tty_driver_flush_buffer() is sufficient for this driver's needs.
++ */
++
++static struct tty_ldisc_ops tty_ptirouter_ldisc = {
++ .owner = THIS_MODULE,
++ .magic = TTY_LDISC_MAGIC,
++ .name = DRIVERNAME,
++ .open = ptirouter_ldisc_open,
++ .close = ptirouter_ldisc_close,
++ .read = ptirouter_ldisc_read,
++ .write = ptirouter_ldisc_write,
++ .receive_buf = ptirouter_ldisc_receivebuf
++};
++
++/**
++ * ptirouter_ldisc_init - module initialisation
++ *
++ * Registers this module as a line discipline driver.
++ *
++ * Return:
++ * 0 for success, any other value error.
++ */
++static int __init ptirouter_ldisc_init(void)
++{
++ int retval;
++
++ /* Note N_PTIR is defined in linux/tty.h */
++ retval = tty_register_ldisc(N_PTIR, &tty_ptirouter_ldisc);
++ if (retval < 0)
++ pr_err("%s: Registration failed: %d\n",
++ __func__, retval);
++ return retval;
++}
++
++/**
++ * ptirouter_ldisc_exit - - module unload
++ *
++ * Removes this module as a line discipline driver.
++ */
++static void __exit ptirouter_ldisc_exit(void)
++{
++ int retval;
++
++ retval = tty_unregister_ldisc(N_PTIR);
++ if (retval < 0)
++ pr_err("%s: Unregistration failed: %d\n",
++ __func__, retval);
++}
++
++module_init(ptirouter_ldisc_init);
++module_exit(ptirouter_ldisc_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jay Freyensee");
++MODULE_ALIAS_LDISC(N_PTIR);
++MODULE_DESCRIPTION("PTI Router ldisc driver");
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -33,6 +33,19 @@
+
+ comment "DMA Devices"
+
++config INTEL_MID_DMAC
++ tristate "Intel MID DMA support for Peripheral DMA controllers"
++ depends on PCI && X86
++ select DMA_ENGINE
++ default n
++ help
++ Enable support for the Intel(R) MID DMA engine present
++ in Intel MID chipsets.
++
++ Say Y here if you have such a chipset.
++
++ If unsure, say N.
++
+ config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ bool
+
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -7,6 +7,7 @@
+
+ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+ obj-$(CONFIG_NET_DMA) += iovlock.o
++obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
+ obj-$(CONFIG_DMATEST) += dmatest.o
+ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+--- /dev/null
++++ b/drivers/dma/intel_mid_dma.c
+@@ -0,0 +1,1143 @@
++/*
++ * intel_mid_dma.c - Intel Langwell DMA Drivers
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * The driver design is based on dw_dmac driver
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/intel_mid_dma.h>
++
++#define MAX_CHAN 4 /*max ch across controllers*/
++#include "intel_mid_dma_regs.h"
++
++#define INTEL_MID_DMAC1_ID 0x0814
++#define INTEL_MID_DMAC2_ID 0x0813
++#define INTEL_MID_GP_DMAC2_ID 0x0827
++#define INTEL_MFLD_DMAC1_ID 0x0830
++#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
++#define LNW_PERIPHRAL_MASK_SIZE 0x10
++#define LNW_PERIPHRAL_STATUS 0x0
++#define LNW_PERIPHRAL_MASK 0x8
++
++struct intel_mid_dma_probe_info {
++ u8 max_chan;
++ u8 ch_base;
++ u16 block_size;
++ u32 pimr_mask;
++};
++
++#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
++ ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
++ .max_chan = (_max_chan), \
++ .ch_base = (_ch_base), \
++ .block_size = (_block_size), \
++ .pimr_mask = (_pimr_mask), \
++ })
++
++/*****************************************************************************
++Utility Functions*/
++/**
++ * get_ch_index - convert status to channel
++ * @status: status mask
++ * @base: dma ch base value
++ *
++ * Modify the status mask and return the channel index needing
++ * attention (or -1 if neither)
++ */
++static int get_ch_index(int *status, unsigned int base)
++{
++ int i;
++ for (i = 0; i < MAX_CHAN; i++) {
++ if (*status & (1 << (i + base))) {
++ *status = *status & ~(1 << (i + base));
++ pr_debug("MDMA: index %d New status %x\n", i, *status);
++ return i;
++ }
++ }
++ return -1;
++}
++
++/**
++ * get_block_ts - calculates dma transaction length
++ * @len: dma transfer length
++ * @tx_width: dma transfer src width
++ * @block_size: dma controller max block size
++ *
++ * Based on src width calculate the DMA trsaction length in data items
++ * return data items or FFFF if exceeds max length for block
++ */
++static int get_block_ts(int len, int tx_width, int block_size)
++{
++ int byte_width = 0, block_ts = 0;
++
++ switch (tx_width) {
++ case LNW_DMA_WIDTH_8BIT:
++ byte_width = 1;
++ break;
++ case LNW_DMA_WIDTH_16BIT:
++ byte_width = 2;
++ break;
++ case LNW_DMA_WIDTH_32BIT:
++ default:
++ byte_width = 4;
++ break;
++ }
++
++ block_ts = len/byte_width;
++ if (block_ts > block_size)
++ block_ts = 0xFFFF;
++ return block_ts;
++}
++
++/*****************************************************************************
++DMAC1 interrupt Functions*/
++
++/**
++ * dmac1_mask_periphral_intr - mask the periphral interrupt
++ * @midc: dma channel for which masking is required
++ *
++ * Masks the DMA periphral interrupt
++ * this is valid for DMAC1 family controllers only
++ * This controller should have periphral mask registers already mapped
++ */
++static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
++{
++ u32 pimr;
++ struct middma_device *mid = to_middma_device(midc->chan.device);
++
++ if (mid->pimr_mask) {
++ pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
++ pimr |= mid->pimr_mask;
++ writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
++ }
++ return;
++}
++
++/**
++ * dmac1_unmask_periphral_intr - unmask the periphral interrupt
++ * @midc: dma channel for which masking is required
++ *
++ * UnMasks the DMA periphral interrupt,
++ * this is valid for DMAC1 family controllers only
++ * This controller should have periphral mask registers already mapped
++ */
++static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
++{
++ u32 pimr;
++ struct middma_device *mid = to_middma_device(midc->chan.device);
++
++ if (mid->pimr_mask) {
++ pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
++ pimr &= ~mid->pimr_mask;
++ writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
++ }
++ return;
++}
++
++/**
++ * enable_dma_interrupt - enable the periphral interrupt
++ * @midc: dma channel for which enable interrupt is required
++ *
++ * Enable the DMA periphral interrupt,
++ * this is valid for DMAC1 family controllers only
++ * This controller should have periphral mask registers already mapped
++ */
++static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
++{
++ dmac1_unmask_periphral_intr(midc);
++
++ /*en ch interrupts*/
++ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
++ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
++ return;
++}
++
++/**
++ * disable_dma_interrupt - disable the periphral interrupt
++ * @midc: dma channel for which disable interrupt is required
++ *
++ * Disable the DMA periphral interrupt,
++ * this is valid for DMAC1 family controllers only
++ * This controller should have periphral mask registers already mapped
++ */
++static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
++{
++ /*Check LPE PISR, make sure fwd is disabled*/
++ dmac1_mask_periphral_intr(midc);
++ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
++ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
++ return;
++}
++
++/*****************************************************************************
++DMA channel helper Functions*/
++/**
++ * mid_desc_get - get a descriptor
++ * @midc: dma channel for which descriptor is required
++ *
++ * Obtain a descriptor for the channel. Returns NULL if none are free.
++ * Once the descriptor is returned it is private until put on another
++ * list or freed
++ */
++static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
++{
++ struct intel_mid_dma_desc *desc, *_desc;
++ struct intel_mid_dma_desc *ret = NULL;
++
++ spin_lock_bh(&midc->lock);
++ list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
++ if (async_tx_test_ack(&desc->txd)) {
++ list_del(&desc->desc_node);
++ ret = desc;
++ break;
++ }
++ }
++ spin_unlock_bh(&midc->lock);
++ return ret;
++}
++
++/**
++ * mid_desc_put - put a descriptor
++ * @midc: dma channel for which descriptor is required
++ * @desc: descriptor to put
++ *
++ * Return a descriptor from lwn_desc_get back to the free pool
++ */
++static void midc_desc_put(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *desc)
++{
++ if (desc) {
++ spin_lock_bh(&midc->lock);
++ list_add_tail(&desc->desc_node, &midc->free_list);
++ spin_unlock_bh(&midc->lock);
++ }
++}
++/**
++ * midc_dostart - begin a DMA transaction
++ * @midc: channel for which txn is to be started
++ * @first: first descriptor of series
++ *
++ * Load a transaction into the engine. This must be called with midc->lock
++ * held and bh disabled.
++ */
++static void midc_dostart(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *first)
++{
++ struct middma_device *mid = to_middma_device(midc->chan.device);
++
++ /* channel is idle */
++ if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
++ /*error*/
++ pr_err("ERR_MDMA: channel is busy in start\n");
++ /* The tasklet will hopefully advance the queue... */
++ return;
++ }
++
++ /*write registers and en*/
++ iowrite32(first->sar, midc->ch_regs + SAR);
++ iowrite32(first->dar, midc->ch_regs + DAR);
++ iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
++ iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
++ iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
++ iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
++ pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
++ (int)first->sar, (int)first->dar, first->cfg_hi,
++ first->cfg_lo, first->ctl_hi, first->ctl_lo);
++
++ iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
++ first->status = DMA_IN_PROGRESS;
++}
++
++/**
++ * midc_descriptor_complete - process completed descriptor
++ * @midc: channel owning the descriptor
++ * @desc: the descriptor itself
++ *
++ * Process a completed descriptor and perform any callbacks upon
++ * the completion. The completion handling drops the lock during the
++ * callbacks but must be called with the lock held.
++ */
++static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *desc)
++{
++ struct dma_async_tx_descriptor *txd = &desc->txd;
++ dma_async_tx_callback callback_txd = NULL;
++ void *param_txd = NULL;
++
++ midc->completed = txd->cookie;
++ callback_txd = txd->callback;
++ param_txd = txd->callback_param;
++
++ list_move(&desc->desc_node, &midc->free_list);
++
++ spin_unlock_bh(&midc->lock);
++ if (callback_txd) {
++ pr_debug("MDMA: TXD callback set ... calling\n");
++ callback_txd(param_txd);
++ spin_lock_bh(&midc->lock);
++ return;
++ }
++ spin_lock_bh(&midc->lock);
++
++}
++/**
++ * midc_scan_descriptors - check the descriptors in channel
++ * mark completed when tx is completete
++ * @mid: device
++ * @midc: channel to scan
++ *
++ * Walk the descriptor chain for the device and process any entries
++ * that are complete.
++ */
++static void midc_scan_descriptors(struct middma_device *mid,
++ struct intel_mid_dma_chan *midc)
++{
++ struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
++
++ /*tx is complete*/
++ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
++ if (desc->status == DMA_IN_PROGRESS) {
++ desc->status = DMA_SUCCESS;
++ midc_descriptor_complete(midc, desc);
++ }
++ }
++ return;
++}
++
++/*****************************************************************************
++DMA engine callback Functions*/
++/**
++ * intel_mid_dma_tx_submit - callback to submit DMA transaction
++ * @tx: dma engine descriptor
++ *
++ * Submit the DMA trasaction for this descriptor, start if ch idle
++ */
++static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
++ dma_cookie_t cookie;
++
++ spin_lock_bh(&midc->lock);
++ cookie = midc->chan.cookie;
++
++ if (++cookie < 0)
++ cookie = 1;
++
++ midc->chan.cookie = cookie;
++ desc->txd.cookie = cookie;
++
++
++ if (list_empty(&midc->active_list)) {
++ midc_dostart(midc, desc);
++ list_add_tail(&desc->desc_node, &midc->active_list);
++ } else {
++ list_add_tail(&desc->desc_node, &midc->queue);
++ }
++ spin_unlock_bh(&midc->lock);
++
++ return cookie;
++}
++
++/**
++ * intel_mid_dma_issue_pending - callback to issue pending txn
++ * @chan: chan where pending transaction needs to be checked and submitted
++ *
++ * Call for scan to issue pending descriptors
++ */
++static void intel_mid_dma_issue_pending(struct dma_chan *chan)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++
++ spin_lock_bh(&midc->lock);
++ if (!list_empty(&midc->queue))
++ midc_scan_descriptors(to_middma_device(chan->device), midc);
++ spin_unlock_bh(&midc->lock);
++}
++
++/**
++ * intel_mid_dma_tx_status - Return status of txn
++ * @chan: chan for where status needs to be checked
++ * @cookie: cookie for txn
++ * @txstate: DMA txn state
++ *
++ * Return status of DMA txn
++ */
++static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ struct dma_tx_state *txstate)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ dma_cookie_t last_used;
++ dma_cookie_t last_complete;
++ int ret;
++
++ last_complete = midc->completed;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++ if (ret != DMA_SUCCESS) {
++ midc_scan_descriptors(to_middma_device(chan->device), midc);
++
++ last_complete = midc->completed;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++ }
++
++ if (txstate) {
++ txstate->last = last_complete;
++ txstate->used = last_used;
++ txstate->residue = 0;
++ }
++ return ret;
++}
++
++/**
++ * intel_mid_dma_device_control - DMA device control
++ * @chan: chan for DMA control
++ * @cmd: control cmd
++ * @arg: cmd arg value
++ *
++ * Perform DMA control command
++ */
++static int intel_mid_dma_device_control(struct dma_chan *chan,
++ enum dma_ctrl_cmd cmd, unsigned long arg)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct middma_device *mid = to_middma_device(chan->device);
++ struct intel_mid_dma_desc *desc, *_desc;
++ LIST_HEAD(list);
++
++ if (cmd != DMA_TERMINATE_ALL)
++ return -ENXIO;
++
++ spin_lock_bh(&midc->lock);
++ if (midc->in_use == false) {
++ spin_unlock_bh(&midc->lock);
++ return 0;
++ }
++ list_splice_init(&midc->free_list, &list);
++ midc->descs_allocated = 0;
++ midc->slave = NULL;
++
++ /* Disable interrupts */
++ disable_dma_interrupt(midc);
++
++ spin_unlock_bh(&midc->lock);
++ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
++ pr_debug("MDMA: freeing descriptor %p\n", desc);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ return 0;
++}
++
++/**
++ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
++ * @chan: chan for DMA transfer
++ * @sgl: scatter gather list
++ * @sg_len: length of sg txn
++ * @direction: DMA transfer dirtn
++ * @flags: DMA flags
++ *
++ * Do DMA sg txn: NOT supported now
++ */
++static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
++ struct dma_chan *chan, struct scatterlist *sgl,
++ unsigned int sg_len, enum dma_data_direction direction,
++ unsigned long flags)
++{
++ /*not supported now*/
++ return NULL;
++}
++
++/**
++ * intel_mid_dma_prep_memcpy - Prep memcpy txn
++ * @chan: chan for DMA transfer
++ * @dest: destn address
++ * @src: src address
++ * @len: DMA transfer len
++ * @flags: DMA flags
++ *
++ * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
++ * The peripheral txn details should be filled in slave structure properly
++ * Returns the descriptor for this txn
++ */
++static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
++ struct dma_chan *chan, dma_addr_t dest,
++ dma_addr_t src, size_t len, unsigned long flags)
++{
++ struct intel_mid_dma_chan *midc;
++ struct intel_mid_dma_desc *desc = NULL;
++ struct intel_mid_dma_slave *mids;
++ union intel_mid_dma_ctl_lo ctl_lo;
++ union intel_mid_dma_ctl_hi ctl_hi;
++ union intel_mid_dma_cfg_lo cfg_lo;
++ union intel_mid_dma_cfg_hi cfg_hi;
++ enum intel_mid_dma_width width = 0;
++
++ pr_debug("MDMA: Prep for memcpy\n");
++ BUG_ON(!chan);
++ if (!len)
++ return NULL;
++
++ mids = chan->private;
++ BUG_ON(!mids);
++
++ midc = to_intel_mid_dma_chan(chan);
++ BUG_ON(!midc);
++
++ pr_debug("MDMA:called for DMA %x CH %d Length %d\n",
++ midc->dma->pci_id, midc->ch_id, len);
++ pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
++ mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
++
++ /*calculate CFG_LO*/
++ if (mids->hs_mode == LNW_DMA_SW_HS) {
++ cfg_lo.cfg_lo = 0;
++ cfg_lo.cfgx.hs_sel_dst = 1;
++ cfg_lo.cfgx.hs_sel_src = 1;
++ } else if (mids->hs_mode == LNW_DMA_HW_HS)
++ cfg_lo.cfg_lo = 0x00000;
++
++ /*calculate CFG_HI*/
++ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ /*SW HS only*/
++ cfg_hi.cfg_hi = 0;
++ } else {
++ cfg_hi.cfg_hi = 0;
++ if (midc->dma->pimr_mask) {
++ cfg_hi.cfgx.protctl = 0x0; /*default value*/
++ cfg_hi.cfgx.fifo_mode = 1;
++ if (mids->dirn == DMA_TO_DEVICE) {
++ cfg_hi.cfgx.src_per = 0;
++ if (mids->device_instance == 0)
++ cfg_hi.cfgx.dst_per = 3;
++ if (mids->device_instance == 1)
++ cfg_hi.cfgx.dst_per = 1;
++ } else if (mids->dirn == DMA_FROM_DEVICE) {
++ if (mids->device_instance == 0)
++ cfg_hi.cfgx.src_per = 2;
++ if (mids->device_instance == 1)
++ cfg_hi.cfgx.src_per = 0;
++ cfg_hi.cfgx.dst_per = 0;
++ }
++ } else {
++ cfg_hi.cfgx.protctl = 0x1; /*default value*/
++ cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
++ midc->ch_id - midc->dma->chan_base;
++ }
++ }
++
++ /*calculate CTL_HI*/
++ ctl_hi.ctlx.reser = 0;
++ width = mids->src_width;
++
++ ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
++ pr_debug("MDMA:calc len %d for block size %d\n",
++ ctl_hi.ctlx.block_ts, midc->dma->block_size);
++ /*calculate CTL_LO*/
++ ctl_lo.ctl_lo = 0;
++ ctl_lo.ctlx.int_en = 1;
++ ctl_lo.ctlx.dst_tr_width = mids->dst_width;
++ ctl_lo.ctlx.src_tr_width = mids->src_width;
++ ctl_lo.ctlx.dst_msize = mids->src_msize;
++ ctl_lo.ctlx.src_msize = mids->dst_msize;
++
++ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ ctl_lo.ctlx.tt_fc = 0;
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 0;
++ } else {
++ if (mids->dirn == DMA_TO_DEVICE) {
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 2;
++ ctl_lo.ctlx.tt_fc = 1;
++ } else if (mids->dirn == DMA_FROM_DEVICE) {
++ ctl_lo.ctlx.sinc = 2;
++ ctl_lo.ctlx.dinc = 0;
++ ctl_lo.ctlx.tt_fc = 2;
++ }
++ }
++
++ pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
++ ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
++
++ enable_dma_interrupt(midc);
++
++ desc = midc_desc_get(midc);
++ if (desc == NULL)
++ goto err_desc_get;
++ desc->sar = src;
++ desc->dar = dest ;
++ desc->len = len;
++ desc->cfg_hi = cfg_hi.cfg_hi;
++ desc->cfg_lo = cfg_lo.cfg_lo;
++ desc->ctl_lo = ctl_lo.ctl_lo;
++ desc->ctl_hi = ctl_hi.ctl_hi;
++ desc->width = width;
++ desc->dirn = mids->dirn;
++ return &desc->txd;
++
++err_desc_get:
++ pr_err("ERR_MDMA: Failed to get desc\n");
++ midc_desc_put(midc, desc);
++ return NULL;
++}
++
++/**
++ * intel_mid_dma_free_chan_resources - Frees dma resources
++ * @chan: chan requiring attention
++ *
++ * Frees the allocated resources on this DMA chan
++ */
++static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct middma_device *mid = to_middma_device(chan->device);
++ struct intel_mid_dma_desc *desc, *_desc;
++
++ if (true == midc->in_use) {
++ /*trying to free ch in use!!!!!*/
++ pr_err("ERR_MDMA: trying to free ch in use\n");
++ }
++
++ spin_lock_bh(&midc->lock);
++ midc->descs_allocated = 0;
++ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ spin_unlock_bh(&midc->lock);
++ midc->in_use = false;
++ /* Disable CH interrupts */
++ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
++}
++
++/**
++ * intel_mid_dma_alloc_chan_resources - Allocate dma resources
++ * @chan: chan requiring attention
++ *
++ * Allocates DMA resources on this chan
++ * Return the descriptors allocated
++ */
++static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct middma_device *mid = to_middma_device(chan->device);
++ struct intel_mid_dma_desc *desc;
++ dma_addr_t phys;
++ int i = 0;
++
++
++ /* ASSERT: channel is idle */
++ if (test_ch_en(mid->dma_base, midc->ch_id)) {
++ /*ch is not idle*/
++ pr_err("ERR_MDMA: ch not idle\n");
++ return -EIO;
++ }
++ midc->completed = chan->cookie = 1;
++
++ spin_lock_bh(&midc->lock);
++ while (midc->descs_allocated < DESCS_PER_CHANNEL) {
++ spin_unlock_bh(&midc->lock);
++ desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
++ if (!desc) {
++ pr_err("ERR_MDMA: desc failed\n");
++ return -ENOMEM;
++ /*check*/
++ }
++ dma_async_tx_descriptor_init(&desc->txd, chan);
++ desc->txd.tx_submit = intel_mid_dma_tx_submit;
++ desc->txd.flags = DMA_CTRL_ACK;
++ desc->txd.phys = phys;
++ spin_lock_bh(&midc->lock);
++ i = ++midc->descs_allocated;
++ list_add_tail(&desc->desc_node, &midc->free_list);
++ }
++ spin_unlock_bh(&midc->lock);
++ midc->in_use = false;
++ pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
++ return i;
++}
++
++/**
++ * midc_handle_error - Handle DMA txn error
++ * @mid: controller where error occurred
++ * @midc: chan where error occurred
++ *
++ * Scan the descriptor for error
++ */
++static void midc_handle_error(struct middma_device *mid,
++ struct intel_mid_dma_chan *midc)
++{
++ midc_scan_descriptors(mid, midc);
++}
++
++/**
++ * dma_tasklet - DMA interrupt tasklet
++ * @data: tasklet arg (the controller structure)
++ *
++ * Scan the controller for interrupts for completion/error
++ * Clear the interrupt and call for handling completion/error
++ */
++static void dma_tasklet(unsigned long data)
++{
++ struct middma_device *mid = NULL;
++ struct intel_mid_dma_chan *midc = NULL;
++ u32 status;
++ int i;
++
++ mid = (struct middma_device *)data;
++ if (mid == NULL) {
++ pr_err("ERR_MDMA: tasklet Null param\n");
++ return;
++ }
++ pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
++ status = ioread32(mid->dma_base + RAW_TFR);
++ pr_debug("MDMA:RAW_TFR %x\n", status);
++ status &= mid->intr_mask;
++ while (status) {
++ /*txn interrupt*/
++ i = get_ch_index(&status, mid->chan_base);
++ if (i < 0) {
++ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
++ return;
++ }
++ midc = &mid->ch[i];
++ if (midc == NULL) {
++ pr_err("ERR_MDMA:Null param midc\n");
++ return;
++ }
++ pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
++ status, midc->ch_id, i);
++ /*clearing this interrupts first*/
++ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
++ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
++
++ spin_lock_bh(&midc->lock);
++ midc_scan_descriptors(mid, midc);
++ pr_debug("MDMA:Scan of desc... complete, unmasking\n");
++ iowrite32(UNMASK_INTR_REG(midc->ch_id),
++ mid->dma_base + MASK_TFR);
++ spin_unlock_bh(&midc->lock);
++ }
++
++ status = ioread32(mid->dma_base + RAW_ERR);
++ status &= mid->intr_mask;
++ while (status) {
++ /*err interrupt*/
++ i = get_ch_index(&status, mid->chan_base);
++ if (i < 0) {
++ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
++ return;
++ }
++ midc = &mid->ch[i];
++ if (midc == NULL) {
++ pr_err("ERR_MDMA:Null param midc\n");
++ return;
++ }
++ pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
++ status, midc->ch_id, i);
++
++ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
++ spin_lock_bh(&midc->lock);
++ midc_handle_error(mid, midc);
++ iowrite32(UNMASK_INTR_REG(midc->ch_id),
++ mid->dma_base + MASK_ERR);
++ spin_unlock_bh(&midc->lock);
++ }
++ pr_debug("MDMA:Exiting takslet...\n");
++ return;
++}
++
++static void dma_tasklet1(unsigned long data)
++{
++ pr_debug("MDMA:in takslet1...\n");
++ return dma_tasklet(data);
++}
++
++static void dma_tasklet2(unsigned long data)
++{
++ pr_debug("MDMA:in takslet2...\n");
++ return dma_tasklet(data);
++}
++
++/**
++ * intel_mid_dma_interrupt - DMA ISR
++ * @irq: IRQ where interrupt occurred
++ * @data: ISR callback data (the controller structure)
++ *
++ * See if this is our interrupt if so then schedule the tasklet
++ * otherwise ignore
++ */
++static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
++{
++ struct middma_device *mid = data;
++ u32 status;
++ int call_tasklet = 0;
++
++ /*DMA Interrupt*/
++ pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
++ if (!mid) {
++ pr_err("ERR_MDMA:null pointer mid\n");
++ return -EINVAL;
++ }
++
++ status = ioread32(mid->dma_base + RAW_TFR);
++ pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
++ status &= mid->intr_mask;
++ if (status) {
++ /*need to disable intr*/
++ iowrite32((status << 8), mid->dma_base + MASK_TFR);
++ pr_debug("MDMA: Calling tasklet %x\n", status);
++ call_tasklet = 1;
++ }
++ status = ioread32(mid->dma_base + RAW_ERR);
++ status &= mid->intr_mask;
++ if (status) {
++ iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
++ call_tasklet = 1;
++ }
++ if (call_tasklet)
++ tasklet_schedule(&mid->tasklet);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
++{
++ return intel_mid_dma_interrupt(irq, data);
++}
++
++static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
++{
++ return intel_mid_dma_interrupt(irq, data);
++}
++
++/**
++ * mid_setup_dma - Setup the DMA controller
++ * @pdev: Controller PCI device structure
++ *
++ * Initialize the DMA controller, channels, registers with DMA engine,
++ * ISR. Initialize DMA controller channels.
++ */
++static int mid_setup_dma(struct pci_dev *pdev)
++{
++ struct middma_device *dma = pci_get_drvdata(pdev);
++ int err, i;
++ unsigned int irq_level;
++
++ /* DMA coherent memory pool for DMA descriptor allocations */
++ dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
++ sizeof(struct intel_mid_dma_desc),
++ 32, 0);
++ if (NULL == dma->dma_pool) {
++ pr_err("ERR_MDMA:pci_pool_create failed\n");
++ err = -ENOMEM;
++ kfree(dma);
++ goto err_dma_pool;
++ }
++
++ INIT_LIST_HEAD(&dma->common.channels);
++ dma->pci_id = pdev->device;
++ if (dma->pimr_mask) {
++ dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
++ LNW_PERIPHRAL_MASK_SIZE);
++ if (dma->mask_reg == NULL) {
++ pr_err("ERR_MDMA:Cant map periphral intr space !!\n");
++ return -ENOMEM;
++ }
++ } else
++ dma->mask_reg = NULL;
++
++ pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
++ /*init CH structures*/
++ dma->intr_mask = 0;
++ for (i = 0; i < dma->max_chan; i++) {
++ struct intel_mid_dma_chan *midch = &dma->ch[i];
++
++ midch->chan.device = &dma->common;
++ midch->chan.cookie = 1;
++ midch->chan.chan_id = i;
++ midch->ch_id = dma->chan_base + i;
++ pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
++
++ midch->dma_base = dma->dma_base;
++ midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
++ midch->dma = dma;
++ dma->intr_mask |= 1 << (dma->chan_base + i);
++ spin_lock_init(&midch->lock);
++
++ INIT_LIST_HEAD(&midch->active_list);
++ INIT_LIST_HEAD(&midch->queue);
++ INIT_LIST_HEAD(&midch->free_list);
++ /*mask interrupts*/
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_SRC_TRAN);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_DST_TRAN);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_ERR);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_TFR);
++
++ disable_dma_interrupt(midch);
++ list_add_tail(&midch->chan.device_node, &dma->common.channels);
++ }
++ pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);
++
++ /*init dma structure*/
++ dma_cap_zero(dma->common.cap_mask);
++ dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
++ dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
++ dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
++ dma->common.dev = &pdev->dev;
++ dma->common.chancnt = dma->max_chan;
++
++ dma->common.device_alloc_chan_resources =
++ intel_mid_dma_alloc_chan_resources;
++ dma->common.device_free_chan_resources =
++ intel_mid_dma_free_chan_resources;
++
++ dma->common.device_tx_status = intel_mid_dma_tx_status;
++ dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
++ dma->common.device_issue_pending = intel_mid_dma_issue_pending;
++ dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
++ dma->common.device_control = intel_mid_dma_device_control;
++
++ /*enable dma cntrl*/
++ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
++
++ /*register irq */
++ if (dma->pimr_mask) {
++ irq_level = IRQF_SHARED;
++ pr_debug("MDMA:Requesting irq shared for DMAC1\n");
++ err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
++ IRQF_SHARED, "INTEL_MID_DMAC1", dma);
++ if (0 != err)
++ goto err_irq;
++ } else {
++ dma->intr_mask = 0x03;
++ irq_level = 0;
++ pr_debug("MDMA:Requesting irq for DMAC2\n");
++ err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
++ 0, "INTEL_MID_DMAC2", dma);
++ if (0 != err)
++ goto err_irq;
++ }
++ /*register device w/ engine*/
++ err = dma_async_device_register(&dma->common);
++ if (0 != err) {
++ pr_err("ERR_MDMA:device_register failed: %d\n", err);
++ goto err_engine;
++ }
++ if (dma->pimr_mask) {
++ pr_debug("setting up tasklet1 for DMAC1\n");
++ tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
++ } else {
++ pr_debug("setting up tasklet2 for DMAC2\n");
++ tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
++ }
++ return 0;
++
++err_engine:
++ free_irq(pdev->irq, dma);
++err_irq:
++ pci_pool_destroy(dma->dma_pool);
++ kfree(dma);
++err_dma_pool:
++ pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
++ return err;
++
++}
++
++/**
++ * middma_shutdown - Shutdown the DMA controller
++ * @pdev: Controller PCI device structure
++ *
++ * Called by remove
++ * Unregister DMA controller, clear all structures and free interrupt
++ */
++static void middma_shutdown(struct pci_dev *pdev)
++{
++ struct middma_device *device = pci_get_drvdata(pdev);
++
++ dma_async_device_unregister(&device->common);
++ pci_pool_destroy(device->dma_pool);
++ if (device->mask_reg)
++ iounmap(device->mask_reg);
++ if (device->dma_base)
++ iounmap(device->dma_base);
++ free_irq(pdev->irq, device);
++ return;
++}
++
++/**
++ * intel_mid_dma_probe - PCI Probe
++ * @pdev: Controller PCI device structure
++ * @id: pci device id structure
++ *
++ * Initialize the PCI device, map BARs, query driver data.
++ * Call setup_dma to complete controller and chan initialization
++ */
++static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ struct middma_device *device;
++ u32 base_addr, bar_size;
++ struct intel_mid_dma_probe_info *info;
++ int err;
++
++ pr_debug("MDMA: probe for %x\n", pdev->device);
++ info = (void *)id->driver_data;
++ pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
++ info->max_chan, info->ch_base,
++ info->block_size, info->pimr_mask);
++
++ err = pci_enable_device(pdev);
++ if (err)
++ goto err_enable_device;
++
++ err = pci_request_regions(pdev, "intel_mid_dmac");
++ if (err)
++ goto err_request_regions;
++
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err)
++ goto err_set_dma_mask;
++
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err)
++ goto err_set_dma_mask;
++
++ device = kzalloc(sizeof(*device), GFP_KERNEL);
++ if (!device) {
++ pr_err("ERR_MDMA:kzalloc failed probe\n");
++ err = -ENOMEM;
++ goto err_kzalloc;
++ }
++ device->pdev = pci_dev_get(pdev);
++
++ base_addr = pci_resource_start(pdev, 0);
++ bar_size = pci_resource_len(pdev, 0);
++ device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
++ if (!device->dma_base) {
++ pr_err("ERR_MDMA:ioremap failed\n");
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++ pci_set_drvdata(pdev, device);
++ pci_set_master(pdev);
++ device->max_chan = info->max_chan;
++ device->chan_base = info->ch_base;
++ device->block_size = info->block_size;
++ device->pimr_mask = info->pimr_mask;
++
++ err = mid_setup_dma(pdev);
++ if (err)
++ goto err_dma;
++
++ return 0;
++
++err_dma:
++ iounmap(device->dma_base);
++err_ioremap:
++ pci_dev_put(pdev);
++ kfree(device);
++err_kzalloc:
++err_set_dma_mask:
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++err_request_regions:
++err_enable_device:
++ pr_err("ERR_MDMA:Probe failed %d\n", err);
++ return err;
++}
++
++/**
++ * intel_mid_dma_remove - PCI remove
++ * @pdev: Controller PCI device structure
++ *
++ * Free up all resources and data
++ * Call shutdown_dma to complete controller and chan cleanup
++ */
++static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
++{
++ struct middma_device *device = pci_get_drvdata(pdev);
++ middma_shutdown(pdev);
++ pci_dev_put(pdev);
++ kfree(device);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++}
++
++/******************************************************************************
++* PCI stuff
++*/
++static struct pci_device_id intel_mid_dma_ids[] = {
++ { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)},
++ { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)},
++ { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)},
++ { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)},
++ { 0, }
++};
++MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
++
++static struct pci_driver intel_mid_dma_pci = {
++ .name = "Intel MID DMA",
++ .id_table = intel_mid_dma_ids,
++ .probe = intel_mid_dma_probe,
++ .remove = __devexit_p(intel_mid_dma_remove),
++};
++
++static int __init intel_mid_dma_init(void)
++{
++ pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
++ INTEL_MID_DMA_DRIVER_VERSION);
++ return pci_register_driver(&intel_mid_dma_pci);
++}
++fs_initcall(intel_mid_dma_init);
++
++static void __exit intel_mid_dma_exit(void)
++{
++ pci_unregister_driver(&intel_mid_dma_pci);
++}
++module_exit(intel_mid_dma_exit);
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
+--- /dev/null
++++ b/drivers/dma/intel_mid_dma_regs.h
+@@ -0,0 +1,260 @@
++/*
++ * intel_mid_dma_regs.h - Intel MID DMA Drivers
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#ifndef __INTEL_MID_DMAC_REGS_H__
++#define __INTEL_MID_DMAC_REGS_H__
++
++#include <linux/dmaengine.h>
++#include <linux/dmapool.h>
++#include <linux/pci_ids.h>
++
++#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
++
++#define REG_BIT0 0x00000001
++#define REG_BIT8 0x00000100
++
++#define UNMASK_INTR_REG(chan_num) \
++ ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
++#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
++
++#define ENABLE_CHANNEL(chan_num) \
++ ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
++
++#define DESCS_PER_CHANNEL 16
++/*DMA Registers*/
++/*registers associated with channel programming*/
++#define DMA_REG_SIZE 0x400
++#define DMA_CH_SIZE 0x58
++
++/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
++#define SAR 0x00 /* Source Address Register*/
++#define DAR 0x08 /* Destination Address Register*/
++#define CTL_LOW 0x18 /* Control Register*/
++#define CTL_HIGH 0x1C /* Control Register*/
++#define CFG_LOW 0x40 /* Configuration Register Low*/
++#define CFG_HIGH 0x44 /* Configuration Register high*/
++
++#define STATUS_TFR 0x2E8
++#define STATUS_BLOCK 0x2F0
++#define STATUS_ERR 0x308
++
++#define RAW_TFR 0x2C0
++#define RAW_BLOCK 0x2C8
++#define RAW_ERR 0x2E0
++
++#define MASK_TFR 0x310
++#define MASK_BLOCK 0x318
++#define MASK_SRC_TRAN 0x320
++#define MASK_DST_TRAN 0x328
++#define MASK_ERR 0x330
++
++#define CLEAR_TFR 0x338
++#define CLEAR_BLOCK 0x340
++#define CLEAR_SRC_TRAN 0x348
++#define CLEAR_DST_TRAN 0x350
++#define CLEAR_ERR 0x358
++
++#define INTR_STATUS 0x360
++#define DMA_CFG 0x398
++#define DMA_CHAN_EN 0x3A0
++
++/*DMA channel control registers*/
++union intel_mid_dma_ctl_lo {
++ struct {
++ u32 int_en:1; /*enable or disable interrupts*/
++ /*should be 0*/
++ u32 dst_tr_width:3; /*destination transfer width*/
++ /*usually 32 bits = 010*/
++ u32 src_tr_width:3; /*source transfer width*/
++ /*usually 32 bits = 010*/
++ u32 dinc:2; /*destination address inc/dec*/
++ /*For mem:INC=00, Periphral NoINC=11*/
++ u32 sinc:2; /*source address inc or dec, as above*/
++ u32 dst_msize:3; /*destination burst transaction length*/
++ /*always = 16 ie 011*/
++ u32 src_msize:3; /*source burst transaction length*/
++ /*always = 16 ie 011*/
++ u32 reser1:3;
++ u32 tt_fc:3; /*transfer type and flow controller*/
++ /*M-M = 000
++ P-M = 010
++ M-P = 001*/
++ u32 dms:2; /*destination master select = 0*/
++ u32 sms:2; /*source master select = 0*/
++ u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
++ u32 llp_src_en:1; /*enable/disable source LLP = 0*/
++ u32 reser2:3;
++ } ctlx;
++ u32 ctl_lo;
++};
++
++union intel_mid_dma_ctl_hi {
++ struct {
++ u32 block_ts:12; /*block transfer size*/
++ /*configured by DMAC*/
++ u32 reser:20;
++ } ctlx;
++ u32 ctl_hi;
++
++};
++
++/*DMA channel configuration registers*/
++union intel_mid_dma_cfg_lo {
++ struct {
++ u32 reser1:5;
++ u32 ch_prior:3; /*channel priority = 0*/
++ u32 ch_susp:1; /*channel suspend = 0*/
++ u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/
++ u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/
++ /*HW = 0, SW = 1*/
++ u32 hs_sel_src:1; /*select HW/SW src handshaking*/
++ u32 reser2:6;
++ u32 dst_hs_pol:1; /*dest HS interface polarity*/
++ u32 src_hs_pol:1; /*src HS interface polarity*/
++ u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit*/
++ u32 reload_src:1; /*auto reload src addr =1 if src is P*/
++ u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/
++ } cfgx;
++ u32 cfg_lo;
++};
++
++union intel_mid_dma_cfg_hi {
++ struct {
++ u32 fcmode:1; /*flow control mode = 1*/
++ u32 fifo_mode:1; /*FIFO mode select = 1*/
++ u32 protctl:3; /*protection control = 0*/
++ u32 rsvd:2;
++ u32 src_per:4; /*src hw HS interface*/
++ u32 dst_per:4; /*dstn hw HS interface*/
++ u32 reser2:17;
++ } cfgx;
++ u32 cfg_hi;
++};
++
++/**
++ * struct intel_mid_dma_chan - internal mid representation of a DMA channel
++ * @chan: dma_chan strcture represetation for mid chan
++ * @ch_regs: MMIO register space pointer to channel register
++ * @dma_base: MMIO register space DMA engine base pointer
++ * @ch_id: DMA channel id
++ * @lock: channel spinlock
++ * @completed: DMA cookie
++ * @active_list: current active descriptors
++ * @queue: current queued up descriptors
++ * @free_list: current free descriptors
++ * @slave: dma slave struture
++ * @descs_allocated: total number of decsiptors allocated
++ * @dma: dma device struture pointer
++ * @in_use: bool representing if ch is in use or not
++ */
++struct intel_mid_dma_chan {
++ struct dma_chan chan;
++ void __iomem *ch_regs;
++ void __iomem *dma_base;
++ int ch_id;
++ spinlock_t lock;
++ dma_cookie_t completed;
++ struct list_head active_list;
++ struct list_head queue;
++ struct list_head free_list;
++ struct intel_mid_dma_slave *slave;
++ unsigned int descs_allocated;
++ struct middma_device *dma;
++ bool in_use;
++};
++
++static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
++ struct dma_chan *chan)
++{
++ return container_of(chan, struct intel_mid_dma_chan, chan);
++}
++
++/**
++ * struct middma_device - internal representation of a DMA device
++ * @pdev: PCI device
++ * @dma_base: MMIO register space pointer of DMA
++ * @dma_pool: for allocating DMA descriptors
++ * @common: embedded struct dma_device
++ * @tasklet: dma tasklet for processing interrupts
++ * @ch: per channel data
++ * @pci_id: DMA device PCI ID
++ * @intr_mask: Interrupt mask to be used
++ * @mask_reg: MMIO register for peripheral mask
++ * @chan_base: Base ch index (read from driver data)
++ * @max_chan: max number of chs supported (from drv_data)
++ * @block_size: Block size of DMA transfer supported (from drv_data)
++ * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
++ */
++struct middma_device {
++ struct pci_dev *pdev;
++ void __iomem *dma_base;
++ struct pci_pool *dma_pool;
++ struct dma_device common;
++ struct tasklet_struct tasklet;
++ struct intel_mid_dma_chan ch[MAX_CHAN];
++ unsigned int pci_id;
++ unsigned int intr_mask;
++ void __iomem *mask_reg;
++ int chan_base;
++ int max_chan;
++ int block_size;
++ unsigned int pimr_mask;
++};
++
++static inline struct middma_device *to_middma_device(struct dma_device *common)
++{
++ return container_of(common, struct middma_device, common);
++}
++
++struct intel_mid_dma_desc {
++ void __iomem *block; /*ch ptr*/
++ struct list_head desc_node;
++ struct dma_async_tx_descriptor txd;
++ size_t len;
++ dma_addr_t sar;
++ dma_addr_t dar;
++ u32 cfg_hi;
++ u32 cfg_lo;
++ u32 ctl_lo;
++ u32 ctl_hi;
++ dma_addr_t next;
++ enum dma_data_direction dirn;
++ enum dma_status status;
++ enum intel_mid_dma_width width; /*width of DMA txn*/
++ enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
++
++};
++
++static inline int test_ch_en(void __iomem *dma, u32 ch_no)
++{
++ u32 en_reg = ioread32(dma + DMA_CHAN_EN);
++ return (en_reg >> ch_no) & 0x1;
++}
++
++static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
++ (struct dma_async_tx_descriptor *txd)
++{
++ return container_of(txd, struct intel_mid_dma_desc, txd);
++}
++#endif /*__INTEL_MID_DMAC_REGS_H__*/
+--- /dev/null
++++ b/drivers/gpio/max7315.h
+@@ -0,0 +1,82 @@
++/*
++ * max7315.h - GPIO expander MAX7315 driver header
++ *
++ * Copyright (C) 2010 Aava Mobile Oy
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/leds.h>
++#include <linux/workqueue.h>
++
++#define MAX7315_INPUT 0x00
++#define MAX7315_BLINK0 0x01
++#define MAX7315_DIRECTION 0x03
++#define MAX7315_BLINK1 0x09
++#define MAX7315_MG8 0x0E
++#define MAX7315_CONF 0x0F
++#define MAX7315_INTNS0 0x10
++#define MAX7315_INTNS1 0x11
++#define MAX7315_INTNS2 0x12
++#define MAX7315_INTNS3 0x13
++/* configuration register masks */
++#define MAX7315_CONF_BEN 0x01
++#define MAX7315_CONF_BFLIP 0x02
++#define MAX7315_CONF_GLINTNS 0x04
++#define MAX7315_CONF_INTEN 0x08
++#define MAX7315_CONF_INTOC1 0x10
++#define MAX7315_CONF_INTOC2 0x20
++#define MAX7315_CONF_RSRVD 0x40
++#define MAX7315_CONF_INTSTS 0x80
++/* led pin register mask */
++#define MAX7315_P2_MASK 0x04
++/* PWM enable mask */
++#define MAX7315_PWM_MASK 0xFF
++/* BLINK mask */
++#define MAX7315_BLINKMASK_P2 (1 << 2)
++/* period in ms */
++#define MAX7315_PERIOD_MIN 0
++#define MAX7315_PERIOD_MAX 1600
++/* duty cycle range: 0, 1/16, 2/16, ... 15/16 */
++#define MAX7315_DCYCLE_MIN 0
++#define MAX7315_DCYCLE_MAX 100
++#define MAX7315_BLINK_PRESET0 75
++#define MAX7315_BLINK_PRESET1 60
++#define MAX7315_BLINK_PRESET2 15
++#define MAX7315_BLINK_DELAY0 100
++#define MAX7315_BLINK_DELAY1 250
++#define MAX7315_BLINK_DELAY2 500
++#define MAX7315_DUTY_MAX 0xF
++
++/* XXX STATUS_MAX needs to be updated in case more modes are added XXX */
++
++#define MAX7315_LED_STATUS_MAX 3
++#define MAX7315_LEDS_MAX 8
++
++enum max7315_status {
++ MAX7315_LED_STATUS_OFF,
++ MAX7315_LED_STATUS_ON,
++ MAX7315_LED_STATUS_BLINK0,
++ MAX7315_LED_STATUS_BLINK1,
++};
++
++struct max7315_led_data {
++ u8 id;
++ const char *name;
++ enum max7315_status status;
++ struct led_classdev ldev;
++ struct i2c_client *client;
++ struct work_struct work;
++};
+--- a/drivers/gpio/pca953x.c
++++ b/drivers/gpio/pca953x.c
+@@ -24,6 +24,8 @@
+ #include <linux/of_gpio.h>
+ #endif
+
++#include "max7315.h"
++
+ #define PCA953X_INPUT 0
+ #define PCA953X_OUTPUT 1
+ #define PCA953X_INVERT 2
+@@ -31,6 +33,7 @@
+
+ #define PCA953X_GPIOS 0x00FF
+ #define PCA953X_INT 0x0100
++#define PCA953X_LED 0x0200
+
+ static const struct i2c_device_id pca953x_id[] = {
+ { "pca9534", 8 | PCA953X_INT, },
+@@ -47,7 +50,7 @@
+ { "max7310", 8, },
+ { "max7312", 16 | PCA953X_INT, },
+ { "max7313", 16 | PCA953X_INT, },
+- { "max7315", 8 | PCA953X_INT, },
++ { "max7315", 8 | PCA953X_INT | PCA953X_LED, },
+ { "pca6107", 8 | PCA953X_INT, },
+ { "tca6408", 8 | PCA953X_INT, },
+ { "tca6416", 16 | PCA953X_INT, },
+@@ -74,6 +77,12 @@
+ struct pca953x_platform_data *dyn_pdata;
+ struct gpio_chip gpio_chip;
+ const char *const *names;
++
++#ifdef CONFIG_LEDS_CLASS
++ struct max7315_led_data leds[MAX7315_LEDS_MAX];
++ struct work_struct work;
++ uint8_t leds_size;
++#endif
+ };
+
+ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
+@@ -345,7 +354,7 @@
+
+ do {
+ level = __ffs(pending);
+- handle_nested_irq(level + chip->irq_base);
++ generic_handle_irq(level + chip->irq_base);
+
+ pending &= ~(1 << level);
+ } while (pending);
+@@ -360,7 +369,8 @@
+ struct pca953x_platform_data *pdata = client->dev.platform_data;
+ int ret;
+
+- if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
++ if (pdata->irq_base != -1
++ && (id->driver_data & PCA953X_INT)) {
+ int lvl;
+
+ ret = pca953x_read_reg(chip, PCA953X_INPUT,
+@@ -383,7 +393,6 @@
+ set_irq_chip_data(irq, chip);
+ set_irq_chip_and_handler(irq, &pca953x_irq_chip,
+ handle_edge_irq);
+- set_irq_nested_thread(irq, 1);
+ #ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+ #else
+@@ -394,6 +403,7 @@
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ pca953x_irq_handler,
++ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
+ if (ret) {
+@@ -408,13 +418,13 @@
+ return 0;
+
+ out_failed:
+- chip->irq_base = 0;
++ chip->irq_base = -1;
+ return ret;
+ }
+
+ static void pca953x_irq_teardown(struct pca953x_chip *chip)
+ {
+- if (chip->irq_base)
++ if (chip->irq_base != -1)
+ free_irq(chip->client->irq, chip);
+ }
+ #else /* CONFIG_GPIO_PCA953X_IRQ */
+@@ -424,7 +434,7 @@
+ struct i2c_client *client = chip->client;
+ struct pca953x_platform_data *pdata = client->dev.platform_data;
+
+- if (pdata->irq_base && (id->driver_data & PCA953X_INT))
++ if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+@@ -483,6 +493,189 @@
+ }
+ #endif
+
++
++#ifdef CONFIG_LEDS_CLASS
++
++/**
++ * Set the led status
++ *
++ * @chip: a pca953x_chip structure
++ * @status: one of MAX7315_LED_STATUS_OFF
++ * MAX7315_LED_STATUS_ON
++ * MAX7315_LED_STATUS_BLINK0
++ * MAX7315_LED_STATUS_BLINK1
++ */
++static int max7315_led_set(struct max7315_led_data *led, u8 status)
++{
++ return 0;
++}
++
++static void max7315_led_set_brightness(struct led_classdev *led_cdev,
++ enum led_brightness brightness)
++{
++ struct max7315_led_data *led =
++ container_of(led_cdev, struct max7315_led_data, ldev);
++
++ dev_dbg(&led->client->dev, "%s: %s, %d\n",
++ __func__, led_cdev->name, brightness);
++
++ led->status = brightness;
++ schedule_work(&led->work);
++}
++
++static enum led_brightness
++ max7315_led_get_brightness(struct led_classdev *led_cdev)
++{
++ struct max7315_led_data *led =
++ container_of(led_cdev, struct max7315_led_data, ldev);
++
++ return led->status;
++}
++
++static int max7315_led_set_blink(struct led_classdev *led_cdev,
++ unsigned long *delay_on,
++ unsigned long *delay_off)
++{
++ return 0;
++}
++
++static void max7315_led_work(struct work_struct *work)
++{
++ struct max7315_led_data *led;
++
++ led = container_of(work, struct max7315_led_data, work);
++ max7315_led_set(led, led->status);
++}
++
++static int max7315_teardown_leds(struct i2c_client *client,
++ unsigned gpio, unsigned ngpio, void *context)
++{
++ int i = 0;
++ struct pca953x_chip *chip = i2c_get_clientdata(client);
++ for (i = 0; i < chip->leds_size; i++) {
++ led_classdev_unregister(&chip->leds[i].ldev);
++ cancel_work_sync(&chip->leds[i].work);
++ }
++ return 0;
++}
++
++/* Initial settings for max7315 leds */
++static int max7315_setup_leds(struct i2c_client *client,
++ unsigned gpio, unsigned ngpio, void *context)
++{
++ int i = 0, err = 0;
++ u8 conf_reg = 0;
++ uint16_t port_reg = 0;
++ struct pca953x_chip *chip = i2c_get_clientdata(client);
++
++ if (!chip) {
++ dev_err(&client->dev, "%s no client chip data\n", __func__);
++ goto exit;
++ }
++ /* Set master intensity (between 0x10-0xFF) */
++ err = pca953x_write_reg(chip, MAX7315_MG8, MAX7315_PWM_MASK);
++ if (err < 0) {
++ dev_err(&client->dev, "%s couldn't enable PWM\n", __func__);
++ goto exit;
++ }
++ /* Set BLINK1 output */
++ err = pca953x_read_reg(chip, MAX7315_BLINK1, &port_reg);
++ if (err) {
++ dev_err(&client->dev, "%s couldn't read BLINK1\n", __func__);
++ return err;
++ }
++ port_reg |= MAX7315_BLINKMASK_P2;
++ err = pca953x_write_reg(chip, MAX7315_BLINK1, port_reg);
++ if (err) {
++ dev_err(&client->dev, "%s couldn't write BLINK1\n", __func__);
++ return err;
++ }
++
++ /* Set initial configuration to leds config reg:
++ * -no blinking nor blink flip
++ * -no global intensity (each port configured separately)
++ * -interrupt enable is activated (in INT/O8)
++ * -interrupt status is logic 1 until interrupt occurs
++ * -conf: 10111000
++ * -ports 0,1,4,5,6 inputs (+3,7 not connected)
++ * -ports 2,8 outputs
++ */
++ conf_reg |= MAX7315_CONF_INTEN; /*bit 3*/
++ conf_reg |= MAX7315_CONF_INTOC1; /*bit 4*/
++ conf_reg |= MAX7315_CONF_INTOC2; /*bit 5*/
++ conf_reg |= MAX7315_CONF_INTSTS; /*bit 7*/
++ conf_reg &= ~MAX7315_CONF_BEN; /*bit 0*/
++ conf_reg &= ~MAX7315_CONF_BFLIP; /*bit 1*/
++ conf_reg &= ~MAX7315_CONF_GLINTNS; /*bit 2*/
++ conf_reg &= ~MAX7315_CONF_RSRVD; /*bit 6*/
++ err = pca953x_write_reg(chip, MAX7315_CONF, conf_reg);
++ chip->leds_size = 1;
++
++
++ for (i = 0; i < chip->leds_size; i++) {
++ struct max7315_led_data *led = &chip->leds[i];
++ led->client = client;
++ led->id = 2;
++ led->name = "p2";
++
++ led->status = MAX7315_LED_STATUS_OFF;
++ led->ldev.name = led->name;
++ led->ldev.max_brightness = LED_FULL;
++ led->ldev.brightness_set = max7315_led_set_brightness;
++ led->ldev.blink_set = max7315_led_set_blink;
++ led->ldev.brightness_get = max7315_led_get_brightness;
++ led->ldev.flags = LED_CORE_SUSPENDRESUME;
++
++ INIT_WORK(&led->work, max7315_led_work);
++ err = led_classdev_register(&client->dev, &led->ldev);
++ if (err < 0) {
++ dev_err(&client->dev,
++ "couldn't register LED %s\n",
++ led->ldev.name);
++ goto exit;
++ }
++ /* to expose the default value to userspace */
++ led->ldev.brightness = led->status;
++
++ /* Set the default led status */
++ err = max7315_led_set(led, led->status);
++ if (err < 0) {
++ dev_err(&client->dev,
++ "%s couldn't set STATUS %d\n",
++ led->ldev.name, led->status);
++ goto exit;
++ }
++
++ }
++
++ /* Set direction and value through gpiolib */
++ if (gpio_request(chip->gpio_chip.base+2, "max7315")) {
++ pr_debug("Requesting GPIO%d failed\n",
++ chip->gpio_chip.base+2);
++ return -EIO;
++ }
++ if (gpio_direction_output(chip->gpio_chip.base+2, 0)) {
++ pr_debug("Setting GPIO%d to out failed\n",
++ chip->gpio_chip.base+2);
++ gpio_free(chip->gpio_chip.base+2);
++ return -EIO;
++ }
++ gpio_free(chip->gpio_chip.base+2);
++
++ return 0;
++
++exit:
++
++ if (i > 0)
++ for (i = i - 1; i >= 0; i--) {
++ led_classdev_unregister(&chip->leds[i].ldev);
++ cancel_work_sync(&chip->leds[i].work);
++ }
++ return err;
++}
++
++#endif
++
+ static int __devinit pca953x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+@@ -542,6 +735,13 @@
+ if (ret)
+ goto out_failed;
+
++#ifdef CONFIG_LEDS_CLASS
++ if (id->driver_data & PCA953X_LED) {
++ pdata->setup = max7315_setup_leds;
++ pdata->teardown = max7315_teardown_leds;
++ }
++#endif
++ i2c_set_clientdata(client, chip);
+ if (pdata->setup) {
+ ret = pdata->setup(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
+@@ -549,7 +749,6 @@
+ dev_warn(&client->dev, "setup failed, %d\n", ret);
+ }
+
+- i2c_set_clientdata(client, chip);
+ return 0;
+
+ out_failed:
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -161,6 +161,7 @@
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
++ { DRM_MODE_CONNECTOR_MIPI, "MIPI", 0},
+ };
+
+ static struct drm_prop_enum_list drm_encoder_enum_list[] =
+@@ -169,6 +170,7 @@
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
++ { DRM_MODE_ENCODER_MIPI, "MIPI"},
+ };
+
+ char *drm_get_encoder_name(struct drm_encoder *encoder)
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1118,6 +1118,20 @@
+ help
+ Support for the A/D converter on MC13783 PMIC.
+
++config SENSORS_MRST_ANALOG_ACCEL
++ tristate "Moorestown Analog Accelerometer"
++ depends on INTEL_SCU_IPC
++ help
++ If you say yes here you get support for the analog accelerometer
++ on the Moorestown platform. x y z data can be accessed via sysfs.
++
++config SENSORS_THERMAL_MFLD
++ tristate "Thermal driver for Intel Medfield platform"
++ depends on INTEL_SCU_IPC
++ help
++ Say Y here to enable thermal driver on Intel Medfield
++ platform.
++
+ if ACPI
+
+ comment "ACPI drivers"
+--- a/drivers/hwmon/Makefile
++++ b/drivers/hwmon/Makefile
+@@ -80,6 +80,7 @@
+ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
+ obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
++obj-$(CONFIG_SENSORS_MRST_ANALOG_ACCEL) += mrst_analog_accel.o
+ obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
+ obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
+ obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
+@@ -103,6 +104,7 @@
+ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
+ obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
+ obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
++obj-$(CONFIG_SENSORS_THERMAL_MFLD) += intel_mid_thermal.o
+
+ ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
+ EXTRA_CFLAGS += -DDEBUG
+--- a/drivers/hwmon/emc1403.c
++++ b/drivers/hwmon/emc1403.c
+@@ -89,6 +89,35 @@
+ return count;
+ }
+
++static ssize_t store_bit(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct thermal_data *data = i2c_get_clientdata(client);
++ struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
++ unsigned long val;
++ int retval;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ mutex_lock(&data->mutex);
++ retval = i2c_smbus_read_byte_data(client, sda->nr);
++ if (retval < 0)
++ goto fail;
++
++ retval &= ~sda->index;
++ if (val)
++ retval |= sda->index;
++
++ retval = i2c_smbus_write_byte_data(client, sda->index, retval);
++ if (retval == 0)
++ retval = count;
++fail:
++ mutex_unlock(&data->mutex);
++ return retval;
++}
++
+ static ssize_t show_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -200,6 +229,9 @@
+ static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x1A);
+
++static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR,
++ show_bit, store_bit, 0x03, 0x40);
++
+ static struct attribute *mid_att_thermal[] = {
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+@@ -225,6 +257,7 @@
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
++ &sensor_dev_attr_power_state.dev_attr.attr,
+ NULL
+ };
+
+--- /dev/null
++++ b/drivers/hwmon/intel_mid_thermal.c
+@@ -0,0 +1,808 @@
++/*
++ * intel_mid_thermal.c - Intel MID platform thermal driver
++ *
++ *
++ * Copyright (C) 2010 Intel Corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Ananth Krishna <ananth.krishna.r@intel.com>
++ * Author: Durgadoss <durgadoss.r@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/jiffies.h>
++#include <linux/timer.h>
++#include <linux/param.h>
++#include <linux/workqueue.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/pm.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/hwmon-vid.h>
++
++#include <asm/intel_scu_ipc.h>
++
++
++#define DRIVER_NAME "msic_sensor"
++/*********************************************************************
++ * Generic defines
++ *********************************************************************/
++/* ADC1 - thermal registers */
++#define MSIC_THERM_ADC1CNTL1 0x1C0
++#define MSIC_ADC_ENBL 0x18
++#define MSIC_THERM_ADC1CNTL3 0x1C2
++#define MSIC_ADCTHERM_ENBL 0x04
++#define MSIC_ADCRRDATA_ENBL 0x05
++#define MSIC_CHANL_MASK_VAL 0x0F
++
++#define MSIC_STOPBIT_MASK 16
++#define MSIC_ADCTHERM_MASK 4
++#define ADC_CHANLS_MAX 15 /*no of adc channels*/
++#define ADC_LOOP_MAX (ADC_CHANLS_MAX - 3)
++
++#define MSIC_VAUDA 0x0DB
++#define MSIC_VAUDA_VAL 0xFF
++
++/* ADC channel code values */
++#define SKIN_SENSOR0_CODE 0x08
++#define SKIN_SENSOR1_CODE 0x09
++#define SYS_SENSOR_CODE 0x1A
++
++#define SKIN_THERM_SENSOR0 0
++#define SKIN_THERM_SENSOR1 1
++#define SYS_THERM_SENSOR2 2
++
++/* ADC code range */
++#define ADC_MAX 977
++#define ADC_MIN 162
++#define ADC_VAL1 887
++#define ADC_VAL2 720
++#define ADC_VAL3 508
++#define ADC_VAL4 315
++
++/* Event id */
++#define HIGH_EVENT 1
++#define LOW_EVENT 2
++#define THERM_EVENT 3
++#define FAULT_EVENT 4
++
++/* Default temperature limits
++ * in milli degree celsius */
++#define SKIN_SENSOR_MIN_TEMP 0
++#define SKIN_SENSOR_MAX_TEMP 35000
++#define SKIN_SENSOR_CRIT_TEMP 45000
++#define SKIN_SENSOR_THROT_START 30000
++
++#define SYS_SENSOR_MIN_TEMP 0
++#define SYS_SENSOR_MAX_TEMP 45000
++#define SYS_SENSOR_CRIT_TEMP 55000
++#define SYS_SENSOR_THROT_START 30000
++
++/* ADC base addresses */
++#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
++#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
++
++static int channel_index;
++
++/* Generic structure for sensor */
++struct sensor {
++ int curr_temp;
++ unsigned int min_temp;
++ unsigned int max_temp;
++ unsigned int crit_temp;
++ unsigned int therm_throt_temp;
++};
++
++/*********************************************************************
++ * Thermal properties
++ *********************************************************************/
++struct thermal_module_info {
++ struct platform_device *pdev;
++ struct device *dev;
++ /* thermal parameters */
++ struct sensor platfrm_sens[3];
++ struct workqueue_struct *therm_monitor_wqueue;
++ struct delayed_work thermal_monitor;
++};
++
++/**
++ * mid_therm_event - send thermal notification event
++ * @event_id: indicates criticality of threshold exceeded
++ * @sensor_id: indicates the sensor
++ * @curr_temp: current temperature of sensor
++ * Context: can sleep
++ *
++ * sends thermal notification to the thermal management
++ * on exceeding the defined threshold
++ */
++static void mid_therm_event(int event_id, int sensor_id, int curr_temp)
++{
++ /* for future enabling*/
++ printk(KERN_INFO "called:%s", __func__);
++}
++
++/**
++ * notify_status : Notifies the thermal management when threshold exceeds
++ * @sensor: sensor id
++ * @therm : thermal_module_info structure
++ * Context: can sleep
++ */
++static void notify_status(int sensor, struct thermal_module_info *therm)
++{
++ int temp = therm->platfrm_sens[sensor].curr_temp;
++ if (temp >= therm->platfrm_sens[sensor].crit_temp) {
++ dev_warn(&therm->pdev->dev,
++ "%s:sensor %d: temperature is crit:%d",
++ __func__, sensor, temp);
++ mid_therm_event(THERM_EVENT, sensor, temp);
++ } else if (temp >= therm->platfrm_sens[sensor].max_temp) {
++ dev_warn(&therm->pdev->dev,
++ "%s:sensor %d: temperature is max:%d",
++ __func__, sensor, temp);
++ mid_therm_event(HIGH_EVENT, sensor, temp);
++ } else if (temp >= therm->platfrm_sens[sensor].min_temp) {
++ if (temp > therm->platfrm_sens[sensor].therm_throt_temp)
++ mid_therm_event(LOW_EVENT, sensor, temp);
++ }
++}
++
++
++/**
++ * adc_to_temp - converts the ADC code to temperature in C
++ * @adc_val: the adc_val needs to be converted
++ *
++ * Linear approximation is used to convert the adc value into temperature.
++ * This technique is used to avoid very long look-up table to get
++ * the appropriate temp value from ADC value.
++ * The adc code vs sensor temp curve is split into five parts
++ * to achieve very close approximate temp value with less than
++ * 0.5C error
++ */
++static int adc_to_temp(uint16_t adc_val)
++{
++ int temp;
++ if (adc_val > ADC_MAX || adc_val < ADC_MIN)
++ return -ERANGE;
++ /* linear approximation */
++ if (adc_val > ADC_VAL1) /* -20 to 0C */
++ temp = 177 - (adc_val/5);
++ else if (adc_val <= ADC_VAL1 && adc_val > ADC_VAL2) /* 0C to 20C */
++ temp = 111 - (adc_val/8);
++ else if (adc_val <= ADC_VAL2 && adc_val > ADC_VAL3) /* 20C to 40C */
++ temp = 92 - (adc_val/10);
++ else if (adc_val <= ADC_VAL3 && adc_val > ADC_VAL4) /* 40C to 60C */
++ temp = 91 - (adc_val/10);
++ else
++ temp = 112 - (adc_val/6); /* 60C to 85C */
++
++ return temp;
++}
++/**
++ * mid_read_temp - read sensors for temperature
++ * @sensor: sensor to be read
++ * @therm: thermal module info structure
++ * Context: can sleep
++ *
++ * enable an ADC conversion and read the adc value on channel
++ * and convert the adc value to real time temperature
++ */
++static int mid_read_temp(int sensor,
++ struct thermal_module_info *therm)
++{
++ uint16_t adc_val, addr;
++ uint8_t data = 0;
++ int ret;
++
++ /* enable the msic for conversion before reading */
++ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc write failed\n",
++ __func__);
++ return ret;
++ }
++
++ /* re-toggle the RRDATARD bit
++ * temporary workaround */
++ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc write failed\n",
++ __func__);
++ return ret;
++ }
++ /* reading the higher bits of data */
++ addr = ADC_DATA_START_ADDR+2*(channel_index+sensor);
++ ret = intel_scu_ipc_ioread8(addr, &data);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc read failed\n",
++ __func__);
++ return ret;
++ }
++ /* shifting bits to accommodate the lower two data bits */
++ adc_val = data << 2;
++ addr++;
++ ret = intel_scu_ipc_ioread8(addr, &data);/* reading lower bits */
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc read failed\n",
++ __func__);
++ return ret;
++ }
++ /* adding lower two bits to the higher bits */
++ data &= 03;
++ adc_val += data;
++ ret = adc_to_temp(adc_val);
++ if (ret == -ERANGE) {
++ dev_err(&therm->pdev->dev,
++ "intel_mid_thermal: adc code out of range\n");
++ return ret;
++ }
++ /* convert temperature in celsius to milli degree celsius */
++ therm->platfrm_sens[sensor].curr_temp = ret * 1000;
++ return 0;
++}
++
++/**
++ * platform_thermal_monitor - monitoring the thermal sensors
++ * @work: work structure
++ * Context: can sleep
++ *
++ * monitors the thermal sensors on the platform and
++ * notifies the thermal management if threshold is exceeded.
++ */
++static void platform_thermal_monitor(struct work_struct *work)
++{
++ int ret;
++ int i;
++ struct thermal_module_info *therm = container_of(work,
++ struct thermal_module_info, thermal_monitor.work);
++ for (i = 0; i < 3; i++) {
++ ret = mid_read_temp(i, therm);
++ if (ret)
++ dev_err(&therm->pdev->dev,
++ "intel_mid_thermal:temperature \
++ conversion failed: %s", __func__);
++ else
++ notify_status(i, therm);
++ }
++ queue_delayed_work(therm->therm_monitor_wqueue,
++ &therm->thermal_monitor,
++ round_jiffies_relative(HZ * 50));
++ return;
++}
++
++
++static ssize_t show_temp_auto_offset(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
++ struct thermal_module_info *therm = dev_get_drvdata(dev);
++ int ret = 0;
++ int sensor_index = s_attr->index;
++
++ switch (s_attr->nr) {
++ case 0:
++ ret = therm->platfrm_sens[sensor_index].min_temp;
++ break;
++ case 1:
++ ret = therm->platfrm_sens[sensor_index].max_temp;
++ break;
++ case 2:
++ ret = therm->platfrm_sens[sensor_index].crit_temp;
++ break;
++ default:
++ WARN_ON(1);
++ }
++ return sprintf(buf, "%d\n", ret);
++}
++
++static ssize_t store_temp_auto_offset(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
++ struct thermal_module_info *therm = dev_get_drvdata(dev);
++ unsigned long val;
++ int sensor_index = s_attr->index;
++ if (therm == NULL) {
++ dev_err(&therm->pdev->dev,
++ "intel_mid_thermal:using NULL pointer\n");
++ return -EINVAL;
++ }
++ if (strict_strtoul(buf, 10, &val)) {
++ return -EINVAL;
++ } else {
++ switch (s_attr->nr) {
++ case 0:
++ therm->platfrm_sens[sensor_index].min_temp = val;
++ break;
++ case 1:
++ therm->platfrm_sens[sensor_index].max_temp = val;
++ break;
++ case 2:
++ therm->platfrm_sens[sensor_index].crit_temp = val;
++ break;
++ default:
++ WARN_ON(1);
++ }
++ return count;
++ }
++}
++static ssize_t show_curr_temp(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int ret;
++ struct thermal_module_info *therm = dev_get_drvdata(dev);
++ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
++ int sensor_index = s_attr->index;
++ ret = mid_read_temp(sensor_index, therm);
++ if (ret) {
++ dev_err(&therm->pdev->dev,
++ "intel_mid_thermal: %s: failed to read curr temp",
++ __func__);
++ return ret;
++ }
++ return sprintf(buf, "%d\n",
++ therm->platfrm_sens[sensor_index].curr_temp);
++}
++
++static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 0, 0);
++static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 1, 0);
++static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 2, 0);
++static SENSOR_DEVICE_ATTR_2(temp1_curr, S_IRUGO, show_curr_temp,
++ NULL, 3, 0);
++
++static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 0, 1);
++static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 1, 1);
++static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 2, 1);
++static SENSOR_DEVICE_ATTR_2(temp2_curr, S_IRUGO, show_curr_temp,
++ NULL, 3, 1);
++
++static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 0, 2);
++static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 1, 2);
++static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 2, 2);
++static SENSOR_DEVICE_ATTR_2(temp3_curr, S_IRUGO, show_curr_temp,
++ NULL, 3, 2);
++
++static struct attribute *mid_att_thermal[] = {
++ &sensor_dev_attr_temp1_min.dev_attr.attr,
++ &sensor_dev_attr_temp1_max.dev_attr.attr,
++ &sensor_dev_attr_temp1_crit.dev_attr.attr,
++ &sensor_dev_attr_temp1_curr.dev_attr.attr,
++ &sensor_dev_attr_temp2_min.dev_attr.attr,
++ &sensor_dev_attr_temp2_max.dev_attr.attr,
++ &sensor_dev_attr_temp2_crit.dev_attr.attr,
++ &sensor_dev_attr_temp2_curr.dev_attr.attr,
++ &sensor_dev_attr_temp3_min.dev_attr.attr,
++ &sensor_dev_attr_temp3_max.dev_attr.attr,
++ &sensor_dev_attr_temp3_crit.dev_attr.attr,
++ &sensor_dev_attr_temp3_curr.dev_attr.attr,
++ NULL
++};
++
++static struct attribute_group mid_thermal_gr = {
++ .name = "mid_thermal",
++ .attrs = mid_att_thermal
++};
++
++/**
++ * set_up_therm_chnl - to set thermal for conversion
++ * @base_addr: index of free msic adc channel
++ * @therm: struct thermal module info
++ * Context: can sleep
++ *
++ * To set up the adc for reading thermistor
++ * and converting the same into actual temp value
++ * on the platform
++ */
++static int set_up_therm_chnl(u16 base_addr,
++ struct thermal_module_info *therm)
++{
++ int ret;
++ /* enabling the SKINTHERM0 channel */
++ ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
++ if (ret) {
++ dev_warn(&therm->pdev->dev,
++ "%s:enabling skin therm sensor0 failed\n", __func__);
++ return ret;
++ }
++ /* enabling the SKINTHERM1 channel */
++ ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
++ if (ret) {
++ dev_warn(&therm->pdev->dev,
++ "%s:enabling skin therm sensor1 failed\n", __func__);
++ return ret;
++ }
++ /* enabling the SYSTHERM2 channel */
++ ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
++ if (ret) {
++ dev_warn(&therm->pdev->dev,
++ "%s:enabling sys therm sensor failed\n", __func__);
++ return ret;
++ }
++ /* enabling the VAUDA line
++ * this is a temporary workaround for MSIC issue */
++ ret = intel_scu_ipc_iowrite8(MSIC_VAUDA, MSIC_VAUDA_VAL);
++ if (ret) {
++ dev_warn(&therm->pdev->dev,
++ "%s:VAUDA:ipc write failed\n", __func__);
++ return ret;
++ }
++ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, MSIC_ADC_ENBL);
++ if (ret)
++ dev_warn(&therm->pdev->dev,
++ "%s:ADC CNTRL1 enabling failed\n", __func__);
++ else
++ dev_info(&therm->pdev->dev,
++ "%s:therm channel set up successfull\n", __func__);
++ return ret;
++}
++/*
++ * reset_stopbit - sets the stop bit to 0 on the given channel
++ * @addr: address of the channel
++ */
++static int reset_stopbit(uint16_t addr)
++{
++ int ret;
++ uint8_t data;
++ ret = intel_scu_ipc_ioread8(addr, &data);
++ if (ret)
++ return ret;
++ data &= 0xEF; /*setting the stop bit to zero*/
++ ret = intel_scu_ipc_iowrite8(addr, data);
++ return ret;
++}
++
++/*
++ * find_free_channel - finds an empty channel for conversion
++ * @therm: struct thermal module info
++ * Context: can sleep
++ *
++ * If adc is not enabled then start using 0th channel
++ * itself. Otherwise find an empty channel by looking for
++ * one in which the stopbit is set to 1.
++ * returns the base address if succeeds,-EINVAL otherwise
++ */
++static int find_free_channel(struct thermal_module_info *therm)
++{
++ int ret;
++ int i;
++ uint8_t data;
++
++ /* check whether ADC is enabled */
++ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc read failed\n", __func__);
++ return ret;
++ }
++ if ((data & 0x10) == 0) {
++ data = data | 0x10; /*enable ADC */
++ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc write failed\n",
++ __func__);
++ return ret;
++ }
++ /* reset stop bit on the 14th channel */
++ ret = reset_stopbit(ADC_CHNL_START_ADDR + 14);
++ if (ret) {
++ dev_warn(&therm->pdev->dev,
++ "ipc r/w failed in reset stop bit\n");
++ return ret;
++ }
++ return 0;
++ }
++ /* ADC already enabled */
++ /* Looping for empty channel */
++ for (i = 0; i < ADC_CHANLS_MAX; i++) {
++ ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc read failed\n",
++ __func__);
++ return ret;
++ }
++ if (data & MSIC_STOPBIT_MASK) {
++ ret = i;
++ break;
++ }
++ }
++ if (ret > ADC_LOOP_MAX) {
++ dev_warn(&therm->pdev->dev,
++ "%s:Cannot set up adc, no channels free\n",
++ __func__);
++ return -EINVAL;
++ }
++ return ret;
++}
++
++/*
++ * mid_initialize_adc - initializing the adc
++ * @therm: struct thermal module info
++ * Context: can sleep
++ *
++ * To initialize the adc for reading thermistor
++ * and converting the same into actual temp value
++ * on the platform
++ */
++static int mid_initialize_adc(struct thermal_module_info *therm)
++{
++ u8 data = 0;
++ int ret;
++ int offset;
++
++ /* ensure that therm conversion adctherm is disabled before we
++ * initialize the adc and map the channels
++ */
++ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
++ if (ret) {
++ dev_warn(&therm->pdev->dev, "%s:ipc read failed\n", __func__);
++ return ret;
++ }
++
++ if (data & MSIC_ADCTHERM_MASK)
++ dev_warn(&therm->pdev->dev, "%s:ADCTHERM already set\n",
++ __func__);
++
++ ret = find_free_channel(therm);
++ if (ret == -EINVAL)
++ return ret;
++
++ /* assign free channel index to global variable*/
++ channel_index = ret;
++
++ offset = (ret == 0 || ret == ADC_LOOP_MAX) ? 0 : 1;
++
++ /* base address of the free channel*/
++ ret = set_up_therm_chnl(ADC_CHNL_START_ADDR + channel_index + offset,
++ therm);
++ if (ret)
++ return ret;
++
++ if (offset) { /* no need to reset for channels 0 and 12 */
++ ret = reset_stopbit(ADC_CHNL_START_ADDR + channel_index);
++ if (ret) {
++ dev_err(&therm->pdev->dev,
++ "%s:intel_mid_thermal:ipc r/w failed", __func__);
++ return ret;
++ }
++ }
++ dev_info(&therm->pdev->dev,
++ "intel_mid_thermal:adc initialization successful\n");
++ return ret;
++}
++
++/**
++ * mid_set_default_temp_range - set default temp range
++ * @therm: thermal module info structure
++ * Context: can sleep
++ *
++ * mid set default thermal range sets the initial temp thresholds
++ * and can be reset by the thermal management solution running
++ * on the platform
++ */
++static void mid_set_default_temp_range(struct thermal_module_info *therm)
++{
++ int i;
++ /* setting default temp limits for skin thermal sensors */
++ for (i = 0; i < 2; i++) {
++ therm->platfrm_sens[i].min_temp = SKIN_SENSOR_MIN_TEMP;
++ therm->platfrm_sens[i].max_temp = SKIN_SENSOR_MAX_TEMP;
++ therm->platfrm_sens[i].crit_temp = SKIN_SENSOR_CRIT_TEMP;
++ therm->platfrm_sens[i].therm_throt_temp =
++ SKIN_SENSOR_THROT_START;
++ }
++
++ /* thresholds vary for system thermal sensor */
++ therm->platfrm_sens[2].min_temp = SYS_SENSOR_MIN_TEMP;
++ therm->platfrm_sens[2].max_temp = SYS_SENSOR_MAX_TEMP;
++ therm->platfrm_sens[2].crit_temp = SYS_SENSOR_CRIT_TEMP;
++ therm->platfrm_sens[2].therm_throt_temp = SYS_SENSOR_THROT_START;
++}
++
++/**
++ * mid_thermal_resume - resume routine
++ * @dev: platform device structure
++ * Context: can sleep
++ *
++ * mid thermal resume re-initialize the thermal monitoring
++ * of system as well as skin thermal sensors
++ */
++static int mid_thermal_resume(struct platform_device *dev)
++{
++ struct thermal_module_info *therm = platform_get_drvdata(dev);
++ if (therm) {
++ queue_delayed_work(therm->therm_monitor_wqueue,
++ &therm->thermal_monitor,
++ round_jiffies_relative(HZ * 1));
++ }
++ return 0;
++}
++
++/**
++ * mid_thermal_suspend - suspend routine
++ * @dev: platform device structure
++ * Context: can sleep
++ *
++ * mid thermal suspend implements the suspend functionality
++ * flushes the workqueue enabled for thermal monitoring
++ */
++static int mid_thermal_suspend(struct platform_device *dev, pm_message_t mesg)
++{
++ struct thermal_module_info *therm = platform_get_drvdata(dev);
++ if (therm) {
++ cancel_rearming_delayed_workqueue(therm->therm_monitor_wqueue,
++ &therm->thermal_monitor);
++ flush_scheduled_work();
++ }
++ return 0;
++}
++
++/**
++ * mid_thermal_probe - mfld thermal initialize
++ * @dev: platform device structure
++ * Context: can sleep
++ *
++ * mid thermal probe initializes its internal data structure
++ * and other infrastructure components for it to work
++ * as expected
++ */
++static int mid_thermal_probe(struct platform_device *dev)
++{
++ int ret = 0;
++ struct thermal_module_info *therm = NULL;
++
++ therm = kzalloc(sizeof(struct thermal_module_info), GFP_KERNEL);
++
++ if (therm == NULL) {
++ dev_err(&dev->dev, "%s: Memory allocation failed",
++ __func__);
++ return -ENOMEM;
++ }
++ therm->pdev = dev;
++ platform_set_drvdata(dev, therm);
++ /* creating a sysfs group with mid thermal attributes */
++ ret = sysfs_create_group(&dev->dev.kobj, &mid_thermal_gr);
++ if (ret) {
++ dev_err(&dev->dev, "%s: sysfs create group failed!\n",
++ __func__);
++ goto thermal_error1;
++ }
++ /* Registering with hwmon class */
++ therm->dev = hwmon_device_register(&dev->dev);
++ if (IS_ERR(therm->dev)) {
++ ret = PTR_ERR(therm->dev);
++ therm->dev = NULL;
++ dev_err(&dev->dev, "%s:Register hwmon dev Failed\n",
++ __func__);
++ goto thermal_error2;
++ }
++ /* initializing the workqueue required to poll the sensors */
++ INIT_DELAYED_WORK(&therm->thermal_monitor,
++ platform_thermal_monitor);
++ therm->therm_monitor_wqueue =
++ create_singlethread_workqueue(dev_name(&dev->dev));
++ if (!therm->therm_monitor_wqueue) {
++ dev_err(&dev->dev, "%s: wqueue init failed\n",
++ __func__);
++ ret = -ESRCH;
++ goto wqueue_failed;
++ }
++ /* Initializing the hardware */
++ ret = mid_initialize_adc(therm);
++ if (ret) {
++ dev_err(&dev->dev, "%s: adc init failed!\n",
++ __func__);
++ goto wqueue_failed;
++ }
++ /* setting the default min,max, and crit temp values */
++ mid_set_default_temp_range(therm);
++ queue_delayed_work(therm->therm_monitor_wqueue,
++ &therm->thermal_monitor,
++ round_jiffies_relative(HZ * 50));
++ return ret;
++
++wqueue_failed:
++ hwmon_device_unregister(therm->dev);
++thermal_error2:
++ sysfs_remove_group(&dev->dev.kobj, &mid_thermal_gr);
++thermal_error1:
++ kfree(therm);
++ return ret;
++}
++
++/**
++ * mid_thermal_remove - mfld thermal finalize
++ * @dev: platform device structure
++ * Context: can sleep
++ *
++ * MFLD thermal remove finalizes its internal data structure
++ * and other components initialized during probe
++ */
++static int mid_thermal_remove(struct platform_device *dev)
++{
++	struct thermal_module_info *therm = platform_get_drvdata(dev);
++
++	if (therm) {
++		hwmon_device_unregister(therm->dev);
++		sysfs_remove_group(&dev->dev.kobj, &mid_thermal_gr);
++		cancel_rearming_delayed_workqueue(therm->therm_monitor_wqueue,
++					&therm->thermal_monitor);
++		destroy_workqueue(therm->therm_monitor_wqueue);
++		flush_scheduled_work();
++		kfree(therm);	/* free only after wqueue teardown */
++	}
++	return 0;
++}
++
++/*********************************************************************
++ * Driver initialisation and finalization
++ *********************************************************************/
++static const struct platform_device_id therm_id_table[] = {
++	{ DRIVER_NAME, 1 }, { }	/* empty entry terminates table for platform_match_id() */
++};
++
++static struct platform_driver mid_thermal_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = mid_thermal_probe,
++ .suspend = mid_thermal_suspend,
++ .resume = mid_thermal_resume,
++ .remove = __devexit_p(mid_thermal_remove),
++ .id_table = therm_id_table,
++};
++
++static int __init mid_thermal_module_init(void)
++{
++	struct platform_device *pdev;
++	int ret = platform_driver_register(&mid_thermal_driver);
++	if (ret) {
++		printk(KERN_INFO "intel_mid_thermal: driver_register failed");
++		return ret;
++	}
++	pdev = platform_device_register_simple(DRIVER_NAME, 0, NULL, 0);
++	if (IS_ERR(pdev)) {
++		printk(KERN_INFO "intel_mid_thermal: device_register failed");
++		platform_driver_unregister(&mid_thermal_driver);
++		return PTR_ERR(pdev);
++	}
++	return ret;
++}
++
++static void __exit mid_thermal_module_exit(void)
++{
++ platform_driver_unregister(&mid_thermal_driver);
++}
++
++module_init(mid_thermal_module_init);
++module_exit(mid_thermal_module_exit);
++
++MODULE_AUTHOR("Ananth Krishna <ananth.krishna.r@intel.com>");
++MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
++MODULE_LICENSE("GPL");
+--- a/drivers/hwmon/lis3lv02d.c
++++ b/drivers/hwmon/lis3lv02d.c
+@@ -34,6 +34,7 @@
+ #include <linux/freezer.h>
+ #include <linux/uaccess.h>
+ #include <linux/miscdevice.h>
++#include <linux/pm_runtime.h>
+ #include <asm/atomic.h>
+ #include "lis3lv02d.h"
+
+@@ -52,6 +53,7 @@
+ * joystick.
+ */
+
++#define LIS3_PWRON_DELAY_WAI_16B (4000)
+ #define LIS3_PWRON_DELAY_WAI_12B (5000)
+ #define LIS3_PWRON_DELAY_WAI_8B (3000)
+
+@@ -65,6 +67,7 @@
+ /* Sensitivity values for -2G +2G scale */
+ #define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
+ #define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
++#define LIS3_SENSITIVITY_16B ((LIS3_ACCURACY * 1000) / 16384)
+
+ #define LIS3_DEFAULT_FUZZ 3
+ #define LIS3_DEFAULT_FLAT 3
+@@ -136,6 +139,7 @@
+ }
+
+ /* conversion btw sampling rate and the register values */
++static int lis3_16_rates[] = { 0, 1, 10, 25, 50, 100, 200, 400, 1600, 1250 };
+ static int lis3_12_rates[4] = {40, 160, 640, 2560};
+ static int lis3_8_rates[2] = {100, 400};
+
+@@ -147,7 +151,11 @@
+
+ lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
+ ctrl &= lis3_dev.odr_mask;
+- shift = ffs(lis3_dev.odr_mask) - 1;
++ if (lis3_dev.whoami == WAI_16B)
++ shift = 4;
++ else
++ shift = ffs(lis3_dev.odr_mask) - 1;
++
+ return lis3_dev.odrs[(ctrl >> shift)];
+ }
+
+@@ -228,23 +236,10 @@
+
+ void lis3lv02d_poweron(struct lis3lv02d *lis3)
+ {
+- u8 reg;
+-
+ lis3->init(lis3);
+
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+-
+- /*
+- * Common configuration
+- * BDU: (12 bits sensors only) LSB and MSB values are not updated until
+- * both have been read. So the value read will always be correct.
+- */
+- if (lis3->whoami == WAI_12B) {
+- lis3->read(lis3, CTRL_REG2, &reg);
+- reg |= CTRL2_BDU;
+- lis3->write(lis3, CTRL_REG2, reg);
+- }
+ }
+ EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+
+@@ -330,7 +325,8 @@
+
+ struct lis3lv02d *lis3 = data;
+
+- if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
++ if (lis3->pdata &&
++ (lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+@@ -343,7 +339,8 @@
+
+ struct lis3lv02d *lis3 = data;
+
+- if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
++ if (lis3->pdata &&
++ (lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+@@ -356,6 +353,9 @@
+ if (test_and_set_bit(0, &lis3_dev.misc_opened))
+ return -EBUSY; /* already open */
+
++ if (lis3_dev.dev)
++ pm_runtime_get(lis3_dev.dev);
++
+ atomic_set(&lis3_dev.count, 0);
+ return 0;
+ }
+@@ -363,6 +363,10 @@
+ static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
+ {
+ fasync_helper(-1, file, 0, &lis3_dev.async_queue);
++
++ if (lis3_dev.dev)
++ pm_runtime_put(lis3_dev.dev);
++
+ clear_bit(0, &lis3_dev.misc_opened); /* release the device */
+ return 0;
+ }
+@@ -445,6 +449,20 @@
+ .fops = &lis3lv02d_misc_fops,
+ };
+
++static void joystick_open(struct input_polled_dev *dev)
++{
++ if (lis3_dev.dev)
++ pm_runtime_get(lis3_dev.dev);
++
++}
++
++static void joystick_close(struct input_polled_dev *dev)
++{
++ if (lis3_dev.dev)
++ pm_runtime_put(lis3_dev.dev);
++}
++
++
+ int lis3lv02d_joystick_enable(void)
+ {
+ struct input_dev *input_dev;
+@@ -463,6 +481,8 @@
+ lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
++ lis3_dev.idev->open = joystick_open;
++ lis3_dev.idev->close = joystick_close;
+ input_dev = lis3_dev.idev->input;
+
+ input_dev->name = "ST LIS3LV02DL Accelerometer";
+@@ -528,9 +548,16 @@
+ {
+ int x, y, z;
+
++ if (lis3_dev.dev)
++ pm_runtime_get_sync(lis3_dev.dev);
++
+ mutex_lock(&lis3_dev.mutex);
+ lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ mutex_unlock(&lis3_dev.mutex);
++
++ if (lis3_dev.dev)
++ pm_runtime_put_sync(lis3_dev.dev);
++
+ return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
+ }
+
+@@ -644,6 +671,59 @@
+ }
+ }
+
++static int lis3lv02d_init_8(struct lis3lv02d *lis3)
++{
++ u8 reg;
++ int ret;
++
++ /* power up the device */
++ ret = lis3->read(lis3, CTRL_REG1, &reg);
++ if (ret < 0)
++ return ret;
++
++ reg |= CTRL1_PD0;
++ return lis3->write(lis3, CTRL_REG1, reg);
++}
++
++static int lis3lv02d_init_12(struct lis3lv02d *lis3)
++{
++ u8 reg;
++ int ret;
++
++ /*
++ * Common configuration
++ * BDU: (12 bits sensors only) LSB and MSB values are not updated until
++ * both have been read. So the value read will always be correct.
++ */
++ lis3->read(lis3, CTRL_REG2, &reg);
++ reg |= CTRL2_BDU;
++ lis3->write(lis3, CTRL_REG2, reg);
++
++ /* power up the device */
++ ret = lis3->read(lis3, CTRL_REG1, &reg);
++ if (ret < 0)
++ return ret;
++
++ reg |= CTRL1_PD0;
++ return lis3->write(lis3, CTRL_REG1, reg);
++}
++
++static int lis3lv02d_init_16(struct lis3lv02d *lis3)
++{
++ u8 reg;
++
++ reg = CTRL4_BDU | CTRL4_HR;
++ lis3->write(lis3, CTRL_REG4, reg);
++
++ /* enable x, y, z axis */
++ reg = CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
++ /* normal mode, 50 Hz */
++ reg |= CTRL1_ODR_50;
++ lis3->write(lis3, CTRL_REG1, reg);
++
++ return 0;
++}
++
+ /*
+ * Initialise the accelerometer and the various subsystems.
+ * Should be rather independent of the bus system.
+@@ -656,8 +736,19 @@
+ dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
+
+ switch (dev->whoami) {
++ case WAI_16B:
++ printk(KERN_INFO DRIVER_NAME ": 16 bits sensor found\n");
++ dev->init = lis3lv02d_init_16;
++ dev->read_data = lis3lv02d_read_12;
++ dev->mdps_max_val = 32767;
++ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_16B;
++ dev->odrs = lis3_16_rates;
++ dev->odr_mask = CTRL1_ODR_MASK;
++ dev->scale = LIS3_SENSITIVITY_16B;
++ break;
+ case WAI_12B:
+ printk(KERN_INFO DRIVER_NAME ": 12 bits sensor found\n");
++ dev->init = lis3lv02d_init_12;
+ dev->read_data = lis3lv02d_read_12;
+ dev->mdps_max_val = 2048;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
+@@ -667,6 +758,7 @@
+ break;
+ case WAI_8B:
+ printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n");
++ dev->init = lis3lv02d_init_8;
+ dev->read_data = lis3lv02d_read_8;
+ dev->mdps_max_val = 128;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+@@ -702,7 +794,7 @@
+
+ /* bail if we did not get an IRQ from the bus layer */
+ if (!dev->irq) {
+- printk(KERN_ERR DRIVER_NAME
++ printk(KERN_WARNING DRIVER_NAME
+ ": No IRQ. Disabling /dev/freefall\n");
+ goto out;
+ }
+--- a/drivers/hwmon/lis3lv02d.h
++++ b/drivers/hwmon/lis3lv02d.h
+@@ -92,7 +92,16 @@
+ DD_THSE_H = 0x3F,
+ };
+
++enum lis3dh_reg {
++ CTRL1_ODR_50 = 4 << 4,
++ CTRL1_ODR_MASK = 0xf << 4,
++ CTRL_REG4 = 0x23,
++ CTRL4_BDU = 0x80,
++ CTRL4_HR = 0x08,
++};
++
+ enum lis3_who_am_i {
++ WAI_16B = 0x33, /* 16 bits: LIS3DH... */
+ WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
+ WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
+ WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
+@@ -231,6 +240,7 @@
+
+ struct input_polled_dev *idev; /* input device */
+ struct platform_device *pdev; /* platform device */
++ struct device *dev;
+ atomic_t count; /* interrupt count after last read */
+ struct axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
+--- a/drivers/hwmon/lis3lv02d_i2c.c
++++ b/drivers/hwmon/lis3lv02d_i2c.c
+@@ -28,6 +28,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/err.h>
++#include <linux/pm_runtime.h>
+ #include <linux/i2c.h>
+ #include "lis3lv02d.h"
+
+@@ -46,20 +47,6 @@
+ return 0;
+ }
+
+-static int lis3_i2c_init(struct lis3lv02d *lis3)
+-{
+- u8 reg;
+- int ret;
+-
+- /* power up the device */
+- ret = lis3->read(lis3, CTRL_REG1, &reg);
+- if (ret < 0)
+- return ret;
+-
+- reg |= CTRL1_PD0;
+- return lis3->write(lis3, CTRL_REG1, reg);
+-}
+-
+ /* Default axis mapping but it can be overwritten by platform data */
+ static struct axis_conversion lis3lv02d_axis_map = { LIS3_DEV_X,
+ LIS3_DEV_Y,
+@@ -90,14 +77,22 @@
+
+ lis3_dev.pdata = pdata;
+ lis3_dev.bus_priv = client;
+- lis3_dev.init = lis3_i2c_init;
+ lis3_dev.read = lis3_i2c_read;
+ lis3_dev.write = lis3_i2c_write;
+ lis3_dev.irq = client->irq;
+ lis3_dev.ac = lis3lv02d_axis_map;
++ lis3_dev.dev = &client->dev;
+
+ i2c_set_clientdata(client, &lis3_dev);
++
++
+ ret = lis3lv02d_init_device(&lis3_dev);
++
++ pm_runtime_enable(&client->dev);
++
++ /* toggle the power state */
++ pm_runtime_get(&client->dev);
++ pm_runtime_put(&client->dev);
+ fail:
+ return ret;
+ }
+@@ -121,7 +116,7 @@
+ {
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+- if (!lis3->pdata->wakeup_flags)
++ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(lis3);
+ return 0;
+ }
+@@ -130,7 +125,7 @@
+ {
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+- if (!lis3->pdata->wakeup_flags)
++ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweron(lis3);
+ return 0;
+ }
+@@ -139,6 +134,26 @@
+ {
+ lis3lv02d_i2c_suspend(client, PMSG_SUSPEND);
+ }
++
++static int lis3lv02d_i2c_runtime_suspend(struct device *dev)
++{
++ struct i2c_client *i2c;
++
++ i2c = container_of(dev, struct i2c_client, dev);
++ if (i2c)
++ lis3lv02d_i2c_suspend(i2c, PMSG_SUSPEND);
++ return 0;
++}
++
++static int lis3lv02d_i2c_runtime_resume(struct device *dev)
++{
++ struct i2c_client *i2c;
++
++ i2c = container_of(dev, struct i2c_client, dev);
++ if (i2c)
++ lis3lv02d_i2c_resume(i2c);
++ return 0;
++}
+ #else
+ #define lis3lv02d_i2c_suspend NULL
+ #define lis3lv02d_i2c_resume NULL
+@@ -147,15 +162,22 @@
+
+ static const struct i2c_device_id lis3lv02d_id[] = {
+ {"lis3lv02d", 0 },
++ {"lis3dh", 0 },
+ {}
+ };
+
+ MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
+
++static const struct dev_pm_ops lis3lv02d_i2c_pm = {
++ .runtime_suspend = lis3lv02d_i2c_runtime_suspend,
++ .runtime_resume = lis3lv02d_i2c_runtime_resume,
++};
++
+ static struct i2c_driver lis3lv02d_i2c_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
++ .pm = &lis3lv02d_i2c_pm,
+ },
+ .suspend = lis3lv02d_i2c_suspend,
+ .shutdown = lis3lv02d_i2c_shutdown,
+--- a/drivers/hwmon/lis3lv02d_spi.c
++++ b/drivers/hwmon/lis3lv02d_spi.c
+@@ -25,12 +25,18 @@
+ static int lis3_spi_read(struct lis3lv02d *lis3, int reg, u8 *v)
+ {
+ struct spi_device *spi = lis3->bus_priv;
+- int ret = spi_w8r8(spi, reg | LIS3_SPI_READ);
+- if (ret < 0)
+- return -EINVAL;
+-
+- *v = (u8) ret;
+- return 0;
++ int retries;
++ int ret;
++
++	for (retries = 0; retries < 5; retries++) {
++		ret = spi_w8r8(spi, reg | LIS3_SPI_READ);
++		if (ret != -ETIMEDOUT)
++			break;
++		mdelay(10);
++	}
++	if (ret >= 0)
++		*v = (u8) ret;
++	return ret < 0 ? ret : 0;
+ }
+
+ static int lis3_spi_write(struct lis3lv02d *lis3, int reg, u8 val)
+@@ -40,20 +46,6 @@
+ return spi_write(spi, tmp, sizeof(tmp));
+ }
+
+-static int lis3_spi_init(struct lis3lv02d *lis3)
+-{
+- u8 reg;
+- int ret;
+-
+- /* power up the device */
+- ret = lis3->read(lis3, CTRL_REG1, &reg);
+- if (ret < 0)
+- return ret;
+-
+- reg |= CTRL1_PD0;
+- return lis3->write(lis3, CTRL_REG1, reg);
+-}
+-
+ static struct axis_conversion lis3lv02d_axis_normal = { 1, 2, 3 };
+
+ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
+@@ -67,7 +59,6 @@
+ return ret;
+
+ lis3_dev.bus_priv = spi;
+- lis3_dev.init = lis3_spi_init;
+ lis3_dev.read = lis3_spi_read;
+ lis3_dev.write = lis3_spi_write;
+ lis3_dev.irq = spi->irq;
+--- /dev/null
++++ b/drivers/hwmon/mrst_analog_accel.c
+@@ -0,0 +1,231 @@
++/*
++ * mrst_analog_accel.c - Intel analog accelerometer driver for Moorestown
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/input-polldev.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <asm/intel_scu_ipc.h>
++
++#define POLL_INTERVAL 50
++
++/* PMIC ADC INTERRUPT REGISTERS */
++#define PMIC_ADC_ACC_REG_ADCINT 0x5F /*ADC interrupt register */
++#define PMIC_ADC_ACC_REG_MADCINT 0x60 /*ADC interrupt mask register */
++
++/* PMIC ADC CONTROL REGISTERS */
++#define PMIC_ADC_ACC_REG_ADCCNTL1 0x61 /*ADC control register */
++#define PMIC_ADC_ACC_REG_ADCCNTL2 0x62 /*ADC gain regs channel 10-17 */
++#define PMIC_ADC_ACC_REG_ADCCNTL3 0x63 /*ADC gain regs channel 18-21 */
++
++/* PMIC Data Register base */
++#define PMIC_ADC_DATA_REG_BASE 0x64
++
++/* PMIC Channel Mapping Register base */
++#define PMIC_ADC_MAPPING_BASE 0xA4
++
++/* Number of PMIC sample registers */
++#define PMIC_ADC_REG_MAX 32 /* Max no of available channel */
++
++#define PMIC_ADC_X_REG_HIGH(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2))
++#define PMIC_ADC_X_REG_LOW(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 1)
++#define PMIC_ADC_Y_REG_HIGH(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 2)
++#define PMIC_ADC_Y_REG_LOW(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 3)
++#define PMIC_ADC_Z_REG_HIGH(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 4)
++#define PMIC_ADC_Z_REG_LOW(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 5)
++
++/* Number of registers to read at a time */
++#define REG_READ_PER_IPC 4 /* Read 4 at a time although the */
++ /* IPC will support max 5 */
++
++#define END_OF_CHANNEL_VALUE 0x1F /* Used to indicate the last */
++ /* channel being used */
++
++static unsigned int mrst_analog_reg_idx;
++
++static ssize_t mrst_accel_read(int idx)
++{
++ u16 reg;
++ int ret;
++ if (intel_scu_ipc_ioread16(idx, &reg))
++ return -EIO;
++
++ /* The returned value is 16bits in the form
++ [XXXXXlow 3 bits][high 8 bits] */
++ ret = (reg << 3) | ((reg >> 8) & 7);
++ return ret;
++}
++
++static void mrst_analog_poll(struct input_polled_dev *idev)
++{
++ struct input_dev *input = idev->input;
++ int r;
++
++ r = mrst_accel_read(PMIC_ADC_X_REG_HIGH(mrst_analog_reg_idx));
++ if (r >= 0)
++ input_report_abs(input, ABS_X, r);
++ r = mrst_accel_read(PMIC_ADC_Y_REG_HIGH(mrst_analog_reg_idx));
++ if (r >= 0)
++ input_report_abs(input, ABS_Y, r);
++ r = mrst_accel_read(PMIC_ADC_Z_REG_HIGH(mrst_analog_reg_idx));
++ if (r >= 0)
++ input_report_abs(input, ABS_Z, r);
++ input_sync(input);
++}
++
++static int mrst_analog_accel_initialize(void)
++{
++	int retval = 0;
++	u8 mad_cntrl = 0;	/* MADCINT register value */
++	u8 adc_cntrl2;		/* ADCCNTL2 register value */
++	int i, j;
++	u32 r32;		/* holds 4 8-bit mapping regs per IPC read */
++
++	/* Initialize the register index to use to be zero */
++	mrst_analog_reg_idx = 0;
++
++	/* check if the ADC is enabled or not
++	 * Read ADCCNTL1 registers */
++	retval = intel_scu_ipc_ioread8(PMIC_ADC_ACC_REG_ADCCNTL1, &adc_cntrl2);
++	if (retval != 0)
++		return retval;
++
++	if ((adc_cntrl2 >> 7) & 0x1) {
++		/* If the ADC is enabled find the set of registers to use
++		** Loop through the channel mapping register to find out the
++		** first free one
++		*/
++		for (i = 0;
++		     (i < PMIC_ADC_REG_MAX) && (mrst_analog_reg_idx == 0);
++		     i += REG_READ_PER_IPC) {
++
++			/* Reading 4 regs at a time instead of reading each
++			 * reg one by one since IPC is an expensive operation
++			 */
++			retval = intel_scu_ipc_ioread32(
++					PMIC_ADC_MAPPING_BASE + i, &r32);
++			if (retval != 0)
++				return retval;
++			/* was: tested uninitialized "r" and used bit offset j */
++			for (j = 0; j < 32; j += 8) {
++				if (((r32 >> j) & 0xFF) == END_OF_CHANNEL_VALUE) {
++					mrst_analog_reg_idx = i + (j >> 3);
++					break;
++				}
++			}
++		}
++	}
++	/* Check to see if there are enough registers to map the channel */
++	if (mrst_analog_reg_idx + 3 >= PMIC_ADC_REG_MAX) {
++		printk(KERN_ERR
++		"mrst_analog_accel:Not enough regs to map the channels\n");
++		return -ENOSPC;
++	}
++
++	/* Set Ch14, Ch15, Ch16, End of channel */
++	retval = intel_scu_ipc_iowrite32(
++			PMIC_ADC_MAPPING_BASE + mrst_analog_reg_idx,
++			0x0E0F101F);
++
++	if (retval != 0)
++		return retval;
++
++	/* If the ADC was not enabled, enable it now */
++	if (!((adc_cntrl2 >> 7) & 0x1)) {
++		retval = intel_scu_ipc_update_register(
++			PMIC_ADC_ACC_REG_MADCINT, mad_cntrl, 0x01);
++		if (retval != 0)
++			return retval;
++
++		/* 27ms delay,start round robin, enable full power */
++		retval = intel_scu_ipc_iowrite8(PMIC_ADC_ACC_REG_ADCCNTL1, 0xc6);
++		if (retval != 0)
++			return retval;
++	}
++	return retval;
++}
++
++static struct platform_device *pdev;
++static struct input_polled_dev *idev;
++
++static int __init mrst_analog_accel_module_init(void)
++{
++	int retval;
++	struct input_dev *input;
++
++	pdev = platform_device_register_simple("mrst_analog_accel",
++						0, NULL, 0);
++	if (IS_ERR(pdev))
++		return PTR_ERR(pdev);
++
++	retval = mrst_analog_accel_initialize();
++	if (retval)
++		goto free_dev;	/* was: leaked pdev on ADC init failure */
++	idev = input_allocate_polled_device();
++	if (!idev) {
++		retval = -ENOMEM;	/* was: fell through with retval == 0 */
++		goto free_dev;
++	}
++	idev->poll = mrst_analog_poll;
++	idev->poll_interval = POLL_INTERVAL;
++	input = idev->input;
++	input->name = "mrst_analog";
++	input->phys = "intel_scu/input0";
++	input->id.bustype = BUS_HOST;
++	input->dev.parent = &pdev->dev;
++	input->evbit[0] = BIT_MASK(EV_ABS);
++	input_set_abs_params(input, ABS_X, 0, 255, 2, 2);
++	input_set_abs_params(input, ABS_Y, 0, 255, 2, 2);
++	input_set_abs_params(input, ABS_Z, 0, 255, 2, 2);
++	retval = input_register_polled_device(idev);
++	if (retval == 0)
++		return 0;
++	input_free_polled_device(idev);
++free_dev:
++	platform_device_unregister(pdev);
++	return retval;
++}
++
++static void __exit mrst_analog_accel_module_exit(void)
++{
++ input_unregister_polled_device(idev);
++ input_free_polled_device(idev);
++ platform_device_unregister(pdev);
++}
++
++module_init(mrst_analog_accel_module_init);
++module_exit(mrst_analog_accel_module_exit);
++
++MODULE_AUTHOR("Ramesh Agarwal, Alan Cox");
++MODULE_DESCRIPTION("Intel Moorestown Analog Accelerometer Driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -420,6 +420,15 @@
+ This driver is deprecated and will be dropped soon. Use i2c-gpio
+ instead.
+
++config I2C_MRST
++ tristate "Intel Moorestown/Medfield Platform I2C controller"
++ help
++ Say Y here if you have an Intel Moorestown/Medfield platform I2C
++ controller.
++
++ This support is also available as a module. If so, the module
++ will be called i2c-mrst.
++
+ config I2C_MPC
+ tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
+ depends on PPC32
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -40,6 +40,7 @@
+ obj-$(CONFIG_I2C_IMX) += i2c-imx.o
+ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
+ obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
++obj-$(CONFIG_I2C_MRST) += i2c-mrst.o
+ obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
+ obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
+ obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-mrst.c
+@@ -0,0 +1,979 @@
++/*
++ * Support for Moorestown/Medfield I2C chip
++ *
++ * Copyright (c) 2009 Intel Corporation.
++ * Copyright (c) 2009 Synopsys. Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License, version
++ * 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT ANY
++ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
++ * details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc., 51
++ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/gpio.h>
++#include <linux/interrupt.h>
++#include <linux/pm_runtime.h>
++
++#include <linux/io.h>
++
++#include "i2c-mrst.h"
++
++#define DEF_BAR 0
++#define VERSION "Version 0.5"
++#define PLATFORM "Moorestown/Medfield"
++
++#define mrst_i2c_read(reg) __raw_readl(reg)
++#define mrst_i2c_write(reg, val) __raw_writel((val), (reg))
++
++/* Use defines not enumerations so that we can do this with tables */
++
++#define MOOESTOWN 0
++#define MEDFIELD 1
++
++#define NUM_PLATFORMS 2
++
++#define STANDARD 0
++#define FAST 1
++#define HIGH 2
++
++#define NUM_SPEEDS 3
++
++
++static int speed_mode[6] = {
++ STANDARD,
++ STANDARD,
++ STANDARD,
++ STANDARD,
++ STANDARD,
++ STANDARD
++};
++static int ctl_num;
++module_param_array(speed_mode, int, &ctl_num, S_IRUGO);
++
++/**
++ * mrst_i2c_disable - Disable I2C controller
++ * @adap: struct pointer to i2c_adapter
++ *
++ * Return Value:
++ * 0 success
++ * -EBUSY if device is busy
++ * -ETIMEOUT if i2c cannot be disabled within the given time
++ *
++ * I2C bus state should be checked prior to disabling the hardware. If bus is
++ * not in idle state, an errno is returned. Write "0" to IC_ENABLE to disable
++ * I2C controller.
++ */
++static int mrst_i2c_disable(struct i2c_adapter *adap)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++
++ int count = 0;
++ int ret1, ret2;
++ static const u16 delay[NUM_SPEEDS] = {100, 25, 3};
++
++ /* Set IC_ENABLE to 0 */
++ mrst_i2c_write(i2c->base + IC_ENABLE, 0);
++
++ /* Check if device is busy */
++ dev_dbg(&adap->dev, "mrst i2c disable\n");
++ while ((ret1 = mrst_i2c_read(i2c->base + IC_ENABLE_STATUS) & 0x1)
++ || (ret2 = mrst_i2c_read(i2c->base + IC_STATUS) & 0x1)) {
++ udelay(delay[i2c->speed]);
++ mrst_i2c_write(i2c->base + IC_ENABLE, 0);
++ dev_dbg(&adap->dev, "i2c is busy, count is %d speed %d\n",
++ count, i2c->speed);
++ if (count++ > 10)
++ break;
++ }
++
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++ /* Disable all interupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++
++ return 0;
++}
++
++/**
++ * mrst_i2c_hwinit - Initiate the I2C hardware registers. This function will
++ * be called in mrst_i2c_probe() before device registration.
++ * @dev: pci device struct pointer
++ *
++ * Return Values:
++ * 0 success
++ * -EBUSY i2c cannot be disabled
++ * -ETIMEDOUT i2c cannot be disabled
++ * -EFAULT If APB data width is not 32-bit wide
++ *
++ * I2C should be disabled prior to other register operation. If failed, an
++ * errno is returned. Mask and Clear all interrpts, this should be done at
++ * first. Set common registers which will not be modified during normal
++ * transfers, including: controll register, FIFO threshold and clock freq.
++ * Check APB data width at last.
++ */
++static int mrst_i2c_hwinit(struct mrst_i2c_private *i2c)
++{
++ int err = 0;
++ static const u16 hcnt[NUM_PLATFORMS][NUM_SPEEDS] = {
++ { 0x75, 0x15, 0x07 },
++ { 0x1EC, 0x70, 0x06 }
++ };
++ static const u16 lcnt[NUM_PLATFORMS][NUM_SPEEDS] = {
++ { 0x7C, 0x21, 0x0E },
++ { 0x1F3, 0x81, 0x0F }
++ };
++
++ /* Disable i2c first */
++ mrst_i2c_disable(i2c->adap);
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++ /* Disable all interupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++
++ /*
++ * Setup clock frequency and speed mode
++ * Enable restart condition,
++ * enable master FSM, disable slave FSM,
++ * use target address when initiating transfer
++ */
++
++ mrst_i2c_write(i2c->base + IC_CON,
++ (i2c->speed + 1) << 1 | SLV_DIS | RESTART | MASTER_EN);
++ mrst_i2c_write(i2c->base + (IC_SS_SCL_HCNT + (i2c->speed << 3)),
++ hcnt[i2c->platform][i2c->speed]);
++ mrst_i2c_write(i2c->base + (IC_SS_SCL_LCNT + (i2c->speed << 3)),
++ lcnt[i2c->platform][i2c->speed]);
++
++ /* Set tranmit & receive FIFO threshold to zero */
++ mrst_i2c_write(i2c->base + IC_RX_TL, 0x0);
++ mrst_i2c_write(i2c->base + IC_TX_TL, 0x0);
++
++ return err;
++}
++
++/**
++ * mrst_i2c_func - Return the supported three I2C operations.
++ * @adapter: i2c_adapter struct pointer
++ */
++static u32 mrst_i2c_func(struct i2c_adapter *adapter)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
++}
++
++/**
++ * mrst_i2c_invalid_address - To check if the address in i2c message is
++ * correct.
++ * @p: i2c_msg struct pointer
++ *
++ * Return Values:
++ * 0 if the address is valid
++ * 1 if the address is invalid
++ */
++static inline int mrst_i2c_invalid_address(const struct i2c_msg *p)
++{
++ int ret = ((p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN)
++ && (p->addr > 0x7f)));
++ return ret;
++}
++
++/**
++ * mrst_i2c_address_neq - To check if the addresses for different i2c messages
++ * are equal.
++ * @p1: first i2c_msg
++ * @p2: second i2c_msg
++ *
++ * Return Values:
++ * 0 if addresses are equal
++ * 1 if not equal
++ *
++ * Within a single transfer, I2C client may need to send its address more
++ * than one time. So a check for the address equation is needed.
++ */
++static inline int mrst_i2c_address_neq(const struct i2c_msg *p1,
++ const struct i2c_msg *p2)
++{
++ int ret = ((p1->addr != p2->addr) || ((p1->flags & (I2C_M_TEN))
++ != ((p2->flags) & (I2C_M_TEN))));
++ return ret;
++}
++
++/**
++ * mrst_i2c_abort - To handle transfer abortions and print error messages.
++ * @adap: i2c_adapter struct pointer
++ *
++ * By reading register IC_TX_ABRT_SOURCE, various transfer errors can be
++ * distinguished. At present, no circumstances have been found in which
++ * multiple errors occur simultaneously, so we simply use the
++ * register value directly.
++ *
++ * At last the error bits are cleared. (Note clear ABRT_SBYTE_NORSTRT bit need
++ * a few extra steps)
++ */
++static void mrst_i2c_abort(struct mrst_i2c_private *i2c)
++{
++ /* Read about source register */
++ int abort = i2c->abort;
++ struct i2c_adapter *adap = i2c->adap;
++
++ /* Single transfer error check:
++ * According to databook, TX/RX FIFOs would be flushed when
++ * the abort interrupt occurred.
++ */
++ switch (abort) {
++ case ABRT_MASTER_DIS:
++ dev_err(&adap->dev,
++ "initiate Master operation with Master mode disabled.\n");
++
++ break;
++ case ABRT_10B_RD_NORSTRT:
++ dev_err(&adap->dev,
++ "RESTART disabled and master sends READ cmd in 10-BIT addressing.\n");
++ break;
++ case ABRT_SBYTE_NORSTRT:
++ dev_err(&adap->dev,
++ "RESTART disabled and user is trying to send START byte.\n");
++ mrst_i2c_write(i2c->base + IC_TX_ABRT_SOURCE,
++ !(ABRT_SBYTE_NORSTRT));
++ mrst_i2c_write(i2c->base + IC_CON, RESTART);
++ mrst_i2c_write(i2c->base + IC_TAR, !(IC_TAR_SPECIAL));
++ break;
++ case ABRT_SBYTE_ACKDET:
++ dev_err(&adap->dev,
++ "START byte was acknowledged.\n");
++ break;
++ case ABRT_TXDATA_NOACK:
++ dev_err(&adap->dev,
++ "No acknowledge received from slave.\n");
++ break;
++ case ABRT_10ADDR2_NOACK:
++ dev_err(&adap->dev,
++ "The 2nd address byte of the 10-bit address not acknowledged.\n");
++ break;
++ case ABRT_10ADDR1_NOACK:
++ dev_dbg(&adap->dev,
++ "The 1st address byte of 10-bit address not acknowledged.\n");
++ break;
++ case ABRT_7B_ADDR_NOACK:
++ dev_err(&adap->dev,
++ "I2C slave device not acknowledge.\n");
++ break;
++ default:
++ ;
++ }
++
++ /* Clear TX_ABRT bit */
++ mrst_i2c_read(i2c->base + IC_CLR_TX_ABRT);
++ i2c->status = STATUS_XFER_ABORT;
++}
++
++/**
++ * xfer_read - Internal function to implement master read transfer.
++ * @adap: i2c_adapter struct pointer
++ * @buf: buffer in i2c_msg
++ * @length: number of bytes to be read
++ *
++ * Return Values:
++ * 0 if the read transfer succeeds
++ * -ETIMEDOUT if cannot read the "raw" interrupt register
++ * -EINVAL if transfer abort occurred
++ *
++ * For every byte, a "READ" command will be loaded into IC_DATA_CMD prior to
++ * data transfer. The actual "read" operation is performed when the RX_FULL
++ * interrupt occurs.
++ *
++ * Note there may be two interrupt signals captured, one should read
++ * IC_RAW_INTR_STAT to separate between errors and actual data.
++ */
++static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *) i2c_get_adapdata(adap);
++ int i = length;
++ int err;
++
++ if (length >= 256) {
++ dev_err(&adap->dev,
++ "I2C FIFO can not support larger than 256 bytes\n");
++ return -EINVAL;
++ }
++
++ INIT_COMPLETION(i2c->complete);
++
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0044);
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ i2c->status = STATUS_READ_START;
++ while (i--)
++ mrst_i2c_write(i2c->base + IC_DATA_CMD, (uint16_t)0x100);
++
++ err = wait_for_completion_interruptible_timeout(&i2c->complete, HZ);
++ if (!err) {
++ dev_err(&adap->dev, "Time out for ACK from I2C slave device\n");
++ mrst_i2c_hwinit(i2c);
++ return -ETIMEDOUT;
++ } else {
++ if (i2c->status == STATUS_READ_SUCCESS)
++ return 0;
++ else
++ return -EINVAL;
++ }
++}
++
++/**
++ * xfer_write - Internal function to implement master write transfer.
++ * @adap: i2c_adapter struct pointer
++ * @buf: buffer in i2c_msg
++ * @length: number of bytes to be written
++ *
++ * Return Values:
++ * 0 if the write transfer succeeds
++ * -ETIMEDOUT if cannot read the "raw" interrupt register
++ * -EINVAL if transfer abort occurred
++ *
++ * For every byte, a "WRITE" command will be loaded into IC_DATA_CMD prior to
++ * data transfer. The actual "write" operation is performed when the
++ * TX_EMPTY interrupt signal occurs.
++ *
++ * Note there may be two interrupt signals captured, one should read
++ * IC_RAW_INTR_STAT to separate between errors and actual data.
++ */
++static int xfer_write(struct i2c_adapter *adap,
++ unsigned char *buf, int length)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++
++ int i, err;
++
++ if (length >= 256) {
++ dev_err(&adap->dev,
++ "I2C FIFO can not support larger than 256 bytes\n");
++ return -EINVAL;
++ }
++
++ INIT_COMPLETION(i2c->complete);
++
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0050);
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ i2c->status = STATUS_WRITE_START;
++ for (i = 0; i < length; i++)
++ mrst_i2c_write(i2c->base + IC_DATA_CMD,
++ (uint16_t)(*(buf + i)));
++
++ err = wait_for_completion_interruptible_timeout(&i2c->complete, HZ);
++ if (!err) {
++ dev_err(&adap->dev, "Time out for ACK from I2C slave device\n");
++ mrst_i2c_hwinit(i2c);
++ return -ETIMEDOUT;
++ } else {
++ if (i2c->status == STATUS_WRITE_SUCCESS)
++ return 0;
++ else
++ return -EINVAL;
++ }
++}
++
++static int mrst_i2c_setup(struct i2c_adapter *adap, struct i2c_msg *pmsg)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++ int err;
++ u32 reg_val;
++ u32 bit_mask;
++ u32 mode;
++
++ /* Disable device first */
++ err = mrst_i2c_disable(adap);
++ if (err) {
++ dev_err(&adap->dev,
++ "Cannot disable i2c controller, timeout\n");
++ return -ETIMEDOUT;
++ }
++
++ mode = (1 + i2c->speed) << 1;
++ /* set the speed mode */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ if ((reg_val & 0x06) != mode) {
++ dev_dbg(&adap->dev, "set mode %d\n", i2c->speed);
++ mrst_i2c_write(i2c->base + IC_CON,
++ (reg_val & (~0x6)) | mode);
++ }
++
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ /* use 7-bit addressing */
++ if (pmsg->flags & I2C_M_TEN) {
++ if ((reg_val & (1<<4)) != ADDR_10BIT) {
++ dev_dbg(&adap->dev, "set i2c 10 bit address mode\n");
++ mrst_i2c_write(i2c->base + IC_CON,
++ reg_val | ADDR_10BIT);
++ }
++ } else {
++ if ((reg_val & (1<<4)) != 0x0) {
++ dev_dbg(&adap->dev, "set i2c 7 bit address mode\n");
++ mrst_i2c_write(i2c->base + IC_CON, reg_val & (~(1<<4)));
++ }
++ }
++ /* enable restart conditions */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ if ((reg_val & (1<<5)) != 1<<5) {
++ dev_dbg(&adap->dev, "enable restart conditions\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 5)))
++ | 1 << 5);
++ }
++
++ /* enable master FSM */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & 0xFE) | MASTER_EN);
++ if ((reg_val & (1<<6)) != 1<<6) {
++ dev_dbg(&adap->dev, "enable master FSM\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 6)))
++ | 1<<6);
++ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
++ }
++
++ /* use target address when initiating transfer */
++ reg_val = mrst_i2c_read(i2c->base + IC_TAR);
++ bit_mask = 1 << 11 | 1 << 10;
++
++ if ((reg_val & bit_mask) != 0x0) {
++ dev_dbg(&adap->dev,
++ "WR: use target address when intiating transfer, i2c_tx_target\n");
++ mrst_i2c_write(i2c->base + IC_TAR, reg_val & ~bit_mask);
++ }
++
++ /* set target address to the I2C slave address */
++ dev_dbg(&adap->dev,
++ "set target address to the I2C slave address, addr is %x\n",
++ pmsg->addr);
++ mrst_i2c_write(i2c->base + IC_TAR, pmsg->addr
++ | (pmsg->flags & I2C_M_TEN ? IC_TAR_10BIT_ADDR : 0));
++
++ /* Enable I2C controller */
++ mrst_i2c_write(i2c->base + IC_ENABLE, ENABLE);
++
++ return 0;
++}
++
++/**
++ * mrst_i2c_xfer - Main master transfer routine.
++ * @adap: i2c_adapter struct pointer
++ * @pmsg: i2c_msg struct pointer
++ * @num: number of i2c_msg
++ *
++ * Return Values:
++ * + number of messages transferred
++ * -ETIMEDOUT If cannot disable I2C controller or read IC_STATUS
++ * -EINVAL If the address in i2c_msg is invalid
++ *
++ * This function will be registered in i2c-core and exposed to external
++ * I2C clients.
++ * 1. Disable I2C controller
++ * 2. Unmask three interrupts: RX_FULL, TX_EMPTY, TX_ABRT
++ * 3. Check if address in i2c_msg is valid
++ * 4. Enable I2C controller
++ * 5. Perform real transfer (call xfer_read or xfer_write)
++ * 6. Wait until the current transfer is finished(check bus state)
++ * 7. Mask and clear all interrupts
++ */
++static int mrst_i2c_xfer(struct i2c_adapter *adap,
++ struct i2c_msg *pmsg,
++ int num)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++ int i, err;
++
++ pm_runtime_get(&adap->dev);
++
++ mutex_lock(&i2c->lock);
++ dev_dbg(&adap->dev, "mrst_i2c_xfer, process %d msg(s)\n", num);
++ dev_dbg(&adap->dev, KERN_INFO "slave address is %x\n", pmsg->addr);
++
++ if (i2c->status != STATUS_IDLE) {
++ dev_err(&adap->dev, "Adapter %d in transfer/standby\n",
++ adap->nr);
++ mutex_unlock(&i2c->lock);
++ pm_runtime_put(&adap->dev);
++ return -1;
++ }
++
++ /* if number of messages equal 0*/
++ if (num == 0) {
++ mutex_unlock(&i2c->lock);
++ pm_runtime_put(&adap->dev);
++ return 0;
++ }
++
++ /* Checked the sanity of passed messages. */
++ if (unlikely(mrst_i2c_invalid_address(&pmsg[0]))) {
++ dev_err(&adap->dev, "Invalid address 0x%03x (%d-bit)\n",
++ pmsg[0].addr, pmsg[0].flags & I2C_M_TEN ? 10 : 7);
++ mutex_unlock(&i2c->lock);
++ pm_runtime_put(&adap->dev);
++ return -EINVAL;
++ }
++ for (i = 0; i < num; i++) {
++ /* Message address equal? */
++ if (unlikely(mrst_i2c_address_neq(&pmsg[0], &pmsg[i]))) {
++ dev_err(&adap->dev, "Invalid address in msg[%d]\n", i);
++ mutex_unlock(&i2c->lock);
++ pm_runtime_put(&adap->dev);
++ return -EINVAL;
++ }
++ }
++
++ if (mrst_i2c_setup(adap, pmsg)) {
++ mutex_unlock(&i2c->lock);
++ pm_runtime_put(&adap->dev);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < num; i++) {
++ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
++ pmsg->flags & I2C_M_RD ? "read" : "writ",
++ pmsg->len, pmsg->len > 1 ? "s" : "",
++ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
++
++
++ i2c->msg = pmsg;
++ i2c->status = STATUS_IDLE;
++ /* Read or Write */
++ if (pmsg->len && pmsg->buf) {
++ if (pmsg->flags & I2C_M_RD) {
++ dev_dbg(&adap->dev, "I2C_M_RD\n");
++ err = xfer_read(adap, pmsg->buf, pmsg->len);
++ } else {
++ dev_dbg(&adap->dev, "I2C_M_WR\n");
++ err = xfer_write(adap, pmsg->buf, pmsg->len);
++ }
++ if (err < 0)
++ goto err_1;
++ }
++ dev_dbg(&adap->dev, "msg[%d] transfer complete\n", i);
++ pmsg++; /* next message */
++ }
++ goto exit;
++
++err_1:
++ i = err;
++exit:
++ /* Mask interrupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ i2c->status = STATUS_IDLE;
++ mutex_unlock(&i2c->lock);
++ pm_runtime_put(&adap->dev);
++
++ return i;
++}
++
++static int mrst_i2c_suspend(struct pci_dev *dev, pm_message_t mesg)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)pci_get_drvdata(dev);
++ struct i2c_adapter *adap = i2c->adap;
++ int err;
++
++ if (i2c->status != STATUS_IDLE)
++ return -1;
++
++ mrst_i2c_disable(adap);
++
++ err = pci_save_state(dev);
++ if (err) {
++ dev_err(&dev->dev, "pci_save_state failed\n");
++ return err;
++ }
++
++ err = pci_set_power_state(dev, PCI_D3hot);
++ if (err) {
++ dev_err(&dev->dev, "pci_set_power_state failed\n");
++ return err;
++ }
++
++ i2c->status = STATUS_STANDBY;
++
++ return 0;
++}
++
++static int mrst_i2c_resume(struct pci_dev *dev)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)pci_get_drvdata(dev);
++ int err;
++
++ if (i2c->status != STATUS_STANDBY)
++ return 0;
++
++ pci_set_power_state(dev, PCI_D0);
++ pci_restore_state(dev);
++ err = pci_enable_device(dev);
++ if (err) {
++ dev_err(&dev->dev, "pci_enable_device failed\n");
++ return err;
++ }
++
++ i2c->status = STATUS_IDLE;
++
++ mrst_i2c_hwinit(i2c);
++
++ return err;
++}
++
++static int mrst_i2c_runtime_suspend(struct device *dev)
++{
++ struct i2c_adapter *adap = to_i2c_adapter(dev);
++ int err = 0;
++ adap = adap;
++ /* Doing nothing now */
++
++ return err;
++}
++
++static int mrst_i2c_runtime_resume(struct device *dev)
++{
++ struct i2c_adapter *adap = to_i2c_adapter(dev);
++ int err = 0;
++ adap = adap;
++
++ /* Doing nothing now */
++
++ return err;
++}
++
++static void i2c_isr_read(struct mrst_i2c_private *i2c)
++{
++ struct i2c_msg *msg = i2c->msg;
++ int rx_num;
++ u32 len;
++ u8 *buf;
++
++ if (!(msg->flags & I2C_M_RD))
++ return;
++
++ if (i2c->status != STATUS_READ_IN_PROGRESS) {
++ len = msg->len;
++ buf = msg->buf;
++ } else {
++ len = i2c->rx_buf_len;
++ buf = i2c->rx_buf;
++ }
++
++ rx_num = mrst_i2c_read(i2c->base + IC_RXFLR);
++
++ for (; len > 0 && rx_num > 0; len--, rx_num--)
++ *buf++ = mrst_i2c_read(i2c->base + IC_DATA_CMD);
++
++ if (len > 0) {
++ i2c->status = STATUS_READ_IN_PROGRESS;
++ i2c->rx_buf_len = len;
++ i2c->rx_buf = buf;
++ } else
++ i2c->status = STATUS_READ_SUCCESS;
++
++ return;
++}
++
++static irqreturn_t mrst_i2c_isr(int this_irq, void *dev)
++{
++ struct mrst_i2c_private *i2c = dev;
++ u32 stat = mrst_i2c_read(i2c->base + IC_INTR_STAT) & 0x54;
++
++ dev_dbg(&i2c->adap->dev, "%s, stat = 0x%x\n", __func__, stat);
++
++ if ((i2c->status != STATUS_WRITE_START) &&
++ (i2c->status != STATUS_READ_START) &&
++ (i2c->status != STATUS_READ_IN_PROGRESS))
++ goto err;
++
++ if (stat & TX_ABRT)
++ i2c->abort = mrst_i2c_read(i2c->base + IC_TX_ABRT_SOURCE);
++
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ if (stat & TX_ABRT) {
++ mrst_i2c_abort(i2c);
++ goto exit;
++ }
++
++ if (stat & RX_FULL) {
++ i2c_isr_read(i2c);
++ goto exit;
++ }
++
++ if (stat & TX_EMPTY) {
++ if (mrst_i2c_read(i2c->base + IC_STATUS) & 0x4)
++ i2c->status = STATUS_WRITE_SUCCESS;
++ }
++
++exit:
++ if ((i2c->status == STATUS_READ_SUCCESS) ||
++ (i2c->status == STATUS_WRITE_SUCCESS) ||
++ (i2c->status == STATUS_XFER_ABORT)) {
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++ /* Mask interrupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++ complete(&i2c->complete);
++ }
++err:
++ return IRQ_HANDLED;
++}
++
++static struct pci_device_id mrst_i2c_ids[] = {
++ /* Moorestown */
++ {PCI_VDEVICE(INTEL, 0x0802), 0 },
++ {PCI_VDEVICE(INTEL, 0x0803), 1 },
++ {PCI_VDEVICE(INTEL, 0x0804), 2 },
++ /* Medfield */
++ {PCI_VDEVICE(INTEL, 0x0817), 3,},
++ {PCI_VDEVICE(INTEL, 0x0818), 4 },
++ {PCI_VDEVICE(INTEL, 0x0819), 5 },
++ {PCI_VDEVICE(INTEL, 0x082C), 0 },
++ {PCI_VDEVICE(INTEL, 0x082D), 1 },
++ {PCI_VDEVICE(INTEL, 0x082E), 2 },
++ {0,}
++};
++MODULE_DEVICE_TABLE(pci, mrst_i2c_ids);
++
++static struct i2c_algorithm mrst_i2c_algorithm = {
++ .master_xfer = mrst_i2c_xfer,
++ .functionality = mrst_i2c_func,
++};
++
++
++static const struct dev_pm_ops mrst_i2c_pm_ops = {
++ .runtime_suspend = mrst_i2c_runtime_suspend,
++ .runtime_resume = mrst_i2c_runtime_resume,
++};
++
++static struct pci_driver mrst_i2c_driver = {
++ .driver = {
++ .pm = &mrst_i2c_pm_ops,
++ },
++ .name = "mrst_i2c",
++ .id_table = mrst_i2c_ids,
++ .probe = mrst_i2c_probe,
++ .remove = __devexit_p(mrst_i2c_remove),
++ .suspend = mrst_i2c_suspend,
++ .resume = mrst_i2c_resume,
++};
++
++/**
++ * mrst_i2c_probe - I2C controller initialization routine
++ * @dev: pci device
++ * @id: device id
++ *
++ * Return Values:
++ * 0 success
++ * -ENODEV If cannot allocate pci resource
++ * -ENOMEM If the register base remapping failed, or
++ * if kzalloc failed
++ *
++ * Initialization steps:
++ * 1. Request for PCI resource
++ * 2. Remap the start address of PCI resource to register base
++ * 3. Request for device memory region
++ * 4. Fill in the struct members of mrst_i2c_private
++ * 5. Call mrst_i2c_hwinit() for hardware initialization
++ * 6. Register I2C adapter in i2c-core
++ */
++static int __devinit mrst_i2c_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ struct mrst_i2c_private *mrst;
++ struct i2c_adapter *adap;
++ unsigned int start, len;
++ int err, busnum = 0;
++ void __iomem *base = NULL;
++
++ dev_dbg(&dev->dev, "Get into probe function for I2C\n");
++ err = pci_enable_device(dev);
++ if (err) {
++ dev_err(&dev->dev, "Failed to enable I2C PCI device (%d)\n",
++ err);
++ goto exit;
++ }
++
++ /* Determine the address of the I2C area */
++ start = pci_resource_start(dev, DEF_BAR);
++ len = pci_resource_len(dev, DEF_BAR);
++ if (!start || len <= 0) {
++ dev_err(&dev->dev, "Base address initialization failed\n");
++ err = -ENODEV;
++ goto exit;
++ }
++ dev_dbg(&dev->dev, "%s i2c resource start %x, len=%d\n",
++ PLATFORM, start, len);
++ err = pci_request_region(dev, DEF_BAR, mrst_i2c_driver.name);
++ if (err) {
++ dev_err(&dev->dev, "Failed to request I2C region "
++ "0x%1x-0x%Lx\n", start,
++ (unsigned long long)pci_resource_end(dev, DEF_BAR));
++ goto exit;
++ }
++
++ base = ioremap_nocache(start, len);
++ if (!base) {
++ dev_err(&dev->dev, "I/O memory remapping failed\n");
++ err = -ENOMEM;
++ goto fail0;
++ }
++
++ /* Allocate the per-device data structure, mrst_i2c_private */
++ mrst = kzalloc(sizeof(struct mrst_i2c_private), GFP_KERNEL);
++ if (mrst == NULL) {
++ dev_err(&dev->dev, "Can't allocate interface\n");
++ err = -ENOMEM;
++ goto fail1;
++ }
++
++ adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
++ if (adap == NULL) {
++ dev_err(&dev->dev, "Can't allocate interface\n");
++ err = -ENOMEM;
++ goto fail2;
++ }
++
++ /* Initialize struct members */
++ snprintf(adap->name, sizeof(adap->name), "mrst_i2c");
++ adap->owner = THIS_MODULE;
++ adap->algo = &mrst_i2c_algorithm;
++ adap->class = I2C_CLASS_HWMON;
++ adap->dev.parent = &dev->dev;
++ mrst->adap = adap;
++ mrst->base = base;
++ mrst->speed = STANDARD;
++ mrst->abort = 0;
++ mrst->rx_buf_len = 0;
++ mrst->status = STATUS_IDLE;
++
++ pci_set_drvdata(dev, mrst);
++ i2c_set_adapdata(adap, mrst);
++
++ adap->nr = busnum = id->driver_data;
++ if (dev->device <= 0x0804)
++ mrst->platform = MOORESTOWN;
++ else
++ mrst->platform = MEDFIELD;
++
++ dev_dbg(&dev->dev, "I2C%d\n", busnum);
++
++ if (ctl_num > busnum) {
++ if (speed_mode[busnum] < 0 || speed_mode[busnum] >= NUM_SPEEDS)
++ dev_warn(&dev->dev, "Invalid speed %d ignored.\n",
++ speed_mode[busnum]);
++ else
++ mrst->speed = speed_mode[busnum];
++ }
++
++ /* Initialize i2c controller */
++ err = mrst_i2c_hwinit(mrst);
++ if (err < 0) {
++ dev_err(&dev->dev, "I2C interface initialization failed\n");
++ goto fail3;
++ }
++
++ mutex_init(&mrst->lock);
++ init_completion(&mrst->complete);
++ err = request_irq(dev->irq, mrst_i2c_isr, IRQF_DISABLED,
++ adap->name, mrst);
++ if (err) {
++ dev_err(&dev->dev, "Failed to request IRQ for I2C controller: "
++ "%s", adap->name);
++ goto fail3;
++ }
++ /* Clear all interrupts */
++ mrst_i2c_read(mrst->base + IC_CLR_INTR);
++ mrst_i2c_write(mrst->base + IC_INTR_MASK, 0x0000);
++
++ /* Adapter registration */
++ err = i2c_add_numbered_adapter(adap);
++ if (err) {
++ dev_err(&dev->dev, "Adapter %s registration failed\n",
++ adap->name);
++ goto fail4;
++ }
++
++ dev_err(&dev->dev, "%s I2C bus %d driver bind success.\n",
++ (mrst->platform == MOORESTOWN) ? "Moorestown" : "Medfield",
++ busnum);
++
++ pm_runtime_enable(&adap->dev);
++ return 0;
++
++fail4:
++ free_irq(dev->irq, mrst);
++fail3:
++ i2c_set_adapdata(adap, NULL);
++ pci_set_drvdata(dev, NULL);
++ kfree(adap);
++fail2:
++ kfree(mrst);
++fail1:
++ iounmap(base);
++fail0:
++ pci_release_region(dev, DEF_BAR);
++exit:
++ return err;
++}
++
++static void __devexit mrst_i2c_remove(struct pci_dev *dev)
++{
++ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
++ pci_get_drvdata(dev);
++ mrst_i2c_disable(mrst->adap);
++ if (i2c_del_adapter(mrst->adap))
++ dev_err(&dev->dev, "Failed to delete i2c adapter");
++
++ free_irq(dev->irq, mrst);
++ pci_set_drvdata(dev, NULL);
++ iounmap(mrst->base);
++ kfree(mrst);
++ pci_release_region(dev, DEF_BAR);
++}
++
++static int __init mrst_i2c_init(void)
++{
++ printk(KERN_NOTICE "%s I2C driver %s\n", PLATFORM, VERSION);
++ return pci_register_driver(&mrst_i2c_driver);
++}
++
++static void __exit mrst_i2c_exit(void)
++{
++ pci_unregister_driver(&mrst_i2c_driver);
++}
++
++module_init(mrst_i2c_init);
++module_exit(mrst_i2c_exit);
++
++MODULE_AUTHOR("Ba Zheng <zheng.ba@intel.com>");
++MODULE_DESCRIPTION("I2C driver for Moorestown Platform");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(VERSION);
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-mrst.h
+@@ -0,0 +1,267 @@
++#ifndef __I2C_MFLD_H
++#define __I2C_MFLD_H
++
++#include <linux/i2c.h>
++
++/* PCI config table macros */
++/* Offests */
++#define I2C_INFO_TABLE_LENGTH 4
++#define I2C_INFO_DEV_BLOCK 10
++#define I2C_DEV_ADDR 2
++#define I2C_DEV_IRQ 4
++#define I2C_DEV_NAME 6
++#define I2C_DEV_INFO 22
++/* Length */
++#define HEAD_LENGTH 10
++#define BLOCK_LENGTH 32
++#define ADDR_LENGTH 2
++#define IRQ_LENGTH 2
++#define NAME_LENGTH 16
++#define INFO_LENGTH 10
++
++enum platform_enum {
++ MOORESTOWN,
++ MEDFIELD,
++};
++
++struct mrst_i2c_private {
++ struct i2c_adapter *adap;
++ /* Register base address */
++ void __iomem *base;
++ /* Speed mode */
++ int speed;
++ struct completion complete;
++ int abort;
++ u8 *rx_buf;
++ int rx_buf_len;
++ int status;
++ struct i2c_msg *msg;
++ enum platform_enum platform;
++ struct mutex lock;
++ spinlock_t slock;
++};
++
++#define STATUS_IDLE 0
++#define STATUS_READ_START 1
++#define STATUS_READ_IN_PROGRESS 2
++#define STATUS_READ_SUCCESS 3
++#define STATUS_WRITE_START 4
++#define STATUS_WRITE_SUCCESS 5
++#define STATUS_XFER_ABORT 6
++#define STATUS_STANDBY 7
++
++/* Control register */
++#define IC_CON 0x00
++#define SLV_DIS (1 << 6) /* Disable slave mode */
++#define RESTART (1 << 5) /* Send a Restart condition */
++#define ADDR_10BIT (1 << 4) /* 10-bit addressing */
++#define STANDARD_MODE (1 << 1) /* standard mode */
++#define FAST_MODE (2 << 1) /* fast mode */
++#define HIGH_MODE (3 << 1) /* high speed mode */
++#define MASTER_EN (1 << 0) /* Master mode */
++
++/* Target address register */
++#define IC_TAR 0x04
++#define IC_TAR_10BIT_ADDR (1 << 12) /* 10-bit addressing */
++#define IC_TAR_SPECIAL (1 << 11) /* Perform special I2C cmd */
++#define IC_TAR_GC_OR_START (1 << 10) /* 0: General Call Address */
++ /* 1: START BYTE */
++
++/* Slave Address Register */
++#define IC_SAR 0x08 /* Not used in Master mode */
++
++/* High Speed Master Mode Code Address Register */
++#define IC_HS_MADDR 0x0c
++
++/* Rx/Tx Data Buffer and Command Register */
++#define IC_DATA_CMD 0x10
++#define IC_RD (1 << 8) /* 1: Read 0: Write */
++
++/* Standard Speed Clock SCL High Count Register */
++#define IC_SS_SCL_HCNT 0x14
++
++/* Standard Speed Clock SCL Low Count Register */
++#define IC_SS_SCL_LCNT 0x18
++
++/* Fast Speed Clock SCL High Count Register */
++#define IC_FS_SCL_HCNT 0x1c
++
++/* Fast Speed Clock SCL Low Count Register */
++#define IC_FS_SCL_LCNT 0x20
++
++/* High Speed Clock SCL High Count Register */
++#define IC_HS_SCL_HCNT 0x24
++
++/* High Speed Clock SCL Low Count Register */
++#define IC_HS_SCL_LCNT 0x28
++
++/* Interrupt Status Register */
++#define IC_INTR_STAT 0x2c /* Read only */
++#define R_GEN_CALL (1 << 11)
++#define R_START_DET (1 << 10)
++#define R_STOP_DET (1 << 9)
++#define R_ACTIVITY (1 << 8)
++#define R_RX_DONE (1 << 7)
++#define R_TX_ABRT (1 << 6)
++#define R_RD_REQ (1 << 5)
++#define R_TX_EMPTY (1 << 4)
++#define R_TX_OVER (1 << 3)
++#define R_RX_FULL (1 << 2)
++#define R_RX_OVER (1 << 1)
++#define R_RX_UNDER (1 << 0)
++
++/* Interrupt Mask Register */
++#define IC_INTR_MASK 0x30 /* Read and Write */
++#define M_GEN_CALL (1 << 11)
++#define M_START_DET (1 << 10)
++#define M_STOP_DET (1 << 9)
++#define M_ACTIVITY (1 << 8)
++#define M_RX_DONE (1 << 7)
++#define M_TX_ABRT (1 << 6)
++#define M_RD_REQ (1 << 5)
++#define M_TX_EMPTY (1 << 4)
++#define M_TX_OVER (1 << 3)
++#define M_RX_FULL (1 << 2)
++#define M_RX_OVER (1 << 1)
++#define M_RX_UNDER (1 << 0)
++
++/* Raw Interrupt Status Register */
++#define IC_RAW_INTR_STAT 0x34 /* Read Only */
++#define GEN_CALL (1 << 11) /* General call */
++#define START_DET (1 << 10) /* (RE)START occurred */
++#define STOP_DET (1 << 9) /* STOP occurred */
++#define ACTIVITY (1 << 8) /* Bus busy */
++#define RX_DONE (1 << 7) /* Not used in Master mode */
++#define TX_ABRT (1 << 6) /* Transmit Abort */
++#define RD_REQ (1 << 5) /* Not used in Master mode */
++#define TX_EMPTY (1 << 4) /* TX FIFO <= threshold */
++#define TX_OVER (1 << 3) /* TX FIFO overflow */
++#define RX_FULL (1 << 2) /* RX FIFO >= threshold */
++#define RX_OVER (1 << 1) /* RX FIFO overflow */
++#define RX_UNDER (1 << 0) /* RX FIFO empty */
++
++/* Receive FIFO Threshold Register */
++#define IC_RX_TL 0x38
++
++/* Transmit FIFO Threshold Register */
++#define IC_TX_TL 0x3c
++
++/* Clear Combined and Individual Interrupt Register */
++#define IC_CLR_INTR 0x40
++#define CLR_INTR (1 << 0)
++
++/* Clear RX_UNDER Interrupt Register */
++#define IC_CLR_RX_UNDER 0x44
++#define CLR_RX_UNDER (1 << 0)
++
++/* Clear RX_OVER Interrupt Register */
++#define IC_CLR_RX_OVER 0x48
++#define CLR_RX_OVER (1 << 0)
++
++/* Clear TX_OVER Interrupt Register */
++#define IC_CLR_TX_OVER 0x4c
++#define CLR_TX_OVER (1 << 0)
++
++#define IC_CLR_RD_REQ 0x50
++
++/* Clear TX_ABRT Interrupt Register */
++#define IC_CLR_TX_ABRT 0x54
++#define CLR_TX_ABRT (1 << 0)
++
++#define IC_CLR_RX_DONE 0x58
++
++
++/* Clear ACTIVITY Interrupt Register */
++#define IC_CLR_ACTIVITY 0x5c
++#define CLR_ACTIVITY (1 << 0)
++
++/* Clear STOP_DET Interrupt Register */
++#define IC_CLR_STOP_DET 0x60
++#define CLR_STOP_DET (1 << 0)
++
++/* Clear START_DET Interrupt Register */
++#define IC_CLR_START_DET 0x64
++#define CLR_START_DET (1 << 0)
++
++/* Clear GEN_CALL Interrupt Register */
++#define IC_CLR_GEN_CALL 0x68
++#define CLR_GEN_CALL (1 << 0)
++
++/* Enable Register */
++#define IC_ENABLE 0x6c
++#define ENABLE (1 << 0)
++
++/* Status Register */
++#define IC_STATUS 0x70 /* Read Only */
++#define STAT_SLV_ACTIVITY (1 << 6) /* Slave not in idle */
++#define STAT_MST_ACTIVITY (1 << 5) /* Master not in idle */
++#define STAT_RFF (1 << 4) /* RX FIFO Full */
++#define STAT_RFNE (1 << 3) /* RX FIFO Not Empty */
++#define STAT_TFE (1 << 2) /* TX FIFO Empty */
++#define STAT_TFNF (1 << 1) /* TX FIFO Not Full */
++#define STAT_ACTIVITY (1 << 0) /* Activity Status */
++
++/* Transmit FIFO Level Register */
++#define IC_TXFLR 0x74 /* Read Only */
++#define TXFLR (1 << 0) /* TX FIFO level */
++
++/* Receive FIFO Level Register */
++#define IC_RXFLR 0x78 /* Read Only */
++#define RXFLR (1 << 0) /* RX FIFO level */
++
++/* Transmit Abort Source Register */
++#define IC_TX_ABRT_SOURCE 0x80
++#define ABRT_SLVRD_INTX (1 << 15)
++#define ABRT_SLV_ARBLOST (1 << 14)
++#define ABRT_SLVFLUSH_TXFIFO (1 << 13)
++#define ARB_LOST (1 << 12)
++#define ABRT_MASTER_DIS (1 << 11)
++#define ABRT_10B_RD_NORSTRT (1 << 10)
++#define ABRT_SBYTE_NORSTRT (1 << 9)
++#define ABRT_HS_NORSTRT (1 << 8)
++#define ABRT_SBYTE_ACKDET (1 << 7)
++#define ABRT_HS_ACKDET (1 << 6)
++#define ABRT_GCALL_READ (1 << 5)
++#define ABRT_GCALL_NOACK (1 << 4)
++#define ABRT_TXDATA_NOACK (1 << 3)
++#define ABRT_10ADDR2_NOACK (1 << 2)
++#define ABRT_10ADDR1_NOACK (1 << 1)
++#define ABRT_7B_ADDR_NOACK (1 << 0)
++
++/* Enable Status Register */
++#define IC_ENABLE_STATUS 0x9c
++#define IC_EN (1 << 0) /* I2C in an enabled state */
++
++/* Component Parameter Register 1*/
++#define IC_COMP_PARAM_1 0xf4
++#define APB_DATA_WIDTH (0x3 << 0)
++
++/* added by xiaolin --begin */
++#define SS_MIN_SCL_HIGH 4000
++#define SS_MIN_SCL_LOW 4700
++#define FS_MIN_SCL_HIGH 600
++#define FS_MIN_SCL_LOW 1300
++#define HS_MIN_SCL_HIGH_100PF 60
++#define HS_MIN_SCL_LOW_100PF 120
++
++/* Function declarations */
++
++static int mrst_i2c_disable(struct i2c_adapter *);
++static int mrst_i2c_hwinit(struct mrst_i2c_private *);
++static u32 mrst_i2c_func(struct i2c_adapter *);
++static inline int mrst_i2c_invalid_address(const struct i2c_msg *);
++static inline int mrst_i2c_address_neq(const struct i2c_msg *,
++ const struct i2c_msg *);
++static int mrst_i2c_xfer(struct i2c_adapter *,
++ struct i2c_msg *,
++ int);
++static int __devinit mrst_i2c_probe(struct pci_dev *,
++ const struct pci_device_id *);
++static void __devexit mrst_i2c_remove(struct pci_dev *);
++static int __init mrst_i2c_init(void);
++static void __exit mrst_i2c_exit(void);
++static int xfer_read(struct i2c_adapter *,
++ unsigned char *, int);
++static int xfer_write(struct i2c_adapter *,
++ unsigned char *, int);
++#endif /* __I2C_MFLD_H */
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -308,6 +308,7 @@
+ break;
+
+ case 0x1C: /* 28 - Atom Processor */
++ case 0x26: /* 38 - Lincroft Atom Processor */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ cpuidle_state_table = atom_cstates;
+ choose_substate = choose_zero_substate;
+--- a/drivers/input/keyboard/Kconfig
++++ b/drivers/input/keyboard/Kconfig
+@@ -306,6 +306,22 @@
+ To compile this driver as a module, choose M here: the
+ module will be called imx_keypad.
+
++config KEYBOARD_INTEL_MID
++ tristate "Intel MID keypad support"
++ depends on GPIO_LANGWELL
++ help
++ Say Y if you want support for Intel MID keypad devices
++ depends on GPIO_LANGWELL
++
++config KEYBOARD_TC35894XBG
++ tristate "TC35894XBG I2C keypad support"
++ depends on I2C
++ help
++ Say Y if your platform uses a Toshiba TX35894XBG keypad
++ controller. This is an I2C controller used on mobile
++ devices. Note that this is just the generic engine, the key
++ mappings need to be provided externally.
++
+ config KEYBOARD_NEWTON
+ tristate "Newton keyboard"
+ select SERIO
+--- a/drivers/input/keyboard/Makefile
++++ b/drivers/input/keyboard/Makefile
+@@ -13,6 +13,7 @@
+ obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
+ obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
+ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
++obj-$(CONFIG_KEYBOARD_INTEL_MID) += intel_mid_keypad.o
+ obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+ obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
+ obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
+@@ -35,6 +36,7 @@
+ obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
+ obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
+ obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
++obj-$(CONFIG_KEYBOARD_TC35894XBG) += tc35894xbg.o
+ obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
+ obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
+ obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
+--- /dev/null
++++ b/drivers/input/keyboard/intel_mid_keypad.c
+@@ -0,0 +1,843 @@
++/*
++ * linux/drivers/input/keyboard/intel_mid_keypad.c
++ *
++ * Driver for the matrix keypad controller on MID platform.
++ *
++ * Copyright (c) 2009 Intel Corporation.
++ * Created: Sep 18, 2008
++ * Updated: May 14, 2010
++ *
++ * Based on pxa27x_keypad.c by Rodolfo Giometti <giometti@linux.it>
++ * pxa27x_keypad.c is based on a previous implementation by Kevin O'Connor
++ * <kevin_at_keconnor.net> and Alex Osborne <bobofdoom@gmail.com> and
++ * on some suggestions by Nicolas Pitre <nico@cam.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#define DRV_NAME "mrst_keypad"
++#define DRV_VERSION "0.0.1"
++#define MRST_KEYPAD_DRIVER_NAME DRV_NAME " " DRV_VERSION
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/input.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <asm/intel_scu_ipc.h>
++#include <asm/mrst.h>
++
++/*
++ * Keypad Controller registers
++ */
++#define KPC 0x0000 /* Keypad Control register */
++#define KPDK 0x0004 /* Keypad Direct Key register */
++#define KPREC 0x0008 /* Keypad Rotary Encoder register */
++#define KPMK 0x000C /* Keypad Matrix Key register */
++#define KPAS 0x0010 /* Keypad Automatic Scan register */
++
++/* Keypad Automatic Scan Multiple Key Presser register 0-3 */
++#define KPASMKP0 0x0014
++#define KPASMKP1 0x0018
++#define KPASMKP2 0x001C
++#define KPASMKP3 0x0020
++#define KPKDI 0x0024
++
++/* bit definitions */
++#define KPC_MKRN(n) ((((n) - 1) & 0x7) << 26) /* matrix key row number */
++#define KPC_MKCN(n) ((((n) - 1) & 0x7) << 23) /* matrix key col number */
++#define KPC_DKN(n) ((((n) - 1) & 0x7) << 6) /* direct key number */
++
++#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
++#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
++#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
++#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
++
++#define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */
++#define KPC_MS_ALL (0xff << 13)
++
++#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
++#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
++#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
++#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
++#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
++#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
++#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
++#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
++#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
++
++#define KPDK_DKP (0x1 << 31)
++#define KPDK_DK(n) ((n) & 0xff)
++
++#define KPREC_OF1	(0x1 << 31)
++#define KPREC_UF1	(0x1 << 30)
++#define KPREC_OF0	(0x1 << 15)
++#define KPREC_UF0	(0x1 << 14)
++
++#define KPREC_RECOUNT0(n) ((n) & 0xff)
++#define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff)
++
++#define KPMK_MKP (0x1 << 31)
++#define KPAS_SO (0x1 << 31)
++#define KPASMKPx_SO (0x1 << 31)
++
++#define KPAS_MUKP(n) (((n) >> 26) & 0x1f)
++#define KPAS_RP(n) (((n) >> 4) & 0xf)
++#define KPAS_CP(n) ((n) & 0xf)
++
++#define KPASMKP_MKC_MASK (0xff)
++
++#define KEYPAD_MATRIX_GPIO_IN_PIN 24
++#define KEYPAD_MATRIX_GPIO_OUT_PIN 32
++#define KEYPAD_DIRECT_GPIO_IN_PIN 40
++
++#define keypad_readl(off) readl(keypad->mmio_base + (off))
++#define keypad_writel(off, v) writel((v), keypad->mmio_base + (off))
++
++#define MAX_MATRIX_KEY_NUM (8 * 8)
++#define MAX_DIRECT_KEY_NUM (4)
++
++#define MAX_MATRIX_KEY_ROWS (8)
++#define MAX_MATRIX_KEY_COLS (8)
++#define DEBOUNCE_INTERVAL 100
++
++#define KEY_HALFSHUTTER KEY_PROG1
++#define KEY_FULLSHUTTER KEY_CAMERA
++
++static unsigned int mrst_keycode[MAX_MATRIX_KEY_NUM] = {
++ KEY_F, KEY_D, KEY_E, KEY_GRAVE, KEY_C, KEY_R, KEY_4, KEY_V,
++ KEY_NUMLOCK, KEY_LEFTCTRL, KEY_Z, KEY_W, KEY_2, KEY_X, KEY_S, KEY_3,
++ KEY_EQUAL, KEY_N, KEY_H, KEY_U, KEY_7, KEY_M, KEY_J, KEY_8,
++ KEY_6, KEY_5, KEY_APOSTROPHE, KEY_G, KEY_T, KEY_SPACE, KEY_B, KEY_Y,
++ KEY_MINUS, KEY_0, KEY_LEFT, KEY_SEMICOLON, KEY_P, KEY_DOWN, KEY_UP,
++ KEY_BACKSPACE,
++ KEY_L, KEY_K, KEY_I, KEY_SLASH, KEY_COMMA, KEY_O, KEY_9, KEY_DOT,
++ KEY_Q, KEY_TAB, KEY_ESC, KEY_LEFTSHIFT, KEY_CAPSLOCK, KEY_1, KEY_FN,
++ KEY_A,
++ 0, KEY_RIGHTSHIFT, KEY_ENTER, 0, KEY_RIGHT, 0, 0, 0,
++};
++
++/* NumLk key mapping */
++static unsigned int mrst_keycode_numlck[MAX_MATRIX_KEY_NUM] = {
++ KEY_F, KEY_D, KEY_E, KEY_GRAVE, KEY_C, KEY_R, KEY_4, KEY_V,
++ KEY_NUMLOCK, KEY_LEFTCTRL, KEY_Z, KEY_W, KEY_2, KEY_X, KEY_S, KEY_3,
++ KEY_EQUAL, KEY_N, KEY_H, KEY_KP4, KEY_KP7, KEY_KP0, KEY_KP1, KEY_KP8,
++ KEY_6, KEY_5, KEY_APOSTROPHE, KEY_G, KEY_T, KEY_SPACE, KEY_B, KEY_Y,
++ KEY_MINUS, KEY_KPSLASH, KEY_LEFT, KEY_KPMINUS, KEY_KPASTERISK,
++ KEY_DOWN, KEY_UP, KEY_BACKSPACE,
++ KEY_KP3, KEY_KP2, KEY_KP5, KEY_SLASH, KEY_KPDOT, KEY_KP6, KEY_KP9,
++ KEY_KPPLUS,
++ KEY_Q, KEY_TAB, KEY_ESC, KEY_LEFTSHIFT, KEY_CAPSLOCK, KEY_1, KEY_FN,
++ KEY_A,
++ 0, KEY_RIGHTSHIFT, KEY_ENTER, 0, KEY_RIGHT, 0, 0, 0,
++};
++
++/* Fn key mapping */
++static unsigned int mrst_keycode_fn[MAX_MATRIX_KEY_NUM] = {
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ KEY_LEFTBRACE, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, KEY_HOME, 0, 0, KEY_PAGEDOWN, KEY_PAGEUP, 0,
++ 0, 0, 0, KEY_RIGHTBRACE, KEY_LEFTBRACE, 0, 0, KEY_RIGHTBRACE,
++ 0, 0, 0, KEY_LEFTSHIFT, 0, 0, KEY_FN, 0,
++ 0, KEY_RIGHTSHIFT, 0, 0, KEY_END, 0, 0, 0,
++};
++
++/* direct key map */
++static unsigned int mrst_direct_keycode[MAX_DIRECT_KEY_NUM] = {
++ KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_HALFSHUTTER, KEY_FULLSHUTTER,
++};
++
++struct mrst_keypad {
++
++ struct input_dev *input_dev;
++ void __iomem *mmio_base;
++
++ unsigned int matrix_key_rows;
++ unsigned int matrix_key_cols;
++ int matrix_key_map_size;
++
++ /* key debounce interval */
++ unsigned int debounce_interval;
++
++ /* matrix key code map */
++ unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM];
++
++ /* state row bits of each column scan */
++ uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
++ uint32_t direct_key_state;
++
++ unsigned int direct_key_mask;
++
++ int direct_key_num;
++
++ unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
++
++ /* rotary encoders 0 */
++ int enable_rotary0;
++ int rotary0_rel_code;
++ int rotary0_up_key;
++ int rotary0_down_key;
++
++ /* rotary encoders 1 */
++ int enable_rotary1;
++ int rotary1_rel_code;
++ int rotary1_up_key;
++ int rotary1_down_key;
++
++ int rotary_rel_code[2];
++ int rotary_up_key[2];
++ int rotary_down_key[2];
++
++ /* Fn key */
++ int fn;
++
++ /* Number Lock key */
++ int numlck;
++
++ /* FIXME:
++ * Keypad controller likely issues fake interrupts
++ * when direct key status registers were first initialized
++	 * This value ensures that interrupt will not be processed.
++ */
++ int count;
++};
++
++static void mrst_keypad_build_keycode(struct mrst_keypad *keypad)
++{
++ struct input_dev *input_dev = keypad->input_dev;
++ unsigned int *key;
++ int i, code;
++
++ keypad->matrix_key_rows = MAX_MATRIX_KEY_ROWS;
++ keypad->matrix_key_cols = MAX_MATRIX_KEY_COLS;
++ keypad->direct_key_num = MAX_DIRECT_KEY_NUM;
++ keypad->matrix_key_map_size = MAX_MATRIX_KEY_NUM;
++ keypad->debounce_interval = DEBOUNCE_INTERVAL;
++
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ keypad->matrix_key_rows = 7;
++ keypad->matrix_key_cols = 7;
++ keypad->direct_key_num = 2;
++ }
++
++ /* three sets of keycode here */
++ if (keypad->fn)
++ memcpy(keypad->matrix_keycodes, mrst_keycode_fn,
++ sizeof(keypad->matrix_keycodes));
++ else if (keypad->numlck)
++ memcpy(keypad->matrix_keycodes, mrst_keycode_numlck,
++ sizeof(keypad->matrix_keycodes));
++ else {
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ mrst_keycode[45] = KEY_HALFSHUTTER;
++ mrst_keycode[46] = KEY_FULLSHUTTER;
++ }
++
++ memcpy(keypad->matrix_keycodes, mrst_keycode,
++ sizeof(keypad->matrix_keycodes));
++ }
++
++ memcpy(keypad->direct_key_map, mrst_direct_keycode,
++ sizeof(keypad->direct_key_map));
++
++ key = &keypad->matrix_keycodes[0];
++ for (i = 0; i < MAX_MATRIX_KEY_NUM; i++, key++) {
++ code = (*key) & 0xffffff;
++ set_bit(code, input_dev->keybit);
++ }
++
++ key = &keypad->direct_key_map[0];
++ for (i = 0; i < keypad->direct_key_num; i++, key++) {
++ code = (*key) & 0xffffff;
++ set_bit(code, input_dev->keybit);
++ }
++
++ keypad->enable_rotary0 = 0;
++ keypad->enable_rotary1 = 0;
++
++}
++
++static inline unsigned int lookup_matrix_keycode(
++ struct mrst_keypad *keypad, int row, int col)
++{
++ return keypad->matrix_keycodes[(row << 3) + col];
++}
++
++static void handle_constant_keypress(struct mrst_keypad *keypad,
++			int num, int col, int row,
++			int state)
++{
++	struct input_dev *dev = keypad->input_dev;
++
++	switch (num) {
++	case 0:
++		if (keypad->fn)
++			keypad->fn = 0;
++		/* Manually release special keys (Fn combinations) */
++		if (test_bit(KEY_LEFTBRACE, dev->key))
++			input_report_key(dev, KEY_LEFTBRACE, 0);
++		if (test_bit(KEY_RIGHTBRACE, dev->key))
++			input_report_key(dev, KEY_RIGHTBRACE, 0);
++		if (test_bit(KEY_HOME, dev->key))
++			input_report_key(dev, KEY_HOME, 0);
++		if (test_bit(KEY_END, dev->key))
++			input_report_key(dev, KEY_END, 0);
++		if (test_bit(KEY_PAGEUP, dev->key))
++			input_report_key(dev, KEY_PAGEUP, 0);
++		if (test_bit(KEY_PAGEDOWN, dev->key))
++			input_report_key(dev, KEY_PAGEDOWN, 0);
++
++		return;
++
++	case 1:
++		/* if Fn pressed */
++		if (col == 6 && row == 6)
++			keypad->fn = 1;
++		/* key '[' */
++		else if ((col == 0 && row == 2) && state) {
++			keypad->fn = 0;
++			set_bit(KEY_EQUAL, dev->key);
++			dev->repeat_key = KEY_EQUAL;
++		}
++		/* key ']' */
++		else if ((col == 3 && row == 5) && state) {
++			keypad->fn = 0;
++			set_bit(KEY_SLASH, dev->key);
++			dev->repeat_key = KEY_SLASH;
++		}
++		/* key '{' */
++		else if ((col == 4 && row == 5) && state) {
++			keypad->fn = 0;
++			set_bit(KEY_COMMA, dev->key);
++			dev->repeat_key = KEY_COMMA;
++		}
++		/* key '}' */
++		else if ((col == 7 && row == 5) && state) {
++			keypad->fn = 0;
++			set_bit(KEY_DOT, dev->key);
++			dev->repeat_key = KEY_DOT;
++		}
++
++		return;
++	default:
++		;
++	}
++}
++
++static void mrst_keypad_scan_matrix(struct mrst_keypad *keypad)
++{
++ int row, col, num_keys_pressed = 0;
++ uint32_t new_state[MAX_MATRIX_KEY_COLS];
++ uint32_t kpas = keypad_readl(KPAS);
++ int status;
++
++ num_keys_pressed = KPAS_MUKP(kpas);
++
++ memset(new_state, 0, sizeof(new_state));
++
++ if (num_keys_pressed == 0) {
++ status = keypad->matrix_key_state[0] & (1 << 0);
++ handle_constant_keypress(keypad, num_keys_pressed, 0, 0,
++ status);
++
++ goto scan;
++ }
++
++ if (num_keys_pressed == 1) {
++ col = KPAS_CP(kpas);
++ row = KPAS_RP(kpas);
++
++ /* if invalid row/col, treat as no key pressed */
++ if (col < keypad->matrix_key_cols &&
++ row < keypad->matrix_key_rows) {
++ /* if NumLk pressed */
++ if (col == 0 && row == 1)
++ keypad->numlck = !keypad->numlck;
++
++ status = keypad->matrix_key_state[col] & (1 << row);
++ handle_constant_keypress(keypad, num_keys_pressed, col,
++ row, status);
++
++ new_state[col] = (1 << row);
++ }
++
++ goto scan;
++ }
++
++ if (num_keys_pressed > 1) {
++ uint32_t kpasmkp0 = keypad_readl(KPASMKP0);
++ uint32_t kpasmkp1 = keypad_readl(KPASMKP1);
++ uint32_t kpasmkp2 = keypad_readl(KPASMKP2);
++ uint32_t kpasmkp3 = keypad_readl(KPASMKP3);
++
++ new_state[0] = kpasmkp0 & KPASMKP_MKC_MASK;
++ new_state[1] = (kpasmkp0 >> 16) & KPASMKP_MKC_MASK;
++ new_state[2] = kpasmkp1 & KPASMKP_MKC_MASK;
++ new_state[3] = (kpasmkp1 >> 16) & KPASMKP_MKC_MASK;
++ new_state[4] = kpasmkp2 & KPASMKP_MKC_MASK;
++ new_state[5] = (kpasmkp2 >> 16) & KPASMKP_MKC_MASK;
++ new_state[6] = kpasmkp3 & KPASMKP_MKC_MASK;
++ if (mrst_platform_id() != MRST_PLATFORM_AAVA_SC)
++ new_state[7] = (kpasmkp3 >> 16) & KPASMKP_MKC_MASK;
++
++ /* if Fn is pressed, all SHIFT is ignored, except when {
++ * or } is pressed */
++ if (new_state[6] & 0x40) {
++ keypad->fn = 1;
++ new_state[3] &= ~0x40;
++ new_state[1] &= ~0x80;
++ }
++
++ if (keypad->fn == 1) {
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ /* if { or } pressed */
++ if ((new_state[4] & 0x20)) {
++ /* as if LEFTSHIFT is pressed */
++ new_state[3] |= 0x40;
++ /* as if Fn not pressed */
++ new_state[6] &= ~0x40;
++ }
++ } else {
++ /* if { or } pressed */
++ if ((new_state[4] & 0x20) ||
++ (new_state[7] & 0x20)) {
++ /* as if LEFTSHIFT is pressed */
++ new_state[3] |= 0x40;
++ /* as if Fn not pressed */
++ new_state[6] &= ~0x40;
++ }
++ }
++ /* if [ or ] pressed */
++ if ((new_state[0] & 0x04) || (new_state[3] & 0x20))
++ /* as if Fn not pressed */
++ new_state[6] &= ~0x40;
++ }
++ }
++
++scan:
++ /* re-build keycode */
++ mrst_keypad_build_keycode(keypad);
++
++ for (col = 0; col < keypad->matrix_key_cols; col++) {
++ uint32_t bits_changed;
++
++ bits_changed = keypad->matrix_key_state[col] ^ new_state[col];
++ if (bits_changed == 0)
++ continue;
++
++ for (row = 0; row < keypad->matrix_key_rows; row++) {
++ if ((bits_changed & (1 << row)) == 0)
++ continue;
++
++ input_report_key(keypad->input_dev,
++ lookup_matrix_keycode(keypad, row, col),
++ new_state[col] & (1 << row));
++ }
++ }
++ input_sync(keypad->input_dev);
++ memcpy(keypad->matrix_key_state, new_state, sizeof(new_state));
++}
++
++#define DEFAULT_KPREC (0x007f007f)
++
++static inline int rotary_delta(uint32_t kprec)
++{
++ if (kprec & KPREC_OF0)
++ return (kprec & 0xff) + 0x7f;
++ else if (kprec & KPREC_UF0)
++ return (kprec & 0xff) - 0x7f - 0xff;
++ else
++ return (kprec & 0xff) - 0x7f;
++}
++
++static void report_rotary_event(struct mrst_keypad *keypad, int r, int delta)
++{
++ struct input_dev *dev = keypad->input_dev;
++
++ if (delta == 0)
++ return;
++
++ if (keypad->rotary_up_key[r] && keypad->rotary_down_key[r]) {
++ int keycode = (delta > 0) ? keypad->rotary_up_key[r] :
++ keypad->rotary_down_key[r];
++
++ /* simulate a press-n-release */
++ input_report_key(dev, keycode, 1);
++ input_sync(dev);
++ input_report_key(dev, keycode, 0);
++ input_sync(dev);
++ } else {
++ input_report_rel(dev, keypad->rotary_rel_code[r], delta);
++ input_sync(dev);
++ }
++}
++
++static void mrst_keypad_scan_rotary(struct mrst_keypad *keypad)
++{
++ unsigned int kprec;
++
++ /* read and reset to default count value */
++ kprec = keypad_readl(KPREC);
++ keypad_writel(KPREC, DEFAULT_KPREC);
++
++ if (keypad->enable_rotary0)
++ report_rotary_event(keypad, 0, rotary_delta(kprec));
++
++ if (keypad->enable_rotary1)
++ report_rotary_event(keypad, 1, rotary_delta(kprec >> 16));
++}
++
++static void mrst_keypad_scan_direct(struct mrst_keypad *keypad)
++{
++ unsigned int new_state;
++ uint32_t kpdk, bits_changed;
++ int i;
++
++ kpdk = keypad_readl(KPDK);
++
++ if (keypad->enable_rotary0 || keypad->enable_rotary1)
++ mrst_keypad_scan_rotary(keypad);
++
++ if ((keypad->direct_key_map == NULL) || (++keypad->count == 1)) {
++ keypad->direct_key_state = 0;
++ return;
++ }
++
++ new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
++ new_state = ~new_state;
++ bits_changed = keypad->direct_key_state ^ new_state;
++
++ if (bits_changed == 0)
++ return;
++
++ for (i = 0; i < keypad->direct_key_num; i++) {
++ if (bits_changed & (1 << i)) {
++ input_report_key(keypad->input_dev,
++ keypad->direct_key_map[i],
++ (new_state & (1 << i)));
++ }
++ }
++
++ input_sync(keypad->input_dev);
++ keypad->direct_key_state = new_state;
++}
++
++static irqreturn_t mrst_keypad_irq_handler(int irq, void *dev_id)
++{
++ struct mrst_keypad *keypad = dev_id;
++ unsigned long kpc = keypad_readl(KPC);
++
++ if (kpc & KPC_DI)
++ mrst_keypad_scan_direct(keypad);
++
++ if (kpc & KPC_MI)
++ mrst_keypad_scan_matrix(keypad);
++
++ return IRQ_HANDLED;
++}
++
++static int mrst_keypad_gpio_init(struct mrst_keypad *keypad)
++{
++	int i, err;
++	int cnt1 = 0;
++	int cnt2 = 0;
++	int cnt3 = 0;
++
++	for (i = 0; i < keypad->matrix_key_rows; i++, cnt1++) {
++		err = gpio_request(KEYPAD_MATRIX_GPIO_IN_PIN + i, NULL);
++		if (err) {
++			pr_err("GPIO pin %d failed to request.\n",
++				KEYPAD_MATRIX_GPIO_IN_PIN + i);
++			goto err_request1;
++		}
++	}
++
++	for (i = 0; i < keypad->matrix_key_cols; i++, cnt2++) {
++		err = gpio_request(KEYPAD_MATRIX_GPIO_OUT_PIN + i, NULL);
++		if (err) {
++			pr_err("GPIO pin %d failed to request.\n",
++				KEYPAD_MATRIX_GPIO_OUT_PIN + i);
++			goto err_request2;
++		}
++	}
++
++	for (i = 0; i < keypad->direct_key_num; i++, cnt3++) {
++		err = gpio_request(KEYPAD_DIRECT_GPIO_IN_PIN + i, NULL);
++		if (err) {
++			pr_err("GPIO pin %d failed to request.\n",
++				KEYPAD_DIRECT_GPIO_IN_PIN + i);
++			goto err_request3;
++		}
++	}
++
++	for (i = 0; i < keypad->matrix_key_rows; i++)
++		gpio_direction_input(KEYPAD_MATRIX_GPIO_IN_PIN + i);
++
++	for (i = 0; i < keypad->matrix_key_cols; i++)
++		gpio_direction_output(KEYPAD_MATRIX_GPIO_OUT_PIN + i, 1);
++
++	for (i = 0; i < keypad->direct_key_num; i++)
++		gpio_direction_input(KEYPAD_DIRECT_GPIO_IN_PIN + i);
++
++	return 0;
++
++err_request3:
++	/* free requested pins... */
++	for (i = 0; i < cnt3; i++)
++		gpio_free(KEYPAD_DIRECT_GPIO_IN_PIN + i);
++err_request2:
++	/* free requested pins... */
++	for (i = 0; i < cnt2 ; i++)
++		gpio_free(KEYPAD_MATRIX_GPIO_OUT_PIN + i);
++err_request1:
++	/* free requested pins... */
++	for (i = 0; i < cnt1; i++)
++		gpio_free(KEYPAD_MATRIX_GPIO_IN_PIN + i);
++
++	return err;
++}
++
++static void mrst_keypad_config(struct mrst_keypad *keypad)
++{
++ unsigned int mask = 0, direct_key_num = 0;
++ unsigned long kpc = 0;
++
++ /* enable matrix keys with automatic scan */
++ if (keypad->matrix_key_rows && keypad->matrix_key_cols) {
++ kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
++ kpc |= KPC_MKRN(keypad->matrix_key_rows) |
++ KPC_MKCN(keypad->matrix_key_cols);
++ }
++
++ /* enable rotary key, debounce interval same as direct keys */
++ if (keypad->enable_rotary0) {
++ mask |= 0x03;
++ direct_key_num = 2;
++ kpc |= KPC_REE0;
++ }
++
++ if (keypad->enable_rotary1) {
++ mask |= 0x0c;
++ direct_key_num = 4;
++ kpc |= KPC_REE1;
++ }
++
++ if (keypad->direct_key_num > direct_key_num)
++ direct_key_num = keypad->direct_key_num;
++
++ keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
++
++ /* enable direct key */
++ if (direct_key_num)
++ kpc |= KPC_DE | KPC_DIE | KPC_DKN(direct_key_num);
++
++ keypad_writel(KPC, kpc);
++ keypad_writel(KPREC, DEFAULT_KPREC);
++ keypad_writel(KPKDI, keypad->debounce_interval);
++}
++
++static int mrst_keypad_open(struct input_dev *dev)
++{
++ struct mrst_keypad *keypad = input_get_drvdata(dev);
++ int err;
++
++ err = mrst_keypad_gpio_init(keypad);
++ if (err)
++ return err;
++ mrst_keypad_config(keypad);
++
++ return 0;
++}
++
++static void mrst_keypad_close(struct input_dev *dev)
++{
++ int i;
++ struct mrst_keypad *keypad = input_get_drvdata(dev);
++
++ /* free occupied pins */
++ for (i = 0; i < keypad->matrix_key_rows; i++)
++ gpio_free(KEYPAD_MATRIX_GPIO_IN_PIN + i);
++ for (i = 0; i < keypad->matrix_key_cols; i++)
++ gpio_free(KEYPAD_MATRIX_GPIO_OUT_PIN + i);
++ for (i = 0; i < keypad->direct_key_num; i++)
++ gpio_free(KEYPAD_DIRECT_GPIO_IN_PIN + i);
++}
++
++static int __devinit mrst_keypad_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct mrst_keypad *keypad;
++ struct input_dev *input_dev;
++ int error;
++
++ u32 data;
++
++ keypad = kzalloc(sizeof(struct mrst_keypad), GFP_KERNEL);
++ if (keypad == NULL) {
++ dev_err(&pdev->dev, "failed to allocate driver data\n");
++ return -ENOMEM;
++ }
++
++ error = pci_enable_device(pdev);
++ if (error || (pdev->irq < 0)) {
++ dev_err(&pdev->dev, "failed to enable device/get irq\n");
++ error = -ENXIO;
++ goto failed_free;
++ }
++
++ error = pci_request_regions(pdev, DRV_NAME);
++ if (error) {
++ dev_err(&pdev->dev, "failed to request I/O memory\n");
++ goto failed_free;
++ }
++
++ keypad->mmio_base = ioremap(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++ if (keypad->mmio_base == NULL) {
++ dev_err(&pdev->dev, "failed to remap I/O memory\n");
++ error = -ENXIO;
++ goto failed_free_mem;
++ }
++
++ /* Create and register the input driver. */
++ input_dev = input_allocate_device();
++ if (!input_dev) {
++ dev_err(&pdev->dev, "failed to allocate input device\n");
++ error = -ENOMEM;
++ goto failed_free_io;
++ }
++
++ input_dev->name = pci_name(pdev);
++ input_dev->id.bustype = BUS_PCI;
++ input_dev->open = mrst_keypad_open;
++ input_dev->close = mrst_keypad_close;
++ input_dev->dev.parent = &pdev->dev;
++
++ input_dev->keycode = keypad->matrix_keycodes;
++ input_dev->keycodesize = sizeof(unsigned int);
++ input_dev->keycodemax = ARRAY_SIZE(mrst_keycode);
++
++ keypad->input_dev = input_dev;
++ keypad->fn = 0;
++ keypad->numlck = 0;
++ /*FIXME*/
++ keypad->count = 0;
++ input_set_drvdata(input_dev, keypad);
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
++ BIT_MASK(EV_REL);
++
++ mrst_keypad_build_keycode(keypad);
++ pci_set_drvdata(pdev, keypad);
++
++ error = request_irq(pdev->irq, mrst_keypad_irq_handler, IRQF_SHARED,
++ pci_name(pdev), keypad);
++ if (error) {
++ dev_err(&pdev->dev, "failed to request IRQ\n");
++ goto failed_free_dev;
++ }
++
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++
++ /* Enable 75 kOhm internal pull-ups for
++ * KBD_DKIN0 and KBD_DKIN1 */
++ /* bus: 0x4h, address: 0x20h, bits 0...3 */
++ /* 01 = W, 04 = bus, 20 = address */
++ /* b3-b0 = 1010 (75kOhm pull-ups) = 0xAh */
++ data = 0xA;
++ intel_scu_ipc_i2c_cntrl(0x01040020, &data);
++ } else {
++ data = 0;
++ }
++ /* Register the input device */
++ error = input_register_device(input_dev);
++ if (error) {
++ dev_err(&pdev->dev, "failed to register input device\n");
++ goto failed_free_irq;
++ }
++
++ pr_info("*** keypad driver load successfully ***\n");
++ return 0;
++
++failed_free_irq:
++ free_irq(pdev->irq, keypad);
++ pci_set_drvdata(pdev, NULL);
++failed_free_dev:
++ input_free_device(input_dev);
++failed_free_io:
++ iounmap(keypad->mmio_base);
++failed_free_mem:
++ pci_release_regions(pdev);
++failed_free:
++ kfree(keypad);
++ return error;
++}
++
++static void __devexit mrst_keypad_remove(struct pci_dev *pdev)
++{
++ struct mrst_keypad *keypad = pci_get_drvdata(pdev);
++ int i;
++
++ /* free occupied pins */
++ for (i = 0; i < keypad->matrix_key_rows; i++)
++ gpio_free(KEYPAD_MATRIX_GPIO_IN_PIN + i);
++ for (i = 0; i < keypad->matrix_key_cols; i++)
++ gpio_free(KEYPAD_MATRIX_GPIO_OUT_PIN + i);
++ for (i = 0; i < keypad->direct_key_num; i++)
++ gpio_free(KEYPAD_DIRECT_GPIO_IN_PIN + i);
++
++ free_irq(pdev->irq, keypad);
++ input_unregister_device(keypad->input_dev);
++ iounmap(keypad->mmio_base);
++ pci_release_regions(pdev);
++ pci_set_drvdata(pdev, NULL);
++ kfree(keypad);
++}
++
++static struct pci_device_id keypad_pci_tbl[] = {
++ {0x8086, 0x0805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
++ {0,}
++};
++MODULE_DEVICE_TABLE(pci, keypad_pci_tbl);
++
++static struct pci_driver mrst_keypad_driver = {
++ .name = DRV_NAME,
++ .id_table = keypad_pci_tbl,
++ .probe = mrst_keypad_probe,
++ .remove = __devexit_p(mrst_keypad_remove),
++#ifdef CONFIG_PM
++ .suspend = NULL,
++ .resume = NULL,
++#endif /* CONFIG_PM */
++};
++
++static int __init mrst_keypad_init(void)
++{
++ return pci_register_driver(&mrst_keypad_driver);
++}
++
++static void __exit mrst_keypad_exit(void)
++{
++ pci_unregister_driver(&mrst_keypad_driver);
++}
++
++module_init(mrst_keypad_init);
++module_exit(mrst_keypad_exit);
++
++MODULE_DESCRIPTION("MRST Keypad Controller Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/input/keyboard/tc35894xbg.c
+@@ -0,0 +1,722 @@
++/*
++ * tc35894xbg.c: Keypad driver for Toshiba TC35894XBG
++ *
++ * (C) Copyright 2010 Intel Corporation
++ * Author: Charlie Paul (z8cpaul@windriver.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++#include <linux/module.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/input.h>
++#include <linux/device.h>
++#include <linux/gpio.h>
++#include <linux/slab.h>
++
++#include <linux/i2c/tc35894xbg.h>
++#include "tc35894xbg_regs.h"
++
++struct tc35894xbg_keypad_chip {
++ /* device lock */
++ struct mutex lock;
++ struct i2c_client *client;
++ struct work_struct work;
++ struct input_dev *idev;
++ bool kp_enabled;
++ bool pm_suspend;
++ char phys[32];
++ struct tc35894xbg_platform_data pd;
++ unsigned char keymap_index;
++ unsigned int kp_reg_addr;
++};
++
++#define work_to_keypad(w) container_of(w, struct tc35894xbg_keypad_chip, \
++ work)
++#define client_to_keypad(c) container_of(c, struct tc35894xbg_keypad_chip, \
++ client)
++#define dev_to_keypad(d) container_of(c, struct tc35894xbg_keypad_chip, \
++ client->dev)
++
++#define KEYPAD_MAX_DATA 8
++
++/*
++ * To write, access the chip's address in write mode, and dump the
++ * command and data on the bus. The command and data are taken as
++ * sequential u8s out of varargs, to a maximum of KEYPAD_MAX_DATA
++ */
++static int keypad_write(struct tc35894xbg_keypad_chip *tc, int len, ...)
++{
++	int ret, i;
++	va_list ap;
++	u8 data[KEYPAD_MAX_DATA];
++
++	va_start(ap, len);
++	if (len > KEYPAD_MAX_DATA) {
++		dev_err(&tc->client->dev, "tried to send %d bytes\n", len);
++		va_end(ap);
++		return 0;
++	}
++
++	for (i = 0; i < len; i++)
++		data[i] = va_arg(ap, int);
++	va_end(ap);
++
++#ifdef DEBUG
++	dev_dbg(&tc->client->dev, "Register write: register:0x%02x", data[0]);
++	for (i = 1; i < len; i++)
++		dev_dbg(&tc->client->dev, ", value:0x%02x", data[i]);
++	dev_dbg(&tc->client->dev, "\n");
++#endif
++	/*
++	 * In case of host's asleep, send again when get NACK
++	 */
++	ret = i2c_master_send(tc->client, data, len);
++	if (ret == -EREMOTEIO)
++		ret = i2c_master_send(tc->client, data, len);
++
++	if (ret != len)
++		dev_err(&tc->client->dev, "sent %d bytes of %d total\n",
++			ret, len);
++
++	return ret;
++}
++
++/*
++ * To read, first send the command byte and end the transaction.
++ * Then we can get the data in read mode.
++ */
++static int keypad_read(struct tc35894xbg_keypad_chip *tc, u8 cmd, u8 * buf,
++ int len)
++{
++#ifdef DEBUG
++ int i;
++#endif
++ int ret;
++
++ /*
++ * In case of host's asleep, send again when get NACK
++ */
++ ret = i2c_master_send(tc->client, &cmd, 1);
++ if (ret == -EREMOTEIO)
++ ret = i2c_master_send(tc->client, &cmd, 1);
++
++ if (ret != 1) {
++ dev_err(&tc->client->dev, "sending command 0x%2x failed.\n",
++ cmd);
++ return 0;
++ }
++
++ ret = i2c_master_recv(tc->client, buf, len);
++ if (ret != len)
++ dev_err(&tc->client->dev, "want %d bytes, got %d\n", len, ret);
++
++#ifdef DEBUG
++ dev_dbg(&tc->client->dev, "Register read: register:0x%02x", cmd);
++ for (i = 0; i < len; i++)
++ dev_dbg(&tc->client->dev, ", value:0x%02x", buf[i]);
++ dev_dbg(&tc->client->dev, "\n");
++#endif
++ return ret;
++}
++
++/*software reset */
++static void keypad_reset(struct tc35894xbg_keypad_chip *tc)
++{
++ /*
++ * Three reset mode, one is software reset by
++ * control the RSTCTRL register.
++ */
++ keypad_write(tc, 2, TC_REG_RSTCTRL, (TC_VAL_IRQRST | TC_VAL_TIMRST
++ | TC_VAL_KBDRST | TC_VAL_GPIRST));
++ /*
++ * Once reset bit is set, need write back to 0
++ */
++ keypad_write(tc, 2, TC_REG_RSTCTRL, 0x0);
++}
++
++/*
++ * Read the manufacturer ID and SW revision registers. Return them
++ * to the caller, if the caller has supplied pointers.
++ */
++static int keypad_checkid(struct tc35894xbg_keypad_chip *tc,
++ int *mfg_id_ret, int *sw_rev_ret)
++{
++ u8 mfg_id;
++ u8 sw_rev;
++
++ if (keypad_read(tc, TC_REG_MANUFACT_CODE, &mfg_id, 1) != 1)
++ return -EREMOTEIO;
++ if (keypad_read(tc, TC_REG_SW_VERSION, &sw_rev, 1) != 1)
++ return -EREMOTEIO;
++
++ if (mfg_id_ret != NULL)
++ *mfg_id_ret = (int)mfg_id;
++ if (sw_rev_ret != NULL)
++ *sw_rev_ret = (int)sw_rev;
++ return 0;
++}
++
++static int keypad_configure(struct tc35894xbg_keypad_chip *tc)
++{
++ /* enable the modified feature */
++ keypad_write(tc, 2, TC_REG_KBDMFS, TC_VAL_MFSEN);
++
++ /* enable the SYSCLK in KBD and timer */
++ keypad_write(tc, 2, TC_REG_CLKEN, 0x28 | TC_VAL_KBDEN);
++ /* when clock source is RC osci NOTE: Needs to be written twice */
++ keypad_write(tc, 2, TC_REG_CLKEN, 0x28 | TC_VAL_KBDEN);
++
++ dev_dbg(&tc->client->dev, "keypad internal clock setting\n");
++ /* CLKCFG : select the RC-osc:2MHZ, disable doubler, divider:2 */
++ /* CLK_IN = internal clock / 2 = 65KHZ / 2 = 32KHZ */
++ keypad_write(tc, 2, TC_REG_CLKCFG, TC_VAL_CLKSRCSEL | 0x01);
++
++ dev_dbg(&tc->client->dev, "keypad keyboard setting\n");
++ /* keyboard settings */
++ keypad_write(tc, 2, TC_REG_KBDSETTLE, tc->pd.settle_time);
++ keypad_write(tc, 2, TC_REG_KBD_BOUNCE, tc->pd.debounce_time);
++ keypad_write(tc, 2, TC_REG_KBDSIZE, ((tc->pd.size_x << 4)
++ | tc->pd.size_y));
++ keypad_write(tc, 3, TC_REG_DEDCFG_COL, tc->pd.col_setting,
++ tc->pd.rowcol_setting);
++
++ dev_dbg(&tc->client->dev, "keypad keyboard interrupt setting\n");
++ /*XXX: set again */
++ keypad_write(tc, 2, TC_REG_DKBDMSK, 0x03);
++
++ /* clear pending interrupts before irq enabled */
++ keypad_write(tc, 2, TC_REG_KBDIC, (TC_VAL_EVTIC | TC_VAL_KBDIC));
++
++ /* Enable keycode lost intr & keyboard status intr */
++ keypad_write(tc, 2, TC_REG_KBDMSK, 0x00);
++
++ return 0;
++}
++
++/*
++ * AT-style: low 7 bits are the keycode, and the top
++ * bit indicates the state( 1 for down, 0 for up)
++ */
++static inline u8 keypad_whichkey(u8 event)
++{
++ /* bit[7-4]:key row, bit[3-0]:key col */
++ u8 row, col;
++ u8 key;
++ row = (event & 0x70) >> 4;
++ col = (event & 0x0F);
++
++ key = row * 8 + col;
++
++ return key;
++}
++
++static inline int keypad_ispress(u8 event)
++{
++ /* 1: pressed, 0: released */
++ return (event & 0x80) ? 0 : 1;
++}
++
++/* reset the keybit of input */
++static void set_keymap_bit(struct tc35894xbg_keypad_chip *tc)
++{
++ int i;
++ unsigned temp;
++ for (i = 0; i < tc->pd.keymap_size; i++) {
++ temp = (tc->pd.keymap[tc->keymap_index][i] & ~(SHIFT_NEEDED));
++ __set_bit(temp, tc->idev->keybit);
++ }
++
++ __clear_bit(KEY_RESERVED, tc->idev->keybit);
++}
++
++
++/* report the 'right shift' key */
++static void report_shift_key(struct tc35894xbg_keypad_chip *tc, int isdown)
++{
++ if (tc->kp_enabled) {
++ input_report_key(tc->idev,
++ tc->pd.keymap[TC_DEFAULT_KEYMAP][tc->pd.right_shift_key],
++ isdown);
++ input_sync(tc->idev);
++ }
++}
++
++/* report the key code */
++static void submit_key(struct tc35894xbg_keypad_chip *tc, u8 key,
++ unsigned short keycode, int isdown)
++{
++ unsigned short saved_keycode = keycode;
++
++ dev_vdbg(&tc->client->dev, "key 0x%02x %s\n",
++ key, isdown ? "down" : "up");
++ /*
++ * Translate the non-exist keycode keys.
++ * when key press down, report the 'shift' key pressed ahead.
++ */
++ if ((keycode & SHIFT_NEEDED) && isdown) {
++ keycode = keycode & ~(SHIFT_NEEDED);
++ report_shift_key(tc, isdown);
++ }
++
++ /* report the key */
++ if (tc->kp_enabled) {
++ input_report_key(tc->idev, (keycode & ~(SHIFT_NEEDED)), isdown);
++ input_sync(tc->idev);
++ }
++
++ /*
++ * When key press up, report the 'shift' up followed.
++ */
++ if ((saved_keycode & SHIFT_NEEDED) && !isdown)
++ report_shift_key(tc, isdown);
++}
++
++/* key event interrupt handler */
++static inline void process_keys(struct tc35894xbg_keypad_chip *tc)
++{
++ u8 event;
++ int ret, i = 0;
++ static u8 queue[TC35894XBG_MAX_FIFO];
++ static int tail;
++
++ ret = keypad_read(tc, TC_REG_EVTCODE, &event, 1);
++ if (ret < 0) {
++ dev_err(&tc->client->dev, "Failed reading fifo\n");
++ /* clear event buffer */
++ keypad_write(tc, 2, TC_REG_KBDIC, 0x83);
++ return;
++ }
++
++ /* clear event buffer */
++ keypad_write(tc, 2, TC_REG_KBDIC, 0x83);
++
++ i = 0;
++ /* modified feature enable on KBDMFS */
++ if (event != 0x7F && event != 0xFF) {
++
++ u8 key = keypad_whichkey(event);
++ int isdown = keypad_ispress(event);
++ unsigned short keycode = tc->pd.keymap[tc->keymap_index][key];
++
++ /* The function key pressed */
++ if ((key == tc->pd.function_key) && isdown) {
++ tc->keymap_index = TC_ALT_KEYMAP;
++ set_keymap_bit(tc);
++ return;
++ }
++
++ /* Function key press up */
++ if ((key == tc->pd.function_key) && !isdown) {
++ /*
++ * dequeue the queue,
++ * where keys stored while FN is pressed
++ */
++ int j;
++ unsigned short temp_key;
++ for (j = 0; j < tail; j++) { /* keys up */
++ temp_key = tc->pd.keymap[TC_ALT_KEYMAP][queue[j]];
++ submit_key(tc, queue[j], temp_key, 0);
++ }
++ tail = 0;
++
++ tc->keymap_index = TC_DEFAULT_KEYMAP;
++ set_keymap_bit(tc);
++ return;
++ }
++
++ if (tc->keymap_index == TC_ALT_KEYMAP)
++ queue[tail++] = key;
++
++ submit_key(tc, key, keycode, isdown);
++ }
++
++}
++
++/*
++ * Bottom half (workqueue): while the controller still reports pending
++ * interrupt status, service keyboard events under the chip mutex.
++ */
++static void keypad_work(struct work_struct *work)
++{
++ struct tc35894xbg_keypad_chip *tc = work_to_keypad(work);
++ u8 ints = 0;
++
++
++ mutex_lock(&tc->lock);
++ while ((keypad_read(tc, TC_REG_IRQST, &ints, 1) == 1) && ints) {
++ if (ints & TC_VAL_KBDIRQ) {
++ /* drain one key event from the controller FIFO */
++ process_keys(tc);
++ }
++ }
++ mutex_unlock(&tc->lock);
++}
++
++/*
++ * We cannot use I2C in interrupt context, so we just schedule work.
++ */
++static irqreturn_t keypad_irq(int irq, void *data)
++{
++ struct tc35894xbg_keypad_chip *tc = data;
++ schedule_work(&tc->work);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Sysfs interface
++ */
++static ssize_t keypad_show_disable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct tc35894xbg_keypad_chip *tc = dev_get_drvdata(dev);
++ return sprintf(buf, "%u\n", !tc->kp_enabled); /* 1 = reporting disabled */
++}
++
++/* sysfs store: "1" disables key reporting, "0" re-enables it */
++static ssize_t keypad_set_disable(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct tc35894xbg_keypad_chip *tc = dev_get_drvdata(dev);
++ int ret;
++ unsigned long i;
++
++ ret = strict_strtoul(buf, 10, &i);
++ if (ret) /* reject malformed input instead of using stale 'i' */
++ return ret;
++ mutex_lock(&tc->lock);
++ tc->kp_enabled = !i;
++ mutex_unlock(&tc->lock);
++ return count;
++}
++
++static DEVICE_ATTR(disable_kp, 0644,
++ keypad_show_disable, keypad_set_disable);
++
++static ssize_t keypad_show_addr(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct tc35894xbg_keypad_chip *tc = dev_get_drvdata(dev);
++ return sprintf(buf, "0x%02X\n", tc->kp_reg_addr); /* debug register pointer */
++}
++
++/* sysfs store: set the chip register address used by data_kp reads */
++static ssize_t keypad_set_addr(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct tc35894xbg_keypad_chip *tc = dev_get_drvdata(dev);
++ int ret;
++ unsigned long i;
++
++ ret = strict_strtoul(buf, 0, &i);
++ if (ret) /* reject malformed input instead of using stale 'i' */
++ return ret;
++ mutex_lock(&tc->lock);
++ tc->kp_reg_addr = i;
++ mutex_unlock(&tc->lock);
++ return count;
++}
++
++static DEVICE_ATTR(addr_kp, 0644,
++ keypad_show_addr, keypad_set_addr);
++
++/* sysfs show: read one byte from the register selected via addr_kp */
++static ssize_t keypad_show_data(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct tc35894xbg_keypad_chip *tc = dev_get_drvdata(dev);
++ u8 val;
++
++ mutex_lock(&tc->lock);
++ if (keypad_read(tc, tc->kp_reg_addr, &val, 1) == 1) {
++ mutex_unlock(&tc->lock);
++ return sprintf(buf, "0x%02X\n", val);
++ }
++ mutex_unlock(&tc->lock);
++ return -EIO; /* surface the I2C failure instead of an empty read */
++}
++
++static ssize_t keypad_set_data(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ return count; /* writes unsupported; returning 0 makes userspace retry forever */
++}
++
++static DEVICE_ATTR(data_kp, 0644,
++ keypad_show_data, keypad_set_data);
++
++static struct attribute *tc35894_attributes[] = { /* sysfs debug/control attrs */
++ &dev_attr_disable_kp.attr,
++ &dev_attr_addr_kp.attr,
++ &dev_attr_data_kp.attr,
++ NULL
++};
++
++static const struct attribute_group tc35894_attr_group = {
++ .attrs = tc35894_attributes,
++};
++
++static int __devinit
++keypad_probe(struct i2c_client *client, const struct i2c_device_id *id)
++{
++
++ struct tc35894xbg_platform_data *pdata = client->dev.platform_data;
++ struct input_dev *idev;
++ struct tc35894xbg_keypad_chip *tc;
++
++ int err;
++ int sw_rev;
++ int mfg_id;
++ unsigned long tmo;
++ u8 data[2];
++ int irq; /* signed: gpio_to_irq() returns -errno on failure */
++
++
++ dev_dbg(&client->dev, "keypad probe\n");
++
++ if (!pdata || !pdata->size_x || !pdata->size_y) {
++ dev_err(&client->dev, "missing platform_data\n");
++ return -EINVAL;
++ }
++
++ if (pdata->size_x > 8) {
++ dev_err(&client->dev, "invalid x size %d specified\n",
++ pdata->size_x);
++ return -EINVAL;
++ }
++
++ if (pdata->size_y > 12) {
++ dev_err(&client->dev, "invalid y size %d specified\n",
++ pdata->size_y);
++ return -EINVAL;
++ }
++
++ tc = kzalloc(sizeof(*tc), GFP_KERNEL);
++ if (!tc) {
++ err = -ENOMEM;
++ goto fail0;
++ }
++ idev = input_allocate_device();
++ if (!idev) {
++ err = -ENOMEM;
++ goto fail1;
++ }
++
++ memcpy(&tc->pd, pdata, sizeof(struct tc35894xbg_platform_data));
++
++ i2c_set_clientdata(client, tc);
++
++ tc->client = client;
++ tc->idev = idev;
++ mutex_init(&tc->lock);
++ INIT_WORK(&tc->work, keypad_work);
++
++ dev_dbg(&client->dev, "Reset GPIO ID: %d\n", tc->pd.gpio_reset);
++ dev_dbg(&client->dev, "Keypad size:%d x %d\n",
++ tc->pd.size_x, tc->pd.size_y);
++
++ /*
++ * Take controller out of reset
++ */
++ if (pdata->gpio_reset != -1) {
++ dev_dbg(&client->dev, "Release TC35894XBG reset\n");
++ if (pdata->reset_ctrl == NULL) {
++ dev_err(&client->dev, "No reset_ctrl function\n");
++ err = -ENODEV; goto fail1; /* was: return -ENODEV (leaked tc/idev) */
++ }
++ pdata->reset_ctrl(client, 1);
++ }
++
++ /*
++ * Nothing's set up to service the IRQ yet, so just spin for max.
++ * 280us until we can configure. (Tp1 + Tp2)
++ */
++ tmo = jiffies + usecs_to_jiffies(280);
++ while (keypad_read(tc, TC_REG_IRQST, data, 1) == 1) {
++ if (data[0] & TC_VAL_PORIRQ) { /* power on reset complete */
++ /* clear the PORIRQ bit */
++ keypad_write(tc, 2, TC_REG_RSTINTCLR,
++ TC_VAL_IRQCLR);
++ break;
++ }
++ if (time_after(jiffies, tmo)) {
++ dev_err(&client->dev,
++ "timeout waiting for initialisation\n");
++ break;
++ }
++ udelay(1);
++ }
++
++ /* Confirm device ID register */
++ err = keypad_checkid(tc, &mfg_id, &sw_rev);
++ if (err != 0) {
++ dev_err(&client->dev, "Could not read ID and revision\n");
++ goto fail1;
++ } else {
++ dev_dbg(&client->dev, "Controller ID/Rev: 0x%02X/0x%02X\n",
++ mfg_id, sw_rev);
++ }
++
++ /* Software reset can be achieved only after power-on complete */
++ dev_dbg(&client->dev, "Controller reset by software\n");
++ keypad_reset(tc);
++
++ /* detach the RESETN from the global reset tree */
++ keypad_write(tc, 2, TC_REG_EXTRSTN, TC_VAL_EXTRSTN);
++
++ dev_dbg(&client->dev, "keypad configure start\n");
++ keypad_configure(tc);
++
++ tc->kp_enabled = true;
++
++ idev->name = "KEYPAD";
++ snprintf(tc->phys, sizeof(tc->phys), "%s/input-kp",
++ dev_name(&client->dev));
++ idev->phys = tc->phys;
++ /* the two bit set */
++ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
++
++ tc->keymap_index = TC_DEFAULT_KEYMAP;
++ set_keymap_bit(tc);
++
++ err = input_register_device(idev);
++ if (err) {
++ dev_dbg(&client->dev, "error register input device\n");
++ goto fail2;
++ }
++
++ irq = gpio_to_irq(pdata->gpio_irq);
++ if (irq < 0) {
++ dev_err(&client->dev, "Failed to get IRQ to GPIO %d\n", pdata->gpio_irq);
++ err = irq; goto fail3; /* idev is registered: unregister, don't free */
++ }
++ client->irq = irq;
++
++ dev_dbg(&client->dev, "keypad irq register\n");
++ err = request_irq(client->irq, keypad_irq, IRQF_TRIGGER_FALLING
++ | IRQF_SHARED, "keypad", tc);
++ if (err) {
++ dev_err(&client->dev, "could not get IRQ %d\n", irq);
++ goto fail3;
++ }
++
++ /* Register sysfs hooks */
++ err = sysfs_create_group(&client->dev.kobj, &tc35894_attr_group);
++ if (err) {
++ free_irq(client->irq, tc); goto fail3; } /* don't leak the IRQ */
++
++ device_init_wakeup(&client->dev, 1);
++ enable_irq_wake(client->irq);
++
++ return 0;
++
++fail3: input_unregister_device(idev);
++ idev = NULL;
++
++fail2: device_remove_file(&client->dev, &dev_attr_disable_kp);
++
++fail1: input_free_device(idev);
++
++fail0: kfree(tc);
++
++ return err;
++}
++
++static int __devexit keypad_remove(struct i2c_client *client)
++{
++ struct tc35894xbg_keypad_chip *tc = i2c_get_clientdata(client);
++
++ dev_dbg(&client->dev, "keypad driver remove\n");
++
++ disable_irq_wake(client->irq);
++ free_irq(client->irq, tc);
++ cancel_work_sync(&tc->work);
++
++ dev_dbg(&client->dev, "keypad input device unregister\n");
++ sysfs_remove_group(&client->dev.kobj, &tc35894_attr_group);
++ input_unregister_device(tc->idev);
++ /* disable_kp is part of tc35894_attr_group, already removed above */
++
++ kfree(tc);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++/*
++ * The chip switches itself off when there is no activity, so no
++ * explicit device suspend work is needed beyond IRQ management.
++ */
++
++static int keypad_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct tc35894xbg_keypad_chip *tc = i2c_get_clientdata(client);
++
++ set_irq_wake(client->irq, 0);
++ disable_irq(client->irq);
++
++ mutex_lock(&tc->lock);
++ tc->pm_suspend = true;
++ mutex_unlock(&tc->lock);
++ return 0;
++}
++
++static int keypad_resume(struct i2c_client *client)
++{
++ struct tc35894xbg_keypad_chip *tc = i2c_get_clientdata(client);
++
++ mutex_lock(&tc->lock);
++ tc->pm_suspend = false;
++ mutex_unlock(&tc->lock);
++
++ enable_irq(client->irq);
++ set_irq_wake(client->irq, 1);
++ return 0;
++}
++
++#else
++#define keypad_suspend NULL
++#define keypad_resume NULL
++
++#endif
++
++static const struct i2c_device_id keypad_id[] = {
++ { "i2c_TC35894-nEB1", 0 },
++ { "i2c_TC35894-i", 0 },
++ { }
++};
++
++static struct i2c_driver keypad_i2c_driver = {
++ .class = I2C_CLASS_HWMON, /* NOTE(review): HWMON class for a keypad looks odd -- confirm intent */
++ .driver = {.name = "keypad",},
++ .probe = keypad_probe,
++ .remove = __devexit_p(keypad_remove),
++ .suspend = keypad_suspend,
++ .resume = keypad_resume,
++ .id_table = keypad_id,
++};
++
++MODULE_DEVICE_TABLE(i2c, keypad_id);
++
++static int __init keypad_init(void)
++{
++ return i2c_add_driver(&keypad_i2c_driver);
++}
++
++static void __exit keypad_exit(void)
++{
++ i2c_del_driver(&keypad_i2c_driver);
++}
++
++module_init(keypad_init);
++module_exit(keypad_exit);
++
++MODULE_AUTHOR("Charlie Paul");
++MODULE_DESCRIPTION("TC35894XBG expander driver");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/input/keyboard/tc35894xbg_regs.h
+@@ -0,0 +1,1528 @@
++/*
++ * tc35894xbg_regs.h: Register definitions for Toshiba TC35894XBG
++ *
++ * (C) Copyright 2010 Intel Corporation
++ * Author: Charlie Paul (z8cpaul@windriver.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++#ifndef __TC3589XBG_REGS_H
++#define __TC3589XBG_REGS_H
++
++#include <linux/types.h>
++#include <linux/input.h>
++
++/******************************************************************************
++ * REGISTER DEFINITIONS
++ ******************************************************************************/
++/******************************************************************************
++ * Setup of Wait Period register
++ ******************************************************************************/
++#define TC_REG_KBDSETTLE (0x01)
++
++/*
++ * Initial wait time for keys to settle, before key scan is started. Each unit
++ * of this timer equals four SYSCLK clock cycles, or approx. 61us on a 65.536kHz
++ * SYSCLK. The wait time is 4 times the value programmed to WAIT0 divided by the
++ * SYSCLK frequency in Hz.
++ * 0xFF: 15.6ms
++ * 0xA3: 9.68ms
++ * 0x7F: 7.8ms
++ * 0x52: 5.0ms
++ * 0x40: 3.9ms
++ * 0x00: 0ms
++ */
++#define TC_VAL_WAIT_15_6MS (0xFF)
++#define TC_VAL_WAIT_9_68MS (0xA3)
++#define TC_VAL_WAIT_7_8MS (0x7F)
++#define TC_VAL_WAIT_5_0MS (0x52)
++#define TC_VAL_WAIT_3_9MS (0x40)
++
++/******************************************************************************
++ * Setup of Debouncing Register
++ *****************************************************************************/
++#define TC_REG_KBD_BOUNCE (0x02)
++
++/*
++ * De-bounce time between detection of any key change and the keyboard
++ * scan. Each unit of this time equals four SYSCLK clock cycles, or approx 61us
++ * on a 65.536KHz clock. The debounce time is calculated as:
++ * 4 times the value programmed to BOUNCETIM0 divided by the SYSCLK frequency
++ * in Hz
++ * 0xFF: 15.6ms
++ * 0xA3: 9.68ms
++ * 0x7F: 7.8ms
++ * 0x52: 5.0ms
++ * 0x40: 3.9ms
++ * 0x00: 0ms
++ */
++#define TC_VAL_BOUNCE_15_6MS (0xFF)
++#define TC_VAL_BOUNCE_9_68MS (0xA3)
++#define TC_VAL_BOUNCE_7_8MS (0x7F)
++#define TC_VAL_BOUNCE_5_0MS (0x52)
++#define TC_VAL_BOUNCE_3_9MS (0x40)
++
++/******************************************************************************
++ * Keyboard Matrix Setup
++ ******************************************************************************/
++#define TC_REG_KBDSIZE (0x03)
++
++/*
++ * Number of rows in the keyboard matrix, between 2 and 8
++ * 0: A value of 0 will free all rows to become GPIO lines.
++ * 1: Inhibition
++ * 2-8: Number of Rows
++ */
++#define TC_VAL_ROW_SIZE_MASK (0xF0)
++
++/*
++ * Number of columns in the keyboard matrix, between 2 and 12.
++ * 0: A value of 0 will free all columns to become GPIO lines
++ * 1: Inhibition
++ * 2-12: Number of columns
++ */
++#define TC_VAL_COL_SIZE_MASK (0x0F)
++
++/******************************************************************************
++ * Dedicated Key Setup register (0x04 and 0x05)
++ *****************************************************************************/
++#define TC_REG_DEDCFG_COL (0x04)
++
++/*
++ * Each bit in COL[8:2] corresponds to ball KPY8..KPY2 and can be
++ * configured individually.
++ * 0: Dedicated key
++ * 1: No dedicated key (standard GPIO, alternative functionality according
++ * to register BALLCFG or keyboard)
++ */
++#define TC_VAL_COL9 (0x01 << 7)
++#define TC_VAL_COL8 (0x01 << 6)
++#define TC_VAL_COL7 (0x01 << 5)
++#define TC_VAL_COL6 (0x01 << 4)
++#define TC_VAL_COL5 (0x01 << 3)
++#define TC_VAL_COL4 (0x01 << 2)
++#define TC_VAL_COL3 (0x01 << 1)
++#define TC_VAL_COL2 (0x01)
++
++#define TC_REG_DEDCFG_ROW_COL (0x05)
++
++/*
++ * Each bit in ROW[7:2] corresponds to ball KPX7..KPX2 and can be
++ * configured individually.
++ * 0: Dedicated key
++ * 1: No dedicated key (standard GPIO or keyboard matrix)
++ */
++#define TC_VAL_ROW7 (0x01 << 7)
++#define TC_VAL_ROW6 (0x01 << 6)
++#define TC_VAL_ROW5 (0x01 << 5)
++#define TC_VAL_ROW4 (0x01 << 4)
++#define TC_VAL_ROW3 (0x01 << 3)
++#define TC_VAL_ROW2 (0x01 << 2)
++
++/*
++ * Each bit in COL[11:10] corresponds to ball KPY11..KPY10 and can be
++ * configured individually.
++ * 0: Dedicated key
++ * 1: No dedicated key (standard GPIO, alternative functionality according
++ * to register BALLCFG or keyboard)
++ */
++#define TC_VAL_COL11 (0x01 << 1)
++#define TC_VAL_COL10 (0x01)
++
++/******************************************************************************
++ * Keyboard Raw Interrupt Register
++ *****************************************************************************/
++#define TC_REG_KBDRIS (0x06) /* Read Only */
++
++/*
++ * Raw Event Lost Interrupt.
++ * This bit is cleared by writing into EVTIC
++ * 0: No interrupt
++ * 1: More than 8 keyboard events have been detected and caused
++ * the event buffer to overflow.
++ */
++#define TC_VAL_RELINT (0x01 << 3)
++
++/*
++ * Raw keyboard event interrupt
++ * Reading from EVTCODE until the buffer is empty will automatically
++ * clear this interrupt.
++ * 0: No interrupt
++ * 1: At least one key press or key release is in the keyboard
++ * event buffer.
++ */
++#define TC_VAL_REVT_INT (0x01 << 2)
++
++/*
++ * Raw Key Lost interrupt (indicates a lost keycode)
++ * The meaning of this interrupt bit changes depending on the
++ * configuration of KBDMFS register.
++ * 0: No interrupt
++ * 1: If KBDMFS is set to 0: When RSINT has not been clear upon
++ * detection of a new key press or key
++ * release, or when more than 4 keys are pressed
++ * simultaneously.
++ * If KBDMFS is set to 1: Indicates that more than 4 keys are pressed
++ * simultaneously, i.e. key presses are lost
++ */
++#define TC_VAL_RKLINT (0x01 << 1)
++
++/*
++ * Raw scan interrupt
++ * 0: No interrupt
++ * 1: Interrupt generated after keyboard scan, if the keyboard status has
++ * changed
++ */
++#define TC_VAL_RSINT (0x01)
++
++/******************************************************************************
++ * Keyboard Mask Interrupt Register
++ *****************************************************************************/
++#define TC_REG_KBDMIS (0x07) /* Read Only */
++
++/*
++ * Masked Event Lost Interrupt
++ * 0: No interrupt
++ * 1: More than 8 keyboard events have been detected
++ * and caused the event buffer to overflow.
++ */
++#define TC_VAL_MELINT (0x01 << 3)
++
++/*
++ * Masked keyboard event interrupt
++ * 0: No interrupt
++ * 1: At least one key press or key release is in the keyboard
++ * event buffer.
++ */
++#define TC_VAL_MEVT_INT (0x01 << 2)
++
++/*
++ * Masked key lost interrupt
++ * 0: No interrupt
++ * 1: Masked key lost interrupt.
++ */
++#define TC_VAL_MKLINT (0x01 << 1)
++
++/*
++ * Masked scan interrupt
++ * 0: No interrupt
++ * 1: Interrupt generated after keyboard scan,
++ * if the keyboard status has changed, after masking process.
++ */
++#define TC_VAL_MSINT (0x01)
++
++/******************************************************************************
++ * Keyboard Interrupt Clear Register
++ *****************************************************************************/
++#define TC_REG_KBDIC (0x08) /* Write Only */
++
++/*
++ * Switches off scanning of special function
++ * keys, when keyboard has no special function
++ * layout.
++ * 0: Scans keyboard layout with or without special function keys
++ * 1: Scans keyboard layout without special function keys
++ */
++#define TC_VAL_SFOFF (0x01 << 7)
++
++/*
++ * Clear event buffer and corresponding interrupts
++ * REVTINT and RELINT. The host does not need to write "0".
++ * Write "1" every time when clearing the event buffer.
++ * 0: No action
++ * 1: Clear event buffer and corresponding interrupts REVTINT and RELINT
++ */
++#define TC_VAL_EVTIC (0x01 << 1)
++
++/*
++ * Clear RSINT and RKLINT interrupt bits.
++ * The host does not need to write "0". Write "1" every time when clearing
++ * RSINT and RKLINT.
++ * 0: No action
++ * 1: Clear RSINT and RKLINT interrupt bits.
++*/
++#define TC_VAL_KBDIC (0x01)
++
++/******************************************************************************
++ * Keyboard Mask Register
++ *****************************************************************************/
++#define TC_REG_KBDMSK (0x09)
++
++/*
++ * Enable keyboard event lost interrupt
++ * 0: Enabled
++ * 1: Disabled
++ */
++#define TC_VAL_MSKELINT (0x01 << 3)
++
++/*
++ * Enable keyboard event interrupt
++ * 0: Enabled
++ * 1: Disabled
++ */
++#define TC_VAL_MSKEINT (0x01 << 2)
++
++/*
++ * Enable keycode lost interrupt
++ * 0: Enabled
++ * 1: Disabled
++ */
++#define TC_VAL_MSKKLINT (0x01 << 1)
++
++/*
++ * Enable keyboard status interrupt
++ * 0: Enable
++ * 1: Disable
++ */
++#define TC_VAL_MSKSINT (0x01)
++
++/****************************************************************************
++ * Keyboard Code Registers
++ ***************************************************************************/
++#define TC_REG_KBDCODE0 (0x0B)
++#define TC_REG_KBDCODE1 (0x0C)
++#define TC_REG_KBDCODE2 (0x0D)
++#define TC_REG_KBDCODE3 (0x0E)
++
++/*
++ * Multiple key press. Another key code is available in KEYCODE(X+1) register
++ * 0: Another key code is not available
++ * 1: Another key code is available.
++ */
++#define TC_VAL_MULTIKEY (0x01 << 7)
++
++/* Row index of key that is pressed (0..7) */
++#define TC_VAL_KEYROW_MASK (0x70)
++
++/*
++ * Column index of key that is pressed (0..11 and 12 for special
++ * function key)
++ */
++#define TC_VAL_KEYCOL_MASK (0x0F)
++
++/******************************************************************************
++ * Event Code Register
++ *****************************************************************************/
++#define TC_REG_EVTCODE (0x10)
++
++/*
++ * Indicates, whether keyboard event was a key press or a key release
++ * 0: Key was pressed
++ * 1: Key was released
++ */
++#define TC_VAL_RELEASE (0x01 << 7)
++
++
++/******************************************************************************
++ * Timer configuration registers
++ *****************************************************************************/
++#define TC_REG_TIMCFG0 (0x60)
++#define TC_REG_TIMCFG1 (0x68)
++#define TC_REG_TIMCFG2 (0x70)
++
++/*
++ * Interrupt mask for CYCIRQ
++ * 0: interrupt enabled
++ * 1: interrupt masked
++ */
++#define TC_VAL_TIM_IRQMASK (0x01 << 4)
++
++/*
++ * CYCLE counter control register
++ * 0: Timer/counter stops after TIMLOAD cycles of CNTCLK (One-shot operation)
++ * An interrupt is (only) issued, when the NUM Counter (TIMCYCLE controlled)
++ * is at 0.
++ * 1: Timer/counter counts down from TIMLOAD to 0 as many times as are
++ * specified in the TIMCYCLE register. Then, the timer stops and an
++ * interrupt is generated.
++ */
++#define TC_VAL_CYCCTRL (0x01 << 3)
++
++/*
++ * Switches between free-running timer and one time count. In both operating
++ * modes the register TIMCYCLE influences the behavior of the interrupt
++ * generation.
++ * 0: CYCLE mode, behavior depends on CYCLE bit
++ * 1: Timer/counter counts down from TIMLOAD to 0, re-loads the value of
++ * TIMLOAD and restarts counting down. (FREE-running mode)
++ */
++#define TC_VAL_FREE (0x01 << 2)
++
++/*
++ * Synchronization of pattern generator and timer
++ * 0: Pattern generator is started and stopped by the PWMCFG. PGE bit
++ * 1: Pattern generator and Timer are enabled simultaneously setting bit
++ * TIMCFG START, pattern generator is stopped by PWMCFG.PGE=0, timer
++ * is stopped by TIMCFG.START=0
++ */
++#define TC_VAL_SYNC (0x01 << 1)
++
++/*
++ * Timer start/stop control (WRITE_ONLY)
++ * 0: Timer is stopped (can also be stopped from internal state machine)
++ * 1: Timer is started
++ */
++#define TC_VAL_START (0x01)
++
++/******************************************************************************
++ * Pattern Configuration registers
++ *****************************************************************************/
++#define TC_REG_PWMCFG0 (0x61)
++#define TC_REG_PWMCFG1 (0x69)
++#define TC_REG_PWMCFG2 (0x71)
++
++/*
++ * Mask for CDIRQ
++ * 0: CDIRQ enabled
++ * 1: CDIRQ disabled/masked
++ */
++#define TC_VAL_PWM_MASK (0x01 << 3)
++
++/*
++ * Pattern Generator Enable
++ * This bit is ignored, if the SYNC bit of the corresponding
++ * TIMCFG register is set
++ * 0: Pattern generator disabled
++ * 1: Pattern generator enabled
++ */
++#define TC_VAL_PGE (0x01 << 2)
++
++/*
++ * PWM Enable
++ * 0: PWM disabled
++ * PWM timer output assumes value programmed in PWMPOL
++ * 1: PWM enabled
++ */
++#define TC_VAL_PWMEN (0x01 << 1)
++
++/* OFF-state of PWM output, when PWMEN=0
++ * 0: PWM off-state is low
++ * 1: PWM off-state is high
++ */
++#define TC_VAL_PWMPOL (0x01)
++
++/******************************************************************************
++ * Timer Scale registers
++ *
++ * SCAL7:0 : Load value for timer pre-scaler.
++ * The system clock is divided by (SCAL+1). The resulting CNTCLK is
++ * the reference clock for timer related operations.
++ ******************************************************************************/
++#define TC_REG_TIMSCAL0 (0x62)
++#define TC_REG_TIMSCAL1 (0x6A)
++#define TC_REG_TIMSCAL2 (0x72)
++#define TC_VAL_SCAL_MASK (0xFF)
++
++/******************************************************************************
++ * Timer CYCLE registers
++ *
++ * CYCLE7:0 : Additional number of elapsed timer/counter expires, before
++ * CYCIRQ is released. When programmed to a value N, an interrupt
++ * is issued every (N+1) expiry. This register is active in
++ * one-shot and free-running mode. In one-shot mode, the timer
++ * needs to be restarted under host control N times before an
++ * interrupt is generated.
++ * 0: Interrupt generated immediately, when timer/counter expired
++ * 1: Interrupt generated after N+1 expires of timer/counter
++ *****************************************************************************/
++#define TC_REG_TIMCYCLE0 (0x63)
++#define TC_REG_TIMCYCLE1 (0x6B)
++#define TC_REG_TIMCYCLE2 (0x73)
++#define TC_VAL_CYCLE_MASK (0xFF)
++
++/******************************************************************************
++ * Timer LOAD registers
++ *
++ * LOAD7:0 : The register contents define together with TIMSCAL the PWM
++ * frequency. The timer/counter counts down from LOAD value to 0 in
++ * (LOAD+1) steps. The value programmed into this register is
++ * transferred into the timer/counter synchronously to the pre-scaled
++ * timer clock CNTCLK. Therefore, the settings become only effective,
++ * when the timer clock is running (TIMCFG.START bit set). If duty
++ * cycle modulation is enabled, this register defines also the
++ * resolution of possible duty cycle settings
++ *
++ *****************************************************************************/
++#define TC_REG_TIM_LOAD0 (0x64)
++#define TC_REG_TIM_LOAD1 (0x6C)
++#define TC_REG_TIM_LOAD2 (0x74)
++#define TC_VAL_LOAD_MASK (0xFF)
++
++/******************************************************************************
++ * Timer Software Reset register
++ *****************************************************************************/
++#define TC_REG_SWRES (0x78)
++
++/*
++ * Software reset of TIMER2
++ * 0: no action
++ * 1: Software reset on timer 2, needs not to be
++ * written back to 0
++ */
++#define TC_VAL_SWRES2 (0x01 << 2)
++
++/*
++ * Software reset of TIMER1
++ * 0: no action
++ * 1: Software reset on timer 1, needs not to be
++ * written back to 0
++ */
++#define TC_VAL_SWRES1 (0x01 << 1)
++
++/*
++ * Software reset of TIMER0
++ * 0: no action
++ * 1: Software reset on timer 0, needs not to be
++ * written back to 0
++ */
++#define TC_VAL_SWRES0 (0x01)
++
++/******************************************************************************
++ * Timer Raw input status register
++ *
++ * CDIRQ2:0 : Raw interrupt status for CDIRQ timer2,1 and 0
++ * 0: No interrupt pending
++ * 1: Unmasked interrupt generated
++ *
++ * CYCIRQ2:0 : Raw interrupt status for CYCIRQ timer 2, 1 and 0
++ *
++ *****************************************************************************/
++#define TC_REG_TIMRIS (0x7A)
++
++/******************************************************************************
++ * Timer Mask Interrupt Status register
++ *
++ * CDIRQ2:0 : Interrupt after masking, indicates active contribution to the
++ * interrupt ball, when set. Status for CDIRQ timer2, 1 and 0
++ * 0: No interrupt pending
++ * 1: Interrupt generated.
++ *
++ * CYCIRQ2:0 : Interrupt after masking, indicates active contribution to the
++ * interrupt ball, when set. Status for CYCIRQtimer2,1 and 0
++ * 0: No interrupt pending
++ * 1: interrupt generated
++ *****************************************************************************/
++#define TC_REG_TIMMIS (0x7B)
++
++/******************************************************************************
++ * Timer Interrupt Clear register
++ *
++ * CDIRQ2:0: Clears interrupt CDIRQ timer 2,1, and 0
++ * 0: No effect
++ * 1: Interrupt is cleared. Does not need to be written back to 0
++ *
++ * CYCIRQ2:0 Clears interrupt CYCIRQ timer 2,1, and 0
++ * 0: No effect
++ * 1: Interrupt is cleared. Does not need to be written back to 0
++ *****************************************************************************/
++#define TC_REG_TIMIC (0x7C)
++#define TC_VAL_CDIRQ2 (0x01 << 5)
++#define TC_VAL_CDIRQ1 (0x01 << 4)
++#define TC_VAL_CDIRQ0 (0x01 << 3)
++#define TC_VAL_CYCIRQ2 (0x01 << 2)
++#define TC_VAL_CYCIRQ1 (0x01 << 1)
++#define TC_VAL_CYCIRQ0 (0x01)
++
++/******************************************************************************
++ * Pattern Storage Register
++ *****************************************************************************/
++#define TC_REG_PWMWP (0x7D)
++
++/*
++ * POINTER6:0 Points to the pattern position configuration register,
++ * which will be overwritten by the next write access
++ * to the PWMPAT register
++ * 0 <= POINTER < 32 : Timer0 patterns 0 to 31
++ * 32 <= POINTER < 64 : Timer1 patterns 0 to 31
++ * 64 <= POINTER < 96 : Timer2 patterns 0 to 31
++ * 96 <= POINTER < 128: Not Valid
++ */
++#define TC_VAL_POINTER_MASK (0x7F)
++
++/******************************************************************************
++ * PWMPAT register
++ *
++ * PAT15:0 : Input port to the pattern storage register indexed by PWMWP,
++ * After I2C write, PWMWP is incremented. Cannot be written in two
++ * byte accesses, must be written in a single I2C burst command.
++ *
++ *****************************************************************************/
++#define TC_REG_PWMPAT_HIGH (0x7E)
++#define TC_REG_PWMPAT_LOW (0x7F)
++
++/******************************************************************************
++ * Manufacture Code register
++ *****************************************************************************/
++#define TC_REG_MANUFACT_CODE (0x80) /* Read Only */
++#define TC_VAL_MANUFACTURE_CODE (0x03)
++
++/******************************************************************************
++ * Software Version register
++ *****************************************************************************/
++#define TC_REG_SW_VERSION (0x81)
++#define TC_VAL_SW_VERSION (0xC0)
++
++/******************************************************************************
++ * I2C register
++ *****************************************************************************/
++#define TC_REG_I2CA (0x80) /* Write Only */
++
++/******************************************************************************
++ * Reset Register
++ *****************************************************************************/
++#define TC_REG_RSTCTRL (0x82)
++
++/*
++ * Interrupt Controller Reset
++ * Status on ball IRQN remains unaffected. This register bit is only used
++ * to control IRA module register. Interrupt status read out is not
++ * possible, when this bit is set. It is recommended to leave this bit always
++ * at zero.
++ * 0: Interrupt Controller not reset
++ * 1: Interrupt Controller is reset (Need to write back to 0, once reset)
++ */
++#define TC_VAL_IRQRST (0x01 << 4)
++
++/*
++ * Timer Reset for timers 0,1,2
++ * 0: Timer not reset
++ * 1: Timer is reset (Need to write back to 0, once reset)
++ */
++#define TC_VAL_TIMRST (0x01 << 3)
++
++/*
++ * Keyboard interface reset
++ * 0: Keyboard not reset
++ * 1: Keyboard is reset (Need to write back to 0, once reset)
++ */
++#define TC_VAL_KBDRST (0x01 << 1)
++
++/*
++ * GPIO reset
++ * 0: GPIO not reset
++ * 1: GPIO reset (Need to write back to 0, once reset)
++ */
++#define TC_VAL_GPIRST (0x01)
++
++/******************************************************************************
++ * External Reset Register
++ *****************************************************************************/
++#define TC_REG_EXTRSTN (0x83)
++
++/*
++ * External Reset ball (RESETN) enable
++ * This register is not on the global reset line, it is reset
++ * only by a power-on reset
++ * 0: RESETN ball is not used as hardware reset
++ * 1: RESETN is used as hardware reset
++ */
++#define TC_VAL_EXTRSTN (0x01)
++
++/******************************************************************************
++ * Reset Interrupt Clear register
++ *****************************************************************************/
++#define TC_REG_RSTINTCLR (0x84)
++
++/*
++ * Clears the RSTINT interrupt
++ * 0: No impact
++ * 1: Clear PORSTN interrupt (does not need to be re-written to 0)
++ */
++#define TC_VAL_IRQCLR (0X01)
++
++/******************************************************************************
++ * Power on Watch dog Register
++ *****************************************************************************/
++#define TC_REG_PORTRIM (0X85)
++
++/*
++ * Override factory setting for Power-on-reset with PRO_TRIMSEL value
++ * 0: Use factory setting
++ * 1: Use value defined in POR_TRIM
++ */
++#define TC_VAL_POR_SEL (0x01 << 7)
++
++/*
++ * 5 bit setting for Power-on-reset level, twos complement
++ * This value affects reset behavior for power-on reset judged on the
++ * voltage found on VCC.
++ */
++#define TC_VAL_POR_TRIM_MASK (0x1F)
++
++/******************************************************************************
++ * Clock Mode register
++ *****************************************************************************/
++#define TC_REG_CLKMODE (0x88)
++
++/*
++ * This register determines the operating mode
++ * 0: SLEEP mode, no SYSCLK generation
++ * 1: OPERATION mode
++ */
++#define TC_VAL_MODCTL (0x01)
++
++/******************************************************************************
++ * Clock Configure Register
++ *****************************************************************************/
++#define TC_REG_CLKCFG (0x89)
++
++/*
++ * Clock source selector
++ * This switch shall not be modified, if CLKMODE.MODCTL is at SLEEP
++ * 0: LVCMOS clock input
++ * 1: Internal RC-oscillator
++ */
++#define TC_VAL_CLKSRCSEL (0x01 << 6)
++
++/*
++ * Clock frequency doubler enable (should only be enabled when CLKDIV=0)
++ * 0: Disable clock frequency doubler
++ * 1: Enable clock frequency doubler
++ */
++#define TC_VAL_CLKFDEN (0x01 << 4)
++
++/* Clock divider for SYSCLK
++ * Used to divide a high frequency LVCMOS input clock DIR24 or alternatively
++ * the internal RC clock to the SYSCLK frequency. Keep in mind that SYSCLK
++ * frequency *6.5 must exceed the maximum allowed SCL frequency.
++ * Clock division ratio is 2
++ *
++ * 0x0: Divide by 1
++ * 0x1: Divide by 2
++ * 0x2: Divide by 4
++ * ...
++ * 0x9: Divide by 512 (Maximum legal division factor)
++ * 0xA: Not allowed
++ * ...
++ * 0xF: Not allowed
++ */
++#define TC_VAL_CLKDIV_MASK (0x0F)
++
++/******************************************************************************
++ * Clock Enable Register
++ *****************************************************************************/
++#define TC_REG_CLKEN (0x8A)
++
++/* Clock output enable Mask */
++#define TC_VAL_CLKOUTEN_MASK (0xC0)
++/* CLKOUT clock disabled */
++#define TC_VAL_CLKOUTEN_DISABLE (0x00)
++/* CLKOUT clock frequency = SYSCLK frequency */
++#define TC_VAL_CLKOUTEN_SYSCLK (0x40)
++/* CLKOUT clock frequency = 1/2 SYSCLK frequency */
++#define TC_VAL_CLKOUTEN_HALF_SYSCLK (0x80)
++
++/*
++ * Timer 0,1,2 clock enable
++ * 0: Timer 0,1 and 2 disabled
++ * 1: Timer 0,1 and 2 enabled
++ * The Timer clock can only be enabled, if an external clock feed
++ * (LVCMOS line) is used.
++ */
++#define TC_VAL_TIMEN (0x01 << 2)
++
++/*
++ * Keyboard clock enable
++ * 0: Keyboard clock disabled
++ * 1: Keyboard clock enabled
++ */
++#define TC_VAL_KBDEN (0x01)
++
++/******************************************************************************
++ * Auto Sleep Timer Enable register
++ *****************************************************************************/
++#define TC_REG_AUTOSLPENA (0x8B)
++
++/*
++ * Auto Sleep feature enable
++ * When Auto-sleep is on, the register MODCTL is controlled
++ * under a state machine and should not be programmed directly
++ * via I2C. Also, the register CLKCFG should not be programmed,
++ * when Auto-sleep is enabled
++ * 0: Auto-sleep feature is off
++ * 1: Auto-sleep feature is on
++ */
++#define TC_VAL_AUTO_ENABLE (0x01)
++
++/******************************************************************************
++ * AUTO Sleep Timer Register
++ *
++ * UPTIME10:0 : Minimum time the TC35894XBG stays in OPERATION mode
++ * Counts SYSCLK cycles before going into SLEEP mode
++ * The value programmed here is multiplied by 64 and copied into
++ * the timer at the time AUTOSLPENA.ENABLE is set to 1.
++ *****************************************************************************/
++#define TC_REG_AUTOSLPTIMER_HIGH (0x8C)
++#define TC_REG_AUTOSLPTIMER_LOW (0x8D)
++#define TC_VAL_UPTIME_LOW_MASK (0x03)
++
++/******************************************************************************
++ * I2C wakeup register
++ *****************************************************************************/
++#define TC_REG_I2CWAKEUP_EN (0x8E)
++
++/*
++ * I2C wake-up enable
++ * 0: Device does not wake-up by I2C access to KBD/TIM module when in SLEEP
++ * 1: Device wakes up by I2C access to KBD/TIM module in SLEEP.
++ */
++#define TC_VAL_I2CWEN (0x01)
++
++/******************************************************************************
++ * Modified Feature Set Register
++ *****************************************************************************/
++#define TC_REG_KBDMFS (0x8F) /* Write Only */
++
++/*
++ * Modified feature set enable / disable
++ * 0: TC35892XBG compatibility mode (modified features disabled)
++ * 1: Modified features enabled
++ */
++#define TC_VAL_MFSEN (0x01)
++
++/******************************************************************************
++ * Interrupt Status Register
++ *****************************************************************************/
++#define TC_REG_IRQST (0x91) /* Read Only */
++
++/*
++ * Supply failure on VCC. Also Power-on is considered
++ * as an initial supply failure.
++ * 0: No failure recorded
++ * 1: Failure Occurred
++ */
++#define TC_VAL_PORIRQ (0x01 << 7)
++
++/*
++ * Keyboard Interrupt (further key selection in keyboard
++ * module)
++ * 0: Inactive
++ * 1: Active
++ */
++#define TC_VAL_KBDIRQ (0x01 << 6)
++
++/*
++ * Direct keyboard interrupt (further key selection
++ * in GPIO module)
++ * 0: Inactive
++ * 1: Active
++ */
++#define TC_VAL_DKBD_IRQ (0x01 << 5)
++
++/*
++ * Timer 2 Expire (CDIRQ or CYCIRG)
++ * 0: Inactive
++ * 1: Active
++ */
++#define TC_VAL_TIM2IRQ (0x01 << 3)
++
++/*
++ * Timer 1 Expire (CDIRA or CYCIRQ)
++ * 0: Inactive
++ * 1: Active
++ */
++#define TC_VAL_TIM1IRQ (0x01 << 2)
++
++/*
++ * Timer 0 Expire (CDIRA or CYCIRQ)
++ * 0: Inactive
++ * 1: Active
++ */
++#define TC_VAL_TIM0IRQ (0x01 << 1)
++
++/*
++ * GPIO interrupt (further selection in GPIO module)
++ * 0: Inactive
++ * 1: Active
++ */
++#define TC_VAL_GPIIRQ (0x01)
++
++/******************************************************************************
++ * Drive0 Strength register
++ *
++ * KPX[7:0]DRV1:0 : Output drive strength for KPX[7:0] ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ ******************************************************************************/
++#define TC_REG_DRIVE0_HIGH (0xA0)
++#define TC_VAL_KPX7_LOW (0x00)
++#define TC_VAL_KPX7_MED_LOW (0x40)
++#define TC_VAL_KPX7_MED_HI (0x80)
++#define TC_VAL_KPX7_HI (0xC0)
++#define TC_VAL_KPX6_LOW (0x00)
++#define TC_VAL_KPX6_MED_LOW (0x01)
++#define TC_VAL_KPX6_MED_HI (0x02)
++#define TC_VAL_KPX6_HI (0x03)
++#define TC_VAL_KPX5_LOW (0x00)
++#define TC_VAL_KPX5_MED_LOW (0x04)
++#define TC_VAL_KPX5_MED_HI (0x08)
++#define TC_VAL_KPX5_HI (0x0C)
++#define TC_VAL_KPX4_LOW (0x00)
++#define TC_VAL_KPX4_MED_LOW (0x01)
++#define TC_VAL_KPX4_MED_HI (0x02)
++#define TC_VAL_KPX4_HI (0x03)
++
++#define TC_REG_DRIVE0_LOW (0xA1)
++#define TC_VAL_KPX3_LOW (0x00)
++#define TC_VAL_KPX3_MED_LOW (0x40)
++#define TC_VAL_KPX3_MED_HI (0x80)
++#define TC_VAL_KPX3_HI (0xC0)
++#define TC_VAL_KPX2_LOW (0x00)
++#define TC_VAL_KPX2_MED_LOW (0x01)
++#define TC_VAL_KPX2_MED_HI (0x02)
++#define TC_VAL_KPX2_HI (0x03)
++#define TC_VAL_KPX1_LOW (0x00)
++#define TC_VAL_KPX1_MED_LOW (0x04)
++#define TC_VAL_KPX1_MED_HI (0x08)
++#define TC_VAL_KPX1_HI (0x0C)
++#define TC_VAL_KPX0_LOW (0x00)
++#define TC_VAL_KPX0_MED_LOW (0x01)
++#define TC_VAL_KPX0_MED_HI (0x02)
++#define TC_VAL_KPX0_HI (0x03)
++
++/******************************************************************************
++ * Drive1 Strength register
++ *
++ * KPY[7:0]DRV1:0 : Output drive strength for KPY[7:0] ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ *****************************************************************************/
++#define TC_REG_DRIVE1_HIGH (0xA2)
++#define TC_VAL_KPY7_LOW (0x00)
++#define TC_VAL_KPY7_MED_LOW (0x40)
++#define TC_VAL_KPY7_MED_HI (0x80)
++#define TC_VAL_KPY7_HI (0xC0)
++#define TC_VAL_KPY6_LOW (0x00)
++#define TC_VAL_KPY6_MED_LOW (0x01)
++#define TC_VAL_KPY6_MED_HI (0x02)
++#define TC_VAL_KPY6_HI (0x03)
++#define TC_VAL_KPY5_LOW (0x00)
++#define TC_VAL_KPY5_MED_LOW (0x04)
++#define TC_VAL_KPY5_MED_HI (0x08)
++#define TC_VAL_KPY5_HI (0x0C)
++#define TC_VAL_KPY4_LOW (0x00)
++#define TC_VAL_KPY4_MED_LOW (0x01)
++#define TC_VAL_KPY4_MED_HI (0x02)
++#define TC_VAL_KPY4_HI (0x03)
++
++#define TC_REG_DRIVE1_LOW (0xA3)
++#define TC_VAL_KPY3_LOW (0x00)
++#define TC_VAL_KPY3_MED_LOW (0x40)
++#define TC_VAL_KPY3_MED_HI (0x80)
++#define TC_VAL_KPY3_HI (0xC0)
++#define TC_VAL_KPY2_LOW (0x00)
++#define TC_VAL_KPY2_MED_LOW (0x01)
++#define TC_VAL_KPY2_MED_HI (0x02)
++#define TC_VAL_KPY2_HI (0x03)
++#define TC_VAL_KPY1_LOW (0x00)
++#define TC_VAL_KPY1_MED_LOW (0x04)
++#define TC_VAL_KPY1_MED_HI (0x08)
++#define TC_VAL_KPY1_HI (0x0C)
++#define TC_VAL_KPY0_LOW (0x00)
++#define TC_VAL_KPY0_MED_LOW (0x01)
++#define TC_VAL_KPY0_MED_HI (0x02)
++#define TC_VAL_KPY0_HI (0x03)
++
++/******************************************************************************
++ * Drive2 Strength register
++ *
++ * EXTIO0DRV1:0 : Output drive strength for EXTIO0 ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ * PWM[2:0]DRV1:0 : Output drive strength for PWM2, PWM1, PWM0 ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ * KPY[11:8]DRV1:0 : Output drive strength for KPY[11:8] ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ *
++ *****************************************************************************/
++#define TC_REG_DRIVE2_HIGH (0xA4)
++#define TC_VAL_EXTIO0_LOW (0x00)
++#define TC_VAL_EXTIO0_MED_LOW (0x40)
++#define TC_VAL_EXTIO0_MED_HI (0x80)
++#define TC_VAL_EXTIO0_HI (0xC0)
++#define TC_VAL_PWM2_LOW (0x00)
++#define TC_VAL_PWM2_MED_LOW (0x10)
++#define TC_VAL_PWM2_MED_HI (0x20)
++#define TC_VAL_PWM2_HI (0x30)
++#define TC_VAL_PWM1_LOW (0x00)
++#define TC_VAL_PWM1_MED_LOW (0x04)
++#define TC_VAL_PWM1_MED_HI (0x08)
++#define TC_VAL_PWM1_HI (0x0C)
++#define TC_VAL_PWM0_LOW (0x00)
++#define TC_VAL_PWM0_MED_LOW (0x01)
++#define TC_VAL_PWM0_MED_HI (0x02)
++#define TC_VAL_PWM0_HI (0x03)
++
++#define TC_REG_DRIVE2_LOW (0xA5)
++#define TC_VAL_KPY11_LOW (0x00)
++#define TC_VAL_KPY11_MED_LOW (0x40)
++#define TC_VAL_KPY11_MED_HI (0x80)
++#define TC_VAL_KPY11_HI (0xC0)
++#define TC_VAL_KPY10_LOW (0x00)
++#define TC_VAL_KPY10_MED_LOW (0x01)
++#define TC_VAL_KPY10_MED_HI (0x02)
++#define TC_VAL_KPY10_HI (0x03)
++#define TC_VAL_KPY9_LOW (0x00)
++#define TC_VAL_KPY9_MED_LOW (0x04)
++#define TC_VAL_KPY9_MED_HI (0x08)
++#define TC_VAL_KPY9_HI (0x0C)
++#define TC_VAL_KPY8_LOW (0x00)
++#define TC_VAL_KPY8_MED_LOW (0x01)
++#define TC_VAL_KPY8_MED_HI (0x02)
++#define TC_VAL_KPY8_HI (0x03)
++
++/******************************************************************************
++ * Drive3 Strength register
++ *
++ * IRQNDRV1:0 : Output drive strength for IRANDRV ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ * SDADRV1:0 : Output drive strength for SDADRV ball
++ * 00: Lowest strength
++ * 01: Medium strength
++ * 10: Medium strength
++ * 11: Highest strength
++ *
++ *****************************************************************************/
++#define TC_REG_DRIVE_3 (0xA6)
++#define TC_VAL_IRQNDRV_LOW (0x00)
++#define TC_VAL_IRQNDRV_MED_LOW (0x04)
++#define TC_VAL_IRQNDRV_MED_HI (0x08)
++#define TC_VAL_IRQNDRV_HI (0x0C)
++#define TC_VAL_SDADRV_LOW (0x00)
++#define TC_VAL_SDADRV_MED_LOW (0x01)
++#define TC_VAL_SDADRV_MED_HI (0x02)
++#define TC_VAL_SDADRV_HI (0x03)
++
++/******************************************************************************
++ * IOCF Configuration register
++ *
++ * GPIOSEL3:0 : Overrides BALLCFG with GPIO functionality for DIR24 and
++ * PWM[2:0] balls.
++ * 0: Use functionality defined in BALLCFG for DIR24 and
++ * PWM[2:0] balls
++ * 1: Override DIR24 and PWM[2:0] functionality with GPIO
++ *
++ * IG : Global input gate
++ * 0: Disable all inputs
++ * 1: Enable all inputs
++ *
++ * BALLCFG1:0 : Ball configuration Setting
++ * See Data sheet for tables.
++ *****************************************************************************/
++#define TC_REG_IOCFG (0xA7)
++
++/******************************************************************************
++ * IOPC Ext registers
++ *
++ * DIR[25:24]PR 1:0 : Resistor enable for DIR[25:24] ball
++ * 00: No pull resistor
++ * 01: Pull down resistor
++ * 10: Pull up resistor
++ * 11: Pull up resistor
++ *****************************************************************************/
++#define TC_REG_IOPCEXT (0xA8)
++#define TC_VAL_DIR25_NO_RESISTOR (0x00)
++#define TC_VAL_DIR25_PULL_DOWN (0x04)
++#define TC_VAL_DIR25_PULL_UP (0x08)
++#define TC_VAL_DIR25_PULL_UP_HI (0x0C)
++#define TC_VAL_DIR24_NO_RESISTOR (0x00)
++#define TC_VAL_DIR24_PULL_DOWN (0x01)
++#define TC_VAL_DIR24_PULL_UP (0x02)
++#define TC_VAL_DIR24_PULL_UP_HI (0x03)
++
++/******************************************************************************
++ * IOPC0 Registers
++ *
++ * KPX[7:0]DRV1:0 : Resistor enable for KPX[7:0] ball
++ * 00: No pull resistor
++ * 01: Pull down resistor
++ * 10: Pull up resistor
++ * 11: Pull up resistor
++ *****************************************************************************/
++#define TC_REG_IOPC0_HIGH (0xAA)
++#define TC_VAL_KPX7_NO_RESISTOR (0x00)
++#define TC_VAL_KPX7_PULL_DOWN (0x40)
++#define TC_VAL_KPX7_PULL_UP (0x80)
++#define TC_VAL_KPX7_PULL_UP_HI (0xC0)
++#define TC_VAL_KPX6_NO_RESISTOR (0x00)
++#define TC_VAL_KPX6_PULL_DOWN (0x01)
++#define TC_VAL_KPX6_PULL_UP (0x02)
++#define TC_VAL_KPX6_PULL_UP_HI (0x03)
++#define TC_VAL_KPX5_NO_RESISTOR (0x00)
++#define TC_VAL_KPX5_PULL_DOWN (0x04)
++#define TC_VAL_KPX5_PULL_UP (0x08)
++#define TC_VAL_KPX5_PULL_UP_HI (0x0C)
++#define TC_VAL_KPX4_NO_RESISTOR (0x00)
++#define TC_VAL_KPX4_PULL_DOWN (0x01)
++#define TC_VAL_KPX4_PULL_UP (0x02)
++#define TC_VAL_KPX4_PULL_UP_HI (0x03)
++
++#define TC_REG_IOPC0_LOW (0xAB)
++#define TC_VAL_KPX3_NO_RESISTOR (0x00)
++#define TC_VAL_KPX3_PULL_DOWN (0x40)
++#define TC_VAL_KPX3_PULL_UP (0x80)
++#define TC_VAL_KPX3_PULL_UP_HI (0xC0)
++#define TC_VAL_KPX2_NO_RESISTOR (0x00)
++#define TC_VAL_KPX2_PULL_DOWN (0x01)
++#define TC_VAL_KPX2_PULL_UP (0x02)
++#define TC_VAL_KPX2_PULL_UP_HI (0x03)
++#define TC_VAL_KPX1_NO_RESISTOR (0x00)
++#define TC_VAL_KPX1_PULL_DOWN (0x04)
++#define TC_VAL_KPX1_PULL_UP (0x08)
++#define TC_VAL_KPX1_PULL_UP_HI (0x0C)
++#define TC_VAL_KPX0_NO_RESISTOR (0x00)
++#define TC_VAL_KPX0_PULL_DOWN (0x01)
++#define TC_VAL_KPX0_PULL_UP (0x02)
++#define TC_VAL_KPX0_PULL_UP_HI (0x03)
++
++/******************************************************************************
++ * IOPC1 Registers (0xAC, 0xAD)
++ *
++ * KPY[7:0]DRV1:0 : Resistor enable for KPY[7:0] ball
++ * 00: No pull resistor
++ * 01: Pull down resistor
++ * 10: Pull up resistor
++ * 11: Pull up resistor
++ *****************************************************************************/
++#define TC_REG_IOPC1_HIGH (0xAC)
++#define TC_VAL_KPY7_NO_RESISTOR (0x00)
++#define TC_VAL_KPY7_PULL_DOWN (0x40)
++#define TC_VAL_KPY7_PULL_UP (0x80)
++#define TC_VAL_KPY7_PULL_UP_HI (0xC0)
++#define TC_VAL_KPY6_NO_RESISTOR (0x00)
++#define TC_VAL_KPY6_PULL_DOWN (0x01)
++#define TC_VAL_KPY6_PULL_UP (0x02)
++#define TC_VAL_KPY6_PULL_UP_HI (0x03)
++#define TC_VAL_KPY5_NO_RESISTOR (0x00)
++#define TC_VAL_KPY5_PULL_DOWN (0x04)
++#define TC_VAL_KPY5_PULL_UP (0x08)
++#define TC_VAL_KPY5_PULL_UP_HI (0x0C)
++#define TC_VAL_KPY4_NO_RESISTOR (0x00)
++#define TC_VAL_KPY4_PULL_DOWN (0x01)
++#define TC_VAL_KPY4_PULL_UP (0x02)
++#define TC_VAL_KPY4_PULL_UP_HI (0x03)
++
++#define TC_REG_IOPC1_LOW (0xAD)
++#define TC_VAL_KPY3_NO_RESISTOR (0x00)
++#define TC_VAL_KPY3_PULL_DOWN (0x40)
++#define TC_VAL_KPY3_PULL_UP (0x80)
++#define TC_VAL_KPY3_PULL_UP_HI (0xC0)
++#define TC_VAL_KPY2_NO_RESISTOR (0x00)
++#define TC_VAL_KPY2_PULL_DOWN (0x01)
++#define TC_VAL_KPY2_PULL_UP (0x02)
++#define TC_VAL_KPY2_PULL_UP_HI (0x03)
++#define TC_VAL_KPY1_NO_RESISTOR (0x00)
++#define TC_VAL_KPY1_PULL_DOWN (0x04)
++#define TC_VAL_KPY1_PULL_UP (0x08)
++#define TC_VAL_KPY1_PULL_UP_HI (0x0C)
++#define TC_VAL_KPY0_NO_RESISTOR (0x00)
++#define TC_VAL_KPY0_PULL_DOWN (0x01)
++#define TC_VAL_KPY0_PULL_UP (0x02)
++#define TC_VAL_KPY0_PULL_UP_HI (0x03)
++
++/******************************************************************************
++ * IOPC2 Registers (0xAE, 0xAF)
++ *
++ * EXTIO0PR1:0 : Resistor enable for EXTIO0 ball
++ * 00: No pull resistor
++ * 01: Pull down resistor
++ * 10: Pull up resistor
++ * 11: Pull up resistor
++ * PWM[2:0]PR1:0 : Resistor enable for PWM[2:0] ball
++ * 00: No pull resistor
++ * 01: Pull down resistor
++ * 10: Pull up resistor
++ * 11: Pull up resistor
++ * KPY[11:8]PR1:0: Resistor enable for KPY[11:8] ball
++ * 00: No pull resistor
++ * 01: Pull down resistor
++ * 10: Pull up resistor
++ * 11: Pull up resistor
++ *****************************************************************************/
++#define TC_REG_IOPC2_HIGH (0xAE)
++#define TC_VAL_EXTIO0_NO_RESISTOR (0x00)
++#define TC_VAL_EXTIO0_PULL_DOWN (0x40)
++#define TC_VAL_EXTIO0_PULL_UP (0x80)
++#define TC_VAL_EXTIO0_PULL_UP_HI (0xC0)
++#define TC_VAL_PWM2_NO_RESISTOR (0x00)
++#define TC_VAL_PWM2_PULL_DOWN (0x10)
++#define TC_VAL_PWM2_PULL_UP (0x20)
++#define TC_VAL_PWM2_PULL_UP_HI (0x30)
++#define TC_VAL_PWM1_NO_RESISTOR (0x00)
++#define TC_VAL_PWM1_PULL_DOWN (0x04)
++#define TC_VAL_PWM1_PULL_UP (0x08)
++#define TC_VAL_PWM1_PULL_UP_HI (0x0C)
++#define TC_VAL_PWM0_NO_RESISTOR (0x00)
++#define TC_VAL_PWM0_PULL_DOWN (0x01)
++#define TC_VAL_PWM0_PULL_UP (0x02)
++#define TC_VAL_PWM0_PULL_UP_HI (0x03)
++
++#define TC_REG_IOPC2_LOW (0xAF)
++#define TC_VAL_KPY11_NO_RESISTOR (0x00)
++#define TC_VAL_KPY11_PULL_DOWN (0x40)
++#define TC_VAL_KPY11_PULL_UP (0x80)
++#define TC_VAL_KPY11_PULL_UP_HI (0xC0)
++#define TC_VAL_KPY10_NO_RESISTOR (0x00)
++#define TC_VAL_KPY10_PULL_DOWN (0x01)
++#define TC_VAL_KPY10_PULL_UP (0x02)
++#define TC_VAL_KPY10_PULL_UP_HI (0x03)
++#define TC_VAL_KPY9_NO_RESISTOR (0x00)
++#define TC_VAL_KPY9_PULL_DOWN (0x04)
++#define TC_VAL_KPY9_PULL_UP (0x08)
++#define TC_VAL_KPY9_PULL_UP_HI (0x0C)
++#define TC_VAL_KPY8_NO_RESISTOR (0x00)
++#define TC_VAL_KPY8_PULL_DOWN (0x01)
++#define TC_VAL_KPY8_PULL_UP (0x02)
++#define TC_VAL_KPY8_PULL_UP_HI (0x03)
++
++/******************************************************************************
++ * GPIO Dat registers
++ *
++ * DATA23:0 : Data 23:0 (on ball EXTIO0, PWM[2:0], KPY[11:0] and KPX[7:0] when
++ * GPIO selected)
++ * 0: Output "0" when corresponding MASK bit is set to "1"
++ * 1: Output "1" when corresponding MASK bit is set to "1"
++ *****************************************************************************/
++#define TC_REG_GPIODATA0 (0xC0)
++#define TC_REG_GPIODATA1 (0xC2)
++#define TC_REG_GPIODATA2 (0xC4)
++
++/******************************************************************************
++ * GPIO Data Mask Registers
++ *
++ * MASK23:0 Mask bit for DATA23:0 (WRITE_ONLY)
++ * 0: Disable DATA23:0 bit setting
++ * 1: Enable DATA23:0 bit setting
++ *****************************************************************************/
++#define TC_REG_GPIODATA0_MASK (0xC1)
++#define TC_REG_GPIODATA1_MASK (0xC3)
++#define TC_REG_GPIODATA2_MASK (0xC5)
++
++/******************************************************************************
++ * GPIO Direction Registers
++ *
++ * DIR23:0 : Direction bits for DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0] and
++ * KPX[7:0])
++ * 0: Input Mode
++ * 1: Output Mode
++ *****************************************************************************/
++#define TC_REG_GPIODIR0 (0xC6)
++#define TC_REG_GPIODIR1 (0xC7)
++#define TC_REG_GPIODIR2 (0xC8)
++
++/******************************************************************************
++ * GPIO Interrupt Sense register
++ *
++ * IS23:0 : Interrupt sense bits for DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0] and
++ * KPX[7:0])
++ * 0: Edge sensitive interrupt
++ * 1: Level sensitive
++ *****************************************************************************/
++#define TC_REG_GPIOIS0 (0xC9)
++#define TC_REG_GPIOIS1 (0xCA)
++#define TC_REG_GPIOIS2 (0xCB)
++
++/*****************************************************************************
++ * GPIO Both Edges Interrupt register
++ *
++ * BE23:0 : Interrupt both edges bits for DATA23:0 (EXTIO0, PWM[2:0],
++ * KPY[11:0] and KPX[7:0]) IBE23 register bit is used also for DIR24
++ * input and DIR25 input when they are configured as direct key input.
++ * 0: Interrupt generated at the active edges
++ * 1: Interrupt generated at both edges
++ *****************************************************************************/
++#define TC_REG_GPIOIBE0 (0xCC)
++#define TC_REG_GPIOIBE1 (0xCD)
++#define TC_REG_GPIOIBE2 (0xCE)
++
++/******************************************************************************
++ * GPIO Interrupt Event register
++ *
++ * IEV23:0 : Interrupt event select from DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0]
++ * and KPX[7:0]) IEV23 register bit is used also for DIR24 input and
++ * DIR25 input when they are configured as direct key input
++ * 0: Interrupt at low level of falling edge
++ * 1: Interrupt at high level of rising edge
++ *****************************************************************************/
++#define TC_REG_GPIOIEV0 (0xCF)
++#define TC_REG_GPIOIEV1 (0xD0)
++#define TC_REG_GPIOIEV2 (0xD1)
++
++/******************************************************************************
++ * GPIO Interrupt Enable register
++ *
++ * IE23:0 : Interrupt enable for DATE23:0 (EXTIO0, PWM[2:0], KPY[11:0] and
++ * KPX[7:0])
++ * 0: Disable Interrupt
++ * 1: Enable Interrupt
++ *****************************************************************************/
++#define TC_REG_GPIOIE0 (0xD2)
++#define TC_REG_GPIOIE1 (0xD3)
++#define TC_REG_GPIOIE2 (0xD4)
++
++/******************************************************************************
++ * GPIO Raw Input Status register
++ *
++ * RIS23:0 : Raw interrupt status for DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0] and
++ * KPX[7:0])
++ * 0: No interrupt condition at GPIO
++ * 1: Interrupt condition at GPIO
++ *****************************************************************************/
++#define TC_REG_GPIORIS0 (0xD6)
++#define TC_REG_GPIORIS1 (0xD7)
++#define TC_REG_GPIORIS2 (0xD8)
++
++/******************************************************************************
++ * GPIO Mask Interrupt Status register
++ *
++ * MIS23:0 : Masked interrupt status for (EXTIO0, PWM[2:0], KPY[11:0] and
++ * KPX[7:0])
++ * 0: No interrupt condition from GPIO
++ * 1: Interrupt at GPIO is active
++ *****************************************************************************/
++#define TC_REG_GPIOMIS0 (0xD9)
++#define TC_REG_GPIOMIS1 (0xDA)
++#define TC_REG_GPIOMIS2 (0xDB)
++
++/******************************************************************************
++ * GPIO Interrupt Clear register
++ *
++ * IC23:0 : Clear interrupt of DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0] and
++ * KPX[7:0])
++ * 0: No effect
++ * 1: Clear corresponding interrupt
++ *****************************************************************************/
++#define TC_REG_GPIOIC0 (0xDC)
++#define TC_REG_GPIOIC1 (0xDD)
++#define TC_REG_GPIOIC2 (0xDE)
++
++/******************************************************************************
++ * GPIO OMS Registers
++ *
++ * ODM23:0 : Open Drain Mode Select for DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0]
++ * and KPX[7:0])
++ * 0: Only N-MOS transistor is active in output driver stage.
++ * Output can be driven to GND or HI-Z
++ * 1: Only P-MOS transistor is active in output driver stage.
++ * Output can be driven to VCC or Hi-Z
++ *
++ * ODE23:0: Open Drain Enable for DATA23:0 (EXTIO0, PWM[2:0], KPY[11:0]
++ * and KPX[7:0])
++ * 0: Full buffer
++ * 1: Open Drain Functionality
++ *****************************************************************************/
++#define TC_REG_GPIOOMS0_A (0xE0)
++#define TC_REG_GPIOOMS0_B (0xE1)
++#define TC_REG_GPIOOMS1_A (0xE2)
++#define TC_REG_GPIOOMS1_B (0xE3)
++#define TC_REG_GPIOOMS2_A (0xE4)
++#define TC_REG_GPIOOMS2_B (0xE5)
++
++/******************************************************************************
++ * GPIO Wake Registers
++ *
++ * WAKE23:0 : Each bit corresponds to a ball except WAKE23. WAKE23 is
++ * corresponding DIR25:24 and EXTIO0 balls. When bit set, the
++ * corresponding ball contributes to wake-up from Auto-sleep
++ * mode. When set, the corresponding ball is also used as a trigger
++ * event to the TRIGGER pattern of the Timer module.
++ * 0: Wakeup from Auto-sleep mode on corresponding ball is disabled.
++ * 1: The corresponding ball contributes to wake-up from Auto-Sleep
++ * mode. And, the corresponding ball is also used as a trigger
++ * event to the TRIGGER pattern of the timer module.
++ *****************************************************************************/
++#define TC_REG_GPIOWAKE0 (0xE9)
++#define TC_REG_GPIOWAKE1 (0xEA)
++#define TC_REG_GPIOWAKE2 (0xEB)
++
++/******************************************************************************
++ * Direct Key Event Code register
++ *****************************************************************************/
++#define TC_REG_DEVTCODE (0xE6)
++
++/*
++ * Indicates whether keyboard event was a key press or a key release
++ * 0: Key was pressed
++ * 1: Key was released
++ */
++#define TC_VAL_DKEYSTAT (0x01 << 5)
++
++/*
++ * Direct key event code
++ * 0x01: event on KPX0 ball
++ * 0x02: event on KPX1 ball
++ * ...
++ * 0x19: event on DIR24 ball
++ * 0x1A: event on DIR25 ball
++ * 0x1F: event buffer empty
++ */
++#define TC_VAL_DKEYCODE_MASK (0x1F)
++
++/******************************************************************************
++ * Input De-Bounce register
++ *****************************************************************************/
++#define TC_REG_DBOUNCE (0xE8)
++
++/*
++ * Enables de-bouncing feature on general purpose input lines.
++ * Debouncing makes the input lines immune to noise.
++ * 0: No synchronization
++ * 1: Synchronization of the GPIO input lines according the value
++ * conf. in DBOUNCE.
++ */
++#define TC_VAL_DB_SYNC (0x01 << 5)
++
++/*
++ * De-Bounce time for the inputs.
++ * 00: 1.5ms
++ * 01: 3.0ms
++ * 02: 4.5ms
++ * ...
++ * 1F: 48ms
++ */
++#define TC_VAL_DBOUNCE_MASK (0x1F)
++
++/******************************************************************************
++ * Direct Keypad Registers (0-3)
++ *
++ * DIRECT23-0: Direct keypad bits take priority over anything else These bits
++ * must be cleared to '0' before IOCFG is accessed to set other
++ * functions for the pins.
++ * 0: General purpose input/output functionality is active
++ * 1: Direct keypad functionality is active.
++ *****************************************************************************/
++#define TC_REG_DIRECT0 (0xEC)
++#define TC_VAL_DIRECT7 (0x01 << 7)
++#define TC_VAL_DIRECT6 (0x01 << 6)
++#define TC_VAL_DIRECT5 (0x01 << 5)
++#define TC_VAL_DIRECT4 (0x01 << 4)
++#define TC_VAL_DIRECT3 (0x01 << 3)
++#define TC_VAL_DIRECT2 (0x01 << 2)
++#define TC_VAL_DIRECT1 (0x01 << 1)
++#define TC_VAL_DIRECT0 (0x01)
++
++#define TC_REG_DIRECT1 (0xED)
++#define TC_VAL_DIRECT15 (0x01 << 7)
++#define TC_VAL_DIRECT14 (0x01 << 6)
++#define TC_VAL_DIRECT13 (0x01 << 5)
++#define TC_VAL_DIRECT12 (0x01 << 4)
++#define TC_VAL_DIRECT11 (0x01 << 3)
++#define TC_VAL_DIRECT10 (0x01 << 2)
++#define TC_VAL_DIRECT9 (0x01 << 1)
++#define TC_VAL_DIRECT8 (0x01)
++
++#define TC_REG_DIRECT2 (0xEE)
++#define TC_VAL_DIRECT23 (0x01 << 7)
++#define TC_VAL_DIRECT22 (0x01 << 6)
++#define TC_VAL_DIRECT21 (0x01 << 5)
++#define TC_VAL_DIRECT20 (0x01 << 4)
++#define TC_VAL_DIRECT19 (0x01 << 3)
++#define TC_VAL_DIRECT18 (0x01 << 2)
++#define TC_VAL_DIRECT17 (0x01 << 1)
++#define TC_VAL_DIRECT16 (0x01)
++
++#define TC_REG_DIRECT3 (0xEF)
++#define TC_VAL_DIRECT25 (0x01 << 1)
++#define TC_VAL_DIRECT24 (0x01)
++
++/******************************************************************************
++ * Direct Key Raw Interrupt register
++ *****************************************************************************/
++#define TC_REG_DKBDRIS (0xF0)
++
++/*
++ * Raw Event Lost Interrupt
++ * This bit is cleared by writing into DEVTIC
++ * 0: No interrupt
++ * 1: More than 8 direct key events have been detected and
++ * caused the event buffer to overflow.
++ */
++#define TC_VAL_DRELINT (0x01 << 1)
++
++/*
++ * Raw direct key event interrupt
++ * Reading from DEVTCODE until the buffer is empty will automatically clear
++ * this interrupt
++ * 0: No interrupt
++ * 1: At least one direct key press or direct key release in the event buffer.
++ */
++#define TC_VAL_DREVTINT (0x01)
++
++/******************************************************************************
++ * Direct Key Mask Interrupt register
++ *****************************************************************************/
++#define TC_REG_DKBDMIS (0xF1)
++
++/*
++ * Masked Event Lost Interrupt
++ * 0: No interrupt
++ * 1: More than 8 direct key events have been detected and
++ * and caused the event buffer to overflow.
++ */
++#define TC_VAL_DMELINT (0x01 << 1)
++
++/*
++ * Masked Direct key Event interrupt
++ * 0: No interrupt
++ * 1: At least one direct key press or direct key release is in
++ * the event buffer.
++ */
++#define TC_VAL_DMEVTINT (0x01)
++
++/******************************************************************************
++ * Direct Key Interrupt Clear register
++ *****************************************************************************/
++#define TC_REG_DKBDIC (0xF2)
++
++/*
++ * Clear event buffer and corresponding interrupts DREVTINT and DRELINT
++ * The host does not need to write "0". Write "1" every time when clearing
++ * the event buffer.
++ * 0: No action
++ * 1: Clear event buffer and corresponding interrupts REVTINT and RELINT
++ */
++#define TC_VAL_DEVTIC (0x01)
++
++/******************************************************************************
++ * Direct Key Mask Register
++ *****************************************************************************/
++#define TC_REG_DKBDMSK (0xF3)
++
++/*
++ * Enable keyboard event lost interrupt
++ * 0: Keyboard event lost interrupt is enabled
++ * 1: Keyboard event lost interrupt is disabled
++ */
++#define TC_VAL_DMSKELINT (0x01 << 1)
++
++/* Enable keyboard event interrupt
++ * 0: Keyboard event interrupt is enabled
++ * 1: Keyboard event interrupt is disabled
++ */
++#define TC_VAL_DMSKEINT (0x01)
++
++#endif /* __TC3589XBG_REGS_H */
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -603,4 +603,41 @@
+ To compile this driver as a module, choose M here: the
+ module will be called tps6507x_ts.
+
++config TOUCHSCREEN_CY8CTMG110
++ tristate "cy8ctmg110 touchscreen"
++ depends on I2C
++ depends on GPIOLIB
++ help
++ Say Y here if you have a cy8ctmg110 capacitive touchscreen
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called cy8ctmg110_ts.
++
++config TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ bool "cy8ctmg110 multiple interface support"
++ default y
++ depends on TOUCHSCREEN_CY8CTMG110
++ help
++ Say Y here if you want each contact point (up to 2) supported by
++ the cy8ctmg110 capacitive touchscreen to be exposed as a
++	  separate input device.
++
++ This enables MPX and some multi-touch applications to work.
++
++ If unsure, say Y.
++
++config TOUCHSCREEN_CLEARPAD_TM1217
++ tristate "Synaptics Clearpad TM1217"
++ depends on I2C
++ depends on GPIOLIB
++ help
++ Say Y here if you have a Synaptics Clearpad TM1217 Controller
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called clearpad_tm1217.
++
+ endif
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -12,6 +12,8 @@
+ obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
+ obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+ obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
++obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
++obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += clearpad_tm1217.o
+ obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
+ obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
+--- /dev/null
++++ b/drivers/input/touchscreen/clearpad_tm1217.c
+@@ -0,0 +1,674 @@
++/*
++ * clearpad_tm1217.c - Touch Screen driver for Synaptics Clearpad
++ * TM1217 controller
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * Questions/Comments/Bug fixes to Ramesh Agarwal (ramesh.agarwal@intel.com)
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/i2c.h>
++#include <linux/timer.h>
++#include <linux/gpio.h>
++#include <linux/hrtimer.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/i2c/cp_tm1217.h>
++
++#define CPTM1217_DEVICE_NAME "cptm1217"
++#define CPTM1217_DRIVER_NAME CPTM1217_DEVICE_NAME
++
++#define MAX_TOUCH_SUPPORTED 2
++#define TOUCH_SUPPORTED 1
++#define SAMPLING_FREQ 80 /* Frequency in HZ */
++#define DELAY_BTWIN_SAMPLE (1000 / SAMPLING_FREQ)
++#define WAIT_FOR_RESPONSE 5 /* 5msec just works */
++#define MAX_RETRIES 5 /* As above */
++#define INCREMENTAL_DELAY 5 /* As above */
++
++/* Register Definitions */
++#define TMA1217_DEV_STATUS 0x13 /* Device Status */
++#define TMA1217_INT_STATUS 0x14 /* Interrupt Status */
++
++/* Controller can detect up to 2 possible finger touches.
++ * Each finger touch provides 12 bit X Y co-ordinates, the values are split
++ * across 2 registers, and an 8 bit Z value */
++#define TMA1217_FINGER_STATE 0x18 /* Finger State */
++#define TMA1217_FINGER1_X_HIGHER8 0x19 /* Higher 8 bit of X coordinate */
++#define TMA1217_FINGER1_Y_HIGHER8 0x1A /* Higher 8 bit of Y coordinate */
++#define TMA1217_FINGER1_XY_LOWER4 0x1B /* Lower 4 bits of X and Y */
++#define TMA1217_FINGER1_Z_VALUE 0x1D /* 8 bit Z value for finger 1 */
++#define TMA1217_FINGER2_X_HIGHER8 0x1E /* Higher 8 bit of X coordinate */
++#define TMA1217_FINGER2_Y_HIGHER8 0x1F /* Higher 8 bit of Y coordinate */
++#define TMA1217_FINGER2_XY_LOWER4 0x20 /* Lower 4 bits of X and Y */
++#define TMA1217_FINGER2_Z_VALUE 0x22 /* 8 bit Z value for finger 2 */
++#define TMA1217_DEVICE_CTRL 0x23 /* Device Control */
++#define TMA1217_INTERRUPT_ENABLE 0x24 /* Interrupt Enable */
++#define TMA1217_REPORT_MODE 0x2B /* Reporting Mode */
++#define TMA1217_MAX_X_LOWER8 0x31 /* Bit 0-7 for Max X */
++#define TMA1217_MAX_X_HIGHER4 0x32 /* Bit 8-11 for Max X */
++#define TMA1217_MAX_Y_LOWER8 0x33 /* Bit 0-7 for Max Y */
++#define TMA1217_MAX_Y_HIGHER4 0x34 /* Bit 8-11 for Max Y */
++#define TMA1217_DEVICE_CMD_RESET 0x67 /* Device CMD reg for reset */
++#define TMA1217_DEVICE_CMD_REZERO 0x69 /* Device CMD reg for rezero */
++
++#define TMA1217_MANUFACTURER_ID 0x73 /* Manufacturer Id */
++#define TMA1217_PRODUCT_FAMILY 0x75 /* Product Family */
++#define TMA1217_FIRMWARE_REVISION 0x76 /* Firmware Revision */
++#define TMA1217_SERIAL_NO_HIGH 0x7C /* Bit 8-15 of device serial no. */
++#define TMA1217_SERIAL_NO_LOW 0x7D /* Bit 0-7 of device serial no. */
++#define TMA1217_PRODUCT_ID_START 0x7E /* Start address for 10 byte ID */
++#define TMA1217_DEVICE_CAPABILITY 0x8B /* Reporting capability */
++
++
++/*
++ * The touch position structure.
++ */
++struct touch_state {
++ int x;
++ int y;
++ bool button;
++};
++
++/* Device Specific info given by the controller */
++struct cp_dev_info {
++ u16 maxX;
++ u16 maxY;
++};
++
++/* Vendor related info given by the controller */
++struct cp_vendor_info {
++ u8 vendor_id;
++ u8 product_family;
++ u8 firmware_rev;
++ u16 serial_no;
++};
++
++/*
++ * Private structure to store the device details
++ */
++struct cp_tm1217_device {
++ struct i2c_client *client;
++ struct device *dev;
++ struct cp_vendor_info vinfo;
++ struct cp_dev_info dinfo;
++ struct input_dev_info {
++ char phys[32];
++ char name[128];
++ struct input_dev *input;
++ struct touch_state touch;
++ } cp_input_info[MAX_TOUCH_SUPPORTED];
++
++ int thread_running;
++ struct mutex thread_mutex;
++
++ int gpio;
++};
++
++
++/* The following functions are used to read/write registers on the device
++ * as per the RMI protocol. Technically, a page select should be written
++ * before doing read/write but since the register offsets are below 0xFF
++ * we can use the default value of page which is 0x00
++ */
++static int cp_tm1217_read(struct cp_tm1217_device *ts,
++ u8 *req, int size)
++{
++ int i, retval;
++
++ /* Send the address */
++ retval = i2c_master_send(ts->client, &req[0], 1);
++ if (retval != 1) {
++ dev_err(ts->dev, "cp_tm1217: I2C send failed\n");
++ return retval;
++ }
++ msleep(WAIT_FOR_RESPONSE);
++ for (i = 0; i < MAX_RETRIES; i++) {
++ retval = i2c_master_recv(ts->client, &req[1], size);
++ if (retval == size) {
++ break;
++ } else {
++ msleep(INCREMENTAL_DELAY);
++ dev_dbg(ts->dev, "cp_tm1217: Retry count is %d\n", i);
++ }
++ }
++ if (retval != size)
++ dev_err(ts->dev, "cp_tm1217: Read from device failed\n");
++
++ return retval;
++}
++
++static int cp_tm1217_write(struct cp_tm1217_device *ts,
++ u8 *req, int size)
++{
++ int retval;
++
++ /* Send the address and the data to be written */
++ retval = i2c_master_send(ts->client, &req[0], size + 1);
++ if (retval != size + 1) {
++ dev_err(ts->dev, "cp_tm1217: I2C write failed: %d\n", retval);
++ return retval;
++ }
++ /* Wait for the write to complete. TBD why this is required */
++ msleep(WAIT_FOR_RESPONSE);
++
++ return size;
++}
++
++static int cp_tm1217_mask_interrupt(struct cp_tm1217_device *ts)
++{
++ u8 req[2];
++ int retval;
++
++ req[0] = TMA1217_INTERRUPT_ENABLE;
++ req[1] = 0x0;
++ retval = cp_tm1217_write(ts, req, 1);
++ if (retval != 1)
++ return -EIO;
++
++ return 0;
++}
++
++static int cp_tm1217_unmask_interrupt(struct cp_tm1217_device *ts)
++{
++ u8 req[2];
++ int retval;
++
++ req[0] = TMA1217_INTERRUPT_ENABLE;
++ req[1] = 0xa;
++ retval = cp_tm1217_write(ts, req, 1);
++ if (retval != 1)
++ return -EIO;
++
++ return 0;
++}
++
++static void process_touch(struct cp_tm1217_device *ts, int index)
++{
++ int retval;
++ struct input_dev_info *input_info =
++ (struct input_dev_info *)&ts->cp_input_info[index];
++ u8 xy_data[6];
++
++ if (index == 0)
++ xy_data[0] = TMA1217_FINGER1_X_HIGHER8;
++ else
++ xy_data[0] = TMA1217_FINGER2_X_HIGHER8;
++
++ retval = cp_tm1217_read(ts, xy_data, 5);
++ if (retval < 5) {
++ dev_err(ts->dev, "cp_tm1217: XY read from device failed\n");
++ return;
++ }
++
++ /* Note: Currently not using the Z values but may be required in
++ the future. */
++ input_info->touch.x = (xy_data[1] << 4)
++ | (xy_data[3] & 0x0F);
++ input_info->touch.y = (xy_data[2] << 4)
++ | ((xy_data[3] & 0xF0) >> 4);
++ input_report_abs(input_info->input, ABS_X, input_info->touch.x);
++ input_report_abs(input_info->input, ABS_Y, input_info->touch.y);
++ input_sync(input_info->input);
++}
++
++static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
++{
++ u8 req[2];
++ int retval, i, finger_touched;
++
++ do {
++ req[0] = TMA1217_FINGER_STATE;
++ retval = cp_tm1217_read(ts, req, 1);
++ if (retval != 1) {
++ dev_err(ts->dev,
++ "cp_tm1217: Read from device failed\n");
++ continue;
++ }
++ finger_touched = 0;
++ /* Start sampling until the pressure is below
++ threshold */
++ for (i = 0; i < TOUCH_SUPPORTED; i++) {
++ if (req[1] & 0x3) {
++ finger_touched++;
++ if (ts->cp_input_info[i].touch.button == 0) {
++ /* send the button touch event */
++ input_report_key(
++ ts->cp_input_info[i].input,
++ BTN_TOUCH, 1);
++ ts->cp_input_info[i].touch.button = 1;
++ }
++ process_touch(ts, i);
++ } else {
++ if (ts->cp_input_info[i].touch.button == 1) {
++ /* send the button release event */
++ input_report_key(
++ ts->cp_input_info[i].input,
++ BTN_TOUCH, 0);
++ ts->cp_input_info[i].touch.button = 0;
++ }
++ }
++ req[1] = req[1] >> 2;
++ }
++ msleep(DELAY_BTWIN_SAMPLE);
++ } while (finger_touched > 0);
++}
++
++static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
++{
++ struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle;
++ u8 req[2];
++ int retval;
++
++ /* Check if another thread is already running */
++ mutex_lock(&ts->thread_mutex);
++ if (ts->thread_running == 1) {
++ mutex_unlock(&ts->thread_mutex);
++ return IRQ_HANDLED;
++ } else {
++ ts->thread_running = 1;
++ mutex_unlock(&ts->thread_mutex);
++ }
++
++ /* Mask the interrupts */
++ retval = cp_tm1217_mask_interrupt(ts);
++
++ /* Read the Interrupt Status register to find the cause of the
++ Interrupt */
++ req[0] = TMA1217_INT_STATUS;
++ retval = cp_tm1217_read(ts, req, 1);
++ if (retval != 1)
++ goto exit_thread;
++
++ if (!(req[1] & 0x8))
++ goto exit_thread;
++
++ cp_tm1217_get_data(ts);
++
++exit_thread:
++ /* Unmask the interrupts before going to sleep */
++ retval = cp_tm1217_unmask_interrupt(ts);
++
++ mutex_lock(&ts->thread_mutex);
++ ts->thread_running = 0;
++ mutex_unlock(&ts->thread_mutex);
++
++ return IRQ_HANDLED;
++}
++
++static int cp_tm1217_init_data(struct cp_tm1217_device *ts)
++{
++ int retval;
++ u8 req[2];
++
++ /* Read the vendor id/ fw revision etc. Ignoring return check as this
++ is non critical info */
++ req[0] = TMA1217_MANUFACTURER_ID;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->vinfo.vendor_id = req[1];
++
++ req[0] = TMA1217_PRODUCT_FAMILY;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->vinfo.product_family = req[1];
++
++ req[0] = TMA1217_FIRMWARE_REVISION;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->vinfo.firmware_rev = req[1];
++
++ req[0] = TMA1217_SERIAL_NO_HIGH;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->vinfo.serial_no = (req[1] << 8);
++
++ req[0] = TMA1217_SERIAL_NO_LOW;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->vinfo.serial_no = ts->vinfo.serial_no | req[1];
++
++ req[0] = TMA1217_MAX_X_HIGHER4;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->dinfo.maxX = (req[1] & 0xF) << 8;
++
++ req[0] = TMA1217_MAX_X_LOWER8;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->dinfo.maxX = ts->dinfo.maxX | req[1];
++
++ req[0] = TMA1217_MAX_Y_HIGHER4;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->dinfo.maxY = (req[1] & 0xF) << 8;
++
++ req[0] = TMA1217_MAX_Y_LOWER8;
++ retval = cp_tm1217_read(ts, req, 1);
++ ts->dinfo.maxY = ts->dinfo.maxY | req[1];
++
++ return 0;
++
++}
++
++/*
++ * Set up a GPIO for use as the interrupt. We can't simply do this at
++ * boot time because the GPIO drivers themselves may not be around at
++ * boot/firmware set up time to do the work. Instead defer it to driver
++ * detection.
++ */
++
++static int cp_tm1217_setup_gpio_irq(struct cp_tm1217_device *ts)
++{
++ int retval;
++
++ /* Hook up the irq handler */
++ retval = gpio_request(ts->gpio, "cp_tm1217_touch");
++ if (retval < 0) {
++ dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
++ retval);
++ return retval;
++ }
++
++ retval = gpio_direction_input(ts->gpio);
++ if (retval < 0) {
++ dev_err(ts->dev,
++ "cp_tm1217: GPIO direction configuration failed, error %d\n",
++ retval);
++ gpio_free(ts->gpio);
++ return retval;
++ }
++
++ retval = gpio_to_irq(ts->gpio);
++ if (retval < 0) {
++ dev_err(ts->dev, "cp_tm1217: GPIO to IRQ failedi,"
++ " error %d\n", retval);
++ gpio_free(ts->gpio);
++ }
++ dev_dbg(ts->dev,
++ "cp_tm1217: Got IRQ number is %d for GPIO %d\n",
++ retval, ts->gpio);
++ return retval;
++}
++
++static int cp_tm1217_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct cp_tm1217_device *ts;
++ struct input_dev *input_dev;
++ struct input_dev_info *input_info;
++ struct cp_tm1217_platform_data *pdata;
++ u8 req[2];
++ int i, retval;
++
++ /* No pdata is fine - we then use "normal" IRQ mode */
++
++ pdata = client->dev.platform_data;
++
++ ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL);
++ if (!ts) {
++ dev_err(ts->dev,
++ "cp_tm1217: Private Device Struct alloc failed\n");
++ return -ENOMEM;
++ }
++
++ ts->client = client;
++ ts->dev = &client->dev;
++ i2c_set_clientdata(client, ts);
++
++ ts->thread_running = 0;
++ mutex_init(&ts->thread_mutex);
++
++ /* Reset the Controller */
++ req[0] = TMA1217_DEVICE_CMD_RESET;
++ req[1] = 0x1;
++ retval = cp_tm1217_write(ts, req, 1);
++ if (retval != 1) {
++ dev_err(ts->dev, "cp_tm1217: Controller reset failed\n");
++ kfree(ts);
++ return -EIO;
++ }
++
++ /* Clear up the interrupt status from reset. */
++ req[0] = TMA1217_INT_STATUS;
++ retval = cp_tm1217_read(ts, req, 1);
++
++ /* Mask all the interrupts */
++ retval = cp_tm1217_mask_interrupt(ts);
++
++ /* Read the controller information */
++ cp_tm1217_init_data(ts);
++
++ /* The following code will register multiple event devices when
++ multi-pointer is enabled, the code has not been tested
++ with MPX */
++ for (i = 0; i < TOUCH_SUPPORTED; i++) {
++ input_dev = input_allocate_device();
++ if (input_dev == NULL) {
++ kfree(ts);
++ dev_err(ts->dev,
++ "cp_tm1217:Input Device Struct alloc failed\n");
++ return -ENOMEM;
++ }
++ input_info = &ts->cp_input_info[i];
++ snprintf(input_info->name, sizeof(input_info->name),
++ "cp_tm1217_touchscreen_%d", i);
++ input_dev->name = input_info->name;
++ snprintf(input_info->phys, sizeof(input_info->phys),
++ "%s/input%d", dev_name(&client->dev), i);
++
++ input_dev->phys = input_info->phys;
++ input_dev->id.bustype = BUS_I2C;
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
++ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++
++ input_set_abs_params(input_dev, ABS_X, 0, ts->dinfo.maxX, 0, 0);
++ input_set_abs_params(input_dev, ABS_Y, 0, ts->dinfo.maxY, 0, 0);
++
++ retval = input_register_device(input_dev);
++ if (retval) {
++ dev_err(ts->dev,
++ "Input dev registration failed for %s\n",
++ input_dev->name);
++ goto fail;
++ }
++ input_info->input = input_dev;
++ }
++
++ /* Setup the reporting mode to send an interrupt only when
++ finger arrives or departs. */
++ req[0] = TMA1217_REPORT_MODE;
++ req[1] = 0x02;
++ retval = cp_tm1217_write(ts, req, 1);
++
++ /* Setup the device to no sleep mode for now and make it configured */
++ req[0] = TMA1217_DEVICE_CTRL;
++ req[1] = 0x84;
++ retval = cp_tm1217_write(ts, req, 1);
++
++ /* Check for the status of the device */
++ req[0] = TMA1217_DEV_STATUS;
++ retval = cp_tm1217_read(ts, req, 1);
++ if (req[1] != 0) {
++ dev_err(ts->dev,
++ "cp_tm1217: Device Status 0x%x != 0: config failed\n",
++ req[1]);
++
++ retval = -EIO;
++ goto fail;
++ }
++
++ if (pdata && pdata->gpio) {
++ ts->gpio = pdata->gpio;
++ retval = cp_tm1217_setup_gpio_irq(ts);
++ } else
++ retval = client->irq;
++
++ if (retval < 0) {
++ dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
++ retval);
++ goto fail;
++ }
++
++ client->irq = retval;
++
++
++ retval = request_threaded_irq(client->irq,
++ NULL, cp_tm1217_sample_thread,
++ IRQF_TRIGGER_FALLING, "cp_tm1217_touch", ts);
++ if (retval < 0) {
++ dev_err(ts->dev, "cp_tm1217: Request IRQ error %d\n", retval);
++ goto fail_gpio;
++ }
++
++ /* Unmask the interrupts */
++ retval = cp_tm1217_unmask_interrupt(ts);
++ if (retval == 0)
++ return 0;
++
++ free_irq(client->irq, ts);
++fail_gpio:
++ if (ts->gpio)
++ gpio_free(ts->gpio);
++fail:
++ /* Clean up before returning failure */
++ for (i = 0; i < TOUCH_SUPPORTED; i++) {
++ if (ts->cp_input_info[i].input) {
++ input_unregister_device(ts->cp_input_info[i].input);
++ input_free_device(ts->cp_input_info[i].input);
++ }
++ }
++ kfree(ts);
++ return retval;
++
++}
++
++/*
++ * cp_tm1217 suspend
++ *
++ */
++static int cp_tm1217_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct cp_tm1217_device *ts = i2c_get_clientdata(client);
++ u8 req[2];
++ int retval;
++
++ /* Put the controller to sleep */
++ req[0] = TMA1217_DEVICE_CTRL;
++ retval = cp_tm1217_read(ts, req, 1);
++ req[1] = (req[1] & 0xF8) | 0x1;
++ retval = cp_tm1217_write(ts, req, 1);
++
++ if (device_may_wakeup(&client->dev))
++ enable_irq_wake(client->irq);
++
++ return 0;
++}
++
++/*
++ * cp_tm1217_resume
++ *
++ */
++static int cp_tm1217_resume(struct i2c_client *client)
++{
++ struct cp_tm1217_device *ts = i2c_get_clientdata(client);
++ u8 req[2];
++ int retval;
++
++ /* Take the controller out of sleep */
++ req[0] = TMA1217_DEVICE_CTRL;
++ retval = cp_tm1217_read(ts, req, 1);
++ req[1] = (req[1] & 0xF8) | 0x4;
++ retval = cp_tm1217_write(ts, req, 1);
++
++ /* Restore the register settings since the power to the
++ controller could have been cut off */
++
++ /* Setup the reporting mode to send an interrupt only when
++ finger arrives or departs. */
++ req[0] = TMA1217_REPORT_MODE;
++ req[1] = 0x02;
++ retval = cp_tm1217_write(ts, req, 1);
++
++ /* Setup the device to no sleep mode for now and make it configured */
++ req[0] = TMA1217_DEVICE_CTRL;
++ req[1] = 0x84;
++ retval = cp_tm1217_write(ts, req, 1);
++
++ /* Setup the interrupt mask */
++ retval = cp_tm1217_unmask_interrupt(ts);
++
++ if (device_may_wakeup(&client->dev))
++ disable_irq_wake(client->irq);
++
++ return 0;
++}
++
++/*
++ * cp_tm1217_remove
++ *
++ */
++static int cp_tm1217_remove(struct i2c_client *client)
++{
++ struct cp_tm1217_device *ts = i2c_get_clientdata(client);
++ int i;
++
++ free_irq(client->irq, ts);
++ if (ts->gpio)
++ gpio_free(ts->gpio);
++ for (i = 0; i < TOUCH_SUPPORTED; i++)
++ input_unregister_device(ts->cp_input_info[i].input);
++ kfree(ts);
++ return 0;
++}
++
++static struct i2c_device_id cp_tm1217_idtable[] = {
++ { CPTM1217_DEVICE_NAME, 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, cp_tm1217_idtable);
++
++static struct i2c_driver cp_tm1217_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = CPTM1217_DRIVER_NAME,
++ },
++ .id_table = cp_tm1217_idtable,
++ .probe = cp_tm1217_probe,
++ .remove = cp_tm1217_remove,
++ .suspend = cp_tm1217_suspend,
++ .resume = cp_tm1217_resume,
++};
++
++static int __init clearpad_tm1217_init(void)
++{
++ return i2c_add_driver(&cp_tm1217_driver);
++}
++
++static void __exit clearpad_tm1217_exit(void)
++{
++ i2c_del_driver(&cp_tm1217_driver);
++}
++
++module_init(clearpad_tm1217_init);
++module_exit(clearpad_tm1217_exit);
++
++MODULE_AUTHOR("Ramesh Agarwal <ramesh.agarwal@intel.com>");
++MODULE_DESCRIPTION("Synaptics TM1217 TouchScreen Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
+@@ -0,0 +1,558 @@
++/*
++ * cy8ctmg110_ts.c Driver for cypress touch screen controller
++ * Copyright (c) 2009 Aava Mobile
++ *
++ * Some cleanups by Alan Cox <alan@linux.intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/input.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/i2c.h>
++#include <linux/timer.h>
++#include <linux/gpio.h>
++#include <linux/hrtimer.h>
++
++#define CY8CTMG110_DRIVER_NAME "cy8ctmg110"
++
++/* HW definitions */
++#define CY8CTMG110_RESET_PIN_GPIO 43
++#define CY8CTMG110_IRQ_PIN_GPIO 59
++#define CY8CTMG110_I2C_ADDR 0x38
++#define CY8CTMG110_TOUCH_IRQ 21
++
++/* Touch coordinates */
++#define CY8CTMG110_X_MIN 0
++#define CY8CTMG110_Y_MIN 0
++#define CY8CTMG110_X_MAX 864
++#define CY8CTMG110_Y_MAX 480
++
++
++/* cy8ctmg110 register definitions */
++#define CY8CTMG110_TOUCH_WAKEUP_TIME 0
++#define CY8CTMG110_TOUCH_SLEEP_TIME 2
++#define CY8CTMG110_TOUCH_X1 3
++#define CY8CTMG110_TOUCH_Y1 5
++#define CY8CTMG110_TOUCH_X2 7
++#define CY8CTMG110_TOUCH_Y2 9
++#define CY8CTMG110_FINGERS 11
++#define CY8CTMG110_GESTURE 12
++#define CY8CTMG110_REG_MAX 13
++
++#define CY8CTMG110_POLL_TIMER_DELAY (1000*1000*100)
++#define TOUCH_MAX_I2C_FAILS 50
++#define MAX_FINGERS 2
++
++/* Polling mode */
++static int polling;
++module_param(polling, int, 0);
++MODULE_PARM_DESC(polling, "Set to enable polling of the touchscreen");
++
++
++/*
++ * The touch position structure.
++ */
++struct ts_event {
++ int x1;
++ int y1;
++ int x2;
++ int y2;
++ int fingers;
++ int gesture;
++};
++
++/*
++ * The touch driver structure.
++ */
++struct cy8ctmg110 {
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ struct input_dev *input[MAX_FINGERS];
++ char phys[MAX_FINGERS][32];
++#else
++ struct input_dev *input;
++ char phys[32];
++#endif
++ struct ts_event tc;
++ struct i2c_client *client;
++ spinlock_t lock;
++ bool sleepmode;
++ int polling;
++ struct hrtimer timer;
++};
++
++/*
++ * cy8ctmg110_power is the routine that is called when touch hardware
++ * will powered off or on.
++ */
++static void cy8ctmg110_power(int poweron)
++{
++ gpio_direction_output(CY8CTMG110_RESET_PIN_GPIO, 1 - poweron);
++}
++
++/*
++ * cy8ctmg110_write_req write regs to the i2c devices
++ */
++static int cy8ctmg110_write_req(struct cy8ctmg110 *tsc, unsigned char reg,
++ unsigned char len, unsigned char *value)
++{
++ struct i2c_client *client = tsc->client;
++ unsigned int ret;
++ unsigned char i2c_data[] = { 0, 0, 0, 0, 0, 0 };
++ struct i2c_msg msg[] = {
++ {client->addr, 0, len + 1, i2c_data},
++ };
++
++ i2c_data[0] = reg;
++ memcpy(i2c_data + 1, value, len);
++
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1) {
++ dev_err(&client->dev,
++ "cy8ctmg110 touch : i2c write data cmd failed\n");
++ return ret;
++ }
++ return 0;
++}
++
++/*
++ * cy8ctmg110_read_req read regs from i2c device
++ */
++
++static int cy8ctmg110_read_req(struct cy8ctmg110 *tsc,
++ unsigned char *i2c_data, unsigned char len, unsigned char cmd)
++{
++ struct i2c_client *client = tsc->client;
++ unsigned int ret;
++ unsigned char regs_cmd[2] = { 0, 0 };
++ struct i2c_msg msg1[] = {
++ {client->addr, 0, 1, regs_cmd},
++ };
++ struct i2c_msg msg2[] = {
++ {client->addr, I2C_M_RD, len, i2c_data},
++ };
++
++ regs_cmd[0] = cmd;
++
++ /* first write slave position to i2c devices */
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1)
++ return ret;
++
++ /* Second read data from position */
++ ret = i2c_transfer(client->adapter, msg2, 1);
++ if (ret != 1)
++ return ret;
++ return 0;
++}
++
++/*
++ * cy8ctmg110_touch_pos check touch position from i2c devices
++ */
++static int cy8ctmg110_touch_pos(struct cy8ctmg110 *tsc)
++{
++ unsigned char reg_p[CY8CTMG110_REG_MAX];
++
++ memset(reg_p, 0, CY8CTMG110_REG_MAX);
++
++ /* Reading coordinates */
++ if (cy8ctmg110_read_req(tsc, reg_p, CY8CTMG110_REG_MAX, 0) != 0)
++ return -EIO;
++
++ /*
++ * Position coordinates are big-endian
++ *
++ * NOTE: Protocol byte order is swapped from what the hardware
++ * is reporting, so swap them here.
++ */
++ tsc->tc.y1 = be16_to_cpup((__be16 *)&reg_p[CY8CTMG110_TOUCH_X1]);
++ tsc->tc.x1 = be16_to_cpup((__be16 *)&reg_p[CY8CTMG110_TOUCH_Y1]);
++ tsc->tc.y2 = be16_to_cpup((__be16 *)&reg_p[CY8CTMG110_TOUCH_X2]);
++ tsc->tc.x2 = be16_to_cpup((__be16 *)&reg_p[CY8CTMG110_TOUCH_Y2]);
++
++ /*
++ * TODO:
++ *
++ * Add 'gesture' as ABS_MISC data or ??? Not sure what the
++ * correct input API to use. The 'gesture' is a magic code that
++ * maps to the Cypress controller detecting a given
++ * gesture...
++ *
++ * Through experimentation, it appears to report:
++ * 0x08 two-finger swipe left [or 'up' if axis swap]
++ * 0x09 two-finger swipe down [or 'right' if axis swap]
++ * 0x0a two-finger swipe right [or 'down' if axis swap]
++ * 0x0b two-finger swipe up [or 'left' if axis swap]
++ * 0x0c pinch
++ * 0x0d unpinch
++ */
++ tsc->tc.gesture = reg_p[CY8CTMG110_GESTURE];
++
++ /* Number of contact points: 0, 1, 2 */
++ tsc->tc.fingers = reg_p[CY8CTMG110_FINGERS];
++ if (tsc->tc.fingers > MAX_FINGERS)
++ tsc->tc.fingers = 0;
++
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ /*
++ * Set/Clear BTN_TOUCH bit based on if there is an active contact
++ * point on that input device.
++ */
++ input_event(tsc->input[0], EV_KEY, BTN_TOUCH, tsc->tc.fingers > 0);
++ input_event(tsc->input[1], EV_KEY, BTN_TOUCH, tsc->tc.fingers > 1);
++
++ /*
++ * Track the '1st' finger; this might need to be improved as it
++ * could result in a broken experience when the user presses
++ * with contact 1, presses contact 2, then releases contact 1.
++ */
++ if (tsc->tc.fingers > 0) {
++ input_report_abs(tsc->input[0], ABS_X, tsc->tc.x1);
++ input_report_abs(tsc->input[0], ABS_Y, tsc->tc.y1);
++ }
++ input_sync(tsc->input[0]);
++ if (tsc->tc.fingers > 1) {
++ input_report_abs(tsc->input[1], ABS_X, tsc->tc.x2);
++ input_report_abs(tsc->input[1], ABS_Y, tsc->tc.y2);
++ }
++ input_sync(tsc->input[1]);
++#else
++ /* Set/Clear BTN_TOUCH bit based on if any contact points */
++ input_event(tsc->input, EV_KEY, BTN_TOUCH, tsc->tc.fingers);
++
++ /*
++ * Track the '1st' finger; this might need to be improved as it
++ * could result in a broken experience when the user presses
++ * with contact 1, presses contact 2, then releases contact 1.
++ */
++ if (tsc->tc.fingers) {
++ input_report_abs(tsc->input, ABS_X, tsc->tc.x1);
++ input_report_abs(tsc->input, ABS_Y, tsc->tc.y1);
++ }
++ input_sync(tsc->input);
++#endif
++ return 0;
++}
++
++/*
++ * If the interrupt isn't in use the touch positions can be read by polling
++ */
++static enum hrtimer_restart cy8ctmg110_timer(struct hrtimer *handle)
++{
++ struct cy8ctmg110 *ts = container_of(handle, struct cy8ctmg110, timer);
++
++ cy8ctmg110_touch_pos(ts);
++ hrtimer_start(&ts->timer,
++ ktime_set(0, CY8CTMG110_POLL_TIMER_DELAY), HRTIMER_MODE_REL);
++
++ return HRTIMER_NORESTART;
++}
++
++/*
++ * cy8ctmg110_init_controller set init value to touch controller
++ */
++static bool cy8ctmg110_set_sleepmode(struct cy8ctmg110 *ts)
++{
++ unsigned char reg_p[3];
++
++ if (ts->sleepmode == true) {
++ reg_p[0] = 0x00;
++ reg_p[1] = 0xff;
++ reg_p[2] = 5;
++ } else {
++ reg_p[0] = 0x10;
++ reg_p[1] = 0xff;
++ reg_p[2] = 0;
++ }
++
++ if (cy8ctmg110_write_req(ts, CY8CTMG110_TOUCH_WAKEUP_TIME, 3, reg_p))
++ return false;
++ return true;
++}
++
++/*
++ * cy8ctmg110_irq_handler irq handling function
++ */
++
++static irqreturn_t cy8ctmg110_threaded_irq_handler(int irq, void *dev_id)
++{
++ struct cy8ctmg110 *tsc = (struct cy8ctmg110 *) dev_id;
++ cy8ctmg110_touch_pos(tsc);
++ return IRQ_HANDLED;
++}
++
++static int cy8ctmg110_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct cy8ctmg110 *ts;
++ struct input_dev *input_dev;
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ int contact;
++#endif
++ int err;
++
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_READ_WORD_DATA))
++ return -EIO;
++
++ ts = kzalloc(sizeof(struct cy8ctmg110), GFP_KERNEL);
++ input_dev = input_allocate_device();
++
++ if (!ts || !input_dev) {
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++
++ ts->client = client;
++ i2c_set_clientdata(client, ts);
++
++ ts->input = input_dev;
++ ts->polling = polling;
++
++ snprintf(ts->phys, sizeof(ts->phys), "%s/input0",
++ dev_name(&client->dev));
++
++ input_dev->name = CY8CTMG110_DRIVER_NAME " Touchscreen";
++ input_dev->phys = ts->phys;
++ input_dev->id.bustype = BUS_I2C;
++
++ spin_lock_init(&ts->lock);
++
++ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
++ input_set_capability(input_dev, EV_ABS, ABS_X);
++ input_set_capability(input_dev, EV_ABS, ABS_Y);
++
++ /* Initialize these with 4 fuzz for jitter... */
++ input_set_abs_params(input_dev, ABS_X, CY8CTMG110_X_MIN,
++ CY8CTMG110_X_MAX, 4, 0);
++ input_set_abs_params(input_dev, ABS_Y, CY8CTMG110_Y_MIN,
++ CY8CTMG110_Y_MAX, 4, 0);
++
++ err = gpio_request(CY8CTMG110_RESET_PIN_GPIO, NULL);
++
++ if (err) {
++ dev_err(&client->dev,
++ "cy8ctmg110_ts: Unable to request GPIO pin %d.\n",
++ CY8CTMG110_RESET_PIN_GPIO);
++ goto err_free_mem;
++ }
++ cy8ctmg110_power(1);
++ cy8ctmg110_set_sleepmode(ts);
++
++ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ ts->timer.function = cy8ctmg110_timer;
++
++ if (ts->polling == 0) {
++ /* Can we fall back to polling if these bits fail - something
++ to look at for robustness */
++ err = gpio_request(CY8CTMG110_IRQ_PIN_GPIO, "touch_irq_key");
++ if (err < 0) {
++ dev_err(&client->dev,
++ "cy8ctmg110_ts: failed to request GPIO %d, error %d\n",
++ CY8CTMG110_IRQ_PIN_GPIO, err);
++ ts->polling = 1;
++ goto failed_irq;
++ }
++ err = gpio_direction_input(CY8CTMG110_IRQ_PIN_GPIO);
++
++ if (err < 0) {
++ dev_err(&client->dev,
++ "cy8ctmg110_ts: failed to configure input direction for GPIO %d, error %d\n",
++ CY8CTMG110_IRQ_PIN_GPIO, err);
++ gpio_free(CY8CTMG110_IRQ_PIN_GPIO);
++ ts->polling = 1;
++ goto failed_irq;
++ }
++ client->irq = gpio_to_irq(CY8CTMG110_IRQ_PIN_GPIO);
++ if (client->irq < 0) {
++ err = client->irq;
++ dev_err(&client->dev,
++ "cy8ctmg110_ts: Unable to get irq number for GPIO %d, error %d\n",
++ CY8CTMG110_IRQ_PIN_GPIO, err);
++ gpio_free(CY8CTMG110_IRQ_PIN_GPIO);
++ ts->polling = 1;
++ goto failed_irq;
++ }
++ err = request_threaded_irq(client->irq, NULL, \
++ cy8ctmg110_threaded_irq_handler, \
++ IRQF_TRIGGER_RISING | IRQF_SHARED, \
++ "cy8ctmg110_irq", ts);
++ if (err < 0) {
++ dev_err(&client->dev,
++ "cy8ctmg110 irq %d busy? error %d\n",
++ client->irq, err);
++ gpio_free(CY8CTMG110_IRQ_PIN_GPIO);
++ ts->polling = 1;
++ }
++ }
++failed_irq:
++ if (ts->polling)
++ hrtimer_start(&ts->timer, ktime_set(10, 0), HRTIMER_MODE_REL);
++
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ for (contact = 0; contact < MAX_FINGERS; contact++) {
++ err = input_register_device(ts->input[contact]);
++ if (err) {
++ /*
++ * For the input devices that have successfully
++ * registered before this failure, call
++ * input_unregister_device() and clear the input[]
++ * value so the err_free_mem code path doesn't call
++ * input_free_device on it
++ */
++ while (contact) {
++ input_unregister_device(ts->input[contact]);
++ ts->input[contact] = NULL;
++ contact--;
++ }
++ break;
++ }
++ }
++#else
++ err = input_register_device(ts->input);
++#endif
++
++ if (!err)
++ return 0;
++
++ if (ts->polling)
++ hrtimer_cancel(&ts->timer);
++ else
++ free_irq(client->irq, ts);
++ gpio_free(CY8CTMG110_IRQ_PIN_GPIO);
++ gpio_free(CY8CTMG110_RESET_PIN_GPIO);
++err_free_mem:
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ for (contact = 0; contact < MAX_FINGERS; contact++) {
++ if (ts->input[contact])
++ input_free_device(ts->input[contact]);
++ }
++#else
++ input_free_device(ts->input);
++#endif
++ kfree(ts);
++ return err;
++}
++
++#ifdef CONFIG_PM
++/*
++ * cy8ctmg110_suspend
++ */
++
++static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct cy8ctmg110 *ts = i2c_get_clientdata(client);
++
++ if (ts->polling)
++ hrtimer_cancel(&ts->timer);
++ if (device_may_wakeup(&client->dev))
++ enable_irq_wake(client->irq);
++ else {
++ ts->sleepmode = true;
++ cy8ctmg110_set_sleepmode(ts);
++ cy8ctmg110_power(0);
++ }
++ return 0;
++}
++
++/*
++ * cy8ctmg110_resume
++ */
++
++static int cy8ctmg110_resume(struct i2c_client *client)
++{
++ struct cy8ctmg110 *ts = i2c_get_clientdata(client);
++
++ if (device_may_wakeup(&client->dev))
++ disable_irq_wake(client->irq);
++ else {
++ cy8ctmg110_power(1);
++ ts->sleepmode = false;
++ cy8ctmg110_set_sleepmode(ts);
++ }
++ if (ts->polling)
++ hrtimer_start(&ts->timer, ktime_set(10, 0), HRTIMER_MODE_REL);
++ return 0;
++}
++#endif
++
++/*
++ * cy8ctmg110_remove
++ */
++
++static int cy8ctmg110_remove(struct i2c_client *client)
++{
++ struct cy8ctmg110 *ts = i2c_get_clientdata(client);
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ int contact;
++#endif
++ cy8ctmg110_power(0);
++
++ if (ts->polling)
++ hrtimer_cancel(&ts->timer);
++ free_irq(client->irq, ts);
++#ifdef CONFIG_TOUCHSCREEN_CY8CTMG110_MULTIPLE_INPUT
++ for (contact = 0; contact < MAX_FINGERS; contact++)
++ input_unregister_device(ts->input[contact]);
++#else
++ input_unregister_device(ts->input);
++#endif
++ gpio_free(CY8CTMG110_IRQ_PIN_GPIO);
++ gpio_free(CY8CTMG110_RESET_PIN_GPIO);
++ kfree(ts);
++ return 0;
++}
++
++static struct i2c_device_id cy8ctmg110_idtable[] = {
++ {CY8CTMG110_DRIVER_NAME, 1},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, cy8ctmg110_idtable);
++
++static struct i2c_driver cy8ctmg110_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = CY8CTMG110_DRIVER_NAME,
++ .bus = &i2c_bus_type,
++ },
++ .id_table = cy8ctmg110_idtable,
++ .probe = cy8ctmg110_probe,
++ .remove = cy8ctmg110_remove,
++#ifdef CONFIG_PM
++ .suspend = cy8ctmg110_suspend,
++ .resume = cy8ctmg110_resume,
++#endif
++};
++
++static int __init cy8ctmg110_init(void)
++{
++ return i2c_add_driver(&cy8ctmg110_driver);
++}
++
++static void __exit cy8ctmg110_exit(void)
++{
++ i2c_del_driver(&cy8ctmg110_driver);
++}
++
++module_init(cy8ctmg110_init);
++module_exit(cy8ctmg110_exit);
++
++MODULE_AUTHOR("Samuli Konttila <samuli.konttila@aavamobile.com>");
++MODULE_DESCRIPTION("cy8ctmg110 TouchScreen Driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -176,6 +176,14 @@
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3944.
+
++config LEDS_LP5523
++ tristate "LED support for N.S. LP5523 LED driver chip"
++ depends on I2C
++ help
++ If you say yes here you get support for the National Semiconductor
++ LP5523 LED driver.
++ Module is called leds-lp5523
++
+ config LEDS_CLEVO_MAIL
+ tristate "Mail LED on Clevo notebook"
+ depends on X86 && SERIO_I8042 && DMI
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -23,6 +23,7 @@
+ obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
+ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+ obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
++obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
+ obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
+ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
+ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
+--- /dev/null
++++ b/drivers/leds/leds-lp5523.c
+@@ -0,0 +1,1008 @@
++/*
++ * lp5523.c - LP5523 LED Driver
++ *
++ * Copyright (C) 2010 Nokia Corporation
++ *
++ * Author: Mathias Nyman <mathias.nyman@nokia.com>
++ * Author: Samu Onkalo <samu.p.onkalo@nokia.com>
++ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ */
++
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/i2c.h>
++#include <linux/mutex.h>
++#include <linux/gpio.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/ctype.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/leds.h>
++#include <linux/leds-lp5523.h>
++
++#define LP5523_REG_ENABLE 0x00
++#define LP5523_REG_OP_MODE 0x01
++#define LP5523_REG_RATIOMETRIC_MSB 0x02
++#define LP5523_REG_RATIOMETRIC_LSB 0x03
++#define LP5523_REG_ENABLE_LEDS_MSB 0x04
++#define LP5523_REG_ENABLE_LEDS_LSB 0x05
++#define LP5523_REG_LED_CNTRL_BASE 0x06
++#define LP5523_REG_LED_PWM_BASE 0x16
++#define LP5523_REG_LED_CURRENT_BASE 0x26
++#define LP5523_REG_CONFIG 0x36
++#define LP5523_REG_CHANNEL1_PC 0x37
++#define LP5523_REG_CHANNEL2_PC 0x38
++#define LP5523_REG_CHANNEL3_PC 0x39
++#define LP5523_REG_STATUS 0x3a
++#define LP5523_REG_GPO 0x3b
++#define LP5523_REG_VARIABLE 0x3c
++#define LP5523_REG_RESET 0x3d
++#define LP5523_REG_TEMP_CTRL 0x3e
++#define LP5523_REG_TEMP_READ 0x3f
++#define LP5523_REG_TEMP_WRITE 0x40
++#define LP5523_REG_LED_TEST_CTRL 0x41
++#define LP5523_REG_LED_TEST_ADC 0x42
++#define LP5523_REG_ENG1_VARIABLE 0x45
++#define LP5523_REG_ENG2_VARIABLE 0x46
++#define LP5523_REG_ENG3_VARIABLE 0x47
++#define LP5523_REG_MASTER_FADER1 0x48
++#define LP5523_REG_MASTER_FADER2 0x49
++#define LP5523_REG_MASTER_FADER3 0x4a
++#define LP5523_REG_CH1_PROG_START 0x4c
++#define LP5523_REG_CH2_PROG_START 0x4d
++#define LP5523_REG_CH3_PROG_START 0x4e
++#define LP5523_REG_PROG_PAGE_SEL 0x4f
++#define LP5523_REG_PROG_MEM 0x50
++
++#define LP5523_CMD_LOAD 0x15 /* 00010101 */
++#define LP5523_CMD_RUN 0x2a /* 00101010 */
++#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
++
++#define LP5523_ENABLE 0x40
++#define LP5523_AUTO_INC 0x40
++#define LP5523_PWR_SAVE 0x20
++#define LP5523_PWM_PWR_SAVE 0x04
++#define LP5523_CP_1 0x08
++#define LP5523_CP_1_5 0x10
++#define LP5523_CP_AUTO 0x18
++#define LP5523_INT_CLK 0x01
++#define LP5523_AUTO_CLK 0x02
++#define LP5523_EN_LEDTEST 0x80
++#define LP5523_LEDTEST_DONE 0x80
++
++#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
++#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
++#define LP5523_PROGRAM_PAGES 6
++#define LP5523_ADC_SHORTCIRC_LIM 80
++
++#define LP5523_LEDS 9
++#define LP5523_ENGINES 3
++
++#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
++
++#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
++
++#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
++
++#define LP5523_EXT_CLK_USED 0x08
++
++#define LP5523_CURRENT_CONTROL_OFFSET (LP5523_REG_LED_CURRENT_BASE-1)
++
++#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
++#define SHIFT_MASK(id) (((id) - 1) * 2)
++
++struct lp5523_engine {
++ const struct attribute_group *attributes;
++ int id;
++ u8 mode;
++ u8 prog_page;
++ u8 mux_page;
++ u16 led_mux;
++ u8 engine_mask;
++};
++
++struct lp5523_led {
++ int id;
++ u8 chan_nr;
++ u8 led_current;
++ struct led_classdev cdev;
++};
++
++struct lp5523_chip {
++ struct mutex lock; /* protect against parallel control */
++ struct i2c_client *client;
++ struct lp5523_engine engines[LP5523_ENGINES];
++ struct lp5523_led leds[LP5523_LEDS];
++ struct lp5523_platform_data *pdata;
++ u8 num_channels;
++ u8 num_leds;
++};
++
++#define cdev_to_led(c) container_of(c, struct lp5523_led, cdev)
++
++static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
++{
++ return container_of(engine, struct lp5523_chip,
++ engines[engine->id - 1]);
++}
++
++static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
++{
++ return container_of(led, struct lp5523_chip,
++ leds[led->id]);
++}
++
++static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
++static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
++static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
++
++static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
++{
++ return i2c_smbus_write_byte_data(client, reg, value);
++}
++
++static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
++{
++ s32 ret = i2c_smbus_read_byte_data(client, reg);
++
++ if (ret < 0)
++ return -EIO;
++
++ *buf = ret;
++ return 0;
++}
++
++static int lp5523_detect(struct i2c_client *client)
++{
++ int ret;
++ u8 buf;
++
++ ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
++ if (ret)
++ return ret;
++ ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
++ if (ret)
++ return ret;
++ if (buf == 0x40)
++ return 0;
++ else
++ return -ENODEV;
++}
++
++static int lp5523_configure(struct i2c_client *client)
++{
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ int ret = 0;
++ u8 status;
++
++ /* one pattern per engine setting led mux start and stop addresses */
++ static u8 pattern[][LP5523_PROGRAM_LENGTH] = {
++ { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
++ { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
++ { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
++ };
++
++ lp5523_write(client, LP5523_REG_RESET, 0xff);
++
++ msleep(10);
++
++ ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
++ /* Chip startup time after reset is 500 us */
++ msleep(1);
++
++ ret |= lp5523_write(client, LP5523_REG_CONFIG,
++ LP5523_AUTO_INC | LP5523_PWR_SAVE |
++ LP5523_CP_AUTO | LP5523_AUTO_CLK |
++ LP5523_PWM_PWR_SAVE);
++
++ /* turn on all leds */
++ ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
++ ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
++
++ /* hardcode 32 bytes of memory for each engine from program memory */
++ ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
++ ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
++ ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
++
++ /* write led mux address space for each channel */
++ ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
++ ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
++ ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
++
++ if (ret) {
++ dev_err(&client->dev, "could not load mux programs\n");
++ return -1;
++ }
++
++ /* set all engines exec state and mode to run 00101010 */
++ ret |= lp5523_write(client, LP5523_REG_ENABLE,
++ (LP5523_CMD_RUN | LP5523_ENABLE));
++
++ ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
++
++ if (ret) {
++ dev_err(&client->dev, "could not start mux programs\n");
++ return -1;
++ }
++
++ /* Wait 3ms and check the engine status */
++ msleep(3);
++ lp5523_read(client, LP5523_REG_STATUS, &status);
++ status &= LP5523_ENG_STATUS_MASK;
++
++ if (status == LP5523_ENG_STATUS_MASK) {
++ dev_dbg(&client->dev, "all engines configured\n");
++ } else {
++ dev_info(&client->dev, "status == %x\n", status);
++		dev_err(&client->dev, "could not configure LED engine\n");
++ return -1;
++ }
++
++ dev_info(&client->dev, "disabling engines\n");
++
++ ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
++
++ return ret;
++}
++
++static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
++{
++ struct lp5523_chip *chip = engine_to_lp5523(engine);
++ struct i2c_client *client = chip->client;
++ int ret;
++ u8 engine_state;
++
++ ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
++ if (ret)
++ goto fail;
++
++ engine_state &= ~(engine->engine_mask);
++
++ /* set mode only for this engine */
++ mode &= engine->engine_mask;
++
++ engine_state |= mode;
++
++ ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
++fail:
++ return ret;
++}
++
++static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
++{
++ struct lp5523_chip *chip = engine_to_lp5523(engine);
++ struct i2c_client *client = chip->client;
++ int ret = 0;
++
++ ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
++
++ ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
++ ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
++ (u8)(mux >> 8));
++ ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
++ engine->led_mux = mux;
++
++ return ret;
++}
++
++static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
++{
++ struct lp5523_chip *chip = engine_to_lp5523(engine);
++ struct i2c_client *client = chip->client;
++
++ int ret = 0;
++
++ ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
++
++ ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
++ engine->prog_page);
++ ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
++ LP5523_PROGRAM_LENGTH, pattern);
++
++ return ret;
++}
++
++static int lp5523_run_program(struct lp5523_engine *engine)
++{
++ struct lp5523_chip *chip = engine_to_lp5523(engine);
++ struct i2c_client *client = chip->client;
++ int ret;
++
++ ret = lp5523_write(client, LP5523_REG_ENABLE,
++ LP5523_CMD_RUN | LP5523_ENABLE);
++ if (ret)
++ goto fail;
++
++ ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
++fail:
++ return ret;
++}
++
++static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
++{
++ int i;
++ u16 tmp_mux = 0;
++ len = len < LP5523_LEDS ? len : LP5523_LEDS;
++ for (i = 0; i < len; i++) {
++ switch (buf[i]) {
++ case '1':
++ tmp_mux |= (1 << i);
++ break;
++ case '0':
++ break;
++ case '\n':
++ i = len;
++ break;
++ default:
++ return -1;
++ }
++ }
++ *mux = tmp_mux;
++
++ return 0;
++}
++
++static void lp5523_mux_to_array(u16 led_mux, char *array)
++{
++ int i, pos = 0;
++ for (i = 0; i < LP5523_LEDS; i++)
++ pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
++
++ array[pos] = '\0';
++}
++
++/*--------------------------------------------------------------*/
++/* Sysfs interface */
++/*--------------------------------------------------------------*/
++
++static ssize_t show_engine_leds(struct device *dev,
++ struct device_attribute *attr,
++ char *buf, int nr)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ char mux[LP5523_LEDS + 1];
++
++ lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
++
++ return sprintf(buf, "%s\n", mux);
++}
++
++#define show_leds(nr) \
++static ssize_t show_engine##nr##_leds(struct device *dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++{ \
++ return show_engine_leds(dev, attr, buf, nr); \
++}
++show_leds(1)
++show_leds(2)
++show_leds(3)
++
++static ssize_t store_engine_leds(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len, int nr)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ u16 mux = 0;
++
++ if (lp5523_mux_parse(buf, &mux, len))
++ return -EINVAL;
++
++ if (lp5523_load_mux(&chip->engines[nr - 1], mux))
++ return -EINVAL;
++
++ return len;
++}
++
++#define store_leds(nr) \
++static ssize_t store_engine##nr##_leds(struct device *dev, \
++ struct device_attribute *attr, \
++ const char *buf, size_t len) \
++{ \
++ return store_engine_leds(dev, attr, buf, len, nr); \
++}
++store_leds(1)
++store_leds(2)
++store_leds(3)
++
++static ssize_t lp5523_selftest(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ int i, ret, pos = 0;
++ u8 status, adc, vdd;
++
++ mutex_lock(&chip->lock);
++
++ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
++ if (ret < 0)
++ goto fail;
++
++ /* Check that ext clock is really in use if requested */
++ if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
++ if ((status & LP5523_EXT_CLK_USED) == 0)
++ goto fail;
++
++ /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
++ lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
++ LP5523_EN_LEDTEST | 16);
++ msleep(3);
++ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
++ if (!(status & LP5523_LEDTEST_DONE))
++ msleep(3);
++
++ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
++ vdd--; /* There may be some fluctuation in measurement */
++
++ for (i = 0; i < LP5523_LEDS; i++) {
++ /* Skip non-existing channels */
++ if (chip->pdata->led_config[i].led_current == 0)
++ continue;
++
++ lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
++ /* let current stabilize 2ms before measurements start */
++ msleep(2);
++ lp5523_write(chip->client,
++ LP5523_REG_LED_TEST_CTRL,
++ LP5523_EN_LEDTEST | i);
++ /* ledtest takes 2.7ms */
++ msleep(3);
++ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
++ if (!(status & LP5523_LEDTEST_DONE))
++ msleep(3);
++ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
++
++ if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
++ pos += sprintf(buf + pos, "LED %d FAIL\n", i);
++
++ lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
++ }
++ if (pos == 0)
++ pos = sprintf(buf, "OK\n");
++ goto release_lock;
++fail:
++ pos = sprintf(buf, "FAIL\n");
++
++release_lock:
++ mutex_unlock(&chip->lock);
++
++ return pos;
++}
++
++static void lp5523_set_brightness(struct led_classdev *cdev,
++ enum led_brightness brightness)
++{
++ struct lp5523_led *led = cdev_to_led(cdev);
++ struct lp5523_chip *chip = led_to_lp5523(led);
++ struct i2c_client *client = chip->client;
++
++ mutex_lock(&chip->lock);
++
++ lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
++ (u8)brightness);
++
++ mutex_unlock(&chip->lock);
++}
++
++static int lp5523_do_store_load(struct lp5523_engine *engine,
++ const char *buf, size_t len)
++{
++ struct lp5523_chip *chip = engine_to_lp5523(engine);
++ struct i2c_client *client = chip->client;
++ int ret, nrchars, offset = 0, i = 0;
++ char c[3];
++ unsigned cmd;
++ u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
++
++ while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
++ /* separate sscanfs because length is working only for %s */
++ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
++ ret = sscanf(c, "%2x", &cmd);
++ if (ret != 1)
++ goto fail;
++ pattern[i] = (u8)cmd;
++
++ offset += nrchars;
++ i++;
++ }
++
++ /* pattern commands are always two bytes long */
++ if (i % 2)
++ goto fail;
++
++ mutex_lock(&chip->lock);
++
++ ret = lp5523_load_program(engine, pattern);
++ mutex_unlock(&chip->lock);
++
++ if (ret) {
++ dev_err(&client->dev, "failed loading pattern\n");
++ return ret;
++ }
++
++ return len;
++fail:
++ dev_err(&client->dev, "wrong pattern format\n");
++ return -EINVAL;
++}
++
++static ssize_t store_engine_load(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len, int nr)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
++}
++
++#define store_load(nr) \
++static ssize_t store_engine##nr##_load(struct device *dev, \
++ struct device_attribute *attr, \
++ const char *buf, size_t len) \
++{ \
++ return store_engine_load(dev, attr, buf, len, nr); \
++}
++store_load(1)
++store_load(2)
++store_load(3)
++
++static ssize_t show_engine_mode(struct device *dev,
++ struct device_attribute *attr,
++ char *buf, int nr)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ switch (chip->engines[nr - 1].mode) {
++ case LP5523_CMD_RUN:
++ return sprintf(buf, "run\n");
++ case LP5523_CMD_LOAD:
++ return sprintf(buf, "load\n");
++ case LP5523_CMD_DISABLED:
++ return sprintf(buf, "disabled\n");
++ default:
++ return sprintf(buf, "disabled\n");
++ }
++}
++
++#define show_mode(nr) \
++static ssize_t show_engine##nr##_mode(struct device *dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++{ \
++ return show_engine_mode(dev, attr, buf, nr); \
++}
++show_mode(1)
++show_mode(2)
++show_mode(3)
++
++static ssize_t store_engine_mode(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len, int nr)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ struct lp5523_engine *engine = &chip->engines[nr - 1];
++ mutex_lock(&chip->lock);
++
++ if (!strncmp(buf, "run", 3))
++ lp5523_set_mode(engine, LP5523_CMD_RUN);
++ else if (!strncmp(buf, "load", 4))
++ lp5523_set_mode(engine, LP5523_CMD_LOAD);
++ else if (!strncmp(buf, "disabled", 8))
++ lp5523_set_mode(engine, LP5523_CMD_DISABLED);
++
++ mutex_unlock(&chip->lock);
++ return len;
++}
++
++#define store_mode(nr) \
++static ssize_t store_engine##nr##_mode(struct device *dev, \
++ struct device_attribute *attr, \
++ const char *buf, size_t len) \
++{ \
++ return store_engine_mode(dev, attr, buf, len, nr); \
++}
++store_mode(1)
++store_mode(2)
++store_mode(3)
++
++static ssize_t show_current(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct led_classdev *led_cdev = dev_get_drvdata(dev);
++ struct lp5523_led *led = cdev_to_led(led_cdev);
++
++ return sprintf(buf, "%d\n", led->led_current);
++}
++
++static ssize_t store_current(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct led_classdev *led_cdev = dev_get_drvdata(dev);
++ struct lp5523_led *led = cdev_to_led(led_cdev);
++ struct lp5523_chip *chip = led_to_lp5523(led);
++ ssize_t ret;
++ unsigned long curr;
++
++ if (strict_strtoul(buf, 0, &curr))
++ return -EINVAL;
++
++ if (curr > 255)
++ return -EINVAL;
++
++ mutex_lock(&chip->lock);
++
++ ret = lp5523_write(chip->client,
++ LP5523_CURRENT_CONTROL_OFFSET + led->chan_nr,
++ (u8)curr);
++ mutex_unlock(&chip->lock);
++
++ if (ret < 0)
++ return ret;
++
++ led->led_current = (u8)curr;
++
++ return len;
++}
++
++/* led class device attributes */
++static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
++
++/* device attributes */
++static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
++ show_engine1_mode, store_engine1_mode);
++static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
++ show_engine2_mode, store_engine2_mode);
++static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
++ show_engine3_mode, store_engine3_mode);
++static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
++ show_engine1_leds, store_engine1_leds);
++static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
++ show_engine2_leds, store_engine2_leds);
++static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
++ show_engine3_leds, store_engine3_leds);
++static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
++static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
++static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
++static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
++
++static struct attribute *lp5523_attributes[] = {
++ &dev_attr_engine1_mode.attr,
++ &dev_attr_engine2_mode.attr,
++ &dev_attr_engine3_mode.attr,
++ &dev_attr_selftest.attr,
++ NULL
++};
++
++static struct attribute *lp5523_engine1_attributes[] = {
++ &dev_attr_engine1_load.attr,
++ &dev_attr_engine1_leds.attr,
++ NULL
++};
++
++static struct attribute *lp5523_engine2_attributes[] = {
++ &dev_attr_engine2_load.attr,
++ &dev_attr_engine2_leds.attr,
++ NULL
++};
++
++static struct attribute *lp5523_engine3_attributes[] = {
++ &dev_attr_engine3_load.attr,
++ &dev_attr_engine3_leds.attr,
++ NULL
++};
++
++static const struct attribute_group lp5523_group = {
++ .attrs = lp5523_attributes,
++};
++
++static const struct attribute_group lp5523_engine_group[] = {
++ {.attrs = lp5523_engine1_attributes },
++ {.attrs = lp5523_engine2_attributes },
++ {.attrs = lp5523_engine3_attributes },
++};
++
++static int lp5523_register_sysfs(struct i2c_client *client)
++{
++ struct device *dev = &client->dev;
++ int ret;
++
++ ret = sysfs_create_group(&dev->kobj, &lp5523_group);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
++static void lp5523_unregister_sysfs(struct i2c_client *client)
++{
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ struct device *dev = &client->dev;
++ int i;
++
++ sysfs_remove_group(&dev->kobj, &lp5523_group);
++
++ for (i = 0; i < LP5523_ENGINES; i++) {
++ if (chip->engines[i].mode == LP5523_CMD_LOAD)
++ sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
++ }
++
++ for (i = 0; i < chip->num_leds; i++)
++ device_remove_file(chip->leds[i].cdev.dev,
++ &dev_attr_led_current);
++}
++
++/*--------------------------------------------------------------*/
++/* Set chip operating mode */
++/*--------------------------------------------------------------*/
++static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
++{
++ /* engine to chip */
++ struct lp5523_chip *chip = engine_to_lp5523(engine);
++ struct i2c_client *client = chip->client;
++ struct device *dev = &client->dev;
++ int ret = 0;
++
++ /* if in that mode already do nothing, except for run */
++ if (mode == engine->mode && mode != LP5523_CMD_RUN)
++ return 0;
++
++ if (mode == LP5523_CMD_RUN)
++ ret = lp5523_run_program(engine);
++
++ else if (mode == LP5523_CMD_LOAD) {
++
++ lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
++ lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
++
++ ret = sysfs_create_group(&dev->kobj, engine->attributes);
++ if (ret)
++ return ret;
++ }
++
++ else if (mode == LP5523_CMD_DISABLED)
++ lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
++
++ /* remove load attribute from sysfs if not in load mode */
++ if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
++ sysfs_remove_group(&dev->kobj, engine->attributes);
++
++ engine->mode = mode;
++
++ return ret;
++}
++
++/*--------------------------------------------------------------*/
++/* Probe, Attach, Remove */
++/*--------------------------------------------------------------*/
++static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
++{
++ if (id < 1 || id > LP5523_ENGINES)
++ return -1;
++ engine->id = id;
++ engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
++ engine->prog_page = id - 1;
++ engine->mux_page = id + 2;
++ engine->attributes = &lp5523_engine_group[id - 1];
++
++ return 0;
++}
++
++static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
++ int chan, struct lp5523_platform_data *pdata)
++{
++ char name[32];
++ int res;
++
++ if (chan >= LP5523_LEDS)
++ return -EINVAL;
++
++ if (pdata->led_config[chan].led_current) {
++ led->led_current = pdata->led_config[chan].led_current;
++ led->chan_nr = pdata->led_config[chan].chan_nr;
++
++ snprintf(name, 32, "lp5523:channel%d", led->chan_nr);
++
++ led->cdev.name = name;
++ led->cdev.brightness_set = lp5523_set_brightness;
++ res = led_classdev_register(dev, &led->cdev);
++ if (res < 0) {
++			dev_err(dev, "couldn't register led on channel %d\n",
++ led->chan_nr);
++ return res;
++ }
++ res = device_create_file(led->cdev.dev, &dev_attr_led_current);
++ if (res < 0) {
++ dev_err(dev, "couldn't register current attribute\n");
++ led_classdev_unregister(&led->cdev);
++ return res;
++ }
++ } else {
++ led->led_current = 0;
++ }
++ return 0;
++}
++
++static struct i2c_driver lp5523_driver;
++
++static int lp5523_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct lp5523_chip *chip;
++ struct lp5523_platform_data *pdata;
++ int ret, i, led;
++
++ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
++ if (!chip)
++ return -ENOMEM;
++
++ i2c_set_clientdata(client, chip);
++ chip->client = client;
++
++ pdata = client->dev.platform_data;
++
++ if (!pdata) {
++ dev_err(&client->dev, "no platform data\n");
++ ret = -EINVAL;
++ goto fail1;
++ }
++
++ mutex_init(&chip->lock);
++
++ chip->pdata = pdata;
++
++ if (pdata->setup_resources) {
++ ret = pdata->setup_resources();
++ if (ret < 0)
++ goto fail1;
++ }
++
++ if (pdata->enable) {
++ pdata->enable(0);
++ msleep(1);
++ pdata->enable(1);
++ msleep(1); /* Spec says 500us */
++ }
++
++ ret = lp5523_detect(client);
++ if (ret)
++ goto fail2;
++
++ dev_info(&client->dev, "LP5523 Programmable led chip found\n");
++
++ /* Initialize engines */
++ for (i = 0; i < LP5523_ENGINES; i++) {
++ ret = lp5523_init_engine(&chip->engines[i], i + 1);
++ if (ret) {
++ dev_err(&client->dev, "error initializing engine\n");
++ goto fail2;
++ }
++ }
++ ret = lp5523_configure(client);
++ if (ret < 0) {
++ dev_err(&client->dev, "error configuring chip\n");
++ goto fail2;
++ }
++
++ /* Initialize leds */
++ chip->num_channels = pdata->num_channels;
++ chip->num_leds = 0;
++ led = 0;
++ for (i = 0; i < pdata->num_channels; i++) {
++ /* Do not initialize channels that are not connected */
++ if (pdata->led_config[i].led_current == 0)
++ continue;
++
++ chip->num_leds++;
++ ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
++ if (ret) {
++ dev_err(&client->dev, "error initializing leds\n");
++ goto fail3;
++ }
++
++ chip->leds[led].id = led;
++ /* Set LED current */
++ lp5523_write(client,
++ LP5523_CURRENT_CONTROL_OFFSET + chip->leds[led].chan_nr,
++ chip->leds[led].led_current);
++ led++;
++ }
++
++ ret = lp5523_register_sysfs(client);
++ if (ret) {
++ dev_err(&client->dev, "registering sysfs failed\n");
++ goto fail3;
++ }
++ return ret;
++fail3:
++ for (i = 0; i < chip->num_leds; i++)
++ led_classdev_unregister(&chip->leds[i].cdev);
++fail2:
++ if (pdata->enable)
++ pdata->enable(0);
++ if (pdata->release_resources)
++ pdata->release_resources();
++fail1:
++ kfree(chip);
++ return ret;
++}
++
++static int lp5523_remove(struct i2c_client *client)
++{
++ struct lp5523_chip *chip = i2c_get_clientdata(client);
++ int i;
++
++ lp5523_unregister_sysfs(client);
++
++ for (i = 0; i < chip->num_leds; i++)
++ led_classdev_unregister(&chip->leds[i].cdev);
++
++ if (chip->pdata->enable)
++ chip->pdata->enable(0);
++ if (chip->pdata->release_resources)
++ chip->pdata->release_resources();
++ kfree(chip);
++ return 0;
++}
++
++static const struct i2c_device_id lp5523_id[] = {
++ { "lp5523", 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, lp5523_id);
++
++static struct i2c_driver lp5523_driver = {
++ .driver = {
++ .name = "lp5523",
++ },
++ .probe = lp5523_probe,
++ .remove = lp5523_remove,
++ .id_table = lp5523_id,
++};
++
++static int __init lp5523_init(void)
++{
++ int ret;
++
++ ret = i2c_add_driver(&lp5523_driver);
++
++ if (ret < 0)
++ printk(KERN_ALERT "Adding lp5523 driver failed\n");
++
++ return ret;
++}
++
++static void __exit lp5523_exit(void)
++{
++ i2c_del_driver(&lp5523_driver);
++}
++
++module_init(lp5523_init);
++module_exit(lp5523_exit);
++
++MODULE_AUTHOR("Mathias Nyman, Samu Onkalo");
++MODULE_DESCRIPTION("LP5523 LED engine");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -284,6 +284,16 @@
+ This option enables addition debugging code for the SGI GRU driver. If
+ you are unsure, say N.
+
++config APDS9802ALS
++ tristate "Medfield Avago APDS9802 ALS Sensor module"
++ depends on I2C
++ help
++ If you say yes here you get support for the ALS APDS9802 ambient
++ light sensor.
++
++ This driver can also be built as a module. If so, the module
++ will be called apds9802als.
++
+ config ISL29003
+ tristate "Intersil ISL29003 ambient light sensor"
+ depends on I2C && SYSFS
+@@ -294,6 +304,26 @@
+ This driver can also be built as a module. If so, the module
+ will be called isl29003.
+
++config ISL29020
++ tristate "Intersil ISL29020 ambient light sensor"
++ depends on I2C
++ help
++ If you say yes here you get support for the Intersil ISL29020
++ ambient light sensor.
++
++ This driver can also be built as a module. If so, the module
++ will be called isl29020.
++
++config ISL29015
++ tristate "Intersil ISL29015 ambient light & proximity sensor"
++ depends on I2C && SYSFS
++ help
++ If you say yes here you get support for the Intersil ISL29015
++ ambient light & proximity sensor.
++
++ This driver can also be built as a module. If so, the module
++ will be called isl29015.
++
+ config SENSORS_TSL2550
+ tristate "Taos TSL2550 ambient light sensor"
+ depends on I2C && SYSFS
+@@ -304,6 +334,13 @@
+ This driver can also be built as a module. If so, the module
+ will be called tsl2550.
+
++config HMC6352
++ tristate "Honeywell HMC6352 compass"
++ depends on I2C
++ help
++ This driver provides support for the Honeywell HMC6352 compass,
++ providing configuration and heading data via sysfs.
++
+ config EP93XX_PWM
+ tristate "EP93xx PWM support"
+ depends on ARCH_EP93XX
+@@ -353,6 +390,30 @@
+ To compile this driver as a module, choose M here: the
+ module will be called vmware_balloon.
+
++config INTEL_MID_PTI
++ tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
++ help
++ The PTI (Parallel Trace Interface) driver directs
++ trace data routed from various parts in the system out
++ through an Intel Penwell PTI port and out of the mobile
++ device for analysis with a debugging tool (Lauterbach or Fido).
++
++ You should select this driver if the target kernel is meant for
++ an Intel Atom (non-netbook) mobile device containing a MIPI
++ P1149.7 standard implementation.
++
++config BH1770GLC
++ tristate "Rohm BH1770GLC ambient light and proximity sensor"
++ depends on I2C
++ default n
++ ---help---
++ Say Y here if you want to build a driver for BH1770GLC
++ combined ambient light and proximity sensor chip.
++ Driver supports also Osram SFH7770 version of the chip.
++
++ To compile this driver as a module, choose M here: the
++ module will be called bh1770glc. If unsure, say N here.
++
+ source "drivers/misc/c2port/Kconfig"
+ source "drivers/misc/eeprom/Kconfig"
+ source "drivers/misc/cb710/Kconfig"
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -2,8 +2,11 @@
+ # Makefile for misc devices that really don't fit anywhere else.
+ #
+
++bh1770glc-objs := bh1770glc_core.o bh1770glc_als.o bh1770glc_ps.o
++
+ obj-$(CONFIG_IBM_ASM) += ibmasm/
+ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
++obj-$(CONFIG_INTEL_MID_PTI) += pti.o
+ obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
+ obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
+ obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
+@@ -21,13 +24,19 @@
+ obj-$(CONFIG_SGI_GRU) += sgi-gru/
+ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
+ obj-$(CONFIG_HP_ILO) += hpilo.o
++obj-$(CONFIG_APDS9802ALS) += apds9802als.o
+ obj-$(CONFIG_ISL29003) += isl29003.o
++obj-$(CONFIG_ISL29020) += isl29020.o
++obj-$(CONFIG_ISL29015) += isl29015.o
+ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
+ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+ obj-$(CONFIG_DS1682) += ds1682.o
+ obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
++obj-$(CONFIG_BH1770GLC) += bh1770glc.o
+ obj-$(CONFIG_C2PORT) += c2port/
+ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
++obj-$(CONFIG_HMC6352) += hmc6352.o
+ obj-y += eeprom/
+ obj-y += cb710/
++obj-$(CONFIG_X86_MRST) += koski_hwid.o
+ obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
+--- /dev/null
++++ b/drivers/misc/apds9802als.c
+@@ -0,0 +1,303 @@
++/*
++ * apds9802als.c - apds9802 ALS Driver
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/hwmon-sysfs.h>
++
++#define ALS_MIN_RANGE_VAL 1
++#define ALS_MAX_RANGE_VAL 2
++#define POWER_STA_ENABLE 1
++#define POWER_STA_DISABLE 0
++#define APDS9802ALS_I2C_ADDR 0x29
++
++#define DRIVER_NAME "apds9802als"
++
++struct als_data {
++ struct device *hwmon_dev;
++ bool needresume;
++ struct mutex mutex;
++};
++
++static ssize_t als_sensing_range_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int val;
++
++ val = i2c_smbus_read_byte_data(client, 0x81);
++ if (val < 0)
++ return val;
++ if (val & 1)
++ return sprintf(buf, "4000\n");
++ else
++ return sprintf(buf, "64000\n");
++}
++
++static ssize_t als_lux_output_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct als_data *data = i2c_get_clientdata(client);
++ unsigned int ret_val;
++ int temp;
++
++ /* Protect against parallel reads */
++ mutex_lock(&data->mutex);
++ temp = i2c_smbus_read_byte_data(client, 0x8C);/*LSB data*/
++ if (temp < 0) {
++ ret_val = temp;
++ goto failed;
++ }
++ ret_val = i2c_smbus_read_byte_data(client, 0x8D);/*MSB data*/
++ if (ret_val < 0)
++ goto failed;
++ mutex_unlock(&data->mutex);
++ ret_val = (ret_val << 8) | temp;
++ return sprintf(buf, "%d\n", ret_val);
++failed:
++ mutex_unlock(&data->mutex);
++ return ret_val;
++}
++
++static ssize_t als_sensing_range_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct als_data *data = i2c_get_clientdata(client);
++ unsigned int ret_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val != 1 && val != 2)
++ return -EINVAL;
++
++ /* Make sure nobody else reads/modifies/writes 0x81 while we
++ are active */
++
++ mutex_lock(&data->mutex);
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x81);
++ if (ret_val < 0)
++ goto fail;
++
++ /* Reset the bits before setting them */
++ ret_val = ret_val & 0xFA;
++
++ if (val == 1) /* Setting the continous measurement up to 8k LUX */
++ ret_val = (ret_val | 0x05);
++ else /* Setting the continous measurement up to 64k LUX*/
++ ret_val = (ret_val | 0x04);
++
++ ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
++ if (ret_val >= 0) {
++ /* All OK */
++ mutex_unlock(&data->mutex);
++ return count;
++ }
++fail:
++ mutex_unlock(&data->mutex);
++ return ret_val;
++}
++
++static ssize_t als_power_status_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++ ret_val = i2c_smbus_read_byte_data(client, 0x80);
++ if (ret_val < 0)
++ return ret_val;
++ ret_val = ret_val & 0x01;
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static int als_set_power_state(struct i2c_client *client, bool on_off)
++{
++ int ret_val;
++ struct als_data *data = i2c_get_clientdata(client);
++
++ mutex_lock(&data->mutex);
++ ret_val = i2c_smbus_read_byte_data(client, 0x80);
++ if (ret_val < 0)
++ goto fail;
++ if (on_off)
++ ret_val = ret_val | 0x01;
++ else
++ ret_val = ret_val & 0xFE;
++ ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
++fail:
++ mutex_unlock(&data->mutex);
++ return ret_val;
++}
++
++static ssize_t als_power_status_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct als_data *data = i2c_get_clientdata(client);
++ unsigned long val;
++ int ret_val = 0;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val == POWER_STA_ENABLE) {
++ ret_val = als_set_power_state(client, true);
++ data->needresume = true;
++ } else if (val == POWER_STA_DISABLE) {
++ ret_val = als_set_power_state(client, false);
++ data->needresume = false;
++ } else
++ return -EINVAL;
++ if (ret_val < 0)
++ return ret_val;
++ return count;
++}
++
++static DEVICE_ATTR(sensing_range, S_IRUGO | S_IWUSR,
++ als_sensing_range_show, als_sensing_range_store);
++static DEVICE_ATTR(lux_output, S_IRUGO, als_lux_output_data_show, NULL);
++static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
++ als_power_status_show, als_power_status_store);
++
++static struct attribute *mid_att_als[] = {
++ &dev_attr_sensing_range.attr,
++ &dev_attr_lux_output.attr,
++ &dev_attr_power_state.attr,
++ NULL
++};
++
++static struct attribute_group m_als_gr = {
++ .name = "apds9802als",
++ .attrs = mid_att_als
++};
++
++static int als_set_default_config(struct i2c_client *client)
++{
++ int ret_val;
++ /* Write the command and then switch on */
++ ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
++ if (ret_val < 0) {
++ dev_err(&client->dev, "failed default switch on write\n");
++ return ret_val;
++ }
++ /* Continous from 1Lux to 64k Lux */
++ ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x04);
++ if (ret_val < 0)
++ dev_err(&client->dev, "failed default LUX on write\n");
++ return ret_val;
++}
++
++static int apds9802als_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++ struct als_data *data;
++
++ data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
++ if (data == NULL) {
++ dev_err(&client->dev, "Memory allocation failed");
++ return -ENOMEM;
++ }
++ i2c_set_clientdata(client, data);
++ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
++ if (res) {
++ dev_err(&client->dev, "device create file failed\n");
++ goto als_error1;
++ }
++ dev_info(&client->dev,
++ "%s apds9802als: ALS chip found\n", client->name);
++ als_set_default_config(client);
++ data->needresume = true;
++ mutex_init(&data->mutex);
++ return res;
++als_error1:
++ i2c_set_clientdata(client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int apds9802als_remove(struct i2c_client *client)
++{
++ struct als_data *data = i2c_get_clientdata(client);
++ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
++ kfree(data);
++ return 0;
++}
++
++static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ als_set_power_state(client, false);
++ return 0;
++}
++
++static int apds9802als_resume(struct i2c_client *client)
++{
++ struct als_data *data = i2c_get_clientdata(client);
++
++ if (data->needresume == true)
++ als_set_power_state(client, true);
++ return 0;
++}
++
++static struct i2c_device_id apds9802als_id[] = {
++ { DRIVER_NAME, 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, apds9802als_id);
++
++static struct i2c_driver apds9802als_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = apds9802als_probe,
++ .remove = apds9802als_remove,
++ .suspend = apds9802als_suspend,
++ .resume = apds9802als_resume,
++ .id_table = apds9802als_id,
++};
++
++static int __init sensor_apds9802als_init(void)
++{
++ return i2c_add_driver(&apds9802als_driver);
++}
++
++static void __exit sensor_apds9802als_exit(void)
++{
++ i2c_del_driver(&apds9802als_driver);
++}
++module_init(sensor_apds9802als_init);
++module_exit(sensor_apds9802als_exit);
++
++MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com");
++MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/misc/bh1770glc.h
+@@ -0,0 +1,169 @@
++/*
++ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
++ * Chip is combined proximity and ambient light sensor.
++ *
++ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
++ *
++ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#ifndef __BH1770GLC_HEADER__
++#define __BH1770GLC_HEADER__
++
++#define BHCHIP_ALS_CONTROL 0x80 /* ALS operation mode control */
++#define BHCHIP_PS_CONTROL 0x81 /* PS operation mode control */
++#define BHCHIP_I_LED 0x82 /* active LED and LED1, LED2 current */
++#define BHCHIP_I_LED3 0x83 /* LED3 current setting */
++#define BHCHIP_ALS_PS_MEAS 0x84 /* Forced mode trigger */
++#define BHCHIP_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
++#define BHCHIP_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
++#define BHCHIP_PART_ID 0x8a /* Part number and revision ID */
++#define BHCHIP_MANUFACT_ID 0x8b /* Manufacturerer ID */
++#define BHCHIP_ALS_DATA_0 0x8c /* ALS DATA low byte */
++#define BHCHIP_ALS_DATA_1 0x8d /* ALS DATA high byte */
++#define BHCHIP_ALS_PS_STATUS 0x8e /* Measurement data and int status */
++#define BHCHIP_PS_DATA_LED1 0x8f /* PS data from LED1 */
++#define BHCHIP_PS_DATA_LED2 0x90 /* PS data from LED2 */
++#define BHCHIP_PS_DATA_LED3 0x91 /* PS data from LED3 */
++#define BHCHIP_INTERRUPT 0x92 /* Interrupt setting */
++#define BHCHIP_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
++#define BHCHIP_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
++#define BHCHIP_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
++#define BHCHIP_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
++#define BHCHIP_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
++#define BHCHIP_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
++#define BHCHIP_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
++
++/* MANUFACT_ID */
++#define BHCHIP_MANUFACT_ROHM 0x01
++#define BHCHIP_MANUFACT_OSRAM 0x03
++
++/* PART_ID */
++#define BHCHIP_PART 0x90
++#define BHCHIP_PART_MASK 0xf0
++#define BHCHIP_REV_MASK 0x0f
++#define BHCHIP_REV_SHIFT 0
++#define BHCHIP_REV_0 0x00
++
++/* Operating modes for both */
++#define BHCHIP_STANDBY 0x00
++#define BHCHIP_FORCED 0x02
++#define BHCHIP_STANDALONE 0x03
++
++#define BHCHIP_PS_TRIG_MEAS (1 << 0)
++#define BHCHIP_ALS_TRIG_MEAS (1 << 1)
++
++/* Interrupt control */
++#define BHCHIP_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
++#define BHCHIP_INT_POLARITY (1 << 2) /* 1 = active high */
++#define BHCHIP_INT_ALS_ENA (1 << 1)
++#define BHCHIP_INT_PS_ENA (1 << 0)
++
++/* Interrupt status */
++#define BHCHIP_INT_LED1_DATA (1 << 0)
++#define BHCHIP_INT_LED1_INT (1 << 1)
++#define BHCHIP_INT_LED2_DATA (1 << 2)
++#define BHCHIP_INT_LED2_INT (1 << 3)
++#define BHCHIP_INT_LED3_DATA (1 << 4)
++#define BHCHIP_INT_LED3_INT (1 << 5)
++#define BHCHIP_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
++#define BHCHIP_INT_ALS_DATA (1 << 6)
++#define BHCHIP_INT_ALS_INT (1 << 7)
++
++#define BHCHIP_DISABLE 0
++#define BHCHIP_ENABLE 1
++
++ /* Following are milliseconds */
++#define BHCHIP_ALS_DEFAULT_RATE 200
++#define BHCHIP_PS_DEFAULT_RATE 40
++#define BHCHIP_PS_DEF_RATE_THRESH 200
++#define BHCHIP_PS_INIT_DELAY 15
++#define BHCHIP_STARTUP_DELAY 50
++
++#define BHCHIP_ALS_RANGE 65535
++#define BHCHIP_PS_RANGE 255
++#define BHCHIP_CALIB_SCALER 1000
++#define BHCHIP_ALS_NEUTRAL_CALIB_VALUE (1 * BHCHIP_CALIB_SCALER)
++#define BHCHIP_PS_NEUTRAL_CALIB_VALUE (1 * BHCHIP_CALIB_SCALER)
++#define BHCHIP_ALS_DEF_SENS 10
++#define BHCHIP_ALS_DEF_THRES 1000
++#define BHCHIP_PS_DEF_THRES 20
++
++#define ALS_NBR_FORMAT 512
++/* coef as decimal = ALS_COEF / *(ALS_NBR_FORMAT ^ 2) */
++#define ALS_COEF 1536
++/* Scaler coefficient at zero level */
++#define ALS_ZERO_LEVEL (ALS_NBR_FORMAT / 4)
++
++#define PS_ABOVE_THRESHOLD 1
++#define PS_BELOW_THRESHOLD 0
++
++#define BHCHIP_PS_CHANNELS 3
++
++struct bh1770glc_chip {
++ struct i2c_client *client;
++ struct bh1770glc_platform_data *pdata;
++ struct mutex mutex; /* avoid parallel access */
++ struct regulator_bulk_data regs[2];
++
++ bool int_mode_ps;
++ bool int_mode_als;
++
++ wait_queue_head_t als_misc_wait; /* WQ for ALS part */
++ wait_queue_head_t ps_misc_wait; /* WQ for PS part */
++ struct delayed_work ps_work; /* For ps low threshold */
++
++ char chipname[10];
++ u8 revision;
++
++ u32 als_calib;
++ int als_rate;
++ int als_mode;
++ int als_users;
++ u16 als_data;
++ u16 als_threshold_hi;
++ u16 als_threshold_lo;
++ u16 als_sens;
++ loff_t als_offset;
++
++ loff_t ps_offset;
++ u32 ps_calib[BHCHIP_PS_CHANNELS];
++ int ps_rate;
++ int ps_rate_threshold;
++ int ps_mode;
++ int ps_users;
++ u8 ps_data[BHCHIP_PS_CHANNELS];
++ u8 ps_thresholds[BHCHIP_PS_CHANNELS];
++ u8 ps_leds[BHCHIP_PS_CHANNELS];
++ u8 ps_channels; /* nbr of leds */
++};
++
++extern struct bh1770glc_chip *bh1770glc;
++
++extern int bh1770glc_ps_mode(struct bh1770glc_chip *chip, int mode);
++extern int bh1770glc_ps_init(struct bh1770glc_chip *chip);
++extern int bh1770glc_ps_destroy(struct bh1770glc_chip *chip);
++extern void bh1770glc_ps_interrupt_handler(struct bh1770glc_chip *chip,
++ int status);
++
++extern int bh1770glc_als_mode(struct bh1770glc_chip *chip, int mode);
++extern int bh1770glc_als_init(struct bh1770glc_chip *chip);
++extern int bh1770glc_als_destroy(struct bh1770glc_chip *chip);
++extern void bh1770glc_als_interrupt_handler(struct bh1770glc_chip *chip,
++ int status);
++#endif
+--- /dev/null
++++ b/drivers/misc/bh1770glc_als.c
+@@ -0,0 +1,424 @@
++/*
++ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
++ * Chip is combined proximity and ambient light sensor.
++ *
++ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
++ *
++ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/i2c.h>
++#include <linux/mutex.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <linux/bh1770glc.h>
++#include <linux/uaccess.h>
++#include <linux/delay.h>
++#include <linux/regulator/consumer.h>
++#include "bh1770glc.h"
++
++/* Supported stand alone rates in ms from chip data sheet */
++static s16 als_rates[] = {100, 200, 500, 1000, 2000};
++
++/* chip->mutex must be locked during this function */
++static int bh1770glc_als_interrupt_control(struct bh1770glc_chip *chip,
++ int als)
++{
++ chip->int_mode_als = als;
++
++ /* Set ALS interrupt mode, interrupt active low, latched */
++ return i2c_smbus_write_byte_data(chip->client,
++ BHCHIP_INTERRUPT,
++ (als << 1) | (chip->int_mode_ps << 0));
++}
++
++int bh1770glc_als_mode(struct bh1770glc_chip *chip, int mode)
++{
++ int r;
++
++ r = i2c_smbus_write_byte_data(chip->client, BHCHIP_ALS_CONTROL, mode);
++
++ if (r == 0)
++ chip->als_mode = mode;
++
++ return r;
++}
++
++static int bh1770glc_als_rate(struct bh1770glc_chip *chip, int rate)
++{
++ int ret = -EINVAL;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(als_rates); i++)
++ if (als_rates[i] == rate) {
++ ret = i2c_smbus_write_byte_data(chip->client,
++ BHCHIP_ALS_MEAS_RATE,
++ i);
++ if (ret == 0)
++ chip->als_rate = rate;
++ break;
++ }
++ return ret;
++}
++
++static int bh1770glc_als_set_threshold(struct bh1770glc_chip *chip,
++ int threshold_hi,
++ int threshold_lo)
++{
++ u8 data[4];
++ int ret;
++
++ chip->als_threshold_hi = threshold_hi;
++ chip->als_threshold_lo = threshold_lo;
++
++ data[0] = threshold_hi;
++ data[1] = threshold_hi >> 8;
++ data[2] = threshold_lo;
++ data[3] = threshold_lo >> 8;
++
++ ret = i2c_smbus_write_i2c_block_data(chip->client,
++ BHCHIP_ALS_TH_UP_0,
++ ARRAY_SIZE(data),
++ data);
++ return ret;
++}
++
++static int bh1770glc_als_calc_thresholds(struct bh1770glc_chip *chip, u16 data)
++{
++ int scaler;
++ int hi_thres;
++ int lo_thres;
++ int sens;
++ int ret;
++
++ /*
++ * Recalculate new threshold limits to simulate delta measurement
++ * mode. New limits are relative to latest measurement data.
++ */
++ scaler = ((int)data * ALS_COEF) / ALS_NBR_FORMAT + ALS_ZERO_LEVEL;
++ sens = chip->als_sens * scaler / ALS_NBR_FORMAT;
++
++ hi_thres = min(data + sens, BHCHIP_ALS_RANGE);
++ lo_thres = max(data - sens, 0);
++
++ mutex_lock(&chip->mutex);
++ ret = bh1770glc_als_set_threshold(chip,
++ hi_thres,
++ lo_thres);
++ mutex_unlock(&chip->mutex);
++
++ return ret;
++}
++
++static int bh1770glc_als_read_result(struct bh1770glc_chip *chip)
++{
++ u16 data;
++ int ret;
++
++ ret = i2c_smbus_read_byte_data(chip->client, BHCHIP_ALS_DATA_0);
++ if (ret < 0)
++ goto exit;
++
++ data = ret & 0xff;
++ ret = i2c_smbus_read_byte_data(chip->client, BHCHIP_ALS_DATA_1);
++ if (ret < 0)
++ goto exit;
++
++ data = data | ((ret & 0xff) << 8);
++ chip->als_data = data;
++ chip->als_offset += sizeof(struct bh1770glc_als);
++
++ ret = bh1770glc_als_calc_thresholds(chip, data);
++exit:
++ return ret;
++}
++
++void bh1770glc_als_interrupt_handler(struct bh1770glc_chip *chip, int status)
++{
++ if (chip->int_mode_als)
++ if (status & BHCHIP_INT_ALS_INT) {
++ bh1770glc_als_read_result(chip);
++ wake_up_interruptible(&bh1770glc->als_misc_wait);
++ }
++}
++
++static ssize_t bh1770glc_als_read(struct file *file, char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct bh1770glc_als als;
++ struct bh1770glc_chip *chip = bh1770glc;
++ u32 lux;
++
++ if (count < sizeof(als))
++ return -EINVAL;
++
++ if (*offset >= chip->als_offset) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ if (wait_event_interruptible(bh1770glc->als_misc_wait,
++ (*offset < chip->als_offset)))
++ return -ERESTARTSYS;
++ }
++ lux = ((u32)chip->als_data * chip->als_calib) /
++ BHCHIP_CALIB_SCALER;
++ lux = min(lux, (u32)BHCHIP_ALS_RANGE);
++
++ als.lux = lux;
++
++ *offset = chip->als_offset;
++
++ return copy_to_user(buf, &als, sizeof(als)) ? -EFAULT : sizeof(als);
++}
++
++static int bh1770glc_als_open(struct inode *inode, struct file *file)
++{
++
++ struct bh1770glc_chip *chip = bh1770glc;
++ int ret = 0;
++
++ mutex_lock(&chip->mutex);
++ if (!chip->als_users) {
++ ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
++ chip->regs);
++ if (ret < 0)
++ goto release_lock;
++
++ if (!chip->ps_users)
++ msleep(BHCHIP_STARTUP_DELAY);
++
++ ret = bh1770glc_als_mode(chip, BHCHIP_STANDALONE);
++ if (ret < 0)
++ goto exit;
++
++ ret = bh1770glc_als_rate(chip, chip->als_rate);
++ if (ret < 0)
++ goto exit;
++
++ ret = bh1770glc_als_interrupt_control(chip, BHCHIP_ENABLE);
++ if (ret < 0)
++ goto exit;
++ }
++ /* Trig measurement and refresh the measurement result */
++ bh1770glc_als_set_threshold(chip, BHCHIP_ALS_DEF_THRES,
++ BHCHIP_ALS_DEF_THRES);
++
++ if (ret == 0)
++ chip->als_users++;
++exit:
++ if (ret < 0)
++ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
++release_lock:
++ file->f_pos = chip->als_offset;
++ /* In case of two or more user, provide newest results available */
++ if (chip->als_users > 1 &&
++ file->f_pos >= sizeof(struct bh1770glc_als))
++ file->f_pos -= sizeof(struct bh1770glc_als);
++
++ mutex_unlock(&chip->mutex);
++ return ret;
++}
++
++static unsigned int bh1770glc_als_poll(struct file *file, poll_table *wait)
++{
++ poll_wait(file, &bh1770glc->als_misc_wait, wait);
++ if (file->f_pos < bh1770glc->als_offset)
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static int bh1770glc_als_close(struct inode *inode, struct file *file)
++{
++ struct bh1770glc_chip *chip = bh1770glc;
++ mutex_lock(&chip->mutex);
++ if (!--chip->als_users) {
++ bh1770glc_als_interrupt_control(chip, BHCHIP_DISABLE);
++ bh1770glc_als_mode(chip, BHCHIP_STANDBY);
++ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
++ }
++ mutex_unlock(&chip->mutex);
++ return 0;
++}
++
++
++/* SYSFS interface */
++static ssize_t bh1770glc_als_calib_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%u\n", chip->als_calib);
++}
++
++static ssize_t bh1770glc_als_calib_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ unsigned long value;
++
++ if (strict_strtoul(buf, 0, &value))
++ return -EINVAL;
++
++ chip->als_calib = value;
++
++ return len;
++}
++
++static ssize_t bh1770glc_get_als_mode(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", chip->als_mode);
++}
++
++static ssize_t bh1770glc_get_als_rate(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", chip->als_rate);
++}
++
++static ssize_t bh1770glc_set_als_rate(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ unsigned long rate;
++ int ret;
++
++ if (strict_strtoul(buf, 0, &rate))
++ return -EINVAL;
++
++ mutex_lock(&chip->mutex);
++ ret = bh1770glc_als_rate(chip, rate);
++ mutex_unlock(&chip->mutex);
++
++ if (ret < 0)
++ return ret;
++
++ return count;
++}
++
++static ssize_t bh1770glc_get_als_sens(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", chip->als_sens);
++}
++
++static ssize_t bh1770glc_set_als_sens(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ int ret;
++ unsigned long sens;
++
++ if (strict_strtoul(buf, 0, &sens))
++ return -EINVAL;
++
++ chip->als_sens = sens;
++
++ /* Trick measurement by setting default thresholds */
++ mutex_lock(&chip->mutex);
++ ret = bh1770glc_als_set_threshold(chip,
++ BHCHIP_ALS_DEF_THRES,
++ BHCHIP_ALS_DEF_THRES);
++
++ mutex_unlock(&chip->mutex);
++ if (ret < 0)
++ return ret;
++ return count;
++}
++
++static DEVICE_ATTR(als_calib, S_IRUGO | S_IWUSR, bh1770glc_als_calib_show,
++ bh1770glc_als_calib_store);
++static DEVICE_ATTR(als_mode, S_IRUGO , bh1770glc_get_als_mode, NULL);
++static DEVICE_ATTR(als_rate, S_IRUGO | S_IWUSR, bh1770glc_get_als_rate,
++ bh1770glc_set_als_rate);
++static DEVICE_ATTR(als_sens, S_IRUGO | S_IWUSR, bh1770glc_get_als_sens,
++ bh1770glc_set_als_sens);
++
++static struct attribute *sysfs_attrs[] = {
++ &dev_attr_als_calib.attr,
++ &dev_attr_als_mode.attr,
++ &dev_attr_als_rate.attr,
++ &dev_attr_als_sens.attr,
++ NULL
++};
++
++static struct attribute_group bh1770glc_attribute_group = {
++ .attrs = sysfs_attrs
++};
++
++static const struct file_operations bh1770glc_als_fops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .read = bh1770glc_als_read,
++ .poll = bh1770glc_als_poll,
++ .open = bh1770glc_als_open,
++ .release = bh1770glc_als_close,
++};
++
++static struct miscdevice bh1770glc_als_miscdevice = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "bh1770glc_als",
++ .fops = &bh1770glc_als_fops
++};
++
++int bh1770glc_als_init(struct bh1770glc_chip *chip)
++{
++ int err;
++ err = bh1770glc_als_mode(chip, BHCHIP_STANDBY);
++ if (err < 0)
++ goto fail;
++
++ err = bh1770glc_als_interrupt_control(chip, BHCHIP_DISABLE);
++ if (err < 0)
++ goto fail;
++
++ chip->als_rate = BHCHIP_ALS_DEFAULT_RATE;
++
++ bh1770glc_als_miscdevice.parent = &chip->client->dev;
++ err = misc_register(&bh1770glc_als_miscdevice);
++ if (err < 0) {
++ dev_err(&chip->client->dev, "Device registration failed\n");
++ goto fail;
++ }
++
++ err = sysfs_create_group(&chip->client->dev.kobj,
++ &bh1770glc_attribute_group);
++ if (err < 0) {
++ dev_err(&chip->client->dev, "Sysfs registration failed\n");
++ goto fail2;
++ }
++ return 0;
++fail2:
++ misc_deregister(&bh1770glc_als_miscdevice);
++fail:
++ return err;
++}
++
++int bh1770glc_als_destroy(struct bh1770glc_chip *chip)
++{
++ sysfs_remove_group(&chip->client->dev.kobj,
++ &bh1770glc_attribute_group);
++ misc_deregister(&bh1770glc_als_miscdevice);
++ return 0;
++}
+--- /dev/null
++++ b/drivers/misc/bh1770glc_core.c
+@@ -0,0 +1,301 @@
++/*
++ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
++ * Chip is combined proximity and ambient light sensor.
++ *
++ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
++ *
++ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/mutex.h>
++#include <linux/bh1770glc.h>
++#include <linux/regulator/consumer.h>
++#include <linux/slab.h>
++#include "bh1770glc.h"
++
++struct bh1770glc_chip *bh1770glc;
++
++static const char reg_vcc[] = "Vcc";
++static const char reg_vleds[] = "Vleds";
++
++static int bh1770glc_detect(struct bh1770glc_chip *chip)
++{
++ struct i2c_client *client = chip->client;
++ s32 ret;
++ u8 manu;
++ u8 part;
++
++ ret = i2c_smbus_read_byte_data(client, BHCHIP_MANUFACT_ID);
++ if (ret < 0)
++ goto error;
++
++ manu = (u8)ret;
++
++ ret = i2c_smbus_read_byte_data(client, BHCHIP_PART_ID);
++ if (ret < 0)
++ goto error;
++ part = (u8)ret;
++ chip->revision = (part & BHCHIP_REV_MASK) >> BHCHIP_REV_SHIFT;
++
++ if ((manu == BHCHIP_MANUFACT_ROHM) &&
++ ((part & BHCHIP_PART_MASK) == BHCHIP_PART)) {
++ snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
++ return 0;
++ }
++
++ if ((manu == BHCHIP_MANUFACT_OSRAM) &&
++ ((part & BHCHIP_PART_MASK) == BHCHIP_PART)) {
++ snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
++ return 0;
++ }
++
++ ret = -ENODEV;
++error:
++ dev_dbg(&client->dev, "BH1770GLC or SFH7770 not found\n");
++
++ return ret;
++}
++
++/* This is threaded irq handler */
++static irqreturn_t bh1770glc_irq(int irq, void *data)
++{
++ struct bh1770glc_chip *chip = data;
++ int status;
++
++ status = i2c_smbus_read_byte_data(chip->client, BHCHIP_ALS_PS_STATUS);
++
++ /* Acknowledge interrupt by reading this register */
++ i2c_smbus_read_byte_data(chip->client, BHCHIP_INTERRUPT);
++
++ bh1770glc_als_interrupt_handler(chip, status);
++ bh1770glc_ps_interrupt_handler(chip, status);
++
++ return IRQ_HANDLED;
++}
++
++static int __devinit bh1770glc_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct bh1770glc_chip *chip;
++ int err;
++ int i;
++
++ chip = kzalloc(sizeof *chip, GFP_KERNEL);
++ if (!chip)
++ return -ENOMEM;
++
++ bh1770glc = chip;
++
++ init_waitqueue_head(&chip->ps_misc_wait);
++ init_waitqueue_head(&chip->als_misc_wait);
++
++ i2c_set_clientdata(client, chip);
++ chip->client = client;
++
++ mutex_init(&chip->mutex);
++ chip->pdata = client->dev.platform_data;
++ chip->als_calib = BHCHIP_ALS_NEUTRAL_CALIB_VALUE;
++ chip->als_sens = BHCHIP_ALS_DEF_SENS;
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++)
++ chip->ps_calib[i] = BHCHIP_PS_NEUTRAL_CALIB_VALUE;
++
++ /*
++ * Platform data contains led configuration and safety limits.
++ * Too strong current can damage HW permanently.
++ * Platform data filled with zeros causes minimum current.
++ */
++ if (chip->pdata == NULL) {
++ dev_err(&client->dev, "platform data is mandatory\n");
++ err = -EINVAL;
++ goto fail1;
++ }
++
++ if (chip->pdata->setup_resources) {
++ err = chip->pdata->setup_resources();
++ if (err) {
++ err = -EINVAL;
++ goto fail1;
++ }
++ }
++
++ switch (chip->pdata->leds) {
++ case BH1770GLC_LED1:
++ chip->ps_channels = 1;
++ break;
++ case BH1770GLC_LED12:
++ case BH1770GLC_LED13:
++ chip->ps_channels = 2;
++ break;
++ case BH1770GLC_LED123:
++ chip->ps_channels = 3;
++ break;
++ default:
++ err = -EINVAL;
++ goto fail1;
++ }
++
++ chip->regs[0].supply = reg_vcc;
++ chip->regs[1].supply = reg_vleds;
++
++ err = regulator_bulk_get(&client->dev,
++ ARRAY_SIZE(chip->regs), chip->regs);
++ if (err < 0) {
++ dev_err(&client->dev, "Cannot get regulators\n");
++ goto fail1;
++ }
++
++ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
++ if (err < 0) {
++ dev_err(&client->dev, "Cannot enable regulators\n");
++ goto fail2;
++ }
++
++ err = bh1770glc_detect(chip);
++ if (err < 0)
++ goto fail3;
++
++ err = bh1770glc_als_init(chip);
++ if (err < 0)
++ goto fail3;
++
++ err = bh1770glc_ps_init(chip);
++ if (err < 0)
++ goto fail4;
++
++ err = request_threaded_irq(client->irq, NULL,
++ bh1770glc_irq,
++ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++ "bh1770glc", chip);
++ if (err) {
++ dev_err(&client->dev, "could not get IRQ %d\n",
++ client->irq);
++ goto fail5;
++ }
++ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
++ return err;
++fail5:
++ bh1770glc_ps_destroy(chip);
++fail4:
++ bh1770glc_als_destroy(chip);
++fail3:
++ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
++fail2:
++ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
++fail1:
++ kfree(chip);
++ return err;
++}
++
++static int __devexit bh1770glc_remove(struct i2c_client *client)
++{
++ struct bh1770glc_chip *chip = i2c_get_clientdata(client);
++
++ free_irq(client->irq, chip);
++
++ if (chip->pdata && chip->pdata->release_resources)
++ chip->pdata->release_resources();
++
++ bh1770glc_als_destroy(chip);
++ bh1770glc_ps_destroy(chip);
++ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
++ kfree(chip);
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int bh1770glc_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct bh1770glc_chip *chip = i2c_get_clientdata(client);
++
++ mutex_lock(&chip->mutex);
++ if (chip->als_users)
++ bh1770glc_als_mode(chip, BHCHIP_STANDBY);
++ if (chip->ps_users)
++ bh1770glc_ps_mode(chip, BHCHIP_STANDBY);
++ mutex_unlock(&chip->mutex);
++ return 0;
++}
++
++static int bh1770glc_resume(struct i2c_client *client)
++{
++ struct bh1770glc_chip *chip = i2c_get_clientdata(client);
++
++ mutex_lock(&chip->mutex);
++ if (chip->als_users)
++ bh1770glc_als_mode(chip, BHCHIP_STANDALONE);
++ if (chip->ps_users)
++ bh1770glc_ps_mode(chip, BHCHIP_STANDALONE);
++ mutex_unlock(&chip->mutex);
++ return 0;
++}
++
++static void bh1770glc_shutdown(struct i2c_client *client)
++{
++ struct bh1770glc_chip *chip = i2c_get_clientdata(client);
++
++ bh1770glc_als_mode(chip, BHCHIP_STANDBY);
++ bh1770glc_ps_mode(chip, BHCHIP_STANDBY);
++}
++
++#else
++#define bh1770glc_suspend NULL
++#define bh1770glc_shutdown NULL
++#define bh1770glc_resume NULL
++#endif
++
++static const struct i2c_device_id bh1770glc_id[] = {
++ {"bh1770glc", 0 },
++ {"SFH7770", 0 },
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, bh1770glc_id);
++
++static struct i2c_driver bh1770glc_driver = {
++ .driver = {
++ .name = "bh1770glc",
++ .owner = THIS_MODULE,
++ },
++ .suspend = bh1770glc_suspend,
++ .shutdown = bh1770glc_shutdown,
++ .resume = bh1770glc_resume,
++ .probe = bh1770glc_probe,
++ .remove = __devexit_p(bh1770glc_remove),
++ .id_table = bh1770glc_id,
++};
++
++static int __init bh1770glc_init(void)
++{
++ return i2c_add_driver(&bh1770glc_driver);
++}
++
++static void __exit bh1770glc_exit(void)
++{
++ i2c_del_driver(&bh1770glc_driver);
++}
++
++MODULE_DESCRIPTION("BH1770GLC combined ALS and proximity sensor");
++MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
++MODULE_LICENSE("GPL v2");
++
++module_init(bh1770glc_init);
++module_exit(bh1770glc_exit);
+--- /dev/null
++++ b/drivers/misc/bh1770glc_ps.c
+@@ -0,0 +1,585 @@
++/*
++ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
++ * Chip is combined proximity and ambient light sensor.
++ *
++ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
++ *
++ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/i2c.h>
++#include <linux/mutex.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <linux/bh1770glc.h>
++#include <linux/uaccess.h>
++#include <linux/delay.h>
++#include <linux/regulator/consumer.h>
++#include <linux/workqueue.h>
++#include <linux/jiffies.h>
++#include "bh1770glc.h"
++
++/* Supported stand alone rates in ms from chip data sheet */
++static s16 ps_rates[] = {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
++
++/* Supported IR-led currents in mA */
++static const u8 ps_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
++
++/* chip->mutex must be locked during this function */
++static int bh1770glc_ps_interrupt_control(struct bh1770glc_chip *chip, int ps)
++{
++ chip->int_mode_ps = ps;
++
++ /* Set PS interrupt mode, interrupt active low, latched */
++ return i2c_smbus_write_byte_data(chip->client,
++ BHCHIP_INTERRUPT,
++ (chip->int_mode_als << 1) | (ps << 0));
++}
++
++/* LEDs are controlled by the chip during proximity scanning */
++static int bh1770glc_led_cfg(struct bh1770glc_chip *chip, u8 ledcurr[3])
++{
++ u8 ledcfg;
++ int ret, i;
++
++ ledcfg = chip->pdata->leds;
++
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++)
++ if (ledcurr[i] > chip->pdata->led_max_curr)
++ return -EINVAL;
++
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++)
++ chip->ps_leds[i] = ledcurr[i];
++
++ /* LED cfg, current for leds 1 and 2 */
++ ret = i2c_smbus_write_byte_data(chip->client,
++ BHCHIP_I_LED,
++ (ledcfg << 6) | (ledcurr[1] << 3) |
++ ledcurr[0]);
++ if (ret < 0)
++ goto fail;
++
++ /* Current for LED 3 */
++ ret = i2c_smbus_write_byte_data(chip->client,
++ BHCHIP_I_LED3,
++ ledcurr[2]);
++fail:
++ return ret;
++}
++
++int bh1770glc_ps_mode(struct bh1770glc_chip *chip, int mode)
++{
++ int ret;
++
++ ret = i2c_smbus_write_byte_data(chip->client, BHCHIP_PS_CONTROL, mode);
++ if (ret == 0)
++ chip->ps_mode = mode;
++ return ret;
++}
++
++static int bh1770glc_ps_rates(struct bh1770glc_chip *chip, int rate,
++ int rate_threshold)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(ps_rates); i++) {
++ if (ps_rates[i] == rate) {
++ chip->ps_rate = i;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(ps_rates))
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(ps_rates); i++) {
++ if (ps_rates[i] == rate_threshold) {
++ chip->ps_rate_threshold = i;
++ return 0;
++ }
++ }
++ return -EINVAL;
++}
++
++static int bh1770glc_ps_rate(struct bh1770glc_chip *chip, int mode)
++{
++ int ret;
++ int rate;
++
++ rate = (mode == PS_ABOVE_THRESHOLD) ?
++ chip->ps_rate_threshold : chip->ps_rate;
++
++ ret = i2c_smbus_write_byte_data(chip->client,
++ BHCHIP_PS_MEAS_RATE,
++ rate);
++ return ret;
++}
++
++static int bh1770glc_ps_read_result(struct bh1770glc_chip *chip)
++{
++ int ret;
++ int i;
++
++ mutex_lock(&chip->mutex);
++ for (i = 0; i < ARRAY_SIZE(chip->ps_data); i++) {
++ ret = i2c_smbus_read_byte_data(chip->client,
++ BHCHIP_PS_DATA_LED1 + i);
++ if (ret < 0)
++ goto out;
++ chip->ps_data[i] = ret;
++ }
++ chip->ps_offset += sizeof(struct bh1770glc_ps);
++out:
++ mutex_unlock(&chip->mutex);
++ return ret;
++}
++
++static int bh1770glc_ps_set_thresholds(struct bh1770glc_chip *chip)
++{
++ int ret, i;
++ u8 data[BHCHIP_PS_CHANNELS];
++ u32 tmp;
++
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++) {
++ tmp = ((u32)chip->ps_thresholds[i] * BHCHIP_CALIB_SCALER) /
++ chip->ps_calib[i];
++ if (tmp > BHCHIP_PS_RANGE)
++ tmp = BHCHIP_PS_RANGE;
++ data[i] = tmp;
++ }
++
++ ret = i2c_smbus_write_i2c_block_data(chip->client,
++ BHCHIP_PS_TH_LED1,
++ BHCHIP_PS_CHANNELS,
++ data);
++ return ret;
++}
++
++static ssize_t bh1770glc_ps_read(struct file *file, char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct bh1770glc_ps ps;
++ struct bh1770glc_chip *chip = bh1770glc;
++ int i;
++ u16 data[BHCHIP_PS_CHANNELS];
++
++ if (count < sizeof(ps))
++ return -EINVAL;
++
++ if (*offset >= chip->ps_offset) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ if (wait_event_interruptible(bh1770glc->ps_misc_wait,
++ (*offset < chip->ps_offset)))
++ return -ERESTARTSYS;
++ }
++
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++) {
++ data[i] = ((u32)chip->ps_data[i] * chip->ps_calib[i]) /
++ BHCHIP_CALIB_SCALER;
++ if (data[i] > BHCHIP_PS_RANGE)
++ data[i] = BHCHIP_PS_RANGE;
++ }
++
++ ps.led1 = data[0];
++ ps.led2 = data[1];
++ ps.led3 = data[2];
++
++ *offset = chip->ps_offset;
++
++ return copy_to_user(buf, &ps, sizeof(ps)) ? -EFAULT : sizeof(ps);
++}
++
++static void bh1770glc_ps_work(struct work_struct *work)
++{
++ struct bh1770glc_chip *chip =
++ container_of(work, struct bh1770glc_chip, ps_work.work);
++
++ bh1770glc_ps_rate(chip, PS_BELOW_THRESHOLD);
++ bh1770glc_ps_read_result(chip);
++ wake_up_interruptible(&chip->ps_misc_wait);
++}
++
++void bh1770glc_ps_interrupt_handler(struct bh1770glc_chip *chip, int status)
++{
++ if (chip->int_mode_ps)
++ if (status & BHCHIP_INT_LEDS_INT) {
++ int rate = ps_rates[chip->ps_rate_threshold];
++
++ bh1770glc_ps_read_result(chip);
++ bh1770glc_ps_rate(chip, PS_ABOVE_THRESHOLD);
++ wake_up_interruptible(&bh1770glc->ps_misc_wait);
++
++ cancel_delayed_work_sync(&chip->ps_work);
++ /*
++ * Let's recheck situation 50 ms after the next
++ * expected threshold interrupt.
++ */
++ schedule_delayed_work(&chip->ps_work,
++ msecs_to_jiffies(rate + 50));
++ }
++}
++
++/* Proximity misc device */
++static unsigned int bh1770glc_ps_poll(struct file *file, poll_table *wait)
++{
++ poll_wait(file, &bh1770glc->ps_misc_wait, wait);
++ if (file->f_pos < bh1770glc->ps_offset)
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static int bh1770glc_ps_open(struct inode *inode, struct file *file)
++{
++ struct bh1770glc_chip *chip = bh1770glc;
++ int err;
++
++ mutex_lock(&chip->mutex);
++ err = 0;
++ if (!chip->ps_users) {
++ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
++ chip->regs);
++ if (err < 0)
++ goto release_lock;
++
++ if (!chip->als_users)
++ msleep(BHCHIP_STARTUP_DELAY);
++
++ /* Refresh all configs in case the regulators were off */
++ err = bh1770glc_ps_set_thresholds(chip);
++ if (err < 0)
++ goto exit;
++
++ err = bh1770glc_led_cfg(chip, chip->ps_leds);
++ if (err < 0)
++ goto exit;
++
++ err = bh1770glc_ps_rate(chip, PS_BELOW_THRESHOLD);
++ if (err < 0)
++ goto exit;
++
++ err = bh1770glc_ps_interrupt_control(chip, BHCHIP_ENABLE);
++ if (err < 0)
++ goto exit;
++
++ err = bh1770glc_ps_mode(chip, BHCHIP_STANDALONE);
++ }
++ if (err == 0)
++ chip->ps_users++;
++exit:
++ if (err < 0)
++ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
++
++release_lock:
++ file->f_pos = chip->ps_offset;
++ mutex_unlock(&chip->mutex);
++
++ if (err == 0) {
++ cancel_delayed_work_sync(&chip->ps_work);
++ schedule_delayed_work(&chip->ps_work,
++ msecs_to_jiffies(BHCHIP_PS_INIT_DELAY));
++ }
++
++ return err;
++}
++
++static int bh1770glc_ps_close(struct inode *inode, struct file *file)
++{
++ struct bh1770glc_chip *chip = bh1770glc;
++ mutex_lock(&chip->mutex);
++ if (!--chip->ps_users) {
++ bh1770glc_ps_interrupt_control(chip, BHCHIP_DISABLE);
++ bh1770glc_ps_mode(chip, BHCHIP_STANDBY);
++ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
++ }
++ mutex_unlock(&chip->mutex);
++ return 0;
++}
++
++static ssize_t bh1770glc_get_ps_mode(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", chip->ps_mode);
++}
++
++static ssize_t bh1770glc_get_ps_rate(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%d %d\n", ps_rates[chip->ps_rate],
++ ps_rates[chip->ps_rate_threshold]);
++}
++
++static ssize_t bh1770glc_set_ps_rate(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ int rate = 0, rate_threshold = 0;
++ int ret;
++
++ ret = sscanf(buf, "%d %d", &rate, &rate_threshold);
++ if (ret < 0)
++ return ret;
++
++ if (ret == 0)
++ return count;
++
++ /* Second value is optional */
++ if (ret == 1)
++ rate_threshold = ps_rates[chip->ps_rate_threshold];
++
++ mutex_lock(&chip->mutex);
++ ret = bh1770glc_ps_rates(chip, rate, rate_threshold);
++ mutex_unlock(&chip->mutex);
++
++ if (ret < 0)
++ return ret;
++
++ return count;
++}
++
++static ssize_t bh1770glc_ps_calib_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%u %u %u\n", chip->ps_calib[0],
++ chip->ps_calib[1], chip->ps_calib[2]);
++}
++
++static ssize_t bh1770glc_ps_calib_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ int calib[BHCHIP_PS_CHANNELS];
++ int i, ret;
++
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++)
++ calib[i] = BHCHIP_PS_NEUTRAL_CALIB_VALUE;
++ ret = sscanf(buf, "%d %d %d", &calib[0], &calib[1], &calib[2]);
++ if (ret < 0)
++ return ret;
++ if (ret < chip->ps_channels)
++ return -EINVAL;
++
++ for (i = 0; i < chip->ps_channels; i++)
++ chip->ps_calib[i] = calib[i];
++
++ return len;
++}
++
++static ssize_t bh1770glc_get_ps_thres(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%d %d %d\n", chip->ps_thresholds[0],
++ chip->ps_thresholds[1],
++ chip->ps_thresholds[2]);
++}
++
++static ssize_t bh1770glc_set_ps_thres(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ int input[BHCHIP_PS_CHANNELS];
++ int ret;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(input); i++)
++ input[i] = BHCHIP_PS_RANGE;
++ ret = sscanf(buf, "%d %d %d", &input[0], &input[1], &input[2]);
++
++ if (ret < 0)
++ return ret;
++ if (ret < chip->ps_channels)
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(input); i++) {
++ if ((input[i] < 0) ||
++ (input[i] > BHCHIP_PS_RANGE))
++ return -EINVAL;
++ chip->ps_thresholds[i] = input[i];
++ }
++
++ mutex_lock(&chip->mutex);
++ ret = bh1770glc_ps_set_thresholds(chip);
++ mutex_unlock(&chip->mutex);
++ if (ret < 0)
++ return ret;
++
++ return count;
++}
++
++static ssize_t bh1770glc_ps_leds_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ u8 led_current[BHCHIP_PS_CHANNELS];
++ int i;
++
++ memset(led_current, 0, sizeof(led_current));
++ for (i = 0; i < chip->ps_channels; i++)
++ led_current[i] = ps_curr_ma[chip->ps_leds[i]];
++
++ return sprintf(buf, "%d %d %d\n", led_current[0],
++ led_current[1],
++ led_current[2]);
++}
++
++static ssize_t bh1770glc_ps_leds_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ int input[BHCHIP_PS_CHANNELS];
++ u8 led_curr[BHCHIP_PS_CHANNELS];
++ int ret;
++ int i, j;
++
++ ret = sscanf(buf, "%d %d %d", &input[0], &input[1], &input[2]);
++ if (ret < 0)
++ return ret;
++ if (ret < chip->ps_channels)
++ return -EINVAL;
++
++ /* Set minimum current */
++ for (i = chip->ps_channels; i < BHCHIP_PS_CHANNELS; i++)
++ led_curr[i] = BH1770GLC_LED_5mA;
++
++ for (i = 0; i < chip->ps_channels; i++) {
++ for (j = 0; j < ARRAY_SIZE(ps_curr_ma); j++)
++ if (input[i] == ps_curr_ma[j]) {
++ led_curr[i] = j;
++ break;
++ }
++ if (j == ARRAY_SIZE(ps_curr_ma))
++ return -EINVAL;
++ }
++
++ mutex_lock(&chip->mutex);
++ ret = bh1770glc_led_cfg(chip, led_curr);
++ mutex_unlock(&chip->mutex);
++ if (ret < 0)
++ return ret;
++
++ return count;
++}
++
++static ssize_t bh1770glc_chip_id_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct bh1770glc_chip *chip = dev_get_drvdata(dev);
++ return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
++}
++
++static DEVICE_ATTR(ps_mode, S_IRUGO, bh1770glc_get_ps_mode, NULL);
++static DEVICE_ATTR(ps_rate, S_IRUGO | S_IWUSR, bh1770glc_get_ps_rate,
++ bh1770glc_set_ps_rate);
++static DEVICE_ATTR(ps_threshold, S_IRUGO | S_IWUSR, bh1770glc_get_ps_thres,
++ bh1770glc_set_ps_thres);
++static DEVICE_ATTR(ps_calib, S_IRUGO | S_IWUSR, bh1770glc_ps_calib_show,
++ bh1770glc_ps_calib_store);
++static DEVICE_ATTR(ps_leds, S_IRUGO | S_IWUSR, bh1770glc_ps_leds_show,
++ bh1770glc_ps_leds_store);
++static DEVICE_ATTR(chip_id, S_IRUGO, bh1770glc_chip_id_show, NULL);
++
++static struct attribute *sysfs_attrs[] = {
++ &dev_attr_ps_calib.attr,
++ &dev_attr_ps_mode.attr,
++ &dev_attr_ps_rate.attr,
++ &dev_attr_ps_threshold.attr,
++ &dev_attr_ps_leds.attr,
++ &dev_attr_chip_id.attr,
++ NULL
++};
++
++static struct attribute_group bh1770glc_attribute_group = {
++ .attrs = sysfs_attrs
++};
++
++static const struct file_operations bh1770glc_ps_fops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .read = bh1770glc_ps_read,
++ .poll = bh1770glc_ps_poll,
++ .open = bh1770glc_ps_open,
++ .release = bh1770glc_ps_close,
++};
++
++static struct miscdevice bh1770glc_ps_miscdevice = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "bh1770glc_ps",
++ .fops = &bh1770glc_ps_fops
++};
++
++int bh1770glc_ps_init(struct bh1770glc_chip *chip)
++{
++ int err;
++ int i;
++
++ for (i = 0; i < BHCHIP_PS_CHANNELS; i++) {
++ chip->ps_thresholds[i] = BHCHIP_PS_DEF_THRES;
++ chip->ps_leds[i] = chip->pdata->led_def_curr[i];
++ }
++
++ err = bh1770glc_ps_mode(chip, BHCHIP_STANDBY);
++ if (err < 0)
++ goto fail1;
++
++ err = bh1770glc_ps_interrupt_control(chip, BHCHIP_DISABLE);
++ if (err < 0)
++ goto fail1;
++
++ bh1770glc_ps_rates(chip, BHCHIP_PS_DEFAULT_RATE,
++ BHCHIP_PS_DEF_RATE_THRESH);
++
++ INIT_DELAYED_WORK(&chip->ps_work, bh1770glc_ps_work);
++
++ bh1770glc_ps_miscdevice.parent = &chip->client->dev;
++ err = misc_register(&bh1770glc_ps_miscdevice);
++ if (err < 0) {
++ dev_err(&chip->client->dev, "Device registration failed\n");
++ goto fail1;
++ }
++
++ err = sysfs_create_group(&chip->client->dev.kobj,
++ &bh1770glc_attribute_group);
++ if (err < 0) {
++ dev_err(&chip->client->dev, "Sysfs registration failed\n");
++ goto fail2;
++ }
++ return 0;
++fail2:
++ misc_deregister(&bh1770glc_ps_miscdevice);
++fail1:
++ return err;
++
++}
++
++int bh1770glc_ps_destroy(struct bh1770glc_chip *chip)
++{
++ cancel_delayed_work_sync(&chip->ps_work);
++ sysfs_remove_group(&chip->client->dev.kobj,
++ &bh1770glc_attribute_group);
++ misc_deregister(&bh1770glc_ps_miscdevice);
++ return 0;
++}
+--- /dev/null
++++ b/drivers/misc/hmc6352.c
+@@ -0,0 +1,199 @@
++/*
++ * hmc6352.c - Honeywell Compass Driver
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/sysfs.h>
++
++static ssize_t compass_calibration_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret;
++ unsigned long val;
++ char cmd = 'C'; /* Calibrate */
++ char cmd1 = 'E'; /* Exit calibration mode */
++ struct i2c_msg msg[] = {
++ { client->addr, 0, 1, &cmd },
++ };
++ struct i2c_msg msg1[] = {
++ { client->addr, 0, 1, &cmd1 },
++ };
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val == 1) {
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1) {
++ dev_warn(dev, "i2c calib start cmd failed\n");
++ return ret;
++ }
++ } else if (val == 2) {
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1) {
++ dev_warn(dev, "i2c calib stop cmd failed\n");
++ return ret;
++ }
++ } else
++ return -EINVAL;
++
++ return count;
++}
++
++static ssize_t compass_heading_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++
++ struct i2c_client *client = to_i2c_client(dev);
++ static char cmd = 'A'; /* Get Data */
++ unsigned char i2c_data[2];
++ unsigned int ret, ret_val;
++ struct i2c_msg msg[] = {
++ { client->addr, 0, 1, &cmd },
++ };
++ struct i2c_msg msg1[] = {
++ { client->addr, I2C_M_RD, 2, i2c_data },
++ };
++
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1) {
++ dev_warn(dev, "i2c cmd 0x41 failed\n");
++ return ret;
++ }
++ msleep(10); /* after sending the 0x41 cmd we must wait 7-10 milliseconds */
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1) {
++ dev_warn(dev, "i2c read data cmd failed\n");
++ return ret;
++ }
++ ret_val = i2c_data[0];
++ ret_val = ((ret_val << 8) | i2c_data[1]);
++ return sprintf(buf, "%d.%d\n", ret_val/10, ret_val%10);
++}
++
++static ssize_t compass_power_mode_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val;
++ unsigned int ret;
++ static char cmd = 'S'; /* Sleep mode */
++ static char cmd1 = 'W'; /* Wake up */
++ struct i2c_msg msg[] = {
++ { client->addr, 0, 1, &cmd },
++ };
++ struct i2c_msg msg1[] = {
++ { client->addr, 0, 1, &cmd1 },
++ };
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ if (val == 0) {
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1)
++ dev_warn(dev, "i2c cmd sleep mode failed\n");
++ } else if (val == 1) {
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1)
++ dev_warn(dev, "i2c cmd active mode failed\n");
++ } else
++ return -EINVAL;
++
++ return count;
++}
++
++static DEVICE_ATTR(heading, S_IRUGO, compass_heading_data_show, NULL);
++static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
++static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
++
++static struct attribute *mid_att_compass[] = {
++ &dev_attr_heading.attr,
++ &dev_attr_calibration.attr,
++ &dev_attr_power_state.attr,
++ NULL
++};
++
++static struct attribute_group m_compass_gr = {
++ .name = "hmc6352",
++ .attrs = mid_att_compass
++};
++
++static int hmc6352_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++
++ res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
++ if (res) {
++ dev_err(&client->dev, "device_create_file failed\n");
++ return res;
++ }
++ dev_info(&client->dev, "%s HMC6352 compass chip found\n",
++ client->name);
++ return 0;
++}
++
++static int hmc6352_remove(struct i2c_client *client)
++{
++ sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
++ return 0;
++}
++
++static struct i2c_device_id hmc6352_id[] = {
++ { "hmc6352", 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, hmc6352_id);
++
++static struct i2c_driver hmc6352_driver = {
++ .driver = {
++ .name = "hmc6352",
++ },
++ .probe = hmc6352_probe,
++ .remove = hmc6352_remove,
++ .id_table = hmc6352_id,
++};
++
++static int __init sensor_hmc6352_init(void)
++{
++ return i2c_add_driver(&hmc6352_driver);
++}
++
++static void __exit sensor_hmc6352_exit(void)
++{
++ i2c_del_driver(&hmc6352_driver);
++}
++
++module_init(sensor_hmc6352_init);
++module_exit(sensor_hmc6352_exit);
++
++ MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
++MODULE_DESCRIPTION("hmc6352 Compass Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/misc/isl29015.c
+@@ -0,0 +1,554 @@
++/*
++ * isl29015.c - Intersil ISL29015 ALS & Proximity Driver
++ *
++ * Copyright (C) 2010 Aava Mobile Oy
++ *
++ * Based on isl29020.c
++ * by Kalhan Trisal <kalhan.trisal@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/err.h>
++#include <linux/sysfs.h>
++#include <linux/pm_runtime.h>
++
++#define REG_CMD_1 0x00
++#define REG_CMD_2 0x01
++#define REG_DATA_LSB 0x02
++#define REG_DATA_MSB 0x03
++#define ISL_MOD_MASK 0xE0
++#define ISL_MOD_POWERDOWN 0
++#define ISL_MOD_ALS_ONCE 1
++#define ISL_MOD_IR_ONCE 2
++#define ISL_MOD_PS_ONCE 3
++#define ISL_MOD_RESERVED 4
++#define ISL_MOD_ALS_CONT 5
++#define ISL_MOD_IR_CONT 6
++#define ISL_MOD_PS_CONT 7
++#define IR_CURRENT_MASK 0xC0
++#define IR_FREQ_MASK 0x30
++#define SENSOR_RANGE_MASK 0x03
++#define ISL_RES_MASK 0x0C
++
++static int last_mod;
++
++static DEFINE_MUTEX(mutex);
++
++static int isl_set_range(struct i2c_client *client, int range)
++{
++ int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++ if (ret_val < 0)
++ return -EINVAL;
++ ret_val &= ~SENSOR_RANGE_MASK; /* reset the range bits */
++ ret_val |= range;
++ ret_val = i2c_smbus_write_byte_data(client, REG_CMD_2, ret_val);
++
++ if (ret_val < 0)
++ return ret_val;
++ return range;
++}
++
++static int isl_set_mod(struct i2c_client *client, int mod)
++{
++ int ret, val, freq;
++
++ switch (mod) {
++ case ISL_MOD_POWERDOWN:
++ case ISL_MOD_RESERVED:
++ goto setmod;
++ case ISL_MOD_ALS_ONCE:
++ case ISL_MOD_ALS_CONT:
++ freq = 0;
++ break;
++ case ISL_MOD_IR_ONCE:
++ case ISL_MOD_IR_CONT:
++ case ISL_MOD_PS_ONCE:
++ case ISL_MOD_PS_CONT:
++ freq = 1;
++ break;
++ default:
++ return -EINVAL;
++ }
++ /* set IR frequency */
++ val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++ if (val < 0)
++ return -EINVAL;
++ val &= ~IR_FREQ_MASK;
++ if (freq)
++ val |= IR_FREQ_MASK;
++ ret = i2c_smbus_write_byte_data(client, REG_CMD_2, val);
++ if (ret < 0)
++ return -EINVAL;
++
++setmod:
++ /* set operation mode */
++ val = i2c_smbus_read_byte_data(client, REG_CMD_1);
++ if (val < 0)
++ return -EINVAL;
++ val &= ~ISL_MOD_MASK;
++ val |= (mod << 5);
++ ret = i2c_smbus_write_byte_data(client, REG_CMD_1, val);
++ if (ret < 0)
++ return -EINVAL;
++
++ if (mod != ISL_MOD_POWERDOWN)
++ last_mod = mod;
++
++ return mod;
++}
++
++static int isl_get_res(struct i2c_client *client)
++{
++ int val;
++
++ val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++
++ if (val < 0)
++ return -EINVAL;
++
++ val &= ISL_RES_MASK;
++ val >>= 2;
++
++ switch (val) {
++ case 0:
++ return 65536;
++ case 1:
++ return 4096;
++ case 2:
++ return 256;
++ case 3:
++ return 16;
++ default:
++ return -EINVAL;
++ }
++}
++
++static int isl_get_mod(struct i2c_client *client)
++{
++ int val;
++
++ val = i2c_smbus_read_byte_data(client, REG_CMD_1);
++ if (val < 0)
++ return -EINVAL;
++ return val >> 5;
++}
++
++static ssize_t
++isl_sensing_range_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int val;
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++ val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++
++ dev_dbg(dev, "%s: range: 0x%.2x\n", __func__, val);
++
++ if (val < 0)
++ return val;
++ return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
++}
++
++static ssize_t
++ir_current_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int val;
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++ val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++
++ dev_dbg(dev, "%s: IR current: 0x%.2x\n", __func__, val);
++
++ if (val < 0)
++ return -EINVAL;
++ val >>= 6;
++
++ switch (val) {
++ case 0:
++ val = 100;
++ break;
++ case 1:
++ val = 50;
++ break;
++ case 2:
++ val = 25;
++ break;
++ case 3:
++ val = 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (val)
++ val = sprintf(buf, "%d\n", val);
++ else
++ val = sprintf(buf, "%s\n", "12.5");
++ return val;
++}
++
++static ssize_t
++isl_sensing_mod_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int val;
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++ val = isl_get_mod(client);
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++
++ dev_dbg(dev, "%s: mod: 0x%.2x\n", __func__, val);
++
++ if (val < 0)
++ return val;
++
++ switch (val) {
++ case ISL_MOD_POWERDOWN:
++ return sprintf(buf, "%s\n", "0-Power-down");
++ case ISL_MOD_ALS_ONCE:
++ return sprintf(buf, "%s\n", "1-ALS once");
++ case ISL_MOD_IR_ONCE:
++ return sprintf(buf, "%s\n", "2-IR once");
++ case ISL_MOD_PS_ONCE:
++ return sprintf(buf, "%s\n", "3-Proximity once");
++ case ISL_MOD_RESERVED:
++ return sprintf(buf, "%s\n", "4-Reserved");
++ case ISL_MOD_ALS_CONT:
++ return sprintf(buf, "%s\n", "5-ALS continuous");
++ case ISL_MOD_IR_CONT:
++ return sprintf(buf, "%s\n", "6-IR continuous");
++ case ISL_MOD_PS_CONT:
++ return sprintf(buf, "%s\n", "7-Proximity continuous");
++ default:
++ return -EINVAL;
++ }
++}
++
++static ssize_t
++isl_output_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val, val, mod;
++ unsigned long int max_count, output = 0;
++ int temp;
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++
++ temp = i2c_smbus_read_byte_data(client, REG_DATA_MSB);
++ if (temp < 0)
++ goto err_exit;
++ ret_val = i2c_smbus_read_byte_data(client, REG_DATA_LSB);
++ if (ret_val < 0)
++ goto err_exit;
++ ret_val |= temp << 8;
++
++ dev_dbg(dev, "%s: Data: %04x\n", __func__, ret_val);
++
++ mod = isl_get_mod(client);
++ switch (mod) {
++ case ISL_MOD_ALS_CONT:
++ case ISL_MOD_ALS_ONCE:
++ case ISL_MOD_IR_ONCE:
++ case ISL_MOD_IR_CONT:
++ output = ret_val;
++ break;
++ case ISL_MOD_PS_CONT:
++ case ISL_MOD_PS_ONCE:
++ val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++ if (val < 0)
++ goto err_exit;
++ max_count = isl_get_res(client);
++ output = (((1 << (2 * (val & SENSOR_RANGE_MASK))) * 1000)
++ * ret_val) / max_count;
++ break;
++ default:
++ goto err_exit;
++ }
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++ return sprintf(buf, "%ld\n", output);
++
++err_exit:
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++ return -EINVAL;
++}
++
++static ssize_t
++isl_sensing_range_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned int ret_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ switch (val) {
++ case 1000:
++ val = 0;
++ break;
++ case 4000:
++ val = 1;
++ break;
++ case 16000:
++ val = 2;
++ break;
++ case 64000:
++ val = 3;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++ ret_val = isl_set_range(client, val);
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++
++ if (ret_val < 0)
++ return ret_val;
++ return count;
++}
++
++static ssize_t
++ir_current_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned int ret_val;
++ unsigned long val;
++
++ if (!strncmp(buf, "12.5", 4))
++ val = 3;
++ else {
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ switch (val) {
++ case 100:
++ val = 0;
++ break;
++ case 50:
++ val = 1;
++ break;
++ case 25:
++ val = 2;
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++
++ ret_val = i2c_smbus_read_byte_data(client, REG_CMD_2);
++ if (ret_val < 0)
++ goto err_exit;
++
++ ret_val &= ~IR_CURRENT_MASK; /*reset the bit before setting them */
++ ret_val |= (val << 6);
++
++ ret_val = i2c_smbus_write_byte_data(client, REG_CMD_2, ret_val);
++ if (ret_val < 0)
++ goto err_exit;
++
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++
++ return count;
++
++err_exit:
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++ return -EINVAL;
++}
++
++static ssize_t
++isl_sensing_mod_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val > 7)
++ return -EINVAL;
++
++ mutex_lock(&mutex);
++ pm_runtime_get_sync(dev);
++ ret_val = isl_set_mod(client, val);
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++
++ if (ret_val < 0)
++ return ret_val;
++ return count;
++}
++
++static DEVICE_ATTR(range, S_IRUGO | S_IWUSR,
++ isl_sensing_range_show, isl_sensing_range_store);
++static DEVICE_ATTR(mod, S_IRUGO | S_IWUSR,
++ isl_sensing_mod_show, isl_sensing_mod_store);
++static DEVICE_ATTR(ir_current, S_IRUGO | S_IWUSR,
++ ir_current_show, ir_current_store);
++static DEVICE_ATTR(output, S_IRUGO, isl_output_data_show, NULL);
++
++static struct attribute *mid_att_isl[] = {
++ &dev_attr_range.attr,
++ &dev_attr_mod.attr,
++ &dev_attr_ir_current.attr,
++ &dev_attr_output.attr,
++ NULL
++};
++
++static struct attribute_group m_isl_gr = {
++ .name = "isl29015",
++ .attrs = mid_att_isl
++};
++
++static int isl_set_default_config(struct i2c_client *client)
++{
++ int ret;
++ ret = i2c_smbus_write_byte_data(client, REG_CMD_1, 0xE0);
++ if (ret < 0)
++ return -EINVAL;
++ ret = i2c_smbus_write_byte_data(client, REG_CMD_2, 0xC3);
++ if (ret < 0)
++ return -EINVAL;
++ return 0;
++}
++
++static int
++isl29015_probe(struct i2c_client *client, const struct i2c_device_id *id)
++{
++ int res;
++
++ dev_info(&client->dev, "%s: ALS/PS chip found\n", client->name);
++
++ res = isl_set_default_config(client);
++ if (res < 0) {
++ pr_warn("isl29015: set default config failed!!\n");
++ return -EINVAL;
++ }
++
++ res = sysfs_create_group(&client->dev.kobj, &m_isl_gr);
++ if (res) {
++ pr_warn("isl29015: device create file failed!!\n");
++ return -EINVAL;
++ }
++
++ last_mod = 0;
++ isl_set_mod(client, ISL_MOD_POWERDOWN);
++ pm_runtime_enable(&client->dev);
++
++ dev_dbg(&client->dev, "isl29015 probe succeed!\n");
++
++ return res;
++}
++
++static int isl29015_remove(struct i2c_client *client)
++{
++ sysfs_remove_group(&client->dev.kobj, &m_isl_gr);
++ __pm_runtime_disable(&client->dev, false);
++ return 0;
++}
++
++static struct i2c_device_id isl29015_id[] = {
++ {"isl29015", 0},
++ {}
++};
++
++static int isl29015_runtime_suspend(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ dev_dbg(dev, "suspend\n");
++ isl_set_mod(client, ISL_MOD_POWERDOWN);
++ return 0;
++}
++
++static int isl29015_runtime_resume(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ dev_dbg(dev, "resume\n");
++ isl_set_mod(client, last_mod);
++ msleep(100);
++ return 0;
++}
++
++MODULE_DEVICE_TABLE(i2c, isl29015_id);
++
++static const struct dev_pm_ops isl29015_pm_ops = {
++ .runtime_suspend = isl29015_runtime_suspend,
++ .runtime_resume = isl29015_runtime_resume,
++};
++
++static struct i2c_driver isl29015_driver = {
++ .driver = {
++ .name = "isl29015",
++ .pm = &isl29015_pm_ops,
++ },
++ .probe = isl29015_probe,
++ .remove = isl29015_remove,
++ .id_table = isl29015_id,
++};
++
++static int __init sensor_isl29015_init(void)
++{
++ return i2c_add_driver(&isl29015_driver);
++}
++
++static void __exit sensor_isl29015_exit(void)
++{
++ i2c_del_driver(&isl29015_driver);
++}
++
++module_init(sensor_isl29015_init);
++module_exit(sensor_isl29015_exit);
++
++MODULE_AUTHOR("Aavamobile");
++MODULE_ALIAS("isl29015 ALS/PS");
++MODULE_DESCRIPTION("Intersil isl29015 ALS/PS Driver");
++MODULE_LICENSE("GPL v2");
++
+--- /dev/null
++++ b/drivers/misc/isl29020.c
+@@ -0,0 +1,236 @@
++/*
++ * isl29020.c - Intersil ALS Driver
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/sysfs.h>
++#include <linux/pm_runtime.h>
++
++#define ALS_MIN_RANGE_VAL 0
++#define ALS_MAX_RANGE_VAL 5
++
++static DEFINE_MUTEX(mutex);
++
++static ssize_t als_sensing_range_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int val;
++
++ val = i2c_smbus_read_byte_data(client, 0x00);
++
++ if (val < 0)
++ return val;
++ return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
++
++}
++
++static ssize_t als_lux_output_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val, val;
++ unsigned long int lux, max_count;
++ int temp;
++
++ pm_runtime_get_sync(dev);
++ msleep(100);
++
++ mutex_lock(&mutex);
++ temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
++ if (temp < 0) {
++ pm_runtime_put_sync(dev);
++ mutex_unlock(&mutex);
++ return temp;
++ }
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
++ mutex_unlock(&mutex);
++
++ if (ret_val < 0) {
++ pm_runtime_put_sync(dev);
++ return ret_val;
++ }
++
++ ret_val |= temp << 8;
++ val = i2c_smbus_read_byte_data(client, 0x00);
++ pm_runtime_put_sync(dev);
++ if (val < 0)
++ return val;
++ max_count = 65535;
++ lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / max_count;
++ return sprintf(buf, "%ld\n", lux);
++}
++
++static ssize_t als_sensing_range_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned int ret_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val < 1 || val > 4)
++ return -EINVAL;
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x00);
++
++ ret_val &= 0xFC; /*reset the bit before setting them */
++ ret_val |= val - 1;
++ ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
++
++ if (ret_val < 0)
++ return ret_val;
++ return count;
++}
++
++static void als_set_power_state(struct i2c_client *client, int enable)
++{
++ int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x00);
++ if (ret_val < 0)
++ return;
++
++ if (enable)
++ ret_val |= 0x80;
++ else
++ ret_val &= 0x7F;
++
++ i2c_smbus_write_byte_data(client, 0x00, ret_val);
++}
++
++static DEVICE_ATTR(sensing_range, S_IRUGO | S_IWUSR,
++ als_sensing_range_show, als_sensing_range_store);
++static DEVICE_ATTR(lux_output, S_IRUGO, als_lux_output_data_show, NULL);
++
++static struct attribute *mid_att_als[] = {
++ &dev_attr_sensing_range.attr,
++ &dev_attr_lux_output.attr,
++ NULL
++};
++
++static struct attribute_group m_als_gr = {
++ .name = "isl29020",
++ .attrs = mid_att_als
++};
++
++static int als_set_default_config(struct i2c_client *client)
++{
++ int retval;
++
++ retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
++ if (retval < 0) {
++ dev_err(&client->dev, "default write failed.");
++ return retval;
++ }
++	return 0;
++}
++
++static int isl29020_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++
++ res = als_set_default_config(client);
++ if (res < 0)
++ return res;
++
++ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
++ if (res) {
++ dev_err(&client->dev, "isl29020: device create file failed\n");
++ return res;
++ }
++ dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
++ als_set_power_state(client, 0);
++ pm_runtime_enable(&client->dev);
++ return res;
++}
++
++static int isl29020_remove(struct i2c_client *client)
++{
++ struct als_data *data = i2c_get_clientdata(client);
++ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
++ kfree(data);
++ return 0;
++}
++
++static struct i2c_device_id isl29020_id[] = {
++ { "isl29020", 0 },
++ { }
++};
++
++static int isl29020_runtime_suspend(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ printk("DEBUG: suspend\n");
++ als_set_power_state(client, 0);
++ return 0;
++}
++
++static int isl29020_runtime_resume(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ printk("DEBUG: resume\n");
++ als_set_power_state(client, 1);
++ return 0;
++}
++
++MODULE_DEVICE_TABLE(i2c, isl29020_id);
++
++static const struct dev_pm_ops isl29020_pm_ops = {
++ .runtime_suspend = isl29020_runtime_suspend,
++ .runtime_resume = isl29020_runtime_resume,
++};
++
++static struct i2c_driver isl29020_driver = {
++ .driver = {
++ .name = "isl29020",
++ .pm = &isl29020_pm_ops,
++ },
++ .probe = isl29020_probe,
++ .remove = isl29020_remove,
++ .id_table = isl29020_id,
++};
++
++static int __init sensor_isl29020_init(void)
++{
++ return i2c_add_driver(&isl29020_driver);
++}
++
++static void __exit sensor_isl29020_exit(void)
++{
++ i2c_del_driver(&isl29020_driver);
++}
++
++module_init(sensor_isl29020_init);
++module_exit(sensor_isl29020_exit);
++
++MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
++MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/misc/koski_hwid.c
+@@ -0,0 +1,255 @@
++/*
++ * koski_hwid.c Driver for hardware id query
++ * Copyright (c) 2009 aava Mobile
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/gpio.h>
++#include <linux/device.h>
++
++#include <asm/mrst.h>
++#include <asm/intel_scu_ipc.h>
++#include <linux/koski_hwid.h>
++
++#define BUILD_ID_GPIO_START 24
++#define BUILD_ID_GPIO_END 27
++
++#define PRODUCT_ID_GPIO_START 27
++#define PRODUCT_ID_GPIO_END 30
++
++#define KOSKI_BUILD_ID_EV2 0x5 /* 101 */
++#define KOSKI_BUILD_ID_DV1 0x6 /* 110 */
++#define KOSKI_BUILD_ID_DV2 0x7 /* 111 */
++
++#define KOSKI_PRODUCT_ID_KOSKI 0x6 /* 110 */
++#define KOSKI_PRODUCT_ID_SC 0x7 /* 111 */
++
++#define IPCMSG_HWID_FW_REVISION 0xF4
++#define IPC_CMD_HWID_R 1
++
++static koski_id build_table[4] = {
++ { KOSKI_BUILD_ID_EV2, "EV2" , KOSKI_EV2 },
++ { KOSKI_BUILD_ID_DV1, "DV1" , KOSKI_DV1 },
++ { KOSKI_BUILD_ID_DV2, "DV2" , KOSKI_DV2 },
++ { 0, "EV2" , KOSKI_EV2 }
++};
++
++static koski_id product_table[3] = {
++ { KOSKI_PRODUCT_ID_KOSKI, "Koski" , PRODUCT_KOSKI },
++ { KOSKI_PRODUCT_ID_SC, "South Canyon" , PRODUCT_SC },
++ { 0, "Koski" , PRODUCT_KOSKI }
++};
++
++static int koski_product_id = -1;
++static int koski_build_id = -1;
++static char koski_fw_ver_info[16];
++
++static void __init koski_fw_ver_info_init(void)
++{
++ ssize_t status;
++ int i = 0;
++ unsigned char mrst_fw_ver_info[16];
++
++ status = intel_scu_ipc_command(IPCMSG_HWID_FW_REVISION,
++ IPC_CMD_HWID_R, NULL, 0, (u32 *)mrst_fw_ver_info, 4);
++ if (status < 0) {
++ pr_err("read fw version error\n");
++ return ;
++ }
++
++ for (i = 0; i < 16; i++)
++ koski_fw_ver_info[i] = mrst_fw_ver_info[i];
++}
++
++static void __init koski_product_id_init(char koski_hw_id)
++{
++ int i = 0;
++ koski_product_id = KOSKI_EV2; /* default ev2 */
++ while (true) {
++ if (product_table[i].gpio_value == (koski_hw_id & 0x0f) ||
++ product_table[i].gpio_value == 0) {
++ koski_product_id = product_table[i].id;
++ break;
++ }
++
++ i++;
++ }
++}
++
++static void __init koski_build_id_init(char koski_hw_id)
++{
++ int i = 0;
++ koski_build_id = PRODUCT_KOSKI; /* default ev2 */
++ while (true) {
++ if (build_table[i].gpio_value == (koski_hw_id >> 4) ||
++ build_table[i].gpio_value == 0) {
++ koski_build_id = build_table[i].id;
++ break;
++ }
++
++ i++;
++ }
++}
++
++static int __init koski_read_hwid(void)
++{
++ int i = 0, err, cnt1, cnt2;
++ char build_version = 0, product_version = 0;
++ u32 data;
++ char koski_hw_id;
++
++ if (mrst_platform_id() != MRST_PLATFORM_AAVA_SC)
++ return -1;
++
++ data = 0xAAAA0000;
++ err = intel_scu_ipc_i2c_cntrl(0x01040022, &data);
++ if (err < 0) {
++ pr_err("ipc i2c cntrl error\n");
++ goto err3;
++ }
++
++ for (i = (cnt1 = BUILD_ID_GPIO_START); i < BUILD_ID_GPIO_END; i++, cnt1++) {
++ err = gpio_request(i, NULL);
++ if (err) {
++ pr_err("koski_hwid GPIO pin %d "
++ "failed to request.\n", i);
++ goto err2;
++ }
++
++ gpio_direction_input(i);
++
++ if (gpio_get_value(i)) {
++ build_version |= (1 << (i - BUILD_ID_GPIO_START));
++ pr_info("KOSKI HW GPIO %d = 1 (%x)", i, build_version);
++ }
++ }
++
++ for (i = (cnt2 = PRODUCT_ID_GPIO_START); i < PRODUCT_ID_GPIO_END; i++, cnt2++) {
++ err = gpio_request(i, NULL);
++ if (err) {
++ pr_err("koski_hwid GPIO pin %d "
++ "failed to request.\n", i);
++ goto err1;
++ }
++
++ gpio_direction_input(i);
++
++ if (gpio_get_value(i)) {
++ product_version |= (1 << (i - PRODUCT_ID_GPIO_START));
++ pr_info("KOSKI HW GPIO %d = 1 (%x)",
++ i, product_version);
++ }
++ }
++
++ data = 0x55550000;
++ err = intel_scu_ipc_i2c_cntrl(0x01040022, &data);
++ if (err < 0) {
++ pr_err("ipc i2c cntrl error\n");
++ goto err3;
++ }
++
++ koski_hw_id = (build_version << 4) | product_version;
++
++ koski_product_id_init(koski_hw_id);
++ koski_build_id_init(koski_hw_id);
++ koski_fw_ver_info_init();
++
++err1:
++ for (i = PRODUCT_ID_GPIO_START; i < cnt2; i++)
++ gpio_free(i);
++err2:
++ for (i = BUILD_ID_GPIO_START; i < cnt1; i++)
++ gpio_free(i);
++err3:
++ return err;
++}
++
++static ssize_t koski_query_fw_info_func(struct class *class,
++ struct class_attribute *attr, char *buf)
++{
++ int i;
++ for (i = 0; i < 16; i++)
++ buf[i] = koski_fw_ver_info[i];
++ return 16;
++}
++
++static ssize_t koski_get_build_id_func(struct class *class,
++ struct class_attribute *attr, char *buf)
++{
++ int i;
++ for (i = 0; i < 4; i++) {
++ if (build_table[i].id == koski_build_id) {
++ memcpy(buf, build_table[i].name,
++ strlen(build_table[i].name));
++ return strlen(build_table[i].name);
++ }
++ }
++
++ return 0;
++}
++
++static ssize_t koski_get_product_id_func(struct class *class,
++ struct class_attribute *attr, char *buf)
++{
++ int i;
++ for (i = 0; i < 3; i++) {
++ if (product_table[i].id == koski_product_id) {
++ memcpy(buf, product_table[i].name,
++ strlen(product_table[i].name));
++ return strlen(product_table[i].name);
++ }
++ }
++
++ return 0;
++}
++
++int get_koski_product_id(void)
++{
++ return koski_product_id;
++}
++EXPORT_SYMBOL(get_koski_product_id);
++
++int get_koski_build_id(void)
++{
++ return koski_build_id;
++}
++EXPORT_SYMBOL(get_koski_build_id);
++
++static struct class_attribute koski_class_attrs[] = {
++ __ATTR(koski_query_fw_info, 0444, koski_query_fw_info_func, NULL),
++ __ATTR(koski_get_build_id, 0444, koski_get_build_id_func, NULL),
++ __ATTR(koski_get_pruduct_id, 0444, koski_get_product_id_func, NULL),
++ __ATTR_NULL,
++};
++
++static struct class koski_class = {
++ .name = "koski_platform",
++ .owner = THIS_MODULE,
++ .class_attrs = koski_class_attrs,
++};
++
++static int __init koski_sysfs_init(void)
++{
++ koski_read_hwid();
++ return class_register(&koski_class);
++}
++postcore_initcall(koski_sysfs_init);
++
++MODULE_AUTHOR("Samuli Konttila <samuli.konttila@aavamobile.com>");
++MODULE_DESCRIPTION("Koski Platform Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/misc/pti.c
+@@ -0,0 +1,754 @@
++/*
++ * pti.c - PTI driver for cJTAG data extraction
++ *
++ * Copyright (C) Intel 2010
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ */
++
++
++/*
++ * The PTI (Parallel Trace Interface) driver directs trace data routed from
++ * various parts in the system out through the Intel Penwell PTI port and
++ * out of the mobile device for analysis with a debugging tool
++ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
++ * compact JTAG, standard.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/tty.h>
++#include <linux/tty_driver.h>
++#include <linux/pci.h>
++#include <linux/mutex.h>
++#include <linux/miscdevice.h>
++#include <linux/pti.h>
++
++#define DRIVERNAME "pti"
++#define PCINAME "pciPTI"
++#define TTYNAME "ttyPTI"
++#define CHARNAME "pti"
++#define MAX_APP_IDS 256
++#define MAX_OS_IDS 128
++#define OS_BASE_ID 72 /* base OS master ID address */
++#define APP_BASE_ID 80 /* base App master ID address */
++
++struct pti_tty {
++ struct masterchannel *mc;
++};
++
++struct pti_dev {
++ struct tty_port port;
++ unsigned long pti_addr;
++ unsigned long aperture_base;
++ void __iomem *pti_ioaddr;
++ unsigned long pti_iolen;
++ u8 IA_App[MAX_APP_IDS];
++ u8 IA_OS[MAX_OS_IDS];
++};
++
++
++static DEFINE_MUTEX(alloclock);
++
++static struct pci_device_id pci_ids[] __devinitconst = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B) },
++ {}
++};
++
++static struct tty_driver *pti_tty_driver;
++
++static struct pti_dev *drv_data;
++
++
++#define DTS 0x30 /* offset for last dword of a PTI message */
++
++/**
++ * pti_write_to_aperture() - THE private write function to PTI HW.
++ * @mc: The 'aperture'. It's part of a write address that holds
++ * a master and channel ID.
++ * @buf: Data being written to the HW that will ultimately be seen
++ * in a debugging tool (Fido, Lauterbach).
++ * @len: Size of buffer.
++ *
++ * Since each aperture is specified by a unique
++ * master/channel ID, no two processes will be writing
++ * to the same aperture at the same time so no lock is required. The
++ * PTI-Output agent will send these out in the order that they arrived, and
++ * thus, it will intermix these messages. The debug tool can then later
++ * regroup the appropriate message segments together reconstituting each
++ * message.
++ */
++static void pti_write_to_aperture(struct masterchannel *mc, u8 *buf, int len)
++{
++ int dwordcnt, final, i;
++ union {
++ u32 val;
++ u8 c[4];
++ } ptiword;
++ u8 *p;
++ u32 __iomem *aperture;
++
++ p = buf;
++
++ /*
++ calculate the aperture offset from the base using the master and
++ channel id's.
++ */
++ aperture = drv_data->pti_ioaddr + (mc->master << 15)
++ + (mc->channel << 8);
++
++ dwordcnt = len >> 2;
++ final = len - (dwordcnt << 2); /* final = trailing bytes */
++ if (final == 0 && dwordcnt != 0) { /* always have a final dword */
++ final += 4;
++ dwordcnt--;
++ }
++
++ /*
++ FIXME: This algorithm builds the dword from the input buffer.
++ This algorithm does work correctly with the PTI HW
++ and Fido debugging HW. However, this got flagged in upstream
++ review not conforming to proper endian practices.
++ u32 ptiword = cpu_to_le32(*(u32 *)p);
++	was tried but was incorrect endianness. Then the Fido
++ HW used to test this code broke. The goal is to submit
++ something known to work and then fix this when it can be tested.
++ */
++ for (i = 0; i < dwordcnt; i++) {
++ ptiword.c[3] = *p++;
++ ptiword.c[2] = *p++;
++ ptiword.c[1] = *p++;
++ ptiword.c[0] = *p++;
++ pr_debug("%s(%d): PTI aperture: master(%d), channel(%d)\n",
++ __func__, __LINE__, mc->master, mc->channel);
++ pr_debug("%s(%d): PTI double word: %#x\n\n",
++ __func__, __LINE__, ptiword.val);
++ iowrite32(ptiword.val, aperture);
++ }
++
++ aperture += DTS; /* adding DTS signals that is EOM */
++ ptiword.val = 0;
++ /*
++ FIXME: This has the same issue as stated in other FIXME.
++ u32 ptiword |= *p++ << (8 * i); was tried and had the
++	same character-swapping endianness problem.
++ */
++ for (i = 0; i < final; i++)
++ ptiword.c[3-i] = *p++;
++
++ pr_debug("%s(%d): PTI aperture: master(%d), channel(%d)\n",
++ __func__, __LINE__, mc->master, mc->channel);
++ pr_debug("%s(%d): Final PTI double word: %#x\n\n",
++ __func__, __LINE__, ptiword.val);
++ iowrite32(ptiword.val, aperture);
++
++ return;
++}
++
++/**
++ * getID(): Allocate a master and channel ID.
++ *
++ * @IDarray:
++ * @max_IDS: The max amount of available write IDs to use.
++ * @baseID: The starting SW channel ID, based on the Intel
++ * PTI arch.
++ *
++ * @return: masterchannel struct containing master, channel ID address,
++ * or 0 for error.
++ *
++ * Each bit in the arrays IA_App and IA_OS correspond to a master and
++ * channel id. The bit is one if the id is taken and 0 if free. For
++ * every master there are 128 channel id's.
++ */
++static struct masterchannel *getID(u8 *IDarray, int max_IDS, int baseID)
++{
++ struct masterchannel *mc;
++ int i, j, mask;
++
++ mc = kmalloc(sizeof(struct masterchannel), GFP_KERNEL);
++ if (mc == NULL)
++ return 0;
++
++ /* look for a byte with a free bit */
++ for (i = 0; i < max_IDS; i++)
++ if (IDarray[i] != 0xff)
++ break;
++ if (i == max_IDS)
++ return 0;
++ /* find the bit */
++ mask = 0x80;
++ for (j = 0; j < 8; j++) {
++ if ((IDarray[i] & mask) == 0)
++ break;
++ mask >>= 1;
++ }
++
++ /* grab it */
++ IDarray[i] |= mask;
++ mc->master = (i>>4)+baseID;
++ mc->channel = ((i & 0xf)<<3) + j;
++ return mc;
++}
++
++/*
++ The following three functions:
++	mipi_request_masterchannel(), mipi_release_masterchannel()
++	and mipi_pti_writedata() are an API for other kernel drivers to
++ access PTI.
++*/
++
++/**
++ * mipi_request_masterchannel() - Kernel API function used to allocate
++ * a master, channel ID address to write to
++ * PTI HW.
++ * @type: 0- request Application master, channel aperture ID write address.
++ * 1- request OS master, channel aperture ID write address.
++ * Other values, error.
++ * @return: masterchannel struct or 0 for error.
++ *
++ */
++struct masterchannel *mipi_request_masterchannel(u8 type)
++{
++ struct masterchannel *mc;
++
++ mutex_lock(&alloclock);
++
++ switch (type) {
++
++ case 0:
++ mc = getID(drv_data->IA_App, MAX_APP_IDS, APP_BASE_ID);
++ break;
++
++ case 1:
++ mc = getID(drv_data->IA_OS, MAX_OS_IDS, OS_BASE_ID);
++ break;
++
++ default:
++ mutex_unlock(&alloclock);
++ return 0;
++ }
++
++ mutex_unlock(&alloclock);
++ return mc;
++}
++EXPORT_SYMBOL(mipi_request_masterchannel);
++
++/**
++ * mipi_release_masterchannel() - Kernel API function used to release
++ * a master, channel ID address
++ * used to write to PTI HW.
++ * @mc: master, channel aperture ID address to be released.
++ *
++ */
++void mipi_release_masterchannel(struct masterchannel *mc)
++{
++ u8 master, channel, i;
++ if (mc) {
++ master = mc->master;
++ channel = mc->channel;
++
++ if (master >= 80) {
++ i = ((master-80) << 4) + (channel>>3);
++ drv_data->IA_App[i] &= ~(0x80>>(channel & 0x7));
++ }
++ if (master >= 72) {
++ i = ((master-72) << 4) + (channel>>3);
++ drv_data->IA_OS[i] &= ~(0x80>>(channel & 0x7));
++ }
++
++ kfree(mc);
++ }
++}
++EXPORT_SYMBOL(mipi_release_masterchannel);
++
++/**
++ * mipi_pti_writedata() - Kernel API function used to write trace
++ * debugging data to PTI HW.
++ *
++ * @mc: Master, channel aperture ID address to write to.
++ * Null value will return with no write occurring.
++ * @buf: Trace debugging data to write to the PTI HW.
++ * Null value will return with no write occurring.
++ * @count: Size of buf. Value of 0 or a negative number will
++ * return with no write occurring.
++ */
++void mipi_pti_writedata(struct masterchannel *mc, u8 *buf, int count)
++{
++ /*
++ since this function is exported, this is treated like an
++ API function, thus, all parameters should
++ be checked for validity.
++ */
++ if ((mc != NULL) && (buf != NULL) && (count > 0)) {
++ pti_write_to_aperture(mc, buf, count);
++ pr_debug("%s(%d): buf: %s, len: %d\n", __func__, __LINE__,
++ buf, count);
++ }
++ return;
++}
++EXPORT_SYMBOL(mipi_pti_writedata);
++
++static const struct tty_port_operations tty_port_ops = {
++};
++
++static void __devexit pti_pci_remove(struct pci_dev *pdev)
++{
++ struct pti_dev *drv_data;
++
++ drv_data = pci_get_drvdata(pdev);
++ if (drv_data != NULL) {
++ pci_iounmap(pdev, drv_data->pti_ioaddr);
++ pci_set_drvdata(pdev, NULL);
++ kfree(drv_data);
++ pci_release_region(pdev, 0);
++ pci_disable_device(pdev);
++ }
++}
++
++/*
++ for the tty_driver_*() basic function descriptions, see tty_driver.h.
++ Specific header comments made for PTI-related specifics.
++*/
++
++/**
++ * pti_tty_driver_open()- Open an Application master, channel aperture
++ * ID to the PTI device via tty device.
++ *
++ * @param tty: tty interface.
++ * @param filp: filp interface pased to tty_port_open() call.
++ *
++ * @return int : Success = 0, otherwise fail.
++ *
++ * The main purpose of using the tty device interface is to route
++ * syslog daemon messages to the PTI HW and out of the handheld platform
++ * and to the Fido/Lauterbach device.
++ */
++static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp)
++{
++ struct pti_tty *pti_tty_data;
++ struct masterchannel *mc;
++ int ret = 0;
++
++ pr_debug("%s %s(%d): Called.\n", __FILE__, __func__, __LINE__);
++
++ /*
++ we actually want to allocate a new channel per open, per
++ system arch. HW gives more than plenty channels for a single
++ system task to have its own channel to write trace data. This
++ also removes a locking requirement for the actual write
++ procedure.
++ */
++ ret = tty_port_open(&drv_data->port, tty, filp);
++ pti_tty_data = tty->driver_data;
++ mc = mipi_request_masterchannel(0);
++ pti_tty_data->mc = mc;
++
++ return ret;
++}
++
++/**
++ * pti_tty_driver_close()- close tty device and release Application
++ * master, channel aperture ID to the PTI device via tty device.
++ *
++ * @param tty: tty interface.
++ * @param filp: filp interface pased to tty_port_close() call.
++ *
++ * The main purpose of using the tty device interface is to route
++ * syslog daemon messages to the PTI HW and out of the handheld platform
++ * and to the Fido/Lauterbach device.
++ */
++static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
++{
++ struct pti_tty *pti_tty_data;
++ struct masterchannel *mc;
++
++ pr_debug("%s(%d): Called.\n", __func__, __LINE__);
++
++ pti_tty_data = tty->driver_data;
++ if (pti_tty_data != NULL) {
++ mc = pti_tty_data->mc;
++ mipi_release_masterchannel(mc);
++ }
++
++ tty_port_close(&drv_data->port, tty, filp);
++
++ return;
++}
++
++static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
++{
++ int idx = tty->index;
++ struct pti_tty *pti_tty_data;
++
++ int ret = tty_init_termios(tty);
++
++ if (ret == 0) {
++ tty_driver_kref_get(driver);
++ tty->count++;
++ driver->ttys[idx] = tty;
++
++ pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL);
++ if (pti_tty_data == NULL)
++ return -ENOMEM;
++
++ tty->driver_data = pti_tty_data;
++ }
++
++ return ret;
++}
++
++static void pti_tty_cleanup(struct tty_struct *tty)
++{
++ struct pti_tty *pti_tty_data;
++
++ pti_tty_data = tty->driver_data;
++ if (pti_tty_data != NULL)
++ kfree(pti_tty_data);
++
++ tty->driver_data = NULL;
++}
++
++/**
++ * pti_tty_driver_write(): Write trace debugging data through the char
++ * interface to the PTI HW. Part of the misc device implementation.
++ *
++ * @param filp: Contains private data which is used to obtain
++ * master, channel write ID.
++ * @param data: trace data to be written.
++ * @param len: # of byte to write.
++ * @param ppose: Not used in this function implementation.
++ * @return int : # of bytes written, or error. -EMSGSIZE is
++ * returned if length is beyond 8k.
++ */
++int pti_tty_driver_write(struct tty_struct *tty,
++ const unsigned char *buf, int len)
++{
++ struct masterchannel *mc;
++ struct pti_tty *pti_tty_data;
++
++ pr_debug("%s(%d): buf: %s, len: %d\n", __func__, __LINE__, buf, len);
++
++ pti_tty_data = tty->driver_data;
++ mc = pti_tty_data->mc;
++ pti_write_to_aperture(mc, (u8 *)buf, len);
++
++ return len;
++}
++
++int pti_tty_write_room(struct tty_struct *tty)
++{
++ return 2048;
++}
++
++/**
++ * pti_char_open()- Open an Application master, channel aperture
++ * ID to the PTI device. Part of the misc device implementation.
++ *
++ * @param inode: not used.
++ * @param filp: Output- will have a masterchannel struct set containing
++ * the allocated application PTI aperture write address.
++ *
++ * @return int : Success = 0, otherwise fail. As of right now,
++ * it is not sure what needs to really be initialized
++ * for open(), so it always returns 0.
++ */
++int pti_char_open(struct inode *inode, struct file *filp)
++{
++ struct masterchannel *mc;
++
++ mc = mipi_request_masterchannel(0);
++ if (mc == NULL)
++ return -ENOMEM;
++ filp->private_data = mc;
++ return 0;
++}
++
++/**
++ * pti_char_release()- Close a char channel to the PTI device. Part
++ * of the misc device implementation.
++ *
++ * @param inode: Not used in this implementation.
++ * @param filp: Contains private_data that contains the master, channel
++ * ID to be released by the PTI device.
++ *
++ * @return int : Success = 0
++ */
++int pti_char_release(struct inode *inode, struct file *filp)
++{
++ mipi_release_masterchannel(filp->private_data);
++
++ return 0;
++}
++
++/**
++ * pti_char_write(): Write trace debugging data through the char
++ * interface to the PTI HW. Part of the misc device implementation.
++ *
++ * @param filp: Contains private data which is used to obtain
++ * master, channel write ID.
++ * @param data: trace data to be written.
++ * @param len: # of bytes to write.
++ * @param ppose: Not used in this function implementation.
++ * @return int : # of bytes written, or error. -EMSGSIZE is
++ * returned if length is beyond 8k.
++ */
++ssize_t pti_char_write(struct file *filp, const char *data, size_t len,
++ loff_t *ppose)
++{
++ int retval;
++
++ struct masterchannel *mc;
++ void *kbuf;
++
++ /*
++ adding a limit on the size of the buffer, since this
++ is a value that can be passed in by a user and we want to
++ minimize the chance of crashing alloc. Returning
++ EMSGSIZE actually seems to be the best error code
++ for a user to figure out what happened.
++ */
++ if (len > 8192)
++ return -EMSGSIZE;
++
++ mc = filp->private_data;
++
++ kbuf = kmalloc(len, GFP_KERNEL);
++ if (kbuf == NULL)
++ return 0;
++ retval = copy_from_user(kbuf, data, len);
++ if (retval) {
++ kfree(kbuf);
++ return -EFAULT;
++ }
++
++ pr_debug("%s(%d): buf: %s, len: %d\n", __func__, __LINE__, data, len);
++ pti_write_to_aperture(mc, kbuf, len);
++ kfree(kbuf);
++ kbuf = 0;
++
++ return len;
++}
++
++const struct tty_operations pti_tty_driver_ops = {
++ .open = pti_tty_driver_open,
++ .close = pti_tty_driver_close,
++ .write = pti_tty_driver_write,
++ .write_room = pti_tty_write_room,
++ .install = pti_tty_install,
++ .cleanup = pti_tty_cleanup
++};
++
++const struct file_operations pti_char_driver_ops = {
++ .owner = THIS_MODULE,
++ .open = pti_char_open,
++ .release = pti_char_release,
++ .write = pti_char_write,
++};
++
++static struct miscdevice pti_char_driver = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = CHARNAME,
++ .fops = &pti_char_driver_ops
++};
++
++/*
++ Note the _probe() call sets everything up and ties the char and tty
++ to successfully detecting the PTI device on the pci bus.
++*/
++
++static int __devinit pti_pci_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ int retval = -EINVAL;
++ int pci_bar = 1;
++
++ dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
++ __func__, __LINE__, pdev->vendor, pdev->device);
++
++ retval = pci_enable_device(pdev);
++ if (retval != 0) {
++ dev_err(&pdev->dev,
++ "%s: pci_enable_device() returned error %d\n",
++ __func__, retval);
++ return retval;
++ }
++
++ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
++
++ if (drv_data == NULL) {
++ retval = -ENOMEM;
++ dev_err(&pdev->dev,
++ "%s(%d): kmalloc() returned NULL memory.\n",
++ __func__, __LINE__);
++ return retval;
++ }
++ drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
++
++ retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
++ if (retval != 0) {
++ dev_err(&pdev->dev,
++ "%s(%d): pci_request_region() returned error %d\n",
++ __func__, __LINE__, retval);
++ kfree(drv_data);
++ return retval;
++ }
++ drv_data->pti_iolen = pci_resource_len(pdev, pci_bar);
++ drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
++ drv_data->pti_ioaddr =
++ ioremap_nocache((u32)drv_data->aperture_base,
++ APERTURE_LEN);
++ if (!drv_data->pti_ioaddr) {
++ pci_release_region(pdev, pci_bar);
++ retval = -ENOMEM;
++ kfree(drv_data);
++ return retval;
++ }
++
++ pci_set_drvdata(pdev, drv_data);
++
++ tty_port_init(&drv_data->port);
++ drv_data->port.ops = &tty_port_ops;
++
++ tty_register_device(pti_tty_driver, 0, NULL);
++
++ retval = misc_register(&pti_char_driver);
++ if (retval) {
++ pr_err("%s(%d): CHAR registration failed of pti driver\n",
++ __func__, __LINE__);
++ pr_err("%s(%d): Error value returned: %d\n",
++ __func__, __LINE__, retval);
++ return retval;
++ }
++
++ return retval;
++}
++
++static struct pci_driver pti_pci_driver = {
++ .name = PCINAME,
++ .id_table = pci_ids,
++ .probe = pti_pci_probe,
++ .remove = pti_pci_remove,
++};
++
++/**
++ *
++ * pti_init():
++ *
++ * @return int __init: 0 for success, any other value error.
++ *
++ */
++static int __init pti_init(void)
++{
++ int retval = -EINVAL;
++
++ /* First register module as tty device */
++
++ pti_tty_driver = alloc_tty_driver(1);
++ if (pti_tty_driver == NULL) {
++ pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n",
++ __func__, __LINE__);
++ return -ENOMEM;
++ }
++
++ pti_tty_driver->owner = THIS_MODULE;
++ pti_tty_driver->magic = TTY_DRIVER_MAGIC;
++ pti_tty_driver->driver_name = DRIVERNAME;
++ pti_tty_driver->name = TTYNAME;
++ pti_tty_driver->major = 0;
++ pti_tty_driver->minor_start = 0;
++ pti_tty_driver->minor_num = 1;
++ pti_tty_driver->num = 1;
++ pti_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
++ pti_tty_driver->subtype = SYSTEM_TYPE_SYSCONS;
++ pti_tty_driver->flags = TTY_DRIVER_REAL_RAW |
++ TTY_DRIVER_DYNAMIC_DEV;
++ pti_tty_driver->init_termios = tty_std_termios;
++
++ tty_set_operations(pti_tty_driver, &pti_tty_driver_ops);
++
++ retval = tty_register_driver(pti_tty_driver);
++ if (retval) {
++ pr_err("%s(%d): TTY registration failed of pti driver\n",
++ __func__, __LINE__);
++ pr_err("%s(%d): Error value returned: %d\n",
++ __func__, __LINE__, retval);
++
++ pti_tty_driver = 0;
++ return retval;
++ }
++
++ retval = pci_register_driver(&pti_pci_driver);
++
++ if (retval) {
++ pr_err("%s(%d): PCI registration failed of pti driver\n",
++ __func__, __LINE__);
++ pr_err("%s(%d): Error value returned: %d\n",
++ __func__, __LINE__, retval);
++
++ tty_unregister_driver(pti_tty_driver);
++ pr_err("%s(%d): Unregistering TTY part of pti driver\n",
++ __func__, __LINE__);
++ pti_tty_driver = 0;
++ return retval;
++ }
++
++ return retval;
++}
++
++/**
++ * pti_exit(): Unregisters this module as a tty and pci driver.
++ */
++static void __exit pti_exit(void)
++{
++ int retval;
++
++	/* If some thread is hanging onto the alloclock, force it to release
++ * it because we are shutting down.
++ */
++ if (mutex_is_locked(&alloclock) == 1)
++ mutex_unlock(&alloclock);
++ mutex_destroy(&alloclock);
++
++ retval = misc_deregister(&pti_char_driver);
++ if (retval) {
++ pr_err("%s(%d): CHAR unregistration failed of pti driver\n",
++ __func__, __LINE__);
++ pr_err("%s(%d): Error value returned: %d\n",
++ __func__, __LINE__, retval);
++ }
++
++ tty_unregister_device(pti_tty_driver, 0);
++
++ retval = tty_unregister_driver(pti_tty_driver);
++ if (retval) {
++ pr_err("%s(%d): TTY unregistration failed of pti driver\n",
++ __func__, __LINE__);
++ pr_err("%s(%d): Error value returned: %d\n",
++ __func__, __LINE__, retval);
++ }
++
++ kfree(drv_data);
++
++ return;
++}
++
++module_init(pti_init);
++module_exit(pti_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Ken Mills, Jay Freyensee");
++MODULE_DESCRIPTION("PTI Driver");
++
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -535,7 +535,8 @@
+ * messages to tell when the card is present.
+ */
+
+- sprintf(md->disk->disk_name, "mmcblk%d", devidx);
++ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
++ "mmcblk%d", devidx);
+
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
+--- a/drivers/mmc/card/queue.c
++++ b/drivers/mmc/card/queue.c
+@@ -42,6 +42,29 @@
+ return BLKPREP_OK;
+ }
+
++static void acquire_ownership(struct mmc_queue *mq)
++{
++ struct mmc_card *card = mq->card;
++ struct mmc_host *host = card->host;
++
++ mmc_claim_host(host);
++ if (host->ops->acquire_ownership)
++ host->ops->acquire_ownership(host);
++ mmc_release_host(host);
++}
++
++static void release_ownership(struct mmc_queue *mq)
++{
++ struct mmc_card *card = mq->card;
++ struct mmc_host *host = card->host;
++
++ mmc_claim_host(host);
++ if (host->ops->release_ownership)
++ host->ops->release_ownership(host);
++ mmc_release_host(host);
++}
++
++
+ static int mmc_queue_thread(void *d)
+ {
+ struct mmc_queue *mq = d;
+@@ -66,12 +89,14 @@
+ break;
+ }
+ up(&mq->thread_sem);
++ release_ownership(mq);
+ schedule();
+ down(&mq->thread_sem);
+ continue;
+ }
+- set_current_state(TASK_RUNNING);
+
++ set_current_state(TASK_RUNNING);
++ acquire_ownership(mq);
+ mq->issue_fn(mq, req);
+ } while (1);
+ up(&mq->thread_sem);
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -213,9 +213,15 @@
+ mrq->done_data = &complete;
+ mrq->done = mmc_wait_done;
+
++ if (host->port_mutex)
++ mutex_lock(host->port_mutex);
++
+ mmc_start_request(host, mrq);
+
+ wait_for_completion(&complete);
++
++ if (host->port_mutex)
++ mutex_unlock(host->port_mutex);
+ }
+
+ EXPORT_SYMBOL(mmc_wait_for_req);
+@@ -912,7 +918,15 @@
+ "identification mode\n", mmc_hostname(host));
+ host->ios.clock = host->f_min;
+ } else
+- host->ios.clock = 400000;
++ /*
++ * according to mmca 4.4 or sd 2.0 spec, the clock frequency of
++ * identification mode can be less than 400KHz.
++ *
++ * lower down the clock frequency to 200KHz because some
++ * e.MMC devices (e.g. Micron e.MMC) don't work in clock
++ * frequency above 200KHz.
++ */
++ host->ios.clock = 200000;
+
+ host->ios.power_mode = MMC_POWER_ON;
+ mmc_set_ios(host);
+@@ -1058,6 +1072,9 @@
+ u32 ocr;
+ int err;
+
++ if (host->ops->acquire_ownership)
++ host->ops->acquire_ownership(host);
++
+ mmc_bus_get(host);
+
+ /* if there is a card registered, check whether it is still present */
+@@ -1130,6 +1147,9 @@
+ out:
+ if (host->caps & MMC_CAP_NEEDS_POLL)
+ mmc_schedule_delayed_work(&host->detect, HZ);
++
++ if (host->ops->release_ownership)
++ host->ops->release_ownership(host);
+ }
+
+ void mmc_start_host(struct mmc_host *host)
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -294,6 +294,28 @@
+ };
+
+ /*
++ * Distinguish the fake MMCA4 MMC card.
++ *
++ * Transcend 2GB MMC card is a kind of MMCA3.31 MMC card.
++ * However, it makes up itself as a MMCA4 one via SPEC_VERS
++ * field of its CSD register. Once it's treated as MMCA4 by
++ * driver, 4 bit bus is activated which leads to data error.
++ */
++static bool fake_mmca4_card(struct mmc_card *card)
++{
++ if (card->cid.manfid == 0x1e &&
++ card->cid.oemid == 0xffff &&
++ card->cid.prod_name[0] == 'M' &&
++ card->cid.prod_name[1] == 'M' &&
++ card->cid.prod_name[2] == 'C' &&
++ card->cid.month == 9 &&
++ card->cid.year == 2008)
++ return true;
++ else
++ return false;
++}
++
++/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+@@ -398,6 +420,12 @@
+ err = mmc_select_card(card);
+ if (err)
+ goto free_card;
++
++ /*
++ * Get card's true specification version
++ */
++ if (fake_mmca4_card(card))
++ card->csd.mmca_vsn = CSD_SPEC_VER_3;
+ }
+
+ if (!oldcard) {
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -22,9 +22,12 @@
+
+ #include <asm/scatterlist.h>
+ #include <asm/io.h>
++#include <asm/intel_scu_ipc.h>
+
+ #include "sdhci.h"
+
++#define DRIVER_VERSION "June 21, 2010"
++
+ /*
+ * PCI registers
+ */
+@@ -39,11 +42,13 @@
+
+ #define MAX_SLOTS 8
+
++static DEFINE_MUTEX(port_mutex);
++
+ struct sdhci_pci_chip;
+ struct sdhci_pci_slot;
+
+ struct sdhci_pci_fixes {
+- unsigned int quirks;
++ u64 quirks;
+
+ int (*probe)(struct sdhci_pci_chip*);
+
+@@ -65,7 +70,7 @@
+ struct sdhci_pci_chip {
+ struct pci_dev *pdev;
+
+- unsigned int quirks;
++ u64 quirks;
+ const struct sdhci_pci_fixes *fixes;
+
+ int num_slots; /* Slots on controller */
+@@ -364,6 +369,99 @@
+ .probe = via_probe,
+ };
+
++static int single_slot(struct sdhci_pci_chip *chip)
++{
++ chip->num_slots = 1;
++ return 0;
++}
++
++/*
++ * ADMA operation is disabled for Moorestown platform due to
++ * hardware bugs.
++ */
++static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
++ .quirks = SDHCI_QUIRK_BROKEN_ADMA |
++ SDHCI_QUIRK_SERIALIZE |
++ SDHCI_QUIRK_BROKEN_RESETALL |
++ SDHCI_QUIRK_FORCE_FULL_SPEED_MODE,
++};
++
++static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = {
++ .quirks = SDHCI_QUIRK_BROKEN_ADMA |
++ SDHCI_QUIRK_BROKEN_RESETALL |
++ SDHCI_QUIRK_FORCE_FULL_SPEED_MODE,
++ .probe = single_slot
++};
++
++/*
++ * Get the base address in shared SRAM for eMMC mutex
++ * (Dekker's algorithm) through IPC call.
++ *
++ * Please note it'll always return 0 whether the address requesting
++ * success or not. So, the mmc driver will still work well if the scu
++ * firmware is not ready yet.
++*/
++static int mfld_sdio3_probe_slot(struct sdhci_pci_slot *slot)
++{
++ u32 mutex_base_addr = 0;
++ int ret = -EIO; /* Assume IPC call fails */
++
++ /*
++ * Currently, the SCU firmware and interface in IPC driver is
++ * not ready yet. So just disable it by always set 'ret = -EIO' here.
++ * Will submit a patch to enable it once the SCU firmware and
++ * IPC driver interface is ready.
++ */
++ /* ret = intel_scu_ipc_get_emmc_mutex_addr(&mutex_base_addr); */
++ if (ret) {
++ dev_err(&slot->chip->pdev->dev, "IPC error: %d\n", ret);
++ slot->host->sram_addr = 0;
++ } else {
++ /* 3 housekeeping mutex variables, 12 bytes length */
++ slot->host->sram_addr = ioremap_nocache(mutex_base_addr, 16);
++ if (!slot->host->sram_addr) {
++ dev_err(&slot->chip->pdev->dev, "ioremap failed!\n");
++ } else {
++ dev_info(&slot->chip->pdev->dev, "mapped addr: %p\n",
++ slot->host->sram_addr);
++ dev_info(&slot->chip->pdev->dev, "current eMMC owner:"
++ " %d, IA req: %d, SCU req: %d\n",
++ readl(slot->host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
++ readl(slot->host->sram_addr + DEKKER_IA_REQ_OFFSET),
++ readl(slot->host->sram_addr + DEKKER_SCU_REQ_OFFSET));
++ }
++ }
++
++ return 0;
++}
++
++static void mfld_sdio3_remove_slot(struct sdhci_pci_slot *slot, int dead)
++{
++ if (dead)
++ return;
++
++ if (slot->host->sram_addr)
++ iounmap(slot->host->sram_addr);
++}
++
++static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
++ .quirks = SDHCI_QUIRK_MFD_SD_RESTRICTION |
++ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++};
++
++static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
++ .quirks = SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION |
++ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++};
++
++static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio3 = {
++ .quirks = SDHCI_QUIRK_NEED_DEKKER_MUTEX |
++ SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION |
++ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++ .probe_slot = mfld_sdio3_probe_slot,
++ .remove_slot = mfld_sdio3_remove_slot,
++};
++
+ static const struct pci_device_id pci_ids[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+@@ -445,6 +543,62 @@
+ .driver_data = (kernel_ulong_t)&sdhci_via,
+ },
+
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MFD_SD,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio3,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
++ },
++
+ { /* Generic SD host controller */
+ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
+ },
+@@ -600,9 +754,7 @@
+ {
+ struct sdhci_pci_slot *slot;
+ struct sdhci_host *host;
+-
+ resource_size_t addr;
+-
+ int ret;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+@@ -643,6 +795,9 @@
+
+ host->irq = pdev->irq;
+
++ if (host->quirks & SDHCI_QUIRK_SERIALIZE)
++ host->mmc->port_mutex = &port_mutex;
++
+ ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
+ if (ret) {
+ dev_err(&pdev->dev, "cannot request region\n");
+@@ -728,6 +883,7 @@
+ return ret;
+
+ slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
++
+ dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
+ if (slots == 0)
+ return -ENODEV;
+@@ -769,7 +925,7 @@
+ goto free;
+ }
+
+- for (i = 0;i < slots;i++) {
++ for (i = 0;i < chip->num_slots;i++) {
+ slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
+ if (IS_ERR(slot)) {
+ for (i--;i >= 0;i--)
+@@ -840,4 +996,5 @@
+
+ MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
+ MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
++MODULE_VERSION(DRIVER_VERSION);
+ MODULE_LICENSE("GPL");
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -27,6 +27,12 @@
+ #include "sdhci.h"
+
+ #define DRIVER_NAME "sdhci"
++#define DRIVER_VERSION "June 21, 2010"
++
++static int sdhci_wait_time = 10;
++module_param_named(wait_time, sdhci_wait_time, int, 0644);
++MODULE_PARM_DESC(sdhci_wait_time,
++ "Time (milliseconds) to wait for clock reset.");
+
+ #define DBG(f, x...) \
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
+@@ -162,9 +168,11 @@
+ /* hw clears the bit when it's done */
+ while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
+ if (timeout == 0) {
+- printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+- mmc_hostname(host->mmc), (int)mask);
+- sdhci_dumpregs(host);
++ if (!(host->quirks & SDHCI_QUIRK_BROKEN_RESETALL)) {
++ printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
++ mmc_hostname(host->mmc), (int)mask);
++ sdhci_dumpregs(host);
++ }
+ return;
+ }
+ timeout--;
+@@ -179,11 +187,21 @@
+
+ static void sdhci_init(struct sdhci_host *host, int soft)
+ {
+- if (soft)
+- sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+- else
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ u32 intmask;
+
++ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ sdhci_writel(host,
++ intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
++ SDHCI_INT_STATUS);
++
++ if (!(host->quirks & SDHCI_QUIRK_BROKEN_RESETALL)) {
++ if (soft)
++ sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
++ else
++ sdhci_reset(host, SDHCI_RESET_ALL);
++ }
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
+ SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
+@@ -195,6 +213,9 @@
+ host->clock = 0;
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
+ }
++
++ /* disable wakeup signal during initialization */
++ sdhci_writeb(host, 0x0, SDHCI_WAKE_UP_CONTROL);
+ }
+
+ static void sdhci_reinit(struct sdhci_host *host)
+@@ -625,11 +646,8 @@
+ break;
+ }
+
+- if (count >= 0xF) {
+- printk(KERN_WARNING "%s: Too large timeout requested!\n",
+- mmc_hostname(host->mmc));
++ if (count >= 0xF)
+ count = 0xE;
+- }
+
+ return count;
+ }
+@@ -873,6 +891,36 @@
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
++/*
++ * HW problem exists in LNW A3 so clock register has to be set
++ * for every command if both SDIO0 and SDIO1 are enabled.
++ */
++static void sdhci_clock_reset(struct sdhci_host *host)
++{
++ u16 clk;
++ unsigned long timeout;
++
++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++
++ clk |= SDHCI_CLOCK_CARD_EN;
++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++
++ /* Wait max 10 ms */
++ timeout = sdhci_wait_time;
++ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
++ & SDHCI_CLOCK_INT_STABLE)) {
++ if (timeout == 0) {
++ printk(KERN_ERR "%s: Internal clock never "
++ "stabilised.\n",
++ mmc_hostname(host->mmc));
++ sdhci_dumpregs(host);
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++}
++
+ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+ int flags;
+@@ -940,6 +988,9 @@
+ if (cmd->data)
+ flags |= SDHCI_CMD_DATA;
+
++ if (host->quirks & SDHCI_QUIRK_SERIALIZE)
++ sdhci_clock_reset(host);
++
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+ }
+
+@@ -1159,16 +1210,36 @@
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+- if (ios->bus_width == MMC_BUS_WIDTH_4)
++ if (ios->bus_width == MMC_BUS_WIDTH_8) {
++ ctrl |= SDHCI_CTRL_8BITBUS;
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ ctrl |= SDHCI_CTRL_4BITBUS;
+- else
++ } else {
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ }
+
+- if (ios->timing == MMC_TIMING_SD_HS)
++/*
++ * For LNW A3, HISPD bit has to be cleared in order to enable 50MHz clock
++ */
++ if (!(host->quirks & SDHCI_QUIRK_FORCE_FULL_SPEED_MODE) &&
++ (ios->timing == MMC_TIMING_SD_HS ||
++ ios->timing == MMC_TIMING_MMC_HS))
+ ctrl |= SDHCI_CTRL_HISPD;
+ else
+ ctrl &= ~SDHCI_CTRL_HISPD;
+
++ /*
++ * XXX: workaround for Medfield silicon sighting 1064454:
++ * clear High Speed Enable bit to cheat HW
++ * and set high speed clock frequency later
++ */
++ if ((host->quirks & SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION)
++ || (host->quirks & SDHCI_QUIRK_MFD_SD_RESTRICTION))
++ ctrl &= ~SDHCI_CTRL_HISPD;
++
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ /*
+@@ -1228,11 +1299,147 @@
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
++/*
++ * One of the Medfield eMMC controller (PCI device id 0x0823, SDIO3) is
++ * a shared resource used by the SCU and the IA processors. SCU primarily
++ * uses the eMMC host controller to access the eMMC device's Boot Partition,
++ * while the IA CPU uses the eMMC host controller to access the eMMC device's
++ * User Partition.
++ *
++ * After the SCU hands off the system to the IA processor, the IA processor assumes
++ * ownership to the eMMC host controller. Due to absence of any arbitration at the
++ * eMMC host controller, this could result in concurrent eMMC host accesses resulting in
++ * bus contention and garbage data ending up in either of the partitions.
++ *
++ * To circumvent this from happening, eMMC host controller locking mechanism
++ * is employed, where at any one given time, only one agent, SCU or IA, may be
++ * allowed to access the host. This is achieved by implementing Dekker's Algorithm
++ * (http://en.wikipedia.org/wiki/Dekker's_algorithm) between the two processors.
++ *
++ * Before handing off the system to the IA processor, SCU must set up three
++ * housekeeping mutex variables allocated in the shared SRAM as follows:
++ *
++ * eMMC_Owner = IA (SCU and IA processors - RW, 32bit)
++ * IA_Req = FALSE (IA -RW, SCU - RO, 32bit)
++ * SCU_Req = FALSE (IA - RO, SCU - R/W, 32bit)
++ *
++ * There is no hardware based access control to these variables and so code executing
++ * on SCU and IA processors must follow below access rules (Dekker's algorithm):
++ *
++ * -----------------------------------------
++ * SCU Processor Implementation
++ * -----------------------------------------
++ * SCU_Req = TRUE;
++ * while (IA_Req == TRUE) {
++ * if (eMMC_Owner != SCU){
++ * SCU_Req = FALSE;
++ * while (eMMC_Owner != SCU);
++ * SCU_Req = TRUE;
++ * }
++ * }
++ * // SCU now performs eMMC transactions here
++ * ...
++ * // When done, relinquish control to IA
++ * eMMC_Owner = IA;
++ * SCU_Req = FALSE;
++ *
++ * -----------------------------------------
++ * IA Processor Implementation
++ * -----------------------------------------
++ * IA_Req = TRUE;
++ * while (SCU_Req == TRUE) {
++ * if (eMMC_Owner != IA){
++ * IA_Req = FALSE;
++ * while (eMMC_Owner != IA);
++ * IA_Req = TRUE;
++ * }
++ * }
++ * //IA now performs eMMC transactions here
++ * ...
++ * //When done, relinquish control to SCU
++ * eMMC_Owner = SCU;
++ * IA_Req = FALSE;
++ *
++ * ----------------------------------------
++*/
++
++/* Implement the Dekker's algorithm on the IA process side */
++static int sdhci_acquire_ownership(struct mmc_host *mmc)
++{
++ struct sdhci_host *host;
++ unsigned long t1, t2;
++
++ host = mmc_priv(mmc);
++
++ if (!((host->quirks & SDHCI_QUIRK_NEED_DEKKER_MUTEX) && (host->sram_addr)))
++ return 0;
++
++ DBG("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
++ readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
++ readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
++ readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
++
++ writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
++
++ t1 = jiffies + 10 * HZ;
++ t2 = 500;
++
++ while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
++ if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) != DEKKER_OWNER_IA) {
++ writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
++ while (t2) {
++ if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) == DEKKER_OWNER_IA)
++ break;
++ msleep(10);
++ t2--;
++ }
++ if (t2) {
++ writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
++ } else {
++ pr_err("eMMC mutex timeout (owner)!\n");
++ goto timeout;
++ }
++ }
++ if (time_after(jiffies, t1)) {
++ pr_err("eMMC mutex timeout (req)!\n");
++ goto timeout;
++ }
++ cpu_relax();
++ }
++
++ return 0;
++
++timeout:
++ writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
++ writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
++ return -EBUSY;
++}
++
++static void sdhci_release_ownership(struct mmc_host *mmc)
++{
++ struct sdhci_host *host;
++
++ host = mmc_priv(mmc);
++
++ if (!((host->quirks & SDHCI_QUIRK_NEED_DEKKER_MUTEX) && (host->sram_addr)))
++ return;
++
++ writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
++ writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
++
++ DBG("Exit ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
++ readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
++ readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
++ readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
++}
++
+ static const struct mmc_host_ops sdhci_ops = {
+ .request = sdhci_request,
+ .set_ios = sdhci_set_ios,
+ .get_ro = sdhci_get_ro,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
++ .acquire_ownership = sdhci_acquire_ownership,
++ .release_ownership = sdhci_release_ownership,
+ };
+
+ /*****************************************************************************\
+@@ -1365,11 +1572,18 @@
+ {
+ BUG_ON(intmask == 0);
+
++ /*
++ * Intel MRST:
++ * HW problem exists in LNW A3 which leads to fake interrupt on SDIO1
++ * if SDIO0 and SDIO1 are both enabled.
++ */
+ if (!host->cmd) {
+- printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+- "though no command operation was in progress.\n",
+- mmc_hostname(host->mmc), (unsigned)intmask);
+- sdhci_dumpregs(host);
++ if (!(host->quirks & SDHCI_QUIRK_SERIALIZE)) {
++ printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
++ "though no command operation in progress.\n",
++ mmc_hostname(host->mmc), (unsigned)intmask);
++ sdhci_dumpregs(host);
++ }
+ return;
+ }
+
+@@ -1674,9 +1888,10 @@
+ mmc = host->mmc;
+
+ if (debug_quirks)
+- host->quirks = debug_quirks;
++ host->quirks = (u64)debug_quirks;
+
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ if (!(host->quirks & SDHCI_QUIRK_BROKEN_RESETALL))
++ sdhci_reset(host, SDHCI_RESET_ALL);
+
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+@@ -1689,6 +1904,31 @@
+
+ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+
++ /*
++ * XXX: workaround for Medfield silicon sighting 3147023:
++ *
++ * hard-code capabilities register:
++ * SDIO-0: caps = 0x6bee32b2
++ * SDIO-1/2/3/4: caps = 0x6cee32b2
++ *
++ * max_blk_size: 2048
++ * base clk freq: 50MHz
++ * timeout clk freq: 50MHz
++ * voltage: SDIO-0 (sd) 3.3V, others: 1.8V
++ */
++ if ((host->quirks & SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION)
++ || (host->quirks & SDHCI_QUIRK_MFD_SD_RESTRICTION)) {
++
++ DBG("caps reg from hw: 0x%04x\n", caps);
++
++ if (host->quirks & SDHCI_QUIRK_MFD_SD_RESTRICTION)
++ caps = 0x6bee32b2;
++ else if (host->quirks & SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION)
++ caps = 0x6cee32b2;
++
++ DBG("correct caps reg value: 0x%04x\n", caps);
++ }
++
+ if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
+ host->flags |= SDHCI_USE_SDMA;
+ else if (!(caps & SDHCI_CAN_DO_SDMA))
+@@ -1785,6 +2025,7 @@
+ * Set host parameters.
+ */
+ mmc->ops = &sdhci_ops;
++
+ if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
+ host->ops->set_clock && host->ops->get_min_clock)
+ mmc->f_min = host->ops->get_min_clock(host);
+@@ -1796,8 +2037,21 @@
+ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
++ /*
++ * XXX: enable 8-bit data bus width for Moorestown and Medfield
++ * host controllers which connected to e.MMC/MMC devices
++ */
++ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
++ mmc->caps |= MMC_CAP_8_BIT_DATA;
++
+ if (caps & SDHCI_CAN_DO_HISPD)
+- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
++ mmc->caps |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED);
++
++ /*
++ * XXX: disable SDIO-0 high speed for Medfield silicon sighting 3548331
++ */
++ if (host->quirks & SDHCI_QUIRK_MFD_SD_RESTRICTION)
++ mmc->caps &= ~MMC_CAP_SD_HIGHSPEED;
+
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+@@ -1855,7 +2109,7 @@
+ } else {
+ mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
+ SDHCI_MAX_BLOCK_SHIFT;
+- if (mmc->max_blk_size >= 3) {
++ if (mmc->max_blk_size > 3) {
+ printk(KERN_WARNING "%s: Invalid maximum block size, "
+ "assuming 512 bytes\n", mmc_hostname(mmc));
+ mmc->max_blk_size = 0;
+@@ -2010,6 +2264,7 @@
+
+ MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
+ MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
++MODULE_VERSION(DRIVER_VERSION);
+ MODULE_LICENSE("GPL");
+
+ MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -67,6 +67,7 @@
+ #define SDHCI_CTRL_LED 0x01
+ #define SDHCI_CTRL_4BITBUS 0x02
+ #define SDHCI_CTRL_HISPD 0x04
++#define SDHCI_CTRL_8BITBUS 0x20
+ #define SDHCI_CTRL_DMA_MASK 0x18
+ #define SDHCI_CTRL_SDMA 0x00
+ #define SDHCI_CTRL_ADMA1 0x08
+@@ -184,66 +185,86 @@
+ /* Data set by hardware interface driver */
+ const char *hw_name; /* Hardware bus name */
+
+- unsigned int quirks; /* Deviations from spec. */
++ u64 quirks; /* Deviations from spec. */
+
+ /* Controller doesn't honor resets unless we touch the clock register */
+-#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
++#define SDHCI_QUIRK_CLOCK_BEFORE_RESET 0x1ULL
+ /* Controller has bad caps bits, but really supports DMA */
+-#define SDHCI_QUIRK_FORCE_DMA (1<<1)
++#define SDHCI_QUIRK_FORCE_DMA 0x2ULL
+ /* Controller doesn't like to be reset when there is no card inserted. */
+-#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
++#define SDHCI_QUIRK_NO_CARD_NO_RESET 0x4ULL
+ /* Controller doesn't like clearing the power reg before a change */
+-#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
++#define SDHCI_QUIRK_SINGLE_POWER_WRITE 0x8ULL
+ /* Controller has flaky internal state so reset it on each ios change */
+-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
++#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS 0x10ULL
+ /* Controller has an unusable DMA engine */
+-#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
++#define SDHCI_QUIRK_BROKEN_DMA 0x20ULL
+ /* Controller has an unusable ADMA engine */
+-#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
++#define SDHCI_QUIRK_BROKEN_ADMA 0x40ULL
+ /* Controller can only DMA from 32-bit aligned addresses */
+-#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
++#define SDHCI_QUIRK_32BIT_DMA_ADDR 0x80ULL
+ /* Controller can only DMA chunk sizes that are a multiple of 32 bits */
+-#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
++#define SDHCI_QUIRK_32BIT_DMA_SIZE 0x100ULL
+ /* Controller can only ADMA chunks that are a multiple of 32 bits */
+-#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
++#define SDHCI_QUIRK_32BIT_ADMA_SIZE 0x200ULL
+ /* Controller needs to be reset after each request to stay stable */
+-#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
++#define SDHCI_QUIRK_RESET_AFTER_REQUEST 0x400ULL
+ /* Controller needs voltage and power writes to happen separately */
+-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
++#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER 0x800ULL
+ /* Controller provides an incorrect timeout value for transfers */
+-#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
++#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 0x1000ULL
+ /* Controller has an issue with buffer bits for small transfers */
+-#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
++#define SDHCI_QUIRK_BROKEN_SMALL_PIO 0x2000ULL
+ /* Controller does not provide transfer-complete interrupt when not busy */
+-#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
++#define SDHCI_QUIRK_NO_BUSY_IRQ 0x4000ULL
+ /* Controller has unreliable card detection */
+-#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
++#define SDHCI_QUIRK_BROKEN_CARD_DETECTION 0x8000ULL
+ /* Controller reports inverted write-protect state */
+-#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
++#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT 0x10000ULL
+ /* Controller has nonstandard clock management */
+-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
++#define SDHCI_QUIRK_NONSTANDARD_CLOCK 0x20000ULL
+ /* Controller does not like fast PIO transfers */
+-#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
++#define SDHCI_QUIRK_PIO_NEEDS_DELAY 0x40000ULL
+ /* Controller losing signal/interrupt enable states after reset */
+-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
++#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET 0x80000ULL
+ /* Controller has to be forced to use block size of 2048 bytes */
+-#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
++#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 0x100000ULL
+ /* Controller cannot do multi-block transfers */
+-#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
++#define SDHCI_QUIRK_NO_MULTIBLOCK 0x200000ULL
+ /* Controller can only handle 1-bit data transfers */
+-#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
++#define SDHCI_QUIRK_FORCE_1_BIT_DATA 0x400000ULL
+ /* Controller needs 10ms delay between applying power and clock */
+-#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
++#define SDHCI_QUIRK_DELAY_AFTER_POWER 0x800000ULL
+ /* Controller uses SDCLK instead of TMCLK for data timeouts */
+-#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
++#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK 0x1000000ULL
+ /* Controller reports wrong base clock capability */
+-#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
++#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN 0x2000000ULL
+ /* Controller cannot support End Attribute in NOP ADMA descriptor */
+-#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
++#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC 0x4000000ULL
++/* Controller can only handle full speed mode */
++#define SDHCI_QUIRK_FORCE_FULL_SPEED_MODE 0x8000000ULL
++/* Controller has an issue with software reset all function */
++#define SDHCI_QUIRK_BROKEN_RESETALL 0x10000000ULL
++/* Controller has an issue when its two slots enabled together */
++#define SDHCI_QUIRK_SERIALIZE 0x20000000ULL
++/* Controller of Medfield specific restriction */
++#define SDHCI_QUIRK_MFD_SD_RESTRICTION 0x40000000ULL
++#define SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION 0x80000000ULL
++/* One controller port will be accessed by driver and fw at the same time */
++#define SDHCI_QUIRK_NEED_DEKKER_MUTEX 0x100000000ULL
+
+ int irq; /* Device IRQ */
+ void __iomem * ioaddr; /* Mapped address */
+
++ /* XXX: SCU/X86 mutex variables base address in shared SRAM */
++ void __iomem * sram_addr; /* Shared SRAM address */
++
++#define DEKKER_EMMC_OWNER_OFFSET 0
++#define DEKKER_IA_REQ_OFFSET 0x04
++#define DEKKER_SCU_REQ_OFFSET 0x08
++#define DEKKER_OWNER_IA 0
++#define DEKKER_OWNER_SCU 1
++
+ const struct sdhci_ops *ops; /* Low level hw interface */
+
+ /* Internal data */
+@@ -295,7 +316,6 @@
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+-
+ struct sdhci_ops {
+ #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+ u32 (*read_l)(struct sdhci_host *host, int reg);
+--- a/drivers/net/caif/Kconfig
++++ b/drivers/net/caif/Kconfig
+@@ -14,4 +14,24 @@
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
+
++
++config CAIF_SPI_SLAVE
++ tristate "CAIF SPI transport driver for slave interface"
++ depends on CAIF
++ default n
++ ---help---
++ The CAIF Link layer SPI Protocol driver for Slave SPI interface.
++ This driver implements a platform driver to accommodate for a
++ platform specific SPI device. A sample CAIF SPI Platform device is
++ provided in Documentation/networking/caif/spi_porting.txt
++
++config CAIF_SPI_SYNC
++ bool "Next command and length in start of frame"
++ depends on CAIF_SPI_SLAVE
++ default n
++ ---help---
++ Putting the next command and length in the start of the frame can
++ help to synchronize to the next transfer in case of over or under-runs.
++ This option also needs to be enabled on the modem.
++
+ endif # CAIF
+--- a/drivers/net/caif/Makefile
++++ b/drivers/net/caif/Makefile
+@@ -10,3 +10,7 @@
+
+ # Serial interface
+ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
++
++# SPI slave physical interfaces module
++cfspi_slave-objs := caif_spi.o caif_spi_slave.o
++obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+--- /dev/null
++++ b/drivers/net/caif/caif_spi.c
+@@ -0,0 +1,847 @@
++/*
++ * Copyright (C) ST-Ericsson AB 2010
++ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
++ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
++ * License terms: GNU General Public License (GPL) version 2.
++ */
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/string.h>
++#include <linux/workqueue.h>
++#include <linux/completion.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/debugfs.h>
++#include <linux/if_arp.h>
++#include <net/caif/caif_layer.h>
++#include <net/caif/caif_spi.h>
++
++#ifndef CONFIG_CAIF_SPI_SYNC
++#define FLAVOR "Flavour: Vanilla.\n"
++#else
++#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
++#endif /* CONFIG_CAIF_SPI_SYNC */
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
++MODULE_DESCRIPTION("CAIF SPI driver");
++
++static int spi_loop;
++module_param(spi_loop, bool, S_IRUGO);
++MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
++
++/* SPI frame alignment. */
++module_param(spi_frm_align, int, S_IRUGO);
++MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
++
++/* SPI padding options. */
++module_param(spi_up_head_align, int, S_IRUGO);
++MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
++
++module_param(spi_up_tail_align, int, S_IRUGO);
++MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
++
++module_param(spi_down_head_align, int, S_IRUGO);
++MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
++
++module_param(spi_down_tail_align, int, S_IRUGO);
++MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
++
++#ifdef CONFIG_ARM
++#define BYTE_HEX_FMT "%02X"
++#else
++#define BYTE_HEX_FMT "%02hhX"
++#endif
++
++#define SPI_MAX_PAYLOAD_SIZE 4096
++/*
++ * Threshold values for the SPI packet queue. Flowcontrol will be asserted
++ * when the number of packets exceeds HIGH_WATER_MARK. It will not be
++ * deasserted before the number of packets drops below LOW_WATER_MARK.
++ */
++#define LOW_WATER_MARK 100
++#define HIGH_WATER_MARK (LOW_WATER_MARK*5)
++
++#ifdef CONFIG_UML
++
++/*
++ * We sometimes use UML for debugging, but it cannot handle
++ * dma_alloc_coherent so we have to wrap it.
++ */
++static inline void *dma_alloc(dma_addr_t *daddr)
++{
++ return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
++}
++
++static inline void dma_free(void *cpu_addr, dma_addr_t handle)
++{
++ kfree(cpu_addr);
++}
++
++#else
++
++static inline void *dma_alloc(dma_addr_t *daddr)
++{
++ return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
++ GFP_KERNEL);
++}
++
++static inline void dma_free(void *cpu_addr, dma_addr_t handle)
++{
++ dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
++}
++#endif /* CONFIG_UML */
++
++#ifdef CONFIG_DEBUG_FS
++
++#define DEBUGFS_BUF_SIZE 4096
++
++static struct dentry *dbgfs_root;
++
++static inline void driver_debugfs_create(void)
++{
++ dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
++}
++
++static inline void driver_debugfs_remove(void)
++{
++ debugfs_remove(dbgfs_root);
++}
++
++static inline void dev_debugfs_rem(struct cfspi *cfspi)
++{
++ debugfs_remove(cfspi->dbgfs_frame);
++ debugfs_remove(cfspi->dbgfs_state);
++ debugfs_remove(cfspi->dbgfs_dir);
++}
++
++static int dbgfs_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++
++static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ char *buf;
++ int len = 0;
++ ssize_t size;
++ struct cfspi *cfspi = (struct cfspi *)file->private_data;
++
++ buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
++ if (!buf)
++ return 0;
++
++ /* Print out debug information. */
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "CAIF SPI debug information:\n");
++
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
++
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "STATE: %d\n", cfspi->dbg_state);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Previous CMD: 0x%x\n", cfspi->pcmd);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Current CMD: 0x%x\n", cfspi->cmd);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Previous TX len: %d\n", cfspi->tx_ppck_len);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Previous RX len: %d\n", cfspi->rx_ppck_len);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Current TX len: %d\n", cfspi->tx_cpck_len);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Current RX len: %d\n", cfspi->rx_cpck_len);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Next TX len: %d\n", cfspi->tx_npck_len);
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Next RX len: %d\n", cfspi->rx_npck_len);
++
++ size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++ kfree(buf);
++
++ return size;
++}
++
++static ssize_t print_frame(char *buf, size_t size, char *frm,
++ size_t count, size_t cut)
++{
++ int len = 0;
++ int i;
++ for (i = 0; i < count; i++) {
++ len += snprintf((buf + len), (size - len),
++ "[0x" BYTE_HEX_FMT "]",
++ frm[i]);
++ if ((i == cut) && (count > (cut * 2))) {
++ /* Fast forward. */
++ i = count - cut;
++ len += snprintf((buf + len), (size - len),
++ "--- %u bytes skipped ---\n",
++ (int)(count - (cut * 2)));
++ }
++
++ if ((!(i % 10)) && i) {
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "\n");
++ }
++ }
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
++ return len;
++}
++
++static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ char *buf;
++ int len = 0;
++ ssize_t size;
++ struct cfspi *cfspi;
++
++ cfspi = (struct cfspi *)file->private_data;
++ buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
++ if (!buf)
++ return 0;
++
++ /* Print out debug information. */
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Current frame:\n");
++
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
++
++ len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
++ cfspi->xfer.va_tx,
++ (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
++
++ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
++ "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
++
++ len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
++ cfspi->xfer.va_rx,
++ (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
++
++ size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++ kfree(buf);
++
++ return size;
++}
++
++static const struct file_operations dbgfs_state_fops = {
++ .open = dbgfs_open,
++ .read = dbgfs_state,
++ .owner = THIS_MODULE
++};
++
++static const struct file_operations dbgfs_frame_fops = {
++ .open = dbgfs_open,
++ .read = dbgfs_frame,
++ .owner = THIS_MODULE
++};
++
++static inline void dev_debugfs_add(struct cfspi *cfspi)
++{
++ cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
++ cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
++ cfspi->dbgfs_dir, cfspi,
++ &dbgfs_state_fops);
++ cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
++ cfspi->dbgfs_dir, cfspi,
++ &dbgfs_frame_fops);
++}
++
++inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
++{
++ cfspi->dbg_state = state;
++};
++#else
++
++static inline void driver_debugfs_create(void)
++{
++}
++
++static inline void driver_debugfs_remove(void)
++{
++}
++
++static inline void dev_debugfs_add(struct cfspi *cfspi)
++{
++}
++
++static inline void dev_debugfs_rem(struct cfspi *cfspi)
++{
++}
++
++inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
++{
++}
++#endif /* CONFIG_DEBUG_FS */
++
++static LIST_HEAD(cfspi_list);
++static spinlock_t cfspi_list_lock;
++
++/* SPI uplink head alignment. */
++static ssize_t show_up_head_align(struct device_driver *driver, char *buf)
++{
++ return sprintf(buf, "%d\n", spi_up_head_align);
++}
++
++static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL);
++
++/* SPI uplink tail alignment. */
++static ssize_t show_up_tail_align(struct device_driver *driver, char *buf)
++{
++ return sprintf(buf, "%d\n", spi_up_tail_align);
++}
++
++static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL);
++
++/* SPI downlink head alignment. */
++static ssize_t show_down_head_align(struct device_driver *driver, char *buf)
++{
++ return sprintf(buf, "%d\n", spi_down_head_align);
++}
++
++static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL);
++
++/* SPI downlink tail alignment. */
++static ssize_t show_down_tail_align(struct device_driver *driver, char *buf)
++{
++ return sprintf(buf, "%d\n", spi_down_tail_align);
++}
++
++static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL);
++
++/* SPI frame alignment. */
++static ssize_t show_frame_align(struct device_driver *driver, char *buf)
++{
++ return sprintf(buf, "%d\n", spi_frm_align);
++}
++
++static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL);
++
++int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
++{
++ u8 *dst = buf;
++ caif_assert(buf);
++
++ do {
++ struct sk_buff *skb;
++ struct caif_payload_info *info;
++ int spad = 0;
++ int epad;
++
++ skb = skb_dequeue(&cfspi->chead);
++ if (!skb)
++ break;
++
++ /*
++ * Calculate length of frame including SPI padding.
++ * The payload position is found in the control buffer.
++ */
++ info = (struct caif_payload_info *)&skb->cb;
++
++ /*
++ * Compute head offset i.e. number of bytes to add to
++ * get the start of the payload aligned.
++ */
++ if (spi_up_head_align) {
++ spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
++ *dst = (u8)(spad - 1);
++ dst += spad;
++ }
++
++ /* Copy in CAIF frame. */
++ skb_copy_bits(skb, 0, dst, skb->len);
++ dst += skb->len;
++ cfspi->ndev->stats.tx_packets++;
++ cfspi->ndev->stats.tx_bytes += skb->len;
++
++ /*
++ * Compute tail offset i.e. number of bytes to add to
++ * get the complete CAIF frame aligned.
++ */
++ epad = (skb->len + spad) & spi_up_tail_align;
++ dst += epad;
++
++ dev_kfree_skb(skb);
++
++ } while ((dst - buf) < len);
++
++ return dst - buf;
++}
++
++int cfspi_xmitlen(struct cfspi *cfspi)
++{
++ struct sk_buff *skb = NULL;
++ int frm_len = 0;
++ int pkts = 0;
++
++ /*
++	 * Decommit previously committed frames.
++ * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
++ */
++ while (skb_peek(&cfspi->chead)) {
++ skb = skb_dequeue_tail(&cfspi->chead);
++ skb_queue_head(&cfspi->qhead, skb);
++ }
++
++ do {
++ struct caif_payload_info *info = NULL;
++ int spad = 0;
++ int epad = 0;
++
++ skb = skb_dequeue(&cfspi->qhead);
++ if (!skb)
++ break;
++
++ /*
++ * Calculate length of frame including SPI padding.
++ * The payload position is found in the control buffer.
++ */
++ info = (struct caif_payload_info *)&skb->cb;
++
++ /*
++ * Compute head offset i.e. number of bytes to add to
++ * get the start of the payload aligned.
++ */
++ if (spi_up_head_align)
++ spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
++
++ /*
++ * Compute tail offset i.e. number of bytes to add to
++ * get the complete CAIF frame aligned.
++ */
++ epad = (skb->len + spad) & spi_up_tail_align;
++
++ if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
++ skb_queue_tail(&cfspi->chead, skb);
++ pkts++;
++ frm_len += skb->len + spad + epad;
++ } else {
++ /* Put back packet. */
++ skb_queue_head(&cfspi->qhead, skb);
++ }
++ } while (pkts <= CAIF_MAX_SPI_PKTS);
++
++ /*
++ * Send flow on if previously sent flow off
++ * and now go below the low water mark
++ */
++ if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
++ cfspi->cfdev.flowctrl) {
++ cfspi->flow_off_sent = 0;
++ cfspi->cfdev.flowctrl(cfspi->ndev, 1);
++ }
++
++ return frm_len;
++}
++
++static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
++{
++ struct cfspi *cfspi = (struct cfspi *)ifc->priv;
++
++ if (!in_interrupt())
++ spin_lock(&cfspi->lock);
++ if (assert) {
++ set_bit(SPI_SS_ON, &cfspi->state);
++ set_bit(SPI_XFER, &cfspi->state);
++ } else {
++ set_bit(SPI_SS_OFF, &cfspi->state);
++ }
++ if (!in_interrupt())
++ spin_unlock(&cfspi->lock);
++
++ /* Wake up the xfer thread. */
++ wake_up_interruptible(&cfspi->wait);
++}
++
++static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
++{
++ struct cfspi *cfspi = (struct cfspi *)ifc->priv;
++
++ /* Transfer done, complete work queue */
++ complete(&cfspi->comp);
++}
++
++static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct cfspi *cfspi = NULL;
++ unsigned long flags;
++ if (!dev)
++ return -EINVAL;
++
++ cfspi = netdev_priv(dev);
++
++ skb_queue_tail(&cfspi->qhead, skb);
++
++ spin_lock_irqsave(&cfspi->lock, flags);
++ if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
++ /* Wake up xfer thread. */
++ wake_up_interruptible(&cfspi->wait);
++ }
++ spin_unlock_irqrestore(&cfspi->lock, flags);
++
++ /* Send flow off if number of bytes is above high water mark */
++ if (!cfspi->flow_off_sent &&
++ cfspi->qhead.qlen > cfspi->qd_high_mark &&
++ cfspi->cfdev.flowctrl) {
++ cfspi->flow_off_sent = 1;
++ cfspi->cfdev.flowctrl(cfspi->ndev, 0);
++ }
++
++ return 0;
++}
++
++int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
++{
++ u8 *src = buf;
++
++ caif_assert(buf != NULL);
++
++ do {
++ int res;
++ struct sk_buff *skb = NULL;
++ int spad = 0;
++ int epad = 0;
++ u8 *dst = NULL;
++ int pkt_len = 0;
++
++ /*
++ * Compute head offset i.e. number of bytes added to
++ * get the start of the payload aligned.
++ */
++ if (spi_down_head_align) {
++ spad = 1 + *src;
++ src += spad;
++ }
++
++ /* Read length of CAIF frame (little endian). */
++ pkt_len = *src;
++ pkt_len |= ((*(src+1)) << 8) & 0xFF00;
++ pkt_len += 2; /* Add FCS fields. */
++
++ /* Get a suitable caif packet and copy in data. */
++
++ skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
++ caif_assert(skb != NULL);
++
++ dst = skb_put(skb, pkt_len);
++ memcpy(dst, src, pkt_len);
++ src += pkt_len;
++
++ skb->protocol = htons(ETH_P_CAIF);
++ skb_reset_mac_header(skb);
++ skb->dev = cfspi->ndev;
++
++ /*
++ * Push received packet up the stack.
++ */
++ if (!spi_loop)
++ res = netif_rx_ni(skb);
++ else
++ res = cfspi_xmit(skb, cfspi->ndev);
++
++ if (!res) {
++ cfspi->ndev->stats.rx_packets++;
++ cfspi->ndev->stats.rx_bytes += pkt_len;
++ } else
++ cfspi->ndev->stats.rx_dropped++;
++
++ /*
++ * Compute tail offset i.e. number of bytes added to
++ * get the complete CAIF frame aligned.
++ */
++ epad = (pkt_len + spad) & spi_down_tail_align;
++ src += epad;
++ } while ((src - buf) < len);
++
++ return src - buf;
++}
++
++static int cfspi_open(struct net_device *dev)
++{
++ netif_wake_queue(dev);
++ return 0;
++}
++
++static int cfspi_close(struct net_device *dev)
++{
++ netif_stop_queue(dev);
++ return 0;
++}
++static const struct net_device_ops cfspi_ops = {
++ .ndo_open = cfspi_open,
++ .ndo_stop = cfspi_close,
++ .ndo_start_xmit = cfspi_xmit
++};
++
++static void cfspi_setup(struct net_device *dev)
++{
++ struct cfspi *cfspi = netdev_priv(dev);
++ dev->features = 0;
++ dev->netdev_ops = &cfspi_ops;
++ dev->type = ARPHRD_CAIF;
++ dev->flags = IFF_NOARP | IFF_POINTOPOINT;
++ dev->tx_queue_len = 0;
++ dev->mtu = SPI_MAX_PAYLOAD_SIZE;
++ dev->destructor = free_netdev;
++ skb_queue_head_init(&cfspi->qhead);
++ skb_queue_head_init(&cfspi->chead);
++ cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
++ cfspi->cfdev.use_frag = false;
++ cfspi->cfdev.use_stx = false;
++ cfspi->cfdev.use_fcs = false;
++ cfspi->ndev = dev;
++}
++
++int cfspi_spi_probe(struct platform_device *pdev)
++{
++ struct cfspi *cfspi = NULL;
++ struct net_device *ndev;
++ struct cfspi_dev *dev;
++ int res;
++ dev = (struct cfspi_dev *)pdev->dev.platform_data;
++
++ ndev = alloc_netdev(sizeof(struct cfspi),
++ "cfspi%d", cfspi_setup);
++ if (!dev)
++ return -ENODEV;
++
++ cfspi = netdev_priv(ndev);
++ netif_stop_queue(ndev);
++ cfspi->ndev = ndev;
++ cfspi->pdev = pdev;
++
++ /* Set flow info */
++ cfspi->flow_off_sent = 0;
++ cfspi->qd_low_mark = LOW_WATER_MARK;
++ cfspi->qd_high_mark = HIGH_WATER_MARK;
++
++ /* Assign the SPI device. */
++ cfspi->dev = dev;
++ /* Assign the device ifc to this SPI interface. */
++ dev->ifc = &cfspi->ifc;
++
++ /* Allocate DMA buffers. */
++ cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
++ if (!cfspi->xfer.va_tx) {
++ printk(KERN_WARNING
++ "CFSPI: failed to allocate dma TX buffer.\n");
++ res = -ENODEV;
++ goto err_dma_alloc_tx;
++ }
++
++ cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
++
++ if (!cfspi->xfer.va_rx) {
++ printk(KERN_WARNING
++		       "CFSPI: failed to allocate dma RX buffer.\n");
++ res = -ENODEV;
++ goto err_dma_alloc_rx;
++ }
++
++ /* Initialize the work queue. */
++ INIT_WORK(&cfspi->work, cfspi_xfer);
++
++ /* Initialize spin locks. */
++ spin_lock_init(&cfspi->lock);
++
++ /* Initialize flow control state. */
++ cfspi->flow_stop = false;
++
++ /* Initialize wait queue. */
++ init_waitqueue_head(&cfspi->wait);
++
++ /* Create work thread. */
++ cfspi->wq = create_singlethread_workqueue(dev->name);
++ if (!cfspi->wq) {
++ printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
++ res = -ENODEV;
++ goto err_create_wq;
++ }
++
++ /* Initialize work queue. */
++ init_completion(&cfspi->comp);
++
++ /* Create debugfs entries. */
++ dev_debugfs_add(cfspi);
++
++ /* Set up the ifc. */
++ cfspi->ifc.ss_cb = cfspi_ss_cb;
++ cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
++ cfspi->ifc.priv = cfspi;
++
++ /* Add CAIF SPI device to list. */
++ spin_lock(&cfspi_list_lock);
++ list_add_tail(&cfspi->list, &cfspi_list);
++ spin_unlock(&cfspi_list_lock);
++
++ /* Schedule the work queue. */
++ queue_work(cfspi->wq, &cfspi->work);
++
++ /* Register network device. */
++ res = register_netdev(ndev);
++ if (res) {
++ printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
++ goto err_net_reg;
++ }
++ return res;
++
++ err_net_reg:
++ dev_debugfs_rem(cfspi);
++ set_bit(SPI_TERMINATE, &cfspi->state);
++ wake_up_interruptible(&cfspi->wait);
++ destroy_workqueue(cfspi->wq);
++ err_create_wq:
++ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
++ err_dma_alloc_rx:
++ dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
++ err_dma_alloc_tx:
++ free_netdev(ndev);
++
++ return res;
++}
++
++int cfspi_spi_remove(struct platform_device *pdev)
++{
++ struct list_head *list_node;
++ struct list_head *n;
++ struct cfspi *cfspi = NULL;
++ struct cfspi_dev *dev;
++
++ dev = (struct cfspi_dev *)pdev->dev.platform_data;
++ spin_lock(&cfspi_list_lock);
++ list_for_each_safe(list_node, n, &cfspi_list) {
++ cfspi = list_entry(list_node, struct cfspi, list);
++ /* Find the corresponding device. */
++ if (cfspi->dev == dev) {
++ /* Remove from list. */
++ list_del(list_node);
++ /* Free DMA buffers. */
++ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
++ dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
++ set_bit(SPI_TERMINATE, &cfspi->state);
++ wake_up_interruptible(&cfspi->wait);
++ destroy_workqueue(cfspi->wq);
++ /* Destroy debugfs directory and files. */
++ dev_debugfs_rem(cfspi);
++ unregister_netdev(cfspi->ndev);
++ spin_unlock(&cfspi_list_lock);
++ return 0;
++ }
++ }
++ spin_unlock(&cfspi_list_lock);
++ return -ENODEV;
++}
++
++static void __exit cfspi_exit_module(void)
++{
++ struct list_head *list_node;
++ struct list_head *n;
++ struct cfspi *cfspi = NULL;
++
++ list_for_each_safe(list_node, n, &cfspi_list) {
++ cfspi = list_entry(list_node, struct cfspi, list);
++ platform_device_unregister(cfspi->pdev);
++ }
++
++ /* Destroy sysfs files. */
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_up_head_align);
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_up_tail_align);
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_down_head_align);
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_down_tail_align);
++ driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
++ /* Unregister platform driver. */
++ platform_driver_unregister(&cfspi_spi_driver);
++ /* Destroy debugfs root directory. */
++ driver_debugfs_remove();
++}
++
++static int __init cfspi_init_module(void)
++{
++ int result;
++
++ /* Initialize spin lock. */
++ spin_lock_init(&cfspi_list_lock);
++
++ /* Register platform driver. */
++ result = platform_driver_register(&cfspi_spi_driver);
++ if (result) {
++ printk(KERN_ERR "Could not register platform SPI driver.\n");
++ goto err_dev_register;
++ }
++
++ /* Create sysfs files. */
++ result =
++ driver_create_file(&cfspi_spi_driver.driver,
++ &driver_attr_up_head_align);
++ if (result) {
++ printk(KERN_ERR "Sysfs creation failed 1.\n");
++ goto err_create_up_head_align;
++ }
++
++ result =
++ driver_create_file(&cfspi_spi_driver.driver,
++ &driver_attr_up_tail_align);
++ if (result) {
++ printk(KERN_ERR "Sysfs creation failed 2.\n");
++ goto err_create_up_tail_align;
++ }
++
++ result =
++ driver_create_file(&cfspi_spi_driver.driver,
++ &driver_attr_down_head_align);
++ if (result) {
++ printk(KERN_ERR "Sysfs creation failed 3.\n");
++ goto err_create_down_head_align;
++ }
++
++ result =
++ driver_create_file(&cfspi_spi_driver.driver,
++ &driver_attr_down_tail_align);
++ if (result) {
++ printk(KERN_ERR "Sysfs creation failed 4.\n");
++ goto err_create_down_tail_align;
++ }
++
++ result =
++ driver_create_file(&cfspi_spi_driver.driver,
++ &driver_attr_frame_align);
++ if (result) {
++ printk(KERN_ERR "Sysfs creation failed 5.\n");
++ goto err_create_frame_align;
++ }
++ driver_debugfs_create();
++ return result;
++
++ err_create_frame_align:
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_down_tail_align);
++ err_create_down_tail_align:
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_down_head_align);
++ err_create_down_head_align:
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_up_tail_align);
++ err_create_up_tail_align:
++ driver_remove_file(&cfspi_spi_driver.driver,
++ &driver_attr_up_head_align);
++ err_create_up_head_align:
++ err_dev_register:
++ return result;
++}
++
++module_init(cfspi_init_module);
++module_exit(cfspi_exit_module);
+--- /dev/null
++++ b/drivers/net/caif/caif_spi_slave.c
+@@ -0,0 +1,252 @@
++/*
++ * Copyright (C) ST-Ericsson AB 2010
++ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
++ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
++ * License terms: GNU General Public License (GPL) version 2.
++ */
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/string.h>
++#include <linux/semaphore.h>
++#include <linux/workqueue.h>
++#include <linux/completion.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/debugfs.h>
++#include <net/caif/caif_spi.h>
++
++#ifndef CONFIG_CAIF_SPI_SYNC
++#define SPI_DATA_POS SPI_CMD_SZ
++static inline int forward_to_spi_cmd(struct cfspi *cfspi)
++{
++ return cfspi->rx_cpck_len;
++}
++#else
++#define SPI_DATA_POS 0
++static inline int forward_to_spi_cmd(struct cfspi *cfspi)
++{
++ return 0;
++}
++#endif
++
++int spi_frm_align = 2;
++int spi_up_head_align = 1;
++int spi_up_tail_align;
++int spi_down_head_align = 3;
++int spi_down_tail_align = 1;
++
++#ifdef CONFIG_DEBUG_FS
++static inline void debugfs_store_prev(struct cfspi *cfspi)
++{
++ /* Store previous command for debugging reasons.*/
++ cfspi->pcmd = cfspi->cmd;
++ /* Store previous transfer. */
++ cfspi->tx_ppck_len = cfspi->tx_cpck_len;
++ cfspi->rx_ppck_len = cfspi->rx_cpck_len;
++}
++#else
++static inline void debugfs_store_prev(struct cfspi *cfspi)
++{
++}
++#endif
++
++void cfspi_xfer(struct work_struct *work)
++{
++ struct cfspi *cfspi;
++ u8 *ptr = NULL;
++ unsigned long flags;
++ int ret;
++ cfspi = container_of(work, struct cfspi, work);
++
++ /* Initialize state. */
++ cfspi->cmd = SPI_CMD_EOT;
++
++ for (;;) {
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);
++
++ /* Wait for master talk or transmit event. */
++ wait_event_interruptible(cfspi->wait,
++ test_bit(SPI_XFER, &cfspi->state) ||
++ test_bit(SPI_TERMINATE, &cfspi->state));
++
++ if (test_bit(SPI_TERMINATE, &cfspi->state))
++ return;
++
++#if CFSPI_DBG_PREFILL
++ /* Prefill buffers for easier debugging. */
++ memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
++ memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
++#endif /* CFSPI_DBG_PREFILL */
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);
++
++ /* Check whether we have a committed frame. */
++ if (cfspi->tx_cpck_len) {
++ int len;
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
++
++			/* Copy committed SPI frames after the SPI indication. */
++ ptr = (u8 *) cfspi->xfer.va_tx;
++ ptr += SPI_IND_SZ;
++ len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
++ WARN_ON(len != cfspi->tx_cpck_len);
++ }
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);
++
++ /* Get length of next frame to commit. */
++ cfspi->tx_npck_len = cfspi_xmitlen(cfspi);
++
++ WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);
++
++ /*
++ * Add indication and length at the beginning of the frame,
++ * using little endian.
++ */
++ ptr = (u8 *) cfspi->xfer.va_tx;
++ *ptr++ = SPI_CMD_IND;
++ *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
++ *ptr++ = cfspi->tx_npck_len & 0x00FF;
++ *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;
++
++ /* Calculate length of DMAs. */
++ cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
++ cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;
++
++ /* Add SPI TX frame alignment padding, if necessary. */
++ if (cfspi->tx_cpck_len &&
++ (cfspi->xfer.tx_dma_len % spi_frm_align)) {
++
++ cfspi->xfer.tx_dma_len += spi_frm_align -
++ (cfspi->xfer.tx_dma_len % spi_frm_align);
++ }
++
++ /* Add SPI RX frame alignment padding, if necessary. */
++ if (cfspi->rx_cpck_len &&
++ (cfspi->xfer.rx_dma_len % spi_frm_align)) {
++
++ cfspi->xfer.rx_dma_len += spi_frm_align -
++ (cfspi->xfer.rx_dma_len % spi_frm_align);
++ }
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);
++
++ /* Start transfer. */
++ ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
++ WARN_ON(ret);
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);
++
++ /*
++ * TODO: We might be able to make an assumption if this is the
++ * first loop. Make sure that minimum toggle time is respected.
++ */
++ udelay(MIN_TRANSITION_TIME_USEC);
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
++
++ /* Signal that we are ready to recieve data. */
++ cfspi->dev->sig_xfer(true, cfspi->dev);
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
++
++ /* Wait for transfer completion. */
++ wait_for_completion(&cfspi->comp);
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);
++
++ if (cfspi->cmd == SPI_CMD_EOT) {
++ /*
++ * Clear the master talk bit. A xfer is always at
++ * least two bursts.
++ */
++ clear_bit(SPI_SS_ON, &cfspi->state);
++ }
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);
++
++ /* Make sure that the minimum toggle time is respected. */
++ if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
++ cfspi->dev->clk_mhz) <
++ MIN_TRANSITION_TIME_USEC) {
++
++ udelay(MIN_TRANSITION_TIME_USEC -
++ SPI_XFER_TIME_USEC
++ (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
++ }
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);
++
++ /* De-assert transfer signal. */
++ cfspi->dev->sig_xfer(false, cfspi->dev);
++
++ /* Check whether we received a CAIF packet. */
++ if (cfspi->rx_cpck_len) {
++ int len;
++
++ cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);
++
++ /* Parse SPI frame. */
++ ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));
++
++ len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
++ WARN_ON(len != cfspi->rx_cpck_len);
++ }
++
++ /* Check the next SPI command and length. */
++ ptr = (u8 *) cfspi->xfer.va_rx;
++
++ ptr += forward_to_spi_cmd(cfspi);
++
++ cfspi->cmd = *ptr++;
++ cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
++ cfspi->rx_npck_len = *ptr++;
++ cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;
++
++ WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
++ WARN_ON(cfspi->cmd > SPI_CMD_EOT);
++
++ debugfs_store_prev(cfspi);
++
++ /* Check whether the master issued an EOT command. */
++ if (cfspi->cmd == SPI_CMD_EOT) {
++ /* Reset state. */
++ cfspi->tx_cpck_len = 0;
++ cfspi->rx_cpck_len = 0;
++ } else {
++ /* Update state. */
++ cfspi->tx_cpck_len = cfspi->tx_npck_len;
++ cfspi->rx_cpck_len = cfspi->rx_npck_len;
++ }
++
++ /*
++ * Check whether we need to clear the xfer bit.
++ * Spin lock needed for packet insertion.
++ * Test and clear of different bits
++ * are not supported.
++ */
++ spin_lock_irqsave(&cfspi->lock, flags);
++ if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
++ && !test_bit(SPI_SS_ON, &cfspi->state))
++ clear_bit(SPI_XFER, &cfspi->state);
++
++ spin_unlock_irqrestore(&cfspi->lock, flags);
++ }
++}
++
++struct platform_driver cfspi_spi_driver = {
++ .probe = cfspi_spi_probe,
++ .remove = cfspi_spi_remove,
++ .driver = {
++ .name = "cfspi_sspi",
++ .owner = THIS_MODULE,
++ },
++};
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -163,9 +163,16 @@
+ struct resource *res, unsigned int pos)
+ {
+ u32 l, sz, mask;
++ u16 orig_cmd;
+
+ mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+
++ if (!dev->mmio_always_on) {
++ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
++ pci_write_config_word(dev, PCI_COMMAND,
++ orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
++ }
++
+ res->name = pci_name(dev);
+
+ pci_read_config_dword(dev, pos, &l);
+@@ -173,6 +180,9 @@
+ pci_read_config_dword(dev, pos, &sz);
+ pci_write_config_dword(dev, pos, l);
+
++ if (!dev->mmio_always_on)
++ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
++
+ /*
+ * All bits set in sz means the device isn't working properly.
+ * If the BAR isn't implemented, all bits must be 0. If it's a
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -91,6 +91,19 @@
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
+
++/*
++ * Decoding should be disabled for a PCI device during BAR sizing to avoid
++ * conflict. But doing so may cause problems on host bridge and perhaps other
++ * key system devices. For devices that need to have mmio decoding always-on,
++ * we need to set the dev->mmio_always_on bit.
++ */
++static void __devinit quirk_mmio_always_on(struct pci_dev *dev)
++{
++ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
++ dev->mmio_always_on = 1;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_mmio_always_on);
++
+ /* The Mellanox Tavor device gives false positive parity errors
+ * Mark this device with a broken_parity_status, to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -528,6 +528,13 @@
+ keys as input device, backlight device, tablet and accelerometer
+ devices.
+
++config GPIO_INTEL_PMIC
++ bool "Intel PMIC GPIO support"
++ depends on INTEL_SCU_IPC && SPI_MASTER
++ help
++ Say Y here to support GPIO via the SCU IPC interface
++ on Intel MID platforms.
++
+ config INTEL_SCU_IPC
+ bool "Intel SCU IPC Support"
+ depends on X86_MRST
+@@ -537,4 +544,21 @@
+ some embedded Intel x86 platforms. This is not needed for PC-type
+ machines.
+
++config INTEL_SCU_IPC_UTIL
++ bool "Intel SCU IPC utility driver"
++ depends on INTEL_SCU_IPC
++ default y
++ ---help---
++ IPC utility driver enables user to perform pmic/msic regsiter access
++ and firmware update functionality
++
++config INTEL_MID_VIB
++ tristate "Vibrator driver for Intel MID platforms"
++ depends on INTEL_SCU_IPC
++ help
++ This driver provides a sys interface to the vibrator device
++ on the Intel MID platforms.
++
++ If unsure, say N.
++
+ endif # X86_PLATFORM_DEVICES
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -26,3 +26,6 @@
+ obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+ obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
+ obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
++obj-$(CONFIG_INTEL_SCU_IPC_UTIL)+= intel_scu_ipcutil.o
++obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
++obj-$(CONFIG_INTEL_MID_VIB) += intel_mid_vibrator.o
+--- /dev/null
++++ b/drivers/platform/x86/intel_mid_vibrator.c
+@@ -0,0 +1,88 @@
++/*
++ * intel_mid_vibrator.c - Intel vibrator Driver
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/platform_device.h>
++#include <linux/kernel.h>
++#include <linux/sysfs.h>
++#include <asm/intel_scu_ipc.h>
++
++
++static struct platform_device *vib_pdev;
++
++/*
++ * If the PMIC hasn't been discovered or one is not found then
++ * the calls will error for us.
++ */
++
++static ssize_t vib_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val) {
++ if (intel_scu_ipc_iowrite8(0x49, 0xAD))
++ return -EINVAL;
++ } else {
++ if (intel_scu_ipc_iowrite8(0x49, 0x14))
++ return -EINVAL;
++ }
++ return count;
++}
++
++static struct device_attribute dev_attr_vib =
++ __ATTR(vib, S_IWUSR, NULL, vib_store);
++
++
++/*
++ * The vibrator interface is non-discoverable and attached only via
++ * the PMIC IPC, so we create ourselves as a platform device. If it
++ * becomes discoverable this will change to a match handler for the
++ * device and the device itself will be created by whoever enumerates it.
++ */
++
++static int __init mrst_vib_init(void)
++{
++ vib_pdev = platform_device_register_simple("mrst_vib", -1, NULL, 0);
++ if (IS_ERR(vib_pdev)) {
++ printk(KERN_WARNING
++ "mrst_vib: unable to register platform device\n");
++ return PTR_ERR(vib_pdev);
++ }
++ return device_create_file(&vib_pdev->dev, &dev_attr_vib);
++}
++
++static void __exit mrst_vib_exit(void)
++{
++ device_remove_file(&vib_pdev->dev, &dev_attr_vib);
++ platform_device_unregister(vib_pdev);
++}
++
++module_init(mrst_vib_init);
++module_exit(mrst_vib_exit);
++
++MODULE_AUTHOR("Kalhan Trisal");
++MODULE_DESCRIPTION("Intel Moorestown Vibrator Driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/platform/x86/intel_pmic_gpio.c
+@@ -0,0 +1,342 @@
++/* Moorestown PMIC GPIO (access through IPC) driver
++ * Copyright (c) 2008 - 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Moorestown platform PMIC chip
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/stddef.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/interrupt.h>
++#include <asm/intel_scu_ipc.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/intel_pmic_gpio.h>
++#include <linux/platform_device.h>
++
++#define DRIVER_NAME "pmic_gpio"
++
++/* register offset that IPC driver should use
++ * 8 GPIO + 8 GPOSW (6 controllable) + 8GPO
++ */
++enum pmic_gpio_register {
++ GPIO0 = 0xE0,
++ GPIO7 = 0xE7,
++ GPIOINT = 0xE8,
++ GPOSWCTL0 = 0xEC,
++ GPOSWCTL5 = 0xF1,
++ GPO = 0xF4,
++};
++
++/* bits definition for GPIO & GPOSW */
++#define GPIO_DRV 0x01
++#define GPIO_DIR 0x02
++#define GPIO_DIN 0x04
++#define GPIO_DOU 0x08
++#define GPIO_INTCTL 0x30
++#define GPIO_DBC 0xc0
++
++#define GPOSW_DRV 0x01
++#define GPOSW_DOU 0x08
++#define GPOSW_RDRV 0x30
++
++
++#define NUM_GPIO 24
++
++struct pmic_gpio_irq {
++ spinlock_t lock;
++ u32 trigger[NUM_GPIO];
++ u32 dirty;
++ struct work_struct work;
++};
++
++
++struct pmic_gpio {
++ struct gpio_chip chip;
++ struct pmic_gpio_irq irqtypes;
++ void *gpiointr;
++ int irq;
++ struct spi_device *spi;
++ unsigned irq_base;
++};
++
++static void pmic_program_irqtype(int gpio, int type)
++{
++ if (type & IRQ_TYPE_EDGE_RISING)
++ intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
++ else
++ intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
++
++ if (type & IRQ_TYPE_EDGE_FALLING)
++ intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
++ else
++ intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
++};
++
++static void pmic_irqtype_work(struct work_struct *work)
++{
++ struct pmic_gpio_irq *t =
++ container_of(work, struct pmic_gpio_irq, work);
++ unsigned long flags;
++ int i;
++ u16 type;
++
++ spin_lock_irqsave(&t->lock, flags);
++ /* As we drop the lock, we may need multiple scans if we race the
++ pmic_irq_type function */
++ while (t->dirty) {
++ /*
++ * For each pin that has the dirty bit set send an IPC
++ * message to configure the hardware via the PMIC
++ */
++ for (i = 0; i < NUM_GPIO; i++) {
++ if (!(t->dirty & (1 << i)))
++ continue;
++ t->dirty &= ~(1 << i);
++ /* We can't trust the array entry or dirty
++ once the lock is dropped */
++ type = t->trigger[i];
++ spin_unlock_irqrestore(&t->lock, flags);
++ pmic_program_irqtype(i, type);
++ spin_lock_irqsave(&t->lock, flags);
++ }
++ }
++ spin_unlock_irqrestore(&t->lock, flags);
++}
++
++static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
++{
++ if (offset > 8) {
++ printk(KERN_ERR
++ "%s: only pin 0-7 support input\n", __func__);
++ return -1;/* we only have 8 GPIO can use as input */
++ }
++ return intel_scu_ipc_update_register(GPIO0 + offset,
++ GPIO_DIR, GPIO_DIR);
++}
++
++static int pmic_gpio_direction_output(struct gpio_chip *chip,
++ unsigned offset, int value)
++{
++ int rc = 0;
++
++ if (offset < 8)/* it is GPIO */
++ rc = intel_scu_ipc_update_register(GPIO0 + offset,
++ GPIO_DRV | (value ? GPIO_DOU : 0),
++ GPIO_DRV | GPIO_DOU | GPIO_DIR);
++ else if (offset < 14)/* it is GPOSW */
++ rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
++ GPOSW_DRV | (value ? GPOSW_DOU : 0),
++ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
++ else if (offset > 15 && offset < 24)/* it is GPO */
++ rc = intel_scu_ipc_update_register(GPO,
++ value ? 1 << (offset - 16) : 0,
++ 1 << (offset - 16));
++ else {
++ printk(KERN_ERR
++ "%s: invalid PMIC GPIO pin %d!\n", __func__, offset);
++ WARN_ON(1);
++ }
++
++ return rc;
++}
++
++static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ u8 r;
++
++ /* we only have 8 GPIO pins we can use as input */
++ if (offset > 8)
++ return -1;
++ if (intel_scu_ipc_ioread8(GPIO0 + offset, &r))
++ return -1;
++ return r & GPIO_DIN;
++}
++
++static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ if (offset < 8)/* it is GPIO */
++ intel_scu_ipc_update_register(GPIO0 + offset,
++ GPIO_DRV | (value ? GPIO_DOU : 0),
++ GPIO_DRV | GPIO_DOU);
++ else if (offset < 14)/* it is GPOSW */
++ intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
++ GPOSW_DRV | (value ? GPOSW_DOU : 0),
++ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
++ else if (offset > 15 && offset < 24) /* it is GPO */
++ intel_scu_ipc_update_register(GPO,
++ value ? 1 << (offset - 16) : 0,
++ 1 << (offset - 16));
++}
++
++static int pmic_irq_type(unsigned irq, unsigned type)
++{
++ struct pmic_gpio *pg = get_irq_chip_data(irq);
++ u32 gpio = irq - pg->irq_base;
++ unsigned long flags;
++
++ if (gpio < 0 || gpio > pg->chip.ngpio)
++ return -EINVAL;
++
++ spin_lock_irqsave(&pg->irqtypes.lock, flags);
++ pg->irqtypes.trigger[gpio] = type;
++ pg->irqtypes.dirty |= (1 << gpio);
++ spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
++ schedule_work(&pg->irqtypes.work);
++ return 0;
++}
++
++
++
++static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
++{
++ struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
++
++ return pg->irq_base + offset;
++}
++
++/* the gpiointr register is read-clear, so just do nothing. */
++static void pmic_irq_unmask(unsigned irq)
++{
++};
++
++static void pmic_irq_mask(unsigned irq)
++{
++};
++
++static struct irq_chip pmic_irqchip = {
++ .name = "PMIC-GPIO",
++ .mask = pmic_irq_mask,
++ .unmask = pmic_irq_unmask,
++ .set_type = pmic_irq_type,
++};
++
++static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
++{
++ struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
++ u8 intsts = *((u8 *)pg->gpiointr + 4);
++ int gpio;
++
++ for (gpio = 0; gpio < 8; gpio++) {
++ if (intsts & (1 << gpio)) {
++ pr_debug("pmic pin %d triggered\n", gpio);
++ generic_handle_irq(pg->irq_base + gpio);
++ }
++ }
++ desc->chip->eoi(irq);
++}
++
++static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ int irq = platform_get_irq(pdev, 0);
++ struct intel_pmic_gpio_platform_data *pdata = dev->platform_data;
++
++ struct pmic_gpio *pg;
++ int retval;
++ int i;
++
++
++ printk(KERN_INFO "%s: PMIC GPIO driver loaded.\n", __func__);
++
++ if (irq < 0) {
++ dev_dbg(dev, "no IRQ line\n");
++ return -EINVAL;
++ }
++
++ if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
++ dev_dbg(dev, "incorrect or missing platform data\n");
++ return -EINVAL;
++ }
++
++ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
++ if (!pg)
++ return -ENOMEM;
++
++ dev_set_drvdata(dev, pg);
++
++ pg->irq = irq;
++ /* setting up SRAM mapping for GPIOINT register */
++ pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
++ if (!pg->gpiointr) {
++ printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
++ retval = -EINVAL;
++ goto err2;
++ }
++ pg->irq_base = pdata->irq_base;
++ pg->chip.label = "intel_pmic";
++ pg->chip.direction_input = pmic_gpio_direction_input;
++ pg->chip.direction_output = pmic_gpio_direction_output;
++ pg->chip.get = pmic_gpio_get;
++ pg->chip.set = pmic_gpio_set;
++ pg->chip.to_irq = pmic_gpio_to_irq;
++ pg->chip.base = pdata->gpio_base;
++ pg->chip.ngpio = NUM_GPIO;
++ pg->chip.can_sleep = 1;
++ pg->chip.dev = dev;
++
++ INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
++ spin_lock_init(&pg->irqtypes.lock);
++
++ pg->chip.dev = dev;
++ retval = gpiochip_add(&pg->chip);
++ if (retval) {
++ printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
++ goto err;
++ }
++ set_irq_data(pg->irq, pg);
++ set_irq_chained_handler(pg->irq, pmic_irq_handler);
++ for (i = 0; i < 8; i++) {
++ set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
++ handle_simple_irq, "demux");
++ set_irq_chip_data(i + pg->irq_base, pg);
++ }
++ return 0;
++err:
++ iounmap(pg->gpiointr);
++err2:
++ kfree(pg);
++ return retval;
++}
++
++/* at the same time, register a platform driver
++ * this supports the sfi 0.81 fw */
++static struct platform_driver platform_pmic_gpio_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = platform_pmic_gpio_probe,
++};
++
++static int __init platform_pmic_gpio_init(void)
++{
++ return platform_driver_register(&platform_pmic_gpio_driver);
++}
++
++subsys_initcall(platform_pmic_gpio_init);
++
++MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -23,7 +23,7 @@
+ #include <linux/pm.h>
+ #include <linux/pci.h>
+ #include <linux/interrupt.h>
+-#include <asm/setup.h>
++#include <asm/mrst.h>
+ #include <asm/intel_scu_ipc.h>
+
+ /* IPC defines the following message types */
+@@ -38,10 +38,6 @@
+ #define IPC_CMD_PCNTRL_R 1 /* Register read */
+ #define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
+
+-/* Miscelaneous Command ids */
+-#define IPC_CMD_INDIRECT_RD 2 /* 32bit indirect read */
+-#define IPC_CMD_INDIRECT_WR 5 /* 32bit indirect write */
+-
+ /*
+ * IPC register summary
+ *
+@@ -62,8 +58,8 @@
+
+ #define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */
+ #define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */
+-#define IPC_WWBUF_SIZE 16 /* IPC Write buffer Size */
+-#define IPC_RWBUF_SIZE 16 /* IPC Read buffer Size */
++#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
++#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
+ #define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */
+ #define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */
+
+@@ -78,12 +74,7 @@
+
+ static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+
+-static int platform = 1;
+-module_param(platform, int, 0);
+-MODULE_PARM_DESC(platform, "1 for moorestown platform");
+-
+-
+-
++static int platform; /* Platform type */
+
+ /*
+ * IPC Read Buffer (Read Only):
+@@ -119,24 +110,6 @@
+ }
+
+ /*
+- * IPC destination Pointer (Write Only):
+- * Use content as pointer for destination write
+- */
+-static inline void ipc_write_dptr(u32 data) /* Write dptr data */
+-{
+- writel(data, ipcdev.ipc_base + 0x0C);
+-}
+-
+-/*
+- * IPC Source Pointer (Write Only):
+- * Use content as pointer for read location
+-*/
+-static inline void ipc_write_sptr(u32 data) /* Write dptr data */
+-{
+- writel(data, ipcdev.ipc_base + 0x08);
+-}
+-
+-/*
+ * Status Register (Read Only):
+ * Driver will read this register to get the ready/busy status of the IPC
+ * block and error status of the IPC command that was just processed by SCU
+@@ -154,7 +127,7 @@
+ return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+ }
+
+-static inline u8 ipc_data_readl(u32 offset) /* Read ipc u32 data */
++static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
+ {
+ return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+ }
+@@ -175,62 +148,73 @@
+ return -ETIMEDOUT;
+ }
+ }
+- return (status >> 1) & 1;
++ if ((status >> 1) & 1)
++ return -EIO;
++
++ return 0;
+ }
+
+ /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
+ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
+ {
+- int nc;
++ int i, nc, bytes, d;
+ u32 offset = 0;
+ u32 err = 0;
+- u8 cbuf[IPC_WWBUF_SIZE] = { '\0' };
++ u8 cbuf[IPC_WWBUF_SIZE] = { };
+ u32 *wbuf = (u32 *)&cbuf;
+
+ mutex_lock(&ipclock);
++
++ memset(cbuf, 0, sizeof(cbuf));
++
+ if (ipcdev.pdev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+
+- if (platform == 1) {
+- /* Entry is 4 bytes for read/write, 5 bytes for read modify */
+- for (nc = 0; nc < count; nc++) {
++ if (platform != MRST_CPU_CHIP_PENWELL) {
++ bytes = 0;
++ d = 0;
++ for (i = 0; i < count; i++) {
++ cbuf[bytes++] = addr[i];
++ cbuf[bytes++] = addr[i] >> 8;
++ if (id != IPC_CMD_PCNTRL_R)
++ cbuf[bytes++] = data[d++];
++ if (id == IPC_CMD_PCNTRL_M)
++ cbuf[bytes++] = data[d++];
++ }
++ for (i = 0; i < bytes; i += 4)
++ ipc_data_writel(wbuf[i/4], i);
++ ipc_command(bytes << 16 | id << 12 | 0 << 8 | op);
++ } else {
++ for (nc = 0; nc < count; nc++, offset += 2) {
+ cbuf[offset] = addr[nc];
+ cbuf[offset + 1] = addr[nc] >> 8;
+- if (id != IPC_CMD_PCNTRL_R)
+- cbuf[offset + 2] = data[nc];
+- if (id == IPC_CMD_PCNTRL_M) {
+- cbuf[offset + 3] = data[nc + 1];
+- offset += 1;
+- }
+- offset += 3;
+ }
+- for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
+- ipc_data_writel(wbuf[nc], offset); /* Write wbuff */
+
+- } else {
+- for (nc = 0, offset = 0; nc < count; nc++, offset += 2)
+- ipc_data_writel(addr[nc], offset); /* Write addresses */
+- if (id != IPC_CMD_PCNTRL_R) {
+- for (nc = 0; nc < count; nc++, offset++)
+- ipc_data_writel(data[nc], offset); /* Write data */
+- if (id == IPC_CMD_PCNTRL_M)
+- ipc_data_writel(data[nc + 1], offset); /* Mask value*/
++ if (id == IPC_CMD_PCNTRL_R) {
++ for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
++ ipc_data_writel(wbuf[nc], offset);
++ ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op);
++ } else if (id == IPC_CMD_PCNTRL_W) {
++ for (nc = 0; nc < count; nc++, offset += 1)
++ cbuf[offset] = data[nc];
++ for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
++ ipc_data_writel(wbuf[nc], offset);
++ ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op);
++ } else if (id == IPC_CMD_PCNTRL_M) {
++ cbuf[offset] = data[0];
++ cbuf[offset + 1] = data[1];
++ ipc_data_writel(wbuf[0], 0); /* Write wbuff */
++ ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
+ }
+ }
+
+- if (id != IPC_CMD_PCNTRL_M)
+- ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op);
+- else
+- ipc_command((count * 4) << 16 | id << 12 | 0 << 8 | op);
+-
+ err = busy_loop();
+-
+ if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
+ /* Workaround: values are read as 0 without memcpy_fromio */
+- memcpy_fromio(cbuf, ipcdev.ipc_base + IPC_READ_BUFFER, 16);
+- if (platform == 1) {
++ memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
++ if (platform != MRST_CPU_CHIP_PENWELL) {
+ for (nc = 0, offset = 2; nc < count; nc++, offset += 3)
+ data[nc] = ipc_data_readb(offset);
+ } else {
+@@ -405,70 +389,6 @@
+ EXPORT_SYMBOL(intel_scu_ipc_update_register);
+
+ /**
+- * intel_scu_ipc_register_read - 32bit indirect read
+- * @addr: register address
+- * @value: 32bit value return
+- *
+- * Performs IA 32 bit indirect read, returns 0 on success, or an
+- * error code.
+- *
+- * Can be used when SCCB(System Controller Configuration Block) register
+- * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+- *
+- * This function may sleep. Locking for SCU accesses is handled for
+- * the caller.
+- */
+-int intel_scu_ipc_register_read(u32 addr, u32 *value)
+-{
+- u32 err = 0;
+-
+- mutex_lock(&ipclock);
+- if (ipcdev.pdev == NULL) {
+- mutex_unlock(&ipclock);
+- return -ENODEV;
+- }
+- ipc_write_sptr(addr);
+- ipc_command(4 << 16 | IPC_CMD_INDIRECT_RD);
+- err = busy_loop();
+- *value = ipc_data_readl(0);
+- mutex_unlock(&ipclock);
+- return err;
+-}
+-EXPORT_SYMBOL(intel_scu_ipc_register_read);
+-
+-/**
+- * intel_scu_ipc_register_write - 32bit indirect write
+- * @addr: register address
+- * @value: 32bit value to write
+- *
+- * Performs IA 32 bit indirect write, returns 0 on success, or an
+- * error code.
+- *
+- * Can be used when SCCB(System Controller Configuration Block) register
+- * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+- *
+- * This function may sleep. Locking for SCU accesses is handled for
+- * the caller.
+- */
+-int intel_scu_ipc_register_write(u32 addr, u32 value)
+-{
+- u32 err = 0;
+-
+- mutex_lock(&ipclock);
+- if (ipcdev.pdev == NULL) {
+- mutex_unlock(&ipclock);
+- return -ENODEV;
+- }
+- ipc_write_dptr(addr);
+- ipc_data_writel(value, 0);
+- ipc_command(4 << 16 | IPC_CMD_INDIRECT_WR);
+- err = busy_loop();
+- mutex_unlock(&ipclock);
+- return err;
+-}
+-EXPORT_SYMBOL(intel_scu_ipc_register_write);
+-
+-/**
+ * intel_scu_ipc_simple_command - send a simple command
+ * @cmd: command
+ * @sub: sub type
+@@ -524,7 +444,7 @@
+ for (i = 0; i < inlen; i++)
+ ipc_data_writel(*in++, 4 * i);
+
+- ipc_command((sub << 12) | cmd | (inlen << 18));
++ ipc_command((inlen << 16) | (sub << 12) | cmd);
+ err = busy_loop();
+
+ for (i = 0; i < outlen; i++)
+@@ -567,7 +487,7 @@
+ mdelay(1);
+ *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
+ } else if (cmd == IPC_I2C_WRITE) {
+- writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
++ writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
+ mdelay(1);
+ writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
+ } else {
+@@ -778,6 +698,9 @@
+ iounmap(ipcdev.ipc_base);
+ return -ENOMEM;
+ }
++
++ intel_scu_devices_create();
++
+ return 0;
+ }
+
+@@ -799,10 +722,12 @@
+ iounmap(ipcdev.ipc_base);
+ iounmap(ipcdev.i2c_base);
+ ipcdev.pdev = NULL;
++ intel_scu_devices_destroy();
+ }
+
+ static const struct pci_device_id pci_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
+ { 0,}
+ };
+ MODULE_DEVICE_TABLE(pci, pci_ids);
+@@ -817,6 +742,9 @@
+
+ static int __init intel_scu_ipc_init(void)
+ {
++ platform = mrst_identify_cpu();
++ if (platform == 0)
++ return -ENODEV;
+ return pci_register_driver(&ipc_driver);
+ }
+
+--- /dev/null
++++ b/drivers/platform/x86/intel_scu_ipcutil.c
+@@ -0,0 +1,113 @@
++/*
++ * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
++ *
++ * (C) Copyright 2008-2010 Intel Corporation
++ * Author: Sreedhara DS (sreedhara.ds@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * This driver provides ioctl interfaces to call intel scu ipc driver api
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/fcntl.h>
++#include <linux/sched.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <asm/intel_scu_ipc.h>
++
++static u32 major;
++#define MAX_FW_SIZE 264192
++
++/* ioctl commnds */
++#define INTE_SCU_IPC_REGISTER_READ 0
++#define INTE_SCU_IPC_REGISTER_WRITE 1
++#define INTE_SCU_IPC_REGISTER_UPDATE 2
++#define INTE_SCU_IPC_FW_UPDATE 0xA2
++
++struct scu_ipc_data {
++ u32 count; /* No. of registers */
++ u16 addr[5]; /* Register addresses */
++ u8 data[5]; /* Register data */
++ u8 mask; /* Valid for read-modify-write */
++};
++
++int power_cntrl_reg_access(u32 cmd, struct scu_ipc_data *data)
++{
++ int ret;
++
++ if (data->count == 0 || data->count > 5)
++ return -1;
++
++ switch (cmd) {
++ case INTE_SCU_IPC_REGISTER_READ:
++ ret = intel_scu_ipc_readv(data->addr, data->data, data->count);
++ break;
++ case INTE_SCU_IPC_REGISTER_WRITE:
++ ret = intel_scu_ipc_writev(data->addr, data->data, data->count);
++ break;
++ case INTE_SCU_IPC_REGISTER_UPDATE:
++ ret = intel_scu_ipc_update_register(data->addr[0],
++ data->data[0],
++ data->mask);
++ break;
++ default:
++ return -EINVAL;
++ }
++ return ret;
++}
++
++static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++ struct scu_ipc_data data;
++ void __user *argp = (void __user *)arg;
++
++ ret = copy_from_user(&data, argp, sizeof(struct scu_ipc_data));
++ if (data.count == 0 || data.count > 4 || data.count == 3)
++ return -EINVAL;
++
++ if (cmd == INTE_SCU_IPC_FW_UPDATE) {
++ u8 *fwbuf = NULL ;
++
++ fwbuf = kmalloc(MAX_FW_SIZE, GFP_KERNEL);
++ if (fwbuf == NULL)
++ return -ENOMEM;
++ ret = copy_from_user(fwbuf, (u8 *)arg, MAX_FW_SIZE);
++ return intel_scu_ipc_fw_update(fwbuf, MAX_FW_SIZE);
++ } else {
++ power_cntrl_reg_access(cmd, &data);
++ ret = copy_to_user(argp, &data, sizeof(struct scu_ipc_data));
++ }
++ return ret;
++}
++
++static const struct file_operations scu_ipc_fops = {
++ .unlocked_ioctl = scu_ipc_ioctl,
++};
++
++static int __init ipc_module_init(void)
++{
++ return register_chrdev(0, "mid_ipc", &scu_ipc_fops);
++}
++
++static void __exit ipc_module_exit(void)
++{
++ unregister_chrdev(major, "mid_ipc");
++}
++
++module_init(ipc_module_init);
++module_exit(ipc_module_exit);
++
++MODULE_LICENSE("GPL V2");
++MODULE_DESCRIPTION("Utility driver for intel scu ipc");
++MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>")
+--- a/drivers/power/Kconfig
++++ b/drivers/power/Kconfig
+@@ -142,4 +142,11 @@
+ help
+ Say Y to include support for NXP PCF50633 Main Battery Charger.
+
++config BATTERY_INTEL_MID
++ tristate "Battery driver for Intel MID platforms"
++ depends on INTEL_SCU_IPC && SPI && USB_GADGET_LANGWELL
++ help
++ Say Y here to enable the battery driver on Intel MID
++ platforms.
++
+ endif # POWER_SUPPLY
+--- a/drivers/power/Makefile
++++ b/drivers/power/Makefile
+@@ -34,3 +34,4 @@
+ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+ obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
+ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
++obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
+--- /dev/null
++++ b/drivers/power/intel_mid_battery.c
+@@ -0,0 +1,841 @@
++/*
++ * intel_mid_battery.c - Intel MID PMIC Battery Driver
++ *
++ * Copyright (C) 2009 Intel Corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Nithish Mahalingam <nithish.mahalingam@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++#include <linux/jiffies.h>
++#include <linux/param.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/platform_device.h>
++#include <linux/power_supply.h>
++#include <linux/usb/langwell_udc.h>
++
++#include <asm/intel_scu_ipc.h>
++
++#define DRIVER_NAME "pmic_battery"
++
++/*********************************************************************
++ * Generic defines
++ *********************************************************************/
++
++static int debug;
++module_param(debug, int, 0444);
++MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
++
++#define PMIC_BATT_DRV_INFO_UPDATED 1
++#define PMIC_BATT_PRESENT 1
++#define PMIC_BATT_NOT_PRESENT 0
++#define PMIC_USB_PRESENT PMIC_BATT_PRESENT
++#define PMIC_USB_NOT_PRESENT PMIC_BATT_NOT_PRESENT
++
++/* pmic battery register related */
++#define PMIC_BATT_CHR_SCHRGINT_ADDR 0xD2
++#define PMIC_BATT_CHR_SBATOVP_MASK (1 << 1)
++#define PMIC_BATT_CHR_STEMP_MASK (1 << 2)
++#define PMIC_BATT_CHR_SCOMP_MASK (1 << 3)
++#define PMIC_BATT_CHR_SUSBDET_MASK (1 << 4)
++#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
++#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
++#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
++#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
++#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
++#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
++
++/* pmic ipc related */
++#define PMIC_BATT_CHR_IPC_FCHRG_SUBID 0x4
++#define PMIC_BATT_CHR_IPC_TCHRG_SUBID 0x6
++
++/* types of battery charging */
++enum batt_charge_type {
++ BATT_USBOTG_500MA_CHARGE,
++ BATT_USBOTG_TRICKLE_CHARGE,
++};
++
++/* valid battery events */
++enum batt_event {
++ BATT_EVENT_BATOVP_EXCPT,
++ BATT_EVENT_USBOVP_EXCPT,
++ BATT_EVENT_TEMP_EXCPT,
++ BATT_EVENT_DCLMT_EXCPT,
++ BATT_EVENT_EXCPT
++};
++
++
++/*********************************************************************
++ * Battery properties
++ *********************************************************************/
++
++/*
++ * pmic battery info
++ */
++struct pmic_power_module_info {
++ bool is_dev_info_updated;
++ struct device *dev;
++ /* pmic battery data */
++ unsigned long update_time; /* jiffies when data read */
++ unsigned int usb_is_present;
++ unsigned int batt_is_present;
++ unsigned int batt_health;
++ unsigned int usb_health;
++ unsigned int batt_status;
++ unsigned int batt_charge_now; /* in mAS */
++ unsigned int batt_prev_charge_full; /* in mAS */
++ unsigned int batt_charge_rate; /* in units per second */
++
++ struct power_supply usb;
++ struct power_supply batt;
++ int irq; /* GPE_ID or IRQ# */
++ struct workqueue_struct *monitor_wqueue;
++ struct delayed_work monitor_battery;
++ struct work_struct handler;
++};
++
++static unsigned int delay_time = 2000; /* in ms */
++
++/*
++ * pmic ac properties
++ */
++static enum power_supply_property pmic_usb_props[] = {
++ POWER_SUPPLY_PROP_PRESENT,
++ POWER_SUPPLY_PROP_HEALTH,
++};
++
++/*
++ * pmic battery properties
++ */
++static enum power_supply_property pmic_battery_props[] = {
++ POWER_SUPPLY_PROP_STATUS,
++ POWER_SUPPLY_PROP_HEALTH,
++ POWER_SUPPLY_PROP_PRESENT,
++ POWER_SUPPLY_PROP_CHARGE_NOW,
++ POWER_SUPPLY_PROP_CHARGE_FULL,
++ POWER_SUPPLY_PROP_CHARGE_AVG,
++};
++
++
++/*
++ * Glue functions for talking to the IPC
++ */
++
++struct battery_property {
++ u32 capacity; /* Charger capacity */
++ u8 crnt; /* Quick charge current value*/
++ u8 volt; /* Fine adjustment of constant charge voltage */
++ u8 prot; /* CHRGPROT register value */
++ u8 prot2; /* CHRGPROT1 register value */
++ u8 timer; /* Charging timer */
++};
++
++#define IPCMSG_BATTERY 0xEF
++
++/* Battery coulomb counter accumulator commands */
++#define IPC_CMD_CC_WR 0 /* Update coulomb counter value */
++#define IPC_CMD_CC_RD 1 /* Read coulomb counter value */
++#define IPC_CMD_BATTERY_PROPERTY 2 /* Read Battery property */
++
++/**
++ * pmic_scu_ipc_battery_cc_read - read battery cc
++ * @value: battery coulomb counter read
++ *
++ * Reads the battery coulomb counter value, returns 0 on success, or
++ * an error code
++ *
++ * This function may sleep. Locking for SCU accesses is handled for
++ * the caller.
++ */
++static int pmic_scu_ipc_battery_cc_read(u32 *value)
++{
++ return intel_scu_ipc_command(IPCMSG_BATTERY, IPC_CMD_CC_RD,
++ NULL, 0, value, 1);
++}
++
++/**
++ * pmic_scu_ipc_battery_property_get - fetch properties
++ * @prop: battery properties
++ *
++ * Retrieve the battery properties from the power management
++ *
++ * This function may sleep. Locking for SCU accesses is handled for
++ * the caller.
++ */
++static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
++{
++ u32 data[3];
++ u8 *p = (u8 *)&data[1];
++ int err = intel_scu_ipc_command(IPCMSG_BATTERY,
++ IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
++
++ prop->capacity = data[0];
++ prop->crnt = *p++;
++ prop->volt = *p++;
++ prop->prot = *p++;
++ prop->prot2 = *p++;
++ prop->timer = *p++;
++
++ return err;
++}
++
++/**
++ * pmic_scu_ipc_set_charger - set charger
++ * @charger: charger to select
++ *
++ * Switch the charging mode for the SCU
++ */
++
++static int pmic_scu_ipc_set_charger(int charger)
++{
++ return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
++}
++
++/**
++ * pmic_battery_log_event - log battery events
++ * @event: battery event to be logged
++ * Context: can sleep
++ *
++ * There are multiple battery events which may be of interest to users;
++ * this battery function logs the different battery events onto the
++ * kernel log messages.
++ */
++static void pmic_battery_log_event(enum batt_event event)
++{
++ printk(KERN_WARNING "pmic-battery: ");
++ switch (event) {
++ case BATT_EVENT_BATOVP_EXCPT:
++ printk(KERN_CONT "battery overvoltage condition\n");
++ break;
++ case BATT_EVENT_USBOVP_EXCPT:
++ printk(KERN_CONT "usb charger overvoltage condition\n");
++ break;
++ case BATT_EVENT_TEMP_EXCPT:
++ printk(KERN_CONT "high battery temperature condition\n");
++ break;
++ case BATT_EVENT_DCLMT_EXCPT:
++ printk(KERN_CONT "over battery charge current condition\n");
++ break;
++ default:
++ printk(KERN_CONT "charger/battery exception %d\n", event);
++ break;
++ }
++}
++
++/**
++ * pmic_battery_read_status - read battery status information
++ * @pbi: device info structure to update the read information
++ * Context: can sleep
++ *
++ * PMIC power source information need to be updated based on the data read
++ * from the PMIC battery registers.
++ *
++ */
++static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
++{
++ unsigned int update_time_intrvl;
++ unsigned int chrg_val;
++ u32 ccval;
++ u8 r8;
++ struct battery_property batt_prop;
++ int batt_present = 0;
++ int usb_present = 0;
++ int batt_exception = 0;
++
++ /* make sure the last batt_status read happened delay_time before */
++ if (pbi->update_time && time_before(jiffies, pbi->update_time +
++ msecs_to_jiffies(delay_time)))
++ return;
++
++ update_time_intrvl = jiffies_to_msecs(jiffies - pbi->update_time);
++ pbi->update_time = jiffies;
++
++ /* read coulomb counter registers and schrgint register */
++ if (pmic_scu_ipc_battery_cc_read(&ccval)) {
++ dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
++ __func__);
++ return;
++ }
++
++ if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
++ dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
++ __func__);
++ return;
++ }
++
++ /*
++ * set pmic_power_module_info members based on pmic register values
++ * read.
++ */
++
++ /* set batt_is_present */
++ if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
++ pbi->batt_is_present = PMIC_BATT_PRESENT;
++ batt_present = 1;
++ } else {
++ pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
++ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
++ }
++
++ /* set batt_health */
++ if (batt_present) {
++ if (r8 & PMIC_BATT_CHR_SBATOVP_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
++ batt_exception = 1;
++ } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
++ batt_exception = 1;
++ } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pmic_battery_log_event(BATT_EVENT_TEMP_EXCPT);
++ batt_exception = 1;
++ } else {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
++ }
++ }
++
++ /* set usb_is_present */
++ if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
++ pbi->usb_is_present = PMIC_USB_PRESENT;
++ usb_present = 1;
++ } else {
++ pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ }
++
++ if (usb_present) {
++ if (r8 & PMIC_BATT_CHR_SUSBOVP_MASK) {
++ pbi->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
++ pmic_battery_log_event(BATT_EVENT_USBOVP_EXCPT);
++ } else {
++ pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
++ }
++ }
++
++ chrg_val = ccval & PMIC_BATT_ADC_ACCCHRGVAL_MASK;
++
++ /* set batt_prev_charge_full to battery capacity the first time */
++ if (!pbi->is_dev_info_updated) {
++ if (pmic_scu_ipc_battery_property_get(&batt_prop)) {
++ dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
++ __func__);
++ return;
++ }
++ pbi->batt_prev_charge_full = batt_prop.capacity;
++ }
++
++ /* set batt_status */
++ if (batt_present && !batt_exception) {
++ if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
++ pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
++ pbi->batt_prev_charge_full = chrg_val;
++ } else if (ccval & PMIC_BATT_ADC_ACCCHRG_MASK) {
++ pbi->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
++ } else {
++ pbi->batt_status = POWER_SUPPLY_STATUS_CHARGING;
++ }
++ }
++
++ /* set batt_charge_rate */
++ if (pbi->is_dev_info_updated && batt_present && !batt_exception) {
++ if (pbi->batt_status == POWER_SUPPLY_STATUS_DISCHARGING) {
++ if (pbi->batt_charge_now - chrg_val) {
++ pbi->batt_charge_rate = ((pbi->batt_charge_now -
++ chrg_val) * 1000 * 60) /
++ update_time_intrvl;
++ }
++ } else if (pbi->batt_status == POWER_SUPPLY_STATUS_CHARGING) {
++ if (chrg_val - pbi->batt_charge_now) {
++ pbi->batt_charge_rate = ((chrg_val -
++ pbi->batt_charge_now) * 1000 * 60) /
++ update_time_intrvl;
++ }
++ } else
++ pbi->batt_charge_rate = 0;
++ } else {
++ pbi->batt_charge_rate = -1;
++ }
++
++ /* batt_charge_now */
++ if (batt_present && !batt_exception)
++ pbi->batt_charge_now = chrg_val;
++ else
++ pbi->batt_charge_now = -1;
++
++ pbi->is_dev_info_updated = PMIC_BATT_DRV_INFO_UPDATED;
++}
++
++/**
++ * pmic_usb_get_property - usb power source get property
++ * @psy: usb power supply context
++ * @psp: usb power source property
++ * @val: usb power source property value
++ * Context: can sleep
++ *
++ * PMIC usb power source property needs to be provided to power_supply
++ * subsystem for it to provide the information to users.
++ */
++static int pmic_usb_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct pmic_power_module_info *pbi = container_of(psy,
++ struct pmic_power_module_info, usb);
++
++ /* update pmic_power_module_info members */
++ pmic_battery_read_status(pbi);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = pbi->usb_is_present;
++ break;
++ case POWER_SUPPLY_PROP_HEALTH:
++ val->intval = pbi->usb_health;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/**
++ * pmic_battery_get_property - battery power source get property
++ * @psy: battery power supply context
++ * @psp: battery power source property
++ * @val: battery power source property value
++ * Context: can sleep
++ *
++ * PMIC battery power source property needs to be provided to power_supply
++ * subsystem for it to provide the information to users.
++ */
++static int pmic_battery_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct pmic_power_module_info *pbi = container_of(psy,
++ struct pmic_power_module_info, batt);
++
++ /* update pmic_power_module_info members */
++ pmic_battery_read_status(pbi);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_STATUS:
++ val->intval = pbi->batt_status;
++ break;
++ case POWER_SUPPLY_PROP_HEALTH:
++ val->intval = pbi->batt_health;
++ break;
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = pbi->batt_is_present;
++ break;
++ case POWER_SUPPLY_PROP_CHARGE_NOW:
++ val->intval = pbi->batt_charge_now;
++ break;
++ case POWER_SUPPLY_PROP_CHARGE_FULL:
++ val->intval = pbi->batt_prev_charge_full;
++ break;
++ case POWER_SUPPLY_PROP_CHARGE_AVG:
++ val->intval = pbi->batt_charge_rate;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/**
++ * pmic_battery_monitor - monitor battery status
++ * @work: work structure
++ * Context: can sleep
++ *
++ * PMIC battery status needs to be monitored for any change
++ * and information needs to be frequently updated.
++ */
++static void pmic_battery_monitor(struct work_struct *work)
++{
++ struct pmic_power_module_info *pbi = container_of(work,
++ struct pmic_power_module_info, monitor_battery.work);
++
++ /* update pmic_power_module_info members */
++ pmic_battery_read_status(pbi);
++ queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 10);
++}
++
++/**
++ * pmic_battery_set_charger - set battery charger
++ * @pbi: device info structure
++ * @chrg: charge mode to set battery charger in
++ * Context: can sleep
++ *
++ * PMIC battery charger needs to be enabled based on the usb charge
++ * capabilities connected to the platform.
++ */
++static int pmic_battery_set_charger(struct pmic_power_module_info *pbi,
++ enum batt_charge_type chrg)
++{
++ int retval;
++
++ /* set usblmt bits and chrgcntl register bits appropriately */
++ switch (chrg) {
++ case BATT_USBOTG_500MA_CHARGE:
++ retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_FCHRG_SUBID);
++ break;
++ case BATT_USBOTG_TRICKLE_CHARGE:
++ retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_TCHRG_SUBID);
++ break;
++ default:
++ dev_warn(pbi->dev, "%s(): out of range usb charger "
++ "charge detected\n", __func__);
++ return -EINVAL;
++ }
++
++ if (retval) {
++ dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
++ __func__);
++ return retval;
++ }
++
++ return 0;
++}
++
++/**
++ * pmic_battery_interrupt_handler - pmic battery interrupt handler
++ * Context: interrupt context
++ *
++ * PMIC battery interrupt handler which will be called with either
++ * battery full condition occurs or usb otg & battery connect
++ * condition occurs.
++ */
++static irqreturn_t pmic_battery_interrupt_handler(int id, void *dev)
++{
++ struct pmic_power_module_info *pbi =
++ (struct pmic_power_module_info *)dev;
++
++ schedule_work(&pbi->handler);
++
++ return IRQ_HANDLED;
++}
++
++/**
++ * pmic_battery_handle_intrpt - pmic battery service interrupt
++ * @work: work structure
++ * Context: can sleep
++ *
++ * PMIC battery needs to either update the battery status as full
++ * if it detects battery full condition caused the interrupt or needs
++ * to enable battery charger if it detects usb and battery detect
++ * caused the source of interrupt.
++ */
++static void pmic_battery_handle_intrpt(struct work_struct *work)
++{
++ struct pmic_power_module_info *pbi = container_of(work,
++ struct pmic_power_module_info, handler);
++ enum batt_charge_type chrg;
++ u8 r8;
++ int retval;
++ int power;
++
++ /* check if pmic_power_module_info is initialized */
++ if (!pbi)
++ return;
++
++ if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
++ dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
++ __func__);
++ return;
++ }
++ /* find the cause of the interrupt */
++ if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
++ pbi->batt_is_present = PMIC_BATT_PRESENT;
++ } else {
++ pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
++ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
++ return;
++ }
++
++ if (r8 & PMIC_BATT_CHR_EXCPT_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pmic_battery_log_event(BATT_EVENT_EXCPT);
++ return;
++ } else {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
++ }
++
++ if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
++ u32 ccval;
++ pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
++
++ if (pmic_scu_ipc_battery_cc_read(&ccval)) {
++ dev_warn(pbi->dev, "%s(): ipc config cmd "
++ "failed\n", __func__);
++ return;
++ }
++ pbi->batt_prev_charge_full = ccval &
++ PMIC_BATT_ADC_ACCCHRGVAL_MASK;
++ return;
++ }
++
++ if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
++ pbi->usb_is_present = PMIC_USB_PRESENT;
++ } else {
++ pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ return;
++ }
++
++ /* setup battery charging */
++
++ /* check usb otg power capability and set charger accordingly */
++ retval = langwell_udc_maxpower(&power);
++ if (retval) {
++ dev_warn(pbi->dev, "%s(): usb otg power query failed "
++ "with error code %d\n", __func__, retval);
++ return;
++ }
++
++ if (power >= 500)
++ chrg = BATT_USBOTG_500MA_CHARGE;
++ else
++ chrg = BATT_USBOTG_TRICKLE_CHARGE;
++
++ /* enable battery charging */
++ if (pmic_battery_set_charger(pbi, chrg)) {
++ dev_warn(pbi->dev,
++ "%s(): failed to set up battery charging\n", __func__);
++ return;
++ }
++
++ if (debug)
++ printk(KERN_INFO "pmic-battery: %s() - setting up battery "
++ "charger successful\n", __func__);
++}
++
++/**
++ * pmic_battery_probe - pmic battery initialize
++ * @irq: pmic battery device irq
++ * @dev: pmic battery device structure
++ * Context: can sleep
++ *
++ * PMIC battery initializes its internal data structure and other
++ * infrastructure components for it to work as expected.
++ */
++static __devinit int probe(int irq, struct device *dev)
++{
++ int retval = 0;
++ struct pmic_power_module_info *pbi = 0;
++
++ if (debug)
++ printk(KERN_INFO "pmic-battery: found pmic battery device\n");
++
++ pbi = kzalloc(sizeof(*pbi), GFP_KERNEL);
++ if (!pbi) {
++ dev_err(dev, "%s(): memory allocation failed\n",
++ __func__);
++ return -ENOMEM;
++ }
++
++ pbi->dev = dev;
++ pbi->irq = irq;
++ dev_set_drvdata(dev, pbi);
++
++ /* initialize all required framework before enabling interrupts */
++ INIT_WORK(&pbi->handler, (void *)pmic_battery_handle_intrpt);
++ INIT_DELAYED_WORK(&pbi->monitor_battery, pmic_battery_monitor);
++ pbi->monitor_wqueue =
++ create_singlethread_workqueue(dev_name(dev));
++ if (!pbi->monitor_wqueue) {
++ dev_err(dev, "%s(): wqueue init failed\n", __func__);
++ retval = -ESRCH;
++ goto wqueue_failed;
++ }
++
++ /* register interrupt */
++ retval = request_irq(pbi->irq, pmic_battery_interrupt_handler,
++ 0, DRIVER_NAME, pbi);
++ if (retval) {
++ dev_err(dev, "%s(): cannot get IRQ\n", __func__);
++ goto requestirq_failed;
++ }
++
++ /* register pmic-batt with power supply subsystem */
++ pbi->batt.name = "pmic-batt";
++ pbi->batt.type = POWER_SUPPLY_TYPE_BATTERY;
++ pbi->batt.properties = pmic_battery_props;
++ pbi->batt.num_properties = ARRAY_SIZE(pmic_battery_props);
++ pbi->batt.get_property = pmic_battery_get_property;
++ retval = power_supply_register(dev, &pbi->batt);
++ if (retval) {
++ dev_err(dev,
++ "%s(): failed to register pmic battery device with power supply subsystem\n",
++ __func__);
++ goto power_reg_failed;
++ }
++
++ if (debug)
++ printk(KERN_INFO "pmic-battery: %s() - pmic battery device "
++ "registration with power supply subsystem "
++ "successful\n", __func__);
++
++ queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 1);
++
++ /* register pmic-usb with power supply subsystem */
++ pbi->usb.name = "pmic-usb";
++ pbi->usb.type = POWER_SUPPLY_TYPE_USB;
++ pbi->usb.properties = pmic_usb_props;
++ pbi->usb.num_properties = ARRAY_SIZE(pmic_usb_props);
++ pbi->usb.get_property = pmic_usb_get_property;
++ retval = power_supply_register(dev, &pbi->usb);
++ if (retval) {
++ dev_err(dev,
++ "%s(): failed to register pmic usb device with power supply subsystem\n",
++ __func__);
++ goto power_reg_failed_1;
++ }
++
++ if (debug)
++ printk(KERN_INFO "pmic-battery: %s() - pmic usb device "
++ "registration with power supply subsystem successful\n",
++ __func__);
++
++ return retval;
++
++power_reg_failed_1:
++ power_supply_unregister(&pbi->batt);
++power_reg_failed:
++ cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
++ &pbi->monitor_battery);
++requestirq_failed:
++ destroy_workqueue(pbi->monitor_wqueue);
++wqueue_failed:
++ kfree(pbi);
++
++ return retval;
++}
++
++static int __devinit pmic_battery_probe(struct spi_device *spi)
++{
++ return probe(spi->irq, &spi->dev);
++}
++
++static int __devinit platform_pmic_battery_probe(struct platform_device *pdev)
++{
++ return probe(pdev->id, &pdev->dev);
++}
++
++/**
++ * pmic_battery_remove - pmic battery finalize
++ * @dev: pmic battery device structure
++ * Context: can sleep
++ *
++ * PMIC battery finalizes its internal data structure and other
++ * infrastructure components that it initialized in
++ * pmic_battery_probe.
++ */
++
++static int __devexit remove(struct device *dev)
++{
++ struct pmic_power_module_info *pbi = dev_get_drvdata(dev);
++
++ if (pbi) {
++ free_irq(pbi->irq, pbi);
++
++ cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
++ &pbi->monitor_battery);
++ destroy_workqueue(pbi->monitor_wqueue);
++
++ power_supply_unregister(&pbi->usb);
++ power_supply_unregister(&pbi->batt);
++
++ flush_scheduled_work();
++
++ kfree(pbi);
++ }
++
++ return 0;
++}
++
++static int __devexit pmic_battery_remove(struct spi_device *spi)
++{
++ return remove(&spi->dev);
++}
++
++static int __devexit platform_pmic_battery_remove(struct platform_device *pdev)
++{
++ return remove(&pdev->dev);
++}
++
++static struct spi_driver pmic_battery_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = pmic_battery_probe,
++ .remove = pmic_battery_remove,
++};
++
++static struct platform_driver platform_pmic_battery_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = platform_pmic_battery_probe,
++ .remove = platform_pmic_battery_remove,
++};
++
++static int __init platform_pmic_battery_module_init(void)
++{
++ int err = spi_register_driver(&pmic_battery_driver);
++ if (err < 0)
++ return err;
++ err = platform_driver_register(&platform_pmic_battery_driver);
++ if (err < 0)
++ spi_unregister_driver(&pmic_battery_driver);
++ return err;
++}
++
++static void __exit platform_pmic_battery_module_exit(void)
++{
++ platform_driver_unregister(&platform_pmic_battery_driver);
++ spi_unregister_driver(&pmic_battery_driver);
++}
++
++module_init(platform_pmic_battery_module_init);
++module_exit(platform_pmic_battery_module_exit);
++
++MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
++MODULE_LICENSE("GPL");
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -433,6 +433,19 @@
+ This driver can also be built as a module. If so, the module
+ will be called rtc-cmos.
+
++config RTC_DRV_VRTC
++ tristate "Virtual RTC for MRST"
++ depends on X86_MRST
++ default y if X86_MRST
++
++ help
++ Say "yes" here to get direct support for the real time clock
++ found in the Moorestown platform. The VRTC is an emulated RTC
++ that derives its clock source from a real RTC in the PMIC. The
++ MC146818-style programming interface is mostly preserved, except
++ that updates are done via IPC calls to the system controller FW.
++
++
+ config RTC_DRV_DS1216
+ tristate "Dallas DS1216"
+ depends on SNI_RM
+--- a/drivers/rtc/Makefile
++++ b/drivers/rtc/Makefile
+@@ -30,6 +30,7 @@
+ obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
+ obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
+ obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
++obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
+ obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
+ obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
+ obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
+--- /dev/null
++++ b/drivers/rtc/rtc-mrst.c
+@@ -0,0 +1,619 @@
++/*
++ * rtc-mrst.c: Driver for Moorestown virtual RTC
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ * Feng Tang (feng.tang@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * VRTC is emulated by system controller firmware, the real HW
++ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
++ * in a memory mapped IO space that is visible to the host IA
++ * processor.
++ *
++ * This driver refers drivers/rtc/rtc-cmos.c
++ */
++
++/*
++ * Note:
++ * * vRTC only supports binary mode and 24H mode
++ * * vRTC only support PIE and AIE, no UIE, and its PIE only happens
++ * at 23:59:59pm everyday, not supporting adjustable frequency
++ * * Alarm function is also limited to hr/min/sec.
++ */
++
++#include <linux/mod_devicetable.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sfi.h>
++
++#include <asm-generic/rtc.h>
++
++#include <asm/intel_scu_ipc.h>
++#include <asm/mrst.h>
++#include <asm/vrtc.h>
++
++struct mrst_rtc {
++ struct rtc_device *rtc;
++ struct device *dev;
++ int irq;
++ struct resource *iomem;
++
++ u8 enabled_wake;
++ u8 suspend_ctrl;
++};
++
++/* both platform and pnp busses use negative numbers for invalid irqs */
++#define is_valid_irq(n) ((n) >= 0)
++
++static const char driver_name[] = "rtc_mrst";
++
++#define RTC_IRQMASK (RTC_PF | RTC_AF)
++
++static inline int is_intr(u8 rtc_intr)
++{
++ if (!(rtc_intr & RTC_IRQF))
++ return 0;
++ return rtc_intr & RTC_IRQMASK;
++}
++
++/*
++ * rtc_time's year contains the increment over 1900, but vRTC's YEAR
++ * register can't be programmed to value larger than 0x64, so vRTC
++ * driver chose to use 1960 (1970 is UNIX time start point) as the base,
++ * and do the translation in read/write time
++ */
++static int mrst_read_time(struct device *dev, struct rtc_time *time)
++{
++ unsigned long flags;
++
++ if (rtc_is_updating())
++ mdelay(20);
++
++ spin_lock_irqsave(&rtc_lock, flags);
++ time->tm_sec = vrtc_cmos_read(RTC_SECONDS);
++ time->tm_min = vrtc_cmos_read(RTC_MINUTES);
++ time->tm_hour = vrtc_cmos_read(RTC_HOURS);
++ time->tm_mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
++ time->tm_mon = vrtc_cmos_read(RTC_MONTH);
++ time->tm_year = vrtc_cmos_read(RTC_YEAR);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ /* Adjust for the 1960/1900 */
++ time->tm_year += 60;
++ time->tm_mon--;
++ return RTC_24H;
++}
++
++static int mrst_set_time(struct device *dev, struct rtc_time *time)
++{
++ int ret;
++ unsigned long flags;
++ unsigned char mon, day, hrs, min, sec;
++ unsigned int yrs;
++
++ yrs = time->tm_year;
++ mon = time->tm_mon + 1; /* tm_mon starts at zero */
++ day = time->tm_mday;
++ hrs = time->tm_hour;
++ min = time->tm_min;
++ sec = time->tm_sec;
++
++ if (yrs < 70 || yrs > 138)
++ return -EINVAL;
++ yrs -= 60;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ vrtc_cmos_write(yrs, RTC_YEAR);
++ vrtc_cmos_write(mon, RTC_MONTH);
++ vrtc_cmos_write(day, RTC_DAY_OF_MONTH);
++ vrtc_cmos_write(hrs, RTC_HOURS);
++ vrtc_cmos_write(min, RTC_MINUTES);
++ vrtc_cmos_write(sec, RTC_SECONDS);
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ ret = intel_scu_ipc_simple_command(IPC_CMD_VRTC_SETTIME, IPCMSG_VRTC);
++ return ret;
++}
++
++static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char rtc_control;
++
++ if (!is_valid_irq(mrst->irq))
++ return -EIO;
++
++ /* Basic alarms only support hour, minute, and seconds fields.
++ * Some also support day and month, for alarms up to a year in
++ * the future.
++ */
++ t->time.tm_mday = -1;
++ t->time.tm_mon = -1;
++ t->time.tm_year = -1;
++
++ /* vRTC only supports binary mode */
++ spin_lock_irq(&rtc_lock);
++ t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
++ t->time.tm_min = vrtc_cmos_read(RTC_MINUTES_ALARM);
++ t->time.tm_hour = vrtc_cmos_read(RTC_HOURS_ALARM);
++
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ spin_unlock_irq(&rtc_lock);
++
++ t->enabled = !!(rtc_control & RTC_AIE);
++ t->pending = 0;
++
++ return 0;
++}
++
++static void mrst_checkintr(struct mrst_rtc *mrst, unsigned char rtc_control)
++{
++ unsigned char rtc_intr;
++
++ /*
++ * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
++ * allegedly some older rtcs need that to handle irqs properly
++ */
++ rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
++ rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
++ if (is_intr(rtc_intr))
++ rtc_update_irq(mrst->rtc, 1, rtc_intr);
++}
++
++static void mrst_irq_enable(struct mrst_rtc *mrst, unsigned char mask)
++{
++ unsigned char rtc_control;
++
++ /*
++ * Flush any pending IRQ status, notably for update irqs,
++ * before we enable new IRQs
++ */
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ mrst_checkintr(mrst, rtc_control);
++
++ rtc_control |= mask;
++ vrtc_cmos_write(rtc_control, RTC_CONTROL);
++
++ mrst_checkintr(mrst, rtc_control);
++}
++
++static void mrst_irq_disable(struct mrst_rtc *mrst, unsigned char mask)
++{
++ unsigned char rtc_control;
++
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ rtc_control &= ~mask;
++ vrtc_cmos_write(rtc_control, RTC_CONTROL);
++ mrst_checkintr(mrst, rtc_control);
++}
++
++static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char hrs, min, sec;
++ int ret = 0;
++
++ if (!is_valid_irq(mrst->irq))
++ return -EIO;
++
++ hrs = t->time.tm_hour;
++ min = t->time.tm_min;
++ sec = t->time.tm_sec;
++
++ spin_lock_irq(&rtc_lock);
++ /* Next rtc irq must not be from previous alarm setting */
++ mrst_irq_disable(mrst, RTC_AIE);
++
++ /* Update alarm */
++ vrtc_cmos_write(hrs, RTC_HOURS_ALARM);
++ vrtc_cmos_write(min, RTC_MINUTES_ALARM);
++ vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
++
++ spin_unlock_irq(&rtc_lock);
++
++ ret = intel_scu_ipc_simple_command(IPC_CMD_VRTC_SETALARM, IPCMSG_VRTC);
++ if (ret)
++ return ret;
++
++ spin_lock_irq(&rtc_lock);
++ if (t->enabled)
++ mrst_irq_enable(mrst, RTC_AIE);
++
++ spin_unlock_irq(&rtc_lock);
++
++ return 0;
++}
++
++static int mrst_irq_set_state(struct device *dev, int enabled)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned long flags;
++
++ if (!is_valid_irq(mrst->irq))
++ return -ENXIO;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (enabled)
++ mrst_irq_enable(mrst, RTC_PIE);
++ else
++ mrst_irq_disable(mrst, RTC_PIE);
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return 0;
++}
++
++#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
++
++/* Currently, the vRTC doesn't support UIE ON/OFF */
++static int
++mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned long flags;
++
++ switch (cmd) {
++ case RTC_AIE_OFF:
++ case RTC_AIE_ON:
++ if (!is_valid_irq(mrst->irq))
++ return -EINVAL;
++ break;
++ default:
++ /* PIE ON/OFF is handled by mrst_irq_set_state() */
++ return -ENOIOCTLCMD;
++ }
++
++ spin_lock_irqsave(&rtc_lock, flags);
++ switch (cmd) {
++ case RTC_AIE_OFF: /* alarm off */
++ mrst_irq_disable(mrst, RTC_AIE);
++ break;
++ case RTC_AIE_ON: /* alarm on */
++ mrst_irq_enable(mrst, RTC_AIE);
++ break;
++ }
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return 0;
++}
++
++#else
++#define mrst_rtc_ioctl NULL
++#endif
++
++#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
++
++static int mrst_procfs(struct device *dev, struct seq_file *seq)
++{
++ unsigned char rtc_control, valid;
++
++ spin_lock_irq(&rtc_lock);
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ valid = vrtc_cmos_read(RTC_VALID);
++ spin_unlock_irq(&rtc_lock);
++
++ return seq_printf(seq,
++ "periodic_IRQ\t: %s\n"
++ "alarm\t\t: %s\n"
++ "BCD\t\t: no\n"
++ "periodic_freq\t: daily (not adjustable)\n",
++ (rtc_control & RTC_PIE) ? "on" : "off",
++ (rtc_control & RTC_AIE) ? "on" : "off");
++}
++
++#else
++#define mrst_procfs NULL
++#endif
++
++static const struct rtc_class_ops mrst_rtc_ops = {
++ .ioctl = mrst_rtc_ioctl,
++ .read_time = mrst_read_time,
++ .set_time = mrst_set_time,
++ .read_alarm = mrst_read_alarm,
++ .set_alarm = mrst_set_alarm,
++ .proc = mrst_procfs,
++ .irq_set_state = mrst_irq_set_state,
++};
++
++static struct mrst_rtc mrst_rtc;
++
++/*
++ * When vRTC IRQ is captured by SCU FW, FW will clear the AIE bit in
++ * Reg B, so no need for this driver to clear it
++ */
++static irqreturn_t mrst_rtc_irq(int irq, void *p)
++{
++ u8 irqstat;
++
++ spin_lock(&rtc_lock);
++ /* This read will clear all IRQ flags inside Reg C */
++ irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
++ spin_unlock(&rtc_lock);
++
++ irqstat &= RTC_IRQMASK | RTC_IRQF;
++ if (is_intr(irqstat)) {
++ rtc_update_irq(p, 1, irqstat);
++ return IRQ_HANDLED;
++ } else {
++ printk(KERN_ERR "vRTC: error in IRQ handler\n");
++ return IRQ_NONE;
++ }
++}
++
++static int __init
++vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
++{
++ int retval = 0;
++ unsigned char rtc_control;
++
++ /* There can be only one ... */
++ if (mrst_rtc.dev)
++ return -EBUSY;
++
++ if (!iomem)
++ return -ENODEV;
++
++ iomem = request_mem_region(iomem->start,
++ iomem->end + 1 - iomem->start,
++ driver_name);
++ if (!iomem) {
++ dev_dbg(dev, "i/o mem already in use.\n");
++ return -EBUSY;
++ }
++
++ mrst_rtc.irq = rtc_irq;
++ mrst_rtc.iomem = iomem;
++
++ mrst_rtc.rtc = rtc_device_register(driver_name, dev,
++ &mrst_rtc_ops, THIS_MODULE);
++ if (IS_ERR(mrst_rtc.rtc)) {
++ retval = PTR_ERR(mrst_rtc.rtc);
++ goto cleanup0;
++ }
++
++ mrst_rtc.dev = dev;
++ dev_set_drvdata(dev, &mrst_rtc);
++ rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));
++
++ spin_lock_irq(&rtc_lock);
++ mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ spin_unlock_irq(&rtc_lock);
++
++ if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
++ dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");
++
++ if (is_valid_irq(rtc_irq)) {
++ retval = request_irq(rtc_irq, mrst_rtc_irq,
++ IRQF_DISABLED, dev_name(&mrst_rtc.rtc->dev),
++ mrst_rtc.rtc);
++ if (retval < 0) {
++ dev_dbg(dev, "IRQ %d is already in use, err %d\n",
++ rtc_irq, retval);
++ goto cleanup1;
++ }
++ }
++
++ pr_info("vRTC driver for Moorestown is initialized\n");
++ return 0;
++
++cleanup1:
++ mrst_rtc.dev = NULL;
++ rtc_device_unregister(mrst_rtc.rtc);
++cleanup0:
++ release_region(iomem->start, iomem->end + 1 - iomem->start);
++ pr_warning("vRTC driver for Moorestown initialization Failed!!\n");
++ return retval;
++}
++
++static void rtc_mrst_do_shutdown(void)
++{
++ spin_lock_irq(&rtc_lock);
++ mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
++ spin_unlock_irq(&rtc_lock);
++}
++
++static void __exit rtc_mrst_do_remove(struct device *dev)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ struct resource *iomem;
++
++ rtc_mrst_do_shutdown();
++
++ if (is_valid_irq(mrst->irq))
++ free_irq(mrst->irq, mrst->rtc);
++
++ rtc_device_unregister(mrst->rtc);
++ mrst->rtc = NULL;
++
++ iomem = mrst->iomem;
++ release_region(iomem->start, iomem->end + 1 - iomem->start);
++ mrst->iomem = NULL;
++
++ mrst->dev = NULL;
++ dev_set_drvdata(dev, NULL);
++}
++
++#ifdef CONFIG_PM
++static int mrst_suspend(struct device *dev, pm_message_t mesg)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char tmp;
++
++ /* Only the alarm might be a wakeup event source */
++ spin_lock_irq(&rtc_lock);
++ mrst->suspend_ctrl = tmp = vrtc_cmos_read(RTC_CONTROL);
++ if (tmp & (RTC_PIE | RTC_AIE)) {
++ unsigned char mask;
++
++ if (device_may_wakeup(dev))
++ mask = RTC_IRQMASK & ~RTC_AIE;
++ else
++ mask = RTC_IRQMASK;
++ tmp &= ~mask;
++ vrtc_cmos_write(tmp, RTC_CONTROL);
++
++ mrst_checkintr(mrst, tmp);
++ }
++ spin_unlock_irq(&rtc_lock);
++
++ if (tmp & RTC_AIE) {
++ mrst->enabled_wake = 1;
++ enable_irq_wake(mrst->irq);
++ }
++
++ pr_debug("%s: suspend%s, ctrl %02x\n",
++ dev_name(&mrst_rtc.rtc->dev),
++ (tmp & RTC_AIE) ? ", alarm may wake" : "",
++ tmp);
++
++ return 0;
++}
++
++/*
++ * We want RTC alarms to wake us from the deep power saving state
++ */
++static inline int mrst_poweroff(struct device *dev)
++{
++ return mrst_suspend(dev, PMSG_HIBERNATE);
++}
++
++static int mrst_resume(struct device *dev)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char tmp = mrst->suspend_ctrl;
++
++ /* Re-enable any irqs previously active */
++ if (tmp & RTC_IRQMASK) {
++ unsigned char mask;
++
++ if (mrst->enabled_wake) {
++ disable_irq_wake(mrst->irq);
++ mrst->enabled_wake = 0;
++ }
++
++ spin_lock_irq(&rtc_lock);
++ do {
++ vrtc_cmos_write(tmp, RTC_CONTROL);
++
++ mask = vrtc_cmos_read(RTC_INTR_FLAGS);
++ mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
++ if (!is_intr(mask))
++ break;
++
++ rtc_update_irq(mrst->rtc, 1, mask);
++ tmp &= ~RTC_AIE;
++ } while (mask & RTC_AIE);
++ spin_unlock_irq(&rtc_lock);
++ }
++
++ pr_debug("%s: resume, ctrl %02x\n",
++ dev_name(&mrst_rtc.rtc->dev),
++ tmp);
++
++ return 0;
++}
++
++#else
++#define mrst_suspend NULL
++#define mrst_resume NULL
++
++static inline int mrst_poweroff(struct device *dev)
++{
++ return -ENOSYS;
++}
++
++#endif
++
++static int __init vrtc_mrst_platform_probe(struct platform_device *pdev)
++{
++ return vrtc_mrst_do_probe(&pdev->dev,
++ platform_get_resource(pdev, IORESOURCE_MEM, 0),
++ platform_get_irq(pdev, 0));
++}
++
++static int __exit vrtc_mrst_platform_remove(struct platform_device *pdev)
++{
++ rtc_mrst_do_remove(&pdev->dev);
++ return 0;
++}
++
++static void vrtc_mrst_platform_shutdown(struct platform_device *pdev)
++{
++ if (system_state == SYSTEM_POWER_OFF && !mrst_poweroff(&pdev->dev))
++ return;
++
++ rtc_mrst_do_shutdown();
++}
++
++MODULE_ALIAS("platform:vrtc_mrst");
++
++static struct platform_driver vrtc_mrst_platform_driver = {
++ .remove = __exit_p(vrtc_mrst_platform_remove),
++ .shutdown = vrtc_mrst_platform_shutdown,
++ .driver = {
++ .name = (char *) driver_name,
++ .suspend = mrst_suspend,
++ .resume = mrst_resume,
++ }
++};
++
++/*
++ * Moorestown platform has memory mapped virtual RTC device that emulates
++ * the programming interface of the RTC.
++ */
++
++static struct resource vrtc_resources[] = {
++ [0] = {
++ .flags = IORESOURCE_MEM,
++ },
++ [1] = {
++ .flags = IORESOURCE_IRQ,
++ }
++};
++
++static struct platform_device vrtc_device = {
++ .name = "rtc_mrst",
++ .id = -1,
++ .resource = vrtc_resources,
++ .num_resources = ARRAY_SIZE(vrtc_resources),
++};
++
++static int __init vrtc_mrst_init(void)
++{
++ /* iomem resource */
++ vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
++ vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
++ MRST_VRTC_MAP_SZ;
++ /* irq resource */
++ vrtc_resources[1].start = sfi_mrtc_array[0].irq;
++ vrtc_resources[1].end = sfi_mrtc_array[0].irq;
++
++ platform_device_register(&vrtc_device);
++ return platform_driver_probe(&vrtc_mrst_platform_driver,
++ vrtc_mrst_platform_probe);
++}
++
++static void __exit vrtc_mrst_exit(void)
++{
++ platform_driver_unregister(&vrtc_mrst_platform_driver);
++ platform_device_unregister(&vrtc_device);
++}
++
++module_init(vrtc_mrst_init);
++module_exit(vrtc_mrst_exit);
++
++MODULE_AUTHOR("Jacob Pan; Feng Tang");
++MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
++MODULE_LICENSE("GPL");
+--- a/drivers/serial/8250.c
++++ b/drivers/serial/8250.c
+@@ -58,6 +58,8 @@
+
+ static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
+
++static unsigned int intel_ce_uart_quirk = 0;
++
+ static struct uart_driver serial8250_reg;
+
+ static int serial_index(struct uart_port *port)
+@@ -483,6 +485,41 @@
+ outb(value, p->iobase + offset);
+ }
+
++
++/*
++ * The UART Tx interrupts are not set under some conditions and therefore serial
++ * transmission hangs. This is a silicon issue and has not been root caused. The
++ * workaround for this silicon issue checks UART_LSR_THRE bit and UART_LSR_TEMT
++ * bit of LSR register in interrupt handler to see whether at least one of these
++ * two bits is set, if so then process the transmit request. If this workaround
++ * is not applied, then the serial transmission may hang. This workaround is for
++ * errata number 9 in Errata - B step.
++*/
++static unsigned int ce4100_serial_in(struct uart_port *p, int offset)
++{
++ unsigned int ret, ier, lsr;
++
++ offset = map_8250_in_reg(p, offset) << p->regshift;
++ if (offset == UART_IIR) {
++ ret = inb(p->iobase + offset);
++ if (ret & UART_IIR_NO_INT) {
++ /* see if the TX interrupt should have really set */
++ ier = io_serial_in(p, UART_IER);
++ /* see if the UART's XMIT interrupt is enabled */
++ if (ier & UART_IER_THRI) {
++ lsr = io_serial_in(p, UART_LSR);
++ /* now check to see if the UART should be
++ generating an interrupt (but isn't) */
++ if(lsr & (UART_LSR_THRE | UART_LSR_TEMT))
++ ret &= ~UART_IIR_NO_INT;
++ }
++ }
++ } else
++ ret = inb(p->iobase + offset);
++ return ret;
++}
++
++
+ static void set_io_from_upio(struct uart_port *p)
+ {
+ struct uart_8250_port *up = (struct uart_8250_port *)p;
+@@ -2702,6 +2739,11 @@
+ up->port.membase = old_serial_port[i].iomem_base;
+ up->port.iotype = old_serial_port[i].io_type;
+ up->port.regshift = old_serial_port[i].iomem_reg_shift;
++ if (intel_ce_uart_quirk) {
++ up->port.serial_in = ce4100_serial_in;
++ up->port.uartclk = 14745600;
++ }
++
+ set_io_from_upio(&up->port);
+ up->port.irqflags |= irqflag;
+ }
+@@ -3283,6 +3325,9 @@
+ module_param(skip_txen_test, uint, 0644);
+ MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
+
++module_param(intel_ce_uart_quirk, uint, 0644);
++MODULE_PARM_DESC(intel_ce_uart_quirk, "Enable Intel CE4100 UART quirk");
++
+ #ifdef CONFIG_SERIAL_8250_RSA
+ module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
+ MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
+--- a/drivers/serial/Kconfig
++++ b/drivers/serial/Kconfig
+@@ -550,6 +550,7 @@
+ help
+ Serial port support for Samsung's S5P Family of SoC's
+
++
+ config SERIAL_MAX3100
+ tristate "MAX3100 support"
+ depends on SPI
+@@ -557,6 +558,22 @@
+ help
+ MAX3100 chip support
+
++config SERIAL_MAX3107
++ tristate "MAX3107 support"
++ depends on SPI
++ select SERIAL_CORE
++ help
++ MAX3107 chip support
++
++config SERIAL_MAX3107_AAVA
++ tristate "MAX3107 AAVA platform support"
++ depends on X86_MRST && SERIAL_MAX3107 && GPIOLIB
++ select SERIAL_CORE
++ help
++ Support for the MAX3107 chip configuration found on the AAVA
++ platform. Includes the extra initialisation and GPIO support
++ needed for this device.
++
+ config SERIAL_DZ
+ bool "DECstation DZ serial driver"
+ depends on MACH_DECSTATION && 32BIT
+@@ -698,6 +715,33 @@
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
++config SERIAL_MRST_MAX3110
++ tristate "SPI UART driver for Max3110"
++ depends on SPI_DW_PCI
++ select SERIAL_CORE
++ select SERIAL_CORE_CONSOLE
++ help
++ This is the UART protocol driver for the MAX3110 device on
++ the Intel Moorestown platform. On other systems use the max3100
++ driver.
++
++config MRST_MAX3110_IRQ
++ boolean "Enable GPIO IRQ for Max3110 over Moorestown"
++ default n
++ depends on SERIAL_MRST_MAX3110 && GPIO_LANGWELL
++ help
++ This has to be enabled after Moorestown GPIO driver is loaded
++
++config SERIAL_MFD_HSU
++ tristate "Medfield High Speed UART support"
++ depends on PCI
++ select SERIAL_CORE
++
++config SERIAL_MFD_HSU_CONSOLE
++ boolean "Medfield HSU serial console support"
++ depends on SERIAL_MFD_HSU=y
++ select SERIAL_CORE_CONSOLE
++
+ config SERIAL_BFIN
+ tristate "Blackfin serial port support"
+ depends on BLACKFIN
+@@ -1575,4 +1619,10 @@
+ help
+ Enable a Altera UART port to be the system console.
+
++config SERIAL_IFX6X60
++ tristate "SPI protocol driver for Infineon 6x60 modem"
++ depends on SPI_PW_SPI3 && GPIOLIB
++ help
++ Support for the IFX6x60 modem devices on Intel MID platforms.
++
+ endmenu
+--- a/drivers/serial/Makefile
++++ b/drivers/serial/Makefile
+@@ -46,6 +46,8 @@
+ obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
+ obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
+ obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
++obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
++obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
+ obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
+ obj-$(CONFIG_SERIAL_MUX) += mux.o
+ obj-$(CONFIG_SERIAL_68328) += 68328serial.o
+@@ -84,3 +86,6 @@
+ obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
+ obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
+ obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
++obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
++obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
++obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
+--- /dev/null
++++ b/drivers/serial/ifx6x60.c
+@@ -0,0 +1,1482 @@
++/****************************************************************************
++ *
++ * Driver for the IFX 6x60 spi modem.
++ *
++ * Copyright (C) 2008 Option International
++ * Copyright (C) 2008 Filip Aben <f.aben@option.com>
++ * Denis Joseph Barrow <d.barow@option.com>
++ * Jan Dumon <j.dumon@option.com>
++ *
++ * Copyright (C) 2009, 2010 Intel Corp
++ * Jim Stanley <jim.stanley@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ * Driver modified by Intel from Option gtm501l_spi.c
++ *
++ *
++ *****************************************************************************/
++#include <linux/module.h>
++#include <linux/termios.h>
++#include <linux/tty.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/tty.h>
++#include <linux/kfifo.h>
++#include <linux/tty_flip.h>
++#include <linux/timer.h>
++#include <linux/serial.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/rfkill.h>
++#include <linux/fs.h>
++#include <linux/ip.h>
++#include <linux/dmapool.h>
++#include <linux/gpio.h>
++#include <linux/sched.h>
++#include <linux/time.h>
++#include <linux/wait.h>
++#include <linux/tty.h>
++#include <linux/pm.h>
++#include <linux/pm_runtime.h>
++#include <linux/spi/ifx_modem.h>
++
++#include <linux/spi/pw_spi3.h>
++#include "ifx6x60.h"
++
++#define IFX_SPI_MORE_MASK 0x10
++#define IFX_SPI_MORE_BIT 12 /* bit position in u16 */
++#define IFX_SPI_CTS_BIT 13 /* bit position in u16 */
++#define IFX_SPI_MODE SPI_MODE_1
++#define IFX_SPI_TTY_ID 0
++#define IFX_SPI_TIMEOUT_SEC 2
++#define IFX_SPI_HEADER_0 (-1)
++#define IFX_SPI_HEADER_F (-2)
++
++/* #define IFX_SPI_DEBUG */
++
++/* forward reference */
++static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev);
++
++/* local variables */
++static int spi_b16 = 1; /* 8 or 16 bit word length */
++static struct tty_driver *tty_drv;
++static struct ifx_spi_device *saved_ifx_dev;
++static struct lock_class_key ifx_spi_key;
++
++static int tm_ignore_srdy;
++static int tm_ignore_spito;
++static unsigned int ignore_spito_stop = 10;
++static unsigned int ignore_srdy_start = 25;
++#ifdef IFX_SPI_DEBUG
++static unsigned int testmode;
++#define TESTMODE_COMMON_MASK 0xff
++#define TESTMODE_ENABLE_DMA 0x01
++#define TESTMODE_ENABLE_POLL 0x02
++#define TESTMODE_ENABLE_LOOPBACK 0x04
++#define TESTMODE_ENABLE_INTR 0x08
++#define TESTMODE_PRIV_MASK 0xff00
++#define TESTMODE_IGNORE_SRDY 0x100
++#define TESTMODE_IGNORE_SPITO 0x200
++
++module_param(testmode, uint, S_IRUGO);
++module_param(ignore_spito_stop, uint, S_IRUGO);
++module_param(ignore_srdy_start, uint, S_IRUGO);
++MODULE_PARM_DESC(testmode, "supply test mode bits");
++MODULE_PARM_DESC(ignore_spito_stop, "number of spi timeouts to ignore");
++MODULE_PARM_DESC(ignore_srdy_start,
++ "number of GPIO slave-ready interrupts before ignoring");
++
++#define TESTMODE(x) (testmode & x)
++#else /* IFX_SPI_DEBUG */
++#define TESTMODE(x) (0)
++#endif /* IFX_SPI_DEBUG */
++
++/* GPIO/GPE settings */
++static inline void mrdy_set_high(void)
++{
++ gpio_set_value(saved_ifx_dev->gpio.mrdy, 1);
++}
++static inline void mrdy_set_low(void)
++{
++ gpio_set_value(saved_ifx_dev->gpio.mrdy, 0);
++}
++
++
++
++/* set bit in power status and signal power system if status becomes non-0 */
++static void
++ifx_spi_power_state_set(struct ifx_spi_device *ifx_dev, unsigned char val)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ifx_dev->power_lock, flags);
++
++ /*
++ * if power status is already non-0, just update, else
++ * tell power system
++ */
++ if (!ifx_dev->power_status) {
++ dev_dbg(&ifx_dev->spi_dev->dev, "pm_runtime_get called");
++ pm_runtime_get(&ifx_dev->spi_dev->dev);
++ }
++ ifx_dev->power_status |= val;
++
++ spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
++}
++
++/* clear bit in power status and signal power system if status becomes 0 */
++static void
++ifx_spi_power_state_clear(struct ifx_spi_device *ifx_dev, unsigned char val)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ifx_dev->power_lock, flags);
++
++ if (ifx_dev->power_status) {
++ ifx_dev->power_status &= ~val;
++ if (!ifx_dev->power_status) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "pm_runtime_put called");
++ pm_runtime_put(&ifx_dev->spi_dev->dev);
++ }
++ }
++
++ spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
++}
++
++/*
++ * @len : number of bytes (not words) in the buffer
++ */
++static inline void swap_buf(u16 *buf, int len, void *end)
++{
++ int n;
++
++ len = ((len + 1) >> 1);
++ if ((void *)&buf[len] > end) {
++ pr_err("swap_buf: swap exceeds boundary (%p > %p)!",
++ &buf[len], end);
++ return;
++ }
++ for (n = 0; n < len; n++) {
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ }
++}
++
++/* assert mrdy and set timer to wait for SRDY interrupt, if SRDY is low now */
++static void mrdy_assert(struct ifx_spi_device *ifx_dev)
++{
++ int val = gpio_get_value(ifx_dev->gpio.srdy);
++ if (!val) {
++ dev_dbg(&ifx_dev->spi_dev->dev, "srdy low; chk spi timer");
++ if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING,
++ &ifx_dev->flags)) {
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "set spi timer");
++ ifx_dev->spi_timer.expires =
++ jiffies + IFX_SPI_TIMEOUT_SEC*HZ;
++ add_timer(&ifx_dev->spi_timer);
++
++ }
++ }
++ ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_DATA_PENDING);
++ mrdy_set_high();
++}
++
++/* SPI has timed out: hang up all tty, reset modem */
++static void ifx_spi_timeout(unsigned long arg)
++{
++ struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
++ struct ifx_spi_port_data *port_data;
++ struct tty_port *pport;
++ struct tty_struct *tty;
++
++ if (TESTMODE(TESTMODE_IGNORE_SPITO)) {
++ /* SPI timeout debugging */
++ if (tm_ignore_spito++ < ignore_spito_stop) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "*** spi timeout ignored (%d) ***",
++ tm_ignore_spito);
++ return;
++ }
++ }
++
++ dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
++
++ port_data = ifx_dev->port_data;
++ pport = &port_data->serial.tty_port;
++ if (port_data) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "send signal to associated tty");
++ tty = tty_port_tty_get(pport);
++ if (tty) {
++ tty_hangup(tty);
++ tty_kref_put(tty);
++ }
++ }
++}
++
++/* char/tty operations */
++
++
++static int ifx_spi_tiocmget(struct tty_struct *tty, struct file *filp)
++{
++ unsigned int value = 0;
++ struct ifx_spi_port_data *port_data = tty->driver_data;
++ struct ifx_spi_serial *ifx_ser = &port_data->serial;
++
++ dev_dbg(ifx_ser->tty_dev, "%s called", __func__);
++
++ value =
++ (test_bit(IFX_SPI_RTS, &port_data->signal_state) ? TIOCM_RTS : 0) |
++ (test_bit(IFX_SPI_DTR, &port_data->signal_state) ? TIOCM_DTR : 0) |
++ (test_bit(IFX_SPI_CTS, &port_data->signal_state) ? TIOCM_CTS : 0) |
++ (test_bit(IFX_SPI_DSR, &port_data->signal_state) ? TIOCM_DSR : 0) |
++ (test_bit(IFX_SPI_DCD, &port_data->signal_state) ? TIOCM_CAR : 0) |
++ (test_bit(IFX_SPI_RI, &port_data->signal_state) ? TIOCM_RNG : 0);
++ dev_dbg(&port_data->ifx_spi->spi_dev->dev, "value=%x", value);
++ return value;
++}
++
++static int ifx_spi_tiocmset(struct tty_struct *tty, struct file *filp,
++ unsigned int set, unsigned int clear)
++{
++ struct ifx_spi_port_data *port_data = tty->driver_data;
++ struct ifx_spi_serial *ifx_ser = &port_data->serial;
++
++ dev_dbg(ifx_ser->tty_dev,
++ "%s called (set:%x clear:%x)", __func__, set, clear);
++
++ if (set & TIOCM_RTS)
++ set_bit(IFX_SPI_RTS, &port_data->signal_state);
++ if (set & TIOCM_DTR)
++ set_bit(IFX_SPI_DTR, &port_data->signal_state);
++
++ if (clear & TIOCM_RTS)
++ clear_bit(IFX_SPI_RTS, &port_data->signal_state);
++ if (clear & TIOCM_DTR)
++ clear_bit(IFX_SPI_DTR, &port_data->signal_state);
++
++ set_bit(IFX_SPI_UPDATE, &port_data->signal_state);
++ return 0;
++}
++
++static struct ktermios *init_termios(void)
++{
++ static struct ktermios _ktermios;
++ struct ktermios *termios = &_ktermios;
++
++ memset(termios, 0, sizeof(*termios));
++ /*
++ * The default requirements for this device are:
++ */
++ termios->c_iflag = 0;
++
++ termios->c_cc[VMIN] = 1;
++ termios->c_cc[VTIME] = 0;
++
++ /* disable postprocess output characters */
++ termios->c_oflag &= ~OPOST;
++
++ termios->c_lflag &= ~(ECHO /* disable echo input characters */
++ | ECHONL /* disable echo new line */
++ | ICANON /* disable erase, kill, werase, and
++ * rprnt special characters */
++ | ISIG /* disable interrupt, quit, and suspend
++ * special characters */
++ | IEXTEN); /* disable non-POSIX special
++ * characters */
++
++ termios->c_cflag &= ~(CSIZE /* no size */
++ | PARENB /* disable parity bit */
++ | CBAUD /* clear current baud rate */
++ | CBAUDEX); /* clear current baud rate */
++ termios->c_cflag |= CS8; /* character size 8 bits */
++ termios->c_cflag |= B115200; /* baud rate 115200 */
++
++ return termios;
++
++}
++
++static int ifx_spi_open(struct tty_struct *tty, struct file *filp)
++{
++ struct ifx_spi_serial *ifx_ser;
++ struct ifx_spi_port_data *port_data;
++ int ret = 0;
++
++ dev_dbg(&saved_ifx_dev->spi_dev->dev,
++ "%s called (tty:%p)", __func__, tty);
++ port_data = saved_ifx_dev->port_data;
++ ifx_ser = &port_data->serial;
++ ret = tty_port_open(&ifx_ser->tty_port, tty, filp);
++
++ return ret;
++}
++
++static void ifx_spi_close(struct tty_struct *tty, struct file *filp)
++{
++ struct ifx_spi_port_data *port_data;
++ struct ifx_spi_serial *ifx_ser;
++ struct tty_port *pport;
++ struct ifx_spi_device *ifx_dev = saved_ifx_dev;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "%s called (tty:%p)",
++ __func__, tty);
++ port_data = tty->driver_data;
++ ifx_ser = &port_data->serial;
++ pport = &ifx_ser->tty_port;
++ tty_port_close(pport, tty, filp);
++
++#ifdef N_IFX_SPI
++ if (test_bit(OS_HCLOSE_START, &ifx_ser->ostatus)) {
++ /*
++ * wait for hangup callback so we'll know the N_IFX_SPI
++ * ldisc has completed closing the MUX channels
++ */
++ (void) wait_event_interruptible(ifx_ser->hangup_wait,
++ test_bit(OS_HCLOSE_DONE, &ifx_ser->ostatus));
++ tasklet_kill(&ifx_dev->io_work_tasklet);
++ }
++#endif /* N_IFX_SPI */
++}
++
++/* note how received_cts is handled -- if header is all F it is left
++ * the same as it was, if header is all 0 it is set to 0
++ * otherwise it is taken from the incoming header
++ */
++static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length,
++ unsigned char *more,
++ unsigned char *received_cts)
++{
++ u16 h1;
++ u16 h2;
++ u16 *in_buffer = (u16 *)buffer;
++
++ h1 = *in_buffer;
++ h2 = *(in_buffer+1);
++
++ if (h1 == 0 && h2 == 0) {
++ dev_dbg(&saved_ifx_dev->spi_dev->dev, "header invalidated 0");
++ *received_cts = 0;
++ return IFX_SPI_HEADER_0;
++ } else if (h1 == 0xffff && h2 == 0xffff) {
++ dev_dbg(&saved_ifx_dev->spi_dev->dev, "header invalidated f");
++ /* spi_slave_cts remains as it was */
++ return IFX_SPI_HEADER_F;
++ }
++
++ *length = h1 & 0xfff; /* upper bits of byte are flags */
++ *more = (buffer[1] >> IFX_SPI_MORE_BIT) & 1;
++ *received_cts = (buffer[3] >> IFX_SPI_CTS_BIT) & 1;
++
++ dev_dbg(&saved_ifx_dev->spi_dev->dev,
++ "(%x %x %x %x) received length %d, more %d, cts %d",
++ buffer[0], buffer[1], buffer[2], buffer[3],
++ *length, *more, *received_cts);
++
++ return 0;
++}
++
++static void ifx_spi_setup_spi_header(unsigned char *txbuffer, int tx_count,
++ unsigned char more)
++{
++ *(u16 *)(txbuffer) = tx_count;
++ *(u16 *)(txbuffer+2) = IFX_SPI_PAYLOAD_SIZE;
++ txbuffer[1] |= (more << IFX_SPI_MORE_BIT) & IFX_SPI_MORE_MASK;
++}
++
++static void ifx_spi_wakeup_serial(struct ifx_spi_port_data *port_data)
++{
++ struct tty_struct *tty;
++
++ tty = tty_port_tty_get(&port_data->serial.tty_port);
++ if (!tty)
++ return;
++ tty_wakeup(tty);
++ tty_kref_put(tty);
++}
++
++static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
++{
++ int temp_count;
++ int queue_length;
++ int tx_count;
++ unsigned char *tx_buffer;
++ struct ifx_spi_port_data *port_data = ifx_dev->port_data;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "prepare_tx_buffer called");
++ tx_buffer = ifx_dev->tx_buffer;
++ memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
++
++ /* make room for required SPI header */
++ tx_buffer += IFX_SPI_HEADER_OVERHEAD;
++ tx_count = IFX_SPI_HEADER_OVERHEAD;
++
++ /* clear to signal no more data if this turns out to be the
++ * last buffer sent in a sequence */
++ ifx_dev->spi_more = 0;
++
++ /* if modem cts is set, just send empty buffer */
++ if (!(ifx_dev->spi_slave_cts)) {
++ /* see if there's tx data */
++ queue_length = kfifo_len(&port_data->tx_fifo);
++ if (queue_length != 0) {
++ /* data to mux -- see if there's room for it */
++ temp_count = min(queue_length, IFX_SPI_PAYLOAD_SIZE);
++ temp_count = kfifo_out_locked(&port_data->tx_fifo,
++ tx_buffer, temp_count,
++ &port_data->fifo_lock);
++
++ /* update buffer pointer and data count in message */
++ tx_buffer += temp_count;
++ tx_count += temp_count;
++#ifdef IFX_SPI_DEBUG
++ do {
++ int i;
++ unsigned char *buf = ifx_dev->tx_buffer;
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "prepare outgoing buffer:");
++ for (i = 0; i < tx_count; i++)
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "%x %c", buf[i], buf[i]);
++ dev_dbg(&ifx_dev->spi_dev->dev, "end buffer");
++ } while (0);
++#endif
++ if (temp_count == queue_length) {
++ /* poke port to get more data */
++ ifx_spi_wakeup_serial(port_data);
++ } else {
++ /* more data in port, use next SPI message */
++ ifx_dev->spi_more = 1;
++ }
++ }
++ } else {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "slave CTS set: send empty buffer");
++ }
++ /* have data and info for header -- set up SPI header in buffer */
++ /* spi header needs payload size, not entire buffer size */
++ ifx_spi_setup_spi_header(ifx_dev->tx_buffer,
++ tx_count-IFX_SPI_HEADER_OVERHEAD,
++ ifx_dev->spi_more);
++ /* swap actual data in the buffer */
++ swap_buf((u16 *)(ifx_dev->tx_buffer), tx_count,
++ &ifx_dev->tx_buffer[IFX_SPI_TRANSFER_SIZE]);
++ return tx_count;
++}
++
++static int ifx_spi_write(struct tty_struct *tty, const unsigned char *buf,
++ int count)
++{
++ struct ifx_spi_port_data *port_data = tty->driver_data;
++ struct ifx_spi_serial *ifx_ser = &port_data->serial;
++ struct ifx_spi_device *ifx_dev = port_data->ifx_spi;
++ unsigned char *tmp_buf = (unsigned char *)buf;
++ int tx_count;
++ int srdy_value;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "%s called", __func__);
++
++ tx_count = kfifo_in_locked(&port_data->tx_fifo, tmp_buf, count,
++ &port_data->fifo_lock);
++ ifx_ser->tty_write_cnt += tx_count;
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "Write: wrote %d bytes in fifo (total = %d)",
++ tx_count, ifx_ser->tty_write_cnt);
++ srdy_value = gpio_get_value(ifx_dev->gpio.srdy);
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "mrdy set high, wait for srdy (write) %x",
++ srdy_value);
++ mrdy_assert(ifx_dev);
++ return tx_count;
++}
++
++static int ifx_spi_write_room(struct tty_struct *tty)
++{
++ struct ifx_spi_port_data *port_data = tty->driver_data;
++ int val = IFX_SPI_FIFO_SIZE - kfifo_len(&port_data->tx_fifo);
++
++ return val;
++}
++
++static int ifx_spi_chars_in_buffer(struct tty_struct *tty)
++{
++ struct ifx_spi_port_data *port_data = tty->driver_data;
++
++ return kfifo_len(&port_data->tx_fifo);
++}
++
++static int ifx_spi_ioctl(struct tty_struct *tty, struct file * file,
++ unsigned int cmd, unsigned long arg)
++{
++ struct serial_struct serial;
++ static int _get_irq;
++
++ dev_dbg(&saved_ifx_dev->spi_dev->dev,
++ "%s called (cmd:%x arg:%lx)", __func__, cmd, arg);
++
++ switch (cmd) {
++ case TIOCSSERIAL:
++ /* use irq/port for gpio_number/value */
++ if (copy_from_user(&serial, (struct serial_struct *)arg,
++ sizeof(serial)))
++ return -EFAULT;
++ if (serial.port == 255) {
++ /* set irq for next GSERIAL */
++ _get_irq = serial.irq;
++ return -EINVAL;
++ }
++ if (serial.irq != saved_ifx_dev->gpio.reset &&
++ serial.irq != saved_ifx_dev->gpio.po)
++ return -EINVAL;
++ gpio_set_value(serial.irq, serial.port);
++ dev_dbg(&saved_ifx_dev->spi_dev->dev, "set gpio: %d,%d",
++ serial.irq, serial.port);
++
++ break;
++
++ case TIOCGSERIAL:
++ /* use irq/port for gpio_number/value */
++ if (copy_from_user(&serial, (struct serial_struct *)arg,
++ sizeof(serial)))
++ return -EFAULT;
++
++ serial.irq = saved_ifx_dev->gpio.mrdy;
++ if (_get_irq)
++ serial.irq = _get_irq;
++
++ serial.port = gpio_get_value(serial.irq);
++ dev_dbg(&saved_ifx_dev->spi_dev->dev, "get gpio: %d,%d",
++ serial.irq, serial.port);
++ if (copy_to_user((void *)arg, &serial, sizeof(serial)))
++ return -EFAULT;
++
++ break;
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return 0;
++}
++
++static void ifx_spi_write_wakeup_work(struct work_struct *_work)
++{
++ struct ifx_spi_device *ifx_dev =
++ container_of(_work, struct ifx_spi_device, write_wakeup_work);
++ struct ifx_spi_port_data *port_data = ifx_dev->port_data;
++ struct tty_struct *tty;
++ struct tty_ldisc *ldisc;
++
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "call line disc wakeup (2)");
++ tty = tty_port_tty_get(&port_data->serial.tty_port);
++ if (!tty) {
++ dev_dbg(&ifx_dev->spi_dev->dev, "no tty");
++ return;
++ }
++ ldisc = tty_ldisc_ref(tty);
++ if (!ldisc) {
++ tty_kref_put(tty);
++ return;
++ }
++ ldisc->ops->write_wakeup(tty);
++ tty_ldisc_deref(ldisc);
++ tty_kref_put(tty);
++ dev_dbg(&ifx_dev->spi_dev->dev, "ldisc wakeup done");
++}
++
++static void ifx_spi_hangup(struct tty_struct *tty)
++{
++ struct ifx_spi_port_data *port_data = tty->driver_data;
++ struct ifx_spi_serial *ifx_ser = &port_data->serial;
++ struct ifx_spi_device *ifx_dev = saved_ifx_dev;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "%s called", __func__);
++ tty_port_hangup(&ifx_ser->tty_port);
++ if (test_bit(OS_HCLOSE_START, &ifx_ser->ostatus)) {
++ set_bit(OS_HCLOSE_DONE, &ifx_ser->ostatus);
++ wake_up(&ifx_ser->hangup_wait);
++ }
++}
++
++/*
++ * tty port activate method - called for first port open
++ */
++static int
++ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
++{
++ struct ifx_spi_port_data *port_data =
++ container_of(port, struct ifx_spi_port_data, serial.tty_port);
++ struct ifx_spi_serial *ifx_ser = &port_data->serial;
++
++ dev_dbg(ifx_ser->tty_dev, "%s called", __func__);
++
++ set_bit(OS_OPEN, &ifx_ser->ostatus);
++
++ /* clear any old data; can't do this in 'close' */
++ kfifo_reset(&port_data->tx_fifo);
++
++ /* put port data into this tty */
++ tty->driver_data = port_data;
++
++ /* allows flip string push from int context */
++ tty->low_latency = 1;
++
++ return 0;
++}
++
++/*
++ * tty port shutdown method - called for last port close
++ */
++static void
++ifx_port_shutdown(struct tty_port *port)
++{
++ struct ifx_spi_port_data *port_data =
++ container_of(port, struct ifx_spi_port_data, serial.tty_port);
++ struct ifx_spi_serial *ifx_ser = &port_data->serial;
++ struct tty_struct *tty;
++
++ dev_dbg(ifx_ser->tty_dev, "%s called", __func__);
++
++ clear_bit(OS_OPEN, &ifx_ser->ostatus);
++ clear_bit(OS_HCLOSE_START, &ifx_ser->ostatus);
++ clear_bit(OS_HCLOSE_DONE, &ifx_ser->ostatus);
++
++ tty = tty_port_tty_get(&ifx_ser->tty_port);
++ if (!tty) {
++ dev_dbg(ifx_ser->tty_dev, "no tty");
++ return;
++ }
++
++#ifdef N_IFX_SPI
++ if (tty->termios->c_line == N_IFX_SPI) {
++ /*
++ * We need to ensure that the MUX channels shut down
++ * before we kill the io_work_tasklet so the channel
++ * shutdown commands can make it to the modem
++ */
++ set_bit(OS_HCLOSE_START, &ifx_ser->ostatus);
++ tty_hangup(tty);
++ tty_kref_put(tty);
++ return;
++ }
++#endif /* N_IFX_SPI */
++
++ tasklet_kill(&saved_ifx_dev->io_work_tasklet);
++ tty_kref_put(tty);
++}
++
++static const struct tty_port_operations ifx_tty_port_ops = {
++ .activate = ifx_port_activate,
++ .shutdown = ifx_port_shutdown,
++};
++
++static const struct tty_operations ifx_spi_serial_ops = {
++ .open = ifx_spi_open,
++ .close = ifx_spi_close,
++ .write = ifx_spi_write,
++ .ioctl = ifx_spi_ioctl,
++ .hangup = ifx_spi_hangup,
++ .write_room = ifx_spi_write_room,
++ .chars_in_buffer = ifx_spi_chars_in_buffer,
++ .tiocmget = ifx_spi_tiocmget,
++ .tiocmset = ifx_spi_tiocmset,
++};
++
++static void ifx_spi_insert_flip_string(struct ifx_spi_serial *ifx_ser,
++ unsigned char *chars, size_t size)
++{
++ int chars_inserted;
++ struct tty_struct *tty;
++#ifdef IFX_SPI_DEBUG
++ int i;
++#endif
++
++ dev_dbg(ifx_ser->tty_dev, "%s %d", __func__, size);
++#ifdef IFX_SPI_DEBUG
++ for (i = 0; i < size; i++)
++ dev_dbg(ifx_ser->tty_dev, "%x %c", chars[i], chars[i]);
++#endif
++
++ tty = tty_port_tty_get(&ifx_ser->tty_port);
++ if (!tty) {
++ dev_dbg(ifx_ser->tty_dev, "no tty for flip string");
++ return;
++ }
++ chars_inserted = tty_insert_flip_string(tty, chars, size);
++ dev_dbg(ifx_ser->tty_dev, "spi inserted flip string %d (%p)",
++ chars_inserted, tty);
++ tty_flip_buffer_push(tty);
++ tty_kref_put(tty);
++}
++
++static void ifx_spi_complete(void *ctx)
++{
++ struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)ctx;
++ struct ifx_spi_port_data *port_data = NULL;
++ struct tty_struct *tty;
++ struct tty_ldisc *ldisc = NULL;
++ int length = 0;
++ int actual_length = 0;
++ unsigned char more = 0;
++ unsigned char cts = 0;
++ int local_write_pending = 0;
++ int queue_length = 0;
++ int srdy = 0;
++ int decode_result;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "SPI completion");
++
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "mrdy set low");
++ mrdy_set_low();
++
++ if (!ifx_dev->spi_msg.status) {
++ /* check header validity, get comm flags */
++ swap_buf((u16 *)ifx_dev->rx_buffer, IFX_SPI_HEADER_OVERHEAD,
++ &ifx_dev->rx_buffer[IFX_SPI_HEADER_OVERHEAD]);
++ dev_dbg(&ifx_dev->spi_dev->dev, "rx buffer = %x %x %x %x %x",
++ ifx_dev->rx_buffer[0],
++ ifx_dev->rx_buffer[1],
++ ifx_dev->rx_buffer[2],
++ ifx_dev->rx_buffer[3],
++ ifx_dev->rx_buffer[4]);
++ port_data = ifx_dev->port_data;
++ decode_result = ifx_spi_decode_spi_header(ifx_dev->rx_buffer,
++ &length, &more, &cts);
++ if (decode_result == IFX_SPI_HEADER_0) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "ignore input: invalid header 0");
++ ifx_dev->spi_slave_cts = 0;
++ goto complete_exit;
++ } else if (decode_result == IFX_SPI_HEADER_F) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "ignore input: invalid header F");
++ goto complete_exit;
++ }
++
++ ifx_dev->spi_slave_cts = cts;
++ dev_dbg(&ifx_dev->spi_dev->dev, "set cts %d", cts);
++ actual_length = min((unsigned int)length,
++ ifx_dev->spi_msg.actual_length);
++ swap_buf((u16 *)(ifx_dev->rx_buffer+IFX_SPI_HEADER_OVERHEAD),
++ actual_length,
++ &ifx_dev->rx_buffer[IFX_SPI_TRANSFER_SIZE]);
++ dev_dbg(&ifx_dev->spi_dev->dev, "send data to flip buffer");
++ ifx_spi_insert_flip_string(
++ &port_data->serial,
++ ifx_dev->rx_buffer+IFX_SPI_HEADER_OVERHEAD,
++ (size_t)actual_length);
++ } else {
++ dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d",
++ ifx_dev->spi_msg.status);
++ }
++
++complete_exit:
++ if (ifx_dev->write_pending) {
++ ifx_dev->write_pending = 0;
++ local_write_pending = 1;
++ }
++
++ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags));
++
++ queue_length = (port_data == NULL) ? 0 :
++ kfifo_len(&port_data->tx_fifo);
++ srdy = gpio_get_value(ifx_dev->gpio.srdy);
++ if (!srdy)
++ ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY);
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "ifx_spi_complete(): rx_more = %d, "
++ "tx_more = %d, cts = %d, "
++ "tx_queue_length = %d, srdy = %x, write_pending = %d",
++ more, ifx_dev->spi_more, ifx_dev->spi_slave_cts,
++ queue_length, srdy, local_write_pending);
++
++ /* schedule output if there is more to do */
++ if (test_and_clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags)) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "io_ready set, reschedule tasklet for more");
++ tasklet_schedule(&ifx_dev->io_work_tasklet);
++ } else {
++ if (more || ifx_dev->spi_more || queue_length > 0 ||
++ local_write_pending) {
++ if (ifx_dev->spi_slave_cts) {
++ if (more) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "cts 1, more 1");
++ mrdy_assert(ifx_dev);
++ } else {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "cts 1, more 0");
++ }
++ } else {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "mrdy set high (spi comp)");
++ mrdy_assert(ifx_dev);
++ }
++ } else {
++ /*
++ * poke line discipline driver if any for more data
++ * may or may not get more data to write
++ * for now, say not busy
++ */
++ ifx_spi_power_state_clear(ifx_dev,
++ IFX_SPI_POWER_DATA_PENDING);
++ if (port_data) {
++ tty = tty_port_tty_get(
++ &port_data->serial.tty_port);
++ if (tty) {
++ ldisc = tty_ldisc_ref(tty);
++ if (ldisc) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "call line disc wakeup");
++ ldisc->ops->write_wakeup(tty);
++ tty_ldisc_deref(ldisc);
++ } else {
++ schedule_work(&ifx_dev->write_wakeup_work);
++ }
++ tty_kref_put(tty);
++ }
++ }
++ }
++ }
++}
++
++void ifx_spi_io(unsigned long data)
++{
++ int retval = 0;
++ struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *) data;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "ifx_spi_io int_nb %d, %lx",
++ ifx_dev->gpio.unack_srdy_int_nb, ifx_dev->flags);
++
++
++ if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) {
++ if (ifx_dev->gpio.unack_srdy_int_nb > 0)
++ ifx_dev->gpio.unack_srdy_int_nb--;
++
++ ifx_spi_prepare_tx_buffer(ifx_dev);
++
++ spi_message_init(&ifx_dev->spi_msg);
++ INIT_LIST_HEAD(&ifx_dev->spi_msg.queue);
++
++ ifx_dev->spi_msg.context = ifx_dev;
++ ifx_dev->spi_msg.complete = ifx_spi_complete;
++
++ /* set up our spi transfer */
++ /* note len is BYTES, not transfers */
++ ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE;
++ ifx_dev->spi_xfer.cs_change = 0;
++ ifx_dev->spi_xfer.speed_hz = 12500000; /*390625;*/
++ ifx_dev->spi_xfer.bits_per_word = spi_b16 ? 16 : 8;
++
++ ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
++ ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
++
++ /*
++ * setup dma pointers
++ */
++ ifx_dev->spi_msg.is_dma_mapped = 0;
++ ifx_dev->tx_dma = (dma_addr_t)NULL;
++ ifx_dev->rx_dma = (dma_addr_t)NULL;
++ ifx_dev->spi_xfer.tx_dma = (dma_addr_t)NULL;
++ ifx_dev->spi_xfer.rx_dma = (dma_addr_t)NULL;
++
++ spi_message_add_tail(&ifx_dev->spi_xfer, &ifx_dev->spi_msg);
++
++ /* Assert MRDY. This may have already been done by the write
++ * routine.
++ */
++ dev_dbg(&ifx_dev->spi_dev->dev, "mrdy set high (_io)");
++ mrdy_assert(ifx_dev);
++ dev_dbg(&ifx_dev->spi_dev->dev, "call spi_async");
++
++
++ retval = spi_async(ifx_dev->spi_dev, &ifx_dev->spi_msg);
++ if (retval) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "spi_async failed (%d)", retval);
++ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS,
++ &ifx_dev->flags);
++ tasklet_schedule(&ifx_dev->io_work_tasklet);
++ return;
++ }
++ } else {
++ /* print error if called in progress */
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "spi_io called while SPI in progress");
++ ifx_dev->write_pending = 1;
++ }
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "ifx_spi_io exit");
++}
++
++void ifx_spi_free_port(struct ifx_spi_device *ifx_dev)
++{
++ struct ifx_spi_serial *ifx_ser;
++ struct ifx_spi_port_data *port_data;
++
++
++ if (!ifx_dev)
++ return;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "%s called", __func__);
++
++ port_data = ifx_dev->port_data;
++ if (!port_data)
++ return;
++
++ ifx_ser = &port_data->serial;
++ if (ifx_ser) {
++ if (ifx_ser->tty_dev)
++ tty_unregister_device(tty_drv, ifx_ser->minor);
++ }
++ kfifo_free(&port_data->tx_fifo);
++ kfree(port_data);
++}
++
++int ifx_spi_create_port(struct ifx_spi_device *ifx_dev)
++{
++ struct ifx_spi_serial *ifx_ser;
++ struct ifx_spi_port_data *port_data;
++ struct tty_port *pport;
++ int ret = 0;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "%s called", __func__);
++
++ port_data = kzalloc(sizeof(struct ifx_spi_port_data), GFP_ATOMIC);
++ if (!port_data)
++ return -ENOMEM;
++ ifx_dev->port_data = port_data;
++
++ spin_lock_init(&port_data->fifo_lock);
++ lockdep_set_class_and_subclass(&port_data->fifo_lock,
++ &ifx_spi_key, 0);
++
++ port_data->ifx_spi = ifx_dev;
++ port_data->port_id = IFX_SPI_TTY_ID;
++ if (kfifo_alloc(&port_data->tx_fifo, IFX_SPI_FIFO_SIZE,
++ GFP_ATOMIC)) {
++
++ ret = -ENOMEM;
++ goto error_ret;
++ }
++
++ pport = &port_data->serial.tty_port;
++ tty_port_init(pport);
++ pport->ops = &ifx_tty_port_ops;
++ ifx_ser = &port_data->serial;
++ init_waitqueue_head(&ifx_ser->hangup_wait);
++ ifx_ser->minor = IFX_SPI_TTY_ID;
++ ifx_ser->tty_dev = tty_register_device(tty_drv, ifx_ser->minor,
++ &ifx_dev->spi_dev->dev);
++ if (!ifx_ser->tty_dev) {
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "%s: registering tty device failed", __func__);
++ ret = -ENODEV;
++ goto error_ret;
++ }
++
++
++ ifx_dev->port_data = port_data;
++ dev_dbg(&ifx_dev->spi_dev->dev, "port data successfully constructed");
++
++ return 0;
++
++error_ret:
++ ifx_spi_free_port(ifx_dev);
++
++ return ret;
++}
++
++static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev)
++{
++ dev_dbg(&ifx_dev->spi_dev->dev, "%s called", __func__);
++
++ if (test_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) {
++ dev_dbg(&ifx_dev->spi_dev->dev, "clear spi timer");
++ del_timer_sync(&ifx_dev->spi_timer);
++ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
++ }
++
++ ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_SRDY);
++ if (!test_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) {
++ dev_dbg(&ifx_dev->spi_dev->dev, "schedule tasklet");
++ tasklet_schedule(&ifx_dev->io_work_tasklet);
++ } else {
++ dev_dbg(&ifx_dev->spi_dev->dev, "set io ready flag");
++ set_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags);
++ }
++}
++
++static irqreturn_t ifx_spi_gpio_interrupt(int irq, void *dev)
++{
++ struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)dev;
++
++ dev_dbg(&ifx_dev->spi_dev->dev, "GPIO 60 int_nb %d srdy %x flags %lx",
++ ifx_dev->gpio.unack_srdy_int_nb,
++ gpio_get_value(ifx_dev->gpio.srdy), ifx_dev->flags);
++ ifx_dev->gpio.unack_srdy_int_nb++;
++
++
++ if (TESTMODE(TESTMODE_IGNORE_SRDY)) {
++ /*
++ * SPI timeout debugging
++ * ignore SRDY interrupt for timer debugging
++ */
++ if (test_bit(IFX_SPI_STATE_TIMER_PENDING,
++ &ifx_dev->flags) &&
++ ++tm_ignore_srdy >= ignore_srdy_start) {
++
++ dev_dbg(&ifx_dev->spi_dev->dev,
++ "*** spi gpio ignored ***");
++ return IRQ_HANDLED;
++ }
++ }
++
++ ifx_spi_handle_srdy(ifx_dev);
++
++ return IRQ_HANDLED;
++}
++
++static void _ifx_spi_free_device(void)
++{
++ if (saved_ifx_dev) {
++ ifx_spi_free_port(saved_ifx_dev);
++ kfree(saved_ifx_dev->tx_buffer);
++ kfree(saved_ifx_dev->rx_buffer);
++ kfree(saved_ifx_dev);
++ }
++ saved_ifx_dev = NULL;
++}
++
++static const struct pnwl_spi3_chip ctlr_ifx_spi = {
++ .poll_mode = 0,
++ .enable_dma = 1,
++};
++
++static int ifx_spi_spi_probe(struct spi_device *spi)
++{
++ int ret;
++ int srdy;
++ struct ifx_modem_platform_data *pl_data = NULL;
++ struct ifx_spi_device *ifx_dev;
++
++ dev_dbg(&spi->dev, "%s called", __func__);
++ if (saved_ifx_dev) {
++ dev_dbg(&spi->dev, "ignoring subsequent detection");
++ return -ENODEV;
++ }
++ /* we check here only the SPI mode and correct them, if needed */
++ if (IFX_SPI_MODE !=
++ (spi->mode & (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
++ SPI_LSB_FIRST | SPI_3WIRE))) {
++ dev_warn(&spi->dev, "SPI mode wrong, found %d, correct to %d",
++ spi->mode, IFX_SPI_MODE);
++ spi->mode = IFX_SPI_MODE | (SPI_LOOP & spi->mode);
++ }
++
++ if (spi->mode & SPI_LOOP)
++ dev_warn(&spi->dev, "SPI device in loop back");
++
++ /* The Bit_per_word and the maximum speed has to be setup by us,
++ * the protocol driver */
++ if (spi_b16)
++ spi->bits_per_word = 16;
++ else
++ spi->bits_per_word = 8;
++
++ /* controller_data must match what the SPI controller driver uses */
++ spi->controller_data = (void *)&ctlr_ifx_spi;
++ spi->max_speed_hz = 12500000;
++ ret = spi_setup(spi);
++ if (ret) {
++ dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
++ return -ENODEV;
++ }
++
++ /* initialize structure to hold our device variables */
++ ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_ATOMIC);
++ if (!ifx_dev) {
++ dev_err(&spi->dev, "spi device allocation failed");
++ return -ENOMEM;
++ }
++ saved_ifx_dev = ifx_dev;
++ ifx_dev->spi_dev = spi;
++ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
++ spin_lock_init(&ifx_dev->write_lock);
++ spin_lock_init(&ifx_dev->power_lock);
++ ifx_dev->power_status = 0;
++ init_timer(&ifx_dev->spi_timer);
++ ifx_dev->spi_timer.function = ifx_spi_timeout;
++ ifx_dev->spi_timer.data = (unsigned long)ifx_dev;
++
++ /* required to get ldisc reference from spi_complete */
++ INIT_WORK(&(ifx_dev->write_wakeup_work),
++ (work_func_t)ifx_spi_write_wakeup_work);
++
++ /* ensure SPI protocol flags are initialized to enable transfer */
++ ifx_dev->spi_more = 0;
++ ifx_dev->spi_slave_cts = 0;
++
++ /*initialize transfer and dma buffers */
++ ifx_dev->tx_buffer = kzalloc(IFX_SPI_TRANSFER_SIZE,
++ GFP_KERNEL | GFP_DMA);
++ if (!ifx_dev->tx_buffer) {
++ dev_err(&spi->dev, "DMA-TX buffer allocation failed");
++ ret = -ENOMEM;
++ goto error_ret;
++ }
++ ifx_dev->rx_buffer = kzalloc(IFX_SPI_TRANSFER_SIZE,
++ GFP_KERNEL | GFP_DMA);
++ if (!ifx_dev->rx_buffer) {
++ dev_err(&spi->dev, "DMA-RX buffer allocation failed");
++ ret = -ENOMEM;
++ goto error_ret;
++ }
++
++ spi_set_drvdata(spi, ifx_dev);
++ tasklet_init(&ifx_dev->io_work_tasklet,
++ (void (*)(unsigned long))ifx_spi_io,
++ (unsigned long)ifx_dev);
++
++ set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
++
++ /* create our tty port */
++ ret = ifx_spi_create_port(ifx_dev);
++ if (ret != 0) {
++ dev_err(&spi->dev, "create default tty port failed");
++ goto error_ret;
++ }
++
++ pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data;
++ if (pl_data) {
++ ifx_dev->gpio.reset = pl_data->rst_pmu;
++ ifx_dev->gpio.po = pl_data->pwr_on;
++ ifx_dev->gpio.mrdy = pl_data->mrdy;
++ ifx_dev->gpio.srdy = pl_data->srdy;
++ ifx_dev->gpio.reset_out = pl_data->rst_out;
++ } else {
++ dev_err(&spi->dev, "missing platform data!");
++ return -ENODEV;
++
++ }
++
++
++ dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d",
++ ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy,
++ ifx_dev->gpio.srdy, ifx_dev->gpio.reset_out);
++
++ /* Configure gpios */
++ ret = gpio_request(ifx_dev->gpio.reset, "ifxModem");
++ ret += gpio_direction_output(ifx_dev->gpio.reset, 1);
++ ret += gpio_export(ifx_dev->gpio.reset, 1);
++ if (ret) {
++ dev_err(&spi->dev, "Unable to configure GPIO%d (RESET)",
++ ifx_dev->gpio.reset);
++ ret = -EBUSY;
++ goto error_ret2;
++ }
++
++ ret = gpio_request(ifx_dev->gpio.po, "ifxModem");
++ ret += gpio_direction_output(ifx_dev->gpio.po, 1);
++ ret += gpio_export(ifx_dev->gpio.po, 1);
++ if (ret) {
++ dev_err(&spi->dev, "Unable to configure GPIO%d (ON)",
++ ifx_dev->gpio.po);
++ ret = -EBUSY;
++ goto error_ret2;
++ }
++
++ ret = gpio_request(ifx_dev->gpio.mrdy, "ifxModem");
++ ret += gpio_export(ifx_dev->gpio.mrdy, 1);
++ ret += gpio_direction_output(ifx_dev->gpio.mrdy, 0);
++ if (ret) {
++ dev_err(&spi->dev, "Unable to configure GPIO%d (MRDY)",
++ ifx_dev->gpio.mrdy);
++ ret = -EBUSY;
++ goto error_ret2;
++ }
++
++ ret = gpio_request(ifx_dev->gpio.srdy, "ifxModem");
++ ret += gpio_export(ifx_dev->gpio.srdy, 1);
++ ret += gpio_direction_input(ifx_dev->gpio.srdy);
++ if (ret) {
++ dev_err(&spi->dev, "Unable to configure GPIO%d (SRDY)",
++ ifx_dev->gpio.srdy);
++ ret = -EBUSY;
++ goto error_ret2;
++ }
++
++ ret = gpio_request(ifx_dev->gpio.reset_out, "ifxModem");
++ ret += gpio_export(ifx_dev->gpio.reset_out, 1);
++ ret += gpio_direction_input(ifx_dev->gpio.reset_out);
++ if (ret) {
++ dev_err(&spi->dev, "Unable to configure GPIO%d (RESET_OUT)",
++ ifx_dev->gpio.reset_out);
++ ret = -EBUSY;
++ goto error_ret2;
++ }
++
++ /*
++ * set up modem power, reset
++ *
++ * delays are required on some platforms for the modem
++ * to reset properly
++ */
++ gpio_set_value(ifx_dev->gpio.po, 0);
++ gpio_set_value(ifx_dev->gpio.reset, 0);
++ mdelay(25);
++ gpio_set_value(ifx_dev->gpio.reset, 1);
++ mdelay(1);
++ gpio_set_value(ifx_dev->gpio.po, 1);
++ mdelay(1);
++
++ dev_dbg(&spi->dev, "Trying to get irq %x (%x)",
++ gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev->gpio.srdy);
++ ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy),
++ (irq_handler_t)ifx_spi_gpio_interrupt,
++ IRQF_TRIGGER_RISING, "spi_ifx_modem",
++ (void *)ifx_dev);
++ if (ret) {
++ dev_err(&spi->dev, "Unable to get irq %x",
++ gpio_to_irq(ifx_dev->gpio.srdy));
++ goto error_ret;
++ }
++
++ /* set pm runtime power state and register with power system */
++ pm_runtime_set_active(&spi->dev);
++ pm_runtime_enable(&spi->dev);
++
++ /* handle case that modem is already signaling SRDY */
++ /* no outgoing tty open at this point, this just satisfies the
++ * modem's read and should reset communication properly
++ */
++ srdy = gpio_get_value(ifx_dev->gpio.srdy);
++ dev_dbg(&spi->dev, "IFX: GPIO SRDY = %x", srdy);
++ if (srdy) {
++ dev_dbg(&spi->dev, "assert mrdy, start transaction");
++ mrdy_assert(ifx_dev);
++ ifx_spi_handle_srdy(ifx_dev);
++ } else {
++ dev_dbg(&spi->dev, "setting mrdy low");
++ mrdy_set_low();
++ }
++
++ return 0;
++
++error_ret2:
++ gpio_free(ifx_dev->gpio.srdy);
++ gpio_free(ifx_dev->gpio.mrdy);
++ gpio_free(ifx_dev->gpio.reset);
++ gpio_free(ifx_dev->gpio.po);
++ gpio_free(ifx_dev->gpio.reset_out);
++error_ret:
++ _ifx_spi_free_device();
++ return ret;
++}
++
++static int ifx_spi_spi_remove(struct spi_device *spi)
++{
++ dev_dbg(&spi->dev, "%s called", __func__);
++ return 0;
++}
++
++static void ifx_spi_spi_shutdown(struct spi_device *spi)
++{
++ dev_dbg(&spi->dev, "%s called", __func__);
++}
++
++/*
++ * various suspends and resumes have nothing to do
++ * no hardware to save state for
++ */
++static int ifx_spi_spi_suspend(struct spi_device *spi, pm_message_t msg)
++{
++ dev_dbg(&spi->dev, "%s called", __func__);
++ return 0;
++}
++
++static int ifx_spi_spi_resume(struct spi_device *spi)
++{
++ dev_dbg(&spi->dev, "%s called", __func__);
++ return 0;
++}
++
++static int ifx_spi_pm_suspend(struct device *dev)
++{
++ int retval = 0;
++ struct spi_device *spi = to_spi_device(dev);
++
++ dev_dbg(&spi->dev, "pm suspend");
++
++ return retval;
++}
++
++static int ifx_spi_pm_resume(struct device *dev)
++{
++ int retval = 0;
++ struct spi_device *spi = to_spi_device(dev);
++
++ dev_dbg(&spi->dev, "pm resume");
++
++ return retval;
++}
++
++static int ifx_spi_pm_runtime_resume(struct device *dev)
++{
++ int retval = 0;
++ struct spi_device *spi = to_spi_device(dev);
++
++ dev_dbg(&spi->dev, "pm runtime resume");
++
++ return retval;
++}
++
++static int ifx_spi_pm_runtime_suspend(struct device *dev)
++{
++ int retval = 0;
++ struct spi_device *spi = to_spi_device(dev);
++
++ dev_dbg(&spi->dev, "pm runtime_suspend");
++
++ return retval;
++}
++
++/* check conditions and queue runtime suspend if idle */
++static int ifx_spi_pm_runtime_idle(struct device *dev)
++{
++ int retval = 0;
++ struct spi_device *spi = to_spi_device(dev);
++ struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
++
++ dev_dbg(&spi->dev, "pm runtime_idle");
++
++ if (!ifx_dev->power_status)
++ pm_runtime_suspend(dev);
++
++ return retval;
++}
++
++static const struct dev_pm_ops ifx_spi_pm = {
++ .resume = ifx_spi_pm_resume,
++ .suspend = ifx_spi_pm_suspend,
++ .runtime_resume = ifx_spi_pm_runtime_resume,
++ .runtime_suspend = ifx_spi_pm_runtime_suspend,
++ .runtime_idle = ifx_spi_pm_runtime_idle
++};
++
++/* spi operations */
++static const struct spi_driver ifx_spi_driver = {
++ .driver = {
++ .name = "spi_ifx_modem",
++ .bus = &spi_bus_type,
++ .pm = &ifx_spi_pm,
++ .owner = THIS_MODULE},
++ .probe = ifx_spi_spi_probe,
++ .shutdown = ifx_spi_spi_shutdown,
++ .remove = __devexit_p(ifx_spi_spi_remove),
++ .suspend = ifx_spi_spi_suspend,
++ .resume = ifx_spi_spi_resume
++};
++
++/* module exit point */
++static void __exit ifx_spi_exit(void)
++{
++ pr_debug("%s called", __func__);
++
++ /* stop activity */
++ tasklet_kill(&saved_ifx_dev->io_work_tasklet);
++ /* free irq */
++ free_irq(saved_ifx_dev->spi_dev->irq, (void *)saved_ifx_dev);
++
++ gpio_free(saved_ifx_dev->gpio.srdy);
++ gpio_free(saved_ifx_dev->gpio.mrdy);
++ gpio_free(saved_ifx_dev->gpio.reset);
++ gpio_free(saved_ifx_dev->gpio.po);
++ gpio_free(saved_ifx_dev->gpio.reset_out);
++
++ /* free allocations */
++ _ifx_spi_free_device();
++
++ /* unregister */
++ tty_unregister_driver(tty_drv);
++ spi_unregister_driver((void *)&ifx_spi_driver);
++
++ pr_debug("ifx_spi driver removed");
++}
++
++/* module entry point */
++static int __init ifx_spi_init(void)
++{
++ int result = 0;
++ /*
++ initialize upper-edge spi driver. needs to be done after tty
++ initialization because the spi probe will
++ race
++ */
++
++ pr_info("%s: %s called", DRVNAME, __func__);
++
++
++
++ tty_drv = alloc_tty_driver(1);
++ if (!tty_drv) {
++ pr_err("%s: alloc_tty_driver failed", DRVNAME);
++ return -ENOMEM;
++ }
++
++ tty_drv->magic = TTY_DRIVER_MAGIC;
++ tty_drv->owner = THIS_MODULE;
++ tty_drv->driver_name = DRVNAME;
++ tty_drv->name = TTYNAME;
++ tty_drv->minor_start = IFX_SPI_TTY_ID;
++ tty_drv->num = 1;
++ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
++ tty_drv->subtype = SERIAL_TYPE_NORMAL;
++ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
++ tty_drv->init_termios = *init_termios();
++
++ tty_set_operations(tty_drv, &ifx_spi_serial_ops);
++
++ result = tty_register_driver(tty_drv);
++ if (result) {
++ pr_err("%s: tty_register_driver failed(%d)",
++ DRVNAME, result);
++ return result;
++ }
++
++ result = spi_register_driver((void *)&ifx_spi_driver);
++ if (result) {
++ pr_err("%s: spi_register_driver failed(%d)",
++ DRVNAME, result);
++ tty_unregister_driver(tty_drv);
++ return result;
++ }
++
++
++ pr_debug("%s: ifx_spi driver initialized successfully", DRVNAME);
++ return 0;
++}
++
++module_init(ifx_spi_init);
++module_exit(ifx_spi_exit);
++
++MODULE_AUTHOR("Intel");
++MODULE_DESCRIPTION("IFX6x60 spi driver");
++MODULE_LICENSE("GPL");
++MODULE_INFO(Version, "0.1-IFX6x60");
+--- /dev/null
++++ b/drivers/serial/ifx6x60.h
+@@ -0,0 +1,126 @@
++/****************************************************************************
++ *
++ * Driver for the IFX spi modem.
++ *
++ * Copyright (C) 2009, 2010 Intel Corp
++ * Jim Stanley <jim.stanley@intel.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ *
++ *
++ *****************************************************************************/
++#ifndef _IFX6X60_H
++#define _IFX6X60_H
++
++#define DRVNAME "ifx6x60"
++#define TTYNAME "ttyIFX"
++
++/* #define IFX_THROTTLE_CODE */
++
++#define IFX_SPI_MAX_MINORS 1
++#define IFX_SPI_TRANSFER_SIZE 2048
++#define IFX_SPI_FIFO_SIZE 4096
++
++#define IFX_SPI_HEADER_OVERHEAD 4
++
++/* device flags bitfield definitions */
++#define IFX_SPI_STATE_PRESENT 0
++#define IFX_SPI_STATE_IO_IN_PROGRESS 1
++#define IFX_SPI_STATE_IO_READY 2
++#define IFX_SPI_STATE_TIMER_PENDING 3
++
++/* flow control bitfields */
++#define IFX_SPI_DCD 0
++#define IFX_SPI_CTS 1
++#define IFX_SPI_DSR 2
++#define IFX_SPI_RI 3
++#define IFX_SPI_DTR 4
++#define IFX_SPI_RTS 5
++#define IFX_SPI_TX_FC 6
++#define IFX_SPI_RX_FC 7
++#define IFX_SPI_UPDATE 8
++
++#define IFX_SPI_PAYLOAD_SIZE (IFX_SPI_TRANSFER_SIZE - \
++ IFX_SPI_HEADER_OVERHEAD)
++
++#define IFX_SPI_IRQ_TYPE DETECT_EDGE_RISING
++#define IFX_SPI_GPIO_TARGET 0
++#define IFX_SPI_GPIO0 0x105
++
++#define IFX_SPI_STATUS_TIMEOUT (2000*HZ)
++
++/* values for bits in power status byte */
++#define IFX_SPI_POWER_DATA_PENDING 1
++#define IFX_SPI_POWER_SRDY 2
++
++struct ifx_spi_device {
++ struct spi_device *spi_dev;
++ struct ifx_spi_port_data *port_data;
++ struct tasklet_struct io_work_tasklet;
++ struct work_struct write_wakeup_work;
++ unsigned long flags;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma;
++
++ spinlock_t write_lock;
++ int write_pending;
++ spinlock_t power_lock;
++ unsigned char power_status;
++
++ unsigned char *rx_buffer;
++ unsigned char *tx_buffer;
++ unsigned char spi_more;
++ unsigned char spi_slave_cts;
++
++ struct timer_list spi_timer;
++
++ struct spi_message spi_msg;
++ struct spi_transfer spi_xfer;
++
++ struct {
++ /* gpio lines */
++ unsigned short srdy; /* slave-ready gpio */
++ unsigned short mrdy; /* master-ready gpio */
++ unsigned short reset; /* modem-reset gpio */
++ unsigned short po; /* modem-on gpio */
++ unsigned short reset_out; /* modem-in-reset gpio */
++ /* state/stats */
++ int unack_srdy_int_nb;
++ } gpio;
++};
++
++struct ifx_spi_serial {
++ struct tty_port tty_port;
++ struct device *tty_dev;
++ unsigned int tty_write_cnt;
++ int minor;
++ unsigned long ostatus;
++#define OS_OPEN 0
++#define OS_HCLOSE_START 1
++#define OS_HCLOSE_DONE 2
++ wait_queue_head_t hangup_wait;
++};
++
++struct ifx_spi_port_data {
++ int port_id;
++ struct kfifo tx_fifo;
++ spinlock_t fifo_lock;
++ unsigned long signal_state;
++ struct ifx_spi_serial serial;
++ struct ifx_spi_device *ifx_spi;
++};
++#endif /* _IFX6X60_H */
+--- /dev/null
++++ b/drivers/serial/max3107-aava.c
+@@ -0,0 +1,339 @@
++/*
++ * max3107.c - spi uart protocol driver for Maxim 3107
++ * Based on max3100.c
++ * by Christian Pellegrin <chripell@evolware.org>
++ * and max3110.c
++ * by Feng Tang <feng.tang@intel.com>
++ *
++ * Copyright (C) Aavamobile 2009
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/serial_core.h>
++#include <linux/serial.h>
++#include <linux/spi/spi.h>
++#include <linux/freezer.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/koski_hwid.h>
++#include <asm/mrst.h>
++#include "max3107.h"
++
++/* GPIO direction to input function */
++static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[1]; /* Buffer for SPI transfer */
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO configuration register */
++ buf[0] = MAX3107_GPIOCFG_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Set GPIO to input */
++ buf[0] &= ~(0x0001 << offset);
++
++ /* Write new GPIO configuration register value */
++ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
++ dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++/* GPIO direction to output function */
++static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
++ int value)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[2]; /* Buffer for SPI transfers */
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO configuration and data registers */
++ buf[0] = MAX3107_GPIOCFG_REG;
++ buf[1] = MAX3107_GPIODATA_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
++ dev_err(&s->spi->dev, "SPI transfer gpio failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Set GPIO to output */
++ buf[0] |= (0x0001 << offset);
++ /* Set value */
++ if (value)
++ buf[1] |= (0x0001 << offset);
++ else
++ buf[1] &= ~(0x0001 << offset);
++
++ /* Write new GPIO configuration and data register values */
++ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
++ buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO conf data w failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++/* GPIO value query function */
++static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[1]; /* Buffer for SPI transfer */
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO data register */
++ buf[0] = MAX3107_GPIODATA_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Return value */
++ return buf[0] & (0x0001 << offset);
++}
++
++/* GPIO value set function */
++static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[2]; /* Buffer for SPI transfers */
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return;
++ }
++
++ /* Read current GPIO configuration registers*/
++ buf[0] = MAX3107_GPIODATA_REG;
++ buf[1] = MAX3107_GPIOCFG_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO data and config read failed\n");
++ return;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
++
++ if (!(buf[1] & (0x0001 << offset))) {
++ /* Configured as input, can't set value */
++ dev_warn(&s->spi->dev,
++ "Trying to set value for input GPIO\n");
++ return;
++ }
++
++ /* Set value */
++ if (value)
++ buf[0] |= (0x0001 << offset);
++ else
++ buf[0] &= ~(0x0001 << offset);
++
++ /* Write new GPIO data register value */
++ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 2))
++ dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n");
++}
++
++/* GPIO chip data */
++static struct gpio_chip max3107_gpio_chip = {
++ .owner = THIS_MODULE,
++ .direction_input = max3107_gpio_direction_in,
++ .direction_output = max3107_gpio_direction_out,
++ .get = max3107_gpio_get,
++ .set = max3107_gpio_set,
++ .can_sleep = 1,
++ .base = MAX3107_GPIO_BASE,
++ .ngpio = MAX3107_GPIO_COUNT,
++};
++
++/**
++ * max3107_aava_reset - reset on AAVA systems
++ * @spi: The SPI device we are probing
++ *
++ * Reset the device ready for probing.
++ */
++
++static int max3107_aava_reset(struct spi_device *spi)
++{
++ /* Reset the chip */
++ if (gpio_request(MAX3107_RESET_GPIO, "max3107")) {
++ pr_err("Requesting RESET GPIO failed\n");
++ return -EIO;
++ }
++ if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) {
++ pr_err("Setting RESET GPIO to 0 failed\n");
++ gpio_free(MAX3107_RESET_GPIO);
++ return -EIO;
++ }
++ msleep(MAX3107_RESET_DELAY);
++ if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) {
++ pr_err("Setting RESET GPIO to 1 failed\n");
++ gpio_free(MAX3107_RESET_GPIO);
++ return -EIO;
++ }
++ gpio_free(MAX3107_RESET_GPIO);
++ msleep(MAX3107_WAKEUP_DELAY);
++ return 0;
++}
++
++static int max3107_aava_configure(struct max3107_port *s)
++{
++ int retval;
++
++ /* Initialize GPIO chip data */
++ s->chip = max3107_gpio_chip;
++ s->chip.label = s->spi->modalias;
++ s->chip.dev = &s->spi->dev;
++
++ /* Add GPIO chip */
++ retval = gpiochip_add(&s->chip);
++ if (retval) {
++ dev_err(&s->spi->dev, "Adding GPIO chip failed\n");
++ return retval;
++ }
++
++ /* Temporary fix for EV2 boot problems, set modem reset to 0 */
++ max3107_gpio_direction_out(&s->chip, 3, 0);
++ return 0;
++}
++
++static struct baud_table brg13_ext[] = {
++ { 300, MAX3107_BRG13_B300 },
++ { 600, MAX3107_BRG13_B600 },
++ { 1200, MAX3107_BRG13_B1200 },
++ { 2400, MAX3107_BRG13_B2400 },
++ { 4800, MAX3107_BRG13_B4800 },
++ { 9600, MAX3107_BRG13_B9600 },
++ { 19200, MAX3107_BRG13_B19200 },
++ { 57600, MAX3107_BRG13_B57600 },
++ { 115200, MAX3107_BRG13_B115200 },
++ { 230400, MAX3107_BRG13_B230400 },
++ { 460800, MAX3107_BRG13_B460800 },
++ { 921600, MAX3107_BRG13_B921600 },
++ { 0, 0 }
++};
++
++static void max3107_aava_init(struct max3107_port *s)
++{
++ /*override for AAVA SC specific*/
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ if (get_koski_build_id() <= KOSKI_EV2)
++ if (s->ext_clk) {
++ s->brg_cfg = MAX3107_BRG13_B9600;
++ s->baud_tbl = (struct baud_table *)brg13_ext;
++ }
++ }
++}
++
++static int __devexit max3107_aava_remove(struct spi_device *spi)
++{
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ /* Remove GPIO chip */
++ if (gpiochip_remove(&s->chip))
++ dev_warn(&spi->dev, "Removing GPIO chip failed\n");
++
++ /* Then do the default remove */
++ return max3107_remove(spi);
++}
++
++/* Platform data */
++static struct max3107_plat aava_plat_data = {
++ .loopback = 0,
++ .ext_clk = 1,
++ .init = max3107_aava_init,
++ .configure = max3107_aava_configure,
++ .hw_suspend = max3107_hw_susp,
++ .polled_mode = 0,
++ .poll_time = 0,
++};
++
++
++static int __devinit max3107_probe_aava(struct spi_device *spi)
++{
++ int err = max3107_aava_reset(spi);
++ if (err < 0)
++ return err;
++ return max3107_probe(spi, &aava_plat_data);
++}
++
++/* Spi driver data */
++static struct spi_driver max3107_driver = {
++ .driver = {
++ .name = "aava-max3107",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = max3107_probe_aava,
++ .remove = __devexit_p(max3107_aava_remove),
++ .suspend = max3107_suspend,
++ .resume = max3107_resume,
++};
++
++/* Driver init function */
++static int __init max3107_init(void)
++{
++ return spi_register_driver(&max3107_driver);
++}
++
++/* Driver exit function */
++static void __exit max3107_exit(void)
++{
++ spi_unregister_driver(&max3107_driver);
++}
++
++module_init(max3107_init);
++module_exit(max3107_exit);
++
++MODULE_DESCRIPTION("MAX3107 driver");
++MODULE_AUTHOR("Aavamobile");
++MODULE_ALIAS("aava-max3107-spi");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/serial/max3107.c
+@@ -0,0 +1,1192 @@
++/*
++ * max3107.c - spi uart protocol driver for Maxim 3107
++ * Based on max3100.c
++ * by Christian Pellegrin <chripell@evolware.org>
++ * and max3110.c
++ * by Feng Tang <feng.tang@intel.com>
++ *
++ * Copyright (C) Aavamobile 2009
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/serial_core.h>
++#include <linux/serial.h>
++#include <linux/spi/spi.h>
++#include <linux/freezer.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/koski_hwid.h>
++#include <asm/mrst.h>
++#include "max3107.h"
++
++static const struct baud_table brg26_ext[] = {
++ { 300, MAX3107_BRG26_B300 },
++ { 600, MAX3107_BRG26_B600 },
++ { 1200, MAX3107_BRG26_B1200 },
++ { 2400, MAX3107_BRG26_B2400 },
++ { 4800, MAX3107_BRG26_B4800 },
++ { 9600, MAX3107_BRG26_B9600 },
++ { 19200, MAX3107_BRG26_B19200 },
++ { 57600, MAX3107_BRG26_B57600 },
++ { 115200, MAX3107_BRG26_B115200 },
++ { 230400, MAX3107_BRG26_B230400 },
++ { 460800, MAX3107_BRG26_B460800 },
++ { 921600, MAX3107_BRG26_B921600 },
++ { 0, 0 }
++};
++
++static const struct baud_table brg13_int[] = {
++ { 300, MAX3107_BRG13_IB300 },
++ { 600, MAX3107_BRG13_IB600 },
++ { 1200, MAX3107_BRG13_IB1200 },
++ { 2400, MAX3107_BRG13_IB2400 },
++ { 4800, MAX3107_BRG13_IB4800 },
++ { 9600, MAX3107_BRG13_IB9600 },
++ { 19200, MAX3107_BRG13_IB19200 },
++ { 57600, MAX3107_BRG13_IB57600 },
++ { 115200, MAX3107_BRG13_IB115200 },
++ { 230400, MAX3107_BRG13_IB230400 },
++ { 460800, MAX3107_BRG13_IB460800 },
++ { 921600, MAX3107_BRG13_IB921600 },
++ { 0, 0 }
++};
++
++static u32 get_new_brg(int baud, struct max3107_port *s)
++{
++ int i;
++ const struct baud_table *baud_tbl = s->baud_tbl;
++
++ for (i = 0; i < 13; i++) {
++ if (baud == baud_tbl[i].baud)
++ return baud_tbl[i].new_brg;
++ }
++
++ return 0;
++}
++
++/* Perform SPI transfer for write/read of device register(s) */
++int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len)
++{
++ struct spi_message spi_msg;
++ struct spi_transfer spi_xfer;
++
++ /* Initialize SPI ,message */
++ spi_message_init(&spi_msg);
++
++ /* Initialize SPI transfer */
++ memset(&spi_xfer, 0, sizeof spi_xfer);
++ spi_xfer.len = len;
++ spi_xfer.tx_buf = tx;
++ spi_xfer.rx_buf = rx;
++ spi_xfer.speed_hz = MAX3107_SPI_SPEED;
++
++ /* Add SPI transfer to SPI message */
++ spi_message_add_tail(&spi_xfer, &spi_msg);
++
++#ifdef DBG_TRACE_SPI_DATA
++ {
++ int i;
++ pr_info("tx len %d:\n", spi_xfer.len);
++ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++)
++ pr_info(" %x", ((u8 *)spi_xfer.tx_buf)[i]);
++ pr_info("\n");
++ }
++#endif
++
++ /* Perform synchronous SPI transfer */
++ if (spi_sync(s->spi, &spi_msg)) {
++ dev_err(&s->spi->dev, "spi_sync failure\n");
++ return -EIO;
++ }
++
++#ifdef DBG_TRACE_SPI_DATA
++ if (spi_xfer.rx_buf) {
++ int i;
++ pr_info("rx len %d:\n", spi_xfer.len);
++ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++)
++ pr_info(" %x", ((u8 *)spi_xfer.rx_buf)[i]);
++ pr_info("\n");
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL_GPL(max3107_rw);
++
++/* Puts received data to circular buffer */
++static void put_data_to_circ_buf(struct max3107_port *s, unsigned char *data,
++ int len)
++{
++ struct uart_port *port = &s->port;
++ struct tty_struct *tty;
++
++ if (!port->state)
++ return;
++
++ tty = port->state->port.tty;
++ if (!tty)
++ return;
++
++ /* Insert received data */
++ tty_insert_flip_string(tty, data, len);
++ /* Update RX counter */
++ port->icount.rx += len;
++}
++
++/* Handle data receiving */
++static void max3107_handlerx(struct max3107_port *s, u16 rxlvl)
++{
++ int i;
++ int j;
++ int len; /* SPI transfer buffer length */
++ u16 *buf;
++ u8 *valid_str;
++
++ if (!s->rx_enabled)
++ /* RX is disabled */
++ return;
++
++ if (rxlvl == 0) {
++ /* RX fifo is empty */
++ return;
++ } else if (rxlvl >= MAX3107_RX_FIFO_SIZE) {
++ dev_warn(&s->spi->dev, "Possible RX FIFO overrun %d\n", rxlvl);
++ /* Ensure sanity of RX level */
++ rxlvl = MAX3107_RX_FIFO_SIZE;
++ }
++ if ((s->rxbuf == 0) || (s->rxstr == 0)) {
++ dev_warn(&s->spi->dev, "Rx buffer/str isn't ready\n");
++ return;
++ }
++ buf = s->rxbuf;
++ valid_str = s->rxstr;
++ while (rxlvl) {
++ pr_debug("rxlvl %d\n", rxlvl);
++ /* Clear buffer */
++ memset(buf, 0, sizeof(u16) * (MAX3107_RX_FIFO_SIZE + 2));
++ len = 0;
++ if (s->irqen_reg & MAX3107_IRQ_RXFIFO_BIT) {
++ /* First disable RX FIFO interrupt */
++ pr_debug("Disabling RX INT\n");
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ s->irqen_reg &= ~MAX3107_IRQ_RXFIFO_BIT;
++ buf[0] |= s->irqen_reg;
++ len++;
++ }
++ /* Just increase the length by amount of words in FIFO since
++ * buffer was zeroed and SPI transfer of 0x0000 means reading
++ * from RX FIFO
++ */
++ len += rxlvl;
++ /* Append RX level query */
++ buf[len] = MAX3107_RXFIFOLVL_REG;
++ len++;
++
++ /* Perform the SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, len * 2)) {
++ dev_err(&s->spi->dev, "SPI transfer for RX h failed\n");
++ return;
++ }
++
++ /* Skip RX FIFO interrupt disabling word if it was added */
++ j = ((len - 1) - rxlvl);
++ /* Read received words */
++ for (i = 0; i < rxlvl; i++, j++)
++ valid_str[i] = (u8)buf[j];
++ put_data_to_circ_buf(s, valid_str, rxlvl);
++ /* Get new RX level */
++ rxlvl = (buf[len - 1] & MAX3107_SPI_RX_DATA_MASK);
++ }
++
++ if (s->rx_enabled) {
++ /* RX still enabled, re-enable RX FIFO interrupt */
++ pr_debug("Enabling RX INT\n");
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
++ buf[0] |= s->irqen_reg;
++ if (max3107_rw(s, (u8 *)buf, NULL, 2))
++ dev_err(&s->spi->dev, "RX FIFO INT enabling failed\n");
++ }
++
++ /* Push the received data to receivers */
++ if (s->port.state->port.tty)
++ tty_flip_buffer_push(s->port.state->port.tty);
++}
++
++
++/* Handle data sending */
++static void max3107_handletx(struct max3107_port *s)
++{
++ struct circ_buf *xmit = &s->port.state->xmit;
++ int i;
++ unsigned long flags;
++ int len; /* SPI transfer buffer length */
++ u16 *buf;
++
++ if (!s->tx_fifo_empty)
++ /* Don't send more data before previous data is sent */
++ return;
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(&s->port))
++ /* No data to send or TX is stopped */
++ return;
++
++ if (!s->txbuf) {
++ dev_warn(&s->spi->dev, "Txbuf isn't ready\n");
++ return;
++ }
++ buf = s->txbuf;
++ /* Get length of data pending in circular buffer */
++ len = uart_circ_chars_pending(xmit);
++ if (len) {
++ /* Limit to size of TX FIFO */
++ if (len > MAX3107_TX_FIFO_SIZE)
++ len = MAX3107_TX_FIFO_SIZE;
++
++ pr_debug("txlen %d\n", len);
++
++ /* Update TX counter */
++ s->port.icount.tx += len;
++
++ /* TX FIFO will no longer be empty */
++ s->tx_fifo_empty = 0;
++
++ i = 0;
++ if (s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT) {
++ /* First disable TX empty interrupt */
++ pr_debug("Disabling TE INT\n");
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ s->irqen_reg &= ~MAX3107_IRQ_TXEMPTY_BIT;
++ buf[i] |= s->irqen_reg;
++ i++;
++ len++;
++ }
++ /* Add data to send */
++ spin_lock_irqsave(&s->port.lock, flags);
++ for ( ; i < len ; i++) {
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_THR_REG);
++ buf[i] |= ((u16)xmit->buf[xmit->tail] &
++ MAX3107_SPI_TX_DATA_MASK);
++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
++ }
++ spin_unlock_irqrestore(&s->port.lock, flags);
++ if (!(s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT)) {
++ /* Enable TX empty interrupt */
++ pr_debug("Enabling TE INT\n");
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ s->irqen_reg |= MAX3107_IRQ_TXEMPTY_BIT;
++ buf[i] |= s->irqen_reg;
++ i++;
++ len++;
++ }
++ if (!s->tx_enabled) {
++ /* Enable TX */
++ pr_debug("Enable TX\n");
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ spin_lock_irqsave(&s->data_lock, flags);
++ s->mode1_reg &= ~MAX3107_MODE1_TXDIS_BIT;
++ buf[i] |= s->mode1_reg;
++ spin_unlock_irqrestore(&s->data_lock, flags);
++ s->tx_enabled = 1;
++ i++;
++ len++;
++ }
++
++ /* Perform the SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, len*2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer TX handling failed\n");
++ return;
++ }
++ }
++
++ /* Indicate wake up if circular buffer is getting low on data */
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(&s->port);
++
++}
++
++/* Handle interrupts
++ * Also reads and returns current RX FIFO level
++ */
++static u16 handle_interrupt(struct max3107_port *s)
++{
++ u16 buf[4]; /* Buffer for SPI transfers */
++ u8 irq_status;
++ u16 rx_level;
++ unsigned long flags;
++
++ /* Read IRQ status register */
++ buf[0] = MAX3107_IRQSTS_REG;
++ /* Read status IRQ status register */
++ buf[1] = MAX3107_STS_IRQSTS_REG;
++ /* Read LSR IRQ status register */
++ buf[2] = MAX3107_LSR_IRQSTS_REG;
++ /* Query RX level */
++ buf[3] = MAX3107_RXFIFOLVL_REG;
++
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 8)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for INTR handling failed\n");
++ return 0;
++ }
++
++ irq_status = (u8)buf[0];
++ pr_debug("IRQSTS %x\n", irq_status);
++ rx_level = (buf[3] & MAX3107_SPI_RX_DATA_MASK);
++
++ if (irq_status & MAX3107_IRQ_LSR_BIT) {
++ /* LSR interrupt */
++ if (buf[2] & MAX3107_LSR_RXTO_BIT)
++ /* RX timeout interrupt,
++ * handled by normal RX handling
++ */
++ pr_debug("RX TO INT\n");
++ }
++
++ if (irq_status & MAX3107_IRQ_TXEMPTY_BIT) {
++ /* Tx empty interrupt,
++ * disable TX and set tx_fifo_empty flag
++ */
++ pr_debug("TE INT, disabling TX\n");
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ spin_lock_irqsave(&s->data_lock, flags);
++ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
++ buf[0] |= s->mode1_reg;
++ spin_unlock_irqrestore(&s->data_lock, flags);
++ if (max3107_rw(s, (u8 *)buf, NULL, 2))
++ dev_err(&s->spi->dev, "SPI transfer TX dis failed\n");
++ s->tx_enabled = 0;
++ s->tx_fifo_empty = 1;
++ }
++
++ if (irq_status & MAX3107_IRQ_RXFIFO_BIT)
++ /* RX FIFO interrupt,
++ * handled by normal RX handling
++ */
++ pr_debug("RFIFO INT\n");
++
++ /* Return RX level */
++ return rx_level;
++}
++
++/* Trigger work thread*/
++static void max3107_dowork(struct max3107_port *s)
++{
++ if (!work_pending(&s->work) && !freezing(current) && !s->suspended)
++ queue_work(s->workqueue, &s->work);
++ else
++ dev_warn(&s->spi->dev, "interrup isn't serviced normally!\n");
++}
++
++/* Work thread */
++static void max3107_work(struct work_struct *w)
++{
++ struct max3107_port *s = container_of(w, struct max3107_port, work);
++ u16 rxlvl = 0;
++ int len; /* SPI transfer buffer length */
++ u16 buf[5]; /* Buffer for SPI transfers */
++ unsigned long flags;
++
++ /* Start by reading current RX FIFO level */
++ buf[0] = MAX3107_RXFIFOLVL_REG;
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev, "SPI transfer RX lev failed\n");
++ rxlvl = 0;
++ } else {
++ rxlvl = (buf[0] & MAX3107_SPI_RX_DATA_MASK);
++ }
++
++ do {
++ pr_debug("rxlvl %d\n", rxlvl);
++
++ /* Handle RX */
++ max3107_handlerx(s, rxlvl);
++ rxlvl = 0;
++
++ if (s->handle_irq) {
++ /* Handle pending interrupts
++ * We also get new RX FIFO level since new data may
++ * have been received while pushing received data to
++ * receivers
++ */
++ s->handle_irq = 0;
++ rxlvl = handle_interrupt(s);
++ }
++
++ /* Handle TX */
++ max3107_handletx(s);
++
++ /* Handle configuration changes */
++ len = 0;
++ spin_lock_irqsave(&s->data_lock, flags);
++ if (s->mode1_commit) {
++ pr_debug("mode1_commit\n");
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ buf[len++] |= s->mode1_reg;
++ s->mode1_commit = 0;
++ }
++ if (s->lcr_commit) {
++ pr_debug("lcr_commit\n");
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG);
++ buf[len++] |= s->lcr_reg;
++ s->lcr_commit = 0;
++ }
++ if (s->brg_commit) {
++ pr_debug("brg_commit\n");
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG);
++ buf[len++] |= ((s->brg_cfg >> 16) &
++ MAX3107_SPI_TX_DATA_MASK);
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG);
++ buf[len++] |= ((s->brg_cfg >> 8) &
++ MAX3107_SPI_TX_DATA_MASK);
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG);
++ buf[len++] |= ((s->brg_cfg) & 0xff);
++ s->brg_commit = 0;
++ }
++ spin_unlock_irqrestore(&s->data_lock, flags);
++
++ if (len > 0) {
++ if (max3107_rw(s, (u8 *)buf, NULL, len * 2))
++ dev_err(&s->spi->dev,
++ "SPI transfer config failed\n");
++ }
++
++ /* Reloop if interrupt handling indicated data in RX FIFO */
++ } while (rxlvl);
++
++}
++
++/* Set sleep mode */
++static void max3107_set_sleep(struct max3107_port *s, int mode)
++{
++ u16 buf[1]; /* Buffer for SPI transfer */
++ unsigned long flags;
++ pr_debug("enter, mode %d\n", mode);
++
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ spin_lock_irqsave(&s->data_lock, flags);
++ switch (mode) {
++ case MAX3107_DISABLE_FORCED_SLEEP:
++ s->mode1_reg &= ~MAX3107_MODE1_FORCESLEEP_BIT;
++ break;
++ case MAX3107_ENABLE_FORCED_SLEEP:
++ s->mode1_reg |= MAX3107_MODE1_FORCESLEEP_BIT;
++ break;
++ case MAX3107_DISABLE_AUTOSLEEP:
++ s->mode1_reg &= ~MAX3107_MODE1_AUTOSLEEP_BIT;
++ break;
++ case MAX3107_ENABLE_AUTOSLEEP:
++ s->mode1_reg |= MAX3107_MODE1_AUTOSLEEP_BIT;
++ break;
++ default:
++ spin_unlock_irqrestore(&s->data_lock, flags);
++ dev_warn(&s->spi->dev, "invalid sleep mode\n");
++ return;
++ }
++ buf[0] |= s->mode1_reg;
++ spin_unlock_irqrestore(&s->data_lock, flags);
++
++ if (max3107_rw(s, (u8 *)buf, NULL, 2))
++ dev_err(&s->spi->dev, "SPI transfer sleep mode failed\n");
++
++ if (mode == MAX3107_DISABLE_AUTOSLEEP ||
++ mode == MAX3107_DISABLE_FORCED_SLEEP)
++ msleep(MAX3107_WAKEUP_DELAY);
++}
++
++/* Perform full register initialization */
++static void max3107_register_init(struct max3107_port *s)
++{
++ u16 buf[11]; /* Buffer for SPI transfers */
++
++ /* 1. Configure baud rate, 9600 as default */
++ s->baud = 9600;
++ /* the below is default*/
++ if (s->ext_clk) {
++ s->brg_cfg = MAX3107_BRG26_B9600;
++ s->baud_tbl = (struct baud_table *)brg26_ext;
++ } else {
++ s->brg_cfg = MAX3107_BRG13_IB9600;
++ s->baud_tbl = (struct baud_table *)brg13_int;
++ }
++ if (s->pdata->init)
++ s->pdata->init(s);
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG)
++ | ((s->brg_cfg >> 16) & MAX3107_SPI_TX_DATA_MASK);
++ buf[1] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG)
++ | ((s->brg_cfg >> 8) & MAX3107_SPI_TX_DATA_MASK);
++ buf[2] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG)
++ | ((s->brg_cfg) & 0xff);
++
++ /* 2. Configure LCR register, 8N1 mode by default */
++ s->lcr_reg = MAX3107_LCR_WORD_LEN_8;
++ buf[3] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG)
++ | s->lcr_reg;
++
++ /* 3. Configure MODE 1 register */
++ s->mode1_reg = 0;
++ /* Enable IRQ pin */
++ s->mode1_reg |= MAX3107_MODE1_IRQSEL_BIT;
++ /* Disable TX */
++ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
++ s->tx_enabled = 0;
++ /* RX is enabled */
++ s->rx_enabled = 1;
++ buf[4] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG)
++ | s->mode1_reg;
++
++ /* 4. Configure MODE 2 register */
++ buf[5] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
++ if (s->loopback) {
++ /* Enable loopback */
++ buf[5] |= MAX3107_MODE2_LOOPBACK_BIT;
++ }
++ /* Reset FIFOs */
++ buf[5] |= MAX3107_MODE2_FIFORST_BIT;
++ s->tx_fifo_empty = 1;
++
++ /* 5. Configure FIFO trigger level register */
++ buf[6] = (MAX3107_WRITE_BIT | MAX3107_FIFOTRIGLVL_REG);
++ /* RX FIFO trigger for 16 words, TX FIFO trigger not used */
++ buf[6] |= (MAX3107_FIFOTRIGLVL_RX(16) | MAX3107_FIFOTRIGLVL_TX(0));
++
++ /* 6. Configure flow control levels */
++ buf[7] = (MAX3107_WRITE_BIT | MAX3107_FLOWLVL_REG);
++ /* Flow control halt level 96, resume level 48 */
++ buf[7] |= (MAX3107_FLOWLVL_RES(48) | MAX3107_FLOWLVL_HALT(96));
++
++ /* 7. Configure flow control */
++ buf[8] = (MAX3107_WRITE_BIT | MAX3107_FLOWCTRL_REG);
++ /* Enable auto CTS and auto RTS flow control */
++ buf[8] |= (MAX3107_FLOWCTRL_AUTOCTS_BIT | MAX3107_FLOWCTRL_AUTORTS_BIT);
++
++ /* 8. Configure RX timeout register */
++ buf[9] = (MAX3107_WRITE_BIT | MAX3107_RXTO_REG);
++ /* Timeout after 48 character intervals */
++ buf[9] |= 0x0030;
++
++ /* 9. Configure LSR interrupt enable register */
++ buf[10] = (MAX3107_WRITE_BIT | MAX3107_LSR_IRQEN_REG);
++ /* Enable RX timeout interrupt */
++ buf[10] |= MAX3107_LSR_RXTO_BIT;
++
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 22))
++ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
++
++ /* 10. Clear IRQ status register by reading it */
++ buf[0] = MAX3107_IRQSTS_REG;
++
++ /* 11. Configure interrupt enable register */
++ /* Enable LSR interrupt */
++ s->irqen_reg = MAX3107_IRQ_LSR_BIT;
++ /* Enable RX FIFO interrupt */
++ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
++ buf[1] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG)
++ | s->irqen_reg;
++
++ /* 12. Clear FIFO reset that was set in step 6 */
++ buf[2] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
++ if (s->loopback) {
++ /* Keep loopback enabled */
++ buf[2] |= MAX3107_MODE2_LOOPBACK_BIT;
++ }
++
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 6))
++ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
++
++}
++
++/* IRQ handler */
++static irqreturn_t max3107_irq(int irqno, void *dev_id)
++{
++ struct max3107_port *s = dev_id;
++
++ if (irqno != s->spi->irq) {
++ /* Unexpected IRQ */
++ return IRQ_NONE;
++ }
++
++ /* Indicate irq */
++ s->handle_irq = 1;
++
++ /* Trigger work thread */
++ max3107_dowork(s);
++
++ return IRQ_HANDLED;
++}
++
++/* HW suspension function
++ *
++ * Currently autosleep is used to decrease current consumption, alternative
++ * approach would be to set the chip to reset mode if UART is not being
++ * used but that would mess the GPIOs
++ *
++ */
++void max3107_hw_susp(struct max3107_port *s, int suspend)
++{
++ pr_debug("enter, suspend %d\n", suspend);
++
++ if (suspend) {
++ /* Suspend requested,
++ * enable autosleep to decrease current consumption
++ */
++ s->suspended = 1;
++ max3107_set_sleep(s, MAX3107_ENABLE_AUTOSLEEP);
++ } else {
++ /* Resume requested,
++ * disable autosleep
++ */
++ s->suspended = 0;
++ max3107_set_sleep(s, MAX3107_DISABLE_AUTOSLEEP);
++ }
++}
++EXPORT_SYMBOL_GPL(max3107_hw_susp);
++
++/* Modem status IRQ enabling */
++static void max3107_enable_ms(struct uart_port *port)
++{
++ /* Modem status not supported */
++}
++
++/* Data send function */
++static void max3107_start_tx(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ /* Trigger work thread for sending data */
++ max3107_dowork(s);
++}
++
++/* Function for checking that there is no pending transfers */
++static unsigned int max3107_tx_empty(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ pr_debug("returning %d\n",
++ (s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit)));
++ return s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit);
++}
++
++/* Function for stopping RX */
++static void max3107_stop_rx(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++ unsigned long flags;
++
++ /* Set RX disabled in MODE 1 register */
++ spin_lock_irqsave(&s->data_lock, flags);
++ s->mode1_reg |= MAX3107_MODE1_RXDIS_BIT;
++ s->mode1_commit = 1;
++ spin_unlock_irqrestore(&s->data_lock, flags);
++ /* Set RX disabled */
++ s->rx_enabled = 0;
++ /* Trigger work thread for doing the actual configuration change */
++ max3107_dowork(s);
++}
++
++/* Function for returning control pin states */
++static unsigned int max3107_get_mctrl(struct uart_port *port)
++{
++ /* DCD and DSR are not wired and CTS/RTS is handled automatically
++ * so just indicate DSR and CAR asserted
++ */
++ return TIOCM_DSR | TIOCM_CAR;
++}
++
++/* Function for setting control pin states */
++static void max3107_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++ /* DCD and DSR are not wired and CTS/RTS is hadnled automatically
++ * so do nothing
++ */
++}
++
++/* Function for configuring UART parameters */
++static void max3107_set_termios(struct uart_port *port,
++ struct ktermios *termios,
++ struct ktermios *old)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++ struct tty_struct *tty;
++ int baud;
++ u16 new_lcr = 0;
++ u32 new_brg = 0;
++ unsigned long flags;
++
++ if (!port->state)
++ return;
++
++ tty = port->state->port.tty;
++ if (!tty)
++ return;
++
++ /* Get new LCR register values */
++ /* Word size */
++ if ((termios->c_cflag & CSIZE) == CS7)
++ new_lcr |= MAX3107_LCR_WORD_LEN_7;
++ else
++ new_lcr |= MAX3107_LCR_WORD_LEN_8;
++
++ /* Parity */
++ if (termios->c_cflag & PARENB) {
++ new_lcr |= MAX3107_LCR_PARITY_BIT;
++ if (!(termios->c_cflag & PARODD))
++ new_lcr |= MAX3107_LCR_EVENPARITY_BIT;
++ }
++
++ /* Stop bits */
++ if (termios->c_cflag & CSTOPB) {
++ /* 2 stop bits */
++ new_lcr |= MAX3107_LCR_STOPLEN_BIT;
++ }
++
++ /* Mask termios capabilities we don't support */
++ termios->c_cflag &= ~CMSPAR;
++
++ /* Set status ignore mask */
++ s->port.ignore_status_mask = 0;
++ if (termios->c_iflag & IGNPAR)
++ s->port.ignore_status_mask |= MAX3107_ALL_ERRORS;
++
++ /* Set low latency to immediately handle pushed data */
++ s->port.state->port.tty->low_latency = 1;
++
++ /* Get new baud rate generator configuration */
++ baud = tty_get_baud_rate(tty);
++
++ spin_lock_irqsave(&s->data_lock, flags);
++ new_brg = get_new_brg(baud, s);
++ /* if can't find the corrent config, use previous */
++ if (!new_brg) {
++ baud = s->baud;
++ new_brg = s->brg_cfg;
++ }
++ spin_unlock_irqrestore(&s->data_lock, flags);
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ s->baud = baud;
++
++ /* Update timeout according to new baud rate */
++ uart_update_timeout(port, termios->c_cflag, baud);
++
++ spin_lock_irqsave(&s->data_lock, flags);
++ if (s->lcr_reg != new_lcr) {
++ s->lcr_reg = new_lcr;
++ s->lcr_commit = 1;
++ }
++ if (s->brg_cfg != new_brg) {
++ s->brg_cfg = new_brg;
++ s->brg_commit = 1;
++ }
++ spin_unlock_irqrestore(&s->data_lock, flags);
++
++ /* Trigger work thread for doing the actual configuration change */
++ max3107_dowork(s);
++}
++
++/* Port shutdown function */
++static void max3107_shutdown(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ if (s->suspended && s->pdata->hw_suspend)
++ s->pdata->hw_suspend(s, 0);
++
++ /* Free the interrupt */
++ free_irq(s->spi->irq, s);
++
++ if (s->workqueue) {
++ /* Flush and destroy work queue */
++ flush_workqueue(s->workqueue);
++ destroy_workqueue(s->workqueue);
++ s->workqueue = NULL;
++ }
++
++ /* Suspend HW */
++ if (s->pdata->hw_suspend)
++ s->pdata->hw_suspend(s, 1);
++}
++
++/* Port startup function */
++static int max3107_startup(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ /* Initialize work queue */
++ s->workqueue = create_freezeable_workqueue("max3107");
++ if (!s->workqueue) {
++ dev_err(&s->spi->dev, "Workqueue creation failed\n");
++ return -EBUSY;
++ }
++ INIT_WORK(&s->work, max3107_work);
++
++ /* Setup IRQ */
++ if (request_irq(s->spi->irq, max3107_irq, IRQF_TRIGGER_FALLING,
++ "max3107", s)) {
++ dev_err(&s->spi->dev, "IRQ reguest failed\n");
++ destroy_workqueue(s->workqueue);
++ s->workqueue = NULL;
++ return -EBUSY;
++ }
++
++ /* Resume HW */
++ if (s->pdata->hw_suspend)
++ s->pdata->hw_suspend(s, 0);
++
++ /* Init registers */
++ max3107_register_init(s);
++
++ return 0;
++}
++
++/* Port type function */
++static const char *max3107_type(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++ return s->spi->modalias;
++}
++
++/* Port release function */
++static void max3107_release_port(struct uart_port *port)
++{
++ /* Do nothing */
++}
++
++/* Port request function */
++static int max3107_request_port(struct uart_port *port)
++{
++ /* Do nothing */
++ return 0;
++}
++
++/* Port config function */
++static void max3107_config_port(struct uart_port *port, int flags)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++ s->port.type = PORT_MAX3107;
++}
++
++/* Port verify function */
++static int max3107_verify_port(struct uart_port *port,
++ struct serial_struct *ser)
++{
++ if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3107)
++ return 0;
++
++ return -EINVAL;
++}
++
++/* Port stop TX function */
++static void max3107_stop_tx(struct uart_port *port)
++{
++ /* Do nothing */
++}
++
++/* Port break control function */
++static void max3107_break_ctl(struct uart_port *port, int break_state)
++{
++ /* We don't support break control, do nothing */
++}
++
++
++/* Port functions */
++static struct uart_ops max3107_ops = {
++ .tx_empty = max3107_tx_empty,
++ .set_mctrl = max3107_set_mctrl,
++ .get_mctrl = max3107_get_mctrl,
++ .stop_tx = max3107_stop_tx,
++ .start_tx = max3107_start_tx,
++ .stop_rx = max3107_stop_rx,
++ .enable_ms = max3107_enable_ms,
++ .break_ctl = max3107_break_ctl,
++ .startup = max3107_startup,
++ .shutdown = max3107_shutdown,
++ .set_termios = max3107_set_termios,
++ .type = max3107_type,
++ .release_port = max3107_release_port,
++ .request_port = max3107_request_port,
++ .config_port = max3107_config_port,
++ .verify_port = max3107_verify_port,
++};
++
++/* UART driver data */
++static struct uart_driver max3107_uart_driver = {
++ .owner = THIS_MODULE,
++ .driver_name = "ttyMAX",
++ .dev_name = "ttyMAX",
++ .nr = 1,
++};
++
++
++
++/* 'Generic' platform data */
++static struct max3107_plat generic_plat_data = {
++ .loopback = 0,
++ .ext_clk = 1,
++ .hw_suspend = max3107_hw_susp,
++ .polled_mode = 0,
++ .poll_time = 0,
++};
++
++
++/*******************************************************************/
++
++/**
++ * max3107_probe - SPI bus probe entry point
++ * @spi: the spi device
++ *
++ * SPI wants us to probe this device and if appropriate claim it.
++ * Perform any platform specific requirements and then initialise
++ * the device.
++ */
++
++int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
++{
++ struct max3107_port *s;
++ u16 buf[2]; /* Buffer for SPI transfers */
++ int retval;
++
++ pr_info("enter max3107 probe\n");
++
++ /* Allocate port structure */
++ s = kzalloc(sizeof(*s), GFP_KERNEL);
++ if (!s) {
++ pr_err("Allocating port structure failed\n");
++ return -ENOMEM;
++ }
++
++ s->pdata = pdata;
++
++ /* SPI Rx buffer
++ * +2 for RX FIFO interrupt
++ * disabling and RX level query
++ */
++ s->rxbuf = kzalloc(sizeof(u16) * (MAX3107_RX_FIFO_SIZE+2), GFP_KERNEL);
++ if (!s->rxbuf) {
++ pr_err("Allocating RX buffer failed\n");
++ return -ENOMEM;
++ }
++ s->rxstr = kzalloc(sizeof(u8) * MAX3107_RX_FIFO_SIZE, GFP_KERNEL);
++ if (!s->rxstr) {
++ pr_err("Allocating RX buffer failed\n");
++ return -ENOMEM;
++ }
++ /* SPI Tx buffer
++ * SPI transfer buffer
++ * +3 for TX FIFO empty
++ * interrupt disabling and
++ * enabling and TX enabling
++ */
++	s->txbuf = kzalloc(sizeof(u16) * (MAX3107_TX_FIFO_SIZE + 3), GFP_KERNEL);
++ if (!s->txbuf) {
++ pr_err("Allocating TX buffer failed\n");
++ return -ENOMEM;
++ }
++ /* Initialize shared data lock */
++ spin_lock_init(&s->data_lock);
++
++ /* SPI intializations */
++ dev_set_drvdata(&spi->dev, s);
++ spi->mode = SPI_MODE_0;
++ spi->dev.platform_data = pdata;
++ spi->bits_per_word = 16;
++ s->ext_clk = pdata->ext_clk;
++ s->loopback = pdata->loopback;
++ spi_setup(spi);
++ s->spi = spi;
++
++ /* Check REV ID to ensure we are talking to what we expect */
++ buf[0] = MAX3107_REVID_REG;
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev, "SPI transfer for REVID read failed\n");
++ return -EIO;
++ }
++ if ((buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID1 &&
++ (buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID2) {
++ dev_err(&s->spi->dev, "REVID %x does not match\n",
++ (buf[0] & MAX3107_SPI_RX_DATA_MASK));
++ return -ENODEV;
++ }
++
++ /* Disable all interrupts */
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG | 0x0000);
++ buf[0] |= 0x0000;
++
++ /* Configure clock source */
++ buf[1] = (MAX3107_WRITE_BIT | MAX3107_CLKSRC_REG);
++ if (s->ext_clk) {
++ /* External clock */
++ buf[1] |= MAX3107_CLKSRC_EXTCLK_BIT;
++ }
++
++ /* PLL bypass ON */
++ buf[1] |= MAX3107_CLKSRC_PLLBYP_BIT;
++
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
++ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
++ return -EIO;
++ }
++
++ /* Register UART driver */
++ retval = uart_register_driver(&max3107_uart_driver);
++ if (retval) {
++ dev_err(&s->spi->dev, "Registering UART driver failed\n");
++ return retval;
++ }
++
++ /* Initialize UART port data */
++ s->port.fifosize = 128;
++ s->port.ops = &max3107_ops;
++ s->port.line = 0;
++ s->port.dev = &spi->dev;
++ s->port.uartclk = 9600;
++ s->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
++ s->port.irq = s->spi->irq;
++ s->port.type = PORT_MAX3107;
++
++ /* Add UART port */
++ retval = uart_add_one_port(&max3107_uart_driver, &s->port);
++ if (retval < 0) {
++ dev_err(&s->spi->dev, "Adding UART port failed\n");
++ return retval;
++ }
++
++ if (pdata->configure) {
++ retval = pdata->configure(s);
++ if (retval < 0)
++ return retval;
++ }
++
++ /* Go to suspend mode */
++ if (pdata->hw_suspend)
++ pdata->hw_suspend(s, 1);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(max3107_probe);
++
++/* Driver remove function */
++int max3107_remove(struct spi_device *spi)
++{
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ pr_info("enter max3107 remove\n");
++
++ /* Remove port */
++ if (uart_remove_one_port(&max3107_uart_driver, &s->port))
++ dev_warn(&s->spi->dev, "Removing UART port failed\n");
++
++ /* Unregister UART driver */
++ uart_unregister_driver(&max3107_uart_driver);
++
++ /* Free TxRx buffer */
++ kfree(s->rxbuf);
++ kfree(s->rxstr);
++ kfree(s->txbuf);
++
++ /* Free port structure */
++ kfree(s);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(max3107_remove);
++
++/* Driver suspend function */
++int max3107_suspend(struct spi_device *spi, pm_message_t state)
++{
++#ifdef CONFIG_PM
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ pr_debug("enter suspend\n");
++
++ /* Suspend UART port */
++ uart_suspend_port(&max3107_uart_driver, &s->port);
++
++ /* Go to suspend mode */
++ if (s->pdata->hw_suspend)
++ s->pdata->hw_suspend(s, 1);
++#endif /* CONFIG_PM */
++ return 0;
++}
++EXPORT_SYMBOL_GPL(max3107_suspend);
++
++/* Driver resume function */
++int max3107_resume(struct spi_device *spi)
++{
++#ifdef CONFIG_PM
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ pr_debug("enter resume\n");
++
++ /* Resume from suspend */
++ if (s->pdata->hw_suspend)
++ s->pdata->hw_suspend(s, 0);
++
++ /* Resume UART port */
++ uart_resume_port(&max3107_uart_driver, &s->port);
++#endif /* CONFIG_PM */
++ return 0;
++}
++EXPORT_SYMBOL_GPL(max3107_resume);
++
++static int max3107_probe_generic(struct spi_device *spi)
++{
++ return max3107_probe(spi, &generic_plat_data);
++}
++
++/* Spi driver data */
++static struct spi_driver max3107_driver = {
++ .driver = {
++ .name = "max3107",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = max3107_probe_generic,
++ .remove = __devexit_p(max3107_remove),
++ .suspend = max3107_suspend,
++ .resume = max3107_resume,
++};
++
++/* Driver init function */
++static int __init max3107_init(void)
++{
++ pr_info("enter max3107 init\n");
++ return spi_register_driver(&max3107_driver);
++}
++
++/* Driver exit function */
++static void __exit max3107_exit(void)
++{
++ pr_info("enter max3107 exit\n");
++ spi_unregister_driver(&max3107_driver);
++}
++
++module_init(max3107_init);
++module_exit(max3107_exit);
++
++MODULE_DESCRIPTION("MAX3107 driver");
++MODULE_AUTHOR("Aavamobile");
++MODULE_ALIAS("max3107-spi");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/serial/max3107.h
+@@ -0,0 +1,439 @@
++/*
++ * max3107.h - spi uart protocol driver header for Maxim 3107
++ *
++ * Copyright (C) Aavamobile 2009
++ * Based on serial_max3100.h by Christian Pellegrin
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef _MAX3107_H
++#define _MAX3107_H
++
++/* Serial error status definitions */
++#define MAX3107_PARITY_ERROR 1
++#define MAX3107_FRAME_ERROR 2
++#define MAX3107_OVERRUN_ERROR 4
++#define MAX3107_ALL_ERRORS (MAX3107_PARITY_ERROR | \
++ MAX3107_FRAME_ERROR | \
++ MAX3107_OVERRUN_ERROR)
++
++/* GPIO definitions */
++#define MAX3107_GPIO_BASE 88
++#define MAX3107_GPIO_COUNT 4
++
++
++/* GPIO connected to chip's reset pin */
++#define MAX3107_RESET_GPIO 87
++
++
++/* Chip reset delay */
++#define MAX3107_RESET_DELAY 10
++
++/* Chip wakeup delay */
++#define MAX3107_WAKEUP_DELAY 50
++
++
++/* Sleep mode definitions */
++#define MAX3107_DISABLE_FORCED_SLEEP 0
++#define MAX3107_ENABLE_FORCED_SLEEP 1
++#define MAX3107_DISABLE_AUTOSLEEP 2
++#define MAX3107_ENABLE_AUTOSLEEP 3
++
++
++/* Definitions for register access with SPI transfers
++ *
++ * SPI transfer format:
++ *
++ * Master to slave bits xzzzzzzzyyyyyyyy
++ * Slave to master bits aaaaaaaabbbbbbbb
++ *
++ * where:
++ * x = 0 for reads, 1 for writes
++ * z = register address
++ * y = new register value if write, 0 if read
++ * a = unspecified
++ * b = register value if read, unspecified if write
++ */
++
++/* SPI speed */
++#define MAX3107_SPI_SPEED (3125000 * 2)
++
++/* Write bit */
++#define MAX3107_WRITE_BIT (1 << 15)
++
++/* SPI RX data mask */
++#define MAX3107_SPI_RX_DATA_MASK	(0x00ff)
++
++/* SPI TX data mask */
++#define MAX3107_SPI_TX_DATA_MASK	(0x00ff)
++
++/* Register access masks */
++#define MAX3107_RHR_REG (0x0000) /* RX FIFO */
++#define MAX3107_THR_REG (0x0000) /* TX FIFO */
++#define MAX3107_IRQEN_REG (0x0100) /* IRQ enable */
++#define MAX3107_IRQSTS_REG (0x0200) /* IRQ status */
++#define MAX3107_LSR_IRQEN_REG (0x0300) /* LSR IRQ enable */
++#define MAX3107_LSR_IRQSTS_REG (0x0400) /* LSR IRQ status */
++#define MAX3107_SPCHR_IRQEN_REG (0x0500) /* Special char IRQ enable */
++#define MAX3107_SPCHR_IRQSTS_REG (0x0600) /* Special char IRQ status */
++#define MAX3107_STS_IRQEN_REG (0x0700) /* Status IRQ enable */
++#define MAX3107_STS_IRQSTS_REG (0x0800) /* Status IRQ status */
++#define MAX3107_MODE1_REG (0x0900) /* MODE1 */
++#define MAX3107_MODE2_REG (0x0a00) /* MODE2 */
++#define MAX3107_LCR_REG (0x0b00) /* LCR */
++#define MAX3107_RXTO_REG (0x0c00) /* RX timeout */
++#define MAX3107_HDPIXDELAY_REG (0x0d00) /* Auto transceiver delays */
++#define MAX3107_IRDA_REG (0x0e00) /* IRDA settings */
++#define MAX3107_FLOWLVL_REG (0x0f00) /* Flow control levels */
++#define MAX3107_FIFOTRIGLVL_REG (0x1000) /* FIFO IRQ trigger levels */
++#define MAX3107_TXFIFOLVL_REG (0x1100) /* TX FIFO level */
++#define MAX3107_RXFIFOLVL_REG (0x1200) /* RX FIFO level */
++#define MAX3107_FLOWCTRL_REG (0x1300) /* Flow control */
++#define MAX3107_XON1_REG (0x1400) /* XON1 character */
++#define MAX3107_XON2_REG (0x1500) /* XON2 character */
++#define MAX3107_XOFF1_REG (0x1600) /* XOFF1 character */
++#define MAX3107_XOFF2_REG (0x1700) /* XOFF2 character */
++#define MAX3107_GPIOCFG_REG (0x1800) /* GPIO config */
++#define MAX3107_GPIODATA_REG (0x1900) /* GPIO data */
++#define MAX3107_PLLCFG_REG (0x1a00) /* PLL config */
++#define MAX3107_BRGCFG_REG (0x1b00) /* Baud rate generator conf */
++#define MAX3107_BRGDIVLSB_REG (0x1c00) /* Baud rate divisor LSB */
++#define MAX3107_BRGDIVMSB_REG (0x1d00) /* Baud rate divisor MSB */
++#define MAX3107_CLKSRC_REG (0x1e00) /* Clock source */
++#define MAX3107_REVID_REG (0x1f00) /* Revision identification */
++
++/* IRQ register bits */
++#define MAX3107_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
++#define MAX3107_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
++#define MAX3107_IRQ_STS_BIT (1 << 2) /* Status interrupt */
++#define MAX3107_IRQ_RXFIFO_BIT (1 << 3) /* RX FIFO interrupt */
++#define MAX3107_IRQ_TXFIFO_BIT (1 << 4) /* TX FIFO interrupt */
++#define MAX3107_IRQ_TXEMPTY_BIT (1 << 5) /* TX FIFO empty interrupt */
++#define MAX3107_IRQ_RXEMPTY_BIT (1 << 6) /* RX FIFO empty interrupt */
++#define MAX3107_IRQ_CTS_BIT (1 << 7) /* CTS interrupt */
++
++/* LSR register bits */
++#define MAX3107_LSR_RXTO_BIT (1 << 0) /* RX timeout */
++#define MAX3107_LSR_RXOVR_BIT (1 << 1) /* RX overrun */
++#define MAX3107_LSR_RXPAR_BIT (1 << 2) /* RX parity error */
++#define MAX3107_LSR_FRERR_BIT (1 << 3) /* Frame error */
++#define MAX3107_LSR_RXBRK_BIT (1 << 4) /* RX break */
++#define MAX3107_LSR_RXNOISE_BIT (1 << 5) /* RX noise */
++#define MAX3107_LSR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_LSR_CTS_BIT (1 << 7) /* CTS pin state */
++
++/* Special character register bits */
++#define MAX3107_SPCHR_XON1_BIT (1 << 0) /* XON1 character */
++#define MAX3107_SPCHR_XON2_BIT (1 << 1) /* XON2 character */
++#define MAX3107_SPCHR_XOFF1_BIT (1 << 2) /* XOFF1 character */
++#define MAX3107_SPCHR_XOFF2_BIT (1 << 3) /* XOFF2 character */
++#define MAX3107_SPCHR_BREAK_BIT (1 << 4) /* RX break */
++#define MAX3107_SPCHR_MULTIDROP_BIT (1 << 5) /* 9-bit multidrop addr char */
++#define MAX3107_SPCHR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_SPCHR_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* Status register bits */
++#define MAX3107_STS_GPIO0_BIT (1 << 0) /* GPIO 0 interrupt */
++#define MAX3107_STS_GPIO1_BIT (1 << 1) /* GPIO 1 interrupt */
++#define MAX3107_STS_GPIO2_BIT (1 << 2) /* GPIO 2 interrupt */
++#define MAX3107_STS_GPIO3_BIT (1 << 3) /* GPIO 3 interrupt */
++#define MAX3107_STS_UNDEF4_BIT (1 << 4) /* Undefined/not used */
++#define MAX3107_STS_CLKREADY_BIT (1 << 5) /* Clock ready */
++#define MAX3107_STS_SLEEP_BIT (1 << 6) /* Sleep interrupt */
++#define MAX3107_STS_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* MODE1 register bits */
++#define MAX3107_MODE1_RXDIS_BIT (1 << 0) /* RX disable */
++#define MAX3107_MODE1_TXDIS_BIT (1 << 1) /* TX disable */
++#define MAX3107_MODE1_TXHIZ_BIT (1 << 2) /* TX pin three-state */
++#define MAX3107_MODE1_RTSHIZ_BIT (1 << 3) /* RTS pin three-state */
++#define MAX3107_MODE1_TRNSCVCTRL_BIT (1 << 4) /* Transceiver ctrl enable */
++#define MAX3107_MODE1_FORCESLEEP_BIT (1 << 5) /* Force sleep mode */
++#define MAX3107_MODE1_AUTOSLEEP_BIT (1 << 6) /* Auto sleep enable */
++#define MAX3107_MODE1_IRQSEL_BIT (1 << 7) /* IRQ pin enable */
++
++/* MODE2 register bits */
++#define MAX3107_MODE2_RST_BIT (1 << 0) /* Chip reset */
++#define MAX3107_MODE2_FIFORST_BIT (1 << 1) /* FIFO reset */
++#define MAX3107_MODE2_RXTRIGINV_BIT (1 << 2) /* RX FIFO INT invert */
++#define MAX3107_MODE2_RXEMPTINV_BIT (1 << 3) /* RX FIFO empty INT invert */
++#define MAX3107_MODE2_SPCHR_BIT (1 << 4) /* Special chr detect enable */
++#define MAX3107_MODE2_LOOPBACK_BIT (1 << 5) /* Internal loopback enable */
++#define MAX3107_MODE2_MULTIDROP_BIT (1 << 6) /* 9-bit multidrop enable */
++#define MAX3107_MODE2_ECHOSUPR_BIT (1 << 7) /* ECHO suppression enable */
++
++/* LCR register bits */
++#define MAX3107_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
++#define MAX3107_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
++ *
++ * Word length bits table:
++ * 00 -> 5 bit words
++ * 01 -> 6 bit words
++ * 10 -> 7 bit words
++ * 11 -> 8 bit words
++ */
++#define MAX3107_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
++ *
++ * STOP length bit table:
++ * 0 -> 1 stop bit
++ * 1 -> 1-1.5 stop bits if
++ * word length is 5,
++ * 2 stop bits otherwise
++ */
++#define MAX3107_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
++#define MAX3107_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
++#define MAX3107_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
++#define MAX3107_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
++#define MAX3107_LCR_RTS_BIT (1 << 7) /* RTS pin control */
++#define MAX3107_LCR_WORD_LEN_5 (0x0000)
++#define MAX3107_LCR_WORD_LEN_6 (0x0001)
++#define MAX3107_LCR_WORD_LEN_7 (0x0002)
++#define MAX3107_LCR_WORD_LEN_8 (0x0003)
++
++
++/* IRDA register bits */
++#define MAX3107_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
++#define MAX3107_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
++#define MAX3107_IRDA_SHORTIR_BIT (1 << 2) /* Short SIR mode enable */
++#define MAX3107_IRDA_MIR_BIT (1 << 3) /* MIR mode enable */
++#define MAX3107_IRDA_RXINV_BIT (1 << 4) /* RX logic inversion enable */
++#define MAX3107_IRDA_TXINV_BIT (1 << 5) /* TX logic inversion enable */
++#define MAX3107_IRDA_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_IRDA_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* Flow control trigger level register masks */
++#define MAX3107_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
++#define MAX3107_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
++#define MAX3107_FLOWLVL_HALT(words) ((words/8) & 0x000f)
++#define MAX3107_FLOWLVL_RES(words) (((words/8) & 0x000f) << 4)
++
++/* FIFO interrupt trigger level register masks */
++#define MAX3107_FIFOTRIGLVL_TX_MASK (0x000f) /* TX FIFO trigger level */
++#define MAX3107_FIFOTRIGLVL_RX_MASK (0x00f0) /* RX FIFO trigger level */
++#define MAX3107_FIFOTRIGLVL_TX(words) ((words/8) & 0x000f)
++#define MAX3107_FIFOTRIGLVL_RX(words) (((words/8) & 0x000f) << 4)
++
++/* Flow control register bits */
++#define MAX3107_FLOWCTRL_AUTORTS_BIT (1 << 0) /* Auto RTS flow ctrl enable */
++#define MAX3107_FLOWCTRL_AUTOCTS_BIT (1 << 1) /* Auto CTS flow ctrl enable */
++#define MAX3107_FLOWCTRL_GPIADDR_BIT (1 << 2) /* Enables that GPIO inputs
++ * are used in conjunction with
++ * XOFF2 for definition of
++ * special character */
++#define MAX3107_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
++#define MAX3107_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
++#define MAX3107_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
++ *
++ * SWFLOW bits 1 & 0 table:
++ * 00 -> no transmitter flow
++ * control
++ * 01 -> receiver compares
++ * XON2 and XOFF2
++ * and controls
++ * transmitter
++ * 10 -> receiver compares
++ * XON1 and XOFF1
++ * and controls
++ * transmitter
++ * 11 -> receiver compares
++ * XON1, XON2, XOFF1 and
++ * XOFF2 and controls
++ * transmitter
++ */
++#define MAX3107_FLOWCTRL_SWFLOW2_BIT (1 << 6) /* SWFLOW bit 2 */
++#define MAX3107_FLOWCTRL_SWFLOW3_BIT (1 << 7) /* SWFLOW bit 3
++ *
++ * SWFLOW bits 3 & 2 table:
++ * 00 -> no received flow
++ * control
++ * 01 -> transmitter generates
++ * XON2 and XOFF2
++ * 10 -> transmitter generates
++ * XON1 and XOFF1
++ * 11 -> transmitter generates
++ * XON1, XON2, XOFF1 and
++ * XOFF2
++ */
++
++/* GPIO configuration register bits */
++#define MAX3107_GPIOCFG_GP0OUT_BIT (1 << 0) /* GPIO 0 output enable */
++#define MAX3107_GPIOCFG_GP1OUT_BIT (1 << 1) /* GPIO 1 output enable */
++#define MAX3107_GPIOCFG_GP2OUT_BIT (1 << 2) /* GPIO 2 output enable */
++#define MAX3107_GPIOCFG_GP3OUT_BIT (1 << 3) /* GPIO 3 output enable */
++#define MAX3107_GPIOCFG_GP0OD_BIT (1 << 4) /* GPIO 0 open-drain enable */
++#define MAX3107_GPIOCFG_GP1OD_BIT (1 << 5) /* GPIO 1 open-drain enable */
++#define MAX3107_GPIOCFG_GP2OD_BIT (1 << 6) /* GPIO 2 open-drain enable */
++#define MAX3107_GPIOCFG_GP3OD_BIT (1 << 7) /* GPIO 3 open-drain enable */
++
++/* GPIO DATA register bits */
++#define MAX3107_GPIODATA_GP0OUT_BIT (1 << 0) /* GPIO 0 output value */
++#define MAX3107_GPIODATA_GP1OUT_BIT (1 << 1) /* GPIO 1 output value */
++#define MAX3107_GPIODATA_GP2OUT_BIT (1 << 2) /* GPIO 2 output value */
++#define MAX3107_GPIODATA_GP3OUT_BIT (1 << 3) /* GPIO 3 output value */
++#define MAX3107_GPIODATA_GP0IN_BIT (1 << 4) /* GPIO 0 input value */
++#define MAX3107_GPIODATA_GP1IN_BIT (1 << 5) /* GPIO 1 input value */
++#define MAX3107_GPIODATA_GP2IN_BIT (1 << 6) /* GPIO 2 input value */
++#define MAX3107_GPIODATA_GP3IN_BIT (1 << 7) /* GPIO 3 input value */
++
++/* PLL configuration register masks */
++#define MAX3107_PLLCFG_PREDIV_MASK (0x003f) /* PLL predivision value */
++#define MAX3107_PLLCFG_PLLFACTOR_MASK (0x00c0) /* PLL multiplication factor */
++
++/* Baud rate generator configuration register masks and bits */
++#define MAX3107_BRGCFG_FRACT_MASK (0x000f) /* Fractional portion of
++ * Baud rate generator divisor
++ */
++#define MAX3107_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
++#define MAX3107_BRGCFG_4XMODE_BIT (1 << 5) /* Quadruple baud rate */
++#define MAX3107_BRGCFG_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_BRGCFG_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* Clock source register bits */
++#define MAX3107_CLKSRC_INTOSC_BIT (1 << 0) /* Internal osc enable */
++#define MAX3107_CLKSRC_CRYST_BIT (1 << 1) /* Crystal osc enable */
++#define MAX3107_CLKSRC_PLL_BIT (1 << 2) /* PLL enable */
++#define MAX3107_CLKSRC_PLLBYP_BIT (1 << 3) /* PLL bypass */
++#define MAX3107_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */
++#define MAX3107_CLKSRC_UNDEF5_BIT (1 << 5) /* Undefined/not used */
++#define MAX3107_CLKSRC_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */
++
++
++/* HW definitions */
++#define MAX3107_RX_FIFO_SIZE 128
++#define MAX3107_TX_FIFO_SIZE 128
++#define MAX3107_REVID1 0x00a0
++#define MAX3107_REVID2 0x00a1
++
++
++/* Baud rate generator configuration values for external clock 13MHz */
++#define MAX3107_BRG13_B300 (0x0A9400 | 0x05)
++#define MAX3107_BRG13_B600 (0x054A00 | 0x03)
++#define MAX3107_BRG13_B1200 (0x02A500 | 0x01)
++#define MAX3107_BRG13_B2400 (0x015200 | 0x09)
++#define MAX3107_BRG13_B4800 (0x00A900 | 0x04)
++#define MAX3107_BRG13_B9600 (0x005400 | 0x0A)
++#define MAX3107_BRG13_B19200 (0x002A00 | 0x05)
++#define MAX3107_BRG13_B38400 (0x001500 | 0x03)
++#define MAX3107_BRG13_B57600 (0x000E00 | 0x02)
++#define MAX3107_BRG13_B115200 (0x000700 | 0x01)
++#define MAX3107_BRG13_B230400 (0x000300 | 0x08)
++#define MAX3107_BRG13_B460800 (0x000100 | 0x0c)
++#define MAX3107_BRG13_B921600 (0x000100 | 0x1c)
++
++/* Baud rate generator configuration values for external clock 26MHz */
++#define MAX3107_BRG26_B300 (0x152800 | 0x0A)
++#define MAX3107_BRG26_B600 (0x0A9400 | 0x05)
++#define MAX3107_BRG26_B1200 (0x054A00 | 0x03)
++#define MAX3107_BRG26_B2400 (0x02A500 | 0x01)
++#define MAX3107_BRG26_B4800 (0x015200 | 0x09)
++#define MAX3107_BRG26_B9600 (0x00A900 | 0x04)
++#define MAX3107_BRG26_B19200 (0x005400 | 0x0A)
++#define MAX3107_BRG26_B38400 (0x002A00 | 0x05)
++#define MAX3107_BRG26_B57600 (0x001C00 | 0x03)
++#define MAX3107_BRG26_B115200 (0x000E00 | 0x02)
++#define MAX3107_BRG26_B230400 (0x000700 | 0x01)
++#define MAX3107_BRG26_B460800 (0x000300 | 0x08)
++#define MAX3107_BRG26_B921600 (0x000100 | 0x0C)
++
++/* Baud rate generator configuration values for internal clock */
++#define MAX3107_BRG13_IB300 (0x008000 | 0x00)
++#define MAX3107_BRG13_IB600 (0x004000 | 0x00)
++#define MAX3107_BRG13_IB1200 (0x002000 | 0x00)
++#define MAX3107_BRG13_IB2400 (0x001000 | 0x00)
++#define MAX3107_BRG13_IB4800 (0x000800 | 0x00)
++#define MAX3107_BRG13_IB9600 (0x000400 | 0x00)
++#define MAX3107_BRG13_IB19200 (0x000200 | 0x00)
++#define MAX3107_BRG13_IB38400 (0x000100 | 0x00)
++#define MAX3107_BRG13_IB57600 (0x000000 | 0x0B)
++#define MAX3107_BRG13_IB115200 (0x000000 | 0x05)
++#define MAX3107_BRG13_IB230400 (0x000000 | 0x03)
++#define MAX3107_BRG13_IB460800 (0x000000 | 0x00)
++#define MAX3107_BRG13_IB921600 (0x000000 | 0x00)
++
++
++struct baud_table {
++ int baud;
++ u32 new_brg;
++};
++
++struct max3107_port {
++ /* UART port structure */
++ struct uart_port port;
++
++ /* SPI device structure */
++ struct spi_device *spi;
++
++ /* GPIO chip stucture */
++ struct gpio_chip chip;
++
++ /* Workqueue that does all the magic */
++ struct workqueue_struct *workqueue;
++ struct work_struct work;
++
++ /* Lock for shared data */
++ spinlock_t data_lock;
++
++ /* Device configuration */
++ int ext_clk; /* 1 if external clock used */
++ int loopback; /* Current loopback mode state */
++ int baud; /* Current baud rate */
++
++ /* State flags */
++ int suspended; /* Indicates suspend mode */
++ int tx_fifo_empty; /* Flag for TX FIFO state */
++ int rx_enabled; /* Flag for receiver state */
++ int tx_enabled; /* Flag for transmitter state */
++
++ u16 irqen_reg; /* Current IRQ enable register value */
++ /* Shared data */
++ u16 mode1_reg; /* Current mode1 register value*/
++ int mode1_commit; /* Flag for setting new mode1 register value */
++ u16 lcr_reg; /* Current LCR register value */
++ int lcr_commit; /* Flag for setting new LCR register value */
++ u32 brg_cfg; /* Current Baud rate generator config */
++ int brg_commit; /* Flag for setting new baud rate generator
++ * config
++ */
++ struct baud_table *baud_tbl;
++ int handle_irq; /* Indicates that IRQ should be handled */
++
++ /* Rx buffer and str*/
++ u16 *rxbuf;
++ u8 *rxstr;
++ /* Tx buffer*/
++ u16 *txbuf;
++
++ struct max3107_plat *pdata; /* Platform data */
++};
++
++/* Platform data structure */
++struct max3107_plat {
++ /* Loopback mode enable */
++ int loopback;
++ /* External clock enable */
++ int ext_clk;
++ /* Called during the register initialisation */
++ void (*init)(struct max3107_port *s);
++ /* Called when the port is found and configured */
++ int (*configure)(struct max3107_port *s);
++ /* HW suspend function */
++ void (*hw_suspend) (struct max3107_port *s, int suspend);
++ /* Polling mode enable */
++ int polled_mode;
++ /* Polling period if polling mode enabled */
++ int poll_time;
++};
++
++extern int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len);
++extern void max3107_hw_susp(struct max3107_port *s, int suspend);
++extern int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata);
++extern int max3107_remove(struct spi_device *spi);
++extern int max3107_suspend(struct spi_device *spi, pm_message_t state);
++extern int max3107_resume(struct spi_device *spi);
++
++#endif /* _MAX3107_H */
+--- /dev/null
++++ b/drivers/serial/mfd.c
+@@ -0,0 +1,1507 @@
++/*
++ * mfd.c: driver for High Speed UART device of Intel Medfield platform
++ *
++ * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
++ *
++ * (C) Copyright 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++
++/* Notes:
++ * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
++ * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans
++ * are used for RX, odd chans for TX
++ *
++ * 2. In A0 stepping, UART will not support TX half empty flag
++ *
++ * 3. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
++ * asserted, only when the HW is reset the DDCD and DDSR will
++ * be triggered
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/sysrq.h>
++#include <linux/serial_reg.h>
++#include <linux/circ_buf.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial_core.h>
++#include <linux/serial_mfd.h>
++#include <linux/dma-mapping.h>
++#include <linux/pci.h>
++#include <linux/io.h>
++#include <linux/debugfs.h>
++
++/* Temp solution to get the dma enable flag */
++#include <asm/mrst.h>
++
++#define MFD_HSU_A0_STEPPING 1
++
++#define HSU_DMA_BUF_SIZE 2048
++
++#define chan_readl(chan, offset) readl(chan->reg + offset)
++#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
++
++#define mfd_readl(obj, offset) readl(obj->reg + offset)
++#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
++
++#define HSU_DMA_TIMEOUT_CHECK_FREQ (HZ/10)
++
++struct hsu_dma_buffer {
++ u8 *buf;
++ dma_addr_t dma_addr;
++ u32 dma_size;
++ u32 ofs;
++};
++
++struct hsu_dma_chan {
++ u32 id;
++ enum dma_data_direction dirt;
++ struct uart_hsu_port *uport;
++ void __iomem *reg;
++ struct timer_list rx_timer; /* only needed by RX channel */
++};
++
++struct uart_hsu_port {
++ struct uart_port port;
++ unsigned char ier;
++ unsigned char lcr;
++ unsigned char mcr;
++ unsigned int lsr_break_flag;
++ char name[12];
++ int index;
++ struct device *dev;
++
++ struct hsu_dma_chan *txc;
++ struct hsu_dma_chan *rxc;
++ struct hsu_dma_buffer txbuf;
++ struct hsu_dma_buffer rxbuf;
++ int use_dma; /* flag for DMA/PIO */
++ int running;
++ int dma_tx_on;
++};
++
++/* Top level data structure of HSU */
++struct hsu_port {
++ void __iomem *reg;
++ unsigned long paddr;
++ unsigned long iolen;
++ u32 irq;
++
++ struct uart_hsu_port port[3];
++ struct hsu_dma_chan chans[10];
++
++ struct dentry *debugfs;
++};
++
++static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
++{
++ unsigned int val;
++
++ if (offset > UART_MSR) {
++ offset <<= 2;
++ val = readl(up->port.membase + offset);
++ } else
++ val = (unsigned int)readb(up->port.membase + offset);
++
++ return val;
++}
++
++static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
++{
++ if (offset > UART_MSR) {
++ offset <<= 2;
++ writel(value, up->port.membase + offset);
++ } else {
++ unsigned char val = value & 0xff;
++ writeb(val, up->port.membase + offset);
++ }
++}
++
++#ifdef CONFIG_DEBUG_FS
++
++#define HSU_REGS_BUFSIZE 1024
++
++static int hsu_show_regs_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++
++static ssize_t port_show_regs(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct uart_hsu_port *up = file->private_data;
++ char *buf;
++ u32 len = 0;
++ ssize_t ret;
++
++ buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
++ if (!buf)
++ return 0;
++
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "MFD HSU port[%d] regs:\n", up->index);
++
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "=================================\n");
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "IER: \t\t0x%08x\n", serial_in(up, UART_IER));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "PS: \t\t0x%08x\n", serial_in(up, UART_PS));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
++
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++ kfree(buf);
++ return ret;
++}
++
++static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct hsu_dma_chan *chan = file->private_data;
++ char *buf;
++ u32 len = 0;
++ ssize_t ret;
++
++ buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
++ if (!buf)
++ return 0;
++
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "MFD HSU DMA channel [%d] regs:\n", chan->id);
++
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "=================================\n");
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
++ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++ "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
++	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++			"D1SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
++	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++			"D1TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
++	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++			"D2SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
++	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++			"D2TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
++	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++			"D3SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
++	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
++			"D3TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
++
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++ kfree(buf);
++ return ret;
++}
++
++static const struct file_operations port_regs_ops = {
++ .owner = THIS_MODULE,
++ .open = hsu_show_regs_open,
++ .read = port_show_regs,
++};
++
++static const struct file_operations dma_regs_ops = {
++ .owner = THIS_MODULE,
++ .open = hsu_show_regs_open,
++ .read = dma_show_regs,
++};
++
++static int hsu_debugfs_init(struct hsu_port *hsu)
++{
++ int i;
++ char name[32];
++
++ hsu->debugfs = debugfs_create_dir("hsu", NULL);
++ if (!hsu->debugfs)
++ return -ENOMEM;
++
++ for (i = 0; i < 3; i++) {
++ snprintf(name, sizeof(name), "port_%d_regs", i);
++ debugfs_create_file(name, S_IFREG | S_IRUGO,
++ hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
++ }
++
++ for (i = 0; i < 6; i++) {
++ snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
++ debugfs_create_file(name, S_IFREG | S_IRUGO,
++ hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
++ }
++
++ return 0;
++}
++
++static void hsu_debugfs_remove(struct hsu_port *hsu)
++{
++ if (hsu->debugfs)
++ debugfs_remove_recursive(hsu->debugfs);
++}
++
++#else
++static inline int hsu_debugfs_init(struct hsu_port *hsu)
++{
++ return 0;
++}
++
++static inline void hsu_debugfs_remove(struct hsu_port *hsu)
++{
++}
++#endif /* CONFIG_DEBUG_FS */
++
++static void serial_hsu_enable_ms(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++
++ up->ier |= UART_IER_MSI;
++ serial_out(up, UART_IER, up->ier);
++}
++
++void hsu_dma_tx(struct uart_hsu_port *up)
++{
++ struct circ_buf *xmit = &up->port.state->xmit;
++ struct hsu_dma_buffer *dbuf = &up->txbuf;
++ int count;
++
++ /* test_and_set_bit may be better, but anyway it's in lock protected mode */
++ if (up->dma_tx_on)
++ return;
++
++ /* Update the circ buf info */
++ xmit->tail += dbuf->ofs;
++ xmit->tail &= UART_XMIT_SIZE - 1;
++
++ up->port.icount.tx += dbuf->ofs;
++ dbuf->ofs = 0;
++
++ /* Disable the channel */
++ chan_writel(up->txc, HSU_CH_CR, 0x0);
++
++ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
++ dma_sync_single_for_device(up->port.dev,
++ dbuf->dma_addr,
++ dbuf->dma_size,
++ DMA_TO_DEVICE);
++
++ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
++ dbuf->ofs = count;
++
++ /* Reprogram the channel */
++ chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
++ chan_writel(up->txc, HSU_CH_D0TSR, count);
++
++ /* Reenable the channel */
++ chan_writel(up->txc, HSU_CH_DCR, 0x1
++ | (0x1 << 8)
++ | (0x1 << 16)
++ | (0x1 << 24));
++ up->dma_tx_on = 1;
++ chan_writel(up->txc, HSU_CH_CR, 0x1);
++ }
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(&up->port);
++}
++
++/* The buffer is already cache coherent */
++void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
++{
++ dbuf->ofs = 0;
++
++ chan_writel(rxc, HSU_CH_BSR, 32);
++ chan_writel(rxc, HSU_CH_MOTSR, 4);
++
++ chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
++ chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
++ chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
++ | (0x1 << 16)
++ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
++ );
++ chan_writel(rxc, HSU_CH_CR, 0x3);
++
++ mod_timer(&rxc->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ);
++}
++
++/* Protected by spin_lock_irqsave(port->lock) */
++static void serial_hsu_start_tx(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++
++ if (up->use_dma) {
++ hsu_dma_tx(up);
++ } else if (!(up->ier & UART_IER_THRI)) {
++ up->ier |= UART_IER_THRI;
++ serial_out(up, UART_IER, up->ier);
++ }
++}
++
++static void serial_hsu_stop_tx(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ struct hsu_dma_chan *txc = up->txc;
++
++ if (up->use_dma)
++ chan_writel(txc, HSU_CH_CR, 0x0);
++ else if (up->ier & UART_IER_THRI) {
++ up->ier &= ~UART_IER_THRI;
++ serial_out(up, UART_IER, up->ier);
++ }
++}
++
++/* This is always called in spinlock protected mode, so
++ * modify timeout timer is safe here */
++void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
++{
++ struct hsu_dma_buffer *dbuf = &up->rxbuf;
++ struct hsu_dma_chan *chan = up->rxc;
++ struct uart_port *port = &up->port;
++ struct tty_struct *tty = port->state->port.tty;
++ int count;
++
++ if (!tty)
++ return;
++
++ /*
++ * First need to know how many is already transferred,
++ * then check if its a timeout DMA irq, and return
++ * the trail bytes out, push them up and reenable the
++ * channel
++ */
++
++ /* Timeout IRQ, need wait some time, see Errata 2 */
++ if (int_sts & 0xf00)
++ udelay(2);
++
++ /* Stop the channel */
++ chan_writel(chan, HSU_CH_CR, 0x0);
++
++ count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
++ if (!count) {
++ /* Restart the channel before we leave */
++ chan_writel(chan, HSU_CH_CR, 0x3);
++ return;
++ }
++ del_timer(&chan->rx_timer);
++
++ dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
++ dbuf->dma_size, DMA_FROM_DEVICE);
++
++ /*
++ * Head will only wrap around when we recycle
++ * the DMA buffer, and when that happens, we
++ * explicitly set tail to 0. So head will
++ * always be greater than tail.
++ */
++ tty_insert_flip_string(tty, dbuf->buf, count);
++ port->icount.rx += count;
++
++ dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
++ dbuf->dma_size, DMA_FROM_DEVICE);
++
++ /* Reprogram the channel */
++ chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
++ chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
++ chan_writel(chan, HSU_CH_DCR, 0x1
++ | (0x1 << 8)
++ | (0x1 << 16)
++ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
++ );
++ tty_flip_buffer_push(tty);
++
++ chan_writel(chan, HSU_CH_CR, 0x3);
++ chan->rx_timer.expires = jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ;
++ add_timer(&chan->rx_timer);
++
++}
++
++static void serial_hsu_stop_rx(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ struct hsu_dma_chan *chan = up->rxc;
++
++ if (up->use_dma)
++ chan_writel(chan, HSU_CH_CR, 0x2);
++ else {
++ up->ier &= ~UART_IER_RLSI;
++ up->port.read_status_mask &= ~UART_LSR_DR;
++ serial_out(up, UART_IER, up->ier);
++ }
++}
++
++static inline void receive_chars(struct uart_hsu_port *up, int *status)
++{
++ struct tty_struct *tty = up->port.state->port.tty;
++ unsigned int ch, flag;
++ unsigned int max_count = 256;
++
++ if (!tty)
++ return;
++
++ do {
++ ch = serial_in(up, UART_RX);
++ flag = TTY_NORMAL;
++ up->port.icount.rx++;
++
++ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
++ UART_LSR_FE | UART_LSR_OE))) {
++
++ dev_warn(up->dev, "We really rush into ERR/BI case"
++ "status = 0x%02x", *status);
++ /* For statistics only */
++ if (*status & UART_LSR_BI) {
++ *status &= ~(UART_LSR_FE | UART_LSR_PE);
++ up->port.icount.brk++;
++ /*
++ * We do the SysRQ and SAK checking
++ * here because otherwise the break
++ * may get masked by ignore_status_mask
++ * or read_status_mask.
++ */
++ if (uart_handle_break(&up->port))
++ goto ignore_char;
++ } else if (*status & UART_LSR_PE)
++ up->port.icount.parity++;
++ else if (*status & UART_LSR_FE)
++ up->port.icount.frame++;
++ if (*status & UART_LSR_OE)
++ up->port.icount.overrun++;
++
++ /* Mask off conditions which should be ignored. */
++ *status &= up->port.read_status_mask;
++
++#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
++ if (up->port.cons &&
++ up->port.cons->index == up->port.line) {
++ /* Recover the break flag from console xmit */
++ *status |= up->lsr_break_flag;
++ up->lsr_break_flag = 0;
++ }
++#endif
++ if (*status & UART_LSR_BI) {
++ flag = TTY_BREAK;
++ } else if (*status & UART_LSR_PE)
++ flag = TTY_PARITY;
++ else if (*status & UART_LSR_FE)
++ flag = TTY_FRAME;
++ }
++
++ if (uart_handle_sysrq_char(&up->port, ch))
++ goto ignore_char;
++
++ uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
++ ignore_char:
++ *status = serial_in(up, UART_LSR);
++ } while ((*status & UART_LSR_DR) && max_count--);
++ tty_flip_buffer_push(tty);
++}
++
++static void transmit_chars(struct uart_hsu_port *up)
++{
++ struct circ_buf *xmit = &up->port.state->xmit;
++ int count;
++
++ if (up->port.x_char) {
++ serial_out(up, UART_TX, up->port.x_char);
++ up->port.icount.tx++;
++ up->port.x_char = 0;
++ return;
++ }
++ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
++ serial_hsu_stop_tx(&up->port);
++ return;
++ }
++
++#ifndef MFD_HSU_A0_STEPPING
++ count = up->port.fifosize / 2;
++#else
++ /*
++ * A0 only supports fully empty IRQ, and the first char written
++ * into it won't clear the EMPT bit, so we may need be cautious
++ * by useing a shorter buffer
++ */
++ count = up->port.fifosize - 4;
++#endif
++ do {
++ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
++
++ up->port.icount.tx++;
++ if (uart_circ_empty(xmit))
++ break;
++ } while (--count > 0);
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(&up->port);
++
++ if (uart_circ_empty(xmit))
++ serial_hsu_stop_tx(&up->port);
++}
++
++static inline void check_modem_status(struct uart_hsu_port *up)
++{
++ int status;
++
++ status = serial_in(up, UART_MSR);
++
++ if ((status & UART_MSR_ANY_DELTA) == 0)
++ return;
++
++ if (status & UART_MSR_TERI)
++ up->port.icount.rng++;
++ if (status & UART_MSR_DDSR)
++ up->port.icount.dsr++;
++ /* We may only get DDCD when HW init and reset */
++ if (status & UART_MSR_DDCD)
++ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
++ /* Will start/stop_tx accordingly */
++ if (status & UART_MSR_DCTS)
++ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
++
++ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
++}
++
++/*
++ * This handles the interrupt from one port.
++ */
++static irqreturn_t port_irq(int irq, void *dev_id)
++{
++ struct uart_hsu_port *up = dev_id;
++ unsigned int iir, lsr;
++ unsigned long flags;
++
++ if (unlikely(!up->running))
++ return IRQ_NONE;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++ if (up->use_dma) {
++ lsr = serial_in(up, UART_LSR);
++ if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
++ UART_LSR_FE | UART_LSR_OE)))
++ dev_warn(up->dev,
++ "Got lsr irq while using DMA, lsr = 0x%2x\n",
++ lsr);
++ check_modem_status(up);
++ spin_unlock_irqrestore(&up->port.lock, flags);
++ return IRQ_HANDLED;
++ }
++
++ iir = serial_in(up, UART_IIR);
++ if (iir & UART_IIR_NO_INT) {
++ spin_unlock_irqrestore(&up->port.lock, flags);
++ return IRQ_NONE;
++ }
++
++ lsr = serial_in(up, UART_LSR);
++ if (lsr & UART_LSR_DR)
++ receive_chars(up, &lsr);
++ check_modem_status(up);
++
++ /* lsr will be renewed during the receive_chars */
++ if (lsr & UART_LSR_THRE)
++ transmit_chars(up);
++
++ spin_unlock_irqrestore(&up->port.lock, flags);
++ return IRQ_HANDLED;
++}
++
++static inline void dma_chan_irq(struct hsu_dma_chan *chan)
++{
++ struct uart_hsu_port *up = chan->uport;
++ unsigned long flags;
++ u32 int_sts;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++
++ if (!up->use_dma || !up->running)
++ goto exit;
++
++ /*
++ * No matter what situation, need read clear the IRQ status
++ * There is a bug, see Errata 5, HSD 2900918
++ */
++ int_sts = chan_readl(chan, HSU_CH_SR);
++
++ /* Rx channel */
++ if (chan->dirt == DMA_FROM_DEVICE)
++ hsu_dma_rx(up, int_sts);
++
++ /* Tx channel */
++ if (chan->dirt == DMA_TO_DEVICE) {
++ chan_writel(chan, HSU_CH_CR, 0x0);
++ up->dma_tx_on = 0;
++ hsu_dma_tx(up);
++ }
++
++exit:
++ spin_unlock_irqrestore(&up->port.lock, flags);
++ return;
++}
++
++static irqreturn_t dma_irq(int irq, void *dev_id)
++{
++ struct hsu_port *hsu = dev_id;
++ u32 int_sts, i;
++
++ int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);
++
++ /* Currently we only have 6 channels may be used */
++ for (i = 0; i < 6; i++) {
++ if (int_sts & 0x1)
++ dma_chan_irq(&hsu->chans[i]);
++ int_sts >>= 1;
++ }
++
++ return IRQ_HANDLED;
++}
++
++static unsigned int serial_hsu_tx_empty(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
++ spin_unlock_irqrestore(&up->port.lock, flags);
++
++ return ret;
++}
++
++static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ unsigned char status;
++ unsigned int ret;
++
++ status = serial_in(up, UART_MSR);
++
++ ret = 0;
++ if (status & UART_MSR_DCD)
++ ret |= TIOCM_CAR;
++ if (status & UART_MSR_RI)
++ ret |= TIOCM_RNG;
++ if (status & UART_MSR_DSR)
++ ret |= TIOCM_DSR;
++ if (status & UART_MSR_CTS)
++ ret |= TIOCM_CTS;
++ return ret;
++}
++
++static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ unsigned char mcr = 0;
++
++ if (mctrl & TIOCM_RTS)
++ mcr |= UART_MCR_RTS;
++ if (mctrl & TIOCM_DTR)
++ mcr |= UART_MCR_DTR;
++ if (mctrl & TIOCM_OUT1)
++ mcr |= UART_MCR_OUT1;
++ if (mctrl & TIOCM_OUT2)
++ mcr |= UART_MCR_OUT2;
++ if (mctrl & TIOCM_LOOP)
++ mcr |= UART_MCR_LOOP;
++
++ mcr |= up->mcr;
++
++ serial_out(up, UART_MCR, mcr);
++}
++
++static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ unsigned long flags;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++ if (break_state == -1)
++ up->lcr |= UART_LCR_SBC;
++ else
++ up->lcr &= ~UART_LCR_SBC;
++ serial_out(up, UART_LCR, up->lcr);
++ spin_unlock_irqrestore(&up->port.lock, flags);
++}
++
++/*
++ * What special to do:
++ * 1. chose the 64B fifo mode
++ * 2. make sure not to select half empty mode for A0 stepping
++ * 3. start dma or pio depends on configuration
++ * 4. we only allocate dma memory when needed
++ */
++static int serial_hsu_startup(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ unsigned long flags;
++
++ /*
++ * Clear the FIFO buffers and disable them.
++ * (they will be reenabled in set_termios())
++ */
++ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
++ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
++ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
++ serial_out(up, UART_FCR, 0);
++
++ /* Clear the interrupt registers. */
++ (void) serial_in(up, UART_LSR);
++ (void) serial_in(up, UART_RX);
++ (void) serial_in(up, UART_IIR);
++ (void) serial_in(up, UART_MSR);
++
++ /* Now, initialize the UART, default is 8n1 */
++ serial_out(up, UART_LCR, UART_LCR_WLEN8);
++
++ spin_lock_irqsave(&up->port.lock, flags);
++
++ up->port.mctrl |= TIOCM_OUT2;
++ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
++
++ /*
++ * Finally, enable interrupts. Note: Modem status interrupts
++ * are set via set_termios(), which will be occurring imminently
++ * anyway, so we don't enable them here.
++ */
++ if (!up->use_dma)
++ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
++ else
++ up->ier = 0;
++ serial_out(up, UART_IER, up->ier);
++
++ spin_unlock_irqrestore(&up->port.lock, flags);
++
++ /* DMA init */
++ if (up->use_dma) {
++ struct hsu_dma_buffer *dbuf;
++ struct circ_buf *xmit = &port->state->xmit;
++
++ up->dma_tx_on = 0;
++
++ /* First allocate the RX buffer */
++ dbuf = &up->rxbuf;
++ dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
++ if (!dbuf->buf) {
++ up->use_dma = 0;
++ goto exit;
++ }
++ dbuf->dma_addr = dma_map_single(port->dev,
++ dbuf->buf,
++ HSU_DMA_BUF_SIZE,
++ DMA_FROM_DEVICE);
++ dbuf->dma_size = HSU_DMA_BUF_SIZE;
++
++ /* Start the RX channel right now */
++ hsu_dma_start_rx_chan(up->rxc, dbuf);
++
++ /* Next init the TX DMA */
++ dbuf = &up->txbuf;
++ dbuf->buf = xmit->buf;
++ dbuf->dma_addr = dma_map_single(port->dev,
++ dbuf->buf,
++ UART_XMIT_SIZE,
++ DMA_TO_DEVICE);
++ dbuf->dma_size = UART_XMIT_SIZE;
++
++ /* This should not be changed all around */
++ chan_writel(up->txc, HSU_CH_BSR, 32);
++ chan_writel(up->txc, HSU_CH_MOTSR, 4);
++ dbuf->ofs = 0;
++ }
++
++exit:
++ /* And clear the interrupt registers again for luck. */
++ (void) serial_in(up, UART_LSR);
++ (void) serial_in(up, UART_RX);
++ (void) serial_in(up, UART_IIR);
++ (void) serial_in(up, UART_MSR);
++
++ up->running = 1;
++ return 0;
++}
++
++static void serial_hsu_shutdown(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ unsigned long flags;
++
++ del_timer_sync(&up->rxc->rx_timer);
++
++ /* Disable interrupts from this port */
++ up->ier = 0;
++ serial_out(up, UART_IER, 0);
++ up->running = 0;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++ up->port.mctrl &= ~TIOCM_OUT2;
++ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
++ spin_unlock_irqrestore(&up->port.lock, flags);
++
++ /* Disable break condition and FIFOs */
++ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
++ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
++ UART_FCR_CLEAR_RCVR |
++ UART_FCR_CLEAR_XMIT);
++ serial_out(up, UART_FCR, 0);
++}
++
++static void
++serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
++ struct ktermios *old)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ struct tty_struct *tty = port->state->port.tty;
++ unsigned char cval, fcr = 0;
++ unsigned long flags;
++ unsigned int baud, quot;
++ u32 mul = 0x3600;
++ u32 ps = 0x10;
++
++ switch (termios->c_cflag & CSIZE) {
++ case CS5:
++ cval = UART_LCR_WLEN5;
++ break;
++ case CS6:
++ cval = UART_LCR_WLEN6;
++ break;
++ case CS7:
++ cval = UART_LCR_WLEN7;
++ break;
++ default:
++ case CS8:
++ cval = UART_LCR_WLEN8;
++ break;
++ }
++
++ /* CMSPAR isn't supported by this driver */
++ if (tty)
++ tty->termios->c_cflag &= ~CMSPAR;
++
++ if (termios->c_cflag & CSTOPB)
++ cval |= UART_LCR_STOP;
++ if (termios->c_cflag & PARENB)
++ cval |= UART_LCR_PARITY;
++ if (!(termios->c_cflag & PARODD))
++ cval |= UART_LCR_EPAR;
++
++ /*
++ * For those basic low baud rate we can get the direct
++ * scalar from 2746800, like 115200 = 2746800/24, for those
++ * higher baud rate, we have to handle them case by case,
++ * but DIV reg is never touched as its default value 0x3d09
++ */
++ baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
++ quot = uart_get_divisor(port, baud);
++
++ switch (baud) {
++ case 3500000:
++ mul = 0x3345;
++ ps = 0xC;
++ quot = 1;
++ break;
++ case 2500000:
++ mul = 0x2710;
++ ps = 0x10;
++ quot = 1;
++ break;
++ case 18432000:
++ mul = 0x2400;
++ ps = 0x10;
++ quot = 1;
++ break;
++ case 1500000:
++ mul = 0x1D4C;
++ ps = 0xc;
++ quot = 1;
++ break;
++ default:
++ ;
++ }
++
++ if ((up->port.uartclk / quot) < (2400 * 16))
++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
++ else if ((up->port.uartclk / quot) < (230400 * 16))
++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
++ else
++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
++
++ fcr |= UART_FCR_HSU_64B_FIFO;
++#ifdef MFD_HSU_A0_STEPPING
++ /* A0 doesn't support half empty IRQ */
++ fcr |= UART_FCR_FULL_EMPT_TXI;
++#endif
++
++ /*
++ * Ok, we're now changing the port state. Do it with
++ * interrupts disabled.
++ */
++ spin_lock_irqsave(&up->port.lock, flags);
++
++ /* Update the per-port timeout */
++ uart_update_timeout(port, termios->c_cflag, baud);
++
++ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
++ if (termios->c_iflag & INPCK)
++ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
++ if (termios->c_iflag & (BRKINT | PARMRK))
++ up->port.read_status_mask |= UART_LSR_BI;
++
++ /* Characters to ignore */
++ up->port.ignore_status_mask = 0;
++ if (termios->c_iflag & IGNPAR)
++ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
++ if (termios->c_iflag & IGNBRK) {
++ up->port.ignore_status_mask |= UART_LSR_BI;
++ /*
++ * If we're ignoring parity and break indicators,
++ * ignore overruns too (for real raw support).
++ */
++ if (termios->c_iflag & IGNPAR)
++ up->port.ignore_status_mask |= UART_LSR_OE;
++ }
++
++ /* Ignore all characters if CREAD is not set */
++ if ((termios->c_cflag & CREAD) == 0)
++ up->port.ignore_status_mask |= UART_LSR_DR;
++
++ /*
++ * CTS flow control flag and modem status interrupts, disable
++ * MSI by default
++ */
++ up->ier &= ~UART_IER_MSI;
++ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
++ up->ier |= UART_IER_MSI;
++
++ serial_out(up, UART_IER, up->ier);
++
++ if (termios->c_cflag & CRTSCTS)
++ up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
++ else
++ up->mcr &= ~UART_MCR_AFE;
++
++ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
++ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
++ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
++ serial_out(up, UART_LCR, cval); /* reset DLAB */
++ serial_out(up, UART_MUL, mul); /* set MUL */
++ serial_out(up, UART_PS, ps); /* set PS */
++ up->lcr = cval; /* Save LCR */
++ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
++ serial_out(up, UART_FCR, fcr);
++ spin_unlock_irqrestore(&up->port.lock, flags);
++}
++
++static void
++serial_hsu_pm(struct uart_port *port, unsigned int state,
++ unsigned int oldstate)
++{
++}
++
++static void serial_hsu_release_port(struct uart_port *port)
++{
++}
++
++static int serial_hsu_request_port(struct uart_port *port)
++{
++ return 0;
++}
++
++static void serial_hsu_config_port(struct uart_port *port, int flags)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ up->port.type = PORT_MFD;
++}
++
++static int
++serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
++{
++ /* We don't want the core code to modify any port params */
++ return -EINVAL;
++}
++
++static const char *
++serial_hsu_type(struct uart_port *port)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++ return up->name;
++}
++
++/* Mainly for uart console use */
++static struct uart_hsu_port *serial_hsu_ports[3];
++static struct uart_driver serial_hsu_reg;
++
++#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
++
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
++
++/* Wait for transmitter & holding register to empty */
++static inline void wait_for_xmitr(struct uart_hsu_port *up)
++{
++ unsigned int status, tmout = 1000;
++
++ /* Wait up to 1ms for the character to be sent. */
++ do {
++ status = serial_in(up, UART_LSR);
++
++ if (status & UART_LSR_BI)
++ up->lsr_break_flag = UART_LSR_BI;
++
++ if (--tmout == 0)
++ break;
++ udelay(1);
++ } while (!(status & BOTH_EMPTY));
++
++ /* Wait up to 1s for flow control if necessary */
++ if (up->port.flags & UPF_CONS_FLOW) {
++ tmout = 1000000;
++ while (--tmout &&
++ ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
++ udelay(1);
++ }
++}
++
++static void serial_hsu_console_putchar(struct uart_port *port, int ch)
++{
++ struct uart_hsu_port *up =
++ container_of(port, struct uart_hsu_port, port);
++
++ wait_for_xmitr(up);
++ serial_out(up, UART_TX, ch);
++}
++
++/*
++ * Print a string to the serial port trying not to disturb
++ * any possible real use of the port...
++ *
++ * The console_lock must be held when we get here.
++ */
++static void
++serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
++{
++ struct uart_hsu_port *up = serial_hsu_ports[co->index];
++ unsigned long flags;
++ unsigned int ier;
++ int locked = 1;
++
++ local_irq_save(flags);
++ if (up->port.sysrq)
++ locked = 0;
++ else if (oops_in_progress) {
++ locked = spin_trylock(&up->port.lock);
++ } else
++ spin_lock(&up->port.lock);
++
++ /* First save the IER then disable the interrupts */
++ ier = serial_in(up, UART_IER);
++ serial_out(up, UART_IER, 0);
++
++ uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
++
++ /*
++ * Finally, wait for transmitter to become empty
++ * and restore the IER
++ */
++ wait_for_xmitr(up);
++ serial_out(up, UART_IER, ier);
++
++ if (locked)
++ spin_unlock(&up->port.lock);
++ local_irq_restore(flags);
++}
++
++static struct console serial_hsu_console;
++
++static int __init
++serial_hsu_console_setup(struct console *co, char *options)
++{
++ struct uart_hsu_port *up;
++ int baud = 115200;
++ int bits = 8;
++ int parity = 'n';
++ int flow = 'n';
++ int ret;
++
++ if (co->index == -1 || co->index >= serial_hsu_reg.nr)
++ co->index = 0;
++ up = serial_hsu_ports[co->index];
++ if (!up)
++ return -ENODEV;
++
++ if (options)
++ uart_parse_options(options, &baud, &parity, &bits, &flow);
++
++ ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
++
++ return ret;
++}
++
++static struct console serial_hsu_console = {
++ .name = "ttyMFD",
++ .write = serial_hsu_console_write,
++ .device = uart_console_device,
++ .setup = serial_hsu_console_setup,
++ .flags = CON_PRINTBUFFER,
++ .index = 2,
++ .data = &serial_hsu_reg,
++};
++#endif
++
++struct uart_ops serial_hsu_pops = {
++ .tx_empty = serial_hsu_tx_empty,
++ .set_mctrl = serial_hsu_set_mctrl,
++ .get_mctrl = serial_hsu_get_mctrl,
++ .stop_tx = serial_hsu_stop_tx,
++ .start_tx = serial_hsu_start_tx,
++ .stop_rx = serial_hsu_stop_rx,
++ .enable_ms = serial_hsu_enable_ms,
++ .break_ctl = serial_hsu_break_ctl,
++ .startup = serial_hsu_startup,
++ .shutdown = serial_hsu_shutdown,
++ .set_termios = serial_hsu_set_termios,
++ .pm = serial_hsu_pm,
++ .type = serial_hsu_type,
++ .release_port = serial_hsu_release_port,
++ .request_port = serial_hsu_request_port,
++ .config_port = serial_hsu_config_port,
++ .verify_port = serial_hsu_verify_port,
++};
++
++static struct uart_driver serial_hsu_reg = {
++ .owner = THIS_MODULE,
++ .driver_name = "MFD serial",
++ .dev_name = "ttyMFD",
++ .major = TTY_MAJOR,
++ .minor = 128,
++ .nr = 3,
++};
++
++#ifdef CONFIG_PM
++static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ void *priv = pci_get_drvdata(pdev);
++ struct uart_hsu_port *up;
++
++ /* Make sure this is not the internal dma controller */
++ if (priv && (pdev->device != 0x081E)) {
++ up = priv;
++ uart_suspend_port(&serial_hsu_reg, &up->port);
++ }
++
++ pci_save_state(pdev);
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ return 0;
++}
++
++static int serial_hsu_resume(struct pci_dev *pdev)
++{
++ void *priv = pci_get_drvdata(pdev);
++ struct uart_hsu_port *up;
++ int ret;
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++
++ ret = pci_enable_device(pdev);
++ if (ret)
++ dev_warn(&pdev->dev,
++ "HSU: can't re-enable device, try to continue\n");
++
++ if (priv && (pdev->device != 0x081E)) {
++ up = priv;
++ uart_resume_port(&serial_hsu_reg, &up->port);
++ }
++ return 0;
++}
++#else
++#define serial_hsu_suspend NULL
++#define serial_hsu_resume NULL
++#endif
++
++/* temp global pointer before we settle down on using one or four PCI dev */
++static struct hsu_port *phsu;
++
++static int serial_hsu_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct uart_hsu_port *uport;
++ int index, ret;
++
++ printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n",
++ pdev->vendor, pdev->device);
++
++ switch (pdev->device) {
++ case 0x081B:
++ index = 0;
++ break;
++ case 0x081C:
++ index = 1;
++ break;
++ case 0x081D:
++ index = 2;
++ break;
++ case 0x081E:
++ /* internal DMA controller */
++ index = 3;
++ break;
++ default:
++ dev_err(&pdev->dev, "HSU: out of index!");
++ return -ENODEV;
++ }
++
++ ret = pci_enable_device(pdev);
++ if (ret)
++ return ret;
++
++ if (index == 3) {
++ /* DMA controller */
++ ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
++ if (ret) {
++ dev_err(&pdev->dev, "can not get IRQ\n");
++ goto err_disable;
++ }
++ pci_set_drvdata(pdev, phsu);
++ } else {
++ /* UART port 0~2 */
++ uport = &phsu->port[index];
++ uport->port.irq = pdev->irq;
++ uport->port.dev = &pdev->dev;
++ uport->dev = &pdev->dev;
++
++ ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
++ if (ret) {
++ dev_err(&pdev->dev, "can not get IRQ\n");
++ goto err_disable;
++ }
++ uart_add_one_port(&serial_hsu_reg, &uport->port);
++
++#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
++ if (index == 2) {
++ register_console(&serial_hsu_console);
++ uport->port.cons = &serial_hsu_console;
++ }
++#endif
++ pci_set_drvdata(pdev, uport);
++ }
++
++ return 0;
++
++err_disable:
++ pci_disable_device(pdev);
++ return ret;
++}
++
++static void hsu_dma_rx_timeout(unsigned long data)
++{
++ struct hsu_dma_chan *chan = (void *)data;
++ struct uart_hsu_port *up = chan->uport;
++ struct hsu_dma_buffer *dbuf = &up->rxbuf;
++ int count = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++
++ count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
++
++ if (!count) {
++ mod_timer(&chan->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ);
++ goto exit;
++ }
++
++ hsu_dma_rx(up, 0);
++exit:
++ spin_unlock_irqrestore(&up->port.lock, flags);
++}
++
++static void hsu_global_init(void)
++{
++ struct hsu_port *hsu;
++ struct uart_hsu_port *uport;
++ struct hsu_dma_chan *dchan;
++ int i, ret;
++
++ hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
++ if (!hsu)
++ return;
++
++ /* Get basic io resource and map it */
++ hsu->paddr = 0xffa28000;
++ hsu->iolen = 0x1000;
++
++ if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
++ pr_warning("HSU: error in request mem region\n");
++
++ hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
++ if (!hsu->reg) {
++ pr_err("HSU: error in ioremap\n");
++ ret = -ENOMEM;
++ goto err_free_region;
++ }
++
++ /* Initialise the 3 UART ports */
++ uport = hsu->port;
++ for (i = 0; i < 3; i++) {
++ uport->port.type = PORT_MFD;
++ uport->port.iotype = UPIO_MEM;
++ uport->port.mapbase = (resource_size_t)hsu->paddr
++ + HSU_PORT_REG_OFFSET
++ + i * HSU_PORT_REG_LENGTH;
++ uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
++ + i * HSU_PORT_REG_LENGTH;
++
++ sprintf(uport->name, "hsu_port%d", i);
++ uport->port.fifosize = 64;
++ uport->port.ops = &serial_hsu_pops;
++ uport->port.line = i;
++ uport->port.flags = UPF_IOREMAP;
++ /* set the scalable maxim support rate to 2746800 bps */
++ uport->port.uartclk = 115200 * 24 * 16;
++
++ uport->running = 0;
++ uport->txc = &hsu->chans[i * 2];
++ uport->rxc = &hsu->chans[i * 2 + 1];
++
++ serial_hsu_ports[i] = uport;
++ uport->index = i;
++
++ if (hsu_dma_enable & (1<<i))
++ uport->use_dma = 1;
++ else
++ uport->use_dma = 0;
++
++ uport++;
++ }
++
++ /* Initialise 6 dma channels */
++ dchan = hsu->chans;
++ for (i = 0; i < 6; i++) {
++ dchan->id = i;
++ dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++ dchan->uport = &hsu->port[i/2];
++ dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
++ i * HSU_DMA_CHANS_REG_LENGTH;
++
++ /* Work around for RX */
++ if (dchan->dirt == DMA_FROM_DEVICE) {
++ init_timer(&dchan->rx_timer);
++ dchan->rx_timer.function = hsu_dma_rx_timeout;
++ dchan->rx_timer.data = (unsigned long)dchan;
++ }
++ dchan++;
++ }
++
++ phsu = hsu;
++
++ hsu_debugfs_init(hsu);
++ return;
++
++err_free_region:
++ release_mem_region(hsu->paddr, hsu->iolen);
++ kfree(hsu);
++ return;
++}
++
++static void serial_hsu_remove(struct pci_dev *pdev)
++{
++ struct hsu_port *hsu;
++ int i;
++
++ hsu = pci_get_drvdata(pdev);
++ if (!hsu)
++ return;
++
++ for (i = 0; i < 3; i++)
++ uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port);
++
++ pci_set_drvdata(pdev, NULL);
++ free_irq(hsu->irq, hsu);
++ pci_disable_device(pdev);
++}
++
++/* First 3 are UART ports, and the 4th is the DMA */
++static const struct pci_device_id pci_ids[] __devinitdata = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
++ {},
++};
++
++static struct pci_driver hsu_pci_driver = {
++ .name = "HSU serial",
++ .id_table = pci_ids,
++ .probe = serial_hsu_probe,
++ .remove = __devexit_p(serial_hsu_remove),
++ .suspend = serial_hsu_suspend,
++ .resume = serial_hsu_resume,
++};
++
++static int __init hsu_pci_init(void)
++{
++ int ret;
++
++ hsu_global_init();
++
++ ret = uart_register_driver(&serial_hsu_reg);
++ if (ret)
++ return ret;
++
++ return pci_register_driver(&hsu_pci_driver);
++}
++
++static void __exit hsu_pci_exit(void)
++{
++ pci_unregister_driver(&hsu_pci_driver);
++ uart_unregister_driver(&serial_hsu_reg);
++
++ hsu_debugfs_remove(phsu);
++
++ kfree(phsu);
++}
++
++module_init(hsu_pci_init);
++module_exit(hsu_pci_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:medfield-hsu");
+--- /dev/null
++++ b/drivers/serial/mrst_max3110.c
+@@ -0,0 +1,917 @@
++/*
++ * mrst_max3110.c - spi uart protocol driver for Maxim 3110
++ *
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*
++ * Note:
++ * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
++ * 1 word. If SPI master controller doesn't support sclk frequency change,
++ * then the char need be sent out one by one with some delay
++ *
++ * 2. Currently only RX availabe interrrupt is used, no need for waiting TXE
++ * interrupt for a low speed UART device
++ */
++
++#include <linux/module.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial_core.h>
++#include <linux/serial_reg.h>
++
++#include <linux/kthread.h>
++#include <linux/spi/spi.h>
++
++#include "mrst_max3110.h"
++
++#define PR_FMT "mrst_max3110: "
++
++#define UART_TX_NEEDED 1
++#define CON_TX_NEEDED 2
++#define BIT_IRQ_PENDING 3
++
++struct uart_max3110 {
++ struct uart_port port;
++ struct spi_device *spi;
++ char name[24];
++
++ wait_queue_head_t wq;
++ struct task_struct *main_thread;
++ struct task_struct *read_thread;
++ struct mutex thread_mutex;;
++
++ u32 baud;
++ u16 cur_conf;
++ u8 clock;
++ u8 parity, word_7bits;
++ u16 irq;
++
++ unsigned long uart_flags;
++
++ /* console related */
++ struct circ_buf con_xmit;
++};
++
++/* global data structure, may need be removed */
++static struct uart_max3110 *pmax;
++
++static void receive_chars(struct uart_max3110 *max,
++ unsigned char *str, int len);
++static int max3110_read_multi(struct uart_max3110 *max, u8 *buf);
++static void max3110_con_receive(struct uart_max3110 *max);
++
++static int max3110_write_then_read(struct uart_max3110 *max,
++ const void *txbuf, void *rxbuf, unsigned len, int always_fast)
++{
++ struct spi_device *spi = max->spi;
++ struct spi_message message;
++ struct spi_transfer x;
++ int ret;
++
++ spi_message_init(&message);
++ memset(&x, 0, sizeof x);
++ x.len = len;
++ x.tx_buf = txbuf;
++ x.rx_buf = rxbuf;
++ spi_message_add_tail(&x, &message);
++
++ if (always_fast)
++ x.speed_hz = spi->max_speed_hz;
++ else if (max->baud)
++ x.speed_hz = max->baud;
++
++ /* Do the i/o */
++ ret = spi_sync(spi, &message);
++ return ret;
++}
++
++/* Write a 16b word to the device */
++static int max3110_out(struct uart_max3110 *max, const u16 out)
++{
++ void *buf;
++ u16 *obuf, *ibuf;
++ u8 ch;
++ int ret;
++
++ buf = kzalloc(8, GFP_KERNEL | GFP_DMA);
++ if (!buf)
++ return -ENOMEM;
++
++ obuf = buf;
++ ibuf = buf + 4;
++ *obuf = out;
++ ret = max3110_write_then_read(max, obuf, ibuf, 2, 1);
++ if (ret) {
++ pr_warning(PR_FMT "%s(): get err msg %d when sending 0x%x\n",
++ __func__, ret, out);
++ goto exit;
++ }
++
++ /* If some valid data is read back */
++ if (*ibuf & MAX3110_READ_DATA_AVAILABLE) {
++ ch = *ibuf & 0xff;
++ receive_chars(max, &ch, 1);
++ }
++
++exit:
++ kfree(buf);
++ return ret;
++}
++
++/*
++ * This is usually used to read data from SPIC RX FIFO, which doesn't
++ * need any delay like flushing character out.
++ *
++ * Return how many valide bytes are read back
++ */
++static int max3110_read_multi(struct uart_max3110 *max, u8 *rxbuf)
++{
++ void *buf;
++ u16 *obuf, *ibuf;
++ u8 *pbuf, valid_str[M3110_RX_FIFO_DEPTH];
++ int i, j, blen;
++
++ blen = M3110_RX_FIFO_DEPTH * sizeof(u16);
++ buf = kzalloc(blen * 2, GFP_KERNEL | GFP_DMA);
++ if (!buf) {
++ pr_warning(PR_FMT "%s(): fail to alloc dma buffer\n", __func__);
++ return 0;
++ }
++
++ /* tx/rx always have the same length */
++ obuf = buf;
++ ibuf = buf + blen;
++
++ if (max3110_write_then_read(max, obuf, ibuf, blen, 1)) {
++ kfree(buf);
++ return 0;
++ }
++
++ /* If caller doesn't provide a buffer, then handle received char */
++ pbuf = rxbuf ? rxbuf : valid_str;
++
++ for (i = 0, j = 0; i < M3110_RX_FIFO_DEPTH; i++) {
++ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
++ pbuf[j++] = ibuf[i] & 0xff;
++ }
++
++ if (j && (pbuf == valid_str))
++ receive_chars(max, valid_str, j);
++
++ kfree(buf);
++ return j;
++}
++
++static void serial_m3110_con_putchar(struct uart_port *port, int ch)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ struct circ_buf *xmit = &max->con_xmit;
++
++ if (uart_circ_chars_free(xmit)) {
++ xmit->buf[xmit->head] = (char)ch;
++ xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
++ }
++}
++
++/*
++ * Print a string to the serial port trying not to disturb
++ * any possible real use of the port...
++ *
++ * The console_lock must be held when we get here.
++ */
++static void serial_m3110_con_write(struct console *co,
++ const char *s, unsigned int count)
++{
++ if (!pmax)
++ return;
++
++ uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
++
++ if (!test_and_set_bit(CON_TX_NEEDED, &pmax->uart_flags))
++ wake_up_process(pmax->main_thread);
++}
++
++static int __init
++serial_m3110_con_setup(struct console *co, char *options)
++{
++ struct uart_max3110 *max = pmax;
++ int baud = 115200;
++ int bits = 8;
++ int parity = 'n';
++ int flow = 'n';
++
++ pr_info(PR_FMT "setting up console\n");
++
++ if (co->index == -1)
++ co->index = 0;
++
++ if (!max) {
++ pr_err(PR_FMT "pmax is NULL, return");
++ return -ENODEV;
++ }
++
++ if (options)
++ uart_parse_options(options, &baud, &parity, &bits, &flow);
++
++ return uart_set_options(&max->port, co, baud, parity, bits, flow);
++}
++
++static struct tty_driver *serial_m3110_con_device(struct console *co,
++ int *index)
++{
++ struct uart_driver *p = co->data;
++ *index = co->index;
++ return p->tty_driver;
++}
++
++static struct uart_driver serial_m3110_reg;
++static struct console serial_m3110_console = {
++ .name = "ttyS",
++ .write = serial_m3110_con_write,
++ .device = serial_m3110_con_device,
++ .setup = serial_m3110_con_setup,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++ .data = &serial_m3110_reg,
++};
++
++static unsigned int serial_m3110_tx_empty(struct uart_port *port)
++{
++ return 1;
++}
++
++static void serial_m3110_stop_tx(struct uart_port *port)
++{
++ return;
++}
++
++/* stop_rx will be called in spin_lock env */
++static void serial_m3110_stop_rx(struct uart_port *port)
++{
++ return;
++}
++
++#define WORDS_PER_XFER 128
++static void send_circ_buf(struct uart_max3110 *max,
++ struct circ_buf *xmit)
++{
++ void *buf;
++ u16 *obuf, *ibuf;
++ u8 valid_str[WORDS_PER_XFER];
++ int i, j, len, blen, dma_size, left, ret = 0;
++
++
++ dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
++ buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
++ if (!buf)
++ return;
++ obuf = buf;
++ ibuf = buf + dma_size/2;
++
++ while (!uart_circ_empty(xmit)) {
++ left = uart_circ_chars_pending(xmit);
++ while (left) {
++ len = min(left, WORDS_PER_XFER);
++ blen = len * sizeof(u16);
++ memset(ibuf, 0, blen);
++
++ for (i = 0; i < len; i++) {
++ obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
++ xmit->tail = (xmit->tail + 1) &
++ (UART_XMIT_SIZE - 1);
++ }
++
++ /* Fail to send msg to console is not very critical */
++ ret = max3110_write_then_read(max, obuf, ibuf, blen, 0);
++ if (ret)
++ pr_warning(PR_FMT "%s(): get err msg %d\n",
++ __func__, ret);
++
++ for (i = 0, j = 0; i < len; i++) {
++ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
++ valid_str[j++] = ibuf[i] & 0xff;
++ }
++
++ if (j)
++ receive_chars(max, valid_str, j);
++
++ max->port.icount.tx += len;
++ left -= len;
++ }
++ }
++
++ kfree(buf);
++}
++
++static void transmit_char(struct uart_max3110 *max)
++{
++ struct uart_port *port = &max->port;
++ struct circ_buf *xmit = &port->state->xmit;
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
++ return;
++
++ send_circ_buf(max, xmit);
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(port);
++
++ if (uart_circ_empty(xmit))
++ serial_m3110_stop_tx(port);
++}
++
++/*
++ * This will be called by uart_write() and tty_write, can't
++ * go to sleep
++ */
++static void serial_m3110_start_tx(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++
++ if (!test_and_set_bit(UART_TX_NEEDED, &max->uart_flags))
++ wake_up_process(max->main_thread);
++}
++
++static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
++{
++ struct uart_port *port = &max->port;
++ struct tty_struct *tty;
++ int usable;
++
++ /* If uart is not opened, just return */
++ if (!port->state)
++ return;
++
++ tty = port->state->port.tty;
++ if (!tty)
++ return;
++
++ while (len) {
++ usable = tty_buffer_request_room(tty, len);
++ if (usable) {
++ tty_insert_flip_string(tty, str, usable);
++ str += usable;
++ port->icount.rx += usable;
++ }
++ len -= usable;
++ }
++ tty_flip_buffer_push(tty);
++}
++
++/*
++ * This routine will be used in read_thread or RX IRQ handling,
++ * it will first do one round buffer read(8 words), if there is some
++ * valid RX data, will try to read 5 more rounds till all data
++ * is read out.
++ *
++ * Use stack space as data buffer to save some system load, and chose
++ * 504 Btyes as a threadhold to do a bulk push to upper tty layer when
++ * receiving bulk data, a much bigger buffer may cause stack overflow
++ */
++static void max3110_con_receive(struct uart_max3110 *max)
++{
++ int loop = 1, num, total = 0;
++ u8 recv_buf[512], *pbuf;
++
++ pbuf = recv_buf;
++ do {
++ num = max3110_read_multi(max, pbuf);
++
++ if (num) {
++ loop = 5;
++ pbuf += num;
++ total += num;
++
++ if (total >= 504) {
++ receive_chars(max, recv_buf, total);
++ pbuf = recv_buf;
++ total = 0;
++ }
++ }
++ } while (--loop);
++
++ if (total)
++ receive_chars(max, recv_buf, total);
++}
++
++static int max3110_main_thread(void *_max)
++{
++ struct uart_max3110 *max = _max;
++ wait_queue_head_t *wq = &max->wq;
++ int ret = 0;
++ struct circ_buf *xmit = &max->con_xmit;
++
++ init_waitqueue_head(wq);
++ pr_info(PR_FMT "start main thread\n");
++
++ do {
++ wait_event_interruptible(*wq, max->uart_flags || kthread_should_stop());
++
++ mutex_lock(&max->thread_mutex);
++
++ if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
++ max3110_con_receive(max);
++
++ /* first handle console output */
++ if (test_and_clear_bit(CON_TX_NEEDED, &max->uart_flags))
++ send_circ_buf(max, xmit);
++
++ /* handle uart output */
++ if (test_and_clear_bit(UART_TX_NEEDED, &max->uart_flags))
++ transmit_char(max);
++
++ mutex_unlock(&max->thread_mutex);
++
++ } while (!kthread_should_stop());
++
++ return ret;
++}
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
++{
++ struct uart_max3110 *max = dev_id;
++
++ /* max3110's irq is a falling edge, not level triggered,
++ * so no need to disable the irq */
++ if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
++ wake_up_process(max->main_thread);
++
++ return IRQ_HANDLED;
++}
++#else
++/* if don't use RX IRQ, then need a thread to polling read */
++static int max3110_read_thread(void *_max)
++{
++ struct uart_max3110 *max = _max;
++
++ pr_info(PR_FMT "start read thread\n");
++ do {
++ /*
++ * If can't acquire the mutex, it means the main thread
++ * is running which will also perform the rx job
++ */
++ if (mutex_trylock(&max->thread_mutex)) {
++ max3110_con_receive(max);
++ mutex_unlock(&max->thread_mutex);
++ }
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(HZ / 20);
++ } while (!kthread_should_stop());
++
++ return 0;
++}
++#endif
++
++static int serial_m3110_startup(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ u16 config = 0;
++ int ret = 0;
++
++ if (port->line != 0) {
++ pr_err(PR_FMT "uart port startup failed\n");
++ return -1;
++ }
++
++ /* Disable all IRQ and config it to 115200, 8n1 */
++ config = WC_TAG | WC_FIFO_ENABLE
++ | WC_1_STOPBITS
++ | WC_8BIT_WORD
++ | WC_BAUD_DR2;
++
++ /* as we use thread to handle tx/rx, need set low latency */
++ port->state->port.tty->low_latency = 1;
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ ret = request_irq(max->irq, serial_m3110_irq,
++ IRQ_TYPE_EDGE_FALLING, "max3110", max);
++ if (ret)
++ return ret;
++
++ /* Enable RX IRQ only */
++ config |= WC_RXA_IRQ_ENABLE;
++#else
++ /* If IRQ is disabled, start a read thread for input data */
++ max->read_thread =
++ kthread_run(max3110_read_thread, max, "max3110_read");
++ if (IS_ERR(max->read_thread)) {
++ ret = PTR_ERR(max->read_thread);
++ max->read_thread = NULL;
++ pr_err(PR_FMT "Can't create read thread!");
++ return ret;
++ }
++#endif
++
++ ret = max3110_out(max, config);
++ if (ret) {
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ free_irq(max->irq, max);
++#else
++ kthread_stop(max->read_thread);
++ max->read_thread = NULL;
++#endif
++ return ret;
++ }
++
++ max->cur_conf = config;
++ return 0;
++}
++
++static void serial_m3110_shutdown(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ u16 config;
++
++ if (max->read_thread) {
++ kthread_stop(max->read_thread);
++ max->read_thread = NULL;
++ }
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ free_irq(max->irq, max);
++#endif
++
++ /* Disable interrupts from this port */
++ config = WC_TAG | WC_SW_SHDI;
++ max3110_out(max, config);
++}
++
++static void serial_m3110_release_port(struct uart_port *port)
++{
++}
++
++static int serial_m3110_request_port(struct uart_port *port)
++{
++ return 0;
++}
++
++static void serial_m3110_config_port(struct uart_port *port, int flags)
++{
++ port->type = PORT_MAX3100;
++}
++
++static int
++serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
++{
++ /* we don't want the core code to modify any port params */
++ return -EINVAL;
++}
++
++
++static const char *serial_m3110_type(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ return max->name;
++}
++
++static void
++serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
++ struct ktermios *old)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ unsigned char cval;
++ unsigned int baud, parity = 0;
++ int clk_div = -1;
++ u16 new_conf = max->cur_conf;
++
++ switch (termios->c_cflag & CSIZE) {
++ case CS7:
++ cval = UART_LCR_WLEN7;
++ new_conf |= WC_7BIT_WORD;
++ break;
++ default:
++ /* We only support CS7 & CS8 */
++ termios->c_cflag &= ~CSIZE;
++ termios->c_cflag |= CS8;
++ case CS8:
++ cval = UART_LCR_WLEN8;
++ new_conf |= WC_8BIT_WORD;
++ break;
++ }
++
++ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
++
++ /* First calc the div for 1.8MHZ clock case */
++ switch (baud) {
++ case 300:
++ clk_div = WC_BAUD_DR384;
++ break;
++ case 600:
++ clk_div = WC_BAUD_DR192;
++ break;
++ case 1200:
++ clk_div = WC_BAUD_DR96;
++ break;
++ case 2400:
++ clk_div = WC_BAUD_DR48;
++ break;
++ case 4800:
++ clk_div = WC_BAUD_DR24;
++ break;
++ case 9600:
++ clk_div = WC_BAUD_DR12;
++ break;
++ case 19200:
++ clk_div = WC_BAUD_DR6;
++ break;
++ case 38400:
++ clk_div = WC_BAUD_DR3;
++ break;
++ case 57600:
++ clk_div = WC_BAUD_DR2;
++ break;
++ case 115200:
++ clk_div = WC_BAUD_DR1;
++ break;
++ case 230400:
++ if (max->clock & MAX3110_HIGH_CLK)
++ break;
++ default:
++ /* Pick the previous baud rate */
++ baud = max->baud;
++ clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++
++ if (max->clock & MAX3110_HIGH_CLK) {
++ clk_div += 1;
++ /* High clk version max3110 doesn't support B300 */
++ if (baud == 300) {
++ baud = 600;
++ clk_div = WC_BAUD_DR384;
++ }
++ if (baud == 230400)
++ clk_div = WC_BAUD_DR1;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++
++ new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
++
++ if (unlikely(termios->c_cflag & CMSPAR))
++ termios->c_cflag &= ~CMSPAR;
++
++ if (termios->c_cflag & CSTOPB)
++ new_conf |= WC_2_STOPBITS;
++ else
++ new_conf &= ~WC_2_STOPBITS;
++
++ if (termios->c_cflag & PARENB) {
++ new_conf |= WC_PARITY_ENABLE;
++ parity |= UART_LCR_PARITY;
++ } else
++ new_conf &= ~WC_PARITY_ENABLE;
++
++ if (!(termios->c_cflag & PARODD))
++ parity |= UART_LCR_EPAR;
++ max->parity = parity;
++
++ uart_update_timeout(port, termios->c_cflag, baud);
++
++ new_conf |= WC_TAG;
++ if (new_conf != max->cur_conf) {
++ if (!max3110_out(max, new_conf)) {
++ max->cur_conf = new_conf;
++ max->baud = baud;
++ }
++ }
++}
++
++/* Don't handle hw handshaking */
++static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
++{
++ return TIOCM_DSR | TIOCM_CAR | TIOCM_DSR;
++}
++
++static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++}
++
++static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
++{
++}
++
++static void serial_m3110_pm(struct uart_port *port, unsigned int state,
++ unsigned int oldstate)
++{
++}
++
++static void serial_m3110_enable_ms(struct uart_port *port)
++{
++}
++
++struct uart_ops serial_m3110_ops = {
++ .tx_empty = serial_m3110_tx_empty,
++ .set_mctrl = serial_m3110_set_mctrl,
++ .get_mctrl = serial_m3110_get_mctrl,
++ .stop_tx = serial_m3110_stop_tx,
++ .start_tx = serial_m3110_start_tx,
++ .stop_rx = serial_m3110_stop_rx,
++ .enable_ms = serial_m3110_enable_ms,
++ .break_ctl = serial_m3110_break_ctl,
++ .startup = serial_m3110_startup,
++ .shutdown = serial_m3110_shutdown,
++ .set_termios = serial_m3110_set_termios,
++ .pm = serial_m3110_pm,
++ .type = serial_m3110_type,
++ .release_port = serial_m3110_release_port,
++ .request_port = serial_m3110_request_port,
++ .config_port = serial_m3110_config_port,
++ .verify_port = serial_m3110_verify_port,
++};
++
++static struct uart_driver serial_m3110_reg = {
++ .owner = THIS_MODULE,
++ .driver_name = "MRST serial",
++ .dev_name = "ttyS",
++ .major = TTY_MAJOR,
++ .minor = 64,
++ .nr = 1,
++ .cons = &serial_m3110_console,
++};
++
++#ifdef CONFIG_PM
++static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
++{
++ struct uart_max3110 *max = spi_get_drvdata(spi);
++
++ disable_irq(max->irq);
++ uart_suspend_port(&serial_m3110_reg, &max->port);
++ max3110_out(max, max->cur_conf | WC_SW_SHDI);
++ return 0;
++}
++
++static int serial_m3110_resume(struct spi_device *spi)
++{
++ struct uart_max3110 *max = spi_get_drvdata(spi);
++
++ max3110_out(max, max->cur_conf);
++ uart_resume_port(&serial_m3110_reg, &max->port);
++ enable_irq(max->irq);
++ return 0;
++}
++#else
++#define serial_m3110_suspend NULL
++#define serial_m3110_resume NULL
++#endif
++
++static int __devinit serial_m3110_probe(struct spi_device *spi)
++{
++ struct uart_max3110 *max;
++ void *buffer;
++ u16 res;
++ int ret = 0;
++
++ max = kzalloc(sizeof(*max), GFP_KERNEL);
++ if (!max)
++ return -ENOMEM;
++
++ /* Set spi info */
++ spi->bits_per_word = 16;
++ max->clock = MAX3110_HIGH_CLK;
++
++ spi_setup(spi);
++
++ max->port.type = PORT_MAX3100; /* need apply for a max3110 type */
++ max->port.fifosize = 2; /* only have 16b buffer */
++ max->port.ops = &serial_m3110_ops;
++ max->port.line = 0;
++ max->port.dev = &spi->dev;
++ max->port.uartclk = 115200;
++
++ max->spi = spi;
++ strcpy(max->name, spi->modalias);
++ max->irq = (u16)spi->irq;
++
++ mutex_init(&max->thread_mutex);
++
++ max->word_7bits = 0;
++ max->parity = 0;
++ max->baud = 0;
++
++ max->cur_conf = 0;
++ max->uart_flags = 0;
++
++ /* Check if reading configuration register returns something sane */
++
++ res = RC_TAG;
++ ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
++ if (ret < 0 || res == 0 || res == 0xffff) {
++ printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
++ res);
++ ret = -ENODEV;
++ goto err_get_page;
++ }
++
++ buffer = (void *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ ret = -ENOMEM;
++ goto err_get_page;
++ }
++ max->con_xmit.buf = buffer;
++ max->con_xmit.head = 0;
++ max->con_xmit.tail = 0;
++
++ max->main_thread = kthread_run(max3110_main_thread,
++ max, "max3110_main");
++ if (IS_ERR(max->main_thread)) {
++ ret = PTR_ERR(max->main_thread);
++ goto err_kthread;
++ }
++
++ spi_set_drvdata(spi, max);
++ pmax = max;
++
++ /* give membase a psudo value to pass serial_core's check */
++ max->port.membase = (void *)0xff110000;
++ uart_add_one_port(&serial_m3110_reg, &max->port);
++
++ return 0;
++
++err_kthread:
++ free_page((unsigned long)buffer);
++err_get_page:
++ kfree(max);
++ return ret;
++}
++
++static int __devexit serial_m3110_remove(struct spi_device *dev)
++{
++ struct uart_max3110 *max = spi_get_drvdata(dev);
++
++ if (!max)
++ return 0;
++
++ uart_remove_one_port(&serial_m3110_reg, &max->port);
++
++ free_page((unsigned long)max->con_xmit.buf);
++
++ if (max->main_thread)
++ kthread_stop(max->main_thread);
++
++ kfree(max);
++ return 0;
++}
++
++static struct spi_driver uart_max3110_driver = {
++ .driver = {
++ .name = "spi_max3111",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = serial_m3110_probe,
++ .remove = __devexit_p(serial_m3110_remove),
++ .suspend = serial_m3110_suspend,
++ .resume = serial_m3110_resume,
++};
++
++static int __init serial_m3110_init(void)
++{
++ int ret = 0;
++
++ ret = uart_register_driver(&serial_m3110_reg);
++ if (ret)
++ return ret;
++
++ ret = spi_register_driver(&uart_max3110_driver);
++ if (ret)
++ uart_unregister_driver(&serial_m3110_reg);
++
++ return ret;
++}
++
++static void __exit serial_m3110_exit(void)
++{
++ spi_unregister_driver(&uart_max3110_driver);
++ uart_unregister_driver(&serial_m3110_reg);
++}
++
++module_init(serial_m3110_init);
++module_exit(serial_m3110_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("max3110-uart");
+--- /dev/null
++++ b/drivers/serial/mrst_max3110.h
+@@ -0,0 +1,60 @@
++#ifndef _MRST_MAX3110_H
++#define _MRST_MAX3110_H
++
++#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHZ */
++#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHZ */
++
++/* status bits for all 4 MAX3110 operate modes */
++#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
++#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
++
++#define WC_TAG (3 << 14)
++#define RC_TAG (1 << 14)
++#define WD_TAG (2 << 14)
++#define RD_TAG (0 << 14)
++
++/* bits def for write configuration */
++#define WC_FIFO_ENABLE_MASK (1 << 13)
++#define WC_FIFO_ENABLE (0 << 13)
++
++#define WC_SW_SHDI (1 << 12)
++
++#define WC_IRQ_MASK (0xF << 8)
++#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
++#define WC_RXA_IRQ_ENABLE (1 << 10) /* RX availabe irq */
++#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
++#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
++
++#define WC_IRDA_ENABLE (1 << 7)
++
++#define WC_STOPBITS_MASK (1 << 6)
++#define WC_2_STOPBITS (1 << 6)
++#define WC_1_STOPBITS (0 << 6)
++
++#define WC_PARITY_ENABLE_MASK (1 << 5)
++#define WC_PARITY_ENABLE (1 << 5)
++
++#define WC_WORDLEN_MASK (1 << 4)
++#define WC_7BIT_WORD (1 << 4)
++#define WC_8BIT_WORD (0 << 4)
++
++#define WC_BAUD_DIV_MASK (0xF)
++#define WC_BAUD_DR1 (0x0)
++#define WC_BAUD_DR2 (0x1)
++#define WC_BAUD_DR4 (0x2)
++#define WC_BAUD_DR8 (0x3)
++#define WC_BAUD_DR16 (0x4)
++#define WC_BAUD_DR32 (0x5)
++#define WC_BAUD_DR64 (0x6)
++#define WC_BAUD_DR128 (0x7)
++#define WC_BAUD_DR3 (0x8)
++#define WC_BAUD_DR6 (0x9)
++#define WC_BAUD_DR12 (0xA)
++#define WC_BAUD_DR24 (0xB)
++#define WC_BAUD_DR48 (0xC)
++#define WC_BAUD_DR96 (0xD)
++#define WC_BAUD_DR192 (0xE)
++#define WC_BAUD_DR384 (0xF)
++
++#define M3110_RX_FIFO_DEPTH 8
++#endif
+--- a/drivers/sfi/Kconfig
++++ b/drivers/sfi/Kconfig
+@@ -4,6 +4,7 @@
+
+ menuconfig SFI
+ bool "SFI (Simple Firmware Interface) Support"
++ select I2C
+ ---help---
+ The Simple Firmware Interface (SFI) provides a lightweight method
+ for platform firmware to pass information to the operating system
+@@ -15,3 +16,13 @@
+ For more information, see http://simplefirmware.org
+
+ Say 'Y' here to enable the kernel to boot on SFI-only platforms.
++config SFI_PROCESSOR_PM
++ bool "SFI Processor Power Management"
++ depends on SFI && X86_LOCAL_APIC
++ default y
++
++config SFI_CPUIDLE
++ bool "SFI Processor C-State driver"
++ depends on SFI_PROCESSOR_PM && CPU_IDLE
++ default y
++
+--- a/drivers/sfi/Makefile
++++ b/drivers/sfi/Makefile
+@@ -1,3 +1,9 @@
+ obj-y += sfi_acpi.o
+ obj-y += sfi_core.o
+
++sfi-processor-objs += sfi_processor_core.o
++sfi-processor-objs += sfi_processor_idle.o
++sfi-processor-objs += sfi_processor_perflib.o
++
++obj-$(CONFIG_SFI_PROCESSOR_PM) += sfi-processor.o
++
+--- /dev/null
++++ b/drivers/sfi/sfi_processor_core.c
+@@ -0,0 +1,205 @@
++/*
++ * sfi_processor_core.c - sfi based c-state driver
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/sfi.h>
++#include <linux/cpu.h>
++#include <linux/sfi_processor.h>
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Processor enumeration based on SFI table.");
++
++DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
++
++#define SFI_CPU_MAX 8
++
++static u32 sfi_cpu_num;
++static struct sfi_cpu_table_entry sfi_cpu_array[SFI_CPU_MAX];
++
++struct cpuidle_driver sfi_idle_driver = {
++ .name = "sfi_idle",
++ .owner = THIS_MODULE,
++};
++
++static int __init parse_cpus(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_cpu_table_entry *pentry;
++ int i;
++
++ sb = (struct sfi_table_simple *)table;
++
++ sfi_cpu_num = SFI_GET_NUM_ENTRIES(sb, u64);
++
++ pentry = (struct sfi_cpu_table_entry *) sb->pentry;
++ for (i = 0; i < sfi_cpu_num; i++) {
++ sfi_cpu_array[i].apic_id = pentry->apic_id;
++ printk(KERN_INFO "APIC ID: %d\n", pentry->apic_id);
++ pentry++;
++ }
++
++ return 0;
++
++}
++
++static int alloc_cstates(int cpu, u32 count)
++{
++ struct sfi_processor *pr;
++ u32 totallen = count * sizeof(struct sfi_cstate_table_entry);
++
++ pr = per_cpu(sfi_processors, cpu);
++ pr->power.sfi_cstates = kzalloc(totallen, GFP_KERNEL);
++ if (!pr->power.sfi_cstates)
++ return -ENOMEM;
++
++ totallen = count * sizeof(struct cpuidle_state);
++ pr->power.states = kzalloc(totallen, GFP_KERNEL);
++ if (!pr->power.states) {
++ kfree(pr->power.sfi_cstates);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void dealloc_cstates(int num)
++{
++ struct sfi_processor *pr;
++ int i;
++
++ for (i = 0; i < num; i++) {
++ pr = per_cpu(sfi_processors, i);
++ kfree(pr->power.sfi_cstates);
++ kfree(pr->power.states);
++ }
++}
++
++static int __init parse_idle(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_cstate_table_entry *pentry;
++ struct sfi_processor *pr;
++ int i;
++ int result;
++ u32 total_cstates, actual_cstates;
++
++ sb = (struct sfi_table_simple *)table;
++ actual_cstates = SFI_GET_NUM_ENTRIES(sb, u64);
++ pentry = (struct sfi_cstate_table_entry *)sb->pentry;
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ total_cstates = actual_cstates + NEW_CSTATES_COUNT;
++#else
++ total_cstates = actual_cstates;
++#endif
++
++ for (i = 0; i < sfi_cpu_num; i++) {
++ pr = per_cpu(sfi_processors, i);
++ result = alloc_cstates(i, total_cstates);
++ if (result < 0)
++ dealloc_cstates(i);
++ memcpy(pr->power.sfi_cstates, pentry,
++ actual_cstates * sizeof(*pentry));
++ pr->power.count = actual_cstates;
++ }
++
++ return 0;
++}
++
++static int __init init_sfi_processor_list(void)
++{
++ struct sfi_processor *pr;
++ int i;
++ int result;
++
++ /* parse the cpus from the sfi table */
++ result = sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, parse_cpus);
++
++ if (result < 0)
++ return result;
++
++ pr = kzalloc(sfi_cpu_num * sizeof(struct sfi_processor), GFP_KERNEL);
++ if (!pr)
++ return -ENOMEM;
++
++ for (i = 0; i < sfi_cpu_num; i++) {
++ pr->id = sfi_cpu_array[i].apic_id;
++ per_cpu(sfi_processors, pr->id) = pr;
++ pr++;
++ }
++
++ return 0;
++}
++
++static int __init sfi_processor_init(void)
++{
++ struct sfi_processor *pr;
++ int i;
++ int result;
++
++ result = init_sfi_processor_list();
++ if (result)
++ return result;
++
++ /* parse the cpu idle states */
++ result = sfi_table_parse(SFI_SIG_IDLE, NULL, NULL, parse_idle);
++
++ if (result < 0)
++ return result;
++
++ result = cpuidle_register_driver(&sfi_idle_driver);
++ if (!result) {
++ for (i = 0; i < sfi_cpu_num; i++) {
++ pr = per_cpu(sfi_processors, i);
++ if (pr) {
++ result = sfi_processor_power_init(pr);
++ if (result)
++ break;
++ }
++ }
++ if (result)
++ cpuidle_unregister_driver(&sfi_idle_driver);
++ } else
++ printk(KERN_ERR "Failed to register cpuidle driver: %s\n",
++ sfi_idle_driver.name);
++
++ return result;
++}
++
++static void __exit sfi_processor_exit(void)
++{
++ struct sfi_processor *pr;
++ int i;
++
++ for (i = 0; i < sfi_cpu_num; i++) {
++ pr = per_cpu(sfi_processors, i);
++ sfi_processor_power_exit(pr);
++ }
++
++ pr = per_cpu(sfi_processors, 0);
++ kfree(pr);
++
++ cpuidle_unregister_driver(&sfi_idle_driver);
++
++}
++
++module_init(sfi_processor_init);
++module_exit(sfi_processor_exit);
+--- /dev/null
++++ b/drivers/sfi/sfi_processor_idle.c
+@@ -0,0 +1,262 @@
++/*
++ * sfi_processor_idle.c - sfi based c-state driver
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <asm/processor.h>
++#include <linux/sfi_processor.h>
++#include <linux/sched.h>
++#include <linux/clockchips.h>
++#include <linux/sfi.h>
++
++#define MWAIT_SUBSTATE_MASK (0xf)
++#define MWAIT_SUBSTATE_SIZE (4)
++
++#define CPUID_MWAIT_LEAF (5)
++#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
++#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
++
++#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
++
++static unsigned int latency_factor __read_mostly = 4;
++module_param(latency_factor, uint, 0644);
++
++static int sfi_idle_enter_deep(struct cpuidle_device *dev,
++ struct cpuidle_state *state);
++
++static int sfi_idle_enter_simple(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff = 0;
++ struct sfi_cstate_table_entry *data;
++
++ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
++ if (unlikely(!data))
++ return 0;
++
++
++ local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we test
++ * NEED_RESCHED:
++ */
++ smp_mb();
++
++ if (unlikely(need_resched())) {
++ current_thread_info()->status |= TS_POLLING;
++ local_irq_enable();
++ return 0;
++ }
++
++ t1 = ktime_get();
++ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
++ t2 = ktime_get();
++
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++
++ diff = ktime_to_us(ktime_sub(t2, t1));
++ if (diff > INT_MAX)
++ diff = INT_MAX;
++
++ return (int)diff;
++}
++
++static int sfi_idle_enter_deep(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++
++ ktime_t t1, t2;
++ s64 diff_us = 0;
++ s64 diff_ns = 0;
++ struct sfi_cstate_table_entry *data;
++ struct sfi_processor *pr;
++
++ pr = __get_cpu_var(sfi_processors);
++ if (unlikely(!pr))
++ return 0;
++
++ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
++ if (unlikely(!data))
++ return 0;
++
++ local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we test
++ * NEED_RESCHED:
++ */
++ smp_mb();
++
++ if (unlikely(need_resched())) {
++ current_thread_info()->status |= TS_POLLING;
++ local_irq_enable();
++ return 0;
++ }
++
++ t1 = ktime_get();
++
++ /* Tell the scheduler that we are going deep-idle: */
++ sched_clock_idle_sleep_event();
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &pr->id);
++ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
++
++ t2 = ktime_get();
++
++ diff_us = ktime_to_us(ktime_sub(t2, t1));
++ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
++
++ /* Tell the scheduler how much we idled: */
++ sched_clock_idle_wakeup_event(diff_ns);
++
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &pr->id);
++
++ if (diff_us > INT_MAX)
++ diff_us = INT_MAX;
++
++ return (int)diff_us;
++
++}
++
++/**
++ * sfi_processor_setup_cpuidle - prepares and configures CPUIDLE
++ * @pr: the SFI processor
++ */
++static int sfi_processor_setup_cpuidle(struct sfi_processor *pr)
++{
++ int i;
++ struct cpuidle_state *state;
++ struct cpuidle_device *dev = &pr->power.dev;
++ static int cstates[] = {0, 1, 2, 4, 6};
++
++ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
++ dev->states[i].name[0] = '\0';
++ dev->states[i].desc[0] = '\0';
++ }
++
++ for (i = 1; i <= pr->power.count; i++) {
++
++ state = &dev->states[i];
++
++ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", cstates[i]);
++ snprintf(state->desc, CPUIDLE_DESC_LEN, "C%d", cstates[i]);
++
++ state->exit_latency = pr->power.states[i].exit_latency;
++ state->target_residency = state->exit_latency * latency_factor;
++ state->power_usage = pr->power.states[i].power_usage;
++ state->flags = 0;
++ cpuidle_set_statedata(state, &pr->power.sfi_cstates[i]);
++
++ printk
++ (KERN_INFO "State details Name:%s, Desc:%s, \
++ exit_latency:%d,target_residency%d,power_usage%d,hint%d",
++ state->name, state->desc, state->exit_latency,
++ state->target_residency, state->power_usage,
++ pr->power.sfi_cstates[i].hint);
++
++ switch (i) {
++ case 1:
++ state->flags |= CPUIDLE_FLAG_SHALLOW;
++ state->enter = sfi_idle_enter_simple;
++ break;
++
++ case 2:
++ state->flags |= CPUIDLE_FLAG_BALANCED;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->enter = sfi_idle_enter_deep;
++ break;
++
++ default:
++ state->flags |= CPUIDLE_FLAG_DEEP;
++ state->enter = sfi_idle_enter_deep;
++ break;
++ }
++
++ }
++
++ dev->state_count = i;
++
++ return 0;
++}
++
++int sfi_cstate_probe(unsigned int hint)
++{
++ int retval;
++ unsigned int eax, ebx, ecx, edx;
++ unsigned int edx_part;
++ unsigned int cstate_type;
++ unsigned int num_cstate_subtype;
++
++ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
++
++ /* Check whether this particular CState is supported or not */
++ cstate_type = (hint >> MWAIT_SUBSTATE_SIZE) + 1;
++ edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
++ num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
++
++ retval = 0;
++ if (num_cstate_subtype < (hint & MWAIT_SUBSTATE_MASK)) {
++ retval = -1;
++ goto out;
++ }
++
++ /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
++ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
++ !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
++ retval = -1;
++ goto out;
++ }
++
++out:
++ return retval;
++}
++
++int sfi_processor_power_init(struct sfi_processor *pr)
++{
++
++ int totallen;
++ struct sfi_cstate_table_entry *pentry;
++
++ pentry = pr->power.sfi_cstates;
++
++ for (totallen = 1; totallen <= pr->power.count; totallen++, pentry++) {
++ pr->power.states[totallen].power_usage = 0;
++ pr->power.states[totallen].exit_latency = pentry->latency;
++ sfi_cstate_probe(pentry->hint);
++ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
++ totallen, pentry->hint, pentry->latency);
++ }
++
++ sfi_processor_setup_cpuidle(pr);
++ pr->power.dev.cpu = pr->id;
++ if (cpuidle_register_device(&pr->power.dev))
++ return -EIO;
++
++ return 0;
++}
++
++int sfi_processor_power_exit(struct sfi_processor *pr)
++{
++ cpuidle_unregister_device(&pr->power.dev);
++ return 0;
++}
+--- /dev/null
++++ b/drivers/sfi/sfi_processor_perflib.c
+@@ -0,0 +1,189 @@
++/*
++ * sfi_processor_perflib.c - SFI Processor P-States Library
++ *
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Vishwesh M Rudramuni
++ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/cpufreq.h>
++#include <linux/sfi.h>
++#include <linux/sfi_processor.h>
++#include <linux/slab.h>
++
++#define SFI_PROCESSOR_COMPONENT 0x01000000
++#define SFI_PROCESSOR_CLASS "processor"
++#define SFI_PROCESSOR_FILE_PERFORMANCE "performance"
++#define _COMPONENT SFI_PROCESSOR_COMPONENT
++#define MSR_IA32_CLOCK_CR_GEYSIII_VCC_3 0xcf
++#define GRD_RATIO_900 9
++#define GRD_RATIO_1100 0xb
++#define CTRL_VAL_900 0x90c
++#define CTRL_VAL_1100 0xb14
++#define GRD_VID_MASK 0x3F
++#define GRD_BUS_RATIO_MASK 0xF
++
++
++static DEFINE_MUTEX(performance_mutex);
++
++/* Use cpufreq debug layer for _PPC changes. */
++#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
++ "cpufreq-core", msg)
++
++#define SFI_FREQ_MAX 32
++struct sfi_freq_table_entry sfi_cpufreq_array[SFI_FREQ_MAX];
++int g_sfi_cpufreq_num;
++
++static int __init parse_freq(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_freq_table_entry *pentry;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sb) {
++ printk(KERN_WARNING "SFI: Unable to map FREQ\n");
++ return -ENODEV;
++ }
++
++ if (!g_sfi_cpufreq_num) {
++ g_sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_freq_table_entry);
++ pentry = (struct sfi_freq_table_entry *)sb->pentry;
++ totallen = g_sfi_cpufreq_num * sizeof(*pentry);
++ memcpy(sfi_cpufreq_array, pentry, totallen);
++ }
++
++ return 0;
++}
++
++static int sfi_processor_get_performance_states(struct sfi_processor *pr)
++{
++ int result = 0;
++ int i;
++ unsigned int l, h;
++ unsigned int grd_vid, grd_ratio;
++
++ pr->performance->state_count = g_sfi_cpufreq_num;
++ pr->performance->states =
++ kmalloc(sizeof(struct sfi_processor_px) * g_sfi_cpufreq_num,
++ GFP_KERNEL);
++ if (!pr->performance->states)
++ result = -ENOMEM;
++
++ printk(KERN_INFO "Num p-states %d\n", g_sfi_cpufreq_num);
++
++ /* Populate the P-states info from the SFI table here */
++ for (i = 0; i < g_sfi_cpufreq_num; i++) {
++ pr->performance->states[i].core_frequency = \
++ sfi_cpufreq_array[i].freq_mhz;
++ pr->performance->states[i].transition_latency = \
++ sfi_cpufreq_array[i].latency;
++ pr->performance->states[i].control = \
++ sfi_cpufreq_array[i].ctrl_val;
++ printk(KERN_INFO "State [%d]: core_frequency[%d] \
++ transition_latency[%d] \
++ control[0x%x] status[0x%x]\n", i,
++ (u32) pr->performance->states[i].core_frequency,
++ (u32) pr->performance->states[i].transition_latency,
++ (u32) pr->performance->states[i].control,
++ (u32) pr->performance->states[i].status);
++ }
++
++ /* program the GFM when the cpu's are initialized */
++ rdmsr(MSR_IA32_CLOCK_CR_GEYSIII_VCC_3, l, h);
++ grd_vid = (l >> 12) & GRD_VID_MASK;
++ grd_ratio = (l >> 7) & GRD_BUS_RATIO_MASK;
++
++ /* program the control values for GFM */
++ if (grd_ratio == GRD_RATIO_900)
++ l = CTRL_VAL_900;
++ else if (grd_ratio == GRD_RATIO_1100)
++ l = CTRL_VAL_1100;
++
++ h = 0;
++
++ /* write the value to change the freq to GFM */
++ wrmsr(MSR_IA32_PERF_CTL, l, h);
++
++ return result;
++}
++
++int
++sfi_processor_register_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu)
++{
++ struct sfi_processor *pr;
++
++ mutex_lock(&performance_mutex);
++
++ pr = per_cpu(sfi_processors, cpu);
++ if (!pr) {
++ mutex_unlock(&performance_mutex);
++ return -ENODEV;
++ }
++
++ if (pr->performance) {
++ mutex_unlock(&performance_mutex);
++ return -EBUSY;
++ }
++
++ WARN_ON(!performance);
++
++ pr->performance = performance;
++
++ /* parse the freq table from sfi */
++ g_sfi_cpufreq_num = 0;
++ sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, parse_freq);
++
++ sfi_processor_get_performance_states(pr);
++
++ mutex_unlock(&performance_mutex);
++ return 0;
++}
++EXPORT_SYMBOL(sfi_processor_register_performance);
++
++void
++sfi_processor_unregister_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu)
++{
++ struct sfi_processor *pr;
++
++
++ mutex_lock(&performance_mutex);
++
++ pr = per_cpu(sfi_processors, cpu);
++ if (!pr) {
++ mutex_unlock(&performance_mutex);
++ return;
++ }
++
++ if (pr->performance)
++ kfree(pr->performance->states);
++ pr->performance = NULL;
++
++ mutex_unlock(&performance_mutex);
++
++ return;
++}
++EXPORT_SYMBOL(sfi_processor_unregister_performance);
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -234,6 +234,14 @@
+ help
+ This selects a driver for the PPC4xx SPI Controller.
+
++config SPI_PW_SPI3
++ tristate "SPI-3 controller driver for Intel Penwell chipset"
++ depends on SPI_MASTER && PCI && X86_MRST
++ select INTEL_MID_DMAC
++ help
++ This is the SPI-3 (SSP) master controller driver for the Intel
++ Penwell chipset.
++
+ config SPI_PXA2XX
+ tristate "PXA2xx SSP SPI master"
+ depends on ARCH_PXA && EXPERIMENTAL
+@@ -351,6 +359,10 @@
+ tristate "PCI interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && PCI
+
++config SPI_DW_MID_DMA
++ bool "DMA support for DW SPI controller on Intel Moorestown platform"
++ depends on SPI_DW_PCI && INTEL_MID_DMAC
++
+ config SPI_DW_MMIO
+ tristate "Memory-mapped io interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && HAVE_CLK
+@@ -385,6 +397,11 @@
+
+ endif # SPI_MASTER
+
+-# (slave support would go here)
++config SPI_INTEL_MID_SSP
++ tristate "SSP SPI controller driver for Intel Moorestown platform (slave mode only)(EXPERIMENTAL)"
++ depends on SPI_MASTER && INTEL_MID_DMAC && EXPERIMENTAL
++ help
++ This is the SPI slave controller driver for the Intel
++ Moorestown platform.
+
+ endif # SPI
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -19,7 +19,8 @@
+ obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o
+ obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
+ obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
+-obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
++obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o
++dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o
+ obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
+ obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
+ obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
+@@ -47,6 +48,7 @@
+ obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
+ obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
+ obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
++obj-$(CONFIG_SPI_PW_SPI3) += pw_spi3.o
+
+ # special build for s3c24xx spi driver with fiq support
+ spi_s3c24xx_hw-y := spi_s3c24xx.o
+@@ -60,6 +62,7 @@
+ # ... add above this line ...
+
+ # SPI slave controller drivers (upstream link)
++obj-$(CONFIG_SPI_INTEL_MID_SSP) += intel_mid_ssp_spi.o
+ # ... add above this line ...
+
+ # SPI slave drivers (protocol for that link)
+--- a/drivers/spi/dw_spi.c
++++ b/drivers/spi/dw_spi.c
+@@ -163,20 +163,23 @@
+
+ static void wait_till_not_busy(struct dw_spi *dws)
+ {
+- unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
++ unsigned long end = jiffies + 1 + usecs_to_jiffies(5000);
+
+ while (time_before(jiffies, end)) {
+ if (!(dw_readw(dws, sr) & SR_BUSY))
+ return;
++ cpu_relax();
+ }
+ dev_err(&dws->master->dev,
+- "DW SPI: Status keeps busy for 1000us after a read/write!\n");
++ "DW SPI: Status keeps busy for 5000us after a read/write!\n");
+ }
+
+ static void flush(struct dw_spi *dws)
+ {
+- while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
++ while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) {
+ dw_readw(dws, dr);
++ cpu_relax();
++ }
+
+ wait_till_not_busy(dws);
+ }
+@@ -288,8 +291,10 @@
+ */
+ static int map_dma_buffers(struct dw_spi *dws)
+ {
+- if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
+- || !dws->cur_chip->enable_dma)
++ if (!dws->cur_msg->is_dma_mapped
++ || !dws->dma_inited
++ || !dws->cur_chip->enable_dma
++ || !dws->dma_ops)
+ return 0;
+
+ if (dws->cur_transfer->tx_dma)
+@@ -341,7 +346,7 @@
+ tasklet_schedule(&dws->pump_transfers);
+ }
+
+-static void transfer_complete(struct dw_spi *dws)
++void dw_spi_xfer_done(struct dw_spi *dws)
+ {
+ /* Update total byte transfered return count actual bytes read */
+ dws->cur_msg->actual_length += dws->len;
+@@ -356,6 +361,7 @@
+ } else
+ tasklet_schedule(&dws->pump_transfers);
+ }
++EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
+
+ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
+ {
+@@ -387,7 +393,7 @@
+ if (dws->tx_end > dws->tx)
+ spi_umask_intr(dws, SPI_INT_TXEI);
+ else
+- transfer_complete(dws);
++ dw_spi_xfer_done(dws);
+ }
+
+ return IRQ_HANDLED;
+@@ -412,11 +418,7 @@
+ while (dws->write(dws))
+ dws->read(dws);
+
+- transfer_complete(dws);
+-}
+-
+-static void dma_transfer(struct dw_spi *dws, int cs_change)
+-{
++ dw_spi_xfer_done(dws);
+ }
+
+ static void pump_transfers(unsigned long data)
+@@ -598,7 +600,7 @@
+ }
+
+ if (dws->dma_mapped)
+- dma_transfer(dws, cs_change);
++ dws->dma_ops->dma_transfer(dws, cs_change);
+
+ if (chip->poll_mode)
+ poll_transfer(dws);
+@@ -897,11 +899,15 @@
+ master->setup = dw_spi_setup;
+ master->transfer = dw_spi_transfer;
+
+- dws->dma_inited = 0;
+-
+ /* Basic HW init */
+ spi_hw_init(dws);
+
++ if (dws->dma_ops && dws->dma_ops->dma_init) {
++ ret = dws->dma_ops->dma_init(dws);
++ if (ret)
++ goto err_diable_hw;
++ }
++
+ /* Initial and start queue */
+ ret = init_queue(dws);
+ if (ret) {
+@@ -926,6 +932,8 @@
+
+ err_queue_alloc:
+ destroy_queue(dws);
++ if (dws->dma_ops && dws->dma_ops->dma_exit)
++ dws->dma_ops->dma_exit(dws);
+ err_diable_hw:
+ spi_enable_chip(dws, 0);
+ free_irq(dws->irq, dws);
+@@ -934,7 +942,7 @@
+ exit:
+ return ret;
+ }
+-EXPORT_SYMBOL(dw_spi_add_host);
++EXPORT_SYMBOL_GPL(dw_spi_add_host);
+
+ void __devexit dw_spi_remove_host(struct dw_spi *dws)
+ {
+@@ -950,6 +958,8 @@
+ dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
+ "complete, message memory not freed\n");
+
++ if (dws->dma_ops && dws->dma_ops->dma_exit)
++ dws->dma_ops->dma_exit(dws);
+ spi_enable_chip(dws, 0);
+ /* Disable clk */
+ spi_set_clk(dws, 0);
+@@ -958,7 +968,7 @@
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(dws->master);
+ }
+-EXPORT_SYMBOL(dw_spi_remove_host);
++EXPORT_SYMBOL_GPL(dw_spi_remove_host);
+
+ int dw_spi_suspend_host(struct dw_spi *dws)
+ {
+@@ -971,7 +981,7 @@
+ spi_set_clk(dws, 0);
+ return ret;
+ }
+-EXPORT_SYMBOL(dw_spi_suspend_host);
++EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
+
+ int dw_spi_resume_host(struct dw_spi *dws)
+ {
+@@ -983,7 +993,7 @@
+ dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
+ return ret;
+ }
+-EXPORT_SYMBOL(dw_spi_resume_host);
++EXPORT_SYMBOL_GPL(dw_spi_resume_host);
+
+ MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+ MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
+--- /dev/null
++++ b/drivers/spi/dw_spi_mid.c
+@@ -0,0 +1,243 @@
++/*
++ * dw_spi_mid.c - special handling for DW core on Intel MID platform
++ *
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/dw_spi.h>
++
++#ifdef CONFIG_SPI_DW_MID_DMA
++#include <linux/intel_mid_dma.h>
++#include <linux/pci.h>
++
++struct mid_dma {
++ struct intel_mid_dma_slave dmas_tx;
++ struct intel_mid_dma_slave dmas_rx;
++};
++
++static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
++{
++ struct dw_spi *dws = (struct dw_spi *)param;
++ bool ret = false;
++
++ if (!dws->dmac)
++ goto out;
++
++ if (chan->device->dev == &dws->dmac->dev)
++ ret = true;
++
++out:
++ return ret;
++}
++
++static int mid_spi_dma_init(struct dw_spi *dws)
++{
++ struct mid_dma *dw_dma = dws->dma_priv;
++ struct intel_mid_dma_slave *rxs, *txs;
++ dma_cap_mask_t mask;
++
++ dws->txchan = NULL;
++ dws->rxchan = NULL;
++
++ /*get pci device for DMA*/
++ dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x813, NULL);
++
++ /* 1. Init rx channel */
++ rxs = &dw_dma->dmas_rx;
++
++ rxs->dirn = DMA_FROM_DEVICE;
++ rxs->hs_mode = LNW_DMA_HW_HS;
++ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
++ rxs->src_msize = LNW_DMA_MSIZE_16;
++ rxs->dst_msize = LNW_DMA_MSIZE_16;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
++ if (!dws->rxchan)
++ goto err_exit;
++ dws->rxchan->private = rxs;
++
++ /* 2. Init tx channel */
++ txs = &dw_dma->dmas_tx;
++
++ txs->dirn = DMA_TO_DEVICE;
++ txs->hs_mode = LNW_DMA_HW_HS;
++ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
++ txs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ txs->src_msize = LNW_DMA_MSIZE_16;
++ txs->dst_msize = LNW_DMA_MSIZE_16;
++
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
++ if (!dws->txchan)
++ goto free_rxchan;
++ dws->txchan->private = txs;
++
++ /* Set the dma done bit to 1 */
++ dws->dma_inited = 1;
++ dws->txdma_done = 1;
++ dws->rxdma_done = 1;
++
++ dws->tx_param = ((u64)(unsigned long)dws << 32)
++ | (unsigned long)(&dws->txdma_done);
++ dws->rx_param = ((u64)(unsigned long)dws << 32)
++ | (unsigned long)(&dws->rxdma_done);
++ return 0;
++
++free_rxchan:
++ dma_release_channel(dws->rxchan);
++err_exit:
++ return -1;
++
++}
++
++static void mid_spi_dma_exit(struct dw_spi *dws)
++{
++ dma_release_channel(dws->txchan);
++ dma_release_channel(dws->rxchan);
++}
++
++static void dw_spi_dma_done(void *arg)
++{
++ u64 *param = arg;
++ struct dw_spi *dws;
++ int *done;
++
++ dws = (struct dw_spi *)(unsigned long)(*param >> 32);
++ done = (int *)(unsigned long)(*param & 0xffffffff);
++
++ *done = 1;
++ /* wait till both tx/rx channels are done */
++ if (!dws->txdma_done || !dws->rxdma_done)
++ return;
++
++ dw_spi_xfer_done(dws);
++}
++
++static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
++{
++ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
++ struct dma_chan *txchan, *rxchan;
++ enum dma_ctrl_flags flag;
++ u16 dma_ctrl = 0;
++
++ /* 1. setup DMA related registers */
++ if (cs_change) {
++ spi_enable_chip(dws, 0);
++ dw_writew(dws, dmardlr, 0xf);
++ dw_writew(dws, dmatdlr, 0x10);
++ if (dws->tx_dma)
++ dma_ctrl |= 0x2;
++ if (dws->rx_dma)
++ dma_ctrl |= 0x1;
++ dw_writew(dws, dmacr, dma_ctrl);
++ spi_enable_chip(dws, 1);
++ }
++
++ if (dws->tx_dma)
++ dws->txdma_done = 0;
++ if (dws->rx_dma)
++ dws->rxdma_done = 0;
++
++ /* 2. start the TX dma transfer */
++ txchan = dws->txchan;
++ rxchan = dws->rxchan;
++
++ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
++ if (dws->tx_dma) {
++ txdesc = txchan->device->device_prep_dma_memcpy(txchan,
++ dws->dma_addr, dws->tx_dma,
++ dws->len, flag);
++ txdesc->callback = dw_spi_dma_done;
++ txdesc->callback_param = &dws->tx_param;
++ }
++
++ /* 3. start the RX dma transfer */
++ if (dws->rx_dma) {
++ rxdesc = rxchan->device->device_prep_dma_memcpy(rxchan,
++ dws->rx_dma, dws->dma_addr,
++ dws->len, flag);
++ rxdesc->callback = dw_spi_dma_done;
++ rxdesc->callback_param = &dws->rx_param;
++ }
++
++ /* rx must be started before tx due to spi instinct */
++ if (rxdesc)
++ rxdesc->tx_submit(rxdesc);
++ if (txdesc)
++ txdesc->tx_submit(txdesc);
++
++ return 0;
++}
++
++static struct dw_spi_dma_ops mid_dma_ops = {
++ .dma_init = mid_spi_dma_init,
++ .dma_exit = mid_spi_dma_exit,
++ .dma_transfer = mid_spi_dma_transfer,
++};
++#endif
++
++/* Some specific info for SPI0 controller on Moorestown */
++/* HW info for MRST CLk Control Unit, one 32b reg */
++#define MRST_SPI_CLK_BASE 100000000 /* 100m */
++#define MRST_CLK_SPI0_REG 0xff11d86c
++#define CLK_SPI_BDIV_OFFSET 0
++#define CLK_SPI_BDIV_MASK 0x00000007
++#define CLK_SPI_CDIV_OFFSET 9
++#define CLK_SPI_CDIV_MASK 0x00000e00
++#define CLK_SPI_CDIV_100M 0x0
++#define CLK_SPI_CDIV_50M 0x1
++#define CLK_SPI_CDIV_33M 0x2
++#define CLK_SPI_CDIV_25M 0x3
++#define CLK_SPI_DISABLE_OFFSET 8
++
++int dw_spi_mid_init(struct dw_spi *dws)
++{
++ u32 *clk_reg, clk_cdiv;
++
++ clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
++ if (!clk_reg)
++ return -ENOMEM;
++
++ /* get SPI controller operating freq info */
++ clk_cdiv = ((*clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
++ dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
++ iounmap(clk_reg);
++
++ dws->num_cs = 16;
++ dws->fifo_len = 40; /* FIFO has 40 words buffer */
++
++#ifdef CONFIG_SPI_DW_MID_DMA
++ dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
++ if (!dws->dma_priv)
++ return -ENOMEM;
++ dws->dma_ops = &mid_dma_ops;
++#endif
++ return 0;
++}
++
+--- a/drivers/spi/dw_spi_pci.c
++++ b/drivers/spi/dw_spi_pci.c
+@@ -1,5 +1,5 @@
+ /*
+- * mrst_spi_pci.c - PCI interface driver for DW SPI Core
++ * dw_spi_pci.c - PCI interface driver for DW SPI Core
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+@@ -26,8 +26,8 @@
+ #define DRIVER_NAME "dw_spi_pci"
+
+ struct dw_spi_pci {
+- struct pci_dev *pdev;
+- struct dw_spi dws;
++ struct pci_dev *pdev;
++ struct dw_spi dws;
+ };
+
+ static int __devinit spi_pci_probe(struct pci_dev *pdev,
+@@ -72,9 +72,13 @@
+ dws->parent_dev = &pdev->dev;
+ dws->bus_num = 0;
+ dws->num_cs = 4;
+- dws->max_freq = 25000000; /* for Moorestwon */
+ dws->irq = pdev->irq;
+- dws->fifo_len = 40; /* FIFO has 40 words buffer */
++
++ if (pdev->device == 0x0800) {
++ ret = dw_spi_mid_init(dws);
++ if (ret)
++ goto err_unmap;
++ }
+
+ ret = dw_spi_add_host(dws);
+ if (ret)
+--- /dev/null
++++ b/drivers/spi/intel_mid_ssp_spi.c
+@@ -0,0 +1,1251 @@
++/*
++ * intel_mid_ssp_spi.c
++ * This driver supports Bulverde SSP core used on Intel MID platforms
++ * Initial version supporting only slave mode.
++ *
++ * Copyright (c) 2010, Intel Corporation.
++ * Ken Mills <ken.k.mills@intel.com>
++ * Sylvain Centelles <sylvain.centelles@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++/*
++ * Note:
++ *
++ * Supports DMA and non-interrupt polled transfers.
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++
++#include <linux/dma-mapping.h>
++#include <linux/intel_mid_dma.h>
++#include <linux/pm_qos_params.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/intel_mid_ssp_spi.h>
++#include "intel_mid_ssp_spi_def.h"
++
++#define DRIVER_NAME "intel_mid_ssp_spi"
++
++MODULE_AUTHOR("Ken Mills");
++MODULE_DESCRIPTION("Bulverde SSP core SPI contoller (Slave only)");
++MODULE_LICENSE("GPL");
++
++static const struct pci_device_id pci_ids[];
++/* Used to enable SRAM based DMA transfers if set in */
++/* struct pci_device_id pci_ids. */
++#define SRAM_ADDITIONAL_CPY 1
++/* Disables the trail byte handling at DMA HW level when set in */
++/* struct pci_device_id pci_ids. */
++/* Trailing byte feature not fully available on moorestown. */
++#define DMA_USE_NO_TRAIL 2
++
++#ifdef DUMP_RX
++static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
++{
++ int tlen1 = (len < sz ? len : sz);
++ int tlen2 = ((len - sz) > sz) ? sz : (len - sz);
++ unsigned char *p;
++ static char msg[MAX_SPI_TRANSFER_SIZE];
++
++ memset(msg, '\0', sizeof(msg));
++ p = buf;
++ while (p < buf + tlen1)
++ sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
++
++ if (tlen2 > 0) {
++ sprintf(msg, "%s .....", msg);
++ p = (buf+len) - tlen2;
++ while (p < buf + len)
++ sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
++ }
++
++ dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
++ len-tlen2, len - 1, msg);
++}
++#endif
++
++static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
++{
++ u32 sssr;
++ sssr = read_SSSR(drv_context->ioaddr);
++ if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
++ return 0;
++ else
++ return 1;
++}
++
++static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
++{
++ return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
++}
++
++static inline void disable_interface(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
++}
++
++static inline void disable_triggers(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
++}
++
++
++static void flush(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ u32 i = 0;
++
++ /* If the transmit fifo is not empty, reset the interface. */
++ if (!is_tx_fifo_empty(drv_context)) {
++ dev_err(&drv_context->pdev->dev,
++ "TX FIFO not empty. Reset of SPI IF");
++ disable_interface(drv_context);
++ return;
++ }
++
++ dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
++ while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
++ read_SSDR(reg);
++ i++;
++ }
++ WARN(i > 0, "%d words flush occured\n", i);
++
++ return;
++}
++
++static int null_writer(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ u8 n_bytes = drv_context->n_bytes;
++
++ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
++ || (drv_context->tx == drv_context->tx_end))
++ return 0;
++
++ write_SSDR(0, reg);
++ drv_context->tx += n_bytes;
++
++ return 1;
++}
++
++static int null_reader(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ u8 n_bytes = drv_context->n_bytes;
++
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_context->rx < drv_context->rx_end)) {
++ read_SSDR(reg);
++ drv_context->rx += n_bytes;
++ }
++
++ return drv_context->rx == drv_context->rx_end;
++}
++
++static int u8_writer(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
++ || (drv_context->tx == drv_context->tx_end))
++ return 0;
++
++ write_SSDR(*(u8 *)(drv_context->tx), reg);
++ ++drv_context->tx;
++
++ return 1;
++}
++
++static int u8_reader(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_context->rx < drv_context->rx_end)) {
++ *(u8 *)(drv_context->rx) = read_SSDR(reg);
++ ++drv_context->rx;
++ }
++
++ return drv_context->rx == drv_context->rx_end;
++}
++
++static int u16_writer(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
++ || (drv_context->tx == drv_context->tx_end))
++ return 0;
++
++ write_SSDR(*(u16 *)(drv_context->tx), reg);
++ drv_context->tx += 2;
++
++ return 1;
++}
++
++static int u16_reader(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_context->rx < drv_context->rx_end)) {
++ *(u16 *)(drv_context->rx) = read_SSDR(reg);
++ drv_context->rx += 2;
++ }
++
++ return drv_context->rx == drv_context->rx_end;
++}
++
++static int u32_writer(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
++ || (drv_context->tx == drv_context->tx_end))
++ return 0;
++
++ write_SSDR(*(u32 *)(drv_context->tx), reg);
++ drv_context->tx += 4;
++
++ return 1;
++}
++
++static int u32_reader(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_context->rx < drv_context->rx_end)) {
++ *(u32 *)(drv_context->rx) = read_SSDR(reg);
++ drv_context->rx += 4;
++ }
++
++ return drv_context->rx == drv_context->rx_end;
++}
++
++static bool chan_filter(struct dma_chan *chan, void *param)
++{
++ struct ssp_driver_context *drv_context = (struct ssp_driver_context *)param;
++ bool ret = false;
++
++ if (!drv_context->dmac1)
++ return ret;
++
++ if (chan->device->dev == &drv_context->dmac1->dev)
++ ret = true;
++
++ return ret;
++}
++
++/**
++ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
++ * @drv_context: Pointer to the private driver context
++ */
++static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
++{
++ struct device *dev = &drv_context->pdev->dev;
++
++ if (!drv_context->dma_mapped)
++ return;
++ dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
++ PCI_DMA_FROMDEVICE);
++ dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
++ PCI_DMA_TODEVICE);
++ drv_context->dma_mapped = 0;
++}
++
++/**
++ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
++ * @arg: Pointer to the data provided at callback registration
++ *
++ * This function is set as callback for both RX and TX DMA transfers. The
++ * RX or TX 'done' flag is set according to the direction of the ended
++ * transfer. Then, if both RX and TX flags are set, it means that the
++ * transfer job is completed.
++ */
++static void intel_mid_ssp_spi_dma_done(void *arg)
++{
++ struct callback_param *cb_param = (struct callback_param *)arg;
++ struct ssp_driver_context *drv_context = cb_param->drv_context;
++ void *reg = drv_context->ioaddr;
++
++ if (cb_param->direction == TX_DIRECTION)
++ drv_context->txdma_done = 1;
++ else
++ drv_context->rxdma_done = 1;
++
++ if (drv_context->txdma_done && drv_context->rxdma_done) {
++ /* Clear Status Register */
++ write_SSSR(drv_context->clear_sr, reg);
++ /* Disable Triggers to DMA or to CPU*/
++ disable_triggers(drv_context);
++ unmap_dma_buffers(drv_context);
++ queue_work(drv_context->dma_wq, &drv_context->complete_work);
++ }
++}
++
++/**
++ * intel_mid_ssp_spi_dma_init() - Initialize DMA
++ * @drv_context: Pointer to the private driver context
++ *
++ * This function is called at driver setup phase to allocate DMA
++ * resources.
++ */
++static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
++{
++ struct intel_mid_dma_slave *rxs, *txs;
++ dma_cap_mask_t mask;
++ struct device *dev = &drv_context->pdev->dev;
++
++ if (drv_context->dma_initialized)
++ return;
++
++ /* Use DMAC1 */
++ drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
++ PCI_DMAC1_ID, NULL);
++
++ if (!drv_context->dmac1) {
++ dev_err(dev, "SPI Slave:Can't find DMAC1");
++ return;
++ }
++
++ if (drv_context->quirks & SRAM_ADDITIONAL_CPY) {
++ drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
++ 2 * MAX_SPI_TRANSFER_SIZE);
++ if (drv_context->virt_addr_sram_rx)
++ drv_context->virt_addr_sram_tx =
++ drv_context->virt_addr_sram_rx +
++ MAX_SPI_TRANSFER_SIZE;
++ else
++ dev_err(dev, "Virt_addr_sram_rx is null\n");
++ }
++
++ /* 1. init rx channel */
++ rxs = &drv_context->dmas_rx;
++
++ rxs->dirn = DMA_FROM_DEVICE;
++ rxs->hs_mode = LNW_DMA_HW_HS;
++ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
++ rxs->src_msize = LNW_DMA_MSIZE_8;
++ rxs->dst_msize = LNW_DMA_MSIZE_8;
++
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ drv_context->rxchan = dma_request_channel(mask, chan_filter,
++ drv_context);
++ if (!drv_context->rxchan)
++ goto err_exit;
++
++ drv_context->rxchan->private = rxs;
++
++ /* 2. init tx channel */
++ txs = &drv_context->dmas_tx;
++
++ txs->dirn = DMA_TO_DEVICE;
++ txs->hs_mode = LNW_DMA_HW_HS;
++ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
++ txs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ txs->src_msize = LNW_DMA_MSIZE_8;
++ txs->dst_msize = LNW_DMA_MSIZE_8;
++
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ drv_context->txchan = dma_request_channel(mask, chan_filter,
++ drv_context);
++
++ if (!drv_context->txchan)
++ goto free_rxchan;
++ else
++ drv_context->txchan->private = txs;
++
++ /* set the dma done bit to 1 */
++ drv_context->txdma_done = 1;
++ drv_context->rxdma_done = 1;
++
++ drv_context->tx_param.drv_context = drv_context;
++ drv_context->tx_param.direction = TX_DIRECTION;
++ drv_context->rx_param.drv_context = drv_context;
++ drv_context->rx_param.direction = RX_DIRECTION;
++
++ drv_context->dma_initialized = 1;
++
++ return;
++
++free_rxchan:
++ dma_release_channel(drv_context->rxchan);
++err_exit:
++ dev_err(dev, "Error : DMA Channel Not available\n");
++
++ if (drv_context->quirks & SRAM_ADDITIONAL_CPY)
++ iounmap(drv_context->virt_addr_sram_rx);
++
++ pci_dev_put(drv_context->dmac1);
++ return;
++}
++
++/**
++ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
++ * @drv_context: Pointer to the private driver context
++ */
++static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
++{
++ dma_release_channel(drv_context->txchan);
++ dma_release_channel(drv_context->rxchan);
++
++ if (drv_context->quirks & SRAM_ADDITIONAL_CPY)
++ iounmap(drv_context->virt_addr_sram_rx);
++
++ pci_dev_put(drv_context->dmac1);
++}
++
++/**
++ * dma_transfer() - Initiate a DMA transfer
++ * @drv_context: Pointer to the private driver context
++ */
++static void dma_transfer(struct ssp_driver_context *drv_context)
++{
++ dma_addr_t ssdr_addr;
++ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
++ struct dma_chan *txchan, *rxchan;
++ enum dma_ctrl_flags flag;
++ struct device *dev = &drv_context->pdev->dev;
++ u32 length;
++ /* get Data Read/Write address */
++ ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
++
++ if (drv_context->tx_dma)
++ drv_context->txdma_done = 0;
++
++ if (drv_context->rx_dma)
++ drv_context->rxdma_done = 0;
++
++ /* 2. start the TX dma transfer */
++ txchan = drv_context->txchan;
++ rxchan = drv_context->rxchan;
++
++ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
++
++ if (drv_context->quirks & DMA_USE_NO_TRAIL) {
++ /* In Rx direction, TRAIL Bytes are handled by memcpy */
++ if (drv_context->rx_dma && (drv_context->len > SPI_FIFO_SIZE))
++ length = TRUNCATE(drv_context->len,
++ DFLT_RX_BYTE_THRESHOLD);
++ else if (drv_context->len <= SPI_FIFO_SIZE)
++ length = drv_context->len;
++ else
++ dev_err(dev, "ERROR : rx_dma is null\r\n");
++ } else {
++ /* TRAIL Bytes are handled by DMA */
++ if (drv_context->rx_dma)
++ length = drv_context->len;
++ else
++ dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
++ }
++
++ rxdesc = rxchan->device->device_prep_dma_memcpy
++ (rxchan, /* DMA Channel */
++ drv_context->rx_dma, /* DAR */
++ ssdr_addr, /* SAR */
++ length, /* Data Length */
++ flag); /* Flag */
++
++ if (rxdesc) {
++ rxdesc->callback = intel_mid_ssp_spi_dma_done;
++ rxdesc->callback_param = &drv_context->rx_param;
++ } else {
++ dev_err(dev, "ERROR : rxdesc is null!\n");
++ return;
++ }
++
++ dev_dbg(dev, "in RX dma_transfer: len=%d done=%d\n",
++ drv_context->len, drv_context->rxdma_done);
++
++ /* 3. start the RX dma transfer */
++ if (drv_context->tx_dma) {
++ txdesc = txchan->device->device_prep_dma_memcpy
++ (txchan, /* DMA Channel */
++ ssdr_addr, /* DAR */
++ drv_context->tx_dma, /* SAR */
++ drv_context->len, /* Data Length */
++ flag); /* Flag */
++ if (txdesc == NULL) {
++ dev_err(dev, "ERROR : txdesc is null\n");
++ return;
++ }
++
++ txdesc->callback = intel_mid_ssp_spi_dma_done;
++ txdesc->callback_param = &drv_context->tx_param;
++ } else {
++ dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
++ return;
++ }
++
++ dev_dbg(dev, "Firing DMA channels\r\n");
++ txdesc->tx_submit(txdesc);
++ rxdesc->tx_submit(rxdesc);
++}
++
++/**
++ * map_dma_buffers() - Map DMA buffer before a transfer
++ * @drv_context: Pointer to the private driver context
++ */
++static int map_dma_buffers(struct ssp_driver_context *drv_context)
++{
++ struct device *dev = &drv_context->pdev->dev;
++
++ if (drv_context->dma_mapped) {
++ dev_err(dev, "ERROR : DMA buffers already mapped\n");
++ return 0;
++ }
++ if (drv_context->quirks & SRAM_ADDITIONAL_CPY) {
++ /* Copy drv_context->tx into sram_tx */
++ memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
++ drv_context->len);
++#ifdef DUMP_RX
++ dump_trailer(&drv_context->pdev->dev, drv_context->tx,
++ drv_context->len, 16);
++#endif
++ drv_context->rx_dma = SRAM_RX_ADDR;
++ drv_context->tx_dma = SRAM_TX_ADDR;
++ } else {
++ /* no SRAM_ADDITIONAL_CPY */
++ if (drv_context->dma_mapped)
++ return 1;
++
++ drv_context->tx_dma =
++ dma_map_single(dev, drv_context->tx, drv_context->len,
++ PCI_DMA_TODEVICE);
++ if (dma_mapping_error(dev, drv_context->tx_dma)) {
++ dev_err(dev, "ERROR : tx dma mapping failed\n");
++ return 0;
++ }
++
++ drv_context->rx_dma =
++ dma_map_single(dev, drv_context->rx, drv_context->len,
++ PCI_DMA_FROMDEVICE);
++ if (dma_mapping_error(dev, drv_context->rx_dma)) {
++ dma_unmap_single(dev, drv_context->tx_dma,
++ drv_context->len, DMA_TO_DEVICE);
++ dev_err(dev, "ERROR : rx dma mapping failed\n");
++ return 0;
++ }
++ }
++ return 1;
++}
++
++/**
++ * drain_trail() - Handle trailing bytes of a transfer
++ * @drv_context: Pointer to the private driver context
++ *
++ * This function handles the trailing bytes of a transfer for the case
++ * they are not handled by the DMA.
++ */
++void drain_trail(struct ssp_driver_context *drv_context)
++{
++ int i;
++ u32 count = 0;
++ u32 count2 = 0;
++ u16 *p_u16ddr;
++ void *reg = drv_context->ioaddr;
++ struct device *dev = &drv_context->pdev->dev;
++
++ if ((drv_context->len) > SPI_FIFO_SIZE) {
++ p_u16ddr = (drv_context->rx + TRUNCATE(drv_context->len,
++ DFLT_RX_BYTE_THRESHOLD));
++
++ while ((p_u16ddr <
++ (u16 *)((char *)drv_context->rx + drv_context->len))
++ && (count < MAX_TRAILING_BYTE_LOOP)) {
++ i = 0;
++ count++;
++ while (is_rx_fifo_empty(drv_context)
++ && (count2 < MAX_TRAILING_BYTE_RETRY)) {
++ i++;
++ count2++;
++ /* should be enough to get a word */
++ udelay(DELAY_TO_GET_A_WORD);
++ }
++ WARN(i, "Waited for %d x %dus\r\n", i,
++ DELAY_TO_GET_A_WORD);
++
++ *p_u16ddr++ = read_SSDR(reg);
++ }
++ if (count >= MAX_TRAILING_BYTE_LOOP)
++ dev_err(dev, "ERROR in %s : infinite \
++ loop avoided on trailing byte loop!\n",
++ __func__);
++ if (count >= MAX_TRAILING_BYTE_RETRY)
++ dev_err(dev, "ERROR in %s : infinite \
++ loop avoided on maximum read retry!\n",
++ __func__);
++ }
++
++}
++
++/**
++ * sram_to_ddr_cpy() - Copy data from Langwell SRAM to DDR
++ * @drv_context: Pointer to the private driver context
++ */
++static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
++{
++ u32 length = drv_context->len;
++
++ if ((drv_context->quirks & DMA_USE_NO_TRAIL)
++ && (drv_context->len > SPI_FIFO_SIZE))
++ length = TRUNCATE(drv_context->len, DFLT_RX_BYTE_THRESHOLD);
++
++ memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
++}
++
++static void int_transfer_complete(struct ssp_driver_context *drv_context)
++{
++ void *reg = drv_context->ioaddr;
++ struct spi_message *msg;
++
++ pm_qos_update_request(drv_context->pm_qos_req, PM_QOS_DEFAULT_VALUE);
++
++ if (drv_context->quirks & SRAM_ADDITIONAL_CPY)
++ sram_to_ddr_cpy(drv_context);
++
++ if (drv_context->quirks & DMA_USE_NO_TRAIL)
++ drain_trail(drv_context);
++ else
++ /* Stop getting Time Outs */
++ write_SSTO(0, reg);
++
++ drv_context->cur_msg->status = 0;
++
++#ifdef DUMP_RX
++ dump_trailer(dev, drv_context->rx, drv_context->len, 16);
++#endif
++
++ msg = drv_context->cur_msg;
++ if (msg->complete)
++ msg->complete(msg->context);
++}
++
++static void int_transfer_complete_work(struct work_struct *work)
++{
++ struct ssp_driver_context *drv_context = container_of(work,
++ struct ssp_driver_context, complete_work);
++
++ int_transfer_complete(drv_context);
++}
++
++static void poll_transfer_complete(struct ssp_driver_context *drv_context)
++{
++ struct spi_message *msg;
++
++ /* Update total bytes transferred; count only the bytes actually read */
++ drv_context->cur_msg->actual_length +=
++ drv_context->len - (drv_context->rx_end - drv_context->rx);
++
++ drv_context->cur_msg->status = 0;
++
++ msg = drv_context->cur_msg;
++ if (msg->complete)
++ msg->complete(msg->context);
++}
++
++/**
++ * ssp_int() - Interrupt handler
++ * @irq: Interrupt number
++ * @dev_id: Pointer to the private driver context
++ *
++ * The SSP interrupt is not used for transfer which are handled by
++ * DMA or polling: only under/overruns are caught to detect
++ * broken transfers.
++ */
++static irqreturn_t ssp_int(int irq, void *dev_id)
++{
++ struct ssp_driver_context *drv_context = dev_id;
++ void *reg = drv_context->ioaddr;
++ struct device *dev = &drv_context->pdev->dev;
++ u32 status = read_SSSR(reg);
++
++ /* just return if this is not our interrupt */
++ if (!(status & drv_context->mask_sr))
++ return IRQ_NONE;
++
++ if (status & SSSR_ROR || status & SSSR_TUR) {
++ dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n", status);
++ WARN_ON(1);
++ if (status & SSSR_ROR)
++ dev_err(dev, "we have Overrun\n");
++ if (status & SSSR_TUR)
++ dev_err(dev, "we have Underrun\n");
++ }
++
++ /* We can fall here when not using DMA mode */
++ if (!drv_context->cur_msg) {
++ disable_interface(drv_context);
++ disable_triggers(drv_context);
++ }
++ /* clear status register */
++ write_SSSR(drv_context->clear_sr, reg);
++ return IRQ_HANDLED;
++}
++
++static void poll_transfer(unsigned long data)
++{
++ struct ssp_driver_context *drv_context = (struct ssp_driver_context *)data;
++
++ if (drv_context->tx)
++ while (drv_context->tx != drv_context->tx_end) {
++ drv_context->write(drv_context);
++ drv_context->read(drv_context);
++ }
++
++ while (!drv_context->read(drv_context))
++ ;
++
++ poll_transfer_complete(drv_context);
++}
++
++/**
++ * start_bitbanging() - Clock synchronization by bit banging
++ * @drv_context: Pointer to private driver context
++ *
++ * This clock synchronization will be removed as soon as it is
++ * handled by the SCU.
++ */
++static void start_bitbanging(struct ssp_driver_context *drv_context)
++{
++ u32 sssr;
++ u32 count = 0;
++ u32 cr0;
++ void *i2c_reg = drv_context->I2C_ioaddr;
++ struct device *dev = &drv_context->pdev->dev;
++ void *reg = drv_context->ioaddr;
++ struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
++ cr0 = chip->cr0;
++
++ dev_warn(dev, "In %s : Starting bit banging\n",\
++ __func__);
++ if (read_SSSR(reg) & SSP_NOT_SYNC)
++ dev_warn(dev, "SSP clock desynchronized.\n");
++ if (!(read_SSCR0(reg) & SSCR0_SSE))
++ dev_warn(dev, "in SSCR0, SSP disabled.\n");
++
++ dev_dbg(dev, "SSP not ready, start CLK sync\n");
++
++ write_SSCR0(cr0 & ~SSCR0_SSE, reg);
++ write_SSPSP(0x02010007, reg);
++
++ write_SSTO(chip->timeout, reg);
++ write_SSCR0(cr0, reg);
++
++ /*
++ * This routine uses the DFx block to override the SSP inputs
++ * and outputs allowing us to bit bang SSPSCLK. On Langwell,
++ * we have to generate the clock to clear busy.
++ */
++ write_I2CDATA(0x3, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CCTRL(0x01070034, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CDATA(0x00000099, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CCTRL(0x01070038, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ sssr = read_SSSR(reg);
++
++ /* Bit bang the clock until CSS clears */
++ while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
++ write_I2CDATA(0x2, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CCTRL(0x01070034, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CDATA(0x3, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CCTRL(0x01070034, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ sssr = read_SSSR(reg);
++ count++;
++ }
++ if (count >= MAX_BITBANGING_LOOP)
++ dev_err(dev, "ERROR in %s : infinite loop \
++ on bit banging. Aborting\n", __func__);
++
++ dev_dbg(dev, "---Bit bang count=%d\n", count);
++
++ write_I2CDATA(0x0, i2c_reg);
++ udelay(I2C_ACCESS_USDELAY);
++ write_I2CCTRL(0x01070038, i2c_reg);
++}
++
++/**
++ * transfer() - Start a SPI transfer
++ * @spi: Pointer to the spi_device struct
++ * @msg: Pointer to the spi_message struct
++ */
++static int transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ struct ssp_driver_context *drv_context = \
++ spi_master_get_devdata(spi->master);
++ struct chip_data *chip = NULL;
++ struct spi_transfer *transfer = NULL;
++ void *reg = drv_context->ioaddr;
++
++ u32 cr1;
++ struct device *dev = &drv_context->pdev->dev;
++ chip = spi_get_ctldata(msg->spi);
++
++ msg->actual_length = 0;
++ msg->status = -EINPROGRESS;
++ drv_context->cur_msg = msg;
++
++ /* We handle only one transfer message since the protocol module has to
++ control the out of band signaling. */
++ transfer = list_entry(msg->transfers.next,
++ struct spi_transfer,
++ transfer_list);
++
++ /* Check transfer length */
++ if (transfer->len > MAX_SPI_TRANSFER_SIZE) {
++ dev_warn(dev, "SPI-SLAVE: transfer length greater than %d\n",
++ MAX_SPI_TRANSFER_SIZE);
++ dev_warn(dev, "SPI-SLAVE: length = %d\n", transfer->len);
++ msg->status = -EINVAL;
++
++ if (msg->complete)
++ msg->complete(msg->context);
++
++ return 0;
++ }
++
++ /* Flush any remaining data (in case of failed previous transfer) */
++ flush(drv_context);
++
++ drv_context->tx = (void *)transfer->tx_buf;
++ drv_context->rx = (void *)transfer->rx_buf;
++ drv_context->len = transfer->len;
++
++ if (chip->dma_enabled) {
++ drv_context->dma_mapped = map_dma_buffers(drv_context);
++ if (!drv_context->dma_mapped)
++ return 0;
++ } else {
++ drv_context->write = drv_context->tx ?
++ chip->write : null_writer;
++ drv_context->read = drv_context->rx ?
++ chip->read : null_reader;
++ drv_context->tx_end = drv_context->tx + transfer->len;
++ drv_context->rx_end = drv_context->rx + transfer->len;
++ }
++
++ /* Clear status */
++ write_SSSR(drv_context->clear_sr, reg);
++
++ /* setup the CR1 control register */
++ cr1 = chip->cr1 | drv_context->cr1_sig;
++
++ if (drv_context->quirks & DMA_USE_NO_TRAIL) {
++ /* in case of len smaller than FIFO size, adjust the RX */
++ /* threshold. All other cases will use the default threshold */
++ /* value. */
++ if (drv_context->len <= SPI_FIFO_SIZE) {
++ cr1 &= ~(SSCR1_RFT);
++ cr1 |= (SSCR1_RxTresh((drv_context->len)
++ / drv_context->n_bytes)
++ & SSCR1_RFT);
++ } else {
++ write_SSTO(chip->timeout, reg);
++ }
++ }
++
++ dev_dbg(dev, "Writing 0x%08x SSCR1 register\n", cr1);
++ write_SSCR1(cr1, reg);
++
++ /* Do bitbanging only if SSP not-enabled or not-synchronized */
++ if ((read_SSSR(reg) & SSP_NOT_SYNC) ||
++ (!(read_SSCR0(reg) & SSCR0_SSE))) {
++ start_bitbanging(drv_context);
++ }
++
++ if (chip->dma_enabled) {
++ pm_qos_update_request(drv_context->pm_qos_req,
++ MIN_EXIT_LATENCY);
++ dma_transfer(drv_context);
++ } else {
++ tasklet_schedule(&drv_context->poll_transfer);
++ }
++
++ return 0;
++}
++
++/**
++ * setup() - Driver setup procedure
++ * @spi: Pointer to the spi_device struct
++ */
++static int setup(struct spi_device *spi)
++{
++ struct intel_mid_ssp_spi_chip *chip_info = NULL;
++ struct chip_data *chip;
++ struct ssp_driver_context *drv_context =
++ spi_master_get_devdata(spi->master);
++ u32 threshold = 0;
++
++ if (!spi->bits_per_word)
++ spi->bits_per_word = DFLT_BITS_PER_WORD;
++
++ if ((spi->bits_per_word < MIN_BITS_PER_WORD
++ || spi->bits_per_word > MAX_BITS_PER_WORD))
++ return -EINVAL;
++
++ chip = spi_get_ctldata(spi);
++ if (!chip) {
++ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
++ if (!chip) {
++ dev_err(&spi->dev,
++ "failed setup: can't allocate chip data\n");
++ return -ENOMEM;
++ }
++ }
++ chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
++ spi->bits_per_word - 16 : spi->bits_per_word)
++ | SSCR0_SSE
++ | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
++
++ /* protocol drivers may change the chip settings, so... */
++ /* if chip_info exists, use it */
++ chip_info = spi->controller_data;
++
++ /* chip_info isn't always needed */
++ chip->cr1 = 0;
++ if (chip_info) {
++ threshold = (SSCR1_RxTresh(chip_info->rx_threshold) &
++ SSCR1_RFT) | (SSCR1_TxTresh(chip_info->tx_threshold) &
++ SSCR1_TFT);
++
++ chip->timeout = chip_info->timeout;
++
++ if (chip_info->enable_loopback)
++ chip->cr1 |= SSCR1_LBM;
++
++ chip->dma_enabled = chip_info->dma_enabled;
++
++ } else {
++ /* if no chip_info provided by protocol driver, */
++ /* set default values */
++ dev_info(&spi->dev, "setting default chip values\n");
++ threshold = (SSCR1_RxTresh(DFLT_RX_THRESHOLD) & SSCR1_RFT)
++ | (SSCR1_TxTresh(DFLT_TX_THRESHOLD) & SSCR1_TFT);
++ chip->dma_enabled = 1;
++ if (drv_context->quirks & DMA_USE_NO_TRAIL)
++ chip->timeout = 0;
++ else
++ chip->timeout = DFLT_TIMEOUT_VAL;
++ }
++ chip->cr1 |= threshold;
++
++ drv_context->dma_mapped = 0;
++
++ /* setting phase and polarity. spi->mode comes from boardinfo */
++ if ((spi->mode & SPI_CPHA) != 0)
++ chip->cr1 |= SSCR1_SPH;
++ if ((spi->mode & SPI_CPOL) != 0)
++ chip->cr1 |= SSCR1_SPO;
++
++ /* set slave mode */
++ chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
++ chip->cr1 |= SSCR1_SCFR; /* slave clock is not free running */
++
++ dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
++ spi->bits_per_word,
++ spi->mode & 0x3);
++ if (spi->bits_per_word <= 8) {
++ chip->n_bytes = 1;
++ chip->read = u8_reader;
++ chip->write = u8_writer;
++ } else if (spi->bits_per_word <= 16) {
++ chip->n_bytes = 2;
++ chip->read = u16_reader;
++ chip->write = u16_writer;
++ } else if (spi->bits_per_word <= 32) {
++ chip->cr0 |= SSCR0_EDSS;
++ chip->n_bytes = 4;
++ chip->read = u32_reader;
++ chip->write = u32_writer;
++ } else {
++ dev_err(&spi->dev, "invalid wordsize\n");
++ return -EINVAL;
++ }
++
++ spi_set_ctldata(spi, chip);
++
++ /* setup of drv_context members that will not change across transfers */
++ drv_context->n_bytes = chip->n_bytes;
++
++ if (chip->dma_enabled) {
++ intel_mid_ssp_spi_dma_init(drv_context);
++ drv_context->cr1_sig = SSCR1_TSRE | SSCR1_RSRE;
++ drv_context->mask_sr = SSSR_ROR | SSSR_TUR;
++ if (drv_context->quirks & DMA_USE_NO_TRAIL)
++ drv_context->cr1_sig |= SSCR1_TRAIL;
++ } else {
++ drv_context->cr1_sig = SSCR1_RIE | SSCR1_TIE | SSCR1_TINTE;
++ drv_context->mask_sr = SSSR_RFS | SSSR_TFS |
++ SSSR_ROR | SSSR_TUR | SSSR_TINT;
++ }
++ drv_context->clear_sr = SSSR_TUR | SSSR_ROR | SSSR_TINT;
++
++ /* We should enable the interface now, but doing it now will make the */
++ /* driver fail. Disabling the interface will force enabling and clock */
++ /* synchronization by bit banging at the first transfer. */
++ disable_interface(drv_context);
++
++ return 0;
++}
++
++/**
++ * cleanup() - Driver cleanup procedure
++ * @spi: Pointer to the spi_device struct
++ */
++static void cleanup(struct spi_device *spi)
++{
++ struct chip_data *chip = spi_get_ctldata(spi);
++ struct ssp_driver_context *drv_context =
++ spi_master_get_devdata(spi->master);
++
++ if (drv_context->dma_initialized) {
++ intel_mid_ssp_spi_dma_exit(drv_context);
++ pci_dev_put(drv_context->dmac1);
++ }
++
++ /* Remove the PM_QOS request */
++ pm_qos_remove_request(drv_context->pm_qos_req);
++
++ kfree(chip);
++}
++
++/**
++ * intel_mid_ssp_spi_probe() - Driver probe procedure
++ * @pdev: Pointer to the pci_dev struct
++ * @ent: Pointer to the pci_device_id struct
++ */
++static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct device *dev = &pdev->dev;
++ struct spi_master *slave;
++ struct ssp_driver_context *drv_context = 0;
++ int status;
++ u32 iolen = 0;
++
++ dev_info(dev, "SPI-Slave: found PCI SSP controller(ID: %04x:%04x)\n",
++ pdev->vendor, pdev->device);
++
++ status = pci_enable_device(pdev);
++ if (status)
++ return status;
++
++ /* Allocate Slave with space for drv_context and null dma buffer */
++ slave = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
++
++ if (!slave) {
++ dev_err(dev, "cannot alloc spi_slave\n");
++ status = -ENOMEM;
++ goto err_free_slave0;
++ }
++
++ drv_context = spi_master_get_devdata(slave);
++ drv_context->slave = slave;
++
++ drv_context->pdev = pdev;
++ drv_context->quirks = ent->driver_data;
++ slave->bus_num = 3;
++ slave->num_chipselect = 1;
++
++ slave->cleanup = cleanup;
++ slave->setup = setup;
++ slave->transfer = transfer;
++ drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
++ INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
++
++ drv_context->dma_initialized = 0;
++
++ /* get basic io resource and map it */
++ drv_context->paddr = pci_resource_start(pdev, 0);
++ iolen = pci_resource_len(pdev, 0);
++
++ status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
++ if (status)
++ goto err_free_slave1;
++
++ drv_context->ioaddr =
++ ioremap_nocache(drv_context->paddr, iolen);
++ if (!drv_context->ioaddr) {
++ status = -ENOMEM;
++ goto err_free_slave2;
++ }
++
++ dev_info(dev, "SPI-Slave: ioaddr = : %08x\n", (int)drv_context->ioaddr);
++ dev_info(dev, "SPI-Slave: attaching to IRQ: %04x\n", pdev->irq);
++
++ /* get base address of I2C_Serbus registers */
++ drv_context->I2C_paddr = 0xff12b000;
++ drv_context->I2C_ioaddr =
++ ioremap_nocache(drv_context->I2C_paddr, 0x10);
++ if (!drv_context->I2C_ioaddr) {
++ status = -ENOMEM;
++ goto err_free_slave3;
++ }
++
++ /* Attach to IRQ */
++ drv_context->irq = pdev->irq;
++ status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
++ "intel_mid_ssp_spi", drv_context);
++ if (status < 0) {
++ dev_err(&pdev->dev, "can not get IRQ\n");
++ goto err_free_slave4;
++ }
++
++ tasklet_init(&drv_context->poll_transfer, poll_transfer,
++ (unsigned long)drv_context);
++
++ /* Register with the SPI framework */
++ dev_info(dev, "SPI-Slave: register with SPI framework\n");
++
++ status = spi_register_master(slave);
++
++ if (status != 0) {
++ dev_err(dev, "problem registering spi slave\n");
++ goto err_free_slave5;
++ }
++
++ pci_set_drvdata(pdev, drv_context);
++
++ /* Create the PM_QOS request */
++ drv_context->pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
++ PM_QOS_DEFAULT_VALUE);
++
++ return status;
++
++err_free_slave5:
++ free_irq(drv_context->irq, drv_context);
++err_free_slave4:
++ iounmap(drv_context->I2C_ioaddr);
++err_free_slave3:
++ iounmap(drv_context->ioaddr);
++err_free_slave2:
++ pci_release_region(pdev, 0);
++err_free_slave1:
++ spi_master_put(slave);
++err_free_slave0:
++ pci_disable_device(pdev);
++
++ return status;
++}
++
++/**
++ * intel_mid_ssp_spi_remove() - driver remove procedure
++ * @pdev: Pointer to the pci_dev struct
++ */
++static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
++{
++ struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
++
++ if (!drv_context)
++ return;
++
++ /* Release IRQ */
++ free_irq(drv_context->irq, drv_context);
++
++ iounmap(drv_context->ioaddr);
++ iounmap(drv_context->I2C_ioaddr);
++
++ /* disconnect from the SPI framework */
++ spi_unregister_master(drv_context->slave);
++
++ pci_set_drvdata(pdev, NULL);
++ pci_release_region(pdev, 0);
++ pci_disable_device(pdev);
++
++ return;
++}
++
++#ifdef CONFIG_PM
++/**
++ * intel_mid_ssp_spi_suspend() - Driver suspend procedure
++ * @pdev: Pointer to the pci_dev struct
++ * @state: pm_message_t
++ */
++static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
++ dev_warn(&pdev->dev, "spi-slave: suspend\n");
++
++ tasklet_disable(&drv_context->poll_transfer);
++
++ return 0;
++}
++
++/**
++ * intel_mid_ssp_spi_resume() - Driver resume procedure
++ * @pdev: Pointer to the pci_dev struct
++ */
++static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
++{
++ struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
++ dev_warn(&pdev->dev, "spi-slave: resume\n");
++
++ tasklet_enable(&drv_context->poll_transfer);
++
++ return 0;
++}
++#else
++#define intel_mid_ssp_spi_suspend NULL
++#define intel_mid_ssp_spi_resume NULL
++#endif /* CONFIG_PM */
++
++
++static const struct pci_device_id pci_ids[] __devinitdata = {
++ { PCI_VDEVICE(INTEL, 0x0815), SRAM_ADDITIONAL_CPY | DMA_USE_NO_TRAIL},
++ {},
++};
++
++static struct pci_driver intel_mid_ssp_spi_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pci_ids,
++ .probe = intel_mid_ssp_spi_probe,
++ .remove = __devexit_p(intel_mid_ssp_spi_remove),
++ .suspend = intel_mid_ssp_spi_suspend,
++ .resume = intel_mid_ssp_spi_resume,
++};
++
++static int __init intel_mid_ssp_spi_init(void)
++{
++ return pci_register_driver(&intel_mid_ssp_spi_driver);
++}
++
++late_initcall(intel_mid_ssp_spi_init);
++
++static void __exit intel_mid_ssp_spi_exit(void)
++{
++ pci_unregister_driver(&intel_mid_ssp_spi_driver);
++}
++
++module_exit(intel_mid_ssp_spi_exit);
++
+--- /dev/null
++++ b/drivers/spi/intel_mid_ssp_spi_def.h
+@@ -0,0 +1,247 @@
++/*
++ * intel_mid_ssp_spi_def.h - SPI slave registers and definitions
++ * for moorestown target
++ *
++ * Copyright (C) Intel 2009
++ * Ken Mills <ken.k.mills@intel.com>
++ * Sylvain Centelles <sylvain.centelles@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#ifndef _INTEL_MID_SSP_SPI_DEF_H
++#define _INTEL_MID_SSP_SPI_DEF_H
++
++#define PCI_DMAC1_ID 0x0814
++
++#define SSP_NOT_SYNC 0x400000
++#define MAX_SPI_TRANSFER_SIZE 8192
++#define MAX_BITBANGING_LOOP 10000
++#define SPI_FIFO_SIZE 16
++
++/* PM QoS define */
++#define MIN_EXIT_LATENCY 20
++
++/* Uncomment to get RX and TX short dumps after each transfer */
++/* #define DUMP_RX 1 */
++#define MAX_TRAILING_BYTE_RETRY 16
++#define MAX_TRAILING_BYTE_LOOP 100
++#define DELAY_TO_GET_A_WORD 3
++#define DFLT_TIMEOUT_VAL 500
++
++#define DEFINE_SSP_REG(reg, off) \
++static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
++static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
++
++#define RX_DIRECTION 0
++#define TX_DIRECTION 1
++
++#define I2C_ACCESS_USDELAY 10
++
++#define DFLT_BITS_PER_WORD 16
++#define MIN_BITS_PER_WORD 4
++#define MAX_BITS_PER_WORD 32
++#define DFLT_RX_THRESHOLD 8
++#define DFLT_RX_BYTE_THRESHOLD (DFLT_RX_THRESHOLD * DFLT_BITS_PER_WORD / 8)
++#define DFLT_TX_THRESHOLD 8
++
++#define TRUNCATE(x, a) ((x) & ~(a-1))
++
++DEFINE_SSP_REG(SSCR0, 0x00)
++DEFINE_SSP_REG(SSCR1, 0x04)
++DEFINE_SSP_REG(SSSR, 0x08)
++DEFINE_SSP_REG(SSITR, 0x0c)
++DEFINE_SSP_REG(SSDR, 0x10)
++DEFINE_SSP_REG(SSTO, 0x28)
++DEFINE_SSP_REG(SSPSP, 0x2c)
++
++DEFINE_SSP_REG(I2CCTRL, 0x00);
++DEFINE_SSP_REG(I2CDATA, 0x04);
++
++DEFINE_SSP_REG(GPLR1, 0x04);
++DEFINE_SSP_REG(GPDR1, 0x0c);
++DEFINE_SSP_REG(GPSR1, 0x14);
++DEFINE_SSP_REG(GPCR1, 0x1C);
++DEFINE_SSP_REG(GAFR1_U, 0x44);
++
++
++#define SRAM_BASE_ADDR 0xfffdc000
++#define SRAM_RX_ADDR SRAM_BASE_ADDR
++#define SRAM_TX_ADDR (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
++
++#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
++#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
++#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
++#define SSCR0_Motorola (0x0 << 4) /* Motorola's SPI mode */
++#define SSCR0_ECS (1 << 6) /* External clock select */
++#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
++
++#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
++#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
++#define SSCR0_EDSS (1 << 20) /* Extended data size select */
++#define SSCR0_NCS (1 << 21) /* Network clock select */
++#define SSCR0_RIM (1 << 22) /* Receive FIFO overrun int mask */
++#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun int mask */
++#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
++#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
++#define SSCR0_ADC (1 << 30) /* Audio clock select */
++#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
++
++#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
++#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
++#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
++#define SSCR1_SPO (1 << 3) /* SSPSCLK polarity setting */
++#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
++#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
++#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
++#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
++#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
++#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
++
++#define SSSR_TNF (1 << 2) /* Tx FIFO Not Full */
++#define SSSR_RNE (1 << 3) /* Rx FIFO Not Empty */
++#define SSSR_BSY (1 << 4) /* SSP Busy */
++#define SSSR_TFS (1 << 5) /* Tx FIFO Service Request */
++#define SSSR_RFS (1 << 6) /* Rx FIFO Service Request */
++#define SSSR_ROR (1 << 7) /* Rx FIFO Overrun */
++#define SSSR_TFL_MASK (0x0F << 8) /* Tx FIFO level field mask */
++
++#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Int Mask */
++#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run int Mask */
++#define SSCR0_NCS (1 << 21) /* Network Clock Select */
++#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */
++
++#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
++#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
++#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
++#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
++#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
++#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
++#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
++#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
++#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
++#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
++#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
++#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
++#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
++#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
++#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
++#define SSCR1_PINTE (1 << 18) /* Trailing Byte Interrupt Enable */
++#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
++#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
++
++#define SSSR_BCE (1 << 23) /* Bit Count Error */
++#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
++#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
++#define SSSR_EOC (1 << 20) /* End Of Chain */
++#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
++#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
++
++#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
++#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
++#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
++#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
++#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
++#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
++#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
++#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
++#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
++
++struct callback_param {
++ void *drv_context;
++ u32 direction;
++};
++
++struct ssp_driver_context {
++ /* Driver model hookup */
++ struct pci_dev *pdev;
++
++ /* SPI framework hookup */
++ struct spi_master *slave;
++
++ /* SSP register addresses */
++ dma_addr_t paddr;
++ void *ioaddr;
++ int irq;
++
++ /* I2C registers */
++ dma_addr_t I2C_paddr;
++ void *I2C_ioaddr;
++
++ /* SSP masks*/
++ u32 cr1_sig;
++ u32 cr1;
++ u32 clear_sr;
++ u32 mask_sr;
++
++ /* PM_QOS request */
++ struct pm_qos_request_list *pm_qos_req;
++
++ struct tasklet_struct poll_transfer;
++
++ spinlock_t lock;
++
++ /* Current message transfer state info */
++ struct spi_message *cur_msg;
++ size_t len;
++ void *tx;
++ void *tx_end;
++ void *rx;
++ void *rx_end;
++ bool dma_initialized;
++ int dma_mapped;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma;
++ u8 n_bytes;
++ int (*write)(struct ssp_driver_context *drv_context);
++ int (*read)(struct ssp_driver_context *drv_context);
++
++ struct intel_mid_dma_slave dmas_tx;
++ struct intel_mid_dma_slave dmas_rx;
++ struct dma_chan *txchan;
++ struct dma_chan *rxchan;
++ struct workqueue_struct *dma_wq;
++ struct work_struct complete_work;
++
++ u8 __iomem *virt_addr_sram_tx;
++ u8 __iomem *virt_addr_sram_rx;
++
++ int txdma_done;
++ int rxdma_done;
++ struct callback_param tx_param;
++ struct callback_param rx_param;
++ struct pci_dev *dmac1;
++
++ unsigned long quirks;
++};
++
++struct chip_data {
++ u32 cr0;
++ u32 cr1;
++ u32 timeout;
++ u8 n_bytes;
++ u8 dma_enabled;
++ int (*write)(struct ssp_driver_context *drv_context);
++ int (*read)(struct ssp_driver_context *drv_context);
++};
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/spi/pw_spi3.c
+@@ -0,0 +1,1162 @@
++/*
++ * pw_spi3.c - Penwell SPI master controller driver
++ * based on pxa2xx.c
++ *
++ * Copyright (C) Intel 2010
++ * Ken Mills <ken.k.mills@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include <linux/pm_qos_params.h>
++#include <linux/intel_mid_dma.h>
++#include <linux/interrupt.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/pw_spi3.h>
++
++#define DRIVER_NAME "pw_spi3"
++#define PNWL_MAX_DMA_LEN 8192
++/* PM QoS define */
++#define MIN_EXIT_LATENCY 20
++
++MODULE_AUTHOR("Intel");
++MODULE_DESCRIPTION("Penwell SPI3 Master Contoller");
++MODULE_LICENSE("GPL");
++
++#define RX_THRESH_DFLT 8
++#define TX_THRESH_DFLT 8
++#define TIMOUT_DFLT 1000
++
++/*
++ * For testing SSCR1 changes that require SSP restart, basically
++ * everything except the service and interrupt enables
++ */
++
++#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
++ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
++ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
++ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
++ | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
++ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
++
++#define PNWL_SSPSP (SSPSP_FSRT | SSPSP_SFRMWDTH(1) | SSPSP_SFRMP | \
++ SSPSP_SCMODE(3))
++
++/*
++ * clock divider
++ * 8 bpw
++ * TUR/ROR do not generate interrupt
++ * SPI mode operation
++ * SSP enabled
++ */
++#define PNWL_CR0(clk, bits, spi, chip) \
++ (clk | SSCR0_Motorola | \
++ SSCR0_DataSize(bits > 16 ? bits - 16 : bits) | \
++ SSCR0_SSE | \
++ SSCR0_TIM | \
++ SSCR0_RIM | \
++ (bits > 16 ? SSCR0_EDSS : 0))
++
++#define PNWL_CR1_MASTER_ROLE 0
++#define PNWL_CR1_SLAVE_ROLE (SSCR1_SFRMDIR | SSCR1_SCLKDIR)
++/* MRST SSP must be slave */
++#define PNWL_CR1_ROLE PNWL_CR1_MASTER_ROLE
++#define PNWL_CR1(spi, chip) \
++ ((chip->enable_loopback ? SSCR1_LBM : 0) | \
++ ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) | \
++ ((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0) | \
++ SSCR1_SCFR | \
++ chip->threshold | \
++ PNWL_CR1_ROLE)
++
++
++
++struct callback_param {
++ void *drv_data;
++ int *donep;
++};
++struct driver_data {
++ /* Driver model hookup */
++ struct pci_dev *pdev;
++ /* SPI framework hookup */
++ struct spi_master *master;
++
++ /* SSP register addresses */
++ unsigned long paddr;
++ void __iomem *ioaddr;
++ u32 iolen;
++ int irq;
++
++ /* SSP masks*/
++ u32 dma_cr1;
++ u32 int_cr1;
++ u32 clear_sr;
++ u32 mask_sr;
++
++ struct tasklet_struct poll_transfer;
++
++ int busy;
++ int run;
++
++ /* Current message transfer state info */
++ struct spi_message *cur_msg;
++ size_t len;
++ void *tx;
++ void *tx_end;
++ void *rx;
++ void *rx_end;
++ int dma_mapped;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma;
++ size_t rx_map_len;
++ size_t tx_map_len;
++ u8 n_bytes;
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
++ void (*cs_control)(u32 command);
++ int dma_inited;
++
++ /* used by DMA code */
++ struct pci_dev *dmac1;
++ struct intel_mid_dma_slave dmas_tx;
++ struct intel_mid_dma_slave dmas_rx;
++ struct dma_chan *txchan;
++ struct dma_chan *rxchan;
++ int txdma_done;
++ int rxdma_done;
++ struct callback_param tx_param;
++ struct callback_param rx_param;
++};
++
++struct chip_data {
++ u32 cr0;
++ u32 cr1;
++ u32 psp;
++ u32 timeout;
++ u8 n_bytes;
++ u32 threshold;
++ u8 enable_dma;
++ u8 poll_mode; /* 1 means use poll mode */
++ u8 enable_loopback;
++ u8 bits_per_word;
++ u32 speed_hz;
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++};
++
++static void flush(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u32 sssr;
++
++ /* If the transmit fifo is not empty, reset the interface. */
++ sssr = ioread32(reg + SSSR);
++ if ((sssr & SSSR_TFL) || (sssr & SSSR_TNF) == 0) {
++ iowrite32(ioread32(reg + SSCR0) & ~SSCR0_SSE, reg + SSCR0);
++ return;
++ }
++
++ /* FIXME?: Timeout reset */
++ while (ioread32(reg + SSSR) & SSSR_RNE)
++ ioread32(reg + SSDR);
++
++ iowrite32(SSSR_ROR, reg + SSSR);
++ iowrite32(SSSR_TUR, reg + SSSR);
++}
++
++/*
++ * reader/writer functions
++ *
++ * *_reader functions return:
++ * 0: not complete (data not available)
++ * 1: *all* requested data has been read
++ *
++ * *_writer functions return:
++ * 1: data successfully written
++ * 0: *all* requested data already written *or* full condition hit
++ * note: this means caller must verify write-complete condition
++ *
++ */
++static int null_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u8 n_bytes = drv_data->n_bytes;
++ if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ iowrite32(0, reg + SSDR);
++ drv_data->tx += n_bytes;
++
++ return 1;
++}
++
++static int null_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ while ((ioread32(reg + SSSR) & SSSR_RNE) &&
++ (drv_data->rx < drv_data->rx_end)) {
++
++ ioread32(reg + SSDR);
++ drv_data->rx += n_bytes;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u8_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++
++ if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ iowrite32(*(u8 *)(drv_data->tx), reg + SSDR);
++ dev_dbg(&drv_data->pdev->dev, "u8_write: %x", ((u8 *)drv_data->tx)[0]);
++ drv_data->tx++;
++
++ return 1;
++}
++
++static int u8_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++
++ while ((ioread32(reg + SSSR) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++
++ *(u8 *)(drv_data->rx) = ioread32(reg + SSDR);
++ drv_data->rx++;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u16_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ iowrite32(*(u16 *)(drv_data->tx), reg + SSDR);
++ drv_data->tx += 2;
++
++ return 1;
++}
++
++static int u16_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ dev_dbg(&drv_data->pdev->dev, "u16_read");
++
++ while ((ioread32(reg + SSSR) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++
++ *(u16 *)(drv_data->rx) = ioread32(reg + SSDR);
++ drv_data->rx += 2;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u32_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ dev_dbg(&drv_data->pdev->dev, "u32_write");
++
++ if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
++ || (drv_data->tx == drv_data->tx_end)) {
++ return 0;
++ }
++
++ iowrite32(*(u32 *)(drv_data->tx), reg + SSDR);
++ drv_data->tx += 4;
++
++ return 1;
++}
++
++static int u32_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++
++ while ((ioread32(reg + SSSR) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++
++ *(u32 *)(drv_data->rx) = ioread32(reg + SSDR);
++ drv_data->rx += 4;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++
++/* caller already set message->status; dma and pio irqs are blocked */
++static void giveback(struct driver_data *drv_data)
++{
++ struct spi_message *msg;
++
++ msg = drv_data->cur_msg;
++ msg->state = NULL;
++ if (msg->complete)
++ msg->complete(msg->context);
++}
++
++static void int_transfer_complete(struct driver_data *drv_data);
++
++static bool chan_filter(struct dma_chan *chan, void *param)
++{
++ struct driver_data *drv_data = (struct driver_data *)param;
++ bool ret = false;
++
++ if (!drv_data->dmac1)
++ return ret;
++
++ if (chan->device->dev == &drv_data->dmac1->dev)
++ ret = true;
++
++ return ret;
++}
++
++static void pw_spi_dma_done(void *arg)
++{
++ struct callback_param *param = arg;
++ struct driver_data *drv_data;
++ int *done;
++ void *reg;
++ u32 sscr1;
++
++ drv_data = (struct driver_data *)param->drv_data;
++ done = (int *)param->donep;
++ reg = drv_data->ioaddr;
++ *done = 1;
++
++ if (!drv_data->txdma_done || !drv_data->rxdma_done)
++ return;
++
++
++ /* Clear Status Register */
++ iowrite32(drv_data->clear_sr, reg + SSSR);
++
++ sscr1 = ioread32(reg + SSCR1);
++
++ /* Disable Triggers to DMA */
++ sscr1 &= ~drv_data->dma_cr1;
++
++ /* Disable Interrupt */
++ sscr1 &= ~drv_data->int_cr1;
++ iowrite32(sscr1, reg + SSCR1);
++
++ /* Stop getting Time Outs */
++ iowrite32(0, reg + SSTO);
++
++ /* Update total bytes transferred; return count is actual bytes read */
++ drv_data->cur_msg->actual_length = drv_data->len;
++
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++}
++
++static void pw_spi_dma_init(struct driver_data *drv_data)
++{
++ struct intel_mid_dma_slave *rxs, *txs;
++ dma_cap_mask_t mask;
++
++ /* Use DMAC1 */
++ drv_data->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
++ if (!drv_data->dmac1) {
++ dev_warn(&drv_data->pdev->dev, "Can't find DMAC1");
++ return;
++ }
++
++ /* 1. init rx channel */
++ rxs = &drv_data->dmas_rx;
++
++ rxs->dirn = DMA_FROM_DEVICE;
++ rxs->hs_mode = LNW_DMA_HW_HS;
++ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
++ rxs->src_msize = LNW_DMA_MSIZE_8;
++ rxs->dst_msize = LNW_DMA_MSIZE_8;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ drv_data->rxchan = dma_request_channel(mask, chan_filter, drv_data);
++ if (!drv_data->rxchan)
++ goto err_exit;
++
++ drv_data->rxchan->private = rxs;
++
++ /* 2. init tx channel */
++ txs = &drv_data->dmas_tx;
++
++ txs->dirn = DMA_TO_DEVICE;
++ txs->hs_mode = LNW_DMA_HW_HS;
++ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
++ txs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ txs->src_msize = LNW_DMA_MSIZE_8;
++ txs->dst_msize = LNW_DMA_MSIZE_8;
++
++
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ drv_data->txchan = dma_request_channel(mask, chan_filter, drv_data);
++ if (!drv_data->txchan)
++ goto free_rxchan;
++ drv_data->txchan->private = txs;
++
++ /* set the dma done bit to 1 */
++ drv_data->dma_inited = 1;
++ drv_data->txdma_done = 1;
++ drv_data->rxdma_done = 1;
++
++ drv_data->tx_param.drv_data = (void *)drv_data;
++ drv_data->tx_param.donep = &drv_data->txdma_done;
++ drv_data->rx_param.drv_data = (void *)drv_data;
++ drv_data->rx_param.donep = &drv_data->rxdma_done;
++ return;
++
++free_rxchan:
++ dev_err(&drv_data->pdev->dev, "DMA TX Channel Not available\n");
++ dma_release_channel(drv_data->rxchan);
++err_exit:
++ dev_err(&drv_data->pdev->dev, "DMA RX Channel Not available\n");
++ pci_dev_put(drv_data->dmac1);
++}
++
++static void pw_spi_dma_exit(struct driver_data *drv_data)
++{
++ dma_release_channel(drv_data->txchan);
++ dma_release_channel(drv_data->rxchan);
++ pci_dev_put(drv_data->dmac1);
++}
++
++static void dma_transfer(struct driver_data *drv_data)
++{
++ dma_addr_t ssdr_addr;
++ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
++ struct dma_chan *txchan, *rxchan;
++ enum dma_ctrl_flags flag;
++
++
++ /* get Data Read/Write address */
++ ssdr_addr = (dma_addr_t)(drv_data->paddr + 0x10);
++
++ if (drv_data->tx_dma)
++ drv_data->txdma_done = 0;
++
++ if (drv_data->rx_dma)
++ drv_data->rxdma_done = 0;
++
++ /* 2. get the DMA channels and prepare the RX dma transfer */
++ txchan = drv_data->txchan;
++ rxchan = drv_data->rxchan;
++
++ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
++
++ /*
++ * RRG
++ * (re)set rxchan->private->src_width based on drv_data->n_bytes
++ * (re)set txchan->private->dst_width based on drv_data->n_bytes
++ */
++ if (drv_data->rx_dma) {
++ rxdesc = rxchan->device->device_prep_dma_memcpy
++ (rxchan, /* DMA Channel */
++ drv_data->rx_dma, /* DAR */
++ ssdr_addr, /* SAR */
++ drv_data->len, /* Data Length */
++ flag); /* Flag */
++
++ rxdesc->callback = pw_spi_dma_done;
++ rxdesc->callback_param = &drv_data->rx_param;
++ }
++
++ /* 3. prepare and start the TX dma transfer */
++ if (drv_data->tx_dma) {
++ txdesc = txchan->device->device_prep_dma_memcpy
++ (txchan, /* DMA Channel */
++ ssdr_addr, /* DAR */
++ drv_data->tx_dma, /* SAR */
++ drv_data->len, /* Data Length */
++ flag); /* Flag */
++
++ txdesc->callback = pw_spi_dma_done;
++ txdesc->callback_param = &drv_data->tx_param;
++ }
++
++ if (rxdesc)
++ rxdesc->tx_submit(rxdesc);
++
++ if (txdesc)
++ txdesc->tx_submit(txdesc);
++}
++
++
++static int map_dma_buffers(struct driver_data *drv_data,
++ struct spi_message *msg,
++ struct spi_transfer *transfer)
++{
++
++ if (msg->is_dma_mapped) {
++ drv_data->rx_dma = transfer->rx_dma;
++ drv_data->tx_dma = transfer->tx_dma;
++ return 1;
++ }
++
++
++ if (drv_data->rx)
++ drv_data->rx_dma =
++ dma_map_single(&drv_data->pdev->dev, drv_data->rx,
++ drv_data->len, DMA_FROM_DEVICE);
++ if (drv_data->tx)
++ drv_data->tx_dma =
++ dma_map_single(&drv_data->pdev->dev, drv_data->tx,
++ drv_data->len, DMA_TO_DEVICE);
++ return 1;
++}
++
++static void set_dma_width(struct spi_device *spi, int bits)
++{
++ struct driver_data *drv_data = \
++ spi_master_get_devdata(spi->master);
++ struct intel_mid_dma_slave *rxs, *txs;
++ rxs = &drv_data->dmas_rx;
++ txs = &drv_data->dmas_tx;
++
++ if (bits <= 8) {
++ rxs->src_width = LNW_DMA_WIDTH_8BIT;
++ txs->dst_width = LNW_DMA_WIDTH_8BIT;
++ } else if (bits <= 16) {
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ } else if (bits <= 32) {
++ rxs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_32BIT;
++ }
++}
++
++static void int_error_stop(struct driver_data *drv_data, const char* msg)
++{
++ void *reg = drv_data->ioaddr;
++
++ /* Stop and reset SSP */
++ iowrite32(drv_data->clear_sr, reg + SSSR);
++ iowrite32(ioread32(reg + SSCR1) & ~drv_data->int_cr1, reg + SSCR1);
++ iowrite32(0, reg + SSTO);
++ flush(drv_data);
++
++ dev_err(&drv_data->pdev->dev, "%s", msg);
++
++}
++
++static void int_transfer_complete(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u32 sscr1;
++
++ dev_dbg(&drv_data->pdev->dev, "interrupt transfer complete\n");
++ /* Clear Status Register */
++ iowrite32(drv_data->clear_sr, reg + SSSR);
++
++ sscr1 = ioread32(reg + SSCR1);
++
++ /* Disable Triggers to DMA */
++ sscr1 &= ~drv_data->dma_cr1;
++
++ /* Disable Interrupt */
++ sscr1 &= ~drv_data->int_cr1;
++ iowrite32(sscr1, reg + SSCR1);
++
++ /* Stop getting Time Outs */
++ iowrite32(0, reg + SSTO);
++
++ /* Update total bytes transferred; return count is actual bytes read */
++ drv_data->cur_msg->actual_length += drv_data->len -
++ (drv_data->rx_end - drv_data->rx);
++
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++}
++
++static void transfer_complete(struct driver_data *drv_data)
++{
++ /* Update total bytes transferred; return count is actual bytes read */
++ drv_data->cur_msg->actual_length +=
++ drv_data->len - (drv_data->rx_end - drv_data->rx);
++
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++}
++
++static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u32 irq_mask = (ioread32(reg + SSCR1) & SSCR1_TIE) ?
++ drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
++
++ u32 irq_status = ioread32(reg + SSSR) & irq_mask;
++ if (irq_status & SSSR_ROR) {
++ int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
++ return IRQ_HANDLED;
++ }
++
++ if (irq_status & SSSR_TINT) {
++ iowrite32(SSSR_TINT, reg + SSSR);
++ if (drv_data->read(drv_data)) {
++ int_transfer_complete(drv_data);
++ return IRQ_HANDLED;
++ }
++ }
++
++ /* Drain rx fifo, Fill tx fifo and prevent overruns */
++ do {
++ if (drv_data->read(drv_data)) {
++ int_transfer_complete(drv_data);
++ return IRQ_HANDLED;
++ }
++ } while (drv_data->write(drv_data));
++
++ if (drv_data->read(drv_data)) {
++ int_transfer_complete(drv_data);
++ return IRQ_HANDLED;
++ }
++
++ if (drv_data->tx == drv_data->tx_end)
++ iowrite32(ioread32(reg + SSCR1) & ~SSCR1_TIE, reg + SSCR1);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t ssp_int(int irq, void *dev_id)
++{
++ struct driver_data *drv_data = dev_id;
++ void *reg = drv_data->ioaddr;
++ u32 status = ioread32(reg + SSSR);
++
++ if (status & (SSSR_ROR | SSSR_TUR)) {
++ dev_dbg(&drv_data->pdev->dev,
++ "--- SPI ROR or TUR Occred : SSSR=%x\n", status);
++
++ if (drv_data->dma_mapped) {
++ iowrite32(SSSR_ROR, reg + SSSR); /* Clear ROR */
++ iowrite32(SSSR_TUR, reg + SSSR); /* Clear TUR */
++ return IRQ_HANDLED;
++ }
++ }
++
++ /* just return if this is not our interrupt */
++ if (!(ioread32(reg + SSSR) & drv_data->mask_sr))
++ return IRQ_NONE;
++
++ if (!drv_data->cur_msg) {
++ iowrite32(ioread32(reg + SSCR0) & ~SSCR0_SSE, reg + SSCR0);
++ iowrite32(ioread32(reg + SSCR1) & ~drv_data->int_cr1,
++ reg + SSCR1);
++ iowrite32(drv_data->clear_sr, reg + SSSR);
++
++ /* Never fail */
++
++ return IRQ_HANDLED;
++ }
++
++ return drv_data->transfer_handler(drv_data);
++}
++
++static void poll_transfer(unsigned long data)
++{
++ struct driver_data *drv_data = (struct driver_data *)data;
++
++ if (drv_data->tx)
++ while (drv_data->tx != drv_data->tx_end) {
++ drv_data->write(drv_data);
++ drv_data->read(drv_data);
++ }
++
++ while (!drv_data->read(drv_data))
++ cpu_relax();
++
++ transfer_complete(drv_data);
++}
++
++/* FIXME: Document what this is doing - it isn't obvious */
++static unsigned int ssp_get_clk_div(int speed)
++{
++ return (25000000 / speed) << 10;
++}
++
++
++static int transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ struct driver_data *drv_data = \
++ spi_master_get_devdata(spi->master);
++ struct chip_data *chip = NULL;
++ struct spi_transfer *transfer = NULL;
++ void *reg = drv_data->ioaddr;
++ u8 bits = 0;
++ u32 clk_div;
++ u32 speed = 0;
++ u32 cr0;
++ u32 cr1;
++
++ msg->actual_length = 0;
++ msg->status = -EINPROGRESS;
++ drv_data->cur_msg = msg;
++ /* Initial message state*/
++
++ /* We handle only one transfer message since the protocol module has to
++ control the out of band signaling. */
++ transfer = list_entry(msg->transfers.next,
++ struct spi_transfer, transfer_list);
++
++ chip = spi_get_ctldata(msg->spi);
++
++ drv_data->busy = 1;
++
++ /* Check transfer length */
++ if (transfer->len > 8192) {
++ dev_warn(&drv_data->pdev->dev,
++ "transfer length greater than 8192\n");
++ msg->status = -EINVAL;
++ giveback(drv_data);
++ return 0;
++ }
++
++ /* Setup the transfer state based on the type of transfer */
++ flush(drv_data);
++ drv_data->n_bytes = chip->n_bytes;
++ drv_data->tx = (void *)transfer->tx_buf;
++ drv_data->tx_end = drv_data->tx + transfer->len;
++ drv_data->rx = transfer->rx_buf;
++ drv_data->rx_end = drv_data->rx + transfer->len;
++ drv_data->rx_dma = transfer->rx_dma;
++ drv_data->tx_dma = transfer->tx_dma;
++ drv_data->len = transfer->len;
++ drv_data->write = drv_data->tx ? chip->write : null_writer;
++ drv_data->read = drv_data->rx ? chip->read : null_reader;
++
++ /* Change speed and bit per word on a per transfer */
++ cr0 = chip->cr0;
++ if (transfer->speed_hz || transfer->bits_per_word) {
++
++ bits = chip->bits_per_word;
++ speed = chip->speed_hz;
++
++ if (transfer->speed_hz)
++ speed = transfer->speed_hz;
++
++ clk_div = ssp_get_clk_div(speed);
++
++ if (transfer->bits_per_word)
++ bits = transfer->bits_per_word;
++
++ if (bits <= 8) {
++ drv_data->n_bytes = 1;
++ drv_data->read = drv_data->read != null_reader ?
++ u8_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u8_writer : null_writer;
++ } else if (bits <= 16) {
++ drv_data->n_bytes = 2;
++ drv_data->read = drv_data->read != null_reader ?
++ u16_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u16_writer : null_writer;
++ } else if (bits <= 32) {
++ drv_data->n_bytes = 4;
++ drv_data->read = drv_data->read != null_reader ?
++ u32_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u32_writer : null_writer;
++ }
++ set_dma_width(spi, spi->bits_per_word);
++ cr0 = PNWL_CR0(clk_div, bits, spi, chip);
++ }
++
++ /* try to map dma buffer and do a dma transfer if successful */
++ drv_data->dma_mapped = 0;
++ if (chip->enable_dma && drv_data->len &&
++ (drv_data->len <= PNWL_MAX_DMA_LEN)) {
++
++ drv_data->dma_mapped = map_dma_buffers(drv_data, msg, transfer);
++ }
++ /* try to map dma buffer and do a dma transfer if successful */
++ /* Ensure we have the correct interrupt handler */
++ drv_data->transfer_handler = interrupt_transfer;
++ /* Clear status */
++ iowrite32(drv_data->clear_sr, reg + SSSR);
++
++ cr1 = chip->cr1;
++ iowrite32(chip->timeout, reg + SSTO);
++
++
++ if (drv_data->dma_mapped)
++ cr1 |= drv_data->dma_cr1;
++ else if (!chip->poll_mode)
++ cr1 |= drv_data->int_cr1;
++
++ dev_dbg(&drv_data->pdev->dev,
++ "%s drv_data:%p len:%d n_bytes:%d cr0:%x cr1:%x",
++ (drv_data->dma_mapped ? "DMA io:" :
++ (chip->poll_mode ? "Poll io:" : "Intr io:")),
++ drv_data, drv_data->len, drv_data->n_bytes, cr0, cr1);
++
++ /* see if we need to reload the config registers */
++ if ((ioread32(reg + SSCR0) != cr0)
++ || (ioread32(reg + SSCR0) & SSCR1_CHANGE_MASK) !=
++ (cr1 & SSCR1_CHANGE_MASK)) {
++
++ /* stop the SSP, and update the other bits */
++ iowrite32(cr0 & ~SSCR0_SSE, reg + SSCR0);
++ /* first set CR1 without interrupt and service enables */
++ iowrite32(cr1 & SSCR1_CHANGE_MASK, reg + SSCR1);
++ /* restart the SSP */
++ iowrite32(cr0, reg + SSCR0);
++
++ }
++
++ /* after chip select, release the data by enabling service
++ * requests and interrupts, without changing any mode bits */
++ iowrite32(cr1, reg + SSCR1);
++
++ if (drv_data->dma_mapped) {
++ /* transfer using DMA */
++ dma_transfer(drv_data);
++ } else if (chip->poll_mode) {
++ /* transfer using non interrupt polling */
++ tasklet_schedule(&drv_data->poll_transfer);
++ } else {
++ /* transfer using interrupt driven programmed I/O */
++ }
++
++ return 0;
++}
++
++static int setup(struct spi_device *spi)
++{
++ struct pnwl_spi3_chip *chip_info = NULL;
++ struct chip_data *chip;
++ uint tx_thres = TX_THRESH_DFLT;
++ uint rx_thres = RX_THRESH_DFLT;
++ u32 clk_div;
++
++
++ if (!spi->bits_per_word)
++ spi->bits_per_word = 8;
++
++ if ((spi->bits_per_word < 4 || spi->bits_per_word > 32))
++ return -EINVAL;
++
++ /* Only alloc on first setup */
++ chip = spi_get_ctldata(spi);
++ if (!chip) {
++ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
++ if (!chip) {
++ dev_err(&spi->dev,
++ "failed setup: can't allocate chip data\n");
++ return -ENOMEM;
++ }
++
++ chip->enable_dma = 1;
++ chip->poll_mode = 0;
++ chip->timeout = TIMOUT_DFLT;
++ }
++
++ /*
++ * protocol drivers may change the chip settings, so...
++ * if chip_info exists, use it
++ */
++ chip_info = spi->controller_data;
++
++ /* chip_info isn't always needed */
++
++ if (chip_info) {
++ if (chip_info->timeout)
++ chip->timeout = chip_info->timeout;
++
++ if (chip_info->tx_threshold)
++ tx_thres = chip_info->tx_threshold;
++ if (chip_info->rx_threshold)
++ rx_thres = chip_info->rx_threshold;
++
++ chip->enable_dma = chip_info->enable_dma;
++ chip->poll_mode = chip_info->poll_mode;
++ chip->enable_loopback = chip_info->enable_loopback;
++ }
++
++ if (spi->bits_per_word <= 8) {
++ chip->n_bytes = 1;
++ chip->read = u8_reader;
++ chip->write = u8_writer;
++
++ } else if (spi->bits_per_word <= 16) {
++ chip->n_bytes = 2;
++ chip->read = u16_reader;
++ chip->write = u16_writer;
++ } else if (spi->bits_per_word <= 32) {
++ chip->n_bytes = 4;
++ chip->read = u32_reader;
++ chip->write = u32_writer;
++ } else {
++ dev_err(&spi->dev, "invalid wordsize");
++ return -ENODEV;
++ }
++
++ set_dma_width(spi, spi->bits_per_word);
++ chip->speed_hz = spi->max_speed_hz;
++ clk_div = ssp_get_clk_div(chip->speed_hz);
++
++ chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
++ (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
++ chip->bits_per_word = spi->bits_per_word;
++
++ chip->cr0 = PNWL_CR0(clk_div, spi->bits_per_word, spi, chip);
++ chip->cr1 = PNWL_CR1(spi, chip);
++
++ dev_dbg(&spi->dev,
++ "setup bpw:%d, mode:%d dma:%d poll:%d loop:%d cr0:%x cr1:%x\n",
++ spi->bits_per_word, spi->mode & 0x3,
++ chip->enable_dma, chip->poll_mode, chip->enable_loopback,
++ chip->cr0, chip->cr1);
++
++ spi_set_ctldata(spi, chip);
++
++ return 0;
++}
++
++static void cleanup(struct spi_device *spi)
++{
++ struct chip_data *chip = spi_get_ctldata(spi);
++ kfree(chip);
++ spi_set_ctldata(spi, NULL);
++}
++
++static int pw_spi_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct device *dev = &pdev->dev;
++ struct spi_master *master;
++ struct driver_data *drv_data = 0;
++ int status = 0;
++ int pci_bar = 0;
++ void __iomem *syscfg_ioaddr;
++ unsigned long syscfg;
++
++ dev_info(&pdev->dev, "found PCI SSP controller(ID: %04x:%04x)",
++ pdev->vendor, pdev->device);
++
++ status = pci_enable_device(pdev);
++ if (status)
++ return status;
++
++ /* Allocate Slave with space for drv_data and null dma buffer */
++ master = spi_alloc_master(dev, sizeof(struct driver_data));
++
++ if (!master) {
++ dev_err(&pdev->dev, "cannot alloc spi_master");
++ status = -ENOMEM;
++ goto err_free_0;
++ }
++
++ drv_data = spi_master_get_devdata(master);
++ drv_data->master = master;
++
++ drv_data->pdev = pdev;
++
++ master->mode_bits = SPI_CPOL | SPI_CPHA;
++ master->bus_num = 3;
++ master->num_chipselect = 1;
++ master->cleanup = cleanup;
++ master->setup = setup;
++ master->transfer = transfer;
++
++ /* get basic io resource and map it */
++ drv_data->paddr = pci_resource_start(pdev, pci_bar);
++ drv_data->iolen = pci_resource_len(pdev, pci_bar);
++
++ status = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
++ if (status)
++ goto err_free_1;
++
++ drv_data->ioaddr = ioremap_nocache(drv_data->paddr, drv_data->iolen);
++ if (!drv_data->ioaddr) {
++ status = -ENOMEM;
++ goto err_free_2;
++ }
++ dev_dbg(&pdev->dev, "paddr = : %08lx", drv_data->paddr);
++ dev_dbg(&pdev->dev, "ioaddr = : %p", drv_data->ioaddr);
++ dev_dbg(&pdev->dev, "attaching to IRQ: %04x", pdev->irq);
++
++ /* Attach to IRQ */
++ drv_data->irq = pdev->irq;
++
++ status = request_irq(drv_data->irq, ssp_int, IRQF_SHARED,
++ "pw_spi3", drv_data);
++ if (status < 0) {
++ dev_err(&pdev->dev, "can not get IRQ %d", drv_data->irq);
++ goto err_free_3;
++ }
++
++ /* get base address of DMA selector. */
++ syscfg = drv_data->paddr - SYSCFG;
++ syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
++ if (!syscfg_ioaddr) {
++ status = -ENOMEM;
++ goto err_free_3;
++ }
++
++ iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
++
++ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
++ drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL;
++ drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
++ drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
++
++ tasklet_init(&drv_data->poll_transfer,
++ poll_transfer, (unsigned long)drv_data);
++
++ /* Load default SSP configuration */
++ dev_info(&pdev->dev, "setup default SSP configuration");
++ iowrite32(0, drv_data->ioaddr + SSCR0);
++ iowrite32(SSCR1_RxTresh(RX_THRESH_DFLT) |
++ SSCR1_TxTresh(TX_THRESH_DFLT),
++ drv_data->ioaddr + SSCR1);
++ iowrite32(SSCR0_Motorola | SSCR0_DataSize(8), drv_data->ioaddr + SSCR0);
++ iowrite32(0, drv_data->ioaddr + SSTO);
++ iowrite32(PNWL_SSPSP, drv_data->ioaddr + SSPSP);
++
++ /* Register with the SPI framework */
++ dev_info(&pdev->dev, "register with SPI framework");
++
++ status = spi_register_master(master);
++
++ if (status != 0) {
++ dev_err(&pdev->dev, "problem registering driver");
++ goto err_free_4;
++ }
++
++ drv_data->dma_inited = 0;
++ pw_spi_dma_init(drv_data);
++
++ pci_set_drvdata(pdev, drv_data);
++
++ return status;
++
++err_free_4:
++ free_irq(drv_data->irq, drv_data);
++err_free_3:
++ iounmap(drv_data->ioaddr);
++err_free_2:
++ pci_release_region(pdev, pci_bar);
++err_free_1:
++ spi_master_put(master);
++err_free_0:
++ pci_disable_device(pdev);
++
++ return status;
++}
++
++static void __devexit pw_spi_remove(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++
++ if (!drv_data)
++ return;
++
++ pci_set_drvdata(pdev, NULL);
++
++ pw_spi_dma_exit(drv_data);
++ if (drv_data->dmac1)
++ pci_dev_put(drv_data->dmac1);
++/* Release IRQ */
++ free_irq(drv_data->irq, drv_data);
++
++ iounmap(drv_data->ioaddr);
++
++ pci_release_region(pdev, 0);
++
++ /* disconnect from the SPI framework */
++ spi_unregister_master(drv_data->master);
++
++ pci_disable_device(pdev);
++
++ return;
++}
++
++#ifdef CONFIG_PM
++
++static int pw_spi_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ dev_dbg(&pdev->dev, "suspend");
++
++ tasklet_disable(&drv_data->poll_transfer);
++
++ return 0;
++}
++
++static int pw_spi_resume(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ dev_dbg(&pdev->dev, "resume");
++
++ tasklet_enable(&drv_data->poll_transfer);
++
++ return 0;
++}
++#else
++#define pw_spi_suspend NULL
++#define pw_spi_resume NULL
++#endif /* CONFIG_PM */
++
++
++static const struct pci_device_id pci_ids[] __devinitdata = {
++ { PCI_VDEVICE(INTEL, 0x0816) },
++ { }
++};
++
++static struct pci_driver pnwl_spi3_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pci_ids,
++ .probe = pw_spi_probe,
++ .remove = __devexit_p(pw_spi_remove),
++ .suspend = pw_spi_suspend,
++ .resume = pw_spi_resume,
++};
++
++static int __init pw_spi_init(void)
++{
++ return pci_register_driver(&pnwl_spi3_driver);
++}
++late_initcall(pw_spi_init);
++
++static void __exit pw_spi_exit(void)
++{
++ pci_unregister_driver(&pnwl_spi3_driver);
++}
++module_exit(pw_spi_exit);
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -27,11 +27,6 @@
+ #include <linux/mod_devicetable.h>
+ #include <linux/spi/spi.h>
+
+-
+-/* SPI bustype and spi_master class are registered after board init code
+- * provides the SPI device tables, ensuring that both are present by the
+- * time controller driver registration causes spi_devices to "enumerate".
+- */
+ static void spidev_release(struct device *dev)
+ {
+ struct spi_device *spi = to_spi_device(dev);
+@@ -196,11 +191,16 @@
+
+ struct boardinfo {
+ struct list_head list;
+- unsigned n_board_info;
+- struct spi_board_info board_info[0];
++ struct spi_board_info board_info;
+ };
+
+ static LIST_HEAD(board_list);
++static LIST_HEAD(spi_master_list);
++
++/*
++ * Used to protect add/del opertion for board_info list and
++ * spi_master list, and their matching process
++ */
+ static DEFINE_MUTEX(board_lock);
+
+ /**
+@@ -365,6 +365,20 @@
+ }
+ EXPORT_SYMBOL_GPL(spi_new_device);
+
++static void spi_match_master_to_boardinfo(struct spi_master *master,
++ struct spi_board_info *bi)
++{
++ struct spi_device *dev;
++
++ if (master->bus_num != bi->bus_num)
++ return;
++
++ dev = spi_new_device(master, bi);
++ if (!dev)
++ dev_err(master->dev.parent, "can't create new device for %s\n",
++ bi->modalias);
++}
++
+ /**
+ * spi_register_board_info - register SPI devices for a given board
+ * @info: array of chip descriptors
+@@ -387,43 +401,25 @@
+ int __init
+ spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ {
+- struct boardinfo *bi;
++ struct boardinfo *bi;
++ int i;
+
+- bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL);
++ bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
+ if (!bi)
+ return -ENOMEM;
+- bi->n_board_info = n;
+- memcpy(bi->board_info, info, n * sizeof *info);
+-
+- mutex_lock(&board_lock);
+- list_add_tail(&bi->list, &board_list);
+- mutex_unlock(&board_lock);
+- return 0;
+-}
+
+-/* FIXME someone should add support for a __setup("spi", ...) that
+- * creates board info from kernel command lines
+- */
++ for (i = 0; i < n; i++, bi++, info++) {
++ struct spi_master *master;
+
+-static void scan_boardinfo(struct spi_master *master)
+-{
+- struct boardinfo *bi;
+-
+- mutex_lock(&board_lock);
+- list_for_each_entry(bi, &board_list, list) {
+- struct spi_board_info *chip = bi->board_info;
+- unsigned n;
+-
+- for (n = bi->n_board_info; n > 0; n--, chip++) {
+- if (chip->bus_num != master->bus_num)
+- continue;
+- /* NOTE: this relies on spi_new_device to
+- * issue diagnostics when given bogus inputs
+- */
+- (void) spi_new_device(master, chip);
+- }
++ memcpy(&bi->board_info, info, sizeof(*info));
++ mutex_lock(&board_lock);
++ list_add_tail(&bi->list, &board_list);
++ list_for_each_entry(master, &spi_master_list, list)
++ spi_match_master_to_boardinfo(master, &bi->board_info);
++ mutex_unlock(&board_lock);
+ }
+- mutex_unlock(&board_lock);
++
++ return 0;
+ }
+
+ /*-------------------------------------------------------------------------*/
+@@ -506,6 +502,7 @@
+ {
+ static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
+ struct device *dev = master->dev.parent;
++ struct boardinfo *bi;
+ int status = -ENODEV;
+ int dynamic = 0;
+
+@@ -537,15 +534,18 @@
+ dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
+ dynamic ? " (dynamic)" : "");
+
+- /* populate children from any spi device tables */
+- scan_boardinfo(master);
++ mutex_lock(&board_lock);
++ list_add_tail(&master->list, &spi_master_list);
++ list_for_each_entry(bi, &board_list, list)
++ spi_match_master_to_boardinfo(master, &bi->board_info);
++ mutex_unlock(&board_lock);
++
+ status = 0;
+ done:
+ return status;
+ }
+ EXPORT_SYMBOL_GPL(spi_register_master);
+
+-
+ static int __unregister(struct device *dev, void *master_dev)
+ {
+ /* note: before about 2.6.14-rc1 this would corrupt memory: */
+@@ -568,6 +568,10 @@
+ {
+ int dummy;
+
++ mutex_lock(&board_lock);
++ list_del(&master->list);
++ mutex_unlock(&board_lock);
++
+ dummy = device_for_each_child(master->dev.parent, &master->dev,
+ __unregister);
+ device_unregister(&master->dev);
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -97,6 +97,8 @@
+
+ source "drivers/staging/serqt_usb2/Kconfig"
+
++source "drivers/staging/spectra/Kconfig"
++
+ source "drivers/staging/quatech_usb2/Kconfig"
+
+ source "drivers/staging/vt6655/Kconfig"
+@@ -147,5 +149,17 @@
+
+ source "drivers/staging/msm/Kconfig"
+
++source "drivers/staging/mrst/Kconfig"
++
++source "drivers/staging/mfld_ledflash/Kconfig"
++
++source "drivers/staging/mfld-sensors/Kconfig"
++
++source "drivers/staging/ice4100/Kconfig"
++
++source "drivers/staging/mrstci/Kconfig"
++
++source "drivers/staging/ifx-mux/Kconfig"
++
+ endif # !STAGING_EXCLUDE_BUILD
+ endif # STAGING
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -22,6 +22,7 @@
+ obj-$(CONFIG_RTL8192SU) += rtl8192su/
+ obj-$(CONFIG_RTL8192U) += rtl8192u/
+ obj-$(CONFIG_RTL8192E) += rtl8192e/
++obj-$(CONFIG_SPECTRA) += spectra/
+ obj-$(CONFIG_TRANZPORT) += frontier/
+ obj-$(CONFIG_DREAM) += dream/
+ obj-$(CONFIG_POHMELFS) += pohmelfs/
+@@ -52,5 +53,11 @@
+ obj-$(CONFIG_TI_ST) += ti-st/
+ obj-$(CONFIG_ADIS16255) += adis16255/
+ obj-$(CONFIG_FB_XGI) += xgifb/
+-obj-$(CONFIG_TOUCHSCREEN_MRSTOUCH) += mrst-touchscreen/
++obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += mrst-touchscreen/
+ obj-$(CONFIG_MSM_STAGING) += msm/
++obj-$(CONFIG_DRM_INTEL_MID) += mrst/
++obj-$(CONFIG_MFLD_LEDFLASH) += mfld_ledflash/
++obj-$(CONFIG_MFLD_SENSORS) += mfld-sensors/
++obj-$(CONFIG_X86_INTEL_CE) += ice4100/
++obj-$(CONFIG_VIDEO_MRSTCI) += mrstci/
++obj-$(CONFIG_N_IFX_MUX) += ifx-mux/
+--- /dev/null
++++ b/drivers/staging/ice4100/Kconfig
+@@ -0,0 +1,44 @@
++#
++# Intel CE device configuration
++#
++# This driver provides support for the
++# Intel CE media processors
++#
++menu "Intel CE Media Processor"
++
++config X86_INTEL_CE
++ bool "Intel CE Media Processor Drivers"
++ default N
++ help
++ Enable support for the Intel CE Media Processor functions
++
++choice
++ prompt "Build Intel CE kernel services as "
++ depends on X86_INTEL_CE
++ default CE_RELEASE
++
++config CE_RELEASE
++ bool "Release"
++ depends on X86_INTEL_CE
++ help
++ Build Intel CE kernel services as release
++
++config CE_DEBUG
++ bool "Debug"
++ depends on X86_INTEL_CE
++ help
++ Build Intel CE kernel services as debug
++
++endchoice
++
++if X86_INTEL_CE
++
++config CE_SGX_GFX
++ tristate "SGX Power VR graphics core driver"
++ help
++ Driver for 2D and 3D accelerated graphics core
++
++endif
++
++endmenu
++
+--- /dev/null
++++ b/drivers/staging/ice4100/Makefile
+@@ -0,0 +1,12 @@
++#obj-$(CONFIG_CE_OSAL) += osal/
++#obj-$(CONFIG_CE_PLATFORM_CONFIG) += platform_config/
++#obj-$(CONFIG_CE_PAL) += pal/
++#obj-$(CONFIG_CE_SYSTEM_UTILS) += system_utils/
++#obj-$(CONFIG_CE_IDL) += idl/
++#obj-$(CONFIG_CE_SVEN) += sven/
++#obj-$(CONFIG_CE_CLOCK_CONTROL) += clock_control/
++#obj-$(CONFIG_CE_POWER) += power/
++#obj-$(CONFIG_CE_GDL) += gdl/
++obj-$(CONFIG_CE_SGX_GFX) += sgx535/
++#obj-$(CONFIG_CE_MM) += mm/
++#obj-$(CONFIG_CE_SMD) += SMD/
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/Makefile
+@@ -0,0 +1,147 @@
++#
++# Makefile for the graphics hardware. This driver provides support for the
++# Power VR accelerated 2D and 3D cores
++
++
++PVR_BUILD_DIR ?= $(PWD)
++
++
++# Services module name
++PVRSRV_MODNAME = pvrsrvkm
++
++# FIXME do we need these
++# -DSUPPORT_SGX_HWPERF
++
++#-DPVR_LINUX_USING_WORKQUEUES \
++#-DPVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE \
++#-DPVR_LINUX_TIMERS_USING_WORKQUEUES \
++#-DSYS_CUSTOM_POWERLOCK_WRAP
++
++
++INCDIR=drivers/staging/ice4100/sgx535
++
++include_dirs := -I$(INCDIR)/ \
++ -I$(INCDIR)/include \
++ -I$(INCDIR)/include/env/linux \
++ -I$(INCDIR)/bridged \
++ -I$(INCDIR)/bridged/sgx \
++ -I$(INCDIR)/devices/sgx \
++ -I$(INCDIR)/system \
++ -I$(INCDIR)/system/include \
++ -I$(INCDIR)/system/sgx_intel_ce \
++ -I$(INCDIR)/hwdefs \
++ $(ALL_CFLAGS_kbuild)
++
++ccflags-y := $(include_dirs)
++
++# X86_INTEL_CE device information
++ccflags-y += -DSGX535 -DSUPPORT_SGX535 -DSGX_CORE_REV=121
++
++ccflags-y += \
++ -Werror \
++ -DLINUX \
++ -DPVR_BUILD_DATE="\"$(DATE)\"" \
++ -DPVR_BUILD_DIR="\"$(PVR_BUILD_DIR)\"" \
++ -DSERVICES4 \
++ -D_XOPEN_SOURCE=600 \
++ -DPVR2D_VALIDATE_INPUT_PARAMS \
++ -UDEBUG_LOG_PATH_TRUNCATE \
++ -DSUPPORT_SRVINIT \
++ -DSUPPORT_SGX \
++ -DSUPPORT_PERCONTEXT_PB \
++ -DSUPPORT_LINUX_X86_WRITECOMBINE \
++ -DTRANSFER_QUEUE \
++ -DSUPPORT_DRI_DRM \
++ -DSYS_USING_INTERRUPTS \
++ -DSUPPORT_HW_RECOVERY \
++ -DPVR_SECURE_HANDLES \
++ -DUSE_PTHREADS \
++ -DSUPPORT_SGX_EVENT_OBJECT \
++ -DSUPPORT_SGX_LOW_LATENCY_SCHEDULING \
++ -DSUPPORT_LINUX_X86_PAT \
++ -DPVR_PROC_USE_SEQ_FILE
++
++
++# Defaults for useful things on the Intel CE platform.
++ccflags-$(CONFIG_X86_INTEL_CE) += -DPVRSRV_MODNAME="\"$(PVRSRV_MODNAME)"\"
++
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_CHANGES=1
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_PAD=1
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_FLUSH=1
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_DISABLE_TEXTURE_STREAM=1
++#ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_GDL=1
++#ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_NO_PCI_ENUM=1
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_P_CHANGES=1
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_CACHED_CBUF=1
++ccflags-$(CONFIG_X86_INTEL_CE) += -DINTEL_D3_MEEGO=1
++
++# FIXME not sure if we need these
++#ccflags-$(CONFIG_CE_RELEASE) += -DUSE_FBDEV
++#ccflags-$(CONFIG_CE_RELEASE) += -DFBDEV_NAME="\"$(FBDEV_NAME)\""
++
++
++ccflags-$(CONFIG_CE_RELEASE) += -DBUILD="\"release\"" -DPVR_BUILD_TYPE="\"release\"" -DRELEASE
++
++ccflags-$(CONFIG_CE_DEBUG) += -DBUILD="\"debug\"" -DPVR_BUILD_TYPE="\"debug\"" -DDEBUG
++ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_LINUX_MEMORY_ALLOCATIONS
++ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_LINUX_MEM_AREAS
++ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_LINUX_MMAP_AREAS
++#ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_LINUX_XML_PROC_FILES
++#ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_LINUX_SLAB_ALLOCATIONS
++ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_BRIDGE_KM
++#ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_TRACE_BRIDGE_KM
++#ccflags-$(CONFIG_CE_DEBUG) += -DDEBUG_BRIDGE_KM_DISPATCH_TABLE
++#ccflags-$(CONFIG_CE_DEBUG) += -DSUPPORT_HW_RECOVERY
++
++COMMONDIR = common
++BRIDGEDIR = bridged
++SGXDIR = devices/sgx
++SYSDIR = system/sgx_intel_ce
++SYSCONFIGDIR = system/sgx_intel_ce
++
++pvrsrvkm-y := osfunc.o \
++ mmap.o \
++ module.o \
++ pdump.o \
++ proc.o \
++ pvr_bridge_k.o \
++ pvr_debug.o \
++ pvr_drm.o \
++ mm.o \
++ mutils.o \
++ event.o \
++ osperproc.o
++
++pvrsrvkm-y += $(COMMONDIR)/buffer_manager.o \
++ $(COMMONDIR)/devicemem.o \
++ $(COMMONDIR)/deviceclass.o \
++ $(COMMONDIR)/handle.o \
++ $(COMMONDIR)/hash.o \
++ $(COMMONDIR)/metrics.o \
++ $(COMMONDIR)/pvrsrv.o \
++ $(COMMONDIR)/queue.o \
++ $(COMMONDIR)/ra.o \
++ $(COMMONDIR)/resman.o \
++ $(COMMONDIR)/power.o \
++ $(COMMONDIR)/mem.o \
++ $(COMMONDIR)/pdump_common.o \
++ $(COMMONDIR)/perproc.o \
++ $(COMMONDIR)/lists.o
++
++pvrsrvkm-y += $(BRIDGEDIR)/bridged_support.o \
++ $(BRIDGEDIR)/bridged_pvr_bridge.o \
++ $(BRIDGEDIR)/sgx/bridged_sgx_bridge.o
++
++pvrsrvkm-y += $(SGXDIR)/sgxinit.o \
++ $(SGXDIR)/sgxpower.o \
++ $(SGXDIR)/sgxreset.o \
++ $(SGXDIR)/sgxutils.o \
++ $(SGXDIR)/sgxkick.o \
++ $(SGXDIR)/sgxtransfer.o \
++ $(SGXDIR)/mmu.o \
++ $(SGXDIR)/pb.o
++
++pvrsrvkm-y += $(SYSCONFIGDIR)/sysconfig.o \
++ $(SYSCONFIGDIR)/sysutils.o
++
++obj-$(CONFIG_CE_SGX_GFX) += pvrsrvkm.o
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/bridged/bridged_pvr_bridge.c
+@@ -0,0 +1,3419 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "msvdx_bridge.h"
++#endif
++#include "perproc.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#include "pdump_km.h"
++#include "syscommon.h"
++
++#include "bridged_pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "bridged_sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "bridged_vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "bridged_msvdx_bridge.h"
++#endif
++
++#include "env_data.h"
++
++#if defined (__linux__)
++#include "mmap.h"
++#endif
++
++#include "srvkm.h"
++
++#ifdef INTEL_D3_PM
++#include "graphics_pm.h"
++#endif
++
++PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++ g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++#if defined(PVR_SECURE_HANDLES)
++static int abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++static int *pbSharedDeviceMemHeap = abSharedDeviceMemHeap;
++#else
++static int *pbSharedDeviceMemHeap = (int *)NULL;
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA * pProcData,
++ u32 ui32BridgeID, void *pvDest, void *pvSrc, u32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes +=
++ ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes += ui32Size;
++ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA * pProcData,
++ u32 ui32BridgeID, void *pvDest, void *pvSrc, u32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes +=
++ ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes += ui32Size;
++ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#endif
++
++#ifdef INTEL_D3_CHANGES
++
++static int
++PVRSRVWaitForWriteOpSyncBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WAIT_FOR_WRITE_OP_SYNC * psIn,
++ PVRSRV_BRIDGE_OUT_WAIT_FOR_WRITE_OP_SYNC * psOut,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hKernelSyncInfo = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_WAIT_FOR_WRITE_OP_SYNC);
++
++ psOut->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hKernelSyncInfo,
++ psIn->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if (psOut->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psOut->eError =
++ PVRSRVWaitForWriteOpSyncKM((PVRSRV_KERNEL_SYNC_INFO *)
++ hKernelSyncInfo);
++
++ return 0;
++}
++
++#endif
++
++static int
++PVRSRVEnumerateDevicesBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_ENUMDEVICE * psEnumDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++ psEnumDeviceOUT->eError =
++ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++ psEnumDeviceOUT->asDeviceIdentifier);
++
++ return 0;
++}
++
++static int
++PVRSRVAcquireDeviceDataBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *
++ psAcquireDevInfoIN,
++ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *
++ psAcquireDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
++ psAcquireDevInfoIN->eDeviceType,
++ &hDevCookieInt);
++ if (psAcquireDevInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psAcquireDevInfoOUT->hDevCookie,
++ hDevCookieInt,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++static int
++PVRSRVCreateDeviceMemContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *
++ psCreateDevMemContextIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *
++ psCreateDevMemContextOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ u32 i;
++ int bCreated;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc,
++ PVRSRV_MAX_CLIENT_HEAPS + 1);
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psCreateDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psCreateDevMemContextOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
++ psPerProc,
++ &hDevMemContextInt,
++ &psCreateDevMemContextOUT->
++ ui32ClientHeapCount,
++ &psCreateDevMemContextOUT->
++ sHeapInfo[0], &bCreated,
++ pbSharedDeviceMemHeap);
++
++ if (psCreateDevMemContextOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ if (bCreated) {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ } else {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if (psCreateDevMemContextOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++ }
++
++ for (i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++) {
++ void *hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if (abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psCreateDevMemContextOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else {
++
++ if (bCreated) {
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psCreateDevMemContextOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDevMemContextOUT->
++ hDevMemContext);
++ } else {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psCreateDevMemContextOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if (psCreateDevMemContextOUT->eError !=
++ PVRSRV_OK) {
++ return 0;
++ }
++ }
++ }
++#endif
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap =
++ hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVDestroyDeviceMemContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *
++ psDestroyDevMemContextIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ int bDestroyed;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psDestroyDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt,
++ &bDestroyed);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ if (bDestroyed) {
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDevMemContextIN->
++ hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ }
++
++ return 0;
++}
++
++static int
++PVRSRVGetDeviceMemHeapInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *
++ psGetDevMemHeapInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *
++ psGetDevMemHeapInfoOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc,
++ PVRSRV_MAX_CLIENT_HEAPS);
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetDevMemHeapInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetDevMemHeapInfoIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
++ hDevMemContextInt,
++ &psGetDevMemHeapInfoOUT->
++ ui32ClientHeapCount,
++ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
++ pbSharedDeviceMemHeap);
++
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ for (i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++) {
++ void *hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if (abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else {
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->
++ sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++ }
++#endif
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap =
++ hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
++int
++PVRSRVAllocDeviceMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM * psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM * psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc);
++#else
++static int
++PVRSRVAllocDeviceMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM * psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM * psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ void *hDevCookieInt;
++ void *hDevMemHeapInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2);
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psAllocDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psAllocDeviceMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++ psAllocDeviceMemIN->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++ if (psAllocDeviceMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVAllocDeviceMemKM(hDevCookieInt,
++ psPerProc,
++ hDevMemHeapInt,
++ psAllocDeviceMemIN->ui32Attribs,
++ psAllocDeviceMemIN->ui32Size,
++ psAllocDeviceMemIN->ui32Alignment,
++ &psMemInfo, "");
++
++ if (psAllocDeviceMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ memset(&psAllocDeviceMemOUT->sClientMemInfo,
++ 0, sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++#if defined (__linux__)
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
++#else
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
++#endif
++ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize =
++ psMemInfo->ui32AllocSize;
++ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo =
++ psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) {
++
++ memset(&psAllocDeviceMemOUT->sClientSyncInfo,
++ 0, sizeof(PVRSRV_CLIENT_SYNC_INFO));
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = NULL;
++ psAllocDeviceMemOUT->psKernelSyncInfo = NULL;
++ } else {
++
++ psAllocDeviceMemOUT->psKernelSyncInfo =
++ psMemInfo->psKernelSyncInfo;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.
++ hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psAllocDeviceMemOUT->sClientMemInfo.
++ hKernelMemInfo);
++
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psAllocDeviceMemOUT->sClientSyncInfo;
++
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#endif
++
++static int
++PVRSRVFreeDeviceMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREEDEVICEMEM * psFreeDeviceMemIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *pvKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psFreeDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO *) pvKernelMemInfo;
++
++ if (psKernelMemInfo->ui32RefCount == 1) {
++ psRetOUT->eError =
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
++ } else {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVFreeDeviceMemBW: mappings are open "
++ "in other processes, deferring free!"));
++
++ psKernelMemInfo->bPendingFree = 1;
++ psRetOUT->eError = PVRSRV_OK;
++ }
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int
++PVRSRVExportDeviceMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM * psExportDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *
++ psExportDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EXPORT_DEVICEMEM);
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psExportDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVExportDeviceMemBW: can't find devcookie"));
++ return 0;
++ }
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo,
++ psExportDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
++ return 0;
++ }
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo, PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psExportDeviceMemOUT->eError == PVRSRV_OK) {
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVExportDeviceMemBW: allocation is already exported"));
++ return 0;
++ }
++
++ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->
++ hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
++ return 0;
++ }
++
++ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
++
++ return 0;
++}
++
++static int
++PVRSRVMapDeviceMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY * psMapDevMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY * psMapDevMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = NULL;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = NULL;
++ void *hDstDevMemHeap = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEV_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2);
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (void **)
++ &psSrcKernelMemInfo,
++ psMapDevMemIN->
++ hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psMapDevMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDstDevMemHeap,
++ psMapDevMemIN->
++ hDstDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if (psMapDevMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
++ psSrcKernelMemInfo,
++ hDstDevMemHeap,
++ &psDstKernelMemInfo);
++ if (psMapDevMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ memset(&psMapDevMemOUT->sDstClientMemInfo,
++ 0, sizeof(psMapDevMemOUT->sDstClientMemInfo));
++ memset(&psMapDevMemOUT->sDstClientSyncInfo,
++ 0, sizeof(psMapDevMemOUT->sDstClientSyncInfo));
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
++ psDstKernelMemInfo->pvLinAddrKM;
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
++ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr =
++ psDstKernelMemInfo->sDevVAddr;
++ psMapDevMemOUT->sDstClientMemInfo.ui32Flags =
++ psDstKernelMemInfo->ui32Flags;
++ psMapDevMemOUT->sDstClientMemInfo.ui32AllocSize =
++ psDstKernelMemInfo->ui32AllocSize;
++ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo =
++ psDstKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
++ psDstKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = NULL;
++ psMapDevMemOUT->psDstKernelSyncInfo = NULL;
++
++ if (psDstKernelMemInfo->psKernelSyncInfo) {
++ psMapDevMemOUT->psDstKernelSyncInfo =
++ psDstKernelMemInfo->psKernelSyncInfo;
++
++ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->
++ sWriteOpsCompleteDevVAddr;
++ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->
++ sReadOpsCompleteDevVAddr;
++
++ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->
++ sMemBlk.hOSMemHandle;
++
++ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo =
++ &psMapDevMemOUT->sDstClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientSyncInfo.
++ hKernelSyncInfo,
++ psDstKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevMemOUT->sDstClientMemInfo.
++ hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVUnmapDeviceMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY * psUnmapDevMemIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int
++PVRSRVMapDeviceClassMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *
++ psMapDevClassMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *
++ psMapDevClassMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ void *hOSMapInfo;
++ void *hDeviceClassBufferInt;
++ void *hDevMemContextInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2);
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ &hDeviceClassBufferInt, &eHandleType,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ if (psMapDevClassMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psMapDevClassMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psMapDevClassMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ switch (eHandleType) {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVMapDeviceClassMemoryKM(psPerProc,
++ hDevMemContextInt,
++ hDeviceClassBufferInt,
++ &psMemInfo, &hOSMapInfo);
++ if (psMapDevClassMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ memset(&psMapDevClassMemOUT->sClientMemInfo,
++ 0, sizeof(psMapDevClassMemOUT->sClientMemInfo));
++ memset(&psMapDevClassMemOUT->sClientSyncInfo,
++ 0, sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize =
++ psMemInfo->ui32AllocSize;
++ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo =
++ psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientMemInfo.
++ hKernelMemInfo, psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = NULL;
++ psMapDevClassMemOUT->psKernelSyncInfo = NULL;
++
++ if (psMemInfo->psKernelSyncInfo) {
++ psMapDevClassMemOUT->psKernelSyncInfo =
++ psMemInfo->psKernelSyncInfo;
++
++ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.
++ hOSMemHandle;
++
++ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psMapDevClassMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevClassMemOUT->sClientMemInfo.
++ hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVUnmapDeviceClassMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *
++ psUnmapDevClassMemIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW)
++int
++PVRSRVWrapExtMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY * psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY * psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc);
++#else
++static int
++PVRSRVWrapExtMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY * psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY * psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDevMemContextInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ u32 ui32PageTableSize = 0;
++ IMG_SYS_PHYADDR *psSysPAddr = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2);
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psWrapExtMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psWrapExtMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psWrapExtMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if (psWrapExtMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ if (psWrapExtMemIN->ui32NumPageTableEntries) {
++ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++ * sizeof(IMG_SYS_PHYADDR);
++
++ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (void **)&psSysPAddr, 0,
++ "Page Table"));
++
++ if (CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSysPAddr,
++ psWrapExtMemIN->psSysPAddr,
++ ui32PageTableSize) != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize,
++ (void *)psSysPAddr, 0);
++
++ return -EFAULT;
++ }
++ }
++
++ psWrapExtMemOUT->eError =
++ PVRSRVWrapExtMemoryKM(hDevCookieInt,
++ psPerProc,
++ hDevMemContextInt,
++ psWrapExtMemIN->ui32ByteSize,
++ psWrapExtMemIN->ui32PageOffset,
++ psWrapExtMemIN->bPhysContig,
++ psSysPAddr,
++ psWrapExtMemIN->pvLinAddr,
++ psWrapExtMemIN->ui32Flags, &psMemInfo);
++ if (psWrapExtMemIN->ui32NumPageTableEntries) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize, (void *)psSysPAddr, 0);
++
++ }
++ if (psWrapExtMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = psMemInfo->pvLinAddrKM;
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psWrapExtMemOUT->sClientMemInfo.ui32AllocSize =
++ psMemInfo->ui32AllocSize;
++ psWrapExtMemOUT->sClientMemInfo.hMappingInfo =
++ psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.
++ hOSMemHandle;
++
++ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psWrapExtMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ (void *)psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc);
++
++ return 0;
++}
++#endif
++
++static int
++PVRSRVUnwrapExtMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY * psUnwrapExtMemIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *) pvMemInfo);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static int
++PVRSRVGetFreeDeviceMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *
++ psGetFreeDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *
++ psGetFreeDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++ psGetFreeDeviceMemOUT->eError =
++ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++ &psGetFreeDeviceMemOUT->ui32Total,
++ &psGetFreeDeviceMemOUT->ui32Free,
++ &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++ return 0;
++}
++
++static int
++PVRMMapOSMemHandleToMMapDataBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *
++ psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *
++ psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapOSMemHandleToMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->ui32MMapOffset,
++ &psMMapDataOUT->ui32ByteOffset,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++static int
++PVRMMapReleaseMMapDataBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA * psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA * psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapReleaseMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->bMUnmap,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++#ifdef PDUMP
++static int
++PDumpIsCaptureFrameBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *
++ psPDumpIsCapturingOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int
++PDumpCommentBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COMMENT * psPDumpCommentIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++ psPDumpCommentIN->ui32Flags);
++ return 0;
++}
++
++static int
++PDumpSetFrameBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME * psPDumpSetFrameIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++
++ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++ return 0;
++}
++
++static int
++PDumpRegWithFlagsBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG * psPDumpRegDumpIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++
++ psRetOUT->eError =
++ PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpRegDumpIN->sHWReg.ui32RegVal,
++ psPDumpRegDumpIN->ui32Flags);
++
++ return 0;
++}
++
++static int
++PDumpRegPolBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_REGPOL * psPDumpRegPolIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++
++ psRetOUT->eError =
++ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,
++ psPDumpRegPolIN->sHWReg.ui32RegVal,
++ psPDumpRegPolIN->ui32Mask,
++ psPDumpRegPolIN->ui32Flags);
++
++ return 0;
++}
++
++static int
++PDumpMemPolBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL * psPDumpMemPolIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemPolIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *) pvMemInfo),
++ psPDumpMemPolIN->ui32Offset,
++ psPDumpMemPolIN->ui32Value,
++ psPDumpMemPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ psPDumpMemPolIN->ui32Flags, MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static int
++PDumpMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM * psPDumpMemDumpIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT, PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemDumpIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpMemDumpIN->pvAltLinAddr,
++ psPDumpMemDumpIN->pvLinAddr,
++ pvMemInfo,
++ psPDumpMemDumpIN->ui32Offset,
++ psPDumpMemDumpIN->ui32Bytes,
++ psPDumpMemDumpIN->ui32Flags, MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static int
++PDumpBitmapBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BITMAP * psPDumpBitmapIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++ psRetOUT->eError =
++ PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++ psPDumpBitmapIN->ui32FileOffset,
++ psPDumpBitmapIN->ui32Width,
++ psPDumpBitmapIN->ui32Height,
++ psPDumpBitmapIN->ui32StrideInBytes,
++ psPDumpBitmapIN->sDevBaseAddr,
++ psPDumpBitmapIN->ui32Size,
++ psPDumpBitmapIN->ePixelFormat,
++ psPDumpBitmapIN->eMemFormat,
++ psPDumpBitmapIN->ui32Flags);
++
++ return 0;
++}
++
++static int
++PDumpReadRegBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_READREG * psPDumpReadRegIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++
++ psRetOUT->eError =
++ PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++ psPDumpReadRegIN->ui32FileOffset,
++ psPDumpReadRegIN->ui32Address,
++ psPDumpReadRegIN->ui32Size,
++ psPDumpReadRegIN->ui32Flags);
++
++ return 0;
++}
++
++static int
++PDumpDriverInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO * psPDumpDriverInfoIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ u32 ui32PDumpFlags;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++
++ ui32PDumpFlags = 0;
++ if (psPDumpDriverInfoIN->bContinuous) {
++ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++ }
++ psRetOUT->eError =
++ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++ ui32PDumpFlags);
++
++ return 0;
++}
++
++static int
++PDumpSyncDumpBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC * psPDumpSyncDumpIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ u32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++ void *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncDumpIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpSyncDumpIN->pvAltLinAddr,
++ NULL,
++ ((PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo)->
++ psSyncDataMemInfoKM, psPDumpSyncDumpIN->ui32Offset,
++ ui32Bytes, 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo)->
++ psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static int
++PDumpSyncPolBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL * psPDumpSyncPolIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ u32 ui32Offset;
++ void *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncPolIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ if (psPDumpSyncPolIN->bIsRead) {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ } else {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo)->
++ psSyncDataMemInfoKM, ui32Offset,
++ psPDumpSyncPolIN->ui32Value,
++ psPDumpSyncPolIN->ui32Mask, PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)
++ pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static int
++PDumpPDRegBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG * psPDumpPDRegDumpIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++
++ PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpPDRegDumpIN->sHWReg.ui32RegVal, PDUMP_PD_UNIQUETAG);
++
++ psRetOUT->eError = PVRSRV_OK;
++ return 0;
++}
++
++static int
++PDumpCycleCountRegReadBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *
++ psPDumpCycleCountRegReadIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++
++ PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++ psPDumpCycleCountRegReadIN->bLastFrame);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int
++PDumpPDDevPAddrBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR * psPDumpPDDevPAddrIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++ psPDumpPDDevPAddrIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *) pvMemInfo,
++ psPDumpPDDevPAddrIN->ui32Offset,
++ psPDumpPDDevPAddrIN->sPDDevPAddr,
++ MAKEUNIQUETAG(pvMemInfo), PDUMP_PD_UNIQUETAG);
++ return 0;
++}
++
++static int
++PDumpStartInitPhaseBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
++
++ psRetOUT->eError = PDumpStartInitPhaseKM();
++
++ return 0;
++}
++
++static int
++PDumpStopInitPhaseBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
++
++ psRetOUT->eError = PDumpStopInitPhaseKM();
++
++ return 0;
++}
++
++#endif
++
++static int
++PVRSRVGetMiscInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_MISC_INFO * psGetMiscInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_MISC_INFO * psGetMiscInfoOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_ERROR eError;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++
++ memcpy(&psGetMiscInfoOUT->sMiscInfo,
++ &psGetMiscInfoIN->sMiscInfo, sizeof(PVRSRV_MISC_INFO));
++
++ if (((psGetMiscInfoIN->sMiscInfo.
++ ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0)
++ &&
++ ((psGetMiscInfoIN->sMiscInfo.
++ ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0)) {
++
++ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ if (((psGetMiscInfoIN->sMiscInfo.
++ ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0)
++ ||
++ ((psGetMiscInfoIN->sMiscInfo.
++ ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0)) {
++
++ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.
++ ui32MemoryStrLen,
++ (void **)&psGetMiscInfoOUT->
++ sMiscInfo.pszMemoryStr, 0,
++ "Output string buffer"));
++
++ psGetMiscInfoOUT->eError =
++ PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++
++ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++ psGetMiscInfoIN->sMiscInfo.
++ pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.
++ pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.
++ ui32MemoryStrLen);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (void *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = NULL;
++
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr =
++ psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
++
++ if (eError != PVRSRV_OK) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoBW Error copy to user"));
++ return -EFAULT;
++ }
++ } else {
++ psGetMiscInfoOUT->eError =
++ PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++ }
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ if (psGetMiscInfoIN->sMiscInfo.
++ ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) {
++ psGetMiscInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.
++ sGlobalEventObject.hOSEventKM,
++ psGetMiscInfoOUT->sMiscInfo.
++ sGlobalEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++ }
++
++ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle) {
++
++ psGetMiscInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.
++ hSOCTimerRegisterOSMemHandle,
++ psGetMiscInfoOUT->sMiscInfo.
++ hSOCTimerRegisterOSMemHandle,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++ }
++
++ return 0;
++}
++
++static int
++PVRSRVConnectBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES * psConnectServicesOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++
++ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++ psConnectServicesOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int
++PVRSRVDisconnectBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int
++PVRSRVEnumerateDCBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUMCLASS * psEnumDispClassIN,
++ PVRSRV_BRIDGE_OUT_ENUMCLASS * psEnumDispClassOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++ psEnumDispClassOUT->eError =
++ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++ &psEnumDispClassOUT->ui32NumDevices,
++ &psEnumDispClassOUT->ui32DevID[0]);
++
++ return 0;
++}
++
++static int
++PVRSRVOpenDCDeviceBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *
++ psOpenDispClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *
++ psOpenDispClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *hDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc,
++ 1);
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenDispClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psOpenDispClassDeviceOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVOpenDCDeviceKM(psPerProc,
++ psOpenDispClassDeviceIN->ui32DeviceID,
++ hDevCookieInt, &hDispClassInfoInt);
++
++ if (psOpenDispClassDeviceOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenDispClassDeviceOUT->hDeviceKM,
++ hDispClassInfoInt,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVCloseDCDeviceBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *
++ psCloseDispClassDeviceIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, 0);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ return 0;
++}
++
++static int
++PVRSRVEnumDCFormatsBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *
++ psEnumDispClassFormatsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *
++ psEnumDispClassFormatsOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassFormatsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psEnumDispClassFormatsOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++ &psEnumDispClassFormatsOUT->ui32Count,
++ psEnumDispClassFormatsOUT->asFormat);
++
++ return 0;
++}
++
++static int
++PVRSRVEnumDCDimsBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS * psEnumDispClassDimsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *
++ psEnumDispClassDimsOUT, PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassDimsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if (psEnumDispClassDimsOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
++ &psEnumDispClassDimsIN->sFormat,
++ &psEnumDispClassDimsOUT->ui32Count,
++ psEnumDispClassDimsOUT->asDim);
++
++ return 0;
++}
++
++static int
++PVRSRVGetDCSystemBufferBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *
++ psGetDispClassSysBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *
++ psGetDispClassSysBufferOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hBufferInt;
++ void *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc,
++ 1);
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psGetDispClassSysBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psGetDispClassSysBufferOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, &hBufferInt);
++
++ if (psGetDispClassSysBufferOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetDispClassSysBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)
++ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassSysBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVGetDCInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO * psGetDispClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO * psGetDispClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psGetDispClassInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVGetDCInfoKM(pvDispClassInfo,
++ &psGetDispClassInfoOUT->sDisplayInfo);
++
++ return 0;
++}
++
++static int
++PVRSRVCreateDCSwapChainBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *
++ psCreateDispClassSwapChainIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *
++ psCreateDispClassSwapChainOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *hSwapChainInt;
++ u32 ui32SwapChainID;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError,
++ psPerProc, 1);
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psCreateDispClassSwapChainIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if (psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
++ psCreateDispClassSwapChainIN->ui32Flags,
++ &psCreateDispClassSwapChainIN->
++ sDstSurfAttrib,
++ &psCreateDispClassSwapChainIN->
++ sSrcSurfAttrib,
++ psCreateDispClassSwapChainIN->
++ ui32BufferCount,
++ psCreateDispClassSwapChainIN->
++ ui32OEMFlags, &hSwapChainInt,
++ &ui32SwapChainID);
++
++ if (psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psCreateDispClassSwapChainOUT->hSwapChain,
++ hSwapChainInt,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDispClassSwapChainIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVDestroyDCSwapChainBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *
++ psDestroyDispClassSwapChainIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVDestroyDCSwapChainKM(pvSwapChain);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ return 0;
++}
++
++static int
++PVRSRVSetDCDstRectBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *
++ psSetDispClassDstRectIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassDstRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassDstRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstRectKM(pvDispClassInfo,
++ pvSwapChain, &psSetDispClassDstRectIN->sRect);
++
++ return 0;
++}
++
++static int
++PVRSRVSetDCSrcRectBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *
++ psSetDispClassSrcRectIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassSrcRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassSrcRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++ pvSwapChain, &psSetDispClassSrcRectIN->sRect);
++
++ return 0;
++}
++
++static int
++PVRSRVSetDCDstColourKeyBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *
++ psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static int
++PVRSRVSetDCSrcColourKeyBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *
++ psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static int
++PVRSRVGetDCBuffersBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *
++ psGetDispClassBuffersIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *
++ psGetDispClassBuffersOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc,
++ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassBuffersIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psGetDispClassBuffersIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVGetDCBuffersKM(pvDispClassInfo,
++ pvSwapChain,
++ &psGetDispClassBuffersOUT->ui32BufferCount,
++ psGetDispClassBuffersOUT->ahBuffer);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <=
++ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ for (i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++) {
++ void *hBufferExt;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hBufferExt,
++ psGetDispClassBuffersOUT->ahBuffer[i],
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)
++ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassBuffersIN->hSwapChain);
++
++ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVSwapToDCBufferBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *
++ psSwapDispClassBufferIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChainBuf;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChainBuf,
++ psSwapDispClassBufferIN->hBuffer,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ psSwapDispClassBufferIN->hDeviceKM);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
++ pvSwapChainBuf,
++ psSwapDispClassBufferIN->ui32SwapInterval,
++ psSwapDispClassBufferIN->hPrivateTag,
++ psSwapDispClassBufferIN->ui32ClipRectCount,
++ psSwapDispClassBufferIN->sClipRect);
++
++ return 0;
++}
++
++static int
++PVRSRVSwapToDCSystemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *
++ psSwapDispClassSystemIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvDispClassInfo;
++ void *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassSystemIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSwapDispClassSystemIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ psSwapDispClassSystemIN->hDeviceKM);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++ psRetOUT->eError = PVRSRVSwapToDCSystemKM(pvDispClassInfo, pvSwapChain);
++
++ return 0;
++}
++
++static int
++PVRSRVOpenBCDeviceBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *
++ psOpenBufferClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *
++ psOpenBufferClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevCookieInt;
++ void *hBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc,
++ 1);
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenBufferClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if (psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVOpenBCDeviceKM(psPerProc,
++ psOpenBufferClassDeviceIN->ui32DeviceID,
++ hDevCookieInt, &hBufClassInfo);
++ if (psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenBufferClassDeviceOUT->hDeviceKM,
++ hBufClassInfo,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVCloseBCDeviceBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *
++ psCloseBufferClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVCloseBCDeviceKM(pvBufClassInfo, 0);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseBufferClassDeviceIN->
++ hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++ return 0;
++}
++
++static int
++PVRSRVGetBCInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *
++ psGetBufferClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *
++ psGetBufferClassInfoOUT, PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if (psGetBufferClassInfoOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVGetBCInfoKM(pvBufClassInfo,
++ &psGetBufferClassInfoOUT->sBufferInfo);
++ return 0;
++}
++
++static int
++PVRSRVGetBCBufferBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *
++ psGetBufferClassBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *
++ psGetBufferClassBufferOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *pvBufClassInfo;
++ void *hBufferInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc,
++ 1);
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if (psGetBufferClassBufferOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVGetBCBufferKM(pvBufClassInfo,
++ psGetBufferClassBufferIN->ui32BufferIndex,
++ &hBufferInt);
++
++ if (psGetBufferClassBufferOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetBufferClassBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)
++ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetBufferClassBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError,
++ psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVAllocSharedSysMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *
++ psAllocSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *
++ psAllocSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1);
++
++ psAllocSharedSysMemOUT->eError =
++ PVRSRVAllocSharedSysMemoryKM(psPerProc,
++ psAllocSharedSysMemIN->ui32Flags,
++ psAllocSharedSysMemIN->ui32Size,
++ &psKernelMemInfo);
++ if (psAllocSharedSysMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ memset(&psAllocSharedSysMemOUT->sClientMemInfo,
++ 0, sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo =
++ psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocSharedSysMemOUT->sClientMemInfo.
++ hKernelMemInfo, psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVFreeSharedSysMemoryBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *
++ psFreeSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *
++ psFreeSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++ if (psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++ if (psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ return 0;
++}
++
++static int
++PVRSRVMapMemInfoMemBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM * psMapMemInfoMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM * psMapMemInfoMemOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ void *hParent;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2);
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ (void **)&psKernelMemInfo,
++ &eHandleType,
++ psMapMemInfoMemIN->hKernelMemInfo);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ switch (eHandleType) {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVGetParentHandle(psPerProc->psHandleBase,
++ &hParent,
++ psMapMemInfoMemIN->hKernelMemInfo,
++ eHandleType);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++ if (hParent == NULL) {
++ hParent = psMapMemInfoMemIN->hKernelMemInfo;
++ }
++
++ memset(&psMapMemInfoMemOUT->sClientMemInfo,
++ 0, sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++ psKernelMemInfo->sDevVAddr;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo =
++ psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientMemInfo.
++ hKernelMemInfo, psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, hParent);
++
++ if (psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) {
++
++ memset(&psMapMemInfoMemOUT->sClientSyncInfo,
++ 0, sizeof(PVRSRV_CLIENT_SYNC_INFO));
++ psMapMemInfoMemOUT->psKernelSyncInfo = NULL;
++ } else {
++
++ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++ psKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->
++ sWriteOpsCompleteDevVAddr;
++ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
++ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->
++ sMemBlk.hOSMemHandle;
++
++ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psMapMemInfoMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientSyncInfo.
++ hKernelSyncInfo,
++ psKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapMemInfoMemOUT->sClientMemInfo.
++ hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int
++MMU_GetPDDevPAddrBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR * psGetMmuPDDevPAddrIN,
++ PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *
++ psGetMmuPDDevPAddrOUT, PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hDevMemContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++ psGetMmuPDDevPAddrOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetMmuPDDevPAddrIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if (psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++ BM_GetDeviceNode(hDevMemContextInt)->
++ pfnMMUGetPDDevPAddr(BM_GetMMUContextFromMemContext
++ (hDevMemContextInt));
++ if (psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr) {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++ } else {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++ }
++ return 0;
++}
++
++int
++DummyBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ void *psBridgeOut, PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID,
++ g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID));
++#endif
++ return -ENOTTY;
++}
++
++void
++_SetDispatchTableEntry(u32 ui32Index,
++ const char *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const char *pszFunctionName)
++{
++ static u32 ui32PrevIndex = ~0UL;
++
++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index,
++ pszIOCName, pszFunctionName));
++#endif
++
++#if defined(INTEL_D3_P_CHANGES)
++ if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: ui32BridgeID = %d is out if range!", __FUNCTION__,
++ ui32Index));
++ return;
++ }
++#endif
++
++ if (g_BridgeDispatchTable[ui32Index].pfFunction) {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
++ __FUNCTION__, pszIOCName,
++ g_BridgeDispatchTable[ui32Index].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)",
++ __FUNCTION__, pszIOCName, ui32Index));
++#endif
++ PVR_DPF((PVR_DBG_ERROR,
++ "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++ if ((ui32PrevIndex != ~0UL) &&
++ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
++ (ui32Index <= ui32PrevIndex))) {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)",
++ __FUNCTION__, ui32PrevIndex,
++ g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++ ui32Index, pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
++ __FUNCTION__, (u32) ui32PrevIndex, (u32) ui32Index,
++ pszIOCName));
++#endif
++ PVR_DPF((PVR_DBG_ERROR,
++ "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++ ui32PrevIndex = ui32Index;
++}
++
++static int
++PVRSRVInitSrvConnectBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++
++ if (!OSProcHasPrivSrvInit()
++ || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING)
++ || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++#if defined (__linux__)
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, 1);
++#endif
++ psPerProc->bInitProcess = 1;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static int
++PVRSRVInitSrvDisconnectBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *
++ psInitSrvDisconnectIN,
++ PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++ if (!psPerProc->bInitProcess) {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psPerProc->bInitProcess = 0;
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, 0);
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, 1);
++
++ psRetOUT->eError =
++ PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL,
++ (((psRetOUT->eError == PVRSRV_OK)
++ && (psInitSrvDisconnectIN->bInitSuccesful)))
++ ? 1 : 0);
++
++ return 0;
++}
++
++static int
++PVRSRVEventObjectWaitBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *
++ psEventObjectWaitIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectWaitIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++ return 0;
++}
++
++static int
++PVRSRVEventObjectOpenBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *
++ psEventObjectOpenIN,
++ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *
++ psEventObjectOpenOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1);
++
++ psEventObjectOpenOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectOpenIN->sEventObject.hOSEventKM,
++ psEventObjectOpenIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++ if (psEventObjectOpenOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psEventObjectOpenOUT->eError =
++ OSEventObjectOpen(&psEventObjectOpenIN->sEventObject,
++ &psEventObjectOpenOUT->hOSEvent);
++
++ if (psEventObjectOpenOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psEventObjectOpenOUT->hOSEvent,
++ psEventObjectOpenOUT->hOSEvent,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static int
++PVRSRVEventObjectCloseBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *
++ psEventObjectCloseIN, PVRSRV_BRIDGE_RETURN * psRetOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectCloseIN->sEventObject.hOSEventKM,
++ psEventObjectCloseIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectCloseIN->
++ hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if (psRetOUT->eError != PVRSRV_OK) {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++ return 0;
++}
++
++typedef struct _MODIFY_SYNC_OP_INFO {
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ u32 ui32ModifyFlags;
++ u32 ui32ReadOpsPendingSnapShot;
++ u32 ui32WriteOpsPendingSnapShot;
++} MODIFY_SYNC_OP_INFO;
++
++static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(void *pvParam, u32 ui32Param)
++{
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ if (!pvParam) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ModifyCompleteSyncOpsCallBack: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psModSyncOpInfo = (MODIFY_SYNC_OP_INFO *) pvParam;
++ psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if ((psModSyncOpInfo->ui32WriteOpsPendingSnapShot ==
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
++ && (psModSyncOpInfo->ui32ReadOpsPendingSnapShot ==
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete)) {
++ goto OpFlushedComplete;
++ }
++ PVR_DPF((PVR_DBG_ERROR,
++ "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush"));
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush timed out"));
++
++ return PVRSRV_ERROR_TIMEOUT;
++
++OpFlushedComplete:
++
++ if (psModSyncOpInfo->
++ ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC) {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
++ }
++
++ if (psModSyncOpInfo->
++ ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC) {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO),
++ (void *)psModSyncOpInfo, 0);
++
++ PVRSRVCommandCompleteCallbacks();
++
++ return PVRSRV_OK;
++}
++
++static int
++PVRSRVModifyPendingSyncOpsBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *
++ psModifySyncOpsIN,
++ PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *
++ psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ void *hKernelSyncInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hKernelSyncInfo,
++ psModifySyncOpsIN->
++ hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) hKernelSyncInfo;
++
++ if (psKernelSyncInfo->hResItem != NULL) {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
++ return 0;
++ }
++
++ ASSIGN_AND_EXIT_ON_ERROR(psModifySyncOpsOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MODIFY_SYNC_OP_INFO),
++ (void **)&psModSyncOpInfo, 0,
++ "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
++
++ psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
++ psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
++ psModSyncOpInfo->ui32ReadOpsPendingSnapShot =
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModSyncOpInfo->ui32WriteOpsPendingSnapShot =
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ psModifySyncOpsOUT->ui32ReadOpsPending =
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModifySyncOpsOUT->ui32WriteOpsPending =
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psModifySyncOpsIN->
++ ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC) {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ if (psModifySyncOpsIN->
++ ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC) {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ psKernelSyncInfo->hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_MODIFY_SYNC_OPS, psModSyncOpInfo, 0,
++ ModifyCompleteSyncOpsCallBack);
++ return 0;
++}
++
++static int
++PVRSRVModifyCompleteSyncOpsBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *
++ psModifySyncOpsIN,
++ PVRSRV_BRIDGE_RETURN * psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)
++ &psKernelSyncInfo,
++ psModifySyncOpsIN->
++ hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ if (psKernelSyncInfo->hResItem == NULL) {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ eError = ResManFreeResByPtr(psKernelSyncInfo->hResItem);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVModifyCompleteSyncOpsBW: ResManFreeResByPtr failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo->hResItem = NULL;
++
++ return 0;
++}
++
++PVRSRV_ERROR CommonBridgeInit(void)
++{
++ u32 i;
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES,
++ PVRSRVEnumerateDevicesBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO,
++ PVRSRVAcquireDeviceDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT,
++ PVRSRVCreateDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT,
++ PVRSRVDestroyDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO,
++ PVRSRVGetDeviceMemHeapInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM,
++ PVRSRVAllocDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM,
++ PVRSRVFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM,
++ PVRSRVGetFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA,
++ PVRMMapOSMemHandleToMMapDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES,
++ PVRSRVDisconnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY,
++ PVRSRVMapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY,
++ PVRSRVUnmapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY,
++ PVRSRVMapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY,
++ PVRSRVUnmapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM,
++ PVRSRVExportDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA,
++ PVRMMapReleaseMMapDataBW);
++
++#ifdef INTEL_D3_CHANGES
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WAIT_FOR_WRITE_OP_SYNC,
++ PVRSRVWaitForWriteOpSyncBW);
++#endif
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
++
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
++#endif
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING,
++ PDumpIsCaptureFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO,
++ PDumpDriverInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR,
++ PDumpPDDevPAddrBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ,
++ PDumpCycleCountRegReadBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE,
++ PDumpStartInitPhaseBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE,
++ PDumpStopInitPhaseBW);
++#endif
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE,
++ PVRSRVOpenDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE,
++ PVRSRVCloseDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS,
++ PVRSRVEnumDCFormatsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS,
++ PVRSRVEnumDCDimsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER,
++ PVRSRVGetDCSystemBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO,
++ PVRSRVGetDCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN,
++ PVRSRVCreateDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN,
++ PVRSRVDestroyDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT,
++ PVRSRVSetDCDstRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT,
++ PVRSRVSetDCSrcRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY,
++ PVRSRVSetDCDstColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY,
++ PVRSRVSetDCSrcColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS,
++ PVRSRVGetDCBuffersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER,
++ PVRSRVSwapToDCBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM,
++ PVRSRVSwapToDCSystemBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE,
++ PVRSRVOpenBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE,
++ PVRSRVCloseBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO,
++ PVRSRVGetBCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER,
++ PVRSRVGetBCBufferBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY,
++ PVRSRVWrapExtMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY,
++ PVRSRVUnwrapExtMemoryBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM,
++ PVRSRVAllocSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM,
++ PVRSRVFreeSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM,
++ PVRSRVMapMemInfoMemBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR,
++ MMU_GetPDDevPAddrBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT,
++ PVRSRVInitSrvConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT,
++ PVRSRVInitSrvDisconnectBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT,
++ PVRSRVEventObjectWaitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN,
++ PVRSRVEventObjectOpenBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE,
++ PVRSRVEventObjectCloseBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS,
++ PVRSRVModifyPendingSyncOpsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS,
++ PVRSRVModifyCompleteSyncOpsBW);
++
++#if defined (SUPPORT_SGX)
++ SetSGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_VGX)
++ SetVGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_MSVDX)
++ SetMSVDXDispatchTableEntry();
++#endif
++
++ for (i = 0; i < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; i++) {
++ if (!g_BridgeDispatchTable[i].pfFunction) {
++ g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[i].pszIOCName =
++ "_PVRSRV_BRIDGE_DUMMY";
++ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++ g_BridgeDispatchTable[i].ui32CallCount = 0;
++ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
++{
++
++ void *psBridgeIn;
++ void *psBridgeOut;
++ BridgeWrapperFunction pfBridgeHandler;
++ u32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
++ int err = -EFAULT;
++
++#if defined(DEBUG_TRACE_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: %s",
++ __FUNCTION__, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
++ g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++
++ if (!psPerProc->bInitProcess) {
++ if (PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) {
++ if (!PVRSRVGetInitServerState
++ (PVRSRV_INIT_SERVER_SUCCESSFUL)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Initialisation failed. Driver unusable.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ } else {
++ if (PVRSRVGetInitServerState
++ (PVRSRV_INIT_SERVER_RUNNING)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Initialisation is in progress",
++ __FUNCTION__));
++ goto return_fault;
++ } else {
++
++ switch (ui32BridgeID) {
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Driver initialisation not completed yet.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ }
++ }
++#ifdef INTEL_D3_PM
++ graphics_pm_wait_not_suspended();
++#endif
++
++#if defined(__linux__)
++ {
++
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ psBridgeIn =
++ ((ENV_DATA *) psSysData->pvEnvSpecificData)->pvBridgeData;
++ psBridgeOut =
++ (void *)((unsigned char *) psBridgeIn +
++ PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++ if (psBridgePackageKM->ui32InBufferSize > 0) {
++ if (!OSAccessOK(PVR_VERIFY_READ,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Invalid pvParamIn pointer",
++ __FUNCTION__));
++ }
++
++ if (CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgeIn,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->
++ ui32InBufferSize)
++ != PVRSRV_OK) {
++ goto return_fault;
++ }
++ }
++ }
++#else
++ psBridgeIn = psBridgePackageKM->pvParamIn;
++ psBridgeOut = psBridgePackageKM->pvParamOut;
++#endif
++
++ if (ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: ui32BridgeID = %d is out if range!", __FUNCTION__,
++ ui32BridgeID));
++ goto return_fault;
++ }
++ pfBridgeHandler =
++ (BridgeWrapperFunction) g_BridgeDispatchTable[ui32BridgeID].
++ pfFunction;
++ err = pfBridgeHandler(ui32BridgeID, psBridgeIn, psBridgeOut, psPerProc);
++ if (err < 0) {
++ goto return_fault;
++ }
++
++#if defined(__linux__)
++
++ if (CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgePackageKM->pvParamOut,
++ psBridgeOut, psBridgePackageKM->ui32OutBufferSize)
++ != PVRSRV_OK) {
++ goto return_fault;
++ }
++#endif
++
++ err = 0;
++return_fault:
++ ReleaseHandleBatch(psPerProc);
++ return err;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/bridged/bridged_pvr_bridge.h
+@@ -0,0 +1,218 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#if defined(__linux__)
++#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X)
++#else
++#define PVRSRV_GET_BRIDGE_ID(X) (X - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
++#endif
++
++#ifndef ENOMEM
++#define ENOMEM 12
++#endif
++#ifndef EFAULT
++#define EFAULT 14
++#endif
++#ifndef ENOTTY
++#define ENOTTY 25
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ u32 ui32BridgeID,
++ void *pvDest,
++ void *pvSrc,
++ u32 ui32Size);
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ u32 ui32BridgeID,
++ void *pvDest,
++ void *pvSrc,
++ u32 ui32Size);
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++
++#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \
++ do \
++ { \
++ (error) = (src); \
++ if ((error) != PVRSRV_OK) \
++ { \
++ return (res); \
++ } \
++ } while (error != PVRSRV_OK)
++
++#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \
++ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0)
++
++#if defined (PVR_SECURE_HANDLES)
++static inline PVRSRV_ERROR
++NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(!psPerProc->bHandlesBatched);
++
++ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize);
++
++ if (eError == PVRSRV_OK)
++ {
++ psPerProc->bHandlesBatched = 1;
++ }
++
++ return eError;
++}
++
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize))
++
++static inline PVRSRV_ERROR
++CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_ASSERT(psPerProc->bHandlesBatched);
++
++ psPerProc->bHandlesBatched = 0;
++
++ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase);
++}
++
++
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc))
++
++static inline void
++ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->bHandlesBatched)
++ {
++ psPerProc->bHandlesBatched = 0;
++
++ PVRSRVReleaseHandleBatch(psPerProc->psHandleBase);
++ }
++}
++#else
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize)
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc)
++#define ReleaseHandleBatch(psPerProc)
++#endif
++
++int
++DummyBW(u32 ui32BridgeID,
++ void *psBridgeIn,
++ void *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef int (*BridgeWrapperFunction)(u32 ui32BridgeID,
++ void *psBridgeIn,
++ void *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++{
++ BridgeWrapperFunction pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ const char *pszIOCName;
++ const char *pszFunctionName;
++ u32 ui32CallCount;
++ u32 ui32CopyFromUserTotalBytes;
++ u32 ui32CopyToUserTotalBytes;
++#endif
++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
++
++#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX)
++ #if defined(SUPPORT_VGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD
++ #endif
++#else
++ #if defined(SUPPORT_SGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD
++ #endif
++#endif
++
++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++void
++_SetDispatchTableEntry(u32 ui32Index,
++ const char *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const char *pszFunctionName);
++
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
++
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) do {} while(0)
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
++{
++ u32 ui32IOCTLCount;
++ u32 ui32TotalCopyFromUserBytes;
++ u32 ui32TotalCopyToUserBytes;
++}PVRSRV_BRIDGE_GLOBAL_STATS;
++
++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++
++PVRSRV_ERROR CommonBridgeInit(void);
++
++int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/bridged/bridged_support.c
+@@ -0,0 +1,84 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#include "servicesint.h"
++#include "bridged_support.h"
++
++PVRSRV_ERROR
++PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE * psHandleBase, void **phOSMemHandle,
++ void *hMHandle)
++{
++ void *hMHandleInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt,
++ &eHandleType, hMHandle);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ switch (eHandleType) {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++ {
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) hMHandleInt;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SYNC_INFO:
++ {
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) hMHandleInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo =
++ psSyncInfo->psSyncDataMemInfoKM;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SOC_TIMER:
++ {
++ *phOSMemHandle = (void *)hMHandleInt;
++ break;
++ }
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++ *phOSMemHandle = (void *)hMHandleInt;
++ break;
++#endif
++ default:
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/bridged/bridged_support.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SUPPORT_H__
++#define __BRIDGED_SUPPORT_H__
++
++#include "handle.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, void * *phOSMemHandle, void * hMHandle);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/bridged/sgx/bridged_sgx_bridge.c
+@@ -0,0 +1,2497 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++
++
++#if defined(SUPPORT_SGX)
++
++#include "services.h"
++#include "pvr_debug.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "power.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++
++#if defined(SUPPORT_MSVDX)
++ #include "msvdx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++#include "bridged_sgx_bridge.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++static int
++SGXGetClientInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++ PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++ psGetClientInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psGetClientInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetClientInfoOUT->eError =
++ SGXGetClientInfoKM(hDevCookieInt,
++ &psGetClientInfoOUT->sClientInfo);
++ return 0;
++}
++
++static int
++SGXReleaseClientInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ void * hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psReleaseClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++ psDevInfo->ui32ClientRefCount--;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static int
++SGXGetInternalDevInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++ PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXGetInternalDevInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXGetInternalDevInfoOUT->eError =
++ SGXGetInternalDevInfoKM(hDevCookieInt,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static int
++SGXDoKickBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ u32 i;
++ int ret = 0;
++ u32 ui32NumDstSyncs;
++ void * *phKernelSyncInfoHandles = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psDoKickIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psDoKickIN->sCCBKick.hTA3DSyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.hTASyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTASyncInfo,
++ psDoKickIN->sCCBKick.hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.h3DSyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.h3DSyncInfo,
++ psDoKickIN->sCCBKick.h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ if (psDoKickIN->sCCBKick.ui32NumTASrcSyncs > SGX_MAX_TA_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTASrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumTADstSyncs > SGX_MAX_TA_DST_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTADstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DSrcSyncs > SGX_MAX_3D_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#else
++
++ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects;
++
++ if(ui32NumDstSyncs > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(void *)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:"
++ " Invalid pasDstSyncHandles pointer", __FUNCTION__));
++ return -EFAULT;
++ }
++
++ psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(void *),
++ (void **)&phKernelSyncInfoHandles,
++ 0,
++ "Array of Synchronization Info Handles");
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelSyncInfoHandles,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(void *)) != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++
++ psDoKickIN->sCCBKick.pahDstSyncHandles = phKernelSyncInfoHandles;
++
++ for( i = 0; i < ui32NumDstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXDoKickKM(hDevCookieInt,
++ &psDoKickIN->sCCBKick);
++
++PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT:
++
++ if(phKernelSyncInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(void *),
++ (void *)phKernelSyncInfoHandles,
++ 0);
++
++ }
++ return ret;
++}
++
++
++static int
++SGXScheduleProcessQueuesBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psScheduleProcQIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);
++
++ return 0;
++}
++
++
++#if defined(TRANSFER_QUEUE)
++static int
++SGXSubmitTransferBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_TRANSFER_SGX_KICK *psKick;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++
++ psKick = &psSubmitTransferIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmitTransferIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahDstSyncInfo[i],
++ psKick->ahDstSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static int
++SGXSubmit2DBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_2D_SGX_KICK *psKick;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmit2DIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psKick = &psSubmit2DIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->hDstSyncInfo != NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hDstSyncInfo,
++ psKick->hDstSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXSubmit2DKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++#endif
++#endif
++
++
++static int
++SGXGetMiscInfoBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ void * hDevMemContextInt = 0;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ SGX_MISC_INFO sMiscInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXGetMiscInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++
++ if (psSGXGetMiscInfoIN->psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD)
++ {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevMemContextInt,
++ psSGXGetMiscInfoIN->psMiscInfo->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ psDeviceNode = hDevCookieInt;
++ PVR_ASSERT(psDeviceNode != NULL);
++ if (psDeviceNode == NULL)
++ {
++ return -EFAULT;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++
++ psRetOUT->eError = CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ &sMiscInfo,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++
++#ifdef SUPPORT_SGX_HWPERF
++ if (sMiscInfo.eRequest == SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB)
++ {
++
++ void * pAllocated;
++ void * hAllocatedHandle;
++ void * psTmpUserData;
++ u32 allocatedSize;
++
++ allocatedSize = (u32)(sMiscInfo.uData.sRetrieveCB.ui32ArraySize * sizeof(PVRSRV_SGX_HWPERF_CBDATA));
++
++ ASSIGN_AND_EXIT_ON_ERROR(psRetOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ &pAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++
++ psTmpUserData = sMiscInfo.uData.sRetrieveCB.psHWPerfData;
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = pAllocated;
++
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, 0);
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ return 0;
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psTmpUserData,
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData,
++ allocatedSize);
++
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = psTmpUserData;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ }
++ else
++#endif
++ {
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ &sMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ return 0;
++}
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++static int
++SGXReadDiffCountersBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS);
++
++ psSGXReadDiffCountersOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadDiffCountersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadDiffCountersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXReadDiffCountersOUT->eError = SGXReadDiffCountersKM(hDevCookieInt,
++ psSGXReadDiffCountersIN->ui32Reg,
++ &psSGXReadDiffCountersOUT->ui32Old,
++ psSGXReadDiffCountersIN->bNew,
++ psSGXReadDiffCountersIN->ui32New,
++ psSGXReadDiffCountersIN->ui32NewReset,
++ psSGXReadDiffCountersIN->ui32CountersReg,
++ psSGXReadDiffCountersIN->ui32Reg2,
++ &psSGXReadDiffCountersOUT->bActive,
++ &psSGXReadDiffCountersOUT->sDiffs);
++
++ return 0;
++}
++
++
++static int
++SGXReadHWPerfCBBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated;
++ void * hAllocatedHandle;
++ u32 ui32AllocatedSize;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);
++
++ psSGXReadHWPerfCBOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
++ sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]);
++ ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ (void **)&psAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++ psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
++ psSGXReadHWPerfCBIN->ui32ArraySize,
++ psAllocated,
++ &psSGXReadHWPerfCBOUT->ui32DataCount,
++ &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
++ &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
++ if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
++ {
++ psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXReadHWPerfCBIN->psHWPerfCBData,
++ psAllocated,
++ ui32AllocatedSize);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ psAllocated,
++ hAllocatedHandle);
++
++
++ return 0;
++}
++#endif
++
++
++static int
++SGXDevInitPart2BW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_ERROR eError;
++ int bDissociateFailed = 0;
++ int bLookupFailed = 0;
++ int bReleaseFailed = 0;
++ void * hDummy;
++ u32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXDevInitPart2IN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ void * hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == NULL)
++ {
++ continue;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (int)(eError != PVRSRV_OK);
++ }
++
++ if (bLookupFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++
++
++ #if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ void * *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (*phHandle == NULL)
++ continue;
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ phHandle,
++ *phHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (int)(eError != PVRSRV_OK);
++ }
++
++ if (bReleaseFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ void * hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == NULL)
++ continue;
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++ bDissociateFailed |= (int)(eError != PVRSRV_OK);
++ }
++
++
++
++
++ if(bDissociateFailed)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ void * hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == NULL)
++ continue;
++
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle);
++
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
++
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ DevInitSGXPart2KM(psPerProc,
++ hDevCookieInt,
++ &psSGXDevInitPart2IN->sInitInfo);
++
++ return 0;
++}
++
++
++static int
++SGXRegisterHWRenderContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ void * hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWRenderContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWRenderContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWRenderContextInt =
++ SGXRegisterHWRenderContextKM(hDevCookieInt,
++ &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr,
++ psPerProc);
++
++ if (hHWRenderContextInt == NULL)
++ {
++ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWRenderContextOUT->hHWRenderContext,
++ hHWRenderContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static int
++SGXUnregisterHWRenderContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWRenderContextInt,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++
++ return 0;
++}
++
++
++static int
++SGXRegisterHWTransferContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ void * hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWTransferContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWTransferContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWTransferContextInt =
++ SGXRegisterHWTransferContextKM(hDevCookieInt,
++ &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr,
++ psPerProc);
++
++ if (hHWTransferContextInt == NULL)
++ {
++ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWTransferContextOUT->hHWTransferContext,
++ hHWTransferContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static int
++SGXUnregisterHWTransferContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWTransferContextInt,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static int
++SGXRegisterHW2DContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ void * hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHW2DContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHW2DContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHW2DContextInt =
++ SGXRegisterHW2DContextKM(hDevCookieInt,
++ &psSGXRegHW2DContextIN->sHW2DContextDevVAddr,
++ psPerProc);
++
++ if (hHW2DContextInt == NULL)
++ {
++ psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHW2DContextOUT->hHW2DContext,
++ hHW2DContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static int
++SGXUnregisterHW2DContextBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHW2DContextInt,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++
++ return 0;
++}
++#endif
++
++static int
++SGXFlushHWRenderTargetBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFlushHWRenderTargetIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++ return 0;
++}
++
++
++static int
++SGX2DQueryBlitsCompleteBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ void *pvSyncInfo;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ ps2DQueryBltsCompleteIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ ps2DQueryBltsCompleteIN->hKernSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ psRetOUT->eError =
++ SGX2DQueryBlitsCompleteKM(psDevInfo,
++ (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++ ps2DQueryBltsCompleteIN->bWaitForComplete);
++
++ return 0;
++}
++
++
++static int
++SGXFindSharedPBDescBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = NULL;
++ u32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++ u32 i;
++ void * hSharedPBDesc = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4);
++
++ psSGXFindSharedPBDescOUT->hSharedPBDesc = NULL;
++
++ psSGXFindSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFindSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ psSGXFindSharedPBDescOUT->eError =
++ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSGXFindSharedPBDescIN->bLockOnFailure,
++ psSGXFindSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ &psSharedPBDescKernelMemInfo,
++ &psHWPBDescKernelMemInfo,
++ &psBlockKernelMemInfo,
++ &psHWBlockKernelMemInfo,
++ &ppsSharedPBDescSubKernelMemInfos,
++ &ui32SharedPBDescSubKernelMemInfosCount);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++
++ if(hSharedPBDesc == NULL)
++ {
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
++ psSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
++ psHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
++ psBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle,
++ psHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
++ psSGXFindSharedPBDescOUT;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
++ ppsSharedPBDescSubKernelMemInfos[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
++ }
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++ if (ppsSharedPBDescSubKernelMemInfos != NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ NULL);
++ }
++
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ if(hSharedPBDesc != NULL)
++ {
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++ }
++ }
++ else
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc);
++ }
++
++ return 0;
++}
++
++
++static int
++SGXUnrefSharedPBDescBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hSharedPBDesc;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hSharedPBDesc,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++
++ return 0;
++}
++
++
++static int
++SGXAddSharedPBDescBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ u32 ui32KernelMemInfoHandlesCount =
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++ int ret = 0;
++ void * *phKernelMemInfoHandles = NULL;
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = NULL;
++ u32 i;
++ PVRSRV_ERROR eError;
++ void * hSharedPBDesc = NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1);
++
++ psSGXAddSharedPBDescOUT->hSharedPBDesc = NULL;
++
++ PVR_ASSERT(ui32KernelMemInfoHandlesCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXAddSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psSharedPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psHWPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(void *)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++ " Invalid phKernelMemInfos pointer", __FUNCTION__));
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(void *),
++ (void **)&phKernelMemInfoHandles,
++ 0,
++ "Array of Handles");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelMemInfoHandles,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(void *))
++ != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (void **)&ppsKernelMemInfos,
++ 0,
++ "Array of pointers to Kernel Memory Info");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (void **)&ppsKernelMemInfos[i],
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++ }
++
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++
++ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSharedPBDescKernelMemInfo,
++ psHWPBDescKernelMemInfo,
++ psBlockKernelMemInfo,
++ psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ ppsKernelMemInfos,
++ ui32KernelMemInfoHandlesCount);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++ if(phKernelMemInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(void *),
++ (void *)phKernelMemInfoHandles,
++ 0);
++ }
++ if(ppsKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (void *)ppsKernelMemInfos,
++ 0);
++ }
++
++ if(ret == 0 && eError == PVRSRV_OK)
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc);
++ }
++
++ psSGXAddSharedPBDescOUT->eError = eError;
++
++ return ret;
++}
++
++static int
++SGXGetInfoForSrvinitBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++ PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ void * hDevCookieInt;
++ u32 i;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXInfoForSrvinitIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ SGXGetInfoForSrvinitKM(hDevCookieInt,
++ &psSGXInfoForSrvinitOUT->sInitInfo);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ PVRSRV_HEAP_INFO *psHeapInfo;
++
++ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++ if (psHeapInfo->ui32HeapID != (u32)SGX_UNDEFINED_HEAP_ID)
++ {
++ void * hDevMemHeapExt;
++
++ if (psHeapInfo->hDevMemHeap != NULL)
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psHeapInfo->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++ }
++ }
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#if defined(PDUMP)
++static void
++DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray,
++ u32 ui32BufferArrayLength,
++ int bDumpPolls)
++{
++ u32 i;
++
++ for (i=0; i<ui32BufferArrayLength; i++)
++ {
++ PSGX_KICKTA_DUMP_BUFFER psBuffer;
++ PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM;
++ char * pszName;
++ void * hUniqueTag;
++ u32 ui32Offset;
++
++ psBuffer = &psBufferArray[i];
++ pszName = psBuffer->pszName;
++ if (!pszName)
++ {
++ pszName = "Nameless buffer";
++ }
++
++ hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
++
++ #if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo);
++ ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr;
++ #else
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM;
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ #endif
++
++ if (psBuffer->ui32Start <= psBuffer->ui32End)
++ {
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32SpaceUsed,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++
++ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32End - psBuffer->ui32Start,
++ 0,
++ hUniqueTag);
++ }
++ else
++ {
++
++
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ 0,
++ hUniqueTag);
++
++ if (bDumpPolls)
++ {
++ PDUMPMEMPOL(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ 0xFFFFFFFF,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ psBuffer->ui32End,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ 0,
++ psBuffer->ui32End,
++ 0,
++ hUniqueTag);
++ }
++ }
++}
++static int
++SGXPDumpBufferArrayBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++ void *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 i;
++ SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++ u32 ui32BufferArrayLength =
++ psPDumpBufferArrayIN->ui32BufferArrayLength;
++ u32 ui32BufferArraySize =
++ ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER);
++ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY);
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32BufferArraySize,
++ (void * *)&psKickTADumpBuffer, 0,
++ "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK)
++ {
++ return -ENOMEM;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psKickTADumpBuffer,
++ psPDumpBufferArrayIN->psBufferArray,
++ ui32BufferArraySize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++ return -EFAULT;
++ }
++
++ for(i = 0; i < ui32BufferArrayLength; i++)
++ {
++ void *pvMemInfo;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo;
++#endif
++ }
++
++ if(eError == PVRSRV_OK)
++ {
++ DumpBufferArray(psPerProc,
++ psKickTADumpBuffer,
++ ui32BufferArrayLength,
++ psPDumpBufferArrayIN->bDumpPolls);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++
++ return 0;
++}
++
++static int
++SGXPDump3DSignatureRegistersBW(u32 ui32BridgeID,
++						 PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN,
++						 PVRSRV_BRIDGE_RETURN *psRetOUT,
++						 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++	u32 ui32RegisterArraySize =  psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(u32);
++	u32 *pui32Registers = NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++	PVRSRV_SGXDEV_INFO	*psDevInfo = NULL;
++	void * hDevCookieInt;
++	u32 ui32RegVal = 0;
++#endif
++	int ret = -EFAULT;
++
++	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS);
++
++	if (ui32RegisterArraySize == 0)
++	{
++		goto ExitNoError;
++	}
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++	psRetOUT->eError =
++		PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++						   psPDump3DSignatureRegistersIN->hDevCookie,
++						   PVRSRV_HANDLE_TYPE_DEV_NODE);
++	if(psRetOUT->eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: hDevCookie lookup failed"));
++		goto Exit;
++	}
++
++	psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++	ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++	PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++						psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				  ui32RegisterArraySize,
++				  (void * *)&pui32Registers, 0,
++				  "Array of Registers") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed"));
++		goto Exit;
++	}
++
++	if(CopyFromUserWrapper(psPerProc,
++				   	ui32BridgeID,
++					pui32Registers,
++					psPDump3DSignatureRegistersIN->pui32Registers,
++					ui32RegisterArraySize) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed"));
++		goto Exit;
++	}
++
++	PDump3DSignatureRegisters(psPDump3DSignatureRegistersIN->ui32DumpFrameNum,
++					psPDump3DSignatureRegistersIN->bLastFrame,
++					pui32Registers,
++					psPDump3DSignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++	psRetOUT->eError = PVRSRV_OK;
++	ret = 0;
++Exit:
++	if (pui32Registers != NULL)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++	}
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++	if (psDevInfo != NULL)
++	{
++		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++		PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++							psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++	}
++#endif
++
++	return ret;
++}
++
++static int
++SGXPDumpCounterRegistersBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN,
++ void *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(u32);
++ u32 *pui32Registers = NULL;
++ int ret = -EFAULT;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (void * *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpCounterRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpCounterRegisters(psPDumpCounterRegistersIN->ui32DumpFrameNum,
++ psPDumpCounterRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpCounterRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++ return ret;
++}
++
++static int
++SGXPDumpTASignatureRegistersBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ u32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(u32);
++ u32 *pui32Registers = NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = NULL;
++ void * hDevCookieInt;
++ u32 ui32RegVal = 0;
++#endif
++ int ret = -EFAULT;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpTASignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (void * *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpTASignatureRegisters(psPDumpTASignatureRegistersIN->ui32DumpFrameNum,
++ psPDumpTASignatureRegistersIN->ui32TAKickCount,
++ psPDumpTASignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++static int
++SGXPDumpHWPerfCBBW(u32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if defined(SUPPORT_SGX_HWPERF)
++#if defined(__linux__)
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ void * hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PDumpHWPerfCBKM(&psPDumpHWPerfCBIN->szFileName[0],
++ psPDumpHWPerfCBIN->ui32FileOffset,
++ psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr,
++ psDevInfo->psKernelHWPerfCBMemInfo->ui32AllocSize,
++ psPDumpHWPerfCBIN->ui32PDumpFlags);
++
++ return 0;
++#else
++ return 0;
++#endif
++#else
++ return -EFAULT;
++#endif
++}
++
++#endif
++
++
++void SetSGXDispatchTableEntry(void)
++{
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++#if defined(TRANSFER_QUEUE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW);
++
++#if defined(SUPPORT_SGX_HWPERF)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS, SGXReadDiffCountersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW);
++#endif
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, SGXPDumpCounterRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW);
++#endif
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/bridged/sgx/bridged_sgx_bridge.h
+@@ -0,0 +1,42 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SGX_BRIDGE_H__
++#define __BRIDGED_SGX_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++void SetSGXDispatchTableEntry(void);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/buffer_manager.c
+@@ -0,0 +1,1803 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a,b) (a > b ? b : a)
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(BM_HEAP);
++DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++DECLARE_LIST_INSERT(BM_HEAP);
++DECLARE_LIST_REMOVE(BM_HEAP);
++
++/* FIXME MLD */
++typedef void *mldhack;
++
++DECLARE_LIST_FOR_EACH(BM_CONTEXT);
++DECLARE_LIST_ANY_VA(BM_CONTEXT);
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, mldhack, NULL);
++DECLARE_LIST_INSERT(BM_CONTEXT);
++DECLARE_LIST_REMOVE(BM_CONTEXT);
++
++static int
++ZeroBuf(BM_BUF * pBuf, BM_MAPPING * pMapping, u32 ui32Bytes, u32 ui32Flags);
++static void BM_FreeMemory(void *pH, u32 base, BM_MAPPING * psMapping);
++static int
++BM_ImportMemory(void *pH, u32 uSize,
++ u32 * pActualSize, BM_MAPPING ** ppsMapping,
++ u32 uFlags, u32 * pBase);
++
++static int
++DevMemoryAlloc(BM_CONTEXT * pBMContext,
++ BM_MAPPING * pMapping,
++ u32 * pActualSize,
++ u32 uFlags,
++ u32 dev_vaddr_alignment, IMG_DEV_VIRTADDR * pDevVAddr);
++static void DevMemoryFree(BM_MAPPING * pMapping);
++
++static int
++AllocMemory(BM_CONTEXT * pBMContext,
++ BM_HEAP * psBMHeap,
++ IMG_DEV_VIRTADDR * psDevVAddr,
++ u32 uSize, u32 uFlags, u32 uDevVAddrAlignment, BM_BUF * pBuf)
++{
++ BM_MAPPING *pMapping;
++ u32 uOffset;
++ RA_ARENA *pArena = NULL;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)",
++ pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf));
++
++ if (uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
++ if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
++ return 0;
++ }
++
++ if (psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++
++ pArena = psBMHeap->pImportArena;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocMemory: backing store type doesn't match heap"));
++ return 0;
++ }
++
++ if (!RA_Alloc(pArena,
++ uSize,
++ NULL,
++ (void *)&pMapping,
++ uFlags,
++ uDevVAddrAlignment,
++ 0, (u32 *) & (pBuf->DevVAddr.uiAddr))) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
++ return 0;
++ }
++
++ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++ if (pMapping->CpuVAddr) {
++ pBuf->CpuVAddr =
++ (void *)((u32) pMapping->CpuVAddr + uOffset);
++ } else {
++ pBuf->CpuVAddr = NULL;
++ }
++
++ if (uSize == pMapping->uSize) {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ } else {
++ if (OSGetSubMemHandle(pMapping->hOSMemHandle,
++ uOffset,
++ uSize,
++ psBMHeap->ui32Attribs,
++ &pBuf->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocMemory: OSGetSubMemHandle FAILED"));
++ return 0;
++ }
++ }
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
++
++ if (uFlags & PVRSRV_MEM_ZERO) {
++ if (!ZeroBuf
++ (pBuf, pMapping, uSize,
++ psBMHeap->ui32Attribs | uFlags)) {
++ return 0;
++ }
++ }
++ } else {
++ if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
++
++ PVR_ASSERT(psDevVAddr != NULL);
++
++ if (psDevVAddr == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocMemory: invalid parameter - psDevVAddr"));
++ return 0;
++ }
++
++ pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap->
++ pMMUHeap, uSize,
++ NULL,
++ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++ uDevVAddrAlignment,
++ psDevVAddr);
++
++ pBuf->DevVAddr = *psDevVAddr;
++ } else {
++
++ pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap->
++ pMMUHeap, uSize,
++ NULL, 0,
++ uDevVAddrAlignment,
++ &pBuf->DevVAddr);
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct _BM_MAPPING_),
++ (void **)&pMapping, NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocMemory: OSAllocMem(0x%x) FAILED"));
++ return 0;
++ }
++
++ pBuf->CpuVAddr = NULL;
++ pBuf->hOSMemHandle = 0;
++ pBuf->CpuPAddr.uiAddr = 0;
++
++ pMapping->CpuVAddr = NULL;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->DevVAddr = pBuf->DevVAddr;
++ pMapping->psSysAddr = NULL;
++ pMapping->uSize = uSize;
++ pMapping->hOSMemHandle = 0;
++ }
++
++ pMapping->pArena = pArena;
++
++ pMapping->pBMHeap = psBMHeap;
++ pBuf->pMapping = pMapping;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping,
++ pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr,
++ pMapping->CpuPAddr.uiAddr, pMapping->uSize));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf,
++ pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize));
++
++ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++ return 1;
++}
++
++static int
++WrapMemory(BM_HEAP * psBMHeap,
++ u32 uSize,
++ u32 ui32BaseOffset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR * psAddr,
++ void *pvCPUVAddr, u32 uFlags, BM_BUF * pBuf)
++{
++ IMG_DEV_VIRTADDR DevVAddr = { 0 };
++ BM_MAPPING *pMapping;
++ int bResult;
++ u32 const ui32PageSize = HOST_PAGESIZE();
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++ psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr,
++ uFlags, pBuf));
++
++ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++
++ PVR_ASSERT(((u32) pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++ uSize += ui32BaseOffset;
++ uSize = HOST_PAGEALIGN(uSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*pMapping),
++ (void **)&pMapping, NULL,
++ "Mocked-up mapping") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",
++ sizeof(*pMapping)));
++ return 0;
++ }
++
++ memset(pMapping, 0, sizeof(*pMapping));
++
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = psBMHeap;
++
++ if (pvCPUVAddr) {
++ pMapping->CpuVAddr = pvCPUVAddr;
++
++ if (bPhysContig) {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if (OSRegisterMem(pMapping->CpuPAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: OSRegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->CpuVAddr,
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++ } else {
++ pMapping->eCpuMemoryOrigin =
++ hm_wrapped_scatter_virtaddr;
++ pMapping->psSysAddr = psAddr;
++
++ if (OSRegisterDiscontigMem(pMapping->psSysAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: OSRegisterDiscontigMem CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ } else {
++ if (bPhysContig) {
++ pMapping->eCpuMemoryOrigin = hm_wrapped;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if (OSReservePhys(pMapping->CpuPAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ } else {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++ pMapping->psSysAddr = psAddr;
++
++ if (OSReserveDiscontigPhys(pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: OSReserveDiscontigPhys Size=%d) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++
++ bResult = DevMemoryAlloc(psBMHeap->pBMContext,
++ pMapping,
++ NULL,
++ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
++ &DevVAddr);
++ if (!bResult) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++ if (!ui32BaseOffset) {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ } else {
++ if (OSGetSubMemHandle(pMapping->hOSMemHandle,
++ ui32BaseOffset,
++ (pMapping->uSize - ui32BaseOffset),
++ uFlags,
++ &pBuf->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: OSGetSubMemHandle failed"));
++ goto fail_cleanup;
++ }
++ }
++ if (pMapping->CpuVAddr) {
++ pBuf->CpuVAddr =
++ (void *)((u32) pMapping->CpuVAddr + ui32BaseOffset);
++ }
++ pBuf->DevVAddr.uiAddr =
++ pMapping->DevVAddr.uiAddr +
++ IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);
++
++ if (uFlags & PVRSRV_MEM_ZERO) {
++ if (!ZeroBuf(pBuf, pMapping, uSize, uFlags)) {
++ return 0;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping, pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr,
++ pMapping->uSize));
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr, uSize));
++
++ pBuf->pMapping = pMapping;
++ return 1;
++
++fail_cleanup:
++ if (ui32BaseOffset && pBuf->hOSMemHandle) {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++ }
++
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) {
++ switch (pMapping->eCpuMemoryOrigin) {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize,
++ uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize,
++ uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
++ pMapping->uSize, uFlags,
++ pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
++ pMapping->uSize, uFlags,
++ pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, NULL);
++
++ return 0;
++}
++
++static int
++ZeroBuf(BM_BUF * pBuf, BM_MAPPING * pMapping, u32 ui32Bytes, u32 ui32Flags)
++{
++ void *pvCpuVAddr;
++
++ if (pBuf->CpuVAddr) {
++ memset(pBuf->CpuVAddr, 0, ui32Bytes);
++ } else if (pMapping->eCpuMemoryOrigin == hm_contiguous
++ || pMapping->eCpuMemoryOrigin == hm_wrapped) {
++ pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags &
++ PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++ if (!pvCpuVAddr) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
++ return 0;
++ }
++ memset(pvCpuVAddr, 0, ui32Bytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++ } else {
++ u32 ui32BytesRemaining = ui32Bytes;
++ u32 ui32CurrentOffset = 0;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ PVR_ASSERT(pBuf->hOSMemHandle);
++
++ while (ui32BytesRemaining > 0) {
++ u32 ui32BlockBytes =
++ MIN(ui32BytesRemaining, HOST_PAGESIZE());
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(pBuf->hOSMemHandle,
++ ui32CurrentOffset);
++
++ if (CpuPAddr.uiAddr & (HOST_PAGESIZE() - 1)) {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining,
++ HOST_PAGEALIGN(CpuPAddr.uiAddr) -
++ CpuPAddr.uiAddr);
++ }
++
++ pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags &
++ PVRSRV_HAP_CACHETYPE_MASK),
++ NULL);
++ if (!pvCpuVAddr) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
++ return 0;
++ }
++ memset(pvCpuVAddr, 0, ui32BlockBytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags &
++ PVRSRV_HAP_CACHETYPE_MASK), NULL);
++
++ ui32BytesRemaining -= ui32BlockBytes;
++ ui32CurrentOffset += ui32BlockBytes;
++ }
++ }
++
++ return 1;
++}
++
++static void FreeBuf(BM_BUF * pBuf, u32 ui32Flags)
++{
++ BM_MAPPING *pMapping;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr));
++
++ pMapping = pBuf->pMapping;
++
++ if (ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
++
++ if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
++ } else {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING),
++ pMapping, NULL);
++ pBuf->pMapping = NULL;
++ }
++ } else {
++
++ if (pBuf->hOSMemHandle != pMapping->hOSMemHandle) {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++ }
++ if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
++
++ RA_Free(pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr,
++ 0);
++ } else {
++ switch (pMapping->eCpuMemoryOrigin) {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr,
++ pMapping->uSize, ui32Flags,
++ pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr,
++ pMapping->uSize, ui32Flags,
++ pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
++ pMapping->uSize,
++ ui32Flags,
++ pMapping->
++ hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
++ pMapping->uSize,
++ ui32Flags,
++ pMapping->
++ hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ DevMemoryFree(pMapping);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING),
++ pMapping, NULL);
++ pBuf->pMapping = NULL;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, NULL);
++
++}
++
++PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP * psBMHeap)
++{
++ if (psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++ if (psBMHeap->pImportArena) {
++ int bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
++ if (!bTestDelete) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR BM_DestroyContext(void *hBMContext, int *pbDestroyed)
++{
++ PVRSRV_ERROR eError;
++ BM_CONTEXT *pBMContext = (BM_CONTEXT *) hBMContext;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyContext"));
++
++ if (pbDestroyed != NULL) {
++ *pbDestroyed = 0;
++ }
++
++ if (pBMContext == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBMContext->ui32RefCount--;
++
++ if (pBMContext->ui32RefCount > 0) {
++
++ return PVRSRV_OK;
++ }
++
++ eError =
++ List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap,
++ BM_DestroyContext_AnyCb);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
++#if 0
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyContext: Cleaning up with ResManFreeSpecial"));
++ if (ResManFreeSpecial() != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyContext: ResManFreeSpecial failed %d",
++ eError));
++ }
++#endif
++ return eError;
++ } else {
++
++ eError = ResManFreeResByPtr(pBMContext->hResItem);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyContext: ResManFreeResByPtr failed %d",
++ eError));
++ return eError;
++ }
++
++ if (pbDestroyed != NULL) {
++ *pbDestroyed = 1;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP * psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE *);
++
++ if (psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++ if (psBMHeap->pImportArena) {
++ RA_Delete(psBMHeap->pImportArena);
++ }
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyContext: backing store type unsupported"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, NULL);
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR BM_DestroyContextCallBack(void *pvParam, u32 ui32Param)
++{
++ BM_CONTEXT *pBMContext = pvParam;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if (List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
++ BM_DestroyContextCallBack_AnyVaCb,
++ psDeviceNode) != PVRSRV_OK) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (pBMContext->psMMUContext) {
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++ }
++
++ if (pBMContext->pBufferHash) {
++ HASH_Delete(pBMContext->pBufferHash);
++ }
++
++ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext) {
++
++ psDeviceNode->sDevMemoryInfo.pBMKernelContext = NULL;
++ } else {
++
++ List_BM_CONTEXT_Remove(pBMContext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext,
++ NULL);
++
++ return PVRSRV_OK;
++}
++
++void *BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT * pBMContext, va_list va)
++{
++ PRESMAN_CONTEXT hResManContext;
++ hResManContext = va_arg(va, PRESMAN_CONTEXT);
++ if (ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) ==
++ PVRSRV_OK) {
++
++ pBMContext->ui32RefCount++;
++ return pBMContext;
++ }
++ return NULL;
++}
++
++void BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP * psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_CONTEXT *pBMContext;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE *);
++ pBMContext = va_arg(va, BM_CONTEXT *);
++ switch (psBMHeap->sDevArena.DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext,
++ psBMHeap->pMMUHeap);
++ break;
++ }
++ }
++}
++
++void *BM_CreateContext(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_DEV_PHYADDR * psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA * psPerProc, int *pbCreated)
++{
++ BM_CONTEXT *pBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ int bKernelContext;
++ PRESMAN_CONTEXT hResManContext;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));
++
++ if (psPerProc == NULL) {
++ bKernelContext = 1;
++ hResManContext = psDeviceNode->hResManContext;
++ } else {
++ bKernelContext = 0;
++ hResManContext = psPerProc->hResManContext;
++ }
++
++ if (pbCreated != NULL) {
++ *pbCreated = 0;
++ }
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ if (bKernelContext == 0) {
++ void *res =
++ (void *)List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
++ BM_CreateContext_IncRefCount_AnyVaCb,
++ hResManContext);
++ if (res) {
++ return res;
++ }
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(struct _BM_CONTEXT_),
++ (void **)&pBMContext, NULL,
++ "Buffer Manager Context") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
++ return NULL;
++ }
++ memset(pBMContext, 0, sizeof(BM_CONTEXT));
++
++ pBMContext->psDeviceNode = psDeviceNode;
++
++ pBMContext->pBufferHash = HASH_Create(32);
++ if (pBMContext->pBufferHash == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_CreateContext: HASH_Create failed"));
++ goto cleanup;
++ }
++
++ if (psDeviceNode->pfnMMUInitialise(psDeviceNode,
++ &pBMContext->psMMUContext,
++ psPDDevPAddr) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_CreateContext: MMUInitialise failed"));
++ goto cleanup;
++ }
++
++ if (bKernelContext) {
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == NULL);
++ psDevMemoryInfo->pBMKernelContext = pBMContext;
++ } else {
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++
++ if (psDevMemoryInfo->pBMKernelContext == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
++ goto cleanup;
++ }
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++ pBMContext->psBMSharedHeap =
++ psDevMemoryInfo->pBMKernelContext->psBMHeap;
++
++ List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
++ BM_CreateContext_InsertHeap_ForEachVaCb,
++ psDeviceNode, pBMContext);
++
++ List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext,
++ pBMContext);
++ }
++
++ pBMContext->ui32RefCount++;
++
++ pBMContext->hResItem = ResManRegisterRes(hResManContext,
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ pBMContext,
++ 0, BM_DestroyContextCallBack);
++ if (pBMContext->hResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_CreateContext: ResManRegisterRes failed"));
++ goto cleanup;
++ }
++
++ if (pbCreated != NULL) {
++ *pbCreated = 1;
++ }
++ return (void *)pBMContext;
++
++cleanup:
++ (void)BM_DestroyContextCallBack(pBMContext, 0);
++
++ return NULL;
++}
++
++void *BM_CreateHeap_AnyVaCb(BM_HEAP * psBMHeap, va_list va)
++{
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
++ psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO *);
++ if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID) {
++
++ return psBMHeap;
++ } else {
++ return NULL;
++ }
++}
++
++void *BM_CreateHeap(void *hBMContext,
++ DEVICE_MEMORY_HEAP_INFO * psDevMemHeapInfo)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT *) hBMContext;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_HEAP *psBMHeap;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
++
++ if (!pBMContext) {
++ return NULL;
++ }
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if (pBMContext->ui32RefCount > 0) {
++ psBMHeap = (BM_HEAP *) List_BM_HEAP_Any_va(pBMContext->psBMHeap,
++ BM_CreateHeap_AnyVaCb,
++ psDevMemHeapInfo);
++
++ if (psBMHeap) {
++ return psBMHeap;
++ }
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BM_HEAP),
++ (void **)&psBMHeap, NULL,
++ "Buffer Manager Heap") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
++ return NULL;
++ }
++
++ memset(psBMHeap, 0, sizeof(BM_HEAP));
++
++ psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++ psBMHeap->sDevArena.ui32DataPageSize =
++ psDevMemHeapInfo->ui32DataPageSize;
++ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++ psBMHeap->pBMContext = pBMContext;
++
++ psBMHeap->pMMUHeap =
++ psDeviceNode->pfnMMUCreate(pBMContext->psMMUContext,
++ &psBMHeap->sDevArena,
++ &psBMHeap->pVMArena);
++ if (!psBMHeap->pMMUHeap) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
++ goto ErrorExit;
++ }
++
++ psBMHeap->pImportArena = RA_Create(psDevMemHeapInfo->pszBSName,
++ 0, 0, NULL,
++ psBMHeap->sDevArena.ui32DataPageSize,
++ BM_ImportMemory,
++ BM_FreeMemory, NULL, psBMHeap);
++ if (psBMHeap->pImportArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
++ goto ErrorExit;
++ }
++
++ if (psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
++
++ psBMHeap->pLocalDevMemArena =
++ psDevMemHeapInfo->psLocalDevMemArena;
++ if (psBMHeap->pLocalDevMemArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_CreateHeap: LocalDevMemArena null"));
++ goto ErrorExit;
++ }
++ }
++
++ List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);
++
++ return (void *)psBMHeap;
++
++ErrorExit:
++
++ if (psBMHeap->pMMUHeap != NULL) {
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, NULL);
++
++ return NULL;
++}
++
++void BM_DestroyHeap(void *hDevMemHeap)
++{
++ BM_HEAP *psBMHeap = (BM_HEAP *) hDevMemHeap;
++ PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
++
++ if (psBMHeap) {
++
++ if (psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
++ if (psBMHeap->pImportArena) {
++ RA_Delete(psBMHeap->pImportArena);
++ }
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_DestroyHeap: backing store type unsupported"));
++ return;
++ }
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++ List_BM_HEAP_Remove(psBMHeap);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap,
++ NULL);
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));
++ }
++}
++
++int BM_Reinitialise(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
++
++ return 1;
++}
++
++int
++BM_Alloc(void *hDevMemHeap,
++ IMG_DEV_VIRTADDR * psDevVAddr,
++ u32 uSize, u32 * pui32Flags, u32 uDevVAddrAlignment, BM_HANDLE * phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *pBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ u32 uFlags;
++
++ if (pui32Flags == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ uFlags = *pui32Flags;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++ uSize, uFlags, uDevVAddrAlignment));
++
++ SysAcquireData(&psSysData);
++
++ psBMHeap = (BM_HEAP *) hDevMemHeap;
++ pBMContext = psBMHeap->pBMContext;
++
++ if (uDevVAddrAlignment == 0) {
++ uDevVAddrAlignment = 1;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BM_BUF),
++ (void **)&pBuf, NULL,
++ "Buffer Manager buffer") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
++ return 0;
++ }
++ memset(pBuf, 0, sizeof(BM_BUF));
++
++ if (AllocMemory(pBMContext,
++ psBMHeap,
++ psDevVAddr,
++ uSize, uFlags, uDevVAddrAlignment, pBuf) != 1) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, NULL);
++
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++ uSize, uFlags, pBuf));
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE) pBuf;
++#if defined(INTEL_D3_P_CHANGES)
++ if (pui32Flags) {
++#endif
++ *pui32Flags = uFlags | psBMHeap->ui32Attribs;
++
++ if (uFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ *pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++#if defined(INTEL_D3_P_CHANGES)
++ }
++#endif
++
++ return 1;
++}
++
++#if defined(PVR_LMA)
++static int
++ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_SYS_PHYADDR * psSysPAddr, u32 ui32PageCount,
++ u32 ui32PageSize)
++{
++ u32 i;
++
++ for (i = 0; i < ui32PageCount; i++) {
++ IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i];
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr
++ (psDeviceNode->sDevId.eDeviceType, sStartSysPAddr)) {
++ return 0;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;
++
++ if (!SysVerifySysPAddrToDevPAddr
++ (psDeviceNode->sDevId.eDeviceType, sEndSysPAddr)) {
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++static int
++ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_SYS_PHYADDR sStartSysPAddr, u32 ui32Range)
++{
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr
++ (psDeviceNode->sDevId.eDeviceType, sStartSysPAddr)) {
++ return 0;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;
++
++ if (!SysVerifySysPAddrToDevPAddr
++ (psDeviceNode->sDevId.eDeviceType, sEndSysPAddr)) {
++ return 0;
++ }
++
++ return 1;
++}
++
++#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))
++
++#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))
++
++#endif
++
++int
++BM_Wrap(void *hDevMemHeap,
++ u32 ui32Size,
++ u32 ui32Offset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR * psSysAddr,
++ void *pvCPUVAddr, u32 * pui32Flags, BM_HANDLE * phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *psBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddress;
++ u32 uFlags;
++
++ psBMHeap = (BM_HEAP *) hDevMemHeap;
++ psBMContext = psBMHeap->pBMContext;
++
++ uFlags =
++ psBMHeap->
++ ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++ if ((pui32Flags != NULL)
++ && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0)) {
++ uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++ ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LMA)
++ if (bPhysContig) {
++ if (!ValidSysPAddrRangeForDev
++ (psBMContext->psDeviceNode, *psSysAddr,
++ WRAP_MAPPING_SIZE(ui32Size, ui32Offset))) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_Wrap: System address range invalid for device"));
++ return 0;
++ }
++ } else {
++ u32 ui32HostPageSize = HOST_PAGESIZE();
++
++ if (!ValidSysPAddrArrayForDev
++ (psBMContext->psDeviceNode, psSysAddr,
++ WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize),
++ ui32HostPageSize)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_Wrap: Array of system addresses invalid for device"));
++ return 0;
++ }
++ }
++#endif
++
++ sHashAddress = psSysAddr[0];
++
++ sHashAddress.uiAddr += ui32Offset;
++
++ pBuf =
++ (BM_BUF *) HASH_Retrieve(psBMContext->pBufferHash,
++ (u32) sHashAddress.uiAddr);
++
++ if (pBuf) {
++ u32 ui32MappingSize = HOST_PAGEALIGN(ui32Size + ui32Offset);
++
++ if (pBuf->pMapping->uSize == ui32MappingSize
++ && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped
++ || pBuf->pMapping->eCpuMemoryOrigin ==
++ hm_wrapped_virtaddr)) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
++ ui32Size, ui32Offset, sHashAddress.uiAddr));
++
++ pBuf->ui32RefCount++;
++ *phBuf = (BM_HANDLE) pBuf;
++ if (pui32Flags)
++ *pui32Flags = uFlags;
++
++ return 1;
++ }
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BM_BUF),
++ (void **)&pBuf, NULL,
++ "Buffer Manager buffer") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
++ return 0;
++ }
++ memset(pBuf, 0, sizeof(BM_BUF));
++
++ if (WrapMemory
++ (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr,
++ uFlags, pBuf) != 1) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, NULL);
++
++ return 0;
++ }
++
++ if (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped
++ || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr) {
++
++ PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr ==
++ pBuf->CpuPAddr.uiAddr);
++
++ if (!HASH_Insert
++ (psBMContext->pBufferHash, (u32) sHashAddress.uiAddr,
++ (u32) pBuf)) {
++ FreeBuf(pBuf, uFlags);
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
++ return 0;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++ ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr));
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE) pBuf;
++ if (pui32Flags) {
++
++ *pui32Flags =
++ (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) |
++ PVRSRV_HAP_MULTI_PROCESS;
++ }
++
++ return 1;
++}
++
++void BM_Free(BM_HANDLE hBuf, u32 ui32Flags)
++{
++ BM_BUF *pBuf = (BM_BUF *) hBuf;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddr;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf));
++ PVR_ASSERT(pBuf != NULL);
++
++ if (pBuf == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ pBuf->ui32RefCount--;
++
++ if (pBuf->ui32RefCount == 0) {
++ if (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped
++ || pBuf->pMapping->eCpuMemoryOrigin ==
++ hm_wrapped_virtaddr) {
++ sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++ HASH_Remove(pBuf->pMapping->pBMHeap->pBMContext->
++ pBufferHash, (u32) sHashAddr.uiAddr);
++ }
++ FreeBuf(pBuf, ui32Flags);
++ }
++}
++
++IMG_CPU_VIRTADDR BM_HandleToCpuVaddr(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *) hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++ if (pBuf == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_HandleToCpuVaddr: invalid parameter"));
++ return NULL;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_HandleToCpuVaddr(h=%08X)=%08X", hBuf, pBuf->CpuVAddr));
++ return pBuf->CpuVAddr;
++}
++
++IMG_DEV_VIRTADDR BM_HandleToDevVaddr(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *) hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++ if (pBuf == NULL) {
++ IMG_DEV_VIRTADDR DevVAddr = { 0 };
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_HandleToDevVaddr: invalid parameter"));
++ return DevVAddr;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf,
++ pBuf->DevVAddr));
++ return pBuf->DevVAddr;
++}
++
++IMG_SYS_PHYADDR BM_HandleToSysPaddr(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *) hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++
++ if (pBuf == NULL) {
++ IMG_SYS_PHYADDR PhysAddr = { 0 };
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_HandleToSysPaddr: invalid parameter"));
++ return PhysAddr;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf,
++ pBuf->CpuPAddr.uiAddr));
++ return SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++}
++
++void *BM_HandleToOSMemHandle(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *) hBuf;
++
++ PVR_ASSERT(pBuf != NULL);
++
++ if (pBuf == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_HandleToOSMemHandle: invalid parameter"));
++ return NULL;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_HandleToOSMemHandle(h=%08X)=%08X",
++ hBuf, pBuf->hOSMemHandle));
++ return pBuf->hOSMemHandle;
++}
++
++int
++BM_ContiguousStatistics(u32 uFlags, u32 * pTotalBytes, u32 * pAvailableBytes)
++{
++ if (pAvailableBytes || pTotalBytes || uFlags) ;
++ return 0;
++}
++
++static int
++DevMemoryAlloc(BM_CONTEXT * pBMContext,
++ BM_MAPPING * pMapping,
++ u32 * pActualSize,
++ u32 uFlags,
++ u32 dev_vaddr_alignment, IMG_DEV_VIRTADDR * pDevVAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ u32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if (uFlags & PVRSRV_MEM_INTERLEAVED) {
++
++ pMapping->uSize *= 2;
++ }
++#ifdef PDUMP
++ if (uFlags & PVRSRV_MEM_DUMMY) {
++
++ ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++#endif
++
++ if (!psDeviceNode->pfnMMUAlloc(pMapping->pBMHeap->pMMUHeap,
++ pMapping->uSize,
++ pActualSize,
++ 0,
++ dev_vaddr_alignment,
++ &(pMapping->DevVAddr))) {
++ PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
++ return 0;
++ }
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pBMContext->psMMUContext);
++#endif
++
++ PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType,
++ pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
++ pMapping->hOSMemHandle, ui32PDumpSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (void *)pMapping);
++
++ switch (pMapping->eCpuMemoryOrigin) {
++ case hm_wrapped:
++ case hm_wrapped_virtaddr:
++ case hm_contiguous:
++ {
++ psDeviceNode->pfnMMUMapPages(pMapping->pBMHeap->
++ pMMUHeap,
++ pMapping->DevVAddr,
++ SysCpuPAddrToSysPAddr
++ (pMapping->CpuPAddr),
++ pMapping->uSize, uFlags,
++ (void *)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ case hm_env:
++ {
++ psDeviceNode->pfnMMUMapShadow(pMapping->pBMHeap->
++ pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->uSize,
++ pMapping->CpuVAddr,
++ pMapping->hOSMemHandle,
++ pDevVAddr, uFlags,
++ (void *)pMapping);
++ break;
++ }
++ case hm_wrapped_scatter:
++ case hm_wrapped_scatter_virtaddr:
++ {
++ psDeviceNode->pfnMMUMapScatter(pMapping->pBMHeap->
++ pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->psSysAddr,
++ pMapping->uSize, uFlags,
++ (void *)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "Illegal value %d for pMapping->eCpuMemoryOrigin",
++ pMapping->eCpuMemoryOrigin));
++ return 0;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pBMContext->psMMUContext);
++#endif
++
++ return 1;
++}
++
++static void DevMemoryFree(BM_MAPPING * pMapping)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ u32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY) {
++
++ ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ } else {
++ ui32PSize = pMapping->uSize;
++ }
++
++ PDUMPFREEPAGES(pMapping->pBMHeap,
++ pMapping->DevVAddr,
++ ui32PSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (void *)pMapping,
++ (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? 1 : 0);
++#endif
++
++ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ psDeviceNode->pfnMMUFree(pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSize));
++}
++
++static int
++BM_ImportMemory(void *pH,
++ u32 uRequestSize,
++ u32 * pActualSize,
++ BM_MAPPING ** ppsMapping, u32 uFlags, u32 * pBase)
++{
++ BM_MAPPING *pMapping;
++ BM_HEAP *pBMHeap = pH;
++ BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++ int bResult;
++ u32 uSize;
++ u32 uPSize;
++ u32 uDevVAddrAlignment = 0;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
++ pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
++
++ PVR_ASSERT(ppsMapping != NULL);
++ PVR_ASSERT(pBMContext != NULL);
++
++ if (ppsMapping == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
++ goto fail_exit;
++ }
++
++ uSize = HOST_PAGEALIGN(uRequestSize);
++ PVR_ASSERT(uSize >= uRequestSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BM_MAPPING),
++ (void **)&pMapping, NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: failed BM_MAPPING alloc"));
++ goto fail_exit;
++ }
++
++ pMapping->hOSMemHandle = 0;
++ pMapping->CpuVAddr = 0;
++ pMapping->DevVAddr.uiAddr = 0;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = pBMHeap;
++ pMapping->ui32Flags = uFlags;
++
++ if (pActualSize) {
++ *pActualSize = uSize;
++ }
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY) {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ } else {
++ uPSize = pMapping->uSize;
++ }
++
++ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
++ u32 ui32Attribs = pBMHeap->ui32Attribs;
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |=
++ (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++ if (OSAllocPages(ui32Attribs,
++ uPSize,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ (void **)&pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: OSAllocPages(0x%x) failed",
++ uPSize));
++ goto fail_mapping_alloc;
++ }
++
++ pMapping->eCpuMemoryOrigin = hm_env;
++ } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
++ IMG_SYS_PHYADDR sSysPAddr;
++ u32 ui32Attribs = pBMHeap->ui32Attribs;
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |=
++ (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++ PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);
++
++ if (!RA_Alloc(pBMHeap->pLocalDevMemArena,
++ uPSize,
++ NULL,
++ NULL,
++ 0,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ 0, (u32 *) & sSysPAddr.uiAddr)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: RA_Alloc(0x%x) FAILED",
++ uPSize));
++ goto fail_mapping_alloc;
++ }
++
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ if (OSReservePhys(pMapping->CpuPAddr,
++ uPSize,
++ ui32Attribs,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: OSReservePhys failed"));
++ goto fail_dev_mem_alloc;
++ }
++
++ pMapping->eCpuMemoryOrigin = hm_contiguous;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: Invalid backing store type"));
++ goto fail_mapping_alloc;
++ }
++
++ bResult = DevMemoryAlloc(pBMContext,
++ pMapping,
++ NULL,
++ uFlags,
++ uDevVAddrAlignment, &pMapping->DevVAddr);
++ if (!bResult) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_dev_mem_alloc;
++ }
++
++ PVR_ASSERT(uDevVAddrAlignment >
++ 1 ? (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) ==
++ 0 : 1);
++
++ *pBase = pMapping->DevVAddr.uiAddr;
++ *ppsMapping = pMapping;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_ImportMemory: 1"));
++ return 1;
++
++fail_dev_mem_alloc:
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) {
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) {
++ pMapping->uSize /= 2;
++ }
++
++ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY) {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ } else {
++ uPSize = pMapping->uSize;
++ }
++
++ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (void *)pMapping->CpuVAddr,
++ pMapping->hOSMemHandle);
++ } else {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if (pMapping->CpuVAddr) {
++ OSUnReservePhys(pMapping->CpuVAddr,
++ uPSize,
++ pBMHeap->ui32Attribs,
++ pMapping->hOSMemHandle);
++ }
++ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
++ RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
++ 0);
++ }
++ }
++fail_mapping_alloc:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, NULL);
++
++fail_exit:
++ return 0;
++}
++
++static void BM_FreeMemory(void *h, u32 _base, BM_MAPPING * psMapping)
++{
++ BM_HEAP *pBMHeap = h;
++ u32 uPSize;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base,
++ psMapping));
++
++#ifdef INTEL_D3_P_CHANGES
++ if (NULL == pBMHeap) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
++ return;
++ }
++#endif
++
++ PVR_ASSERT(psMapping != NULL);
++
++ if (psMapping == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
++ return;
++ }
++
++ DevMemoryFree(psMapping);
++
++ if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0) {
++ psMapping->uSize /= 2;
++ }
++
++ if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY) {
++ uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ } else {
++ uPSize = psMapping->uSize;
++ }
++
++ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (void *)psMapping->CpuVAddr,
++ psMapping->hOSMemHandle);
++ } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ OSUnReservePhys(psMapping->CpuVAddr, uPSize,
++ pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++ RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, 0);
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_FreeMemory: Invalid backing store type"));
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, NULL);
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++ h, _base, psMapping));
++}
++
++void BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO * psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR * psDevPAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
++
++ PVR_ASSERT(psMemInfo && psDevPAddr)
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ psDeviceNode =
++ ((BM_BUF *) psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->
++ pBMContext->psDeviceNode;
++
++ *psDevPAddr =
++ psDeviceNode->
++ pfnMMUGetPhysPageAddr(((BM_BUF *) psMemInfo->sMemBlk.hBuffer)->
++ pMapping->pBMHeap->pMMUHeap, sDevVPageAddr);
++}
++
++PVRSRV_ERROR BM_GetHeapInfo(void *hDevMemHeap, PVRSRV_HEAP_INFO * psHeapInfo)
++{
++ BM_HEAP *psBMHeap = (BM_HEAP *) hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo"));
++
++ psHeapInfo->hDevMemHeap = hDevMemHeap;
++ psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++ psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++ psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++ return PVRSRV_OK;
++}
++
++MMU_CONTEXT *BM_GetMMUContext(void *hDevMemHeap)
++{
++ BM_HEAP *pBMHeap = (BM_HEAP *) hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
++
++ return pBMHeap->pBMContext->psMMUContext;
++}
++
++MMU_CONTEXT *BM_GetMMUContextFromMemContext(void *hDevMemContext)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT *) hDevMemContext;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
++
++ return pBMContext->psMMUContext;
++}
++
++void *BM_GetMMUHeap(void *hDevMemHeap)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
++
++ return (void *)((BM_HEAP *) hDevMemHeap)->pMMUHeap;
++}
++
++PVRSRV_DEVICE_NODE *BM_GetDeviceNode(void *hDevMemContext)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
++
++ return ((BM_CONTEXT *) hDevMemContext)->psDeviceNode;
++}
++
++void *BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
++
++ return ((BM_BUF *) psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/deviceclass.c
+@@ -0,0 +1,1732 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++void *MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE * psDeviceNode, va_list va);
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA * psSysData, u32 * pui32DevID);
++PVRSRV_ERROR FreeDeviceID(SYS_DATA * psSysData, u32 ui32DevID);
++
++#if defined(SUPPORT_MISR_IN_THREAD)
++void OSVSyncMISR(void *, int);
++#endif
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef struct PVRSRV_DC_BUFFER_TAG {
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++} PVRSRV_DC_BUFFER;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_TAG {
++ void *hExtSwapChain;
++ u32 ui32SwapChainID;
++ u32 ui32Flags;
++ u32 ui32RefCount;
++ PVRSRV_QUEUE_INFO *psQueue;
++ PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ u32 ui32BufferCount;
++ PVRSRV_DC_BUFFER *psLastFlipBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psNext;
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++} PVRSRV_DC_SWAPCHAIN;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG {
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++ void *hResItem;
++} PVRSRV_DC_SWAPCHAIN_REF;
++
++typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG {
++ u32 ui32RefCount;
++ u32 ui32DeviceID;
++ void *hExtDevice;
++ PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable;
++ void *hDevMemContext;
++ PVRSRV_DC_BUFFER sSystemBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared;
++} PVRSRV_DISPLAYCLASS_INFO;
++
++typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG {
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PRESMAN_ITEM hResItem;
++} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++ *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++typedef struct PVRSRV_BC_BUFFER_TAG {
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
++} PVRSRV_BC_BUFFER;
++
++typedef struct PVRSRV_BUFFERCLASS_INFO_TAG {
++ u32 ui32RefCount;
++ u32 ui32DeviceID;
++ void *hExtDevice;
++ PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable;
++ void *hDevMemContext;
++
++ u32 ui32BufferCount;
++ PVRSRV_BC_BUFFER *psBuffer;
++
++} PVRSRV_BUFFERCLASS_INFO;
++
++typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG {
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ void *hResItem;
++} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
++
++static PVRSRV_DISPLAYCLASS_INFO *DCDeviceHandleToDCInfo(void *hDeviceKM)
++{
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *) hDeviceKM;
++
++ return psDCPerContextInfo->psDCInfo;
++}
++
++static PVRSRV_BUFFERCLASS_INFO *BCDeviceHandleToBCInfo(void *hDeviceKM)
++{
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *) hDeviceKM;
++
++ return psBCPerContextInfo->psBCInfo;
++}
++
++void PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE * psDeviceNode,
++ va_list va)
++{
++ u32 *pui32DevCount;
++ u32 **ppui32DevID;
++ PVRSRV_DEVICE_CLASS peDeviceClass;
++
++ pui32DevCount = va_arg(va, u32 *);
++ ppui32DevID = va_arg(va, u32 **);
++ peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++
++ if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass)
++ && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT)) {
++ (*pui32DevCount)++;
++ if (*ppui32DevID) {
++ *(*ppui32DevID)++ =
++ psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++ }
++}
++
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++ u32 * pui32DevCount, u32 * pui32DevID)
++{
++
++ u32 ui32DevCount = 0;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDCKM_ForEachVaCb,
++ &ui32DevCount,
++ &pui32DevID, DeviceClass);
++
++ if (pui32DevCount) {
++ *pui32DevCount = ui32DevCount;
++ } else if (pui32DevID == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVEnumerateDCKM: Invalid parameters"));
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVRegisterDCDeviceKM(PVRSRV_DC_SRV2DISP_KMJTABLE * psFuncTable,
++ u32 * pui32DeviceID)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psDCInfo),
++ (void **)&psDCInfo, NULL,
++ "Display Class Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psDCInfo, 0, sizeof(*psDCInfo));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++ (void **)&psDCInfo->psFuncTable, NULL,
++ "Function table for SRVKM->DISPLAY") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
++ goto ErrorExit;
++ }
++ memset(psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++ *psDCInfo->psFuncTable = *psFuncTable;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (void **)&psDeviceNode, NULL,
++ "Device Node") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
++ goto ErrorExit;
++ }
++ memset(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (void *)psDCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++ psDeviceNode->psSysData = psSysData;
++
++ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex)
++ != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
++ goto ErrorExit;
++ }
++ psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID) {
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++
++ SysRegisterExternalDevice(psDeviceNode);
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList,
++ psDeviceNode);
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psDCInfo->psFuncTable) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++ psDCInfo->psFuncTable, NULL);
++ psDCInfo->psFuncTable = NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO),
++ psDCInfo, NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(u32 ui32DevIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ 0, PVRSRV_DEVICE_CLASS_DISPLAY);
++ if (!psDeviceNode) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveDCDeviceKM: requested device %d not present",
++ ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *) psDeviceNode->pvDevice;
++
++ if (psDCInfo->ui32RefCount == 0) {
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++ SysRemoveExternalDevice(psDeviceNode);
++
++ PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++ (void)FreeDeviceID(psSysData, ui32DevIndex);
++ (void)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++ psDCInfo->psFuncTable, NULL);
++ psDCInfo->psFuncTable = NULL;
++ (void)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo,
++ NULL);
++
++ (void)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, NULL);
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open",
++ psDCInfo->ui32RefCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVRegisterBCDeviceKM(PVRSRV_BC_SRV2BUFFER_KMJTABLE *
++ psFuncTable, u32 * pui32DeviceID)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo = NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCInfo),
++ (void **)&psBCInfo, NULL,
++ "Buffer Class Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psBCInfo, 0, sizeof(*psBCInfo));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ (void **)&psBCInfo->psFuncTable, NULL,
++ "Function table for SRVKM->BUFFER") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
++ goto ErrorExit;
++ }
++ memset(psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++ *psBCInfo->psFuncTable = *psFuncTable;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (void **)&psDeviceNode, NULL,
++ "Device Node") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
++ goto ErrorExit;
++ }
++ memset(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (void *)psBCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++ psDeviceNode->psSysData = psSysData;
++
++ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex)
++ != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
++ goto ErrorExit;
++ }
++ psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID) {
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList,
++ psDeviceNode);
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psBCInfo->psFuncTable) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PPVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ psBCInfo->psFuncTable, NULL);
++ psBCInfo->psFuncTable = NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO),
++ psBCInfo, NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(u32 ui32DevIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ SysAcquireData(&psSysData);
++
++ psDevNode = (PVRSRV_DEVICE_NODE *)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ 0, PVRSRV_DEVICE_CLASS_BUFFER);
++
++ if (!psDevNode) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveBCDeviceKM: requested device %d not present",
++ ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psBCInfo = (PVRSRV_BUFFERCLASS_INFO *) psDevNode->pvDevice;
++
++ if (psBCInfo->ui32RefCount == 0) {
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDevNode);
++
++ (void)FreeDeviceID(psSysData, ui32DevIndex);
++
++ (void)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ psBCInfo->psFuncTable, NULL);
++ psBCInfo->psFuncTable = NULL;
++ (void)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo,
++ NULL);
++
++ (void)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDevNode, NULL);
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open",
++ psBCInfo->ui32RefCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(void *hDeviceKM, int bResManCallback)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *) hDeviceKM;
++
++ eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem);
++
++ return eError;
++}
++
++static PVRSRV_ERROR CloseDCDeviceCallBack(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *) pvParam;
++ psDCInfo = psDCPerContextInfo->psDCInfo;
++
++ psDCInfo->ui32RefCount--;
++ if (psDCInfo->ui32RefCount == 0) {
++
++ psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
++
++ if (--psDCInfo->sSystemBuffer.sDeviceClassBuffer.
++ psKernelSyncInfo->ui32RefCount == 0) {
++ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.
++ sDeviceClassBuffer.
++ psKernelSyncInfo);
++ }
++
++ psDCInfo->hDevMemContext = NULL;
++ psDCInfo->hExtDevice = NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO),
++ psDCPerContextInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++				  u32 ui32DeviceID,
++				  void *hDevCookie, void **phDeviceKM)
++{				/* open a per-process handle onto a display-class device; returns handle via phDeviceKM */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++	SYS_DATA *psSysData;
++	PVRSRV_ERROR eError;
++
++	if (!phDeviceKM || !hDevCookie) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVOpenDCDeviceKM: Invalid params"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	SysAcquireData(&psSysData);
++
++	psDeviceNode = (PVRSRV_DEVICE_NODE *)	/* look up the display device by ID */
++	    List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++					   MatchDeviceKM_AnyVaCb,
++					   ui32DeviceID,
++					   0, PVRSRV_DEVICE_CLASS_DISPLAY);
++	if (!psDeviceNode) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVOpenDCDeviceKM: no devnode matching index %d",
++			 ui32DeviceID));
++		return PVRSRV_ERROR_GENERIC;
++	}
++	psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *) psDeviceNode->pvDevice;
++
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++		       sizeof(*psDCPerContextInfo),
++		       (void **)&psDCPerContextInfo, NULL,
++		       "Display Class per Context Info") != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	memset(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++	if (psDCInfo->ui32RefCount++ == 0) {	/* first opener initialises the external device */
++
++		psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
++
++		psDCInfo->hDevMemContext =
++		    (void *)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++		eError = PVRSRVAllocSyncInfoKM(NULL,
++					       (void *)psDeviceNode->
++					       sDevMemoryInfo.pBMKernelContext,
++					       &psDCInfo->sSystemBuffer.
++					       sDeviceClassBuffer.
++					       psKernelSyncInfo);
++		if (eError != PVRSRV_OK) {
++			PVR_DPF((PVR_DBG_ERROR, "PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
++			psDCInfo->ui32RefCount--;
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo), psDCPerContextInfo, NULL);	/* fix: was leaked on this path */
++			return eError;
++		}
++
++		eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
++								&psDCInfo->
++								hExtDevice,
++								(PVRSRV_SYNC_DATA
++								 *) psDCInfo->
++								sSystemBuffer.
++								sDeviceClassBuffer.
++								psKernelSyncInfo->
++								psSyncDataMemInfoKM->
++								pvLinAddrKM);
++		if (eError != PVRSRV_OK) {
++			PVR_DPF((PVR_DBG_ERROR,
++				 "PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
++			psDCInfo->ui32RefCount--;
++			PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.
++					     sDeviceClassBuffer.psKernelSyncInfo);
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo), psDCPerContextInfo, NULL);	/* fix: was leaked on this path */
++			return eError;
++		}
++
++		psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->
++		    ui32RefCount++;
++	}
++
++	psDCPerContextInfo->psDCInfo = psDCInfo;
++	psDCPerContextInfo->hResItem =	/* NOTE(review): ResManRegisterRes result unchecked - confirm it cannot fail here */
++	    ResManRegisterRes(psPerProc->hResManContext,
++			      RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++			      psDCPerContextInfo, 0, CloseDCDeviceCallBack);
++
++	*phDeviceKM = (void *)psDCPerContextInfo;
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(void *hDeviceKM,
++				   u32 * pui32Count, DISPLAY_FORMAT * psFormat)
++{				/* enumerate pixel formats supported by the external DC driver */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++	if (!hDeviceKM || !pui32Count || !psFormat) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVEnumDCFormatsKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);	/* per-context handle -> device info */
++
++	return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice,
++						       pui32Count, psFormat);
++}
++
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(void *hDeviceKM,
++				DISPLAY_FORMAT * psFormat,
++				u32 * pui32Count, DISPLAY_DIMS * psDim)
++{				/* enumerate dimensions supported for a given pixel format */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++	if (!hDeviceKM || !pui32Count || !psFormat) {	/* NOTE(review): psDim is not NULL-checked - confirm intent */
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVEnumDCDimsKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice,
++						    psFormat, pui32Count,
++						    psDim);
++}
++
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(void *hDeviceKM, void **phBuffer)
++{				/* fetch the system (primary) buffer and return it wrapped as a DC buffer */
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	void *hExtBuffer;
++
++	if (!hDeviceKM || !phBuffer) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetDCSystemBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	eError =
++	    psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice,
++							&hExtBuffer);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
++		return eError;
++	}
++
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr =	/* cache addressing info in the embedded system buffer */
++	    psDCInfo->psFuncTable->pfnGetBufferAddr;
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext =
++	    psDCInfo->hDevMemContext;
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice =
++	    psDCInfo->hExtDevice;
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++	psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
++
++	*phBuffer = (void *)&(psDCInfo->sSystemBuffer);	/* handle points into psDCInfo, not a fresh alloc */
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetDCInfoKM(void *hDeviceKM, DISPLAY_INFO * psDisplayInfo)
++{				/* query the external driver's display info, clamping buffer count */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_ERROR eError;
++
++	if (!hDeviceKM || !psDisplayInfo) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetDCInfoKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	eError =
++	    psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice,
++						psDisplayInfo);
++	if (eError != PVRSRV_OK) {
++		return eError;
++	}
++
++	if (psDisplayInfo->ui32MaxSwapChainBuffers >	/* clamp to what services can track */
++	    PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) {
++		psDisplayInfo->ui32MaxSwapChainBuffers =
++		    PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++	}
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(void *hSwapChainRef)
++{				/* release one swap chain reference via resman (callback does the teardown) */
++	PVRSRV_ERROR eError;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++
++	if (!hSwapChainRef) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psSwapChainRef = hSwapChainRef;
++
++	eError = ResManFreeResByPtr(psSwapChainRef->hResItem);	/* invokes DestroyDCSwapChainRefCallBack */
++
++	return eError;
++}
++
++static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN * psSwapChain)
++{				/* tear down a swap chain once its last reference has been dropped */
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo;
++	u32 i;
++
++	if (psDCInfo->psDCSwapChainShared) {	/* unlink from the device's shared-chain list */
++		if (psDCInfo->psDCSwapChainShared == psSwapChain) {
++			psDCInfo->psDCSwapChainShared = psSwapChain->psNext;
++		} else {
++			PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++			psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++			while (psCurrentSwapChain->psNext) {
++				if (psCurrentSwapChain->psNext != psSwapChain) {
++					psCurrentSwapChain =
++					    psCurrentSwapChain->psNext;
++					continue;
++				}
++				psCurrentSwapChain->psNext =
++				    psSwapChain->psNext;
++				break;
++			}
++		}
++	}
++
++	PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++
++	eError =
++	    psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++							 psSwapChain->
++							 hExtSwapChain);
++
++	if (eError != PVRSRV_OK) {	/* NOTE(review): early return leaves sync infos and psSwapChain allocated - confirm intent */
++		PVR_DPF((PVR_DBG_ERROR,
++			 "DestroyDCSwapChainCallBack: Failed to destroy DC swap chain"));
++		return eError;
++	}
++
++	for (i = 0; i < psSwapChain->ui32BufferCount; i++) {	/* drop per-buffer sync-info references */
++		if (psSwapChain->asBuffer[i].sDeviceClassBuffer.
++		    psKernelSyncInfo) {
++			if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.
++			    psKernelSyncInfo->ui32RefCount == 0) {
++				PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].
++						     sDeviceClassBuffer.
++						     psKernelSyncInfo);
++			}
++		}
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN),
++		  psSwapChain, NULL);
++
++	return eError;
++}
++
++static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(void *pvParam, u32 ui32Param)
++{				/* resman callback: drop one reference; destroy the chain when it hits zero */
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef =
++	    (PVRSRV_DC_SWAPCHAIN_REF *) pvParam;
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	if (--psSwapChainRef->psSwapChain->ui32RefCount == 0) {
++		eError = DestroyDCSwapChain(psSwapChainRef->psSwapChain);
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF),
++		  psSwapChainRef, NULL);
++	return eError;
++}
++
++static PVRSRV_DC_SWAPCHAIN
++    *PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO * psDCInfo,
++				   u32 ui32SwapChainID)
++{				/* linear search of the shared-chain list by ID; NULL if not found */
++	PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++
++	for (psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++	     psCurrentSwapChain;
++	     psCurrentSwapChain = psCurrentSwapChain->psNext) {
++		if (psCurrentSwapChain->ui32SwapChainID == ui32SwapChainID)
++			return psCurrentSwapChain;
++	}
++	return NULL;
++}
++
++static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA *
++						 psPerProc,
++						 PVRSRV_DC_SWAPCHAIN *
++						 psSwapChain,
++						 PVRSRV_DC_SWAPCHAIN_REF **
++						 ppsSwapChainRef)
++{				/* allocate a per-process reference and bump the chain's refcount */
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = NULL;
++
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++		       sizeof(PVRSRV_DC_SWAPCHAIN_REF),
++		       (void **)&psSwapChainRef, NULL,
++		       "Display Class Swapchain Reference") != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	memset(psSwapChainRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF));
++
++	psSwapChain->ui32RefCount++;	/* balanced by DestroyDCSwapChainRefCallBack */
++
++	psSwapChainRef->psSwapChain = psSwapChain;
++	psSwapChainRef->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++						     RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++						     psSwapChainRef,
++						     0,
++						     &DestroyDCSwapChainRefCallBack);
++	*ppsSwapChainRef = psSwapChainRef;	/* NOTE(review): hResItem not checked for failure - confirm ResMan contract */
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++				       void *hDeviceKM,
++				       u32 ui32Flags,
++				       DISPLAY_SURF_ATTRIBUTES *
++				       psDstSurfAttrib,
++				       DISPLAY_SURF_ATTRIBUTES *
++				       psSrcSurfAttrib, u32 ui32BufferCount,
++				       u32 ui32OEMFlags, void **phSwapChainRef,
++				       u32 * pui32SwapChainID)
++{				/* create a swap chain (or look up a shared one when QUERY flag set) */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain = NULL;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = NULL;
++	PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++	PVRSRV_QUEUE_INFO *psQueue = NULL;
++	PVRSRV_ERROR eError;
++	u32 i;
++	DISPLAY_INFO sDisplayInfo;
++
++	if (!hDeviceKM
++	    || !psDstSurfAttrib
++	    || !psSrcSurfAttrib || !phSwapChainRef || !pui32SwapChainID) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Too many buffers"));
++		return PVRSRV_ERROR_TOOMANYBUFFERS;
++	}
++
++	if (ui32BufferCount < 2) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Too few buffers"));
++		return PVRSRV_ERROR_TOO_FEW_BUFFERS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	if (ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY) {	/* QUERY: attach to an existing shared chain */
++
++		psSwapChain =
++		    PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID);
++		if (psSwapChain) {
++			PVR_DPF((PVR_DBG_ERROR,
++				 "PVRSRVCreateDCSwapChainKM: found query"));
++
++			eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++							      psSwapChain,
++							      &psSwapChainRef);
++			if (eError != PVRSRV_OK) {
++				PVR_DPF((PVR_DBG_ERROR,
++					 "PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++				return eError;
++			}
++
++			*phSwapChainRef = (void *)psSwapChainRef;
++			return PVRSRV_OK;
++		}
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query"));
++		return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
++	}
++
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++		       sizeof(PVRSRV_DC_SWAPCHAIN),
++		       (void **)&psSwapChain, NULL,
++		       "Display Class Swapchain") != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto ErrorExit;
++	}
++	memset(psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
++
++	eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
++		goto ErrorExit;
++	}
++
++	psSwapChain->psQueue = psQueue;
++
++	for (i = 0; i < ui32BufferCount; i++) {	/* per-buffer sync objects + addressing info */
++		eError = PVRSRVAllocSyncInfoKM(NULL,
++					       psDCInfo->hDevMemContext,
++					       &psSwapChain->asBuffer[i].
++					       sDeviceClassBuffer.
++					       psKernelSyncInfo);
++		if (eError != PVRSRV_OK) {
++			PVR_DPF((PVR_DBG_ERROR,
++				 "PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain"));
++			goto ErrorExit;
++		}
++
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->
++		    ui32RefCount++;
++
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr =
++		    psDCInfo->psFuncTable->pfnGetBufferAddr;
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext =
++		    psDCInfo->hDevMemContext;
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice =
++		    psDCInfo->hExtDevice;
++
++		psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++		psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++
++		apsSyncData[i] =
++		    (PVRSRV_SYNC_DATA *) psSwapChain->asBuffer[i].
++		    sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->
++		    pvLinAddrKM;
++	}
++
++	psSwapChain->ui32BufferCount = ui32BufferCount;
++	psSwapChain->psDCInfo = psDCInfo;
++
++	eError =
++	    psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice,
++						&sDisplayInfo);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Failed to get DC info"));
++		goto ErrorExit;	/* fix: was "return eError" - leaked queue, sync infos and psSwapChain */
++	}
++
++	eError =
++	    psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++							ui32Flags,
++							psDstSurfAttrib,
++							psSrcSurfAttrib,
++							ui32BufferCount,
++							apsSyncData,
++							ui32OEMFlags,
++							&psSwapChain->
++							hExtSwapChain,
++							&psSwapChain->
++							ui32SwapChainID);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
++		goto ErrorExit;
++	}
++
++	eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++					      psSwapChain, &psSwapChainRef);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++		goto ErrorExit;
++	}
++
++	psSwapChain->ui32RefCount = 1;
++	psSwapChain->ui32Flags = ui32Flags;
++
++	if (ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED) {	/* push onto the shared list head */
++		if (!psDCInfo->psDCSwapChainShared) {
++			psDCInfo->psDCSwapChainShared = psSwapChain;
++		} else {
++			PVRSRV_DC_SWAPCHAIN *psOldHead =
++			    psDCInfo->psDCSwapChainShared;
++			psDCInfo->psDCSwapChainShared = psSwapChain;
++			psSwapChain->psNext = psOldHead;
++		}
++	}
++
++	*pui32SwapChainID = psSwapChain->ui32SwapChainID;
++
++	*phSwapChainRef = (void *)psSwapChainRef;
++
++	return eError;
++
++ErrorExit:
++
++	for (i = 0; psSwapChain && i < ui32BufferCount; i++) {	/* fix: guard NULL psSwapChain (first alloc failed) */
++		if (psSwapChain->asBuffer[i].sDeviceClassBuffer.
++		    psKernelSyncInfo) {
++			if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.
++			    psKernelSyncInfo->ui32RefCount == 0) {
++				PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].
++						     sDeviceClassBuffer.
++						     psKernelSyncInfo);
++			}
++		}
++	}
++
++	if (psQueue) {
++		PVRSRVDestroyCommandQueueKM(psQueue);
++	}
++
++	if (psSwapChain) {
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN),
++			  psSwapChain, NULL);
++
++	}
++
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(void *hDeviceKM,
++				  void *hSwapChainRef, IMG_RECT * psRect)
++{				/* thin wrapper: forward destination rect to the external DC driver */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if (!hDeviceKM || !hSwapChainRef) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSetDCDstRectKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF *) hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
++						      psSwapChain->
++						      hExtSwapChain, psRect);
++}
++
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(void *hDeviceKM,
++				  void *hSwapChainRef, IMG_RECT * psRect)
++{				/* thin wrapper: forward source rect to the external DC driver */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if (!hDeviceKM || !hSwapChainRef) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSetDCSrcRectKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF *) hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
++						      psSwapChain->
++						      hExtSwapChain, psRect);
++}
++
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(void *hDeviceKM,
++				       void *hSwapChainRef, u32 ui32CKColour)
++{				/* thin wrapper: forward destination colour key to the external DC driver */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if (!hDeviceKM || !hSwapChainRef) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF *) hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
++							   psSwapChain->
++							   hExtSwapChain,
++							   ui32CKColour);
++}
++
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(void *hDeviceKM,
++				       void *hSwapChainRef, u32 ui32CKColour)
++{				/* thin wrapper: forward source colour key to the external DC driver */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if (!hDeviceKM || !hSwapChainRef) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF *) hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
++							   psSwapChain->
++							   hExtSwapChain,
++							   ui32CKColour);
++}
++
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(void *hDeviceKM,
++				  void *hSwapChainRef,
++				  u32 * pui32BufferCount, void **phBuffer)
++{				/* fetch the external buffer handles for a swap chain into phBuffer[] */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++	void *ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++	PVRSRV_ERROR eError;
++	u32 i;
++
++	if (!hDeviceKM || !hSwapChainRef || !phBuffer) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetDCBuffersKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF *) hSwapChainRef)->psSwapChain;
++
++	eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++							psSwapChain->hExtSwapChain,
++							pui32BufferCount,
++							ahExtBuffer);
++	if (eError != PVRSRV_OK)	/* fix: don't trust *pui32BufferCount / ahExtBuffer on failure */
++		return eError;
++	PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++	for (i = 0; i < *pui32BufferCount; i++) {
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer =
++		    ahExtBuffer[i];
++		phBuffer[i] = (void *)&psSwapChain->asBuffer[i];
++	}
++
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(void *hDeviceKM,
++				    void *hBuffer,
++				    u32 ui32SwapInterval,
++				    void *hPrivateTag,
++				    u32 ui32ClipRectCount,
++				    IMG_RECT * psClipRect)
++{				/* queue a flip-to-buffer command and kick queue processing */
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_BUFFER *psBuffer;
++	PVRSRV_QUEUE_INFO *psQueue;
++	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++	u32 i;
++	u32 ui32NumSrcSyncs = 1;
++	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++	PVRSRV_COMMAND *psCommand;
++
++	if (!hDeviceKM || !hBuffer || !psClipRect) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSwapToDCBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++#if defined(SUPPORT_LMA)
++	eError = PVRSRVPowerLock(KERNEL_ID, 0);
++	if (eError != PVRSRV_OK) {
++		return eError;
++	}
++#endif
++
++	psBuffer = (PVRSRV_DC_BUFFER *) hBuffer;
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	psQueue = psBuffer->psSwapChain->psQueue;
++
++	apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;	/* sync on the buffer being flipped */
++
++	if (psBuffer->psSwapChain->psLastFlipBuffer &&	/* also sync on the previously flipped buffer */
++	    psBuffer != psBuffer->psSwapChain->psLastFlipBuffer) {
++		apsSrcSync[1] =
++		    psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
++		    psKernelSyncInfo;
++
++		ui32NumSrcSyncs++;
++	}
++
++	eError = PVRSRVInsertCommandKM(psQueue,	/* reserve queue space incl. trailing clip-rect array */
++				       &psCommand,
++				       psDCInfo->ui32DeviceID,
++				       DC_FLIP_COMMAND,
++				       0,
++				       NULL,
++				       ui32NumSrcSyncs,
++				       apsSrcSync,
++				       sizeof(DISPLAYCLASS_FLIP_COMMAND) +
++				       (sizeof(IMG_RECT) * ui32ClipRectCount));
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
++		goto Exit;
++	}
++
++	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND *) psCommand->pvData;
++
++	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++	psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++
++	psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++
++	psFlipCmd->hPrivateTag = hPrivateTag;
++
++	psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++
++	psFlipCmd->psClipRect =	/* clip rects live immediately after the command struct */
++	    (IMG_RECT *) ((u8 *) psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
++
++	for (i = 0; i < ui32ClipRectCount; i++) {
++		psFlipCmd->psClipRect[i] = psClipRect[i];
++	}
++
++	psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++	eError = PVRSRVSubmitCommandKM(psQueue, psCommand);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSwapToDCBufferKM: Failed to submit command"));
++		goto Exit;
++	}
++
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {	/* spin until the queues drain or we time out */
++		if (PVRSRVProcessQueues(KERNEL_ID, 0) !=
++		    PVRSRV_ERROR_PROCESSING_BLOCKED) {
++			goto ProcessedQueues;
++		}
++		OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++	}
++	END_LOOP_UNTIL_TIMEOUT();
++
++	PVR_DPF((PVR_DBG_ERROR,
++		 "PVRSRVSwapToDCBufferKM: Failed to process queues"));
++
++	eError = PVRSRV_ERROR_GENERIC;
++	goto Exit;
++
++ProcessedQueues:
++
++	psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++
++	if (eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) {	/* map queue-full to a caller-retryable code */
++		eError = PVRSRV_ERROR_RETRY;
++	}
++#if defined(SUPPORT_LMA)
++	PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(void *hDeviceKM, void *hSwapChainRef)
++{				/* queue a flip back to the system (primary) buffer */
++	PVRSRV_ERROR eError;
++	PVRSRV_QUEUE_INFO *psQueue;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++	u32 ui32NumSrcSyncs = 1;
++	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++	PVRSRV_COMMAND *psCommand;
++
++	if (!hDeviceKM || !hSwapChainRef) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSwapToDCSystemKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++#if defined(SUPPORT_LMA)
++	eError = PVRSRVPowerLock(KERNEL_ID, 0);
++	if (eError != PVRSRV_OK) {
++		return eError;
++	}
++#endif
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF *) hSwapChainRef;
++	psSwapChain = psSwapChainRef->psSwapChain;
++
++	psQueue = psSwapChain->psQueue;
++
++	apsSrcSync[0] =	/* sync on the system buffer itself */
++	    psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++
++	if (psSwapChain->psLastFlipBuffer) {	/* also sync on the last flipped buffer, unless it is the same */
++
++		if (apsSrcSync[0] !=
++		    psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
++		    psKernelSyncInfo) {
++			apsSrcSync[1] =
++			    psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
++			    psKernelSyncInfo;
++
++			ui32NumSrcSyncs++;
++		}
++	}
++
++	eError = PVRSRVInsertCommandKM(psQueue,
++				       &psCommand,
++				       psDCInfo->ui32DeviceID,
++				       DC_FLIP_COMMAND,
++				       0,
++				       NULL,
++				       ui32NumSrcSyncs,
++				       apsSrcSync,
++				       sizeof(DISPLAYCLASS_FLIP_COMMAND));
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
++		goto Exit;
++	}
++
++	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND *) psCommand->pvData;
++
++	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++	psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++
++	psFlipCmd->hExtBuffer =
++	    psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++
++	psFlipCmd->hPrivateTag = NULL;
++
++	psFlipCmd->ui32ClipRectCount = 0;	/* no clip rects for a system-buffer flip */
++
++	psFlipCmd->ui32SwapInterval = 1;
++
++	eError = PVRSRVSubmitCommandKM(psQueue, psCommand);
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVSwapToDCSystemKM: Failed to submit command"));
++		goto Exit;
++	}
++
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {	/* spin until the queues drain or we time out */
++		if (PVRSRVProcessQueues(KERNEL_ID, 0) !=
++		    PVRSRV_ERROR_PROCESSING_BLOCKED) {
++			goto ProcessedQueues;
++		}
++
++		OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++	}
++	END_LOOP_UNTIL_TIMEOUT();
++
++	PVR_DPF((PVR_DBG_ERROR,
++		 "PVRSRVSwapToDCSystemKM: Failed to process queues"));
++	eError = PVRSRV_ERROR_GENERIC;
++	goto Exit;
++
++ProcessedQueues:
++
++	psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++	eError = PVRSRV_OK;
++
++Exit:
++
++	if (eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) {	/* map queue-full to a caller-retryable code */
++		eError = PVRSRV_ERROR_RETRY;
++	}
++#if defined(SUPPORT_LMA)
++	PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVRegisterSystemISRHandler(PFN_ISR_HANDLER pfnISRHandler,
++					    void *pvISRHandlerData,
++					    u32 ui32ISRSourceMask,
++					    u32 ui32DeviceID)
++{				/* attach an ISR handler + its context data to the matching device node */
++	SYS_DATA *psSysData;
++	PVRSRV_DEVICE_NODE *psDevNode;
++
++	SysAcquireData(&psSysData);
++
++	psDevNode = (PVRSRV_DEVICE_NODE *)
++	    List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++					   MatchDeviceKM_AnyVaCb,
++					   ui32DeviceID, 1);
++
++	if (psDevNode == NULL) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVRegisterSystemISRHandler: Failed to get psDevNode"));
++		PVR_DBG_BREAK;
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	psDevNode->pvISRData = (void *)pvISRHandlerData;	/* data set before handler so the ISR never sees a stale pair */
++
++	psDevNode->pfnDeviceISR = pfnISRHandler;
++
++	return PVRSRV_OK;
++}
++
++void PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE * psDeviceNode, va_list va)
++{				/* per-node callback: forward the state to display-class devices only */
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	u32 ui32State;
++	ui32State = va_arg(va, u32);
++
++	if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) {
++		psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *) psDeviceNode->pvDevice;
++		if (psDCInfo->psFuncTable->pfnSetDCState	/* optional hook; device must be open */
++		    && psDCInfo->hExtDevice) {
++			psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->
++							     hExtDevice,
++							     ui32State);
++		}
++	}
++}
++
++void PVRSRVSetDCState(u32 ui32State)
++{				/* broadcast a state change to every display-class device node */
++	SYS_DATA *psSysData;
++
++	SysAcquireData(&psSysData);
++
++	List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++					   PVRSRVSetDCState_ForEachVaCb,
++					   ui32State);
++}
++
++int PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE * psJTable)
++{				/* fill the display->services jump table handed to 3rd-party DC drivers */
++	psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
++	psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++	psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++	psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++	psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++	psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++#if defined(SUPPORT_MISR_IN_THREAD)
++	psJTable->pfnPVRSRVCmdComplete = OSVSyncMISR;	/* threaded-MISR build variant */
++#else
++	psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++#endif
++	psJTable->pfnPVRSRVRegisterSystemISRHandler =
++	    PVRSRVRegisterSystemISRHandler;
++	psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++
++	return 1;
++}
++
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(void *hDeviceKM, int bResManCallback)
++{				/* close a buffer-class handle via resman (callback does the teardown) */
++	PVRSRV_ERROR eError;
++	PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++	psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *) hDeviceKM;
++
++	eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem);	/* invokes CloseBCDeviceCallBack */
++
++	return eError;
++}
++
++static PVRSRV_ERROR CloseBCDeviceCallBack(void *pvParam, u32 ui32Param)
++{				/* resman callback: drop a BC ref; full teardown when the last ref goes */
++	PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++	PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++	psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *) pvParam;
++	psBCInfo = psBCPerContextInfo->psBCInfo;
++
++	psBCInfo->ui32RefCount--;
++	if (psBCInfo->ui32RefCount == 0) {
++		u32 i;
++
++		psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice);
++
++		for (i = 0; i < psBCInfo->ui32BufferCount; i++) {	/* drop per-buffer sync refs */
++			if (psBCInfo->psBuffer[i].sDeviceClassBuffer.
++			    psKernelSyncInfo) {
++				if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.
++				    psKernelSyncInfo->ui32RefCount == 0) {
++					PVRSRVFreeSyncInfoKM(psBCInfo->
++							     psBuffer[i].
++							     sDeviceClassBuffer.
++							     psKernelSyncInfo);
++				}
++			}
++		}
++
++		if (psBCInfo->psBuffer) {	/* NOTE(review): size arg is one element, alloc was count*size - confirm OSFreeMem treats it as advisory */
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++				  sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer,
++				  NULL);
++			psBCInfo->psBuffer = NULL;
++		}
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++		  sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO),
++		  psBCPerContextInfo, NULL);
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++				  u32 ui32DeviceID,
++				  void *hDevCookie, void **phDeviceKM)
++{				/* open a per-process handle onto a buffer-class device; returns handle via phDeviceKM */
++	PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++	PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++	SYS_DATA *psSysData;
++	u32 i;
++	PVRSRV_ERROR eError;
++
++	if (!phDeviceKM || !hDevCookie) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVOpenBCDeviceKM: Invalid params"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	SysAcquireData(&psSysData);
++
++	psDeviceNode = (PVRSRV_DEVICE_NODE *)	/* look up the buffer-class device by ID */
++	    List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++					   MatchDeviceKM_AnyVaCb,
++					   ui32DeviceID,
++					   0, PVRSRV_DEVICE_CLASS_BUFFER);
++	if (!psDeviceNode) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVOpenBCDeviceKM: No devnode matching index %d",
++			 ui32DeviceID));
++		return PVRSRV_ERROR_GENERIC;
++	}
++	psBCInfo = (PVRSRV_BUFFERCLASS_INFO *) psDeviceNode->pvDevice;
++
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++		       sizeof(*psBCPerContextInfo),
++		       (void **)&psBCPerContextInfo, NULL,
++		       "Buffer Class per Context Info") != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	memset(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++	if (psBCInfo->ui32RefCount++ == 0) {	/* first opener initialises the external device */
++		BUFFER_INFO sBufferInfo;
++
++		psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
++
++		psBCInfo->hDevMemContext =
++		    (void *)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++		eError =
++		    psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo->
++							   hExtDevice);
++		if (eError != PVRSRV_OK) {
++			PVR_DPF((PVR_DBG_ERROR,
++				 "PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
++			return eError;	/* NOTE(review): psBCPerContextInfo leaked and refcount left bumped - needs fix */
++		}
++
++		eError =
++		    psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice,
++							&sBufferInfo);
++		if (eError != PVRSRV_OK) {
++			PVR_DPF((PVR_DBG_ERROR,
++				 "PVRSRVOpenBCDeviceKM : Failed to get BC Info"));
++			return eError;	/* NOTE(review): same leak as above - needs fix */
++		}
++
++		psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++
++		eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				    sizeof(PVRSRV_BC_BUFFER) *
++				    sBufferInfo.ui32BufferCount,
++				    (void **)&psBCInfo->psBuffer, NULL,
++				    "Array of Buffer Class Buffer");
++		if (eError != PVRSRV_OK) {
++			PVR_DPF((PVR_DBG_ERROR,
++				 "PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
++			return eError;	/* NOTE(review): same leak as above - needs fix */
++		}
++		memset(psBCInfo->psBuffer,
++		       0,
++		       sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
++
++		for (i = 0; i < psBCInfo->ui32BufferCount; i++) {	/* wrap each external buffer with a sync object */
++
++			eError = PVRSRVAllocSyncInfoKM(NULL,
++						       psBCInfo->hDevMemContext,
++						       &psBCInfo->psBuffer[i].
++						       sDeviceClassBuffer.
++						       psKernelSyncInfo);
++			if (eError != PVRSRV_OK) {
++				PVR_DPF((PVR_DBG_ERROR,
++					 "PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
++				goto ErrorExit;
++			}
++
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.
++			    psKernelSyncInfo->ui32RefCount++;
++
++			eError =
++			    psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->
++								  hExtDevice, i,
++								  psBCInfo->
++								  psBuffer[i].
++								  sDeviceClassBuffer.
++								  psKernelSyncInfo->
++								  psSyncData,
++								  &psBCInfo->
++								  psBuffer[i].
++								  sDeviceClassBuffer.
++								  hExtBuffer);
++			if (eError != PVRSRV_OK) {
++				PVR_DPF((PVR_DBG_ERROR,
++					 "PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
++				goto ErrorExit;
++			}
++
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.
++			    pfnGetBufferAddr =
++			    psBCInfo->psFuncTable->pfnGetBufferAddr;
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.
++			    hDevMemContext = psBCInfo->hDevMemContext;
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice =
++			    psBCInfo->hExtDevice;
++		}
++	}
++
++	psBCPerContextInfo->psBCInfo = psBCInfo;
++	psBCPerContextInfo->hResItem =
++	    ResManRegisterRes(psPerProc->hResManContext,
++			      RESMAN_TYPE_BUFFERCLASS_DEVICE,
++			      psBCPerContextInfo, 0, CloseBCDeviceCallBack);
++
++	*phDeviceKM = (void *)psBCPerContextInfo;
++
++	return PVRSRV_OK;
++
++ErrorExit:
++
++#if defined(INTEL_D3_P_CHANGES)
++	if (psBCInfo->psBuffer == NULL) {
++		return eError;
++	}
++#endif
++
++	for (i = 0; i < psBCInfo->ui32BufferCount; i++) {	/* unwind sync refs taken so far */
++		if (psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) {
++			if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.
++			    psKernelSyncInfo->ui32RefCount == 0) {
++				PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].
++						     sDeviceClassBuffer.
++						     psKernelSyncInfo);
++			}
++		}
++	}
++
++	if (psBCInfo->psBuffer) {	/* NOTE(review): size arg is one element, alloc was count*size - confirm OSFreeMem contract */
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER),
++			  psBCInfo->psBuffer, NULL);
++		psBCInfo->psBuffer = NULL;
++	}
++
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVGetBCInfoKM(void *hDeviceKM, BUFFER_INFO * psBufferInfo)
++{				/* query buffer info from the external buffer-class driver */
++	PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++	PVRSRV_ERROR eError;
++
++	if (!hDeviceKM || !psBufferInfo) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetBCInfoKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++	eError =
++	    psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice,
++						psBufferInfo);
++
++	if (eError != PVRSRV_OK) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetBCInfoKM : Failed to get BC Info"));
++		return eError;
++	}
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetBCBufferKM(void *hDeviceKM,
++				 u32 ui32BufferIndex, void **phBuffer)
++{				/* return a handle to the i-th buffer-class buffer, range-checked */
++	PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++	if (!hDeviceKM || !phBuffer) {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetBCBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++	if (ui32BufferIndex < psBCInfo->ui32BufferCount) {
++		*phBuffer = (void *)&psBCInfo->psBuffer[ui32BufferIndex];	/* handle points into psBCInfo's array */
++	} else {
++		PVR_DPF((PVR_DBG_ERROR,
++			 "PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)",
++			 ui32BufferIndex, psBCInfo->ui32BufferCount));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	return PVRSRV_OK;
++}
++
++int PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE * psJTable)
++{
++ psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++ psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++ psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++ return 1;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/devicemem.c
+@@ -0,0 +1,1253 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "pvr_bridge_km.h"
++
++static PVRSRV_ERROR AllocDeviceMem(void *hDevCookie,
++ void *hDevMemHeap,
++ u32 ui32Flags,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo);
++
++typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_ {
++
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo;
++} RESMAN_MAP_DEVICE_MEM_DATA;
++
++PVRSRV_ERROR PVRSRVGetDeviceMemHeapsKM(void *hDevCookie,
++ PVRSRV_HEAP_INFO * psHeapInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 ui32HeapCount;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ u32 i;
++
++ if (hDevCookie == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++ for (i = 0; i < ui32HeapCount; i++) {
++
++ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[i].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[i].ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++ }
++
++ for (; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ memset(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++ psHeapInfo[i].ui32HeapID = (u32) PVRSRV_UNDEFINED_HEAP_ID;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCreateDeviceMemContextKM(void *hDevCookie,
++ PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void **phDevMemContext,
++ u32 * pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO * psHeapInfo,
++ int *pbCreated, int *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 ui32HeapCount, ui32ClientHeapCount = 0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ void *hDevMemContext;
++ void *hDevMemHeap;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ u32 i;
++
++ if (hDevCookie == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++ hDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr, psPerProc, pbCreated);
++ if (hDevMemContext == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ for (i = 0; i < ui32HeapCount; i++) {
++ switch (psDeviceMemoryHeap[i].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].
++ ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = 1;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap
++ [i]);
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].
++ ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = 0;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++ *phDevMemContext = hDevMemContext;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVDestroyDeviceMemContextKM(void *hDevCookie,
++ void *hDevMemContext,
++ int *pbDestroyed)
++{
++ return BM_DestroyContext(hDevMemContext, pbDestroyed);
++}
++
++PVRSRV_ERROR PVRSRVGetDeviceMemHeapInfoKM(void *hDevCookie,
++ void *hDevMemContext,
++ u32 * pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO * psHeapInfo,
++ int *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ u32 ui32HeapCount, ui32ClientHeapCount = 0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ void *hDevMemHeap;
++ u32 i;
++
++ if (hDevCookie == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++ for (i = 0; i < ui32HeapCount; i++) {
++ switch (psDeviceMemoryHeap[i].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].
++ ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = 1;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap
++ [i]);
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID =
++ psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap =
++ hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase =
++ psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].
++ ui32HeapByteSize =
++ psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs =
++ psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = 0;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocDeviceMem(void *hDevCookie,
++ void *hDevMemHeap,
++ u32 ui32Flags,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ BM_HANDLE hBuffer;
++
++ PVRSRV_MEMBLK *psMemBlock;
++ int bBMError;
++
++ *ppsMemInfo = NULL;
++
++#ifdef INTEL_D3_PAD
++ while (ui32Alignment % 64) {
++ ui32Alignment <<= 1;
++ }
++#endif
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psMemInfo, NULL,
++ "Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocDeviceMem: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ memset(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++ bBMError = BM_Alloc(hDevMemHeap,
++ NULL,
++ ui32Size,
++ &psMemInfo->ui32Flags,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment), &hBuffer);
++
++ if (!bBMError) {
++ PVR_DPF((PVR_DBG_ERROR, "AllocDeviceMem: BM_Alloc Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++ psMemBlock->hBuffer = (void *)hBuffer;
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++
++ psMemInfo->ui32AllocSize = ui32Size;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ *ppsMemInfo = psMemInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ BM_HANDLE hBuffer;
++
++ if (!psMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++ BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++ if (psMemInfo->pvSysBackupBuffer) {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->ui32AllocSize,
++ psMemInfo->pvSysBackupBuffer, NULL);
++ psMemInfo->pvSysBackupBuffer = NULL;
++ }
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psMemInfo, NULL);
++
++ return (PVRSRV_OK);
++}
++
++PVRSRV_ERROR PVRSRVAllocSyncInfoKM(void *hDevCookie,
++ void *hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO ** ppsKernelSyncInfo)
++{
++ void *hSyncDevMemHeap;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ BM_CONTEXT *pBMContext;
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_SYNC_INFO),
++ (void **)&psKernelSyncInfo, NULL,
++ "Kernel Synchronization Info");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelSyncInfo->ui32RefCount = 0;
++
++ pBMContext = (BM_CONTEXT *) hDevMemContext;
++ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++ hSyncDevMemHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->
++ ui32SyncHeapID].hDevMemHeap;
++
++ eError = AllocDeviceMem(hDevCookie,
++ hSyncDevMemHeap,
++ PVRSRV_MEM_CACHE_CONSISTENT,
++ sizeof(PVRSRV_SYNC_DATA),
++ sizeof(u32),
++ &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++ if (eError != PVRSRV_OK) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo,
++ NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelSyncInfo->psSyncData =
++ psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++ psSyncData = psKernelSyncInfo->psSyncData;
++
++ psSyncData->ui32WriteOpsPending = 0;
++ psSyncData->ui32WriteOpsComplete = 0;
++ psSyncData->ui32ReadOpsPending = 0;
++ psSyncData->ui32ReadOpsComplete = 0;
++ psSyncData->ui32LastOpDumpVal = 0;
++ psSyncData->ui32LastReadOpDumpVal = 0;
++
++#if defined(PDUMP)
++ PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM,
++ psKernelSyncInfo->psSyncDataMemInfoKM,
++ 0,
++ psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
++#endif
++
++ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr =
++ psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr +
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr =
++ psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr +
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = NULL;
++
++ psKernelSyncInfo->hResItem = NULL;
++
++ *ppsKernelSyncInfo = psKernelSyncInfo;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO * psKernelSyncInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if (psKernelSyncInfo->ui32RefCount != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "oops: sync info ref count not zero at destruction"));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++ (void)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO),
++ psKernelSyncInfo, NULL);
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeDeviceMemCallBack(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ psMemInfo->ui32RefCount--;
++
++ if (psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED) {
++ void *hMemInfo = NULL;
++
++ if (psMemInfo->ui32RefCount != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeDeviceMemCallBack: mappings are open in other processes"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &hMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeDeviceMemCallBack: can't find exported meminfo in the global handle list"));
++ return eError;
++ }
++
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ hMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeDeviceMemCallBack: PVRSRVReleaseHandle failed for exported meminfo"));
++ return eError;
++ }
++ }
++
++ PVR_ASSERT(psMemInfo->ui32RefCount == 0);
++
++ if (psMemInfo->psKernelSyncInfo) {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0) {
++ eError =
++ PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++
++ if (eError == PVRSRV_OK) {
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVFreeDeviceMemKM(void *hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if (!psMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psMemInfo->sMemBlk.hResItem != NULL) {
++ eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++ } else {
++
++ eError = FreeDeviceMemCallBack(psMemInfo, 0);
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR _PVRSRVAllocDeviceMemKM(void *hDevCookie,
++ PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hDevMemHeap,
++ u32 ui32Flags,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_ERROR eError;
++ BM_HEAP *psBMHeap;
++ void *hDevMemContext;
++
++ if (!hDevMemHeap || (ui32Size == 0)) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) {
++ if (((ui32Size % HOST_PAGESIZE()) != 0) ||
++ ((ui32Alignment % HOST_PAGESIZE()) != 0)) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ eError = AllocDeviceMem(hDevCookie,
++ hDevMemHeap,
++ ui32Flags, ui32Size, ui32Alignment, &psMemInfo);
++
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) {
++ psMemInfo->psKernelSyncInfo = NULL;
++ } else {
++
++ psBMHeap = (BM_HEAP *) hDevMemHeap;
++ hDevMemContext = (void *)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if (eError != PVRSRV_OK) {
++ goto free_mainalloc;
++ }
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++ }
++
++ *ppsMemInfo = psMemInfo;
++
++ if (ui32Flags & PVRSRV_MEM_NO_RESMAN) {
++ psMemInfo->sMemBlk.hResItem = NULL;
++ } else {
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ psMemInfo, 0, FreeDeviceMemCallBack);
++ if (psMemInfo->sMemBlk.hResItem == NULL) {
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto free_mainalloc;
++ }
++ }
++
++ psMemInfo->ui32RefCount++;
++
++ return (PVRSRV_OK);
++
++free_mainalloc:
++ FreeDeviceMem(psMemInfo);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVDissociateDeviceMemKM(void *hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie;
++
++ if (!psMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError =
++ ResManDissociateRes(psMemInfo->sMemBlk.hResItem,
++ psDeviceNode->hResManContext);
++
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVGetFreeDeviceMemKM(u32 ui32Flags,
++ u32 * pui32Total,
++ u32 * pui32Free, u32 * pui32LargestBlock)
++{
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ if (!psMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++static PVRSRV_ERROR UnwrapExtMemoryCallBack(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++ void *hOSWrapMem;
++
++ hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem;
++
++ if (psMemInfo->psKernelSyncInfo) {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0) {
++ eError =
++ PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++
++ if (psMemInfo->sMemBlk.psIntSysPAddr) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR),
++ psMemInfo->sMemBlk.psIntSysPAddr, NULL);
++ psMemInfo->sMemBlk.psIntSysPAddr = NULL;
++ }
++
++ if (eError == PVRSRV_OK) {
++
++ psMemInfo->ui32RefCount--;
++
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ if (hOSWrapMem) {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVWrapExtMemoryKM(void *hDevCookie,
++ PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hDevMemContext,
++ u32 ui32ByteSize,
++ u32 ui32PageOffset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR * psExtSysPAddr,
++ void *pvLinAddr,
++ u32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = NULL;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ u32 ui32HostPageSize = HOST_PAGESIZE();
++ void *hDevMemHeap = NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ int bBMError;
++ BM_HEAP *psBMHeap;
++ PVRSRV_ERROR eError;
++ void *pvPageAlignedCPUVAddr;
++ IMG_SYS_PHYADDR *psIntSysPAddr = NULL;
++ void *hOSWrapMem = NULL;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ u32 ui32PageCount = 0;
++ u32 i;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie;
++ PVR_ASSERT(psDeviceNode != NULL);
++
++ if (psDeviceNode == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++#ifdef INTEL_D3_CHANGES
++ if (pvLinAddr && psExtSysPAddr == NULL)
++#else
++ if (pvLinAddr)
++#endif
++ {
++
++ ui32PageOffset = (u32) pvLinAddr & (ui32HostPageSize - 1);
++
++ ui32PageCount =
++ HOST_PAGEALIGN(ui32ByteSize +
++ ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr =
++ (void *)((u32) pvLinAddr - ui32PageOffset);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount * sizeof(IMG_SYS_PHYADDR),
++ (void **)&psIntSysPAddr, NULL,
++ "Array of Page Addresses") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
++ ui32PageCount * ui32HostPageSize,
++ psIntSysPAddr,
++ &hOSWrapMem,
++ (ui32Flags != 0) ? 1 : 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase1;
++ }
++
++ psExtSysPAddr = psIntSysPAddr;
++
++ bPhysContig = 0;
++ } else {
++
++ }
++
++ psDevMemoryInfo =
++ &((BM_CONTEXT *) hDevMemContext)->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ if (HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) ==
++ psDevMemoryInfo->ui32MappingHeapID) {
++ if (psDeviceMemoryHeap[i].DevMemHeapType ==
++ DEVICE_MEMORY_HEAP_PERCONTEXT) {
++
++ hDevMemHeap =
++ BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++ } else {
++ hDevMemHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap[i].
++ hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if (hDevMemHeap == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ErrorExitPhase2;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psMemInfo, NULL,
++ "Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase2;
++ }
++
++ memset(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32PageOffset,
++ bPhysContig,
++ psExtSysPAddr,
++ NULL, &psMemInfo->ui32Flags, &hBuffer);
++ if (!bBMError) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExitPhase3;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++ psMemBlock->hOSWrapMem = hOSWrapMem;
++ psMemBlock->psIntSysPAddr = psIntSysPAddr;
++
++ psMemBlock->hBuffer = (void *)hBuffer;
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ psBMHeap = (BM_HEAP *) hDevMemHeap;
++ hDevMemContext = (void *)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExitPhase4;
++ }
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++ psMemInfo->ui32RefCount++;
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_WRAP, psMemInfo, 0,
++ UnwrapExtMemoryCallBack);
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++ErrorExitPhase4:
++ if (psMemInfo) {
++ FreeDeviceMem(psMemInfo);
++
++ psMemInfo = NULL;
++ }
++
++ErrorExitPhase3:
++ if (psMemInfo) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, NULL);
++
++ }
++
++ErrorExitPhase2:
++ if (psIntSysPAddr) {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ErrorExitPhase1:
++ if (psIntSysPAddr) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount * sizeof(IMG_SYS_PHYADDR),
++ psIntSysPAddr, NULL);
++
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ if (!psMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++static PVRSRV_ERROR UnmapDeviceMemoryCallBack(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam;
++
++ if (psMapData->psMemInfo->sMemBlk.psIntSysPAddr) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR),
++ psMapData->psMemInfo->sMemBlk.psIntSysPAddr, NULL);
++ psMapData->psMemInfo->sMemBlk.psIntSysPAddr = NULL;
++ }
++
++ psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount == 0) {
++ eError =
++ PVRSRVFreeSyncInfoKM(psMapData->psMemInfo->
++ psKernelSyncInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "UnmapDeviceMemoryCallBack: Failed to free sync info"));
++ return eError;
++ }
++ }
++
++ eError = FreeDeviceMem(psMapData->psMemInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "UnmapDeviceMemoryCallBack: Failed to free DST meminfo"));
++ return eError;
++ }
++
++ psMapData->psSrcMemInfo->ui32RefCount--;
++
++ if (psMapData->psSrcMemInfo->ui32RefCount == 1 &&
++ psMapData->psSrcMemInfo->bPendingFree == 1) {
++
++ if (psMapData->psSrcMemInfo->sMemBlk.hResItem != NULL) {
++
++ eError =
++ ResManFreeResByPtr(psMapData->psSrcMemInfo->sMemBlk.
++ hResItem);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "UnmapDeviceMemoryCallBack: Failed to free SRC meminfo"));
++ PVR_DBG_BREAK;
++ }
++ } else {
++
++ eError =
++ FreeDeviceMemCallBack(psMapData->psSrcMemInfo, 0);
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
++ psMapData, NULL);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_KERNEL_MEM_INFO * psSrcMemInfo,
++ void *hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO ** ppsDstMemInfo)
++{
++ PVRSRV_ERROR eError;
++ u32 i;
++ u32 ui32PageCount, ui32PageOffset;
++ u32 ui32HostPageSize = HOST_PAGESIZE();
++ IMG_SYS_PHYADDR *psSysPAddr = NULL;
++ IMG_DEV_PHYADDR sDevPAddr;
++ BM_BUF *psBuf;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = NULL;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ int bBMError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ void *pvPageAlignedCPUVAddr;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = NULL;
++
++ if (!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ *ppsDstMemInfo = NULL;
++
++ ui32PageOffset =
++ psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
++ ui32PageCount =
++ HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize +
++ ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr =
++ (void *)((u32) psSrcMemInfo->pvLinAddrKM - ui32PageOffset);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount * sizeof(IMG_SYS_PHYADDR),
++ (void **)&psSysPAddr, NULL,
++ "Array of Page Addresses") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ sDevVAddr.uiAddr =
++ psSrcMemInfo->sDevVAddr.uiAddr -
++ IMG_CAST_TO_DEVVADDR_UINT(ui32PageOffset);
++ for (i = 0; i < ui32PageCount; i++) {
++ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++
++ psSysPAddr[i] =
++ SysDevPAddrToSysPAddr(psDeviceNode->sDevId.eDeviceType,
++ sDevPAddr);
++
++ sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize);
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
++ (void **)&psMapData, NULL,
++ "Resource Manager Map Data") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psMemInfo, NULL,
++ "Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ memset(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDstDevMemHeap,
++ psSrcMemInfo->ui32AllocSize,
++ ui32PageOffset,
++ 0,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags, &hBuffer);
++
++ if (!bBMError) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExit;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++ psMemBlock->hBuffer = (void *)hBuffer;
++
++ psMemBlock->psIntSysPAddr = psSysPAddr;
++
++ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ psSrcMemInfo->ui32RefCount++;
++
++ psMapData->psMemInfo = psMemInfo;
++ psMapData->psSrcMemInfo = psSrcMemInfo;
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_MAPPING, psMapData, 0,
++ UnmapDeviceMemoryCallBack);
++
++ *ppsDstMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psSysPAddr) {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR),
++ psSysPAddr, NULL);
++
++ }
++
++ if (psMemInfo) {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, NULL);
++
++ }
++
++ if (psMapData) {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, NULL);
++
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo)
++{
++ if (!psMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ return FreeDeviceMem(psMemInfo);
++}
++
++PVRSRV_ERROR PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hDevMemContext,
++ void *hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo,
++ void **phOSMapInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ void *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++ int bPhysContig;
++ BM_CONTEXT *psBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ void *hDevMemHeap = NULL;
++ u32 ui32ByteSize;
++ u32 ui32Offset;
++ u32 ui32PageSize = HOST_PAGESIZE();
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ int bBMError;
++ u32 i;
++
++ if (!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo
++ || !hDevMemContext) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER *) hDeviceClassBuffer;
++
++ eError =
++ psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->
++ hExtDevice,
++ psDeviceClassBuffer->
++ hExtBuffer, &psSysPAddr,
++ &ui32ByteSize, &pvCPUVAddr,
++ phOSMapInfo, &bPhysContig);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psBMContext = (BM_CONTEXT *) psDeviceClassBuffer->hDevMemContext;
++ psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
++ if (HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) ==
++ psDevMemoryInfo->ui32MappingHeapID) {
++ if (psDeviceMemoryHeap[i].DevMemHeapType ==
++ DEVICE_MEMORY_HEAP_PERCONTEXT) {
++
++ hDevMemHeap =
++ BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++ } else {
++ hDevMemHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap[i].
++ hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if (hDevMemHeap == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ ui32Offset = ((u32) pvCPUVAddr) & (ui32PageSize - 1);
++ pvPageAlignedCPUVAddr = (void *)((u32) pvCPUVAddr - ui32Offset);
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psMemInfo, NULL,
++ "Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ memset(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32Offset,
++ bPhysContig,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags, &hBuffer);
++
++ if (!bBMError) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, NULL);
++
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++ psMemBlock->hBuffer = (void *)hBuffer;
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++ psMemInfo->pvSysBackupBuffer = NULL;
++
++ psMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING, psMemInfo, 0,
++ UnmapDeviceClassMemoryCallBack);
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/handle.c
+@@ -0,0 +1,1521 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef PVR_SECURE_HANDLES
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef DEBUG
++#define HANDLE_BLOCK_SIZE 1
++#else
++#define HANDLE_BLOCK_SIZE 256
++#endif
++
++#define HANDLE_HASH_TAB_INIT_SIZE 32
++
++#define DEFAULT_MAX_INDEX_PLUS_ONE 0xfffffffful
++#define DEFAULT_MAX_HANDLE DEFAULT_MAX_INDEX_PLUS_ONE
++
++#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define INDEX_TO_HANDLE(psBase, idx) ((void *)((idx) + 1))
++#define HANDLE_TO_INDEX(psBase, hand) ((u32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++#define HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define HANDLE_PTR_TO_INDEX(psBase, psHandle) (u32)((psHandle) - ((psBase)->psHandleArray))
++#define HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++ INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++#define ROUND_UP_TO_MULTIPLE(a, b) ((((a) + (b) - 1) / (b)) * (b))
++
++#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0)
++
++#define SET_FLAG(v, f) ((void)((v) |= (f)))
++#define CLEAR_FLAG(v, f) ((void)((v) &= ~(f)))
++#define TEST_FLAG(v, f) ((int)(((v) & (f)) != 0))
++
++#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f)
++
++#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f)
++#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f)
++#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f)
++
++#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE)
++
++#ifdef MIN
++#undef MIN
++#endif
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++
++struct sHandleList {
++ u32 ui32Prev;
++ u32 ui32Next;
++ void *hParent;
++};
++
++enum ePVRSRVInternalHandleFlag {
++ INTERNAL_HANDLE_FLAG_NONE = 0x00,
++ INTERNAL_HANDLE_FLAG_BATCHED = 0x01,
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02,
++};
++
++struct sHandle {
++
++ PVRSRV_HANDLE_TYPE eType;
++
++ void *pvData;
++
++ u32 ui32NextIndexPlusOne;
++
++ enum ePVRSRVInternalHandleFlag eInternalFlag;
++
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++
++ u32 ui32Index;
++
++ struct sHandleList sChildren;
++
++ struct sHandleList sSiblings;
++};
++
++struct _PVRSRV_HANDLE_BASE_ {
++
++ void *hBaseBlockAlloc;
++
++ void *hHandBlockAlloc;
++
++ struct sHandle *psHandleArray;
++
++ HASH_TABLE *psHashTab;
++
++ u32 ui32FreeHandCount;
++
++ u32 ui32FirstFreeIndex;
++
++ u32 ui32MaxIndexPlusOne;
++
++ u32 ui32TotalHandCount;
++
++ u32 ui32LastFreeIndexPlusOne;
++
++ u32 ui32HandBatchSize;
++
++ u32 ui32TotalHandCountPreBatch;
++
++ u32 ui32FirstBatchIndexPlusOne;
++
++ u32 ui32BatchHandAllocFailures;
++
++ int bPurgingEnabled;
++};
++
++enum eHandKey {
++ HAND_KEY_DATA = 0,
++ HAND_KEY_TYPE,
++ HAND_KEY_PARENT,
++ HAND_KEY_LEN
++};
++
++PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL;
++
++typedef u32 HAND_KEY[HAND_KEY_LEN];
++
++static
++void HandleListInit(u32 ui32Index, struct sHandleList *psList, void *hParent)
++{
++ psList->ui32Next = ui32Index;
++ psList->ui32Prev = ui32Index;
++ psList->hParent = hParent;
++}
++
++static
++void InitParentList(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle)
++{
++ u32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++ HandleListInit(ui32Parent, &psHandle->sChildren,
++ INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++static
++void InitChildEntry(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle)
++{
++ HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle),
++ &psHandle->sSiblings, NULL);
++}
++
++static
++int HandleListIsEmpty(u32 ui32Index, struct sHandleList *psList)
++{
++ int bIsEmpty;
++
++ bIsEmpty = (int)(psList->ui32Next == ui32Index);
++
++#ifdef DEBUG
++ {
++ int bIsEmpty2;
++
++ bIsEmpty2 = (int)(psList->ui32Prev == ui32Index);
++ PVR_ASSERT(bIsEmpty == bIsEmpty2);
++ }
++#endif
++
++ return bIsEmpty;
++}
++
++#ifdef DEBUG
++static
++int NoChildren(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle)
++{
++ PVR_ASSERT(psHandle->sChildren.hParent ==
++ HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle),
++ &psHandle->sChildren);
++}
++
++static
++int NoParent(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle)
++{
++ if (HandleListIsEmpty
++ (HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings)) {
++ PVR_ASSERT(psHandle->sSiblings.hParent == NULL);
++
++ return 1;
++ } else {
++ PVR_ASSERT(psHandle->sSiblings.hParent != NULL);
++ }
++ return 0;
++}
++#endif
++
++static
++void *ParentHandle(struct sHandle *psHandle)
++{
++ return psHandle->sSiblings.hParent;
++}
++
++#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++ ((struct sHandleList *)((char *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
++
++static
++void HandleListInsertBefore(PVRSRV_HANDLE_BASE * psBase, u32 ui32InsIndex,
++ struct sHandleList *psIns, u32 uiParentOffset,
++ u32 ui32EntryIndex, struct sHandleList *psEntry,
++ u32 uiEntryOffset, u32 ui32ParentIndex)
++{
++
++ struct sHandleList *psPrevIns =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev,
++ ui32ParentIndex, uiParentOffset,
++ uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent == NULL);
++ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET
++ (psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset,
++ uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase,
++ ui32ParentIndex));
++
++ psEntry->ui32Prev = psIns->ui32Prev;
++ psIns->ui32Prev = ui32EntryIndex;
++ psEntry->ui32Next = ui32InsIndex;
++ psPrevIns->ui32Next = ui32EntryIndex;
++
++ psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++static
++void AdoptChild(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psParent,
++ struct sHandle *psChild)
++{
++ u32 ui32Parent = HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++ PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren,
++ offsetof(struct sHandle, sChildren),
++ HANDLE_PTR_TO_INDEX(psBase, psChild),
++ &psChild->sSiblings, offsetof(struct sHandle,
++ sSiblings),
++ ui32Parent);
++
++}
++
++static
++void HandleListRemove(PVRSRV_HANDLE_BASE * psBase, u32 ui32EntryIndex,
++ struct sHandleList *psEntry, u32 uiEntryOffset,
++ u32 uiParentOffset)
++{
++ if (!HandleListIsEmpty(ui32EntryIndex, psEntry)) {
++
++ struct sHandleList *psPrev =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev,
++ HANDLE_TO_INDEX(psBase,
++ psEntry->
++ hParent),
++ uiParentOffset,
++ uiEntryOffset);
++ struct sHandleList *psNext =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next,
++ HANDLE_TO_INDEX(psBase,
++ psEntry->
++ hParent),
++ uiParentOffset,
++ uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent != NULL);
++
++ psPrev->ui32Next = psEntry->ui32Next;
++ psNext->ui32Prev = psEntry->ui32Prev;
++
++ HandleListInit(ui32EntryIndex, psEntry, NULL);
++ }
++}
++
++static
++void UnlinkFromParent(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle)
++{
++ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle),
++ &psHandle->sSiblings, offsetof(struct sHandle,
++ sSiblings),
++ offsetof(struct sHandle, sChildren));
++}
++
++static
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE * psBase,
++ struct sHandleList *psHead, u32 uiParentOffset,
++ u32 uiEntryOffset,
++ PVRSRV_ERROR(*pfnIterFunc) (PVRSRV_HANDLE_BASE *,
++ struct sHandle *))
++{
++ u32 ui32Index;
++ u32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++ PVR_ASSERT(psHead->hParent != NULL);
++
++ for (ui32Index = psHead->ui32Next; ui32Index != ui32Parent;) {
++ struct sHandle *psHandle =
++ INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++
++ struct sHandleList *psEntry =
++ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index,
++ ui32Parent, uiParentOffset,
++ uiEntryOffset);
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psEntry->hParent == psHead->hParent);
++
++ ui32Index = psEntry->ui32Next;
++
++ eError = (*pfnIterFunc) (psBase, psHandle);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE * psBase,
++ struct sHandle *psParent,
++ PVRSRV_ERROR(*pfnIterFunc) (PVRSRV_HANDLE_BASE
++ *,
++ struct sHandle *))
++{
++ return HandleListIterate(psBase, &psParent->sChildren,
++ offsetof(struct sHandle, sChildren),
++ offsetof(struct sHandle, sSiblings),
++ pfnIterFunc);
++}
++
++static
++PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE * psBase,
++ struct sHandle **ppsHandle, void *hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ u32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++ struct sHandle *psHandle;
++
++ if (!INDEX_IS_VALID(psBase, ui32Index)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "GetHandleStructure: Handle index out of range (%u >= %u)",
++ ui32Index, psBase->ui32TotalHandCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "GetHandleStructure: Handle not allocated (index: %u)",
++ ui32Index));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "GetHandleStructure: Handle type mismatch (%d != %d)",
++ eType, psHandle->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *ppsHandle = psHandle;
++
++ return PVRSRV_OK;
++}
++
++static
++void *ParentIfPrivate(struct sHandle *psHandle)
++{
++ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ ParentHandle(psHandle) : NULL;
++}
++
++static
++void InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE * psBase, void *pvData,
++ PVRSRV_HANDLE_TYPE eType, void *hParent)
++{
++ aKey[HAND_KEY_DATA] = (u32) pvData;
++ aKey[HAND_KEY_TYPE] = (u32) eType;
++ aKey[HAND_KEY_PARENT] = (u32) hParent;
++}
++
++static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE * psBase)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->psHandleArray != NULL) {
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psBase->ui32TotalHandCount *
++ sizeof(struct sHandle),
++ psBase->psHandleArray,
++ psBase->hHandBlockAlloc);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeHandleArray: Error freeing memory (%d)",
++ eError));
++ } else {
++ psBase->psHandleArray = NULL;
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE * psBase,
++ struct sHandle *psHandle)
++{
++ HAND_KEY aKey;
++ u32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++ PVRSRV_ERROR eError;
++
++ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType,
++ ParentIfPrivate(psHandle));
++
++ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)
++ && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++ void *hHandle;
++ hHandle = (void *)HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++ PVR_ASSERT(hHandle != NULL);
++ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++ }
++
++ UnlinkFromParent(psBase, psHandle);
++
++ eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeHandle: Error whilst freeing subhandles (%d)",
++ eError));
++ return eError;
++ }
++
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++ if (BATCHED_HANDLE(psHandle)
++ && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++
++ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
++
++ return PVRSRV_OK;
++ }
++
++ if (!psBase->bPurgingEnabled) {
++ if (psBase->ui32FreeHandCount == 0) {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = ui32Index;
++ } else {
++
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR
++ (psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne == 0);
++ INDEX_TO_HANDLE_PTR(psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne =
++ ui32Index + 1;
++ }
++
++ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++
++ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++ }
++
++ psBase->ui32FreeHandCount++;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE * psBase)
++{
++ u32 i;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount) {
++ return eError;
++ }
++
++ for (i = 0; i < psBase->ui32TotalHandCount; i++) {
++ struct sHandle *psHandle;
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE) {
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeAllHandles: FreeHandle failed (%d)",
++ eError));
++ break;
++ }
++
++ if (psBase->ui32FreeHandCount ==
++ psBase->ui32TotalHandCount) {
++ break;
++ }
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE * psBase)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "FreeHandleBase: Uncommitted/Unreleased handle batch"));
++ PVRSRVReleaseHandleBatch(psBase);
++ }
++
++ eError = FreeAllHandles(psBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeHandleBase: Couldn't free handles (%d)", eError));
++ return eError;
++ }
++
++ eError = FreeHandleArray(psBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeHandleBase: Couldn't free handle array (%d)",
++ eError));
++ return eError;
++ }
++
++ if (psBase->psHashTab != NULL) {
++
++ HASH_Delete(psBase->psHashTab);
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase), psBase, psBase->hBaseBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeHandleBase: Couldn't free handle base (%d)",
++ eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static
++void *FindHandle(PVRSRV_HANDLE_BASE * psBase, void *pvData,
++ PVRSRV_HANDLE_TYPE eType, void *hParent)
++{
++ HAND_KEY aKey;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ return (void *)HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static PVRSRV_ERROR ReallocMem(void **ppvMem, void **phBlockAlloc,
++ u32 ui32NewSize, u32 ui32OldSize)
++{
++ void *pvOldMem = *ppvMem;
++ void *hOldBlockAlloc = *phBlockAlloc;
++ u32 ui32CopySize = MIN(ui32NewSize, ui32OldSize);
++ void *pvNewMem = NULL;
++ void *hNewBlockAlloc = NULL;
++ PVRSRV_ERROR eError;
++
++ if (ui32NewSize == ui32OldSize) {
++ return (PVRSRV_OK);
++ }
++
++ if (ui32NewSize != 0) {
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NewSize,
++ &pvNewMem, &hNewBlockAlloc, "Memory Area");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ReallocMem: Couldn't allocate new memory area (%d)",
++ eError));
++ return eError;
++ }
++ }
++
++ if (ui32CopySize != 0) {
++
++ memcpy(pvNewMem, pvOldMem, ui32CopySize);
++ }
++
++ if (ui32OldSize != 0) {
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32OldSize, pvOldMem, hOldBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ReallocMem: Couldn't free old memory area (%d)",
++ eError));
++ }
++ }
++
++ *ppvMem = pvNewMem;
++ *phBlockAlloc = hNewBlockAlloc;
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE * psBase, u32 ui32NewCount,
++ u32 ui32OldCount)
++{
++ return ReallocMem((void **)&psBase->psHandleArray,
++ &psBase->hHandBlockAlloc,
++ ui32NewCount * sizeof(struct sHandle),
++ ui32OldCount * sizeof(struct sHandle));
++}
++
++static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE * psBase,
++ u32 ui32Delta)
++{
++ PVRSRV_ERROR eError;
++ struct sHandle *psHandle;
++ u32 ui32DeltaAdjusted =
++ ROUND_UP_TO_MULTIPLE(ui32Delta, HANDLE_BLOCK_SIZE);
++ u32 ui32NewTotalHandCount =
++ psBase->ui32TotalHandCount + ui32DeltaAdjusted;
++ ;
++
++ PVR_ASSERT(ui32Delta != 0);
++
++ if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne
++ || ui32NewTotalHandCount <= psBase->ui32TotalHandCount) {
++ ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;
++
++ ui32DeltaAdjusted =
++ ui32NewTotalHandCount - psBase->ui32TotalHandCount;
++
++ if (ui32DeltaAdjusted < ui32Delta) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "IncreaseHandleArraySize: Maximum handle limit reached (%d)",
++ psBase->ui32MaxIndexPlusOne));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++
++ PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta);
++
++ eError =
++ ReallocHandleArray(psBase, ui32NewTotalHandCount,
++ psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "IncreaseHandleArraySize: ReallocHandleArray failed (%d)",
++ eError));
++ return eError;
++ }
++
++ for (psHandle = psBase->psHandleArray + psBase->ui32TotalHandCount;
++ psHandle < psBase->psHandleArray + ui32NewTotalHandCount;
++ psHandle++) {
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++ psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psHandle->ui32NextIndexPlusOne = 0;
++ }
++
++ psBase->ui32FreeHandCount += ui32DeltaAdjusted;
++
++ if (psBase->ui32FirstFreeIndex == 0) {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++ } else {
++ if (!psBase->bPurgingEnabled) {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0)
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR
++ (psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne == 0);
++
++ INDEX_TO_HANDLE_PTR(psBase,
++ psBase->ui32LastFreeIndexPlusOne -
++ 1)->ui32NextIndexPlusOne =
++ psBase->ui32TotalHandCount + 1;
++ }
++ }
++
++ if (!psBase->bPurgingEnabled) {
++ psBase->ui32LastFreeIndexPlusOne = ui32NewTotalHandCount;
++ }
++
++ psBase->ui32TotalHandCount = ui32NewTotalHandCount;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE * psBase, u32 ui32Free)
++{
++ PVRSRV_ERROR eError;
++
++ if (ui32Free > psBase->ui32FreeHandCount) {
++ u32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount;
++ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)",
++ ui32FreeHandDelta, ui32Free, eError));
++
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE * psBase, void **phHandle,
++ void *pvData, PVRSRV_HANDLE_TYPE eType,
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag, void *hParent)
++{
++ u32 ui32NewIndex;
++ struct sHandle *psNewHandle = NULL;
++ void *hHandle;
++ HAND_KEY aKey;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ PVR_ASSERT(psBase->psHashTab != NULL);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++
++ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
++ }
++
++ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase)) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "AllocHandle: Handle batch size (%u) was too small, allocating additional space",
++ psBase->ui32HandBatchSize));
++ }
++
++ eError = EnsureFreeHandles(psBase, 1);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocHandle: EnsureFreeHandles failed (%d)", eError));
++ return eError;
++ }
++ PVR_ASSERT(psBase->ui32FreeHandCount != 0)
++
++ if (!psBase->bPurgingEnabled) {
++
++ ui32NewIndex = psBase->ui32FirstFreeIndex;
++
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ } else {
++
++ for (ui32NewIndex = psBase->ui32FirstFreeIndex;
++ ui32NewIndex < psBase->ui32TotalHandCount;
++ ui32NewIndex++) {
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ if (HANDLE_STRUCT_IS_FREE(psNewHandle)) {
++ break;
++ }
++
++ }
++ psBase->ui32FirstFreeIndex = 0;
++ PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount);
++ }
++ PVR_ASSERT(psNewHandle != NULL);
++#ifdef INTEL_D3_P_CHANGES
++ if (NULL == psNewHandle) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocHandle: New handle pointer is NULL"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ if (!HASH_Insert_Extended
++ (psBase->psHashTab, aKey, (u32) hHandle)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocHandle: Couldn't add handle to hash table"));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ psBase->ui32FreeHandCount--;
++
++ if (!psBase->bPurgingEnabled) {
++
++ if (psBase->ui32FreeHandCount == 0) {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne ==
++ (ui32NewIndex + 1));
++
++ psBase->ui32LastFreeIndexPlusOne = 0;
++ psBase->ui32FirstFreeIndex = 0;
++ } else {
++
++ psBase->ui32FirstFreeIndex =
++ (psNewHandle->ui32NextIndexPlusOne ==
++ 0) ? ui32NewIndex +
++ 1 : psNewHandle->ui32NextIndexPlusOne - 1;
++ }
++ }
++
++ psNewHandle->eType = eType;
++ psNewHandle->pvData = pvData;
++ psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psNewHandle->eFlag = eFlag;
++ psNewHandle->ui32Index = ui32NewIndex;
++
++ InitParentList(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoChildren(psBase, psNewHandle));
++#endif
++
++ InitChildEntry(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoParent(psBase, psNewHandle));
++#endif
++
++ if (HANDLES_BATCHED(psBase)) {
++
++ psNewHandle->ui32NextIndexPlusOne =
++ psBase->ui32FirstBatchIndexPlusOne;
++
++ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1;
++
++ SET_BATCHED_HANDLE(psNewHandle);
++ } else {
++ psNewHandle->ui32NextIndexPlusOne = 0;
++ }
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE * psBase, void **phHandle,
++ void *pvData, PVRSRV_HANDLE_TYPE eType,
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ void *hHandle;
++ PVRSRV_ERROR eError;
++
++ *phHandle = NULL;
++
++ if (HANDLES_BATCHED(psBase)) {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++
++ hHandle = FindHandle(psBase, pvData, eType, NULL);
++ if (hHandle != NULL) {
++ struct sHandle *psHandle;
++
++ eError =
++ GetHandleStructure(psBase, &psHandle, hHandle,
++ eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocHandle: Lookup of existing handle failed"));
++ return eError;
++ }
++
++ if (TEST_FLAG
++ (psHandle->eFlag & eFlag,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED)) {
++ *phHandle = hHandle;
++ eError = PVRSRV_OK;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL);
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK)) {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE * psBase, void **phHandle,
++ void *pvData, PVRSRV_HANDLE_TYPE eType,
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag, void *hParent)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++ void *hParentKey;
++ void *hHandle;
++
++ *phHandle = NULL;
++
++ if (HANDLES_BATCHED(psBase)) {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ hParent : NULL;
++
++ eError =
++ GetHandleStructure(psBase, &psPHand, hParent,
++ PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) {
++
++ hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++ if (hHandle != NULL) {
++ struct sHandle *psCHandle;
++ PVRSRV_ERROR eErr;
++
++ eErr =
++ GetHandleStructure(psBase, &psCHandle, hHandle,
++ eType);
++ if (eErr != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
++ return eErr;
++ }
++
++ PVR_ASSERT(hParentKey != NULL
++ &&
++ ParentHandle(HANDLE_TO_HANDLE_PTR
++ (psBase, hHandle)) == hParent);
++
++ if (TEST_FLAG
++ (psCHandle->eFlag & eFlag,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED)
++ &&
++ ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle))
++ == hParent) {
++ *phHandle = hHandle;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError =
++ AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++ psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++ AdoptChild(psBase, psPHand, psCHand);
++
++ *phHandle = hHandle;
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase)) {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE * psBase, void **phHandle,
++ void *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ void *hHandle;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hHandle = (void *)FindHandle(psBase, pvData, eType, NULL);
++ if (hHandle == NULL) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE * psBase,
++ void **ppvData,
++ PVRSRV_HANDLE_TYPE * peType,
++ void *hHandle)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ eError =
++ GetHandleStructure(psBase, &psHandle, hHandle,
++ PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVLookupHandleAnyType: Error looking up handle (%d)",
++ eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++ *peType = psHandle->eType;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE * psBase, void **ppvData,
++ void *hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVLookupHandle: Error looking up handle (%d)",
++ eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE * psBase, void **ppvData,
++ void *hHandle, PVRSRV_HANDLE_TYPE eType,
++ void *hAncestor)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVLookupSubHandle: Error looking up subhandle (%d)",
++ eError));
++ return eError;
++ }
++
++ for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor;) {
++ eError =
++ GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand),
++ PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ *ppvData = psCHand->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE * psBase, void **phParent,
++ void *hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetParentHandle: Error looking up subhandle (%d)",
++ eError));
++ return eError;
++ }
++
++ *phParent = ParentHandle(psHandle);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE * psBase,
++ void **ppvData, void *hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)",
++ eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE * psBase, void *hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVReleaseHandle: Error looking up handle (%d)",
++ eError));
++ return eError;
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE * psBase,
++ u32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)",
++ psBase->ui32HandBatchSize));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32BatchSize == 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVNewHandleBatch: Invalid batch size (%u)",
++ ui32BatchSize));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = EnsureFreeHandles(psBase, ui32BatchSize);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)",
++ eError));
++ return eError;
++ }
++
++ psBase->ui32HandBatchSize = ui32BatchSize;
++
++ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0);
++
++ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0);
++
++ PVR_ASSERT(HANDLES_BATCHED(psBase));
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *
++ psBase, int bCommit)
++{
++
++ u32 ui32IndexPlusOne;
++ int bCommitBatch = bCommit;
++
++ if (!HANDLES_BATCHED(psBase)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVHandleBatchCommitOrRelease: There is no handle batch"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ }
++
++ if (psBase->ui32BatchHandAllocFailures != 0) {
++ if (bCommit) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures."));
++ }
++ bCommitBatch = 0;
++ }
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit);
++
++ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++ while (ui32IndexPlusOne != 0) {
++ struct sHandle *psHandle =
++ INDEX_TO_HANDLE_PTR(psBase, ui32IndexPlusOne - 1);
++ u32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne;
++ PVR_ASSERT(BATCHED_HANDLE(psHandle));
++
++ psHandle->ui32NextIndexPlusOne = 0;
++
++ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++ PVRSRV_ERROR eError;
++
++ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)",
++ eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++ } else {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ ui32IndexPlusOne = ui32NextIndexPlusOne;
++ }
++
++#ifdef DEBUG
++ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount) {
++ u32 ui32Delta =
++ psBase->ui32TotalHandCount -
++ psBase->ui32TotalHandCountPreBatch;
++
++ PVR_ASSERT(psBase->ui32TotalHandCount >
++ psBase->ui32TotalHandCountPreBatch);
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u",
++ psBase->ui32HandBatchSize,
++ psBase->ui32HandBatchSize + ui32Delta));
++
++ }
++#endif
++
++ psBase->ui32HandBatchSize = 0;
++ psBase->ui32FirstBatchIndexPlusOne = 0;
++ psBase->ui32TotalHandCountPreBatch = 0;
++ psBase->ui32BatchHandAllocFailures = 0;
++
++ if (psBase->ui32BatchHandAllocFailures != 0 && bCommit) {
++ PVR_ASSERT(!bCommitBatch);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE * psBase)
++{
++ return PVRSRVHandleBatchCommitOrRelease(psBase, 1);
++}
++
++void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE * psBase)
++{
++ (void)PVRSRVHandleBatchCommitOrRelease(psBase, 0);
++}
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE * psBase, u32 ui32MaxHandle)
++{
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (ui32MaxHandle == 0 || ui32MaxHandle >= DEFAULT_MAX_HANDLE) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive",
++ 0, DEFAULT_MAX_HANDLE));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psBase->ui32TotalHandCount != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetMaxHandle: Limit cannot be set becuase handles have already been allocated"));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->ui32MaxIndexPlusOne = ui32MaxHandle;
++
++ return PVRSRV_OK;
++}
++
++u32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE * psBase)
++{
++ return psBase->ui32MaxIndexPlusOne;
++}
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE * psBase)
++{
++ if (psBase->bPurgingEnabled) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVEnableHandlePurging: Purging already enabled"));
++ return PVRSRV_OK;
++ }
++
++ if (psBase->ui32TotalHandCount != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVEnableHandlePurging: Handles have already been allocated"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->bPurgingEnabled = 1;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE * psBase)
++{
++ u32 ui32Handle;
++ u32 ui32NewHandCount;
++
++ if (!psBase->bPurgingEnabled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPurgeHandles: Purging not enabled for this handle base"));
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++ }
++
++ if (HANDLES_BATCHED(psBase)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ for (ui32Handle = psBase->ui32TotalHandCount; ui32Handle != 0;
++ ui32Handle--) {
++ struct sHandle *psHandle =
++ HANDLE_TO_HANDLE_PTR(psBase, ui32Handle);
++ if (!HANDLE_STRUCT_IS_FREE(psHandle)) {
++ break;
++ }
++ }
++
++ ui32NewHandCount = ROUND_UP_TO_MULTIPLE(ui32Handle, HANDLE_BLOCK_SIZE);
++
++ if (ui32NewHandCount >= ui32Handle
++ && ui32NewHandCount <= (psBase->ui32TotalHandCount / 2)) {
++ u32 ui32Delta = psBase->ui32TotalHandCount - ui32NewHandCount;
++ PVRSRV_ERROR eError;
++
++ eError =
++ ReallocHandleArray(psBase, ui32NewHandCount,
++ psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ psBase->ui32TotalHandCount = ui32NewHandCount;
++ psBase->ui32FreeHandCount -= ui32Delta;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE ** ppsBase)
++{
++ PVRSRV_HANDLE_BASE *psBase;
++ void *hBlockAlloc;
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ (void **)&psBase, &hBlockAlloc, "Handle Base");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)",
++ eError));
++ return eError;
++ }
++ memset(psBase, 0, sizeof(*psBase));
++
++ psBase->psHashTab =
++ HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY),
++ HASH_Func_Default, HASH_Key_Comp_Default);
++ if (psBase->psHashTab == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
++ goto failure;
++ }
++
++ psBase->hBaseBlockAlloc = hBlockAlloc;
++
++ psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE;
++
++ *ppsBase = psBase;
++
++ return PVRSRV_OK;
++failure:
++ (void)PVRSRVFreeHandleBase(psBase);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE * psBase)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++ eError = FreeHandleBase(psBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)",
++ eError));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleInit(void)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsKernelHandleBase == NULL);
++
++ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)",
++ eError));
++ goto error;
++ }
++
++ eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)",
++ eError));
++ goto error;
++ }
++
++ return PVRSRV_OK;
++error:
++ (void)PVRSRVHandleDeInit();
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleDeInit(void)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (gpsKernelHandleBase != NULL) {
++ eError = FreeHandleBase(gpsKernelHandleBase);
++ if (eError == PVRSRV_OK) {
++ gpsKernelHandleBase = NULL;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVHandleDeInit: FreeHandleBase failed (%d)",
++ eError));
++ }
++ }
++
++ return eError;
++}
++#else
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/hash.c
+@@ -0,0 +1,434 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
++
++#define KEY_TO_INDEX(pHash, key, uSize) \
++ ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define KEY_COMPARE(pHash, pKey1, pKey2) \
++ ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct _BUCKET_ {
++
++ struct _BUCKET_ *pNext;
++
++ u32 v;
++
++ u32 k[];
++};
++typedef struct _BUCKET_ BUCKET;
++
++struct _HASH_TABLE_ {
++
++ BUCKET **ppBucketTable;
++
++ u32 uSize;
++
++ u32 uCount;
++
++ u32 uMinimumSize;
++
++ u32 uKeySize;
++
++ HASH_FUNC *pfnHashFunc;
++
++ HASH_KEY_COMP *pfnKeyComp;
++};
++
++u32 HASH_Func_Default(u32 uKeySize, void *pKey, u32 uHashTabLen)
++{
++ u32 *p = (u32 *) pKey;
++ u32 uKeyLen = uKeySize / sizeof(u32);
++ u32 ui;
++ u32 uHashKey = 0;
++
++ PVR_ASSERT((uKeySize % sizeof(u32)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++) {
++ u32 uHashPart = (u32) * p++;
++
++ uHashPart += (uHashPart << 12);
++ uHashPart ^= (uHashPart >> 22);
++ uHashPart += (uHashPart << 4);
++ uHashPart ^= (uHashPart >> 9);
++ uHashPart += (uHashPart << 10);
++ uHashPart ^= (uHashPart >> 2);
++ uHashPart += (uHashPart << 7);
++ uHashPart ^= (uHashPart >> 12);
++
++ uHashKey += uHashPart;
++ }
++
++ return uHashKey;
++}
++
++int HASH_Key_Comp_Default(u32 uKeySize, void *pKey1, void *pKey2)
++{
++ u32 *p1 = (u32 *) pKey1;
++ u32 *p2 = (u32 *) pKey2;
++ u32 uKeyLen = uKeySize / sizeof(u32);
++ u32 ui;
++
++ PVR_ASSERT((uKeySize % sizeof(u32)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++) {
++ if (*p1++ != *p2++)
++ return 0;
++ }
++
++ return 1;
++}
++
++static PVRSRV_ERROR
++_ChainInsert(HASH_TABLE * pHash, BUCKET * pBucket, BUCKET ** ppBucketTable,
++ u32 uSize)
++{
++ u32 uIndex;
++
++ PVR_ASSERT(pBucket != NULL);
++ PVR_ASSERT(ppBucketTable != NULL);
++ PVR_ASSERT(uSize != 0);
++
++ if ((pBucket == NULL) || (ppBucketTable == NULL) || (uSize == 0)) {
++ PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++ pBucket->pNext = ppBucketTable[uIndex];
++ ppBucketTable[uIndex] = pBucket;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_Rehash(HASH_TABLE * pHash,
++ BUCKET ** ppOldTable, u32 uOldSize, BUCKET ** ppNewTable, u32 uNewSize)
++{
++ u32 uIndex;
++ for (uIndex = 0; uIndex < uOldSize; uIndex++) {
++ BUCKET *pBucket;
++ pBucket = ppOldTable[uIndex];
++ while (pBucket != NULL) {
++ BUCKET *pNextBucket = pBucket->pNext;
++ if (_ChainInsert(pHash, pBucket, ppNewTable, uNewSize)
++ != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_Rehash: call to _ChainInsert failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ pBucket = pNextBucket;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static int _Resize(HASH_TABLE * pHash, u32 uNewSize)
++{
++ if (uNewSize != pHash->uSize) {
++ BUCKET **ppNewTable;
++ u32 uIndex;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x",
++ pHash->uSize, uNewSize, pHash->uCount));
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET *) * uNewSize,
++ (void **)&ppNewTable, NULL, "Hash Table Buckets");
++ if (ppNewTable == NULL)
++ return 0;
++
++ for (uIndex = 0; uIndex < uNewSize; uIndex++)
++ ppNewTable[uIndex] = NULL;
++
++ if (_Rehash
++ (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable,
++ uNewSize) != PVRSRV_OK) {
++ return 0;
++ }
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET *) * pHash->uSize, pHash->ppBucketTable,
++ NULL);
++
++ pHash->ppBucketTable = ppNewTable;
++ pHash->uSize = uNewSize;
++ }
++ return 1;
++}
++
++HASH_TABLE *HASH_Create_Extended(u32 uInitialLen, u32 uKeySize,
++ HASH_FUNC * pfnHashFunc,
++ HASH_KEY_COMP * pfnKeyComp)
++{
++ HASH_TABLE *pHash;
++ u32 uIndex;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x",
++ uInitialLen));
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(HASH_TABLE),
++ (void **)&pHash, NULL, "Hash Table") != PVRSRV_OK) {
++ return NULL;
++ }
++
++ pHash->uCount = 0;
++ pHash->uSize = uInitialLen;
++ pHash->uMinimumSize = uInitialLen;
++ pHash->uKeySize = uKeySize;
++ pHash->pfnHashFunc = pfnHashFunc;
++ pHash->pfnKeyComp = pfnKeyComp;
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET *) * pHash->uSize,
++ (void **)&pHash->ppBucketTable, NULL, "Hash Table Buckets");
++
++ if (pHash->ppBucketTable == NULL) {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash,
++ NULL);
++
++ return NULL;
++ }
++
++ for (uIndex = 0; uIndex < pHash->uSize; uIndex++)
++ pHash->ppBucketTable[uIndex] = NULL;
++ return pHash;
++}
++
++HASH_TABLE *HASH_Create(u32 uInitialLen)
++{
++ return HASH_Create_Extended(uInitialLen, sizeof(u32),
++ &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++void HASH_Delete(HASH_TABLE * pHash)
++{
++ if (pHash != NULL) {
++ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete"));
++
++ PVR_ASSERT(pHash->uCount == 0);
++ if (pHash->uCount != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "HASH_Delete: leak detected in hash table!"));
++ PVR_DPF((PVR_DBG_ERROR,
++ "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ }
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET *) * pHash->uSize, pHash->ppBucketTable,
++ NULL);
++ pHash->ppBucketTable = NULL;
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash,
++ NULL);
++
++ }
++}
++
++int HASH_Insert_Extended(HASH_TABLE * pHash, void *pKey, u32 v)
++{
++ BUCKET *pBucket;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash,
++ pKey, v));
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "HASH_Insert_Extended: invalid parameter"));
++ return 0;
++ }
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET) + pHash->uKeySize,
++ (void **)&pBucket, NULL,
++ "Hash Table entry") != PVRSRV_OK) {
++ return 0;
++ }
++
++ pBucket->v = v;
++
++ memcpy(pBucket->k, pKey, pHash->uKeySize);
++ if (_ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize) !=
++ PVRSRV_OK) {
++ return 0;
++ }
++
++ pHash->uCount++;
++
++ if (pHash->uCount << 1 > pHash->uSize) {
++
++ _Resize(pHash, pHash->uSize << 1);
++ }
++
++ return 1;
++}
++
++int HASH_Insert(HASH_TABLE * pHash, u32 k, u32 v)
++{
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v));
++
++ return HASH_Insert_Extended(pHash, &k, v);
++}
++
++u32 HASH_Remove_Extended(HASH_TABLE * pHash, void *pKey)
++{
++ BUCKET **ppBucket;
++ u32 uIndex;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=%08X, pKey=%08X",
++ pHash, pKey));
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "HASH_Remove_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL;
++ ppBucket = &((*ppBucket)->pNext)) {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) {
++ BUCKET *pBucket = *ppBucket;
++ u32 v = pBucket->v;
++ (*ppBucket) = pBucket->pNext;
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET) + pHash->uKeySize, pBucket,
++ NULL);
++
++ pHash->uCount--;
++
++ if (pHash->uSize > (pHash->uCount << 2) &&
++ pHash->uSize > pHash->uMinimumSize) {
++
++ _Resize(pHash,
++ PRIVATE_MAX(pHash->uSize >> 1,
++ pHash->uMinimumSize));
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash,
++ pKey));
++ return 0;
++}
++
++u32 HASH_Remove(HASH_TABLE * pHash, u32 k)
++{
++ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k));
++
++ return HASH_Remove_Extended(pHash, &k);
++}
++
++u32 HASH_Retrieve_Extended(HASH_TABLE * pHash, void *pKey)
++{
++ BUCKET **ppBucket;
++ u32 uIndex;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Retrieve_Extended: Hash=%08X, pKey=%08X", pHash, pKey));
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "HASH_Retrieve_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL;
++ ppBucket = &((*ppBucket)->pNext)) {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) {
++ BUCKET *pBucket = *ppBucket;
++ u32 v = pBucket->v;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash,
++ pKey));
++ return 0;
++}
++
++u32 HASH_Retrieve(HASH_TABLE * pHash, u32 k)
++{
++ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash,
++ k));
++ return HASH_Retrieve_Extended(pHash, &k);
++}
++
++#ifdef HASH_TRACE
++void HASH_Dump(HASH_TABLE * pHash)
++{
++ u32 uIndex;
++ u32 uMaxLength = 0;
++ u32 uEmptyCount = 0;
++
++ PVR_ASSERT(pHash != NULL);
++ for (uIndex = 0; uIndex < pHash->uSize; uIndex++) {
++ BUCKET *pBucket;
++ u32 uLength = 0;
++ if (pHash->ppBucketTable[uIndex] == NULL)
++ uEmptyCount++;
++ for (pBucket = pHash->ppBucketTable[uIndex];
++ pBucket != NULL; pBucket = pBucket->pNext)
++ uLength++;
++ uMaxLength = PRIVATE_MAX(uMaxLength, uLength);
++ }
++
++ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
++ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/lists.c
+@@ -0,0 +1,92 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "lists.h"
++#include "services_headers.h"
++
++IMPLEMENT_LIST_ANY_VA(BM_HEAP)
++ IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++ IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++ IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP)
++ IMPLEMENT_LIST_REMOVE(BM_HEAP)
++ IMPLEMENT_LIST_INSERT(BM_HEAP)
++
++/* FIXME MLD */
++typedef void *mldhack;
++IMPLEMENT_LIST_ANY_VA(BM_CONTEXT)
++ IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, mldhack, NULL)
++ IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK)
++ IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT)
++ IMPLEMENT_LIST_REMOVE(BM_CONTEXT)
++ IMPLEMENT_LIST_INSERT(BM_CONTEXT)
++
++ IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++ IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
++ IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++ IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
++ IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
++ IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE)
++ IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
++
++ IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV)
++ IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK)
++ IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV)
++ IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV)
++
++void *MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE * psDeviceNode, va_list va)
++{
++ u32 ui32DevIndex;
++ int bIgnoreClass;
++ PVRSRV_DEVICE_CLASS eDevClass;
++
++ ui32DevIndex = va_arg(va, u32);
++ bIgnoreClass = va_arg(va, int);
++ if (!bIgnoreClass) {
++ eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++ } else {
++
++ eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
++ }
++
++ if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex) {
++ return psDeviceNode;
++ }
++ return NULL;
++}
++
++void *MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV * psPowerDev, va_list va)
++{
++ u32 ui32DeviceIndex;
++
++ ui32DeviceIndex = va_arg(va, u32);
++
++ if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex) {
++ return psPowerDev;
++ } else {
++ return NULL;
++ }
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/mem.c
+@@ -0,0 +1,131 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pvr_bridge_km.h"
++
++static PVRSRV_ERROR FreeSharedSysMemCallBack(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
++
++ OSFreePages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ psKernelMemInfo->pvLinAddrKM,
++ psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO), psKernelMemInfo, NULL);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ u32 ui32Flags,
++ u32 ui32Size,
++ PVRSRV_KERNEL_MEM_INFO ** ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (void **)&psKernelMemInfo, NULL,
++ "Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ memset(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
++
++ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++ psKernelMemInfo->ui32Flags = ui32Flags;
++ psKernelMemInfo->ui32AllocSize = ui32Size;
++
++ if (OSAllocPages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ HOST_PAGESIZE(),
++ &psKernelMemInfo->pvLinAddrKM,
++ &psKernelMemInfo->sMemBlk.hOSMemHandle)
++ != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO), psKernelMemInfo, 0);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ psKernelMemInfo, 0, FreeSharedSysMemCallBack);
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO * psKernelMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if (psKernelMemInfo->sMemBlk.hResItem) {
++ eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem);
++ } else {
++ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0);
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO * psKernelMemInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (!psKernelMemInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psKernelMemInfo->sMemBlk.hResItem) {
++ eError =
++ ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem,
++ NULL);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed"));
++ PVR_DBG_BREAK;
++ return eError;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem = NULL;
++ }
++
++ return eError;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/mem_debug.c
+@@ -0,0 +1,228 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef MEM_DEBUG_C
++#define MEM_DEBUG_C
++
++#if defined(PVRSRV_DEBUG_OS_MEMORY)
++
++#include "img_types.h"
++#include "services_headers.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define STOP_ON_ERROR 0
++
++ int MemCheck(const void *pvAddr, const u8 ui8Pattern, u32 uSize) {
++ u8 *pui8Addr;
++ for (pui8Addr = (u8 *) pvAddr; uSize > 0; uSize--, pui8Addr++) {
++ if (*pui8Addr != ui8Pattern) {
++ return 0;
++ }
++ } return 1;
++ }
++
++ void OSCheckMemDebug(void *pvCpuVAddr, u32 uSize,
++ const char *pszFileName, const u32 uLine) {
++ OSMEM_DEBUG_INFO const *psInfo =
++ (OSMEM_DEBUG_INFO *) ((u32) pvCpuVAddr -
++ TEST_BUFFER_PADDING_STATUS);
++
++ if (pvCpuVAddr == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : null pointer"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR) ;
++ }
++
++ if (((u32) pvCpuVAddr & 3) != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Pointer 0x%X : invalid alignment"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR) ;
++ }
++
++ if (!MemCheck
++ ((void *)psInfo->sGuardRegionBefore, 0xB1,
++ sizeof(psInfo->sGuardRegionBefore))) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Pointer 0x%X : guard region before overwritten"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR) ;
++ }
++
++ if (uSize != psInfo->uSize) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "Pointer 0x%X : supplied size was different to stored size (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, uSize, psInfo->uSize, pszFileName,
++ uLine, psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR) ;
++ }
++
++ if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "Pointer 0x%X : stored size parity error (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, psInfo->uSize,
++ 0x01234567 ^ psInfo->uSizeParityCheck,
++ pszFileName, uLine, psInfo->sFileName,
++ psInfo->uLineNo));
++ while (STOP_ON_ERROR) ;
++ } else {
++
++ uSize = psInfo->uSize;
++ }
++
++ if (uSize) {
++ if (!MemCheck
++ ((void *)((u32) pvCpuVAddr + uSize), 0xB2,
++ TEST_BUFFER_PADDING_AFTER)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Pointer 0x%X : guard region after overwritten"
++ " - referenced from %s:%d - allocated from %s:%d",
++ pvCpuVAddr, pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ }
++ }
++
++ if (psInfo->eValid != isAllocated) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Pointer 0x%X : not allocated (freed? %d)"
++ " - referenced %s:%d - freed %s:%d",
++ pvCpuVAddr, psInfo->eValid == isFree,
++ pszFileName, uLine, psInfo->sFileName,
++ psInfo->uLineNo));
++ while (STOP_ON_ERROR) ;
++ }
++ }
++
++ void debug_strcpy(char *pDest, const char *pSrc) {
++ u32 i = 0;
++
++ for (; i < 128; i++) {
++ *pDest = *pSrc;
++ if (*pSrc == '\0')
++ break;
++ pDest++;
++ pSrc++;
++ }
++ }
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(u32 ui32Flags,
++ u32 ui32Size,
++ void **ppvCpuVAddr,
++ void **phBlockAlloc,
++ char *pszFilename, u32 ui32Line) {
++ OSMEM_DEBUG_INFO *psInfo;
++
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags,
++ ui32Size +
++ TEST_BUFFER_PADDING,
++ ppvCpuVAddr,
++ phBlockAlloc,
++ pszFilename,
++ ui32Line);
++
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ memset((char *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS,
++ 0xBB, ui32Size);
++ memset((char *)(*ppvCpuVAddr) + ui32Size +
++ TEST_BUFFER_PADDING_STATUS, 0xB2,
++ TEST_BUFFER_PADDING_AFTER);
++
++ psInfo = (OSMEM_DEBUG_INFO *) (*ppvCpuVAddr);
++
++ memset(psInfo->sGuardRegionBefore, 0xB1,
++ sizeof(psInfo->sGuardRegionBefore));
++ debug_strcpy(psInfo->sFileName, pszFilename);
++ psInfo->uLineNo = ui32Line;
++ psInfo->eValid = isAllocated;
++ psInfo->uSize = ui32Size;
++ psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size;
++
++ *ppvCpuVAddr =
++ (void *)((u32) * ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS;
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++
++ PVR_TRACE(("Allocated pointer (after debug info): 0x%X from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line));
++#endif
++
++ return PVRSRV_OK;
++ }
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(u32 ui32Flags,
++ u32 ui32Size,
++ void *pvCpuVAddr,
++ void *hBlockAlloc,
++ char *pszFilename, u32 ui32Line) {
++ OSMEM_DEBUG_INFO *psInfo;
++
++ OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line);
++
++ memset(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER);
++
++ psInfo =
++ (OSMEM_DEBUG_INFO *) ((u32) pvCpuVAddr -
++ TEST_BUFFER_PADDING_STATUS);
++
++ psInfo->uSize = 0;
++ psInfo->uSizeParityCheck = 0;
++ psInfo->eValid = isFree;
++ psInfo->uLineNo = ui32Line;
++ debug_strcpy(psInfo->sFileName, pszFilename);
++
++ return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags,
++ ui32Size +
++ TEST_BUFFER_PADDING,
++ psInfo,
++ hBlockAlloc,
++ pszFilename,
++ ui32Line);
++ }
++
++#if defined (__cplusplus)
++
++}
++#endif
++
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/metrics.c
+@@ -0,0 +1,149 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "metrics.h"
++
++#if defined(SUPPORT_VGX)
++#include "vgxapi_km.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgxapi_km.h"
++#endif
++
++#if defined(DEBUG) || defined(TIMING)
++
++static volatile u32 *pui32TimerRegister = 0;
++
++#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total
++#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
++#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count
++
++Temporal_Data asTimers[PVRSRV_NUM_TIMERS];
++
++u32 PVRSRVTimeNow(void)
++{
++ if (!pui32TimerRegister) {
++ static int bFirstTime = 1;
++
++ if (bFirstTime) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVTimeNow: No timer register set up"));
++
++ bFirstTime = 0;
++ }
++
++ return 0;
++ }
++#if defined(__sh__)
++
++ return (0xffffffff - *pui32TimerRegister);
++
++#else
++
++ return 0;
++
++#endif
++}
++
++static u32 PVRSRVGetCPUFreq(void)
++{
++ u32 ui32Time1, ui32Time2;
++
++ ui32Time1 = PVRSRVTimeNow();
++
++ OSWaitus(1000000);
++
++ ui32Time2 = PVRSRVTimeNow();
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz",
++ ui32Time2 - ui32Time1));
++
++ return (ui32Time2 - ui32Time1);
++}
++
++void PVRSRVSetupMetricTimers(void *pvDevInfo)
++{
++ u32 ui32Loop;
++
++ for (ui32Loop = 0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++) {
++ asTimers[ui32Loop].ui32Total = 0;
++ asTimers[ui32Loop].ui32Count = 0;
++ }
++
++#if defined(__sh__)
++
++ *TCR_2 = TIMER_DIVISOR;
++
++ *TCOR_2 = *TCNT_2 = (u32) 0xffffffff;
++
++ *TST_REG |= (u8) 0x04;
++
++ pui32TimerRegister = (u32 *) TCNT_2;
++
++#else
++
++ pui32TimerRegister = 0;
++
++#endif
++
++}
++
++void PVRSRVOutputMetricTotals(void)
++{
++ u32 ui32TicksPerMS, ui32Loop;
++
++ ui32TicksPerMS = PVRSRVGetCPUFreq();
++
++ if (!ui32TicksPerMS) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
++ return;
++ }
++
++ for (ui32Loop = 0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++) {
++ if (asTimers[ui32Loop].ui32Count & 0x80000000L) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVOutputMetricTotals: Timer %u is still ON",
++ ui32Loop));
++ }
++ }
++#if 0
++
++ PVR_DPF((PVR_DBG_ERROR, " Timer(%u): Total = %u",
++ PVRSRV_TIMER_EXAMPLE_1,
++ PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR, " Timer(%u): Time = %ums",
++ PVRSRV_TIMER_EXAMPLE_1,
++ PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR, " Timer(%u): Count = %u",
++ PVRSRV_TIMER_EXAMPLE_1,
++ PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
++#endif
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/pdump_common.c
+@@ -0,0 +1,1558 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(PDUMP)
++#include <stdarg.h>
++
++#include "services_headers.h"
++#if defined(SUPPORT_SGX)
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#endif
++#include "pdump_km.h"
++
++#if !defined(PDUMP_TEMP_BUFFER_SIZE)
++#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024L)
++#endif
++
++#if 1
++#define PDUMP_DBG(a) PDumpOSDebugPrintf a
++#else
++#define PDUMP_DBG(a)
++#endif
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++#define PTR_PLUS(t, p, x) ((t *)(((char *)(p)) + (x)))
++#define VPTR_PLUS(p, x) PTR_PLUS(void, p, x)
++#define VPTR_INC(p, x) (p = VPTR_PLUS(p, x))
++#define MAX_PDUMP_MMU_CONTEXTS (32)
++static void *gpvTempBuffer = NULL;
++static void *ghTempBufferBlockAlloc;
++static u16 gui16MMUContextUsage = 0;
++
++static void *GetTempBuffer(void)
++{
++
++ if (gpvTempBuffer == NULL) {
++ PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ &gpvTempBuffer,
++ &ghTempBufferBlockAlloc,
++ "PDUMP Temporary Buffer");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "GetTempBuffer: OSAllocMem failed: %d",
++ eError));
++ }
++ }
++
++ return gpvTempBuffer;
++}
++
++static void FreeTempBuffer(void)
++{
++
++ if (gpvTempBuffer != NULL) {
++ PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ gpvTempBuffer,
++ ghTempBufferBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeTempBuffer: OSFreeMem failed: %d",
++ eError));
++ } else {
++ gpvTempBuffer = NULL;
++ }
++ }
++}
++
++void PDumpInitCommon(void)
++{
++
++ (void)GetTempBuffer();
++
++ PDumpInit();
++}
++
++void PDumpDeInitCommon(void)
++{
++
++ FreeTempBuffer();
++
++ PDumpDeInit();
++}
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++
++int PDumpIsSuspended(void)
++{
++ return PDumpOSIsSuspended();
++}
++
++PVRSRV_ERROR PDumpRegWithFlagsKM(u32 ui32Reg, u32 ui32Data, u32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++ PDUMP_DBG(("PDumpRegWithFlagsKM"));
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg,
++ ui32Data);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegKM(u32 ui32Reg, u32 ui32Data)
++{
++ return PDumpRegWithFlagsKM(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpRegPolWithFlagsKM(u32 ui32RegAddr, u32 ui32RegValue,
++ u32 ui32Mask, u32 ui32Flags)
++{
++
++#define POLL_DELAY 1000UL
++#define POLL_COUNT_LONG (2000000000UL / POLL_DELAY)
++#define POLL_COUNT_SHORT (1000000UL / POLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ u32 ui32PollCount;
++
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpRegPolWithFlagsKM"));
++
++ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK) !=
++ 0) || ((ui32RegAddr == EUR_CR_EVENT_STATUS)
++ && (ui32RegValue & ui32Mask &
++ EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK) != 0)
++ || ((ui32RegAddr == EUR_CR_EVENT_STATUS)
++ && (ui32RegValue & ui32Mask &
++ EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK) != 0)) {
++ ui32PollCount = POLL_COUNT_LONG;
++ } else {
++ ui32PollCount = POLL_COUNT_SHORT;
++ }
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n",
++ ui32RegAddr, ui32RegValue, ui32Mask, 0,
++ ui32PollCount, POLL_DELAY);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegPolKM(u32 ui32RegAddr, u32 ui32RegValue, u32 ui32Mask)
++{
++ return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask,
++ PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ u32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ void *hOSMemHandle,
++ u32 ui32NumBytes,
++ u32 ui32PageSize, void *hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ u8 *pui8LinAddr;
++ u32 ui32Offset;
++ u32 ui32NumPages;
++ IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++#if defined(LINUX)
++ PVR_ASSERT(hOSMemHandle);
++#else
++ PVR_ASSERT(((u32) pvLinAddr & (SGX_MMU_PAGE_MASK)) == 0);
++#endif
++
++ PVR_ASSERT(((u32) ui32DevVAddr & (SGX_MMU_PAGE_MASK)) == 0);
++ PVR_ASSERT(((u32) ui32NumBytes & (SGX_MMU_PAGE_MASK)) == 0);
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %lu\r\n",
++ ui32DevVAddr, ui32NumBytes, ui32PageSize);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ pui8LinAddr = (u8 *) pvLinAddr;
++ ui32Offset = 0;
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ while (ui32NumPages) {
++ ui32NumPages--;
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ ui32PageSize, &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr / ui32PageSize;
++
++ pui8LinAddr += ui32PageSize;
++ ui32Offset += ui32PageSize;
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "MALLOC :SGXMEM:PA_%8.8lX%8.8lX %lu %lu 0x%8.8lX\r\n",
++ (u32) hUniqueTag, ui32Page * ui32PageSize,
++ ui32PageSize, ui32PageSize,
++ ui32Page * ui32PageSize);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32PTSize, void *hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((u32) pvLinAddr & (ui32PTSize - 1)) == 0);
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %lu\r\n",
++ ui32PTSize, SGX_MMU_PAGE_SIZE);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ {
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ NULL,
++ 0,
++ (u8 *) pvLinAddr,
++ SGX_MMU_PAGE_SIZE, &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "MALLOC :SGXMEM:PA_%8.8lX%8.8lX 0x%lX %lu 0x%8.8lX\r\n",
++ (u32) hUniqueTag,
++ ui32Page * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePages(BM_HEAP * psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ u32 ui32NumBytes,
++ u32 ui32PageSize,
++ void *hUniqueTag, int bInterleaved)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32NumPages, ui32PageCounter;
++ IMG_DEV_PHYADDR sDevPAddr;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((u32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0);
++ PVR_ASSERT(((u32) ui32NumBytes & (ui32PageSize - 1)) == 0);
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "-- FREE :SGXMEM:VA_%8.8lX\r\n", sDevVAddr.uiAddr);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages;
++ ui32PageCounter++) {
++ if (!bInterleaved || (ui32PageCounter % 2) == 0) {
++ sDevPAddr =
++ psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->
++ pMMUHeap,
++ sDevVAddr);
++ {
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n",
++ (u32) hUniqueTag,
++ sDevPAddr.uiAddr);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript,
++ PDUMP_FLAGS_CONTINUOUS);
++ }
++ } else {
++
++ }
++
++ sDevVAddr.uiAddr += ui32PageSize;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32PTSize, void *hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((u32) pvLinAddr & (ui32PTSize - 1UL)) == 0);
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ {
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ NULL,
++ 0,
++ (u8 *) pvLinAddr,
++ SGX_MMU_PAGE_SIZE, &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ {
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n",
++ (u32) hUniqueTag,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDRegWithFlags(u32 ui32Reg,
++ u32 ui32Data, u32 ui32Flags, void *hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (u32) hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) <<
++ SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX: SGXMEM:$1\r\n", ui32Reg);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ ui32Reg,
++ (u32) hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) <<
++ SGX_MMU_PDE_ADDR_ALIGNSHIFT,
++ ui32Data & ~SGX_MMU_PDE_ADDR_MASK);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#endif
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDReg(u32 ui32Reg, u32 ui32Data, void *hUniqueTag)
++{
++ return PDumpPDRegWithFlags(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS,
++ hUniqueTag);
++}
++
++PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Value,
++ u32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ u32 ui32Flags, void *hUniqueTag)
++{
++#define MEMPOLL_DELAY (1000)
++#define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ u32 ui32PageOffset;
++ u8 *pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT((ui32Offset + sizeof(u32)) <= psMemInfo->ui32AllocSize);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "-- POL :SGXMEM:VA_%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ psMemInfo->sDevVAddr.uiAddr + ui32Offset,
++ ui32Value,
++ ui32Mask,
++ eOperator, MEMPOLL_COUNT, MEMPOLL_DELAY);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ pui8LinAddr = psMemInfo->pvLinAddrKM;
++
++ pui8LinAddr += ui32Offset;
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset, pui8LinAddr, &ui32PageOffset);
++
++ sDevVPageAddr.uiAddr =
++ psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "POL :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ (u32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32Value,
++ ui32Mask,
++ eOperator, MEMPOLL_COUNT, MEMPOLL_DELAY);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMemKM(void *pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO * psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Bytes, u32 ui32Flags, void *hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32NumPages;
++ u32 ui32PageByteOffset;
++ u32 ui32BlockBytes;
++ u8 *pui8LinAddr;
++ u8 *pui8DataLinAddr = NULL;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++
++ if (!PDumpOSJTInitialised()) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes == 0 || PDumpOSIsSuspended()) {
++ return PVRSRV_OK;
++ }
++
++ if (pvAltLinAddr) {
++ pui8DataLinAddr = pvAltLinAddr;
++ } else if (psMemInfo->pvLinAddrKM) {
++ pui8DataLinAddr = (u8 *) psMemInfo->pvLinAddrKM + ui32Offset;
++ }
++ pui8LinAddr = (u8 *) psMemInfo->pvLinAddrKM;
++ sDevVAddr = psMemInfo->sDevVAddr;
++
++ sDevVAddr.uiAddr += ui32Offset;
++ pui8LinAddr += ui32Offset;
++
++ PVR_ASSERT(pui8DataLinAddr);
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pui8DataLinAddr, ui32Bytes, ui32Flags)) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0) {
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
++ "%%0%%.prm");
++ } else {
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
++ "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "-- LDB :SGXMEM:VA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (u32) hUniqueTag,
++ psMemInfo->sDevVAddr.uiAddr,
++ ui32Offset,
++ ui32Bytes, ui32ParamOutPos, pszFileName);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr, &ui32PageByteOffset);
++ ui32NumPages =
++ (ui32PageByteOffset + ui32Bytes + HOST_PAGESIZE() -
++ 1) / HOST_PAGESIZE();
++
++ while (ui32NumPages) {
++#if 0
++ u32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32CurrentOffset);
++#endif
++ ui32NumPages--;
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++ sDevPAddr.uiAddr += ui32PageByteOffset;
++#if 0
++ if (ui32PageByteOffset) {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining,
++ PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++
++ ui32PageByteOffset = 0;
++ }
++#endif
++
++ if (ui32PageByteOffset + ui32Bytes > HOST_PAGESIZE()) {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageByteOffset;
++ } else {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (u32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos, pszFileName);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ ui32PageByteOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ sDevVAddr.uiAddr += ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ int bInitialisePages,
++ void *hUniqueTag1, void *hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32NumPages;
++ u32 ui32PageOffset;
++ u32 ui32BlockBytes;
++ u8 *pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ u32 ui32Offset;
++ u32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++ if (!pvLinAddr || !PDumpOSJTInitialised()) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSIsSuspended()) {
++ return PVRSRV_OK;
++ }
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++ if (bInitialisePages) {
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pvLinAddr,
++ ui32Bytes, PDUMP_FLAGS_CONTINUOUS)) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0) {
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
++ "%%0%%.prm");
++ } else {
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
++ "%%0%%%lu.prm",
++ PDumpOSGetParamFileNum());
++ }
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ }
++
++ ui32PageOffset = (u32) pvLinAddr & (HOST_PAGESIZE() - 1);
++ ui32NumPages =
++ (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() -
++ 1) / HOST_PAGESIZE();
++ pui8LinAddr = (u8 *) pvLinAddr;
++
++ while (ui32NumPages) {
++ ui32NumPages--;
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE()) {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++ } else {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++ if (bInitialisePages) {
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (u32) hUniqueTag1,
++ sDevPAddr.
++ uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.
++ uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes, ui32ParamOutPos,
++ pszFileName);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ } else {
++ for (ui32Offset = 0; ui32Offset < ui32BlockBytes;
++ ui32Offset += sizeof(u32)) {
++ u32 ui32PTE =
++ *((u32 *) (pui8LinAddr + ui32Offset));
++
++ if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0) {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (u32)
++ hUniqueTag2,
++ (ui32PTE &
++ SGX_MMU_PDE_ADDR_MASK)
++ <<
++ SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript,
++ PDUMP_FLAGS_CONTINUOUS);
++ eErr =
++ PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript,
++ PDUMP_FLAGS_CONTINUOUS);
++ eErr =
++ PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "OR :SGXMEM:$1 :SGXMEM:$1 0x%8.8lX\r\n",
++ ui32PTE &
++ ~SGX_MMU_PDE_ADDR_MASK);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript,
++ PDUMP_FLAGS_CONTINUOUS);
++ eErr =
++ PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$1\r\n",
++ (u32) hUniqueTag1,
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ ~
++ (SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ (SGX_MMU_PAGE_MASK));
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript,
++ PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (u32)
++ hUniqueTag1,
++ (sDevPAddr.
++ uiAddr +
++ ui32Offset) &
++ ~
++ (SGX_MMU_PAGE_MASK),
++ (sDevPAddr.
++ uiAddr +
++ ui32Offset) &
++ (SGX_MMU_PAGE_MASK),
++ (u32)
++ hUniqueTag2,
++ (ui32PTE &
++ SGX_MMU_PDE_ADDR_MASK)
++ <<
++ SGX_MMU_PTE_ADDR_ALIGNSHIFT,
++ ui32PTE &
++ ~SGX_MMU_PDE_ADDR_MASK);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++#endif
++ } else {
++ PVR_ASSERT((ui32PTE & SGX_MMU_PTE_VALID)
++ == 0UL);
++ eErr =
++ PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX%8.8lX\r\n",
++ (u32) hUniqueTag1,
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ ~
++ (SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr +
++ ui32Offset) &
++ (SGX_MMU_PAGE_MASK),
++ (ui32PTE <<
++ SGX_MMU_PTE_ADDR_ALIGNSHIFT),
++ (u32) hUniqueTag2);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript,
++ PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++
++ ui32PageOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo,
++ u32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ void *hUniqueTag1, void *hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32PageByteOffset;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ (u8 *) & sPDDevPAddr,
++ sizeof(IMG_DEV_PHYADDR),
++ PDUMP_FLAGS_CONTINUOUS)) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sDevVAddr = psMemInfo->sDevVAddr;
++ ui32PageByteOffset = sDevVAddr.uiAddr & (SGX_MMU_PAGE_MASK);
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++ sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset;
++
++ if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0UL) {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (u32) hUniqueTag2, sPDDevPAddr.uiAddr);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "AND :SGXMEM:$2 :SGXMEM:$1 0xFFFFFFFF\r\n");
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (u32) hUniqueTag1,
++ (sDevPAddr.
++ uiAddr) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.
++ uiAddr) & (SGX_MMU_PAGE_MASK));
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen,
++ "SHR :SGXMEM:$2 :SGXMEM:$1 0x20\r\n");
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (u32) hUniqueTag1,
++ (sDevPAddr.uiAddr +
++ 4) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr +
++ 4) & (SGX_MMU_PAGE_MASK));
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (u32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ (u32) hUniqueTag2,
++ sPDDevPAddr.
++ uiAddr & SGX_MMU_PDE_ADDR_MASK,
++ sPDDevPAddr.
++ uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++#endif
++ } else {
++ PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX\r\n",
++ (u32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ sPDDevPAddr.uiAddr);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentKM(char *pszComment, u32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_MSG_STRING();
++ PDUMP_DBG(("PDumpCommentKM"));
++
++ if (!PDumpOSWriteString2("-- ", ui32Flags)) {
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) {
++ return PVRSRV_ERROR_GENERIC;
++ } else {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszComment);
++ if ((eErr != PVRSRV_OK) && (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)) {
++ return eErr;
++ }
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ PDumpOSWriteString2(hMsg, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentWithFlags(u32 ui32Flags, char *pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpComment(char *pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpDriverInfoKM(char *pszString, u32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32MsgLen;
++ PDUMP_GET_MSG_STRING();
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszString);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ ui32MsgLen = PDumpOSBuflen(hMsg, ui32MaxLen);
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO),
++ (u8 *) hMsg, ui32MsgLen, ui32Flags)) {
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) {
++ return PVRSRV_ERROR_GENERIC;
++ } else {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpBitmapKM(char *pszFileName,
++ u32 ui32FileOffset,
++ u32 ui32Width,
++ u32 ui32Height,
++ u32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat, u32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags,
++ "\r\n-- Dump bitmap of render\r\n");
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ PDUMP_DATAMASTER_PIXEL,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height, ui32StrideInBytes, eMemFormat);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height, ui32StrideInBytes, eMemFormat);
++#endif
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpReadRegKM(char *pszFileName,
++ u32 ui32FileOffset,
++ u32 ui32Address, u32 ui32Size, u32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n",
++ ui32Address, ui32FileOffset, pszFileName);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32PDumpFlags);
++
++ return PVRSRV_OK;
++}
++
++int PDumpTestNextFrame(u32 ui32CurrentFrame)
++{
++ int bFrameDumped;
++
++ (void)PDumpSetFrameKM(ui32CurrentFrame + 1);
++ bFrameDumped = PDumpIsCaptureFrameKM();
++ (void)PDumpSetFrameKM(ui32CurrentFrame);
++
++ return bFrameDumped;
++}
++
++static PVRSRV_ERROR PDumpSignatureRegister(char *pszFileName,
++ u32 ui32Address,
++ u32 ui32Size,
++ u32 * pui32FileOffset, u32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08X 0x%08X %s\r\n",
++ ui32Address, *pui32FileOffset, pszFileName);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32Flags);
++ *pui32FileOffset += ui32Size;
++ return PVRSRV_OK;
++}
++
++static void PDumpRegisterRange(char *pszFileName,
++ u32 * pui32Registers,
++ u32 ui32NumRegisters,
++ u32 * pui32FileOffset,
++ u32 ui32Size, u32 ui32Flags)
++{
++ u32 i;
++ for (i = 0; i < ui32NumRegisters; i++) {
++ PDumpSignatureRegister(pszFileName, pui32Registers[i], ui32Size,
++ pui32FileOffset, ui32Flags);
++ }
++}
++
++PVRSRV_ERROR PDump3DSignatureRegisters(u32 ui32DumpFrameNum,
++ int bLastFrame,
++ u32 * pui32Registers,
++ u32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = 0;
++
++ PDumpCommentWithFlags(ui32Flags,
++ "\r\n-- Dump 3D signature registers\r\n");
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_3d.sig",
++ ui32DumpFrameNum);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters,
++ &ui32FileOffset, sizeof(u32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpTASignatureRegisters(u32 ui32DumpFrameNum,
++ u32 ui32TAKickCount,
++ int bLastFrame,
++ u32 * pui32Registers,
++ u32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(u32);
++
++ PDumpCommentWithFlags(ui32Flags,
++ "\r\n-- Dump TA signature registers\r\n");
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_ta.sig",
++ ui32DumpFrameNum);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters,
++ &ui32FileOffset, sizeof(u32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCounterRegisters(u32 ui32DumpFrameNum,
++ int bLastFrame,
++ u32 * pui32Registers, u32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0UL;
++ ui32FileOffset = 0UL;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n");
++ eErr =
++ PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu.perf",
++ ui32DumpFrameNum);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters,
++ &ui32FileOffset, sizeof(u32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegRead(const u32 ui32RegOffset, u32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n",
++ ui32RegOffset);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCycleCountRegRead(const u32 ui32RegOffset, int bLastFrame)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr =
++ PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n",
++ ui32RegOffset);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpHWPerfCBKM(char *pszFileName,
++ u32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size, u32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags,
++ "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ "SAB :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++ PDUMP_DATAMASTER_EDM,
++#else
++ "SAB :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++#endif
++ sDevBaseAddr.uiAddr,
++ ui32Size, ui32FileOffset, pszFileName);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ u32 ui32ROffOffset,
++ u32 ui32WPosVal,
++ u32 ui32PacketSize,
++ u32 ui32BufferSize, u32 ui32Flags, void *hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ u32 ui32PageOffset;
++ u8 *pui8LinAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT((ui32ROffOffset + sizeof(u32)) <=
++ psROffMemInfo->ui32AllocSize);
++
++ pui8LinAddr = psROffMemInfo->pvLinAddrKM;
++ sDevVAddr = psROffMemInfo->sDevVAddr;
++
++ pui8LinAddr += ui32ROffOffset;
++ sDevVAddr.uiAddr += ui32ROffOffset;
++
++ PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle,
++ ui32ROffOffset,
++ pui8LinAddr, &ui32PageOffset);
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "CBP :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n",
++ (u32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32WPosVal, ui32PacketSize, ui32BufferSize);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpIDLWithFlags(u32 ui32Clocks, u32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpIDLWithFlags"));
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %lu\r\n", ui32Clocks);
++ if (eErr != PVRSRV_OK) {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpIDL(u32 ui32Clocks)
++{
++ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++#endif
++
++PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *pvAltLinAddrUM,
++ void *pvLinAddrUM,
++ PVRSRV_KERNEL_MEM_INFO * psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Bytes, u32 ui32Flags, void *hUniqueTag)
++{
++ void *pvAddrUM;
++ void *pvAddrKM;
++ u32 ui32BytesDumped;
++ u32 ui32CurrentOffset;
++
++ if (psMemInfo->pvLinAddrKM != NULL && pvAltLinAddrUM == NULL) {
++
++ return PDumpMemKM(NULL,
++ psMemInfo,
++ ui32Offset, ui32Bytes, ui32Flags, hUniqueTag);
++ }
++
++ pvAddrUM =
++ (pvAltLinAddrUM !=
++ NULL) ? pvAltLinAddrUM : ((pvLinAddrUM !=
++ NULL) ? VPTR_PLUS(pvLinAddrUM,
++ ui32Offset) : NULL);
++
++ pvAddrKM = GetTempBuffer();
++
++ PVR_ASSERT(pvAddrUM != NULL && pvAddrKM != NULL);
++ if (pvAddrUM == NULL || pvAddrKM == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE) {
++ PDumpCommentWithFlags(ui32Flags,
++ "Dumping 0x%8.8lx bytes of memory, in blocks of 0x%8.8lx bytes",
++ ui32Bytes, (u32) PDUMP_TEMP_BUFFER_SIZE);
++ }
++
++ ui32CurrentOffset = ui32Offset;
++ for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;) {
++ PVRSRV_ERROR eError;
++ u32 ui32BytesToDump =
++ MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);
++
++ eError = OSCopyFromUser(psPerProc,
++ pvAddrKM, pvAddrUM, ui32BytesToDump);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PDumpMemUM: OSCopyFromUser failed (%d), eError"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PDumpMemKM(pvAddrKM,
++ psMemInfo,
++ ui32CurrentOffset,
++ ui32BytesToDump, ui32Flags, hUniqueTag);
++
++ if (eError != PVRSRV_OK) {
++
++ if (ui32BytesDumped != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PDumpMemUM: PDumpMemKM failed (%d)",
++ eError));
++ }
++ PVR_ASSERT(ui32BytesDumped == 0);
++ return eError;
++ }
++
++ VPTR_INC(pvAddrUM, ui32BytesToDump);
++ ui32CurrentOffset += ui32BytesToDump;
++ ui32BytesDumped += ui32BytesToDump;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR _PdumpAllocMMUContext(u32 * pui32MMUContextID)
++{
++ u32 i;
++
++ for (i = 0; i < MAX_PDUMP_MMU_CONTEXTS; i++) {
++ if ((gui16MMUContextUsage & (1U << i)) == 0) {
++
++ gui16MMUContextUsage |= 1U << i;
++ *pui32MMUContextID = i;
++ return PVRSRV_OK;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "_PdumpAllocMMUContext: no free MMU context ids"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++static PVRSRV_ERROR _PdumpFreeMMUContext(u32 ui32MMUContextID)
++{
++ if (ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS) {
++
++ gui16MMUContextUsage &= ~(1U << ui32MMUContextID);
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "_PdumpFreeMMUContext: MMU context ids invalid"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace,
++ u32 * pui32MMUContextID,
++ u32 ui32MMUType,
++ void *hUniqueTag1, void *pvPDCPUAddr)
++{
++ u8 *pui8LinAddr = (u8 *) pvPDCPUAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ u32 ui32MMUContextID;
++ PVRSRV_ERROR eError;
++
++ eError = _PdumpAllocMMUContext(&ui32MMUContextID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d",
++ eError));
++ return eError;
++ }
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ sDevPAddr.uiAddr &= ~((PVRSRV_4K_PAGE_SIZE) - 1);
++
++ PDumpComment("Set MMU Context\r\n");
++
++ PDumpComment("MMU :%s:v%d %d :%s:PA_%8.8lX%8.8lX\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType, pszMemSpace, hUniqueTag1, sDevPAddr.uiAddr);
++
++ *pui32MMUContextID = ui32MMUContextID;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace,
++ u32 ui32MMUContextID, u32 ui32MMUType)
++{
++ PVRSRV_ERROR eError;
++
++ PDumpComment("Clear MMU Context for memory space %s\r\n", pszMemSpace);
++
++ PDumpComment("MMU :%s:v%d %d\r\n",
++ pszMemSpace, ui32MMUContextID, ui32MMUType);
++
++ eError = _PdumpFreeMMUContext(ui32MMUContextID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d",
++ eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/perproc.c
+@@ -0,0 +1,276 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++#include "osperproc.h"
++
++#define HASH_TAB_INIT_SIZE 32
++
++static HASH_TABLE *psHashTab = NULL;
++
++static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_ERROR eError;
++ u32 uiPerProc;
++
++ PVR_ASSERT(psPerProc != NULL);
++
++ if (psPerProc == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreePerProcessData: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uiPerProc = HASH_Remove(psHashTab, (u32) psPerProc->ui32PID);
++ if (uiPerProc == 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreePerProcessData: Couldn't find process in per-process data hash table"));
++
++ PVR_ASSERT(psPerProc->ui32PID == 0);
++ } else {
++ PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *) uiPerProc == psPerProc);
++ PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *) uiPerProc)->ui32PID ==
++ psPerProc->ui32PID);
++ }
++
++ if (psPerProc->psHandleBase != NULL) {
++ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreePerProcessData: Couldn't free handle base for process (%d)",
++ eError));
++ return eError;
++ }
++ }
++
++ if (psPerProc->hPerProcData != NULL) {
++ eError =
++ PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ psPerProc->hPerProcData,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreePerProcessData: Couldn't release per-process data handle (%d)",
++ eError));
++ return eError;
++ }
++ }
++
++ eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)",
++ eError));
++ return eError;
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ psPerProc, psPerProc->hBlockAlloc);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreePerProcessData: Couldn't free per-process data (%d)",
++ eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(u32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != NULL);
++
++ psPerProc =
++ (PVRSRV_PER_PROCESS_DATA *) HASH_Retrieve(psHashTab, (u32) ui32PID);
++ return psPerProc;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(u32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ void *hBlockAlloc;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psHashTab != NULL);
++
++ psPerProc =
++ (PVRSRV_PER_PROCESS_DATA *) HASH_Retrieve(psHashTab, (u32) ui32PID);
++
++ if (psPerProc == NULL) {
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ (void **)&psPerProc,
++ &hBlockAlloc, "Per Process Data");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)",
++ eError));
++ return eError;
++ }
++ memset(psPerProc, 0, sizeof(*psPerProc));
++ psPerProc->hBlockAlloc = hBlockAlloc;
++
++ if (!HASH_Insert(psHashTab, (u32) ui32PID, (u32) psPerProc)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto failure;
++ }
++
++ psPerProc->ui32PID = ui32PID;
++ psPerProc->ui32RefCount = 0;
++
++ eError =
++ OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)",
++ eError));
++ goto failure;
++ }
++
++ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psPerProc->hPerProcData,
++ psPerProc,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)",
++ eError));
++ goto failure;
++ }
++
++ eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)",
++ eError));
++ goto failure;
++ }
++
++ eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)",
++ eError));
++ goto failure;
++ }
++
++ eError =
++ PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
++ goto failure;
++ }
++ }
++
++ psPerProc->ui32RefCount++;
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
++ ui32PID, psPerProc->ui32RefCount));
++
++ return eError;
++
++failure:
++ (void)FreePerProcessData(psPerProc);
++ return eError;
++}
++
++void PVRSRVPerProcessDataDisconnect(u32 ui32PID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != NULL);
++
++ psPerProc =
++ (PVRSRV_PER_PROCESS_DATA *) HASH_Retrieve(psHashTab, (u32) ui32PID);
++ if (psPerProc == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u",
++ ui32PID));
++ } else {
++ psPerProc->ui32RefCount--;
++ if (psPerProc->ui32RefCount == 0) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataDisconnect: "
++ "Last close from process 0x%x received",
++ ui32PID));
++
++ PVRSRVResManDisconnect(psPerProc->hResManContext, 0);
++
++ eError = FreePerProcessData(psPerProc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataDisconnect: Error freeing per-process data"));
++ }
++ }
++ }
++
++ eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)",
++ eError));
++ }
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(void)
++{
++ PVR_ASSERT(psHashTab == NULL);
++
++ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++ if (psHashTab == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(void)
++{
++
++ if (psHashTab != NULL) {
++
++ HASH_Delete(psHashTab);
++ psHashTab = NULL;
++ }
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/power.c
+@@ -0,0 +1,686 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pdump_km.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_INSERT(PVRSRV_POWER_DEV);
++DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV);
++
++void *MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV * psPowerDev, va_list va);
++
++static int gbInitServerRunning = 0;
++static int gbInitServerRan = 0;
++static int gbInitSuccessful = 0;
++
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState,
++ int bState)
++{
++
++ switch (eInitServerState) {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ gbInitServerRunning = bState;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ gbInitServerRan = bState;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ gbInitSuccessful = bState;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetInitServerState : Unknown state %lx",
++ eInitServerState));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++int PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
++{
++ int bReturnVal;
++
++ switch (eInitServerState) {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ bReturnVal = gbInitServerRunning;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ bReturnVal = gbInitServerRan;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ bReturnVal = gbInitSuccessful;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetInitServerState : Unknown state %lx",
++ eInitServerState));
++ bReturnVal = 0;
++ }
++
++ return bReturnVal;
++}
++
++static int _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
++{
++ return (int)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2);
++}
++
++PVRSRV_ERROR PVRSRVPowerLock(u32 ui32CallerID, int bSystemPowerEvent)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ u32 ui32Timeout = 1000000;
++
++#if defined(SUPPORT_LMA)
++
++ ui32Timeout *= 60;
++#endif
++
++ SysAcquireData(&psSysData);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ eError = SysPowerLockWrap(psSysData);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++#endif
++ do {
++ eError = OSLockResource(&psSysData->sPowerStateChangeResource,
++ ui32CallerID);
++ if (eError == PVRSRV_OK) {
++ break;
++ } else if (ui32CallerID == ISR_ID) {
++
++ eError = PVRSRV_ERROR_RETRY;
++ break;
++ }
++
++ OSWaitus(1);
++ ui32Timeout--;
++ } while (ui32Timeout > 0);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ if (eError != PVRSRV_OK) {
++ SysPowerLockUnwrap(psSysData);
++ }
++#endif
++ if ((eError == PVRSRV_OK) &&
++ !bSystemPowerEvent &&
++ !_IsSystemStatePowered(psSysData->eCurrentPowerState)) {
++
++ PVRSRVPowerUnlock(ui32CallerID);
++ eError = PVRSRV_ERROR_RETRY;
++ }
++
++ return eError;
++}
++
++void PVRSRVPowerUnlock(u32 ui32CallerID)
++{
++ OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ SysPowerLockUnwrap(gpsSysData);
++#endif
++}
++
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *
++ psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++ int bAllDevices;
++ u32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++ bAllDevices = va_arg(va, int);
++ ui32DeviceIndex = va_arg(va, u32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) {
++ eNewDevicePowerState =
++ (eNewPowerState ==
++ PVRSRV_DEV_POWER_STATE_DEFAULT) ? psPowerDevice->
++ eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState) {
++ if (psPowerDevice->pfnPrePower != NULL) {
++
++ eError =
++ psPowerDevice->pfnPrePower(psPowerDevice->
++ hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++
++ eError =
++ SysDevicePrePowerState(psPowerDevice->
++ ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(int bAllDevices,
++ u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError =
++ List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->
++ psPowerDeviceList,
++ PVRSRVDevicePrePowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *
++ psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++ int bAllDevices;
++ u32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++ bAllDevices = va_arg(va, int);
++ ui32DeviceIndex = va_arg(va, u32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) {
++ eNewDevicePowerState =
++ (eNewPowerState ==
++ PVRSRV_DEV_POWER_STATE_DEFAULT) ? psPowerDevice->
++ eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState) {
++
++ eError =
++ SysDevicePostPowerState(psPowerDevice->
++ ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ if (psPowerDevice->pfnPostPower != NULL) {
++
++ eError =
++ psPowerDevice->pfnPostPower(psPowerDevice->
++ hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++
++ psPowerDevice->eCurrentPowerState =
++ eNewDevicePowerState;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(int bAllDevices,
++ u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError =
++ List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->
++ psPowerDeviceList,
++ PVRSRVDevicePostPowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVSetDevicePowerStateCoreKM(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE
++ eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ eError =
++ PVRSRVDevicePrePowerStateKM(0, ui32DeviceIndex, eNewPowerState);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ eError =
++ PVRSRVDevicePostPowerStateKM(0, ui32DeviceIndex, eNewPowerState);
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ u32 ui32CallerID, int bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, 0);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++#if defined(PDUMP)
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) {
++
++ eError =
++ PVRSRVDevicePrePowerStateKM(0, ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK) {
++ goto Exit;
++ }
++
++ eError =
++ PVRSRVDevicePostPowerStateKM(0, ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK) {
++ goto Exit;
++ }
++
++ PDUMPSUSPEND();
++ }
++#endif
++
++ eError =
++ PVRSRVDevicePrePowerStateKM(0, ui32DeviceIndex, eNewPowerState);
++ if (eError != PVRSRV_OK) {
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) {
++ PDUMPRESUME();
++ }
++ goto Exit;
++ }
++
++ eError =
++ PVRSRVDevicePostPowerStateKM(0, ui32DeviceIndex, eNewPowerState);
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) {
++ PDUMPRESUME();
++ }
++
++Exit:
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x",
++ eNewPowerState, eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK)) {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE
++ eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(KERNEL_ID, 1);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState)) {
++ if (_IsSystemStatePowered(eNewSysPowerState)) {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ } else {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++ eError =
++ PVRSRVDevicePrePowerStateKM(1, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++ }
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState) {
++
++ eError = SysSystemPrePowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++ }
++
++ return eError;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE
++ eNewSysPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState) {
++
++ eError = SysSystemPostPowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK) {
++ goto Exit;
++ }
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState)) {
++ if (_IsSystemStatePowered(eNewSysPowerState)) {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ } else {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++ eError =
++ PVRSRVDevicePostPowerStateKM(1, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK) {
++ goto Exit;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
++ psSysData->eCurrentPowerState, eNewSysPowerState));
++
++ psSysData->eCurrentPowerState = eNewSysPowerState;
++
++Exit:
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ if (_IsSystemStatePowered(eNewSysPowerState) &&
++ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) {
++
++ PVRSRVCommandCompleteCallbacks();
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++
++ eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(u32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE
++ pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE
++ pfnPostClockSpeedChange,
++ void *hDevCookie,
++ PVRSRV_DEV_POWER_STATE
++ eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE
++ eDefaultPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ if (pfnPrePower == NULL && pfnPostPower == NULL) {
++ return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++ }
++
++ SysAcquireData(&psSysData);
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_POWER_DEV),
++ (void **)&psPowerDevice, NULL, "Power Device");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
++ return eError;
++ }
++
++ psPowerDevice->pfnPrePower = pfnPrePower;
++ psPowerDevice->pfnPostPower = pfnPostPower;
++ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
++ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
++ psPowerDevice->hDevCookie = hDevCookie;
++ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++ List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList),
++ psPowerDevice);
++
++ return (PVRSRV_OK);
++}
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice(u32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDev;
++
++ SysAcquireData(&psSysData);
++
++ psPowerDev = (PVRSRV_POWER_DEV *)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDev) {
++ List_PVRSRV_POWER_DEV_Remove(psPowerDev);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV),
++ psPowerDev, NULL);
++
++ }
++
++ return (PVRSRV_OK);
++}
++
++int PVRSRVIsDevicePowered(u32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ SysAcquireData(&psSysData);
++
++ if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID)
++ || OSIsResourceLocked(&psSysData->sPowerStateChangeResource,
++ ISR_ID)) {
++ return 0;
++ }
++
++ psPowerDevice = (PVRSRV_POWER_DEV *)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++ return (psPowerDevice
++ && (psPowerDevice->eCurrentPowerState ==
++ PVRSRV_DEV_POWER_STATE_ON))
++ ? 1 : 0;
++}
++
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(u32 ui32DeviceIndex,
++ int bIdleDevice, void *pvInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ SysAcquireData(&psSysData);
++
++ if (bIdleDevice) {
++
++ eError = PVRSRVPowerLock(KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%lx",
++ eError));
++ return eError;
++ }
++ }
++
++ psPowerDevice = (PVRSRV_POWER_DEV *)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange) {
++ eError =
++ psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->
++ hDevCookie,
++ bIdleDevice,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePreClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++ if (bIdleDevice && eError != PVRSRV_OK) {
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++
++ return eError;
++}
++
++void PVRSRVDevicePostClockSpeedChange(u32 ui32DeviceIndex,
++ int bIdleDevice, void *pvInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ SysAcquireData(&psSysData);
++
++ psPowerDevice = (PVRSRV_POWER_DEV *)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange) {
++ eError =
++ psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->
++ hDevCookie,
++ bIdleDevice,
++ psPowerDevice->
++ eCurrentPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePostClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++ if (bIdleDevice) {
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/pvrsrv.c
+@@ -0,0 +1,1105 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "handle.h"
++#include "perproc.h"
++#include "pdump_km.h"
++#include "ra.h"
++
++#include "pvrversion.h"
++
++#include "lists.h"
++
++#ifdef INTEL_D3_CHANGES
++
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/jiffies.h>
++
++#define GFX_MS_TO_JIFFIES(time_ms) msecs_to_jiffies((time_ms))
++#define WAIT_FOR_WRITE_OP_SYNC_TIMEOUT 10000
++
++DECLARE_WAIT_QUEUE_HEAD(render_wait_queue);
++
++static void WakeWriteOpSyncs(void)
++{
++ wake_up(&render_wait_queue);
++}
++
++PVRSRV_ERROR PVRSRVWaitForWriteOpSyncKM(PVRSRV_KERNEL_SYNC_INFO *
++ psKernelSyncInfo)
++{
++ int rc = 0;
++
++ if (NULL == psKernelSyncInfo) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ rc = wait_event_interruptible_timeout(render_wait_queue,
++ (psKernelSyncInfo->psSyncData->
++ ui32WriteOpsComplete >=
++ psKernelSyncInfo->psSyncData->
++ ui32WriteOpsPending),
++ GFX_MS_TO_JIFFIES
++ (WAIT_FOR_WRITE_OP_SYNC_TIMEOUT));
++
++ if (rc == 0) {
++ return PVRSRV_ERROR_TIMEOUT;
++ } else if (rc == -ERESTARTSYS) {
++ return PVRSRV_ERROR_RETRY;
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK);
++
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++
++DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++void *MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE * psDeviceNode, va_list va);
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA * psSysData, u32 * pui32DevID)
++{
++ SYS_DEVICE_ID *psDeviceWalker;
++ SYS_DEVICE_ID *psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++ while (psDeviceWalker < psDeviceEnd) {
++ if (!psDeviceWalker->bInUse) {
++ psDeviceWalker->bInUse = 1;
++ *pui32DevID = psDeviceWalker->uiID;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "AllocateDeviceID: No free and valid device IDs available!"));
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR FreeDeviceID(SYS_DATA * psSysData, u32 ui32DevID)
++{
++ SYS_DEVICE_ID *psDeviceWalker;
++ SYS_DEVICE_ID *psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++ while (psDeviceWalker < psDeviceEnd) {
++
++ if ((psDeviceWalker->uiID == ui32DevID) &&
++ (psDeviceWalker->bInUse)
++ ) {
++ psDeviceWalker->bInUse = 0;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeDeviceID: no matching dev ID that is in use!"));
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++#ifndef ReadHWReg
++
++u32 ReadHWReg(void *pvLinRegBaseAddr, u32 ui32Offset)
++{
++ return *(volatile u32 *)((u32) pvLinRegBaseAddr + ui32Offset);
++}
++#endif
++
++#ifndef WriteHWReg
++
++void WriteHWReg(void *pvLinRegBaseAddr, u32 ui32Offset, u32 ui32Value)
++{
++ PVR_DPF((PVR_DBG_MESSAGE, "WriteHWReg Base:%x, Offset: %x, Value %x",
++ pvLinRegBaseAddr, ui32Offset, ui32Value));
++
++ *(u32 *) ((u32) pvLinRegBaseAddr + ui32Offset) = ui32Value;
++}
++#endif
++
++#ifndef WriteHWRegs
++
++void WriteHWRegs(void *pvLinRegBaseAddr, u32 ui32Count, PVRSRV_HWREG * psHWRegs)
++{
++ while (ui32Count) {
++ WriteHWReg(pvLinRegBaseAddr, psHWRegs->ui32RegAddr,
++ psHWRegs->ui32RegVal);
++ psHWRegs++;
++ ui32Count--;
++ }
++}
++#endif
++
++void PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE * psDeviceNode,
++ va_list va)
++{
++ u32 *pui32DevCount;
++ PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList;
++
++ pui32DevCount = va_arg(va, u32 *);
++ ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER **);
++
++ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT) {
++ *(*ppsDevIdList) = psDeviceNode->sDevId;
++ (*ppsDevIdList)++;
++ (*pui32DevCount)++;
++ }
++}
++
++PVRSRV_ERROR PVRSRVEnumerateDevicesKM(u32 * pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER * psDevIdList)
++{
++ SYS_DATA *psSysData;
++ u32 i;
++
++ if (!pui32NumDevices || !psDevIdList) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVEnumerateDevicesKM: Invalid params"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++ for (i = 0; i < PVRSRV_MAX_DEVICES; i++) {
++ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++ }
++
++ *pui32NumDevices = 0;
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDevicesKM_ForEachVaCb,
++ pui32NumDevices, &psDevIdList);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ eError = ResManInit();
++ if (eError != PVRSRV_OK) {
++ goto Error;
++ }
++
++ eError = PVRSRVPerProcessDataInit();
++ if (eError != PVRSRV_OK) {
++ goto Error;
++ }
++
++ eError = PVRSRVHandleInit();
++ if (eError != PVRSRV_OK) {
++ goto Error;
++ }
++
++ eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++ if (eError != PVRSRV_OK) {
++ goto Error;
++ }
++
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++ if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT),
++ (void **)&psSysData->psGlobalEventObject, 0,
++ "Event Object") != PVRSRV_OK) {
++
++ goto Error;
++ }
++
++ if (OSEventObjectCreate
++ ("PVRSRV_GLOBAL_EVENTOBJECT",
++ psSysData->psGlobalEventObject) != PVRSRV_OK) {
++ goto Error;
++ }
++
++ return eError;
++
++Error:
++ PVRSRVDeInit(psSysData);
++ return eError;
++}
++
++void PVRSRVDeInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ if (psSysData == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeInit: PVRSRVHandleDeInit failed - invalid param"));
++ return;
++ }
++
++ if (psSysData->psGlobalEventObject) {
++ OSEventObjectDestroy(psSysData->psGlobalEventObject);
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT),
++ psSysData->psGlobalEventObject, 0);
++ psSysData->psGlobalEventObject = NULL;
++ }
++
++ eError = PVRSRVHandleDeInit();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeInit: PVRSRVHandleDeInit failed"));
++ }
++
++ eError = PVRSRVPerProcessDataDeInit();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
++ }
++
++ ResManDeInit();
++}
++
++PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR(*pfnRegisterDevice)
++ (PVRSRV_DEVICE_NODE *),
++ u32 ui32SOCInterruptBit,
++ u32 * pui32DeviceIndex)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (void **)&psDeviceNode, NULL,
++ "Device Node") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ memset(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ eError = pfnRegisterDevice(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, NULL);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterDevice : Failed to register device"));
++ return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->psSysData = psSysData;
++ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList,
++ psDeviceNode);
++
++ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVInitialiseDevice(u32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex, 1);
++ if (!psDeviceNode) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVInitialiseDevice: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ PVR_ASSERT(psDeviceNode->ui32RefCount > 0);
++
++ eError = PVRSRVResManConnect(NULL, &psDeviceNode->hResManContext);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
++ return eError;
++ }
++
++ if (psDeviceNode->pfnInitDevice != NULL) {
++ eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVInitialiseDevice: Failed InitDevice call"));
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *
++ psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError =
++ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_DEFAULT,
++ KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)",
++ psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *
++ psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)",
++ psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVFinaliseSystem(int bInitSuccessful)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
++
++ SysAcquireData(&psSysData);
++
++ if (bInitSuccessful) {
++ eError = SysFinalise();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVFinaliseSystem: SysFinalise failed (%d)",
++ eError));
++ return eError;
++ }
++
++ eError =
++ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->
++ psDeviceNodeList,
++ PVRSRVFinaliseSystem_SetPowerState_AnyCb);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ eError =
++ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->
++ psDeviceNodeList,
++ PVRSRVFinaliseSystem_CompatCheck_AnyCb);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++
++#if !defined(SUPPORT_PDUMP_DELAYED_INITPHASE_TERMINATION)
++ PDUMPENDINITPHASE();
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++
++ if (psDeviceNode->pfnInitDeviceCompatCheck)
++ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
++ else
++ return PVRSRV_OK;
++}
++
++void *PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE * psDeviceNode,
++ va_list va)
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ u32 ui32DevIndex;
++
++ eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE);
++ ui32DevIndex = va_arg(va, u32);
++
++ if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.eDeviceType == eDeviceType) ||
++ (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)) {
++ return psDeviceNode;
++ } else {
++ return NULL;
++ }
++}
++
++PVRSRV_ERROR PVRSRVAcquireDeviceDataKM(u32 ui32DevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ void **phDevCookie)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode =
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVAcquireDeviceDataKM_Match_AnyVaCb,
++ eDeviceType, ui32DevIndex);
++
++ if (!psDeviceNode) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVAcquireDeviceDataKM: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ PVR_ASSERT(psDeviceNode->ui32RefCount > 0);
++
++ if (phDevCookie) {
++ *phDevCookie = (void *)psDeviceNode;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVDeinitialiseDevice(u32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex, 1);
++
++ if (!psDeviceNode) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeinitialiseDevice: requested device %d is not present",
++ ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
++ return eError;
++ }
++
++ eError = ResManFreeResByCriteria(psDeviceNode->hResManContext,
++ RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ NULL, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call"));
++ return eError;
++ }
++
++ if (psDeviceNode->pfnDeInitDevice != NULL) {
++ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
++ return eError;
++ }
++ }
++
++ PVRSRVResManDisconnect(psDeviceNode->hResManContext, 1);
++ psDeviceNode->hResManContext = NULL;
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++ (void)FreeDeviceID(psSysData, ui32DevIndex);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, NULL);
++
++ return (PVRSRV_OK);
++}
++
++PVRSRV_ERROR PollForValueKM(volatile u32 * pui32LinMemAddr,
++ u32 ui32Value,
++ u32 ui32Mask, u32 ui32Waitus, u32 ui32Tries)
++{
++ {
++ u32 uiMaxTime = ui32Tries * ui32Waitus;
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime) {
++ if ((*pui32LinMemAddr & ui32Mask) == ui32Value) {
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++#if defined (USING_ISR_INTERRUPTS)
++
++extern u32 gui32EventStatusServicesByISR;
++
++PVRSRV_ERROR PollForInterruptKM(u32 ui32Value,
++ u32 ui32Mask, u32 ui32Waitus, u32 ui32Tries)
++{
++ u32 uiMaxTime;
++
++ uiMaxTime = ui32Tries * ui32Waitus;
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime) {
++ if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value) {
++ gui32EventStatusServicesByISR = 0;
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ return PVRSRV_ERROR_GENERIC;
++}
++#endif
++
++void PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP * psBMHeap, va_list va)
++{
++ char **ppszStr;
++ u32 *pui32StrLen;
++
++ ppszStr = va_arg(va, char **);
++ pui32StrLen = va_arg(va, u32 *);
++
++ if (psBMHeap->pImportArena) {
++ RA_GetStats(psBMHeap->pImportArena, ppszStr, pui32StrLen);
++ }
++
++ if (psBMHeap->pVMArena) {
++ RA_GetStats(psBMHeap->pVMArena, ppszStr, pui32StrLen);
++ }
++}
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT * psBMContext,
++ va_list va)
++{
++
++ u32 *pui32StrLen;
++ s32 *pi32Count;
++ char **ppszStr;
++
++ pui32StrLen = va_arg(va, u32 *);
++ pi32Count = va_arg(va, s32 *);
++ ppszStr = va_arg(va, char **);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count =
++ snprintf(*ppszStr, 100,
++ "\nApplication Context (hDevMemContext) 0x%p:\n",
++ (void *)psBMContext);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++ List_BM_HEAP_ForEach_va(psBMContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr, pui32StrLen);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *
++ psDeviceNode, va_list va)
++{
++ u32 *pui32StrLen;
++ s32 *pi32Count;
++ char **ppszStr;
++
++ pui32StrLen = va_arg(va, u32 *);
++ pi32Count = va_arg(va, s32 *);
++ ppszStr = va_arg(va, char **);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count =
++ snprintf(*ppszStr, 100, "\n\nDevice Type %d:\n",
++ psDeviceNode->sDevId.eDeviceType);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext) {
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = snprintf(*ppszStr, 100, "\nKernel Context:\n");
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++ List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.
++ pBMKernelContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr, pui32StrLen);
++ }
++
++ return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.
++ pBMContext,
++ PVRSRVGetMiscInfoKM_BMContext_AnyVaCb,
++ pui32StrLen, pi32Count,
++ ppszStr);
++}
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO * psMiscInfo)
++{
++ SYS_DATA *psSysData;
++
++ if (!psMiscInfo) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psMiscInfo->ui32StatePresent = 0;
++
++ if (psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
++ |
++ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
++ | PVRSRV_MISC_INFO_MEMSTATS_PRESENT
++ |
++ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
++ |
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT
++ |
++ PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT
++ | PVRSRV_MISC_INFO_RESET_PRESENT))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetMiscInfoKM: invalid state request flags"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) !=
++ 0UL) && (psSysData->pvSOCTimerRegisterKM != NULL)) {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++ psMiscInfo->pvSOCTimerRegisterKM =
++ psSysData->pvSOCTimerRegisterKM;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle =
++ psSysData->hSOCTimerRegisterOSMemHandle;
++ } else {
++ psMiscInfo->pvSOCTimerRegisterKM = NULL;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = NULL;
++ }
++
++ if (((psMiscInfo->
++ ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL)
++ && (psSysData->pvSOCClockGateRegsBase != NULL)) {
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++ psMiscInfo->pvSOCClockGateRegs =
++ psSysData->pvSOCClockGateRegsBase;
++ psMiscInfo->ui32SOCClockGateRegsSize =
++ psSysData->ui32SOCClockGateRegsSize;
++ }
++
++ if (((psMiscInfo->
++ ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL)
++ && (psMiscInfo->pszMemoryStr != NULL)) {
++ RA_ARENA **ppArena;
++ char *pszStr;
++ u32 ui32StrLen;
++ s32 i32Count;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++ ppArena = &psSysData->apsLocalDevMemArena[0];
++ while (*ppArena) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "\nLocal Backing Store:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ RA_GetStats(*ppArena, &pszStr, &ui32StrLen);
++
++ ppArena++;
++ }
++
++ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->
++ psDeviceNodeList,
++ PVRSRVGetMiscInfoKM_Device_AnyVaCb,
++ &ui32StrLen,
++ &i32Count, &pszStr);
++
++ i32Count = snprintf(pszStr, 100, "\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (((psMiscInfo->
++ ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) !=
++ 0UL) && (psSysData->psGlobalEventObject != NULL)) {
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++ psMiscInfo->sGlobalEventObject =
++ *psSysData->psGlobalEventObject;
++ }
++
++ if (((psMiscInfo->
++ ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
++ &&
++ ((psMiscInfo->
++ ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
++ && (psMiscInfo->pszMemoryStr != NULL)) {
++ char *pszStr;
++ u32 ui32StrLen;
++ u32 ui32LenStrPerNum = 12;
++ s32 i32Count;
++ int i;
++ psMiscInfo->ui32StatePresent |=
++ PVRSRV_MISC_INFO_DDKVERSION_PRESENT;
++
++ psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
++ psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
++ psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH;
++ psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ for (i = 0; i < 4; i++) {
++ if (ui32StrLen < ui32LenStrPerNum) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ i32Count =
++ snprintf(pszStr, ui32LenStrPerNum, "%d",
++ (s32) psMiscInfo->aui32DDKVersion[i]);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ if (i != 3) {
++ i32Count = snprintf(pszStr, 2, ".");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++ }
++ }
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ if ((psMiscInfo->
++ ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT) !=
++ 0UL) {
++ if (psMiscInfo->bDeferCPUCacheFlush) {
++
++ if (!psMiscInfo->bCPUCacheFlushAll) {
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVGetMiscInfoKM: don't support deferred range flushes"));
++ PVR_DPF((PVR_DBG_MESSAGE,
++ " using deferred flush all instead"));
++ }
++
++ psSysData->bFlushAll = 1;
++ } else {
++
++ if (psMiscInfo->bCPUCacheFlushAll) {
++
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = 0;
++ } else {
++
++ OSFlushCPUCacheRangeKM(psMiscInfo->
++ pvRangeAddrStart,
++ psMiscInfo->
++ pvRangeAddrEnd);
++ }
++ }
++ }
++#endif
++
++#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
++ if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) !=
++ 0UL) {
++ PVR_LOG(("User requested OS reset"));
++ OSPanic();
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(u32 * pui32Total, u32 * pui32Available)
++{
++ u32 ui32Total = 0, i = 0;
++ u32 ui32Available = 0;
++
++ *pui32Total = 0;
++ *pui32Available = 0;
++
++ while (BM_ContiguousStatistics(i, &ui32Total, &ui32Available) == 1) {
++ *pui32Total += ui32Total;
++ *pui32Available += ui32Available;
++
++ i++;
++ }
++
++ return PVRSRV_OK;
++}
++
++int PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ int bStatus = 0;
++ u32 ui32InterruptSource;
++
++ if (!psDeviceNode) {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
++ goto out;
++ }
++ psSysData = psDeviceNode->psSysData;
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
++ if (ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit) {
++ if (psDeviceNode->pfnDeviceISR != NULL) {
++ bStatus =
++ (*psDeviceNode->pfnDeviceISR) (psDeviceNode->
++ pvISRData);
++ }
++
++ SysClearInterrupts(psSysData,
++ psDeviceNode->ui32SOCInterruptBit);
++ }
++
++out:
++ return bStatus;
++}
++
++void PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE * psDeviceNode, va_list va)
++{
++
++ int *pbStatus;
++ u32 *pui32InterruptSource;
++ u32 *pui32ClearInterrupts;
++
++ pbStatus = va_arg(va, int *);
++ pui32InterruptSource = va_arg(va, u32 *);
++ pui32ClearInterrupts = va_arg(va, u32 *);
++
++ if (psDeviceNode->pfnDeviceISR != NULL) {
++ if (*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit) {
++ if ((*psDeviceNode->pfnDeviceISR) (psDeviceNode->
++ pvISRData)) {
++
++ *pbStatus = 1;
++ }
++
++ *pui32ClearInterrupts |=
++ psDeviceNode->ui32SOCInterruptBit;
++ }
++ }
++}
++
++int PVRSRVSystemLISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ int bStatus = 0;
++ u32 ui32InterruptSource;
++ u32 ui32ClearInterrupts = 0;
++ if (!psSysData) {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
++ } else {
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, NULL);
++
++ if (ui32InterruptSource) {
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->
++ psDeviceNodeList,
++ PVRSRVSystemLISR_ForEachVaCb,
++ &bStatus,
++ &ui32InterruptSource,
++ &ui32ClearInterrupts);
++
++ SysClearInterrupts(psSysData, ui32ClearInterrupts);
++ }
++ }
++ return bStatus;
++}
++
++void PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ if (psDeviceNode->pfnDeviceMISR != NULL) {
++ (*psDeviceNode->pfnDeviceMISR) (psDeviceNode->pvISRData);
++ }
++}
++
++void PVRSRVMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ if (!psSysData) {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
++ return;
++ }
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVMISR_ForEachCb);
++
++ if (PVRSRVProcessQueues(ISR_ID, 0) == PVRSRV_ERROR_PROCESSING_BLOCKED) {
++ PVRSRVProcessQueues(ISR_ID, 0);
++ }
++
++ if (psSysData->psGlobalEventObject) {
++ void *hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
++ if (hOSEventKM) {
++ OSEventObjectSignal(hOSEventKM);
++ }
++ }
++#ifdef INTEL_D3_CHANGES
++ WakeWriteOpSyncs();
++#endif
++}
++
++PVRSRV_ERROR PVRSRVProcessConnect(u32 ui32PID)
++{
++ return PVRSRVPerProcessDataConnect(ui32PID);
++}
++
++void PVRSRVProcessDisconnect(u32 ui32PID)
++{
++ PVRSRVPerProcessDataDisconnect(ui32PID);
++}
++
++PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(void *hArena, unsigned char *pbyBuffer,
++ u32 * puiBufSize, int bSave)
++{
++ u32 uiBytesSaved = 0;
++ void *pvLocalMemCPUVAddr;
++ RA_SEGMENT_DETAILS sSegDetails;
++
++ if (hArena == NULL) {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ sSegDetails.uiSize = 0;
++ sSegDetails.sCpuPhyAddr.uiAddr = 0;
++ sSegDetails.hSegment = 0;
++
++ while (RA_GetNextLiveSegment(hArena, &sSegDetails)) {
++ if (pbyBuffer == NULL) {
++
++ uiBytesSaved +=
++ sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++ } else {
++ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) +
++ sSegDetails.uiSize) > *puiBufSize) {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x",
++ sSegDetails.sCpuPhyAddr.uiAddr,
++ sSegDetails.uiSize));
++
++ pvLocalMemCPUVAddr =
++ OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++ if (pvLocalMemCPUVAddr == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ if (bSave) {
++
++ memcpy(pbyBuffer, &sSegDetails.uiSize,
++ sizeof(sSegDetails.uiSize));
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ memcpy(pbyBuffer, pvLocalMemCPUVAddr,
++ sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ } else {
++ u32 uiSize;
++
++ memcpy(&uiSize, pbyBuffer,
++ sizeof(sSegDetails.uiSize));
++
++ if (uiSize != sSegDetails.uiSize) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSaveRestoreLiveSegments: Segment size error"));
++ } else {
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ memcpy(pvLocalMemCPUVAddr, pbyBuffer,
++ sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ }
++
++ uiBytesSaved +=
++ sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++ OSUnMapPhysToLin(pvLocalMemCPUVAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++ }
++ }
++
++ if (pbyBuffer == NULL) {
++ *puiBufSize = uiBytesSaved;
++ }
++
++ return (PVRSRV_OK);
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/queue.c
+@@ -0,0 +1,1046 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "lists.h"
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++#include "proc.h"
++
++static int
++QueuePrintCommands(PVRSRV_QUEUE_INFO * psQueue, char *buffer, size_t size)
++{
++ off_t off = 0;
++ int cmds = 0;
++ u32 ui32ReadOffset = psQueue->ui32ReadOffset;
++ u32 ui32WriteOffset = psQueue->ui32WriteOffset;
++ PVRSRV_COMMAND *psCmd;
++
++ while (ui32ReadOffset != ui32WriteOffset) {
++ psCmd =
++ (PVRSRV_COMMAND *) ((u32) psQueue->pvLinQueueKM +
++ ui32ReadOffset);
++
++ off =
++ printAppend(buffer, size, off,
++ "%p %p %5u %6u %3u %5u %2u %2u %3u \n",
++ psQueue, psCmd, psCmd->ui32ProcessID,
++ psCmd->CommandType, psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex, psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount, psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++ if (cmds == 0)
++ off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++ return off;
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void ProcSeqShowQueue(struct seq_file *sfile, void *el)
++{
++ PVRSRV_QUEUE_INFO *psQueue = (PVRSRV_QUEUE_INFO *) el;
++ int cmds = 0;
++ u32 ui32ReadOffset;
++ u32 ui32WriteOffset;
++ PVRSRV_COMMAND *psCmd;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++ seq_printf(sfile,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++ return;
++ }
++
++ ui32ReadOffset = psQueue->ui32ReadOffset;
++ ui32WriteOffset = psQueue->ui32WriteOffset;
++
++ while (ui32ReadOffset != ui32WriteOffset) {
++ psCmd =
++ (PVRSRV_COMMAND *) ((u32) psQueue->pvLinQueueKM +
++ ui32ReadOffset);
++
++ seq_printf(sfile,
++ "%p %p %5u %6u %3u %5u %2u %2u %3u \n",
++ psQueue, psCmd, psCmd->ui32ProcessID,
++ psCmd->CommandType, psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex, psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount, psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++
++ if (cmds == 0)
++ seq_printf(sfile, "%p <empty>\n", psQueue);
++}
++
++void *ProcSeqOff2ElementQueue(struct seq_file *sfile, loff_t off)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ SysAcquireData(&psSysData);
++
++ for (psQueue = psSysData->psQueueList;
++ (((--off) > 0) && (psQueue != NULL));
++ psQueue = psQueue->psNextKM) ;
++ return psQueue;
++}
++
++#endif
++
++off_t QueuePrintQueues(char *buffer, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_QUEUE_INFO *psQueue;
++
++ SysAcquireData(&psSysData);
++
++ if (!off)
++ return printAppend(buffer, size, 0,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++
++ for (psQueue = psSysData->psQueueList;
++ (((--off) > 0) && (psQueue != NULL));
++ psQueue = psQueue->psNextKM) ;
++
++ return psQueue ? QueuePrintCommands(psQueue, buffer,
++ size) : END_OF_FILE;
++}
++#endif
++
++#define GET_SPACE_IN_CMDQ(psQueue) \
++ (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset) \
++ + (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
++ psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
++ (ui32OpsComplete >= ui32OpsPending)
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++static void QueueDumpCmdComplete(COMMAND_COMPLETE_DATA * psCmdCompleteData,
++ u32 i, int bIsSrc)
++{
++ PVRSRV_SYNC_OBJECT *psSyncObject;
++
++ psSyncObject =
++ bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->
++ psDstSync;
++
++ if (psCmdCompleteData->bInUse) {
++ PVR_LOG(("\t%s %lu: ROC DevVAddr:0x%lX ROP:0x%lx ROC:0x%lx, WOC DevVAddr:0x%lX WOP:0x%lx WOC:0x%lx", bIsSrc ? "SRC" : "DEST", i, psSyncObject[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr, psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsPending, psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete, psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending, psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete));
++ } else {
++ PVR_LOG(("\t%s %lu: (Not in use)", bIsSrc ? "SRC" : "DEST", i));
++ }
++}
++
++static void QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) {
++ u32 i;
++ SYS_DATA *psSysData;
++ COMMAND_COMPLETE_DATA **ppsCmdCompleteData;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++ SysAcquireData(&psSysData);
++
++ ppsCmdCompleteData =
++ psSysData->ppsCmdCompleteData[psDeviceNode->sDevId.
++ ui32DeviceIndex];
++
++ if (ppsCmdCompleteData != NULL) {
++ psCmdCompleteData = ppsCmdCompleteData[DC_FLIP_COMMAND];
++
++ PVR_LOG(("Command Complete Data for display device %lu:", psDeviceNode->sDevId.ui32DeviceIndex));
++
++ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount;
++ i++) {
++ QueueDumpCmdComplete(psCmdCompleteData, i, 1);
++ }
++
++ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount;
++ i++) {
++ QueueDumpCmdComplete(psCmdCompleteData, i, 0);
++ }
++ } else {
++ PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ }
++}
++
++void QueueDumpDebugInfo(void)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ QueueDumpDebugInfo_ForEachCb);
++}
++
++u32 NearestPower2(u32 ui32Value)
++{
++ u32 ui32Temp, ui32Result = 1;
++
++ if (!ui32Value)
++ return 0;
++
++ ui32Temp = ui32Value - 1;
++ while (ui32Temp) {
++ ui32Result <<= 1;
++ ui32Temp >>= 1;
++ }
++
++ return ui32Result;
++}
++
++PVRSRV_ERROR PVRSRVCreateCommandQueueKM(u32 ui32QueueSize,
++ PVRSRV_QUEUE_INFO ** ppsQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++ u32 ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ void *hMemBlock;
++
++ SysAcquireData(&psSysData);
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ (void **)&psQueueInfo, &hMemBlock,
++ "Queue Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
++ goto ErrorExit;
++ }
++ memset(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
++
++ psQueueInfo->hMemBlock[0] = hMemBlock;
++ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
++ &psQueueInfo->pvLinQueueKM, &hMemBlock,
++ "Command Queue") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
++ goto ErrorExit;
++ }
++
++ psQueueInfo->hMemBlock[1] = hMemBlock;
++ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++ psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++ if (psSysData->psQueueList == NULL) {
++ eError = OSCreateResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++ }
++
++ if (OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID) != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++
++ psQueueInfo->psNextKM = psSysData->psQueueList;
++ psSysData->psQueueList = psQueueInfo;
++
++ if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) !=
++ PVRSRV_OK) {
++ goto ErrorExit;
++ }
++
++ *ppsQueueInfo = psQueueInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psQueueInfo) {
++ if (psQueueInfo->pvLinQueueKM) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo, psQueueInfo->hMemBlock[0]);
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO * psQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ int bTimeout = 1;
++
++ SysAcquireData(&psSysData);
++
++ psQueue = psSysData->psQueueList;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) {
++ bTimeout = 0;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
++ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++ goto ErrorExit;
++ }
++
++ eError = OSLockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++
++ if (psQueue == psQueueInfo) {
++ psSysData->psQueueList = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ NearestPower2(psQueueInfo->ui32QueueSize) +
++ PVRSRV_MAX_CMD_SIZE, psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo, psQueueInfo->hMemBlock[0]);
++
++ psQueueInfo = NULL;
++ } else {
++ while (psQueue) {
++ if (psQueue->psNextKM == psQueueInfo) {
++ psQueue->psNextKM = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++
++ psQueueInfo = NULL;
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (!psQueue) {
++ eError =
++ OSUnlockResource(&psSysData->sQProcessResource,
++ KERNEL_ID);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto ErrorExit;
++ }
++ }
++
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++
++ if (psSysData->psQueueList == NULL) {
++ eError = OSDestroyResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK) {
++ goto ErrorExit;
++ }
++ }
++
++ErrorExit:
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO * psQueue,
++ u32 ui32ParamSize, void **ppvSpace)
++{
++ int bTimeout = 1;
++
++ ui32ParamSize = (ui32ParamSize + 3) & 0xFFFFFFFC;
++
++ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVGetQueueSpace: max command size is %d bytes",
++ PVRSRV_MAX_CMD_SIZE));
++ return PVRSRV_ERROR_CMD_TOO_BIG;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize) {
++ bTimeout = 0;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout == 1) {
++ *ppvSpace = NULL;
++
++ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++ } else {
++ *ppvSpace =
++ (void *)((u32) psQueue->pvLinQueueUM +
++ psQueue->ui32WriteOffset);
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO * psQueue,
++ PVRSRV_COMMAND ** ppsCommand,
++ u32 ui32DevIndex,
++ u16 CommandType,
++ u32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO * apsDstSync[],
++ u32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO * apsSrcSync[],
++ u32 ui32DataByteSize)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_COMMAND *psCommand;
++ u32 ui32CommandSize;
++ u32 i;
++
++ ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL;
++
++ ui32CommandSize = sizeof(PVRSRV_COMMAND)
++ +
++ ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
++ + ui32DataByteSize;
++
++ eError =
++ PVRSRVGetQueueSpaceKM(psQueue, ui32CommandSize,
++ (void **)&psCommand);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++ psCommand->ui32CmdSize = ui32CommandSize;
++ psCommand->ui32DevIndex = ui32DevIndex;
++ psCommand->CommandType = CommandType;
++ psCommand->ui32DstSyncCount = ui32DstSyncCount;
++ psCommand->ui32SrcSyncCount = ui32SrcSyncCount;
++
++ psCommand->psDstSync =
++ (PVRSRV_SYNC_OBJECT *) (((u32) psCommand) + sizeof(PVRSRV_COMMAND));
++
++ psCommand->psSrcSync =
++ (PVRSRV_SYNC_OBJECT *) (((u32) psCommand->psDstSync)
++ +
++ (ui32DstSyncCount *
++ sizeof(PVRSRV_SYNC_OBJECT)));
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT *) (((u32) psCommand->psSrcSync)
++ +
++ (ui32SrcSyncCount *
++ sizeof
++ (PVRSRV_SYNC_OBJECT)));
++ psCommand->ui32DataSize = ui32DataByteSize;
++
++ for (i = 0; i < ui32DstSyncCount; i++) {
++ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++ psCommand->psDstSync[i].ui32WriteOpsPending =
++ PVRSRVGetWriteOpsPending(apsDstSync[i], 0);
++ psCommand->psDstSync[i].ui32ReadOpsPending =
++ PVRSRVGetReadOpsPending(apsDstSync[i], 0);
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVInsertCommandKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i,
++ psCommand->psDstSync[i].psKernelSyncInfoKM->
++ sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].psKernelSyncInfoKM->
++ sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].ui32ReadOpsPending,
++ psCommand->psDstSync[i].ui32WriteOpsPending));
++ }
++
++ for (i = 0; i < ui32SrcSyncCount; i++) {
++ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++ psCommand->psSrcSync[i].ui32WriteOpsPending =
++ PVRSRVGetWriteOpsPending(apsSrcSync[i], 1);
++ psCommand->psSrcSync[i].ui32ReadOpsPending =
++ PVRSRVGetReadOpsPending(apsSrcSync[i], 1);
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVInsertCommandKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i,
++ psCommand->psSrcSync[i].psKernelSyncInfoKM->
++ sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].psKernelSyncInfoKM->
++ sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].ui32ReadOpsPending,
++ psCommand->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++ *ppsCommand = psCommand;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO * psQueue,
++ PVRSRV_COMMAND * psCommand)
++{
++
++ if (psCommand->ui32DstSyncCount > 0) {
++ psCommand->psDstSync =
++ (PVRSRV_SYNC_OBJECT *) (((u32) psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset +
++ sizeof(PVRSRV_COMMAND));
++ }
++
++ if (psCommand->ui32SrcSyncCount > 0) {
++ psCommand->psSrcSync =
++ (PVRSRV_SYNC_OBJECT *) (((u32) psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset +
++ sizeof(PVRSRV_COMMAND)
++ +
++ (psCommand->ui32DstSyncCount *
++ sizeof(PVRSRV_SYNC_OBJECT)));
++ }
++
++ psCommand->pvData =
++ (PVRSRV_SYNC_OBJECT *) (((u32) psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset +
++ sizeof(PVRSRV_COMMAND)
++ +
++ (psCommand->ui32DstSyncCount *
++ sizeof(PVRSRV_SYNC_OBJECT))
++ +
++ (psCommand->ui32SrcSyncCount *
++ sizeof(PVRSRV_SYNC_OBJECT)));
++
++ UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA * psSysData,
++ PVRSRV_COMMAND * psCommand, int bFlush)
++{
++ PVRSRV_SYNC_OBJECT *psWalkerObj;
++ PVRSRV_SYNC_OBJECT *psEndObj;
++ u32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ u32 ui32WriteOpsComplete;
++ u32 ui32ReadOpsComplete;
++
++ psWalkerObj = psCommand->psDstSync;
++ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++ while (psWalkerObj < psEndObj) {
++ PVRSRV_SYNC_DATA *psSyncData =
++ psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending)) {
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending)
++ || !SYNCOPS_STALE(ui32ReadOpsComplete,
++ psWalkerObj->
++ ui32ReadOpsPending)) {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++
++ psWalkerObj++;
++ }
++
++ psWalkerObj = psCommand->psSrcSync;
++ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++ while (psWalkerObj < psEndObj) {
++ PVRSRV_SYNC_DATA *psSyncData =
++ psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending)) {
++ if (!bFlush &&
++ SYNCOPS_STALE(ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending)
++ && SYNCOPS_STALE(ui32ReadOpsComplete,
++ psWalkerObj->ui32ReadOpsPending)) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++ psSyncData, ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending));
++ }
++
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete,
++ psWalkerObj->ui32WriteOpsPending)
++ || !SYNCOPS_STALE(ui32ReadOpsComplete,
++ psWalkerObj->
++ ui32ReadOpsPending)) {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++ psWalkerObj++;
++ }
++
++ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++ psCommand->ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psCmdCompleteData =
++ psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->
++ CommandType];
++ if (psCmdCompleteData->bInUse) {
++
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++
++ psCmdCompleteData->bInUse = 1;
++
++ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++ for (i = 0; i < psCommand->ui32DstSyncCount; i++) {
++ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVProcessCommand: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->
++ sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->
++ sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++ for (i = 0; i < psCommand->ui32SrcSyncCount; i++) {
++ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVProcessCommand: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->
++ sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->
++ sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++ if (psSysData->
++ ppfnCmdProcList[psCommand->ui32DevIndex][psCommand->
++ CommandType] ((void *)
++ psCmdCompleteData,
++ psCommand->
++ ui32DataSize,
++ psCommand->
++ pvData) ==
++ 0) {
++
++ psCmdCompleteData->bInUse = 0;
++ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ return eError;
++}
++
++void PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ if (psDeviceNode->bReProcessDeviceCommandComplete &&
++ psDeviceNode->pfnDeviceCommandComplete != NULL) {
++ (*psDeviceNode->pfnDeviceCommandComplete) (psDeviceNode);
++ }
++}
++
++PVRSRV_ERROR PVRSRVProcessQueues(u32 ui32CallerID, int bFlush)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_COMMAND *psCommand;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++ psSysData->bReProcessQueues = 0;
++
++ eError = OSLockResource(&psSysData->sQProcessResource, ui32CallerID);
++ if (eError != PVRSRV_OK) {
++
++ psSysData->bReProcessQueues = 1;
++
++ if (ui32CallerID == ISR_ID) {
++ if (bFlush) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH"));
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVProcessQueues: Couldn't acquire queue processing lock"));
++ }
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVProcessQueues: Queue processing lock-acquire failed when called from the Services driver."));
++ PVR_DPF((PVR_DBG_MESSAGE,
++ " This is due to MISR queue processing being interrupted by the Services driver."));
++ }
++
++ return PVRSRV_OK;
++ }
++
++ psQueue = psSysData->psQueueList;
++
++ if (!psQueue) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "No Queues installed - cannot process commands"));
++ }
++
++ if (bFlush) {
++ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++ }
++
++ while (psQueue) {
++ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset) {
++ psCommand =
++ (PVRSRV_COMMAND *) ((u32) psQueue->pvLinQueueKM +
++ psQueue->ui32ReadOffset);
++
++ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush)
++ == PVRSRV_OK) {
++
++ UPDATE_QUEUE_ROFF(psQueue,
++ psCommand->ui32CmdSize)
++
++ if (bFlush) {
++ continue;
++ }
++ }
++
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (bFlush) {
++ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++ }
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVProcessQueues_ForEachCb);
++
++ OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++
++ if (psSysData->bReProcessQueues) {
++ return PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++
++ return PVRSRV_OK;
++}
++
++void PVRSRVCommandCompleteKM(void *hCmdCookie, int bScheduleMISR)
++{
++ u32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData =
++ (COMMAND_COMPLETE_DATA *) hCmdCookie;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount; i++) {
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->
++ ui32WriteOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVCommandCompleteKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->
++ sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->
++ sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount; i++) {
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->
++ ui32ReadOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVCommandCompleteKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->
++ sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->
++ sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++ psCmdCompleteData->bInUse = 0;
++
++ PVRSRVCommandCompleteCallbacks();
++
++#if defined(SYS_USING_INTERRUPTS)
++ if (bScheduleMISR) {
++ OSScheduleMISR(psSysData);
++ }
++#else
++#endif
++}
++
++void PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ if (psDeviceNode->pfnDeviceCommandComplete != NULL) {
++
++ (*psDeviceNode->pfnDeviceCommandComplete) (psDeviceNode);
++ }
++}
++
++void PVRSRVCommandCompleteCallbacks(void)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVCommandCompleteCallbacks_ForEachCb);
++}
++
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(u32 ui32DevIndex,
++ PFN_CMD_PROC * ppfnCmdProcList,
++ u32 ui32MaxSyncsPerCmd[][2],
++ u32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ u32 i;
++ u32 ui32AllocSize;
++ PFN_CMD_PROC *ppfnCmdProc;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++ if (ui32DevIndex >= SYS_DEVICE_COUNT) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ (void **)&psSysData->ppfnCmdProcList[ui32DevIndex],
++ NULL, "Internal Queue Info structure");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: Failed to alloc queue"));
++ return eError;
++ }
++
++ ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++ for (i = 0; i < ui32CmdCount; i++) {
++ ppfnCmdProc[i] = ppfnCmdProcList[i];
++ }
++
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA *);
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (void **)&psSysData->
++ ppsCmdCompleteData[ui32DevIndex], NULL,
++ "Array of Pointers for Command Store");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
++ goto ErrorExit;
++ }
++
++ for (i = 0; i < ui32CmdCount; i++) {
++
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (void **)&psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i], NULL,
++ "Command Complete Data");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d",
++ i));
++ goto ErrorExit;
++ }
++
++ memset(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00,
++ ui32AllocSize);
++
++ psCmdCompleteData =
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++ psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT *)
++ (((u32) psCmdCompleteData)
++ + sizeof(COMMAND_COMPLETE_DATA));
++ psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT *)
++ (((u32) psCmdCompleteData->psDstSync)
++ + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0]));
++
++ psCmdCompleteData->ui32AllocSize = ui32AllocSize;
++ }
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex] != NULL) {
++ for (i = 0; i < ui32CmdCount; i++) {
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] !=
++ NULL) {
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i],
++ NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] =
++ NULL;
++ }
++ }
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA *);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize,
++ psSysData->ppsCmdCompleteData[ui32DevIndex], NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = NULL;
++ }
++
++ if (psSysData->ppfnCmdProcList[ui32DevIndex] != NULL) {
++ ui32AllocSize = ui32CmdCount * sizeof(PFN_CMD_PROC);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize,
++ psSysData->ppfnCmdProcList[ui32DevIndex], NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = NULL;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(u32 ui32DevIndex, u32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ u32 i;
++
++ if (ui32DevIndex >= SYS_DEVICE_COUNT) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex] == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: Invalid command array"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ } else {
++ for (i = 0; i < ui32CmdCount; i++) {
++
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] !=
++ NULL) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i]->
++ ui32AllocSize,
++ psSysData->
++ ppsCmdCompleteData[ui32DevIndex][i],
++ NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] =
++ NULL;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA *),
++ psSysData->ppsCmdCompleteData[ui32DevIndex], NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = NULL;
++ }
++
++ if (psSysData->ppfnCmdProcList[ui32DevIndex] != NULL) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ psSysData->ppfnCmdProcList[ui32DevIndex], NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = NULL;
++ }
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/ra.c
+@@ -0,0 +1,1889 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#ifdef __linux__
++#include <linux/kernel.h>
++#include "proc.h"
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++#include <stdio.h>
++#endif
++
++#define MINIMUM_HASH_SIZE (64)
++
++#if defined(VALIDATE_ARENA_TEST)
++
++typedef enum RESOURCE_DESCRIPTOR_TAG {
++
++ RESOURCE_SPAN_LIVE = 10,
++ RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_START,
++ IMPORTED_RESOURCE_SPAN_LIVE,
++ IMPORTED_RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_END,
++
++} RESOURCE_DESCRIPTOR;
++
++typedef enum RESOURCE_TYPE_TAG {
++
++ IMPORTED_RESOURCE_TYPE = 20,
++ NON_IMPORTED_RESOURCE_TYPE
++} RESOURCE_TYPE;
++
++static u32 ui32BoundaryTagID = 0;
++
++u32 ValidateArena(RA_ARENA * pArena);
++#endif
++
++struct _BT_ {
++ enum bt_type {
++ btt_span,
++ btt_free,
++ btt_live
++ } type;
++
++ u32 base;
++ u32 uSize;
++
++ struct _BT_ *pNextSegment;
++ struct _BT_ *pPrevSegment;
++
++ struct _BT_ *pNextFree;
++ struct _BT_ *pPrevFree;
++
++ BM_MAPPING *psMapping;
++
++#if defined(VALIDATE_ARENA_TEST)
++ RESOURCE_DESCRIPTOR eResourceSpan;
++ RESOURCE_TYPE eResourceType;
++
++ u32 ui32BoundaryTagID;
++#endif
++
++};
++typedef struct _BT_ BT;
++
++struct _RA_ARENA_ {
++
++ char *name;
++
++ u32 uQuantum;
++
++ int (*pImportAlloc) (void *,
++ u32 uSize,
++ u32 * pActualSize,
++ BM_MAPPING ** ppsMapping, u32 uFlags, u32 * pBase);
++ void (*pImportFree) (void *, u32, BM_MAPPING * psMapping);
++ void (*pBackingStoreFree) (void *, u32, u32, void *);
++
++ void *pImportHandle;
++
++#define FREE_TABLE_LIMIT 32
++
++ BT *aHeadFree[FREE_TABLE_LIMIT];
++
++ BT *pHeadSegment;
++ BT *pTailSegment;
++
++ HASH_TABLE *pSegmentHash;
++
++#ifdef RA_STATS
++ RA_STATISTICS sStatistics;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE 32
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ struct proc_dir_entry *pProcInfo;
++ struct proc_dir_entry *pProcSegs;
++#else
++ char szProcInfoName[PROC_NAME_SIZE];
++ char szProcSegsName[PROC_NAME_SIZE];
++#endif
++
++ int bInitProcEntry;
++#endif
++};
++#if defined(ENABLE_RA_DUMP)
++void RA_Dump(RA_ARENA * pArena);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void *el);
++static void *RA_ProcSeqOff2ElementInfo(struct seq_file *sfile, loff_t off);
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void *el);
++static void *RA_ProcSeqOff2ElementRegs(struct seq_file *sfile, loff_t off);
++
++#else
++static int
++RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof,
++ void *data);
++static int RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof,
++ void *data);
++#endif
++
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++void CheckBMFreespace(void);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static char *ReplaceSpaces(char *const pS)
++{
++ char *pT;
++
++ for (pT = pS; *pT != 0; pT++) {
++ if (*pT == ' ' || *pT == '\t') {
++ *pT = '_';
++ }
++ }
++
++ return pS;
++}
++#endif
++
++static int
++_RequestAllocFail(void *_h,
++ u32 _uSize,
++ u32 * _pActualSize,
++ BM_MAPPING ** _ppsMapping, u32 _uFlags, u32 * _pBase)
++{
++ return 0;
++}
++
++static u32 pvr_log2(u32 n)
++{
++ u32 l = 0;
++ n >>= 1;
++ while (n > 0) {
++ n >>= 1;
++ l++;
++ }
++ return l;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsertAfter(RA_ARENA * pArena, BT * pInsertionPoint, BT * pBT)
++{
++ PVR_ASSERT(pArena != NULL);
++ PVR_ASSERT(pInsertionPoint != NULL);
++
++ if ((pInsertionPoint == NULL) || (pArena == NULL)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_SegmentListInsertAfter: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBT->pNextSegment = pInsertionPoint->pNextSegment;
++ pBT->pPrevSegment = pInsertionPoint;
++ if (pInsertionPoint->pNextSegment == NULL)
++ pArena->pTailSegment = pBT;
++ else
++ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
++ pInsertionPoint->pNextSegment = pBT;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR _SegmentListInsert(RA_ARENA * pArena, BT * pBT)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (pArena->pHeadSegment == NULL) {
++ pArena->pHeadSegment = pArena->pTailSegment = pBT;
++ pBT->pNextSegment = pBT->pPrevSegment = NULL;
++ } else {
++ BT *pBTScan;
++
++ if (pBT->base < pArena->pHeadSegment->base) {
++
++ pBT->pNextSegment = pArena->pHeadSegment;
++ pArena->pHeadSegment->pPrevSegment = pBT;
++ pArena->pHeadSegment = pBT;
++ pBT->pPrevSegment = NULL;
++ } else {
++
++ pBTScan = pArena->pHeadSegment;
++
++ while ((pBTScan->pNextSegment != NULL)
++ && (pBT->base >= pBTScan->pNextSegment->base)) {
++ pBTScan = pBTScan->pNextSegment;
++ }
++
++ eError = _SegmentListInsertAfter(pArena, pBTScan, pBT);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++ }
++ return eError;
++}
++
++static void _SegmentListRemove(RA_ARENA * pArena, BT * pBT)
++{
++ if (pBT->pPrevSegment == NULL)
++ pArena->pHeadSegment = pBT->pNextSegment;
++ else
++ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++ if (pBT->pNextSegment == NULL)
++ pArena->pTailSegment = pBT->pPrevSegment;
++ else
++ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static BT *_SegmentSplit(RA_ARENA * pArena, BT * pBT, u32 uSize)
++{
++ BT *pNeighbour;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_SegmentSplit: invalid parameter - pArena"));
++ return NULL;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (void **)&pNeighbour, NULL,
++ "Boundary Tag") != PVRSRV_OK) {
++ return NULL;
++ }
++
++ memset(pNeighbour, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pNeighbour->pPrevSegment = pBT;
++ pNeighbour->pNextSegment = pBT->pNextSegment;
++ if (pBT->pNextSegment == NULL)
++ pArena->pTailSegment = pNeighbour;
++ else
++ pBT->pNextSegment->pPrevSegment = pNeighbour;
++ pBT->pNextSegment = pNeighbour;
++
++ pNeighbour->type = btt_free;
++ pNeighbour->uSize = pBT->uSize - uSize;
++ pNeighbour->base = pBT->base + uSize;
++ pNeighbour->psMapping = pBT->psMapping;
++ pBT->uSize = uSize;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE) {
++ pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ } else if (pNeighbour->pPrevSegment->eResourceType ==
++ NON_IMPORTED_RESOURCE_TYPE) {
++ pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++
++ return pNeighbour;
++}
++
++static void _FreeListInsert(RA_ARENA * pArena, BT * pBT)
++{
++ u32 uIndex;
++ uIndex = pvr_log2(pBT->uSize);
++ pBT->type = btt_free;
++ pBT->pNextFree = pArena->aHeadFree[uIndex];
++ pBT->pPrevFree = NULL;
++ if (pArena->aHeadFree[uIndex] != NULL)
++ pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++ pArena->aHeadFree[uIndex] = pBT;
++}
++
++static void _FreeListRemove(RA_ARENA * pArena, BT * pBT)
++{
++ u32 uIndex;
++ uIndex = pvr_log2(pBT->uSize);
++ if (pBT->pNextFree != NULL)
++ pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++ if (pBT->pPrevFree == NULL)
++ pArena->aHeadFree[uIndex] = pBT->pNextFree;
++ else
++ pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++static BT *_BuildSpanMarker(u32 base, u32 uSize)
++{
++ BT *pBT;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (void **)&pBT, NULL, "Boundary Tag") != PVRSRV_OK) {
++ return NULL;
++ }
++
++ memset(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_span;
++ pBT->base = base;
++ pBT->uSize = uSize;
++ pBT->psMapping = NULL;
++
++ return pBT;
++}
++
++static BT *_BuildBT(u32 base, u32 uSize)
++{
++ BT *pBT;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (void **)&pBT, NULL, "Boundary Tag") != PVRSRV_OK) {
++ return NULL;
++ }
++
++ memset(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_free;
++ pBT->base = base;
++ pBT->uSize = uSize;
++
++ return pBT;
++}
++
++static BT *_InsertResource(RA_ARENA * pArena, u32 base, u32 uSize)
++{
++ BT *pBT;
++ PVR_ASSERT(pArena != NULL);
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_InsertResource: invalid parameter - pArena"));
++ return NULL;
++ }
++
++ pBT = _BuildBT(base, uSize);
++ if (pBT != NULL) {
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = RESOURCE_SPAN_FREE;
++ pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++#endif
++
++ if (_SegmentListInsert(pArena, pBT) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_InsertResource: call to _SegmentListInsert failed"));
++ return NULL;
++ }
++ _FreeListInsert(pArena, pBT);
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount += uSize;
++ pArena->sStatistics.uFreeResourceCount += uSize;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ }
++ return pBT;
++}
++
++static BT *_InsertResourceSpan(RA_ARENA * pArena, u32 base, u32 uSize)
++{
++ PVRSRV_ERROR eError;
++ BT *pSpanStart;
++ BT *pSpanEnd;
++ BT *pBT;
++
++ PVR_ASSERT(pArena != NULL);
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_InsertResourceSpan: invalid parameter - pArena"));
++ return NULL;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++ pArena->name, base, uSize));
++
++ pSpanStart = _BuildSpanMarker(base, uSize);
++ if (pSpanStart == NULL) {
++ goto fail_start;
++ }
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
++ pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pSpanEnd = _BuildSpanMarker(base + uSize, 0);
++ if (pSpanEnd == NULL) {
++ goto fail_end;
++ }
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
++ pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pBT = _BuildBT(base, uSize);
++ if (pBT == NULL) {
++ goto fail_bt;
++ }
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ eError = _SegmentListInsert(pArena, pSpanStart);
++ if (eError != PVRSRV_OK) {
++ goto fail_SegListInsert;
++ }
++
++ eError = _SegmentListInsertAfter(pArena, pSpanStart, pBT);
++ if (eError != PVRSRV_OK) {
++ goto fail_SegListInsert;
++ }
++
++ _FreeListInsert(pArena, pBT);
++
++ eError = _SegmentListInsertAfter(pArena, pBT, pSpanEnd);
++ if (eError != PVRSRV_OK) {
++ goto fail_SegListInsert;
++ }
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount += uSize;
++#endif
++ return pBT;
++
++fail_SegListInsert:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, NULL);
++
++fail_bt:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, NULL);
++
++fail_end:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, NULL);
++
++fail_start:
++ return NULL;
++}
++
++static void _FreeBT(RA_ARENA * pArena, BT * pBT, int bFreeBackingStore)
++{
++ BT *pNeighbour;
++ u32 uOrigBase;
++ u32 uOrigSize;
++
++ PVR_ASSERT(pArena != NULL);
++ PVR_ASSERT(pBT != NULL);
++
++ if ((pArena == NULL) || (pBT == NULL)) {
++ PVR_DPF((PVR_DBG_ERROR, "_FreeBT: invalid parameter"));
++ return;
++ }
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount--;
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += pBT->uSize;
++#endif
++
++ uOrigBase = pBT->base;
++ uOrigSize = pBT->uSize;
++
++ pNeighbour = pBT->pPrevSegment;
++ if (pNeighbour != NULL
++ && pNeighbour->type == btt_free
++ && pNeighbour->base + pNeighbour->uSize == pBT->base) {
++ _FreeListRemove(pArena, pNeighbour);
++ _SegmentListRemove(pArena, pNeighbour);
++ pBT->base = pNeighbour->base;
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour,
++ NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++ pNeighbour = pBT->pNextSegment;
++ if (pNeighbour != NULL
++ && pNeighbour->type == btt_free
++ && pBT->base + pBT->uSize == pNeighbour->base) {
++ _FreeListRemove(pArena, pNeighbour);
++ _SegmentListRemove(pArena, pNeighbour);
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour,
++ NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++ if (pArena->pBackingStoreFree != NULL && bFreeBackingStore) {
++ u32 uRoundedStart, uRoundedEnd;
++
++ uRoundedStart =
++ (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedStart < pBT->base) {
++ uRoundedStart += pArena->uQuantum;
++ }
++
++ uRoundedEnd =
++ ((uOrigBase + uOrigSize + pArena->uQuantum -
++ 1) / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedEnd > (pBT->base + pBT->uSize)) {
++ uRoundedEnd -= pArena->uQuantum;
++ }
++
++ if (uRoundedStart < uRoundedEnd) {
++ pArena->pBackingStoreFree(pArena->pImportHandle,
++ uRoundedStart, uRoundedEnd,
++ (void *)0);
++ }
++ }
++
++ if (pBT->pNextSegment != NULL && pBT->pNextSegment->type == btt_span
++ && pBT->pPrevSegment != NULL && pBT->pPrevSegment->type == btt_span)
++ {
++ BT *next = pBT->pNextSegment;
++ BT *prev = pBT->pPrevSegment;
++ _SegmentListRemove(pArena, next);
++ _SegmentListRemove(pArena, prev);
++ _SegmentListRemove(pArena, pBT);
++ pArena->pImportFree(pArena->pImportHandle, pBT->base,
++ pBT->psMapping);
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++ pArena->sStatistics.uExportCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount -= pBT->uSize;
++ pArena->sStatistics.uTotalResourceCount -= pBT->uSize;
++#endif
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, NULL);
++
++ } else
++ _FreeListInsert(pArena, pBT);
++}
++
++static int
++_AttemptAllocAligned(RA_ARENA * pArena,
++ u32 uSize,
++ BM_MAPPING ** ppsMapping,
++ u32 uFlags,
++ u32 uAlignment, u32 uAlignmentOffset, u32 * base)
++{
++ u32 uIndex;
++ PVR_ASSERT(pArena != NULL);
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_AttemptAllocAligned: invalid parameter - pArena"));
++ return 0;
++ }
++
++ if (uAlignment > 1)
++ uAlignmentOffset %= uAlignment;
++
++ uIndex = pvr_log2(uSize);
++
++#if 0
++
++ if (1u << uIndex < uSize)
++ uIndex++;
++#endif
++
++ while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex] == NULL)
++ uIndex++;
++
++ while (uIndex < FREE_TABLE_LIMIT) {
++ if (pArena->aHeadFree[uIndex] != NULL) {
++
++ BT *pBT;
++
++ pBT = pArena->aHeadFree[uIndex];
++ while (pBT != NULL) {
++ u32 aligned_base;
++
++ if (uAlignment > 1)
++ aligned_base =
++ (pBT->base + uAlignmentOffset +
++ uAlignment -
++ 1) / uAlignment * uAlignment -
++ uAlignmentOffset;
++ else
++ aligned_base = pBT->base;
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_AttemptAllocAligned: pBT-base=0x%x "
++ "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++ pBT->base, pBT->uSize, aligned_base,
++ uSize));
++
++ if (pBT->base + pBT->uSize >=
++ aligned_base + uSize) {
++ if (!pBT->psMapping
++ || pBT->psMapping->ui32Flags ==
++ uFlags) {
++ _FreeListRemove(pArena, pBT);
++
++ PVR_ASSERT(pBT->type ==
++ btt_free);
++
++#ifdef RA_STATS
++ pArena->sStatistics.
++ uLiveSegmentCount++;
++ pArena->sStatistics.
++ uFreeSegmentCount--;
++ pArena->sStatistics.
++ uFreeResourceCount -=
++ pBT->uSize;
++#endif
++
++ if (aligned_base > pBT->base) {
++ BT *pNeighbour;
++
++ pNeighbour =
++ _SegmentSplit
++ (pArena, pBT,
++ aligned_base -
++ pBT->base);
++
++ if (pNeighbour == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "_AttemptAllocAligned: Front split failed"));
++
++ _FreeListInsert
++ (pArena,
++ pBT);
++ return 0;
++ }
++
++ _FreeListInsert(pArena,
++ pBT);
++#ifdef RA_STATS
++ pArena->sStatistics.
++ uFreeSegmentCount++;
++ pArena->sStatistics.
++ uFreeResourceCount
++ += pBT->uSize;
++#endif
++ pBT = pNeighbour;
++ }
++
++ if (pBT->uSize > uSize) {
++ BT *pNeighbour;
++ pNeighbour =
++ _SegmentSplit
++ (pArena, pBT,
++ uSize);
++
++ if (pNeighbour == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "_AttemptAllocAligned: Back split failed"));
++
++ _FreeListInsert
++ (pArena,
++ pBT);
++ return 0;
++ }
++
++ _FreeListInsert(pArena,
++ pNeighbour);
++#ifdef RA_STATS
++ pArena->sStatistics.
++ uFreeSegmentCount++;
++ pArena->sStatistics.
++ uFreeResourceCount
++ +=
++ pNeighbour->uSize;
++#endif
++ }
++
++ pBT->type = btt_live;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pBT->eResourceType ==
++ IMPORTED_RESOURCE_TYPE) {
++ pBT->eResourceSpan =
++ IMPORTED_RESOURCE_SPAN_LIVE;
++ } else if (pBT->eResourceType ==
++ NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan =
++ RESOURCE_SPAN_LIVE;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++ if (!HASH_Insert
++ (pArena->pSegmentHash,
++ pBT->base, (u32) pBT)) {
++ _FreeBT(pArena, pBT, 0);
++ return 0;
++ }
++
++ if (ppsMapping != NULL)
++ *ppsMapping =
++ pBT->psMapping;
++
++ *base = pBT->base;
++
++ return 1;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x",
++ pBT->psMapping->
++ ui32Flags, uFlags));
++
++ }
++ }
++ pBT = pBT->pNextFree;
++ }
++
++ }
++ uIndex++;
++ }
++
++ return 0;
++}
++
++RA_ARENA *RA_Create(char *name,
++ u32 base,
++ u32 uSize,
++ BM_MAPPING * psMapping,
++ u32 uQuantum,
++ int (*imp_alloc) (void *, u32 uSize, u32 * pActualSize,
++ BM_MAPPING ** ppsMapping, u32 _flags,
++ u32 * pBase), void (*imp_free) (void *,
++ u32,
++ BM_MAPPING
++ *),
++ void (*backingstore_free) (void *, u32, u32, void *),
++ void *pImportHandle)
++{
++ RA_ARENA *pArena;
++ BT *pBT;
++ int i;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++ name, base, uSize, imp_alloc, imp_free));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*pArena),
++ (void **)&pArena, NULL, "Resource Arena") != PVRSRV_OK) {
++ goto arena_fail;
++ }
++
++ pArena->name = name;
++ pArena->pImportAlloc =
++ (imp_alloc != NULL) ? imp_alloc : _RequestAllocFail;
++ pArena->pImportFree = imp_free;
++ pArena->pBackingStoreFree = backingstore_free;
++ pArena->pImportHandle = pImportHandle;
++ for (i = 0; i < FREE_TABLE_LIMIT; i++)
++ pArena->aHeadFree[i] = NULL;
++ pArena->pHeadSegment = NULL;
++ pArena->pTailSegment = NULL;
++ pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount = 0;
++ pArena->sStatistics.uLiveSegmentCount = 0;
++ pArena->sStatistics.uFreeSegmentCount = 0;
++ pArena->sStatistics.uFreeResourceCount = 0;
++ pArena->sStatistics.uTotalResourceCount = 0;
++ pArena->sStatistics.uCumulativeAllocs = 0;
++ pArena->sStatistics.uCumulativeFrees = 0;
++ pArena->sStatistics.uImportCount = 0;
++ pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ if (strcmp(pArena->name, "") != 0) {
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++ int ret;
++ int (*pfnCreateProcEntry) (const char *, read_proc_t,
++ write_proc_t, void *);
++
++ pArena->bInitProcEntry =
++ !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++ pfnCreateProcEntry =
++ pArena->
++ bInitProcEntry ? CreateProcEntry :
++ CreatePerProcessProcEntry;
++
++ ret =
++ snprintf(pArena->szProcInfoName,
++ sizeof(pArena->szProcInfoName), "ra_info_%s",
++ pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcInfoName)) {
++ (void)
++ pfnCreateProcEntry(ReplaceSpaces
++ (pArena->szProcInfoName),
++ RA_DumpInfo, 0, pArena);
++ } else {
++ pArena->szProcInfoName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Create: couldn't create ra_info proc entry for arena %s",
++ pArena->name));
++ }
++
++ ret =
++ snprintf(pArena->szProcSegsName,
++ sizeof(pArena->szProcSegsName), "ra_segs_%s",
++ pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcSegsName)) {
++ (void)
++ pfnCreateProcEntry(ReplaceSpaces
++ (pArena->szProcSegsName),
++ RA_DumpSegs, 0, pArena);
++ } else {
++ pArena->szProcSegsName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Create: couldn't create ra_segs proc entry for arena %s",
++ pArena->name));
++ }
++#else
++
++ int ret;
++ char szProcInfoName[PROC_NAME_SIZE];
++ char szProcSegsName[PROC_NAME_SIZE];
++ struct proc_dir_entry *(*pfnCreateProcEntrySeq) (const char *,
++ void *,
++ pvr_next_proc_seq_t,
++ pvr_show_proc_seq_t,
++ pvr_off2element_proc_seq_t,
++ pvr_startstop_proc_seq_t,
++ write_proc_t);
++
++ pArena->bInitProcEntry =
++ !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++ pfnCreateProcEntrySeq =
++ pArena->
++ bInitProcEntry ? CreateProcEntrySeq :
++ CreatePerProcessProcEntrySeq;
++
++ ret =
++ snprintf(szProcInfoName, sizeof(szProcInfoName),
++ "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName)) {
++ pArena->pProcInfo =
++ pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName),
++ pArena, NULL,
++ RA_ProcSeqShowInfo,
++ RA_ProcSeqOff2ElementInfo,
++ NULL, NULL);
++ } else {
++ pArena->pProcInfo = 0;
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Create: couldn't create ra_info proc entry for arena %s",
++ pArena->name));
++ }
++
++ ret =
++ snprintf(szProcSegsName, sizeof(szProcSegsName),
++ "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName)) {
++ pArena->pProcSegs =
++ pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName),
++ pArena, NULL,
++ RA_ProcSeqShowRegs,
++ RA_ProcSeqOff2ElementRegs,
++ NULL, NULL);
++ } else {
++ pArena->pProcSegs = 0;
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Create: couldn't create ra_segs proc entry for arena %s",
++ pArena->name));
++ }
++
++#endif
++
++ }
++#endif
++
++ pArena->pSegmentHash = HASH_Create(MINIMUM_HASH_SIZE);
++ if (pArena->pSegmentHash == NULL) {
++ goto hash_fail;
++ }
++ if (uSize > 0) {
++ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++ pBT = _InsertResource(pArena, base, uSize);
++ if (pBT == NULL) {
++ goto insert_fail;
++ }
++ pBT->psMapping = psMapping;
++
++ }
++ return pArena;
++
++insert_fail:
++ HASH_Delete(pArena->pSegmentHash);
++hash_fail:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, NULL);
++
++arena_fail:
++ return NULL;
++}
++
++void RA_Delete(RA_ARENA * pArena)
++{
++ u32 uIndex;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Delete: invalid parameter - pArena"));
++ return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "RA_Delete: name='%s'", pArena->name));
++
++ for (uIndex = 0; uIndex < FREE_TABLE_LIMIT; uIndex++)
++ pArena->aHeadFree[uIndex] = NULL;
++
++ while (pArena->pHeadSegment != NULL) {
++ BT *pBT = pArena->pHeadSegment;
++
++ if (pBT->type != btt_free) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Delete: allocations still exist in the arena that is being destroyed"));
++ PVR_DPF((PVR_DBG_ERROR,
++ "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Delete: base = 0x%x size=0x%x", pBT->base,
++ pBT->uSize));
++ }
++
++ _SegmentListRemove(pArena, pBT);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++#endif
++ }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ {
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ void (*pfnRemoveProcEntrySeq) (struct proc_dir_entry *);
++
++ pfnRemoveProcEntrySeq =
++ pArena->
++ bInitProcEntry ? RemoveProcEntrySeq :
++ RemovePerProcessProcEntrySeq;
++
++ if (pArena->pProcInfo != 0) {
++ pfnRemoveProcEntrySeq(pArena->pProcInfo);
++ }
++
++ if (pArena->pProcSegs != 0) {
++ pfnRemoveProcEntrySeq(pArena->pProcSegs);
++ }
++#else
++ void (*pfnRemoveProcEntry) (const char *);
++
++ pfnRemoveProcEntry =
++ pArena->
++ bInitProcEntry ? RemoveProcEntry :
++ RemovePerProcessProcEntry;
++
++ if (pArena->szProcInfoName[0] != 0) {
++ pfnRemoveProcEntry(pArena->szProcInfoName);
++ }
++
++ if (pArena->szProcSegsName[0] != 0) {
++ pfnRemoveProcEntry(pArena->szProcSegsName);
++ }
++#endif
++ }
++#endif
++ HASH_Delete(pArena->pSegmentHash);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, NULL);
++
++}
++
++int RA_TestDelete(RA_ARENA * pArena)
++{
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena != NULL) {
++ while (pArena->pHeadSegment != NULL) {
++ BT *pBT = pArena->pHeadSegment;
++ if (pBT->type != btt_free) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_TestDelete: detected resource leak!"));
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_TestDelete: base = 0x%x size=0x%x",
++ pBT->base, pBT->uSize));
++ return 0;
++ }
++ }
++ }
++
++ return 1;
++}
++
++int RA_Add(RA_ARENA * pArena, u32 base, u32 uSize)
++{
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "RA_Add: invalid parameter - pArena"));
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base,
++ uSize));
++
++ uSize =
++ (uSize + pArena->uQuantum -
++ 1) / pArena->uQuantum * pArena->uQuantum;
++ return ((int)(_InsertResource(pArena, base, uSize) != NULL));
++}
++
++int
++RA_Alloc(RA_ARENA * pArena,
++ u32 uRequestSize,
++ u32 * pActualSize,
++ BM_MAPPING ** ppsMapping,
++ u32 uFlags, u32 uAlignment, u32 uAlignmentOffset, u32 * base)
++{
++ int bResult;
++ u32 uSize = uRequestSize;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "RA_Alloc: invalid parameter - pArena"));
++ return 0;
++ }
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ if (pActualSize != NULL) {
++ *pActualSize = uSize;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x",
++ pArena->name, uSize, uRequestSize, uAlignment,
++ uAlignmentOffset));
++
++ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset, base);
++ if (!bResult) {
++ BM_MAPPING *psImportMapping;
++ u32 import_base;
++ u32 uImportSize = uSize;
++
++ if (uAlignment > pArena->uQuantum) {
++ uImportSize += (uAlignment - 1);
++ }
++
++ uImportSize =
++ ((uImportSize + pArena->uQuantum -
++ 1) / pArena->uQuantum) * pArena->uQuantum;
++
++ bResult =
++ pArena->pImportAlloc(pArena->pImportHandle, uImportSize,
++ &uImportSize, &psImportMapping, uFlags,
++ &import_base);
++ if (bResult) {
++ BT *pBT;
++ pBT =
++ _InsertResourceSpan(pArena, import_base,
++ uImportSize);
++
++ if (pBT == NULL) {
++
++ pArena->pImportFree(pArena->pImportHandle,
++ import_base,
++ psImportMapping);
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x failed!",
++ pArena->name, uSize));
++
++ return 0;
++ }
++ pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += uImportSize;
++ pArena->sStatistics.uImportCount++;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ bResult =
++ _AttemptAllocAligned(pArena, uSize, ppsMapping,
++ uFlags, uAlignment,
++ uAlignmentOffset, base);
++ if (!bResult) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s' uAlignment failed!",
++ pArena->name));
++ }
++ }
++ }
++#ifdef RA_STATS
++ if (bResult)
++ pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++ pArena->name, uSize, *base, bResult));
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++ return bResult;
++}
++
++#if defined(VALIDATE_ARENA_TEST)
++
++u32 ValidateArena(RA_ARENA * pArena)
++{
++ BT *pSegment;
++ RESOURCE_DESCRIPTOR eNextSpan;
++
++ pSegment = pArena->pHeadSegment;
++
++ if (pSegment == NULL) {
++ return 0;
++ }
++
++ if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE) {
++ PVR_ASSERT(pSegment->eResourceSpan ==
++ IMPORTED_RESOURCE_SPAN_START);
++
++ while (pSegment->pNextSegment) {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan) {
++ case IMPORTED_RESOURCE_SPAN_LIVE:
++
++ if (!
++ ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE)
++ || (eNextSpan ==
++ IMPORTED_RESOURCE_SPAN_FREE)
++ || (eNextSpan ==
++ IMPORTED_RESOURCE_SPAN_END))) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_FREE:
++
++ if (!
++ ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE)
++ || (eNextSpan ==
++ IMPORTED_RESOURCE_SPAN_END))) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_END:
++
++ if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE)
++ || (eNextSpan ==
++ IMPORTED_RESOURCE_SPAN_FREE)
++ || (eNextSpan ==
++ IMPORTED_RESOURCE_SPAN_END)) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_START:
++
++ if (!
++ ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE)
++ || (eNextSpan ==
++ IMPORTED_RESOURCE_SPAN_FREE))) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++ } else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE) {
++ PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE)
++ || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE));
++
++ while (pSegment->pNextSegment) {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan) {
++ case RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE))) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE))) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID,
++ pSegment->base,
++ pSegment->pNextSegment->
++ ui32BoundaryTagID,
++ pSegment->pNextSegment->base,
++ pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ValidateArena ERROR: pSegment->eResourceType unrecognized"));
++
++ PVR_DBG_BREAK;
++ }
++
++ return 0;
++}
++
++#endif
++
++void RA_Free(RA_ARENA * pArena, u32 base, int bFreeBackingStore)
++{
++ BT *pBT;
++
++ PVR_ASSERT(pArena != NULL);
++
++ if (pArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "RA_Free: invalid parameter - pArena"));
++ return;
++ }
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "RA_Free: name='%s', base=0x%x", pArena->name, base));
++
++ pBT = (BT *) HASH_Remove(pArena->pSegmentHash, base);
++ PVR_ASSERT(pBT != NULL);
++
++ if (pBT) {
++ PVR_ASSERT(pBT->base == base);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++ {
++ unsigned char *p;
++ unsigned char *endp;
++
++ p = (unsigned char *) pBT->base + SysGetDevicePhysOffset();
++ endp = (unsigned char *) ((u32) (p + pBT->uSize));
++ while ((u32) p & 3) {
++ *p++ = 0xAA;
++ }
++ while (p < (unsigned char *) ((u32) endp & 0xfffffffc)) {
++ *(u32 *) p = 0xAAAAAAAA;
++ p += sizeof(u32);
++ }
++ while (p < endp) {
++ *p++ = 0xAA;
++ }
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",
++ (unsigned char *) pBT->base +
++ SysGetDevicePhysOffset(), endp - 1,
++ pBT->uSize));
++ }
++#endif
++ _FreeBT(pArena, pBT, bFreeBackingStore);
++ }
++}
++
++int RA_GetNextLiveSegment(void *hArena, RA_SEGMENT_DETAILS * psSegDetails)
++{
++ BT *pBT;
++
++ if (psSegDetails->hSegment) {
++ pBT = (BT *) psSegDetails->hSegment;
++ } else {
++ RA_ARENA *pArena = (RA_ARENA *) hArena;
++
++ pBT = pArena->pHeadSegment;
++ }
++
++ while (pBT != NULL) {
++ if (pBT->type == btt_live) {
++ psSegDetails->uiSize = pBT->uSize;
++ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++ psSegDetails->hSegment = (void *)pBT->pNextSegment;
++
++ return 1;
++ }
++
++ pBT = pBT->pNextSegment;
++ }
++
++ psSegDetails->uiSize = 0;
++ psSegDetails->sCpuPhyAddr.uiAddr = 0;
++ psSegDetails->hSegment = (void *)-1;
++
++ return 0;
++}
++
++#ifdef USE_BM_FREESPACE_CHECK
++RA_ARENA *pJFSavedArena = NULL;
++
++void CheckBMFreespace(void)
++{
++ BT *pBT;
++ unsigned char *p;
++ unsigned char *endp;
++
++ if (pJFSavedArena != NULL) {
++ for (pBT = pJFSavedArena->pHeadSegment; pBT != NULL;
++ pBT = pBT->pNextSegment) {
++ if (pBT->type == btt_free) {
++ p = (unsigned char *) pBT->base +
++ SysGetDevicePhysOffset();
++ endp =
++ (unsigned char *) ((u32) (p + pBT->uSize) &
++ 0xfffffffc);
++
++ while ((u32) p & 3) {
++ if (*p++ != 0xAA) {
++ fprintf(stderr,
++ "BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",
++ p, *(u32 *) p);
++ for (;;) ;
++ break;
++ }
++ }
++ while (p < endp) {
++ if (*(u32 *) p != 0xAAAAAAAA) {
++ fprintf(stderr,
++ "BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",
++ p, *(u32 *) p);
++ for (;;) ;
++ break;
++ }
++ p += 4;
++ }
++ }
++ }
++ }
++}
++#endif
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
++static char *_BTType(int eType)
++{
++ switch (eType) {
++ case btt_span:
++ return "span";
++ case btt_free:
++ return "free";
++ case btt_live:
++ return "live";
++ }
++ return "junk";
++}
++#endif
++
++#if defined(ENABLE_RA_DUMP)
++void RA_Dump(RA_ARENA * pArena)
++{
++ BT *pBT;
++ PVR_ASSERT(pArena != NULL);
++ PVR_DPF((PVR_DBG_MESSAGE, "Arena '%s':", pArena->name));
++ PVR_DPF((PVR_DBG_MESSAGE,
++ " alloc=%08X free=%08X handle=%08X quantum=%d",
++ pArena->pImportAlloc, pArena->pImportFree,
++ pArena->pImportHandle, pArena->uQuantum));
++ PVR_DPF((PVR_DBG_MESSAGE, " segment Chain:"));
++ if (pArena->pHeadSegment != NULL &&
++ pArena->pHeadSegment->pPrevSegment != NULL)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ " error: head boundary tag has invalid pPrevSegment"));
++ if (pArena->pTailSegment != NULL
++ && pArena->pTailSegment->pNextSegment != NULL)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ " error: tail boundary tag has invalid pNextSegment"));
++
++ for (pBT = pArena->pHeadSegment; pBT != NULL; pBT = pBT->pNextSegment) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "\tbase=0x%x size=0x%x type=%s ref=%08X",
++ (u32) pBT->base, pBT->uSize, _BTType(pBT->type),
++ pBT->pRef));
++ }
++
++#ifdef HASH_TRACE
++ HASH_Dump(pArena->pSegmentHash);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void *el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *) handlers->data;
++ int off = (int)el;
++
++ switch (off) {
++ case 1:
++ seq_printf(sfile, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 2:
++ seq_printf(sfile, "import_handle\t\t%08X\n",
++ (u32) pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 3:
++ seq_printf(sfile, "span count\t\t%lu\n",
++ pArena->sStatistics.uSpanCount);
++ break;
++ case 4:
++ seq_printf(sfile, "live segment count\t%lu\n",
++ pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 5:
++ seq_printf(sfile, "free segment count\t%lu\n",
++ pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 6:
++ seq_printf(sfile, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (u32) pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 7:
++ seq_printf(sfile, "total allocs\t\t%lu\n",
++ pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 8:
++ seq_printf(sfile, "total frees\t\t%lu\n",
++ pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 9:
++ seq_printf(sfile, "import count\t\t%lu\n",
++ pArena->sStatistics.uImportCount);
++ break;
++ case 10:
++ seq_printf(sfile, "export count\t\t%lu\n",
++ pArena->sStatistics.uExportCount);
++ break;
++#endif
++ }
++
++}
++
++static void *RA_ProcSeqOff2ElementInfo(struct seq_file *sfile, loff_t off)
++{
++#ifdef RA_STATS
++ if (off <= 9)
++#else
++ if (off <= 1)
++#endif
++ return (void *)(int)(off + 1);
++ return 0;
++}
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void *el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *) handlers->data;
++ BT *pBT = (BT *) el;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++ seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n",
++ pArena->name);
++ return;
++ }
++
++ if (pBT) {
++ seq_printf(sfile, "%08x %8x %4s %08x\n",
++ (u32) pBT->base, (u32) pBT->uSize,
++ _BTType(pBT->type), (u32) pBT->psMapping);
++ }
++}
++
++static void *RA_ProcSeqOff2ElementRegs(struct seq_file *sfile, loff_t off)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *) handlers->data;
++ BT *pBT = 0;
++
++ if (off == 0)
++ return PVR_PROC_SEQ_START_TOKEN;
++
++ for (pBT = pArena->pHeadSegment; --off && pBT;
++ pBT = pBT->pNextSegment) ;
++
++ return (void *)pBT;
++}
++
++#else
++static int
++RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof,
++ void *data)
++{
++ BT *pBT = 0;
++ int len = 0;
++ RA_ARENA *pArena = (RA_ARENA *) data;
++
++ if (count < 80) {
++ *start = (char *)0;
++ return (0);
++ }
++ *eof = 0;
++ *start = (char *)1;
++ if (off == 0) {
++ return printAppend(page, count, 0,
++ "Arena \"%s\"\nBase Size Type Ref\n",
++ pArena->name);
++ }
++ for (pBT = pArena->pHeadSegment; --off && pBT;
++ pBT = pBT->pNextSegment) ;
++ if (pBT) {
++ len = printAppend(page, count, 0, "%08x %8x %4s %08x\n",
++ (u32) pBT->base, (u32) pBT->uSize,
++ _BTType(pBT->type), (u32) pBT->psMapping);
++ } else {
++ *eof = 1;
++ }
++ return (len);
++}
++
++static int
++RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof,
++ void *data)
++{
++ int len = 0;
++ RA_ARENA *pArena = (RA_ARENA *) data;
++
++ if (count < 80) {
++ *start = (char *)0;
++ return (0);
++ }
++ *eof = 0;
++ switch (off) {
++ case 0:
++ len =
++ printAppend(page, count, 0, "quantum\t\t\t%lu\n",
++ pArena->uQuantum);
++ break;
++ case 1:
++ len =
++ printAppend(page, count, 0, "import_handle\t\t%08X\n",
++ (u32) pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 2:
++ len =
++ printAppend(page, count, 0, "span count\t\t%lu\n",
++ pArena->sStatistics.uSpanCount);
++ break;
++ case 3:
++ len =
++ printAppend(page, count, 0, "live segment count\t%lu\n",
++ pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 4:
++ len =
++ printAppend(page, count, 0, "free segment count\t%lu\n",
++ pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 5:
++ len =
++ printAppend(page, count, 0,
++ "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (u32) pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 6:
++ len =
++ printAppend(page, count, 0, "total allocs\t\t%lu\n",
++ pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 7:
++ len =
++ printAppend(page, count, 0, "total frees\t\t%lu\n",
++ pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 8:
++ len =
++ printAppend(page, count, 0, "import count\t\t%lu\n",
++ pArena->sStatistics.uImportCount);
++ break;
++ case 9:
++ len =
++ printAppend(page, count, 0, "export count\t\t%lu\n",
++ pArena->sStatistics.uExportCount);
++ break;
++#endif
++
++ default:
++ *eof = 1;
++ }
++ *start = (char *)1;
++ return (len);
++}
++#endif
++#endif
++
++#ifdef RA_STATS
++PVRSRV_ERROR RA_GetStats(RA_ARENA * pArena, char **ppszStr, u32 * pui32StrLen)
++{
++ char *pszStr = *ppszStr;
++ u32 ui32StrLen = *pui32StrLen;
++ s32 i32Count;
++ BT *pBT;
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = snprintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100,
++ " allocCB=%p freeCB=%p handle=%p quantum=%d\n",
++ pArena->pImportAlloc, pArena->pImportFree,
++ pArena->pImportHandle, pArena->uQuantum);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "span count\t\t%u\n",
++ pArena->sStatistics.uSpanCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "live segment count\t%u\n",
++ pArena->sStatistics.uLiveSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "free segment count\t%u\n",
++ pArena->sStatistics.uFreeSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = snprintf(pszStr, 100, "free resource count\t%u (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (u32) pArena->sStatistics.uFreeResourceCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "total allocs\t\t%u\n",
++ pArena->sStatistics.uCumulativeAllocs);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "total frees\t\t%u\n",
++ pArena->sStatistics.uCumulativeFrees);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "import count\t\t%u\n",
++ pArena->sStatistics.uImportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100, "export count\t\t%u\n",
++ pArena->sStatistics.uExportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = snprintf(pszStr, 100, " segment Chain:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ if (pArena->pHeadSegment != NULL &&
++ pArena->pHeadSegment->pPrevSegment != NULL) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100,
++ " error: head boundary tag has invalid pPrevSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (pArena->pTailSegment != NULL &&
++ pArena->pTailSegment->pNextSegment != NULL) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100,
++ " error: tail boundary tag has invalid pNextSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ for (pBT = pArena->pHeadSegment; pBT != NULL; pBT = pBT->pNextSegment) {
++ CHECK_SPACE(ui32StrLen);
++ i32Count =
++ snprintf(pszStr, 100,
++ "\tbase=0x%x size=0x%x type=%s ref=%p\n",
++ (u32) pBT->base, pBT->uSize, _BTType(pBT->type),
++ pBT->psMapping);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ *ppszStr = pszStr;
++ *pui32StrLen = ui32StrLen;
++
++ return PVRSRV_OK;
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/common/resman.c
+@@ -0,0 +1,630 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++
++#ifdef __linux__
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/sched.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#include <linux/hardirq.h>
++#else
++#include <asm/hardirq.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#include <linux/semaphore.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ do { \
++ if (in_interrupt()) { \
++ printk ("ISR cannot take RESMAN mutex\n"); \
++ BUG(); \
++ } \
++ else down (&lock); \
++} while (0)
++#define RELEASE_SYNC_OBJ up (&lock)
++
++#else
++
++#define ACQUIRE_SYNC_OBJ
++#define RELEASE_SYNC_OBJ
++
++#endif
++
++#define RESMAN_SIGNATURE 0x12345678
++
++typedef struct _RESMAN_ITEM_ {
++#ifdef DEBUG
++ u32 ui32Signature;
++#endif
++ struct _RESMAN_ITEM_ **ppsThis;
++ struct _RESMAN_ITEM_ *psNext;
++
++ u32 ui32Flags;
++ u32 ui32ResType;
++
++ void *pvParam;
++ u32 ui32Param;
++
++ RESMAN_FREE_FN pfnFreeResource;
++} RESMAN_ITEM;
++
++typedef struct _RESMAN_CONTEXT_ {
++#ifdef DEBUG
++ u32 ui32Signature;
++#endif
++ struct _RESMAN_CONTEXT_ **ppsThis;
++ struct _RESMAN_CONTEXT_ *psNext;
++
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ RESMAN_ITEM *psResItemList;
++
++} RESMAN_CONTEXT;
++
++typedef struct {
++ RESMAN_CONTEXT *psContextList;
++
++} RESMAN_LIST, *PRESMAN_LIST;
++
++PRESMAN_LIST gpsResList = NULL;
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM)
++static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, int, 0)
++static IMPLEMENT_LIST_INSERT(RESMAN_ITEM)
++static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM)
++
++static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT)
++static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT)
++
++#define PRINT_RESLIST(x, y, z)
++ static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM * psItem,
++ int bExecuteCallback);
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext,
++ u32 ui32SearchCriteria,
++ u32 ui32ResType,
++ void *pvParam,
++ u32 ui32Param, int bExecuteCallback);
++
++#ifdef DEBUG
++static void ValidateResList(PRESMAN_LIST psResList);
++#define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++#define VALIDATERESLIST()
++#endif
++
++PVRSRV_ERROR ResManInit(void)
++{
++ if (gpsResList == NULL) {
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*gpsResList),
++ (void **)&gpsResList, NULL,
++ "Resource Manager List") != PVRSRV_OK) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ gpsResList->psContextList = NULL;
++
++ VALIDATERESLIST();
++ }
++
++ return PVRSRV_OK;
++}
++
++void ResManDeInit(void)
++{
++ if (gpsResList != NULL) {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList),
++ gpsResList, NULL);
++ gpsResList = NULL;
++ }
++}
++
++PVRSRV_ERROR PVRSRVResManConnect(void *hPerProc,
++ PRESMAN_CONTEXT * phResManContext)
++{
++ PVRSRV_ERROR eError;
++ PRESMAN_CONTEXT psResManContext;
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext),
++ (void **)&psResManContext, NULL,
++ "Resource Manager Context");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVResManConnect: ERROR allocating new RESMAN context struct"));
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++ }
++#ifdef DEBUG
++ psResManContext->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psResManContext->psResItemList = NULL;
++ psResManContext->psPerProc = hPerProc;
++
++ List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ *phResManContext = psResManContext;
++
++ return PVRSRV_OK;
++}
++
++void PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext, int bKernelContext)
++{
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ PRINT_RESLIST(gpsResList, psResManContext, 1);
++
++ if (!bKernelContext) {
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0,
++ 1);
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_EVENT_OBJECT, 0, 0, 1);
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, 1);
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0,
++ 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_SHARED_PB_DESC, 0, 0, 1);
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++ 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0,
++ 1);
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, 1);
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0,
++ 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION,
++ 0, 0, 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0,
++ 1);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, 1);
++ }
++
++ PVR_ASSERT(psResManContext->psResItemList == NULL);
++
++ List_RESMAN_CONTEXT_Remove(psResManContext);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT),
++ psResManContext, NULL);
++
++ VALIDATERESLIST();
++
++ PRINT_RESLIST(gpsResList, psResManContext, 0);
++
++ RELEASE_SYNC_OBJ;
++}
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext,
++ u32 ui32ResType,
++ void *pvParam,
++ u32 ui32Param, RESMAN_FREE_FN pfnFreeResource)
++{
++ PRESMAN_ITEM psNewResItem;
++
++ PVR_ASSERT(psResManContext != NULL);
++ PVR_ASSERT(ui32ResType != 0);
++
++ if (psResManContext == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ResManRegisterRes: invalid parameter - psResManContext"));
++ return (PRESMAN_ITEM) NULL;
++ }
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++ "FreeFunc %08X",
++ psResManContext, ui32ResType, (u32) pvParam,
++ ui32Param, pfnFreeResource));
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_ITEM), (void **)&psNewResItem,
++ NULL, "Resource Manager Item") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
++ "ERROR allocating new resource item"));
++
++ RELEASE_SYNC_OBJ;
++
++ return ((PRESMAN_ITEM) NULL);
++ }
++
++#ifdef DEBUG
++ psNewResItem->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psNewResItem->ui32ResType = ui32ResType;
++ psNewResItem->pvParam = pvParam;
++ psNewResItem->ui32Param = ui32Param;
++ psNewResItem->pfnFreeResource = pfnFreeResource;
++ psNewResItem->ui32Flags = 0;
++
++ List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ return (psNewResItem);
++}
++
++PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM * psResItem)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResItem != NULL);
++
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "ResManFreeResByPtr: NULL ptr - nothing to do"));
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "ResManFreeResByPtr: freeing resource at %08X", psResItem));
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ eError = FreeResourceByPtr(psResItem, 1);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ return (eError);
++}
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext,
++ u32 ui32SearchCriteria,
++ u32 ui32ResType,
++ void *pvParam, u32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResManContext != NULL);
++
++ ACQUIRE_SYNC_OBJ;
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++ psResManContext, ui32SearchCriteria, ui32ResType,
++ (u32) pvParam, ui32Param));
++
++ eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
++ ui32ResType, pvParam, ui32Param, 1);
++
++ VALIDATERESLIST();
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++}
++
++PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM * psResItem,
++ PRESMAN_CONTEXT psNewResManContext)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psResItem != NULL);
++
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ResManDissociateRes: invalid parameter - psResItem"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++#ifdef DEBUG
++ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ if (psNewResManContext != NULL) {
++
++ List_RESMAN_ITEM_Remove(psResItem);
++
++ List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList,
++ psResItem);
++
++ } else {
++ eError = FreeResourceByPtr(psResItem, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ResManDissociateRes: failed to free resource by pointer"));
++ return eError;
++ }
++ }
++
++ return eError;
++}
++
++int ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM * psCurItem, va_list va)
++{
++ RESMAN_ITEM *psItem;
++
++ psItem = va_arg(va, RESMAN_ITEM *);
++
++ return (int)(psCurItem == psItem);
++}
++
++/* FIXME MLD IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT*/
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT
++ psResManContext,
++ RESMAN_ITEM * psItem)
++{
++ PVRSRV_ERROR eResult;
++
++ PVR_ASSERT(psResManContext != NULL);
++ PVR_ASSERT(psItem != NULL);
++
++ if ((psItem == NULL) || (psResManContext == NULL)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "ResManFindResourceByPtr: invalid parameter"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ ACQUIRE_SYNC_OBJ;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psResManContext,
++ psItem->ui32ResType, (u32) psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++ if (List_RESMAN_ITEM_int_Any_va(psResManContext->psResItemList,
++ ResManFindResourceByPtr_AnyVaCb,
++ psItem)) {
++ eResult = PVRSRV_OK;
++ } else {
++ eResult = PVRSRV_ERROR_NOT_OWNER;
++ }
++
++ RELEASE_SYNC_OBJ;
++
++ return eResult;
++}
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM * psItem,
++ int bExecuteCallback)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psItem != NULL);
++
++ if (psItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeResourceByPtr: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psItem->ui32ResType, (u32) psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++ List_RESMAN_ITEM_Remove(psItem);
++
++ RELEASE_SYNC_OBJ;
++
++ if (bExecuteCallback) {
++ eError =
++ psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeResourceByPtr: ERROR calling FreeResource function"));
++ }
++ }
++
++ ACQUIRE_SYNC_OBJ;
++
++ if (OSFreeMem
++ (PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem,
++ NULL) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "FreeResourceByPtr: ERROR freeing resource list item memory"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return (eError);
++}
++
++void *FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM * psCurItem, va_list va)
++{
++ u32 ui32SearchCriteria;
++ u32 ui32ResType;
++ void *pvParam;
++ u32 ui32Param;
++
++ ui32SearchCriteria = va_arg(va, u32);
++ ui32ResType = va_arg(va, u32);
++ pvParam = va_arg(va, void *);
++ ui32Param = va_arg(va, u32);
++
++ if ((((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) ||
++ (psCurItem->ui32ResType == ui32ResType))
++ &&
++ (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) ||
++ (psCurItem->pvParam == pvParam))
++ &&
++ (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) ||
++ (psCurItem->ui32Param == ui32Param))
++ ) {
++ return psCurItem;
++ } else {
++ return NULL;
++ }
++}
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext,
++ u32 ui32SearchCriteria,
++ u32 ui32ResType,
++ void *pvParam,
++ u32 ui32Param, int bExecuteCallback)
++{
++ PRESMAN_ITEM psCurItem;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ while ((psCurItem = (PRESMAN_ITEM)
++ List_RESMAN_ITEM_Any_va(psResManContext->psResItemList,
++ FreeResourceByCriteria_AnyVaCb,
++ ui32SearchCriteria,
++ ui32ResType,
++ pvParam,
++ ui32Param)) != NULL
++ && eError == PVRSRV_OK) {
++ eError = FreeResourceByPtr(psCurItem, bExecuteCallback);
++ }
++
++ return eError;
++}
++
++#ifdef DEBUG
++static void ValidateResList(PRESMAN_LIST psResList)
++{
++ PRESMAN_ITEM psCurItem, *ppsThisItem;
++ PRESMAN_CONTEXT psCurContext, *ppsThisContext;
++
++ if (psResList == NULL) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "ValidateResList: resman not initialised yet"));
++ return;
++ }
++
++ psCurContext = psResList->psContextList;
++ ppsThisContext = &psResList->psContextList;
++
++ while (psCurContext != NULL) {
++
++ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurContext->ppsThis != ppsThisContext) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X",
++ psCurContext, psCurContext->ppsThis,
++ psCurContext->psNext, ppsThisContext));
++ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
++ }
++
++ psCurItem = psCurContext->psResItemList;
++ ppsThisItem = &psCurContext->psResItemList;
++ while (psCurItem != NULL) {
++
++ PVR_ASSERT(psCurItem->ui32Signature ==
++ RESMAN_SIGNATURE);
++ if (psCurItem->ppsThis != ppsThisItem) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
++ psCurItem, psCurItem->ppsThis,
++ psCurItem->psNext, ppsThisItem));
++ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++ }
++
++ ppsThisItem = &psCurItem->psNext;
++ psCurItem = psCurItem->psNext;
++ }
++
++ ppsThisContext = &psCurContext->psNext;
++ psCurContext = psCurContext->psNext;
++ }
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/mmu.c
+@@ -0,0 +1,2501 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++#include "sgxconfig.h"
++
++#define UINT32_MAX_VALUE 0xFFFFFFFFUL
++
++#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
++
++typedef struct _MMU_PT_INFO_ {
++
++ void *hPTPageOSMemHandle;
++ IMG_CPU_VIRTADDR PTPageCpuVAddr;
++ u32 ui32ValidPTECount;
++} MMU_PT_INFO;
++
++struct _MMU_CONTEXT_ {
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ void *hPDOSMemHandle;
++
++ MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
++
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++#if defined(PDUMP)
++ u32 ui32PDumpMMUContextID;
++#endif
++
++ struct _MMU_CONTEXT_ *psNext;
++};
++
++struct _MMU_HEAP_ {
++
++ MMU_CONTEXT *psMMUContext;
++
++ u32 ui32PDBaseIndex;
++
++ u32 ui32PageTableCount;
++
++ u32 ui32PTETotal;
++
++ u32 ui32PDEPageSizeCtrl;
++
++ u32 ui32DataPageSize;
++
++ u32 ui32DataPageBitWidth;
++
++ u32 ui32DataPageMask;
++
++ u32 ui32PTShift;
++
++ u32 ui32PTBitWidth;
++
++ u32 ui32PTMask;
++
++ u32 ui32PTSize;
++
++ u32 ui32PTECount;
++
++ u32 ui32PDShift;
++
++ u32 ui32PDBitWidth;
++
++ u32 ui32PDMask;
++
++ RA_ARENA *psVMArena;
++ DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
++#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
++#endif
++
++#if defined(PDUMP)
++static void
++MMU_PDumpPageTables(MMU_HEAP * pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ u32 uSize, int bForUnmap, void *hUniqueTag);
++#endif
++
++#define PAGE_TEST 0
++#if PAGE_TEST
++static void PageTest(void *pMem, IMG_DEV_PHYADDR sDevPAddr);
++#endif
++
++#define PT_DEBUG 0
++#if PT_DEBUG
++static void DumpPT(MMU_PT_INFO * psPTInfoList)
++{
++ u32 *p = (u32 *) psPTInfoList->PTPageCpuVAddr;
++ u32 i;
++
++ for (i = 0; i < 1024; i += 8) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx\n",
++ p[i + 0], p[i + 1], p[i + 2], p[i + 3],
++ p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
++ }
++}
++
++static void CheckPT(MMU_PT_INFO * psPTInfoList)
++{
++ u32 *p = (u32 *) psPTInfoList->PTPageCpuVAddr;
++ u32 i, ui32Count = 0;
++
++ for (i = 0; i < 1024; i++)
++ if (p[i] & SGX_MMU_PTE_VALID)
++ ui32Count++;
++
++ if (psPTInfoList->ui32ValidPTECount != ui32Count) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "ui32ValidPTECount: %lu ui32Count: %lu\n",
++ psPTInfoList->ui32ValidPTECount, ui32Count));
++ DumpPT(psPTInfoList);
++ BUG();
++ }
++}
++#else
++/* FIXME MLD compiler warning temporary fix */
++/* static void DumpPT(MMU_PT_INFO * psPTInfoList)
++{
++}
++*/
++static void CheckPT(MMU_PT_INFO * psPTInfoList)
++{
++}
++#endif
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++void EnableHostAccess(MMU_CONTEXT * psMMUContext)
++{
++ u32 ui32RegVal;
++ void *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++}
++
++void DisableHostAccess(MMU_CONTEXT * psMMUContext)
++{
++	void *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++	u32 ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++	OSWriteHWReg(pvRegsBaseKM,
++		     EUR_CR_BIF_CTRL,
++		     ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++	PDUMPREG(EUR_CR_BIF_CTRL, 0);
++}
++#endif
++
++void MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++#if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_SLCACHE;
++#endif
++}
++
++void MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(psDevInfo);
++#endif
++}
++
++void MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(psDevInfo);
++#endif
++}
++
++static int
++_AllocPageTableMemory(MMU_HEAP * pMMUHeap,
++ MMU_PT_INFO * psPTInfoList, IMG_DEV_PHYADDR * psDevPAddr)
++{
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena ==
++ NULL) {
++
++ if (OSAllocPages
++ (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize, SGX_MMU_PAGE_SIZE,
++ (void **)&psPTInfoList->PTPageCpuVAddr,
++ &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
++ return 0;
++ }
++
++ if (psPTInfoList->PTPageCpuVAddr) {
++ sCpuPAddr =
++ OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ } else {
++
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(psPTInfoList->
++ hPTPageOSMemHandle, 0);
++ }
++
++ sDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ } else {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if (RA_Alloc
++ (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
++ psLocalDevMemArena, SGX_MMU_PAGE_SIZE, NULL, NULL, 0,
++ SGX_MMU_PAGE_SIZE, 0, &(sSysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
++ return 0;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++
++ psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE
++ |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &psPTInfoList->
++ hPTPageOSMemHandle);
++ if (!psPTInfoList->PTPageCpuVAddr) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_AllocPageTableMemory: ERROR failed to map page tables"));
++ return 0;
++ }
++
++ sDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++#if PAGE_TEST
++ PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
++#endif
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ {
++ u32 *pui32Tmp;
++ u32 i;
++
++ pui32Tmp = (u32 *) psPTInfoList->PTPageCpuVAddr;
++
++ for (i = 0; i < pMMUHeap->ui32PTECount; i++) {
++ pui32Tmp[i] =
++ (pMMUHeap->psMMUContext->psDevInfo->
++ sDummyDataDevPAddr.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++ }
++#else
++
++ memset(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
++#endif
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize,
++ PDUMP_PT_UNIQUETAG);
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr,
++ pMMUHeap->ui32PTSize, 0, 1, PDUMP_PT_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++
++ *psDevPAddr = sDevPAddr;
++
++ return 1;
++}
++
++static void
++_FreePageTableMemory(MMU_HEAP * pMMUHeap, MMU_PT_INFO * psPTInfoList)
++{
++
++ if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena ==
++ NULL) {
++
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ psPTInfoList->PTPageCpuVAddr,
++ psPTInfoList->hPTPageOSMemHandle);
++ } else {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++ OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psPTInfoList->hPTPageOSMemHandle);
++
++ RA_Free(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
++ psLocalDevMemArena, sSysPAddr.uiAddr, 0);
++ }
++}
++
++static void
++_DeferredFreePageTable(MMU_HEAP * pMMUHeap, u32 ui32PTIndex, int bOSFreePT)
++{
++ u32 *pui32PDEntry;
++ u32 i;
++ u32 ui32PDIndex;
++ SYS_DATA *psSysData;
++ MMU_PT_INFO **ppsPTInfoList;
++
++ SysAcquireData(&psSysData);
++
++ ui32PDIndex =
++ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++#if PT_DEBUG
++ if (ppsPTInfoList[ui32PTIndex]
++ && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0) {
++ DumpPT(ppsPTInfoList[ui32PTIndex]);
++
++ }
++#endif
++
++ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL
++ || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount ==
++ 0);
++ }
++
++ PDUMPCOMMENT("Free page table (page count == %08X)",
++ pMMUHeap->ui32PageTableCount);
++ if (ppsPTInfoList[ui32PTIndex]
++ && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr) {
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
++ pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++ }
++
++ switch (pMMUHeap->psDevArena->DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ MMU_CONTEXT *psMMUContext =
++ (MMU_CONTEXT *) pMMUHeap->psMMUContext->psDevInfo->
++ pvMMUContextList;
++
++ while (psMMUContext) {
++
++ pui32PDEntry =
++ (u32 *) psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] =
++ (psMMUContext->psDevInfo->sDummyPTDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K |
++ SGX_MMU_PDE_VALID;
++#else
++
++ if (bOSFreePT) {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDEntry[ui32PTIndex],
++ sizeof(u32), 0, 0, PDUMP_PT_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ {
++
++ pui32PDEntry =
++ (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] =
++ (pMMUHeap->psMMUContext->psDevInfo->
++ sDummyPTDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID;
++#else
++
++ if (bOSFreePT) {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDEntry[ui32PTIndex],
++ sizeof(u32), 0, 0, PDUMP_PD_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_DeferredFreePagetable: ERROR invalid heap type"));
++ return;
++ }
++ }
++
++ if (ppsPTInfoList[ui32PTIndex] != NULL) {
++ if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
++ u32 *pui32Tmp;
++
++ pui32Tmp =
++ (u32 *) ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
++
++ for (i = 0;
++ (i < pMMUHeap->ui32PTETotal)
++ && (i < pMMUHeap->ui32PTECount); i++) {
++ pui32Tmp[i] = 0;
++ }
++
++ if (bOSFreePT) {
++ _FreePageTableMemory(pMMUHeap,
++ ppsPTInfoList
++ [ui32PTIndex]);
++ }
++
++ pMMUHeap->ui32PTETotal -= i;
++ } else {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ if (bOSFreePT) {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_PT_INFO),
++ ppsPTInfoList[ui32PTIndex], NULL);
++ ppsPTInfoList[ui32PTIndex] = NULL;
++ }
++ } else {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ PDUMPCOMMENT("Finished free page table (page count == %08X)",
++ pMMUHeap->ui32PageTableCount);
++}
++
++static void _DeferredFreePageTables(MMU_HEAP * pMMUHeap)
++{
++ u32 i;
++
++ for (i = 0; i < pMMUHeap->ui32PageTableCount; i++) {
++ _DeferredFreePageTable(pMMUHeap, i, 1);
++ }
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++static int
++_DeferredAllocPagetables(MMU_HEAP * pMMUHeap, IMG_DEV_VIRTADDR DevVAddr,
++ u32 ui32Size)
++{
++ u32 ui32PageTableCount;
++ u32 ui32PDIndex;
++ u32 i;
++ u32 *pui32PDEntry;
++ MMU_PT_INFO **ppsPTInfoList;
++ SYS_DATA *psSysData;
++ IMG_DEV_VIRTADDR sHighDevVAddr;
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
++ PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));
++#endif
++
++ SysAcquireData(&psSysData);
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ if ((UINT32_MAX_VALUE - DevVAddr.uiAddr)
++ < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask)) {
++
++ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
++ } else {
++ sHighDevVAddr.uiAddr = DevVAddr.uiAddr
++ + ui32Size
++ + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask;
++ }
++
++ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ui32PageTableCount -= ui32PDIndex;
++
++ pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ PDUMPCOMMENT("Alloc page table (page count == %08X)",
++ ui32PageTableCount);
++ PDUMPCOMMENT("Page directory mods (page count == %08X)",
++ ui32PageTableCount);
++
++ for (i = 0; i < ui32PageTableCount; i++) {
++ if (ppsPTInfoList[i] == NULL) {
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_PT_INFO),
++ (void **)&ppsPTInfoList[i], NULL,
++ "MMU Page Table Info");
++ if (ppsPTInfoList[i] == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
++ return 0;
++ }
++ memset(ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
++ }
++
++ if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL
++ && ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
++ IMG_DEV_PHYADDR sDevPAddr;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ u32 *pui32Tmp;
++ u32 j;
++#else
++
++ PVR_ASSERT(pui32PDEntry[i] == 0);
++#endif
++
++ if (_AllocPageTableMemory
++ (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
++ return 0;
++ }
++
++ switch (pMMUHeap->psDevArena->DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ MMU_CONTEXT *psMMUContext =
++ (MMU_CONTEXT *) pMMUHeap->
++ psMMUContext->psDevInfo->
++ pvMMUContextList;
++
++ while (psMMUContext) {
++
++ pui32PDEntry =
++ (u32 *) psMMUContext->
++ pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++ pui32PDEntry[i] =
++ (sDevPAddr.
++ uiAddr >>
++ SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->
++ ui32PDEPageSizeCtrl |
++ SGX_MMU_PDE_VALID;
++
++ PDUMPMEM2
++ (PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDEntry[i],
++ sizeof(u32), 0, 0,
++ PDUMP_PD_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++
++ psMMUContext =
++ psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ {
++
++ pui32PDEntry[i] =
++ (sDevPAddr.
++ uiAddr >>
++ SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->
++ ui32PDEPageSizeCtrl |
++ SGX_MMU_PDE_VALID;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDEntry[i],
++ sizeof(u32), 0, 0,
++ PDUMP_PD_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "_DeferredAllocPagetables: ERROR invalid heap type"));
++ return 0;
++ }
++ }
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
++ psDevInfo);
++#endif
++ } else {
++
++ PVR_ASSERT(pui32PDEntry[i] != 0);
++ }
++ }
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
++#endif
++
++ return 1;
++}
++
++PVRSRV_ERROR
++MMU_Initialise(PVRSRV_DEVICE_NODE * psDeviceNode, MMU_CONTEXT ** ppsMMUContext,
++ IMG_DEV_PHYADDR * psPDDevPAddr)
++{
++ u32 *pui32Tmp;
++ u32 i;
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ MMU_CONTEXT *psMMUContext;
++ void *hPDOSMemHandle;
++ SYS_DATA *psSysData;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Initialise"));
++
++ SysAcquireData(&psSysData);
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_CONTEXT),
++ (void **)&psMMUContext, NULL, "MMU Context");
++ if (psMMUContext == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to OSAllocMem failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ memset(psMMUContext, 0, sizeof(MMU_CONTEXT));
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ psMMUContext->psDevInfo = psDevInfo;
++
++ psMMUContext->psDeviceNode = psDeviceNode;
++
++ if (psDeviceNode->psLocalDevMemArena == NULL) {
++ if (OSAllocPages
++ (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
++ &hPDOSMemHandle) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (pvPDCpuVAddr) {
++ sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++ } else {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++ }
++ sPDDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++#if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++#endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (!psDevInfo->pvMMUContextList) {
++
++ if (OSAllocPages
++ (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyPTPageCpuVAddr,
++ &psDevInfo->hDummyPTPageOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psDevInfo->pvDummyPTPageCpuVAddr) {
++ sCpuPAddr =
++ OSMapLinToCPUPhys(psDevInfo->
++ pvDummyPTPageCpuVAddr);
++ } else {
++
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(psDevInfo->
++ hDummyPTPageOSMemHandle,
++ 0);
++ }
++ psDevInfo->sDummyPTDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ sCpuPAddr);
++
++ if (OSAllocPages
++ (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyDataPageCpuVAddr,
++ &psDevInfo->hDummyDataPageOSMemHandle) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psDevInfo->pvDummyDataPageCpuVAddr) {
++ sCpuPAddr =
++ OSMapLinToCPUPhys(psDevInfo->
++ pvDummyDataPageCpuVAddr);
++ } else {
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(psDevInfo->
++ hDummyDataPageOSMemHandle,
++ 0);
++ }
++ psDevInfo->sDummyDataDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ sCpuPAddr);
++ }
++#endif
++ } else {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ NULL,
++ NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE, 0, &(sSysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ sPDDevPAddr =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ pvPDCpuVAddr =
++ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
++ if (!pvPDCpuVAddr) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++#endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (!psDevInfo->pvMMUContextList) {
++
++ if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ NULL,
++ NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0, &(sSysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyPTDevPAddr =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ sSysPAddr);
++ psDevInfo->pvDummyPTPageCpuVAddr =
++ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyPTPageOSMemHandle);
++ if (!psDevInfo->pvDummyPTPageCpuVAddr) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ NULL,
++ NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0, &(sSysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyDataDevPAddr =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ sSysPAddr);
++ psDevInfo->pvDummyDataPageCpuVAddr =
++ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->
++ hDummyDataPageOSMemHandle);
++ if (!psDevInfo->pvDummyDataPageCpuVAddr) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++#endif
++ }
++
++ PDUMPCOMMENT("Alloc page directory");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++ if (pvPDCpuVAddr) {
++ pui32Tmp = (u32 *) pvPDCpuVAddr;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: pvPDCpuVAddr invalid"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ for (i = 0; i < SGX_MMU_PD_SIZE; i++) {
++ pui32Tmp[i] =
++ (psDevInfo->sDummyPTDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID;
++ }
++
++ if (!psDevInfo->pvMMUContextList) {
++
++ pui32Tmp = (u32 *) psDevInfo->pvDummyPTPageCpuVAddr;
++ for (i = 0; i < SGX_MMU_PT_SIZE; i++) {
++ pui32Tmp[i] =
++ (psDevInfo->sDummyDataDevPAddr.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++
++ PDUMPCOMMENT("Dummy Page table contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE,
++ 0, 1, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++ pui32Tmp = (u32 *) psDevInfo->pvDummyDataPageCpuVAddr;
++ for (i = 0; i < (SGX_MMU_PAGE_SIZE / 4); i++) {
++ pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
++ }
++
++ PDUMPCOMMENT("Dummy Data Page contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE,
++ 0, 1, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ }
++#else
++
++#if defined(INTEL_D3_P_CHANGES)
++ if (pui32Tmp) {
++#endif
++ for (i = 0; i < SGX_MMU_PD_SIZE; i++) {
++
++ pui32Tmp[i] = 0;
++ }
++#if defined(INTEL_D3_P_CHANGES)
++ }
++#endif
++#endif
++
++ PDUMPCOMMENT("Page directory contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++#if defined(PDUMP)
++ if (PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
++ "SGXMEM",
++ &psMMUContext->ui32PDumpMMUContextID,
++ 2,
++ PDUMP_PT_UNIQUETAG, pvPDCpuVAddr) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++ psMMUContext->sPDDevPAddr = sPDDevPAddr;
++ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++ *ppsMMUContext = psMMUContext;
++
++ *psPDDevPAddr = sPDDevPAddr;
++
++ psMMUContext->psNext = (MMU_CONTEXT *) psDevInfo->pvMMUContextList;
++ psDevInfo->pvMMUContextList = (void *)psMMUContext;
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++ return PVRSRV_OK;
++}
++
++void MMU_Finalise(MMU_CONTEXT * psMMUContext)
++{
++ u32 *pui32Tmp, i;
++ SYS_DATA *psSysData;
++ MMU_CONTEXT **ppsMMUContext;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psMMUContext->psDevInfo;
++ MMU_CONTEXT *psMMUContextList =
++ (MMU_CONTEXT *) psDevInfo->pvMMUContextList;
++#endif
++
++ SysAcquireData(&psSysData);
++
++ PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, "SGXMEM",
++ psMMUContext->ui32PDumpMMUContextID, 2);
++
++ PDUMPCOMMENT("Free page directory");
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE,
++ PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->pvDummyDataPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#endif
++
++ pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
++
++ for (i = 0; i < SGX_MMU_PD_SIZE; i++) {
++
++ pui32Tmp[i] = 0;
++ }
++
++ if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psMMUContext->pvPDCpuVAddr,
++ psMMUContext->hPDOSMemHandle);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (!psMMUContextList->psNext) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyPTPageCpuVAddr,
++ psDevInfo->hDummyPTPageOSMemHandle);
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyDataPageCpuVAddr,
++ psDevInfo->hDummyDataPageOSMemHandle);
++ }
++#endif
++ } else {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psMMUContext->hPDOSMemHandle);
++
++ RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
++ sSysPAddr.uiAddr, 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (!psMMUContextList->psNext) {
++
++ sCpuPAddr =
++ OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyPTPageOSMemHandle);
++
++ RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
++ sSysPAddr.uiAddr, 0);
++
++ sCpuPAddr =
++ OSMapLinToCPUPhys(psDevInfo->
++ pvDummyDataPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyDataPageOSMemHandle);
++
++ RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
++ sSysPAddr.uiAddr, 0);
++ }
++#endif
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Finalise"));
++
++ ppsMMUContext =
++ (MMU_CONTEXT **) & psMMUContext->psDevInfo->pvMMUContextList;
++ while (*ppsMMUContext) {
++ if (*ppsMMUContext == psMMUContext) {
++
++ *ppsMMUContext = psMMUContext->psNext;
++ break;
++ }
++
++ ppsMMUContext = &((*ppsMMUContext)->psNext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext,
++ NULL);
++
++}
++
++void MMU_InsertHeap(MMU_CONTEXT * psMMUContext, MMU_HEAP * psMMUHeap)
++{
++ u32 *pui32PDCpuVAddr = (u32 *) psMMUContext->pvPDCpuVAddr;
++ u32 *pui32KernelPDCpuVAddr =
++ (u32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
++ u32 ui32PDEntry;
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ int bInvalidateDirectoryCache = 0;
++#endif
++
++ pui32PDCpuVAddr +=
++ psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->
++ ui32PDShift;
++ pui32KernelPDCpuVAddr +=
++ psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->
++ ui32PDShift;
++
++ PDUMPCOMMENT("Page directory shared heap range copy");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount;
++ ui32PDEntry++) {
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++#endif
++
++ pui32PDCpuVAddr[ui32PDEntry] =
++ pui32KernelPDCpuVAddr[ui32PDEntry];
++ if (pui32PDCpuVAddr[ui32PDEntry]) {
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PDCpuVAddr[ui32PDEntry],
++ sizeof(u32), 0, 0, PDUMP_PD_UNIQUETAG,
++ PDUMP_PT_UNIQUETAG);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ bInvalidateDirectoryCache = 1;
++#endif
++ }
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ if (bInvalidateDirectoryCache) {
++
++ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++ }
++#endif
++}
++
++static void
++MMU_UnmapPagesAndFreePTs(MMU_HEAP * psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ u32 ui32PageCount, void *hUniqueTag)
++{
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ u32 i;
++ u32 ui32PDIndex;
++ u32 ui32PTIndex;
++ u32 *pui32Tmp;
++ int bInvalidateDirectoryCache = 0;
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for (i = 0; i < ui32PageCount; i++) {
++ MMU_PT_INFO **ppsPTInfoList;
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++ ppsPTInfoList =
++ &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++
++ ui32PTIndex =
++ (sTmpDevVAddr.uiAddr & psMMUHeap->
++ ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++ if (!ppsPTInfoList[0]) {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
++ i, ui32PDIndex, ui32PTIndex));
++
++ sTmpDevVAddr.uiAddr +=
++ psMMUHeap->ui32DataPageSize;
++
++ continue;
++ }
++
++ pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ if (!pui32Tmp) {
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
++ i, ui32PDIndex, ui32PTIndex));
++ }
++
++ PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >=
++ 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] =
++ (psMMUHeap->psMMUContext->psDevInfo->
++ sDummyDataDevPAddr.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++ }
++
++ if (ppsPTInfoList[0]
++ && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
++ _DeferredFreePageTable(psMMUHeap,
++ ui32PDIndex -
++ psMMUHeap->ui32PDBaseIndex, 1);
++ bInvalidateDirectoryCache = 1;
++ }
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++ }
++
++ if (bInvalidateDirectoryCache) {
++ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->
++ psDevInfo);
++ } else {
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->
++ psDevInfo);
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap,
++ sDevVAddr,
++ psMMUHeap->ui32DataPageSize * ui32PageCount,
++ 1, hUniqueTag);
++#endif
++}
++
++void MMU_FreePageTables(void *pvMMUHeap,
++ u32 ui32Start, u32 ui32End, void *hUniqueTag)
++{
++ MMU_HEAP *pMMUHeap = (MMU_HEAP *) pvMMUHeap;
++ IMG_DEV_VIRTADDR Start;
++
++ Start.uiAddr = ui32Start;
++
++ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start,
++ (ui32End - ui32Start) >> pMMUHeap->ui32PTShift,
++ hUniqueTag);
++}
++
++MMU_HEAP *MMU_Create(MMU_CONTEXT * psMMUContext,
++ DEV_ARENA_DESCRIPTOR * psDevArena, RA_ARENA ** ppsVMArena)
++{
++ MMU_HEAP *pMMUHeap;
++ u32 ui32ScaleSize;
++
++ PVR_ASSERT(psDevArena != NULL);
++
++ if (psDevArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
++ return NULL;
++ }
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_HEAP), (void **)&pMMUHeap, NULL, "MMU Heap");
++ if (pMMUHeap == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Create: ERROR call to OSAllocMem failed"));
++ return NULL;
++ }
++
++ pMMUHeap->psMMUContext = psMMUContext;
++ pMMUHeap->psDevArena = psDevArena;
++
++ switch (pMMUHeap->psDevArena->ui32DataPageSize) {
++ case 0x1000:
++ ui32ScaleSize = 0;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
++ break;
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ case 0x4000:
++ ui32ScaleSize = 2;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
++ break;
++ case 0x10000:
++ ui32ScaleSize = 4;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
++ break;
++ case 0x40000:
++ ui32ScaleSize = 6;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
++ break;
++ case 0x100000:
++ ui32ScaleSize = 8;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
++ break;
++ case 0x400000:
++ ui32ScaleSize = 10;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
++ break;
++#endif
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
++ goto ErrorFreeHeap;
++ }
++
++ pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
++ pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
++ pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
++
++ pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
++ pMMUHeap->ui32PTMask =
++ SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK << ui32ScaleSize);
++ pMMUHeap->ui32PTSize = (1UL << pMMUHeap->ui32PTBitWidth) * sizeof(u32);
++
++ if (pMMUHeap->ui32PTSize < 4 * sizeof(u32)) {
++ pMMUHeap->ui32PTSize = 4 * sizeof(u32);
++ }
++ pMMUHeap->ui32PTECount = pMMUHeap->ui32PTSize >> 2;
++
++ pMMUHeap->ui32PDShift =
++ pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
++ pMMUHeap->ui32PDBitWidth =
++ SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth -
++ pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PDMask =
++ SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK >>
++ (32 - SGX_FEATURE_ADDRESS_SPACE_SIZE));
++
++ if (psDevArena->BaseDevVAddr.uiAddr >
++ (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask)) {
++
++ PVR_ASSERT((psDevArena->BaseDevVAddr.uiAddr
++ & (pMMUHeap->ui32DataPageMask
++ | pMMUHeap->ui32PTMask)) == 0);
++ }
++
++ pMMUHeap->ui32PTETotal =
++ pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
++
++ pMMUHeap->ui32PDBaseIndex =
++ (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->
++ ui32PDMask) >> pMMUHeap->ui32PDShift;
++
++ pMMUHeap->ui32PageTableCount =
++ (pMMUHeap->ui32PTETotal + pMMUHeap->ui32PTECount - 1)
++ >> pMMUHeap->ui32PTBitWidth;
++
++ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++ psDevArena->BaseDevVAddr.uiAddr,
++ psDevArena->ui32Size,
++ NULL,
++ pMMUHeap->ui32DataPageSize,
++ NULL,
++ NULL, MMU_FreePageTables, pMMUHeap);
++
++ if (pMMUHeap->psVMArena == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Create: ERROR call to RA_Create failed"));
++ goto ErrorFreePagetables;
++ }
++
++ *ppsVMArena = pMMUHeap->psVMArena;
++
++ return pMMUHeap;
++
++ErrorFreePagetables:
++ _DeferredFreePageTables(pMMUHeap);
++
++ErrorFreeHeap:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, NULL);
++
++ return NULL;
++}
++
++void MMU_Delete(MMU_HEAP * pMMUHeap)
++{
++ if (pMMUHeap != NULL) {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Delete"));
++
++ if (pMMUHeap->psVMArena) {
++ RA_Delete(pMMUHeap->psVMArena);
++ }
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++ _DeferredFreePageTables(pMMUHeap);
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap,
++ NULL);
++
++ }
++}
++
++int
++MMU_Alloc(MMU_HEAP * pMMUHeap,
++ u32 uSize,
++ u32 * pActualSize,
++ u32 uFlags, u32 uDevVAddrAlignment, IMG_DEV_VIRTADDR * psDevVAddr)
++{
++ int bStatus;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++ uSize, uFlags, uDevVAddrAlignment));
++
++ if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
++ u32 uiAddr;
++
++ bStatus = RA_Alloc(pMMUHeap->psVMArena,
++ uSize,
++ pActualSize,
++ NULL, 0, uDevVAddrAlignment, 0, &uiAddr);
++ if (!bStatus) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Alloc: RA_Alloc of VMArena failed"));
++ return bStatus;
++ }
++
++ psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
++ }
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++ if (!bStatus) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Alloc: _DeferredAllocPagetables failed"));
++ if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) {
++
++ RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr, 0);
++ }
++ }
++
++ return bStatus;
++}
++
++void MMU_Free(MMU_HEAP * pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
++{
++ PVR_ASSERT(pMMUHeap != NULL);
++
++ if (pMMUHeap == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
++ return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap,
++ DevVAddr.uiAddr));
++
++ if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
++ (DevVAddr.uiAddr + ui32Size <=
++ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr +
++ pMMUHeap->psDevArena->ui32Size)) {
++ RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, 1);
++ return;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_Free: Couldn't find DevVAddr %08X in a DevArena",
++ DevVAddr.uiAddr));
++}
++
++void MMU_Enable(MMU_HEAP * pMMUHeap)
++{
++
++}
++
++void MMU_Disable(MMU_HEAP * pMMUHeap)
++{
++
++}
++
++#if defined(PDUMP)
++static void
++MMU_PDumpPageTables(MMU_HEAP * pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ u32 uSize, int bForUnmap, void *hUniqueTag)
++{
++ u32 ui32NumPTEntries;
++ u32 ui32PTIndex;
++ u32 *pui32PTEntry;
++
++ MMU_PT_INFO **ppsPTInfoList;
++ u32 ui32PDIndex;
++ u32 ui32PTDumpCount;
++
++ ui32NumPTEntries =
++ (uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift;
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ ui32PTIndex =
++ (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++ PDUMPCOMMENT("Page table mods (num entries == %08X) %s",
++ ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++ while (ui32NumPTEntries > 0) {
++ MMU_PT_INFO *psPTInfo = *ppsPTInfoList++;
++
++ if (ui32NumPTEntries <= pMMUHeap->ui32PTECount - ui32PTIndex) {
++ ui32PTDumpCount = ui32NumPTEntries;
++ } else {
++ ui32PTDumpCount = pMMUHeap->ui32PTECount - ui32PTIndex;
++ }
++
++ if (psPTInfo) {
++ pui32PTEntry = (u32 *) psPTInfo->PTPageCpuVAddr;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX,
++ (void *)&pui32PTEntry[ui32PTIndex],
++ ui32PTDumpCount * sizeof(u32), 0, 0,
++ PDUMP_PT_UNIQUETAG, hUniqueTag);
++ }
++
++ ui32NumPTEntries -= ui32PTDumpCount;
++
++ ui32PTIndex = 0;
++ }
++
++ PDUMPCOMMENT("Finished page table mods %s",
++ bForUnmap ? "(for unmap)" : "");
++}
++#endif
++
++static void
++MMU_MapPage(MMU_HEAP * pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_DEV_PHYADDR DevPAddr, u32 ui32MemFlags)
++{
++ u32 ui32Index;
++ u32 *pui32Tmp;
++ u32 ui32MMUFlags = 0;
++ MMU_PT_INFO **ppsPTInfoList;
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) ==
++ (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE)) {
++
++ ui32MMUFlags = 0;
++ } else if (PVRSRV_MEM_READ & ui32MemFlags) {
++
++ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++ } else if (PVRSRV_MEM_WRITE & ui32MemFlags) {
++
++ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++ }
++
++ if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags) {
++ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++ }
++#if !defined(FIX_HW_BRN_25503)
++
++ if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags) {
++ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++ }
++#endif
++
++ ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++ CheckPT(ppsPTInfoList[0]);
++
++ ui32Index =
++ (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++ pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
++
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u",
++ DevVAddr.uiAddr,
++ DevVAddr.uiAddr >> pMMUHeap->ui32PDShift, ui32Index));
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_MapPage: Page table entry value: 0x%08lX",
++ pui32Tmp[ui32Index]));
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_MapPage: Physical page to map: 0x%08lX",
++ DevPAddr.uiAddr));
++ }
++
++ PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++#endif
++
++ ppsPTInfoList[0]->ui32ValidPTECount++;
++
++ pui32Tmp[ui32Index] = ((DevPAddr.uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ & ((~pMMUHeap->ui32DataPageMask) >>
++ SGX_MMU_PTE_ADDR_ALIGNSHIFT))
++ | SGX_MMU_PTE_VALID | ui32MMUFlags;
++
++ CheckPT(ppsPTInfoList[0]);
++}
++
++void
++MMU_MapScatter(MMU_HEAP * pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR * psSysAddr,
++ u32 uSize, u32 ui32MemFlags, void *hUniqueTag)
++{
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ u32 uCount, i;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_ASSERT(pMMUHeap != NULL);
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#endif
++
++ for (i = 0, uCount = 0; uCount < uSize;
++ i++, uCount += pMMUHeap->ui32DataPageSize) {
++ IMG_SYS_PHYADDR sSysAddr;
++
++ sSysAddr = psSysAddr[i];
++
++ PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ DevPAddr =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++ MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, 0, hUniqueTag);
++#endif
++}
++
++void
++MMU_MapPages(MMU_HEAP * pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ u32 uSize, u32 ui32MemFlags, void *hUniqueTag)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ u32 uCount;
++ u32 ui32VAdvance;
++ u32 ui32PAdvance;
++
++ PVR_ASSERT(pMMUHeap != NULL);
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++ pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize));
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#endif
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++#if defined(FIX_HW_BRN_23281)
++ if (ui32MemFlags & PVRSRV_MEM_INTERLEAVED) {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++ if (ui32MemFlags & PVRSRV_MEM_DUMMY) {
++ ui32PAdvance = 0;
++ }
++
++ for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) {
++ MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += ui32VAdvance;
++ DevPAddr.uiAddr += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, 0, hUniqueTag);
++#endif
++}
++
++void
++MMU_MapShadow(MMU_HEAP * pMMUHeap,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ u32 uByteSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ void *hOSMemHandle,
++ IMG_DEV_VIRTADDR * pDevVAddr, u32 ui32MemFlags, void *hUniqueTag)
++{
++ u32 i;
++ u32 uOffset = 0;
++ IMG_DEV_VIRTADDR MapDevVAddr;
++ u32 ui32VAdvance;
++ u32 ui32PAdvance;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "MMU_MapShadow: %08X, 0x%x, %08X",
++ MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr));
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++ PVR_ASSERT(((u32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++ PVR_ASSERT(((u32) uByteSize & pMMUHeap->ui32DataPageMask) == 0);
++ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++#if defined(FIX_HW_BRN_23281)
++ if (ui32MemFlags & PVRSRV_MEM_INTERLEAVED) {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++ if (ui32MemFlags & PVRSRV_MEM_DUMMY) {
++ ui32PAdvance = 0;
++ }
++
++ MapDevVAddr = MapBaseDevVAddr;
++ for (i = 0; i < uByteSize; i += ui32VAdvance) {
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ if (CpuVAddr) {
++ CpuPAddr =
++ OSMapLinToCPUPhys((void *)((u32) CpuVAddr +
++ uOffset));
++ } else {
++ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++ }
++ DevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++ uOffset,
++ (u32) CpuVAddr + uOffset,
++ CpuPAddr.uiAddr, MapDevVAddr.uiAddr, DevPAddr.uiAddr));
++
++ MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++ MapDevVAddr.uiAddr += ui32VAdvance;
++ uOffset += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, 0,
++ hUniqueTag);
++#endif
++}
++
++void
++MMU_UnmapPages(MMU_HEAP * psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr, u32 ui32PageCount, void *hUniqueTag)
++{
++ u32 uPageSize = psMMUHeap->ui32DataPageSize;
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ u32 i;
++ u32 ui32PDIndex;
++ u32 ui32PTIndex;
++ u32 *pui32Tmp;
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for (i = 0; i < ui32PageCount; i++) {
++ MMU_PT_INFO **ppsPTInfoList;
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++ ppsPTInfoList =
++ &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ ui32PTIndex =
++ (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->
++ ui32PTShift;
++
++ if (!ppsPTInfoList[0]) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
++ ui32PDIndex, ui32PTIndex));
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++ pui32Tmp = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i,
++ ui32PDIndex, ui32PTIndex));
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_UnmapPages: Page table entry value: 0x%08lX",
++ pui32Tmp[ui32PTIndex]));
++ }
++
++ PVR_ASSERT((s32) ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] =
++ (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++ }
++
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount, 1,
++ hUniqueTag);
++#endif
++}
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP * pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++ u32 *pui32PageTable;
++ u32 ui32Index;
++ IMG_DEV_PHYADDR sDevPAddr;
++ MMU_PT_INFO **ppsPTInfoList;
++
++ ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++ if (!ppsPTInfoList[0]) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_GetPhysPageAddr: Not mapped in at 0x%08x",
++ sDevVPageAddr.uiAddr));
++ sDevPAddr.uiAddr = 0;
++ return sDevPAddr;
++ }
++
++ ui32Index =
++ (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->
++ ui32PTShift;
++
++ pui32PageTable = (u32 *) ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++ sDevPAddr.uiAddr &=
++ ~(pMMUHeap->ui32DataPageMask >> SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++
++ sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
++
++ return sDevPAddr;
++}
++
++IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT * pMMUContext)
++{
++ return (pMMUContext->sPDDevPAddr);
++}
++
++PVRSRV_ERROR SGXGetPhysPageAddrKM(void *hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR * pDevPAddr,
++ IMG_CPU_PHYADDR * pCpuPAddr)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ pMMUHeap = (MMU_HEAP *) BM_GetMMUHeap(hDevMemHeap);
++
++ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++ pCpuPAddr->uiAddr = DevPAddr.uiAddr;
++ pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++ return (pDevPAddr->uiAddr !=
++ 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(void *hDevCookie,
++ void *hDevMemContext,
++ IMG_DEV_PHYADDR * psPDDevPAddr)
++{
++ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ *psPDDevPAddr =
++ ((BM_CONTEXT *) hDevMemContext)->psMMUContext->sPDDevPAddr;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ void *hOSMemHandle = NULL;
++ unsigned char *pui8MemBlock = NULL;
++ IMG_SYS_PHYADDR sMemBlockSysPAddr;
++ IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ if (psLocalDevMemArena == NULL) {
++
++ eError =
++ OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, (void **)&pui8MemBlock,
++ &hOSMemHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ if (pui8MemBlock) {
++ sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++ } else {
++
++ sMemBlockCpuPAddr =
++ OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++ }
++ } else {
++
++ if (RA_Alloc(psLocalDevMemArena,
++ 3 * SGX_MMU_PAGE_SIZE,
++ NULL,
++ NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0, &(sMemBlockSysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 3,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &hOSMemHandle);
++ if (!pui8MemBlock) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++
++ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++ psDevInfo->sBIFResetPDDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr =
++ psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++ psDevInfo->sBIFResetPageDevPAddr.uiAddr =
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++
++ psDevInfo->pui32BIFResetPD = (u32 *) pui8MemBlock;
++ psDevInfo->pui32BIFResetPT = (u32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE);
++
++ memset(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++ memset(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++
++ memset(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
++
++ return PVRSRV_OK;
++}
++
++void MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_SYS_PHYADDR sPDSysPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ if (psLocalDevMemArena == NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BIFResetPD,
++ psDevInfo->hBIFResetPDOSMemHandle);
++ } else {
++ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
++ 3 * SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBIFResetPDOSMemHandle);
++
++ sPDSysPAddr =
++ SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->sBIFResetPDDevPAddr);
++ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, 0);
++ }
++}
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ void *hPTPageOSMemHandle = NULL;
++ void *hPDPageOSMemHandle = NULL;
++ u32 *pui32PD = NULL;
++ u32 *pui32PT = NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ if (psLocalDevMemArena == NULL) {
++
++ eError =
++ OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, (void **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ eError =
++ OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, (void **)&pui32PD,
++ &hPDPageOSMemHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ if (pui32PT) {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ } else {
++
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ if (pui32PD) {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PD);
++ } else {
++
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(hPDPageOSMemHandle, 0);
++ }
++ sPDDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ } else {
++
++ if (RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE * 2,
++ NULL,
++ NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0, &(psDevInfo->sBRN22997SysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WorkaroundBRN22997: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(psDevInfo->sBRN22997SysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if (!pui32PT) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WorkaroundBRN22997: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ sPTDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ pui32PD = pui32PT + 1024;
++ sPDDevPAddr.uiAddr = sPTDevPAddr.uiAddr + 4096;
++ }
++
++ memset(pui32PD, 0, SGX_MMU_PAGE_SIZE);
++ memset(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE,
++ PDUMP_PD_UNIQUETAG);
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE,
++ PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ psDevInfo->hBRN22997PTPageOSMemHandle = hPTPageOSMemHandle;
++ psDevInfo->hBRN22997PDPageOSMemHandle = hPDPageOSMemHandle;
++ psDevInfo->sBRN22997PTDevPAddr = sPTDevPAddr;
++ psDevInfo->sBRN22997PDDevPAddr = sPDDevPAddr;
++ psDevInfo->pui32BRN22997PD = pui32PD;
++ psDevInfo->pui32BRN22997PT = pui32PT;
++
++ return PVRSRV_OK;
++}
++
++void WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ u32 *pui32PD = psDevInfo->pui32BRN22997PD;
++ u32 *pui32PT = psDevInfo->pui32BRN22997PT;
++ u32 ui32PDIndex;
++ u32 ui32PTIndex;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ volatile u32 *pui32HostPort;
++ u32 ui32BIFCtrl;
++
++ pui32HostPort =
++ (volatile u32 *)(((u8 *) psDevInfo->pvHostPortBaseKM) +
++ SYS_SGX_HOSTPORT_BRN23030_OFFSET);
++
++ sDevVAddr.uiAddr =
++ SYS_SGX_HOSTPORT_BASE_DEVVADDR + SYS_SGX_HOSTPORT_BRN23030_OFFSET;
++
++ ui32PDIndex =
++ (sDevVAddr.uiAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT +
++ SGX_MMU_PT_SHIFT);
++ ui32PTIndex =
++ (sDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++ pui32PD[ui32PDIndex] =
++ (psDevInfo->sBRN22997PTDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] =
++ (psDevInfo->sBRN22997PTDevPAddr.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sBRN22997PDDevPAddr.uiAddr);
++ PDUMPPDREG(EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sBRN22997PDDevPAddr.uiAddr, PDUMP_PD_UNIQUETAG);
++
++ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL,
++ ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++
++ if (pui32HostPort) {
++
++ u32 ui32Tmp;
++ ui32Tmp = *pui32HostPort;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Host Port not present for BRN22997 workaround"));
++ }
++
++ PDUMPCOMMENT("RDW :SGXMEM:v4:%08lX\r\n", sDevVAddr.uiAddr);
++
++ PDUMPCOMMENT("SAB :SGXMEM:v4:%08lX 4 0 hostport.bin", sDevVAddr.uiAddr);
++
++ pui32PD[ui32PDIndex] = 0;
++ pui32PT[ui32PTIndex] = 0;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL,
++ ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++}
++
++void WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PD,
++ SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PT,
++ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++ if (psLocalDevMemArena == NULL) {
++ if (psDevInfo->pui32BRN22997PD != NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PD,
++ psDevInfo->hBRN22997PDPageOSMemHandle);
++ }
++
++ if (psDevInfo->pui32BRN22997PT != NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PT,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++ }
++ } else {
++ if (psDevInfo->pui32BRN22997PT != NULL) {
++ OSUnMapPhysToLin(psDevInfo->pui32BRN22997PT,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++
++ RA_Free(psLocalDevMemArena,
++ psDevInfo->sBRN22997SysPAddr.uiAddr, 0);
++ }
++ }
++}
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ void *hPTPageOSMemHandle = NULL;
++ u32 *pui32PD;
++ u32 *pui32PT = NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ u32 ui32PDIndex;
++ u32 ui32PTIndex;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ pui32PD =
++ (u32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->
++ psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ if (psLocalDevMemArena == NULL) {
++
++ eError =
++ OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE, (void **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_MapExtSystemCacheRegs: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ if (pui32PT) {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ } else {
++
++ sCpuPAddr =
++ OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ } else {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if (RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ NULL,
++ NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE, 0, &(sSysPAddr.uiAddr)) != 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_MapExtSystemCacheRegs: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if (!pui32PT) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "MMU_MapExtSystemCacheRegs: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ sPTDevPAddr =
++ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ psDevInfo->sExtSystemCacheRegsPTSysPAddr = sSysPAddr;
++ }
++
++ memset(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++ ui32PDIndex =
++ (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >>
++ (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex =
++ (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >>
++ SGX_MMU_PAGE_SHIFT;
++
++ pui32PD[ui32PDIndex] =
++ (sPTDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] =
++ (psDevInfo->sExtSysCacheRegsDevPBase.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE,
++ PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ psDevInfo->pui32ExtSystemCacheRegsPT = pui32PT;
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle = hPTPageOSMemHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ u32 ui32PDIndex;
++ u32 *pui32PD;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ pui32PD =
++ (u32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->
++ psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ ui32PDIndex =
++ (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >>
++ (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ pui32PD[ui32PDIndex] = 0;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, 1,
++ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX,
++ psDevInfo->pui32ExtSystemCacheRegsPT,
++ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++ if (psLocalDevMemArena == NULL) {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != NULL) {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32ExtSystemCacheRegsPT,
++ psDevInfo->
++ hExtSystemCacheRegsPTPageOSMemHandle);
++ }
++ } else {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != NULL) {
++ OSUnMapPhysToLin(psDevInfo->pui32ExtSystemCacheRegsPT,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE |
++ PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->
++ hExtSystemCacheRegsPTPageOSMemHandle);
++
++ RA_Free(psLocalDevMemArena,
++ psDevInfo->sExtSystemCacheRegsPTSysPAddr.uiAddr,
++ 0);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++#if PAGE_TEST
++static void PageTest(void *pMem, IMG_DEV_PHYADDR sDevPAddr)
++{
++ volatile u32 ui32WriteData;
++ volatile u32 ui32ReadData;
++ volatile u32 *pMem32 = (volatile u32 *)pMem;
++ int n;
++ int bOK = 1;
++
++ ui32WriteData = 0xffffffff;
++
++ for (n = 0; n < 1024; n++) {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "Error - memory page test failed at device phys address 0x%08X",
++ sDevPAddr.uiAddr + (n << 2)));
++ PVR_DBG_BREAK;
++ bOK = 0;
++ }
++ }
++
++ ui32WriteData = 0;
++
++ for (n = 0; n < 1024; n++) {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData) {
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "Error - memory page test failed at device phys address 0x%08X",
++ sDevPAddr.uiAddr + (n << 2)));
++ PVR_DBG_BREAK;
++ bOK = 0;
++ }
++ }
++
++ if (bOK) {
++ PVR_DPF((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK",
++ sDevPAddr.uiAddr));
++ } else {
++ PVR_DPF((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***",
++ sDevPAddr.uiAddr));
++ }
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/mmu.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++#include "sgxinfokm.h"
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
++
++void
++MMU_Finalise (MMU_CONTEXT *psMMUContext);
++
++
++void
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena);
++
++void
++MMU_Delete (MMU_HEAP *pMMU);
++
++int
++MMU_Alloc (MMU_HEAP *pMMU,
++ u32 uSize,
++ u32 *pActualSize,
++ u32 uFlags,
++ u32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++
++void
++MMU_Free (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ u32 ui32Size);
++
++void
++MMU_Enable (MMU_HEAP *pMMU);
++
++void
++MMU_Disable (MMU_HEAP *pMMU);
++
++void
++MMU_MapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ u32 uSize,
++ u32 ui32MemFlags,
++ void * hUniqueTag);
++
++void
++MMU_MapShadow (MMU_HEAP * pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ u32 uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ void * hOSMemHandle,
++ IMG_DEV_VIRTADDR * pDevVAddr,
++ u32 ui32MemFlags,
++ void * hUniqueTag);
++
++void
++MMU_UnmapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ u32 ui32PageCount,
++ void * hUniqueTag);
++
++void
++MMU_MapScatter (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ u32 uSize,
++ u32 ui32MemFlags,
++ void * hUniqueTag);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++void
++EnableHostAccess (MMU_CONTEXT *psMMUContext);
++
++
++void
++DisableHostAccess (MMU_CONTEXT *psMMUContext);
++#endif
++
++void MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++void MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++void WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++void WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo);
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/pb.c
+@@ -0,0 +1,420 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++
++#ifndef __linux__
++#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
++#endif
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC)
++static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC)
++
++static PRESMAN_ITEM psResItemCreateSharedPB = NULL;
++static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = NULL;
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(void *pvParam,
++ u32 ui32Param);
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(void *pvParam,
++ u32 ui32Param);
++
++PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hDevCookie,
++ int bLockOnFailure,
++ u32 ui32TotalPBSize,
++ void **phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO ** ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ** ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ** ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ** ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***
++ pppsSharedPBDescSubKernelMemInfos,
++ u32 * ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = NULL;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PVRSRV_ERROR eError;
++
++ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *) hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != NULL) {
++ u32 i;
++ PRESMAN_ITEM psResItem;
++
++ if (psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize,
++ psStubPBDesc->ui32TotalPBSize));
++ }
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * psStubPBDesc->ui32SubKernelMemInfosCount,
++ (void **)&ppsSharedPBDescSubKernelMemInfos,
++ NULL,
++ "Array of Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXFindSharedPBDescKM: OSAllocMem failed"));
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ExitNotFound;
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++
++ if (psResItem == NULL) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) *
++ psStubPBDesc->ui32SubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos, 0);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++
++ *ppsSharedPBDescKernelMemInfo =
++ psStubPBDesc->psSharedPBDescKernelMemInfo;
++ *ppsHWPBDescKernelMemInfo =
++ psStubPBDesc->psHWPBDescKernelMemInfo;
++ *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
++ *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo;
++
++ *ui32SharedPBDescSubKernelMemInfosCount =
++ psStubPBDesc->ui32SubKernelMemInfosCount;
++
++ *pppsSharedPBDescSubKernelMemInfos =
++ ppsSharedPBDescSubKernelMemInfos;
++
++ for (i = 0; i < psStubPBDesc->ui32SubKernelMemInfosCount; i++) {
++ ppsSharedPBDescSubKernelMemInfos[i] =
++ psStubPBDesc->ppsSubKernelMemInfos[i];
++ }
++
++ psStubPBDesc->ui32RefCount++;
++ *phSharedPBDesc = (void *)psResItem;
++ return PVRSRV_OK;
++ }
++
++ eError = PVRSRV_OK;
++ if (bLockOnFailure) {
++ if (psResItemCreateSharedPB == NULL) {
++ psResItemCreateSharedPB =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ psPerProc, 0,
++ &SGXCleanupSharedPBDescCreateLockCallback);
++
++ if (psResItemCreateSharedPB == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++ PVR_ASSERT(psPerProcCreateSharedPB == NULL);
++ psPerProcCreateSharedPB = psPerProc;
++ } else {
++ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++ }
++ExitNotFound:
++ *phSharedPBDesc = NULL;
++
++ return eError;
++}
++
++static PVRSRV_ERROR
++SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC * psStubPBDescIn)
++{
++
++ u32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) psStubPBDescIn->hDevCookie;
++
++ psStubPBDescIn->ui32RefCount--;
++ if (psStubPBDescIn->ui32RefCount == 0) {
++ List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn);
++ for (i = 0; i < psStubPBDescIn->ui32SubKernelMemInfosCount; i++) {
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
++ psStubPBDescIn->
++ ppsSubKernelMemInfos[i]);
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) *
++ psStubPBDescIn->ui32SubKernelMemInfosCount,
++ psStubPBDescIn->ppsSubKernelMemInfos, 0);
++ psStubPBDescIn->ppsSubKernelMemInfos = NULL;
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->
++ psBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
++ psStubPBDescIn->psHWBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
++ psStubPBDescIn->psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->
++ psSharedPBDescKernelMemInfo);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC), psStubPBDescIn, 0);
++
++ SGXCleanupRequest(psDeviceNode, NULL, PVRSRV_CLEANUPCMD_PB);
++ }
++ return PVRSRV_OK;
++
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *) pvParam;
++
++ return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(void *pvParam,
++ u32 ui32Param)
++{
++#ifdef DEBUG
++ PVRSRV_PER_PROCESS_DATA *psPerProc =
++ (PVRSRV_PER_PROCESS_DATA *) pvParam;
++ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
++#endif
++
++ psPerProcCreateSharedPB = NULL;
++ psResItemCreateSharedPB = NULL;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXUnrefSharedPBDescKM(void *hSharedPBDesc)
++{
++ PVR_ASSERT(hSharedPBDesc != NULL);
++
++ return ResManFreeResByPtr(hSharedPBDesc);
++}
++
++PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO * psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO * psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO * psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO * psHWBlockKernelMemInfo,
++ u32 ui32TotalPBSize,
++ void **phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO ** ppsSharedPBDescSubKernelMemInfos,
++ u32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc = NULL;
++ PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++ u32 i;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PRESMAN_ITEM psResItem;
++
++ if (psPerProcCreateSharedPB != psPerProc) {
++ goto NoAdd;
++ } else {
++ PVR_ASSERT(psResItemCreateSharedPB != NULL);
++
++ ResManFreeResByPtr(psResItemCreateSharedPB);
++
++ PVR_ASSERT(psResItemCreateSharedPB == NULL);
++ PVR_ASSERT(psPerProcCreateSharedPB == NULL);
++ }
++
++ psSGXDevInfo =
++ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookie)->
++ pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != NULL) {
++ if (psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize,
++ psStubPBDesc->ui32TotalPBSize));
++
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXAddSharedPBDescKM: "
++ "Failed to register existing shared "
++ "PBDesc with the resource manager"));
++ goto NoAddKeepPB;
++ }
++
++ psStubPBDesc->ui32RefCount++;
++
++ *phSharedPBDesc = (void *)psResItem;
++ eRet = PVRSRV_OK;
++ goto NoAddKeepPB;
++ }
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ (void **)&psStubPBDesc,
++ 0, "Stub Parameter Buffer Description") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++ "StubPBDesc"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ psStubPBDesc->ppsSubKernelMemInfos = NULL;
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * ui32SharedPBDescSubKernelMemInfosCount,
++ (void **)&psStubPBDesc->ppsSubKernelMemInfos,
++ 0, "Array of Kernel Memory Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to alloc "
++ "StubPBDesc->ppsSubKernelMemInfos"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ if (PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++ != PVRSRV_OK) {
++ goto NoAdd;
++ }
++
++ if (PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++ != PVRSRV_OK) {
++ goto NoAdd;
++ }
++
++ if (PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++ != PVRSRV_OK) {
++ goto NoAdd;
++ }
++
++ if (PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo)
++ != PVRSRV_OK) {
++ goto NoAdd;
++ }
++
++ psStubPBDesc->ui32RefCount = 1;
++ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++ psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo;
++
++ psStubPBDesc->ui32SubKernelMemInfosCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) {
++ psStubPBDesc->ppsSubKernelMemInfos[i] =
++ ppsSharedPBDescSubKernelMemInfos[i];
++ if (PVRSRVDissociateMemFromResmanKM
++ (ppsSharedPBDescSubKernelMemInfos[i])
++ != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to dissociate shared PBDesc "
++ "from process"));
++ goto NoAdd;
++ }
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0, &SGXCleanupSharedPBDescCallback);
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to register shared PBDesc "
++ " with the resource manager"));
++ goto NoAdd;
++ }
++ psStubPBDesc->hDevCookie = hDevCookie;
++
++ List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM),
++ psStubPBDesc);
++
++ *phSharedPBDesc = (void *)psResItem;
++
++ return PVRSRV_OK;
++
++NoAdd:
++ if (psStubPBDesc) {
++ if (psStubPBDesc->ppsSubKernelMemInfos) {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) *
++ ui32SharedPBDescSubKernelMemInfosCount,
++ psStubPBDesc->ppsSubKernelMemInfos, 0);
++ psStubPBDesc->ppsSubKernelMemInfos = NULL;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC), psStubPBDesc, 0);
++
++ }
++
++NoAddKeepPB:
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) {
++ PVRSRVFreeDeviceMemKM(hDevCookie,
++ ppsSharedPBDescSubKernelMemInfos[i]);
++ }
++
++ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo);
++
++ return eRet;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgx_bridge_km.h
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++PVRSRV_ERROR SGXSubmitTransferKM(void * hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++PVRSRV_ERROR SGXSubmit2DKM(void * hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
++#endif
++
++
++PVRSRV_ERROR SGXDoKickKM(void * hDevHandle,
++ SGX_CCB_KICK *psCCBKick);
++
++
++PVRSRV_ERROR SGXGetPhysPageAddrKM(void * hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr);
++
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(void * hDevCookie,
++ void * hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr);
++
++
++PVRSRV_ERROR SGXGetClientInfoKM(void * hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo);
++
++
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ void * hDevMemContext);
++
++#if defined(SUPPORT_SGX_HWPERF)
++
++PVRSRV_ERROR SGXReadDiffCountersKM(void * hDevHandle,
++ u32 ui32Reg,
++ u32 *pui32Old,
++ int bNew,
++ u32 ui32New,
++ u32 ui32NewReset,
++ u32 ui32CountersReg,
++ u32 ui32Reg2,
++ int *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs);
++
++PVRSRV_ERROR SGXReadHWPerfCBKM(void * hDevHandle,
++ u32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
++ u32 *pui32DataCount,
++ u32 *pui32ClockSpeed,
++ u32 *pui32HostTimeStamp);
++#endif
++
++
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ int bWaitForComplete);
++
++
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(void * hDevHandle,
++ SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++ PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDevCookie,
++ int bLockOnFailure,
++ u32 ui32TotalPBSize,
++ void * *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ u32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++ PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(void * hSharedPBDesc);
++
++ PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ u32 ui32TotalPBSize,
++ void * *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++ u32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++ PVRSRV_ERROR
++SGXGetInternalDevInfoKM(void * hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxconfig.h
+@@ -0,0 +1,158 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#include "sgxdefs.h"
++
++#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION 1
++#define DEV_MINOR_VERSION 0
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
++
++#if !defined(INTEL_D3_CHANGES)
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++ #define SGX_2D_HEAP_BASE 0x00100000
++ #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #else
++ #if defined(FIX_HW_BRN_26915)
++ #define SGX_CGBUFFER_HEAP_BASE 0x00100000
++ #define SGX_CGBUFFER_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #endif
++ #endif
++
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x10000000
++ #define SGX_GENERAL_HEAP_SIZE (0xC8000000-0x00001000)
++#else
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++ #error "sgxconfig.h: ERROR: SGX_FEATURE_2D_HARDWARE not supported"
++ #endif
++
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00100000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x20000000-0x00100000-0x00001000)
++ #endif
++
++ #if !defined(INTEL_D3_CACHED_CBUF)
++ #define SGX_GENERAL_HEAP_BASE 0x20000000
++ #define SGX_GENERAL_HEAP_SIZE (0xB8000000-0x00001000)
++ #else
++ #define SGX_GENERAL_HEAP_BASE 0x20000000
++ #define SGX_GENERAL_HEAP_SIZE (0x5C000000-0x00001000)
++
++ #define SGX_CACHED_GENERAL_HEAP_BASE 0x7C000000
++ #define SGX_CACHED_GENERAL_HEAP_SIZE (0x5C000000-0x00001000)
++ #endif
++#endif
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0xD8000000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x10000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0xE8000000
++ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0xF5000000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF6000000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0xF8000000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF8400000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0xFA000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0xFF000000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0xFF800000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++#endif
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x01800000
++ #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0x08800000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x04000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0x0C800000
++ #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0x0D800000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0x0F000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++
++#endif
++
++#if !defined(SGX_CORE_IDENTIFIED)
++ #error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxinfokm.h
+@@ -0,0 +1,346 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include "sgxdefs.h"
++#include "device.h"
++#include "power.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++
++#define SGX_HOSTPORT_PRESENT 0x00000001UL
++
++
++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
++
++
++typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++
++ u8 ui8VersionMajor;
++ u8 ui8VersionMinor;
++ u32 ui32CoreConfig;
++ u32 ui32CoreFlags;
++
++
++ void * pvRegsBaseKM;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ void * pvHostPortBaseKM;
++
++ u32 ui32HPSize;
++
++ IMG_SYS_PHYADDR sHPSysPAddr;
++#endif
++
++
++ void * hRegMapping;
++
++
++ IMG_SYS_PHYADDR sRegsPhysBase;
++
++ u32 ui32RegSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ u32 ui32ExtSysCacheRegsSize;
++
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++
++ u32 *pui32ExtSystemCacheRegsPT;
++
++ void * hExtSystemCacheRegsPTPageOSMemHandle;
++
++ IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr;
++#endif
++
++
++ u32 ui32CoreClockSpeed;
++ u32 ui32uKernelTimerClock;
++
++ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
++
++
++
++ IMG_DEV_PHYADDR sKernelPDDevPAddr;
++
++ void *pvDeviceMemoryHeap;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;
++ PVRSRV_SGX_KERNEL_CCB *psKernelCCB;
++ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;
++ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo;
++ u32 *pui32KernelCCBEventKicker;
++#if defined(PDUMP)
++ u32 ui32KernelCCBEventKickerDumpVal;
++#endif
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo;
++ u32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++#if defined(SGX_SUPPORT_HWPROFILING)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++ u32 ui32KickTACounter;
++ u32 ui32KickTARenderCounter;
++#if defined(SUPPORT_SGX_HWPERF)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
++ u32 ui32HWGroupRequested;
++ u32 ui32HWReset;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpDPMStateMemInfo;
++#endif
++
++
++ u32 ui32ClientRefCount;
++
++
++ u32 ui32CacheControl;
++
++
++ u32 ui32ClientBuildOptions;
++
++
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++
++
++
++ void *pvMMUContextList;
++
++
++ int bForcePTOff;
++
++ u32 ui32EDMTaskReg0;
++ u32 ui32EDMTaskReg1;
++
++ u32 ui32ClkGateStatusReg;
++ u32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ u32 ui32MasterClkGateStatusReg;
++ u32 ui32MasterClkGateStatusMask;
++#endif
++ SGX_INIT_SCRIPTS sScripts;
++
++
++ void * hBIFResetPDOSMemHandle;
++ IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
++ u32 *pui32BIFResetPD;
++ u32 *pui32BIFResetPT;
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++ void * hBRN22997PTPageOSMemHandle;
++ void * hBRN22997PDPageOSMemHandle;
++ IMG_DEV_PHYADDR sBRN22997PTDevPAddr;
++ IMG_DEV_PHYADDR sBRN22997PDDevPAddr;
++ u32 *pui32BRN22997PT;
++ u32 *pui32BRN22997PD;
++ IMG_SYS_PHYADDR sBRN22997SysPAddr;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++
++ void * hTimer;
++
++ u32 ui32TimeStamp;
++#endif
++
++
++ u32 ui32NumResets;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
++
++ u32 ui32Flags;
++
++ #if defined(PDUMP)
++ PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ void *pvDummyPTPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyPTDevPAddr;
++ void * hDummyPTPageOSMemHandle;
++ void *pvDummyDataPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyDataDevPAddr;
++ void * hDummyDataPageOSMemHandle;
++#endif
++
++ u32 asSGXDevData[SGX_MAX_DEV_DATA];
++
++} PVRSRV_SGXDEV_INFO;
++
++
++typedef struct _SGX_TIMING_INFORMATION_
++{
++ u32 ui32CoreClockSpeed;
++ u32 ui32HWRecoveryFreq;
++ int bEnableActivePM;
++ u32 ui32ActivePowManLatencyms;
++ u32 ui32uKernelFreq;
++} SGX_TIMING_INFORMATION;
++
++typedef struct _SGX_DEVICE_MAP_
++{
++ u32 ui32Flags;
++
++
++ IMG_SYS_PHYADDR sRegsSysPBase;
++ IMG_CPU_PHYADDR sRegsCpuPBase;
++ IMG_CPU_VIRTADDR pvRegsCpuVBase;
++ u32 ui32RegsSize;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ IMG_SYS_PHYADDR sHPSysPBase;
++ IMG_CPU_PHYADDR sHPCpuPBase;
++ u32 ui32HPSize;
++#endif
++
++
++ IMG_SYS_PHYADDR sLocalMemSysPBase;
++ IMG_DEV_PHYADDR sLocalMemDevPBase;
++ IMG_CPU_PHYADDR sLocalMemCpuPBase;
++ u32 ui32LocalMemSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++ u32 ui32ExtSysCacheRegsSize;
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++#endif
++
++
++ u32 ui32IRQ;
++
++#if !defined(SGX_DYNAMIC_TIMING_INFO)
++
++ SGX_TIMING_INFORMATION sTimingInfo;
++#endif
++} SGX_DEVICE_MAP;
++
++
++struct _PVRSRV_STUB_PBDESC_
++{
++ u32 ui32RefCount;
++ u32 ui32TotalPBSize;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
++ u32 ui32SubKernelMemInfosCount;
++ void * hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_STUB_PBDESC *psNext;
++ PVRSRV_STUB_PBDESC **ppsThis;
++};
++
++typedef struct _PVRSRV_SGX_CCB_INFO_
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo;
++ SGXMKIF_COMMAND *psCommands;
++ u32 *pui32WriteOffset;
++ volatile u32 *pui32ReadOffset;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_SGX_CCB_INFO;
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++void SGXOSTimer(void *pvData);
++
++void SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32PDUMPFlags);
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo);
++PVRSRV_ERROR SGXDeinitialise(void * hDevCookie);
++
++PVRSRV_ERROR SGXPrePowerState(void * hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostPowerState(void * hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPreClockSpeedChange(void * hDevHandle,
++ int bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostClockSpeedChange(void * hDevHandle,
++ int bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++void SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++void SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo);
++#endif
++
++#if defined(NO_HARDWARE)
++static void NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32StatusRegister,
++ u32 ui32StatusValue,
++ u32 ui32StatusMask)
++{
++ u32 ui32RegVal;
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++ ui32RegVal &= ~ui32StatusMask;
++ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxinit.c
+@@ -0,0 +1,2136 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++
++#include "sgx_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#include "sgxutils.h"
++#include "pvrversion.h"
++#include "sgx_options.h"
++
++#include "lists.h"
++#include "srvkm.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++
++#if defined(SUPPORT_SGX_HWPERF)
++void *MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV * psPowerDev, va_list va);
++#endif
++
++#define VAR(x) #x
++
++#define CHECK_SIZE(NAME) \
++{ \
++ if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \
++ { \
++ PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \
++ VAR(NAME), \
++ psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \
++ psSGXStructSizes->ui32Sizeof_##NAME )); \
++ bStructSizesFailed = 1; \
++ } \
++}
++
++#if defined (SYS_USING_INTERRUPTS)
++int SGX_ISRHandler(void *pvData);
++#endif
++
++u32 gui32EventStatusServicesByISR = 0;
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO * psDevInfo,
++ PVRSRV_DEVICE_NODE * psDeviceNode);
++
++static void SGXCommandComplete(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (in_irq()) {
++
++ psDeviceNode->bReProcessDeviceCommandComplete = 1;
++ } else {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#else
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++#endif
++}
++
++static u32 DeinitDevInfo(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ if (psDevInfo->psKernelCCBInfo != NULL) {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO),
++ psDevInfo->psKernelCCBInfo, NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ SGX_BRIDGE_INIT_INFO * psInitInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++
++ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = NULL;
++
++ psDevInfo->sScripts = psInitInfo->sScripts;
++
++ psDevInfo->psKernelCCBMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelCCBMemInfo;
++ psDevInfo->psKernelCCB =
++ (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->
++ pvLinAddrKM;
++
++ psDevInfo->psKernelCCBCtlMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelCCBCtlMemInfo;
++ psDevInfo->psKernelCCBCtl =
++ (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->
++ pvLinAddrKM;
++
++ psDevInfo->psKernelCCBEventKickerMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelCCBEventKickerMemInfo;
++ psDevInfo->pui32KernelCCBEventKicker =
++ (u32 *) psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXHostCtlMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelSGXHostCtlMemInfo;
++ psDevInfo->psSGXHostCtl =
++ (SGXMKIF_HOST_CTL *) psDevInfo->psKernelSGXHostCtlMemInfo->
++ pvLinAddrKM;
++
++ psDevInfo->psKernelSGXTA3DCtlMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelSGXTA3DCtlMemInfo;
++
++ psDevInfo->psKernelSGXMiscMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelSGXMiscMemInfo;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ psDevInfo->psKernelHWProfilingMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ psDevInfo->psKernelHWPerfCBMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelHWPerfCBMemInfo;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ psDevInfo->psKernelEDMStatusBufferMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->
++ hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ psDevInfo->psKernelTmpRgnHeaderMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ psDevInfo->psKernelTmpDPMStateMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelTmpDPMStateMemInfo;
++#endif
++
++ psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions;
++
++ psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGX_CCB_INFO),
++ (void **)&psKernelCCBInfo, 0,
++ "SGX Circular Command Buffer Info");
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "InitDevInfo: Failed to alloc memory"));
++ goto failed_allockernelccb;
++ }
++
++ memset(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
++ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
++ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
++ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
++ psKernelCCBInfo->pui32WriteOffset =
++ &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++ psKernelCCBInfo->pui32ReadOffset =
++ &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++ memcpy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr,
++ SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0]));
++
++ psDevInfo->bForcePTOff = 0;
++
++ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
++ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32MasterClkGateStatusReg =
++ psInitInfo->ui32MasterClkGateStatusReg;
++ psDevInfo->ui32MasterClkGateStatusMask =
++ psInitInfo->ui32MasterClkGateStatusMask;
++#endif
++
++ memcpy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData,
++ sizeof(psDevInfo->asSGXDevData));
++
++ return PVRSRV_OK;
++
++failed_allockernelccb:
++ DeinitDevInfo(psDevInfo);
++
++ return eError;
++}
++
++static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO * psDevInfo,
++ SGX_INIT_COMMAND * psScript,
++ u32 ui32NumInitCommands)
++{
++ u32 ui32PC;
++ SGX_INIT_COMMAND *psComm;
++
++ for (ui32PC = 0, psComm = psScript;
++ ui32PC < ui32NumInitCommands; ui32PC++, psComm++) {
++ switch (psComm->eOp) {
++ case SGX_INIT_OP_WRITE_HW_REG:
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ psComm->sWriteHWReg.ui32Offset,
++ psComm->sWriteHWReg.ui32Value);
++ PDUMPREG(psComm->sWriteHWReg.ui32Offset,
++ psComm->sWriteHWReg.ui32Value);
++ break;
++ }
++#if defined(PDUMP)
++ case SGX_INIT_OP_PDUMP_HW_REG:
++ {
++ PDUMPREG(psComm->sPDumpHWReg.ui32Offset,
++ psComm->sPDumpHWReg.ui32Value);
++ break;
++ }
++#endif
++ case SGX_INIT_OP_HALT:
++ {
++ return PVRSRV_OK;
++ }
++ case SGX_INIT_OP_ILLEGAL:
++
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRunScript: PC %d: Illegal command: %d",
++ ui32PC, psComm->eOp));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo =
++ psDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++#if defined(PDUMP)
++ static int bFirstTime = 1;
++#endif
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "SGX initialisation script part 1\n");
++ eError =
++ SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1,
++ SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXInitialise: SGXRunScript (part 1) failed (%d)",
++ eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "End of SGX initialisation script part 1\n");
++
++ SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++#if defined(EUR_CR_POWER)
++#if defined(SGX531)
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1);
++ PDUMPREG(EUR_CR_POWER, 1);
++#else
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0);
++ PDUMPREG(EUR_CR_POWER, 0);
++#endif
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++ if (bFirstTime) {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 0;
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelCCBEventKickerMemInfo));
++ }
++#endif
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "SGX initialisation script part 2\n");
++ eError =
++ SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2,
++ SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXInitialise: SGXRunScript (part 2) failed (%d)",
++ eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "End of SGX initialisation script part 2\n");
++
++ psSGXHostCtl->ui32InitStatus = 0;
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Reset the SGX microkernel initialisation status\n");
++ PDUMPMEM(NULL, psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ sizeof(u32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker =
++ (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++
++#if defined(PDUMP)
++
++ if (bFirstTime) {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 1;
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "First increment of the SGX event kicker value\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(u32),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelCCBEventKickerMemInfo));
++ PDUMPREG(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++ bFirstTime = 0;
++ }
++#endif
++
++#if !defined(NO_HARDWARE)
++
++ if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXInitialise: Wait for uKernel initialisation failed"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_RETRY;
++ }
++#endif
++
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Wait for the SGX microkernel initialisation to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++ WorkaroundBRN22997ReadHostPort(psDevInfo);
++#endif
++
++ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset ==
++ psDevInfo->psKernelCCBCtl->ui32WriteOffset);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXDeinitialise(void *hDevCookie)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
++ PVRSRV_ERROR eError;
++
++ if (psDevInfo->pvRegsBaseKM == NULL) {
++ return PVRSRV_OK;
++ }
++
++ eError =
++ SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands,
++ SGX_MAX_DEINIT_COMMANDS);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXDeinitialise: SGXRunScript failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevInitSGXPart1(void *pvDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ void *hKernelDevMemContext;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ u32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *) pvDeviceNode;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap =
++ psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGX Initialisation Part 1");
++
++ PDUMPCOMMENT("SGX Core Version Information: %s",
++ SGX_CORE_FRIENDLY_NAME);
++#ifdef SGX_CORE_REV
++ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
++#else
++ PDUMPCOMMENT("SGX Core Revision Information: head rtl");
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is present\r\n");
++#if defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n");
++#endif
++#endif
++
++ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ (void **)&psDevInfo, NULL,
++ "SGX Device Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ memset(psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
++
++ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
++ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pvDevice = (void *)psDevInfo;
++
++ psDevInfo->pvDeviceMemoryHeap = (void *)psDeviceMemoryHeap;
++
++ hKernelDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr, NULL, NULL);
++ if (hKernelDevMemContext == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart1: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++ for (i = 0; i < psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++) {
++ void *hDevMemHeap;
++
++ switch (psDeviceMemoryHeap[i].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ hDevMemHeap =
++ BM_CreateHeap(hKernelDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++ break;
++ }
++ }
++ }
++
++ eError = MMU_BIFResetPDAlloc(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGX : Failed to alloc memory for BIF reset"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(void *hDevHandle,
++ SGX_BRIDGE_INFO_FOR_SRVINIT * psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++
++ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++ eError =
++ PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)",
++ eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hDevHandle,
++ SGX_BRIDGE_INIT_INFO * psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ PDUMPCOMMENT("SGX Initialisation Part 2");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++
++ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart2KM: Failed to load EDM program"));
++ goto failed_init_dev_info;
++ }
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (void **)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart2KM: Failed to get device memory map!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ if (psSGXDeviceMap->pvRegsCpuVBase) {
++ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
++ } else {
++
++ psDevInfo->pvRegsBaseKM =
++ OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++ psSGXDeviceMap->ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_UNCACHED,
++ NULL);
++ if (!psDevInfo->pvRegsBaseKM) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart2KM: Failed to map in regs\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT) {
++
++ psDevInfo->pvHostPortBaseKM =
++ OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase,
++ psSGXDeviceMap->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_UNCACHED,
++ NULL);
++ if (!psDevInfo->pvHostPortBaseKM) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart2KM: Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize;
++ psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase;
++ }
++#endif
++
++#if defined (SYS_USING_INTERRUPTS)
++
++ psDeviceNode->pvISRData = psDeviceNode;
++
++ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++#endif
++
++ psDevInfo->psSGXHostCtl->ui32PowerStatus |=
++ PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ SGXPrePowerState, SGXPostPowerState,
++ SGXPreClockSpeedChange,
++ SGXPostClockSpeedChange,
++ (void *)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevInitSGXPart2KM: failed to register device with power manager"));
++ return eError;
++ }
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ eError = WorkaroundBRN22997Alloc(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXInitialise : Failed to alloc memory for BRN22997 workaround"));
++ return eError;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ psDevInfo->ui32ExtSysCacheRegsSize =
++ psSGXDeviceMap->ui32ExtSysCacheRegsSize;
++ psDevInfo->sExtSysCacheRegsDevPBase =
++ psSGXDeviceMap->sExtSysCacheRegsDevPBase;
++ eError = MMU_MapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXInitialise : Failed to map external system cache registers"));
++ return eError;
++ }
++#endif
++
++ memset(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
++ memset(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
++ memset(psDevInfo->pui32KernelCCBEventKicker, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++ PDUMPCOMMENT("Initialise Kernel CCB");
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBMemInfo, 0,
++ sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Control");
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBCtlMemInfo, 0,
++ sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
++ PDUMPMEM(NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++ return PVRSRV_OK;
++
++failed_init_dev_info:
++ return eError;
++}
++
++static PVRSRV_ERROR DevDeInitSGX(void *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *) pvDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++ u32 ui32Heap;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++
++ if (!psDevInfo) {
++
++ PVR_DPF((PVR_DBG_ERROR, "DevDeInitSGX: Null DevInfo"));
++ return PVRSRV_OK;
++ }
++#if defined(SUPPORT_HW_RECOVERY)
++ if (psDevInfo->hTimer) {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevDeInitSGX: Failed to remove timer"));
++ return eError;
++ }
++ psDevInfo->hTimer = NULL;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevDeInitSGX: Failed to unmap ext system cache registers"));
++ return eError;
++ }
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ WorkaroundBRN22997Free(psDevInfo);
++#endif
++
++ MMU_BIFResetPDFree(psDevInfo);
++
++ DeinitDevInfo(psDevInfo);
++
++ psDeviceMemoryHeap =
++ (DEVICE_MEMORY_HEAP_INFO *) psDevInfo->pvDeviceMemoryHeap;
++ for (ui32Heap = 0;
++ ui32Heap < psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ ui32Heap++) {
++ switch (psDeviceMemoryHeap[ui32Heap].DevMemHeapType) {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap !=
++ NULL) {
++ BM_DestroyHeap(psDeviceMemoryHeap
++ [ui32Heap].hDevMemHeap);
++ }
++ break;
++ }
++ }
++ }
++
++ eError =
++ BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext,
++ NULL);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevDeInitSGX : Failed to destroy kernel context"));
++ return eError;
++ }
++
++ eError =
++ PVRSRVRemovePowerDevice(((PVRSRV_DEVICE_NODE *) pvDeviceNode)->
++ sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (void **)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "DevDeInitSGX: Failed to get device memory map!"));
++ return eError;
++ }
++
++ if (!psSGXDeviceMap->pvRegsCpuVBase) {
++
++ if (psDevInfo->pvRegsBaseKM != NULL) {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32RegSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++ }
++ }
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT) {
++
++ if (psDevInfo->pvHostPortBaseKM != NULL) {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ psDevInfo->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++ }
++ }
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO), psDevInfo, 0);
++
++ psDeviceNode->pvDevice = NULL;
++
++ if (psDeviceMemoryHeap != NULL) {
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ psDeviceMemoryHeap, 0);
++ }
++
++ return PVRSRV_OK;
++}
++
++void SGXDumpDebugInfo(PVRSRV_DEVICE_NODE * psDeviceNode, int bDumpSGXRegs)
++{
++ u32 ui32RegVal;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ if (bDumpSGXRegs) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGX Register Base Address (Linear): 0x%08X",
++ psDevInfo->pvRegsBaseKM));
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGX Register Base Address (Physical): 0x%08X",
++ psDevInfo->sRegsPhysBase));
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ if (ui32RegVal &
++ (EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK |
++ EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK)) {
++ PVR_LOG(("DPM out of memory!!"));
++ }
++ PVR_LOG(("EUR_CR_EVENT_STATUS: %x", ui32RegVal));
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ PVR_LOG(("EUR_CR_EVENT_STATUS2: %x", ui32RegVal));
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ PVR_LOG(("EUR_CR_BIF_CTRL: %x", ui32RegVal));
++
++#if defined(EUR_CR_BIF_BANK0)
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0);
++ PVR_LOG(("EUR_CR_BIF_BANK0: %x", ui32RegVal));
++#endif
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ PVR_LOG(("EUR_CR_BIF_INT_STAT: %x", ui32RegVal));
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_LOG(("EUR_CR_BIF_FAULT: %x", ui32RegVal));
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ EUR_CR_BIF_MEM_REQ_STAT);
++ PVR_LOG(("EUR_CR_BIF_MEM_REQ_STAT: %x", ui32RegVal));
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL);
++ PVR_LOG(("EUR_CR_CLKGATECTL: %x", ui32RegVal));
++
++#if defined(EUR_CR_PDS_PC_BASE)
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_PDS_PC_BASE);
++ PVR_LOG(("EUR_CR_PDS_PC_BASE: %x", ui32RegVal));
++#endif
++
++ }
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ {
++ u32 *pui32MKTraceBuffer =
++ psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
++ u32 ui32LastStatusCode, ui32WriteOffset;
++
++ ui32LastStatusCode = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++ ui32WriteOffset = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++
++ PVR_LOG(("Last SGX microkernel status code: 0x%x",
++ ui32LastStatusCode));
++
++#if defined(PVRSRV_DUMP_MK_TRACE)
++
++ {
++ u32 ui32LoopCounter;
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE;
++ ui32LoopCounter++) {
++ u32 *pui32BufPtr;
++ pui32BufPtr = pui32MKTraceBuffer +
++ (((ui32WriteOffset +
++ ui32LoopCounter) %
++ SGXMK_TRACE_BUFFER_SIZE) * 4);
++ PVR_LOG(("(MKT%u) %08X %08X %08X %08X",
++ ui32LoopCounter, pui32BufPtr[2],
++ pui32BufPtr[3], pui32BufPtr[1],
++ pui32BufPtr[0]));
++ }
++ }
++#endif
++ }
++#endif
++
++ {
++
++ u32 *pui32HostCtlBuffer = (u32 *) psDevInfo->psSGXHostCtl;
++ u32 ui32LoopCounter;
++
++ PVR_LOG(("SGX Host control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter <
++ sizeof(*psDevInfo->psSGXHostCtl) /
++ sizeof(*pui32HostCtlBuffer); ui32LoopCounter += 4) {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X",
++ ui32LoopCounter * sizeof(*pui32HostCtlBuffer),
++ pui32HostCtlBuffer[ui32LoopCounter + 0],
++ pui32HostCtlBuffer[ui32LoopCounter + 1],
++ pui32HostCtlBuffer[ui32LoopCounter + 2],
++ pui32HostCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ {
++
++ u32 *pui32TA3DCtlBuffer =
++ psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM;
++ u32 ui32LoopCounter;
++
++ PVR_LOG(("SGX TA/3D control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter <
++ psDevInfo->psKernelSGXTA3DCtlMemInfo->ui32AllocSize /
++ sizeof(*pui32TA3DCtlBuffer); ui32LoopCounter += 4) {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X",
++ ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer),
++ pui32TA3DCtlBuffer[ui32LoopCounter + 0],
++ pui32TA3DCtlBuffer[ui32LoopCounter + 1],
++ pui32TA3DCtlBuffer[ui32LoopCounter + 2],
++ pui32TA3DCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ QueueDumpDebugInfo();
++}
++
++#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
++static
++void HWRecoveryResetSGX(PVRSRV_DEVICE_NODE * psDeviceNode,
++ u32 ui32Component, u32 ui32CallerID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl =
++ (SGXMKIF_HOST_CTL *) psDevInfo->psSGXHostCtl;
++
++ eError = PVRSRVPowerLock(ui32CallerID, 0);
++ if (eError != PVRSRV_OK) {
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "HWRecoveryResetSGX: Power transition in progress"));
++ return;
++ }
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++
++ PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
++
++ SGXDumpDebugInfo(psDeviceNode, 1);
++
++ PDUMPSUSPEND();
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "HWRecoveryResetSGX: SGXInitialise failed (%d)",
++ eError));
++ }
++
++ PDUMPRESUME();
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++ PVRSRVProcessQueues(ui32CallerID, 1);
++}
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++void SGXOSTimer(void *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ static u32 ui32EDMTasks = 0;
++ static u32 ui32LockupCounter = 0;
++ static u32 ui32NumResets = 0;
++ u32 ui32CurrentEDMTasks;
++ int bLockup = 0;
++ int bPoweredDown;
++
++ psDevInfo->ui32TimeStamp++;
++
++#if defined(NO_HARDWARE)
++ bPoweredDown = 1;
++#else
++ bPoweredDown = SGXIsDevicePowered(psDeviceNode) ? 0 : 1;
++#endif
++
++ if (bPoweredDown) {
++ ui32LockupCounter = 0;
++ } else {
++
++ ui32CurrentEDMTasks =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32EDMTaskReg0);
++ if (psDevInfo->ui32EDMTaskReg1 != 0) {
++ ui32CurrentEDMTasks ^=
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32EDMTaskReg1);
++ }
++ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++ (psDevInfo->ui32NumResets == ui32NumResets)) {
++ ui32LockupCounter++;
++ if (ui32LockupCounter == 3) {
++ ui32LockupCounter = 0;
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXOSTimer() detected SGX lockup (0x%x tasks)",
++ ui32EDMTasks));
++
++ bLockup = 1;
++ }
++ } else {
++ ui32LockupCounter = 0;
++ ui32EDMTasks = ui32CurrentEDMTasks;
++ ui32NumResets = psDevInfo->ui32NumResets;
++ }
++ }
++
++ if (bLockup) {
++ SGXMKIF_HOST_CTL *psSGXHostCtl =
++ (SGXMKIF_HOST_CTL *) psDevInfo->psSGXHostCtl;
++
++ psSGXHostCtl->ui32HostDetectedLockups++;
++
++ HWRecoveryResetSGX(psDeviceNode, 0, KERNEL_ID);
++ }
++}
++#endif
++
++#if defined(SYS_USING_INTERRUPTS)
++
++int SGX_ISRHandler(void *pvData)
++{
++ int bInterruptProcessed = 0;
++
++ {
++ u32 ui32EventStatus, ui32EventEnable;
++ u32 ui32EventClear = 0;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ if (pvData == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGX_ISRHandler: Invalid params\n"));
++ return bInterruptProcessed;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) pvData;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++
++ ui32EventStatus =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ ui32EventEnable =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ EUR_CR_EVENT_HOST_ENABLE);
++
++ gui32EventStatusServicesByISR = ui32EventStatus;
++
++ ui32EventStatus &= ui32EventEnable;
++
++ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK) {
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++ }
++
++ if (ui32EventClear) {
++ bInterruptProcessed = 1;
++
++ ui32EventClear |=
++ EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++ }
++ }
++
++ return bInterruptProcessed;
++}
++
++void SGX_MISRHandler(void *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *) pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl =
++ (SGXMKIF_HOST_CTL *) psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->
++ ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL)
++ &&
++ ((psSGXHostCtl->
++ ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) ==
++ 0UL)) {
++ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
++ }
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (psDeviceNode->bReProcessDeviceCommandComplete) {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#endif
++
++ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
++}
++#endif
++
++PVRSRV_ERROR SGXRegisterDevice(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
++ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pfnInitDevice = DevInitSGXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitSGX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = SGXDevInitCompatCheck;
++
++ psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++ psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++ psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++ psDeviceNode->pfnMMUCreate = MMU_Create;
++ psDeviceNode->pfnMMUDelete = MMU_Delete;
++ psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++ psDeviceNode->pfnMMUFree = MMU_Free;
++ psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++ psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++ psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++ psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++ psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++ psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++#if defined (SYS_USING_INTERRUPTS)
++
++ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++#endif
++
++ psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ psDevMemoryInfo->ui32AddressSpaceSizeLog2 =
++ SGX_FEATURE_ADDRESS_SPACE_SIZE;
++
++ psDevMemoryInfo->ui32Flags = 0;
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ (void **)&psDevMemoryInfo->psDeviceMemoryHeap, 0,
++ "Array of Device Memory Heap Info") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ memset(psDevMemoryInfo->psDeviceMemoryHeap, 0,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID);
++
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "General";
++ psDeviceMemoryHeap->pszBSName = "General BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDevMemoryInfo->ui32MappingHeapID =
++ (u32) (psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++#endif
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "TA Data";
++ psDeviceMemoryHeap->pszBSName = "TA Data BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "Kernel Code";
++ psDeviceMemoryHeap->pszBSName = "Kernel Code BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "KernelData";
++ psDeviceMemoryHeap->pszBSName = "KernelData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PIXELSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PixelShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_VERTEXSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "VertexShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr =
++ SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSPixelCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr =
++ SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSVertexCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "CacheCoherent";
++ psDeviceMemoryHeap->pszBSName = "CacheCoherent BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32SyncHeapID =
++ (u32) (psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_3DPARAMETERS_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
++ psDeviceMemoryHeap->pszName = "3DParameters";
++ psDeviceMemoryHeap->pszBSName = "3DParameters BS";
++#if defined(SUPPORT_PERCONTEXT_PB)
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#else
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr =
++ SGX_GENERAL_MAPPING_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs =
++ PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "GeneralMapping";
++ psDeviceMemoryHeap->pszBSName = "GeneralMapping BS";
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410)
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#else
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32MappingHeapID =
++ (u32) (psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++#endif
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "2D";
++ psDeviceMemoryHeap->pszBSName = "2D BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++#if defined(FIX_HW_BRN_26915)
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_CGBUFFER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_CGBUFFER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_CGBUFFER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "CGBuffer";
++ psDeviceMemoryHeap->pszBSName = "CGBuffer BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++#if defined(INTEL_D3_CACHED_CBUF)
++
++ psDeviceMemoryHeap->ui32HeapID =
++ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_CACHED_GENERAL_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_CACHED_GENERAL_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_CACHED_GENERAL_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_CACHED
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "CachedGeneral";
++ psDeviceMemoryHeap->pszBSName = "Cached General BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++#endif
++
++ psDevMemoryInfo->ui32HeapCount =
++ (u32) (psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXGetClientInfoKM(void *hDevCookie,
++ SGX_CLIENT_INFO * psClientInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookie)->
++ pvDevice;
++
++ psDevInfo->ui32ClientRefCount++;
++
++#if defined(PDUMP)
++
++ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++#endif
++
++ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++ memcpy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData,
++ sizeof(psClientInfo->asDevData));
++
++ return PVRSRV_OK;
++}
++
++void SGXPanic(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVR_LOG(("SGX panic"));
++ SGXDumpDebugInfo(psDeviceNode, 0);
++ OSPanic();
++}
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ u32 ui32BuildOptions, ui32BuildOptionsMismatch;
++#if !defined(NO_HARDWARE)
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++ int bStructSizesFailed;
++
++ int bCheckCoreRev;
++ const u32 aui32CoreRevExceptions[] = {
++ 0x10100, 0x10101
++ };
++ const u32 ui32NumCoreExceptions =
++ sizeof(aui32CoreRevExceptions) / (2 * sizeof(u32));
++ u32 i;
++#endif
++
++ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX) {
++ PVR_LOG(("(FAIL) SGXInit: Device not of type SGX"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto chk_exit;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++ ui32BuildOptions = (SGX_BUILD_OPTIONS);
++ if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions) {
++ ui32BuildOptionsMismatch =
++ ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions;
++ if ((psDevInfo->
++ ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; " "extra options present in client-side driver: (0x%lx). Please check sgx_options.h", psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch));
++ }
++
++ if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0) {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; " "extra options present in KM: (0x%lx). Please check sgx_options.h", ui32BuildOptions & ui32BuildOptionsMismatch));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXInit: Client-side and KM driver build options match. [ OK ]"));
++ }
++
++#if !defined (NO_HARDWARE)
++ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXMiscInfoInt->ui32MiscInfoFlags = 0;
++ psSGXMiscInfoInt->ui32MiscInfoFlags |=
++ PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES;
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++
++ if (eError != PVRSRV_OK) {
++ PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version"));
++ goto chk_exit;
++ }
++ psSGXFeatures =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->pvLinAddrKM))->
++ sSGXFeatures;
++ if ((psSGXFeatures->ui32DDKVersion !=
++ ((PVRVERSION_MAJ << 16) | (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH))
++ || (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD)) {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%ld)/device DDK revision (%ld).", PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
++ PVR_DBG_BREAK;
++ goto chk_exit;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXInit: driver DDK (%ld) and device DDK (%ld) match. [ OK ]",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ }
++
++ if (psSGXFeatures->ui32CoreRevSW == 0) {
++
++ PVR_LOG(("SGXInit: HW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev));
++ } else {
++
++ bCheckCoreRev = 1;
++ for (i = 0; i < ui32NumCoreExceptions; i += 2) {
++ if ((psSGXFeatures->ui32CoreRev ==
++ aui32CoreRevExceptions[i])
++ && (psSGXFeatures->ui32CoreRevSW ==
++ aui32CoreRevExceptions[i + 1])) {
++ PVR_LOG(("SGXInit: HW core rev (%lx), SW core rev (%lx) check skipped.", psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ bCheckCoreRev = 0;
++ }
++ }
++
++ if (bCheckCoreRev) {
++ if (psSGXFeatures->ui32CoreRev !=
++ psSGXFeatures->ui32CoreRevSW) {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%lx) and SW core rev (%lx).", psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXInit: HW core rev (%lx) and SW core rev (%lx) match. [ OK ]",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreRevSW));
++ }
++ }
++ }
++
++ psSGXStructSizes =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->pvLinAddrKM))->
++ sSGXStructSizes;
++
++ bStructSizesFailed = 0;
++
++ CHECK_SIZE(HOST_CTL);
++ CHECK_SIZE(COMMAND);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ CHECK_SIZE(2DCMD);
++ CHECK_SIZE(2DCMD_SHARED);
++#endif
++ CHECK_SIZE(CMDTA);
++ CHECK_SIZE(CMDTA_SHARED);
++ CHECK_SIZE(TRANSFERCMD);
++ CHECK_SIZE(TRANSFERCMD_SHARED);
++
++ CHECK_SIZE(3DREGISTERS);
++ CHECK_SIZE(HWPBDESC);
++ CHECK_SIZE(HWRENDERCONTEXT);
++ CHECK_SIZE(HWRENDERDETAILS);
++ CHECK_SIZE(HWRTDATA);
++ CHECK_SIZE(HWRTDATASET);
++ CHECK_SIZE(HWTRANSFERCONTEXT);
++
++ if (bStructSizesFailed == 1) {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes."));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXInit: SGXMKIF structure sizes match. [ OK ]"));
++ }
++
++ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
++ if (ui32BuildOptions != (SGX_BUILD_OPTIONS)) {
++ ui32BuildOptionsMismatch =
++ ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
++ if (((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0) {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; " "extra options present in driver: (0x%lx). Please check sgx_options.h", (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch));
++ }
++
++ if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0) {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; " "extra options present in microkernel: (0x%lx). Please check sgx_options.h", ui32BuildOptions & ui32BuildOptionsMismatch));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXInit: Driver and microkernel build options match. [ OK ]"));
++ }
++#endif
++
++ eError = PVRSRV_OK;
++chk_exit:
++#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK)
++ return PVRSRV_OK;
++#else
++ return eError;
++#endif
++}
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO * psDevInfo,
++ PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SGXMKIF_COMMAND sCommandData;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++ if (!psMemInfo->pvLinAddrKM) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXGetMiscInfoUkernel: Invalid address."));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
++ psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes;
++
++ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
++
++ memset(psSGXFeatures, 0, sizeof(*psSGXFeatures));
++ memset(psSGXStructSizes, 0, sizeof(*psSGXStructSizes));
++
++ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode,
++ SGXMKIF_CMD_GETMISCINFO,
++ &sCommandData, KERNEL_ID, 0);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++#if !defined(NO_HARDWARE)
++ {
++ int bExit;
++
++ bExit = 0;
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if ((psSGXMiscInfoInt->
++ ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) !=
++ 0) {
++ bExit = 1;
++ break;
++ }
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ if (!bExit) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info."));
++ return PVRSRV_ERROR_TIMEOUT;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO * psDevInfo,
++ SGX_MISC_INFO * psMiscInfo,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ void *hDevMemContext)
++{
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ u32 *pui32MiscInfoFlags =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->pvLinAddrKM))->
++ ui32MiscInfoFlags;
++
++ *pui32MiscInfoFlags = 0;
++
++ switch (psMiscInfo->eRequest) {
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT:
++ {
++ u32 ui32RegOffset;
++ u32 ui32RegVal;
++ u32 ui32BaseRegOffset;
++ u32 ui32BaseRegVal;
++ u32 ui32MaskRegOffset;
++ u32 ui32MaskRegVal;
++
++ switch (psMiscInfo->uData.sSGXBreakpointInfo.
++ ui32BPIndex) {
++ case 0:
++ ui32RegOffset = EUR_CR_BREAKPOINT0;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT0_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT0_MASK;
++ break;
++ case 1:
++ ui32RegOffset = EUR_CR_BREAKPOINT1;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT1_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT1_MASK;
++ break;
++ case 2:
++ ui32RegOffset = EUR_CR_BREAKPOINT2;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT2_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT2_MASK;
++ break;
++ case 3:
++ ui32RegOffset = EUR_CR_BREAKPOINT3;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT3_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT3_MASK;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXGetMiscInfoKM: SGX_MISC_INFO_REQUEST_SET_BREAKPOINT invalid BP idx %d",
++ psMiscInfo->uData.sSGXBreakpointInfo.
++ ui32BPIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable) {
++
++ IMG_DEV_VIRTADDR sBPDevVAddr =
++ psMiscInfo->uData.sSGXBreakpointInfo.
++ sBPDevVAddr;
++
++ ui32MaskRegVal =
++ EUR_CR_BREAKPOINT0_MASK_REGION_MASK |
++ EUR_CR_BREAKPOINT0_MASK_DM_MASK;
++
++ ui32BaseRegVal =
++ sBPDevVAddr.
++ uiAddr &
++ EUR_CR_BREAKPOINT0_BASE_ADDRESS_MASK;
++
++ ui32RegVal =
++ EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK |
++				    EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK |
++ EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK;
++ } else {
++
++ ui32RegVal = ui32BaseRegVal = ui32MaskRegVal =
++ 0;
++ }
++
++ return PVRSRV_OK;
++ }
++#endif
++
++ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++ {
++ psMiscInfo->uData.ui32SGXClockSpeed =
++ psDevInfo->ui32CoreClockSpeed;
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_SGXREV:
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->
++ pvLinAddrKM))->
++ sSGXFeatures;
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXGetMiscInfoKM: Core 0x%lx, sw ID 0x%lx, sw Rev 0x%lx\n",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreIdSW,
++ psSGXFeatures->ui32CoreRevSW));
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXGetMiscInfoKM: DDK version 0x%lx, DDK build 0x%lx\n",
++ psSGXFeatures->ui32DDKVersion,
++ psSGXFeatures->ui32DDKBuild));
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
++ {
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ psSGXFeatures =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->
++ pvLinAddrKM))->
++ sSGXFeatures;
++
++ memset(psMemInfo->pvLinAddrKM, 0,
++ sizeof(PVRSRV_SGX_MISCINFO_INFO));
++
++ psSGXFeatures->ui32DDKVersion =
++ (PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) | PVRVERSION_BRANCH;
++ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
++
++ psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS);
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ case SGX_MISC_INFO_REQUEST_MEMREAD:
++ {
++ PVRSRV_ERROR eError;
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo =
++ psDevInfo->psKernelSGXMiscMemInfo;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ PVRSRV_SGX_MISCINFO_MEMREAD *psSGXMemReadData;
++
++ psSGXMemReadData =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->
++ pvLinAddrKM))->
++ sSGXMemReadData;
++
++ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD;
++
++ if (psMiscInfo->hDevMemContext != NULL) {
++ SGXGetMMUPDAddrKM((void *)psDeviceNode,
++ hDevMemContext,
++ &psSGXMemReadData->
++ sPDDevPAddr);
++ } else {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psMiscInfo->sDevVAddr.uiAddr != 0) {
++ psSGXMemReadData->sDevVAddr =
++ psMiscInfo->sDevVAddr;
++ } else {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures =
++ &((PVRSRV_SGX_MISCINFO_INFO *) (psMemInfo->
++ pvLinAddrKM))->
++ sSGXFeatures;
++
++#if !defined SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ if (*pui32MiscInfoFlags &
++ PVRSRV_USSE_MISCINFO_MEMREAD_FAIL) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
++ {
++ SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ u32 ui32MatchingFlags;
++
++ if ((psMiscInfo->uData.
++ ui32NewHWPerfStatus &
++ ~(PVRSRV_SGX_HWPERF_GRAPHICS_ON |
++ PVRSRV_SGX_HWPERF_MK_EXECUTION_ON)) != 0) {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ ui32MatchingFlags =
++ psMiscInfo->uData.ui32NewHWPerfStatus & psDevInfo->
++ psSGXHostCtl->ui32HWPerfFlags;
++ if ((ui32MatchingFlags & PVRSRV_SGX_HWPERF_GRAPHICS_ON)
++ == 0UL) {
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffff;
++ }
++ if ((ui32MatchingFlags &
++ PVRSRV_SGX_HWPERF_MK_EXECUTION_ON) == 0UL) {
++ psHWPerfCB->ui32OrdinalMK_EXECUTION =
++ 0xffffffffUL;
++ }
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags =
++ psMiscInfo->uData.ui32NewHWPerfStatus;
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "SGX ukernel HWPerf status %lu\n",
++ psDevInfo->psSGXHostCtl->
++ ui32HWPerfFlags);
++ PDUMPMEM(NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32HWPerfFlags),
++ sizeof(psDevInfo->psSGXHostCtl->
++ ui32HWPerfFlags),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelSGXHostCtlMemInfo));
++#endif
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++ {
++
++ SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffffUL;
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags |=
++ PVRSRV_SGX_HWPERF_GRAPHICS_ON;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++ {
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = 0;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++ {
++
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB *psRetrieve =
++ &psMiscInfo->uData.sRetrieveCB;
++ SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ u32 i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff
++ && i < psRetrieve->ui32ArraySize; i++) {
++ SGXMKIF_HWPERF_CB_ENTRY *psData =
++ &psHWPerfCB->psHWPerfCBData[psHWPerfCB->
++ ui32Roff];
++
++ psRetrieve->psHWPerfData[i].ui32FrameNo =
++ psData->ui32FrameNo;
++ psRetrieve->psHWPerfData[i].ui32Type =
++ (psData->
++ ui32Type & PVRSRV_SGX_HWPERF_TYPE_OP_MASK);
++ psRetrieve->psHWPerfData[i].ui32StartTime =
++ psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32StartTimeWraps =
++ psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32EndTime =
++ psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32EndTimeWraps =
++ psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32ClockSpeed =
++ psDevInfo->ui32CoreClockSpeed;
++ psRetrieve->psHWPerfData[i].ui32TimeMax =
++ psDevInfo->ui32uKernelTimerClock;
++ psHWPerfCB->ui32Roff =
++ (psHWPerfCB->ui32Roff +
++ 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++ psRetrieve->ui32DataCount = i;
++ psRetrieve->ui32Time = OSClockus();
++ return PVRSRV_OK;
++ }
++#endif
++ case SGX_MISC_INFO_DUMP_DEBUG_INFO:
++ {
++ PVR_LOG(("User requested SGX debug info"));
++
++ SGXDumpDebugInfo(psDeviceNode, 0);
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_PANIC:
++ {
++ PVR_LOG(("User requested SGX panic"));
++
++ SGXPanic(psDeviceNode);
++
++ return PVRSRV_OK;
++ }
++
++ default:
++ {
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++}
++
++#if defined(SUPPORT_SGX_HWPERF)
++
++PVRSRV_ERROR SGXReadDiffCountersKM(void *hDevHandle,
++ u32 ui32Reg,
++ u32 * pui32Old,
++ int bNew,
++ u32 ui32New,
++ u32 ui32NewReset,
++ u32 ui32CountersReg,
++ u32 ui32Reg2,
++ int *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO * psDiffs)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++ int bPowered = 0;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ if (bNew) {
++ psDevInfo->ui32HWGroupRequested = ui32New;
++ }
++ psDevInfo->ui32HWReset |= ui32NewReset;
++
++ eError = PVRSRVPowerLock(KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++
++ SysAcquireData(&psSysData);
++
++ psPowerDevice = (PVRSRV_POWER_DEV *)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ psDeviceNode->sDevId.ui32DeviceIndex);
++
++ if (psPowerDevice) {
++ bPowered =
++ (int)(psPowerDevice->eCurrentPowerState ==
++ PVRSRV_DEV_POWER_STATE_ON);
++ }
++
++ *pbActive = bPowered;
++
++ {
++ u32 ui32rval = 0;
++
++ if (bPowered) {
++ u32 i;
++
++ *pui32Old =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg);
++
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i) {
++ psDiffs->aui32Counters[i] =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ ui32CountersReg + (i * 4));
++ }
++
++ if (ui32Reg2) {
++ ui32rval =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM,
++ ui32Reg2);
++ }
++
++ if (psDevInfo->ui32HWGroupRequested != *pui32Old) {
++
++ if (psDevInfo->ui32HWReset != 0) {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ ui32Reg,
++ psDevInfo->
++ ui32HWGroupRequested |
++ psDevInfo->ui32HWReset);
++ psDevInfo->ui32HWReset = 0;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg,
++ psDevInfo->ui32HWGroupRequested);
++ }
++ }
++
++ psDiffs->ui32Time[0] = OSClockus();
++ psDiffs->ui32Time[1] = psDevInfo->psSGXHostCtl->ui32TimeWraps;
++ psDiffs->ui32Time[2] = ui32rval;
++
++ psDiffs->ui32Marker[0] = psDevInfo->ui32KickTACounter;
++ psDiffs->ui32Marker[1] = psDevInfo->ui32KickTARenderCounter;
++ }
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ SGXTestActivePowerEvent(psDeviceNode, KERNEL_ID);
++
++ return eError;
++}
++
++PVRSRV_ERROR SGXReadHWPerfCBKM(void *hDevHandle,
++ u32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY * psClientHWPerfEntry,
++ u32 * pui32DataCount,
++ u32 * pui32ClockSpeed, u32 * pui32HostTimeStamp)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HWPERF_CB *psHWPerfCB =
++ psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ u32 i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
++ i++) {
++ SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry =
++ &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
++ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
++ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
++ psClientHWPerfEntry[i].ui32Clocksx16 =
++ SGXConvertTimeStamp(psDevInfo, psMKPerfEntry->ui32TimeWraps,
++ psMKPerfEntry->ui32Time);
++ memcpy(&psClientHWPerfEntry[i].ui32Counters[0],
++ &psMKPerfEntry->ui32Counters[0],
++ sizeof(psMKPerfEntry->ui32Counters));
++
++ psHWPerfCB->ui32Roff =
++ (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++
++ *pui32DataCount = i;
++ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ *pui32HostTimeStamp = OSClockus();
++
++ return eError;
++}
++#else
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxkick.c
+@@ -0,0 +1,901 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined (PDUMP)
++#include "sgxapi_km.h"
++#include "pdump_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++PVRSRV_ERROR SGXDoKickKM(void *hDevHandle, SGX_CCB_KICK * psCCBKick)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
++ SGXMKIF_CMDTA_SHARED *psTACmd;
++ u32 i;
++#if defined(SUPPORT_SGX_HWPERF)
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice;
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ if (psCCBKick->bKickRender) {
++ ++psDevInfo->ui32KickTARenderCounter;
++ }
++ ++psDevInfo->ui32KickTACounter;
++#endif
++
++ if (!CCB_OFFSET_IS_VALID
++ (SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset)) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psTACmd =
++ CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick,
++ ui32CCBOffset);
++
++ if (psCCBKick->hTA3DSyncInfo) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hTA3DSyncInfo;
++ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psCCBKick->bTADependency) {
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hTASyncInfo;
++
++ psTACmd->sTATQSyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui32TATQSyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui32TATQSyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->h3DSyncInfo;
++
++ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui323DTQSyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui323DTQSyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
++ if (psCCBKick->ui32NumTAStatusVals != 0) {
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtlTAStatusInfo[i] =
++ psCCBKick->asTAStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTAStatusSyncInfo[i];
++ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
++ if (psCCBKick->ui32Num3DStatusVals != 0) {
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtl3DStatusInfo[i] =
++ psCCBKick->as3DStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DStatusSyncInfo[i];
++ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs;
++ for (i = 0; i < psCCBKick->ui32NumTASrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTASrcKernelSyncInfo[i];
++
++ psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs;
++ for (i = 0; i < psCCBKick->ui32NumTADstSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTADstKernelSyncInfo[i];
++
++ psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs;
++ for (i = 0; i < psCCBKick->ui32Num3DSrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DSrcKernelSyncInfo[i];
++
++ psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#else
++
++ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahSrcKernelSyncInfo[i];
++
++ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bFirstKickOrResume
++ && psCCBKick->ui32NumDstSyncObjects > 0) {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->
++ hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList =
++ psHWDstSyncListMemInfo->pvLinAddrKM;
++ u32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
++
++ PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *) psCCBKick->
++ hKernelHWSyncListMemInfo)->ui32AllocSize >=
++ (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) +
++ (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) *
++ ui32NumDstSyncs)));
++
++ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
++ PDUMPMEM(NULL,
++ psHWDstSyncListMemInfo,
++ 0,
++ sizeof(SGXMKIF_HWDEVICE_SYNC_LIST),
++ 0, MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++#endif
++
++ for (i = 0; i < ui32NumDstSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ pahDstSyncHandles[i];
++
++ if (psSyncInfo) {
++ psHWDeviceSyncList->asSyncData[i].
++ sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psHWDeviceSyncList->asSyncData[i].
++ sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psHWDeviceSyncList->asSyncData[i].
++ ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ psHWDeviceSyncList->asSyncData[i].
++ ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->
++ ui32WriteOpsPending++;
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ u32 ui32ModifiedValue;
++ u32 ui32SyncOffset =
++ offsetof(SGXMKIF_HWDEVICE_SYNC_LIST,
++ asSyncData)
++ +
++ (i *
++ sizeof(PVRSRV_DEVICE_SYNC_OBJECT));
++ u32 ui32WOpsOffset =
++ ui32SyncOffset +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal);
++ u32 ui32ROpsOffset =
++ ui32SyncOffset +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal);
++
++ PDUMPCOMMENT
++ ("HWDeviceSyncObject for RT: %i\r\n",
++ i);
++
++ PDUMPMEM(NULL,
++ psHWDstSyncListMemInfo,
++ ui32SyncOffset,
++ sizeof
++ (PVRSRV_DEVICE_SYNC_OBJECT), 0,
++ MAKEUNIQUETAG
++ (psHWDstSyncListMemInfo));
++
++ if ((psSyncInfo->psSyncData->
++ ui32LastOpDumpVal == 0)
++ && (psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal == 0)) {
++
++ PDUMPCOMMENT
++ ("Init RT ROpsComplete\r\n",
++ i);
++ PDUMPMEM(&psSyncInfo->
++ psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->
++ psSyncDataMemInfoKM,
++ offsetof
++ (PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->
++ psSyncData->
++ ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG
++ (psSyncInfo->
++ psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT
++ ("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->
++ psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->
++ psSyncDataMemInfoKM,
++ offsetof
++ (PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->
++ psSyncData->
++ ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG
++ (psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->
++ ui32LastOpDumpVal++;
++
++ ui32ModifiedValue =
++ psSyncInfo->psSyncData->
++ ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT
++ ("Modify RT %d WOpPendingVal in HWDevSyncList\r\n",
++ i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psHWDstSyncListMemInfo,
++ ui32WOpsOffset,
++ sizeof(u32),
++ 0,
++ MAKEUNIQUETAG
++ (psHWDstSyncListMemInfo));
++
++ ui32ModifiedValue = 0;
++ PDUMPCOMMENT
++ ("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n",
++ i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psHWDstSyncListMemInfo,
++ ui32ROpsOffset, sizeof(u32), 0,
++ MAKEUNIQUETAG
++ (psHWDstSyncListMemInfo));
++ }
++#endif
++ } else {
++ psHWDeviceSyncList->asSyncData[i].
++ sWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psHWDeviceSyncList->asSyncData[i].
++ sReadOpsCompleteDevVAddr.uiAddr = 0;
++
++ psHWDeviceSyncList->asSyncData[i].
++ ui32ReadOpsPendingVal = 0;
++ psHWDeviceSyncList->asSyncData[i].
++ ui32WriteOpsPendingVal = 0;
++ }
++ }
++ }
++
++ psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ PDUMPCOMMENT("Shared part of TA command\r\n");
++
++ PDUMPMEM(psTACmd,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_CMDTA_SHARED),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i = 0; i < psCCBKick->ui32NumTASrcSyncs; i++) {
++ u32 ui32ModifiedValue;
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTASrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal ==
++ 0)) {
++
++ PDUMPCOMMENT("Init RT TA-SRC ROpsComplete\r\n",
++ i);
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32ReadOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32WriteOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue =
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n",
++ i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n",
++ i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i = 0; i < psCCBKick->ui32NumTADstSyncs; i++) {
++ u32 ui32ModifiedValue;
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTADstKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal ==
++ 0)) {
++
++ PDUMPCOMMENT("Init RT TA-DST ROpsComplete\r\n",
++ i);
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32ReadOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32WriteOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue =
++ psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n",
++ i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n",
++ i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DSrcSyncs; i++) {
++ u32 ui32ModifiedValue;
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal ==
++ 0)) {
++
++ PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete\r\n",
++ i);
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32ReadOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32WriteOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue =
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n",
++ i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n",
++ i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#else
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ u32 ui32ModifiedValue;
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal ==
++ 0)) {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32ReadOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->
++ ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA,
++ ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->
++ ui32WriteOpsComplete), 0,
++ MAKEUNIQUETAG(psSyncInfo->
++ psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue =
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32ReadOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) +
++ offsetof(PVRSRV_DEVICE_SYNC_OBJECT,
++ ui32WriteOpsPendingVal), sizeof(u32),
++ 0, MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#endif
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->asTAStatusUpdate[i].
++ ui32LastStatusUpdateDumpVal, psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(u32), 0, MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTAStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(u32), 0, MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->as3DStatusUpdate[i].
++ ui32LastStatusUpdateDumpVal, psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(u32), 0, MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_CMDTA_SHARED,
++ sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(u32), 0, MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++ }
++#endif
++
++ eError =
++ SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA,
++ &psCCBKick->sCommand, KERNEL_ID, 0);
++ if (eError == PVRSRV_ERROR_RETRY) {
++ if (psCCBKick->bFirstKickOrResume
++ && psCCBKick->ui32NumDstSyncObjects > 0) {
++ for (i = 0; i < psCCBKick->ui32NumDstSyncObjects; i++) {
++
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ pahDstSyncHandles[i];
++
++ if (psSyncInfo) {
++ psSyncInfo->psSyncData->
++ ui32WriteOpsPending--;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ psSyncInfo->psSyncData->
++ ui32LastOpDumpVal--;
++ }
++#endif
++ }
++ }
++ }
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i = 0; i < psCCBKick->ui32NumTASrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ for (i = 0; i < psCCBKick->ui32NumTADstSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ for (i = 0; i < psCCBKick->ui32Num3DSrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#else
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#endif
++
++ return eError;
++ } else if (PVRSRV_OK != eError) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXDoKickKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++#if defined(NO_HARDWARE)
++
++ if (psCCBKick->hTA3DSyncInfo) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hTA3DSyncInfo;
++
++ if (psCCBKick->bTADependency) {
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->asTAStatusUpdate[i].
++ hKernelMemInfo;
++
++ *(u32 *) ((u32) psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) =
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTAStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#endif
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ for (i = 0; i < psCCBKick->ui32NumTASrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++ for (i = 0; i < psCCBKick->ui32NumTADstSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ for (i = 0; i < psCCBKick->ui32Num3DSrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#else
++
++ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bTerminateOrAbort) {
++ if (psCCBKick->ui32NumDstSyncObjects > 0) {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->
++ hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList =
++ psHWDstSyncListMemInfo->pvLinAddrKM;
++
++ for (i = 0; i < psCCBKick->ui32NumDstSyncObjects; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ pahDstSyncHandles[i];
++ if (psSyncInfo)
++ psSyncInfo->psSyncData->
++ ui32WriteOpsComplete =
++ psHWDeviceSyncList->asSyncData[i].
++ ui32WriteOpsPendingVal + 1;
++ }
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->
++ as3DStatusUpdate[i].hKernelMemInfo;
++
++ *(u32 *) ((u32) psKernelMemInfo->pvLinAddrKM
++ +
++ (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.
++ uiAddr -
++ psKernelMemInfo->sDevVAddr.uiAddr)) =
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->
++ ah3DStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#endif
++ }
++ }
++#endif
++
++ return eError;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxpower.c
+@@ -0,0 +1,424 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++#if defined(SUPPORT_HW_RECOVERY)
++static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE * psDeviceNode,
++ SGX_TIMING_INFORMATION * psSGXTimingInfo,
++ void **phTimer)
++{
++
++ *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
++ 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
++ if (*phTimer == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXAddTimer : Failed to register timer callback function"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ SGX_TIMING_INFORMATION sSGXTimingInfo = { 0 };
++#else
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++#endif
++ u32 ui32ActivePowManSampleRate;
++ SGX_TIMING_INFORMATION *psSGXTimingInfo;
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ psSGXTimingInfo = &sSGXTimingInfo;
++ SysGetSGXTimingInformation(psSGXTimingInfo);
++#else
++ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, (void **)&psSGXDeviceMap);
++ psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++ {
++ PVRSRV_ERROR eError;
++ u32 ui32OlduKernelFreq;
++
++ if (psDevInfo->hTimer != NULL) {
++ ui32OlduKernelFreq =
++ psDevInfo->ui32CoreClockSpeed /
++ psDevInfo->ui32uKernelTimerClock;
++ if (ui32OlduKernelFreq !=
++ psSGXTimingInfo->ui32uKernelFreq) {
++
++ void *hNewTimer;
++
++ eError =
++ SGXAddTimer(psDeviceNode, psSGXTimingInfo,
++ &hNewTimer);
++ if (eError == PVRSRV_OK) {
++ eError =
++ OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXUpdateTimingInfo: Failed to remove timer"));
++ }
++ psDevInfo->hTimer = hNewTimer;
++ } else {
++
++ }
++ }
++ } else {
++ eError =
++ SGXAddTimer(psDeviceNode, psSGXTimingInfo,
++ &psDevInfo->hTimer);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++
++ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate =
++ psSGXTimingInfo->ui32uKernelFreq /
++ psSGXTimingInfo->ui32HWRecoveryFreq;
++ }
++#endif
++
++ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++ psDevInfo->ui32uKernelTimerClock =
++ psSGXTimingInfo->ui32CoreClockSpeed /
++ psSGXTimingInfo->ui32uKernelFreq;
++
++ psDevInfo->psSGXHostCtl->ui32uKernelTimerClock =
++ psDevInfo->ui32uKernelTimerClock;
++#if defined(PDUMP)
++ PDUMPCOMMENT("Host Control - Microkernel clock");
++ PDUMPMEM(NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock),
++ sizeof(u32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ if (psSGXTimingInfo->bEnableActivePM) {
++ ui32ActivePowManSampleRate =
++ psSGXTimingInfo->ui32uKernelFreq *
++ psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
++
++ ui32ActivePowManSampleRate += 1;
++ } else {
++ ui32ActivePowManSampleRate = 0;
++ }
++
++ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate =
++ ui32ActivePowManSampleRate;
++#if defined(PDUMP)
++ PDUMPMEM(NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate),
++ sizeof(u32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ return PVRSRV_OK;
++}
++
++static void SGXStartTimer(PVRSRV_SGXDEV_INFO * psDevInfo)
++{
++#if defined(SUPPORT_HW_RECOVERY)
++ PVRSRV_ERROR eError;
++
++ eError = OSEnableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXStartTimer : Failed to enable host timer"));
++ }
++#endif
++}
++
++static void SGXPollForClockGating(PVRSRV_SGXDEV_INFO * psDevInfo,
++ u32 ui32Register,
++ u32 ui32RegisterValue, char *pszComment)
++{
++#if !defined(NO_HARDWARE)
++ PVR_ASSERT(psDevInfo != NULL);
++
++ if (PollForValueKM
++ ((u32 *) psDevInfo->pvRegsBaseKM + (ui32Register >> 2), 0,
++ ui32RegisterValue, MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXPrePowerState: %s failed.",
++ pszComment));
++ }
++#endif
++
++ PDUMPCOMMENT(pszComment);
++ PDUMPREGPOL(ui32Register, 0, ui32RegisterValue);
++}
++
++PVRSRV_ERROR SGXPrePowerState(void *hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ u32 ui32PowerCmd, ui32CompleteStatus;
++ SGXMKIF_COMMAND sCommand = { 0 };
++ u32 ui32Core;
++
++#if defined(SUPPORT_HW_RECOVERY)
++
++ eError = OSDisableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPrePowerState: Failed to disable timer"));
++ return eError;
++ }
++#endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
++ ui32CompleteStatus =
++ PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
++ PDUMPCOMMENT("SGX power off request");
++ } else {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
++ ui32CompleteStatus =
++ PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
++ PDUMPCOMMENT("SGX idle request");
++ }
++
++ sCommand.ui32Data[1] = ui32PowerCmd;
++
++ eError =
++ SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER,
++ &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPrePowerState: Failed to submit power down command"));
++ return eError;
++ }
++
++#if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPrePowerState: Wait for SGX ukernel power transition failed."));
++ PVR_DBG_BREAK;
++ }
++#endif
++
++#if defined(PDUMP)
++ PDUMPCOMMENT
++ ("TA/3D CCB Control - Wait for power event on uKernel.");
++ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ ui32CompleteStatus, ui32CompleteStatus,
++ PDUMP_POLL_OPERATOR_EQUAL, 0,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelSGXHostCtlMemInfo));
++#endif
++
++ for (ui32Core = 0; ui32Core < SGX_FEATURE_MP_CORE_COUNT;
++ ui32Core++) {
++
++ SGXPollForClockGating(psDevInfo,
++ SGX_MP_CORE_SELECT(psDevInfo->
++ ui32ClkGateStatusReg,
++ ui32Core),
++ psDevInfo->ui32ClkGateStatusMask,
++ "Wait for SGX clock gating");
++ }
++
++#if defined(SGX_FEATURE_MP)
++
++ SGXPollForClockGating(psDevInfo,
++ psDevInfo->ui32MasterClkGateStatusReg,
++ psDevInfo->ui32MasterClkGateStatusMask,
++ "Wait for SGX master clock gating");
++#endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++
++ eError = SGXDeinitialise(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPrePowerState: SGXDeinitialise failed: %lu",
++ eError));
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXPostPowerState(void *hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++ psSGXHostCtl->ui32PowerStatus = 0;
++#if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Reset power status");
++ PDUMPMEM(NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ sizeof(u32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPostPowerState: SGXInitialise failed"));
++ return eError;
++ }
++ } else {
++
++ SGXMKIF_COMMAND sCommand = { 0 };
++
++ sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
++ eError =
++ SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER,
++ &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPostPowerState failed to schedule CCB command: %lu",
++ eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ SGXStartTimer(psDevInfo);
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXPreClockSpeedChange(void *hDevHandle,
++ int bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_ERROR eError;
++ /* FIXME MLD Compiler warning temporary fix */
++ /* PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; */
++ /* PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; */
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) {
++ if (bIdleDevice) {
++
++ PDUMPSUSPEND();
++
++ eError =
++ SGXPrePowerState(hDevHandle,
++ PVRSRV_DEV_POWER_STATE_IDLE,
++ PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK) {
++ PDUMPRESUME();
++ return eError;
++ }
++ }
++ }
++	/* FIXME MLD get rid of this macro and use the right stuff */
++ /* PVR_DPF((PVR_DBG_MESSAGE,
++ "SGXPreClockSpeedChange: SGX clock speed was %luHz",
++ psDevInfo->ui32CoreClockSpeed)); */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXPostClockSpeedChange(void *hDevHandle,
++ int bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ /* FIXME MLD remove this if it is not necessary */
++ /* u32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;*/
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) {
++ PVRSRV_ERROR eError;
++
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ if (bIdleDevice) {
++
++ eError =
++ SGXPostPowerState(hDevHandle,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_IDLE);
++
++ PDUMPRESUME();
++
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ } else {
++ SGXStartTimer(psDevInfo);
++ }
++
++ }
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++		 "SGXPostClockSpeedChange: SGX clock speed is now %uHz",
++		 psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxreset.c
+@@ -0,0 +1,474 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++static void SGXResetSoftReset(PVRSRV_SGXDEV_INFO * psDevInfo,
++ int bResetBIF, u32 ui32PDUMPFlags, int bPDump)
++{
++ u32 ui32SoftResetRegVal;
++
++#if defined(SGX_FEATURE_MP)
++ ui32SoftResetRegVal =
++ EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK;
++#endif
++
++ if (bResetBIF) {
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET,
++ ui32SoftResetRegVal);
++ if (bPDump) {
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal,
++ ui32PDUMPFlags);
++ }
++#endif
++
++ ui32SoftResetRegVal =
++ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_SOFT_RESET_TA_RESET_MASK |
++ EUR_CR_SOFT_RESET_USE_RESET_MASK |
++ EUR_CR_SOFT_RESET_ISP_RESET_MASK | EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK;
++#endif
++
++ if (bResetBIF) {
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET,
++ ui32SoftResetRegVal);
++ if (bPDump) {
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal,
++ ui32PDUMPFlags);
++ }
++}
++
++static void SGXResetSleep(PVRSRV_SGXDEV_INFO * psDevInfo,
++ u32 ui32PDUMPFlags, int bPDump)
++{
++
++ OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++ if (bPDump) {
++ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++ PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++ }
++
++}
++
++static void SGXResetInvalDC(PVRSRV_SGXDEV_INFO * psDevInfo,
++ u32 ui32PDUMPFlags, int bPDump)
++{
++ u32 ui32RegVal;
++
++#if defined(EUR_CR_BIF_CTRL_INVAL)
++ ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL,
++ ui32RegVal);
++ if (bPDump) {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL_INVAL, ui32RegVal,
++ ui32PDUMPFlags);
++ }
++#else
++ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump) {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump) {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++#endif
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++
++ if (PollForValueKM
++ ((u32 *) ((u8 *) psDevInfo->pvRegsBaseKM +
++ EUR_CR_BIF_MEM_REQ_STAT), 0,
++ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Wait for DC invalidate failed."));
++ PVR_DBG_BREAK;
++ }
++
++ if (bPDump) {
++ PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0,
++ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ ui32PDUMPFlags);
++ }
++ }
++#endif
++}
++
++void SGXReset(PVRSRV_SGXDEV_INFO * psDevInfo, u32 ui32PDUMPFlags)
++{
++ u32 ui32RegVal;
++#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
++ const u32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
++#else
++ const u32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
++#endif
++
++ psDevInfo->ui32NumResets++;
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags,
++ "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 1);
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ if (ui32RegVal & ui32BifFaultMask) {
++
++ ui32RegVal =
++ EUR_CR_BIF_CTRL_PAUSE_MASK |
++ EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 1);
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 1);
++ }
++#endif
++
++ SGXResetSoftReset(psDevInfo, 1, ui32PDUMPFlags, 1);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 1);
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING,
++ EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_36BIT_ADDRESSING,
++ EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK,
++ ui32PDUMPFlags);
++#endif
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#endif
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal,
++ ui32PDUMPFlags);
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++ u32 ui32DirList, ui32DirListReg;
++
++ for (ui32DirList = 1;
++ ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
++ ui32DirList++) {
++ ui32DirListReg =
++ EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(ui32DirListReg, ui32RegVal,
++ ui32PDUMPFlags);
++ }
++ }
++#endif
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++
++ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++ (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++ (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal,
++ ui32PDUMPFlags);
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++#if defined(SGX_FEATURE_MP)
++#if defined(SGX_BYPASS_SYSTEM_CACHE)
++#error SGX_BYPASS_SYSTEM_CACHE not supported
++#else
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
++ (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL,
++ ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS,
++ ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++#endif
++#else
++#if defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK;
++#else
++#if defined(FIX_HW_BRN_26620)
++ ui32RegVal = 0;
++#else
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYP_CC_MASK;
++#endif
++#endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MNE_CR_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MNE_CR_CTRL, ui32RegVal);
++#endif
++#endif
++
++ ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 0);
++
++ SGXResetSoftReset(psDevInfo, 0, ui32PDUMPFlags, 1);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 0);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, 0);
++
++ for (;;) {
++ u32 ui32BifIntStat =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ IMG_DEV_VIRTADDR sBifFault;
++ u32 ui32PDIndex, ui32PTIndex;
++
++ if ((ui32BifIntStat & ui32BifFaultMask) == 0) {
++ break;
++ }
++
++ sBifFault.uiAddr =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x",
++ ui32BifIntStat, sBifFault.uiAddr));
++ ui32PDIndex =
++ sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex =
++ (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++ SGXResetSoftReset(psDevInfo, 1, ui32PDUMPFlags, 0);
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] =
++ (psDevInfo->sBIFResetPTDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] =
++ (psDevInfo->sBIFResetPageDevPAddr.
++ uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR,
++ ui32RegVal);
++ ui32RegVal =
++ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2,
++ ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 0);
++
++ SGXResetSoftReset(psDevInfo, 0, ui32PDUMPFlags, 0);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 0);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, 0);
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++ }
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ ui32RegVal =
++ (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++ ui32RegVal |=
++ (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++#endif
++
++#if defined(FIX_HW_BRN_23410)
++
++ ui32RegVal |=
++ (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++ {
++ u32 ui32EDMDirListReg;
++
++#if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
++#else
++
++ ui32EDMDirListReg =
++ EUR_CR_BIF_DIR_LIST_BASE1 +
++ 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
++#endif
++
++#if defined(FIX_HW_BRN_28011)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sKernelPDDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sKernelPDDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT,
++ ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg,
++ psDevInfo->sKernelPDDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(ui32EDMDirListReg,
++ psDevInfo->sKernelPDDevPAddr.
++ uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT,
++ ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++ }
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++
++#if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
++#error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE,
++ SGX_2D_HEAP_BASE);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE,
++ ui32PDUMPFlags);
++#endif
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, 1);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Soft Reset of SGX"));
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 1);
++
++ ui32RegVal = 0;
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET,
++ ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++#endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, 1);
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxtransfer.c
+@@ -0,0 +1,598 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(TRANSFER_QUEUE)
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "regpaths.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++PVRSRV_ERROR SGXSubmitTransferKM(void *hDevHandle,
++ PVRSRV_TRANSFER_SGX_KICK * psKick)
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = { 0 };
++ SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++
++ if (!CCB_OFFSET_IS_VALID
++ (SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick,
++ ui32SharedCmdCCBOffset)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXSubmitTransferKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psSharedTransferCmd =
++ CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo,
++ psKick, ui32SharedCmdCCBOffset);
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo;
++
++ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui32TASyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ } else {
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo;
++
++ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui323DSyncReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ } else {
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) {
++ if (psKick->ui32NumSrcSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahSrcSyncInfo[0];
++
++ psSharedTransferCmd->ui32SrcWriteOpPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32SrcReadOpPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sSrcWriteOpsCompleteDevAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sSrcReadOpsCompleteDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++ if (psKick->ui32NumDstSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahDstSyncInfo[0];
++
++ psSharedTransferCmd->ui32DstWriteOpPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32DstReadOpPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sDstWriteOpsCompleteDevAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sDstReadOpsCompleteDevAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++ if (psKick->ui32NumSrcSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ if (psKick->ui32NumDstSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++ if (psKick->ui32NumDstSync > 1 || psKick->ui32NumSrcSync > 1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Transfer command doesn't support more than 1 sync object per src/dst\ndst: %d, src: %d",
++ psKick->ui32NumDstSync, psKick->ui32NumSrcSync));
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) {
++ PDUMPCOMMENT("Shared part of transfer command\r\n");
++ PDUMPMEM(psSharedTransferCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_TRANSFERCMD_SHARED),
++ psKick->ui32PDumpFlags, MAKEUNIQUETAG(psCCBMemInfo));
++
++ if ((psKick->ui32NumSrcSync > 0)
++ && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo = psKick->ahSrcSyncInfo[0];
++
++ PDUMPCOMMENT
++ ("Hack src surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_TRANSFERCMD_SHARED,
++ ui32SrcWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT
++ ("Hack src surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_TRANSFERCMD_SHARED,
++ ui32SrcReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++ if ((psKick->ui32NumDstSync > 0)
++ && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo = psKick->ahDstSyncInfo[0];
++
++ PDUMPCOMMENT
++ ("Hack dest surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_TRANSFERCMD_SHARED,
++ ui32DstWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT
++ ("Hack dest surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_TRANSFERCMD_SHARED,
++ ui32DstReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++
++ if ((psKick->ui32NumSrcSync > 0)
++ && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if ((psKick->ui32NumDstSync > 0)
++ && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) ==
++ 0UL)) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++
++ eError =
++ SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand,
++ KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY) {
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) {
++ if (psKick->ui32NumSrcSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ if (psKick->ui32NumDstSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ ||
++ ((psKick->
++ ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) {
++ if (psKick->ui32NumSrcSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal--;
++ }
++ if (psKick->ui32NumDstSync > 0) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->
++ ui32LastOpDumpVal--;
++ }
++ }
++#endif
++ }
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++
++ else if (PVRSRV_OK != eError) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++#if defined(NO_HARDWARE)
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0) {
++ u32 i;
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for (i = 0; i < psKick->ui32NumDstSync; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->
++ ahDstSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ }
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++#endif
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++PVRSRV_ERROR SGXSubmit2DKM(void *hDevHandle, PVRSRV_2D_SGX_KICK * psKick)
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *) psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = { 0 };
++ SGXMKIF_2DCMD_SHARED *ps2DCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++ u32 i;
++
++ if (!CCB_OFFSET_IS_VALID
++ (SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick,
++ ui32SharedCmdCCBOffset)) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ ps2DCmd =
++ CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick,
++ ui32SharedCmdCCBOffset);
++
++ memset(ps2DCmd, 0, sizeof(*ps2DCmd));
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo;
++
++ ps2DCmd->sTASyncData.ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->sTASyncData.ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo;
++
++ ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ if (psKick->hDstSyncInfo != NULL) {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr =
++ psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr =
++ psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ if (psKick->hDstSyncInfo != NULL) {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) {
++
++ PDUMPCOMMENT("Shared part of 2D command\r\n");
++ PDUMPMEM(ps2DCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_2DCMD_SHARED),
++ psKick->ui32PDumpFlags, MAKEUNIQUETAG(psCCBMemInfo));
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ PDUMPCOMMENT("Hack src surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_2DCMD_SHARED,
++ sSrcSyncData[i].
++ ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_2DCMD_SHARED,
++ sSrcSyncData[i].
++ ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ if (psKick->hDstSyncInfo != NULL) {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ PDUMPCOMMENT
++ ("Hack dest surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_2DCMD_SHARED,
++ sDstSyncData.ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff +
++ offsetof(SGXMKIF_2DCMD_SHARED,
++ sDstSyncData.ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->
++ ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if (psKick->hDstSyncInfo != NULL) {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
++
++ eError =
++ SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand,
++ KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY) {
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()) {
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++
++ if (psKick->hDstSyncInfo != NULL) {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++
++ if (psKick->hDstSyncInfo != NULL) {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++
++#if defined(NO_HARDWARE)
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++) {
++ psSyncInfo =
++ (PVRSRV_KERNEL_SYNC_INFO *) psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete =
++ psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psKick->hTASyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != NULL) {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete =
++ psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ return eError;
++}
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxutils.c
+@@ -0,0 +1,845 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#include <linux/tty.h>
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode,
++ u32 ui32CallerID);
++#endif
++
++void SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
++ u32 ui32CallerID)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++ psSGXHostCtl->ui32NumActivePowerEvents++;
++
++ if ((psSGXHostCtl->
++ ui32PowerStatus &
++ PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0) {
++
++ if (ui32CallerID == ISR_ID) {
++ psDeviceNode->bReProcessDeviceCommandComplete = 1;
++ } else {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++ }
++}
++
++void SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
++ u32 ui32CallerID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->
++ ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0)
++ &&
++ ((psSGXHostCtl->
++ ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER)
++ == 0)) {
++
++ psSGXHostCtl->ui32InterruptClearFlags |=
++ PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++
++ PDUMPSUSPEND();
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++
++ eError = SysPowerDownMISR(psDeviceNode, ui32CallerID);
++#else
++ eError =
++ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.
++ ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ui32CallerID, 0);
++ if (eError == PVRSRV_OK) {
++ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++#endif
++ if (eError == PVRSRV_ERROR_RETRY) {
++
++ psSGXHostCtl->ui32InterruptClearFlags &=
++ ~PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++ eError = PVRSRV_OK;
++ }
++
++ PDUMPRESUME();
++ }
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu",
++ eError));
++ }
++}
++
++static SGXMKIF_COMMAND *SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO * psCCB)
++{
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ if (((*psCCB->pui32WriteOffset + 1) & 255) !=
++ *psCCB->pui32ReadOffset) {
++ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++ }
++
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ return NULL;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO * psDevInfo,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND * psCommandData,
++ u32 ui32CallerID, u32 ui32PDumpFlags)
++{
++ PVRSRV_SGX_CCB_INFO *psKernelCCB;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SGXMKIF_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++ void *pvDumpCommand;
++ int bPDumpIsSuspended = PDumpIsSuspended();
++#endif
++
++ psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++ if (!psSGXCommand) {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++
++ psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl;
++
++#if defined(PDUMP)
++
++ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++ psDevInfo->ui32CacheControl = 0;
++
++ *psSGXCommand = *psCommandData;
++
++ if (eCmdType >= SGXMKIF_CMD_MAX) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXScheduleCCBCommandKM: Unknown command type: %d",
++ eCmdType));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++ }
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ if (psSysData->bFlushAll) {
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = 0;
++ }
++ }
++#endif
++
++ psSGXCommand->ui32ServiceAddress =
++ psDevInfo->aui32HostKickAddr[eCmdType];
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == 0)) {
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "Poll for space in the Kernel CCB\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, 0xff,
++ PDUMP_POLL_OPERATOR_NOTEQUAL, ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command\r\n");
++ pvDumpCommand =
++ (void *)((u8 *) psKernelCCB->psCCBMemInfo->pvLinAddrKM +
++ (*psKernelCCB->pui32WriteOffset *
++ sizeof(SGXMKIF_COMMAND)));
++
++ PDUMPMEM(pvDumpCommand,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND),
++ sizeof(SGXMKIF_COMMAND),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff *
++ sizeof(SGXMKIF_COMMAND) + offsetof(SGXMKIF_COMMAND,
++ ui32CacheControl),
++ sizeof(u32), ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) {
++
++ psDevInfo->sPDContext.ui32CacheControl = 0;
++ }
++ }
++#endif
++
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ eError = PollForValueKM(psKernelCCB->pui32ReadOffset,
++ *psKernelCCB->pui32WriteOffset,
++ 0xFF,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT);
++ if (eError != PVRSRV_OK) {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++#endif
++
++ *psKernelCCB->pui32WriteOffset =
++ (*psKernelCCB->pui32WriteOffset + 1) & 255;
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == 0)) {
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "Poll for previous Kernel CCB CMD to be read\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff), 0xFF,
++ PDUMP_POLL_OPERATOR_EQUAL, ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++#endif
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) {
++ psKernelCCB->ui32CCBDumpWOff =
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++ psDevInfo->ui32KernelCCBEventKickerDumpVal =
++ (psDevInfo->ui32KernelCCBEventKickerDumpVal +
++ 1) & 0xFF;
++ }
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "Kernel CCB write offset\r\n");
++ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++ psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++ sizeof(u32), ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "Kernel CCB event kicker\r\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(u32), ui32PDumpFlags,
++ MAKEUNIQUETAG(psDevInfo->
++ psKernelCCBEventKickerMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
++ "Kick the SGX microkernel\r\n");
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
++ EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags);
++#else
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags);
++#endif
++ }
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker =
++ (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
++ EUR_CR_EVENT_KICK2_NOW_MASK);
++#else
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++#endif
++
++#if defined(NO_HARDWARE)
++
++ *psKernelCCB->pui32ReadOffset =
++ (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++ return eError;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE * psDeviceNode,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND * psCommandData,
++ u32 ui32CallerID, u32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PDUMPSUSPEND();
++
++ eError =
++ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_ON, ui32CallerID,
++ 1);
++
++ PDUMPRESUME();
++
++ if (eError == PVRSRV_OK) {
++ psDeviceNode->bReProcessDeviceCommandComplete = 0;
++ } else {
++ if (eError == PVRSRV_ERROR_RETRY) {
++ if (ui32CallerID == ISR_ID) {
++
++ psDeviceNode->bReProcessDeviceCommandComplete =
++ 1;
++ eError = PVRSRV_OK;
++ } else {
++
++ }
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXScheduleCCBCommandKM failed to acquire lock - "
++ "ui32CallerID:%ld eError:%lu", ui32CallerID,
++ eError));
++ }
++
++ return eError;
++ }
++
++ eError =
++ SGXScheduleCCBCommand(psDevInfo, eCmdType, psCommandData,
++ ui32CallerID, ui32PDumpFlags);
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ if (ui32CallerID != ISR_ID) {
++
++ SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psHostCtl =
++ psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++ u32 ui32PowerStatus;
++ SGXMKIF_COMMAND sCommand = { 0 };
++
++ ui32PowerStatus = psHostCtl->ui32PowerStatus;
++ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0) {
++
++ return PVRSRV_OK;
++ }
++
++ eError =
++ SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES,
++ &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXScheduleProcessQueuesKM failed to schedule CCB command: %lu",
++ eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++int SGXIsDevicePowered(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++PVRSRV_ERROR SGXGetInternalDevInfoKM(void *hDevCookie,
++ SGX_INTERNAL_DEVINFO *
++ psSGXInternalDevInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookie)->
++ pvDevice;
++
++ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++ psSGXInternalDevInfo->bForcePTOff = (int)psDevInfo->bForcePTOff;
++
++ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
++ (void *)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++ return PVRSRV_OK;
++}
++
++void SGXCleanupRequest(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_DEV_VIRTADDR * psHWDataDevVAddr, u32 ui32CleanupType)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo = psDeviceNode->pvDevice;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo =
++ psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) !=
++ 0) {
++
++ } else {
++ SGXMKIF_COMMAND sCommand = { 0 };
++
++ PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resouce clean-up");
++ sCommand.ui32Data[0] = ui32CleanupType;
++ sCommand.ui32Data[1] =
++ (psHWDataDevVAddr == NULL) ? 0 : psHWDataDevVAddr->uiAddr;
++
++ eError =
++ SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP,
++ &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXCleanupRequest: Failed to submit clean-up command"));
++ PVR_DBG_BREAK;
++ }
++
++#if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psSGXHostCtl->ui32CleanupStatus,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ MAX_HW_TIME_US / WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXCleanupRequest: Wait for uKernel to clean up failed"));
++ PVR_DBG_BREAK;
++ }
++#endif
++
++#if defined(PDUMP)
++
++ PDUMPCOMMENTWITHFLAGS(0,
++ "Host Control - Poll for clean-up request to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL, 0,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++ psSGXHostCtl->ui32CleanupStatus &=
++ ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE);
++
++ PDUMPMEM(NULL, psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
++ sizeof(u32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++ }
++}
++
++typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_ {
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++ void *hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_RENDER_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(void *pvParam,
++ u32 ui32Param)
++{
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWRenderContextDevVAddr,
++ PVRSRV_CLEANUPCMD_RC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_ {
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++ void *hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(void *pvParam,
++ u32 ui32Param)
++{
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup =
++ (SGX_HW_TRANSFER_CONTEXT_CLEANUP *) pvParam;
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWTransferContextDevVAddr,
++ PVRSRV_CLEANUPCMD_TC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++void *SGXRegisterHWRenderContextKM(void *psDeviceNode,
++ IMG_DEV_VIRTADDR * psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ (void **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Render Context Cleanup");
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
++ return NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ (void *)psCleanup,
++ 0, &SGXCleanupHWRenderContextCallback);
++
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), psCleanup,
++ psCleanup->hBlockAlloc);
++
++ return NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (void *)psCleanup;
++}
++
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(void *hHWRenderContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWRenderContext != NULL);
++
++ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *) hHWRenderContext;
++
++ if (psCleanup == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXUnregisterHWRenderContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++void *SGXRegisterHWTransferContextKM(void *psDeviceNode,
++ IMG_DEV_VIRTADDR *
++ psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ (void **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Transfer Context Cleanup");
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
++ return NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ psCleanup,
++ 0, &SGXCleanupHWTransferContextCallback);
++
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), psCleanup,
++ psCleanup->hBlockAlloc);
++
++ return NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (void *)psCleanup;
++}
++
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(void *hHWTransferContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWTransferContext != NULL);
++
++ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *) hHWTransferContext;
++
++ if (psCleanup == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXUnregisterHWTransferContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_ {
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++ void *hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_2D_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHW2DContextCallback(void *pvParam, u32 ui32Param)
++{
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup =
++ (SGX_HW_2D_CONTEXT_CLEANUP *) pvParam;
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHW2DContextDevVAddr,
++ PVRSRV_CLEANUPCMD_2DC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup, psCleanup->hBlockAlloc);
++
++ return PVRSRV_OK;
++}
++
++void *SGXRegisterHW2DContextKM(void *psDeviceNode,
++ IMG_DEV_VIRTADDR * psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA * psPerProc)
++{
++ PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ (void **)&psCleanup,
++ &hBlockAlloc, "SGX Hardware 2D Context Cleanup");
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
++ return NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ psCleanup,
++ 0, &SGXCleanupHW2DContextCallback);
++
++ if (psResItem == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP), psCleanup,
++ psCleanup->hBlockAlloc);
++
++ return NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (void *)psCleanup;
++}
++
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(void *hHW2DContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHW2DContext != NULL);
++
++ if (hHW2DContext == NULL) {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *) hHW2DContext;
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++#endif
++
++static
++int SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO * psSyncInfo,
++ u32 ui32ReadOpsPending, u32 ui32WriteOpsPending)
++{
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ return (int)((psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) &&
++ (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending)
++ );
++}
++
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO * psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO * psSyncInfo,
++ int bWaitForComplete)
++{
++ u32 ui32ReadOpsPending, ui32WriteOpsPending;
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
++
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (SGX2DQuerySyncOpsComplete
++ (psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) {
++
++ PVR_DPF((PVR_DBG_CALLTRACE,
++ "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++ if (!bWaitForComplete) {
++
++ PVR_DPF((PVR_DBG_CALLTRACE,
++ "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++
++ if (SGX2DQuerySyncOpsComplete
++ (psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) {
++
++ PVR_DPF((PVR_DBG_CALLTRACE,
++ "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
++ }
++ END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++
++#if defined(DEBUG)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData));
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
++
++ }
++#endif
++
++ return PVRSRV_ERROR_TIMEOUT;
++}
++
++void SGXFlushHWRenderTargetKM(void *psDeviceNode,
++ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != NULL);
++
++ SGXCleanupRequest(psDeviceNode,
++ &sHWRTDataSetDevVAddr, PVRSRV_CLEANUPCMD_RT);
++}
++
++u32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO * psDevInfo,
++ u32 ui32TimeWraps, u32 ui32Time)
++{
++#if defined(EUR_CR_TIMER)
++ return ui32Time;
++#else
++ u64 ui64Clocks;
++ u32 ui32Clocksx16;
++
++ ui64Clocks = ((u64) ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
++ (psDevInfo->ui32uKernelTimerClock -
++ (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
++ ui32Clocksx16 = (u32) (ui64Clocks / 16);
++
++ return ui32Clocksx16;
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/devices/sgx/sgxutils.h
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++/* FIXME MLD */
++/*
++#ifdef INTEL_D3_FLUSH
++#include "pal.h"
++#endif
++*/
++
++#include "perproc.h"
++#include "sgxinfokm.h"
++
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++ ((sizeof(type) <= (psCCBMemInfo)->ui32AllocSize) && \
++ ((psCCBKick)->offset <= (psCCBMemInfo)->ui32AllocSize - sizeof(type)))
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++ ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
++ (psCCBKick)->offset))
++
++
++
++void SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode,
++ u32 ui32CallerID);
++
++
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ u32 ui32CallerID,
++ u32 ui32PDumpFlags);
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ u32 ui32CallerID,
++ u32 ui32PDumpFlags);
++
++
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++int SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++void * SGXRegisterHWRenderContextKM(void * psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++
++void * SGXRegisterHWTransferContextKM(void * psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++
++void SGXFlushHWRenderTargetKM(void * psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++
++
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(void * hHWRenderContext);
++
++
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(void * hHWTransferContext);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++void * SGXRegisterHW2DContextKM(void * psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(void * hHW2DContext);
++#endif
++
++u32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ u32 ui32TimeWraps,
++ u32 ui32Time);
++
++void SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ u32 ui32CleanupType);
++
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/env_data.h
+@@ -0,0 +1,66 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++#include <linux/workqueue.h>
++#endif
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
++
++typedef struct _PVR_PCI_DEV_TAG
++{
++ struct pci_dev *psPCIDev;
++ HOST_PCI_INIT_FLAGS ePCIFlags;
++ int abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
++
++typedef struct _ENV_DATA_TAG
++{
++ void *pvBridgeData;
++ struct pm_dev *psPowerDevice;
++ int bLISRInstalled;
++ int bMISRInstalled;
++ u32 ui32IRQ;
++ void *pvISRCookie;
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct workqueue_struct *psWorkQueue;
++#endif
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct work_struct sMISRWork;
++ void *pvMISRData;
++#else
++ struct tasklet_struct sMISRTasklet;
++#endif
++} ENV_DATA;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/env_perproc.h
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __ENV_PERPROC_H__
++#define __ENV_PERPROC_H__
++
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++
++#include "services.h"
++#include "handle.h"
++
++typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
++{
++ void * hBlockAlloc;
++ struct proc_dir_entry *psProcDir;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ struct list_head sDRMAuthListHead;
++#endif
++} PVRSRV_ENV_PER_PROCESS_DATA;
++
++void RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++void LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++
++void * LinuxTerminatingProcessPrivateData(void);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/event.c
+@@ -0,0 +1,278 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "lock.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG {
++ rwlock_t sLock;
++ struct list_head sList;
++
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG {
++ atomic_t sTimeStamp;
++ u32 ui32TimeStampPrevious;
++#if defined(DEBUG)
++ u32 ui32Stats;
++#endif
++ wait_queue_head_t sWait;
++ struct list_head sList;
++ void *hResItem;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(void **phEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++ if (OSAllocMem
++ (PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), (void **)&psEvenObjectList,
++ NULL, "Linux Event Object List") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "LinuxEventObjectCreate: failed to allocate memory for event list"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++ rwlock_init(&psEvenObjectList->sLock);
++
++ *phEventObjectList = (void **)psEvenObjectList;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(void *hEventObjectList)
++{
++
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList =
++ (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
++
++ if (psEvenObjectList) {
++ if (!list_empty(&psEvenObjectList->sList)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "LinuxEventObjectListDestroy: Event List is not empty"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ psEvenObjectList, NULL);
++
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectDelete(void *hOSEventObjectList,
++ void *hOSEventObject)
++{
++ if (hOSEventObjectList) {
++ if (hOSEventObject) {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
++ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "LinuxEventObjectListDelete: Event object waits: %lu",
++ psLinuxEventObject->ui32Stats));
++#endif
++ if (ResManFreeResByPtr(psLinuxEventObject->hResItem) !=
++ PVRSRV_OK) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++ }
++ }
++ return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(void *pvParam, u32 ui32Param)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
++ psLinuxEventObject->psLinuxEventObjectList;
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_del(&psLinuxEventObject->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "LinuxEventObjectDeleteCallback: Event object waits: %lu",
++ psLinuxEventObject->ui32Stats));
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, NULL);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectAdd(void *hOSEventObjectList,
++ void **phOSEventObject)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
++ (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hOSEventObjectList;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "LinuxEventObjectAdd: Couldn't find per-process data"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ if (OSAllocMem
++ (PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
++ (void **)&psLinuxEventObject, NULL,
++ "Linux Event Object") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "LinuxEventObjectAdd: failed to allocate memory "));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
++ psLinuxEventObject->ui32TimeStampPrevious = 0;
++
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats = 0;
++#endif
++ init_waitqueue_head(&psLinuxEventObject->sWait);
++
++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++ psLinuxEventObject->hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_EVENT_OBJECT, psLinuxEventObject, 0,
++ &LinuxEventObjectDeleteCallback);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++ *phOSEventObject = psLinuxEventObject;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(void *hOSEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
++ (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hOSEventObjectList;
++ struct list_head *psListEntry, *psListEntryTemp, *psList;
++ psList = &psLinuxEventObjectList->sList;
++
++ list_for_each_safe(psListEntry, psListEntryTemp, psList) {
++
++ psLinuxEventObject =
++ (PVRSRV_LINUX_EVENT_OBJECT *) list_entry(psListEntry,
++ PVRSRV_LINUX_EVENT_OBJECT,
++ sList);
++
++ atomic_inc(&psLinuxEventObject->sTimeStamp);
++ wake_up_interruptible(&psLinuxEventObject->sWait);
++ }
++
++ return PVRSRV_OK;
++
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(void *hOSEventObject, u32 ui32MSTimeout)
++{
++ u32 ui32TimeStamp;
++ DEFINE_WAIT(sWait);
++
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
++ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++ u32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
++
++ do {
++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait,
++ TASK_INTERRUPTIBLE);
++ ui32TimeStamp = atomic_read(&psLinuxEventObject->sTimeStamp);
++
++ if (psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp) {
++ break;
++ }
++
++ mutex_unlock(&gPVRSRVLock);
++
++ ui32TimeOutJiffies =
++ (u32) schedule_timeout((s32) ui32TimeOutJiffies);
++
++ mutex_lock(&gPVRSRVLock);
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats++;
++#endif
++
++ } while (ui32TimeOutJiffies);
++
++ finish_wait(&psLinuxEventObject->sWait, &sWait);
++
++ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
++
++ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
++
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/event.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(void * *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(void * hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(void * hOSEventObjectList, void * *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(void * hOSEventObjectList, void * hOSEventObject);
++PVRSRV_ERROR LinuxEventObjectSignal(void * hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(void * hOSEventObject, u32 ui32MSTimeout);
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/hwdefs/sgx535defs.h
+@@ -0,0 +1,637 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGX535DEFS_KM_H_
++#define _SGX535DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL 0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000UL
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATESTATUS 0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001UL
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010UL
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100UL
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000UL
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000UL
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000UL
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++#define EUR_CR_CLKGATECTLOVR 0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++#define EUR_CR_CORE_ID 0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFUL
++#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
++#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000UL
++#define EUR_CR_CORE_ID_ID_SHIFT 16
++#define EUR_CR_CORE_REVISION 0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFUL
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00UL
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000UL
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000UL
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++#define EUR_CR_SOFT_RESET 0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001UL
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002UL
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004UL
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008UL
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010UL
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020UL
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040UL
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS2 0x0118
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS 0x012CUL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_ENABLE 0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR 0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_PDS 0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040UL
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++#define EUR_CR_PDS_EXEC_BASE 0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
++#define EUR_CR_EVENT_KICKER 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0UL
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
++#define EUR_CR_EVENT_KICK 0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001UL
++#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
++#define EUR_CR_EVENT_TIMER 0x0ACC
++#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000UL
++#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
++#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFUL
++#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
++#define EUR_CR_PDS_INV0 0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV0_DSC_SHIFT 0
++#define EUR_CR_PDS_INV1 0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV1_DSC_SHIFT 0
++#define EUR_CR_PDS_INV2 0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV2_DSC_SHIFT 0
++#define EUR_CR_PDS_INV3 0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV3_DSC_SHIFT 0
++#define EUR_CR_PDS_INV_CSC 0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001UL
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
++#define EUR_CR_PDS_PC_BASE 0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFUL
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_CTRL 0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001UL
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002UL
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004UL
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008UL
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010UL
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00010000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 16
++#define EUR_CR_BIF_INT_STAT 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFUL
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000UL
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
++#define EUR_CR_BIF_FAULT 0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
++#define EUR_CR_BIF_TILE0 0x0C0C
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE1 0x0C10
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE2 0x0C14
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE3 0x0C18
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE4 0x0C1C
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE5 0x0C20
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE6 0x0C24
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE7 0x0C28
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE8 0x0C2C
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE9 0x0C30
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
++#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 12
++#define EUR_CR_BIF_BANK_SET 0x0C74
++#define EUR_CR_BIF_BANK_SET_SELECT_MASK 0x000003FFUL
++#define EUR_CR_BIF_BANK_SET_SELECT_SHIFT 0
++#define EUR_CR_BIF_BANK0 0x0C78
++#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_BANK1 0x0C7C
++#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_ADT_TTE 0x0C80
++#define EUR_CR_BIF_ADT_TTE_VALUE_MASK 0x000000FFUL
++#define EUR_CR_BIF_ADT_TTE_VALUE_SHIFT 0
++#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1 0x0C94
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_MASK 0x00007000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_SHIFT 12
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_MASK 0x00038000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_SHIFT 15
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2 0x0C98
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_CONFIG 0x0CA0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_MASK 0x0000000FUL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_MASK 0x00000FF0UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT 4
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_MASK 0x00FFF000UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT 12
++#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFUL
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_BANK_STATUS 0x0CB4
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001UL
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002UL
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
++#define EUR_CR_2D_BLIT_STATUS 0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFUL
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000UL
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
++#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EUL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFUL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++#define EUR_CR_2D_SOCIF 0x0E18
++#define EUR_CR_2D_SOCIF_FREESPACE_MASK 0x000000FFUL
++#define EUR_CR_2D_SOCIF_FREESPACE_SHIFT 0
++#define EUR_CR_2D_ALPHA 0x0E1C
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_MASK 0x0000FF00UL
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_SHIFT 8
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_MASK 0x000000FFUL
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_SHIFT 0
++#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFUL
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
++#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000UL
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#define EUR_CR_MNE_CR_CTRL 0x0D00
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000UL
++#define EUR_CR_MNE_CR_CTRL_INVAL 0x0D20
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/hwdefs/sgxdefs.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define _SGXDEFS_H_
++
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#if defined(SGX520)
++#include "sgx520defs.h"
++#else
++#if defined(SGX530)
++#include "sgx530defs.h"
++#else
++#if defined(SGX535)
++#include "sgx535defs.h"
++#else
++#if defined(SGX535_V1_1)
++#include "sgx535defs.h"
++#else
++#if defined(SGX540)
++#include "sgx540defs.h"
++#else
++#if defined(SGX541)
++#include "sgx541defs.h"
++#else
++#if defined(SGX543)
++#include "sgx543defs.h"
++#else
++#if defined(SGX545)
++#include "sgx545defs.h"
++#else
++#if defined(SGX531)
++#include "sgx531defs.h"
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if defined(SGX541)
++#if SGX_CORE_REV == 100
++#include "sgx541_100mpdefs.h"
++#else
++#include "sgx541mpdefs.h"
++#endif
++#else
++#include "sgxmpdefs.h"
++#endif
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/hwdefs/sgxerrata.h
+@@ -0,0 +1,306 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++#if defined(SGX520) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX520 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 103
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 125
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX530 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++#endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX531) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX531 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 1111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 112
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 113
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 126
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX535 Core Revision unspecified"
++
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX540) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_25499
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX540 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX541) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_27270
++ #define FIX_HW_BRN_28011
++ #define FIX_HW_BRN_27510
++
++ #else
++ #if SGX_CORE_REV == 101
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX541 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX541 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX543) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX543 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX543 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX545) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_27266
++ #define FIX_HW_BRN_27456
++ #else
++ #if SGX_CORE_REV == 109
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX545 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if !defined(SGX_CORE_DEFINED)
++#if defined (__GNUC__)
++ #warning "sgxerrata.h: SGX Core Version unspecified"
++#else
++ #pragma message("sgxerrata.h: SGX Core Version unspecified")
++#endif
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/hwdefs/sgxfeaturedefs.h
+@@ -0,0 +1,166 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SGX520)
++ #define SGX_CORE_FRIENDLY_NAME "SGX520"
++ #define SGX_CORE_ID SGX_CORE_ID_520
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX530)
++ #define SGX_CORE_FRIENDLY_NAME "SGX530"
++ #define SGX_CORE_ID SGX_CORE_ID_530
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX535)
++ #define SGX_CORE_FRIENDLY_NAME "SGX535"
++ #define SGX_CORE_ID SGX_CORE_ID_535
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++#ifndef INTEL_D3_CHANGES
++ #define SGX_FEATURE_2D_HARDWARE
++#endif
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SUPPORT_SGX_GENERAL_MAPPING_HEAP
++#else
++#if defined(SGX540)
++ #define SGX_CORE_FRIENDLY_NAME "SGX540"
++ #define SGX_CORE_ID SGX_CORE_ID_540
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX541)
++ #define SGX_CORE_FRIENDLY_NAME "SGX541"
++ #define SGX_CORE_ID SGX_CORE_ID_541
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX543)
++ #define SGX_CORE_FRIENDLY_NAME "SGX543"
++ #define SGX_CORE_ID SGX_CORE_ID_543
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++ #define SGX_FEATURE_DATA_BREAKPOINTS
++#else
++#if defined(SGX531)
++ #define SGX_CORE_FRIENDLY_NAME "SGX531"
++ #define SGX_CORE_ID SGX_CORE_ID_531
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX545)
++ #define SGX_CORE_FRIENDLY_NAME "SGX545"
++ #define SGX_CORE_ID SGX_CORE_ID_545
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_DXT_TEXTURES
++ #define SGX_FEATURE_VOLUME_TEXTURES
++ #define SGX_FEATURE_HOST_ALLOC_FROM_DPM
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_NUM_USE_PIPES (4)
++ #define SGX_FEATURE_TEXTURESTRIDE_EXTENSION
++ #define SGX_FEATURE_PDS_DATA_INTERLEAVE_2DWORDS
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_ZLS_EXTERNALZ
++ #define SGX_FEATURE_VDM_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_ISP_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_NUM_PDS_PIPES (2)
++ #define SGX_FEATURE_NATIVE_BACKWARD_BLIT
++ #define SGX_FEATURE_MAX_TA_RENDER_TARGETS (512)
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_SECONDARY_REQUIRES_USE_KICK
++ #define SGX_FEATURE_DCU
++
++
++ #define SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(FIX_HW_BRN_22693)
++#undef SGX_FEATURE_AUTOCLOCKGATING
++#endif
++
++#if defined(FIX_HW_BRN_27266)
++#undef SGX_FEATURE_36BIT_MMU
++#endif
++
++#if defined(FIX_HW_BRN_27456)
++#undef SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++#endif
++
++#if defined(FIX_HW_BRN_22934) \
++ || defined(FIX_HW_BRN_25499)
++#undef SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ #if defined(SGX_FEATURE_36BIT_MMU)
++ #error SGX_FEATURE_SYSTEM_CACHE is incompatible with SGX_FEATURE_36BIT_MMU
++ #endif
++ #if defined(FIX_HW_BRN_26620) && !defined(SGX_FEATURE_MULTI_EVENT_KICK)
++ #define SGX_BYPASS_SYSTEM_CACHE
++ #endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if !defined(SGX_FEATURE_MP_CORE_COUNT)
++#error SGX_FEATURE_MP_CORE_COUNT must be defined when SGX_FEATURE_MP is defined
++#endif
++#else
++#define SGX_FEATURE_MP_CORE_COUNT (1)
++#endif
++
++#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING
++#endif
++
++#include "img_types.h"
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/hwdefs/sgxmmu.h
+@@ -0,0 +1,79 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT (12)
++#define SGX_MMU_PAGE_SIZE (1UL<<SGX_MMU_PAGE_SHIFT)
++#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1UL)
++
++#define SGX_MMU_PD_SHIFT (10)
++#define SGX_MMU_PD_SIZE (1UL<<SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK (0xFFC00000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PDE_VALID (0x00000001UL)
++#define SGX_MMU_PDE_PAGE_SIZE_4K (0x00000000UL)
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ #define SGX_MMU_PDE_PAGE_SIZE_16K (0x00000002UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_64K (0x00000004UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_256K (0x00000006UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_1M (0x00000008UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_4M (0x0000000AUL)
++ #define SGX_MMU_PDE_PAGE_SIZE_MASK (0x0000000EUL)
++#else
++ #define SGX_MMU_PDE_WRITEONLY (0x00000002UL)
++ #define SGX_MMU_PDE_READONLY (0x00000004UL)
++ #define SGX_MMU_PDE_CACHECONSISTENT (0x00000008UL)
++ #define SGX_MMU_PDE_EDMPROTECT (0x00000010UL)
++#endif
++
++#define SGX_MMU_PT_SHIFT (10)
++#define SGX_MMU_PT_SIZE (1UL<<SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK (0x003FF000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PTE_VALID (0x00000001UL)
++#define SGX_MMU_PTE_WRITEONLY (0x00000002UL)
++#define SGX_MMU_PTE_READONLY (0x00000004UL)
++#define SGX_MMU_PTE_CACHECONSISTENT (0x00000008UL)
++#define SGX_MMU_PTE_EDMPROTECT (0x00000010UL)
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/buffer_manager.h
+@@ -0,0 +1,213 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++#include "perproc.h"
++
++#if defined(__cplusplus)
++extern "C"{
++#endif
++
++typedef struct _BM_HEAP_ BM_HEAP;
++
++struct _BM_MAPPING_
++{
++ enum
++ {
++ hm_wrapped = 1,
++ hm_wrapped_scatter,
++ hm_wrapped_virtaddr,
++ hm_wrapped_scatter_virtaddr,
++ hm_env,
++ hm_contiguous
++ } eCpuMemoryOrigin;
++
++ BM_HEAP *pBMHeap;
++ RA_ARENA *pArena;
++
++ IMG_CPU_VIRTADDR CpuVAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++ IMG_SYS_PHYADDR *psSysAddr;
++ u32 uSize;
++ void * hOSMemHandle;
++ u32 ui32Flags;
++};
++
++typedef struct _BM_BUF_
++{
++ IMG_CPU_VIRTADDR *CpuVAddr;
++ void *hOSMemHandle;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++
++ BM_MAPPING *pMapping;
++ u32 ui32RefCount;
++} BM_BUF;
++
++struct _BM_HEAP_
++{
++ u32 ui32Attribs;
++ BM_CONTEXT *pBMContext;
++ RA_ARENA *pImportArena;
++ RA_ARENA *pLocalDevMemArena;
++ RA_ARENA *pVMArena;
++ DEV_ARENA_DESCRIPTOR sDevArena;
++ MMU_HEAP *pMMUHeap;
++
++ struct _BM_HEAP_ *psNext;
++ struct _BM_HEAP_ **ppsThis;
++};
++
++struct _BM_CONTEXT_
++{
++ MMU_CONTEXT *psMMUContext;
++
++
++ BM_HEAP *psBMHeap;
++
++
++ BM_HEAP *psBMSharedHeap;
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ HASH_TABLE *pBufferHash;
++
++
++ void * hResItem;
++
++ u32 ui32RefCount;
++
++
++
++ struct _BM_CONTEXT_ *psNext;
++ struct _BM_CONTEXT_ **ppsThis;
++};
++
++
++
++typedef void *BM_HANDLE;
++
++#define BP_POOL_MASK 0x7
++
++#define BP_CONTIGUOUS (1 << 3)
++#define BP_PARAMBUFFER (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS 2
++
++void *
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ int *pbCreated);
++
++
++PVRSRV_ERROR
++BM_DestroyContext (void * hBMContext,
++ int *pbCreated);
++
++
++void *
++BM_CreateHeap (void * hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++
++void
++BM_DestroyHeap (void * hDevMemHeap);
++
++
++int
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++int
++BM_Alloc (void * hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ u32 uSize,
++ u32 *pui32Flags,
++ u32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf);
++
++int
++BM_Wrap ( void * hDevMemHeap,
++ u32 ui32Size,
++ u32 ui32Offset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ void *pvCPUVAddr,
++ u32 *pui32Flags,
++ BM_HANDLE *phBuf);
++
++void
++BM_Free (BM_HANDLE hBuf,
++ u32 ui32Flags);
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf);
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf);
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf);
++
++void *
++BM_HandleToOSMemHandle (BM_HANDLE hBuf);
++
++int
++BM_ContiguousStatistics (u32 uFlags,
++ u32 *pTotalBytes,
++ u32 *pAvailableBytes);
++
++
++void BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++PVRSRV_ERROR BM_GetHeapInfo(void * hDevMemHeap,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++MMU_CONTEXT* BM_GetMMUContext(void * hDevMemHeap);
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(void * hDevMemContext);
++
++void * BM_GetMMUHeap(void * hDevMemHeap);
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(void * hDevMemContext);
++
++
++void * BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/dbgdrvif.h
+@@ -0,0 +1,267 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED 0x00000001UL
++#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL
++#define DEBUG_CAPMODE_HOTKEY 0x00000004UL
++
++#define DEBUG_OUTMODE_STANDARDDBG 0x00000001UL
++#define DEBUG_OUTMODE_MONO 0x00000002UL
++#define DEBUG_OUTMODE_STREAMENABLE 0x00000004UL
++#define DEBUG_OUTMODE_ASYNC 0x00000008UL
++#define DEBUG_OUTMODE_SGXVGA 0x00000010UL
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
++#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004UL
++
++#define DEBUG_FLAGS_TEXTSTREAM 0x80000000UL
++
++#define DEBUG_LEVEL_0 0x00000001UL
++#define DEBUG_LEVEL_1 0x00000003UL
++#define DEBUG_LEVEL_2 0x00000007UL
++#define DEBUG_LEVEL_3 0x0000000FUL
++#define DEBUG_LEVEL_4 0x0000001FUL
++#define DEBUG_LEVEL_5 0x0000003FUL
++#define DEBUG_LEVEL_6 0x0000007FUL
++#define DEBUG_LEVEL_7 0x000000FFUL
++#define DEBUG_LEVEL_8 0x000001FFUL
++#define DEBUG_LEVEL_9 0x000003FFUL
++#define DEBUG_LEVEL_10 0x000007FFUL
++#define DEBUG_LEVEL_11 0x00000FFFUL
++
++#define DEBUG_LEVEL_SEL0 0x00000001UL
++#define DEBUG_LEVEL_SEL1 0x00000002UL
++#define DEBUG_LEVEL_SEL2 0x00000004UL
++#define DEBUG_LEVEL_SEL3 0x00000008UL
++#define DEBUG_LEVEL_SEL4 0x00000010UL
++#define DEBUG_LEVEL_SEL5 0x00000020UL
++#define DEBUG_LEVEL_SEL6 0x00000040UL
++#define DEBUG_LEVEL_SEL7 0x00000080UL
++#define DEBUG_LEVEL_SEL8 0x00000100UL
++#define DEBUG_LEVEL_SEL9 0x00000200UL
++#define DEBUG_LEVEL_SEL10 0x00000400UL
++#define DEBUG_LEVEL_SEL11 0x00000800UL
++
++#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
++#define DEBUG_SERVICE_CREATESTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WAITFOREVENT CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++
++typedef enum _DBG_EVENT_
++{
++ DBG_EVENT_STREAM_DATA = 1
++} DBG_EVENT;
++
++typedef struct _DBG_IN_CREATESTREAM_
++{
++ u32 ui32Pages;
++ u32 ui32CapMode;
++ u32 ui32OutMode;
++ char *pszName;
++}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
++
++typedef struct _DBG_IN_FINDSTREAM_
++{
++ int bResetStream;
++ char *pszName;
++}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
++
++typedef struct _DBG_IN_WRITESTRING_
++{
++ void *pvStream;
++ u32 ui32Level;
++ char *pszString;
++}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
++
++typedef struct _DBG_IN_READSTRING_
++{
++ void *pvStream;
++ u32 ui32StringLen;
++ char *pszString;
++} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
++
++typedef struct _DBG_IN_SETDEBUGMODE_
++{
++ void *pvStream;
++ u32 ui32Mode;
++ u32 ui32Start;
++ u32 ui32End;
++ u32 ui32SampleRate;
++} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
++
++typedef struct _DBG_IN_SETDEBUGOUTMODE_
++{
++ void *pvStream;
++ u32 ui32Mode;
++} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
++
++typedef struct _DBG_IN_SETDEBUGLEVEL_
++{
++ void *pvStream;
++ u32 ui32Level;
++} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
++
++typedef struct _DBG_IN_SETFRAME_
++{
++ void *pvStream;
++ u32 ui32Frame;
++} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
++
++typedef struct _DBG_IN_WRITE_
++{
++ void *pvStream;
++ u32 ui32Level;
++ u32 ui32TransferSize;
++ u8 *pui8InBuffer;
++} DBG_IN_WRITE, *PDBG_IN_WRITE;
++
++typedef struct _DBG_IN_READ_
++{
++ void *pvStream;
++ int bReadInitBuffer;
++ u32 ui32OutBufferSize;
++ u8 *pui8OutBuffer;
++} DBG_IN_READ, *PDBG_IN_READ;
++
++typedef struct _DBG_IN_OVERRIDEMODE_
++{
++ void *pvStream;
++ u32 ui32Mode;
++} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
++
++typedef struct _DBG_IN_ISCAPTUREFRAME_
++{
++ void *pvStream;
++ int bCheckPreviousFrame;
++} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
++
++typedef struct _DBG_IN_SETMARKER_
++{
++ void *pvStream;
++ u32 ui32Marker;
++} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
++
++typedef struct _DBG_IN_WRITE_LF_
++{
++ u32 ui32Flags;
++ void *pvStream;
++ u32 ui32Level;
++ u32 ui32BufferSize;
++ u8 *pui8InBuffer;
++} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
++
++#define WRITELF_FLAGS_RESETBUF 0x00000001UL
++
++typedef struct _DBG_STREAM_
++{
++ struct _DBG_STREAM_ *psNext;
++ struct _DBG_STREAM_ *psInitStream;
++ int bInitPhaseComplete;
++ u32 ui32Flags;
++ u32 ui32Base;
++ u32 ui32Size;
++ u32 ui32RPtr;
++ u32 ui32WPtr;
++ u32 ui32DataWritten;
++ u32 ui32CapMode;
++ u32 ui32OutMode;
++ u32 ui32DebugLevel;
++ u32 ui32DefaultMode;
++ u32 ui32Start;
++ u32 ui32End;
++ u32 ui32Current;
++ u32 ui32Access;
++ u32 ui32SampleRate;
++ u32 ui32Reserved;
++ u32 ui32Timeout;
++ u32 ui32Marker;
++ char szName[30];
++} DBG_STREAM,*PDBG_STREAM;
++
++typedef struct _DBGKM_SERVICE_TABLE_
++{
++ u32 ui32Size;
++ void * ( *pfnCreateStream) (char * pszName,u32 ui32CapMode,u32 ui32OutMode,u32 ui32Flags,u32 ui32Pages);
++ void ( *pfnDestroyStream) (PDBG_STREAM psStream);
++ void * ( *pfnFindStream) (char * pszName, int bResetInitBuffer);
++ u32 ( *pfnWriteString) (PDBG_STREAM psStream,char * pszString,u32 ui32Level);
++ u32 ( *pfnReadString) (PDBG_STREAM psStream,char * pszString,u32 ui32Limit);
++ u32 ( *pfnWriteBIN) (PDBG_STREAM psStream,u8 *pui8InBuf,u32 ui32InBuffSize,u32 ui32Level);
++ u32 ( *pfnReadBIN) (PDBG_STREAM psStream,int bReadInitBuffer, u32 ui32OutBufferSize,u8 *pui8OutBuf);
++ void ( *pfnSetCaptureMode) (PDBG_STREAM psStream,u32 ui32CapMode,u32 ui32Start,u32 ui32Stop,u32 ui32SampleRate);
++ void ( *pfnSetOutputMode) (PDBG_STREAM psStream,u32 ui32OutMode);
++ void ( *pfnSetDebugLevel) (PDBG_STREAM psStream,u32 ui32DebugLevel);
++ void ( *pfnSetFrame) (PDBG_STREAM psStream,u32 ui32Frame);
++ u32 ( *pfnGetFrame) (PDBG_STREAM psStream);
++ void ( *pfnOverrideMode) (PDBG_STREAM psStream,u32 ui32Mode);
++ void ( *pfnDefaultMode) (PDBG_STREAM psStream);
++ u32 ( *pfnDBGDrivWrite2) (PDBG_STREAM psStream,u8 *pui8InBuf,u32 ui32InBuffSize,u32 ui32Level);
++ u32 ( *pfnWriteStringCM) (PDBG_STREAM psStream,char * pszString,u32 ui32Level);
++ u32 ( *pfnWriteBINCM) (PDBG_STREAM psStream,u8 *pui8InBuf,u32 ui32InBuffSize,u32 ui32Level);
++ void ( *pfnSetMarker) (PDBG_STREAM psStream,u32 ui32Marker);
++ u32 ( *pfnGetMarker) (PDBG_STREAM psStream);
++ void ( *pfnStartInitPhase) (PDBG_STREAM psStream);
++ void ( *pfnStopInitPhase) (PDBG_STREAM psStream);
++ int ( *pfnIsCaptureFrame) (PDBG_STREAM psStream, int bCheckPreviousFrame);
++ u32 ( *pfnWriteLF) (PDBG_STREAM psStream, u8 *pui8InBuf, u32 ui32InBuffSize, u32 ui32Level, u32 ui32Flags);
++ u32 ( *pfnReadLF) (PDBG_STREAM psStream, u32 ui32OutBuffSize, u8 *pui8OutBuf);
++ u32 ( *pfnGetStreamOffset) (PDBG_STREAM psStream);
++ void ( *pfnSetStreamOffset) (PDBG_STREAM psStream, u32 ui32StreamOffset);
++ int ( *pfnIsLastCaptureFrame) (PDBG_STREAM psStream);
++ void ( *pfnWaitForEvent) (DBG_EVENT eEvent);
++} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/device.h
+@@ -0,0 +1,271 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#include "ra.h"
++#include "resman.h"
++
++typedef struct _BM_CONTEXT_ BM_CONTEXT;
++
++typedef struct _MMU_HEAP_ MMU_HEAP;
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++typedef u32 DEVICE_MEMORY_HEAP_TYPE;
++#define DEVICE_MEMORY_HEAP_PERCONTEXT 0
++#define DEVICE_MEMORY_HEAP_KERNEL 1
++#define DEVICE_MEMORY_HEAP_SHARED 2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2
++
++typedef struct _DEVICE_MEMORY_HEAP_INFO_
++{
++
++ u32 ui32HeapID;
++
++
++ char *pszName;
++
++
++ char *pszBSName;
++
++
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++
++
++ u32 ui32HeapSize;
++
++
++ u32 ui32Attribs;
++
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ void * hDevMemHeap;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++
++ u32 ui32DataPageSize;
++
++} DEVICE_MEMORY_HEAP_INFO;
++
++typedef struct _DEVICE_MEMORY_INFO_
++{
++
++ u32 ui32AddressSpaceSizeLog2;
++
++
++
++
++ u32 ui32Flags;
++
++
++ u32 ui32HeapCount;
++
++
++ u32 ui32SyncHeapID;
++
++
++ u32 ui32MappingHeapID;
++
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ BM_CONTEXT *pBMKernelContext;
++
++
++ BM_CONTEXT *pBMContext;
++
++} DEVICE_MEMORY_INFO;
++
++
++typedef struct DEV_ARENA_DESCRIPTOR_TAG
++{
++ u32 ui32HeapID;
++
++ char *pszName;
++
++ IMG_DEV_VIRTADDR BaseDevVAddr;
++
++ u32 ui32Size;
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ u32 ui32DataPageSize;
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++
++} DEV_ARENA_DESCRIPTOR;
++
++typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
++
++typedef struct _PVRSRV_DEVICE_NODE_
++{
++ PVRSRV_DEVICE_IDENTIFIER sDevId;
++ u32 ui32RefCount;
++
++
++
++
++ PVRSRV_ERROR (*pfnInitDevice) (void*);
++
++ PVRSRV_ERROR (*pfnDeInitDevice) (void*);
++
++
++ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
++
++
++ PVRSRV_ERROR (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
++ void (*pfnMMUFinalise)(MMU_CONTEXT*);
++ void (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
++ MMU_HEAP* (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**);
++ void (*pfnMMUDelete)(MMU_HEAP*);
++ int (*pfnMMUAlloc)(MMU_HEAP*pMMU,
++ u32 uSize,
++ u32 *pActualSize,
++ u32 uFlags,
++ u32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++ void (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,u32);
++ void (*pfnMMUEnable)(MMU_HEAP*);
++ void (*pfnMMUDisable)(MMU_HEAP*);
++ void (*pfnMMUMapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ u32 uSize,
++ u32 ui32MemFlags,
++ void * hUniqueTag);
++ void (*pfnMMUMapShadow)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ u32 uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ void * hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ u32 ui32MemFlags,
++ void * hUniqueTag);
++ void (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ u32 ui32PageCount,
++ void * hUniqueTag);
++
++ void (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ u32 uSize,
++ u32 ui32MemFlags,
++ void * hUniqueTag);
++
++ IMG_DEV_PHYADDR (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++ IMG_DEV_PHYADDR (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
++
++
++ int (*pfnDeviceISR)(void*);
++
++ void *pvISRData;
++
++ u32 ui32SOCInterruptBit;
++
++ void (*pfnDeviceMISR)(void*);
++
++
++ void (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
++
++ int bReProcessDeviceCommandComplete;
++
++
++ DEVICE_MEMORY_INFO sDevMemoryInfo;
++
++
++ void *pvDevice;
++ u32 ui32pvDeviceSize;
++
++
++ PRESMAN_CONTEXT hResManContext;
++
++
++ PSYS_DATA psSysData;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++ u32 ui32Flags;
++
++ struct _PVRSRV_DEVICE_NODE_ *psNext;
++ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
++} PVRSRV_DEVICE_NODE;
++
++PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ u32 ui32SOCInterruptBit,
++ u32 *pui32DeviceIndex );
++
++PVRSRV_ERROR PVRSRVInitialiseDevice(u32 ui32DevIndex);
++PVRSRV_ERROR PVRSRVFinaliseSystem(int bInitSuccesful);
++
++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR PVRSRVDeinitialiseDevice(u32 ui32DevIndex);
++
++#if !defined(USE_CODE)
++
++PVRSRV_ERROR PollForValueKM(volatile u32* pui32LinMemAddr,
++ u32 ui32Value,
++ u32 ui32Mask,
++ u32 ui32Waitus,
++ u32 ui32Tries);
++
++#endif
++
++
++#if defined (USING_ISR_INTERRUPTS)
++PVRSRV_ERROR PollForInterruptKM(u32 ui32Value,
++ u32 ui32Mask,
++ u32 ui32Waitus,
++ u32 ui32Tries);
++#endif
++
++PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData);
++void PVRSRVDeInit(PSYS_DATA psSysData);
++int PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++int PVRSRVSystemLISR(void *pvSysData);
++void PVRSRVMISR(void *pvSysData);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/env/linux/pvr_drm_shared.h
+@@ -0,0 +1,63 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_SHARED_H__)
++#define __PVR_DRM_SHARED_H__
++
++#if defined(SUPPORT_DRI_DRM)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define PVR_DRM_SRVKM_CMD DRM_PVR_RESERVED1
++#define PVR_DRM_DISP_CMD DRM_PVR_RESERVED2
++#define PVR_DRM_BC_CMD DRM_PVR_RESERVED3
++#define PVR_DRM_IS_MASTER_CMD DRM_PVR_RESERVED4
++#define PVR_DRM_UNPRIV_CMD DRM_PVR_RESERVED5
++#define PVR_DRM_DBGDRV_CMD DRM_PVR_RESERVED6
++#else
++#define PVR_DRM_SRVKM_CMD 0
++#define PVR_DRM_DISP_CMD 1
++#define PVR_DRM_BC_CMD 2
++#define PVR_DRM_IS_MASTER_CMD 3
++#define PVR_DRM_UNPRIV_CMD 4
++#define PVR_DRM_DBGDRV_CMD 5
++#endif
++
++#define PVR_DRM_UNPRIV_INIT_SUCCESFUL 0
++#define PVR_DRM_UNPRIV_BUSID_TYPE 1
++#define PVR_DRM_UNPRIV_BUSID_FIELD 2
++
++#define PVR_DRM_BUS_TYPE_PCI 0
++
++#define PVR_DRM_PCI_DOMAIN 0
++#define PVR_DRM_PCI_BUS 1
++#define PVR_DRM_PCI_DEV 2
++#define PVR_DRM_PCI_FUNC 3
++
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/handle.h
+@@ -0,0 +1,272 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++typedef enum
++{
++ PVRSRV_HANDLE_TYPE_NONE = 0,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_TYPE_MMAP_INFO,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER
++} PVRSRV_HANDLE_TYPE;
++
++typedef enum
++{
++
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04
++} PVRSRV_HANDLE_ALLOC_FLAG;
++
++struct _PVRSRV_HANDLE_BASE_;
++typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE;
++
++#ifdef PVR_SECURE_HANDLES
++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, void * *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, void * *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, void * hParent);
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, void * *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, PVRSRV_HANDLE_TYPE *peType, void * hHandle);
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, void * hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, void * hHandle, PVRSRV_HANDLE_TYPE eType, void * hAncestor);
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, void * *phParent, void * hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, void * hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, void * hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, u32 ui32BatchSize);
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, u32 ui32MaxHandle);
++
++u32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase);
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVHandleInit(void);
++
++PVRSRV_ERROR PVRSRVHandleDeInit(void);
++
++#else
++
++#define KERNEL_HANDLE_BASE NULL
++
++static
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, void * *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, void * *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, void * hParent)
++{
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, void * *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, PVRSRV_HANDLE_TYPE *peType, void * hHandle)
++{
++ *peType = PVRSRV_HANDLE_TYPE_NONE;
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, void * hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, void * hHandle, PVRSRV_HANDLE_TYPE eType, void * hAncestor)
++{
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, void * *phParent, void * hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ *phParent = NULL;
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, void * *ppvData, void * hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, void * hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, u32 ui32BatchSize)
++{
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRV_OK;
++}
++
++static
++void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++}
++
++static
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, u32 ui32MaxHandle)
++{
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static
++u32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ return 0;
++}
++
++static
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ *ppsBase = NULL;
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVHandleInit(void)
++{
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVHandleDeInit(void)
++{
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \
++ (void)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag)
++
++#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \
++ (void)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent)
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/hash.h
+@@ -0,0 +1,73 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef u32 HASH_FUNC(u32 uKeySize, void *pKey, u32 uHashTabLen);
++typedef int HASH_KEY_COMP(u32 uKeySize, void *pKey1, void *pKey2);
++
++typedef struct _HASH_TABLE_ HASH_TABLE;
++
++u32 HASH_Func_Default (u32 uKeySize, void *pKey, u32 uHashTabLen);
++
++int HASH_Key_Comp_Default (u32 uKeySize, void *pKey1, void *pKey2);
++
++HASH_TABLE * HASH_Create_Extended (u32 uInitialLen, u32 uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
++
++HASH_TABLE * HASH_Create (u32 uInitialLen);
++
++void HASH_Delete (HASH_TABLE *pHash);
++
++int HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, u32 v);
++
++int HASH_Insert (HASH_TABLE *pHash, u32 k, u32 v);
++
++u32 HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey);
++
++u32 HASH_Remove (HASH_TABLE *pHash, u32 k);
++
++u32 HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey);
++
++u32 HASH_Retrieve (HASH_TABLE *pHash, u32 k);
++
++#ifdef HASH_TRACE
++void HASH_Dump (HASH_TABLE *pHash);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/img_types.h
+@@ -0,0 +1,78 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#include <linux/types.h>
++#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS)
++#define IMG_ADDRSPACE_CPUVADDR_BITS 32
++#endif
++
++#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS)
++#define IMG_ADDRSPACE_PHYSADDR_BITS 32
++#endif
++
++#if !defined(u32_MAX)
++ #define u32_MAX 0xFFFFFFFFUL
++#endif
++
++typedef void *IMG_CPU_VIRTADDR;
++
++typedef struct
++{
++
++ u32 uiAddr;
++#define IMG_CAST_TO_DEVVADDR_UINT(var) (u32)(var)
++
++} IMG_DEV_VIRTADDR;
++
++typedef struct _IMG_CPU_PHYADDR
++{
++
++ u32 uiAddr;
++} IMG_CPU_PHYADDR;
++
++typedef struct _IMG_DEV_PHYADDR
++{
++#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
++
++ u32 uiAddr;
++#else
++ u32 uiAddr;
++ u32 uiHighAddr;
++#endif
++} IMG_DEV_PHYADDR;
++
++typedef struct _IMG_SYS_PHYADDR
++{
++
++ u32 uiAddr;
++} IMG_SYS_PHYADDR;
++
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/ioctldef.h
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF)
++
++#ifndef CTL_CODE
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP 0x00000001
++#define FILE_DEVICE_CD_ROM 0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
++#define FILE_DEVICE_CONTROLLER 0x00000004
++#define FILE_DEVICE_DATALINK 0x00000005
++#define FILE_DEVICE_DFS 0x00000006
++#define FILE_DEVICE_DISK 0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
++#define FILE_DEVICE_FILE_SYSTEM 0x00000009
++#define FILE_DEVICE_INPORT_PORT 0x0000000a
++#define FILE_DEVICE_KEYBOARD 0x0000000b
++#define FILE_DEVICE_MAILSLOT 0x0000000c
++#define FILE_DEVICE_MIDI_IN 0x0000000d
++#define FILE_DEVICE_MIDI_OUT 0x0000000e
++#define FILE_DEVICE_MOUSE 0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010
++#define FILE_DEVICE_NAMED_PIPE 0x00000011
++#define FILE_DEVICE_NETWORK 0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER 0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL 0x00000015
++#define FILE_DEVICE_PARALLEL_PORT 0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017
++#define FILE_DEVICE_PRINTER 0x00000018
++#define FILE_DEVICE_SCANNER 0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a
++#define FILE_DEVICE_SERIAL_PORT 0x0000001b
++#define FILE_DEVICE_SCREEN 0x0000001c
++#define FILE_DEVICE_SOUND 0x0000001d
++#define FILE_DEVICE_STREAMS 0x0000001e
++#define FILE_DEVICE_TAPE 0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
++#define FILE_DEVICE_TRANSPORT 0x00000021
++#define FILE_DEVICE_UNKNOWN 0x00000022
++#define FILE_DEVICE_VIDEO 0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
++#define FILE_DEVICE_WAVE_IN 0x00000025
++#define FILE_DEVICE_WAVE_OUT 0x00000026
++#define FILE_DEVICE_8042_PORT 0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
++#define FILE_DEVICE_BATTERY 0x00000029
++#define FILE_DEVICE_BUS_EXTENDER 0x0000002a
++#define FILE_DEVICE_MODEM 0x0000002b
++#define FILE_DEVICE_VDM 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE 0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access ) ( \
++ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED 0
++#define METHOD_IN_DIRECT 1
++#define METHOD_OUT_DIRECT 2
++#define METHOD_NEITHER 3
++
++#define FILE_ANY_ACCESS 0
++#define FILE_READ_ACCESS ( 0x0001 )
++#define FILE_WRITE_ACCESS ( 0x0002 )
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/kernelbuffer.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(void **);
++typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(void *);
++typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(void *, BUFFER_INFO*);
++typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(void *, u32, PVRSRV_SYNC_DATA*, void **);
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++{
++ u32 ui32TableSize;
++ PFN_OPEN_BC_DEVICE pfnOpenBCDevice;
++ PFN_CLOSE_BC_DEVICE pfnCloseBCDevice;
++ PFN_GET_BC_INFO pfnGetBCInfo;
++ PFN_GET_BC_BUFFER pfnGetBCBuffer;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++
++} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++
++typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, u32*);
++typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(u32);
++
++typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
++{
++ u32 ui32TableSize;
++ PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice;
++ PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice;
++
++} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
++
++typedef int (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/kerneldisplay.h
+@@ -0,0 +1,153 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(u32, void **, PVRSRV_SYNC_DATA*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(void *);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(void *, u32*, DISPLAY_FORMAT*);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(void *,
++ DISPLAY_FORMAT*,
++ u32*,
++ DISPLAY_DIMS*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(void *, void **);
++typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(void *, DISPLAY_INFO*);
++typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(void *,
++ u32,
++ DISPLAY_SURF_ATTRIBUTES*,
++ DISPLAY_SURF_ATTRIBUTES*,
++ u32,
++ PVRSRV_SYNC_DATA**,
++ u32,
++ void **,
++ u32*);
++typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(void *,
++ void *);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(void *, void *, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(void *, void *, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(void *, void *, u32);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(void *, void *, u32);
++typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(void *,
++ void *,
++ u32*,
++ void **);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(void *,
++ void *,
++ u32,
++ void *,
++ u32,
++ IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(void *, void *);
++typedef void (*PFN_SET_DC_STATE)(void *, u32);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
++{
++ u32 ui32TableSize;
++ PFN_OPEN_DC_DEVICE pfnOpenDCDevice;
++ PFN_CLOSE_DC_DEVICE pfnCloseDCDevice;
++ PFN_ENUM_DC_FORMATS pfnEnumDCFormats;
++ PFN_ENUM_DC_DIMS pfnEnumDCDims;
++ PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer;
++ PFN_GET_DC_INFO pfnGetDCInfo;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain;
++ PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain;
++ PFN_SET_DC_DSTRECT pfnSetDCDstRect;
++ PFN_SET_DC_SRCRECT pfnSetDCSrcRect;
++ PFN_SET_DC_DSTCK pfnSetDCDstColourKey;
++ PFN_SET_DC_SRCCK pfnSetDCSrcColourKey;
++ PFN_GET_DC_BUFFERS pfnGetDCBuffers;
++ PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer;
++ PFN_SWAP_TO_DC_SYSTEM pfnSwapToDCSystem;
++ PFN_SET_DC_STATE pfnSetDCState;
++
++} PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef int (*PFN_ISR_HANDLER)(void*);
++
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, u32*);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(u32);
++typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(u32, void*, u32, void*, u32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(u32, PPFN_CMD_PROC,u32[][2], u32);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(u32, u32);
++typedef void (*PFN_DC_CMD_COMPLETE)(void *, int);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, void*, u32, u32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(u32, PFN_PRE_POWER, PFN_POST_POWER,
++ PFN_PRE_CLOCKSPEED_CHANGE, PFN_POST_CLOCKSPEED_CHANGE,
++ void *, PVRSRV_DEV_POWER_STATE, PVRSRV_DEV_POWER_STATE);
++
++typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
++{
++ u32 ui32TableSize;
++ PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice;
++ PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice;
++ PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction;
++ PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
++ PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList;
++ PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete;
++ PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler;
++ PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice;
++} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
++
++
++typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
++{
++
++ void * hExtDevice;
++
++
++ void * hExtSwapChain;
++
++
++ void * hExtBuffer;
++
++
++ void * hPrivateTag;
++
++
++ u32 ui32ClipRectCount;
++
++
++ IMG_RECT *psClipRect;
++
++
++ u32 ui32SwapInterval;
++
++} DISPLAYCLASS_FLIP_COMMAND;
++
++#define DC_FLIP_COMMAND 0
++
++#define DC_STATE_NO_FLUSH_COMMANDS 0
++#define DC_STATE_FLUSH_COMMANDS 1
++
++
++typedef int (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
++
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/lists.h
+@@ -0,0 +1,176 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LISTS_UTILS__
++#define __LISTS_UTILS__
++
++#include <stdarg.h>
++#include "img_types.h"
++
++#define DECLARE_LIST_FOR_EACH(TYPE) \
++void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
++void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
++{\
++ while(psHead)\
++ {\
++ pfnCallBack(psHead);\
++ psHead = psHead->psNext;\
++ }\
++}
++
++
++#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
++void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
++void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
++{\
++ va_list ap;\
++ while(psHead)\
++ {\
++ va_start(ap, pfnCallBack);\
++ pfnCallBack(psHead, ap);\
++ psHead = psHead->psNext;\
++ va_end(ap);\
++ }\
++}
++
++
++#define DECLARE_LIST_ANY(TYPE) \
++void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY(TYPE) \
++void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
++{ \
++ void *pResult;\
++ TYPE *psNextNode;\
++ pResult = NULL;\
++ psNextNode = psHead;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ pResult = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++
++#define DECLARE_LIST_ANY_VA(TYPE) \
++void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA(TYPE) \
++void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ void* pResult = NULL;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ pResult = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
++{ \
++ RTYPE result;\
++ TYPE *psNextNode;\
++ result = CONTINUE;\
++ psNextNode = psHead;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ result = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ RTYPE result = CONTINUE;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ result = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_REMOVE(TYPE) \
++void List_##TYPE##_Remove(TYPE *psNode)
++
++#define IMPLEMENT_LIST_REMOVE(TYPE) \
++void List_##TYPE##_Remove(TYPE *psNode)\
++{\
++ (*psNode->ppsThis)=psNode->psNext;\
++ if(psNode->psNext)\
++ {\
++ psNode->psNext->ppsThis = psNode->ppsThis;\
++ }\
++}
++
++#define DECLARE_LIST_INSERT(TYPE) \
++void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
++
++#define IMPLEMENT_LIST_INSERT(TYPE) \
++void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
++{\
++ psNewNode->ppsThis = ppsHead;\
++ psNewNode->psNext = *ppsHead;\
++ *ppsHead = psNewNode;\
++ if(psNewNode->psNext)\
++ {\
++ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
++ }\
++}
++
++
++#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/metrics.h
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _METRICS_
++#define _METRICS_
++
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#if defined(DEBUG) || defined(TIMING)
++
++
++typedef struct
++{
++ u32 ui32Start;
++ u32 ui32Stop;
++ u32 ui32Total;
++ u32 ui32Count;
++} Temporal_Data;
++
++extern Temporal_Data asTimers[];
++
++extern u32 PVRSRVTimeNow(void);
++extern void PVRSRVSetupMetricTimers(void *pvDevInfo);
++extern void PVRSRVOutputMetricTotals(void);
++
++
++#define PVRSRV_TIMER_DUMMY 0
++
++#define PVRSRV_TIMER_EXAMPLE_1 1
++#define PVRSRV_TIMER_EXAMPLE_2 2
++
++
++#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1)
++
++#define PVRSRV_TIME_START(X) { \
++ asTimers[X].ui32Count += 1; \
++ asTimers[X].ui32Count |= 0x80000000L; \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ asTimers[X].ui32Stop = 0; \
++ }
++
++#define PVRSRV_TIME_SUSPEND(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ }
++
++#define PVRSRV_TIME_RESUME(X) { \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ }
++
++#define PVRSRV_TIME_STOP(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ asTimers[X].ui32Total += asTimers[X].ui32Stop; \
++ asTimers[X].ui32Count &= 0x7FFFFFFFL; \
++ }
++
++#define PVRSRV_TIME_RESET(X) { \
++ asTimers[X].ui32Start = 0; \
++ asTimers[X].ui32Stop = 0; \
++ asTimers[X].ui32Total = 0; \
++ asTimers[X].ui32Count = 0; \
++ }
++
++
++#if defined(__sh__)
++
++#define TST_REG ((volatile u8 *) (psDevInfo->pvSOCRegsBaseKM))
++
++#define TCOR_2 ((volatile u32 *) (psDevInfo->pvSOCRegsBaseKM+28))
++#define TCNT_2 ((volatile u32 *) (psDevInfo->pvSOCRegsBaseKM+32))
++#define TCR_2 ((volatile u16 *)(psDevInfo->pvSOCRegsBaseKM+36))
++
++#define TIMER_DIVISOR 4
++
++#endif
++
++
++
++
++
++#else
++
++
++
++#define PVRSRV_TIME_START(X)
++#define PVRSRV_TIME_SUSPEND(X)
++#define PVRSRV_TIME_RESUME(X)
++#define PVRSRV_TIME_STOP(X)
++#define PVRSRV_TIME_RESET(X)
++
++#define PVRSRVSetupMetricTimers(X)
++#define PVRSRVOutputMetricTotals()
++
++
++
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/osfunc.h
+@@ -0,0 +1,321 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++
++#include <linux/hardirq.h>
++#include <linux/string.h>
++
++
++
++#define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP
++
++#define KERNEL_ID 0xffffffffL
++#define POWER_MANAGER_ID 0xfffffffeL
++#define ISR_ID 0xfffffffdL
++#define TIMER_ID 0xfffffffcL
++
++
++#define HOST_PAGESIZE OSGetPageSize
++#define HOST_PAGEMASK (~(HOST_PAGESIZE()-1))
++#define HOST_PAGEALIGN(addr) (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK 0xf
++#define PVRSRV_OS_PAGEABLE_HEAP 0x1
++#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2
++
++
++u32 OSClockus(void);
++u32 OSGetPageSize(void);
++PVRSRV_ERROR OSInstallDeviceLISR(void *pvSysData,
++ u32 ui32Irq,
++ char *pszISRName,
++ void *pvDeviceNode);
++PVRSRV_ERROR OSUninstallDeviceLISR(void *pvSysData);
++PVRSRV_ERROR OSInstallSystemLISR(void *pvSysData, u32 ui32Irq);
++PVRSRV_ERROR OSUninstallSystemLISR(void *pvSysData);
++PVRSRV_ERROR OSInstallMISR(void *pvSysData);
++PVRSRV_ERROR OSUninstallMISR(void *pvSysData);
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(void* pvLinAddr);
++void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes, u32 ui32Flags, void * *phOSMemHandle);
++int OSUnMapPhysToLin(void *pvLinAddr, u32 ui32Bytes, u32 ui32Flags, void * hOSMemHandle);
++
++PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes, u32 ui32Flags, void **ppvCpuVAddr, void * *phOSMemHandle);
++PVRSRV_ERROR OSUnReservePhys(void *pvCpuVAddr, u32 ui32Bytes, u32 ui32Flags, void * hOSMemHandle);
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++void OSFlushCPUCacheKM(void);
++void OSFlushCPUCacheRangeKM(void *pvRangeAddrStart,
++ void *pvRangeAddrEnd);
++#endif
++
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ void *pvCpuVAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterDiscontigMem(void *pvCpuVAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * hOSMemHandle);
++
++static inline PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, u32 ui32Bytes, u32 ui32Flags, void **ppvCpuVAddr, void * *phOSMemHandle)
++{
++ *ppvCpuVAddr = NULL;
++ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes, ui32Flags, phOSMemHandle);
++}
++
++static inline PVRSRV_ERROR OSUnReserveDiscontigPhys(void *pvCpuVAddr, u32 ui32Bytes, u32 ui32Flags, void * hOSMemHandle)
++{
++ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ void *pvCpuVAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterMem(void *pvCpuVAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * hOSMemHandle);
++
++
++
++PVRSRV_ERROR OSGetSubMemHandle(void * hOSMemHandle,
++ u32 ui32ByteOffset,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * *phOSMemHandleRet);
++PVRSRV_ERROR OSReleaseSubMemHandle(void * hOSMemHandle, u32 ui32Flags);
++
++u32 OSGetCurrentProcessIDKM(void);
++u32 OSGetCurrentThreadID( void );
++
++PVRSRV_ERROR OSAllocPages_Impl(u32 ui32Flags, u32 ui32Size, u32 ui32PageSize, void * *ppvLinAddr, void * *phPageAlloc);
++PVRSRV_ERROR OSFreePages(u32 ui32Flags, u32 ui32Size, void * pvLinAddr, void * hPageAlloc);
++
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \
++ (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", size)), \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++
++ #define OSAllocPages(flags, size, pageSize, linAddr, pageAlloc) \
++ (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", size)), \
++ OSAllocPages_Impl(flags, size, pageSize, linAddr, pageAlloc))
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%X)", linAddr)), \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++#else
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++
++ #define OSAllocPages OSAllocPages_Impl
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++#endif
++
++#ifdef PVRSRV_DEBUG_OS_MEMORY
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(u32 ui32Flags,
++ u32 ui32Size,
++ void * *ppvCpuVAddr,
++ void * *phBlockAlloc,
++ char *pszFilename,
++ u32 ui32Line);
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(u32 ui32Flags,
++ u32 ui32Size,
++ void * pvCpuVAddr,
++ void * hBlockAlloc,
++ char *pszFilename,
++ u32 ui32Line);
++
++
++ typedef struct
++ {
++ u8 sGuardRegionBefore[8];
++ char sFileName[128];
++ u32 uLineNo;
++ u32 uSize;
++ u32 uSizeParityCheck;
++ enum valid_tag
++ { isFree = 0x277260FF,
++ isAllocated = 0x260511AA
++ } eValid;
++ } OSMEM_DEBUG_INFO;
++
++ #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO))
++ #define TEST_BUFFER_PADDING_AFTER (8)
++ #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER)
++#else
++ #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations
++ #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations
++#endif
++
++#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ PVRSRV_ERROR OSAllocMem_Impl(u32 ui32Flags, u32 ui32Size, void * *ppvLinAddr, void * *phBlockAlloc, char *pszFilename, u32 ui32Line);
++ PVRSRV_ERROR OSFreeMem_Impl(u32 ui32Flags, u32 ui32Size, void * pvLinAddr, void * hBlockAlloc, char *pszFilename, u32 ui32L ine);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl
++ #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl
++#else
++ PVRSRV_ERROR OSAllocMem_Impl(u32 ui32Flags, u32 ui32Size, void * *ppvLinAddr, void * *phBlockAlloc);
++ PVRSRV_ERROR OSFreeMem_Impl(u32 ui32Flags, u32 ui32Size, void * pvLinAddr, void * hBlockAlloc);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSAllocMem_Impl(flags, size, addr, blockAlloc)
++ #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSFreeMem_Impl(flags, size, addr, blockAlloc)
++#endif
++
++
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(void *hOSMemHandle, u32 ui32ByteOffset);
++
++PVRSRV_ERROR OSInitEnvData(void * *ppvEnvSpecificData);
++PVRSRV_ERROR OSDeInitEnvData(void * pvEnvSpecificData);
++
++PVRSRV_ERROR OSEventObjectCreate(const char *pszName,
++ PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectSignal(void * hOSEventKM);
++PVRSRV_ERROR OSEventObjectWait(void * hOSEventKM);
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ void * *phOSEvent);
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ void * hOSEventKM);
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(u32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
++PVRSRV_ERROR OSBaseFreeContigMemory(u32 ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
++
++void * MapUserFromKernel(void * pvLinAddrKM,u32 ui32Size,void * *phMemBlock);
++void * OSMapHWRegsIntoUserSpace(void * hDevCookie, IMG_SYS_PHYADDR sRegAddr, u32 ulSize, void * *ppvProcess);
++void OSUnmapHWRegsFromUserSpace(void * hDevCookie, void * pvUserAddr, void * pvProcess);
++
++void UnmapUserFromKernel(void * pvLinAddrUM, u32 ui32Size, void * hMemBlock);
++
++PVRSRV_ERROR OSMapPhysToUserSpace(void * hDevCookie,
++ IMG_SYS_PHYADDR sCPUPhysAddr,
++ u32 uiSizeInBytes,
++ u32 ui32CacheFlags,
++ void * *ppvUserAddr,
++ u32 *puiActualSize,
++ void * hMappingHandle);
++
++PVRSRV_ERROR OSUnmapPhysToUserSpace(void * hDevCookie,
++ void * pvUserAddr,
++ void * pvProcess);
++
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, u32 ui32ID);
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, u32 ui32ID);
++int OSIsResourceLocked(PVRSRV_RESOURCE *psResource, u32 ui32ID);
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
++void OSBreakResourceLock(PVRSRV_RESOURCE *psResource, u32 ui32ID);
++void OSWaitus(u32 ui32Timeus);
++void OSReleaseThreadQuanta(void);
++u32 OSPCIReadDword(u32 ui32Bus, u32 ui32Dev, u32 ui32Func, u32 ui32Reg);
++void OSPCIWriteDword(u32 ui32Bus, u32 ui32Dev, u32 ui32Func, u32 ui32Reg, u32 ui32Value);
++
++#ifndef OSReadHWReg
++u32 OSReadHWReg(void * pvLinRegBaseAddr, u32 ui32Offset);
++#endif
++#ifndef OSWriteHWReg
++void OSWriteHWReg(void * pvLinRegBaseAddr, u32 ui32Offset, u32 ui32Value);
++#endif
++
++typedef void (*PFN_TIMER_FUNC)(void*);
++void * OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, u32 ui32MsTimeout);
++PVRSRV_ERROR OSRemoveTimer (void * hTimer);
++PVRSRV_ERROR OSEnableTimer (void * hTimer);
++PVRSRV_ERROR OSDisableTimer (void * hTimer);
++
++PVRSRV_ERROR OSGetSysMemSize(u32 *pui32Bytes);
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
++ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
++ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++
++struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(u16 ui16VendorID, u16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 *pui32IRQ);
++u32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index);
++u32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index);
++u32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++
++PVRSRV_ERROR OSScheduleMISR(void *pvSysData);
++
++void OSPanic(void);
++
++int OSProcHasPrivSrvInit(void);
++
++typedef enum _img_verify_test
++{
++ PVR_VERIFY_WRITE = 0,
++ PVR_VERIFY_READ
++} IMG_VERIFY_TEST;
++
++int OSAccessOK(IMG_VERIFY_TEST eVerification, void *pvUserPtr, u32 ui32Bytes);
++
++PVRSRV_ERROR OSCopyToUser(void * pvProcess, void *pvDest, void *pvSrc, u32 ui32Bytes);
++PVRSRV_ERROR OSCopyFromUser(void * pvProcess, void *pvDest, void *pvSrc, u32 ui32Bytes);
++
++PVRSRV_ERROR OSAcquirePhysPageAddr(void* pvCPUVAddr,
++ u32 ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ void * *phOSWrapMem,
++ int bWrapWorkaround);
++PVRSRV_ERROR OSReleasePhysPageAddr(void * hOSWrapMem);
++
++
++#define OS_SUPPORTS_IN_LISR
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/osperproc.h
+@@ -0,0 +1,37 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __OSPERPROC_H__
++#define __OSPERPROC_H__
++
++
++PVRSRV_ERROR OSPerProcessPrivateDataInit(void * *phOsPrivateData);
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(void * hOsPrivateData);
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pdump_km.h
+@@ -0,0 +1,439 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++
++#define SGX_SUPPORT_COMMON_PDUMP
++
++#if defined(SUPPORT_SGX)
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++#include <pdump_osfunc.h>
++#endif
++#endif
++
++#define PDUMP_FLAGS_NEVER 0x08000000UL
++#define PDUMP_FLAGS_TOOUT2MEM 0x10000000UL
++#define PDUMP_FLAGS_LASTFRAME 0x20000000UL
++#define PDUMP_FLAGS_RESETLFBUFFER 0x40000000UL
++#define PDUMP_FLAGS_CONTINUOUS 0x80000000UL
++
++#define PDUMP_PD_UNIQUETAG (void *)0
++#define PDUMP_PT_UNIQUETAG (void *)0
++
++#define PDUMP_STREAM_PARAM2 0
++#define PDUMP_STREAM_SCRIPT2 1
++#define PDUMP_STREAM_DRIVERINFO 2
++#define PDUMP_NUM_STREAMS 3
++
++
++#ifndef PDUMP
++#define MAKEUNIQUETAG(hMemInfo) (0)
++#endif
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++ PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Value,
++ u32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ u32 ui32Flags,
++ void * hUniqueTag);
++
++ PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData,
++ void * pvAltLinAddr,
++ void * pvLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * hUniqueTag);
++
++ PVRSRV_ERROR PDumpMemKM(void * pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ void * hUniqueTag);
++ PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_DEV_PHYADDR *pPages,
++ u32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ u32 ui32Start,
++ u32 ui32Length,
++ u32 ui32Flags,
++ void * hUniqueTag);
++
++ PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32Bytes,
++ u32 ui32Flags,
++ int bInitialisePages,
++ void * hUniqueTag1,
++ void * hUniqueTag2);
++ void PDumpInitCommon(void);
++ void PDumpDeInitCommon(void);
++ void PDumpInit(void);
++ void PDumpDeInit(void);
++ PVRSRV_ERROR PDumpStartInitPhaseKM(void);
++ PVRSRV_ERROR PDumpStopInitPhaseKM(void);
++ PVRSRV_ERROR PDumpSetFrameKM(u32 ui32Frame);
++ PVRSRV_ERROR PDumpCommentKM(char *pszComment, u32 ui32Flags);
++ PVRSRV_ERROR PDumpDriverInfoKM(char *pszString, u32 ui32Flags);
++
++ PVRSRV_ERROR PDumpRegWithFlagsKM(u32 ui32RegAddr,
++ u32 ui32RegValue,
++ u32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolWithFlagsKM(u32 ui32RegAddr,
++ u32 ui32RegValue,
++ u32 ui32Mask,
++ u32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolKM(u32 ui32RegAddr,
++ u32 ui32RegValue,
++ u32 ui32Mask);
++
++ PVRSRV_ERROR PDumpBitmapKM(char *pszFileName,
++ u32 ui32FileOffset,
++ u32 ui32Width,
++ u32 ui32Height,
++ u32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ u32 ui32PDumpFlags);
++ PVRSRV_ERROR PDumpReadRegKM(char *pszFileName,
++ u32 ui32FileOffset,
++ u32 ui32Address,
++ u32 ui32Size,
++ u32 ui32PDumpFlags);
++
++ int PDumpIsSuspended(void);
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP) || !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpRegKM(u32 dwReg,
++ u32 dwData);
++ PVRSRV_ERROR PDumpComment(char* pszFormat, ...);
++ PVRSRV_ERROR PDumpCommentWithFlags(u32 ui32Flags,
++ char* pszFormat,
++ ...);
++
++ PVRSRV_ERROR PDumpPDReg(u32 ui32Reg,
++ u32 ui32dwData,
++ void * hUniqueTag);
++ PVRSRV_ERROR PDumpPDRegWithFlags(u32 ui32Reg,
++ u32 ui32Data,
++ u32 ui32Flags,
++ void * hUniqueTag);
++#else
++ void PDumpRegKM(u32 dwReg,
++ u32 dwData);
++ void PDumpComment(char* pszFormat, ...);
++ void PDumpCommentWithFlags(u32 ui32Flags,
++ char* pszFormat,
++ ...);
++
++
++ void PDumpPDReg(u32 ui32Reg,
++ u32 ui32dwData,
++ void * hUniqueTag);
++ void PDumpPDRegWithFlags(u32 ui32Reg,
++ u32 ui32Data,
++ u32 ui32Flags,
++ void * hUniqueTag);
++#endif
++
++ void PDumpMsvdxRegRead(const char* const pRegRegion,
++ const u32 dwRegOffset);
++
++ void PDumpMsvdxRegWrite(const char* const pRegRegion,
++ const u32 dwRegOffset,
++ const u32 dwData);
++
++ PVRSRV_ERROR PDumpMsvdxRegPol(const char* const pRegRegion,
++ const u32 ui32Offset,
++ const u32 ui32CheckFuncIdExt,
++ const u32 ui32RequValue,
++ const u32 ui32Enable,
++ const u32 ui32PollCount,
++ const u32 ui32TimeOut);
++
++ PVRSRV_ERROR PDumpMsvdxWriteRef(const char* const pRegRegion,
++ const u32 ui32VLROffset,
++ const u32 ui32Physical );
++
++ int PDumpIsLastCaptureFrameKM(void);
++ int PDumpIsCaptureFrameKM(void);
++
++ void PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE eDeviceType,
++ u32 ui32DevVAddr,
++ u32 * pui32PhysPages,
++ u32 ui32NumPages,
++ void * hUniqueTag);
++ PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace,
++ u32 *pui32MMUContextID,
++ u32 ui32MMUType,
++ void * hUniqueTag1,
++ void *pvPDCPUAddr);
++ PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ char *pszMemSpace,
++ u32 ui32MMUContextID,
++ u32 ui32MMUType);
++
++ PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ void * hUniqueTag1,
++ void * hUniqueTag2);
++
++ int PDumpTestNextFrame(u32 ui32CurrentFrame);
++
++
++#if defined (COMMON_PDUMP_OS_SUPPORT) && !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpTASignatureRegisters(u32 ui32DumpFrameNum,
++ u32 ui32TAKickCount,
++ int bLastFrame,
++ u32 *pui32Registers,
++ u32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDump3DSignatureRegisters(u32 ui32DumpFrameNum,
++ int bLastFrame,
++ u32 *pui32Registers,
++ u32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpCounterRegisters(u32 ui32DumpFrameNum,
++ int bLastFrame,
++ u32 *pui32Registers,
++ u32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpRegRead(const u32 dwRegOffset, u32 ui32Flags);
++
++ PVRSRV_ERROR PDumpCycleCountRegRead(const u32 dwRegOffset, int bLastFrame);
++
++ PVRSRV_ERROR PDumpIDLWithFlags(u32 ui32Clocks, u32 ui32Flags);
++ PVRSRV_ERROR PDumpIDL(u32 ui32Clocks);
++
++ PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ u32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ void * hOSMemHandle,
++ u32 ui32NumBytes,
++ u32 ui32PageSize,
++ void * hUniqueTag);
++ PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32NumBytes,
++ void * hUniqueTag);
++ PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ u32 ui32NumBytes,
++ u32 ui32PageSize,
++ void * hUniqueTag,
++ int bInterleaved);
++ PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32NumBytes,
++ void * hUniqueTag);
++
++ PVRSRV_ERROR PDumpHWPerfCBKM(char *pszFileName,
++ u32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size,
++ u32 ui32PDumpFlags);
++
++ PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ u32 ui32ROffOffset,
++ u32 ui32WPosVal,
++ u32 ui32PacketSize,
++ u32 ui32BufferSize,
++ u32 ui32Flags,
++ void * hUniqueTag);
++
++#else
++ void PDumpTASignatureRegisters(u32 ui32DumpFrameNum,
++ u32 ui32TAKickCount,
++ int bLastFrame,
++ u32 *pui32Registers,
++ u32 ui32NumRegisters);
++ void PDump3DSignatureRegisters(u32 ui32DumpFrameNum,
++ int bLastFrame,
++ u32 *pui32Registers,
++ u32 ui32NumRegisters);
++ void PDumpCounterRegisters(u32 ui32DumpFrameNum,
++ int bLastFrame,
++ u32 *pui32Registers,
++ u32 ui32NumRegisters);
++
++ void PDumpRegRead(const u32 dwRegOffset, u32 ui32Flags);
++ void PDumpCycleCountRegRead(const u32 dwRegOffset, int bLastFrame);
++
++ void PDumpIDLWithFlags(u32 ui32Clocks, u32 ui32Flags);
++ void PDumpIDL(u32 ui32Clocks);
++
++
++ void PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ u32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ void * hOSMemHandle,
++ u32 ui32NumBytes,
++ u32 ui32PageSize,
++ void * hUniqueTag);
++ void PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32NumBytes,
++ void * hUniqueTag);
++ void PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ u32 ui32NumBytes,
++ u32 ui32PageSize,
++ void * hUniqueTag,
++ int bInterleaved);
++ void PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ u32 ui32NumBytes,
++ void * hUniqueTag);
++
++ void PDumpHWPerfCBKM(char *pszFileName,
++ u32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size,
++ u32 ui32PDumpFlags);
++
++ void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ u32 ui32ROffOffset,
++ u32 ui32WPosVal,
++ u32 ui32PacketSize,
++ u32 ui32BufferSize,
++ u32 ui32Flags,
++ void * hUniqueTag);
++
++#endif
++
++ void PDumpVGXMemToFile(char *pszFileName,
++ u32 ui32FileOffset,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ u32 uiAddr,
++ u32 ui32Size,
++ u32 ui32PDumpFlags,
++ void * hUniqueTag);
++
++ void PDumpSuspendKM(void);
++ void PDumpResumeKM(void);
++
++ #define PDUMPMEMPOL PDumpMemPolKM
++ #define PDUMPMEM PDumpMemKM
++ #define PDUMPMEM2 PDumpMem2KM
++ #define PDUMPMEMUM PDumpMemUM
++ #define PDUMPINIT PDumpInitCommon
++ #define PDUMPDEINIT PDumpDeInitCommon
++ #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM
++ #define PDUMPTESTFRAME PDumpIsCaptureFrameKM
++ #define PDUMPTESTNEXTFRAME PDumpTestNextFrame
++ #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM
++ #define PDUMPREG PDumpRegKM
++ #define PDUMPCOMMENT PDumpComment
++ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
++ #define PDUMPREGPOL PDumpRegPolKM
++ #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM
++ #define PDUMPMALLOCPAGES PDumpMallocPages
++ #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
++ #define PDUMPSETMMUCONTEXT PDumpSetMMUContext
++ #define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
++ #define PDUMPFREEPAGES PDumpFreePages
++ #define PDUMPFREEPAGETABLE PDumpFreePageTable
++ #define PDUMPPDREG PDumpPDReg
++ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
++ #define PDUMPCBP PDumpCBP
++ #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys
++ #define PDUMPENDINITPHASE PDumpStopInitPhaseKM
++ #define PDUMPMSVDXREGWRITE PDumpMsvdxRegWrite
++ #define PDUMPMSVDXREGREAD PDumpMsvdxRegRead
++ #define PDUMPMSVDXPOL PDumpMsvdxRegPol
++ #define PDUMPMSVDXWRITEREF PDumpMsvdxWriteRef
++ #define PDUMPBITMAPKM PDumpBitmapKM
++ #define PDUMPDRIVERINFO PDumpDriverInfoKM
++ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
++ #define PDUMPIDL PDumpIDL
++ #define PDUMPSUSPEND PDumpSuspendKM
++ #define PDUMPRESUME PDumpResumeKM
++
++#else
++ #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
++ #define PDUMPMEMPOL(args...)
++ #define PDUMPMEM(args...)
++ #define PDUMPMEM2(args...)
++ #define PDUMPMEMUM(args...)
++ #define PDUMPINIT(args...)
++ #define PDUMPDEINIT(args...)
++ #define PDUMPISLASTFRAME(args...)
++ #define PDUMPTESTFRAME(args...)
++ #define PDUMPTESTNEXTFRAME(args...)
++ #define PDUMPREGWITHFLAGS(args...)
++ #define PDUMPREG(args...)
++ #define PDUMPCOMMENT(args...)
++ #define PDUMPREGPOL(args...)
++ #define PDUMPREGPOLWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGES(args...)
++ #define PDUMPMALLOCPAGETABLE(args...)
++ #define PDUMPSETMMUCONTEXT(args...)
++ #define PDUMPCLEARMMUCONTEXT(args...)
++ #define PDUMPFREEPAGES(args...)
++ #define PDUMPFREEPAGETABLE(args...)
++ #define PDUMPPDREG(args...)
++ #define PDUMPPDREGWITHFLAGS(args...)
++ #define PDUMPSYNC(args...)
++ #define PDUMPCOPYTOMEM(args...)
++ #define PDUMPWRITE(args...)
++ #define PDUMPCBP(args...)
++ #define PDUMPCOMMENTWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGESPHYS(args...)
++ #define PDUMPENDINITPHASE(args...)
++ #define PDUMPMSVDXREG(args...)
++ #define PDUMPMSVDXREGWRITE(args...)
++ #define PDUMPMSVDXREGREAD(args...)
++ #define PDUMPMSVDXPOLEQ(args...)
++ #define PDUMPMSVDXPOL(args...)
++ #define PDUMPBITMAPKM(args...)
++ #define PDUMPDRIVERINFO(args...)
++ #define PDUMPIDLWITHFLAGS(args...)
++ #define PDUMPIDL(args...)
++ #define PDUMPSUSPEND(args...)
++ #define PDUMPRESUME(args...)
++ #define PDUMPMSVDXWRITEREF(args...)
++ #else
++ #error Compiler not specified
++ #endif
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pdump_osfunc.h
+@@ -0,0 +1,137 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PDUMP_OSFUNC_H__
++#define __PDUMP_OSFUNC_H__
++
++#include <stdarg.h>
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++#define MAX_PDUMP_STRING_LENGTH (256)
++#define PDUMP_GET_SCRIPT_STRING() \
++ void * hScript; \
++ u32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_MSG_STRING() \
++ void * hMsg; \
++ u32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetMessageString(&hMsg, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_FILE_STRING() \
++ char *pszFileName; \
++ u32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
++ void * hScript; \
++ char *pszFileName; \
++ u32 ui32MaxLenScript; \
++ u32 ui32MaxLenFileName; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
++ if(eError != PVRSRV_OK) return eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
++ if(eError != PVRSRV_OK) return eError;
++
++
++
++ PVRSRV_ERROR PDumpOSGetScriptString(void * *phScript, u32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetMessageString(void * *phMsg, u32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetFilenameString(char **ppszFile, u32 *pui32MaxLen);
++
++
++
++
++#define PDUMP_va_list va_list
++#define PDUMP_va_start va_start
++#define PDUMP_va_end va_end
++
++
++
++void * PDumpOSGetStream(u32 ePDumpStream);
++
++u32 PDumpOSGetStreamOffset(u32 ePDumpStream);
++
++u32 PDumpOSGetParamFileNum(void);
++
++void PDumpOSCheckForSplitting(void * hStream, u32 ui32Size, u32 ui32Flags);
++
++int PDumpOSIsSuspended(void);
++
++int PDumpOSJTInitialised(void);
++
++int PDumpOSWriteString(void * hDbgStream,
++ u8 *psui8Data,
++ u32 ui32Size,
++ u32 ui32Flags);
++
++int PDumpOSWriteString2(void * hScript, u32 ui32Flags);
++
++PVRSRV_ERROR PDumpOSBufprintf(void * hBuf, u32 ui32ScriptSizeMax, char* pszFormat, ...);
++
++void PDumpOSDebugPrintf(char* pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSSprintf(char *pszComment, u32 ui32ScriptSizeMax, char *pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSVSprintf(char *pszMsg, u32 ui32ScriptSizeMax, char* pszFormat, PDUMP_va_list vaArgs);
++
++u32 PDumpOSBuflen(void * hBuffer, u32 ui32BufferSizeMax);
++
++void PDumpOSVerifyLineEnding(void * hBuffer, u32 ui32BufferSizeMax);
++
++void PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ void * hOSMemHandle,
++ u32 ui32Offset,
++ u8 *pui8LinAddr,
++ u32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++void PDumpOSCPUVAddrToPhysPages(void * hOSMemHandle,
++ u32 ui32Offset,
++ u8 * pui8LinAddr,
++ u32 *pui32PageOffset);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pdumpdefs.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++typedef enum _PDUMP_PIXEL_FORMAT_
++{
++ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
++
++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++
++} PDUMP_PIXEL_FORMAT;
++
++typedef enum _PDUMP_MEM_FORMAT_
++{
++ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
++
++ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++} PDUMP_MEM_FORMAT;
++
++typedef enum _PDUMP_POLL_OPERATOR
++{
++ PDUMP_POLL_OPERATOR_EQUAL = 0,
++ PDUMP_POLL_OPERATOR_LESS = 1,
++ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++ PDUMP_POLL_OPERATOR_GREATER = 3,
++ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/perproc.h
+@@ -0,0 +1,91 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++typedef struct _PVRSRV_PER_PROCESS_DATA_
++{
++ u32 ui32PID;
++ void * hBlockAlloc;
++ PRESMAN_CONTEXT hResManContext;
++ void * hPerProcData;
++ PVRSRV_HANDLE_BASE *psHandleBase;
++#if defined (PVR_SECURE_HANDLES)
++
++ int bHandlesBatched;
++#endif
++ u32 ui32RefCount;
++
++
++ int bInitProcess;
++
++
++ void * hOsPrivateData;
++} PVRSRV_PER_PROCESS_DATA;
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(u32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(u32 ui32PID);
++void PVRSRVPerProcessDataDisconnect(u32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(void);
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(void);
++
++static inline
++PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(void)
++{
++ return PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++}
++
++
++static inline
++void * PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ return (psPerProc != NULL) ? psPerProc->hOsPrivateData : NULL;
++}
++
++
++static inline
++void * PVRSRVPerProcessPrivateData(u32 ui32PID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID));
++}
++
++static inline
++void * PVRSRVFindPerProcessPrivateData(void)
++{
++ return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData());
++}
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/power.h
+@@ -0,0 +1,120 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++
++typedef struct _PVRSRV_POWER_DEV_TAG_
++{
++ PFN_PRE_POWER pfnPrePower;
++ PFN_POST_POWER pfnPostPower;
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
++ void * hDevCookie;
++ u32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
++ struct _PVRSRV_POWER_DEV_TAG_ *psNext;
++ struct _PVRSRV_POWER_DEV_TAG_ **ppsThis;
++
++} PVRSRV_POWER_DEV;
++
++typedef enum _PVRSRV_INIT_SERVER_STATE_
++{
++ PVRSRV_INIT_SERVER_Unspecified = -1,
++ PVRSRV_INIT_SERVER_RUNNING = 0,
++ PVRSRV_INIT_SERVER_RAN = 1,
++ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
++ PVRSRV_INIT_SERVER_NUM = 3,
++ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
++
++
++int PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState);
++
++
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, int bState);
++
++
++
++
++PVRSRV_ERROR PVRSRVPowerLock(u32 ui32CallerID,
++ int bSystemPowerEvent);
++
++void PVRSRVPowerUnlock(u32 ui32CallerID);
++
++
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ u32 ui32CallerID,
++ int bRetainMutex);
++
++
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++
++
++PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState);
++
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(u32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ void * hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
++
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice (u32 ui32DeviceIndex);
++
++
++int PVRSRVIsDevicePowered(u32 ui32DeviceIndex);
++
++
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(u32 ui32DeviceIndex,
++ int bIdleDevice,
++ void *pvInfo);
++
++
++void PVRSRVDevicePostClockSpeedChange(u32 ui32DeviceIndex,
++ int bIdleDevice,
++ void *pvInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pvr_bridge.h
+@@ -0,0 +1,1405 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "servicesint.h"
++
++#ifdef __linux__
++
++ #include <linux/ioctl.h>
++
++ #define PVRSRV_IOC_GID 'g'
++ #define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++
++#else
++
++ #error Unknown platform: Cannot define ioctls
++
++ #define PVRSRV_IO(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOW(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOR(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOWR(INDEX) (PVRSRV_IOC_GID + INDEX)
++
++ #define PVRSRV_BRIDGE_BASE PVRSRV_IOC_GID
++#endif
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0UL
++#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
++#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
++#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++
++#ifdef INTEL_D3_CHANGES
++#define PVRSRV_BRIDGE_WAIT_FOR_WRITE_OP_SYNC PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
++#else
++#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++#endif
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_FB_STATS PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_RELEASE_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#endif
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_PDUMP_REG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_PDUMP_REGPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_PDUMP_COMMENT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_PDUMP_PDREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
++
++
++#define PVRSRV_KERNEL_MODE_CLIENT 1
++
++typedef struct PVRSRV_BRIDGE_RETURN_TAG
++{
++ PVRSRV_ERROR eError;
++ void *pvData;
++
++}PVRSRV_BRIDGE_RETURN;
++
++
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++ u32 ui32BridgeID;
++ u32 ui32Size;
++ void *pvParamIn;
++ u32 ui32InBufferSize;
++ void *pvParamOut;
++ u32 ui32OutBufferSize;
++
++ void * hKernelServices;
++}PVRSRV_BRIDGE_PACKAGE;
++
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 uiDevIndex;
++ PVRSRV_DEVICE_TYPE eDeviceType;
++
++} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS sDeviceClass;
++} PVRSRV_BRIDGE_IN_ENUMCLASS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++
++} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS DeviceClass;
++ void* pvDevInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++
++}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hDevMemHeap;
++ u32 ui32Attribs;
++ u32 ui32Size;
++ u32 ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ void * pvLinAddr;
++ void * hMappingInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32Flags;
++
++} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ u32 ui32QueueSize;
++
++}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++
++}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hMHandle;
++} PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hMHandle;
++} PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevMemHeap;
++ IMG_DEV_VIRTADDR *psDevVAddr;
++ u32 ui32Size;
++ u32 ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hKernelServices;
++}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelMemInfo;
++ void * hDstDevMemHeap;
++
++}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceClassBuffer;
++ void * hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ void * hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ u32 ui32Offset;
++ u32 ui32Value;
++ u32 ui32Mask;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ int bIsRead;
++ u32 ui32Value;
++ u32 ui32Mask;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * pvLinAddr;
++ void * pvAltLinAddr;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ u32 ui32Offset;
++ u32 ui32Bytes;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
++{
++ u32 ui32BridgeFlags;
++ void * pvAltLinAddr;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ u32 ui32Offset;
++ u32 ui32Bytes;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ u32 ui32Mask;
++ u32 ui32Flags;
++}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelMemInfo;
++ IMG_DEV_PHYADDR *pPages;
++ u32 ui32NumPages;
++ IMG_DEV_VIRTADDR sDevAddr;
++ u32 ui32Start;
++ u32 ui32Length;
++ int bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
++{
++ u32 ui32BridgeFlags;
++ char szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32Frame;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
++{
++ u32 ui32BridgeFlags;
++ char szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ u32 ui32FileOffset;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32StrideInBytes;
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ u32 ui32Size;
++ PDUMP_PIXEL_FORMAT ePixelFormat;
++ PDUMP_MEM_FORMAT eMemFormat;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
++{
++ u32 ui32BridgeFlags;
++ char szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ u32 ui32FileOffset;
++ u32 ui32Address;
++ u32 ui32Size;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_READREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ char szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ int bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelMemInfo;
++ u32 ui32Offset;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32RegOffset;
++ int bLastFrame;
++}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
++{
++
++ PVRSRV_ERROR eError;
++ void * hDevCookie;
++
++} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32NumDevices;
++ u32 ui32DevID[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMCLASS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32DeviceID;
++ void * hDevCookie;
++
++}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hDeviceKM;
++
++}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hDevMemContext;
++ void *pvLinAddr;
++ u32 ui32ByteSize;
++ u32 ui32PageOffset;
++ int bPhysContig;
++ u32 ui32NumPageTableEntries;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ u32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
++
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4
++#define PVRSRV_MAX_DC_CLIP_RECTS 32
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32Count;
++ DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ DISPLAY_FORMAT sFormat;
++
++}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32Count;
++ DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ DISPLAY_INFO sDisplayInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hBuffer;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ u32 ui32Flags;
++ DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++ DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++ u32 ui32BufferCount;
++ u32 ui32OEMFlags;
++ u32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hSwapChain;
++ u32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ void * hSwapChain;
++
++} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ void * hSwapChain;
++ IMG_RECT sRect;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ void * hSwapChain;
++ u32 ui32CKColour;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ void * hSwapChain;
++
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32BufferCount;
++ void * ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++
++} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ void * hBuffer;
++ u32 ui32SwapInterval;
++ void * hPrivateTag;
++ u32 ui32ClipRectCount;
++ IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ void * hSwapChain;
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32DeviceID;
++ void * hDevCookie;
++
++} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hDeviceKM;
++
++} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ BUFFER_INFO sBufferInfo;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDeviceKM;
++ u32 ui32BufferIndex;
++
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hBuffer;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hDevMemContext;
++ u32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hDevMemHeap;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ u64 ui64Stamp;
++#endif
++
++} PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
++{
++ PVRSRV_ERROR eError;
++ void * pvLinAddr;
++ void * hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32Total;
++ u32 ui32Free;
++ u32 ui32LargestBlock;
++
++} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
++
++
++#include "pvrmmap.h"
++typedef struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ u32 ui32MMapOffset;
++
++
++ u32 ui32ByteOffset;
++
++
++ u32 ui32RealByteSize;
++
++
++ u32 ui32UserVAddr;
++
++} PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ int bMUnmap;
++
++
++ u32 ui32UserVAddr;
++
++
++ u32 ui32RealByteSize;
++} PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
++{
++ PVRSRV_ERROR eError;
++ int bIsCapturing;
++
++} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32Total;
++ u32 ui32Available;
++
++} PVRSRV_BRIDGE_IN_GET_FB_STATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ IMG_SYS_PHYADDR sSysPhysAddr;
++ u32 uiSizeInBytes;
++
++} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
++{
++ void * pvUserAddr;
++ u32 uiActualSize;
++ void * pvProcess;
++
++} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * pvUserAddr;
++ void * pvProcess;
++
++} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
++{
++ void * *ppvTbl;
++ u32 uiTblSize;
++
++} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * pvProcess;
++
++} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_SYS_PHYADDR sRegsPhysBase;
++ void *pvRegsBase;
++ void * pvProcess;
++ u32 ulNoOfEntries;
++ void * pvTblLinAddr;
++
++} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * pvProcess;
++ void *pvRegsBase;
++
++} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
++
++typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ u32 ui32StatusAndMask;
++ PVRSRV_ERROR eError;
++
++} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
++
++typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
++{
++ u32 ui32BridgeFlags;
++ int bInitSuccesful;
++} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32Flags;
++ u32 ui32Size;
++}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelMemInfo;
++}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
++{
++ u32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevMemContext;
++}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
++{
++ void * hOSEvent;
++ PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++ void * hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelSyncInfo;
++ u32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hKernelSyncInfo;
++ u32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ u32 ui32ReadOpsPending;
++ u32 ui32WriteOpsPending;
++
++} PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS;
++
++#ifdef INTEL_D3_CHANGES
++
++typedef struct PVRSRV_BRIDGE_IN_WAIT_FOR_WRITE_OP_SYNC_TAG
++{
++ void * hKernelSyncInfo;
++} PVRSRV_BRIDGE_IN_WAIT_FOR_WRITE_OP_SYNC;
++
++typedef struct PVRSRV_BRIDGE_OUT_WAIT_FOR_WRITE_OP_SYNC_TAG
++{
++ PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_WAIT_FOR_WRITE_OP_SYNC;
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pvr_bridge_km.h
+@@ -0,0 +1,295 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++PVRSRV_ERROR LinuxBridgeInit(void);
++void LinuxBridgeDeInit(void);
++
++
++
++PVRSRV_ERROR PVRSRVEnumerateDevicesKM(u32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++
++PVRSRV_ERROR PVRSRVAcquireDeviceDataKM(u32 uiDevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ void * *phDevCookie);
++
++
++PVRSRV_ERROR PVRSRVCreateCommandQueueKM(u32 ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++
++PVRSRV_ERROR PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++
++PVRSRV_ERROR PVRSRVGetDeviceMemHeapsKM(void * hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++
++PVRSRV_ERROR PVRSRVCreateDeviceMemContextKM(void * hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * *phDevMemContext,
++ u32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ int *pbCreated,
++ int *pbShared);
++
++
++
++PVRSRV_ERROR PVRSRVDestroyDeviceMemContextKM(void * hDevCookie,
++ void * hDevMemContext,
++ int *pbDestroyed);
++
++
++
++PVRSRV_ERROR PVRSRVGetDeviceMemHeapInfoKM(void * hDevCookie,
++ void * hDevMemContext,
++ u32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ int *pbShared
++ );
++
++
++
++PVRSRV_ERROR _PVRSRVAllocDeviceMemKM(void * hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDevMemHeap,
++ u32 ui32Flags,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMemKM(" #devCookie ", " #perProc ", " #devMemHeap ", " #flags ", " #size \
++ ", " #alignment "," #memInfo "): " logStr " (size = 0x%;x)", size)),\
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo))
++#else
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo)
++#endif
++
++
++
++
++PVRSRV_ERROR PVRSRVFreeDeviceMemKM(void * hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++
++
++PVRSRV_ERROR PVRSRVDissociateDeviceMemKM(void * hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVReserveDeviceVirtualMemKM(void * hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++
++PVRSRV_ERROR PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ void * hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++
++PVRSRV_ERROR PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVWrapExtMemoryKM(void * hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDevMemContext,
++ u32 ui32ByteSize,
++ u32 ui32PageOffset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ void *pvLinAddr,
++ u32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++
++PVRSRV_ERROR PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++ u32 *pui32DevCount,
++ u32 *pui32DevID );
++
++
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32DeviceID,
++ void * hDevCookie,
++ void * *phDeviceKM);
++
++
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(void * hDeviceKM, int bResManCallback);
++
++
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(void * hDeviceKM,
++ u32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(void * hDeviceKM,
++ DISPLAY_FORMAT *psFormat,
++ u32 *pui32Count,
++ DISPLAY_DIMS *psDim);
++
++
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(void * hDeviceKM,
++ void * *phBuffer);
++
++
++PVRSRV_ERROR PVRSRVGetDCInfoKM(void * hDeviceKM,
++ DISPLAY_INFO *psDisplayInfo);
++
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDeviceKM,
++ u32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ u32 ui32BufferCount,
++ u32 ui32OEMFlags,
++ void * *phSwapChain,
++ u32 *pui32SwapChainID);
++
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(void * hSwapChain);
++
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(void * hDeviceKM,
++ void * hSwapChain,
++ IMG_RECT *psRect);
++
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(void * hDeviceKM,
++ void * hSwapChain,
++ IMG_RECT *psRect);
++
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(void * hDeviceKM,
++ void * hSwapChain,
++ u32 ui32CKColour);
++
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(void * hDeviceKM,
++ void * hSwapChain,
++ u32 ui32CKColour);
++
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(void * hDeviceKM,
++ void * hSwapChain,
++ u32 *pui32BufferCount,
++ void * *phBuffer);
++
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(void * hDeviceKM,
++ void * hBuffer,
++ u32 ui32SwapInterval,
++ void * hPrivateTag,
++ u32 ui32ClipRectCount,
++ IMG_RECT *psClipRect);
++
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(void * hDeviceKM,
++ void * hSwapChain);
++
++
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32DeviceID,
++ void * hDevCookie,
++ void * *phDeviceKM);
++
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(void * hDeviceKM, int bResManCallback);
++
++
++PVRSRV_ERROR PVRSRVGetBCInfoKM(void * hDeviceKM,
++ BUFFER_INFO *psBufferInfo);
++
++PVRSRV_ERROR PVRSRVGetBCBufferKM(void * hDeviceKM,
++ u32 ui32BufferIndex,
++ void * *phBuffer);
++
++
++
++PVRSRV_ERROR PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hDevMemContext,
++ void * hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ void * *phOSMapInfo);
++
++
++PVRSRV_ERROR PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVGetFreeDeviceMemKM(u32 ui32Flags,
++ u32 *pui32Total,
++ u32 *pui32Free,
++ u32 *pui32LargestBlock);
++
++PVRSRV_ERROR PVRSRVAllocSyncInfoKM(void * hDevCookie,
++ void * hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++
++PVRSRV_ERROR PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
++
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(u32 *pui32Total,
++ u32 *pui32Available);
++
++PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ u32 ui32Flags,
++ u32 ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++#ifdef INTEL_D3_CHANGES
++
++PVRSRV_ERROR PVRSRVWaitForWriteOpSyncKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pvr_debug.h
+@@ -0,0 +1,119 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++
++#include "img_types.h"
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN (512)
++
++#define DBGPRIV_FATAL 0x01UL
++#define DBGPRIV_ERROR 0x02UL
++#define DBGPRIV_WARNING 0x04UL
++#define DBGPRIV_MESSAGE 0x08UL
++#define DBGPRIV_VERBOSE 0x10UL
++#define DBGPRIV_CALLTRACE 0x20UL
++#define DBGPRIV_ALLOC 0x40UL
++#define DBGPRIV_ALLLEVELS (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE)
++
++
++
++#define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__
++#define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__
++#define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__
++#define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__
++#define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__
++#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__
++#define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__
++
++#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
++#define PVRSRV_NEED_PVR_ASSERT
++#endif
++
++#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
++#define PVRSRV_NEED_PVR_DPF
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
++#define PVRSRV_NEED_PVR_TRACE
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++ #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
++
++ void PVRSRVDebugAssertFail(const char *pszFile,
++ u32 ui32Line);
++
++ #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
++ #define PVR_DBG_BREAK PVRSRVDebugAssertFail("PVR_DBG_BREAK", 0)
++ #else
++ #define PVR_DBG_BREAK
++ #endif
++
++#else
++
++ #define PVR_ASSERT(EXPR)
++ #define PVR_DBG_BREAK
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++ #define PVR_DPF(X) PVRSRVDebugPrintf X
++
++ void PVRSRVDebugPrintf(u32 ui32DebugLevel,
++ const char *pszFileName,
++ u32 ui32Line,
++ const char *pszFormat,
++ ...);
++
++#else
++
++ #define PVR_DPF(X)
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++ #define PVR_TRACE(X) PVRSRVTrace X
++
++ void PVRSRVTrace(const char* pszFormat, ... );
++
++#else
++
++ #define PVR_TRACE(X)
++
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pvrmmap.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++PVRSRV_ERROR PVRPMapKMem(void * hModule, void **ppvLinAddr, void *pvLinAddrKM, void * *phMappingInfo, void * hMHandle);
++
++
++int PVRUnMapKMem(void * hModule, void * hMappingInfo, void * hMHandle);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pvrmodule.h
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRMODULE_H_
++#define _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/pvrversion.h
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 5
++#define PVRVERSION_BRANCH 15
++#define PVRVERSION_BUILD 3014
++#define PVRVERSION_STRING "1.5.15.3014"
++#define PVRVERSION_FILE "eurasiacon.pj"
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/queue.h
+@@ -0,0 +1,119 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \
++ psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++ typedef struct _COMMAND_COMPLETE_DATA_
++ {
++ int bInUse;
++
++ u32 ui32DstSyncCount;
++ u32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ u32 ui32AllocSize;
++ }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
++
++#if !defined(USE_CODE)
++void QueueDumpDebugInfo(void);
++
++
++PVRSRV_ERROR PVRSRVProcessQueues (u32 ui32CallerID,
++ int bFlush);
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#include <linux/seq_file.h>
++off_t
++QueuePrintQueues (char * buffer, size_t size, off_t off);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off);
++void ProcSeqShowQueue(struct seq_file *sfile,void* el);
++#endif
++
++#endif
++
++
++
++PVRSRV_ERROR PVRSRVCreateCommandQueueKM(u32 ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++PVRSRV_ERROR PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++
++PVRSRV_ERROR PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ u32 ui32DevIndex,
++ u16 CommandType,
++ u32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ u32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ u32 ui32DataByteSize );
++
++
++PVRSRV_ERROR PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ u32 ui32ParamSize,
++ void **ppvSpace);
++
++
++PVRSRV_ERROR PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand);
++
++
++void PVRSRVCommandCompleteKM(void * hCmdCookie, int bScheduleMISR);
++
++void PVRSRVCommandCompleteCallbacks(void);
++
++
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(u32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ u32 ui32MaxSyncsPerCmd[][2],
++ u32 ui32CmdCount);
++
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(u32 ui32DevIndex,
++ u32 ui32CmdCount);
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/ra.h
+@@ -0,0 +1,155 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++typedef struct _RA_ARENA_ RA_ARENA;
++typedef struct _BM_MAPPING_ BM_MAPPING;
++
++
++
++#define RA_STATS
++
++
++struct _RA_STATISTICS_
++{
++
++ u32 uSpanCount;
++
++
++ u32 uLiveSegmentCount;
++
++
++ u32 uFreeSegmentCount;
++
++
++ u32 uTotalResourceCount;
++
++
++ u32 uFreeResourceCount;
++
++
++ u32 uCumulativeAllocs;
++
++
++ u32 uCumulativeFrees;
++
++
++ u32 uImportCount;
++
++
++ u32 uExportCount;
++};
++typedef struct _RA_STATISTICS_ RA_STATISTICS;
++
++struct _RA_SEGMENT_DETAILS_
++{
++ u32 uiSize;
++ IMG_CPU_PHYADDR sCpuPhyAddr;
++ void * hSegment;
++};
++typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
++
++RA_ARENA *
++RA_Create (char *name,
++ u32 base,
++ u32 uSize,
++ BM_MAPPING *psMapping,
++ u32 uQuantum,
++ int (*imp_alloc)(void *_h,
++ u32 uSize,
++ u32 *pActualSize,
++ BM_MAPPING **ppsMapping,
++ u32 uFlags,
++ u32 *pBase),
++ void (*imp_free) (void *,
++ u32,
++ BM_MAPPING *),
++ void (*backingstore_free) (void *,
++ u32,
++ u32,
++ void *),
++ void *import_handle);
++
++void
++RA_Delete (RA_ARENA *pArena);
++
++int
++RA_TestDelete (RA_ARENA *pArena);
++
++int
++RA_Add (RA_ARENA *pArena, u32 base, u32 uSize);
++
++int
++RA_Alloc (RA_ARENA *pArena,
++ u32 uSize,
++ u32 *pActualSize,
++ BM_MAPPING **ppsMapping,
++ u32 uFlags,
++ u32 uAlignment,
++ u32 uAlignmentOffset,
++ u32 *pBase);
++
++void
++RA_Free (RA_ARENA *pArena, u32 base, int bFreeBackingStore);
++
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total) \
++{ \
++ if(total<100) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++}
++
++#define UPDATE_SPACE(str, count, total) \
++{ \
++ if(count == -1) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++ else \
++ { \
++ str += count; \
++ total -= count; \
++ } \
++}
++
++
++int RA_GetNextLiveSegment(void * hArena, RA_SEGMENT_DETAILS *psSegDetails);
++
++
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ char **ppszStr,
++ u32 *pui32StrLen);
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/regpaths.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __REGPATHS_H__
++#define __REGPATHS_H__
++
++#define POWERVR_REG_ROOT "Drivers\\Display\\PowerVR"
++#define POWERVR_CHIP_KEY "\\SGX1\\"
++
++#define POWERVR_EURASIA_KEY "PowerVREurasia\\"
++
++#define POWERVR_SERVICES_KEY "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
++
++#define PVRSRV_REGISTRY_ROOT POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
++
++
++#define MAX_REG_STRING_SIZE 128
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/resman.h
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++enum {
++
++ RESMAN_TYPE_SHARED_PB_DESC = 1,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ RESMAN_TYPE_TRANSFER_CONTEXT,
++
++
++
++
++
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++
++
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++
++
++ RESMAN_TYPE_OS_USERMODE_MAPPING,
++
++
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ RESMAN_TYPE_EVENT_OBJECT,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++
++
++ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION
++};
++
++#define RESMAN_CRITERIA_ALL 0x00000000
++#define RESMAN_CRITERIA_RESTYPE 0x00000001
++#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002
++#define RESMAN_CRITERIA_UI32_PARAM 0x00000004
++
++typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(void * pvParam, u32 ui32Param);
++
++typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
++typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT;
++
++PVRSRV_ERROR ResManInit(void);
++void ResManDeInit(void);
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext,
++ u32 ui32ResType,
++ void * pvParam,
++ u32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource);
++
++PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem);
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext,
++ u32 ui32SearchCriteria,
++ u32 ui32ResType,
++ void * pvParam,
++ u32 ui32Param);
++
++PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem,
++ PRESMAN_CONTEXT psNewResManContext);
++
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext,
++ PRESMAN_ITEM psItem);
++
++PVRSRV_ERROR PVRSRVResManConnect(void * hPerProc,
++ PRESMAN_CONTEXT *phResManContext);
++void PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext,
++ int bKernelContext);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/services.h
+@@ -0,0 +1,864 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++#include "img_types.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++
++#define PVRSRV_4K_PAGE_SIZE 4096UL
++
++#define PVRSRV_MAX_CMD_SIZE 1024
++
++#define PVRSRV_MAX_DEVICES 16
++
++#define EVENTOBJNAME_MAXLENGTH (50)
++
++#define PVRSRV_MEM_READ (1UL<<0)
++#define PVRSRV_MEM_WRITE (1UL<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT (1UL<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ (1UL<<3)
++#define PVRSRV_MEM_INTERLEAVED (1UL<<4)
++#define PVRSRV_MEM_DUMMY (1UL<<5)
++#define PVRSRV_MEM_EDM_PROTECT (1UL<<6)
++#define PVRSRV_MEM_ZERO (1UL<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1UL<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1UL<<9)
++#define PVRSRV_MEM_NO_RESMAN (1UL<<10)
++#define PVRSRV_MEM_EXPORTED (1UL<<11)
++
++
++#define PVRSRV_HAP_CACHED (1UL<<12)
++#define PVRSRV_HAP_UNCACHED (1UL<<13)
++#define PVRSRV_HAP_WRITECOMBINE (1UL<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY (1UL<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS (1UL<<16)
++#define PVRSRV_HAP_MULTI_PROCESS (1UL<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1UL<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL (1UL<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \
++ |PVRSRV_HAP_SINGLE_PROCESS \
++ |PVRSRV_HAP_MULTI_PROCESS \
++ |PVRSRV_HAP_FROM_EXISTING_PROCESS \
++ |PVRSRV_HAP_NO_CPU_VIRTUAL)
++
++#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED
++#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED
++#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE
++
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24)
++
++#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27)
++
++#define PVRSRV_NO_CONTEXT_LOSS 0
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1
++#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80
++
++
++#define PVRSRV_DEFAULT_DEV_COOKIE (1)
++
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT (1UL<<0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1UL<<1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1UL<<2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1UL<<3)
++#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1UL<<4)
++#define PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT (1UL<<5)
++
++#define PVRSRV_MISC_INFO_RESET_PRESENT (1UL<<31)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200
++
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002
++
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC 0x00000001
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC 0x00000002
++
++typedef enum _PVRSRV_DEVICE_TYPE_
++{
++ PVRSRV_DEVICE_TYPE_UNKNOWN = 0 ,
++ PVRSRV_DEVICE_TYPE_MBX1 = 1 ,
++ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 ,
++
++ PVRSRV_DEVICE_TYPE_M24VA = 3,
++ PVRSRV_DEVICE_TYPE_MVDA2 = 4,
++ PVRSRV_DEVICE_TYPE_MVED1 = 5,
++ PVRSRV_DEVICE_TYPE_MSVDX = 6,
++
++ PVRSRV_DEVICE_TYPE_SGX = 7,
++
++ PVRSRV_DEVICE_TYPE_VGX = 8,
++
++
++ PVRSRV_DEVICE_TYPE_EXT = 9,
++
++ PVRSRV_DEVICE_TYPE_LAST = 9,
++
++ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_TYPE;
++
++#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) )
++#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) )
++#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 )
++
++#define PVRSRV_UNDEFINED_HEAP_ID (~0LU)
++
++typedef enum
++{
++ IMG_EGL = 0x00000001,
++ IMG_OPENGLES1 = 0x00000002,
++ IMG_OPENGLES2 = 0x00000003,
++ IMG_D3DM = 0x00000004,
++ IMG_SRV_UM = 0x00000005,
++ IMG_OPENVG = 0x00000006,
++ IMG_SRVCLIENT = 0x00000007,
++ IMG_VISTAKMD = 0x00000008,
++ IMG_VISTA3DNODE = 0x00000009,
++ IMG_VISTAMVIDEONODE = 0x0000000A,
++ IMG_VISTAVPBNODE = 0x0000000B,
++ IMG_OPENGL = 0x0000000C,
++ IMG_D3D = 0x0000000D,
++#if defined(SUPPORT_GRAPHICS_HAL)
++ IMG_GRAPHICS_HAL = 0x0000000E
++#endif
++
++} IMG_MODULE_ID;
++
++
++#define APPHINT_MAX_STRING_SIZE 256
++
++typedef enum
++{
++ IMG_STRING_TYPE = 1,
++ IMG_FLOAT_TYPE ,
++ IMG_UINT_TYPE ,
++ IMG_INT_TYPE ,
++ IMG_FLAG_TYPE
++}IMG_DATA_TYPE;
++
++
++typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_DEVICE_IDENTIFIER_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++ u32 ui32DeviceIndex;
++
++} PVRSRV_DEVICE_IDENTIFIER;
++
++
++typedef struct _PVRSRV_CLIENT_DEV_DATA_
++{
++ u32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES];
++ PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++ PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++
++} PVRSRV_CLIENT_DEV_DATA;
++
++
++typedef struct _PVRSRV_CONNECTION_
++{
++ void * hServices;
++ u32 ui32ProcessID;
++ PVRSRV_CLIENT_DEV_DATA sClientDevData;
++}PVRSRV_CONNECTION;
++
++
++typedef struct _PVRSRV_DEV_DATA_
++{
++ PVRSRV_CONNECTION sConnection;
++ void * hDevCookie;
++
++} PVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_MEMUPDATE_
++{
++ u32 ui32UpdateAddr;
++ u32 ui32UpdateVal;
++} PVRSRV_MEMUPDATE;
++
++typedef struct _PVRSRV_HWREG_
++{
++ u32 ui32RegAddr;
++ u32 ui32RegVal;
++} PVRSRV_HWREG;
++
++typedef struct _PVRSRV_MEMBLK_
++{
++ IMG_DEV_VIRTADDR sDevVirtAddr;
++ void * hOSMemHandle;
++ void * hOSWrapMem;
++ void * hBuffer;
++ void * hResItem;
++ IMG_SYS_PHYADDR *psIntSysPAddr;
++
++} PVRSRV_MEMBLK;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
++
++typedef struct _PVRSRV_CLIENT_MEM_INFO_
++{
++
++ void * pvLinAddr;
++
++
++ void * pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++
++
++
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ u32 ui32Flags;
++
++
++
++
++ u32 ui32ClientFlags;
++
++
++ u32 ui32AllocSize;
++
++
++
++ struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo;
++
++
++ void * hMappingInfo;
++
++
++ void * hKernelMemInfo;
++
++
++ void * hResItem;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ u64 ui64Stamp;
++ #else
++ u32 dummy1;
++ u32 dummy2;
++ #endif
++#endif
++
++
++
++
++ struct _PVRSRV_CLIENT_MEM_INFO_ *psNext;
++
++} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
++
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++typedef struct _PVRSRV_HEAP_INFO_
++{
++ u32 ui32HeapID;
++ void * hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++ u32 ui32HeapByteSize;
++ u32 ui32Attribs;
++}PVRSRV_HEAP_INFO;
++
++
++
++
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++
++ char szName[EVENTOBJNAME_MAXLENGTH];
++
++ void * hOSEventKM;
++
++} PVRSRV_EVENTOBJECT;
++
++typedef struct _PVRSRV_MISC_INFO_
++{
++ u32 ui32StateRequest;
++ u32 ui32StatePresent;
++
++
++ void *pvSOCTimerRegisterKM;
++ void *pvSOCTimerRegisterUM;
++ void * hSOCTimerRegisterOSMemHandle;
++ void * hSOCTimerRegisterMappingInfo;
++
++
++ void *pvSOCClockGateRegs;
++ u32 ui32SOCClockGateRegsSize;
++
++
++ char *pszMemoryStr;
++ u32 ui32MemoryStrLen;
++
++
++ PVRSRV_EVENTOBJECT sGlobalEventObject;
++ void * hOSGlobalEvent;
++
++
++ u32 aui32DDKVersion[4];
++
++
++
++ int bCPUCacheFlushAll;
++
++ int bDeferCPUCacheFlush;
++
++ void * pvRangeAddrStart;
++
++ void * pvRangeAddrEnd;
++
++} PVRSRV_MISC_INFO;
++
++
++typedef enum _PVRSRV_CLIENT_EVENT_
++{
++ PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0,
++} PVRSRV_CLIENT_EVENT;
++
++
++PVRSRV_ERROR PVRSRVClientEvent(const PVRSRV_CLIENT_EVENT eEvent,
++ PVRSRV_DEV_DATA *psDevData,
++ void * pvData);
++
++
++PVRSRV_ERROR PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
++
++
++PVRSRV_ERROR PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
++
++
++PVRSRV_ERROR PVRSRVEnumerateDevices(const PVRSRV_CONNECTION *psConnection,
++ u32 *puiNumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *puiDevIDs);
++
++PVRSRV_ERROR PVRSRVAcquireDeviceData(const PVRSRV_CONNECTION *psConnection,
++ u32 uiDevIndex,
++ PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_DEVICE_TYPE eDeviceType);
++
++PVRSRV_ERROR PVRSRVGetMiscInfo (const PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++
++PVRSRV_ERROR PVRSRVReleaseMiscInfo (const PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++#if 1
++
++u32 ReadHWReg(void * pvLinRegBaseAddr, u32 ui32Offset);
++
++
++void WriteHWReg(void * pvLinRegBaseAddr, u32 ui32Offset, u32 ui32Value);
++
++ void WriteHWRegs(void * pvLinRegBaseAddr, u32 ui32Count, PVRSRV_HWREG *psHWRegs);
++#endif
++
++
++PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection,
++ void * hOSEvent,
++ volatile u32 *pui32LinMemAddr,
++ u32 ui32Value,
++ u32 ui32Mask,
++ u32 ui32Waitus,
++ u32 ui32Tries);
++
++
++PVRSRV_ERROR PVRSRVCreateDeviceMemContext(const PVRSRV_DEV_DATA *psDevData,
++ void * *phDevMemContext,
++ u32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++
++PVRSRV_ERROR PVRSRVDestroyDeviceMemContext(const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemContext);
++
++
++PVRSRV_ERROR PVRSRVGetDeviceMemHeapInfo(const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemContext,
++ u32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" \
++ ": " logStr " (size = 0x%lx)", ui32Size)), \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo))
++#else
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)
++#endif
++
++
++
++PVRSRV_ERROR PVRSRVAllocDeviceMem(const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemHeap,
++ u32 ui32Attribs,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++
++PVRSRV_ERROR PVRSRVFreeDeviceMem(const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVExportDeviceMem(const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ void * *phMemInfo);
++
++
++PVRSRV_ERROR PVRSRVReserveDeviceVirtualMem(const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ u32 ui32Size,
++ u32 ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++PVRSRV_ERROR PVRSRVFreeDeviceVirtualMem(const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVMapDeviceMemory (const PVRSRV_DEV_DATA *psDevData,
++ void * hKernelMemInfo,
++ void * hDstDevMemHeap,
++ PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
++
++
++PVRSRV_ERROR PVRSRVUnmapDeviceMemory (const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVMapExtMemory (const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ u32 ui32Flags);
++
++PVRSRV_ERROR PVRSRVUnmapExtMemory (const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ u32 ui32Flags);
++
++
++PVRSRV_ERROR PVRSRVWrapExtMemory2(const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemContext,
++ u32 ui32ByteSize,
++ u32 ui32PageOffset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ void *pvLinAddr,
++ u32 ui32Flags,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++PVRSRV_ERROR PVRSRVWrapExtMemory(const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemContext,
++ u32 ui32ByteSize,
++ u32 ui32PageOffset,
++ int bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ void *pvLinAddr,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++PVRSRV_ERROR PVRSRVUnwrapExtMemory (const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo,
++ u32 ui32Attribs);
++
++
++PVRSRV_ERROR PVRSRVMapDeviceClassMemory (const PVRSRV_DEV_DATA *psDevData,
++ void * hDevMemContext,
++ void * hDeviceClassBuffer,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++PVRSRV_ERROR PVRSRVUnmapDeviceClassMemory (const PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++
++PVRSRV_ERROR PVRSRVMapPhysToUserSpace(const PVRSRV_DEV_DATA *psDevData,
++ IMG_SYS_PHYADDR sSysPhysAddr,
++ u32 uiSizeInBytes,
++ void * *ppvUserAddr,
++ u32 *puiActualSize,
++ void * *ppvProcess);
++
++
++PVRSRV_ERROR PVRSRVUnmapPhysToUserSpace(const PVRSRV_DEV_DATA *psDevData,
++ void * pvUserAddr,
++ void * pvProcess);
++
++typedef enum _PVRSRV_SYNCVAL_MODE_
++{
++ PVRSRV_SYNCVAL_READ = 1,
++ PVRSRV_SYNCVAL_WRITE = 0,
++
++} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
++
++typedef u32 PVRSRV_SYNCVAL;
++
++ PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++ PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++ int PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++ int PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++ int PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++ int PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++ PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++
++
++PVRSRV_ERROR PVRSRVEnumerateDeviceClass(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_DEVICE_CLASS DeviceClass,
++ u32 *pui32DevCount,
++ u32 *pui32DevID);
++
++
++void * PVRSRVOpenDCDevice(const PVRSRV_DEV_DATA *psDevData,
++ u32 ui32DeviceID);
++
++
++PVRSRV_ERROR PVRSRVCloseDCDevice(const PVRSRV_CONNECTION *psConnection, void * hDevice);
++
++
++PVRSRV_ERROR PVRSRVEnumDCFormats (void * hDevice,
++ u32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++
++PVRSRV_ERROR PVRSRVEnumDCDims (void * hDevice,
++ u32 *pui32Count,
++ DISPLAY_FORMAT *psFormat,
++ DISPLAY_DIMS *psDims);
++
++
++PVRSRV_ERROR PVRSRVGetDCSystemBuffer(void * hDevice,
++ void * *phBuffer);
++
++
++PVRSRV_ERROR PVRSRVGetDCInfo(void * hDevice,
++ DISPLAY_INFO* psDisplayInfo);
++
++
++PVRSRV_ERROR PVRSRVCreateDCSwapChain (void * hDevice,
++ u32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ u32 ui32BufferCount,
++ u32 ui32OEMFlags,
++ u32 *pui32SwapChainID,
++ void * *phSwapChain);
++
++
++PVRSRV_ERROR PVRSRVDestroyDCSwapChain (void * hDevice,
++ void * hSwapChain);
++
++
++PVRSRV_ERROR PVRSRVSetDCDstRect (void * hDevice,
++ void * hSwapChain,
++ IMG_RECT *psDstRect);
++
++
++PVRSRV_ERROR PVRSRVSetDCSrcRect (void * hDevice,
++ void * hSwapChain,
++ IMG_RECT *psSrcRect);
++
++
++PVRSRV_ERROR PVRSRVSetDCDstColourKey (void * hDevice,
++ void * hSwapChain,
++ u32 ui32CKColour);
++
++
++PVRSRV_ERROR PVRSRVSetDCSrcColourKey (void * hDevice,
++ void * hSwapChain,
++ u32 ui32CKColour);
++
++
++PVRSRV_ERROR PVRSRVGetDCBuffers(void * hDevice,
++ void * hSwapChain,
++ void * *phBuffer);
++
++
++PVRSRV_ERROR PVRSRVSwapToDCBuffer (void * hDevice,
++ void * hBuffer,
++ u32 ui32ClipRectCount,
++ IMG_RECT *psClipRect,
++ u32 ui32SwapInterval,
++ void * hPrivateTag);
++
++
++PVRSRV_ERROR PVRSRVSwapToDCSystem (void * hDevice,
++ void * hSwapChain);
++
++
++
++void * PVRSRVOpenBCDevice(const PVRSRV_DEV_DATA *psDevData,
++ u32 ui32DeviceID);
++
++
++PVRSRV_ERROR PVRSRVCloseBCDevice(const PVRSRV_CONNECTION *psConnection,
++ void * hDevice);
++
++
++PVRSRV_ERROR PVRSRVGetBCBufferInfo(void * hDevice,
++ BUFFER_INFO *psBuffer);
++
++
++PVRSRV_ERROR PVRSRVGetBCBuffer(void * hDevice,
++ u32 ui32BufferIndex,
++ void * *phBuffer);
++
++
++
++PVRSRV_ERROR PVRSRVPDumpInit(const PVRSRV_CONNECTION *psConnection);
++
++
++PVRSRV_ERROR PVRSRVPDumpStartInitPhase(const PVRSRV_CONNECTION *psConnection);
++
++
++PVRSRV_ERROR PVRSRVPDumpStopInitPhase(const PVRSRV_CONNECTION *psConnection);
++
++
++PVRSRV_ERROR PVRSRVPDumpMemPol(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Value,
++ u32 ui32Mask,
++ u32 ui32Flags);
++
++
++PVRSRV_ERROR PVRSRVPDumpSyncPol(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ int bIsRead,
++ u32 ui32Value,
++ u32 ui32Mask);
++
++
++PVRSRV_ERROR PVRSRVPDumpMem(const PVRSRV_CONNECTION *psConnection,
++ void * pvAltLinAddr,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ u32 ui32Bytes,
++ u32 ui32Flags);
++
++
++PVRSRV_ERROR PVRSRVPDumpSync(const PVRSRV_CONNECTION *psConnection,
++ void * pvAltLinAddr,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ u32 ui32Offset,
++ u32 ui32Bytes);
++
++
++PVRSRV_ERROR PVRSRVPDumpReg(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32RegAddr,
++ u32 ui32RegValue,
++ u32 ui32Flags);
++
++
++PVRSRV_ERROR PVRSRVPDumpRegPolWithFlags(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32RegAddr,
++ u32 ui32RegValue,
++ u32 ui32Mask,
++ u32 ui32Flags);
++
++PVRSRV_ERROR PVRSRVPDumpRegPol(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32RegAddr,
++ u32 ui32RegValue,
++ u32 ui32Mask);
++
++
++PVRSRV_ERROR PVRSRVPDumpPDReg(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32RegAddr,
++ u32 ui32RegValue);
++
++PVRSRV_ERROR PVRSRVPDumpPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ u32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr);
++
++
++PVRSRV_ERROR PVRSRVPDumpMemPages(const PVRSRV_CONNECTION *psConnection,
++ void * hKernelMemInfo,
++ IMG_DEV_PHYADDR *pPages,
++ u32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ u32 ui32Start,
++ u32 ui32Length,
++ int bContinuous);
++
++
++PVRSRV_ERROR PVRSRVPDumpSetFrame(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32Frame);
++
++
++PVRSRV_ERROR PVRSRVPDumpComment(const PVRSRV_CONNECTION *psConnection,
++ const char *pszComment,
++ int bContinuous);
++
++
++PVRSRV_ERROR PVRSRVPDumpCommentf(const PVRSRV_CONNECTION *psConnection,
++ int bContinuous,
++ const char *pszFormat, ...);
++
++
++PVRSRV_ERROR PVRSRVPDumpCommentWithFlagsf(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32Flags,
++ const char *pszFormat, ...);
++
++
++PVRSRV_ERROR PVRSRVPDumpDriverInfo(const PVRSRV_CONNECTION *psConnection,
++ char *pszString,
++ int bContinuous);
++
++
++PVRSRV_ERROR PVRSRVPDumpIsCapturing(const PVRSRV_CONNECTION *psConnection,
++ int *pbIsCapturing);
++
++
++PVRSRV_ERROR PVRSRVPDumpBitmap(const PVRSRV_CONNECTION *psConnection,
++ char *pszFileName,
++ u32 ui32FileOffset,
++ u32 ui32Width,
++ u32 ui32Height,
++ u32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ u32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ u32 ui32PDumpFlags);
++
++
++PVRSRV_ERROR PVRSRVPDumpRegRead(const PVRSRV_CONNECTION *psConnection,
++ const char *pszFileName,
++ u32 ui32FileOffset,
++ u32 ui32Address,
++ u32 ui32Size,
++ u32 ui32PDumpFlags);
++
++
++
++int PVRSRVPDumpIsCapturingTest(const PVRSRV_CONNECTION *psConnection);
++
++
++PVRSRV_ERROR PVRSRVPDumpCycleCountRegRead(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32RegOffset,
++ int bLastFrame);
++
++ void * PVRSRVLoadLibrary(const char *pszLibraryName);
++ PVRSRV_ERROR PVRSRVUnloadLibrary(void * hExtDrv);
++ PVRSRV_ERROR PVRSRVGetLibFuncAddr(void * hExtDrv, const char *pszFunctionName, void **ppvFuncAddr);
++
++ u32 PVRSRVClockus (void);
++ void PVRSRVWaitus (u32 ui32Timeus);
++ void PVRSRVReleaseThreadQuanta (void);
++ u32 PVRSRVGetCurrentProcessID(void);
++ char * PVRSRVSetLocale(const char *pszLocale);
++
++
++
++
++
++ void PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
++ const char *pszAppName,
++ void **ppvState);
++ void PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
++ void *pvHintState);
++
++ int PVRSRVGetAppHint(void *pvHintState,
++ const char *pszHintName,
++ IMG_DATA_TYPE eDataType,
++ const void *pvDefault,
++ void *pvReturn);
++
++ void * PVRSRVAllocUserModeMem (u32 ui32Size);
++ void * PVRSRVCallocUserModeMem (u32 ui32Size);
++ void * PVRSRVReallocUserModeMem (void * pvBase, u32 uNewSize);
++ void PVRSRVFreeUserModeMem (void * pvMem);
++ void PVRSRVMemCopy(void *pvDst, const void *pvSrc, u32 ui32Size);
++ void PVRSRVMemSet(void *pvDest, u8 ui8Value, u32 ui32Size);
++
++struct _PVRSRV_MUTEX_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE;
++
++ PVRSRV_ERROR PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex);
++ PVRSRV_ERROR PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex);
++ void PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++ void PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++
++#if (defined(DEBUG) && defined(__linux__))
++void * PVRSRVAllocUserModeMemTracking(u32 ui32Size, char *pszFileName, u32 ui32LineNumber);
++void * PVRSRVCallocUserModeMemTracking(u32 ui32Size, char *pszFileName, u32 ui32LineNumber);
++void PVRSRVFreeUserModeMemTracking(void *pvMem);
++void * PVRSRVReallocUserModeMemTracking(void *pvMem, u32 ui32NewSize, char *pszFileName, u32 ui32LineNumber);
++#endif
++
++ PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection,
++ void * hOSEvent);
++
++
++PVRSRV_ERROR PVRSRVModifyPendingSyncOps(PVRSRV_CONNECTION *psConnection,
++ void * hKernelSyncInfo,
++ u32 ui32ModifyFlags,
++ u32 *pui32ReadOpsPending,
++ u32 *pui32WriteOpsPending);
++
++
++PVRSRV_ERROR PVRSRVModifyCompleteSyncOps(PVRSRV_CONNECTION *psConnection,
++ void * hKernelSyncInfo,
++ u32 ui32ModifyFlags);
++
++
++#define TIME_NOT_PASSED_UINT32(a,b,c) ((a - b) < c)
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/services_headers.h
+@@ -0,0 +1,49 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "metrics.h"
++#include "osfunc.h"
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/servicesext.h
+@@ -0,0 +1,659 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#define PVRSRV_LOCKFLG_READONLY (1)
++
++typedef enum _PVRSRV_ERROR_
++{
++ PVRSRV_OK = 0,
++ PVRSRV_ERROR_GENERIC = 1,
++ PVRSRV_ERROR_OUT_OF_MEMORY = 2,
++ PVRSRV_ERROR_TOO_FEW_BUFFERS = 3,
++ PVRSRV_ERROR_SYMBOL_NOT_FOUND = 4,
++ PVRSRV_ERROR_OUT_OF_HSPACE = 5,
++ PVRSRV_ERROR_INVALID_PARAMS = 6,
++ PVRSRV_ERROR_TILE_MAP_FAILED = 7,
++ PVRSRV_ERROR_INIT_FAILURE = 8,
++ PVRSRV_ERROR_CANT_REGISTER_CALLBACK = 9,
++ PVRSRV_ERROR_INVALID_DEVICE = 10,
++ PVRSRV_ERROR_NOT_OWNER = 11,
++ PVRSRV_ERROR_BAD_MAPPING = 12,
++ PVRSRV_ERROR_TIMEOUT = 13,
++ PVRSRV_ERROR_NO_PRIMARY = 14,
++ PVRSRV_ERROR_FLIP_CHAIN_EXISTS = 15,
++ PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA = 16,
++ PVRSRV_ERROR_SCENE_INVALID = 17,
++ PVRSRV_ERROR_STREAM_ERROR = 18,
++ PVRSRV_ERROR_INVALID_INTERRUPT = 19,
++ PVRSRV_ERROR_FAILED_DEPENDENCIES = 20,
++ PVRSRV_ERROR_CMD_NOT_PROCESSED = 21,
++ PVRSRV_ERROR_CMD_TOO_BIG = 22,
++ PVRSRV_ERROR_DEVICE_REGISTER_FAILED = 23,
++ PVRSRV_ERROR_FIFO_SPACE = 24,
++ PVRSRV_ERROR_TA_RECOVERY = 25,
++ PVRSRV_ERROR_INDOSORLOWPOWER = 26,
++ PVRSRV_ERROR_TOOMANYBUFFERS = 27,
++ PVRSRV_ERROR_NOT_SUPPORTED = 28,
++ PVRSRV_ERROR_PROCESSING_BLOCKED = 29,
++
++
++ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE = 31,
++ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE = 32,
++ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS = 33,
++ PVRSRV_ERROR_RETRY = 34,
++
++ PVRSRV_ERROR_DDK_VERSION_MISMATCH = 35,
++ PVRSRV_ERROR_BUILD_MISMATCH = 36,
++ PVRSRV_ERROR_PDUMP_BUF_OVERFLOW,
++
++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_ERROR;
++
++
++typedef enum _PVRSRV_DEVICE_CLASS_
++{
++ PVRSRV_DEVICE_CLASS_3D = 0 ,
++ PVRSRV_DEVICE_CLASS_DISPLAY = 1 ,
++ PVRSRV_DEVICE_CLASS_BUFFER = 2 ,
++ PVRSRV_DEVICE_CLASS_VIDEO = 3 ,
++
++ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_CLASS;
++
++
++
++typedef enum _PVRSRV_SYS_POWER_STATE_
++{
++ PVRSRV_SYS_POWER_STATE_Unspecified = -1,
++ PVRSRV_SYS_POWER_STATE_D0 = 0,
++ PVRSRV_SYS_POWER_STATE_D1 = 1,
++ PVRSRV_SYS_POWER_STATE_D2 = 2,
++ PVRSRV_SYS_POWER_STATE_D3 = 3,
++ PVRSRV_SYS_POWER_STATE_D4 = 4,
++
++ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE;
++
++
++typedef enum _PVRSRV_DEV_POWER_STATE_
++{
++ PVRSRV_DEV_POWER_STATE_DEFAULT = -1,
++ PVRSRV_DEV_POWER_STATE_ON = 0,
++ PVRSRV_DEV_POWER_STATE_IDLE = 1,
++ PVRSRV_DEV_POWER_STATE_OFF = 2,
++
++ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;
++
++
++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (void * hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_POWER) (void * hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (void * hDevHandle,
++ int bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (void * hDevHandle,
++ int bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++
++typedef enum _PVRSRV_PIXEL_FORMAT_ {
++
++ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0,
++ PVRSRV_PIXEL_FORMAT_RGB565 = 1,
++ PVRSRV_PIXEL_FORMAT_RGB555 = 2,
++ PVRSRV_PIXEL_FORMAT_RGB888 = 3,
++ PVRSRV_PIXEL_FORMAT_BGR888 = 4,
++ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8,
++ PVRSRV_PIXEL_FORMAT_PAL12 = 13,
++ PVRSRV_PIXEL_FORMAT_PAL8 = 14,
++ PVRSRV_PIXEL_FORMAT_PAL4 = 15,
++ PVRSRV_PIXEL_FORMAT_PAL2 = 16,
++ PVRSRV_PIXEL_FORMAT_PAL1 = 17,
++ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18,
++ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19,
++ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20,
++ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21,
++ PVRSRV_PIXEL_FORMAT_YV12 = 22,
++ PVRSRV_PIXEL_FORMAT_I420 = 23,
++ PVRSRV_PIXEL_FORMAT_IMC2 = 25,
++ PVRSRV_PIXEL_FORMAT_XRGB8888,
++ PVRSRV_PIXEL_FORMAT_XBGR8888,
++ PVRSRV_PIXEL_FORMAT_BGRA8888,
++ PVRSRV_PIXEL_FORMAT_XRGB4444,
++ PVRSRV_PIXEL_FORMAT_ARGB8332,
++ PVRSRV_PIXEL_FORMAT_A2RGB10,
++ PVRSRV_PIXEL_FORMAT_A2BGR10,
++ PVRSRV_PIXEL_FORMAT_P8,
++ PVRSRV_PIXEL_FORMAT_L8,
++ PVRSRV_PIXEL_FORMAT_A8L8,
++ PVRSRV_PIXEL_FORMAT_A4L4,
++ PVRSRV_PIXEL_FORMAT_L16,
++ PVRSRV_PIXEL_FORMAT_L6V5U5,
++ PVRSRV_PIXEL_FORMAT_V8U8,
++ PVRSRV_PIXEL_FORMAT_V16U16,
++ PVRSRV_PIXEL_FORMAT_QWVU8888,
++ PVRSRV_PIXEL_FORMAT_XLVU8888,
++ PVRSRV_PIXEL_FORMAT_QWVU16,
++ PVRSRV_PIXEL_FORMAT_D16,
++ PVRSRV_PIXEL_FORMAT_D24S8,
++ PVRSRV_PIXEL_FORMAT_D24X8,
++
++
++ PVRSRV_PIXEL_FORMAT_ABGR16,
++ PVRSRV_PIXEL_FORMAT_ABGR16F,
++ PVRSRV_PIXEL_FORMAT_ABGR32,
++ PVRSRV_PIXEL_FORMAT_ABGR32F,
++ PVRSRV_PIXEL_FORMAT_B10GR11,
++ PVRSRV_PIXEL_FORMAT_GR88,
++ PVRSRV_PIXEL_FORMAT_BGR32,
++ PVRSRV_PIXEL_FORMAT_GR32,
++ PVRSRV_PIXEL_FORMAT_E5BGR9,
++
++
++ PVRSRV_PIXEL_FORMAT_DXT1,
++ PVRSRV_PIXEL_FORMAT_DXT2,
++ PVRSRV_PIXEL_FORMAT_DXT3,
++ PVRSRV_PIXEL_FORMAT_DXT4,
++ PVRSRV_PIXEL_FORMAT_DXT5,
++
++
++ PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++ PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++
++
++ PVRSRV_PIXEL_FORMAT_NV11,
++ PVRSRV_PIXEL_FORMAT_NV12,
++
++
++ PVRSRV_PIXEL_FORMAT_YUY2,
++ PVRSRV_PIXEL_FORMAT_YUV420,
++ PVRSRV_PIXEL_FORMAT_YUV444,
++ PVRSRV_PIXEL_FORMAT_VUY444,
++ PVRSRV_PIXEL_FORMAT_YUYV,
++ PVRSRV_PIXEL_FORMAT_YVYU,
++ PVRSRV_PIXEL_FORMAT_UYVY,
++ PVRSRV_PIXEL_FORMAT_VYUY,
++
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV,
++
++
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B32G32R32,
++ PVRSRV_PIXEL_FORMAT_B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_G32R32,
++ PVRSRV_PIXEL_FORMAT_G32R32F,
++ PVRSRV_PIXEL_FORMAT_G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_D32F,
++ PVRSRV_PIXEL_FORMAT_R32,
++ PVRSRV_PIXEL_FORMAT_R32F,
++ PVRSRV_PIXEL_FORMAT_R32_UINT,
++ PVRSRV_PIXEL_FORMAT_R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16F,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G16R16,
++ PVRSRV_PIXEL_FORMAT_G16R16F,
++ PVRSRV_PIXEL_FORMAT_G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_R16,
++ PVRSRV_PIXEL_FORMAT_R16F,
++ PVRSRV_PIXEL_FORMAT_R16_UINT,
++ PVRSRV_PIXEL_FORMAT_R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_R16_SINT,
++ PVRSRV_PIXEL_FORMAT_R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G8R8,
++ PVRSRV_PIXEL_FORMAT_G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A8,
++ PVRSRV_PIXEL_FORMAT_R8,
++ PVRSRV_PIXEL_FORMAT_R8_UINT,
++ PVRSRV_PIXEL_FORMAT_R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_R8_SINT,
++ PVRSRV_PIXEL_FORMAT_R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B10G11R11,
++ PVRSRV_PIXEL_FORMAT_B10G11R11F,
++
++
++ PVRSRV_PIXEL_FORMAT_X24G8R32,
++ PVRSRV_PIXEL_FORMAT_G8R24,
++ PVRSRV_PIXEL_FORMAT_X8R24,
++ PVRSRV_PIXEL_FORMAT_E5B9G9R9,
++ PVRSRV_PIXEL_FORMAT_R1,
++
++ PVRSRV_PIXEL_FORMAT_BC1,
++ PVRSRV_PIXEL_FORMAT_BC1_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC1_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC2,
++ PVRSRV_PIXEL_FORMAT_BC2_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC2_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC3,
++ PVRSRV_PIXEL_FORMAT_BC3_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC3_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC4,
++ PVRSRV_PIXEL_FORMAT_BC4_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC4_SNORM,
++ PVRSRV_PIXEL_FORMAT_BC5,
++ PVRSRV_PIXEL_FORMAT_BC5_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC5_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_L_F16,
++ PVRSRV_PIXEL_FORMAT_L_F16_REP,
++ PVRSRV_PIXEL_FORMAT_L_F16_A_F16,
++ PVRSRV_PIXEL_FORMAT_A_F16,
++ PVRSRV_PIXEL_FORMAT_B16G16R16F,
++
++ PVRSRV_PIXEL_FORMAT_L_F32,
++ PVRSRV_PIXEL_FORMAT_A_F32,
++ PVRSRV_PIXEL_FORMAT_L_F32_A_F32,
++
++
++ PVRSRV_PIXEL_FORMAT_PVRTC2,
++ PVRSRV_PIXEL_FORMAT_PVRTC4,
++ PVRSRV_PIXEL_FORMAT_PVRTCII2,
++ PVRSRV_PIXEL_FORMAT_PVRTCII4,
++ PVRSRV_PIXEL_FORMAT_PVRTCIII,
++ PVRSRV_PIXEL_FORMAT_PVRO8,
++ PVRSRV_PIXEL_FORMAT_PVRO88,
++ PVRSRV_PIXEL_FORMAT_PT1,
++ PVRSRV_PIXEL_FORMAT_PT2,
++ PVRSRV_PIXEL_FORMAT_PT4,
++ PVRSRV_PIXEL_FORMAT_PT8,
++ PVRSRV_PIXEL_FORMAT_PTW,
++ PVRSRV_PIXEL_FORMAT_PTB,
++ PVRSRV_PIXEL_FORMAT_MONO8,
++ PVRSRV_PIXEL_FORMAT_MONO16,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUYV,
++ PVRSRV_PIXEL_FORMAT_C0_UYVY,
++ PVRSRV_PIXEL_FORMAT_C0_YVYU,
++ PVRSRV_PIXEL_FORMAT_C0_VYUY,
++ PVRSRV_PIXEL_FORMAT_C1_YUYV,
++ PVRSRV_PIXEL_FORMAT_C1_UYVY,
++ PVRSRV_PIXEL_FORMAT_C1_YVYU,
++ PVRSRV_PIXEL_FORMAT_C1_VYUY,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_3P,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_3P,
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10F,
++ PVRSRV_PIXEL_FORMAT_B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK,
++
++ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++} PVRSRV_PIXEL_FORMAT;
++
++typedef enum _PVRSRV_ALPHA_FORMAT_ {
++ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001,
++ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002,
++ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F,
++} PVRSRV_ALPHA_FORMAT;
++
++typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
++ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000,
++ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000,
++ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000,
++} PVRSRV_COLOURSPACE_FORMAT;
++
++
++typedef enum _PVRSRV_ROTATION_ {
++ PVRSRV_ROTATE_0 = 0,
++ PVRSRV_ROTATE_90 = 1,
++ PVRSRV_ROTATE_180 = 2,
++ PVRSRV_ROTATE_270 = 3,
++ PVRSRV_FLIP_Y
++
++} PVRSRV_ROTATION;
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1)
++#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2)
++
++typedef struct _PVRSRV_SYNC_DATA_
++{
++
++#ifdef INTEL_D3_PAD
++ volatile u32 ui32WriteOpsComplete;
++ volatile u32 ui32ReadOpsComplete;
++
++ u32 _reserved[14];
++
++ u32 ui32WriteOpsPending;
++ u32 ui32ReadOpsPending;
++#else
++ u32 ui32WriteOpsPending;
++ volatile u32 ui32WriteOpsComplete;
++
++
++ u32 ui32ReadOpsPending;
++ volatile u32 ui32ReadOpsComplete;
++#endif
++
++
++ u32 ui32LastOpDumpVal;
++ u32 ui32LastReadOpDumpVal;
++
++} PVRSRV_SYNC_DATA;
++
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ void * hMappingInfo;
++
++
++ void * hKernelSyncInfo;
++
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++
++
++typedef struct PVRSRV_RESOURCE_TAG
++{
++ volatile u32 ui32Lock;
++ u32 ui32ID;
++}PVRSRV_RESOURCE;
++typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
++
++
++typedef void (*PFN_CMD_COMPLETE) (void *);
++typedef void (**PPFN_CMD_COMPLETE) (void *);
++
++typedef int (*PFN_CMD_PROC) (void *, u32, void*);
++typedef int (**PPFN_CMD_PROC) (void *, u32, void*);
++
++
++typedef struct _IMG_RECT_
++{
++ s32 x0;
++ s32 y0;
++ s32 x1;
++ s32 y1;
++}IMG_RECT;
++
++typedef struct _IMG_RECT_16_
++{
++ s16 x0;
++ s16 y0;
++ s16 x1;
++ s16 y1;
++}IMG_RECT_16;
++
++
++typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(void *,
++ void *,
++ IMG_SYS_PHYADDR**,
++ u32*,
++ void**,
++ void **,
++ int*);
++
++
++typedef struct DISPLAY_DIMS_TAG
++{
++ u32 ui32ByteStride;
++ u32 ui32Width;
++ u32 ui32Height;
++} DISPLAY_DIMS;
++
++
++typedef struct DISPLAY_FORMAT_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++} DISPLAY_FORMAT;
++
++typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++} DISPLAY_SURF_ATTRIBUTES;
++
++
++typedef struct DISPLAY_MODE_INFO_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++
++ u32 ui32RefreshHZ;
++
++ u32 ui32OEMFlags;
++} DISPLAY_MODE_INFO;
++
++
++
++#define MAX_DISPLAY_NAME_SIZE (50)
++
++typedef struct DISPLAY_INFO_TAG
++{
++
++ u32 ui32MaxSwapChains;
++
++ u32 ui32MaxSwapChainBuffers;
++
++ u32 ui32MinSwapInterval;
++
++ u32 ui32MaxSwapInterval;
++
++ u32 ui32PhysicalWidthmm;
++ u32 ui32PhysicalHeightmm;
++
++ char szDisplayName[MAX_DISPLAY_NAME_SIZE];
++#if defined(SUPPORT_HW_CURSOR)
++
++ u16 ui32CursorWidth;
++ u16 ui32CursorHeight;
++#endif
++} DISPLAY_INFO;
++
++typedef struct ACCESS_INFO_TAG
++{
++ u32 ui32Size;
++ u32 ui32FBPhysBaseAddress;
++ u32 ui32FBMemAvailable;
++ u32 ui32SysPhysBaseAddress;
++ u32 ui32SysSize;
++ u32 ui32DevIRQ;
++}ACCESS_INFO;
++
++
++typedef struct PVRSRV_CURSOR_SHAPE_TAG
++{
++ u16 ui16Width;
++ u16 ui16Height;
++ s16 i16XHot;
++ s16 i16YHot;
++
++
++ void* pvMask;
++ s16 i16MaskByteStride;
++
++
++ void* pvColour;
++ s16 i16ColourByteStride;
++ PVRSRV_PIXEL_FORMAT eColourPixelFormat;
++} PVRSRV_CURSOR_SHAPE;
++
++#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION (1<<3)
++
++typedef struct PVRSRV_CURSOR_INFO_TAG
++{
++
++ u32 ui32Flags;
++
++
++ int bVisible;
++
++
++ s16 i16XPos;
++ s16 i16YPos;
++
++
++ PVRSRV_CURSOR_SHAPE sCursorShape;
++
++
++ u32 ui32Rotation;
++
++} PVRSRV_CURSOR_INFO;
++
++
++typedef struct _PVRSRV_REGISTRY_INFO_
++{
++ u32 ui32DevCookie;
++ char *pszKey;
++ char *pszValue;
++ char *pszBuf;
++ u32 ui32BufSize;
++} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
++
++
++PVRSRV_ERROR PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++PVRSRV_ERROR PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1)
++
++#define MAX_BUFFER_DEVICE_NAME_SIZE (50)
++
++typedef struct BUFFER_INFO_TAG
++{
++ u32 ui32BufferCount;
++ u32 ui32BufferDeviceID;
++ PVRSRV_PIXEL_FORMAT pixelformat;
++ u32 ui32ByteStride;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32Flags;
++ char szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE];
++} BUFFER_INFO;
++
++typedef enum _OVERLAY_DEINTERLACE_MODE_
++{
++ WEAVE=0x0,
++ BOB_ODD,
++ BOB_EVEN,
++ BOB_EVEN_NONINTERLEAVED
++} OVERLAY_DEINTERLACE_MODE;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/servicesint.h
+@@ -0,0 +1,254 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT (500)
++
++#define DRIVERNAME_MAXLENGTH (100)
++
++
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_
++{
++
++ void * pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ u32 ui32Flags;
++
++
++ u32 ui32AllocSize;
++
++
++ PVRSRV_MEMBLK sMemBlk;
++
++
++ void * pvSysBackupBuffer;
++
++
++ u32 ui32RefCount;
++
++
++ int bPendingFree;
++
++
++ #if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ u64 ui64Stamp;
++ #else
++ u32 dummy1;
++ u32 dummy2;
++ #endif
++ #endif
++
++
++ struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo;
++
++} PVRSRV_KERNEL_MEM_INFO;
++
++
++typedef struct _PVRSRV_KERNEL_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM;
++
++
++ void * hResItem;
++
++
++
++ u32 ui32RefCount;
++
++} PVRSRV_KERNEL_SYNC_INFO;
++
++typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
++{
++
++ u32 ui32ReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ u32 ui32WriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++} PVRSRV_DEVICE_SYNC_OBJECT;
++
++typedef struct _PVRSRV_SYNC_OBJECT
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++ u32 ui32WriteOpsPending;
++ u32 ui32ReadOpsPending;
++
++}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
++
++typedef struct _PVRSRV_COMMAND
++{
++ u32 ui32CmdSize;
++ u32 ui32DevIndex;
++ u32 CommandType;
++ u32 ui32DstSyncCount;
++ u32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ u32 ui32DataSize;
++ u32 ui32ProcessID;
++ void *pvData;
++}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
++
++
++typedef struct _PVRSRV_QUEUE_INFO_
++{
++ void *pvLinQueueKM;
++ void *pvLinQueueUM;
++ volatile u32 ui32ReadOffset;
++ volatile u32 ui32WriteOffset;
++ u32 *pui32KickerAddrKM;
++ u32 *pui32KickerAddrUM;
++ u32 ui32QueueSize;
++
++ u32 ui32ProcessID;
++
++ void * hMemBlock[2];
++
++ struct _PVRSRV_QUEUE_INFO_ *psNextKM;
++}PVRSRV_QUEUE_INFO;
++
++typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*,
++ PVRSRV_COMMAND**,
++ u32,
++ u16,
++ u32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ u32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ u32);
++typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, int);
++
++
++typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
++{
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ void * hDevMemContext;
++ void * hExtDevice;
++ void * hExtBuffer;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++} PVRSRV_DEVICECLASS_BUFFER;
++
++
++typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
++{
++ void * hDeviceKM;
++ void * hServices;
++} PVRSRV_CLIENT_DEVICECLASS_INFO;
++
++
++static inline
++u32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, int bIsReadOp)
++{
++ u32 ui32WriteOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ else
++ {
++
++
++
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ return ui32WriteOpsPending;
++}
++
++static inline
++u32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, int bIsReadOp)
++{
++ u32 ui32ReadOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ else
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ return ui32ReadOpsPending;
++}
++
++
++PVRSRV_ERROR PVRSRVQueueCommand(void * hQueueInfo,
++ PVRSRV_COMMAND *psCommand);
++
++
++
++ PVRSRV_ERROR
++PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++ void * hDevMemContext,
++ IMG_DEV_PHYADDR *sPDDevPAddr);
++
++ PVRSRV_ERROR
++PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ u32 ui32Flags,
++ u32 ui32Size,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++ PVRSRV_ERROR
++PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++ PVRSRV_ERROR
++PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++ PVRSRV_ERROR
++PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
++ void * hKernelMemInfo,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/sgx_bridge.h
+@@ -0,0 +1,477 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)
++
++#if defined(TRANSFER_QUEUE)
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#endif
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_SUBMIT2D PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
++#endif
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++
++#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
++
++#if defined(SUPPORT_SGX_HWPERF)
++#define PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
++#endif
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
++#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
++#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
++#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
++#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++#endif
++
++
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
++{
++ u32 ui32BridgeFlags;
++ void * hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddr;
++}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
++{
++ PVRSRV_ERROR eError;
++ IMG_DEV_PHYADDR DevPAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hDevMemContext;
++}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
++{
++ SGX_INTERNAL_DEVINFO sSGXInternalDevInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
++{
++ SGX_CLIENT_INFO sClientInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ SGX_CLIENT_INFO sClientInfo;
++}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ SGX_CCB_KICK sCCBKick;
++}PVRSRV_BRIDGE_IN_DOKICK;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++}PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES;
++
++
++#if defined(TRANSFER_QUEUE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ PVRSRV_TRANSFER_SGX_KICK sKick;
++}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ PVRSRV_2D_SGX_KICK sKick;
++} PVRSRV_BRIDGE_IN_SUBMIT2D;
++#endif
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
++{
++ u32 ui32BridgeFlags;
++ void *hDevCookie;
++ char *pszKey;
++ char *pszValue;
++}PVRSRV_BRIDGE_IN_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32Data;
++}PVRSRV_BRIDGE_OUT_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ SGX_MISC_INFO *psMiscInfo;
++}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
++{
++ PVRSRV_ERROR eError;
++ SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo;
++}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ SGX_BRIDGE_INIT_INFO sInitInfo;
++}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
++
++
++typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hKernSyncInfo;
++ int bWaitForComplete;
++}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
++
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ int bLockOnFailure;
++ u32 ui32TotalPBSize;
++}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
++{
++ void * hKernelMemInfo;
++ void * hSharedPBDesc;
++ void * hSharedPBDescKernelMemInfoHandle;
++ void * hHWPBDescKernelMemInfoHandle;
++ void * hBlockKernelMemInfoHandle;
++ void * hHWBlockKernelMemInfoHandle;
++ void * ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++ u32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hSharedPBDesc;
++}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hSharedPBDescKernelMemInfo;
++ void * hHWPBDescKernelMemInfo;
++ void * hBlockKernelMemInfo;
++ void * hHWBlockKernelMemInfo;
++ u32 ui32TotalPBSize;
++ void * *phKernelMemInfoHandles;
++ u32 ui32KernelMemInfoHandlesCount;
++}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hSharedPBDesc;
++}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
++
++
++#ifdef PDUMP
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
++{
++ u32 ui32BridgeFlags;
++ SGX_KICKTA_DUMP_BUFFER *psBufferArray;
++ u32 ui32BufferArrayLength;
++ int bDumpPolls;
++} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ u32 ui32DumpFrameNum;
++ int bLastFrame;
++ u32 *pui32Registers;
++ u32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG
++{
++ u32 ui32BridgeFlags;
++ u32 ui32DumpFrameNum;
++ int bLastFrame;
++ u32 *pui32Registers;
++ u32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ u32 ui32DumpFrameNum;
++ u32 ui32TAKickCount;
++ int bLastFrame;
++ u32 *pui32Registers;
++ u32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ char szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ u32 ui32FileOffset;
++ u32 ui32PDumpFlags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB;
++
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hHWRenderContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hHWTransferContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hHWTransferContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ void * hHW2DContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ void * hHW2DContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
++
++#define SGX2D_MAX_BLT_CMD_SIZ 256
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ u32 ui32Reg;
++ int bNew;
++ u32 ui32New;
++ u32 ui32NewReset;
++ u32 ui32CountersReg;
++ u32 ui32Reg2;
++} PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32Old;
++ int bActive;
++ PVRSRV_SGXDEV_DIFF_INFO sDiffs;
++} PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS;
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB_TAG
++{
++ u32 ui32BridgeFlags;
++ void * hDevCookie;
++ u32 ui32ArraySize;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData;
++} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB_TAG
++{
++ PVRSRV_ERROR eError;
++ u32 ui32DataCount;
++ u32 ui32ClockSpeed;
++ u32 ui32HostTimeStamp;
++} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB;
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/sgx_mkif_km.h
+@@ -0,0 +1,388 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGX_MKIF_KM_H__)
++#define __SGX_MKIF_KM_H__
++
++#include "img_types.h"
++#include "servicesint.h"
++#include "sgxapi_km.h"
++
++
++#if defined(SGX_FEATURE_MP)
++ #define SGX_REG_BANK_SHIFT (12)
++ #define SGX_REG_BANK_SIZE (0x4000)
++ #if defined(SGX541)
++ #define SGX_REG_BANK_BASE_INDEX (1)
++ #define SGX_REG_BANK_MASTER_INDEX (SGX_REG_BANK_BASE_INDEX + SGX_FEATURE_MP_CORE_COUNT)
++ #else
++ #define SGX_REG_BANK_BASE_INDEX (2)
++ #define SGX_REG_BANK_MASTER_INDEX (1)
++ #endif
++ #define SGX_MP_CORE_SELECT(x,i) (x + ((i + SGX_REG_BANK_BASE_INDEX) * SGX_REG_BANK_SIZE))
++ #define SGX_MP_MASTER_SELECT(x) (x + (SGX_REG_BANK_MASTER_INDEX * SGX_REG_BANK_SIZE))
++#else
++ #define SGX_MP_CORE_SELECT(x,i) (x)
++#endif
++
++
++typedef struct _SGXMKIF_COMMAND_
++{
++ u32 ui32ServiceAddress;
++ u32 ui32CacheControl;
++ u32 ui32Data[2];
++} SGXMKIF_COMMAND;
++
++
++typedef struct _PVRSRV_SGX_KERNEL_CCB_
++{
++ SGXMKIF_COMMAND asCommands[256];
++} PVRSRV_SGX_KERNEL_CCB;
++
++
++typedef struct _PVRSRV_SGX_CCB_CTL_
++{
++ u32 ui32WriteOffset;
++#ifdef INTEL_D3_PAD
++ u32 _reserved[15];
++#endif
++ u32 ui32ReadOffset;
++} PVRSRV_SGX_CCB_CTL;
++
++
++typedef struct _SGXMKIF_HOST_CTL_
++{
++#ifndef INTEL_D3_PAD
++#if defined(PVRSRV_USSE_EDM_BREAKPOINTS)
++ u32 ui32BreakpointDisable;
++ u32 ui32Continue;
++#endif
++
++ volatile u32 ui32InitStatus;
++ volatile u32 ui32PowerStatus;
++ volatile u32 ui32CleanupStatus;
++#if defined(SUPPORT_HW_RECOVERY)
++ u32 ui32uKernelDetectedLockups;
++ u32 ui32HostDetectedLockups;
++ u32 ui32HWRecoverySampleRate;
++#endif
++ u32 ui32uKernelTimerClock;
++ u32 ui32ActivePowManSampleRate;
++ u32 ui32InterruptFlags;
++ u32 ui32InterruptClearFlags;
++
++
++ u32 ui32NumActivePowerEvents;
++
++#if defined(SUPPORT_SGX_HWPERF)
++ u32 ui32HWPerfFlags;
++#endif
++
++
++ u32 ui32TimeWraps;
++#else
++ // INTEL_D3_PAD defined
++
++ // SGX only write
++#if defined(PVRSRV_USSE_EDM_BREAKPOINTS)
++ u32 ui32BreakpointDisable;
++ u32 ui32Continue;
++#else
++ u32 _reserved1[2];
++#endif
++#if defined(SUPPORT_HW_RECOVERY)
++ u32 ui32uKernelDetectedLockups;
++#else
++ u32 _reserved2;
++#endif
++ u32 ui32InterruptFlags;
++ u32 ui32TimeWraps;
++
++ u32 _reserved3[11];
++
++ // CPU only write
++#if defined(SUPPORT_HW_RECOVERY)
++ u32 ui32HostDetectedLockups;
++ u32 ui32HWRecoverySampleRate;
++#else
++ u32 _reserved4[2];
++#endif
++ u32 ui32uKernelTimerClock;
++ u32 ui32ActivePowManSampleRate;
++ u32 ui32NumActivePowerEvents;
++#if defined(SUPPORT_SGX_HWPERF)
++ u32 ui32HWPerfFlags;
++#else
++ u32 _reserved5;
++#endif
++
++ u32 _reserved6[10];
++
++ // Both write
++ volatile u32 ui32InitStatus;
++ u32 _reserved7[15];
++ volatile u32 ui32PowerStatus;
++ u32 _reserved8[15];
++ volatile u32 ui32CleanupStatus;
++ u32 _reserved9[15];
++ volatile u32 ui32InterruptClearFlags;
++ u32 _reserved10[15];
++
++#endif
++} SGXMKIF_HOST_CTL;
++
++#define SGXMKIF_CMDTA_CTRLFLAGS_READY 0x00000001
++typedef struct _SGXMKIF_CMDTA_SHARED_
++{
++ u32 ui32CtrlFlags;
++
++ u32 ui32NumTAStatusVals;
++ u32 ui32Num3DStatusVals;
++
++
++ u32 ui32TATQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr;
++ u32 ui32TATQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr;
++
++
++ u32 ui323DTQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr;
++ u32 ui323DTQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr;
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ u32 ui32NumTASrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTASrcSyncs[SGX_MAX_TA_SRC_SYNCS];
++ u32 ui32NumTADstSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTADstSyncs[SGX_MAX_TA_DST_SYNCS];
++ u32 ui32Num3DSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT as3DSrcSyncs[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ u32 ui32NumSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency;
++
++ CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++ CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++
++} SGXMKIF_CMDTA_SHARED;
++
++#define SGXTQ_MAX_STATUS SGX_MAX_TRANSFER_STATUS_VALS + 2
++
++#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001
++#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002
++#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004
++#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008
++#if defined(SGX_FEATURE_FAST_RENDER_CONTEXT_SWITCH)
++#define SGXMKIF_TQFLAGS_CTXSWITCH 0x00000010
++#endif
++#define SGXMKIF_TQFLAGS_DUMMYTRANSFER 0x00000020
++
++typedef struct _SGXMKIF_TRANSFERCMD_SHARED_
++{
++
++
++ u32 ui32SrcReadOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcReadOpsCompleteDevAddr;
++
++ u32 ui32SrcWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcWriteOpsCompleteDevAddr;
++
++
++
++ u32 ui32DstReadOpPendingVal;
++ IMG_DEV_VIRTADDR sDstReadOpsCompleteDevAddr;
++
++ u32 ui32DstWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sDstWriteOpsCompleteDevAddr;
++
++
++ u32 ui32TASyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr;
++ u32 ui32TASyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr;
++
++
++ u32 ui323DSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr;
++ u32 ui323DSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr;
++
++ u32 ui32NumStatusVals;
++ CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS];
++} SGXMKIF_TRANSFERCMD_SHARED, *PSGXMKIF_TRANSFERCMD_SHARED;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGXMKIF_2DCMD_SHARED_ {
++
++ u32 ui32NumSrcSync;
++ PVRSRV_DEVICE_SYNC_OBJECT sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sDstSyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTASyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT s3DSyncData;
++} SGXMKIF_2DCMD_SHARED, *PSGXMKIF_2DCMD_SHARED;
++#endif
++
++
++typedef struct _SGXMKIF_HWDEVICE_SYNC_LIST_
++{
++ IMG_DEV_VIRTADDR sAccessDevAddr;
++ u32 ui32NumSyncObjects;
++
++ PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1];
++} SGXMKIF_HWDEVICE_SYNC_LIST, *PSGXMKIF_HWDEVICE_SYNC_LIST;
++
++
++#define PVRSRV_USSE_EDM_INIT_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4)
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5)
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0)
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1)
++
++#define PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_MISCINFO_READY 0x1UL
++#define PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES 0x2UL
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++#define PVRSRV_USSE_MISCINFO_MEMREAD 0x4UL
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define PVRSRV_USSE_MISCINFO_MEMREAD_FAIL 0x1UL << 31;
++#endif
++#endif
++
++
++#define PVRSRV_CLEANUPCMD_RT 0x1
++#define PVRSRV_CLEANUPCMD_RC 0x2
++#define PVRSRV_CLEANUPCMD_TC 0x3
++#define PVRSRV_CLEANUPCMD_2DC 0x4
++#define PVRSRV_CLEANUPCMD_PB 0x5
++
++#define PVRSRV_POWERCMD_POWEROFF 0x1
++#define PVRSRV_POWERCMD_IDLE 0x2
++#define PVRSRV_POWERCMD_RESUME 0x3
++
++
++#if defined(SGX_FEATURE_BIF_NUM_DIRLISTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM (SGX_FEATURE_BIF_NUM_DIRLISTS - 1)
++#else
++#define SGX_BIF_DIR_LIST_INDEX_EDM (0)
++#endif
++
++#define SGX_BIF_INVALIDATE_PTCACHE 0x1
++#define SGX_BIF_INVALIDATE_PDCACHE 0x2
++#define SGX_BIF_INVALIDATE_SLCACHE 0x4
++
++
++typedef struct _SGX_MISCINFO_STRUCT_SIZES_
++{
++#if defined (SGX_FEATURE_2D_HARDWARE)
++ u32 ui32Sizeof_2DCMD;
++ u32 ui32Sizeof_2DCMD_SHARED;
++#endif
++ u32 ui32Sizeof_CMDTA;
++ u32 ui32Sizeof_CMDTA_SHARED;
++ u32 ui32Sizeof_TRANSFERCMD;
++ u32 ui32Sizeof_TRANSFERCMD_SHARED;
++ u32 ui32Sizeof_3DREGISTERS;
++ u32 ui32Sizeof_HWPBDESC;
++ u32 ui32Sizeof_HWRENDERCONTEXT;
++ u32 ui32Sizeof_HWRENDERDETAILS;
++ u32 ui32Sizeof_HWRTDATA;
++ u32 ui32Sizeof_HWRTDATASET;
++ u32 ui32Sizeof_HWTRANSFERCONTEXT;
++ u32 ui32Sizeof_HOST_CTL;
++ u32 ui32Sizeof_COMMAND;
++} SGX_MISCINFO_STRUCT_SIZES;
++
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++typedef struct _PVRSRV_SGX_MISCINFO_MEMREAD
++{
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++} PVRSRV_SGX_MISCINFO_MEMREAD;
++#endif
++
++typedef struct _PVRSRV_SGX_MISCINFO_INFO
++{
++ u32 ui32MiscInfoFlags;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVRSRV_SGX_MISCINFO_MEMREAD sSGXMemReadData;
++#endif
++} PVRSRV_SGX_MISCINFO_INFO;
++
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++#define SGXMK_TRACE_BUFFER_SIZE 512
++#endif
++
++#define SGXMKIF_HWPERF_CB_SIZE 0x100
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct _SGXMKIF_HWPERF_CB_ENTRY_
++{
++ u32 ui32FrameNo;
++ u32 ui32Type;
++ u32 ui32Ordinal;
++ u32 ui32TimeWraps;
++ u32 ui32Time;
++ u32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} SGXMKIF_HWPERF_CB_ENTRY;
++
++typedef struct _SGXMKIF_HWPERF_CB_
++{
++ u32 ui32Woff;
++ u32 ui32Roff;
++ u32 ui32OrdinalGRAPHICS;
++ u32 ui32OrdinalMK_EXECUTION;
++ SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE];
++} SGXMKIF_HWPERF_CB;
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/sgx_options.h
+@@ -0,0 +1,224 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(DEBUG) || defined (INTERNAL_TEST)
++#define DEBUG_SET_OFFSET OPTIONS_BIT0
++#define OPTIONS_BIT0 0x1
++#else
++#define OPTIONS_BIT0 0x0
++#endif
++
++#if defined(PDUMP) || defined (INTERNAL_TEST)
++#define PDUMP_SET_OFFSET OPTIONS_BIT1
++#define OPTIONS_BIT1 (0x1 << 1)
++#else
++#define OPTIONS_BIT1 0x0
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined (INTERNAL_TEST)
++#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2
++#define OPTIONS_BIT2 (0x1 << 2)
++#else
++#define OPTIONS_BIT2 0x0
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY) || defined (INTERNAL_TEST)
++#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3
++#define OPTIONS_BIT3 (0x1 << 3)
++#else
++#define OPTIONS_BIT3 0x0
++#endif
++
++
++
++#if defined(PVR_SECURE_HANDLES) || defined (INTERNAL_TEST)
++#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
++#define OPTIONS_BIT4 (0x1 << 4)
++#else
++#define OPTIONS_BIT4 0x0
++#endif
++
++#if defined(SGX_BYPASS_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5
++#define OPTIONS_BIT5 (0x1 << 5)
++#else
++#define OPTIONS_BIT5 0x0
++#endif
++
++#if defined(SGX_DMS_AGE_ENABLE) || defined (INTERNAL_TEST)
++#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6
++#define OPTIONS_BIT6 (0x1 << 6)
++#else
++#define OPTIONS_BIT6 0x0
++#endif
++
++#if defined(SGX_FAST_DPM_INIT) || defined (INTERNAL_TEST)
++#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8
++#define OPTIONS_BIT8 (0x1 << 8)
++#else
++#define OPTIONS_BIT8 0x0
++#endif
++
++#if defined(SGX_FEATURE_DCU) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9
++#define OPTIONS_BIT9 (0x1 << 9)
++#else
++#define OPTIONS_BIT9 0x0
++#endif
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10
++#define OPTIONS_BIT10 (0x1 << 10)
++#else
++#define OPTIONS_BIT10 0x0
++#endif
++
++#if defined(SGX_FEATURE_MULTITHREADED_UKERNEL) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET OPTIONS_BIT11
++#define OPTIONS_BIT11 (0x1 << 11)
++#else
++#define OPTIONS_BIT11 0x0
++#endif
++
++
++
++#if defined(SGX_FEATURE_OVERLAPPED_SPM) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET OPTIONS_BIT12
++#define OPTIONS_BIT12 (0x1 << 12)
++#else
++#define OPTIONS_BIT12 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT13
++#define OPTIONS_BIT13 (0x1 << 13)
++#else
++#define OPTIONS_BIT13 0x0
++#endif
++
++#if defined(SGX_SUPPORT_HWPROFILING) || defined (INTERNAL_TEST)
++#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT14
++#define OPTIONS_BIT14 (0x1 << 14)
++#else
++#define OPTIONS_BIT14 0x0
++#endif
++
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) || defined (INTERNAL_TEST)
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT15
++#define OPTIONS_BIT15 (0x1 << 15)
++#else
++#define OPTIONS_BIT15 0x0
++#endif
++
++#if defined(SUPPORT_DISPLAYCONTROLLER_TILING) || defined (INTERNAL_TEST)
++#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT16
++#define OPTIONS_BIT16 (0x1 << 16)
++#else
++#define OPTIONS_BIT16 0x0
++#endif
++
++#if defined(SUPPORT_PERCONTEXT_PB) || defined (INTERNAL_TEST)
++#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT17
++#define OPTIONS_BIT17 (0x1 << 17)
++#else
++#define OPTIONS_BIT17 0x0
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_HWPERF_SET_OFFSET OPTIONS_BIT18
++#define OPTIONS_BIT18 (0x1 << 18)
++#else
++#define OPTIONS_BIT18 0x0
++#endif
++
++
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT19
++#define OPTIONS_BIT19 (0x1 << 19)
++#else
++#define OPTIONS_BIT19 0x0
++#endif
++
++#if defined(SUPPORT_SGX_PRIORITY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT20
++#define OPTIONS_BIT20 (0x1 << 20)
++#else
++#define OPTIONS_BIT20 0x0
++#endif
++
++#if defined(SGX_LOW_LATENCY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING_SET_OFFSET OPTIONS_BIT21
++#define OPTIONS_BIT21 (0x1 << 21)
++#else
++#define OPTIONS_BIT21 0x0
++#endif
++
++#if defined(USE_SUPPORT_NO_TA3D_OVERLAP) || defined (INTERNAL_TEST)
++#define USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET OPTIONS_BIT22
++#define OPTIONS_BIT22 (0x1 << 22)
++#else
++#define OPTIONS_BIT22 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define OPTIONS_HIGHBYTE ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET)
++#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL
++#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
++#else
++#define OPTIONS_HIGHBYTE 0x0
++#endif
++
++
++
++#define SGX_BUILD_OPTIONS \
++ OPTIONS_BIT0 |\
++ OPTIONS_BIT1 |\
++ OPTIONS_BIT2 |\
++ OPTIONS_BIT3 |\
++ OPTIONS_BIT4 |\
++ OPTIONS_BIT5 |\
++ OPTIONS_BIT6 |\
++ OPTIONS_BIT8 |\
++ OPTIONS_BIT9 |\
++ OPTIONS_BIT10 |\
++ OPTIONS_BIT11 |\
++ OPTIONS_BIT12 |\
++ OPTIONS_BIT13 |\
++ OPTIONS_BIT14 |\
++ OPTIONS_BIT15 |\
++ OPTIONS_BIT16 |\
++ OPTIONS_BIT17 |\
++ OPTIONS_BIT18 |\
++ OPTIONS_BIT19 |\
++ OPTIONS_BIT20 |\
++ OPTIONS_BIT21 |\
++ OPTIONS_HIGHBYTE
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/sgxapi_km.h
+@@ -0,0 +1,329 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "sgxdefs.h"
++
++#if defined(__linux__) && !defined(USE_CODE)
++ #if defined(__KERNEL__)
++ #include <asm/unistd.h>
++ #else
++ #include <unistd.h>
++ #endif
++#endif
++
++#define SGX_UNDEFINED_HEAP_ID (~0LU)
++#define SGX_GENERAL_HEAP_ID 0
++#define SGX_TADATA_HEAP_ID 1
++#define SGX_KERNEL_CODE_HEAP_ID 2
++#define SGX_KERNEL_DATA_HEAP_ID 3
++#define SGX_PIXELSHADER_HEAP_ID 4
++#define SGX_VERTEXSHADER_HEAP_ID 5
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7
++#define SGX_SYNCINFO_HEAP_ID 8
++#define SGX_3DPARAMETERS_HEAP_ID 9
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++#define SGX_GENERAL_MAPPING_HEAP_ID 10
++#endif
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_2D_HEAP_ID 11
++#else
++#if defined(FIX_HW_BRN_26915)
++#define SGX_CGBUFFER_HEAP_ID 12
++#endif
++#endif
++#if !defined(INTEL_D3_CACHED_CBUF)
++#define SGX_MAX_HEAP_ID 13
++#else
++#define SGX_CACHED_GENERAL_HEAP_ID 13
++#define SGX_MAX_HEAP_ID 14
++#endif
++
++
++#define SGX_MAX_TA_STATUS_VALS 32
++#define SGX_MAX_3D_STATUS_VALS 3
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++#define SGX_MAX_TA_DST_SYNCS 1
++#define SGX_MAX_TA_SRC_SYNCS 1
++#define SGX_MAX_3D_SRC_SYNCS 4
++#else
++#define SGX_MAX_SRC_SYNCS 4
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9
++
++#define PVRSRV_SGX_HWPERF_INVALID 0x1
++
++#define PVRSRV_SGX_HWPERF_TRANSFER 0x2
++#define PVRSRV_SGX_HWPERF_TA 0x3
++#define PVRSRV_SGX_HWPERF_3D 0x4
++#define PVRSRV_SGX_HWPERF_2D 0x5
++
++#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101
++#define PVRSRV_SGX_HWPERF_MK_TA 0x102
++#define PVRSRV_SGX_HWPERF_MK_3D 0x103
++#define PVRSRV_SGX_HWPERF_MK_2D 0x104
++
++#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28
++#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK ((1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_START (0UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_END (1Ul << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_START (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_END (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_START (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_END (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_START (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_END (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_OFF (0x0)
++#define PVRSRV_SGX_HWPERF_GRAPHICS_ON (1UL << 0)
++#define PVRSRV_SGX_HWPERF_MK_EXECUTION_ON (1UL << 1)
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CB_ENTRY_
++{
++ u32 ui32FrameNo;
++ u32 ui32Type;
++ u32 ui32Ordinal;
++ u32 ui32Clocksx16;
++ u32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} PVRSRV_SGX_HWPERF_CB_ENTRY;
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CBDATA_
++{
++ u32 ui32FrameNo;
++ u32 ui32Type;
++ u32 ui32StartTimeWraps;
++ u32 ui32StartTime;
++ u32 ui32EndTimeWraps;
++ u32 ui32EndTime;
++ u32 ui32ClockSpeed;
++ u32 ui32TimeMax;
++} PVRSRV_SGX_HWPERF_CBDATA;
++
++
++typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB
++{
++ PVRSRV_SGX_HWPERF_CBDATA* psHWPerfData;
++ u32 ui32ArraySize;
++ u32 ui32DataCount;
++ u32 ui32Time;
++} SGX_MISC_INFO_HWPERF_RETRIEVE_CB;
++#endif
++
++
++typedef struct _CTL_STATUS_
++{
++ IMG_DEV_VIRTADDR sStatusDevAddr;
++ u32 ui32StatusValue;
++} CTL_STATUS;
++
++
++typedef enum _SGX_MISC_INFO_REQUEST_
++{
++ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++ SGX_MISC_INFO_REQUEST_SGXREV,
++ SGX_MISC_INFO_REQUEST_DRIVER_SGXREV,
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ SGX_MISC_INFO_REQUEST_MEMREAD,
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++ SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++#endif
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_MISC_INFO_REQUEST_SET_BREAKPOINT,
++#endif
++ SGX_MISC_INFO_DUMP_DEBUG_INFO,
++ SGX_MISC_INFO_PANIC,
++ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff
++} SGX_MISC_INFO_REQUEST;
++
++
++typedef struct _PVRSRV_SGX_MISCINFO_FEATURES
++{
++ u32 ui32CoreRev;
++ u32 ui32CoreID;
++ u32 ui32DDKVersion;
++ u32 ui32DDKBuild;
++ u32 ui32CoreIdSW;
++ u32 ui32CoreRevSW;
++ u32 ui32BuildOptions;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ u32 ui32DeviceMemValue;
++#endif
++} PVRSRV_SGX_MISCINFO_FEATURES;
++
++
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++typedef struct _SGX_BREAKPOINT_INFO
++{
++
++ int bBPEnable;
++
++
++
++ u32 ui32BPIndex;
++
++ IMG_DEV_VIRTADDR sBPDevVAddr;
++} SGX_BREAKPOINT_INFO;
++#endif
++
++typedef struct _SGX_MISC_INFO_
++{
++ SGX_MISC_INFO_REQUEST eRequest;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_DEV_VIRTADDR sDevVAddr;
++ void * hDevMemContext;
++#endif
++ union
++ {
++ u32 reserved;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ u32 ui32SGXClockSpeed;
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_BREAKPOINT_INFO sSGXBreakpointInfo;
++#endif
++#ifdef SUPPORT_SGX_HWPERF
++ u32 ui32NewHWPerfStatus;
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB sRetrieveCB;
++#endif
++ } uData;
++} SGX_MISC_INFO;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_MAX_BLT_SRC_SYNCS 3
++#endif
++
++
++#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256
++
++typedef struct _SGX_KICKTA_DUMPBITMAP_
++{
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ u32 ui32Flags;
++ u32 ui32Width;
++ u32 ui32Height;
++ u32 ui32Stride;
++ u32 ui32PDUMPFormat;
++ u32 ui32BytesPP;
++ char pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++} SGX_KICKTA_DUMPBITMAP, *PSGX_KICKTA_DUMPBITMAP;
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16)
++
++typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
++{
++
++ u32 ui32CacheControl;
++
++} PVRSRV_SGX_PDUMP_CONTEXT;
++
++
++typedef struct _SGX_KICKTA_DUMP_ROFF_
++{
++ void *hKernelMemInfo;
++ u32 uiAllocIndex;
++ u32 ui32Offset;
++ u32 ui32Value;
++ char *pszName;
++} SGX_KICKTA_DUMP_ROFF, *PSGX_KICKTA_DUMP_ROFF;
++
++typedef struct _SGX_KICKTA_DUMP_BUFFER_
++{
++ u32 ui32SpaceUsed;
++ u32 ui32Start;
++ u32 ui32End;
++ u32 ui32BufferSize;
++ u32 ui32BackEndLength;
++ u32 uiAllocIndex;
++ void * hKernelMemInfo;
++ void * pvLinAddr;
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ void * hCtrlKernelMemInfo;
++ IMG_DEV_VIRTADDR sCtrlDevVAddr;
++#endif
++ char *pszName;
++} SGX_KICKTA_DUMP_BUFFER, *PSGX_KICKTA_DUMP_BUFFER;
++
++#ifdef PDUMP
++typedef struct _SGX_KICKTA_PDUMP_
++{
++
++ PSGX_KICKTA_DUMPBITMAP psPDumpBitmapArray;
++ u32 ui32PDumpBitmapSize;
++
++
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray;
++ u32 ui32BufferArraySize;
++
++
++ PSGX_KICKTA_DUMP_ROFF psROffArray;
++ u32 ui32ROffArraySize;
++} SGX_KICKTA_PDUMP, *PSGX_KICKTA_PDUMP;
++#endif
++
++#if defined(TRANSFER_QUEUE)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_MAX_2D_BLIT_CMD_SIZE 26
++#define SGX_MAX_2D_SRC_SYNC_OPS 3
++#endif
++#define SGX_MAX_TRANSFER_STATUS_VALS 2
++#define SGX_MAX_TRANSFER_SYNC_OPS 5
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/sgxinfo.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++#include "servicesint.h"
++#include "services.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++
++
++#define SGX_MAX_DEV_DATA 24
++#define SGX_MAX_INIT_MEM_HANDLES 16
++
++
++typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++} SGX_BRIDGE_INFO_FOR_SRVINIT;
++
++
++typedef enum _SGXMKIF_CMD_TYPE_
++{
++ SGXMKIF_CMD_TA = 0,
++ SGXMKIF_CMD_TRANSFER = 1,
++ SGXMKIF_CMD_2D = 2,
++ SGXMKIF_CMD_POWER = 3,
++ SGXMKIF_CMD_CLEANUP = 4,
++ SGXMKIF_CMD_GETMISCINFO = 5,
++ SGXMKIF_CMD_PROCESS_QUEUES = 6,
++ SGXMKIF_CMD_MAX = 7,
++
++ SGXMKIF_CMD_FORCE_I32 = -1,
++
++} SGXMKIF_CMD_TYPE;
++
++
++typedef struct _SGX_BRIDGE_INIT_INFO_
++{
++ void * hKernelCCBMemInfo;
++ void * hKernelCCBCtlMemInfo;
++ void * hKernelCCBEventKickerMemInfo;
++ void * hKernelSGXHostCtlMemInfo;
++ void * hKernelSGXTA3DCtlMemInfo;
++ void * hKernelSGXMiscMemInfo;
++
++ u32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++
++ SGX_INIT_SCRIPTS sScripts;
++
++ u32 ui32ClientBuildOptions;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ void * hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ void * hKernelHWPerfCBMemInfo;
++#endif
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ void * hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ void * hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ void * hKernelTmpDPMStateMemInfo;
++#endif
++
++ u32 ui32EDMTaskReg0;
++ u32 ui32EDMTaskReg1;
++
++ u32 ui32ClkGateStatusReg;
++ u32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ u32 ui32MasterClkGateStatusReg;
++ u32 ui32MasterClkGateStatusMask;
++#endif
++
++ u32 ui32CacheControl;
++
++ u32 asInitDevData[SGX_MAX_DEV_DATA];
++ void * asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++} SGX_BRIDGE_INIT_INFO;
++
++
++typedef struct _SGX_DEVICE_SYNC_LIST_
++{
++ PSGXMKIF_HWDEVICE_SYNC_LIST psHWDeviceSyncList;
++
++ void * hKernelHWSyncListMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo;
++
++ volatile u32 *pui32Lock;
++
++ struct _SGX_DEVICE_SYNC_LIST_ *psNext;
++
++
++ u32 ui32NumSyncObjects;
++ void * ahSyncHandles[1];
++} SGX_DEVICE_SYNC_LIST, *PSGX_DEVICE_SYNC_LIST;
++
++
++typedef struct _SGX_INTERNEL_STATUS_UPDATE_
++{
++ CTL_STATUS sCtlStatus;
++ void * hKernelMemInfo;
++
++ u32 ui32LastStatusUpdateDumpVal;
++} SGX_INTERNEL_STATUS_UPDATE;
++
++
++typedef struct _SGX_CCB_KICK_
++{
++ SGXMKIF_COMMAND sCommand;
++ void * hCCBKernelMemInfo;
++
++ u32 ui32NumDstSyncObjects;
++ void * hKernelHWSyncListMemInfo;
++
++
++ void * *pahDstSyncHandles;
++
++ u32 ui32NumTAStatusVals;
++ u32 ui32Num3DStatusVals;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ SGX_INTERNEL_STATUS_UPDATE asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
++ SGX_INTERNEL_STATUS_UPDATE as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
++#else
++ void * ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++ void * ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++#endif
++
++ int bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++ int bTerminateOrAbort;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ int bKickRender;
++#endif
++
++
++ u32 ui32CCBOffset;
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ u32 ui32NumTASrcSyncs;
++ void * ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
++ u32 ui32NumTADstSyncs;
++ void * ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
++ u32 ui32Num3DSrcSyncs;
++ void * ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ u32 ui32NumSrcSyncs;
++ void * ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ int bTADependency;
++ void * hTA3DSyncInfo;
++
++ void * hTASyncInfo;
++ void * h3DSyncInfo;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++#if defined(NO_HARDWARE)
++ u32 ui32WriteOpsPendingVal;
++#endif
++} SGX_CCB_KICK;
++
++
++#define SGX_KERNEL_USE_CODE_BASE_INDEX 15
++
++
++typedef struct _SGX_CLIENT_INFO_
++{
++ u32 ui32ProcessID;
++ void *pvProcess;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++ u32 asDevData[SGX_MAX_DEV_DATA];
++
++} SGX_CLIENT_INFO;
++
++typedef struct _SGX_INTERNAL_DEVINFO_
++{
++ u32 ui32Flags;
++ void * hHostCtlKernelMemInfoHandle;
++ int bForcePTOff;
++} SGX_INTERNAL_DEVINFO;
++
++
++#if defined(TRANSFER_QUEUE)
++typedef struct _PVRSRV_TRANSFER_SGX_KICK_
++{
++ void * hCCBMemInfo;
++ u32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++
++ void * hTASyncInfo;
++ void * h3DSyncInfo;
++
++ u32 ui32NumSrcSync;
++ void * ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ u32 ui32NumDstSync;
++ void * ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ u32 ui32Flags;
++
++ u32 ui32PDumpFlags;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _PVRSRV_2D_SGX_KICK_
++{
++ void * hCCBMemInfo;
++ u32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++
++ u32 ui32NumSrcSync;
++ void * ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ void * hDstSyncInfo;
++
++
++ void * hTASyncInfo;
++
++
++ void * h3DSyncInfo;
++
++ u32 ui32PDumpFlags;
++#if defined(PDUMP)
++ u32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
++#endif
++#endif
++
++#define PVRSRV_SGX_DIFF_NUM_COUNTERS 9
++
++typedef struct _PVRSRV_SGXDEV_DIFF_INFO_
++{
++ u32 aui32Counters[PVRSRV_SGX_DIFF_NUM_COUNTERS];
++ u32 ui32Time[3];
++ u32 ui32Marker[2];
++} PVRSRV_SGXDEV_DIFF_INFO, *PPVRSRV_SGXDEV_DIFF_INFO;
++
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/sgxscript.h
+@@ -0,0 +1,81 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_MAX_INIT_COMMANDS 64
++#define SGX_MAX_DEINIT_COMMANDS 16
++
++typedef enum _SGX_INIT_OPERATION
++{
++ SGX_INIT_OP_ILLEGAL = 0,
++ SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++ SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++ SGX_INIT_OP_HALT
++} SGX_INIT_OPERATION;
++
++typedef union _SGX_INIT_COMMAND
++{
++ SGX_INIT_OPERATION eOp;
++ struct {
++ SGX_INIT_OPERATION eOp;
++ u32 ui32Offset;
++ u32 ui32Value;
++ } sWriteHWReg;
++#if defined(PDUMP)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ u32 ui32Offset;
++ u32 ui32Value;
++ } sPDumpHWReg;
++#endif
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ } sWorkaroundBRN22997;
++#endif
++} SGX_INIT_COMMAND;
++
++typedef struct _SGX_INIT_SCRIPTS_
++{
++ SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++} SGX_INIT_SCRIPTS;
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/include/srvkm.h
+@@ -0,0 +1,69 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++ #ifdef PVR_DISABLE_LOGGING
++ #define PVR_LOG(X)
++ #else
++ #define PVR_LOG(X) PVRSRVReleasePrintf X
++ #endif
++
++ void PVRSRVReleasePrintf(const char *pszFormat,
++ ...);
++
++ PVRSRV_ERROR PVRSRVProcessConnect(u32 ui32PID);
++ void PVRSRVProcessDisconnect(u32 ui32PID);
++
++ void PVRSRVSetDCState(u32 ui32State);
++
++ PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(void *hArena, unsigned char *pbyBuffer, u32 *puiBufSize, int bSave);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
++{\
++ u32 uiOffset, uiStart, uiCurrent, uiNotLastLoop; \
++ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, uiNotLastLoop = 1;\
++ ((uiCurrent - uiStart + uiOffset) < TIMEOUT) || uiNotLastLoop--; \
++ uiCurrent = OSClockus(), \
++ uiOffset = uiCurrent < uiStart ? u32_MAX - uiStart : uiOffset, \
++ uiStart = uiCurrent < uiStart ? 0 : uiStart)
++
++#define END_LOOP_UNTIL_TIMEOUT() \
++}
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/linkage.h
+@@ -0,0 +1,61 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LINKAGE_H__
++#define __LINKAGE_H__
++
++#if !defined(SUPPORT_DRI_DRM)
++s32 PVRSRV_BridgeDispatchKM(struct file *file, u32 cmd, u32 arg);
++#endif
++
++void PVRDPFInit(void);
++PVRSRV_ERROR PVROSFuncInit(void);
++void PVROSFuncDeInit(void);
++
++#ifdef DEBUG
++int PVRDebugProcSetLevel(struct file *file, const char *buffer, u32 count, void *data);
++void PVRDebugSetLevel(u32 uDebugLevel);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el);
++#else
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++int PVRProcSetPowerLevel(struct file *file, const char *buffer, u32 count, void *data);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el);
++#else
++int PVRProcGetPowerLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
++
++
++#endif
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/lock.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LOCK_H__
++#define __LOCK_H__
++
++extern struct mutex gPVRSRVLock;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/mm.c
+@@ -0,0 +1,2150 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++#include <linux/mutex.h>
++
++
++#include "services.h"
++#include "servicesint.h"
++#include "syscommon.h"
++#include "mutils.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "lock.h"
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#include "lists.h"
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++typedef enum {
++ DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ DEBUG_MEM_ALLOC_TYPE_IO,
++ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ DEBUG_MEM_ALLOC_TYPE_COUNT
++} DEBUG_MEM_ALLOC_TYPE;
++
++typedef struct _DEBUG_MEM_ALLOC_REC {
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ void *pvKey;
++ void *pvCpuVAddr;
++ u32 ulCpuPAddr;
++ void *pvPrivateData;
++ u32 ui32Bytes;
++ pid_t pid;
++ char *pszFileName;
++ u32 ui32Line;
++
++ struct _DEBUG_MEM_ALLOC_REC *psNext;
++ struct _DEBUG_MEM_ALLOC_REC **ppsThis;
++} DEBUG_MEM_ALLOC_REC;
++
++static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, int, 0)
++static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC)
++
++static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static u32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static u32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static u32 g_SysRAMWaterMark;
++static u32 g_SysRAMHighWaterMark;
++
++static u32 g_IOMemWaterMark;
++static u32 g_IOMemHighWaterMark;
++
++static void DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey,
++ void *pvCpuVAddr,
++ u32 ulCpuPAddr,
++ void *pvPrivateData,
++ u32 ui32Bytes,
++ char *pszFileName, u32 ui32Line);
++
++static void DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey, char *pszFileName,
++ u32 ui32Line);
++
++static char *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemoryRecords = 0;
++static void *ProcSeqNextMemoryRecords(struct seq_file *sfile, void *el,
++ loff_t off);
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile, void *el);
++static void *ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile,
++ loff_t off);
++
++#else
++static off_t printMemoryRecords(char *buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++typedef struct _DEBUG_LINUX_MEM_AREA_REC {
++ LinuxMemArea *psLinuxMemArea;
++ u32 ui32Flags;
++ pid_t pid;
++
++ struct _DEBUG_LINUX_MEM_AREA_REC *psNext;
++ struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis;
++} DEBUG_LINUX_MEM_AREA_REC;
++
++static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC)
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static PVRSRV_LINUX_MUTEX g_sDebugMutex;
++#endif
++
++static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static u32 g_LinuxMemAreaCount;
++static u32 g_LinuxMemAreaWaterMark;
++static u32 g_LinuxMemAreaHighWaterMark;
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemArea = 0;
++
++static void *ProcSeqNextMemArea(struct seq_file *sfile, void *el, loff_t off);
++static void ProcSeqShowMemArea(struct seq_file *sfile, void *el);
++static void *ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off);
++
++#else
++static off_t printLinuxMemAreaRecords(char *buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS))
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, int start);
++#endif
++#endif
++
++static LinuxKMemCache *psLinuxMemAreaCache;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static void ReservePages(void *pvAddress, u32 ui32Length);
++static void UnreservePages(void *pvAddress, u32 ui32Length);
++#endif
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(void);
++static void LinuxMemAreaStructFree(LinuxMemArea * psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static void DebugLinuxMemAreaRecordAdd(LinuxMemArea * psLinuxMemArea,
++ u32 ui32Flags);
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *
++ psLinuxMemArea);
++static void DebugLinuxMemAreaRecordRemove(LinuxMemArea * psLinuxMemArea);
++#endif
++
++PVRSRV_ERROR LinuxMMInit(void)
++{
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ mutex_init(&g_sDebugMutex);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ int iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemArea = CreateProcReadEntrySeq("mem_areas",
++ NULL,
++ ProcSeqNextMemArea,
++ ProcSeqShowMemArea,
++ ProcSeqOff2ElementMemArea,
++ ProcSeqStartstopDebugMutex);
++ iStatus = !g_SeqFileMemArea ? -1 : 0;
++#else
++ iStatus =
++ CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++#endif
++ if (iStatus != 0) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++ int iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemoryRecords = CreateProcReadEntrySeq("meminfo",
++ NULL,
++ ProcSeqNextMemoryRecords,
++ ProcSeqShowMemoryRecords,
++ ProcSeqOff2ElementMemoryRecords,
++ ProcSeqStartstopDebugMutex);
++
++ iStatus = !g_SeqFileMemoryRecords ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++#endif
++ if (iStatus != 0) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++ psLinuxMemAreaCache =
++ KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
++ if (!psLinuxMemAreaCache) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate kmem_cache",
++ __FUNCTION__));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++void LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *
++ psCurrentRecord)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes",
++ __FUNCTION__, psCurrentRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->
++ eAreaType),
++ psCurrentRecord->psLinuxMemArea->ui32ByteSize));
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++}
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++void LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC * psCurrentRecord)
++{
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++ "type=%s "
++ "CpuVAddr=%p "
++ "CpuPAddr=0x%08lx, "
++ "allocated @ file=%s,line=%d",
++ __FUNCTION__,
++ DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
++ psCurrentRecord->pvCpuVAddr,
++ psCurrentRecord->ulCpuPAddr,
++ psCurrentRecord->pszFileName, psCurrentRecord->ui32Line));
++ switch (psCurrentRecord->eAllocType) {
++ case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++ KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++ IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IO:
++
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++ psCurrentRecord->pvKey, __FILE__,
++ __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++ VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ psCurrentRecord->pvKey, __FILE__,
++ __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++ KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData,
++ psCurrentRecord->pvCpuVAddr);
++ break;
++ default:
++ PVR_ASSERT(0);
++ }
++}
++#endif
++
++void LinuxMMCleanup(void)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ if (g_LinuxMemAreaCount) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)",
++ __FUNCTION__, g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark));
++ }
++
++ List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords,
++ LinuxMMCleanup_MemAreas_ForEachCb);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_SeqFileMemArea);
++#else
++ RemoveProcEntry("mem_areas");
++#endif
++ }
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++
++ List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords,
++ LinuxMMCleanup_MemRecords_ForEachVa);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_SeqFileMemoryRecords);
++#else
++ RemoveProcEntry("meminfo");
++#endif
++
++ }
++#endif
++
++ if (psLinuxMemAreaCache) {
++ KMemCacheDestroyWrapper(psLinuxMemAreaCache);
++ psLinuxMemAreaCache = NULL;
++ }
++}
++
++void *_KMallocWrapper(u32 ui32ByteSize, char *pszFileName, u32 ui32Line)
++{
++ void *pvRet;
++ pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if (pvRet) {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ ui32ByteSize, pszFileName, ui32Line);
++ }
++#endif
++ return pvRet;
++}
++
++void _KFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr,
++ pszFileName, ui32Line);
++#endif
++ kfree(pvCpuVAddr);
++}
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static void
++DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ void *pvKey,
++ void *pvCpuVAddr,
++ u32 ulCpuPAddr,
++ void *pvPrivateData,
++ u32 ui32Bytes, char *pszFileName, u32 ui32Line)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++
++ mutex_lock(&g_sDebugMutex);
++
++ psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++
++ psRecord->eAllocType = eAllocType;
++ psRecord->pvKey = pvKey;
++ psRecord->pvCpuVAddr = pvCpuVAddr;
++ psRecord->ulCpuPAddr = ulCpuPAddr;
++ psRecord->pvPrivateData = pvPrivateData;
++ psRecord->pid = current->pid;
++ psRecord->ui32Bytes = ui32Bytes;
++ psRecord->pszFileName = pszFileName;
++ psRecord->ui32Line = ui32Line;
++
++ List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord);
++
++ g_WaterMarkData[eAllocType] += ui32Bytes;
++ if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType]) {
++ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++ }
++
++ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) {
++ g_SysRAMWaterMark += ui32Bytes;
++ if (g_SysRAMWaterMark > g_SysRAMHighWaterMark) {
++ g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++ }
++ } else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) {
++ g_IOMemWaterMark += ui32Bytes;
++ if (g_IOMemWaterMark > g_IOMemHighWaterMark) {
++ g_IOMemHighWaterMark = g_IOMemWaterMark;
++ }
++ }
++
++ mutex_unlock(&g_sDebugMutex);
++}
++
++int DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC * psCurrentRecord,
++ va_list va)
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ void *pvKey;
++
++ eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE);
++ pvKey = va_arg(va, void *);
++
++ if (psCurrentRecord->eAllocType == eAllocType
++ && psCurrentRecord->pvKey == pvKey) {
++ eAllocType = psCurrentRecord->eAllocType;
++ g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes;
++
++ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) {
++ g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes;
++ } else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) {
++ g_IOMemWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++
++ List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++
++ return 1;
++ } else {
++ return 0;
++ }
++}
++
++static void
++DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, void *pvKey,
++ char *pszFileName, u32 ui32Line)
++{
++ mutex_lock(&g_sDebugMutex);
++
++ if (!List_DEBUG_MEM_ALLOC_REC_int_Any_va(g_MemoryRecords,
++ DebugMemAllocRecordRemove_AnyVaCb,
++ eAllocType, pvKey)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n",
++ __FUNCTION__,
++ DebugMemAllocRecordTypeToString(eAllocType), pvKey,
++ pszFileName, ui32Line));
++ }
++
++ mutex_unlock(&g_sDebugMutex);
++}
++
++static char *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++ char *apszDebugMemoryRecordTypes[] = {
++ "KMALLOC",
++ "VMALLOC",
++ "ALLOC_PAGES",
++ "IOREMAP",
++ "IO",
++ "KMEM_CACHE_ALLOC"
++ };
++ return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++void *_VMallocWrapper(u32 ui32Bytes,
++ u32 ui32AllocFlags, char *pszFileName, u32 ui32Line)
++{
++ pgprot_t PGProtFlags;
++ void *pvRet;
++
++ switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ case PVRSRV_HAP_CACHED:
++ PGProtFlags = PAGE_KERNEL;
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ PGProtFlags = PGPROT_WC(PAGE_KERNEL);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ PGProtFlags = PGPROT_UC(PAGE_KERNEL);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "VMAllocWrapper: unknown mapping flags=0x%08lx",
++ ui32AllocFlags));
++ dump_stack();
++ return NULL;
++ }
++
++ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if (pvRet) {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ pszFileName, ui32Line);
++ }
++#endif
++
++ return pvRet;
++}
++
++void _VFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr,
++ pszFileName, ui32Line);
++#endif
++ vfree(pvCpuVAddr);
++}
++
++LinuxMemArea *NewVMallocLinuxMemArea(u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ void *pvCpuVAddr;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea) {
++ goto failed;
++ }
++
++ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++ if (!pvCpuVAddr) {
++ goto failed;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++ ReservePages(pvCpuVAddr, ui32Bytes);
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = 0;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
++ if (psLinuxMemArea)
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++}
++
++void FreeVMallocLinuxMemArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++ UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
++ psLinuxMemArea->ui32ByteSize);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: pvCpuVAddr: %p",
++ __FUNCTION__,
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
++ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static void ReservePages(void *pvAddress, u32 ui32Length)
++{
++ void *pvPage;
++ void *pvEnd = pvAddress + ui32Length;
++
++ for (pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE) {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_reserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++
++static void UnreservePages(void *pvAddress, u32 ui32Length)
++{
++ void *pvPage;
++ void *pvEnd = pvAddress + ui32Length;
++
++ for (pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE) {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_unreserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++#endif
++
++void *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes,
++ u32 ui32MappingFlags, char *pszFileName, u32 ui32Line)
++{
++ void *pvIORemapCookie;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ case PVRSRV_HAP_CACHED:
++ pvIORemapCookie = (void *)IOREMAP(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ pvIORemapCookie =
++ (void *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ pvIORemapCookie =
++ (void *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "IORemapWrapper: unknown mapping flags"));
++ return NULL;
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if (pvIORemapCookie) {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ pvIORemapCookie,
++ pvIORemapCookie,
++ BasePAddr.uiAddr,
++ NULL, ui32Bytes, pszFileName, ui32Line);
++ }
++#endif
++
++ return pvIORemapCookie;
++}
++
++void _IOUnmapWrapper(void *pvIORemapCookie, char *pszFileName, u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie,
++ pszFileName, ui32Line);
++#endif
++ iounmap(pvIORemapCookie);
++}
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ void *pvIORemapCookie;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea) {
++ return NULL;
++ }
++
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
++ if (!pvIORemapCookie) {
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = 0;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeIORemapLinuxMemArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++static int
++TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR * psSysPhysAddr, u32 ui32Bytes,
++ int bPhysContig)
++{
++ u32 ui32;
++ u32 ui32AddrChk;
++ u32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
++
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk =
++ (bPhysContig) ? (ui32AddrChk +
++ PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr) {
++ if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk))) {
++ break;
++ }
++ }
++ if (ui32 == ui32NumPages) {
++ return 0;
++ }
++
++ if (!bPhysContig) {
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages; ui32++, ui32AddrChk += PAGE_SIZE) {
++ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk) {
++ return 0;
++ }
++ }
++ }
++
++ return 1;
++}
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR * pBasePAddr,
++ void *pvCPUVAddr, u32 ui32Bytes,
++ int bPhysContig, u32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea) {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++ psLinuxMemArea->uData.sExternalKV.bPhysContig = (int)(bPhysContig
++ ||
++ TreatExternalPagesAsContiguous
++ (pBasePAddr,
++ ui32Bytes,
++ bPhysContig));
++
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig) {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr =
++ *pBasePAddr;
++ } else {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr =
++ pBasePAddr;
++ }
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = 0;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeExternalKVLinuxMemArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea) {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = 0;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++ (void *)BasePAddr.uiAddr,
++ 0,
++ BasePAddr.uiAddr, NULL, ui32Bytes, "unknown", 0);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeIOLinuxMemArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++ (void *)psLinuxMemArea->uData.sIO.CPUPhysAddr.
++ uiAddr, __FILE__, __LINE__);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(u32 ui32Bytes, u32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ u32 ui32PageCount;
++ struct page **pvPageList;
++ void *hBlockPageList;
++ s32 i;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea) {
++ goto failed_area_alloc;
++ }
++
++ ui32PageCount = RANGE_TO_PAGES(ui32Bytes);
++ eError =
++ OSAllocMem(0, sizeof(*pvPageList) * ui32PageCount,
++ (void **)&pvPageList, &hBlockPageList, "Array of pages");
++ if (eError != PVRSRV_OK) {
++ goto failed_page_list_alloc;
++ }
++
++ for (i = 0; i < (s32) ui32PageCount; i++) {
++ pvPageList[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
++ if (!pvPageList[i]) {
++ goto failed_alloc_pages;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ pvPageList,
++ 0, 0, NULL, PAGE_ALIGN(ui32Bytes), "unknown", 0);
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++ psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++ psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = 0;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed_alloc_pages:
++ for (i--; i >= 0; i--) {
++ __free_pages(pvPageList[i], 0);
++ }
++ (void)OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList,
++ hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = NULL;
++failed_page_list_alloc:
++ LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
++
++ return NULL;
++}
++
++void FreeAllocPagesLinuxMemArea(LinuxMemArea * psLinuxMemArea)
++{
++ u32 ui32PageCount;
++ struct page **pvPageList;
++ void *hBlockPageList;
++ s32 i;
++
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
++ pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++ hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList,
++ __FILE__, __LINE__);
++#endif
++
++ for (i = 0; i < (s32) ui32PageCount; i++) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++ __free_pages(pvPageList[i], 0);
++ }
++
++ (void)OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList,
++ hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = NULL;
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea * psLinuxMemArea,
++ u32 ui32ByteOffset)
++{
++ u32 ui32PageIndex;
++ char *pui8Addr;
++
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ return psLinuxMemArea->uData.sPageList.
++ pvPageList[ui32PageIndex];
++
++ case LINUX_MEM_AREA_VMALLOC:
++ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pui8Addr += ui32ByteOffset;
++ return vmalloc_to_page(pui8Addr);
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.
++ psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.
++ ui32ByteOffset +
++ ui32ByteOffset);
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++ __FUNCTION__,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
++ return NULL;
++ }
++}
++
++LinuxKMemCache *KMemCacheCreateWrapper(char *pszName,
++ size_t Size, size_t Align, u32 ui32Flags)
++{
++#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
++ ui32Flags |= SLAB_POISON | SLAB_RED_ZONE;
++#endif
++ return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ , NULL
++#endif
++ );
++}
++
++void KMemCacheDestroyWrapper(LinuxKMemCache * psCache)
++{
++ kmem_cache_destroy(psCache);
++}
++
++void *_KMemCacheAllocWrapper(LinuxKMemCache * psCache,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++ gfp_t Flags,
++#else
++ int Flags,
++#endif
++ char *pszFileName, u32 ui32Line)
++{
++ void *pvRet;
++
++ pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ pvRet,
++ pvRet,
++ 0,
++ psCache,
++ kmem_cache_size(psCache), pszFileName, ui32Line);
++#endif
++
++ return pvRet;
++}
++
++void
++_KMemCacheFreeWrapper(LinuxKMemCache * psCache, void *pvObject,
++ char *pszFileName, u32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject,
++ pszFileName, ui32Line);
++#endif
++
++ kmem_cache_free(psCache, pvObject);
++}
++
++const char *KMemCacheNameWrapper(LinuxKMemCache * psCache)
++{
++ return "";
++}
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea * psParentLinuxMemArea,
++ u32 ui32ByteOffset, u32 ui32Bytes)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_ASSERT((ui32ByteOffset + ui32Bytes) <=
++ psParentLinuxMemArea->ui32ByteSize);
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if (!psLinuxMemArea) {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea =
++ psParentLinuxMemArea;
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = 0;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++ psParentRecord =
++ DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea,
++ psParentRecord->ui32Flags);
++ }
++#endif
++
++ return psLinuxMemArea;
++}
++
++void FreeSubLinuxMemArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(void)
++{
++#if 0
++ LinuxMemArea *psLinuxMemArea;
++ psLinuxMemArea = kmem_cache_alloc(psLinuxMemAreaCache, GFP_KERNEL);
++ printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__,
++ psLinuxMemArea);
++ dump_stack();
++ return psLinuxMemArea;
++#else
++ return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++#endif
++}
++
++static void LinuxMemAreaStructFree(LinuxMemArea * psLinuxMemArea)
++{
++ KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++
++}
++
++void LinuxMemAreaDeepFree(LinuxMemArea * psLinuxMemArea)
++{
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_VMALLOC:
++ FreeVMallocLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ FreeIORemapLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ FreeExternalKVLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IO:
++ FreeIOLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ FreeSubLinuxMemArea(psLinuxMemArea);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static void
++DebugLinuxMemAreaRecordAdd(LinuxMemArea * psLinuxMemArea, u32 ui32Flags)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++ const char *pi8FlagsString;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) {
++ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++ if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark) {
++ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++ }
++ }
++ g_LinuxMemAreaCount++;
++
++ psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
++ if (psNewRecord) {
++
++ psNewRecord->psLinuxMemArea = psLinuxMemArea;
++ psNewRecord->ui32Flags = ui32Flags;
++ psNewRecord->pid = current->pid;
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords,
++ psNewRecord);
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: failed to allocate linux memory area record.",
++ __FUNCTION__));
++ }
++
++ pi8FlagsString = HAPFlagsToString(ui32Flags);
++ if (strstr(pi8FlagsString, "UNKNOWN")) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++ __FUNCTION__, ui32Flags, psLinuxMemArea));
++
++ }
++
++ mutex_unlock(&g_sDebugMutex);
++}
++
++void *MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC * psCurrentRecord,
++ va_list va)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = va_arg(va, LinuxMemArea *);
++ if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea) {
++ return psCurrentRecord;
++ } else {
++ return NULL;
++ }
++}
++
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *
++ psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ mutex_lock(&g_sDebugMutex);
++ psCurrentRecord =
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++
++ mutex_unlock(&g_sDebugMutex);
++
++ return psCurrentRecord;
++}
++
++static void DebugLinuxMemAreaRecordRemove(LinuxMemArea * psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) {
++ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++ }
++ g_LinuxMemAreaCount--;
++
++ psCurrentRecord =
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++ if (psCurrentRecord) {
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: couldn't find an entry for psLinuxMemArea=%p\n",
++ __FUNCTION__, psLinuxMemArea));
++ }
++
++ mutex_unlock(&g_sDebugMutex);
++}
++#endif
++
++void *LinuxMemAreaToCpuVAddr(LinuxMemArea * psLinuxMemArea)
++{
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_VMALLOC:
++ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ case LINUX_MEM_AREA_IOREMAP:
++ return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ char *pAddr =
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.
++ sSubAlloc.
++ psParentLinuxMemArea);
++ if (!pAddr) {
++ return NULL;
++ }
++ return pAddr +
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++ }
++ default:
++ return NULL;
++ }
++}
++
++IMG_CPU_PHYADDR
++LinuxMemAreaToCpuPAddr(LinuxMemArea * psLinuxMemArea, u32 ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = 0;
++
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_IOREMAP:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ {
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig) {
++ CpuPAddr =
++ SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.
++ sExternalKV.uPhysAddr.
++ SysPhysAddr);
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ } else {
++ u32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ IMG_SYS_PHYADDR SysPAddr =
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.
++ pSysPhysAddr[ui32PageIndex];
++
++ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
++ CpuPAddr.uiAddr +=
++ ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ }
++ break;
++ }
++ case LINUX_MEM_AREA_IO:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_VMALLOC:
++ {
++ char *pCpuVAddr;
++ pCpuVAddr =
++ (char *)psLinuxMemArea->uData.sVmalloc.
++ pvVmallocAddress;
++ pCpuVAddr += ui32ByteOffset;
++ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
++ break;
++ }
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ {
++ struct page *page;
++ u32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ page =
++ psLinuxMemArea->uData.sPageList.
++ pvPageList[ui32PageIndex];
++ CpuPAddr.uiAddr = page_to_phys(page);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ break;
++ }
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psLinuxMemArea->uData.
++ sSubAlloc.
++ psParentLinuxMemArea,
++ psLinuxMemArea->uData.
++ sSubAlloc.ui32ByteOffset +
++ ui32ByteOffset);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++
++ PVR_ASSERT(CpuPAddr.uiAddr);
++ return CpuPAddr;
++}
++
++int LinuxMemAreaPhysIsContig(LinuxMemArea * psLinuxMemArea)
++{
++ switch (psLinuxMemArea->eAreaType) {
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_IO:
++ return 1;
++
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
++
++ case LINUX_MEM_AREA_VMALLOC:
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return 0;
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.
++ psParentLinuxMemArea);
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++ return 0;
++}
++
++const char *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++
++ switch (eMemAreaType) {
++ case LINUX_MEM_AREA_IOREMAP:
++ return "LINUX_MEM_AREA_IOREMAP";
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return "LINUX_MEM_AREA_EXTERNAL_KV";
++ case LINUX_MEM_AREA_IO:
++ return "LINUX_MEM_AREA_IO";
++ case LINUX_MEM_AREA_VMALLOC:
++ return "LINUX_MEM_AREA_VMALLOC";
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return "LINUX_MEM_AREA_SUB_ALLOC";
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return "LINUX_MEM_AREA_ALLOC_PAGES";
++ default:
++ PVR_ASSERT(0);
++ }
++
++ return "";
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, int start)
++{
++ if (start) {
++ mutex_lock(&g_sDebugMutex);
++ } else {
++ mutex_unlock(&g_sDebugMutex);
++ }
++}
++#endif
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++
++void *DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC * psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t *);
++ if (--(*pOff)) {
++ return NULL;
++ } else {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void *ProcSeqNextMemArea(struct seq_file *sfile, void *el, loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC *)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void *)psRecord;
++}
++
++static void *ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC *)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void *)psRecord;
++}
++
++static void ProcSeqShowMemArea(struct seq_file *sfile, void *el)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC *) el;
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ seq_printf(sfile,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr", "CpuPAddr", "Bytes", "Pid", "Flags");
++#else
++ seq_printf(sfile,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark);
++#endif
++ return;
++ }
++
++ seq_printf(sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n" "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->
++ eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea, 0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize, psRecord->pid,
++ psRecord->ui32Flags, HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++}
++
++#else
++
++static off_t printLinuxMemAreaRecords(char *buffer, size_t count, off_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ off_t Ret;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (!off) {
++ if (count < 500) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ Ret = printAppend(buffer, count, 0,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr", "Bytes", "Pid", "Flags");
++#else
++ Ret = printAppend(buffer, count, 0,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark);
++#endif
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC *)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++
++ if (!psRecord) {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if (count < 500) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->
++ eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,
++ 0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize, psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++unlock_and_return:
++ mutex_unlock(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++
++void *DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC * psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t *);
++ if (--(*pOff)) {
++ return NULL;
++ } else {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void *ProcSeqNextMemoryRecords(struct seq_file *sfile, void *el,
++ loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ psRecord = (DEBUG_MEM_ALLOC_REC *)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb, &off);
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if (!psRecord) {
++ seq_printf(sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void *)psRecord;
++}
++
++static void *ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC *)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb, &off);
++
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if (!psRecord) {
++ seq_printf(sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void *)psRecord;
++}
++
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile, void *el)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC *) el;
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf(sfile, "\n");
++
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ seq_printf(sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ seq_printf(sfile, "\n");
++
++ seq_printf(sfile, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes", "PID", "PrivateData", "Filename:Line");
++
++#else
++
++ seq_printf(sfile, "<meminfo>\n<meminfo_header>\n");
++ seq_printf(sfile,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf(sfile,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf(sfile,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf(sfile,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf(sfile,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf(sfile,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf(sfile,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf(sfile,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf(sfile,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf(sfile,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf(sfile,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf(sfile,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf(sfile, "\n");
++
++ seq_printf(sfile,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ seq_printf(sfile,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ seq_printf(sfile,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ seq_printf(sfile,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ seq_printf(sfile, "</meminfo_header>\n");
++
++#endif
++ return;
++ }
++
++ if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) {
++ seq_printf(sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n" "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->
++ eAllocType),
++ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes, psRecord->pid, "NULL",
++ psRecord->pszFileName, psRecord->ui32Line);
++ } else {
++ seq_printf(sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n" "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->
++ eAllocType),
++ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes, psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName, psRecord->ui32Line);
++ }
++}
++
++#else
++
++static off_t printMemoryRecords(char *buffer, size_t count, off_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ off_t Ret;
++
++ mutex_lock(&g_sDebugMutex);
++
++ if (!off) {
++ if (count < 1000) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ Ret = printAppend(buffer, count, 0, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret =
++ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret =
++ printAppend(buffer, count, Ret,
++ "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n", "Type",
++ "CpuVAddr", "CpuPAddr", "Bytes", "PID",
++ "PrivateData", "Filename:Line");
++
++#else
++
++ Ret =
++ printAppend(buffer, count, 0,
++ "<meminfo>\n<meminfo_header>\n");
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret =
++ printAppend(buffer, count, Ret,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData
++ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "</meminfo_header>\n");
++
++#endif
++
++ goto unlock_and_return;
++ }
++
++ if (count < 1000) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC *)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb, &off);
++ if (!psRecord) {
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if (off == 0) {
++ Ret = printAppend(buffer, count, 0, "</meminfo>\n");
++ goto unlock_and_return;
++ }
++#endif
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n" "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->
++ eAllocType),
++ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes, psRecord->pid, "NULL",
++ psRecord->pszFileName, psRecord->ui32Line);
++ } else {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n" "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->
++ eAllocType),
++ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes, psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName, psRecord->ui32Line);
++ }
++
++unlock_and_return:
++ mutex_unlock(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const char *HAPFlagsToString(u32 ui32Flags)
++{
++ static char szFlags[50];
++ s32 i32Pos = 0;
++ u32 ui32CacheTypeIndex, ui32MapTypeIndex;
++ char *apszCacheTypes[] = {
++ "UNCACHED",
++ "CACHED",
++ "WRITECOMBINE",
++ "UNKNOWN"
++ };
++ char *apszMapType[] = {
++ "KERNEL_ONLY",
++ "SINGLE_PROCESS",
++ "MULTI_PROCESS",
++ "FROM_EXISTING_PROCESS",
++ "NO_CPU_VIRTUAL",
++ "UNKNOWN"
++ };
++
++ if (ui32Flags & PVRSRV_HAP_UNCACHED) {
++ ui32CacheTypeIndex = 0;
++ } else if (ui32Flags & PVRSRV_HAP_CACHED) {
++ ui32CacheTypeIndex = 1;
++ } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) {
++ ui32CacheTypeIndex = 2;
++ } else {
++ ui32CacheTypeIndex = 3;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)",
++ __FUNCTION__,
++ (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
++ }
++
++ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) {
++ ui32MapTypeIndex = 0;
++ } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) {
++ ui32MapTypeIndex = 1;
++ } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) {
++ ui32MapTypeIndex = 2;
++ } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) {
++ ui32MapTypeIndex = 3;
++ } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) {
++ ui32MapTypeIndex = 4;
++ } else {
++ ui32MapTypeIndex = 5;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
++ }
++
++ i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++ if (i32Pos <= 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: sprintf for cache type %u failed (%d)",
++ __FUNCTION__, ui32CacheTypeIndex, i32Pos));
++ szFlags[0] = 0;
++ } else {
++ sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++ }
++
++ return szFlags;
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/mm.h
+@@ -0,0 +1,331 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include <asm/io.h>
++
++#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
++#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
++
++#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
++
++#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page)
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot);
++#else
++#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot);
++#endif
++#endif
++
++static inline u32 VMallocToPhys(void *pCpuVAddr)
++{
++ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr));
++
++}
++
++typedef enum {
++ LINUX_MEM_AREA_IOREMAP,
++ LINUX_MEM_AREA_EXTERNAL_KV,
++ LINUX_MEM_AREA_IO,
++ LINUX_MEM_AREA_VMALLOC,
++ LINUX_MEM_AREA_ALLOC_PAGES,
++ LINUX_MEM_AREA_SUB_ALLOC,
++ LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++ LINUX_MEM_AREA_TYPE eAreaType;
++ union _uData
++ {
++ struct _sIORemap
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ void *pvIORemapCookie;
++ }sIORemap;
++ struct _sExternalKV
++ {
++
++ int bPhysContig;
++ union {
++
++ IMG_SYS_PHYADDR SysPhysAddr;
++ IMG_SYS_PHYADDR *pSysPhysAddr;
++ } uPhysAddr;
++ void *pvExternalKV;
++ }sExternalKV;
++ struct _sIO
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ }sIO;
++ struct _sVmalloc
++ {
++
++ void *pvVmallocAddress;
++ }sVmalloc;
++ struct _sPageList
++ {
++
++ struct page **pvPageList;
++ void * hBlockPageList;
++ }sPageList;
++ struct _sSubAlloc
++ {
++
++ LinuxMemArea *psParentLinuxMemArea;
++ u32 ui32ByteOffset;
++ }sSubAlloc;
++ }uData;
++
++ u32 ui32ByteSize;
++
++ u32 ui32AreaFlags;
++
++ int bMMapRegistered;
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sMMapOffsetStructList;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
++typedef kmem_cache_t LinuxKMemCache;
++#else
++typedef struct kmem_cache LinuxKMemCache;
++#endif
++
++
++PVRSRV_ERROR LinuxMMInit(void);
++
++
++void LinuxMMCleanup(void);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++void *_KMallocWrapper(u32 ui32ByteSize, char *szFileName, u32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++void _KFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++void *_VMallocWrapper(u32 ui32Bytes, u32 ui32AllocFlags, char *pszFileName, u32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++void _VFreeWrapper(void *pvCpuVAddr, char *pszFileName, u32 ui32Line);
++
++
++LinuxMemArea *NewVMallocLinuxMemArea(u32 ui32Bytes, u32 ui32AreaFlags);
++
++
++void FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++void *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes,
++ u32 ui32MappingFlags,
++ char *pszFileName,
++ u32 ui32Line);
++
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes, u32 ui32AreaFlags);
++
++
++void FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, void *pvCPUVAddr, u32 ui32Bytes, int bPhysContig, u32 ui32AreaFlags);
++
++
++void FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
++#else
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, NULL, 0)
++#endif
++void _IOUnmapWrapper(void *pvIORemapCookie, char *pszFileName, u32 ui32Line);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, u32 ui32ByteOffset);
++
++
++LinuxKMemCache *KMemCacheCreateWrapper(char *pszName, size_t Size, size_t Align, u32 ui32Flags);
++
++
++void KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++void *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, char *pszFileName, u32 ui32Line);
++#else
++void *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, char *pszFileName, u32 ui32Line);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__)
++#else
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0)
++#endif
++void _KMemCacheFreeWrapper(LinuxKMemCache *psCache, void *pvObject, char *pszFileName, u32 ui32Line);
++
++
++const char *KMemCacheNameWrapper(LinuxKMemCache *psCache);
++
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, u32 ui32Bytes, u32 ui32AreaFlags);
++
++
++void FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(u32 ui32Bytes, u32 ui32AreaFlags);
++
++
++void FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ u32 ui32ByteOffset,
++ u32 ui32Bytes);
++
++
++void LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++void LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++
++void *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, u32 ui32ByteOffset);
++
++
++#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr)
++
++int LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea);
++
++static inline LinuxMemArea *
++LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea)
++{
++ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++ }
++ else
++ {
++ return psLinuxMemArea;
++ }
++}
++
++
++static inline LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
++}
++
++
++const char *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const char *HAPFlagsToString(u32 ui32Flags);
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/mmap.c
+@@ -0,0 +1,1104 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <linux/sched.h>
++#include <asm/current.h>
++#endif
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#endif
++
++
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "handle.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "bridged_support.h"
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++
++#if !defined(PVR_SECURE_HANDLES)
++#error "The mmap code requires PVR_SECURE_HANDLES"
++#endif
++
++static struct mutex g_sMMapMutex;
++
++static LinuxKMemCache *g_psMemmapCache = NULL;
++static LIST_HEAD(g_sMMapAreaList);
++static LIST_HEAD(g_sMMapOffsetStructList);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static u32 g_ui32RegisteredAreas = 0;
++static u32 g_ui32TotalByteSize = 0;
++#endif
++
++#if defined(PVR_PROC_USE_SEQ_FILE) && defined(DEBUG_LINUX_MMAP_AREAS)
++static struct proc_dir_entry *g_ProcMMap;
++#endif
++
++#define FIRST_PHYSICAL_PFN 0
++#define LAST_PHYSICAL_PFN 0x7fffffffUL
++#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
++#define LAST_SPECIAL_PFN 0xffffffffUL
++
++#define MAX_MMAP_HANDLE 0x7fffffffUL
++
++static inline int PFNIsPhysical(u32 pfn)
++{
++
++ return ((pfn >= FIRST_PHYSICAL_PFN)
++ && (pfn <= LAST_PHYSICAL_PFN)) ? 1 : 0;
++}
++
++static inline int PFNIsSpecial(u32 pfn)
++{
++
++ return ((pfn >= FIRST_SPECIAL_PFN)
++ && (pfn <= LAST_SPECIAL_PFN)) ? 1 : 0;
++}
++
++static inline void *MMapOffsetToHandle(u32 pfn)
++{
++ if (PFNIsPhysical(pfn)) {
++ PVR_ASSERT(PFNIsPhysical(pfn));
++ return NULL;
++ }
++
++ return (void *)(pfn - FIRST_SPECIAL_PFN);
++}
++
++static inline u32 HandleToMMapOffset(void *hHandle)
++{
++ u32 ulHandle = (u32) hHandle;
++
++ if (PFNIsSpecial(ulHandle)) {
++ PVR_ASSERT(PFNIsSpecial(ulHandle));
++ return 0;
++ }
++
++ return ulHandle + FIRST_SPECIAL_PFN;
++}
++
++static inline int LinuxMemAreaUsesPhysicalMap(LinuxMemArea * psLinuxMemArea)
++{
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
++}
++
++static inline u32 GetCurrentThreadID(void)
++{
++
++ return (u32) current->pid;
++}
++
++static PKV_OFFSET_STRUCT
++CreateOffsetStruct(LinuxMemArea * psLinuxMemArea, u32 ui32Offset,
++ u32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const char *pszName =
++ LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea,
++ psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC
++ || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType !=
++ LINUX_MEM_AREA_SUB_ALLOC);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++ if (psOffsetStruct == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++ return NULL;
++ }
++
++ psOffsetStruct->ui32MMapOffset = ui32Offset;
++
++ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++
++ psOffsetStruct->ui32Mapped = 0;
++
++ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
++
++ psOffsetStruct->ui32TID = GetCurrentThreadID();
++
++ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
++
++ psOffsetStruct->bOnMMapList = 0;
++
++ psOffsetStruct->ui32RefCount = 0;
++
++ psOffsetStruct->ui32UserVAddr = 0;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ psOffsetStruct->pszName = pszName;
++#endif
++
++ list_add_tail(&psOffsetStruct->sAreaItem,
++ &psLinuxMemArea->sMMapOffsetStructList);
++
++ return psOffsetStruct;
++}
++
++static void DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
++{
++ list_del(&psOffsetStruct->sAreaItem);
++
++ if (psOffsetStruct->bOnMMapList) {
++ list_del(&psOffsetStruct->sMMapItem);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++
++ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++}
++
++static inline void
++DetermineUsersSizeAndByteOffset(LinuxMemArea * psLinuxMemArea,
++ u32 * pui32RealByteSize, u32 * pui32ByteOffset)
++{
++ u32 ui32PageAlignmentOffset;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++
++ *pui32ByteOffset = ui32PageAlignmentOffset;
++
++ *pui32RealByteSize =
++ PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++PVRSRV_ERROR
++PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hMHandle,
++ u32 * pui32MMapOffset,
++ u32 * pui32ByteOffset,
++ u32 * pui32RealByteSize, u32 * pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ void *hOSMemHandle;
++ PVRSRV_ERROR eError;
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <=
++ MAX_MMAP_HANDLE);
++
++ eError =
++ PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle,
++ hMHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed",
++ __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ pui32RealByteSize, pui32ByteOffset);
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) {
++ if (psPerProc->ui32PID == psOffsetStruct->ui32PID) {
++
++ PVR_ASSERT(*pui32RealByteSize ==
++ psOffsetStruct->ui32RealByteSize);
++
++ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++ *pui32UserVAddr = 0;
++
++ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea)) {
++ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
++ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
++ } else {
++ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
++ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
++ }
++
++ psOffsetStruct =
++ CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset,
++ *pui32RealByteSize);
++ if (psOffsetStruct == NULL) {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
++
++ psOffsetStruct->bOnMMapList = 1;
++
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ void *hMHandle,
++ int *pbMUnmap,
++ u32 * pui32RealByteSize, u32 * pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ void *hOSMemHandle;
++ PVRSRV_ERROR eError;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <=
++ MAX_MMAP_HANDLE);
++
++ eError =
++ PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle,
++ hMHandle);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed",
++ __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) {
++ if (psOffsetStruct->ui32PID == ui32PID) {
++ if (psOffsetStruct->ui32RefCount == 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area 0x%p",
++ __FUNCTION__, psOffsetStruct,
++ psLinuxMemArea));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++
++ psOffsetStruct->ui32RefCount--;
++
++ *pbMUnmap = (int)((psOffsetStruct->ui32RefCount == 0)
++ && (psOffsetStruct->ui32UserVAddr !=
++ 0));
++
++ *pui32UserVAddr =
++ (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
++ *pui32RealByteSize =
++ (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Mapping data not found for handle 0x%lx (memory area 0x%p)",
++ __FUNCTION__, hMHandle, psLinuxMemArea));
++
++ eError = PVRSRV_ERROR_GENERIC;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++static inline PKV_OFFSET_STRUCT
++FindOffsetStructByOffset(u32 ui32Offset, u32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ u32 ui32TID = GetCurrentThreadID();
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++
++ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem) {
++ if (ui32Offset == psOffsetStruct->ui32MMapOffset
++ && ui32RealByteSize == psOffsetStruct->ui32RealByteSize
++ && psOffsetStruct->ui32PID == ui32PID) {
++
++ if (!PFNIsPhysical(ui32Offset)
++ || psOffsetStruct->ui32TID == ui32TID) {
++ return psOffsetStruct;
++ }
++ }
++ }
++
++ return NULL;
++}
++
++static int
++DoMapToUser(LinuxMemArea * psLinuxMemArea,
++ struct vm_area_struct *ps_vma, u32 ui32ByteOffset)
++{
++ u32 ui32ByteSize;
++
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) {
++ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),
++ ps_vma,
++ psLinuxMemArea->uData.sSubAlloc.
++ ui32ByteOffset + ui32ByteOffset);
++ }
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
++
++#if defined (__sparc__)
++
++#error "SPARC not supported"
++#endif
++
++ if (PFNIsPhysical(ps_vma->vm_pgoff)) {
++ int result;
++
++ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
++ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset)
++ == ps_vma->vm_pgoff);
++
++ result =
++ IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start,
++ ps_vma->vm_pgoff, ui32ByteSize,
++ ps_vma->vm_page_prot);
++
++ if (result == 0) {
++ return 1;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path",
++ __FUNCTION__, result));
++ }
++
++ {
++
++ u32 ulVMAPos;
++ u32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
++ u32 ui32PA;
++
++ for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd;
++ ui32PA += PAGE_SIZE) {
++ u32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++
++ if (!pfn_valid(pfn)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Error - PFN invalid: 0x%lx",
++ __FUNCTION__, pfn));
++ return 0;
++ }
++ }
++
++ ulVMAPos = ps_vma->vm_start;
++ for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd;
++ ui32PA += PAGE_SIZE) {
++ u32 pfn;
++ struct page *psPage;
++ int result;
++
++ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++ PVR_ASSERT(pfn_valid(pfn));
++
++ psPage = pfn_to_page(pfn);
++
++ result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
++ if (result != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Error - VM_INSERT_PAGE failed (%d)",
++ __FUNCTION__, result));
++ return 0;
++ }
++ ulVMAPos += PAGE_SIZE;
++ }
++ }
++
++ return 1;
++}
++
++static void MMapVOpenNoLock(struct vm_area_struct *ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct =
++	    (PKV_OFFSET_STRUCT) ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != NULL); /* was missing ';' — fragile if PVR_ASSERT expands without one */
++ psOffsetStruct->ui32Mapped++;
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++
++ if (psOffsetStruct->ui32Mapped > 1) {
++	PVR_DPF((PVR_DBG_WARNING,
++		 "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %lu)",
++		 __FUNCTION__, psOffsetStruct,
++		 psOffsetStruct->ui32Mapped));
++	PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
++ }
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++	 "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %ld, ui32Mapped %d",
++	 __FUNCTION__,
++	 psOffsetStruct->psLinuxMemArea,
++	 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++	 psOffsetStruct->ui32MMapOffset, psOffsetStruct->ui32Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_INC_USE_COUNT;
++#endif
++}
++
++static void MMapVOpen(struct vm_area_struct *ps_vma)
++{
++ mutex_lock(&g_sMMapMutex);
++
++ MMapVOpenNoLock(ps_vma);
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++static void MMapVCloseNoLock(struct vm_area_struct *ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct =
++	    (PKV_OFFSET_STRUCT) ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != NULL); /* was missing ';' — fragile if PVR_ASSERT expands without one */
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++	 "%s: psLinuxMemArea 0x%p, CpuVAddr 0x%p ui32MMapOffset %ld, ui32Mapped %d",
++	 __FUNCTION__,
++	 psOffsetStruct->psLinuxMemArea,
++	 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++	 psOffsetStruct->ui32MMapOffset,
++	 psOffsetStruct->ui32Mapped));
++#endif
++
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++ psOffsetStruct->ui32Mapped--;
++ if (psOffsetStruct->ui32Mapped == 0) {
++	if (psOffsetStruct->ui32RefCount != 0) {
++		PVR_DPF((PVR_DBG_MESSAGE,
++			 "%s: psOffsetStruct 0x%p has non-zero reference count (ui32RefCount = %lu). User mode address of start of mapping: 0x%lx",
++			 __FUNCTION__, psOffsetStruct,
++			 psOffsetStruct->ui32RefCount,
++			 psOffsetStruct->ui32UserVAddr));
++	}
++
++	DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ ps_vma->vm_private_data = NULL;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_DEC_USE_COUNT;
++#endif
++}
++
++static void MMapVClose(struct vm_area_struct *ps_vma)
++{
++ mutex_lock(&g_sMMapMutex);
++
++ MMapVCloseNoLock(ps_vma);
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++static struct vm_operations_struct MMapIOOps = {
++ .open = MMapVOpen,
++ .close = MMapVClose
++};
++
++int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma)
++{
++ u32 ui32ByteSize;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ int iRetVal = 0;
++
++ mutex_lock(&g_sMMapMutex);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++	 "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
++	 " and ui32ByteSize %ld(0x%08lx)", __FUNCTION__,
++	 ps_vma->vm_pgoff, ui32ByteSize, ui32ByteSize));
++
++ psOffsetStruct =
++	    FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
++ if (psOffsetStruct == NULL) {
++#if defined(SUPPORT_DRI_DRM)
++	mutex_unlock(&g_sMMapMutex); /* BUGFIX: was mutex_lock — self-deadlock; release before delegating to DRM */
++
++	return drm_mmap(pFile, ps_vma);
++#else
++	PVR_DPF((PVR_DBG_ERROR,
++		 "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
++		 __FUNCTION__, ps_vma->vm_pgoff));
++	iRetVal = -EINVAL;
++#endif
++	goto unlock_and_return;
++ }
++ list_del(&psOffsetStruct->sMMapItem);
++ psOffsetStruct->bOnMMapList = 0;
++
++ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
++	((ps_vma->vm_flags & VM_SHARED) == 0)) {
++	PVR_DPF((PVR_DBG_ERROR,
++		 "%s: Cannot mmap non-shareable writable areas",
++		 __FUNCTION__));
++	iRetVal = -EINVAL;
++	goto unlock_and_return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
++	 __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++
++ ps_vma->vm_flags |= VM_RESERVED;
++ ps_vma->vm_flags |= VM_IO;
++
++ ps_vma->vm_flags |= VM_DONTEXPAND;
++
++ ps_vma->vm_flags |= VM_DONTCOPY;
++
++ ps_vma->vm_private_data = (void *)psOffsetStruct;
++
++ switch (psOffsetStruct->psLinuxMemArea->
++	 ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK) {
++ case PVRSRV_HAP_CACHED:
++
++	break;
++ case PVRSRV_HAP_WRITECOMBINE:
++	ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
++	break;
++ case PVRSRV_HAP_UNCACHED:
++	ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
++	break;
++ default:
++	PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type",
++		 __FUNCTION__));
++	iRetVal = -EINVAL;
++	goto unlock_and_return;
++ }
++
++ ps_vma->vm_ops = &MMapIOOps;
++
++ if (!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0)) {
++	iRetVal = -EAGAIN;
++	goto unlock_and_return;
++ }
++
++ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0); /* was missing ';' after PVR_ASSERT */
++
++ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
++
++ MMapVOpenNoLock(ps_vma);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++	 __FUNCTION__, ps_vma->vm_pgoff));
++
++unlock_and_return:
++ if (iRetVal != 0 && psOffsetStruct != NULL) {
++	DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ mutex_unlock(&g_sMMapMutex);
++
++ return iRetVal;
++}
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile, int start)
++{
++ if (start) {
++ mutex_lock(&g_sMMapMutex);
++ } else {
++ mutex_unlock(&g_sMMapMutex);
++ }
++}
++
++static void *ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile,
++ loff_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem) {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList,
++ sAreaItem) {
++ off--;
++ if (off == 0) {
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea ==
++ psLinuxMemArea);
++ return (void *)psOffsetStruct;
++ }
++ }
++ }
++ return (void *)0;
++}
++
++static void *ProcSeqNextMMapRegistrations(struct seq_file *sfile, void *el,
++ loff_t off)
++{
++ return ProcSeqOff2ElementMMapRegistrations(sfile, off);
++}
++
++static void ProcSeqShowMMapRegistrations(struct seq_file *sfile, void *el)
++{
++ KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT *) el;
++ LinuxMemArea *psLinuxMemArea;
++ u32 ui32RealByteSize;
++ u32 ui32ByteOffset;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++ seq_printf(sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType " "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n" "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas, g_ui32TotalByteSize);
++ return;
++ }
++
++ psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize, &ui32ByteOffset);
++
++ seq_printf(sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n" "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++}
++
++#else
++
++static off_t PrintMMapRegistrations(char *buffer, size_t size, off_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ off_t Ret;
++
++ mutex_lock(&g_sMMapMutex);
++
++ if (!off) {
++ Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n" "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas, g_ui32TotalByteSize);
++
++ goto unlock_and_return;
++ }
++
++ if (size < 135) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(off != 0);
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem) {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList,
++ sAreaItem) {
++ off--;
++ if (off == 0) {
++ u32 ui32RealByteSize;
++ u32 ui32ByteOffset;
++
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea ==
++ psLinuxMemArea);
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->
++ ui32UserVAddr +
++ ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr
++ (psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr
++ (psLinuxMemArea, 0).uiAddr,
++ psOffsetStruct->
++ ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString
++ (psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString
++ (psLinuxMemArea->
++ ui32AreaFlags));
++ goto unlock_and_return;
++ }
++ }
++ }
++ Ret = END_OF_FILE;
++
++unlock_and_return:
++ mutex_unlock(&g_sMMapMutex);
++ return Ret;
++}
++#endif
++#endif
++
++PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const char *pszName =
++ LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ mutex_lock(&g_sMMapMutex);
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea,
++ psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC
++ || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType !=
++ LINUX_MEM_AREA_SUB_ALLOC);
++
++ if (psLinuxMemArea->bMMapRegistered) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: psLinuxMemArea 0x%p is already registered",
++ __FUNCTION__, psLinuxMemArea));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
++
++ psLinuxMemArea->bMMapRegistered = 1;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas++;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) {
++ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea * psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct,
++ &psLinuxMemArea->sMMapOffsetStructList,
++ sAreaItem) {
++ if (psOffsetStruct->ui32Mapped != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %lu",
++ __FUNCTION__, psOffsetStruct, psLinuxMemArea,
++ psOffsetStruct->ui32Mapped));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ } else {
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: psOffsetStruct 0x%p was never mapped",
++ __FUNCTION__, psOffsetStruct));
++ }
++
++ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0)
++ && psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ list_del(&psLinuxMemArea->sMMapItem);
++
++ psLinuxMemArea->bMMapRegistered = 0;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas--;
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) {
++ g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++ return eError;
++}
++
++PVRSRV_ERROR
++LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA * psEnvPerProc)
++{
++ return PVRSRV_OK;
++}
++
++void LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA * psEnvPerProc)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++ int bWarn = 0;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++
++ mutex_lock(&g_sMMapMutex);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct,
++ &g_sMMapOffsetStructList, sMMapItem) {
++ if (psOffsetStruct->ui32PID == ui32PID) {
++ if (!bWarn) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: process has unmapped offset structures. Removing them",
++ __FUNCTION__));
++ bWarn = 1;
++ }
++ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
++ PVR_ASSERT(psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ }
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE * psHandleBase)
++{
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to set handle limit (%d)",
++ __FUNCTION__, eError));
++ return eError;
++ }
++
++ return eError;
++}
++
++void PVRMMapInit(void)
++{
++ mutex_init(&g_sMMapMutex);
++
++ g_psMemmapCache =
++ KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
++ if (!g_psMemmapCache) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate kmem_cache",
++ __FUNCTION__));
++ goto error;
++ }
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
++ ProcSeqNextMMapRegistrations,
++ ProcSeqShowMMapRegistrations,
++ ProcSeqOff2ElementMMapRegistrations,
++ ProcSeqStartstopMMapRegistations);
++#else
++ CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++#endif
++ return;
++
++error:
++ PVRMMapCleanup();
++ return;
++}
++
++void PVRMMapCleanup(void)
++{
++ PVRSRV_ERROR eError;
++
++ if (!list_empty(&g_sMMapAreaList)) {
++ LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Memory areas are still registered with MMap",
++ __FUNCTION__));
++
++ PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
++ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea,
++ &g_sMMapAreaList, sMMapItem) {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: PVRMMapRemoveRegisteredArea failed (%d)",
++ __FUNCTION__, eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ }
++ }
++ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcMMap);
++#else
++ RemoveProcEntry("mmap");
++#endif
++#endif
++
++ if (g_psMemmapCache) {
++ KMemCacheDestroyWrapper(g_psMemmapCache);
++ g_psMemmapCache = NULL;
++ }
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/mmap.h
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include "perproc.h"
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++
++ u32 ui32Mapped;
++
++
++ u32 ui32MMapOffset;
++
++ u32 ui32RealByteSize;
++
++
++ LinuxMemArea *psLinuxMemArea;
++
++
++ u32 ui32TID;
++
++
++ u32 ui32PID;
++
++
++ int bOnMMapList;
++
++
++ u32 ui32RefCount;
++
++
++ u32 ui32UserVAddr;
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ const char *pszName;
++#endif
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sAreaItem;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++void PVRMMapInit(void);
++
++
++void PVRMMapCleanup(void);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hMHandle,
++ u32 *pui32MMapOffset,
++ u32 *pui32ByteOffset,
++ u32 *pui32RealByteSize, u32 *pui32UserVAddr);
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ void * hMHandle,
++ int *pbMUnmap,
++ u32 *pui32RealByteSize,
++ u32 *pui32UserVAddr);
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/module.c
+@@ -0,0 +1,734 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++#if defined(LDM_PLATFORM)
++#define PVR_LDM_PLATFORM_MODULE
++#define PVR_LDM_MODULE
++#else
++#if defined(LDM_PCI)
++#define PVR_LDM_PCI_MODULE
++#define PVR_LDM_MODULE
++#endif
++#endif
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/mutex.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#include <linux/pci.h>
++#endif
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
++
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mm.h"
++#include "mmap.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "private_data.h"
++#include "lock.h"
++#include "linkage.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++#define DRVNAME "pvrsrvkm"
++#define DEVNAME "pvrsrvkm"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static int debug = DBGPRIV_WARNING;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#else
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Sets the level of debug output (default=0x4)");
++#endif
++#endif
++
++extern int PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE * psJTable);
++extern int PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE * psJTable);
++
++/*EXPORT_SYMBOL(PVRGetDisplayClassJTable); */
++/*EXPORT_SYMBOL(PVRGetBufferClassJTable); */
++
++#if defined(PVR_LDM_MODULE)
++static struct class *psPvrClass;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++static int AssignedMajorNumber;
++
++static int PVRSRVOpen(struct inode *pInode, struct file *pFile);
++static int PVRSRVRelease(struct inode *pInode, struct file *pFile);
++
++static struct file_operations pvrsrv_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = PVRSRV_BridgeDispatchKM,
++ .open = PVRSRVOpen,
++ .release = PVRSRVRelease,
++ .mmap = PVRMMap,
++};
++#endif
++
++struct mutex gPVRSRVLock;
++
++u32 gui32ReleasePID;
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static u32 gPVRPowerLevel;
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#define LDM_DEV struct platform_device
++#define LDM_DRV struct platform_driver
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#define LDM_DEV struct pci_dev
++#define LDM_DRV struct pci_driver
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static int PVRSRVDriverRemove(LDM_DEV * device);
++static int PVRSRVDriverProbe(LDM_DEV * device);
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static void PVRSRVDriverRemove(LDM_DEV * device);
++static int PVRSRVDriverProbe(LDM_DEV * device, const struct pci_device_id *id);
++#endif
++static int PVRSRVDriverSuspend(LDM_DEV * device, pm_message_t state);
++static void PVRSRVDriverShutdown(LDM_DEV * device);
++static int PVRSRVDriverResume(LDM_DEV * device);
++
++#if defined(PVR_LDM_PCI_MODULE)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++ {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID)},
++ {0}
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .driver = {
++ .name = DRVNAME,
++ },
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .name = DRVNAME,
++ .id_table = powervr_id_table,
++#endif
++ .probe = PVRSRVDriverProbe,
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .remove = PVRSRVDriverRemove,
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .remove = __devexit_p(PVRSRVDriverRemove),
++#endif
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .shutdown = PVRSRVDriverShutdown,
++};
++
++LDM_DEV *gpsPVRLDMDev;
++
++#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE)
++
++static void PVRSRVDeviceRelease(struct device *pDevice)
++{
++}
++
++static struct platform_device powervr_device = {
++ .name = DEVNAME,
++ .id = -1,
++ .dev = {
++ .release = PVRSRVDeviceRelease}
++};
++
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static int PVRSRVDriverProbe(LDM_DEV * pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static int __devinit PVRSRVDriverProbe(LDM_DEV * pDevice,
++ const struct pci_device_id *id)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++#if 0
++
++ if (PerDeviceSysInitialise((void *)pDevice) != PVRSRV_OK) {
++ return -EINVAL;
++ }
++#endif
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ gpsPVRLDMDev = pDevice;
++
++ if (SysInitialise() != PVRSRV_OK) {
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++static int PVRSRVDriverRemove(LDM_DEV * pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static void __devexit PVRSRVDriverRemove(LDM_DEV * pDevice)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++ if (SysAcquireData(&psSysData) == PVRSRV_OK) {
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0) {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) ==
++ PVRSRV_OK) {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++ SysDeinitialise(psSysData);
++
++ gpsPVRLDMDev = NULL;
++ }
++#if 0
++ if (PerDeviceSysDeInitialise((void *)pDevice) != PVRSRV_OK) {
++ return -EINVAL;
++ }
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++ return 0;
++#endif
++#if defined (PVR_LDM_PCI_MODULE)
++ return;
++#endif
++}
++
++static void PVRSRVDriverShutdown(LDM_DEV * pDevice)
++{
++ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++ (void)PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++}
++
++#endif
++
++#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
++#if defined(SUPPORT_DRI_DRM)
++int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
++#else
++static int PVRSRVDriverSuspend(LDM_DEV * pDevice, pm_message_t state)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(("PVRSRVDriverSuspend(pDevice=%p)", pDevice));
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK) {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++int PVRSRVDriverResume(struct drm_device *pDevice)
++#else
++static int PVRSRVDriverResume(LDM_DEV * pDevice)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK) {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++#endif
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
++int PVRProcSetPowerLevel(struct file *file, const char *buffer, u32 count,
++ void *data)
++{
++ char data_buffer[2];
++ u32 PVRPowerLevel;
++
++ if (count != sizeof(data_buffer)) {
++ return -EINVAL;
++ } else {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRPowerLevel = data_buffer[0] - '0';
++ if (PVRPowerLevel != gPVRPowerLevel) {
++ if (PVRPowerLevel != 0) {
++ if (PVRSRVSetPowerStateKM
++ (PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK) {
++ return -EINVAL;
++ }
++ } else {
++ if (PVRSRVSetPowerStateKM
++ (PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK) {
++ return -EINVAL;
++ }
++ }
++
++ gPVRPowerLevel = PVRPowerLevel;
++ }
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile, void *el)
++{
++ seq_printf(sfile, "%lu\n", gPVRPowerLevel);
++}
++
++#else
++int PVRProcGetPowerLevel(char *page, char **start, off_t off, int count,
++ int *eof, void *data)
++{
++ if (off == 0) {
++ *start = (char *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++int PVRSRVOpen(struct drm_device * dev, struct drm_file *pFile)
++#else
++static int PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ void *hBlockAlloc;
++ int iRet = -ENOMEM;
++ PVRSRV_ERROR eError;
++ u32 ui32PID;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++
++#else
++
++#endif
++
++ mutex_lock(&gPVRSRVLock);
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ if (PVRSRVProcessConnect(ui32PID) != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (psEnvPerProc == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data",
++ __FUNCTION__));
++ goto err_unlock;
++ }
++#endif
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ (void **)&psPrivateData,
++ &hBlockAlloc, "File Private Data");
++
++ if (eError != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ psPrivateData->hKernelMemInfo = NULL;
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psPrivateData->psDRMFile = pFile;
++
++ list_add_tail(&psPrivateData->sDRMAuthListItem,
++ &psEnvPerProc->sDRMAuthListHead);
++#endif
++ psPrivateData->ui32OpenPID = ui32PID;
++ psPrivateData->hBlockAlloc = hBlockAlloc;
++ PRIVATE_DATA(pFile) = psPrivateData;
++ iRet = 0;
++err_unlock:
++ mutex_unlock(&gPVRSRVLock);
++ return iRet;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++int PVRSRVRelease(struct drm_device * dev, struct drm_file *pFile)
++#else
++static int PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++
++#if defined(SUPPORT_DRI_DRM)
++#else
++#endif
++
++ mutex_lock(&gPVRSRVLock);
++
++ psPrivateData = PRIVATE_DATA(pFile);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ list_del(&psPrivateData->sDRMAuthListItem);
++#endif
++
++ gui32ReleasePID = psPrivateData->ui32OpenPID;
++ PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
++ gui32ReleasePID = 0;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ psPrivateData, psPrivateData->hBlockAlloc);
++ PRIVATE_DATA(pFile) = NULL;
++
++ mutex_unlock(&gPVRSRVLock);
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++int PVRCore_Init(void)
++#else
++static int __init PVRCore_Init(void)
++#endif
++{
++ int error;
++#if !defined(PVR_LDM_MODULE)
++ PVRSRV_ERROR eError;
++#else
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ PVRDPFInit();
++#endif
++ PVR_TRACE(("PVRCore_Init"));
++
++ mutex_init(&gPVRSRVLock);
++
++#ifdef DEBUG
++ PVRDebugSetLevel(debug);
++#endif
++
++ if (CreateProcEntries()) {
++ error = -ENOMEM;
++ return error;
++ }
++
++ if (PVROSFuncInit() != PVRSRV_OK) {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ PVRLinuxMUtilsInit();
++
++ if (LinuxMMInit() != PVRSRV_OK) {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ LinuxBridgeInit();
++
++ PVRMMapInit();
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ if ((error = platform_driver_register(&powervr_driver)) != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRCore_Init: unable to register platform driver (%d)",
++ error));
++
++ goto init_failed;
++ }
++#if defined(MODULE)
++ if ((error = platform_device_register(&powervr_device)) != 0) {
++ platform_driver_unregister(&powervr_driver);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRCore_Init: unable to register platform device (%d)",
++ error));
++
++ goto init_failed;
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++ if ((error = pci_register_driver(&powervr_driver)) != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRCore_Init: unable to register PCI driver (%d)",
++ error));
++
++ goto init_failed;
++ }
++#endif
++
++#else
++
++ if ((eError = SysInitialise()) != PVRSRV_OK) {
++ error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++ if (eError == PVRSRV_ERROR_NOT_SUPPORTED) {
++ printk("\nAtlas wrapper (FPGA image) version mismatch");
++ error = -ENODEV;
++ }
++#endif
++ goto init_failed;
++ }
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++ if (AssignedMajorNumber <= 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRCore_Init: unable to get major number"));
++
++ error = -EBUSY;
++ goto sys_deinit;
++ }
++
++ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++ psPvrClass = class_create(THIS_MODULE, "pvr");
++
++ if (IS_ERR(psPvrClass)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRCore_Init: unable to create class (%ld)",
++ PTR_ERR(psPvrClass)));
++ error = -EBUSY;
++ goto unregister_device;
++ }
++
++ psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DEVNAME);
++ if (IS_ERR(psDev)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRCore_Init: unable to create device (%ld)",
++ PTR_ERR(psDev)));
++ error = -EBUSY;
++ goto destroy_class;
++ }
++#endif
++
++ return 0;
++
++#if defined(PVR_LDM_MODULE)
++destroy_class:
++ class_destroy(psPvrClass);
++unregister_device:
++ unregister_chrdev((u32) AssignedMajorNumber, DRVNAME);
++#endif
++#if !defined(SUPPORT_DRI_DRM)
++sys_deinit:
++#endif
++#if defined(PVR_LDM_MODULE)
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++ if (psSysData != NULL) {
++ SysDeinitialise(psSysData);
++ }
++ }
++#endif
++init_failed:
++ PVRMMapCleanup();
++ LinuxMMCleanup();
++ LinuxBridgeDeInit();
++ PVROSFuncDeInit();
++ RemoveProcEntries();
++
++ return error;
++
++}
++
++#if defined(SUPPORT_DRI_DRM)
++void PVRCore_Cleanup(void)
++#else
++static void __exit PVRCore_Cleanup(void)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRCore_Cleanup"));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LDM_MODULE)
++ device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psPvrClass);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ if (
++#endif
++ unregister_chrdev((u32) AssignedMajorNumber, DRVNAME)
++#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ ;
++#else
++ ) {
++ PVR_DPF((PVR_DBG_ERROR, " can't unregister device major %d",
++ AssignedMajorNumber));
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0) {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) ==
++ PVRSRV_OK) {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++
++ SysDeinitialise(psSysData);
++#endif
++
++ PVRMMapCleanup();
++
++ LinuxMMCleanup();
++
++ LinuxBridgeDeInit();
++
++ PVROSFuncDeInit();
++
++ RemoveProcEntries();
++
++ PVR_TRACE(("PVRCore_Cleanup: unloading"));
++}
++
++#if !defined(SUPPORT_DRI_DRM)
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/mutils.c
+@@ -0,0 +1,129 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++
++#include "pvr_debug.h"
++#include "mutils.h"
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++#define PAT_LINUX_X86_WC 1
++
++#define PAT_X86_ENTRY_BITS 8
++
++#define PAT_X86_BIT_PWT 1U
++#define PAT_X86_BIT_PCD 2U
++#define PAT_X86_BIT_PAT 4U
++#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT)
++
++static int g_write_combining_available = 0;
++
++#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0)
++
++static inline u32 pvr_pat_index(pgprotval_t prot_val)
++{
++ u32 ret = 0;
++ pgprotval_t val = prot_val & _PAGE_CACHE_MASK;
++
++ ret |= PROT_TO_PAT_INDEX(val, PAT);
++ ret |= PROT_TO_PAT_INDEX(val, PCD);
++ ret |= PROT_TO_PAT_INDEX(val, PWT);
++
++ return ret;
++}
++
++static inline u32 pvr_pat_entry(u64 pat, u32 index)
++{
++ return (u32) (pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK;
++}
++
++static void PVRLinuxX86PATProbe(void)
++{
++
++ if (cpu_has_pat) {
++ u64 pat;
++ u32 pat_index;
++ u32 pat_entry;
++
++ PVR_TRACE(("%s: PAT available", __FUNCTION__));
++
++ rdmsrl(MSR_IA32_CR_PAT, pat);
++ PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__,
++ (u32) (pat >> 32)));
++ PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__,
++ (u32) (pat)));
++
++ pat_index = pvr_pat_index(_PAGE_CACHE_WC);
++ PVR_TRACE(("%s: PAT index for write combining: %u",
++ __FUNCTION__, pat_index));
++
++ pat_entry = pvr_pat_entry(pat, pat_index);
++ PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC));
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ g_write_combining_available =
++ (int)(pat_entry == PAT_LINUX_X86_WC);
++#endif
++ }
++#if defined(DEBUG)
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ if (g_write_combining_available) {
++ PVR_TRACE(("%s: Write combining available via PAT",
++ __FUNCTION__));
++ } else {
++ PVR_TRACE(("%s: Write combining not available", __FUNCTION__));
++ }
++#else
++ PVR_TRACE(("%s: Write combining disabled in driver build",
++ __FUNCTION__));
++#endif
++#endif
++}
++
++pgprot_t pvr_pgprot_writecombine(pgprot_t prot)
++{
++
++ return (g_write_combining_available) ?
++ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) :
++ pgprot_noncached(prot);
++}
++#endif
++
++void PVRLinuxMUtilsInit(void)
++{
++#if defined(SUPPORT_LINUX_X86_PAT)
++ PVRLinuxX86PATProbe();
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/mutils.h
+@@ -0,0 +1,101 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MUTILS_H__
++#define __IMG_LINUX_MUTILS_H__
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)))
++#if defined(SUPPORT_LINUX_X86_PAT)
++#undef SUPPORT_LINUX_X86_PAT
++#endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ pgprot_t pvr_pgprot_writecombine(pgprot_t prot);
++ #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv)
++#else
++ #if defined(__arm__) || defined(__sh__)
++ #define PGPROT_WC(pv) pgprot_writecombine(pv)
++ #else
++ #if defined(__i386__)
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #else
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #error Unsupported architecture!
++ #endif
++ #endif
++#endif
++
++#define PGPROT_UC(pv) pgprot_noncached(pv)
++
++#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
++ #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes)
++#else
++ #if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
++ #else
++ #define IOREMAP(pa, bytes) ioremap(pa, bytes)
++ #endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ #if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#else
++ #if defined(__arm__)
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE)
++ #else
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, , L_PTE_BUFFERABLE, 1)
++ #endif
++ #endif
++ #endif
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#endif
++
++#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
++
++void PVRLinuxMUtilsInit(void);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/osfunc.c
+@@ -0,0 +1,2369 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++#include <asm/cacheflush.h>
++#endif
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/hugetlb.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++#include <linux/spinlock.h>
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
++ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
++ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
++ defined(PVR_LINUX_USING_WORKQUEUES)
++#include <linux/workqueue.h>
++#endif
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "event.h"
++#include "linkage.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS (100)
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS) || \
++ defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++#if defined(__i386__)
++static void per_cpu_cache_flush(void *arg)
++{
++ PVR_UNREFERENCED_PARAMETER(arg);
++ wbinvd();
++}
++#endif
++
++#if !defined(SUPPORT_CPU_CACHED_BUFFERS)
++static
++#endif
++void OSFlushCPUCacheKM(void)
++{
++#if defined(__arm__)
++ flush_cache_all();
++#elif defined(__i386__)
++
++ on_each_cpu(per_cpu_cache_flush, NULL, 1);
++#else
++#error "Implement full CPU cache flush for this CPU!"
++#endif
++}
++
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++
++void OSFlushCPUCacheRangeKM(void *pvRangeAddrStart, void *pvRangeAddrEnd)
++{
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrEnd);
++
++ OSFlushCPUCacheKM();
++}
++
++#endif
++
++#define HOST_ALLOC_MEM_USING_KMALLOC ((void *)0)
++#define HOST_ALLOC_MEM_USING_VMALLOC ((void *)1)
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSAllocMem_Impl(u32 ui32Flags, u32 ui32Size, void **ppvCpuVAddr,
++ void **phBlockAlloc)
++#else
++PVRSRV_ERROR OSAllocMem_Impl(u32 ui32Flags, u32 ui32Size, void **ppvCpuVAddr,
++ void **phBlockAlloc, char *pszFilename,
++ u32 ui32Line)
++#endif
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _KMallocWrapper(ui32Size, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++ if (*ppvCpuVAddr) {
++ if (phBlockAlloc) {
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC;
++ }
++ } else {
++ if (!phBlockAlloc) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr =
++ _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, pszFilename,
++ ui32Line);
++#else
++ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED);
++#endif
++ if (!*ppvCpuVAddr) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC;
++ }
++
++ return PVRSRV_OK;
++}
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSFreeMem_Impl(u32 ui32Flags, u32 ui32Size, void *pvCpuVAddr,
++ void *hBlockAlloc)
++#else
++PVRSRV_ERROR OSFreeMem_Impl(u32 ui32Flags, u32 ui32Size, void *pvCpuVAddr,
++ void *hBlockAlloc, char *pszFilename, u32 ui32Line)
++#endif
++{
++ if (hBlockAlloc == HOST_ALLOC_MEM_USING_VMALLOC) {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ VFreeWrapper(pvCpuVAddr);
++#endif
++ } else {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ KFreeWrapper(pvCpuVAddr);
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSAllocPages_Impl(u32 ui32AllocFlags,
++ u32 ui32Size,
++ u32 ui32PageSize, void **ppvCpuVAddr, void **phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++#if 0
++
++ if (ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS) {
++ ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch (ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea =
++ NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++ psLinuxMemArea =
++ NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea =
++ NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n",
++ ui32AllocFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (void *)0;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++ if (ui32AllocFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED)) {
++ OSFlushCPUCacheKM();
++ }
++#endif
++
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++ *phOSMemHandle = psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSFreePages(u32 ui32AllocFlags, u32 ui32Bytes, void *pvCpuVAddr,
++ void *hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++
++ switch (ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++ ui32AllocFlags, ui32Bytes, pvCpuVAddr,
++ hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: invalid flags 0x%x\n",
++ __FUNCTION__, ui32AllocFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSGetSubMemHandle(void *hOSMemHandle,
++ u32 ui32ByteOffset,
++ u32 ui32Bytes, u32 ui32Flags, void **phOSMemHandleRet)
++{
++ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psParentLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++
++ psLinuxMemArea =
++ NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++ if (!psLinuxMemArea) {
++ *phOSMemHandleRet = NULL;
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ *phOSMemHandleRet = psLinuxMemArea;
++
++ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) {
++ return PVRSRV_OK;
++ }
++
++ eError = PVRMMapRegisterArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK) {
++ goto failed_register_area;
++ }
++
++ return PVRSRV_OK;
++
++failed_register_area:
++ *phOSMemHandleRet = NULL;
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ return eError;
++}
++
++PVRSRV_ERROR OSReleaseSubMemHandle(void *hOSMemHandle, u32 ui32Flags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++ if ((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0) {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK) {
++ return eError;
++ }
++ }
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(void *hOSMemHandle, u32 ui32ByteOffset)
++{
++ PVR_ASSERT(hOSMemHandle);
++
++ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++void OSBreakResourceLock(PVRSRV_RESOURCE * psResource, u32 ui32ID)
++{
++ volatile u32 *pui32Access = (volatile u32 *)&psResource->ui32Lock;
++
++ if (*pui32Access) {
++ if (psResource->ui32ID == ui32ID) {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "OSBreakResourceLock: Resource is not locked for this process."));
++ }
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "OSBreakResourceLock: Resource is not locked"));
++ }
++}
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE * psResource)
++{
++ psResource->ui32ID = 0;
++ psResource->ui32Lock = 0;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE * psResource)
++{
++ OSBreakResourceLock(psResource, psResource->ui32ID);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSInitEnvData(void **ppvEnvSpecificData)
++{
++ ENV_DATA *psEnvData;
++
++ if (OSAllocMem
++ (PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (void **)&psEnvData,
++ NULL, "Environment Data") != PVRSRV_OK) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (OSAllocMem
++ (PVRSRV_OS_PAGEABLE_HEAP,
++ PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ &psEnvData->pvBridgeData, NULL, "Bridge Data") != PVRSRV_OK) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData,
++ NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->bMISRInstalled = 0;
++ psEnvData->bLISRInstalled = 0;
++
++ *ppvEnvSpecificData = psEnvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSDeInitEnvData(void *pvEnvSpecificData)
++{
++ ENV_DATA *psEnvData = (ENV_DATA *) pvEnvSpecificData;
++
++ PVR_ASSERT(!psEnvData->bMISRInstalled);
++ PVR_ASSERT(!psEnvData->bLISRInstalled);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ psEnvData->pvBridgeData, NULL);
++ psEnvData->pvBridgeData = NULL;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData,
++ NULL);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INTEL_D3_CHANGES
++u32 OSPCIReadDword(u32 ui32Bus, u32 ui32Dev, u32 ui32Func, u32 ui32Reg)
++{
++ struct pci_dev *dev;
++ u32 ui32Value;
++
++ dev = pci_get_bus_and_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++
++ if (dev) {
++ pci_read_config_dword(dev, (int)ui32Reg, (u32 *) & ui32Value);
++ return (ui32Value);
++ } else {
++ return (0);
++ }
++}
++
++void OSPCIWriteDword(u32 ui32Bus, u32 ui32Dev, u32 ui32Func, u32 ui32Reg,
++ u32 ui32Value)
++{
++ struct pci_dev *dev;
++
++ dev = pci_get_bus_and_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++
++ if (dev) {
++ pci_write_config_dword(dev, (int)ui32Reg, (u32) ui32Value);
++ }
++}
++#endif
++
++void OSReleaseThreadQuanta(void)
++{
++ schedule();
++}
++
++u32 OSClockus(void)
++{
++ u32 time, j = jiffies;
++
++ time = j * (1000000 / HZ);
++
++ return time;
++}
++
++void OSWaitus(u32 ui32Timeus)
++{
++ udelay(ui32Timeus);
++}
++
++u32 OSGetCurrentProcessIDKM(void)
++{
++ if (in_interrupt()) {
++ return KERNEL_ID;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ return (u32) current->pgrp;
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++ return (u32) task_tgid_nr(current);
++#else
++ return (u32) current->tgid;
++#endif
++#endif
++}
++
++u32 OSGetPageSize(void)
++{
++#if defined(__sh__)
++ u32 ui32ReturnValue = PAGE_SIZE;
++
++ return (ui32ReturnValue);
++#else
++ return PAGE_SIZE;
++#endif
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ int bStatus = 0;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *) dev_id;
++ if (!psDeviceNode) {
++ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++ if (bStatus) {
++ OSScheduleMISR((void *)psDeviceNode->psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ SYS_DATA *psSysData;
++ int bStatus = 0;
++
++ psSysData = (SYS_DATA *) dev_id;
++ if (!psSysData) {
++ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVSystemLISR(psSysData);
++
++ if (bStatus) {
++ OSScheduleMISR((void *)psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++PVRSRV_ERROR OSInstallDeviceLISR(void *pvSysData,
++ u32 ui32Irq,
++ char *pszISRName, void *pvDeviceNode)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x",
++ psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x",
++ pszISRName, ui32Irq, pvDeviceNode));
++
++ if (request_irq(ui32Irq, DeviceISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , pszISRName, pvDeviceNode)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d",
++ ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvDeviceNode;
++ psEnvData->bLISRInstalled = 1;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUninstallDeviceLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x",
++ psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = 0;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSInstallSystemLISR(void *pvSysData, u32 ui32Irq)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x",
++ psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq,
++ pvSysData));
++
++ if (request_irq(ui32Irq, SystemISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , "PowerVR", pvSysData)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallSystemLISR: Couldn't install system LISR on IRQ %d",
++ ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvSysData;
++ psEnvData->bLISRInstalled = 1;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallSystemLISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUninstallSystemLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x",
++ psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = 0;
++
++ return PVRSRV_OK;
++}
++
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++ )
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *) psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++PVRSRV_ERROR OSInstallMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
++
++ psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
++
++ if (psEnvData->psWorkQueue == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallMISR: create_singlethreaded_workqueue failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = 1;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ destroy_workqueue(psEnvData->psWorkQueue);
++
++ psEnvData->bMISRInstalled = 0;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++#else
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++ )
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *) psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++PVRSRV_ERROR OSInstallMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = 1;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ flush_scheduled_work();
++
++ psEnvData->bMISRInstalled = 0;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ schedule_work(&psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++
++static void MISRWrapper(unsigned long data)
++{
++ SYS_DATA *psSysData;
++
++ psSysData = (SYS_DATA *) data;
++
++ PVRSRVMISR(psSysData);
++}
++
++PVRSRV_ERROR OSInstallMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper,
++ (unsigned long)pvSysData);
++
++ psEnvData->bMISRInstalled = 1;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ tasklet_kill(&psEnvData->sMISRTasklet);
++
++ psEnvData->bMISRInstalled = 0;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(void *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA *) pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled) {
++ tasklet_schedule(&psEnvData->sMISRTasklet);
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++#endif
++
++#endif
++
++void OSPanic(void)
++{
++ BUG();
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#define OS_TAS(p) xchg((p), 1)
++#else
++#define OS_TAS(p) tas(p)
++#endif
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE * psResource, u32 ui32ID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (!OS_TAS(&psResource->ui32Lock))
++ psResource->ui32ID = ui32ID;
++ else
++ eError = PVRSRV_ERROR_GENERIC;
++
++ return eError;
++}
++
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE * psResource, u32 ui32ID)
++{
++ volatile u32 *pui32Access = (volatile u32 *)&psResource->ui32Lock;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (*pui32Access) {
++ if (psResource->ui32ID == ui32ID) {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnlockResource: Resource %p is not locked with expected value.",
++ psResource));
++ PVR_DPF((PVR_DBG_MESSAGE, "Should be %x is actually %x",
++ ui32ID, psResource->ui32ID));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnlockResource: Resource %p is not locked",
++ psResource));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++int OSIsResourceLocked(PVRSRV_RESOURCE * psResource, u32 ui32ID)
++{
++ volatile u32 *pui32Access = (volatile u32 *)&psResource->ui32Lock;
++
++ return (*(volatile u32 *)pui32Access == 1)
++ && (psResource->ui32ID == ui32ID)
++ ? 1 : 0;
++}
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(void *pvLinAddr)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = (u32) VMallocToPhys(pvLinAddr);
++
++ return CpuPAddr;
++}
++
++void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ if (phOSMemHandle) {
++ *phOSMemHandle = (void *)0;
++ }
++
++ if (ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) {
++ void *pvIORemapCookie;
++ pvIORemapCookie =
++ IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if (pvIORemapCookie == NULL) {
++ return NULL;
++ }
++ return pvIORemapCookie;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSReservePhys otherwise)"));
++ return NULL;
++ }
++}
++
++int
++OSUnMapPhysToLin(void *pvLinAddr, u32 ui32Bytes, u32 ui32MappingFlags,
++ void *hPageAlloc)
++{
++ PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__,
++ ui32Bytes, pvLinAddr));
++
++ if (ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) {
++ IOUnmapWrapper(pvLinAddr);
++ return 1;
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSUnReservePhys otherwise)"));
++ return 0;
++ }
++}
++
++static PVRSRV_ERROR
++RegisterExternalMem(IMG_SYS_PHYADDR * pBasePAddr,
++ void *pvCPUVAddr,
++ u32 ui32Bytes,
++ int bPhysContig, u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea =
++ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr,
++ ui32Bytes, bPhysContig,
++ ui32MappingFlags);
++
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea =
++ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr,
++ ui32Bytes, bPhysContig,
++ ui32MappingFlags);
++
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea =
++ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr,
++ ui32Bytes, bPhysContig,
++ ui32MappingFlags);
++
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSRegisterMem : invalid flags 0x%x\n",
++ ui32MappingFlags));
++ *phOSMemHandle = (void *)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (void *)psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ void *pvCPUVAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
++
++ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, 1,
++ ui32MappingFlags, phOSMemHandle);
++}
++
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR * pBasePAddr,
++ void *pvCPUVAddr, u32 ui32Bytes,
++ u32 ui32MappingFlags, void **phOSMemHandle)
++{
++ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, 0,
++ ui32MappingFlags, phOSMemHandle);
++}
++
++PVRSRV_ERROR
++OSUnRegisterMem(void *pvCpuVAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void *hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++#ifdef INTEL_D3_P_CHANGES
++ if (!hOSMemHandle) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnRegisterMem : memory handle is null\n"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnRegisterMem : invalid flags 0x%x",
++ ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUnRegisterDiscontigMem(void *pvCpuVAddr, u32 ui32Bytes,
++ u32 ui32Flags, void *hOSMemHandle)
++{
++ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++}
++
++PVRSRV_ERROR
++OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
++ u32 ui32Bytes,
++ u32 ui32MappingFlags, void **ppvCpuVAddr, void **phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++#if 0
++
++ if (ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS) {
++ ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++
++ psLinuxMemArea =
++ NewIORemapLinuxMemArea(BasePAddr, ui32Bytes,
++ ui32MappingFlags);
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++ psLinuxMemArea =
++ NewIOLinuxMemArea(BasePAddr, ui32Bytes,
++ ui32MappingFlags);
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea =
++ NewIORemapLinuxMemArea(BasePAddr, ui32Bytes,
++ ui32MappingFlags);
++ if (!psLinuxMemArea) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSMapPhysToLin : invalid flags 0x%x\n",
++ ui32MappingFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (void *)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (void *)psLinuxMemArea;
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSUnReservePhys(void *pvCpuVAddr,
++ u32 ui32Bytes, u32 ui32MappingFlags, void *hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle;
++
++ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnMapPhysToLin : invalid flags 0x%x",
++ ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSBaseAllocContigMemory(u32 ui32Size, IMG_CPU_VIRTADDR * pvLinAddr,
++ IMG_CPU_PHYADDR * psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++ void *pvKernLinAddr;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ pvKernLinAddr = _KMallocWrapper(ui32Size, __FILE__, __LINE__);
++#else
++ pvKernLinAddr = KMallocWrapper(ui32Size);
++#endif
++ if (!pvKernLinAddr) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ *pvLinAddr = pvKernLinAddr;
++
++ psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);
++
++ return PVRSRV_OK;
++#endif
++}
++
++PVRSRV_ERROR OSBaseFreeContigMemory(u32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_CPU_PHYADDR psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__));
++#else
++ KFreeWrapper(pvLinAddr);
++#endif
++ return PVRSRV_OK;
++}
++
++u32 OSReadHWReg(void *pvLinRegBaseAddr, u32 ui32Offset)
++{
++#if !defined(NO_HARDWARE)
++ return (u32) readl((unsigned char *) pvLinRegBaseAddr + ui32Offset);
++#else
++ return *(u32 *) ((unsigned char *) pvLinRegBaseAddr + ui32Offset);
++#endif
++}
++
++void OSWriteHWReg(void *pvLinRegBaseAddr, u32 ui32Offset, u32 ui32Value)
++{
++#if !defined(NO_HARDWARE)
++ writel(ui32Value, (unsigned char *) pvLinRegBaseAddr + ui32Offset);
++#else
++ *(u32 *) ((unsigned char *) pvLinRegBaseAddr + ui32Offset) = ui32Value;
++#endif
++}
++
++#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
++{
++ int err;
++ u32 i;
++ PVR_PCI_DEV *psPVRPCI;
++
++ PVR_TRACE(("OSPCISetDev"));
++
++ if (OSAllocMem
++ (PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (void **)&psPVRPCI,
++ NULL, "PCI Device") != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++ return NULL;
++ }
++
++ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++ psPVRPCI->ePCIFlags = eFlags;
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCISetDev: Couldn't enable device (%d)", err));
++ return NULL;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) {
++ pci_set_master(psPVRPCI->psPCIDev);
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) {
++#if defined(CONFIG_PCI_MSI)
++ err = pci_enable_msi(psPVRPCI->psPCIDev);
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSPCISetDev: Couldn't enable MSI (%d)", err));
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ }
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSPCISetDev: MSI support not enabled in the kernel"));
++#endif
++ }
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++ psPVRPCI->abPCIResourceInUse[i] = 0;
++ }
++
++ return (PVRSRV_PCI_DEV_HANDLE) psPVRPCI;
++}
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(u16 ui16VendorID, u16 ui16DeviceID,
++ HOST_PCI_INIT_FLAGS eFlags)
++{
++ struct pci_dev *psPCIDev;
++
++ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++ if (psPCIDev == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIAcquireDev: Couldn't acquire device"));
++ return NULL;
++ }
++
++ return OSPCISetDev((void *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 * pui32IRQ)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *) hPVRPCI;
++
++ *pui32IRQ = psPVRPCI->psPCIDev->irq;
++
++ return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC {
++ HOST_PCI_ADDR_RANGE_FUNC_LEN,
++ HOST_PCI_ADDR_RANGE_FUNC_START,
++ HOST_PCI_ADDR_RANGE_FUNC_END,
++ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
++static u32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++ PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *) hPVRPCI;
++
++ if (ui32Index >= DEVICE_COUNT_RESOURCE) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIAddrRangeFunc: Index out of range"));
++ return 0;
++
++ }
++
++ switch (eFunc) {
++ case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_START:
++ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_END:
++ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++ {
++ int err;
++
++ err =
++ pci_request_region(psPVRPCI->psPCIDev,
++ (int)ui32Index, "PowerVR");
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIAddrRangeFunc: pci_request_region_failed (%d)",
++ err));
++ return 0;
++ }
++ psPVRPCI->abPCIResourceInUse[ui32Index] = 1;
++ return 1;
++ }
++ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++ if (psPVRPCI->abPCIResourceInUse[ui32Index]) {
++ pci_release_region(psPVRPCI->psPCIDev, (int)ui32Index);
++ psPVRPCI->abPCIResourceInUse[ui32Index] = 0;
++ }
++ return 1;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIAddrRangeFunc: Unknown function"));
++ break;
++ }
++
++ return 0;
++}
++
++u32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI,
++ ui32Index);
++}
++
++u32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI,
++ ui32Index);
++}
++
++u32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI,
++ ui32Index);
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI,
++ ui32Index) ==
++ 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, u32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI,
++ ui32Index) ==
++ 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *) hPVRPCI;
++ int i;
++
++ PVR_TRACE(("OSPCIReleaseDev"));
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++ if (psPVRPCI->abPCIResourceInUse[i]) {
++ PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ psPVRPCI->abPCIResourceInUse[i] = 0;
++ }
++ }
++
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) {
++ pci_disable_msi(psPVRPCI->psPCIDev);
++ }
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) {
++ pci_clear_master(psPVRPCI->psPCIDev);
++ }
++#endif
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (void *)psPVRPCI,
++ NULL);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *) hPVRPCI;
++ int i;
++ int err;
++
++ PVR_TRACE(("OSPCISuspendDev"));
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++ if (psPVRPCI->abPCIResourceInUse[i]) {
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ }
++ }
++
++ err = pci_save_state(psPVRPCI->psPCIDev);
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCISuspendDev: pci_save_state_failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ err =
++ pci_set_power_state(psPVRPCI->psPCIDev,
++ pci_choose_state(psPVRPCI->psPCIDev,
++ PMSG_SUSPEND));
++ switch (err) {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSPCISuspendDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCISuspendDev: can't enter requested power state"));
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCISuspendDev: pci_set_power_state failed (%d)",
++ err));
++ break;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *) hPVRPCI;
++ int err;
++ int i;
++
++ PVR_TRACE(("OSPCIResumeDev"));
++
++ err =
++ pci_set_power_state(psPVRPCI->psPCIDev,
++ pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
++ switch (err) {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSPCIResumeDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIResumeDev: can't enter requested power state"));
++ return PVRSRV_ERROR_GENERIC;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIResumeDev: pci_set_power_state failed (%d)",
++ err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_restore_state(psPVRPCI->psPCIDev);
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIResumeDev: Couldn't enable device (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ pci_set_master(psPVRPCI->psPCIDev);
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++ if (psPVRPCI->abPCIResourceInUse[i]) {
++ err =
++ pci_request_region(psPVRPCI->psPCIDev, i,
++ "PowerVR");
++ if (err != 0) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)",
++ i, err));
++ }
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define OS_MAX_TIMERS 8
++
++typedef struct TIMER_CALLBACK_DATA_TAG {
++ int bInUse;
++ PFN_TIMER_FUNC pfnTimerFunc;
++ void *pvData;
++ struct timer_list sTimer;
++ u32 ui32Delay;
++ int bActive;
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ struct work_struct sWork;
++#endif
++} TIMER_CALLBACK_DATA;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static struct workqueue_struct *psTimerWorkQueue;
++#endif
++
++static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++DEFINE_MUTEX(sTimerStructLock);
++#else
++static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
++
++#endif
++
++static void OSTimerCallbackBody(TIMER_CALLBACK_DATA * psTimerCBData)
++{
++ if (!psTimerCBData->bActive)
++ return;
++
++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++
++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData =
++ container_of(psWork, TIMER_CALLBACK_DATA, sWork);
++
++ OSTimerCallbackBody(psTimerCBData);
++}
++#endif
++
++static void OSTimerCallbackWrapper(u32 ui32Data)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA *) ui32Data;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ int res;
++
++ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
++ if (res == 0) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSTimerCallbackWrapper: work already queued"));
++ }
++#else
++ OSTimerCallbackBody(psTimerCBData);
++#endif
++}
++
++void *OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, u32 ui32MsTimeout)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData;
++ u32 ui32i;
++#if !defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ unsigned long ulLockFlags;
++#endif
++
++ if (!pfnTimerFunc) {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
++ return NULL;
++ }
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_lock(&sTimerStructLock);
++#else
++ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
++#endif
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) {
++ psTimerCBData = &sTimers[ui32i];
++ if (!psTimerCBData->bInUse) {
++ psTimerCBData->bInUse = 1;
++ break;
++ }
++ }
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_unlock(&sTimerStructLock);
++#else
++ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
++#endif
++ if (ui32i >= OS_MAX_TIMERS) {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
++ return NULL;
++ }
++
++ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++ psTimerCBData->pvData = pvData;
++ psTimerCBData->bActive = 0;
++
++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++ ? 1 : ((HZ * ui32MsTimeout) / 1000);
++
++ init_timer(&psTimerCBData->sTimer);
++
++ psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper;
++ psTimerCBData->sTimer.data = (u32) psTimerCBData;
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ return (void *)(ui32i + 1);
++}
++
++static inline TIMER_CALLBACK_DATA *GetTimerStructure(void *hTimer)
++{
++ u32 ui32i = ((u32) hTimer) - 1;
++
++ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
++
++ return &sTimers[ui32i];
++}
++
++PVRSRV_ERROR OSRemoveTimer(void *hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++ psTimerCBData->bInUse = 0;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSEnableTimer(void *hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++ psTimerCBData->bActive = 1;
++
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ add_timer(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSDisableTimer(void *hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(psTimerCBData->bActive);
++
++ psTimerCBData->bActive = 0;
++ smp_mb();
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++ del_timer_sync(&psTimerCBData->sTimer);
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSEventObjectCreate(const char *pszName,
++ PVRSRV_EVENTOBJECT * psEventObject)
++{
++
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (pszName) {
++
++ strncpy(psEventObject->szName, pszName,
++ EVENTOBJNAME_MAXLENGTH);
++ } else {
++
++ static u16 ui16NameIndex = 0;
++ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH,
++ "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++ }
++
++ if (LinuxEventObjectListCreate(&psEventObject->hOSEventKM) !=
++ PVRSRV_OK) {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++
++}
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT * psEventObject)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (psEventObject->hOSEventKM) {
++ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if (hOSEventKM) {
++ eError =
++ LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectWait: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT * psEventObject,
++ void **phOSEvent)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) !=
++ PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT * psEventObject,
++ void *hOSEventKM)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psEventObject) {
++ if (LinuxEventObjectDelete
++ (psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "LinuxEventObjectDelete: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++
++}
++
++PVRSRV_ERROR OSEventObjectSignal(void *hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if (hOSEventKM) {
++ eError = LinuxEventObjectSignal(hOSEventKM);
++ } else {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++int OSProcHasPrivSrvInit(void)
++{
++ return (capable(CAP_SYS_MODULE) != 0) ? 1 : 0;
++}
++
++PVRSRV_ERROR OSCopyToUser(void *pvProcess,
++ void *pvDest, void *pvSrc, u32 ui32Bytes)
++{
++ if (copy_to_user(pvDest, pvSrc, ui32Bytes) == 0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser(void *pvProcess,
++ void *pvDest, void *pvSrc, u32 ui32Bytes)
++{
++ if (copy_from_user(pvDest, pvSrc, ui32Bytes) == 0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++int OSAccessOK(IMG_VERIFY_TEST eVerification, void *pvUserPtr, u32 ui32Bytes)
++{
++ int linuxType;
++
++ if (eVerification == PVR_VERIFY_READ) {
++ linuxType = VERIFY_READ;
++ } else {
++ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
++ linuxType = VERIFY_WRITE;
++ }
++
++ return access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++typedef enum _eWrapMemType_ {
++ WRAP_TYPE_CLEANUP,
++ WRAP_TYPE_GET_USER_PAGES,
++ WRAP_TYPE_FIND_VMA_PAGES,
++ WRAP_TYPE_FIND_VMA_PFN
++} eWrapMemType;
++
++typedef struct _sWrapMemInfo_ {
++ eWrapMemType eType;
++ int iNumPages;
++ struct page **ppsPages;
++ IMG_SYS_PHYADDR *psPhysAddr;
++ int iPageOffset;
++ int iContiguous;
++#if defined(DEBUG)
++ u32 ulStartAddr;
++ u32 ulBeyondEndAddr;
++ struct vm_area_struct *psVMArea;
++#endif
++ int bWrapWorkaround;
++} sWrapMemInfo;
++
++static void CheckPagesContiguous(sWrapMemInfo * psInfo)
++{
++ int i;
++ u32 ui32AddrChk;
++
++ BUG_ON(psInfo == NULL);
++
++ psInfo->iContiguous = 1;
++
++ for (i = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr;
++ i < psInfo->iNumPages; i++, ui32AddrChk += PAGE_SIZE) {
++ if (psInfo->psPhysAddr[i].uiAddr != ui32AddrChk) {
++ psInfo->iContiguous = 0;
++ break;
++ }
++ }
++}
++
++static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea,
++ u32 ulCPUVAddr)
++{
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))
++ pgd_t *psPGD;
++ pud_t *psPUD;
++ pmd_t *psPMD;
++ pte_t *psPTE;
++ struct mm_struct *psMM = psVMArea->vm_mm;
++ u32 ulPFN;
++ spinlock_t *psPTLock;
++ struct page *psPage;
++
++ psPGD = pgd_offset(psMM, ulCPUVAddr);
++ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
++ return NULL;
++
++ psPUD = pud_offset(psPGD, ulCPUVAddr);
++ if (pud_none(*psPUD) || pud_bad(*psPUD))
++ return NULL;
++
++ psPMD = pmd_offset(psPUD, ulCPUVAddr);
++ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
++ return NULL;
++
++ psPage = NULL;
++
++ psPTE =
++ (pte_t *) pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock);
++ if ((pte_none(*psPTE) != 0) || (pte_present(*psPTE) == 0)
++ || (pte_write(*psPTE) == 0))
++ goto exit_unlock;
++
++ ulPFN = pte_pfn(*psPTE);
++ if (!pfn_valid(ulPFN))
++ goto exit_unlock;
++
++ psPage = pfn_to_page(ulPFN);
++
++ get_page(psPage);
++
++exit_unlock:
++ pte_unmap_unlock(psPTE, psPTLock);
++
++ return psPage;
++#else
++ return NULL;
++#endif
++}
++
++PVRSRV_ERROR OSReleasePhysPageAddr(void *hOSWrapMem)
++{
++ sWrapMemInfo *psInfo = (sWrapMemInfo *) hOSWrapMem;
++ int i;
++
++ BUG_ON(psInfo == NULL);
++
++ switch (psInfo->eType) {
++ case WRAP_TYPE_CLEANUP:
++ break;
++ case WRAP_TYPE_FIND_VMA_PFN:
++ break;
++ case WRAP_TYPE_GET_USER_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ struct page *psPage = psInfo->ppsPages[i];
++
++ if (!PageReserved(psPage)) ;
++ {
++ SetPageDirty(psPage);
++ }
++ page_cache_release(psPage);
++ }
++ break;
++ }
++ case WRAP_TYPE_FIND_VMA_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ if (psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[i]);
++ else
++ put_page_testzero(psInfo->ppsPages[i]);
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSReleasePhysPageAddr: Unknown wrap type (%d)",
++ psInfo->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ if (psInfo->ppsPages != NULL) {
++ kfree(psInfo->ppsPages);
++ }
++
++ if (psInfo->psPhysAddr != NULL) {
++ kfree(psInfo->psPhysAddr);
++ }
++
++ kfree(psInfo);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSAcquirePhysPageAddr(void *pvCPUVAddr,
++ u32 ui32Bytes,
++ IMG_SYS_PHYADDR * psSysPAddr,
++ void **phOSWrapMem, int bWrapWorkaround)
++{
++ u32 ulStartAddrOrig = (u32) pvCPUVAddr;
++ u32 ulAddrRangeOrig = (u32) ui32Bytes;
++ u32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
++ u32 ulStartAddr;
++ u32 ulAddrRange;
++ u32 ulBeyondEndAddr;
++ u32 ulAddr;
++ int iNumPagesMapped;
++ int i;
++ struct vm_area_struct *psVMArea;
++ sWrapMemInfo *psInfo;
++
++ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
++ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
++ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
++
++ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
++ if (psInfo == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate information structure"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psInfo, 0, sizeof(*psInfo));
++ psInfo->bWrapWorkaround = bWrapWorkaround;
++
++#if defined(DEBUG)
++ psInfo->ulStartAddr = ulStartAddrOrig;
++ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
++#endif
++
++ psInfo->iNumPages = (int)(ulAddrRange >> PAGE_SHIFT);
++ psInfo->iPageOffset = (int)(ulStartAddrOrig & ~PAGE_MASK);
++
++ psInfo->psPhysAddr =
++ kmalloc((size_t) psInfo->iNumPages * sizeof(*psInfo->psPhysAddr),
++ GFP_KERNEL);
++ if (psInfo->psPhysAddr == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++ psInfo->ppsPages =
++ kmalloc((size_t) psInfo->iNumPages * sizeof(*psInfo->ppsPages),
++ GFP_KERNEL);
++ if (psInfo->ppsPages == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++ down_read(&current->mm->mmap_sem);
++ iNumPagesMapped =
++ get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages,
++ 1, 0, psInfo->ppsPages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (iNumPagesMapped >= 0) {
++
++ if (iNumPagesMapped != psInfo->iNumPages) {
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, iNumPagesMapped));
++
++ for (i = 0; i < iNumPagesMapped; i++) {
++ page_cache_release(psInfo->ppsPages[i]);
++
++ }
++ goto error_free;
++ }
++
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr =
++ page_to_pfn(psInfo->ppsPages[i]) << PAGE_SHIFT;
++ psInfo->psPhysAddr[i] =
++ SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++
++ }
++
++ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
++
++ goto exit_check;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "OSAcquirePhysPageAddr: get_user_pages failed (%d), trying something else",
++ iNumPagesMapped));
++
++ down_read(&current->mm->mmap_sem);
++
++ psVMArea = find_vma(current->mm, ulStartAddrOrig);
++ if (psVMArea == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %lx",
++ ulStartAddrOrig));
++
++ goto error_release_mmap_sem;
++ }
++#if defined(DEBUG)
++ psInfo->psVMArea = psVMArea;
++#endif
++
++ if (ulStartAddrOrig < psVMArea->vm_start) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Start address %lx is outside of the region returned by find_vma",
++ ulStartAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++ if (ulBeyondEndAddrOrig > psVMArea->vm_end) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: End address %lx is outside of the region returned by find_vma",
++ ulBeyondEndAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) !=
++ (VM_IO | VM_RESERVED)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)",
++ psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)",
++ psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig;
++ ulAddr += PAGE_SIZE, i++) {
++ struct page *psPage;
++
++ BUG_ON(i >= psInfo->iNumPages);
++
++ psPage = CPUVAddrToPage(psVMArea, ulAddr);
++ if (psPage == NULL) {
++ int j;
++
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't lookup page structure for address 0x%lx, trying something else", ulAddr));
++
++ for (j = 0; j < i; j++) {
++ if (psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[j]);
++ else
++ put_page_testzero(psInfo->ppsPages[j]);
++ }
++ break;
++ }
++
++ psInfo->ppsPages[i] = psPage;
++ }
++
++ BUG_ON(i > psInfo->iNumPages);
++ if (i == psInfo->iNumPages) {
++
++ for (i = 0; i < psInfo->iNumPages; i++) {
++ struct page *psPage = psInfo->ppsPages[i];
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT;
++
++ psInfo->psPhysAddr[i] =
++ SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES;
++ } else {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) && defined(PVR_SECURE_HANDLES)
++
++ if ((psVMArea->vm_flags & VM_PFNMAP) == 0) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region isn't a raw PFN mapping. Giving up."));
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0;
++ ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++) {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr =
++ ((ulAddr - psVMArea->vm_start) +
++ (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK;
++
++ psInfo->psPhysAddr[i] =
++ SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++ BUG_ON(i != psInfo->iNumPages);
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region can't be locked down"));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Raw PFN mappings not supported. Giving up."));
++ goto error_release_mmap_sem;
++#endif
++ }
++
++ up_read(&current->mm->mmap_sem);
++
++exit_check:
++ CheckPagesContiguous(psInfo);
++
++ *phOSWrapMem = (void *)psInfo;
++
++ return PVRSRV_OK;
++
++error_release_mmap_sem:
++ up_read(&current->mm->mmap_sem);
++error_free:
++ psInfo->eType = WRAP_TYPE_CLEANUP;
++ OSReleasePhysPageAddr((void *)psInfo);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVROSFuncInit(void)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ {
++ u32 ui32i;
++
++ psTimerWorkQueue = create_workqueue("pvr_timer");
++ if (psTimerWorkQueue == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: couldn't create timer workqueue",
++ __FUNCTION__));
++ return PVRSRV_ERROR_GENERIC;
++
++ }
++
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) {
++ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
++
++ INIT_WORK(&psTimerCBData->sWork,
++ OSTimerWorkQueueCallBack);
++ }
++ }
++#endif
++ return PVRSRV_OK;
++}
++
++void PVROSFuncDeInit(void)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ if (psTimerWorkQueue != NULL) {
++ destroy_workqueue(psTimerWorkQueue);
++ }
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/osperproc.c
+@@ -0,0 +1,106 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "osperproc.h"
++
++#include "env_perproc.h"
++#include "proc.h"
++
++extern u32 gui32ReleasePID;
++
++PVRSRV_ERROR OSPerProcessPrivateDataInit(void **phOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ void *hBlockAlloc;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ phOsPrivateData,
++ &hBlockAlloc, "Environment per Process Data");
++
++ if (eError != PVRSRV_OK) {
++ *phOsPrivateData = NULL;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)",
++ __FUNCTION__, eError));
++ return eError;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *) * phOsPrivateData;
++ memset(psEnvPerProc, 0, sizeof(*psEnvPerProc));
++
++ psEnvPerProc->hBlockAlloc = hBlockAlloc;
++
++ LinuxMMapPerProcessConnect(psEnvPerProc);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(void *hOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (hOsPrivateData == NULL) {
++ return PVRSRV_OK;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *) hOsPrivateData;
++
++ LinuxMMapPerProcessDisconnect(psEnvPerProc);
++
++ RemovePerProcessProcDir(psEnvPerProc);
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ hOsPrivateData, psEnvPerProc->hBlockAlloc);
++
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)",
++ __FUNCTION__, eError));
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE * psHandleBase)
++{
++ return LinuxMMapPerProcessHandleOptions(psHandleBase);
++}
++
++void *LinuxTerminatingProcessPrivateData(void)
++{
++ if (!gui32ReleasePID)
++ return NULL;
++ return PVRSRVPerProcessPrivateData(gui32ReleasePID);
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/pdump.c
+@@ -0,0 +1,610 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined (SUPPORT_SGX)
++#if defined (PDUMP)
++
++#include <asm/atomic.h>
++#include <stdarg.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvrversion.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>
++
++static int PDumpWriteString2(char *pszString, u32 ui32Flags);
++static int PDumpWriteILock(PDBG_STREAM psStream, u8 * pui8Data, u32 ui32Count,
++ u32 ui32Flags);
++static void DbgSetFrame(PDBG_STREAM psStream, u32 ui32Frame);
++static u32 DbgGetFrame(PDBG_STREAM psStream);
++static void DbgSetMarker(PDBG_STREAM psStream, u32 ui32Marker);
++static u32 DbgWrite(PDBG_STREAM psStream, u8 * pui8Data, u32 ui32BCount,
++ u32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(a,b) (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = NULL;
++
++char *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
++ "ScriptStream2",
++ "DriverInfoStream"
++};
++
++typedef struct PDBG_PDUMP_STATE_TAG {
++ PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++ u32 ui32ParamFileNum;
++
++ char *pszMsg;
++ char *pszScript;
++ char *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = { {NULL}, 0, NULL, NULL, NULL };
++
++#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++void DBGDrvGetServiceTable(void **fn_table);
++
++static inline int PDumpSuspended(void)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++PVRSRV_ERROR PDumpOSGetScriptString(void **phScript, u32 * pui32MaxLen)
++{
++ *phScript = (void *)gsDBGPdumpState.pszScript;
++ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
++ if ((!*phScript) || PDumpSuspended()) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetMessageString(void **phMsg, u32 * pui32MaxLen)
++{
++ *phMsg = (void *)gsDBGPdumpState.pszMsg;
++ *pui32MaxLen = SZ_MSG_SIZE_MAX;
++ if ((!*phMsg) || PDumpSuspended()) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetFilenameString(char **ppszFile, u32 * pui32MaxLen)
++{
++ *ppszFile = gsDBGPdumpState.pszFile;
++ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
++ if ((!*ppszFile) || PDumpSuspended()) {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++int PDumpOSWriteString2(void *hScript, u32 ui32Flags)
++{
++ return PDumpWriteString2(hScript, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpOSBufprintf(void *hBuf, u32 ui32ScriptSizeMax,
++ char *pszFormat, ...)
++{
++ char *pszBuf = hBuf;
++ u32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n >= ui32ScriptSizeMax || n == -1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSVSprintf(char *pszComment, u32 ui32ScriptSizeMax,
++ char *pszFormat, PDUMP_va_list vaArgs)
++{
++ u32 n;
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ if (n >= ui32ScriptSizeMax || n == -1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++void PDumpOSDebugPrintf(char *pszFormat, ...)
++{
++
++}
++
++PVRSRV_ERROR PDumpOSSprintf(char *pszComment, u32 ui32ScriptSizeMax,
++ char *pszFormat, ...)
++{
++ u32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n >= ui32ScriptSizeMax || n == -1) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++u32 PDumpOSBuflen(void *hBuffer, u32 ui32BufferSizeMax)
++{
++ char *pszBuf = hBuffer;
++ u32 ui32Count = 0;
++
++ while ((pszBuf[ui32Count] != 0) && (ui32Count < ui32BufferSizeMax)) {
++ ui32Count++;
++ }
++ return (ui32Count);
++}
++
++void PDumpOSVerifyLineEnding(void *hBuffer, u32 ui32BufferSizeMax)
++{
++ u32 ui32Count = 0;
++ char *pszBuf = hBuffer;
++
++ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
++
++ if ((ui32Count >= 1) && (pszBuf[ui32Count - 1] != '\n')
++ && (ui32Count < ui32BufferSizeMax)) {
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++ if ((ui32Count >= 2) && (pszBuf[ui32Count - 2] != '\r')
++ && (ui32Count < ui32BufferSizeMax)) {
++ pszBuf[ui32Count - 1] = '\r';
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++}
++
++void *PDumpOSGetStream(u32 ePDumpStream)
++{
++ return (void *)gsDBGPdumpState.psStream[ePDumpStream];
++}
++
++u32 PDumpOSGetStreamOffset(u32 ePDumpStream)
++{
++ PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream];
++ return gpfnDbgDrv->pfnGetStreamOffset(psStream);
++}
++
++u32 PDumpOSGetParamFileNum(void)
++{
++ return gsDBGPdumpState.ui32ParamFileNum;
++}
++
++int PDumpOSWriteString(void *hStream,
++ u8 * psui8Data, u32 ui32Size, u32 ui32Flags)
++{
++ PDBG_STREAM psStream = (PDBG_STREAM) hStream;
++ return PDumpWriteILock(psStream, psui8Data, ui32Size, ui32Flags);
++}
++
++void PDumpOSCheckForSplitting(void *hStream, u32 ui32Size, u32 ui32Flags)
++{
++
++}
++
++int PDumpOSJTInitialised(void)
++{
++ if (gpfnDbgDrv) {
++ return 1;
++ }
++ return 0;
++}
++
++inline int PDumpOSIsSuspended(void)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++void PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ void *hOSMemHandle,
++ u32 ui32Offset,
++ u8 * pui8LinAddr,
++ u32 ui32PageSize, IMG_DEV_PHYADDR * psDevPAddr)
++{
++ if (hOSMemHandle) {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
++
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ } else {
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++}
++
++void PDumpOSCPUVAddrToPhysPages(void *hOSMemHandle,
++ u32 ui32Offset,
++ u8 * pui8LinAddr, u32 * pui32PageOffset)
++{
++ if (hOSMemHandle) {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ *pui32PageOffset = sCpuPAddr.uiAddr & (HOST_PAGESIZE() - 1);
++ } else {
++ *pui32PageOffset = (u32) pui8LinAddr & (HOST_PAGESIZE() - 1);
++ }
++}
++
++void PDumpInit(void)
++{
++ u32 i;
++
++ if (!gpfnDbgDrv) {
++ DBGDrvGetServiceTable((void **)&gpfnDbgDrv);
++
++ if (gpfnDbgDrv == NULL) {
++ return;
++ }
++
++ if (!gsDBGPdumpState.pszFile) {
++ if (OSAllocMem
++ (PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX,
++ (void **)&gsDBGPdumpState.pszFile, 0,
++ "Filename string") != PVRSRV_OK) {
++ goto init_failed;
++ }
++ }
++
++ if (!gsDBGPdumpState.pszMsg) {
++ if (OSAllocMem
++ (PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX,
++ (void **)&gsDBGPdumpState.pszMsg, 0,
++ "Message string") != PVRSRV_OK) {
++ goto init_failed;
++ }
++ }
++
++ if (!gsDBGPdumpState.pszScript) {
++ if (OSAllocMem
++ (PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX,
++ (void **)&gsDBGPdumpState.pszScript, 0,
++ "Script string") != PVRSRV_OK) {
++ goto init_failed;
++ }
++ }
++
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++) {
++ gsDBGPdumpState.psStream[i] =
++ gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
++ DEBUG_CAPMODE_FRAMED,
++ DEBUG_OUTMODE_STREAMENABLE,
++ 0, 10);
++
++ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.
++ psStream[i],
++ DEBUG_CAPMODE_FRAMED,
++ 0xFFFFFFFF, 0xFFFFFFFF,
++ 1);
++ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i], 0);
++ }
++
++ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
++ PDUMPCOMMENT("Driver Product Version: %s (%s)",
++ PVRVERSION_STRING, PVRVERSION_FILE);
++ PDUMPCOMMENT("Start of Init Phase");
++ }
++
++ return;
++
++init_failed:
++
++ if (gsDBGPdumpState.pszFile) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = NULL;
++ }
++
++ if (gsDBGPdumpState.pszScript) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = NULL;
++ }
++
++ if (gsDBGPdumpState.pszMsg) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = NULL;
++ }
++
++ gpfnDbgDrv = NULL;
++}
++
++void PDumpDeInit(void)
++{
++ u32 i;
++
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++) {
++ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++ }
++
++ if (gsDBGPdumpState.pszFile) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = NULL;
++ }
++
++ if (gsDBGPdumpState.pszScript) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = NULL;
++ }
++
++ if (gsDBGPdumpState.pszMsg) {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX,
++ (void *)gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = NULL;
++ }
++
++ gpfnDbgDrv = NULL;
++}
++
++PVRSRV_ERROR PDumpStartInitPhaseKM(void)
++{
++ u32 i;
++
++ if (gpfnDbgDrv) {
++ PDUMPCOMMENT("Start Init Phase");
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++) {
++ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.
++ psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpStopInitPhaseKM(void)
++{
++ u32 i;
++
++ if (gpfnDbgDrv) {
++ PDUMPCOMMENT("Stop Init Phase");
++
++ for (i = 0; i < PDUMP_NUM_STREAMS; i++) {
++ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.
++ psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++int PDumpIsLastCaptureFrameKM(void)
++{
++ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.
++ psStream
++ [PDUMP_STREAM_SCRIPT2]);
++}
++
++int PDumpIsCaptureFrameKM(void)
++{
++ if (PDumpSuspended()) {
++ return 0;
++ }
++ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_SCRIPT2], 0);
++}
++
++PVRSRV_ERROR PDumpSetFrameKM(u32 ui32Frame)
++{
++ u32 ui32Stream;
++
++ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++) {
++ if (gsDBGPdumpState.psStream[ui32Stream]) {
++ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream],
++ ui32Frame);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpGetFrameKM(u32 * pui32Frame)
++{
++ *pui32Frame =
++ DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++ return PVRSRV_OK;
++}
++
++static int PDumpWriteString2(char *pszString, u32 ui32Flags)
++{
++ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2],
++ (u8 *) pszString, strlen(pszString), ui32Flags);
++}
++
++static int PDumpWriteILock(PDBG_STREAM psStream, u8 * pui8Data, u32 ui32Count,
++ u32 ui32Flags)
++{
++ u32 ui32Written = 0;
++ u32 ui32Off = 0;
++
++ if ((psStream == NULL) || PDumpSuspended()
++ || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0)) {
++ return 1;
++ }
++
++ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]) {
++ u32 ui32ParamOutPos =
++ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.
++ psStream
++ [PDUMP_STREAM_PARAM2]);
++
++ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE) {
++ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]
++ &&
++ PDumpWriteString2
++ ("\r\n-- Splitting pdump output file\r\n\r\n",
++ ui32Flags))) {
++ DbgSetMarker(gsDBGPdumpState.
++ psStream[PDUMP_STREAM_PARAM2],
++ ui32ParamOutPos);
++ gsDBGPdumpState.ui32ParamFileNum++;
++ }
++ }
++ }
++
++ while (((u32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF)) {
++ ui32Written =
++ DbgWrite(psStream, &pui8Data[ui32Off], ui32Count,
++ ui32Flags);
++
++ if (ui32Written == 0) {
++ OSReleaseThreadQuanta();
++ }
++
++ if (ui32Written != 0xFFFFFFFF) {
++ ui32Off += ui32Written;
++ ui32Count -= ui32Written;
++ }
++ }
++
++ if (ui32Written == 0xFFFFFFFF) {
++ return 0;
++ }
++
++ return 1;
++}
++
++static void DbgSetFrame(PDBG_STREAM psStream, u32 ui32Frame)
++{
++ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++static u32 DbgGetFrame(PDBG_STREAM psStream)
++{
++ return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static void DbgSetMarker(PDBG_STREAM psStream, u32 ui32Marker)
++{
++ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static u32 DbgWrite(PDBG_STREAM psStream, u8 * pui8Data, u32 ui32BCount,
++ u32 ui32Flags)
++{
++ u32 ui32BytesWritten;
++
++ if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0) {
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ (psStream->ui32Start == 0xFFFFFFFFUL) &&
++ (psStream->ui32End == 0xFFFFFFFFUL) &&
++ psStream->bInitPhaseComplete) {
++ ui32BytesWritten = ui32BCount;
++ } else {
++ ui32BytesWritten =
++ gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data,
++ ui32BCount, 1);
++ }
++ } else {
++ if (ui32Flags & PDUMP_FLAGS_LASTFRAME) {
++ u32 ui32DbgFlags;
++
++ ui32DbgFlags = 0;
++ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER) {
++ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++ }
++
++ ui32BytesWritten =
++ gpfnDbgDrv->pfnWriteLF(psStream, pui8Data,
++ ui32BCount, 1, ui32DbgFlags);
++ } else {
++ ui32BytesWritten =
++ gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data,
++ ui32BCount, 1);
++ }
++ }
++
++ return ui32BytesWritten;
++}
++
++void PDumpSuspendKM(void)
++{
++ atomic_inc(&gsPDumpSuspended);
++}
++
++void PDumpResumeKM(void)
++{
++ atomic_dec(&gsPDumpSuspended);
++}
++
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/private_data.h
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_PRIVATE_DATA_H_
++#define __INCLUDED_PRIVATE_DATA_H_
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include <linux/list.h>
++#include <drm/drmP.h>
++#endif
++
++typedef struct
++{
++
++ u32 ui32OpenPID;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++
++ void * hKernelMemInfo;
++#endif
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ struct list_head sDRMAuthListItem;
++
++ struct drm_file *psDRMFile;
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++
++ u64 ui64Stamp;
++#endif
++
++
++ void * hBlockAlloc;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ void * pPriv;
++#endif
++}
++PVRSRV_FILE_PRIVATE_DATA;
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/proc.c
+@@ -0,0 +1,933 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "linkage.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++
++static struct proc_dir_entry *dir;
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++static off_t procDumpSysNodes(char *buf, size_t size, off_t off);
++static off_t procDumpVersion(char *buf, size_t size, off_t off);
++#endif
++
++static const char PVRProcDirRoot[] = "pvr";
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++static int pvr_proc_open(struct inode *inode, struct file *file);
++static void *pvr_proc_seq_start(struct seq_file *m, loff_t * pos);
++static void pvr_proc_seq_stop(struct seq_file *m, void *v);
++static void *pvr_proc_seq_next(struct seq_file *m, void *v, loff_t * pos);
++static int pvr_proc_seq_show(struct seq_file *m, void *v);
++static ssize_t pvr_proc_write(struct file *file, const char __user * buffer,
++ size_t count, loff_t * ppos);
++
++static struct file_operations pvr_proc_operations = {
++ .open = pvr_proc_open,
++ .read = seq_read,
++ .write = pvr_proc_write,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++static struct seq_operations pvr_proc_seq_operations = {
++ .start = pvr_proc_seq_start,
++ .next = pvr_proc_seq_next,
++ .stop = pvr_proc_seq_stop,
++ .show = pvr_proc_seq_show,
++};
++
++static struct proc_dir_entry *g_pProcQueue;
++static struct proc_dir_entry *g_pProcVersion;
++static struct proc_dir_entry *g_pProcSysNodes;
++
++#ifdef DEBUG
++static struct proc_dir_entry *g_pProcDebugLevel;
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++static struct proc_dir_entry *g_pProcPowerLevel;
++#endif
++
++static void ProcSeqShowVersion(struct seq_file *sfile, void *el);
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile, void *el);
++static void *ProcSeqOff2ElementSysNodes(struct seq_file *sfile, loff_t off);
++
++#endif
++
++off_t printAppend(char *buffer, size_t size, off_t off, const char *format, ...)
++{
++ int n;
++ size_t space = size - (size_t) off;
++ va_list ap;
++
++ PVR_ASSERT(space >= 0);
++
++ va_start(ap, format);
++
++ n = vsnprintf(buffer + off, space, format, ap);
++
++ va_end(ap);
++
++ if (n >= (int)space || n < 0) {
++
++ buffer[size - 1] = 0;
++ return (off_t) (size - 1);
++ } else {
++ return (off + (off_t) n);
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void *ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
++{
++
++ if (!off)
++ return (void *)2;
++ return NULL;
++}
++
++void *ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
++{
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ if (off == 1)
++ return (void *)2;
++
++ return NULL;
++}
++
++static int pvr_proc_open(struct inode *inode, struct file *file)
++{
++ int ret = seq_open(file, &pvr_proc_seq_operations);
++
++ struct seq_file *seq = (struct seq_file *)file->private_data;
++ struct proc_dir_entry *pvr_proc_entry = PDE(inode);
++
++ seq->private = pvr_proc_entry->data;
++ return ret;
++}
++
++static ssize_t pvr_proc_write(struct file *file, const char __user * buffer,
++ size_t count, loff_t * ppos)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct proc_dir_entry *dp;
++
++ dp = PDE(inode);
++
++ if (!dp->write_proc)
++ return -EIO;
++
++ return dp->write_proc(file, buffer, count, dp->data);
++}
++
++static void *pvr_proc_seq_start(struct seq_file *proc_seq_file, loff_t * pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) proc_seq_file->private;
++ if (handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, 1);
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static void pvr_proc_seq_stop(struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) proc_seq_file->private;
++ if (handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, 0);
++}
++
++static void *pvr_proc_seq_next(struct seq_file *proc_seq_file, void *v,
++ loff_t * pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) proc_seq_file->private;
++ (*pos)++;
++ if (handlers->next != NULL)
++ return handlers->next(proc_seq_file, v, *pos);
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static int pvr_proc_seq_show(struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers =
++ (PVR_PROC_SEQ_HANDLERS *) proc_seq_file->private;
++ handlers->show(proc_seq_file, v);
++ return 0;
++}
++
++static struct proc_dir_entry *CreateProcEntryInDirSeq(struct proc_dir_entry
++ *pdir, const char *name,
++ void *data,
++ pvr_next_proc_seq_t
++ next_handler,
++ pvr_show_proc_seq_t
++ show_handler,
++ pvr_off2element_proc_seq_t
++ off2element_handler,
++ pvr_startstop_proc_seq_t
++ startstop_handler,
++ write_proc_t whandler)
++{
++
++ struct proc_dir_entry *file;
++ mode_t mode;
++
++ if (!dir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent",
++ PVRProcDirRoot, name));
++ return NULL;
++ }
++
++ mode = S_IFREG;
++
++ if (show_handler) {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler) {
++ mode |= S_IWUSR;
++ }
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file) {
++ PVR_PROC_SEQ_HANDLERS *seq_handlers;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++
++ file->proc_fops = &pvr_proc_operations;
++ file->write_proc = whandler;
++
++ file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL);
++ if (file->data) {
++ seq_handlers = (PVR_PROC_SEQ_HANDLERS *) file->data;
++ seq_handlers->next = next_handler;
++ seq_handlers->show = show_handler;
++ seq_handlers->off2element = off2element_handler;
++ seq_handlers->startstop = startstop_handler;
++ seq_handlers->data = data;
++
++ return file;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory",
++ PVRProcDirRoot, name));
++ return 0;
++}
++
++struct proc_dir_entry *CreateProcReadEntrySeq(const char *name,
++ void *data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t
++ off2element_handler,
++ pvr_startstop_proc_seq_t
++ startstop_handler)
++{
++ return CreateProcEntrySeq(name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler, startstop_handler, NULL);
++}
++
++struct proc_dir_entry *CreateProcEntrySeq(const char *name,
++ void *data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t
++ off2element_handler,
++ pvr_startstop_proc_seq_t
++ startstop_handler,
++ write_proc_t whandler)
++{
++ return CreateProcEntryInDirSeq(dir,
++ name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler, NULL);
++}
++
++struct proc_dir_entry *CreatePerProcessProcEntrySeq(const char *name,
++ void *data,
++ pvr_next_proc_seq_t
++ next_handler,
++ pvr_show_proc_seq_t
++ show_handler,
++ pvr_off2element_proc_seq_t
++ off2element_handler,
++ pvr_startstop_proc_seq_t
++ startstop_handler,
++ write_proc_t whandler)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ u32 ui32PID;
++
++ if (!dir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist",
++ PVRProcDirRoot));
++ return NULL;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntrySeq: no per process data"));
++
++ return NULL;
++ }
++
++ if (!psPerProc->psProcDir) {
++ char dirname[16];
++ int ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID);
++
++ if (ret <= 0 || ret >= (int)sizeof(dirname)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"",
++ ui32PID));
++ return NULL;
++ } else {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID));
++ return NULL;
++ }
++ }
++ }
++
++ return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data,
++ next_handler, show_handler,
++ off2element_handler, startstop_handler,
++ whandler);
++}
++
++void RemoveProcEntrySeq(struct proc_dir_entry *proc_entry)
++{
++ if (dir) {
++ void *data = proc_entry->data;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s",
++ PVRProcDirRoot, proc_entry->name));
++
++ remove_proc_entry(proc_entry->name, dir);
++ if (data)
++ kfree(data);
++
++ }
++}
++
++void RemovePerProcessProcEntrySeq(struct proc_dir_entry *proc_entry)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc) {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data",
++ proc_entry->name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir) {
++ void *data = proc_entry->data;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s",
++ proc_entry->name, psPerProc->psProcDir->name));
++
++ remove_proc_entry(proc_entry->name, psPerProc->psProcDir);
++ if (data)
++ kfree(data);
++ }
++}
++
++#endif
++
++static int pvr_read_proc(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ pvr_read_proc_t *pprn = (pvr_read_proc_t *) data;
++
++ off_t len = pprn(page, (size_t) count, off);
++
++ if (len == END_OF_FILE) {
++ len = 0;
++ *eof = 1;
++ } else if (!len) {
++ *start = (char *)0;
++ } else {
++ *start = (char *)1;
++ }
++
++ return len;
++}
++
++static int CreateProcEntryInDir(struct proc_dir_entry *pdir, const char *name,
++ read_proc_t rhandler, write_proc_t whandler,
++ void *data)
++{
++ struct proc_dir_entry *file;
++ mode_t mode;
++
++ if (!pdir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntryInDir: parent directory doesn't exist"));
++
++ return -ENOMEM;
++ }
++
++ mode = S_IFREG;
++
++ if (rhandler) {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler) {
++ mode |= S_IWUSR;
++ }
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ file->read_proc = rhandler;
++ file->write_proc = whandler;
++ file->data = data;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name,
++ pdir->name));
++
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntry: cannot create proc entry %s in %s", name,
++ pdir->name));
++
++ return -ENOMEM;
++}
++
++int CreateProcEntry(const char *name, read_proc_t rhandler,
++ write_proc_t whandler, void *data)
++{
++ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
++}
++
++int CreatePerProcessProcEntry(const char *name, read_proc_t rhandler,
++ write_proc_t whandler, void *data)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ u32 ui32PID;
++
++ if (!dir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: /proc/%s doesn't exist",
++ PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: no per process data"));
++
++ return -ENOMEM;
++ }
++
++ if (!psPerProc->psProcDir) {
++ char dirname[16];
++ int ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID);
++
++ if (ret <= 0 || ret >= (int)sizeof(dirname)) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"",
++ ui32PID));
++
++ return -ENOMEM;
++ } else {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID));
++
++ return -ENOMEM;
++ }
++ }
++ }
++
++ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler,
++ whandler, data);
++}
++
++int CreateProcReadEntry(const char *name, pvr_read_proc_t handler)
++{
++ struct proc_dir_entry *file;
++
++ if (!dir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent",
++ PVRProcDirRoot, name));
++
++ return -ENOMEM;
++ }
++
++ file =
++ create_proc_read_entry(name, S_IFREG | S_IRUGO, dir, pvr_read_proc,
++ (void *)handler);
++
++ if (file) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory",
++ PVRProcDirRoot, name));
++
++ return -ENOMEM;
++}
++
++int CreateProcEntries(void)
++{
++ dir = proc_mkdir(PVRProcDirRoot, NULL);
++
++ if (!dir) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntries: cannot make /proc/%s directory",
++ PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcQueue =
++ CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue,
++ ProcSeqOff2ElementQueue, NULL);
++ g_pProcVersion =
++ CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion,
++ ProcSeq1ElementHeaderOff2Element, NULL);
++ g_pProcSysNodes =
++ CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes,
++ ProcSeqOff2ElementSysNodes, NULL);
++
++ if (!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes)
++#else
++ if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++ CreateProcReadEntry("version", procDumpVersion) ||
++ CreateProcReadEntry("nodes", procDumpSysNodes))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntries: couldn't make /proc/%s files",
++ PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL,
++ ProcSeqShowDebugLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRDebugProcSetLevel);
++ if (!g_pProcDebugLevel)
++#else
++ if (CreateProcEntry
++ ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntries: couldn't make /proc/%s/debug_level",
++ PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL,
++ ProcSeqShowPowerLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRProcSetPowerLevel);
++ if (!g_pProcPowerLevel)
++#else
++ if (CreateProcEntry
++ ("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreateProcEntries: couldn't make /proc/%s/power_control",
++ PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#endif
++#endif
++
++ return 0;
++}
++
++void RemoveProcEntry(const char *name)
++{
++ if (dir) {
++ remove_proc_entry(name, dir);
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s",
++ PVRProcDirRoot, name));
++ }
++}
++
++void RemovePerProcessProcEntry(const char *name)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc) {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir) {
++ remove_proc_entry(name, psPerProc->psProcDir);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s",
++ name, psPerProc->psProcDir->name));
++ }
++}
++
++void RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA * psPerProc)
++{
++ if (psPerProc->psProcDir) {
++ while (psPerProc->psProcDir->subdir) {
++ PVR_DPF((PVR_DBG_WARNING,
++ "Belatedly removing /proc/%s/%s/%s",
++ PVRProcDirRoot, psPerProc->psProcDir->name,
++ psPerProc->psProcDir->subdir->name));
++
++ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
++ }
++ RemoveProcEntry(psPerProc->psProcDir->name);
++ }
++}
++
++void RemoveProcEntries(void)
++{
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcDebugLevel);
++#else
++ RemoveProcEntry("debug_level");
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcPowerLevel);
++#else
++ RemoveProcEntry("power_control");
++#endif
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcQueue);
++ RemoveProcEntrySeq(g_pProcVersion);
++ RemoveProcEntrySeq(g_pProcSysNodes);
++#else
++ RemoveProcEntry("queue");
++ RemoveProcEntry("version");
++ RemoveProcEntry("nodes");
++#endif
++
++ while (dir->subdir) {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s",
++ PVRProcDirRoot, dir->subdir->name));
++
++ RemoveProcEntry(dir->subdir->name);
++ }
++
++ remove_proc_entry(PVRProcDirRoot, NULL);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowVersion(struct seq_file *sfile, void *el)
++{
++ SYS_DATA *psSysData;
++ char *pszSystemVersionString = "None";
++
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++ seq_printf(sfile,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if (psSysData->pszVersionString) {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ seq_printf(sfile, "System Version String: %s\n",
++ pszSystemVersionString);
++}
++
++#else
++
++static off_t procDumpVersion(char *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++
++ if (off == 0) {
++ return printAppend(buf, size, 0,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ }
++
++ SysAcquireData(&psSysData)
++
++ if (off == 1) {
++ char *pszSystemVersionString = "None";
++
++ if (psSysData->pszVersionString) {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ if (strlen(pszSystemVersionString)
++ + strlen("System Version String: \n")
++ + 1 > size) {
++ return 0;
++ }
++ return printAppend(buf, size, 0,
++ "System Version String: %s\n",
++ pszSystemVersionString);
++ }
++
++ return END_OF_FILE;
++}
++
++#endif
++
++static const char *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++ switch (deviceType) {
++ default:
++ {
++ static char text[10];
++
++ sprintf(text, "?%x", (u32) deviceType);
++
++ return text;
++ }
++ }
++}
++
++static const char *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++ switch (deviceClass) {
++ case PVRSRV_DEVICE_CLASS_3D:
++ {
++ return "3D";
++ }
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ return "display";
++ }
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ return "buffer";
++ }
++ default:
++ {
++ static char text[10];
++
++ sprintf(text, "?%x", (u32) deviceClass);
++ return text;
++ }
++ }
++}
++
++void *DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE * psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t *);
++ if (--(*pOff)) {
++ return NULL;
++ } else {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile, void *el)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) el;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++ seq_printf(sfile,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ seq_printf(sfile,
++ "%p %-8s %-8s %4d %2u %p %3u %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize, psDevNode->hResManContext);
++
++}
++
++static void *ProcSeqOff2ElementSysNodes(struct seq_file *sfile, loff_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ SysAcquireData(&psSysData);
++
++ psDevNode = (PVRSRV_DEVICE_NODE *)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb, &off);
++
++ return (void *)psDevNode;
++}
++
++#else
++
++static
++off_t procDumpSysNodes(char *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ off_t len;
++
++ if (size < 80) {
++ return 0;
++ }
++
++ if (off == 0) {
++ return printAppend(buf, size, 0,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ }
++
++ SysAcquireData(&psSysData);
++
++ psDevNode = (PVRSRV_DEVICE_NODE *)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb, &off);
++
++ if (!psDevNode) {
++ return END_OF_FILE;
++ }
++
++ len = printAppend(buf, size, 0,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++ return (len);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/proc.h
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(char *, size_t, off_t);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t);
++typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t);
++typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*);
++typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, int start);
++
++typedef struct _PVR_PROC_SEQ_HANDLERS_ {
++ pvr_next_proc_seq_t *next;
++ pvr_show_proc_seq_t *show;
++ pvr_off2element_proc_seq_t *off2element;
++ pvr_startstop_proc_seq_t *startstop;
++ void *data;
++} PVR_PROC_SEQ_HANDLERS;
++
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off);
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off);
++
++
++#endif
++
++off_t printAppend(char * buffer, size_t size, off_t off, const char * format, ...)
++ __attribute__((format(printf, 4, 5)));
++
++int CreateProcEntries(void);
++
++int CreateProcReadEntry (const char * name, pvr_read_proc_t handler);
++
++int CreateProcEntry(const char * name, read_proc_t rhandler, write_proc_t whandler, void *data);
++
++int CreatePerProcessProcEntry(const char * name, read_proc_t rhandler, write_proc_t whandler, void *data);
++
++void RemoveProcEntry(const char * name);
++
++void RemovePerProcessProcEntry(const char * name);
++
++void RemoveProcEntries(void);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const char* name,
++ void* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ );
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const char* name,
++ void* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const char* name,
++ void* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++
++void RemoveProcEntrySeq(struct proc_dir_entry* proc_entry);
++void RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry);
++
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/pvr_bridge_k.c
+@@ -0,0 +1,667 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "pvr_bridge_km.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#include "pvr_drm.h"
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++
++#ifdef MODULE_TEST
++#include "pvr_test_bridge.h"
++#include "kern_test.h"
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_ProcBridgeStats = 0;
++static void *ProcSeqNextBridgeStats(struct seq_file *sfile, void *el,
++ loff_t off);
++static void ProcSeqShowBridgeStats(struct seq_file *sfile, void *el);
++static void *ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off);
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile, int start);
++
++#else
++static off_t printLinuxBridgeStats(char *buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++extern struct mutex gPVRSRVLock;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++static u64 ui64Stamp;
++#endif
++
++PVRSRV_ERROR LinuxBridgeInit(void)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ {
++ int iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcBridgeStats = CreateProcReadEntrySeq("bridge_stats",
++ NULL,
++ ProcSeqNextBridgeStats,
++ ProcSeqShowBridgeStats,
++ ProcSeqOff2ElementBridgeStats,
++ ProcSeqStartstopBridgeStats);
++ iStatus = !g_ProcBridgeStats ? -1 : 0;
++#else
++ iStatus =
++ CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++#endif
++
++ if (iStatus != 0) {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++ return CommonBridgeInit();
++}
++
++void LinuxBridgeDeInit(void)
++{
++#if defined(DEBUG_BRIDGE_KM)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcBridgeStats);
++#else
++ RemoveProcEntry("bridge_stats");
++#endif
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile, int start)
++{
++ if (start) {
++ mutex_lock(&gPVRSRVLock);
++ } else {
++ mutex_unlock(&gPVRSRVLock);
++ }
++}
++
++static void *ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off)
++{
++ if (!off) {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ if (off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) {
++ return (void *)0;
++ }
++
++ return (void *)&g_BridgeDispatchTable[off - 1];
++}
++
++static void *ProcSeqNextBridgeStats(struct seq_file *sfile, void *el,
++ loff_t off)
++{
++ return ProcSeqOff2ElementBridgeStats(sfile, off);
++}
++
++static void ProcSeqShowBridgeStats(struct seq_file *sfile, void *el)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry =
++ (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *) el;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN) {
++ seq_printf(sfile,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes +
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name", "Wrapper Function", "Call Count",
++ "copy_from_user Bytes", "copy_to_user Bytes");
++ return;
++ }
++
++ seq_printf(sfile,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++}
++
++#else
++
++static off_t printLinuxBridgeStats(char *buffer, size_t count, off_t off)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++ off_t Ret;
++
++ mutex_lock(&gPVRSRVLock);
++
++ if (!off) {
++ if (count < 500) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.
++ ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.
++ ui32TotalCopyFromUserBytes +
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name", "Wrapper Function",
++ "Call Count", "copy_from_user Bytes",
++ "copy_to_user Bytes");
++ goto unlock_and_return;
++ }
++
++ if (off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if (count < 300) {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psEntry = &g_BridgeDispatchTable[off - 1];
++ Ret = printAppend(buffer, count, 0,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++ mutex_unlock(&gPVRSRVLock);
++ return Ret;
++}
++#endif
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++#if defined(INTEL_D3_CHANGES) && (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,23))
++int
++PVRSRV_BridgeDispatchKM(struct inode *pInode, struct file *pFile,
++ u32 unref__ ioctlCmd, u32 arg)
++#else
++int
++PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg,
++ struct drm_file *pFile)
++#endif
++#else
++int PVRSRV_BridgeDispatchKM(struct file *pFile, u32 unref__ ioctlCmd, u32 arg)
++#endif
++{
++ u32 cmd;
++#if !defined(SUPPORT_DRI_DRM)
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM =
++ (PVRSRV_BRIDGE_PACKAGE *) arg;
++ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++#endif
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
++ u32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ int err = -EFAULT;
++
++ mutex_lock(&gPVRSRVLock);
++
++#if defined(SUPPORT_DRI_DRM)
++ psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *) arg;
++ PVR_ASSERT(psBridgePackageKM != NULL);
++#else
++ psBridgePackageKM = &sBridgePackageKM;
++
++ if (!OSAccessOK(PVR_VERIFY_WRITE,
++ psBridgePackageUM, sizeof(PVRSRV_BRIDGE_PACKAGE))) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Received invalid pointer to function arguments",
++ __FUNCTION__));
++
++ goto unlock_and_return;
++ }
++
++ if (OSCopyFromUser(NULL,
++ psBridgePackageKM,
++ psBridgePackageUM, sizeof(PVRSRV_BRIDGE_PACKAGE))
++ != PVRSRV_OK) {
++ goto unlock_and_return;
++ }
++#endif
++
++ cmd = psBridgePackageKM->ui32BridgeID;
++
++#if defined(MODULE_TEST)
++ switch (cmd) {
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM1:
++ {
++ PVRSRV_ERROR eError = MemTest1();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM2:
++ {
++ PVRSRV_ERROR eError = MemTest2();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE:
++ {
++ PVRSRV_ERROR eError = ResourceTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT:
++ {
++ PVRSRV_ERROR eError = EventObjectTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING:
++ {
++ PVRSRV_ERROR eError = MemMappingTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID:
++ {
++ PVRSRV_ERROR eError = ProcessIDTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS:
++ {
++ PVRSRV_ERROR eError = ClockusWaitusTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_TIMER:
++ {
++ PVRSRV_ERROR eError = TimerTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV:
++ {
++ PVRSRV_ERROR eError = PrivSrvTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA:
++ {
++ u32 ui32PID;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ PVRSRV_ERROR eError;
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d",
++ ui32PID);
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++
++ eError =
++ CopyDataTest(psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->pvParamOut,
++ psPerProc);
++
++ *(PVRSRV_ERROR *) psBridgePackageKM->pvParamOut =
++ eError;
++ err = 0;
++ goto unlock_and_return;
++ }
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT:
++ {
++ PVRSRV_ERROR eError = PowerMgmtTest();
++ if (psBridgePackageKM->ui32OutBufferSize ==
++ sizeof(PVRSRV_BRIDGE_RETURN)) {
++ PVRSRV_BRIDGE_RETURN *pReturn =
++ (PVRSRV_BRIDGE_RETURN *) psBridgePackageKM->
++ pvParamOut;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ }
++#endif
++
++ if (cmd != PVRSRV_BRIDGE_CONNECT_SERVICES) {
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (void **)&psPerProc,
++ psBridgePackageKM->hKernelServices,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Invalid kernel services handle (%d)",
++ __FUNCTION__, eError));
++ goto unlock_and_return;
++ }
++
++ if (psPerProc->ui32PID != ui32PID) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Process %d tried to access data "
++ "belonging to process %d", __FUNCTION__,
++ ui32PID, psPerProc->ui32PID));
++ goto unlock_and_return;
++ }
++ } else {
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == NULL) {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++ "Couldn't create per-process data area"));
++ goto unlock_and_return;
++ }
++ }
++
++ psBridgePackageKM->ui32BridgeID =
++ PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ switch (cmd) {
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData =
++ PRIVATE_DATA(pFile);
++
++ if (psPrivateData->hKernelMemInfo) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Can only export one MemInfo "
++ "per file descriptor", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
++ (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)
++ psBridgePackageKM->pvParamIn;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData =
++ PRIVATE_DATA(pFile);
++
++ if (!psPrivateData->hKernelMemInfo) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: File descriptor has no "
++ "associated MemInfo handle",
++ __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ psMapDevMemIN->hKernelMemInfo =
++ psPrivateData->hKernelMemInfo;
++ break;
++ }
++
++ default:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData =
++ PRIVATE_DATA(pFile);
++
++ if (psPrivateData->hKernelMemInfo) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Import/Export handle tried "
++ "to use privileged service",
++ __FUNCTION__));
++ goto unlock_and_return;
++ }
++ break;
++ }
++ }
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ switch (cmd) {
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ int authenticated = pFile->authenticated;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (authenticated) {
++ break;
++ }
++
++ psEnvPerProc =
++ (PVRSRV_ENV_PER_PROCESS_DATA *)
++ PVRSRVProcessPrivateData(psPerProc);
++ if (psEnvPerProc == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Process private data not allocated",
++ __FUNCTION__));
++ err = -EFAULT;
++ goto unlock_and_return;
++ }
++
++ list_for_each_entry(psPrivateData,
++ &psEnvPerProc->sDRMAuthListHead,
++ sDRMAuthListItem) {
++ struct drm_file *psDRMFile =
++ psPrivateData->psDRMFile;
++
++ if (pFile->master == psDRMFile->master) {
++ authenticated |=
++ psDRMFile->authenticated;
++ if (authenticated) {
++ break;
++ }
++ }
++ }
++
++ if (!authenticated) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Not authenticated for mapping device or device class memory",
++ __FUNCTION__));
++ err = -EPERM;
++ goto unlock_and_return;
++ }
++ break;
++ }
++ default:
++ break;
++ }
++#endif
++
++ err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
++ if (err != PVRSRV_OK)
++ goto unlock_and_return;
++
++ switch (cmd) {
++#if defined(PVR_SECURE_FD_EXPORT)
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT
++ =
++ (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)
++ psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData =
++ PRIVATE_DATA(pFile);
++
++ psPrivateData->hKernelMemInfo =
++ psExportDeviceMemOUT->hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ psExportDeviceMemOUT->ui64Stamp =
++ psPrivateData->ui64Stamp = ++ui64Stamp;
++#endif
++ break;
++ }
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)
++ psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData =
++ PRIVATE_DATA(pFile);
++ psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp =
++ psPrivateData->ui64Stamp;
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY
++ *psDeviceClassMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)
++ psBridgePackageKM->pvParamOut;
++ psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp =
++ ++ui64Stamp;
++ break;
++ }
++#endif
++
++ default:
++ break;
++ }
++
++unlock_and_return:
++ mutex_unlock(&gPVRSRVLock);
++ return err;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/pvr_debug.c
+@@ -0,0 +1,413 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/hardirq.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/tty.h>
++#include <linux/mutex.h>
++#include <stdarg.h>
++#include "img_types.h"
++#include "servicesext.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "linkage.h"
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++#define PVR_MAX_FILEPATH_LEN 256
++
++static u32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#endif
++
++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
++
++static char gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
++
++static char gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
++
++static struct mutex gsDebugMutexNonIRQ;
++
++static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
++
++#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
++
++static inline void GetBufferLock(unsigned long *pulLockFlags)
++{
++ if (USE_SPIN_LOCK) {
++ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
++ } else {
++ mutex_lock(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void ReleaseBufferLock(unsigned long ulLockFlags)
++{
++ if (USE_SPIN_LOCK) {
++ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
++ } else {
++ mutex_unlock(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void SelectBuffer(char **ppszBuf, u32 * pui32BufSiz)
++{
++ if (USE_SPIN_LOCK) {
++ *ppszBuf = gszBufferIRQ;
++ *pui32BufSiz = sizeof(gszBufferIRQ);
++ } else {
++ *ppszBuf = gszBufferNonIRQ;
++ *pui32BufSiz = sizeof(gszBufferNonIRQ);
++ }
++}
++
++static int VBAppend(char *pszBuf, u32 ui32BufSiz, const char *pszFormat,
++ va_list VArgs)
++{
++ u32 ui32Used;
++ u32 ui32Space;
++ s32 i32Len;
++
++ ui32Used = strlen(pszBuf);
++ BUG_ON(ui32Used >= ui32BufSiz);
++ ui32Space = ui32BufSiz - ui32Used;
++
++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
++ pszBuf[ui32BufSiz - 1] = 0;
++
++ return (i32Len < 0 || i32Len >= ui32Space);
++}
++
++void PVRDPFInit(void)
++{
++ mutex_init(&gsDebugMutexNonIRQ);
++}
++
++void PVRSRVReleasePrintf(const char *pszFormat, ...)
++{
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ char *pszBuf;
++ u32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++ strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ } else {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++ va_end(vaArgs);
++
++}
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++void PVRSRVDebugAssertFail(const char *pszFile, u32 uLine)
++{
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine,
++ "Debug assertion failed!");
++ BUG();
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++void PVRSRVTrace(const char *pszFormat, ...)
++{
++ va_list VArgs;
++ unsigned long ulLockFlags = 0;
++ char *pszBuf;
++ u32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(VArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ strncpy(pszBuf, "PVR: ", (ui32BufSiz - 1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ } else {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end(VArgs);
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++static int BAppend(char *pszBuf, u32 ui32BufSiz, const char *pszFormat, ...)
++{
++ va_list VArgs;
++ int bTrunc;
++
++ va_start(VArgs, pszFormat);
++
++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
++
++ va_end(VArgs);
++
++ return bTrunc;
++}
++
++void PVRSRVDebugPrintf(u32 ui32DebugLevel,
++ const char *pszFullFileName,
++ u32 ui32Line, const char *pszFormat, ...
++ )
++{
++ int bTrace, bDebug;
++ const char *pszFileName = pszFullFileName;
++ char *pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug) {
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ char *pszBuf;
++ u32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ if (bDebug) {
++ switch (ui32DebugLevel) {
++ case DBGPRIV_FATAL:
++ {
++ strncpy(pszBuf, "PVR_K:(Fatal): ",
++ (ui32BufSiz - 1));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy(pszBuf, "PVR_K:(Error): ",
++ (ui32BufSiz - 1));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy(pszBuf, "PVR_K:(Warning): ",
++ (ui32BufSiz - 1));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy(pszBuf, "PVR_K:(Message): ",
++ (ui32BufSiz - 1));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy(pszBuf, "PVR_K:(Verbose): ",
++ (ui32BufSiz - 1));
++ break;
++ }
++ default:
++ {
++ strncpy(pszBuf,
++ "PVR_K:(Unknown message level)",
++ (ui32BufSiz - 1));
++ break;
++ }
++ }
++ } else {
++ strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 1));
++ }
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n",
++ pszBuf);
++ } else {
++
++ if (!bTrace) {
++#ifdef DEBUG_LOG_PATH_TRUNCATE
++
++ static char
++ szFileNameRewrite[PVR_MAX_FILEPATH_LEN];
++
++ char *pszTruncIter;
++ char *pszTruncBackInter;
++
++ pszFileName =
++ pszFullFileName +
++ strlen(DEBUG_LOG_PATH_TRUNCATE) + 1;
++
++ strncpy(szFileNameRewrite, pszFileName,
++ PVR_MAX_FILEPATH_LEN);
++
++ if (strlen(szFileNameRewrite) ==
++ PVR_MAX_FILEPATH_LEN - 1) {
++ char szTruncateMassage[] =
++ "FILENAME TRUNCATED";
++ strcpy(szFileNameRewrite +
++ (PVR_MAX_FILEPATH_LEN - 1 -
++ strlen(szTruncateMassage)),
++ szTruncateMassage);
++ }
++
++ pszTruncIter = szFileNameRewrite;
++ while (*pszTruncIter++ != 0) {
++ char *pszNextStartPoint;
++
++ if (!
++ ((*pszTruncIter == '/'
++ && (pszTruncIter - 4 >=
++ szFileNameRewrite))
++ && (*(pszTruncIter - 1) == '.')
++ && (*(pszTruncIter - 2) == '.')
++ && (*(pszTruncIter - 3) == '/'))
++ )
++ continue;
++
++ pszTruncBackInter = pszTruncIter - 3;
++ while (*(--pszTruncBackInter) != '/') {
++ if (pszTruncBackInter <=
++ szFileNameRewrite)
++ break;
++ }
++ pszNextStartPoint = pszTruncBackInter;
++
++ while (*pszTruncIter != 0) {
++ *pszTruncBackInter++ =
++ *pszTruncIter++;
++ }
++ *pszTruncBackInter = 0;
++
++ pszTruncIter = pszNextStartPoint;
++ }
++
++ pszFileName = szFileNameRewrite;
++
++ if (*pszFileName == '/')
++ pszFileName++;
++#endif
++
++#if !defined(__sh__)
++ pszLeafName =
++ (char *)strrchr(pszFileName, '\\');
++
++ if (pszLeafName) {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ if (BAppend
++ (pszBuf, ui32BufSiz, " [%lu, %s]", ui32Line,
++ pszFileName)) {
++ printk(KERN_INFO
++ "PVR_K:(Message Truncated): %s\n",
++ pszBuf);
++ } else {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ } else {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end(vaArgs);
++ }
++}
++
++#endif
++
++#if defined(DEBUG)
++
++void PVRDebugSetLevel(u32 uDebugLevel)
++{
++ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",
++ (u32) uDebugLevel);
++
++ gPVRDebugLevel = uDebugLevel;
++}
++
++int PVRDebugProcSetLevel(struct file *file, const char *buffer, u32 count,
++ void *data)
++{
++#define _PROC_SET_BUFFER_SZ 2
++ char data_buffer[_PROC_SET_BUFFER_SZ];
++
++ if (count != _PROC_SET_BUFFER_SZ) {
++ return -EINVAL;
++ } else {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRDebugSetLevel(data_buffer[0] - '0');
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile, void *el)
++{
++ seq_printf(sfile, "%lu\n", gPVRDebugLevel);
++}
++
++#else
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count,
++ int *eof, void *data)
++{
++ if (off == 0) {
++ *start = (char *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/pvr_drm.c
+@@ -0,0 +1,300 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SUPPORT_DRI_DRM)
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <asm/ioctl.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "pvr_bridge.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "pvrversion.h"
++#include "lock.h"
++#include "linkage.h"
++#include "pvr_drm.h"
++
++#define PVR_DRM_NAME PVRSRV_MODNAME
++#define PVR_DRM_DESC "Imagination Technologies PVR DRM"
++
++#define PVR_PCI_IDS \
++ {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0, 0, 0}
++
++struct pci_dev *gpsPVRLDMDev;
++struct drm_device *gpsPVRDRMDev;
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
++#error "Linux kernel version 2.6.25 or later required for PVR DRM support"
++#endif
++
++#define PVR_DRM_FILE struct drm_file *
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static struct pci_device_id asPciIdList[] = {
++ PVR_PCI_IDS
++};
++#endif
++
++static int PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
++{
++ int iRes;
++
++ PVR_TRACE(("PVRSRVDrmLoad"));
++
++ gpsPVRDRMDev = dev;
++ gpsPVRLDMDev = dev->pdev;
++
++#if defined(PDUMP)
++ iRes = dbgdrv_init();
++ if (iRes != 0) {
++ return iRes;
++ }
++#endif
++
++ iRes = PVRCore_Init();
++ if (iRes != 0) {
++ goto exit_dbgdrv_cleanup;
++ }
++#if defined(DISPLAY_CONTROLLER)
++ iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init) (dev);
++ if (iRes != 0) {
++ goto exit_pvrcore_cleanup;
++ }
++#endif
++ return 0;
++
++#if defined(DISPLAY_CONTROLLER)
++exit_pvrcore_cleanup:
++ PVRCore_Cleanup();
++#endif
++exit_dbgdrv_cleanup:
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++ return iRes;
++}
++
++static int PVRSRVDrmUnload(struct drm_device *dev)
++{
++ PVR_TRACE(("PVRSRVDrmUnload"));
++
++#if defined(DISPLAY_CONTROLLER)
++ PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup) (dev);
++#endif
++
++ PVRCore_Cleanup();
++
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++
++ return 0;
++}
++
++static int PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVOpen(dev, file);
++}
++
++static void PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ PVRSRVRelease(dev, file);
++}
++
++static int
++PVRDRMIsMaster(struct drm_device *dev, void *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++int
++PVRDRM_Dummy_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++#endif
++
++static int
++PVRDRMPCIBusIDField(struct drm_device *dev, u32 * pui32Field, u32 ui32FieldType)
++{
++ struct pci_dev *psPCIDev = (struct pci_dev *)dev->pdev;
++
++ switch (ui32FieldType) {
++ case PVR_DRM_PCI_DOMAIN:
++ *pui32Field = pci_domain_nr(psPCIDev->bus);
++ break;
++
++ case PVR_DRM_PCI_BUS:
++ *pui32Field = psPCIDev->bus->number;
++ break;
++
++ case PVR_DRM_PCI_DEV:
++ *pui32Field = PCI_SLOT(psPCIDev->devfn);
++ break;
++
++ case PVR_DRM_PCI_FUNC:
++ *pui32Field = PCI_FUNC(psPCIDev->devfn);
++ break;
++
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++static int
++PVRDRMUnprivCmd(struct drm_device *dev, void *arg, struct drm_file *pFile)
++{
++ u32 *pui32Args = (u32 *) arg;
++ u32 ui32Cmd = pui32Args[0];
++ u32 ui32Arg1 = pui32Args[1];
++ u32 *pui32OutArg = (u32 *) arg;
++ s32 ret = 0;
++
++ mutex_lock(&gPVRSRVLock);
++
++ switch (ui32Cmd) {
++ case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
++ *pui32OutArg =
++ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1
++ : 0;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_TYPE:
++ *pui32OutArg = PVR_DRM_BUS_TYPE_PCI;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_FIELD:
++ ret = PVRDRMPCIBusIDField(dev, pui32OutArg, ui32Arg1);
++
++ default:
++ ret = -EFAULT;
++ }
++
++ mutex_unlock(&gPVRSRVLock);
++
++ return ret;
++}
++
++#define PVR_DRM_FOPS_IOCTL .unlocked_ioctl
++#define PVR_DRM_UNLOCKED DRM_UNLOCKED
++
++struct drm_ioctl_desc sPVRDrmIoctls[] = {
++ DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM,
++ PVR_DRM_UNLOCKED),
++ DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster,
++ DRM_MASTER | PVR_DRM_UNLOCKED),
++ DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, PVR_DRM_UNLOCKED),
++#if defined(PDUMP)
++ DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, PVR_DRM_UNLOCKED),
++#endif
++};
++
++static int pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
++
++static struct drm_driver sPVRDrmDriver = {
++ .driver_features = 0,
++ .dev_priv_size = 0,
++ .load = PVRSRVDrmLoad,
++ .unload = PVRSRVDrmUnload,
++ .open = PVRSRVDrmOpen,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .ioctls = sPVRDrmIoctls,
++ .fops = {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ PVR_DRM_FOPS_IOCTL = drm_ioctl,
++ .mmap = PVRMMap,
++ .poll = drm_poll,
++ .fasync = drm_fasync,
++ },
++ .pci_driver = {
++ .name = PVR_DRM_NAME,
++ .id_table = asPciIdList,
++ },
++
++ .name = PVR_DRM_NAME,
++ .desc = PVR_DRM_DESC,
++ .date = PVR_BUILD_DATE,
++ .major = PVRVERSION_MAJ,
++ .minor = PVRVERSION_MIN,
++ .patchlevel = PVRVERSION_BUILD,
++};
++
++static int __init PVRSRVDrmInit(void)
++{
++ int iRes;
++ sPVRDrmDriver.num_ioctls = pvr_max_ioctl;
++
++ PVRDPFInit();
++
++ iRes = drm_init(&sPVRDrmDriver);
++
++ return iRes;
++}
++
++static void __exit PVRSRVDrmExit(void)
++{
++ drm_exit(&sPVRDrmDriver);
++}
++
++module_init(PVRSRVDrmInit);
++module_exit(PVRSRVDrmExit);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/pvr_drm.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_H__)
++#define __PVR_DRM_H__
++
++#include <linux/version.h>
++#include "pvr_drm_shared.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y
++#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y)
++
++s32 PVRCore_Init(void);
++void PVRCore_Cleanup(void);
++int PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
++int PVRSRVRelease(struct drm_device *dev, struct drm_file *pFile);
++int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state);
++int PVRSRVDriverResume(struct drm_device *pDevice);
++
++#if defined(INTEL_D3_CHANGES) && (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,23))
++int
++PVRSRV_BridgeDispatchKM(struct inode *pInode, struct file *pFile, u32 unref__ ioctlCmd, u32 arg);
++#else
++int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, struct drm_file *pFile);
++#endif
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++int PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
++int PVRSRVDrmUnload(struct drm_device *dev);
++int PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
++void PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
++int PVRDRMIsMaster(struct drm_device *dev, void *arg, struct drm_file *pFile);
++int PVRDRMUnprivCmd(struct drm_device *dev, void *arg, struct drm_file *pFile);
++int PVRDRM_Dummy_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile);
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++#if defined(DISPLAY_CONTROLLER)
++extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *);
++extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *);
++#endif
++
++#if defined(PDUMP)
++int dbgdrv_init(void);
++void dbgdrv_cleanup(void);
++int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++#define PVR_DRM_SRVKM_IOCTL _IO(0, PVR_DRM_SRVKM_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL _IO(0, PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL _IO(0, PVR_DRM_UNPRIV_CMD)
++#define PVR_DRM_DBGDRV_IOCTL _IO(0, PVR_DRM_DBGDRV_CMD)
++#endif
++
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/include/syscommon.h
+@@ -0,0 +1,201 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"
++#include "sysinfo.h"
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)
++#include <asm/io.h>
++#endif
++
++typedef struct _SYS_DEVICE_ID_TAG
++{
++ u32 uiID;
++ int bInUse;
++
++} SYS_DEVICE_ID;
++
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4
++
++typedef struct _SYS_DATA_TAG_
++{
++ u32 ui32NumDevices;
++ SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT];
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++ PVRSRV_POWER_DEV *psPowerDeviceList;
++ PVRSRV_RESOURCE sPowerStateChangeResource;
++ PVRSRV_SYS_POWER_STATE eCurrentPowerState;
++ PVRSRV_SYS_POWER_STATE eFailedPowerState;
++ u32 ui32CurrentOSPowerState;
++ PVRSRV_QUEUE_INFO *psQueueList;
++ PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList;
++ void * pvEnvSpecificData;
++ void * pvSysSpecificData;
++ PVRSRV_RESOURCE sQProcessResource;
++ void *pvSOCRegsBase;
++ void * hSOCTimerRegisterOSMemHandle;
++ u32 *pvSOCTimerRegisterKM;
++ void *pvSOCClockGateRegsBase;
++ u32 ui32SOCClockGateRegsSize;
++ PFN_CMD_PROC *ppfnCmdProcList[SYS_DEVICE_COUNT];
++
++
++
++ PCOMMAND_COMPLETE_DATA *ppsCmdCompleteData[SYS_DEVICE_COUNT];
++
++
++ int bReProcessQueues;
++
++ RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS];
++
++ char *pszVersionString;
++ PVRSRV_EVENTOBJECT *psGlobalEventObject;
++
++ int bFlushAll;
++
++} SYS_DATA;
++
++
++
++PVRSRV_ERROR SysInitialise(void);
++PVRSRV_ERROR SysFinalise(void);
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ void **ppvDeviceMap);
++
++void SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++void SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++u32 SysGetInterruptSource(SYS_DATA *psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++void SysClearInterrupts(SYS_DATA* psSysData, u32 ui32ClearBits);
++
++PVRSRV_ERROR SysResetDevice(u32 ui32DeviceIndex);
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysDevicePrePowerState(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR SysDevicePostPowerState(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData);
++void SysPowerLockUnwrap(SYS_DATA *psSysData);
++#endif
++
++PVRSRV_ERROR SysOEMFunction ( u32 ui32ID,
++ void *pvIn,
++ u32 ulInSize,
++ void *pvOut,
++ u32 ulOutSize);
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
++#if defined(PVR_LMA)
++int SysVerifyCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR CpuPAddr);
++int SysVerifySysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++#endif
++
++extern SYS_DATA* gpsSysData;
++
++#if !defined(USE_CODE)
++
++static inline PVRSRV_ERROR SysAcquireData(SYS_DATA **ppsSysData)
++{
++
++ *ppsSysData = gpsSysData;
++
++
++
++
++
++ if (!gpsSysData)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static inline PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVInit(psSysData);
++
++ return eError;
++}
++
++static inline void SysDeinitialiseCommon(SYS_DATA *psSysData)
++{
++
++ PVRSRVDeInit(psSysData);
++
++ OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++#endif
++
++
++#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__))
++#define SysReadHWReg(p, o) OSReadHWReg(p, o)
++#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
++#else
++static inline u32 SysReadHWReg(void * pvLinRegBaseAddr, u32 ui32Offset)
++{
++ return (u32) readl(pvLinRegBaseAddr + ui32Offset);
++}
++
++static inline void SysWriteHWReg(void * pvLinRegBaseAddr, u32 ui32Offset, u32 ui32Value)
++{
++ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
++}
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/graphics_pm.c
+@@ -0,0 +1,262 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009-2010 Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++#ifdef INTEL_D3_PM
++
++#include "graphics_pm.h"
++#include "intel_ce_pm.h"
++
++#include "services.h"
++#include "power.h"
++
++#include "osal.h"
++
++//#define GRAPHICS_PM_DEBUG_PRINT printk
++#define GRAPHICS_PM_DEBUG_PRINT(x...)
++
++#define GRAPHICS_PM_COMPONENT_NAME "graphics"
++
++#define GRAPHICS_PM_STATE_UNINITIALIZED (-1)
++#define GRAPHICS_PM_STATE_SUSPENDED ( 0)
++#define GRAPHICS_PM_STATE_IDLE ( 1)
++#define GRAPHICS_PM_STATE_BUSY ( 2)
++
++/* graphics_pm_state is used to track the power management state.
++ *
++ * Expected state transitions are:
++ * From: To: By:
++ * UNINITIALIZED IDLE graphics_pm_init()
++ * SUSPENDED UNINITIALIZED graphics_pm_deinit()
++ * IDLE UNINITIALIZED graphics_pm_deinit()
++ * BUSY UNINITIALIZED graphics_pm_deinit()
++ * IDLE SUSPENDED graphics_pm_suspend()
++ * SUSPENDED IDLE graphics_pm_resume()
++ * IDLE BUSY graphics_pm_set_busy()
++ * BUSY IDLE graphics_pm_set_idle()
++ *
++ * Other state transitions are not allowed. In particular, direct transition
++ * from BUSY to SUSPENDED by graphics_pm_suspend() is rejected and the only
++ * valid transition from UNINITIALIZED is to IDLE by graphics_pm_init().
++ *
++ */
++
++volatile static int graphics_pm_state = GRAPHICS_PM_STATE_UNINITIALIZED;
++
++static os_sema_t graphics_pm_semaphore;
++
++static int graphics_pm_suspend(struct pci_dev *dev, pm_message_t state)
++{
++ int rc = GRAPHICS_PM_OK;
++ int ret_val = 0;
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_suspend: Hello\n");
++
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state);
++ if (GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state) {
++ os_sema_get(&graphics_pm_semaphore);
++ if (GRAPHICS_PM_STATE_BUSY == graphics_pm_state) {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++ if (GRAPHICS_PM_STATE_IDLE == graphics_pm_state) {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) !=
++ PVRSRV_OK) {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ graphics_pm_state = GRAPHICS_PM_STATE_SUSPENDED;
++ }
++ }
++ os_sema_put(&graphics_pm_semaphore);
++ } else {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ ret_val = 0;
++ } else {
++ ret_val = -EBUSY;
++ }
++
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_suspend: Goodbye\n");
++ return ret_val;
++}
++
++static int graphics_pm_resume(struct pci_dev *dev)
++{
++ int rc = GRAPHICS_PM_OK;
++ int ret_val = 0;
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_resume: Hello\n");
++
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state);
++ if (GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state) {
++ os_sema_get(&graphics_pm_semaphore);
++ if (GRAPHICS_PM_STATE_SUSPENDED == graphics_pm_state) {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) !=
++ PVRSRV_OK) {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ graphics_pm_state = GRAPHICS_PM_STATE_IDLE;
++ }
++ }
++ os_sema_put(&graphics_pm_semaphore);
++ } else {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ ret_val = 0;
++ } else {
++ ret_val = -EBUSY;
++ }
++
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_resume: Goodbye\n");
++ return ret_val;
++}
++
++static icepm_functions_t graphics_pm_functions = {
++ graphics_pm_suspend,
++ graphics_pm_resume
++};
++
++int graphics_pm_init(void)
++{
++ int rc = GRAPHICS_PM_OK;
++ icepm_ret_t icepm_rc = ICEPM_OK;
++ osal_result osal_rc = OSAL_SUCCESS;
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_init: Hello\n");
++
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED == graphics_pm_state);
++ if (GRAPHICS_PM_STATE_UNINITIALIZED == graphics_pm_state) {
++ osal_rc = os_sema_init(&graphics_pm_semaphore, 1);
++ if (OSAL_SUCCESS != osal_rc) {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ os_sema_get(&graphics_pm_semaphore);
++
++ icepm_rc =
++ icepm_device_register(GRAPHICS_PM_COMPONENT_NAME,
++ &graphics_pm_functions);
++ if (ICEPM_OK != icepm_rc) {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ graphics_pm_state = GRAPHICS_PM_STATE_IDLE;
++ }
++
++ os_sema_put(&graphics_pm_semaphore);
++ }
++ } else {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_init: Goodbye\n");
++ return rc;
++}
++
++int graphics_pm_deinit(void)
++{
++ int rc = GRAPHICS_PM_OK;
++ icepm_ret_t icepm_rc = ICEPM_OK;
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_deinit: Hello\n");
++
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state);
++ if (GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state) {
++ os_sema_get(&graphics_pm_semaphore);
++
++ icepm_rc = icepm_device_unregister(GRAPHICS_PM_COMPONENT_NAME);
++ if (ICEPM_OK != icepm_rc) {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ if (GRAPHICS_PM_OK == rc) {
++ graphics_pm_state = GRAPHICS_PM_STATE_UNINITIALIZED;
++ }
++
++ os_sema_put(&graphics_pm_semaphore);
++ os_sema_destroy(&graphics_pm_semaphore);
++ } else {
++ rc = GRAPHICS_PM_ERR_FAILED;
++ }
++
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_deinit: Goodbye\n");
++ return rc;
++}
++
++void graphics_pm_set_idle(void)
++{
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_set_idle: Hello\n");
++
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state);
++ if (GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state) {
++ os_sema_get(&graphics_pm_semaphore);
++
++ assert(GRAPHICS_PM_STATE_BUSY == graphics_pm_state);
++ if (GRAPHICS_PM_STATE_BUSY == graphics_pm_state) {
++ graphics_pm_state = GRAPHICS_PM_STATE_IDLE;
++ }
++ os_sema_put(&graphics_pm_semaphore);
++ }
++
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_set_idle: Goodbye\n");
++ return;
++}
++
++void graphics_pm_set_busy(void)
++{
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_set_busy: Hello\n");
++
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state);
++ if (GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state) {
++ os_sema_get(&graphics_pm_semaphore);
++
++ assert(GRAPHICS_PM_STATE_IDLE == graphics_pm_state);
++ if (GRAPHICS_PM_STATE_IDLE == graphics_pm_state) {
++ graphics_pm_state = GRAPHICS_PM_STATE_BUSY;
++ }
++ os_sema_put(&graphics_pm_semaphore);
++ }
++
++ GRAPHICS_PM_DEBUG_PRINT("graphics_pm_set_busy: Goodbye\n");
++ return;
++}
++
++void graphics_pm_wait_not_suspended(void)
++{
++ assert(GRAPHICS_PM_STATE_UNINITIALIZED != graphics_pm_state);
++
++ while (GRAPHICS_PM_STATE_SUSPENDED == graphics_pm_state) {
++ OS_SLEEP(1);
++ }
++
++ return;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/graphics_pm.h
+@@ -0,0 +1,51 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2009 Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++#ifdef INTEL_D3_PM
++
++#if !defined(__GRAPHICS_PM_H__)
++#define __GRAPHICS_PM_H__
++
++#define GRAPHICS_PM_OK 0
++#define GRAPHICS_PM_ERR_FAILED 1
++
++/* Initialize power management for graphics component */
++int graphics_pm_init(void );
++
++/* De-initialize power management for graphics component */
++int graphics_pm_deinit(void );
++
++/* Set pm state to idle */
++void graphics_pm_set_idle(void );
++
++/* Set pm state to busy */
++void graphics_pm_set_busy(void );
++
++/* Returns when the pm state is not suspended */
++void graphics_pm_wait_not_suspended(void );
++
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/oemfuncs.h
+@@ -0,0 +1,79 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2008 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++
++/* This file was based on the Imagination Technologies sample implementation. */
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define OEM_EXCHANGE_POWER_STATE (1<<0)
++#define OEM_DEVICE_MEMORY_POWER (1<<1)
++#define OEM_DISPLAY_POWER (1<<2)
++#define OEM_GET_EXT_FUNCS (1<<3)
++
++typedef struct OEM_ACCESS_INFO_TAG
++{
++ u32 ui32Size;
++ u32 ui32FBPhysBaseAddress;
++ u32 ui32FBMemAvailable;
++ u32 ui32SysPhysBaseAddress;
++ u32 ui32SysSize;
++ u32 ui32DevIRQ;
++} OEM_ACCESS_INFO, *POEM_ACCESS_INFO;
++
++
++typedef u32 (*PFN_SRV_BRIDGEDISPATCH)( u32 Ioctl,
++ unsigned char *pInBuf,
++ u32 InBufLen,
++ unsigned char *pOutBuf,
++ u32 OutBufLen,
++ u32 *pdwBytesTransferred);
++
++
++typedef PVRSRV_ERROR (*PFN_SRV_READREGSTRING)(PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
++ PFN_SRV_READREGSTRING pfnOEMReadRegistryString;
++ PFN_SRV_READREGSTRING pfnOEMWriteRegistryString;
++
++} PVRSRV_DC_OEM_JTABLE;
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysconfig.c
+@@ -0,0 +1,884 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2008-2010 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++
++/* This file was based on the Imagination Technologies sample implementation. */
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "pdump_km.h"
++#include "sgxinfokm.h"
++#include "syslocal.h"
++
++#ifdef __linux__
++#include "mm.h"
++#endif
++
++#ifdef LDM_PCI
++#include "linux/pci.h"
++#endif
++
++#ifdef INTEL_D3_PM
++#include "graphics_pm.h"
++#endif
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (500)
++
++SYS_DATA *gpsSysData = (SYS_DATA *) NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++static u32 gui32SGXDeviceID;
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++
++#ifdef LDM_PCI
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++u32 PVRSRV_BridgeDispatchKM(u32 Ioctl,
++ unsigned char * pInBuf,
++ u32 InBufLen,
++ unsigned char * pOutBuf,
++ u32 OutBufLen, u32 * pdwBytesTransferred);
++
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA * psSysData)
++{
++ u32 ui32SGXRegBaseAddr, ui32SGXMemBaseAddr;
++ u32 ui32IRQ;
++
++#ifdef INTEL_D3_NO_PCI_ENUM
++ ui32SGXRegBaseAddr = 0xDC000000;
++ ui32IRQ = 4;
++#else
++
++#define SGX_PCI_BUS 1
++#define SGX_PCI_DEV 2
++#define SGX_PCI_FN 0
++
++#define BAR0 0x10
++
++#define IRQ_LINE 0x3C
++#define IRQ_LINE_MASK 0xFF
++
++ ui32SGXRegBaseAddr = OSPCIReadDword(SGX_PCI_BUS,
++ SGX_PCI_DEV, SGX_PCI_FN, BAR0);
++ ui32IRQ = OSPCIReadDword(SGX_PCI_BUS,
++ SGX_PCI_DEV,
++ SGX_PCI_FN, IRQ_LINE) & IRQ_LINE_MASK;
++#endif
++
++ // This address was only ever used in the addr translation functions. We took
++ // that out though, so it isn't used anywhere
++#ifdef INTEL_D3_LOCALMEM
++ ui32SGXMemBaseAddr = 0x18000000;
++#else
++ ui32SGXMemBaseAddr = 0;
++#endif
++
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr =
++ ui32SGXRegBaseAddr + SYS_SGX_REG_OFFSET;
++ //gsSGXDeviceMap.sRegsDevPBase = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.sRegsCpuPBase =
++ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SYS_SGX_REG_SIZE;
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = ui32SGXMemBaseAddr;
++ gsSGXDeviceMap.sLocalMemDevPBase =
++ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX,
++ gsSGXDeviceMap.sLocalMemSysPBase);
++ gsSGXDeviceMap.sLocalMemCpuPBase =
++ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sLocalMemSysPBase);
++ gsSGXDeviceMap.ui32LocalMemSize = SYS_LOCALMEM_FOR_SGX_RESERVE_SIZE;
++
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysInitialise(void)
++{
++ u32 i;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION *psTimingInfo;
++
++ gpsSysData = &gsSysData;
++ memset(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#ifdef LDM_PCI
++
++ PVR_ASSERT(gpsPVRLDMDev != NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_ENVDATA);
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = 1;
++#else
++ psTimingInfo->bEnableActivePM = 0;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms =
++ SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_PCINIT);
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++ for (i = 0; i < SYS_DEVICE_COUNT; i++) {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = 0;
++ }
++
++ gpsSysData->psDeviceNodeList = NULL;
++ gpsSysData->psQueueList = NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
++
++ eError = SysInitRegisters();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitRegisters: Failed to initialise registers"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_INITREG);
++
++#if defined(READ_TCF_HOST_MEM_SIGNATURE)
++ SysTestTCFHostMemSig(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.sLocalMemCpuPBase);
++#endif
++
++ eError =
++ PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice, 0x00000001,
++ &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_REGDEV);
++
++#ifdef INTEL_D3_LOCALMEM
++ gpsSysData->apsLocalDevMemArena[0] = RA_Create("SGXLocalDeviceMemory",
++ gsSGXDeviceMap.
++ sLocalMemSysPBase.uiAddr,
++ gsSGXDeviceMap.
++ ui32LocalMemSize, NULL,
++ HOST_PAGESIZE(), NULL,
++ NULL, NULL, NULL);
++#else
++ gpsSysData->apsLocalDevMemArena[0] = NULL;
++#endif
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_RA_ARENA);
++
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++ while (psDeviceNode) {
++ switch (psDeviceNode->sDevId.eDeviceType) {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++ psDeviceNode->psLocalDevMemArena =
++ gpsSysData->apsLocalDevMemArena[0];
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap =
++ psDevMemoryInfo->psDeviceMemoryHeap;
++
++ for (i = 0; i < psDevMemoryInfo->ui32HeapCount;
++ i++) {
++#ifdef INTEL_D3_LOCALMEM
++ psDeviceMemoryHeap[i].ui32Attribs |=
++ PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG;
++#else
++ psDeviceMemoryHeap[i].ui32Attribs |=
++ PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++ //psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_CONTIG;
++#endif
++ psDeviceMemoryHeap[i].
++ psLocalDevMemArena =
++ gpsSysData->apsLocalDevMemArena[0];
++
++ }
++ break;
++ }
++ default:
++ break;
++ }
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT);
++
++ eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_INITDEV);
++
++#ifdef INTEL_D3_PM
++ {
++ int pm_rc = graphics_pm_init();
++ if (GRAPHICS_PM_OK != pm_rc) {
++ eError = PVRSRV_ERROR_INIT_FAILURE;
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitialise: Failed to initialise power management!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_GRAPHICS_PM);
++ }
++#endif
++
++ PVR_DPF((PVR_DBG_WARNING, "SysInitialise: OK 0x%x",
++ gsSysSpecificData.ui32SysSpecificData));
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysFinalise(void)
++{
++#if defined(SYS_USING_INTERRUPTS)
++ PVRSRV_ERROR eError;
++
++ eError = OSInstallMISR(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallMISR: Failed to install MISR"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_MISR);
++
++ eError = OSInstallSystemLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSInstallSystemLISR: Failed to install ISR"));
++ OSUninstallMISR(gpsSysData);
++ SysDeinitialise(gpsSysData);
++ gpsSysData = NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR);
++ SysEnableInterrupts(gpsSysData);
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_IRQ);
++#endif
++
++ gpsSysData->pszVersionString =
++ SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
++ if (!gpsSysData->pszVersionString) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysFinalise: Failed to create a system version string"));
++ } else {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s",
++ gpsSysData->pszVersionString));
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA * psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData;
++ PVRSRV_ERROR eError;
++
++ if (psSysData == NULL) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysDeinitialise: Called with NULL SYS_DATA pointer. Probably called before."));
++ return PVRSRV_OK;
++ }
++
++ psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef INTEL_D3_PM
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_GRAPHICS_PM)) {
++ int pm_rc = graphics_pm_deinit();
++ if (GRAPHICS_PM_OK != pm_rc) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysDeinitialise: Failed to deinitialise power management!"));
++ /* Continue with deinit even if failed to deinit power management */
++ }
++ }
++#endif
++
++#if defined(SYS_USING_INTERRUPTS)
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_IRQ)) {
++ SysDisableInterrupts(psSysData);
++ }
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR)) {
++ eError = OSUninstallSystemLISR(psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysDeinitialise: OSUninstallSystemLISR failed"));
++ return eError;
++ }
++ }
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_MISR)) {
++ eError = OSUninstallMISR(psSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysDeinitialise: OSUninstallMISR failed"));
++ return eError;
++ }
++ }
++#endif
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_INITDEV)) {
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_RA_ARENA)) {
++#ifdef INTEL_D3_LOCALMEM
++ RA_Delete(gpsSysData->apsLocalDevMemArena[0]);
++#endif
++ gpsSysData->apsLocalDevMemArena[0] = NULL;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA)) {
++ eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT)) {
++ PDUMPDEINIT();
++ }
++
++ psSysSpecData->ui32SysSpecificData = 0;
++
++ gpsSysData = NULL;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ void **ppvDeviceMap)
++{
++
++ switch (eDeviceType) {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ *ppvDeviceMap = (void *)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++
++ return sys_paddr;
++}
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++void SysRegisterExternalDevice(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++}
++
++void SysRemoveExternalDevice(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++}
++
++PVRSRV_ERROR SysResetDevice(u32 ui32DeviceIndex)
++{
++ if (ui32DeviceIndex == gui32SGXDeviceID) {
++ SysResetSGX(gpsSysData->pvSOCRegsBase);
++ } else {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysOEMFunction(u32 ui32ID,
++ void *pvIn,
++ u32 ulInSize, void *pvOut, u32 ulOutSize)
++{
++ if (ulInSize || pvIn) ;
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE))) {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable =
++ (PVRSRV_DC_OEM_JTABLE *) pvOut;
++ psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
++#if !defined(SERVICES4)
++ psOEMJTable->pfnOEMReadRegistryString =
++ &PVRSRVReadRegistryString;
++ psOEMJTable->pfnOEMWriteRegistryString =
++ &PVRSRVWriteRegistryString;
++#endif
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++void SysSaveRestoreArenaLiveSegments(int bSave)
++{
++ u32 uiBufferSize;
++ static void *pvBackupBuffer = NULL;
++ static void *hBlockAlloc;
++ static u32 uiWriteBufferSize = 0;
++ PVRSRV_ERROR eError;
++
++ uiBufferSize = 0;
++
++ if (gpsSysData->apsLocalDevMemArena[0] != NULL) {
++
++ if (PVRSRVSaveRestoreLiveSegments
++ ((void *)gpsSysData->apsLocalDevMemArena[0], NULL,
++ &uiBufferSize, bSave) == PVRSRV_OK) {
++ if (uiBufferSize) {
++ if (bSave && pvBackupBuffer == NULL) {
++ uiWriteBufferSize = uiBufferSize;
++
++ eError =
++ OSAllocPages(PVRSRV_HAP_KERNEL_ONLY
++ | PVRSRV_HAP_CACHED,
++ uiBufferSize,
++ HOST_PAGESIZE(),
++ &pvBackupBuffer,
++ &hBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSaveRestoreArenaLiveSegments: OSAllocPages(0x%x) failed:%lu",
++ uiBufferSize, eError));
++ return;
++ }
++ } else {
++ PVR_ASSERT(uiWriteBufferSize ==
++ uiBufferSize);
++ }
++
++ PVRSRVSaveRestoreLiveSegments((void *)
++ gpsSysData->
++ apsLocalDevMemArena
++ [0],
++ pvBackupBuffer,
++ &uiBufferSize,
++ bSave);
++
++ if (!bSave && pvBackupBuffer) {
++
++ eError =
++ OSFreePages(PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_CACHED,
++ uiWriteBufferSize,
++ pvBackupBuffer,
++ hBlockAlloc);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSaveRestoreArenaLiveSegments: OSFreePages(0x%x) failed:%lu",
++ uiBufferSize, eError));
++ }
++
++ pvBackupBuffer = NULL;
++ }
++ }
++ }
++ }
++
++}
++
++PVRSRV_ERROR SysMapInRegisters(void)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList) {
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psDeviceNodeList->pvDevice;
++ if (psDeviceNodeList->sDevId.eDeviceType ==
++ PVRSRV_DEVICE_TYPE_SGX) {
++
++ psDevInfo->pvRegsBaseKM =
++ OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++
++ if (!psDevInfo->pvRegsBaseKM) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysMapInRegisters : Failed to map in regs\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++
++ psDevInfo->s2DSlavePortKM.pvData =
++ OSMapPhysToLin(gsSGXDeviceMap.sSPCpuPBase,
++ gsSGXDeviceMap.ui32SPSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++
++ if (!psDevInfo->s2DSlavePortKM.pvData) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysMapInRegisters : Failed to map 2D Slave port region\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ psDevInfo->s2DSlavePortKM.ui32DataRange =
++ gsSGXDeviceMap.ui32SPSize;
++ psDevInfo->s2DSlavePortKM.sPhysBase =
++ gsSGXDeviceMap.sSPSysPBase;
++#endif
++
++ }
++
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysUnmapRegisters(void)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList) {
++ PVRSRV_SGXDEV_INFO *psDevInfo =
++ (PVRSRV_SGXDEV_INFO *) psDeviceNodeList->pvDevice;
++ if (psDeviceNodeList->sDevId.eDeviceType ==
++ PVRSRV_DEVICE_TYPE_SGX) {
++
++ if (psDevInfo->pvRegsBaseKM) {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++
++ psDevInfo->pvRegsBaseKM = NULL;
++ }
++
++ psDevInfo->pvRegsBaseKM = NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++
++ if (psDevInfo->s2DSlavePortKM.pvData) {
++ OSUnMapPhysToLin(psDevInfo->s2DSlavePortKM.
++ pvData,
++ gsSGXDeviceMap.ui32SPSize,
++ PVRSRV_HAP_KERNEL_ONLY |
++ PVRSRV_HAP_UNCACHED, NULL);
++
++ psDevInfo->s2DSlavePortKM.pvData = NULL;
++ }
++
++ psDevInfo->s2DSlavePortKM.pvData = NULL;
++ psDevInfo->s2DSlavePortKM.ui32DataRange = 0;
++ psDevInfo->s2DSlavePortKM.sPhysBase.uiAddr = 0;
++#endif
++
++ }
++
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState) {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState <
++ PVRSRV_SYS_POWER_STATE_D3)) {
++
++#if defined(SYS_USING_INTERRUPTS)
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_IRQ)) {
++ SysDisableInterrupts(gpsSysData);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_IRQ_DISABLE);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_IRQ);
++
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR)) {
++ eError = OSUninstallSystemLISR(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSystemPrePowerState: OSUninstallSystemLISR failed (%d)",
++ eError));
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR);
++ }
++#endif
++
++ SysUnmapRegisters();
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState) {
++ if ((gpsSysData->eCurrentPowerState ==
++ PVRSRV_SYS_POWER_STATE_D3)
++ && (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3)) {
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ eError = SysInitRegisters();
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSystemPostPowerState: Failed to Initialise registers"));
++ return eError;
++ }
++#if defined(SYS_USING_INTERRUPTS)
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR)) {
++ eError =
++ OSInstallSystemLISR(gpsSysData,
++ gsSGXDeviceMap.ui32IRQ);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysSystemPostPowerState: OSInstallSystemLISR failed to install ISR (%d)",
++ eError));
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_LISR);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST
++ (&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_IRQ_DISABLE)) {
++ SysEnableInterrupts(gpsSysData);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_ENABLE_IRQ);
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData,
++ SYS_SPECIFIC_DATA_PM_IRQ_DISABLE);
++ }
++#endif
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDevicePrePowerState(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if (ui32DeviceIndex == gui32SGXDeviceID) {
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_IDLE)) {
++#ifdef INTEL_D3_PM
++ graphics_pm_set_idle();
++#endif
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SysDevicePrePowerState: Remove SGX power"));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysDevicePostPowerState(u32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if (ui32DeviceIndex == gui32SGXDeviceID) {
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_IDLE)) {
++#ifdef INTEL_D3_PM
++ graphics_pm_set_busy();
++#endif
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "SysDevicePostPowerState: Restore SGX power"));
++ }
++ }
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysconfig.h
+@@ -0,0 +1,88 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2008-2009 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++
++/* This file was based on the Imagination Technologies sample implementation. */
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME "Intel(R) GMA 500 based on PowerVR SGX 535"
++
++#define SYS_SGX_USSE_COUNT (2)
++
++#define PCI_BASEREG_OFFSET_DWORDS 4
++
++#define SYS_SGX_REG_PCI_BASENUM 1
++#define SYS_SGX_REG_PCI_OFFSET (SYS_SGX_REG_PCI_BASENUM + PCI_BASEREG_OFFSET_DWORDS)
++
++#define SYS_SGX_REG_OFFSET 0x0
++#define SYS_SGX_REG_SIZE 0x4000
++
++#define SYS_SGX_SP_OFFSET 0x4000
++#define SYS_SGX_SP_SIZE 0x4000
++
++#define SYS_SGX_REG_REGION_SIZE 0x8000
++
++
++#define SYS_SGX_MEM_PCI_BASENUM 2
++#define SYS_SGX_MEM_PCI_OFFSET (SYS_SGX_MEM_PCI_BASENUM + PCI_BASEREG_OFFSET_DWORDS)
++
++#define SYS_SGX_MEM_REGION_SIZE 0x20000000
++
++
++#define SYS_SGX_DEV_VENDOR_ID 0x8086
++#define SYS_SGX_DEV_DEVICE_ID 0x2E5B
++
++#define SYS_LOCALMEM_FOR_SGX_RESERVE_SIZE (220*1024*1024)
++
++
++#define MEMTEST_MAP_SIZE (1024*1024)
++
++
++typedef struct
++{
++ union
++ {
++ u8 aui8PCISpace[256];
++ u16 aui16PCISpace[128];
++ u32 aui32PCISpace[64];
++ struct
++ {
++ u16 ui16VenID;
++ u16 ui16DevID;
++ u16 ui16PCICmd;
++ u16 ui16PCIStatus;
++ }s;
++ }u;
++
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysinfo.h
+@@ -0,0 +1,65 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2008-2009 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++
++/* This file was based on the Imagination Technologies sample implementation. */
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US (500000)
++#define WAIT_TRY_COUNT (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++ SYS_DEVICE_SGX = 0,
++
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 2
++
++
++
++#define SGX_SP_FIFO_DWSIZE 123
++
++#define SGX_SP_FIFO_RESERVEBYTES (SGX_SP_FIFO_DWSIZE & -4)
++#define SGX_SP_FIFO_MAXALLOWEDBYTES (SGX_SP_FIFO_DWSIZE * 4) - SGX_SP_FIFO_RESERVEBYTES
++
++#define SGX_EXTRACT_FIFO_COUNT(x) (((x) & SGX_INT_TA_FREEVCOUNT_MASK) >> SGX_INT_TA_FREEVCOUNT_SHIFT)
++
++
++#if !defined(USE_CODE)
++PVRSRV_ERROR SysInitRegisters(void);
++void SysResetSGX(void * pvRegsBaseKM);
++
++#endif
++
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/syslocal.h
+@@ -0,0 +1,85 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2008-2009 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++
++/* This file was based on the Imagination Technologies sample implementation. */
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++
++void SysEnableInterrupts(SYS_DATA *psSysData);
++void SysDisableInterrupts(SYS_DATA *psSysData);
++
++char *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
++
++
++
++#define SYS_SPECIFIC_DATA_ENABLE_IRQ 0x00000001UL
++#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002UL
++#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004UL
++#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008UL
++#define SYS_SPECIFIC_DATA_ENABLE_PCINIT 0x00000010UL
++#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000020UL
++#define SYS_SPECIFIC_DATA_ENABLE_RA_ARENA 0x00000040UL
++#define SYS_SPECIFIC_DATA_ENABLE_INITREG 0x00000080UL
++#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000100UL
++#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000200UL
++#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000400UL
++#define SYS_SPECIFIC_DATA_ENABLE_PCI_MEM 0x00000800UL
++#define SYS_SPECIFIC_DATA_ENABLE_PCI_REG 0x00001000UL
++#define SYS_SPECIFIC_DATA_ENABLE_PCI_ATL 0x00002000UL
++#define SYS_SPECIFIC_DATA_ENABLE_PCI_DEV 0x00004000UL
++#ifdef INTEL_D3_PM
++#define SYS_SPECIFIC_DATA_ENABLE_GRAPHICS_PM 0x00008000UL
++#endif
++
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS 0x00020000UL
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP 0x00080000UL
++#define SYS_SPECIFIC_DATA_PM_IRQ_DISABLE 0x00100000UL
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00200000UL
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) \
++ ((void)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) \
++ ((void)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) \
++ (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++ u32 ui32SysSpecificData;
++#ifdef LDM_PCI
++ struct pci_dev *psPCIDev;
++#endif
++} SYS_SPECIFIC_DATA;
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/ice4100/sgx535/system/sgx_intel_ce/sysutils.c
+@@ -0,0 +1,118 @@
++/**********************************************************************
++ *
++ * Copyright (c) 2008-2009 Intel Corporation.
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Intel Corporation
++ * 2200 Mission College Blvd.
++ * Santa Clara, CA 97052
++ *
++ ******************************************************************************/
++
++/* This file was based on the Imagination Technologies sample implementation. */
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "sgxinfo.h"
++#include "syslocal.h"
++
++PVRSRV_ERROR SysInitRegisters(void)
++{
++ SYS_DATA *psSysData;
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SysInitRegisters: Failed to get SysData"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++char *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
++{
++ static char aszVersionString[100];
++ void *pvRegsLinAddr;
++ SYS_DATA *psSysData;
++ u32 ui32SGXRevision;
++ s32 i32Count;
++
++ pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
++ SYS_SGX_REG_SIZE,
++ PVRSRV_HAP_UNCACHED |
++ PVRSRV_HAP_KERNEL_ONLY, NULL);
++ if (!pvRegsLinAddr) {
++ return NULL;
++ }
++
++ ui32SGXRevision =
++ OSReadHWReg((void *)((unsigned char *) pvRegsLinAddr +
++ SYS_SGX_REG_OFFSET), EUR_CR_CORE_REVISION);
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK) {
++ return NULL;
++ }
++
++ i32Count = snprintf(aszVersionString, 100,
++ "SGX revision = %u.%u.%u",
++ (u32) ((ui32SGXRevision &
++ EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (u32) ((ui32SGXRevision &
++ EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (u32) ((ui32SGXRevision &
++ EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++
++ OSUnMapPhysToLin(pvRegsLinAddr,
++ SYS_SGX_REG_SIZE,
++ PVRSRV_HAP_UNCACHED | PVRSRV_HAP_KERNEL_ONLY, NULL);
++
++ if (i32Count == -1) {
++ return NULL;
++ }
++
++ return aszVersionString;
++}
++
++void SysResetSGX(void *pvRegsBaseKM)
++{
++}
++
++void SysEnableInterrupts(SYS_DATA * psSysData)
++{
++}
++
++void SysDisableInterrupts(SYS_DATA * psSysData)
++{
++}
++
++u32 SysGetInterruptSource(SYS_DATA * psSysData,
++ PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ return 0x00000001;
++}
++
++void SysClearInterrupts(SYS_DATA * psSysData, u32 ui32InterruptBits)
++{
++}
+--- /dev/null
++++ b/drivers/staging/ifx-mux/Kconfig
+@@ -0,0 +1,4 @@
++config N_IFX_MUX
++ tristate "IFX SPI mux line discipline support"
++ help
++ Allows mux support related to IFX modem.
+--- /dev/null
++++ b/drivers/staging/ifx-mux/Makefile
+@@ -0,0 +1,3 @@
++n_ifx6x60-objs := gsm0710.o ifx_spi_mux.o crc8.o
++
++obj-$(CONFIG_N_IFX_MUX) += n_ifx6x60.o
+--- /dev/null
++++ b/drivers/staging/ifx-mux/crc8.c
+@@ -0,0 +1,63 @@
++/*
++ * crc8.c
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2. See the file COPYING for more details.
++ */
++
++#include <linux/types.h>
++#include <linux/module.h>
++
++const u8 crc8_table[256] = {
++ 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
++ 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
++ 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
++ 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
++ 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
++ 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
++ 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
++ 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
++ 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
++ 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
++ 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
++ 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
++ 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
++ 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
++ 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
++ 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
++ 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
++ 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
++ 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
++ 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
++ 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
++ 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
++ 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
++ 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
++ 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
++ 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
++ 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
++ 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
++ 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
++ 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
++ 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
++ 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
++};
++
++/**
++ * crc8 - update the CRC8 for the data buffer
++ * @crc: previous CRC8 value
++ * @buffer: data pointer
++ * @len: number of bytes in the buffer
++ * Context: any
++ *
++ * Returns the updated CRC8 value.
++ */
++u8 crc8(u8 crc, const u8 *buffer, size_t len)
++{
++ while (len--)
++ crc = crc8_table[crc ^ *buffer++];
++ return crc;
++}
++
++MODULE_DESCRIPTION("CRC8 calculations");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/ifx-mux/crc8.h
+@@ -0,0 +1,7 @@
++#ifndef _LINUX_CRC8_H
++#define _LINUX_CRC8_H
++#include <linux/types.h>
++
++extern u8 crc8(u8 crc, const u8 *buffer, size_t len);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ifx-mux/gsm0710.c
+@@ -0,0 +1,723 @@
++/*
++ * gsm0710.c - low level 3GPP 07.10 protocol implementation
++ *
++ * (C) 2000-2008 TROLLTECH ASA.
++ * (C) 2009 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ * Modified by Intel from Trolltech user-space original, 2009, 2010
++ * Jim Stanley <jim.stanley@intel.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include "crc8.h"
++#include "gsm0710.h"
++
++/* #define MUX_DEBUG */
++static void gsm0710_create_frame_int(struct gsm0710_context *, int, int,
++ char *, int, char **, char **, int *);
++static void gsm0710_write_frame_int(struct gsm0710_context *, int, char *,
++ int);
++
++static inline int is_channel_used(struct gsm0710_context *ctx, int ch)
++{
++ return test_bit(ch, (ctx)->used_channels);
++}
++static inline void mark_channel_used(struct gsm0710_context *ctx, int ch)
++{
++ set_bit(ch, (ctx)->used_channels);
++}
++static inline void mark_channel_unused(struct gsm0710_context *ctx, int ch)
++{
++ clear_bit(ch, (ctx)->used_channels);
++}
++
++/* Initialize a GSM 07.10 context, in preparation for startup */
++void gsm0710_initialize(struct gsm0710_context *ctx)
++{
++ memset(ctx, 0, sizeof(*ctx));
++ atomic_set(&ctx->init, 1);
++ ctx->mode = GSM0710_MODE_BASIC;
++ ctx->frame_size = GSM0710_DEFAULT_FRAME_SIZE;
++ ctx->port_speed = 115200;
++}
++
++/* Write a debug message */
++static void gsm0710_debug(struct gsm0710_context *ctx, const char *msg)
++{
++ if (ctx->debug_message)
++ (*(ctx->debug_message)) (ctx, msg);
++}
++
++/* Shut down the GSM 07.10 session, closing all channels */
++void gsm0710_shutdown(struct gsm0710_context *ctx)
++{
++ static const char terminate[2] = { GSM0710_TERMINATE_BYTE1,
++ GSM0710_TERMINATE_BYTE2 };
++ int channel;
++
++ /* guard against multiple shutdowns in a row */
++ if (!atomic_dec_and_test(&ctx->init))
++ return;
++
++ pr_debug("shutdown channels");
++ for (channel = 1; channel <= GSM0710_MAX_CHANNELS; ++channel) {
++ if (is_channel_used(ctx, channel)) {
++ pr_debug("closing channel %d", channel);
++ gsm0710_write_frame(ctx, channel, GSM0710_CLOSE_CHANNEL,
++ 0, 0);
++ }
++ }
++ pr_debug("close mux");
++ gsm0710_write_frame(ctx, 0, GSM0710_DATA, terminate, 2);
++ memset(ctx->used_channels, 0, sizeof(ctx->used_channels));
++}
++
++/* Open a specific channel. Returns non-zero if successful */
++int gsm0710_open_channel(struct gsm0710_context *ctx, int channel)
++{
++ /* pr_debug("open channel called ch=%d", channel); */
++
++ if (channel < 0 || channel > GSM0710_MAX_CHANNELS)
++ return 0; /* Invalid channel number */
++ if (is_channel_used(ctx, channel))
++ return 1; /* Channel is already open */
++ mark_channel_used(ctx, channel);
++ gsm0710_write_frame(ctx, channel, GSM0710_OPEN_CHANNEL, 0, 0);
++ return 1;
++}
++
++/* Close a specific channel */
++void gsm0710_close_channel(struct gsm0710_context *ctx, int channel)
++{
++ if (channel < 0 || channel > GSM0710_MAX_CHANNELS)
++ return; /* Invalid channel number */
++ if (!is_channel_used(ctx, channel))
++ return; /* Channel is already closed */
++ mark_channel_unused(ctx, channel);
++ gsm0710_write_frame(ctx, channel, GSM0710_CLOSE_CHANNEL, 0, 0);
++}
++
++/* Determine if a specific channel is open */
++int gsm0710_is_channel_open(struct gsm0710_context *ctx, int channel)
++{
++ if (channel < 0 || channel > GSM0710_MAX_CHANNELS)
++ return 0; /* Invalid channel number */
++ return is_channel_used(ctx, channel);
++}
++
++int gsm0710_compute_crc(const char *data, int len)
++{
++ return ~(crc8(~0, data, (size_t)len));
++}
++
++/* Process an incoming GSM 07.10 packet */
++static int
++gsm0710_packet(struct gsm0710_context *ctx, int channel, int type,
++ const char *data, int len)
++{
++ char *frame;
++ char *frame_data;
++ int frame_size;
++
++ pr_debug(
++ "0710 packet ok: chan %d, type 0x%02X, data[0] 0x%02X, len %d",
++ channel, type, (unsigned char)data[0], len);
++ if (ctx->packet_filter && (*(ctx->packet_filter)) (ctx, channel, type,
++ data, len)) {
++ /* The filter has extracted and processed the packet */
++ return 1;
++ }
++ if (type == 0xEF || type == 0x03) {
++ if (channel >= 1 && channel <= GSM0710_MAX_CHANNELS &&
++ is_channel_used(ctx, channel)) {
++ /* Ordinary data packet */
++ if (ctx->deliver_data)
++ (*(ctx->deliver_data)) (ctx, channel, data,
++ len);
++ } else if (channel == 0) {
++ /* An embedded command or response on channel 0 */
++ if (len >= 2 && data[0] == (char)GSM0710_STATUS_SET) {
++ return gsm0710_packet(ctx, channel,
++ GSM0710_STATUS_ACK,
++ data + 2, len - 2);
++ } else if (len >= 2 && data[0] == (char)0xC3) {
++ /* Incoming terminate request on server side */
++ gsm0710_debug(ctx,
++ "received terminate request");
++ for (channel = 1; channel <=
++ GSM0710_MAX_CHANNELS; ++channel) {
++ if (is_channel_used(ctx, channel)) {
++ if (ctx->close_channel)
++ (*(ctx->close_channel))
++ (ctx, channel);
++ }
++ }
++ memset(ctx->used_channels, 0,
++ sizeof(ctx->used_channels));
++ if (ctx->terminate)
++ (*(ctx->terminate)) (ctx);
++ return 0;
++ } else if (len >= 2 &&
++ data[0] == (char)(GSM0710_CMD_TEST |
++ GSM0710_EA | GSM0710_CR)) {
++ /* Test command from other side - send the
++ * same bytes back */
++ gsm0710_debug(ctx,
++ "received test command, sending response");
++ gsm0710_create_frame_int(ctx, 0, GSM0710_DATA,
++ (char *)data,
++ len, &frame,
++ &frame_data,
++ &frame_size);
++ /* Clear the C/R bit in the response */
++ frame_data[0] = (unsigned char)
++ (GSM0710_CMD_TEST | GSM0710_EA);
++ gsm0710_write_frame_int(ctx, 0, frame,
++ frame_size);
++ } else if (len >= 2 &&
++ data[0] == (char)(GSM0710_CMD_TEST |
++ GSM0710_EA)) {
++ /* Response for a test command we sent */
++ gsm0710_debug(ctx, "received test response");
++ if (ctx->response_to_test)
++ (*(ctx->response_to_test)) (ctx,
++ &data[2], len - 2);
++ }
++ }
++
++ } else if (type == GSM0710_STATUS_ACK && channel == 0) {
++ /* Status change message */
++ if (len >= 2) {
++ /* Handle status changes on other channels */
++ gsm0710_debug(ctx, "received status change command");
++ channel = ((data[0] & 0xFC) >> 2);
++ if (channel >= 0 && channel <= GSM0710_MAX_CHANNELS &&
++ is_channel_used(ctx, channel)) {
++ if (ctx->deliver_status)
++ (*(ctx->deliver_status)) (ctx,
++ channel, data[1] & 0xFF);
++ }
++ }
++
++ /* Send the response to the status change request to ACK it */
++ gsm0710_debug(ctx,
++ "received status line signal, sending response");
++ gsm0710_create_frame_int(ctx, 0, GSM0710_DATA,
++ (char *)&data[-2], len+2, &frame,
++ &frame_data, &frame_size);
++ frame_data[0] = (unsigned char)GSM0710_STATUS_ACK;
++ frame_data[1] = (unsigned char)((len << 1) | 0x01);
++ gsm0710_write_frame_int(ctx, 0, frame, frame_size);
++ } else if (type == (0x3F & 0xEF)) {
++ gsm0710_debug(ctx, "modem request channel open");
++ /* Incoming channel open request on server side */
++ if (channel >= 0 && channel <= GSM0710_MAX_CHANNELS) {
++ if (!is_channel_used(ctx, channel)) {
++ mark_channel_used(ctx, channel);
++ if (ctx->open_channel)
++ (*(ctx->open_channel)) (ctx, channel);
++ }
++ }
++ } else if (type == (0x53 & 0xEF)) {
++ gsm0710_debug(ctx, "modem request channel close");
++ /* Incoming channel close request on server side */
++ if (channel >= 0 && channel <= GSM0710_MAX_CHANNELS) {
++ if (is_channel_used(ctx, channel)) {
++ mark_channel_unused(ctx, channel);
++ if (ctx->close_channel)
++ (*(ctx->close_channel)) (ctx, channel);
++ }
++ }
++
++ } else if (type == GSM0710_UNNUMBERED_ACK) {
++ /* can mean channel is open or disconnect if channel==0 */
++ if (ctx->deliver_unnumbered_ack)
++ (*(ctx->deliver_unnumbered_ack))(ctx, channel);
++ } else if (type == GSM0710_DISCONNECT_MODE) {
++ pr_debug("channel %d is not open", channel);
++ /* mark channel not ready */
++ }
++ return 1;
++}
++
++/* Function that is called when the underlying device is ready to be read.
++ A callback will be made to ctx->read to get the data for processing */
++void gsm0710_ready_read(struct gsm0710_context *ctx)
++{
++ int len;
++ int posn = 0;
++ int posn2;
++ int header_size;
++ int channel, type;
++
++ if (!ctx->read)
++ return;
++
++ len = (*(ctx->read)) (ctx, ctx->buffer + ctx->buffer_used,
++ sizeof(ctx->buffer) - ctx->buffer_used);
++
++ if (len <= 0)
++ return;
++
++#ifdef MUX_DEBUG
++ do {
++ unsigned char *ptr;
++ int i;
++ ptr = ctx->buffer + ctx->buffer_used;
++ pr_debug("ready read received: %x, %x, %x (%d)",
++ ptr[0], ptr[1], ptr[2], len);
++ for (i = 0; i < len; i++)
++ pr_debug("%x %c %d", ptr[i], ptr[i], ptr[i]);
++ } while (0);
++#endif
++ /* Update the buffer size */
++ ctx->buffer_used += len;
++
++ /* Break the incoming data up into packets */
++ while (posn < ctx->buffer_used) {
++ if (ctx->buffer[posn] == (char)0xF9) {
++
++ /* Basic format: skip 0xF9 bytes between frames */
++ while ((posn + 1) < ctx->buffer_used &&
++ ctx->buffer[posn + 1] == (char)0xF9) {
++ ++posn;
++ }
++
++ /* We need at least 4 bytes for the header */
++ if ((posn + 4) > ctx->buffer_used)
++ break;
++
++ /* The low bit of the second byte should be 1,
++ which indicates a short channel number */
++ if ((ctx->buffer[posn + 1] & 0x01) == 0) {
++ ++posn;
++ continue;
++ }
++
++ /* Get the packet length and validate it */
++ len = (ctx->buffer[posn + 3] >> 1) & 0x7F;
++ if ((ctx->buffer[posn + 3] & 0x01) != 0) {
++ /* Single-byte length indication */
++ header_size = 3;
++ } else {
++ /* Double-byte length indication */
++ if ((posn + 5) > ctx->buffer_used)
++ break;
++ len |= ((int)(unsigned char)
++ (ctx->buffer[posn + 4])) << 7;
++ header_size = 4;
++ }
++ if ((posn + header_size + 2 + len) > ctx->buffer_used)
++ break;
++
++ /* Verify the packet header checksum */
++ if (((gsm0710_compute_crc(ctx->buffer + posn + 1,
++ header_size) ^
++ ctx->buffer[posn + len + header_size + 1]) & 0xFF)
++ != 0) {
++ gsm0710_debug(ctx,
++ "*** GSM 07.10 checksum check failed ***");
++ posn += len + header_size + 2;
++ continue;
++ }
++
++ /* Get channel number and packet type from header */
++ channel = (ctx->buffer[posn + 1] >> 2) & 0x3F;
++ /* Strip "PF" bit */
++ type = ctx->buffer[posn + 2] & 0xEF;
++ pr_debug("channel %d, type %d",
++ channel, type);
++
++ /* Dispatch data packets to the appropriate channel */
++ if (!gsm0710_packet(ctx, channel, type,
++ ctx->buffer + posn + header_size + 1, len)) {
++ /* Session has been terminated */
++ ctx->buffer_used = 0;
++ return;
++ }
++ posn += len + header_size + 2;
++
++ } else if (ctx->buffer[posn] == (char)0x7E) {
++
++ /* Advanced format: skip 0x7E bytes between frames */
++ while ((posn + 1) < ctx->buffer_used &&
++ ctx->buffer[posn + 1] == (char)0x7E) {
++ ++posn;
++ }
++
++ /* Search for end of packet (the next 0x7E byte) */
++ len = posn + 1;
++ while (len < ctx->buffer_used &&
++ ctx->buffer[len] != (char)0x7E) {
++ ++len;
++ }
++ if (len >= ctx->buffer_used) {
++ /* insufficient bytes for a packet */
++ if (posn == 0 && len >= (int)sizeof
++ (ctx->buffer)) {
++ /* The buffer is full and we were
++ * unable to find a
++ * legitimate packet.
++ * Discard the buffer and restart */
++ posn = len;
++ }
++ break;
++ }
++
++ /* Undo control byte quoting in the packet */
++ posn2 = 0;
++ ++posn;
++ while (posn < len) {
++ if (ctx->buffer[posn] == 0x7D) {
++ ++posn;
++ if (posn >= len)
++ break;
++ ctx->buffer[posn2++] = (char)
++ (ctx->buffer[posn++] ^ 0x20);
++ } else {
++ ctx->buffer[posn2++] =
++ ctx->buffer[posn++];
++ }
++ }
++
++ /* Validate the checksum on the packet header */
++ if (posn2 >= 3) {
++ if (((gsm0710_compute_crc(ctx->buffer, 2) ^
++ ctx->buffer[posn2 - 1]) &
++ 0xFF) != 0) {
++ gsm0710_debug(ctx,
++ "*** GSM 07.10 advanced checksum "
++ "check failed ***");
++ continue;
++ }
++ } else {
++ gsm0710_debug(ctx,
++ "*** GSM 07.10 advanced packet "
++ "is too small ***");
++ continue;
++ }
++
++ /* Decode and dispatch the packet */
++ channel = (ctx->buffer[0] >> 2) & 0x3F;
++ /* Strip "PF" bit */
++ type = ctx->buffer[1] & 0xEF;
++ pr_debug("call gsm0710_packet");
++ if (!gsm0710_packet(ctx, channel, type,
++ ctx->buffer + 2, posn2 - 3)) {
++ /* Session has been terminated */
++ ctx->buffer_used = 0;
++ return;
++ }
++
++ } else {
++ ++posn;
++ }
++ }
++ if (posn < ctx->buffer_used) {
++ memmove(ctx->buffer, ctx->buffer + posn,
++ ctx->buffer_used - posn);
++ ctx->buffer_used -= posn;
++ } else {
++ ctx->buffer_used = 0;
++ }
++ pr_debug("exit ready read");
++}
++
++/*
++ * gsm0710_create_frame -
++ * Create a raw GSM 07.10 frame for the underlying device
++ *
++ * @frame: allocation large enough to create a frame for len payload bytes
++ * @out_data: will contain ptr to payload data in the frame
++ * @out_size: will contain the size of the frame
++ */
++static void
++gsm0710_create_frame(struct gsm0710_context *ctx, int channel,
++ int type, const char *data, int len,
++ char *frame, char **out_data, int *out_size)
++{
++ int size;
++
++ if (ctx->mode) {
++ int temp, crc;
++
++ frame[0] = (char)0x7E;
++ frame[1] = (char)((channel << 2) | 0x03);
++ frame[2] = (char)type;
++ crc = gsm0710_compute_crc(frame + 1, 2);
++ if (type == 0x7E || type == 0x7D) {
++ /* Need to quote the type field now that
++ * crc has been computed */
++ frame[2] = (char)0x7D;
++ frame[3] = (char)(type ^ 0x20);
++ size = 4;
++ } else {
++ size = 3;
++ }
++ if (out_data)
++ *out_data = &frame[size];
++ while (len > 0) {
++ temp = *data++ & 0xFF;
++ --len;
++ if (temp != 0x7E && temp != 0x7D) {
++ frame[size++] = (char)temp;
++ } else {
++ frame[size++] = (char)0x7D;
++ frame[size++] = (char)(temp ^ 0x20);
++ }
++ }
++ if (crc != 0x7E && crc != 0x7D) {
++ frame[size++] = (char)crc;
++ } else {
++ frame[size++] = (char)0x7D;
++ frame[size++] = (char)(crc ^ 0x20);
++ }
++ frame[size++] = (char)0x7E;
++ } else {
++ int header_size;
++
++ frame[0] = (char)0xF9;
++ frame[1] = (char)((channel << 2) | 0x03);
++ frame[2] = (char)type;
++ if (len <= 127) {
++ frame[3] = (char)((len << 1) | 0x01);
++ header_size = size = 4;
++ } else {
++ frame[3] = (char)(len << 1);
++ frame[4] = (char)(len >> 7);
++ header_size = size = 5;
++ }
++ if (out_data)
++ *out_data = &frame[size];
++ if (len > 0) {
++ memcpy(frame + size, data, len);
++ size += len;
++ }
++ /* Note: GSM 07.10 says that the CRC is only computed over
++ * the header */
++ frame[size++] = (char)gsm0710_compute_crc(frame + 1,
++ header_size - 1);
++ frame[size++] = (char)0xF9;
++ }
++ if (out_size)
++ *out_size = size;
++}
++
++
++/*
++ * gsm0710_create_frame_int -
++ * version of gsm0710_create_frame suitable for interrupt context
++ *
++ * @out_frame : will contain a pointer to the frame created
++ * @out_data : if non-NULL, will contain a pointer to the data in the frame created
++ * @out_size : if non-NULL, will contain the size of the frame created
++ */
++static void
++gsm0710_create_frame_int(struct gsm0710_context *ctx, int channel,
++ int type, char *data, int len,
++ char **out_frame, char **out_data,
++ int *out_size)
++{
++ static unsigned char int_frame_buf[2*GSM0710_DEFAULT_FRAME_SIZE+8];
++ char *frame = int_frame_buf;
++
++ if (len > ctx->frame_size)
++ len = ctx->frame_size;
++ gsm0710_create_frame(ctx, channel, type, data, len, frame,
++ out_data, out_size);
++ *out_frame = frame;
++}
++
++
++/*
++ * gsm0710_write_frame_int -
++ * writes the frame created by gsm0710_create_frame_int to
++ * the underlying device
++ */
++static void
++gsm0710_write_frame_int(struct gsm0710_context *ctx, int channel,
++ char *frame, int size)
++{
++ if (ctx->write)
++ (*(ctx->write)) (ctx, channel, frame, size);
++}
++
++/*
++ * gsm0710_write_frame -
++ * Write a raw GSM 07.10 frame to the underlying device
++ *
++ * Context: sleepable
++ */
++void
++gsm0710_write_frame(struct gsm0710_context *ctx, int channel, int type,
++ const char *data, int len)
++{
++ char *frame;
++ int alloc_size;
++ int size;
++
++ if (in_interrupt()) {
++ gsm0710_create_frame_int(ctx, channel, type, (char *)data,
++ len, &frame, NULL, &size);
++ gsm0710_write_frame_int(ctx, channel, frame, size);
++ } else {
++ if (ctx->mode)
++ alloc_size = (2 * ctx->frame_size) + 8;
++ else
++ alloc_size = ctx->frame_size + 8;
++ frame = kmalloc(alloc_size, GFP_KERNEL);
++ if (len > ctx->frame_size)
++ len = ctx->frame_size;
++ gsm0710_create_frame(ctx, channel, type, data, len, frame,
++ NULL, &size);
++ if (ctx->write)
++ (*(ctx->write)) (ctx, channel, frame, size);
++
++ kfree(frame);
++ }
++}
++
++void
++gsm0710_write_frame_buffer(struct gsm0710_context *ctx, char* frame,
++ int channel, int type, const char *data, int len)
++{
++ int size;
++
++ if (frame == NULL)
++ return;
++
++ if (len > ctx->frame_size)
++ len = ctx->frame_size;
++
++ if (ctx->mode) {
++ int temp, crc;
++
++ frame[0] = (char)0x7E;
++ frame[1] = (char)((channel << 2) | 0x03);
++ frame[2] = (char)type;
++ crc = gsm0710_compute_crc(frame + 1, 2);
++ if (type == 0x7E || type == 0x7D) {
++ /* Need to quote the type field now that crc
++ * has been computed */
++ frame[2] = (char)0x7D;
++ frame[3] = (char)(type ^ 0x20);
++ size = 4;
++ } else {
++ size = 3;
++ }
++ while (len > 0) {
++ temp = *data++ & 0xFF;
++ --len;
++ if (temp != 0x7E && temp != 0x7D) {
++ frame[size++] = (char)temp;
++ } else {
++ frame[size++] = (char)0x7D;
++ frame[size++] = (char)(temp ^ 0x20);
++ }
++ }
++ if (crc != 0x7E && crc != 0x7D) {
++ frame[size++] = (char)crc;
++ } else {
++ frame[size++] = (char)0x7D;
++ frame[size++] = (char)(crc ^ 0x20);
++ }
++ frame[size++] = (char)0x7E;
++ } else {
++ int header_size;
++
++ frame[0] = (char)0xF9;
++ frame[1] = (char)((channel << 2) | 0x03);
++ frame[2] = (char)type;
++ if (len <= 127) {
++ frame[3] = (char)((len << 1) | 0x01);
++ header_size = size = 4;
++ } else {
++ frame[3] = (char)(len << 1);
++ frame[4] = (char)(len >> 7);
++ header_size = size = 5;
++ }
++ if (len > 0) {
++ memcpy(frame + size, data, len);
++ size += len;
++ }
++ /* Note: GSM 07.10 says that the CRC is only computed over
++ * the header */
++ frame[size++] = (char)gsm0710_compute_crc(frame + 1,
++ header_size - 1);
++ frame[size++] = (char)0xF9;
++ pr_debug("frame %x, %x (%d)",
++ (unsigned char)frame[size-2],
++ (unsigned char)frame[size-1], size);
++ }
++}
++
++/* Write a block of data to the underlying device. It will be split
++ into several frames according to the frame size, if necessary */
++void gsm0710_write_data(struct gsm0710_context *ctx, int channel,
++ const void *data, int len)
++{
++ int temp;
++
++ while (len > 0) {
++ temp = len;
++ if (temp > ctx->frame_size)
++ temp = ctx->frame_size;
++ gsm0710_write_frame(ctx, channel, GSM0710_DATA, data, temp);
++ data = (const void *)(((const char *)data) + temp);
++ len -= temp;
++ }
++}
++
++/* Set the modem status lines on a channel */
++void gsm0710_set_status(struct gsm0710_context *ctx, int channel, int status)
++{
++ char data[4];
++
++ data[0] = (char)GSM0710_STATUS_SET;
++ data[1] = (char)0x03;
++ data[2] = (char)((channel << 2) | 0x03);
++ data[3] = (char)status;
++ gsm0710_write_frame(ctx, 0, GSM0710_DATA, data, 4);
++}
++
++/* Test command */
++void gsm0710_send_test(struct gsm0710_context *ctx, const void *testdata,
++ int len)
++{
++ char *data;
++
++ if (len > ctx->frame_size) {
++ gsm0710_debug(ctx, "** GSM 07.10 truncating test command **");
++ len = ctx->frame_size - 4;
++ }
++
++ data = kmalloc(len+2, GFP_KERNEL);
++ if (data == NULL)
++ return;
++
++ data[0] = (char)GSM0710_CMD_TEST | GSM0710_CR | GSM0710_EA;
++ data[1] = (char)GSM0710_EA | (len << 1);
++ memcpy(&data[2], testdata, len);
++ gsm0710_write_frame(ctx, 0, GSM0710_DATA, data, len + 2);
++
++ kfree(data);
++}
++
+--- /dev/null
++++ b/drivers/staging/ifx-mux/gsm0710.h
+@@ -0,0 +1,168 @@
++/*
++ * gsm0710.h - low level 3GPP 07.10 protocol implementation
++ *
++ * (C) 2000-2008 TROLLTECH ASA.
++ * (C) 2009 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Modified by Intel, 2009, 2010
++ * Jim Stanley <jim.stanley@intel.com>
++ */
++
++#ifndef GSM0710_P_H
++#define GSM0710_P_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define GSM0710_SHORT_BUFFER_SIZE 127
++
++#define GSM0710_BUFFER_SIZE 4096
++/* size of SPI frame data */
++#define GSM0710_DEFAULT_FRAME_SIZE 1509
++#define GSM0710_MAX_CHANNELS 63
++
++#define GSM0710_MODE_BASIC 0
++#define GSM0710_MODE_ADVANCED 1
++
++/* Atoms */
++#define GSM0710_CMD_TEST 0x20
++#define GSM0710_PF 0x10
++#define GSM0710_CR 0x02
++#define GSM0710_EA 0x01
++
++/* Frame types and subtypes */
++#define GSM0710_OPEN_CHANNEL 0x3F
++#define GSM0710_CLOSE_CHANNEL 0x53
++#define GSM0710_DATA 0xEF
++#define GSM0710_DATA_ALT 0x03
++#define GSM0710_STATUS_SET 0xE3
++#define GSM0710_STATUS_ACK 0xE1
++#define GSM0710_TERMINATE_BYTE1 0xC3
++#define GSM0710_TERMINATE_BYTE2 0x01
++
++
++/* Additional Frame types and subtypes */
++#define GSM0710_SABM 0x2F
++#define GSM0710_UNNUMBERED_ACK 0x63
++#define GSM0710_DISCONNECT_MODE 0x0F
++#define GSM0710_DISCONNECT 0x43
++
++/*
++ * Status flags for virtual channels. These map like that:
++ *
++ * Direction host application -> module:
++ * RTC: mapped to DTR
++ * RTR: mapped to RTS
++ * Bits 5, 6, 7, 8 are not valid.
++ *
++ * Direction module -> host application:
++ * RTC: mapped to DSR
++ * RTR: mapped to CTS
++ * RING: mapped to RING
++ * DCD: mapped to DCD
++ * Bits 5, 6 are not valid.
++ */
++#define GSM0710_FC 0x02
++#define GSM0710_RTC 0x04
++#define GSM0710_RTR 0x08
++#define GSM0710_RING 0x40
++#define GSM0710_DCD 0x80
++
++
++/* Forward */
++struct gsm0710_context;
++
++/* Callbacks */
++typedef int (*gsm0710_context_at_command_callback)(
++ struct gsm0710_context *ctx, const char *cmd);
++typedef int (*gsm0710_context_read_callback)(
++ struct gsm0710_context *ctx, void *data, int len);
++typedef int (*gsm0710_context_write_callback)(
++ struct gsm0710_context *ctx, int channel,
++ const void *data, int len);
++typedef void (*gsm0710_context_deliver_data_callback)(
++ struct gsm0710_context *ctx, int channel,
++ const void *data, int len);
++typedef void (*gsm0710_context_deliver_status_callback)(
++ struct gsm0710_context *ctx, int channel, int status);
++typedef void (*gsm0710_context_debug_message_callback)(
++ struct gsm0710_context *ctx, const char *msg);
++typedef void (*gsm0710_context_open_channel_callback)(
++ struct gsm0710_context *ctx, int channel);
++typedef void (*gsm0710_context_close_channel_callback)(
++ struct gsm0710_context *ctx, int channel);
++typedef void (*gsm0710_context_terminate_callback)(
++ struct gsm0710_context *ctx);
++typedef int (*gsm0710_context_packet_filter_callback)(
++ struct gsm0710_context *ctx, int channel, int type,
++ const char *data, int len);
++typedef void (*gsm0710_context_response_to_test_callback)(
++ struct gsm0710_context *ctx, const char *data,
++ int len);
++typedef void (*gsm0710_context_deliver_unnumbered_ack)(
++ struct gsm0710_context *ctx, int channel);
++
++struct gsm0710_context {
++ /* GSM 07.10 implementation details */
++ int mode;
++ int frame_size;
++ int port_speed;
++ char buffer[GSM0710_BUFFER_SIZE];
++ int buffer_used;
++ unsigned long used_channels[(GSM0710_MAX_CHANNELS + 31) / 32];
++ atomic_t init;
++
++ /* Hooks to upper layers */
++ void *user_data;
++
++ gsm0710_context_at_command_callback at_command;
++ gsm0710_context_read_callback read;
++ gsm0710_context_write_callback write;
++ gsm0710_context_deliver_data_callback deliver_data;
++ gsm0710_context_deliver_status_callback deliver_status;
++ gsm0710_context_debug_message_callback debug_message;
++ gsm0710_context_open_channel_callback open_channel;
++ gsm0710_context_close_channel_callback close_channel;
++ gsm0710_context_terminate_callback terminate;
++ gsm0710_context_packet_filter_callback packet_filter;
++ gsm0710_context_response_to_test_callback response_to_test;
++ gsm0710_context_deliver_unnumbered_ack deliver_unnumbered_ack;
++};
++
++void gsm0710_initialize(struct gsm0710_context *ctx);
++int gsm0710_startup(struct gsm0710_context *ctx, int send_cmux);
++void gsm0710_shutdown(struct gsm0710_context *ctx);
++int gsm0710_open_channel(struct gsm0710_context *ctx, int channel);
++void gsm0710_close_channel(struct gsm0710_context *ctx, int channel);
++int gsm0710_is_channel_open(struct gsm0710_context *ctx, int channel);
++void gsm0710_ready_read(struct gsm0710_context *ctx);
++void gsm0710_write_frame(struct gsm0710_context *ctx, int channel, int type,
++ const char *data, int len);
++void gsm0710_write_frame_buffer(struct gsm0710_context *ctx, char *buf,
++ int channel, int type, const char *data, int len);
++void gsm0710_write_data(struct gsm0710_context *ctx, int channel,
++ const void *data, int len);
++void gsm0710_set_status(struct gsm0710_context *ctx, int channel, int status);
++int gsm0710_compute_crc(const char *data, int len);
++void gsm0710_send_test(struct gsm0710_context *ctx, const void *testdata,
++ int len);
++
++#ifdef __cplusplus
++};
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/ifx-mux/ifx_spi_mux.c
+@@ -0,0 +1,2385 @@
++/****************************************************************************
++ *
++ * Driver for the IFX spi modem.
++ *
++ * Copyright (C) 2008 Option International
++ * Copyright (C) 2008 Filip Aben <f.aben@option.com>
++ * Denis Joseph Barrow <d.barow@option.com>
++ * Jan Dumon <j.dumon@option.com>
++ *
++ * Copyright (C) 2009,2010 Intel Corp
++ * Jim Stanley <jim.stanley@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ * Driver modified by Intel from Option gtm501l_spi.c
++ *
++ * %START upstream-patch-readiness
++ * unifdef ISTAT DEBUG_KREF IFX_DO_TRACE
++ * UI -k -UISTAT -UDEBUG_KREF -UIFX_DO_TRACE -UIFX_THROTTLE_CODE
++ * remove all lines containing
++ * RL #define ISTAT
++ * RL #define DEBUG_KREF
++ * RL #define IFX_DO_TRACE
++ * RL #define trace(x)
++ * RL kref_is
++ * RL tty_kref_is
++ * RL IS_INC
++ * RL IS_MAX
++ * RL IS_CQADD
++ * RL IS_CLQADD
++ * RL IS_CLINC
++ * RL IS_TMCLR
++ * RL IS_TMMAX
++ * RL IS_TMMIN
++ * RL IS_TMS
++ * RL IS_TME
++ * RL IS_TMES
++ * RL IS_PRINT_TMSTATS
++ * remove all comments containing
++ * RC RRG:
++ * %END
++ *
++ *****************************************************************************/
++#include <linux/module.h>
++#include <linux/termios.h>
++#include <linux/tty.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/tty.h>
++#include <linux/kfifo.h>
++#include <linux/tty_flip.h>
++#include <linux/workqueue.h>
++#include <linux/timer.h>
++#include <linux/poll.h>
++#include <linux/serial.h>
++
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/rfkill.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <net/arp.h>
++#include <linux/fs.h>
++#include <linux/ip.h>
++#include <linux/dmapool.h>
++#include <linux/sysfs.h>
++#include <linux/gpio.h>
++#include <linux/sched.h>
++#include <linux/time.h>
++#include <linux/wait.h>
++
++#ifdef CONFIG_DEBUG_FS
++#include <linux/debugfs.h>
++#include <linux/ktime.h>
++#include <linux/spinlock.h>
++#endif
++
++#include "ifx_spi_mux.h"
++#include "gsm0710.h"
++#include "ifx_spi_mux_ioctl.h"
++
++#define IFX_NUM_DEFAULT_CHANNELS 12
++
++#define IFX_SPI_MORE_MASK 0x10
++#define IFX_SPI_MORE_BIT 4
++#define IFX_SPI_MODE SPI_MODE_1
++
++#define IFX_SPI_CONTROL_CHANNEL 0
++#define IFX_SPI_DEFAULT_CHANNEL 1
++
++#define IFX_SPI_MUX_MAX_CHANNEL_SIZE 1509
++
++/* #define IFX_SPI_MUX_DEBUG */
++
++/*
++ * RRG: DEBUG_KREF is temporary debug code - to be
++ * removed before final product
++ */
++/* #define DEBUG_KREF */
++/*
++ * RRG: IFX_DO_TRACE, ISTAT : TEMP code
++ * remove before final release
++ */
++/* #define IFX_DO_TRACE */
++/* #define ISTAT */
++
++#ifdef IFX_DO_TRACE
++#define TRACE_LENGTH 256
++static unsigned char *trace_index;
++static unsigned char *trace_array;
++
++#define trace(x) \
++ do { \
++ trace_array[*trace_index] = x; \
++ (*trace_index)++; \
++ } while (0)
++#else
++#define trace(x)
++#endif
++
++#ifdef ISTAT
++#define ISTAT_NAME n_ifx6160_istat
++#define CQSIZE 256 /* must be a power of 2 */
++#define CLQSIZE 32 /* must be a power of 2 */
++struct _cqueue {
++ int idx;
++ unsigned long q[CQSIZE];
++};
++struct _locus {
++ unsigned long val;
++ char *file;
++ int line;
++};
++struct _clqueue {
++ int idx;
++ struct _locus q[CLQSIZE];
++};
++static struct _n_ifx6160_istat {
++ /* counters */
++ signed long cnt_placeholder;
++ /* max_counters */
++ unsigned long max_placeholder;
++ /* data c-queues */
++ struct _cqueue cq_trace;
++ /* trace c-queues */
++ struct _clqueue clq_placeholder;
++} ISTAT_NAME = {0};
++#define IS_INC(x, i) (ISTAT_NAME.cnt_##x += i)
++#define IS_MAX(x, i) (ISTAT_NAME.max_##x = i)
++
++#ifdef IFX_DO_TRACE
++#define IS_CQADD(x, i) trace(i)
++#else /* IFX_DO_TRACE */
++#define IS_CQADD(x, i) \
++ do { ISTAT_NAME.cq_##x.q[ISTAT_NAME.cq_##x.idx] = i; \
++ ISTAT_NAME.cq_##x.idx = ((ISTAT_NAME.cq_##x.idx + 1) & \
++ (CQSIZE-1)); \
++ } while (0)
++#endif /* IFX_DO_TRACE */
++
++#define IS_CLQADD(x, i) \
++ do { ISTAT_NAME.clq_##x.q[ISTAT_NAME.clq_##x.idx].val = i; \
++ ISTAT_NAME.clq_##x.q[ISTAT_NAME.clq_##x.idx].file = __FILE__; \
++ ISTAT_NAME.clq_##x.q[ISTAT_NAME.clq_##x.idx].line = __LINE__; \
++ ISTAT_NAME.clq_##x.idx = ((ISTAT_NAME.clq_##x.idx + 1) & \
++ (CLQSIZE-1)); \
++ } while (0)
++#define IS_CLQINC(x, i) \
++ do { ISTAT_NAME.clq_##x.q[ISTAT_NAME.clq_##x.idx].val += i; \
++ ISTAT_NAME.clq_##x.q[ISTAT_NAME.clq_##x.idx].file = __FILE__; \
++ ISTAT_NAME.clq_##x.q[ISTAT_NAME.clq_##x.idx].line = __LINE__; \
++ ISTAT_NAME.clq_##x.idx = ((ISTAT_NAME.clq_##x.idx + 1) & \
++ (CLQSIZE-1)); \
++ } while (0)
++#else /* ISTAT */
++#define IS_INC(x, i)
++#define IS_MAX(x, i)
++#define IS_CQADD(x, i)
++#define IS_CLQADD(x, i)
++#define IS_CLINC(x, i)
++#endif /* ISTAT */
++
++#ifdef DEBUG_KREF
++#define kref_is(x) \
++ do { int cur; \
++ cur = atomic_read(&(x)->refcount); \
++ printk(KERN_DEBUG "kref=%d %s:%d", \
++ cur, __FILE__, __LINE__); \
++ } while (0)
++#define kref_get(x) \
++ do { int cur; \
++ kref_get(x); \
++ cur = atomic_read(&(x)->refcount); \
++ printk(KERN_DEBUG "++kref=%d %s:%d", \
++ cur, __FILE__, __LINE__); \
++ } while (0)
++#define kref_put(x, y) \
++ do { int cur; \
++ cur = atomic_read(&(x)->refcount); \
++ kref_put(x, y); \
++ printk(KERN_DEBUG "--kref=%d %s:%d", \
++ cur-1, __FILE__, __LINE__); \
++ } while (0)
++#define tty_kref_is(x) \
++ do { int cur; \
++ cur = atomic_read(&(x)->kref.refcount); \
++ printk(KERN_DEBUG "tty_kref=%d %s:%d", \
++ cur, __FILE__, __LINE__); \
++ } while (0)
++static inline struct tty_struct *_tty_kref_get(struct tty_struct *x)
++{
++ int cur;
++ struct tty_struct *tty = tty_kref_get(x);
++ cur = atomic_read(&(x)->kref.refcount);
++ printk(KERN_DEBUG "++tty_kref=%d %s:%d",
++ cur, __FILE__, __LINE__);
++
++ return tty;
++}
++#define tty_kref_get _tty_kref_get
++#define tty_kref_put(x) \
++ do { int cur; \
++ tty_kref_put(x); \
++ cur = atomic_read(&(x)->kref.refcount); \
++ printk(KERN_DEBUG "--tty_kref=%d %s:%d", \
++ cur, __FILE__, __LINE__); \
++ } while (0)
++#else /* DEBUG_KREF */
++#define kref_is(x)
++#define tty_kref_is(x)
++#endif /* DEBUG_KREF */
++
++#define mux_tty_serialdev(tty) \
++ (((struct ifx_spi_mux_port_data *)tty->driver_data)->type.serial.tty_dev)
++#define mux_dbg(tty, fmt, args...) \
++ do { \
++ if (tty->driver_data) \
++ dev_dbg(mux_tty_serialdev(tty), fmt, ## args); \
++ else \
++ pr_debug(fmt, ## args); \
++ } while (0)
++
++/* various static variables */
++static struct tty_driver *tty_drv;
++static struct ktermios *ifx_spi_termios[IFX_SPI_MAX_MINORS];
++static struct ktermios *ifx_spi_termios_locked[IFX_SPI_MAX_MINORS];
++static struct lock_class_key ifx_spi_key;
++
++static struct gsm0710_context mux_ctx;
++static struct ifx_spi_mux_device *saved_ifx_dev;
++
++static unsigned int total_tty_write;
++
++#define net_to_ifx_spi_data(net) \
++ (*((struct ifx_spi_mux_port_data **)netdev_priv(net)))
++
++/* declarations for status workqueue */
++struct work_arg {
++ struct ifx_spi_mux_device *ifx_dev;
++ int channel;
++ int status;
++};
++struct work_arg work;
++
++/* forward declaration */
++static int ifx_spi_mux_push_skb(struct ifx_spi_mux_port_data *port_data);
++static void ifx_spi_mux_ld_free_port(struct ifx_spi_mux_port_data *port_data);
++static int ifx_spi_mux_ld_create_port(struct ifx_spi_mux_device *ifx_dev,
++ int type, int channel, char *name);
++static void _close1_channel(struct ifx_spi_mux_device *ifx_dev, int channel);
++
++/*
++ * RRG: EIJ_ISSET(): TEMP code
++ * remove before final release
++ */
++#ifdef IFX_SPI_MUX_DEBUG
++#define EIJ_ISSET(v, x) ((v) & (1<<(x)))
++static unsigned int eijbits1;
++enum eijval1 {
++ EIJ_CH2_OPEN,
++ EIJ_CH0_OPEN,
++};
++
++module_param(eijbits1, uint, S_IRUGO);
++MODULE_PARM_DESC(eijbits1, "supply error injection bits");
++#else /* IFX_SPI_MUX_DEBUG */
++#define EIJ_ISSET(v, x) (0)
++#endif /* IFX_SPI_MUX_DEBUG */
++
++static inline void swap_buf(u16 *buf, int len)
++{
++ int n;
++ len = (len + 1) / 2;
++ n = (len + 7) / 8;
++ switch (len % 8) {
++ case 0:
++ do {
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 7:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 6:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 5:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 4:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 3:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 2:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ case 1:
++ *buf = cpu_to_be16(*buf);
++ buf++;
++ } while (--n > 0);
++ }
++}
++
++static int ifx_spi_mux_get_free_port(struct ifx_spi_mux_device *ifx_dev)
++{
++ int i;
++
++ for (i = 0; i < IFX_SPI_MAX_PORTS; i++) {
++ if (!ifx_dev->port_data[i])
++ return i;
++ }
++ return -1;
++}
++
++static int ifx_spi_mux_find_port(struct ifx_spi_mux_device *ifx_dev,
++ int type, int channel)
++{
++ int i;
++
++ for (i = 0; i < IFX_SPI_MAX_PORTS; i++) {
++ if (!ifx_dev->port_data[i])
++ continue;
++ if (ifx_dev->port_data[i]->port_id == channel &&
++ ifx_dev->port_data[i]->spec.type == type)
++ return i;
++ }
++ return -1;
++}
++
++static int ifx_spi_mux_open_channel(struct ifx_spi_mux_device *ifx_dev,
++ int type, int channel)
++{
++ struct ifx_spi_mux_channel *channel_data;
++ int ret;
++ int newstate;
++
++ pr_debug("%s called (channel:%d type:%d)", __func__,
++ channel, type);
++ IS_CQADD(trace, 0x20);
++
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data) {
++ pr_err("channel %d: no channel data", channel);
++ return -1;
++ }
++
++ if (channel != 0 && ifx_dev->mux_on != IFX_MUX_ON) {
++ pr_err("channel %d: mux is not active", channel);
++ return -1;
++ }
++
++ channel_data->mux_channel_state = IFX_SPI_MUX_CHANNEL_CONNECT;
++
++ /* tell mux to open new channel */
++ pr_debug("tell modem to open channel");
++ ret = gsm0710_open_channel(&mux_ctx, channel);
++ if (ret <= 0) {
++ pr_err("channel %d: open channel error %d",
++ channel, ret);
++ channel_data->mux_channel_state = IFX_SPI_MUX_CHANNEL_NONE;
++ return -1;
++ }
++
++ IS_CQADD(trace, 0x21);
++ /*
++ * wait for modem to respond with yes/no status
++ *
++ * current modem doesn't send status for channel 0
++ * to complete channel setup, wait for ACCEPT, then
++ * set as ACTIVE if wait hasn't timed out
++ */
++ pr_debug("wait for channel event");
++ newstate = (channel == 0 ? IFX_SPI_MUX_CHANNEL_ACCEPT :
++ IFX_SPI_MUX_CHANNEL_ACTIVE);
++ ret = wait_event_timeout(channel_data->mux_state_wait,
++ channel_data->mux_channel_state == newstate,
++ IFX_SPI_STATUS_TIMEOUT);
++ if (EIJ_ISSET(eijbits1, EIJ_CH2_OPEN) && channel == 2)
++ ret = 0;
++ if (EIJ_ISSET(eijbits1, EIJ_CH0_OPEN) && channel == 0)
++ ret = 0;
++ if (ret) {
++ pr_debug("channel %d open", channel);
++ if (channel == 0) {
++ channel_data->mux_channel_state =
++ IFX_SPI_MUX_CHANNEL_ACTIVE;
++ ifx_dev->mux_on = IFX_MUX_ON;
++ }
++ ret = 0;
++ } else {
++ pr_warning("%s: channel %d open FAILED (state %x)",
++ DRVNAME, channel, channel_data->mux_channel_state);
++ (void)gsm0710_close_channel(&mux_ctx, channel);
++ channel_data->mux_channel_state = IFX_SPI_MUX_CHANNEL_NONE;
++ ret = -1;
++ }
++
++ IS_CQADD(trace, 0x22);
++ return ret;
++}
++
++/* char/tty operations */
++
++#ifdef IFX_THROTTLE_CODE
++static void ifx_spi_mux_throttle(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++
++ mux_dbg(tty, "%s called\n", __func__);
++
++ if (!test_bit(IFX_SPI_RX_FC, &port_data->signal_state)) {
++ set_bit(IFX_SPI_RX_FC, &port_data->signal_state);
++ set_bit(IFX_SPI_UPDATE, &port_data->signal_state);
++ }
++}
++
++#define UNTHROTTLE_STACK_BUF_SIZE (512)
++static void ifx_spi_mux_unthrottle(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++ struct ifx_spi_mux_serial *ifx_ser = &port_data->type.serial;
++ int write_length_remaining;
++ int curr_write_len;
++ char stack_buff[UNTHROTTLE_STACK_BUF_SIZE];
++
++ mux_dbg(tty, "%s called\n", __func__);
++
++ write_length_remaining = kfifo_len(&ifx_ser->throttle_fifo);
++ while (write_length_remaining) {
++ if (test_bit(TTY_THROTTLED, &tty->flags))
++ return;
++ curr_write_len = min(write_length_remaining,
++ UNTHROTTLE_STACK_BUF_SIZE);
++ curr_write_len = kfifo_out_locked(&ifx_ser->throttle_fifo,
++ stack_buff, curr_write_len,
++ &ifx_ser->throttle_fifo_lock);
++ curr_write_len = tty_insert_flip_string(tty, stack_buff,
++ curr_write_len);
++ write_length_remaining -= curr_write_len;
++ tty_flip_buffer_push(tty);
++ }
++
++ clear_bit(IFX_SPI_RX_FC, &port_data->signal_state);
++ set_bit(IFX_SPI_UPDATE, &port_data->signal_state);
++}
++#endif /* IFX_THROTTLE_CODE */
++
++static int ifx_spi_mux_tiocmget(struct tty_struct *tty, struct file *filp)
++{
++ unsigned int value = 0;
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++
++ mux_dbg(tty, "%s called\n", __func__);
++
++ value =
++ (test_bit(IFX_SPI_RTS, &port_data->signal_state) ? TIOCM_RTS : 0) |
++ (test_bit(IFX_SPI_DTR, &port_data->signal_state) ? TIOCM_DTR : 0) |
++ (test_bit(IFX_SPI_CTS, &port_data->signal_state) ? TIOCM_CTS : 0) |
++ (test_bit(IFX_SPI_DSR, &port_data->signal_state) ? TIOCM_DSR : 0) |
++ (test_bit(IFX_SPI_DCD, &port_data->signal_state) ? TIOCM_CAR : 0) |
++ (test_bit(IFX_SPI_RI, &port_data->signal_state) ? TIOCM_RNG : 0);
++ return value;
++}
++
++static int ifx_spi_mux_tiocmset(struct tty_struct *tty, struct file *filp,
++ unsigned int set, unsigned int clear)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++
++ mux_dbg(tty, "%s called (set:%x clear:%x)",
++ __func__, set, clear);
++
++ if (set & TIOCM_RTS)
++ set_bit(IFX_SPI_RTS, &port_data->signal_state);
++ if (set & TIOCM_DTR)
++ set_bit(IFX_SPI_DTR, &port_data->signal_state);
++
++ if (clear & TIOCM_RTS)
++ clear_bit(IFX_SPI_RTS, &port_data->signal_state);
++ if (clear & TIOCM_DTR)
++ clear_bit(IFX_SPI_DTR, &port_data->signal_state);
++
++ set_bit(IFX_SPI_UPDATE, &port_data->signal_state);
++ return 0;
++}
++
++static void _ifx_spi_mux_set_termios(struct ktermios *termios,
++ struct ktermios *old)
++{
++ pr_debug("%s called (new:%p old:%p)", __func__, termios, old);
++
++ /*
++ * The default requirements for this device are:
++ */
++ termios->c_iflag = 0;
++
++ termios->c_cc[VMIN] = 1;
++ termios->c_cc[VTIME] = 0;
++
++ /* disable postprocess output characters */
++ termios->c_oflag &= ~OPOST;
++
++ termios->c_lflag &= ~(ECHO /* disable echo input characters */
++ | ECHONL /* disable echo new line */
++ | ICANON /* disable erase, kill, werase, and
++ * rprnt special characters */
++ | ISIG /* disable interrupt, quit, and suspend
++ * special characters */
++ | IEXTEN); /* disable non-POSIX special
++ * characters */
++
++ termios->c_cflag &= ~(CSIZE /* no size */
++ | PARENB /* disable parity bit */
++ | CBAUD /* clear current baud rate */
++ | CBAUDEX); /* clear current buad rate */
++ termios->c_cflag |= CS8; /* character size 8 bits */
++ termios->c_cflag |= B115200; /* baud rate 115200 */
++
++}
++
++static void ifx_spi_mux_set_termios(struct tty_struct *tty,
++ struct ktermios *old)
++{
++ /* the actual setup */
++ _ifx_spi_mux_set_termios(tty->termios, old);
++
++ tty_encode_baud_rate(tty, 115200, 115200);
++ /*
++ * Force low_latency off so can be called from interrupt
++ */
++ tty->low_latency = 0;
++}
++
++static int _ifx_spi_mux_open(int channel)
++{
++ struct ifx_spi_mux_port_data *port_data;
++ int ret;
++
++ if (!saved_ifx_dev->channel[channel]) {
++ pr_err("%s: channel %d not present", __func__, channel);
++ return -ENODEV;
++ }
++ port_data = saved_ifx_dev->channel[channel]->serial_port_data;
++ if (!port_data) {
++ pr_err("%s: channel %d port data missing", __func__, channel);
++ return -ENODEV;
++ }
++
++ /* clear any old data; can't do this in 'close' */
++ kfifo_reset(&port_data->tx_fifo);
++
++ ret = ifx_spi_mux_open_channel(port_data->spi_itf,
++ IFX_SPI_PORT_SPEC_SERIAL, channel);
++ if (ret)
++ return -ENODEV;
++
++ return 0;
++}
++
++/*
++ * There is a balance between open/close routines that is maintined
++ * The tty code ensures that close is called for every open even if
++ * the open fails
++ * ifx_ser->open indicates how many opens are on the channel device
++ * (though they may not all have been successful opens)
++ * (ifx_ser->open > 0) indicates the driver has a reference on the tty
++ * for the channel and must be removed by close when the count hits 0
++ * Since close is called for failed opens we need to set ifx_ser->open
++ * even in some failure cases so close can cleanup properly
++ */
++static int ifx_spi_mux_open(struct tty_struct *tty, struct file *filp)
++{
++ struct ifx_spi_mux_serial *ifx_ser;
++ struct ifx_spi_mux_port_data *port_data;
++ struct tty_port *pport;
++ int channel = tty->index;
++ int ret = 0;
++
++ pr_debug("%s called (channel:%d tty:%p)\n", __func__,
++ channel, tty);
++
++ tty_kref_is(tty);
++ port_data = saved_ifx_dev->channel[channel]->serial_port_data;
++ if (!port_data)
++ return -ENODEV;
++ ifx_ser = &port_data->type.serial;
++ pport = &ifx_ser->tty_port;
++
++ ifx_ser->filp = filp; /* for activate method */
++ ret = tty_port_open(pport, tty, filp);
++ tty_kref_is(tty);
++ kref_get(&port_data->spi_itf->ref);
++
++ return ret;
++}
++
++static void ifx_spi_mux_free_device(struct kref *ref)
++{
++ int i;
++ struct ifx_spi_mux_device *ifx_dev =
++ container_of(ref, struct ifx_spi_mux_device, ref);
++ struct ifx_spi_mux_port_data *port_data;
++
++ pr_debug("%s called\n", __func__);
++
++ /* free channel 0 last -- closes the mux */
++ pr_debug("free all ports");
++ for (i = IFX_SPI_MAX_PORTS-1; i >= 0; i--) {
++ port_data = ifx_dev->port_data[i];
++ if (!port_data)
++ continue;
++ ifx_spi_mux_ld_free_port(port_data);
++ ifx_dev->port_data[i] = NULL;
++ }
++
++ pr_debug("free all channel entries");
++ for (i = IFX_SPI_PORT_PER_DEV-1; i >= 0; i--) {
++ if (!ifx_dev->channel[i])
++ continue;
++ pr_debug("free channel %d", i);
++ kfree(ifx_dev->channel[i]);
++ ifx_dev->channel[i] = NULL;
++ }
++
++ kfree(ifx_dev->tx_buffer[0]);
++ kfree(ifx_dev->tx_buffer[1]);
++ kfree(ifx_dev);
++ saved_ifx_dev = NULL;
++}
++
++/*
++ * see comments on ifx_spi_mux_open
++ */
++static void ifx_spi_mux_close(struct tty_struct *tty, struct file *filp)
++{
++ struct ifx_spi_mux_serial *ifx_ser;
++ struct ifx_spi_mux_port_data *port_data;
++ struct tty_port *pport;
++ int channel = tty->index;
++
++ mux_dbg(tty, "%s called (channel:%d tty:%p)\n", __func__,
++ channel, tty);
++
++ port_data = (struct ifx_spi_mux_port_data *)tty->driver_data;
++ if (!port_data)
++ return;
++ ifx_ser = &port_data->type.serial;
++ pport = &ifx_ser->tty_port;
++
++ ifx_ser->filp = filp; /* for shutdown method */
++ tty_port_close(pport, tty, filp);
++ tty_kref_is(tty);
++ kref_put(&saved_ifx_dev->ref, ifx_spi_mux_free_device);
++}
++
++#if 0
++static void ifx_spi_mux_wakeup_serial(struct ifx_spi_mux_port_data *port_data)
++{
++ struct tty_struct *tty;
++
++ tty = tty_port_tty_get(&port_data->type.serial.tty_port);
++ if (!tty)
++ return;
++ tty_wakeup(tty);
++ tty_kref_put(tty);
++}
++#endif /* 0 */
++
++static int ifx_spi_mux_prepare_tx_buffer(struct ifx_spi_mux_device *ifx_dev)
++{
++ int i;
++ int j;
++ int temp_count;
++ int queue_length;
++ int tx_count = 0;
++ unsigned char *tx_buffer;
++ struct ifx_spi_mux_port_data *port_data;
++
++ pr_debug("prepare_tx_buffer called\n");
++ tx_buffer = ifx_dev->tx_buffer[0];
++ memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
++
++ /* clear to signal no more data if this turns out to be the
++ * last buffer sent in a sequence */
++ ifx_dev->spi_more = 0;
++
++ j = ifx_dev->round_robin_index;
++ for (i = 0; i < IFX_SPI_MAX_PORTS; i++, j++) {
++ /* look through ports for one that exists and is enabled */
++ if (j == IFX_SPI_MAX_PORTS)
++ j = 0;
++ /* no flow control now -- port is either there or not */
++ if (!ifx_dev->port_data[j])
++ continue;
++ port_data = ifx_dev->port_data[j];
++
++ /* have a proper record, see if there's tx data */
++ queue_length = kfifo_len(&port_data->tx_fifo);
++ if (queue_length == 0)
++ continue;
++#if 0
++ if (queue_length <= GSM0710_SHORT_BUFFER_SIZE)
++ overhead = IFX_SPI_SHORT_MUX_OVERHEAD;
++ else
++ overhead = IFX_SPI_LONG_MUX_OVERHEAD;
++#endif
++ /* OK, there's data to mux -- see if there's room for it */
++ temp_count = min(queue_length, IFX_SPI_TRANSFER_SIZE-tx_count);
++ /* if mux in use, be sure there's room for initial header */
++ if (temp_count > 0 /*overhead-IFX_SPI_TRAILING_MUX_OVERHEAD*/) {
++ /* room in buffer for enough data to be a message */
++ temp_count =
++ kfifo_out_locked(&port_data->tx_fifo, tx_buffer,
++ temp_count, &port_data->fifo_lock);
++
++ /* update buffer pointer and data count in message */
++ tx_buffer += temp_count;
++ tx_count += temp_count;
++#if 0
++ do {
++ int i;
++ unsigned char *buf = ifx_dev->tx_buffer[0];
++ pr_debug("buffer:\n");
++ for (i = 0; i < tx_count; i++)
++ pr_debug("%x %c", buf[i], buf[i]);
++ pr_debug("end buffer\n");
++ } while (0);
++#endif
++ /* start with next port next time if this is the
++ * last port for this message */
++ ifx_dev->round_robin_index = j + 1;
++ if (ifx_dev->round_robin_index >= IFX_SPI_MAX_PORTS)
++ ifx_dev->round_robin_index = 0;
++
++ if (temp_count == queue_length) {
++ /* all data used, go to next port */
++ /* poke port to get more data */
++#if 0
++ ifx_spi_mux_wakeup_serial(port_data);
++#endif /* 0 */
++ continue;
++ } else {
++ /* more data in port, use next SPI message */
++ ifx_dev->spi_more = 1;
++ break;
++ }
++ } else {
++ /* no room for this data in SPI buffer, more to send */
++ ifx_dev->spi_more = 1;
++ /* start with this port for next message */
++ ifx_dev->round_robin_index = j;
++ break;
++ }
++ }
++
++#if 0
++ /* have data and info for header -- set up SPI header in buffer */
++ /* spi header needs payload size, not entire buffer size */
++ ifx_spi_mux_setup_spi_header(ifx_dev->tx_buffer[0],
++ tx_count-IFX_SPI_HEADER_OVERHEAD,
++ ifx_dev->spi_more);
++ /* swap actual data in the buffer */
++ swap_buf((u16 *)(ifx_dev->tx_buffer[0]), 2*((tx_count+1)/2));
++
++ do {
++ int i;
++ int len = 2*((tx_count+1)/2);
++ unsigned char *buf = ifx_dev->tx_buffer[0];
++
++ pr_debug("prepare_tx_buffer buffer: (%d)\n", tx_count);
++ for (i = 0; i < len; i++)
++ pr_debug("%02x", (unsigned char)buf[i]);
++ pr_debug("end buffer\n");
++ } while (0);
++#endif
++ return tx_count;
++}
++
++static int ifx_spi_mux_write(struct tty_struct *tty, const unsigned char *buf,
++ int count)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++ struct ifx_spi_mux_device *spi_itf;
++ int channel;
++
++ if (!port_data) {
++ mux_dbg(tty, "%s: no port data", __func__);
++ return -ENODEV;
++ }
++
++ spi_itf = port_data->spi_itf;
++ channel = port_data->port_id;
++ mux_dbg(tty, "write channel %d, count %d\n", channel, count);
++
++ /* mux the data first, then put into output */
++ /* calls write_callback to send output downstream */
++ gsm0710_write_frame(&mux_ctx, channel, GSM0710_DATA, buf, count);
++ return count;
++}
++
++static int ifx_spi_mux_write_room(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++
++ if (!port_data) {
++ mux_dbg(tty, "%s: no port data", __func__);
++ return -ENODEV;
++ }
++ return IFX_SPI_FIFO_SIZE - kfifo_len(&port_data->tx_fifo);
++}
++
++static int ifx_spi_mux_chars_in_buffer(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ (struct ifx_spi_mux_port_data *)tty->driver_data;
++
++ if (!port_data) {
++ mux_dbg(tty, "%s: no port data", __func__);
++ return -ENODEV;
++ }
++ return kfifo_len(&port_data->tx_fifo);
++}
++
++/*
++ * may be called for a partially configured port
++ */
++static void ifx_spi_mux_ld_free_port(struct ifx_spi_mux_port_data *port_data)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_serial *ifx_ser;
++ struct tty_struct *tty;
++ int channel;
++ int type;
++ int i;
++
++ if (!port_data) {
++ pr_debug("%s: port data NULL", __func__);
++ return;
++ }
++ ifx_dev = port_data->spi_itf;
++ ifx_ser = &port_data->type.serial;
++ channel = port_data->port_id;
++ type = port_data->spec.type;
++
++ pr_debug("%s called (channel:%d)\n", __func__, channel);
++ if (!ifx_dev) {
++ pr_debug("ifx dev NULL");
++ return;
++ }
++ i = ifx_spi_mux_find_port(ifx_dev, type, channel);
++ if (i < 0) {
++ pr_debug("can't find port to remove");
++ return;
++ }
++
++ /* do device type specific cleanup */
++ switch (port_data->spec.type) {
++ case IFX_SPI_PORT_SPEC_SERIAL:
++ /* serial device cleanup */
++ if (channel == 0)
++ break;
++
++ tty = tty_port_tty_get(&ifx_ser->tty_port);
++ if (tty) {
++ tty_hangup(tty);
++ tty_kref_put(tty);
++ }
++ tty_unregister_device(tty_drv, ifx_ser->minor);
++
++#ifdef IFX_THROTTLE_CODE
++ kfifo_free(&ifx_ser->throttle_fifo);
++#endif /* IFX_THROTTLE_CODE */
++ break;
++
++ case IFX_SPI_PORT_SPEC_NET:
++ /* net device cleanup */
++ unregister_netdev(port_data->type.net.net);
++ free_netdev(port_data->type.net.net);
++ break;
++ }
++
++ /* do common device deinitialization */
++ kfree(ifx_dev->channel[channel]);
++ ifx_dev->channel[channel] = NULL;
++ kfifo_free(&port_data->tx_fifo);
++ kfree(port_data);
++ ifx_dev->port_data[i] = NULL;
++}
++
++static int ifx_spi_mux_tty_ioctl(struct tty_struct *tty, struct file * file,
++ unsigned int cmd, unsigned long arg)
++{
++ int channel = tty->index;
++ int retval = 0;
++ int err;
++ int i;
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_port_data *port_data;
++ struct channel_arg charg = {0};
++ struct ifx_spi_mux_channel *channel_data;
++ struct passthrough_arg pass;
++
++ mux_dbg(tty, "%s called (cmd:%x arg:%lx)\n", __func__, cmd, arg);
++
++ /* handle tty ioctls separately */
++ if (_IOC_TYPE(cmd) == _IOC_TYPE(TTY_DRIVER_MAGIC))
++ return n_tty_ioctl_helper(tty, file, cmd, arg);
++
++ if (_IOC_TYPE(cmd) != IFX_SPI_MAGIC) {
++ mux_dbg(tty, "ld bad magic %x", _IOC_TYPE(cmd));
++ return -ENOTTY;
++ }
++
++ port_data = (struct ifx_spi_mux_port_data *)tty->driver_data;
++ if (port_data == NULL) {
++ pr_err("no port data\n");
++ return -EINVAL;
++ }
++ ifx_dev = port_data->spi_itf;
++ if (!ifx_dev)
++ return -EINVAL;
++
++ switch (cmd) {
++ case IFX_SPI_CREATECHANNEL:
++ /* argument is pointer to channel arg structure */
++ if (copy_from_user(&charg,
++ (struct channel_arg __user *)arg,
++ sizeof(struct channel_arg)))
++ return -EINVAL;
++ mux_dbg(tty, "create channel %d: %s\n",
++ charg.channel, charg.chname);
++ if (charg.channel <= 0 ||
++ charg.channel >= IFX_SPI_PORT_PER_DEV) {
++ retval = -EINVAL;
++ } else {
++ if (ifx_dev->mux_on != IFX_MUX_ON)
++ return -EPERM;
++ IS_CQADD(trace, 0x10);
++ retval = ifx_spi_mux_ld_create_port(ifx_dev,
++ charg.type, charg.channel,
++ charg.chname);
++ if (!retval) {
++ /* send SPI message to modem */
++ IS_CQADD(trace, 0x11);
++ err = ifx_spi_mux_open_channel(ifx_dev,
++ charg.type, charg.channel);
++ if (err)
++ retval = -ENODEV;
++ IS_CQADD(trace, 0x16);
++ }
++ }
++ break;
++
++ /* don't allow closing channel 0 or channel 1 */
++ case IFX_SPI_DELETECHANNEL:
++ get_user(channel, &arg);
++ if (channel < 2 || channel >= IFX_SPI_PORT_PER_DEV) {
++ retval = -EINVAL;
++ break;
++ }
++ channel_data = ifx_dev->channel[channel];
++ i = ifx_spi_mux_find_port(ifx_dev, IFX_SPI_PORT_SPEC_SERIAL,
++ channel);
++ if (i >= 0) {
++ ifx_spi_mux_ld_free_port(ifx_dev->port_data[i]);
++ ifx_dev->port_data[i] = NULL;
++ }
++ i = ifx_spi_mux_find_port(ifx_dev, IFX_SPI_PORT_SPEC_NET,
++ channel);
++ if (i >= 0) {
++ ifx_spi_mux_ld_free_port(ifx_dev->port_data[i]);
++ ifx_dev->port_data[i] = NULL;
++ }
++ kfree(channel_data);
++ ifx_dev->channel[channel] = NULL;
++
++ gsm0710_close_channel(&mux_ctx, channel);
++ break;
++
++ /* channel 0 and 1 tty ports set up at driver init */
++ case IFX_SPI_STARTMUX:
++ if (ifx_dev->mux_on == IFX_MUX_ON)
++ return -EPERM;
++ if (arg == 0 || arg > IFX_SPI_MUX_MAX_CHANNEL_SIZE)
++ return -EINVAL;
++ mux_dbg(tty, "mux is on %ld", arg);
++ mux_ctx.frame_size = arg;
++ ifx_dev->mux_on = IFX_MUX_ON;
++ IS_CQADD(trace, 0x12);
++ /* open channel 0 directly */
++ gsm0710_open_channel(&mux_ctx, IFX_SPI_CONTROL_CHANNEL);
++ IS_CQADD(trace, 0x13);
++ err = ifx_spi_mux_open_channel(ifx_dev,
++ IFX_SPI_PORT_SPEC_SERIAL,
++ IFX_SPI_DEFAULT_CHANNEL);
++ if (err)
++ retval = -ENODEV;
++ IS_CQADD(trace, 0x14);
++ break;
++
++ case IFX_SPI_STOPMUX:
++ if (ifx_dev->mux_on == IFX_MUX_OFF)
++ return -EPERM;
++ /* send SPI message to modem */
++ gsm0710_shutdown(&mux_ctx);
++ ifx_dev->mux_on = IFX_MUX_OFF;
++ /*
++ * remove any existing interfaces
++ * but leave 0 and 1 for non-mux control
++ */
++ for (i = 2; i < IFX_SPI_PORT_PER_DEV; i++) {
++ channel_data = ifx_dev->channel[i];
++ port_data = channel_data->network_port_data;
++ if (port_data) {
++ ifx_spi_mux_ld_free_port(port_data);
++ ifx_dev->port_data[i] = NULL;
++ }
++ port_data = channel_data->serial_port_data;
++ if (port_data) {
++ ifx_spi_mux_ld_free_port(port_data);
++ ifx_dev->port_data[i] = NULL;
++ }
++
++ kfree(ifx_dev->channel[i]);
++ ifx_dev->channel[i] = NULL;
++ }
++
++ break;
++
++ /* non-0 value will result in network passthrough -- no network
++ * interface created
++ */
++ case IFX_SPI_NETPASSTHROUGH:
++ if (copy_from_user(&pass,
++ (struct passthrough_arg __user *)arg,
++ sizeof(struct passthrough_arg)))
++ return -EINVAL;
++ mux_dbg(tty, "set channel passthrough %d, %d\n",
++ pass.channel, pass.passthrough);
++ if (pass.channel < 0 || pass.channel >= IFX_SPI_PORT_PER_DEV)
++ return -EINVAL;
++ if (!ifx_dev->channel[pass.channel])
++ return -EINVAL;
++ ifx_dev->channel[pass.channel]->passthrough =
++ pass.passthrough;
++ break;
++
++ default:
++ mux_dbg(tty, "ld main bad command number\n");
++ retval = -ENOIOCTLCMD;
++ break;
++ }
++
++ return retval;
++}
++
++static void ifx_spi_mux_tty_hangup(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_serial *ifx_ser;
++ struct ifx_spi_mux_port_data *port_data;
++ int channel = tty->index;
++
++ mux_dbg(tty, "%s called (channel:%d)\n", __func__, channel);
++
++ port_data = (struct ifx_spi_mux_port_data *)tty->driver_data;
++ ifx_ser = &port_data->type.serial;
++ tty_port_hangup(&ifx_ser->tty_port);
++ tty_kref_is(tty);
++}
++
++/*
++ * tty port activate method - called for first port open
++ */
++static int
++ifx_mux_port_activate(struct tty_port *port, struct tty_struct *tty)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ container_of(port, struct ifx_spi_mux_port_data,
++ type.serial.tty_port);
++ struct ifx_spi_mux_serial *ifx_ser = &port_data->type.serial;
++ int channel = tty->index;
++ int ret;
++
++ mux_dbg(tty, "%s called", __func__);
++
++ ret = _ifx_spi_mux_open(channel);
++ tty->driver_data = port_data;
++ tty->low_latency = 0; /* allows flip string push from int context */
++
++ /* signal_update_needed flag will be set by tiocmset */
++ clear_bit(IFX_SPI_RX_FC, &port_data->signal_state);
++ ifx_spi_mux_tiocmset(tty, ifx_ser->filp, TIOCM_DTR | TIOCM_RTS, 0);
++
++ return ret;
++}
++
++/*
++ * tty port shutdown method - called for last port close
++ */
++static void
++ifx_mux_port_shutdown(struct tty_port *port)
++{
++ struct ifx_spi_mux_port_data *port_data =
++ container_of(port, struct ifx_spi_mux_port_data,
++ type.serial.tty_port);
++ struct ifx_spi_mux_serial *ifx_ser = &port_data->type.serial;
++ struct tty_struct *tty;
++
++ dev_dbg(ifx_ser->tty_dev, "%s called", __func__);
++
++ tty = tty_port_tty_get(&ifx_ser->tty_port);
++ if (!tty) {
++ dev_dbg(ifx_ser->tty_dev, "no tty");
++ return;
++ }
++ _close1_channel(port_data->spi_itf, tty->index);
++ set_bit(IFX_SPI_RX_FC, &port_data->signal_state);
++ ifx_spi_mux_tiocmset(tty, ifx_ser->filp, 0, TIOCM_DTR | TIOCM_RTS);
++ tty->driver_data = NULL;
++ tty_kref_put(tty);
++}
++
++static const struct tty_port_operations ifx_tty_port_ops = {
++ .activate = ifx_mux_port_activate,
++ .shutdown = ifx_mux_port_shutdown,
++};
++
++/* tty operations exported by ldisc ttys */
++static const struct tty_operations ifx_spi_serial_ops = {
++ .open = ifx_spi_mux_open,
++ .close = ifx_spi_mux_close,
++ .write = ifx_spi_mux_write,
++ .ioctl = ifx_spi_mux_tty_ioctl,
++ .hangup = ifx_spi_mux_tty_hangup,
++ .write_room = ifx_spi_mux_write_room,
++ .set_termios = ifx_spi_mux_set_termios,
++ .chars_in_buffer = ifx_spi_mux_chars_in_buffer,
++ .tiocmget = ifx_spi_mux_tiocmget,
++ .tiocmset = ifx_spi_mux_tiocmset,
++#ifdef IFX_THROTTLE_CODE
++ .throttle = ifx_spi_mux_throttle,
++ .unthrottle = ifx_spi_mux_unthrottle
++#endif /* IFX_THROTTLE_CODE */
++};
++
++static void
++ifx_spi_mux_tty_insert_flip_string(struct ifx_spi_mux_serial *ifx_ser,
++ unsigned char *chars, size_t size)
++{
++ int chars_inserted;
++ struct tty_struct *tty;
++ int i;
++
++ if (!ifx_ser)
++ return;
++
++ tty = tty_port_tty_get(&ifx_ser->tty_port);
++ if (!tty)
++ return;
++
++ chars_inserted = tty_insert_flip_string(tty, chars, size);
++ pr_debug("insert flip string %d\n", chars_inserted);
++ for (i = 0; i < chars_inserted; i++)
++ pr_debug(" %c", chars[i]);
++ tty_flip_buffer_push(tty);
++ tty_wakeup(tty);
++ tty_kref_put(tty);
++}
++
++static int ifx_spi_mux_net_open(struct net_device *net)
++{
++ struct ifx_spi_mux_port_data *port_data = net_to_ifx_spi_data(net);
++
++ pr_debug("%s called\n", __func__);
++ port_data->type.net.rx_state = WAIT_IP;
++ port_data->type.net.rx_buf_size = 0;
++ port_data->type.net.rx_buf_missing = sizeof(struct iphdr);
++
++ /* update remote side it's ok to send us data */
++ clear_bit(IFX_SPI_RX_FC, &port_data->signal_state);
++ set_bit(IFX_SPI_UPDATE, &port_data->signal_state);
++ netif_start_queue(net);
++ return 0;
++}
++
++static int ifx_spi_mux_net_close(struct net_device *net)
++{
++ struct ifx_spi_mux_port_data *port_data = net_to_ifx_spi_data(net);
++
++/* pr_debug("%s called\n", __func__); */
++ /* stop remote side from sending us data */
++ set_bit(IFX_SPI_RX_FC, &port_data->signal_state);
++ set_bit(IFX_SPI_UPDATE, &port_data->signal_state);
++ netif_stop_queue(net);
++ return 0;
++}
++
++static int ifx_spi_mux_push_skb(struct ifx_spi_mux_port_data *port_data)
++{
++ struct ifx_spi_mux_net *ifx_net = &port_data->type.net;
++ struct sk_buff *skb = ifx_net->tx_skb;
++ struct ifx_spi_mux_device *spi_itf;
++ int channel;
++ unsigned int len;
++
++ spi_itf = port_data->spi_itf;
++ channel = port_data->port_id;
++/* pr_debug("net write channel %d\n", channel); */
++
++ if (!skb || !(ifx_net->net->flags & IFF_UP))
++ return 0;
++
++ len = skb->len;
++ gsm0710_write_frame(&mux_ctx, channel, GSM0710_DATA,
++ skb->data, skb->len);
++ skb_pull(skb, len);
++ len -= skb->len;
++ if (skb->len == 0) {
++ dev_kfree_skb(skb);
++ ifx_net->tx_skb = NULL;
++ netif_start_queue(ifx_net->net);
++ }
++
++ return len;
++}
++
++static int ifx_spi_mux_net_start_xmit(struct sk_buff *skb,
++ struct net_device *net)
++{
++ int result = 0;
++ unsigned int len = 0;
++ struct ifx_spi_mux_port_data *port_data = net_to_ifx_spi_data(net);
++ struct ifx_spi_mux_net *ifx_net = &port_data->type.net;
++
++ pr_debug("%s called\n", __func__);
++ netif_stop_queue(net);
++
++ if (ifx_net->tx_skb) {
++ pr_err("%s tx_skb not null\n", __func__);
++ result = -EIO;
++ } else {
++ ifx_net->tx_skb = skb;
++ len = ifx_spi_mux_push_skb(port_data);
++ }
++
++ if (result) {
++ STATS(net).tx_errors++;
++ netif_start_queue(net);
++ } else {
++ STATS(net).tx_packets++;
++ STATS(net).tx_bytes += len;
++ /* And tell the kernel when the last transmit started. */
++ net->trans_start = jiffies;
++ }
++
++ return result;
++}
++
++#ifndef NETDEVICE_HAS_STATS
++static struct net_device_stats *
++ifx_spi_mux_net_get_stats(struct net_device *net)
++{
++ return &STATS(net);
++}
++#endif
++
++/* called when a packet did not ack after watchdog timeout */
++static void ifx_spi_mux_net_tx_timeout(struct net_device *net)
++{
++ /* Tell syslog we are hosed. */
++ dev_warn(&net->dev, "Tx timed out.\n");
++
++ /* Update statistics */
++ STATS(net).tx_errors++;
++}
++
++static void ifx_spi_mux_net_init(struct net_device *net)
++{
++ static const struct net_device_ops ifx_spi_netdev_ops = {
++ .ndo_open = ifx_spi_mux_net_open,
++ .ndo_stop = ifx_spi_mux_net_close,
++ .ndo_start_xmit = ifx_spi_mux_net_start_xmit,
++ .ndo_tx_timeout = ifx_spi_mux_net_tx_timeout,
++ /* .ndo_do_ioctl = wlan_do_ioctl, */
++ /* .ndo_set_mac_address = wlan_set_mac_address, */
++#ifndef NETDEVICE_HAS_STATS
++ .ndo_get_stats = ifx_spi_mux_net_get_stats,
++#endif
++ /* .ndo_set_multicast_list = wlan_set_multicast_list, */
++ };
++ net->netdev_ops = &ifx_spi_netdev_ops;
++
++ /* fill in the other fields */
++ net->watchdog_timeo = IFX_SPI_NET_TX_TIMEOUT;
++ net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
++ net->type = ARPHRD_NONE;
++ net->mtu = IFX_SPI_DEFAULT_MTU;
++ net->tx_queue_len = 10;
++}
++
++static void ifx_spi_mux_rx_netchar(struct ifx_spi_mux_net *ifx_net,
++ unsigned char *in_buf, int size)
++{
++ struct net_device *net = ifx_net->net;
++ unsigned short temp_bytes;
++ unsigned short buffer_offset = 0;
++ unsigned short frame_len;
++ unsigned char *tmp_rx_buf;
++
++ pr_debug("%s called\n", __func__);
++
++ while (size) {
++ switch (ifx_net->rx_state) {
++ case WAIT_IP:
++ pr_debug("wait ip");
++ /* waiting for IP header. */
++ /* wanted bytes - size of ip header */
++ temp_bytes = (size < ifx_net->rx_buf_missing) ?
++ size : ifx_net->rx_buf_missing;
++
++ memcpy(((unsigned char *)(&ifx_net->rx_ip_hdr)) +
++ ifx_net->rx_buf_size,
++ in_buf + buffer_offset,
++ temp_bytes);
++
++ ifx_net->rx_buf_size += temp_bytes;
++ buffer_offset += temp_bytes;
++ ifx_net->rx_buf_missing -= temp_bytes;
++ size -= temp_bytes;
++
++ if (!ifx_net->rx_buf_missing) {
++ /* header is complete allocate an sk_buffer and
++ * continue to WAIT_DATA */
++ pr_debug("header complete");
++ frame_len =
++ ntohs(ifx_net->rx_ip_hdr.tot_len);
++
++ if ((frame_len > IFX_SPI_DEFAULT_MRU) ||
++ (frame_len < sizeof(struct iphdr))) {
++ dev_err(&net->dev,
++ "Invalid frame (%d) length\n",
++ frame_len);
++ ifx_net->rx_state = WAIT_SYNC;
++ continue;
++ }
++ /* Allocate an sk_buff */
++ ifx_net->rx_skb = dev_alloc_skb(frame_len);
++ if (!ifx_net->rx_skb) {
++ /* We got no receive buffer. */
++ ifx_net->rx_state = WAIT_SYNC;
++ return;
++ }
++ /* Here's where it came from */
++ ifx_net->rx_skb->dev = net;
++
++ /* Copy what we got so far. make room for iphdr
++ * after tail. */
++ tmp_rx_buf = skb_put(ifx_net->rx_skb,
++ sizeof(struct iphdr));
++ memcpy(tmp_rx_buf,
++ (char *)&(ifx_net->rx_ip_hdr),
++ sizeof(struct iphdr));
++
++ /* ETH_HLEN */
++ ifx_net->rx_buf_size =
++ sizeof(struct iphdr);
++
++ ifx_net->rx_buf_missing =
++ frame_len - sizeof(struct iphdr);
++ ifx_net->rx_state = WAIT_DATA;
++ }
++ break;
++
++ case WAIT_DATA:
++ pr_debug("wait data");
++ temp_bytes = (size < ifx_net->rx_buf_missing)
++ ? size : ifx_net->rx_buf_missing;
++
++ /* Copy the rest of the bytes that are left in the
++ * buffer into the waiting sk_buf. */
++ /* Make room for temp_bytes after tail. */
++ tmp_rx_buf = skb_put(ifx_net->rx_skb, temp_bytes);
++ memcpy(tmp_rx_buf, in_buf + buffer_offset, temp_bytes);
++
++ ifx_net->rx_buf_missing -= temp_bytes;
++ size -= temp_bytes;
++ buffer_offset += temp_bytes;
++ ifx_net->rx_buf_size += temp_bytes;
++ if (!ifx_net->rx_buf_missing) {
++ /* Packet is complete. Inject into stack. */
++ /* We have IP packet here */
++ ifx_net->rx_skb->protocol =
++ __constant_htons(ETH_P_IP);
++ /* don't check it */
++ ifx_net->rx_skb->ip_summed =
++ CHECKSUM_UNNECESSARY;
++
++ skb_reset_mac_header(ifx_net->rx_skb);
++
++ pr_debug("inject packet");
++ /* Ship it off to the kernel */
++ netif_rx(ifx_net->rx_skb);
++ /* No longer our buffer. */
++ ifx_net->rx_skb = NULL;
++
++				/* update our statistics */
++ STATS(net).rx_packets++;
++ STATS(net).rx_bytes +=
++ ifx_net->rx_buf_size;
++
++ ifx_net->rx_buf_size = 0;
++ ifx_net->rx_buf_missing =
++ sizeof(struct iphdr);
++ ifx_net->rx_state = WAIT_IP;
++ }
++ break;
++
++ case WAIT_SYNC:
++ pr_debug("wait sync");
++ size = 0;
++ break;
++
++ default:
++ size--;
++ break;
++ }
++ }
++
++ if (ifx_net->rx_state == WAIT_SYNC) {
++ ifx_net->rx_state = WAIT_IP;
++ ifx_net->rx_buf_size = 0;
++ ifx_net->rx_buf_missing = sizeof(struct iphdr);
++ }
++}
++
++static int ifx_spi_mux_ld_create_port(struct ifx_spi_mux_device *ifx_dev,
++ int type, int channel, char *name)
++{
++ int retval = 0;
++ struct net_device *net = NULL;
++ struct ifx_spi_mux_serial *ifx_ser = NULL;
++ struct ifx_spi_mux_port_data *port_data = NULL;
++ struct ifx_spi_mux_channel *channel_data = NULL;
++ struct tty_port *pport;
++ int minor = channel;
++ int port_idx = -1;
++ char netname[6];
++
++ pr_debug("%s called (channel:%d)\n", __func__, channel);
++
++ port_data = kzalloc(sizeof(struct ifx_spi_mux_port_data), GFP_ATOMIC);
++ if (!port_data)
++ return -ENOMEM;
++
++ if (!ifx_dev->channel[channel]) {
++ channel_data = kzalloc(sizeof(struct ifx_spi_mux_channel),
++ GFP_ATOMIC);
++ if (!channel_data) {
++ retval = -ENOMEM;
++ goto error_ret;
++ }
++ channel_data->channel_id = channel;
++ channel_data->mux_channel_state = IFX_SPI_MUX_CHANNEL_NONE;
++ init_waitqueue_head(&(channel_data->mux_state_wait));
++ ifx_dev->channel[channel] = channel_data;
++ }
++
++ port_data->spec.enabled = 1;
++ port_data->spec.type = type;
++ strncpy(port_data->spec.name, name,
++ sizeof(port_data->spec.name)-1);
++
++ spin_lock_init(&port_data->fifo_lock);
++ lockdep_set_class_and_subclass(&port_data->fifo_lock,
++ &ifx_spi_key, 0);
++
++ /* common initialization */
++ port_data->spi_itf = ifx_dev;
++ port_data->port_id = channel;
++ if (kfifo_alloc(&port_data->tx_fifo, IFX_SPI_FIFO_SIZE, GFP_ATOMIC)) {
++ retval = -ENOMEM;
++ goto error_ret;
++ }
++
++ /* connect port data to channel here instead of later
++ * to avoid race condition */
++ port_idx = ifx_spi_mux_get_free_port(ifx_dev);
++ if (port_idx < 0) {
++ retval = -ENOMEM;
++ goto error_ret;
++ }
++ ifx_dev->port_data[port_idx] = port_data;
++
++ /* channel type specific initialization */
++ ifx_dev->channel[channel]->type = type;
++ switch (type) {
++ case IFX_SPI_PORT_SPEC_SERIAL:
++ /* serial channel */
++ ifx_dev->channel[channel]->serial_port_data = port_data;
++ if (channel != 0) {
++ pr_debug("connect to tty port");
++ pport = &port_data->type.serial.tty_port;
++ tty_port_init(pport);
++ pport->closing_wait = ASYNC_CLOSING_WAIT_NONE;
++ pport->ops = &ifx_tty_port_ops;
++ ifx_ser = &port_data->type.serial;
++ ifx_ser->minor = channel;
++ ifx_ser->tty_dev = tty_register_device(tty_drv, minor,
++ NULL);
++ if (!ifx_ser->tty_dev) {
++ pr_debug("%s: registering tty device failed\n",
++ __func__);
++ retval = -ENODEV;
++ goto error_ret;
++ }
++#ifdef IFX_THROTTLE_CODE
++ spin_lock_init(&ifx_ser->throttle_fifo_lock);
++ lockdep_set_class_and_subclass(
++ &ifx_ser->throttle_fifo_lock,
++ &ifx_spi_key, 0);
++ if (kfifo_alloc(&ifx_ser->throttle_fifo,
++ IFX_SPI_THROTTLE_FIFO_SIZE,
++ GFP_ATOMIC)) {
++
++ retval = -ENODEV;
++ goto error_ret;
++ }
++#endif /* IFX_THROTTLE_CODE */
++ }
++ break;
++
++ case IFX_SPI_PORT_SPEC_NET:
++ /* network channel */
++ pr_debug("create network port");
++ netname[5] = 0;
++ snprintf(netname, 6, "ifx%02d", channel);
++ net = alloc_netdev(sizeof(struct ifx_spi_mux_port_data *),
++ netname,
++ ifx_spi_mux_net_init);
++ if (!net) {
++ pr_err("alloc_netdev failed");
++ retval = -ENOMEM;
++ goto error_ret;
++ }
++
++ *((struct ifx_spi_mux_port_data **)netdev_priv(net)) =
++ port_data;
++ port_data->type.net.net = net;
++ pr_debug("register netdev");
++ retval = register_netdev(net);
++ if (retval) {
++ pr_err("network port register fail %d\n", retval);
++ goto error_ret;
++ }
++ ifx_dev->channel[channel]->network_port_data = port_data;
++ break;
++ }
++
++ pr_debug("create port complete");
++ return 0;
++
++error_ret:
++ if (port_data) {
++ if (ifx_ser) {
++ if (ifx_ser->tty_dev)
++ tty_unregister_device(tty_drv, ifx_ser->minor);
++#ifdef IFX_THROTTLE_CODE
++ kfifo_free(&ifx_ser->throttle_fifo);
++#endif /* IFX_THROTTLE_CODE */
++ } else if (net) {
++ unregister_netdev(net);
++ free_netdev(net);
++ }
++ kfifo_free(&port_data->tx_fifo);
++ kfree(channel_data);
++ ifx_dev->channel[channel] = NULL;
++ if (port_idx >= 0)
++ ifx_dev->port_data[port_idx] = NULL;
++ }
++
++ return retval;
++}
++
++static int ifx_spi_mux_activate_net_conn(struct ifx_spi_mux_device *ifx_dev,
++ int channel)
++{
++ struct ifx_spi_mux_channel *channel_data;
++ int ret = 0;
++
++ pr_debug("net port creation");
++
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data)
++ return -1;
++
++ /* see if have port data for the network connection already */
++ if (!(channel_data->network_port_data)) {
++ ret = ifx_spi_mux_ld_create_port(ifx_dev, IFX_SPI_PORT_SPEC_NET,
++ channel, "net");
++ if (ret < 0)
++ return ret;
++ }
++
++ channel_data->type = IFX_SPI_PORT_SPEC_NET;
++ /* FIXME -- signal tty that it is blocked */
++
++ return ret;
++}
++
++static int
++ifx_spi_mux_deactivate_net_conn(struct ifx_spi_mux_device *ifx_dev, int channel)
++{
++ struct ifx_spi_mux_channel *channel_data;
++ int ret = 0;
++
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data)
++ return -1;
++
++ channel_data->type = IFX_SPI_PORT_SPEC_SERIAL;
++ /* FIXME -- signal tty that it is unblocked */
++
++ return ret;
++}
++
++/* mux interface routines */
++
++static void gsm0710_debug_message_callback(struct gsm0710_context *ctx,
++ const char *msg)
++{
++ pr_debug("%s\n", msg);
++}
++
++/* get unnumbered ack when channel has been accepted by modem */
++static void gsm0710_deliver_unnumbered_ack(struct gsm0710_context *ctx,
++ int channel)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_channel *channel_data;
++
++ pr_debug("un ack callback %d", channel);
++ IS_CQADD(trace, 0x61);
++
++ ifx_dev = (struct ifx_spi_mux_device *)(ctx->user_data);
++ if (!ifx_dev)
++ return;
++
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data)
++ return;
++
++ if (channel_data->mux_channel_state == IFX_SPI_MUX_CHANNEL_CONNECT) {
++ pr_debug("channel accepted %d", channel);
++ channel_data->mux_channel_state = IFX_SPI_MUX_CHANNEL_ACCEPT;
++ wake_up(&channel_data->mux_state_wait);
++ IS_CQADD(trace, 0x62);
++ } else if (channel_data->mux_channel_state ==
++ IFX_SPI_MUX_CHANNEL_DISCONNECT) {
++
++ pr_debug("channel disconnected %d", channel);
++ channel_data->mux_channel_state = IFX_SPI_MUX_CHANNEL_CLOSED;
++ wake_up(&channel_data->mux_state_wait);
++ IS_CQADD(trace, 0x63);
++ } else {
++ pr_debug("ERROR: unhandled mux state %d",
++ channel_data->mux_channel_state);
++ }
++
++ IS_CQADD(trace, 0x64);
++}
++
++/* pass data back to related tty or network interface */
++static void gsm0710_deliver_data_callback(struct gsm0710_context *ctx,
++ int channel, const void *data, int len)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_port_data *port_data = NULL;
++ struct ifx_spi_mux_channel *channel_data;
++
++ IS_CQADD(trace, 0x40);
++ ifx_dev = (struct ifx_spi_mux_device *)(ctx->user_data);
++ if (!ifx_dev)
++ return;
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data)
++ return;
++
++ pr_debug("deliver demuxed data %d (%d)", channel, len);
++ IS_CQADD(trace, 0x41);
++ switch (channel_data->type) {
++ case IFX_SPI_PORT_SPEC_SERIAL:
++ IS_CQADD(trace, 0x43);
++ port_data = channel_data->serial_port_data;
++ if (!port_data)
++ return;
++ ifx_spi_mux_tty_insert_flip_string(&port_data->type.serial,
++ (unsigned char *)data, (size_t)len);
++ break;
++ case IFX_SPI_PORT_SPEC_NET:
++ IS_CQADD(trace, 0x42);
++ port_data = channel_data->network_port_data;
++ if (!port_data)
++ return;
++ ifx_spi_mux_rx_netchar(&port_data->type.net,
++ (unsigned char *)data, len);
++ break;
++ }
++ IS_CQADD(trace, 0x44);
++}
++
++static void ifx_spi_mux_write_wakeup_callback(struct work_struct *_work)
++{
++ struct ifx_spi_mux_device *ifx_dev =
++ container_of(_work, struct ifx_spi_mux_device,
++ write_wakeup_work);
++
++ pr_debug("prepare tx buffer in write_wakeup callback queue");
++ ifx_spi_mux_prepare_tx_buffer(ifx_dev);
++}
++
++static void ifx_spi_mux_perform_status_callback(struct work_struct *_work)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_channel *channel_data;
++ struct ifx_spi_mux_port_data *port_data;
++ int prev_status;
++ int prev_active_status;
++ int channel;
++ int status;
++ int passthrough;
++
++ IS_CQADD(trace, 0x90);
++ pr_debug("status callback workqueue");
++
++ /* note this uses global work variable */
++ ifx_dev = work.ifx_dev;
++ channel = work.channel;
++ status = work.status;
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data)
++ return;
++ pr_debug("status callback workqueue %d, %x", channel,
++ status);
++ pr_debug("channel_data->mux_channel_state = %x",
++ channel_data->mux_channel_state);
++ IS_CQADD(trace, 0x91);
++ port_data = channel_data->serial_port_data;
++
++ /* see if this channel is just being set up */
++ passthrough = channel_data->passthrough;
++ prev_status = channel_data->signal_state;
++ prev_active_status = channel_data->previous_active_signal_state;
++ pr_debug("pass %x, prev status %x, prev active status %x",
++ passthrough, prev_status, prev_active_status);
++ channel_data->signal_state = status;
++ if (channel_data->mux_channel_state == IFX_SPI_MUX_CHANNEL_ACCEPT) {
++ if ((status & GSM0710_RTC) && (status & GSM0710_RTR)) {
++ channel_data->mux_channel_state =
++ IFX_SPI_MUX_CHANNEL_ACTIVE;
++ IS_CQADD(trace, 0x92);
++ pr_debug("wakeup channel %d", channel);
++ wake_up(&channel_data->mux_state_wait);
++ IS_CQADD(trace, 0x93);
++ }
++ } else if (channel_data->mux_channel_state ==
++ IFX_SPI_MUX_CHANNEL_ACTIVE) {
++ IS_CQADD(trace, 0x94);
++ if (status & GSM0710_FC) {
++ pr_debug("channel blocked %d", channel);
++ if (port_data) {
++ set_bit(IFX_SPI_RX_FC,
++ &port_data->signal_state);
++ set_bit(IFX_SPI_UPDATE,
++ &port_data->signal_state);
++ }
++ channel_data->previous_active_signal_state =
++ prev_status;
++ /* indicate channel blocked */
++ } else if ((status & GSM0710_RTC) && (status & GSM0710_RTR)) {
++ pr_debug("channel open %d", channel);
++ if (port_data) {
++ clear_bit(IFX_SPI_RX_FC,
++ &port_data->signal_state);
++ set_bit(IFX_SPI_UPDATE,
++ &port_data->signal_state);
++ }
++ if (!(prev_active_status & GSM0710_DCD) &&
++ (status & GSM0710_DCD) && !passthrough) {
++
++ /* new network connection */
++ ifx_spi_mux_activate_net_conn(ifx_dev,
++ channel);
++ } else if ((prev_active_status & GSM0710_DCD) &&
++ !(status & GSM0710_DCD)) {
++
++ /* remove current network connection */
++ ifx_spi_mux_deactivate_net_conn(ifx_dev,
++ channel);
++ }
++ }
++ }
++ IS_CQADD(trace, 0x95);
++}
++
++static void gsm0710_deliver_status_callback(struct gsm0710_context *ctx,
++ int channel, int status)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_channel *channel_data;
++
++ pr_debug("channel status callback %d: %x", channel, status);
++ IS_CQADD(trace, 0x81);
++
++ ifx_dev = (struct ifx_spi_mux_device *)(ctx->user_data);
++ channel_data = ifx_dev->channel[channel];
++ if (!channel_data)
++ return;
++
++ /* set up global variable for work queue callback */
++ work.ifx_dev = ifx_dev;
++ work.channel = channel;
++ work.status = status;
++
++ IS_CQADD(trace, 0x82);
++ schedule_work(&ifx_dev->status_work);
++ IS_CQADD(trace, 0x83);
++}
++
++/* get data from lower half of driver, put into context buffer */
++static int gsm0710_read_callback(struct gsm0710_context *ctx, void *data,
++ int len)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ int length;
++
++ if (!ctx->user_data)
++ return -ENODEV;
++
++ ifx_dev = (struct ifx_spi_mux_device *)(ctx->user_data);
++ length = ifx_dev->rx_count;
++ pr_debug("read callback %d (%d)\n", len, length);
++ /* FIXME -- too much data; put the rest someplace, tell modem STFU */
++ if (length > len)
++ length = len;
++ memcpy((char *)data, ifx_dev->rx_buffer, length);
++#ifdef IFX_SPI_MUX_DEBUG
++ {
++ int i;
++ for (i = 0; i < length; i++)
++ pr_debug("%x %c", ((unsigned char *)data)[i],
++ ((unsigned char *)data)[i]);
++ }
++#endif
++ return length;
++}
++
++/* deliver mux packet to ifx write interface */
++int gsm0710_write_callback(struct gsm0710_context *ctx, int channel,
++ const void *data, int len)
++{
++ int tx_count = 0;
++ int room;
++ struct ifx_spi_mux_device *ifx_dev =
++ (struct ifx_spi_mux_device *)ctx->user_data;
++ struct ifx_spi_mux_channel *channel_data =
++ ifx_dev->channel[channel];
++ struct ifx_spi_mux_port_data *port_data = NULL;
++ struct tty_struct *tty = NULL;
++ unsigned long irq_flags;
++
++ pr_debug("%s called (channel:%d)\n", __func__, channel);
++ IS_CQADD(trace, 0x50);
++
++ switch (channel_data->type) {
++ case IFX_SPI_PORT_SPEC_SERIAL:
++ port_data = channel_data->serial_port_data;
++ break;
++ case IFX_SPI_PORT_SPEC_NET:
++ port_data = channel_data->network_port_data;
++ break;
++ }
++ if (!port_data) {
++ pr_err("%s: port data not found", __func__);
++ return -ENODEV;
++ }
++ tx_count = kfifo_in_locked(&port_data->tx_fifo, (unsigned char *)data,
++ len, &port_data->fifo_lock);
++ total_tty_write += tx_count;
++ pr_debug("Write: wrote %d bytes in fifo (total = %d)\n",
++ tx_count, total_tty_write);
++
++ IS_CQADD(trace, 0x51);
++ spin_lock_irqsave(&ifx_dev->write_lock, irq_flags);
++ /* puts outgoing data into tx_buffer[0] for now */
++ tx_count = ifx_spi_mux_prepare_tx_buffer(ifx_dev);
++
++ /* send data to tty */
++ tty = ifx_dev->tty;
++ if (tty == NULL) {
++ spin_unlock_irqrestore(&ifx_dev->write_lock, irq_flags);
++ pr_err("%s: no tty", __func__);
++ return -ENODEV;
++ }
++ IS_CQADD(trace, 0x52);
++ room = tty_write_room(tty);
++ WARN(room < tx_count, "%s: dropped %d bytes - only room for %d",
++ __func__, tx_count, room);
++ if (room >= tx_count)
++ tty->ops->write(tty, ifx_dev->tx_buffer[0], tx_count);
++
++ IS_CQADD(trace, 0x53);
++ spin_unlock_irqrestore(&ifx_dev->write_lock, irq_flags);
++
++ return 1;
++}
++
++static void ifx_spi_mux_setup_mux_context(struct gsm0710_context *ctx)
++{
++ pr_debug("%s called\n", __func__);
++
++ gsm0710_initialize(ctx);
++
++ ctx->mode = GSM0710_MODE_BASIC;
++
++ ctx->debug_message = gsm0710_debug_message_callback;
++ ctx->deliver_data = gsm0710_deliver_data_callback;
++ ctx->write = gsm0710_write_callback;
++ ctx->read = gsm0710_read_callback;
++ ctx->deliver_status = gsm0710_deliver_status_callback;
++ ctx->deliver_unnumbered_ack = gsm0710_deliver_unnumbered_ack;
++
++#if 0
++ ctx->at_command = gsm0710_at_command_callback;
++ ctx->open_channel = open_channel_callback;
++ ctx->close_channel = close_channel_callback;
++ ctx->terminate = terminate_callback;
++ ctx->response_to_test = response_to_test_callback;
++#endif
++}
++
++int ifx_spi_mux_ctx_init(struct ifx_spi_mux_device *ifx_dev)
++{
++ pr_debug("%s called\n", __func__);
++
++ ifx_spi_mux_setup_mux_context(&mux_ctx);
++ mux_ctx.user_data = ifx_dev;
++
++ return 0;
++}
++
++/* module exit point */
++static void __exit ifx_spi_mux_exit(void)
++{
++ int ret;
++
++ pr_debug("%s called\n", __func__);
++ ret = tty_unregister_ldisc(N_IFX_SPI);
++ if (ret)
++ BUG_ON(1);
++ ret = tty_unregister_driver(tty_drv);
++ if (ret)
++ BUG_ON(1);
++}
++
++static void ifx_ld_open_fn(struct work_struct *_work)
++{
++ struct ifx_spi_mux_device *ifx_dev =
++ container_of(_work, struct ifx_spi_mux_device, ld_open_work);
++ int ret;
++ int i;
++
++ /* open channel 0 */
++ pr_debug("open mux channel 0");
++ ret = ifx_spi_mux_ld_create_port(ifx_dev, IFX_SPI_PORT_SPEC_SERIAL, 0,
++ "CH00");
++ if (!ret)
++ ret = _ifx_spi_mux_open(0);
++ if (ret)
++ return;
++ IS_CQADD(trace, 0xb1);
++
++ /* create mux channel ttys */
++ for (i = 1; i < IFX_NUM_DEFAULT_CHANNELS; i++) {
++ char chname[16];
++ snprintf(chname, sizeof(chname), "CH%02d", i);
++ ifx_spi_mux_ld_create_port(ifx_dev, IFX_SPI_PORT_SPEC_SERIAL,
++ i, chname);
++ }
++ IS_CQADD(trace, 0xb2);
++}
++
++
++/* called when ld is attached to parent tty */
++static int n_ifx_tty_open(struct tty_struct *tty)
++{
++ int i;
++ int ret;
++ struct ifx_spi_mux_device *ifx_dev;
++
++ IS_CQADD(trace, 0xb0);
++ pr_debug("%s called (ldisc open) [tty:%p]",
++ __func__, tty);
++
++ ifx_dev = kzalloc(sizeof(struct ifx_spi_mux_device), GFP_ATOMIC);
++ if (!ifx_dev)
++ return -ENOMEM;
++
++ saved_ifx_dev = ifx_dev;
++ ifx_dev->tty = tty_kref_get(tty);
++
++ /* init ifx_dev */
++ kref_init(&ifx_dev->ref);
++ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
++
++ spin_lock_init(&ifx_dev->write_lock);
++
++ /* required due to kmalloc used in network interface creation */
++ INIT_WORK(&(ifx_dev->status_work),
++ (work_func_t)ifx_spi_mux_perform_status_callback);
++
++ /* required to perform write wakeup callback */
++ INIT_WORK(&(ifx_dev->write_wakeup_work),
++ (work_func_t)ifx_spi_mux_write_wakeup_callback);
++
++ /* required to open CH0 and create tty channel device nodes */
++ INIT_WORK(&(ifx_dev->ld_open_work), (work_func_t)ifx_ld_open_fn);
++
++	/* indicate mux is off now */
++ ifx_dev->mux_on = IFX_MUX_OFF;
++
++ /* ensure SPI protocol 'more' flag is off */
++ ifx_dev->spi_more = 0;
++
++ /*initialize transfer and dma buffers */
++ for (i = 0; i < 2; i++) {
++ ifx_dev->tx_buffer[i] =
++ kzalloc(IFX_SPI_TRANSFER_SIZE, GFP_KERNEL | GFP_DMA);
++ if (0 == ifx_dev->tx_buffer[i]) {
++ pr_err("%s: DMA-TX[%d] buffer allocation failed\n",
++ DRVNAME, i);
++ ret = -EIO;
++ goto error_ret;
++ }
++ }
++
++ /* set up mux context */
++ ifx_spi_mux_ctx_init(ifx_dev);
++
++ set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
++ tty->disc_data = (void *)ifx_dev;
++ tty->receive_room = 65536;
++
++ schedule_work(&ifx_dev->ld_open_work);
++
++ return 0;
++
++error_ret:
++ if (ifx_dev) {
++ if (ifx_dev->tty)
++ tty_kref_put(ifx_dev->tty);
++ ifx_spi_mux_free_device(&ifx_dev->ref);
++ }
++ return ret;
++}
++
++static void
++_close1_channel(struct ifx_spi_mux_device *ifx_dev, int channel)
++{
++ int ret;
++ struct ifx_spi_mux_channel *channel_data;
++
++ if (gsm0710_is_channel_open(&mux_ctx, channel)) {
++ pr_debug("closing channel %d%s", channel,
++ channel == 0 ? " (mux)" : "");
++ channel_data = ifx_dev->channel[channel];
++ channel_data->mux_channel_state =
++ IFX_SPI_MUX_CHANNEL_DISCONNECT;
++ gsm0710_close_channel(&mux_ctx, channel);
++ ret = wait_event_timeout(channel_data->mux_state_wait,
++ (channel_data->mux_channel_state ==
++ IFX_SPI_MUX_CHANNEL_CLOSED),
++ IFX_SPI_STATUS_TIMEOUT);
++
++ if (!ret)
++ pr_debug("channel %d close timed out (state %x)",
++ channel, channel_data->mux_channel_state);
++ else
++ pr_debug("channel %d closed", channel);
++ if (channel == 0)
++ ifx_dev->mux_on = IFX_MUX_OFF;
++ }
++}
++
++static void
++_close_channels(struct ifx_spi_mux_device *ifx_dev)
++{
++ int channel;
++ for (channel = IFX_SPI_PORT_PER_DEV-1; channel >= 0; --channel)
++ _close1_channel(ifx_dev, channel);
++}
++
++static void n_ifx_tty_close(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++
++ pr_debug("%s called (ldisc close) [tty:%p]",
++ __func__, tty);
++ ifx_dev = (struct ifx_spi_mux_device *)tty->disc_data;
++
++ _close_channels(ifx_dev);
++
++ if (ifx_dev->tty)
++ tty_kref_put(ifx_dev->tty);
++ kref_put(&ifx_dev->ref, ifx_spi_mux_free_device);
++ tty->disc_data = NULL;
++}
++
++#if 0
++static void n_ifx_tty_flush_buffer(struct tty_struct *tty)
++{
++ pr_debug("%s called\n", __func__);
++ IS_CQADD(trace, 0xa0);
++}
++
++static ssize_t n_ifx_tty_chars_in_buffer(struct tty_struct *tty)
++{
++ return 0;
++}
++
++/* reading data directly from ld is not allowed -- use ttys */
++static ssize_t n_ifx_tty_read(struct tty_struct *tty, struct file *file,
++ unsigned char __user *buf, size_t nr)
++{
++ pr_debug("%s called\n", __func__);
++ IS_CQADD(trace, 0xe0);
++ return -EOPNOTSUPP;
++}
++
++/* writing data directly to mux is not allowed -- use ttys */
++static ssize_t n_ifx_tty_write(struct tty_struct *tty, struct file *file,
++ const unsigned char __user *buf, size_t nr)
++{
++ int space;
++
++ pr_debug("%s called\n", __func__);
++ IS_CQADD(trace, 0x80);
++ return -EOPNOTSUPP;
++}
++#endif /* 0 */
++
++static int n_ifx_tty_ioctl(struct tty_struct *tty, struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ pr_debug("%s called (cmd:%x)\n", __func__, cmd);
++ return n_tty_ioctl_helper(tty, file, cmd, arg);
++}
++
++static unsigned int n_ifx_tty_poll(struct tty_struct *tty, struct file *file,
++ struct poll_table_struct *wait)
++{
++ unsigned int mask = 0;
++
++ pr_debug("%s called\n", __func__);
++
++ poll_wait(file, &tty->read_wait, wait);
++ poll_wait(file, &tty->write_wait, wait);
++ /* check for SPI timeout -- hangup? */
++ return mask;
++}
++
++static void
++n_ifx_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
++ char *fp, int count)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++
++ pr_debug("%s called\n", __func__);
++ IS_CQADD(trace, 0x90);
++
++ ifx_dev = (struct ifx_spi_mux_device *)tty->disc_data;
++ ifx_dev->rx_buffer = (unsigned char *)cp;
++ ifx_dev->rx_count = count;
++
++#ifdef IFX_SPI_MUX_DEBUG
++ do {
++ int i;
++ for (i = 0; i < count; i++)
++ pr_debug("%x %c", cp[i], cp[i]);
++ } while (0);
++#endif
++ gsm0710_ready_read(&mux_ctx);
++}
++
++static void n_ifx_tty_write_wakeup(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++
++ pr_debug("%s called\n", __func__);
++
++ IS_CQADD(trace, 0xd0);
++ ifx_dev = (struct ifx_spi_mux_device *)tty->disc_data;
++ schedule_work(&ifx_dev->write_wakeup_work);
++ IS_CQADD(trace, 0xd1);
++}
++
++static int n_ifx_tty_hangup(struct tty_struct *tty)
++{
++ struct ifx_spi_mux_device *ifx_dev;
++ struct ifx_spi_mux_port_data *port_data;
++ struct tty_port *pport;
++ int i;
++
++ pr_debug("%s called (tty:%p)\n",
++ __func__, tty);
++ IS_CQADD(trace, 0xc0);
++
++ ifx_dev = (struct ifx_spi_mux_device *)tty->disc_data;
++ if (!ifx_dev) {
++ pr_debug("disc_data empty");
++ return 0;
++ }
++
++ _close_channels(ifx_dev);
++
++ for (i = 0; i < IFX_SPI_MAX_PORTS; i++) {
++ port_data = ifx_dev->port_data[i];
++ if (!port_data)
++ continue;
++ if (port_data->port_id == 0)
++ continue;
++ if (port_data->spec.type == IFX_SPI_PORT_SPEC_SERIAL) {
++ pport = &port_data->type.serial.tty_port;
++ pr_debug("hangup channel %d serial port (tty %p)",
++ port_data->port_id,
++ pport->tty);
++ tty_port_hangup(pport);
++ }
++ }
++
++ return 0;
++}
++
++struct tty_ldisc_ops tty_ldisc_operations = {
++ .owner = THIS_MODULE,
++ .magic = TTY_LDISC_MAGIC,
++ .name = "ifx_spi",
++ .open = n_ifx_tty_open,
++ .close = n_ifx_tty_close,
++#if 0
++ .flush_buffer = n_ifx_tty_flush_buffer,
++ .chars_in_buffer = n_ifx_tty_chars_in_buffer,
++ .read = n_ifx_tty_read,
++ .write = n_ifx_tty_write,
++#endif /* 0 */
++ .ioctl = n_ifx_tty_ioctl,
++ .poll = n_ifx_tty_poll,
++ .hangup = n_ifx_tty_hangup,
++ .receive_buf = n_ifx_tty_receive_buf,
++ .write_wakeup = n_ifx_tty_write_wakeup
++};
++
++/* module entry point */
++static int __init ifx_spi_mux_init(void)
++{
++ int result = 0;
++#ifdef IFX_DO_TRACE
++ dma_addr_t index_handle = 0;
++ dma_addr_t array_handle = 0;
++#endif
++ /*
++ initialize upper-edge spi driver. needs to be done after tty
++ initialization because the spi probe will
++ race
++ */
++
++ pr_info("%s called\n", __func__);
++
++#ifdef IFX_DO_TRACE
++ trace_index = (unsigned char *)dma_alloc_coherent(NULL,
++ 1, &index_handle,
++ GFP_DMA | GFP_ATOMIC | GFP_KERNEL);
++ trace_array = (unsigned char *)dma_alloc_coherent(NULL,
++ TRACE_LENGTH, &array_handle,
++ GFP_DMA | GFP_ATOMIC | GFP_KERNEL);
++
++ pr_debug("IFX-MUX trace index = %px, %px",
++ (void *)index_handle, (void *)trace_index);
++ pr_debug("IFX-MUX trace array = %px, %px",
++ (void *)array_handle, (void *)trace_array);
++
++ memset(trace_array, 0, TRACE_LENGTH);
++ *trace_index = 0;
++#endif
++
++ memset(ifx_spi_termios, 0, sizeof(ifx_spi_termios));
++ memset(ifx_spi_termios_locked, 0, sizeof(ifx_spi_termios_locked));
++
++ result = tty_register_ldisc(N_IFX_SPI, &tty_ldisc_operations);
++ if (result != 0) {
++ pr_err("%s: can't register line discipline (err = %d)",
++ DRVNAME, result);
++ return result;
++ }
++
++ /* initialize lower-edge tty driver */
++ tty_drv = alloc_tty_driver(IFX_SPI_MAX_MINORS);
++ if (!tty_drv) {
++ pr_err("%s: alloc_tty_driver failed", DRVNAME);
++ return -ENOMEM;
++ }
++
++ tty_drv->magic = TTY_DRIVER_MAGIC;
++ tty_drv->owner = THIS_MODULE;
++ tty_drv->driver_name = DRVNAME;
++ tty_drv->name = TTYNAME;
++ tty_drv->minor_start = 0;
++ tty_drv->num = IFX_SPI_MAX_MINORS;
++ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
++ tty_drv->subtype = SERIAL_TYPE_NORMAL;
++ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
++ tty_drv->init_termios = tty_std_termios;
++ tty_drv->termios = ifx_spi_termios;
++ tty_drv->termios_locked = ifx_spi_termios_locked;
++
++ _ifx_spi_mux_set_termios(&tty_drv->init_termios, NULL);
++ tty_set_operations(tty_drv, &ifx_spi_serial_ops);
++
++ result = tty_register_driver(tty_drv);
++ if (result) {
++ pr_err("%s: tty_register_driver failed(%d)", DRVNAME, result);
++ return result;
++ }
++
++ IS_CQADD(trace, 1);
++
++ pr_debug("driver initialized successfully\n");
++ return 0;
++}
++
++module_init(ifx_spi_mux_init);
++module_exit(ifx_spi_mux_exit);
++
++MODULE_AUTHOR("Intel");
++MODULE_DESCRIPTION("IFX6160 spi mux driver");
++MODULE_LICENSE("GPL");
++MODULE_INFO(Version, "0.1-IFX6160");
+--- /dev/null
++++ b/drivers/staging/ifx-mux/ifx_spi_mux.h
+@@ -0,0 +1,219 @@
++/****************************************************************************
++ *
++ * Driver for the IFX spi modem.
++ *
++ * Copyright (C) 2009 Intel Corp
++ * Jim Stanley <jim.stanley@intel.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ * %START upstream-patch-readiness
++ * unifdef test/debug/temp code
++ * UI -UIFX_THROTTLE_CODE
++ * remove all lines containing
++ * remove all comments containing
++ * RC RRG:
++ * %END
++ *
++ *****************************************************************************/
++#ifndef _IFX_SPI_H
++#define _IFX_SPI_H
++
++#include "gsm0710.h" /* for default MTU */
++
++#define DRVNAME "ifx_spi_mux"
++#define TTYNAME "ttyGSM"
++
++/*
++ * RRG: IFX_THROTTLE_CODE the throttle code - probably not needed
++ * will be removed before final release
++ */
++/* #define IFX_THROTTLE_CODE */
++
++/*
++ * GSM0710_MAX_CHANNELS does not include the mux control channel
++ * but IFX_SPI_PORT_PER_DEV/IFX_SPI_MAX_MINORS do
++ * each channel (except for the mux control channel) can have 2 ports,
++ * a serial and a network port.
++ */
++#define IFX_SPI_PORT_PER_DEV (GSM0710_MAX_CHANNELS+1)
++#define IFX_SPI_MAX_MINORS IFX_SPI_PORT_PER_DEV
++#define IFX_SPI_MAX_PORTS (2*IFX_SPI_PORT_PER_DEV-2)
++#define IFX_SPI_TRANSFER_SIZE 2048
++#ifdef IFX_THROTTLE_CODE
++#define IFX_SPI_THROTTLE_FIFO_SIZE 4096
++#endif /* IFX_THROTTLE_CODE */
++#define IFX_SPI_FIFO_SIZE 4096
++
++#define IFX_SPI_LONG_MUX_OVERHEAD 7
++#define IFX_SPI_SHORT_MUX_OVERHEAD 6
++#define IFX_SPI_TRAILING_MUX_OVERHEAD 2
++#define IFX_SPI_HEADER_OVERHEAD 4
++
++/* device flags bitfield definitions */
++#define IFX_SPI_STATE_PRESENT 0
++#define IFX_SPI_STATE_IO_IN_PROGRESS 1
++#define IFX_SPI_STATE_IO_READY 2
++#define IFX_SPI_STATE_TIMER_PENDING 3
++
++/* flow control bitfields */
++#define IFX_SPI_DCD 0
++#define IFX_SPI_CTS 1
++#define IFX_SPI_DSR 2
++#define IFX_SPI_RI 3
++#define IFX_SPI_DTR 4
++#define IFX_SPI_RTS 5
++#define IFX_SPI_TX_FC 6
++#define IFX_SPI_RX_FC 7
++#define IFX_SPI_UPDATE 8
++
++#define IFX_SPI_PAYLOAD_SIZE (IFX_SPI_TRANSFER_SIZE - \
++ IFX_SPI_HEADER_OVERHEAD)
++#define IFX_SPI_DEFAULT_MTU GSM0710_DEFAULT_FRAME_SIZE
++#define IFX_SPI_DEFAULT_MRU IFX_SPI_DEFAULT_MTU
++#define IFX_SPI_NET_TX_TIMEOUT (HZ*10)
++
++#define IFX_MUX_ON 1
++#define IFX_MUX_OFF 0
++
++#define IFX_MUX_ENTRY_NONE 0
++#define IFX_MUX_ENTRY_START 1
++#define IFX_MUX_ENTRY_ATCMUX 2
++#define IFX_MUX_ENTRY_OK 3
++
++#define IFX_SPI_IRQ_TYPE DETECT_EDGE_RISING
++#define IFX_SPI_GPIO_TARGET 0
++#define IFX_SPI_GPIO0 0x105
++
++#define IFX_SPI_PORT_SPEC_NONE 0
++#define IFX_SPI_PORT_SPEC_SERIAL 1
++#define IFX_SPI_PORT_SPEC_NET 2
++
++#define IFX_SPI_MAX_CHANNEL_NAME 16
++
++#define IFX_SPI_NET_STATE_NONE 0
++#define IFX_SPI_NET_STATE_START 1
++#define IFX_SPI_NET_STATE_OK 2
++#define IFX_SPI_NET_STATE_CONNECT 3
++#define IFX_SPI_NET_STATE_COMPLETE 4
++#define IFX_SPI_NET_STATE_ERROR 5
++
++#define IFX_SPI_MUX_CHANNEL_NONE 0
++#define IFX_SPI_MUX_CHANNEL_CONNECT 1
++#define IFX_SPI_MUX_CHANNEL_ACCEPT 2
++#define IFX_SPI_MUX_CHANNEL_DENY 3
++#define IFX_SPI_MUX_CHANNEL_ACTIVE 4
++#define IFX_SPI_MUX_CHANNEL_INACTIVE 5
++#define IFX_SPI_MUX_CHANNEL_DISCONNECT 6
++#define IFX_SPI_MUX_CHANNEL_CLOSED 7
++
++#define IFX_SPI_STATUS_TIMEOUT msecs_to_jiffies(5*1000)
++
++struct ifx_spi_mux_port_spec {
++ int enabled;
++ int type;
++ int channel;
++ char name[16];
++};
++
++struct ifx_spi_mux_device {
++ struct tty_struct *tty;
++ struct kref ref;
++ struct ifx_spi_mux_port_data *port_data[IFX_SPI_MAX_PORTS];
++ struct ifx_spi_mux_channel *channel[IFX_SPI_PORT_PER_DEV];
++ unsigned long flags;
++ int mux_on;
++ spinlock_t write_lock;
++ unsigned char *rx_buffer;
++ int rx_count;
++ unsigned char *tx_buffer[2];
++ unsigned char spi_more;
++ struct work_struct status_work;
++ struct work_struct write_wakeup_work;
++ struct work_struct ld_open_work;
++ struct spi_transfer spi_xfer;
++ int round_robin_index;
++
++};
++
++struct ifx_spi_mux_serial {
++ struct tty_port tty_port;
++ struct device *tty_dev;
++#ifdef IFX_THROTTLE_CODE
++ struct kfifo throttle_fifo;
++ spinlock_t throttle_fifo_lock;
++#endif /* IFX_THROTTLE_CODE */
++ int minor;
++ struct file *filp;
++};
++
++enum rx_parse_state {
++ syncing,
++ getting_frame_len,
++ filling_skb,
++ WAIT_IP,
++ WAIT_DATA,
++ WAIT_SYNC
++};
++
++#undef NETDEVICE_HAS_STATS
++
++struct ifx_spi_mux_net {
++ enum rx_parse_state rx_state;
++ struct sk_buff *tx_skb;
++ struct sk_buff *rx_skb;
++ struct net_device *net;
++ unsigned short rx_buf_size;
++ unsigned short rx_buf_missing;
++ struct iphdr rx_ip_hdr;
++#ifndef NETDEVICE_HAS_STATS
++ struct net_device_stats stats;
++#endif
++};
++
++#ifdef NETDEVICE_HAS_STATS
++#define STATS(net) ((net)->stats)
++#else
++#define STATS(net) (((struct ifx_spi_mux_port_data *) \
++ net_to_ifx_spi_data(net))->type.net.stats)
++#endif
++
++struct ifx_spi_mux_port_data {
++ struct ifx_spi_mux_device *spi_itf;
++ int port_id;
++ struct ifx_spi_mux_port_spec spec;
++ struct kfifo tx_fifo;
++ spinlock_t fifo_lock;
++ unsigned long signal_state;
++ union {
++ struct ifx_spi_mux_serial serial;
++ struct ifx_spi_mux_net net;
++ } type;
++};
++
++struct ifx_spi_mux_channel {
++ int channel_id;
++ int type;
++ int signal_state;
++ int previous_active_signal_state;
++ int mux_channel_state;
++ int passthrough;
++ wait_queue_head_t mux_state_wait;
++ struct ifx_spi_mux_port_data *serial_port_data;
++ struct ifx_spi_mux_port_data *network_port_data;
++};
++
++#endif /* _IFX_SPI_H */
+--- /dev/null
++++ b/drivers/staging/ifx-mux/ifx_spi_mux_ioctl.h
+@@ -0,0 +1,57 @@
++/****************************************************************************
++ *
++ * Driver IOCTLs for the IFX spi modem.
++ *
++ * Copyright (C) 2009 Intel Corp
++ * Jim Stanley <jim.stanley@intel.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ *
++ *
++ *****************************************************************************/
++#ifndef _IFX_SPI_IOCTL_H
++#define _IFX_SPI_IOCTL_H
++
++#include <linux/ioctl.h>
++
++#define IFX_SPI_MAGIC 0x77
++
++/* parameters for ioctl arguments, also used in driver code */
++#define IFX_SPI_PORT_SPEC_SERIAL 1
++#define IFX_SPI_PORT_SPEC_NET 2
++#define IFX_SPI_MAX_CHANNEL_NAME 16
++
++#define IFX_SPI_CREATECHANNEL _IOW(IFX_SPI_MAGIC, 1, int)
++#define IFX_SPI_DELETECHANNEL _IOW(IFX_SPI_MAGIC, 2, int)
++
++#define IFX_SPI_STARTMUX _IOW(IFX_SPI_MAGIC, 3, int)
++#define IFX_SPI_STOPMUX _IO(IFX_SPI_MAGIC, 4)
++
++#define IFX_SPI_NETPASSTHROUGH _IOW(IFX_SPI_MAGIC, 5, int)
++
++struct channel_arg {
++ int type;
++ int channel;
++ char chname[IFX_SPI_MAX_CHANNEL_NAME];
++};
++
++struct passthrough_arg {
++ int channel;
++ int passthrough;
++};
++
++#endif
+--- a/drivers/staging/memrar/TODO
++++ b/drivers/staging/memrar/TODO
+@@ -1,7 +1,7 @@
+ RAR Handler (memrar) Driver TODO Items
+ ======================================
+
+-Maintainer: Ossama Othman <ossama.othman@intel.com>
++Maintainer: Eugene Epshteyn <eugene.epshteyn@intel.com>
+
+ memrar.h
+ --------
+--- a/drivers/staging/memrar/memrar-abi
++++ b/drivers/staging/memrar/memrar-abi
+@@ -1,7 +1,7 @@
+ What: /dev/memrar
+ Date: March 2010
+-KernelVersion: Kernel version this feature first showed up in.
+-Contact: Ossama Othman <ossama.othman@intel.com>
++KernelVersion: 2.6.34
++Contact: Eugene Epshteyn <eugene.epshteyn@intel.com>
+ Description: The Intel Moorestown Restricted Access Region (RAR)
+ Handler driver exposes an ioctl() based interface that
+ allows a user to reserve and release blocks of RAR
+--- a/drivers/staging/memrar/memrar_handler.c
++++ b/drivers/staging/memrar/memrar_handler.c
+@@ -279,15 +279,6 @@
+ BUG_ON(!memrar_is_valid_rar_type(rarnum));
+ BUG_ON(rar->allocated);
+
+- mutex_init(&rar->lock);
+-
+- /*
+- * Initialize the process table before we reach any
+- * code that exit on failure since the finalization
+- * code requires an initialized list.
+- */
+- INIT_LIST_HEAD(&rar->buffers.list);
+-
+ if (rar_get_address(rarnum, &low, &high) != 0)
+ /* No RAR is available. */
+ return -ENODEV;
+@@ -940,9 +931,28 @@
+ static int __init memrar_init(void)
+ {
+ int err;
++ int i;
+
+ printk(banner);
+
++ /*
++ * Some delayed initialization is performed in this driver.
++ * Make sure resources that are used during driver clean-up
++ * (e.g. during driver's release() function) are fully
++ * initialized before first use. This is particularly
++ * important for the case when the delayed initialization
++ * isn't completed, leaving behind a partially initialized
++ * driver.
++ *
++ * Such a scenario can occur when RAR is not available on the
++ * platform, and the driver is release()d.
++ */
++ for (i = 0; i != ARRAY_SIZE(memrars); ++i) {
++ struct memrar_rar_info * const rar = &memrars[i];
++ mutex_init(&rar->lock);
++ INIT_LIST_HEAD(&rar->buffers.list);
++ }
++
+ err = misc_register(&memrar_miscdev);
+ if (err)
+ return err;
+--- /dev/null
++++ b/drivers/staging/mfld-sensors/Kconfig
+@@ -0,0 +1,24 @@
++menuconfig MFLD_SENSORS
++ bool "Intel Medfield Sensor Device Drivers"
++ default y
++ ---help---
++ Say Y here to get to see options for device drivers for various
++ sensor devices on Medfiled.
++ This option alone does not add any kernel code.
++
++ If you say N, all options in this submenu will be skipped and disabled.
++
++if MFLD_SENSORS
++config SENSORS_AK8974COMPASS
++ tristate "Ak8974 Compass Module"
++ depends on I2C
++ help
++ To get Compass Sensor output from AK8974 sensor.
++
++config SENSORS_APDS9802PSPROXIMITY
++ tristate "Medfield Avago APDS9802 Proximity Sensor Module"
++ depends on I2C
++ help
++ To get Proximity Sensor output from APDS9802
++ Proximity sensor.
++endif #MFLD_SENSORS
+--- /dev/null
++++ b/drivers/staging/mfld-sensors/Makefile
+@@ -0,0 +1,6 @@
++#
++# Makefile for linux/drivers/staging/mfld-sensors
++# Medfield sensor drivers
++#
++obj-$(CONFIG_SENSORS_AK8974COMPASS) += ak8974.o
++obj-$(CONFIG_SENSORS_APDS9802PSPROXIMITY) += apds9802ps.o
+--- /dev/null
++++ b/drivers/staging/mfld-sensors/ak8974.c
+@@ -0,0 +1,250 @@
++/*
++ * ak8974.c - AKEMD Compass Driver
++ *
++ * Copyright (C) 2010 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * this program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * this program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS for A PARTICULAR PURPOSE. See the GNU
++ * General public License for more details.
++ *
++ * You should have received a copy of the GNU General public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/hwmon-sysfs.h>
++
++MODULE_AUTHOR("KalhanTrisal,Anantha Narayanan<anantha.narayanan@intel.com>");
++MODULE_DESCRIPTION("ak8974 Compass Driver");
++MODULE_LICENSE("GPL v2");
++
++/* register address */
++#define DEVICE_ID 0x0F
++#define ADDR_TMPS 0x31
++#define DATA_XL 0x10
++#define DATA_XM 0x11
++#define DATA_YL 0x12
++#define DATA_YM 0x13
++#define DATA_ZL 0x14
++#define DATA_ZM 0x15
++#define STAT_REG 0x18
++#define CNTL_1 0x1B
++#define CNTL_2 0x1C
++#define CNTL_3 0x1D
++#define PRET_REG 0x30
++
++struct compass_data {
++ bool needresume;
++ struct mutex write_lock;
++};
++
++static ssize_t power_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned int ret_val;
++ struct i2c_client *client = to_i2c_client(dev);
++
++ ret_val = i2c_smbus_read_byte_data(client, CNTL_1);
++ if (ret_val < 0)
++ dev_warn(dev, "failed power mode read\n");
++
++ return sprintf(buf, "%d\n", ((char)ret_val & 0x80) >> 7);
++}
++
++static void set_power_state(struct i2c_client *client, bool on_off)
++{
++ char read_cntl1 = 0;
++ struct compass_data *data = i2c_get_clientdata(client);
++
++ mutex_lock(&data->write_lock);
++
++ read_cntl1 = i2c_smbus_read_byte_data(client, CNTL_1);
++ if (on_off)
++ read_cntl1 = (read_cntl1 | 0x80);
++ else
++ read_cntl1 = (read_cntl1 & 0x7F);
++
++ if (i2c_smbus_write_byte_data(client, CNTL_1, read_cntl1) < 0)
++ dev_warn(&client->dev, "failed power state write\n");
++
++ mutex_unlock(&data->write_lock);
++}
++
++static ssize_t power_mode_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ unsigned long set_val;
++ struct i2c_client *client = to_i2c_client(dev);
++ struct compass_data *data = i2c_get_clientdata(client);
++
++ if (strict_strtoul(buf, 10, &set_val))
++ return -EINVAL;
++
++ if (set_val == 1) {
++ set_power_state(client, true);
++ data->needresume = true;
++ } else if (set_val == 0) {
++ set_power_state(client, false);
++ data->needresume = false;
++ } else
++ return -EINVAL;
++
++ return count;
++}
++
++static ssize_t curr_xyz_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned short x, y, z;
++ unsigned char temp;
++ struct i2c_client *client = to_i2c_client(dev);
++ struct compass_data *data = i2c_get_clientdata(client);
++
++ mutex_lock(&data->write_lock);
++
++ if (i2c_smbus_write_byte_data(client, CNTL_3, 0x40) < 0)
++ dev_warn(dev, "failed xyz contrl3 register write\n");
++
++ /* wait for data ready */
++ msleep(15);
++
++ /*Force Read*/
++ x = i2c_smbus_read_byte_data(client, DATA_XM);
++ temp = i2c_smbus_read_byte_data(client, DATA_XL);
++ x = x << 8 | temp;
++ y = i2c_smbus_read_byte_data(client, DATA_YM);
++ temp = i2c_smbus_read_byte_data(client, DATA_YL);
++ y = y << 8 | temp;
++ z = i2c_smbus_read_byte_data(client, DATA_ZM);
++ temp = i2c_smbus_read_byte_data(client, DATA_ZL);
++ z = z << 8 | temp;
++
++ mutex_unlock(&data->write_lock);
++
++ return sprintf(buf, "%x:%x:%x\n", x, y, z);
++}
++
++static void ps_set_default_config(struct i2c_client *client)
++{
++ /*Power ON and set default*/
++ if (i2c_smbus_write_byte_data(client, CNTL_1, 0x82) < 0)
++ dev_warn(&client->dev, "failed default power on write\n");
++
++ if (i2c_smbus_write_byte_data(client, PRET_REG, 0x00) < 0)
++ dev_warn(&client->dev, "failed default control write\n");
++}
++
++static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
++ power_mode_show, power_mode_store);
++static DEVICE_ATTR(curr_pos, S_IRUGO, curr_xyz_show, NULL);
++
++static struct attribute *mid_att_compass[] = {
++ &dev_attr_power_state.attr,
++ &dev_attr_curr_pos.attr,
++ NULL
++};
++
++static struct attribute_group m_compass_gr = {
++ .name = "ak8974",
++ .attrs = mid_att_compass
++};
++
++static int ak8974_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++ struct compass_data *data;
++
++ data = kzalloc(sizeof(struct compass_data), GFP_KERNEL);
++ if (data == NULL) {
++ dev_warn(&client->dev, "memory initialization failed\n");
++ return -ENOMEM;
++ }
++ i2c_set_clientdata(client, data);
++
++ res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
++ if (res) {
++ dev_warn(&client->dev, "device_create_file failed\n");
++ goto compass_error1;
++ }
++
++ dev_info(&client->dev, "compass chip found \n");
++ ps_set_default_config(client);
++ data->needresume = true;
++ mutex_init(&data->write_lock);
++ return 0;
++
++compass_error1:
++ i2c_set_clientdata(client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int ak8974_remove(struct i2c_client *client)
++{
++ struct compass_data *data = i2c_get_clientdata(client);
++
++ sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
++ kfree(data);
++ return 0;
++}
++
++static int ak8974_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ set_power_state(client, false);
++ return 0;
++}
++
++static int ak8974_resume(struct i2c_client *client)
++{
++ struct compass_data *data = i2c_get_clientdata(client);
++
++ if (data->needresume == true)
++ set_power_state(client, true);
++ return 0;
++}
++
++static struct i2c_device_id ak8974_id[] = {
++ { "ak8974", 0 },
++ { }
++};
++
++static struct i2c_driver ak8974_driver = {
++ .driver = {
++ .name = "ak8974",
++ },
++ .probe = ak8974_probe,
++ .remove = ak8974_remove,
++ .suspend = ak8974_suspend,
++ .resume = ak8974_resume,
++ .id_table = ak8974_id,
++};
++
++static int __init sensor_ak8974_init(void)
++{
++ return i2c_add_driver(&ak8974_driver);
++}
++
++static void __exit sensor_ak8974_exit(void)
++{
++ i2c_del_driver(&ak8974_driver);
++}
++
++module_init(sensor_ak8974_init);
++module_exit(sensor_ak8974_exit);
+--- /dev/null
++++ b/drivers/staging/mfld-sensors/apds9802ps.c
+@@ -0,0 +1,248 @@
++/*
++ * apds9802ps.c - Avago Proximity Sensor Driver
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/pm_runtime.h>
++
++MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com");
++MODULE_DESCRIPTION("Avago apds9802ps Proximity Driver");
++MODULE_LICENSE("GPL v2");
++
++#define POWER_STA_ENABLE 1
++#define POWER_STA_DISABLE 0
++
++#define DRIVER_NAME "apds9802ps"
++
++struct ps_data{
++ struct mutex lock;
++};
++
++static ssize_t ps_proximity_output_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct ps_data *data = i2c_get_clientdata(client);
++ int ret_val = 0;
++ int l, h;
++ int retry = 10;
++
++ pm_runtime_get_sync(dev);
++ mutex_lock(&data->lock);
++
++ /* start measurement */
++ i2c_smbus_write_byte_data(client, 0x82, 0x2d);
++
++ /* wait for data ready */
++ do {
++ msleep(5);
++ ret_val = i2c_smbus_read_byte_data(client, 0x87);
++ if (ret_val > 0 && (ret_val & 0x10))
++ break;
++ } while (retry--);
++
++ if (!retry)
++ dev_warn(dev, "timeout waiting for data ready\n");
++
++ l = i2c_smbus_read_byte_data(client, 0x85); /* LSB data */
++ if (l < 0)
++ dev_warn(dev, "failed proximity out read LSB\n");
++
++ h = i2c_smbus_read_byte_data(client, 0x86); /* MSB data */
++ if (h < 0)
++ dev_warn(dev, "failed proximity out read MSB\n");
++
++ /* stop measurement and clear interrupt status */
++ i2c_smbus_write_byte_data(client, 0x82, 0x0d);
++ i2c_smbus_write_byte(client, 0x60);
++
++ mutex_unlock(&data->lock);
++ pm_runtime_put_sync(dev);
++
++ ret_val = (h << 8 | l);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static void ps_set_power_state(struct i2c_client *client, bool on_off)
++{
++ char curr_val = 0;
++ struct ps_data *data = i2c_get_clientdata(client);
++
++ mutex_lock(&data->lock);
++
++ curr_val = i2c_smbus_read_byte_data(client, 0x80);
++ if (on_off)
++ curr_val = curr_val | 0x01;
++ else
++ curr_val = curr_val & 0xFE;
++
++ if (i2c_smbus_write_byte_data(client, 0x80, curr_val) < 0)
++ dev_warn(&client->dev, "failed power state write\n");
++
++ mutex_unlock(&data->lock);
++}
++
++static DEVICE_ATTR(proximity_output, S_IRUGO,
++ ps_proximity_output_data_show, NULL);
++
++static struct attribute *mid_att_ps[] = {
++ &dev_attr_proximity_output.attr,
++ NULL
++};
++
++static struct attribute_group m_ps_gr = {
++ .name = "apds9802ps",
++ .attrs = mid_att_ps
++};
++
++static void ps_set_default_config(struct i2c_client *client)
++{
++ /* Power ON */
++ if (i2c_smbus_write_byte_data(client, 0x80, 0x01) < 0)
++ dev_warn(&client->dev, "failed default power on write\n");
++
++ /* 20 pulses, 100Khz Pulse frequency */
++ if (i2c_smbus_write_byte_data(client, 0x81, 0x86) < 0)
++ dev_warn(&client->dev, "failed pulse frequency write\n");
++
++ /* 100MA LED current, 500ms interval delay */
++ if (i2c_smbus_write_byte_data(client, 0x82, 0x0d) < 0)
++ dev_warn(&client->dev, "failed interval delay write\n");
++}
++
++static int apds9802ps_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++ struct ps_data *data;
++
++ data = kzalloc(sizeof(struct ps_data), GFP_KERNEL);
++ if (data == NULL) {
++ dev_err(&client->dev, "alloc ps_data failed\n");
++ return -ENOMEM;
++ }
++ i2c_set_clientdata(client, data);
++
++ res = sysfs_create_group(&client->dev.kobj, &m_ps_gr);
++ if (res) {
++ dev_err(&client->dev, "sysfs file create failed\n");
++ goto ps_error1;
++ }
++
++ dev_info(&client->dev, "proximity sensor chip found\n");
++
++ ps_set_default_config(client);
++ pm_runtime_enable(&client->dev);
++
++ /* toggle the power state */
++ pm_runtime_get(&client->dev);
++ pm_runtime_put(&client->dev);
++
++ mutex_init(&data->lock);
++ return res;
++
++ps_error1:
++ i2c_set_clientdata(client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int apds9802ps_remove(struct i2c_client *client)
++{
++ struct ps_data *data = i2c_get_clientdata(client);
++
++ ps_set_power_state(client, false);
++ sysfs_remove_group(&client->dev.kobj, &m_ps_gr);
++ kfree(data);
++ return 0;
++}
++
++static int apds9802ps_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ ps_set_power_state(client, false);
++ return 0;
++}
++
++static int apds9802ps_resume(struct i2c_client *client)
++{
++ ps_set_power_state(client, true);
++ return 0;
++}
++
++static struct i2c_device_id apds9802ps_id[] = {
++ { DRIVER_NAME, 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, apds9802ps_id);
++
++static int apds9802ps_runtime_suspend(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++
++ ps_set_power_state(client, false);
++ return 0;
++}
++
++static int apds9802ps_runtime_resume(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++
++ ps_set_power_state(client, true);
++ return 0;
++}
++
++static const struct dev_pm_ops apds9802ps_pm_ops = {
++ .runtime_suspend = apds9802ps_runtime_suspend,
++ .runtime_resume = apds9802ps_runtime_resume,
++};
++
++static struct i2c_driver apds9802ps_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .pm = &apds9802ps_pm_ops,
++ },
++ .probe = apds9802ps_probe,
++ .remove = apds9802ps_remove,
++ .suspend = apds9802ps_suspend,
++ .resume = apds9802ps_resume,
++ .id_table = apds9802ps_id,
++};
++
++static int __init sensor_apds9802ps_init(void)
++{
++ return i2c_add_driver(&apds9802ps_driver);
++}
++
++static void __exit sensor_apds9802ps_exit(void)
++{
++ i2c_del_driver(&apds9802ps_driver);
++}
++
++module_init(sensor_apds9802ps_init);
++module_exit(sensor_apds9802ps_exit);
+--- /dev/null
++++ b/drivers/staging/mfld_ledflash/Kconfig
+@@ -0,0 +1,10 @@
++config MFLD_LEDFLASH
++ tristate "LM3555 LED flash device for Intel Medfield Platform"
++ depends on I2C && X86_MRST
++ help
++ Say yes here you get support for the LM3555 LED flash device.
++ Note this is only a prototype driver and full support of the device
++ is still working-in-progress.
++
++ This driver can also be built as a module. If so, the module
++ will be called mfld_ledflash.
+--- /dev/null
++++ b/drivers/staging/mfld_ledflash/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_MFLD_LEDFLASH) += mfld_ledflash.o
+--- /dev/null
++++ b/drivers/staging/mfld_ledflash/mfld_ledflash.c
+@@ -0,0 +1,138 @@
++/*
++ * drivers/misc/mfld_ledflash.c
++ *
++ * Copyright (c) 2010 Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/input.h>
++#include <linux/device.h>
++#include <linux/gpio.h>
++
++#include <asm/io.h>
++
++static int mfld_ledflash_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int ids, version;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
++
++ dev_err(&client->dev, "error checking function\n");
++ return -ENODEV;
++ }
++
++ /* validate ID and version */
++ ids = i2c_smbus_read_byte_data(client, 0x00);
++ version = i2c_smbus_read_byte_data(client, 0x01);
++
++ if (ids != 0x31 || version != 0x06) {
++ dev_err(&client->dev, "error checking ID and version\n");
++ return -ENODEV;
++ }
++
++ dev_dbg(&client->dev, "Successfully detected LM3555 LED flash\n");
++
++ return 0;
++}
++
++static int __devinit mfld_ledflash_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int err;
++ u32 __iomem *mem;
++ u32 value;
++
++ err = mfld_ledflash_detect(client);
++ if (err) {
++ dev_err(&client->dev, "device not found\n");
++ return err;
++ }
++
++ /* Read out default register values */
++ dev_dbg(&client->dev, "Indicator & Timer (0x02): 0x%x\n",
++ i2c_smbus_read_byte_data(client, 0x02));
++ dev_dbg(&client->dev, "Current Set (0x03): 0x%x\n",
++ i2c_smbus_read_byte_data(client, 0x03));
++ dev_dbg(&client->dev, "Control (0x04): 0x%x\n",
++ i2c_smbus_read_byte_data(client, 0x04));
++ dev_dbg(&client->dev, "Fault (0x05): 0x%x\n",
++ i2c_smbus_read_byte_data(client, 0x05));
++
++ /* strobe operation cfg */
++ i2c_smbus_write_byte_data(client, 0x04, 0xBF);
++ dev_dbg(&client->dev, "Control (0x04): 0x%x\n",
++ i2c_smbus_read_byte_data(client, 0x04));
++
++ /* request flash GPIO*/
++ gpio_request(161, "flash");
++
++ /* Fixme: SW workaround HW issue */
++ mem = ioremap_nocache(0xff12c854, 24);
++ value = readl(mem + 4);
++ value &= ~(3 << 2);
++ writel(value, mem + 4);
++ iounmap(mem);
++
++ /* Trigger an output from Penwell GP_CORE_65 */
++ gpio_direction_output(161, 1);
++
++ return 0;
++}
++
++static int __devexit mfld_ledflash_remove(struct i2c_client *client)
++{
++ return 0;
++}
++
++static const struct i2c_device_id mfld_ledflash_id[] = {
++ {"i2c_cam_flash", 0},
++ {}
++};
++
++static struct i2c_driver mfld_ledflash_i2c_driver = {
++ .driver = {
++ .name = "mfld_ledflash",
++ },
++ .probe = mfld_ledflash_probe,
++ .remove = __devexit_p(mfld_ledflash_remove),
++ .id_table = mfld_ledflash_id,
++};
++
++MODULE_DEVICE_TABLE(i2c, mfld_ledflash_id);
++
++static int __init mfld_ledflash_init(void)
++{
++ return i2c_add_driver(&mfld_ledflash_i2c_driver);
++}
++
++static void __exit mfld_ledflash_exit(void)
++{
++ i2c_del_driver(&mfld_ledflash_i2c_driver);
++}
++
++module_init(mfld_ledflash_init);
++module_exit(mfld_ledflash_exit);
++
++MODULE_AUTHOR("Zheng Ba <zheng.ba@intel.com>");
++MODULE_DESCRIPTION("LM3555 LED flash driver for Intel Medfield platform");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/staging/mrst-touchscreen/Makefile
++++ b/drivers/staging/mrst-touchscreen/Makefile
+@@ -1,3 +1,3 @@
+-obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) := intel_mid_touch.o
++obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) := intel-mid-touch.o
+
+
+--- a/drivers/staging/mrst-touchscreen/intel-mid-touch.c
++++ b/drivers/staging/mrst-touchscreen/intel-mid-touch.c
+@@ -19,15 +19,11 @@
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Questions/Comments/Bug fixes to Sreedhara (sreedhara.ds@intel.com)
+- * Ramesh Agarwal (ramesh.agarwal@intel.com)
++ * Ramesh Agarwal (ramesh.agarwal@intel.com)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TODO:
+- * kill off mrstouch_debug eventually
+ * review conversion of r/m/w sequences
+- * Replace interrupt mutex abuse
+- * Kill of mrstouchdevp pointer
+- *
+ */
+
+ #include <linux/module.h>
+@@ -36,218 +32,101 @@
+ #include <linux/interrupt.h>
+ #include <linux/err.h>
+ #include <linux/param.h>
+-#include <linux/spi/spi.h>
++#include <linux/slab.h>
++#include <linux/platform_device.h>
+ #include <linux/irq.h>
+ #include <linux/delay.h>
+-#include <linux/kthread.h>
+ #include <asm/intel_scu_ipc.h>
+
+-
+-#if defined(MRSTOUCH_DEBUG)
+-#define mrstouch_debug(fmt, args...)\
+- do { \
+- printk(KERN_DEBUG "\n[MRSTOUCH(%d)] - ", __LINE__); \
+- printk(KERN_DEBUG fmt, ##args); \
+- } while (0);
+-#else
+-#define mrstouch_debug(fmt, args...)
+-#endif
+-
+ /* PMIC Interrupt registers */
+-#define PMIC_REG_ID1 0x00 /*PMIC ID1 register */
++#define PMIC_REG_ID1 0x00 /* PMIC ID1 register */
+
+ /* PMIC Interrupt registers */
+-#define PMIC_REG_INT 0x04 /*PMIC interrupt register */
+-#define PMIC_REG_MINT 0x05 /*PMIC interrupt mask register */
++#define PMIC_REG_INT 0x04 /* PMIC interrupt register */
++#define PMIC_REG_MINT 0x05 /* PMIC interrupt mask register */
+
+ /* ADC Interrupt registers */
+-#define PMIC_REG_ADCINT 0x5F /*ADC interrupt register */
+-#define PMIC_REG_MADCINT 0x60 /*ADC interrupt mask register */
++#define PMIC_REG_ADCINT 0x5F /* ADC interrupt register */
++#define PMIC_REG_MADCINT 0x60 /* ADC interrupt mask register */
+
+ /* ADC Control registers */
+-#define PMIC_REG_ADCCNTL1 0x61 /*ADC control register */
++#define PMIC_REG_ADCCNTL1 0x61 /* ADC control register */
+
+ /* ADC Channel Selection registers */
+-#define PMICADDR0 0xA4
+-#define END_OF_CHANNEL 0x1F
++#define PMICADDR0 0xA4
++#define END_OF_CHANNEL 0x1F
+
+ /* ADC Result register */
+-#define PMIC_REG_ADCSNS0H 0x64
++#define PMIC_REG_ADCSNS0H 0x64
+
+ /* ADC channels for touch screen */
+-#define MRST_TS_CHAN10 0xA /* Touch screen X+ connection */
+-#define MRST_TS_CHAN11 0xB /* Touch screen X- connection */
+-#define MRST_TS_CHAN12 0xC /* Touch screen Y+ connection */
+-#define MRST_TS_CHAN13 0xD /* Touch screen Y- connection */
+-
+-/* Touch screen coordinate constants */
+-#define TOUCH_PRESSURE 50
+-#define TOUCH_PRESSURE_FS 100
+-
+-#define XMOVE_LIMIT 5
+-#define YMOVE_LIMIT 5
+-#define XYMOVE_CNT 3
+-
+-#define MAX_10BIT ((1<<10)-1)
++#define MRST_TS_CHAN10 0xA /* Touch screen X+ connection */
++#define MRST_TS_CHAN11 0xB /* Touch screen X- connection */
++#define MRST_TS_CHAN12 0xC /* Touch screen Y+ connection */
++#define MRST_TS_CHAN13 0xD /* Touch screen Y- connection */
+
+ /* Touch screen channel BIAS constants */
+-#define XBIAS 0x20
+-#define YBIAS 0x40
+-#define ZBIAS 0x80
++#define MRST_XBIAS 0x20
++#define MRST_YBIAS 0x40
++#define MRST_ZBIAS 0x80
+
+ /* Touch screen coordinates */
+-#define MIN_X 10
+-#define MAX_X 1024
+-#define MIN_Y 10
+-#define MAX_Y 1024
+-#define WAIT_ADC_COMPLETION 10
++#define MRST_X_MIN 10
++#define MRST_X_MAX 1024
++#define MRST_X_FUZZ 5
++#define MRST_Y_MIN 10
++#define MRST_Y_MAX 1024
++#define MRST_Y_FUZZ 5
++#define MRST_PRESSURE_MIN 0
++#define MRST_PRESSURE_NOMINAL 50
++#define MRST_PRESSURE_MAX 100
++
++#define WAIT_ADC_COMPLETION 10 /* msec */
+
+ /* PMIC ADC round robin delays */
+-#define ADC_LOOP_DELAY0 0x0 /* Continuous loop */
+-#define ADC_LOOP_DELAY1 0x1 /* 4.5 ms approximate */
++#define ADC_LOOP_DELAY0 0x0 /* Continuous loop */
++#define ADC_LOOP_DELAY1 0x1 /* 4.5 ms approximate */
+
+ /* PMIC Vendor Identifiers */
+-#define PMIC_VENDOR_FS 0 /* PMIC vendor FreeScale */
+-#define PMIC_VENDOR_MAXIM 1 /* PMIC vendor MAXIM */
+-#define PMIC_VENDOR_NEC 2 /* PMIC vendor NEC */
+-#define MRSTOUCH_MAX_CHANNELS 32 /* Maximum ADC channels */
++#define PMIC_VENDOR_FS 0 /* PMIC vendor FreeScale */
++#define PMIC_VENDOR_MAXIM 1 /* PMIC vendor MAXIM */
++#define PMIC_VENDOR_NEC 2 /* PMIC vendor NEC */
++#define MRSTOUCH_MAX_CHANNELS 32 /* Maximum ADC channels */
+
+ /* Touch screen device structure */
+ struct mrstouch_dev {
+- struct spi_device *spi; /* SPI device associated with touch screen */
+- struct input_dev *input; /* input device for touchscreen*/
+- char phys[32]; /* Device name */
+- struct task_struct *pendet_thrd; /* PENDET interrupt handler */
+- struct mutex lock; /* Sync between interrupt and PENDET handler */
+- bool busy; /* Busy flag */
+- u16 asr; /* Address selection register */
+- int irq; /* Touch screen IRQ # */
+- uint vendor; /* PMIC vendor */
+- uint rev; /* PMIC revision */
+- bool suspended; /* Device suspended status */
+- bool disabled; /* Device disabled status */
+- u16 x; /* X coordinate */
+- u16 y; /* Y coordinate */
+- bool pendown; /* PEN position */
+-} ;
++ struct device *dev; /* device associated with touch screen */
++ struct input_dev *input;
++ char phys[32];
++ u16 asr; /* Address selection register */
++ int irq;
++ unsigned int vendor; /* PMIC vendor */
++ unsigned int rev; /* PMIC revision */
++
++ int (*read_prepare)(struct mrstouch_dev *tsdev);
++ int (*read)(struct mrstouch_dev *tsdev, u16 *x, u16 *y, u16 *z);
++ int (*read_finish)(struct mrstouch_dev *tsdev);
++};
+
+
+-/* Global Pointer to Touch screen device */
+-static struct mrstouch_dev *mrstouchdevp;
++/*************************** NEC and Maxim Interface ************************/
+
+-/* Utility to read PMIC ID */
+-static int mrstouch_pmic_id(uint *vendor, uint *rev)
++static int mrstouch_nec_adc_read_prepare(struct mrstouch_dev *tsdev)
+ {
+- int err;
+- u8 r;
+-
+- err = intel_scu_ipc_ioread8(PMIC_REG_ID1, &r);
+- if (err)
+- return err;
+-
+- *vendor = r & 0x7;
+- *rev = (r >> 3) & 0x7;
+-
+- return 0;
++ /* Disable pen detection during ADC read */
++ return intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, 0, 0x20);
+ }
+
+-/*
+- * Parse ADC channels to find end of the channel configured by other ADC user
+- * NEC and MAXIM requires 4 channels and FreeScale needs 18 channels
+- */
+-static int mrstouch_chan_parse(struct mrstouch_dev *tsdev)
++static int mrstouch_nec_adc_read_finish(struct mrstouch_dev *tsdev)
+ {
+- int err, i, j, found;
+- u32 r32;
+-
+- found = -1;
+-
+- for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
+- if (found >= 0)
+- break;
+-
+- err = intel_scu_ipc_ioread32(PMICADDR0, &r32);
+- if (err)
+- return err;
+-
+- for (j = 0; j < 32; j+= 8) {
+- if (((r32 >> j) & 0xFF) == END_OF_CHANNEL) {
+- found = i;
+- break;
+- }
+- }
+- }
+- if (found < 0)
+- return 0;
+-
+- if (tsdev->vendor == PMIC_VENDOR_FS) {
+- if (found && found > (MRSTOUCH_MAX_CHANNELS - 18))
+- return -ENOSPC;
+- } else {
+- if (found && found > (MRSTOUCH_MAX_CHANNELS - 4))
+- return -ENOSPC;
+- }
+- return found;
++ /* Re-enable pen detection */
++ return intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, 0x20, 0x20);
+ }
+
+-/* Utility to enable/disable pendet.
+- * pendet set to true enables PENDET interrupt
+- * pendet set to false disables PENDET interrupt
+- * Also clears RND mask bit
+-*/
+-static int pendet_enable(struct mrstouch_dev *tsdev, bool pendet)
+-{
+- u16 reg;
+- u8 r;
+- u8 pendet_enabled = 0;
+- int retry = 0;
+- int err;
+-
+- err = intel_scu_ipc_ioread16(PMIC_REG_MADCINT, &reg);
+- if (err)
+- return err;
+-
+- if (pendet) {
+- reg &= ~0x0005;
+- reg |= 0x2000; /* Enable pendet */
+- } else
+- reg &= 0xDFFF; /* Disable pendet */
+-
+- /* Set MADCINT and update ADCCNTL1 (next reg byte) */
+- err = intel_scu_ipc_iowrite16(PMIC_REG_MADCINT, reg);
+- if (!pendet || err)
+- return err;
+-
+- /*
+- * Sometimes even after the register write succeeds
+- * the PMIC register value is not updated. Retry few iterations
+- * to enable pendet.
+- */
+-
+- err = intel_scu_ipc_ioread8(PMIC_REG_ADCCNTL1, &r);
+- pendet_enabled = (r >> 5) & 0x01;
+-
+- retry = 0;
+- while (!err && !pendet_enabled) {
+- retry++;
+- msleep(10);
+- err = intel_scu_ipc_iowrite8(PMIC_REG_ADCCNTL1, reg >> 8);
+- if (err)
+- break;
+- err = intel_scu_ipc_ioread8(PMIC_REG_ADCCNTL1, &r);
+- if (err == 0)
+- pendet_enabled = (r >> 5) & 0x01;
+- if (retry >= 10) {
+- dev_err(&tsdev->spi->dev, "Touch screen disabled.\n");
+- return -EIO;
+- }
+- }
+- return 0;
+-}
+-
+-/* To read PMIC ADC touch screen result
+- * Reads ADC storage registers for higher 7 and lower 3 bits
+- * converts the two readings to single value and turns off gain bit
++/*
++ * Reads PMIC ADC touch screen result
++ * Reads ADC storage registers for higher 7 and lower 3 bits and
++ * converts the two readings into a single value and turns off gain bit
+ */
+ static int mrstouch_ts_chan_read(u16 offset, u16 chan, u16 *vp, u16 *vm)
+ {
+@@ -279,202 +158,93 @@
+ return 0;
+ }
+
+-/* To configure touch screen channels
+- * Writes touch screen channels to ADC address selection registers
++/*
++ * Enables X, Y and Z bias values
++ * Enables YPYM for X channels and XPXM for Y channels
+ */
+-static int mrstouch_ts_chan_set(uint offset)
++static int mrstouch_ts_bias_set(uint offset, uint bias)
+ {
+ int count;
+- u16 chan;
+- u16 reg[5];
+- u8 data[5];
++ u16 chan, start;
++ u16 reg[4];
++ u8 data[4];
+
+ chan = PMICADDR0 + offset;
++ start = MRST_TS_CHAN10;
++
+ for (count = 0; count <= 3; count++) {
+ reg[count] = chan++;
+- data[count] = MRST_TS_CHAN10 + count;
+- }
+- reg[count] = chan;
+- data[count] = END_OF_CHANNEL;
+-
+- return intel_scu_ipc_writev(reg, data, 5);
+-}
+-
+-/* Initialize ADC */
+-static int mrstouch_adc_init(struct mrstouch_dev *tsdev)
+-{
+- int err, start;
+- u8 ra, rm;
+-
+- err = mrstouch_pmic_id(&tsdev->vendor, &tsdev->rev);
+- if (err) {
+- dev_err(&tsdev->spi->dev, "Unable to read PMIC id\n");
+- return err;
+- }
+-
+- start = mrstouch_chan_parse(tsdev);
+- if (start < 0) {
+- dev_err(&tsdev->spi->dev, "Unable to parse channels\n");
+- return start;
++ data[count] = bias | (start + count);
+ }
+
+- tsdev->asr = start;
+-
+- mrstouch_debug("Channel offset(%d): 0x%X\n", tsdev->asr, tsdev->vendor);
+-
+- /* ADC power on, start, enable PENDET and set loop delay
+- * ADC loop delay is set to 4.5 ms approximately
+- * Loop delay more than this results in jitter in adc readings
+- * Setting loop delay to 0 (continous loop) in MAXIM stops PENDET
+- * interrupt generation sometimes.
+- */
+-
+- if (tsdev->vendor == PMIC_VENDOR_FS) {
+- ra = 0xE0 | ADC_LOOP_DELAY0;
+- rm = 0x5;
+- } else {
+- /* NEC and MAXIm not consistent with loop delay 0 */
+- ra = 0xE0 | ADC_LOOP_DELAY1;
+- rm = 0x0;
+-
+- /* configure touch screen channels */
+- err = mrstouch_ts_chan_set(tsdev->asr);
+- if (err)
+- return err;
+- }
+- err = intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, ra, 0xE7);
+- if (err == 0)
+- err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, rm, 0x03);
+- return err;
++ return intel_scu_ipc_writev(reg, data, 4);
+ }
+
+-/* Reports x,y coordinates to event subsystem */
+-static void mrstouch_report_xy(struct mrstouch_dev *tsdev, u16 x, u16 y, u16 z)
++/* To read touch screen channel values */
++static int mrstouch_nec_adc_read(struct mrstouch_dev *tsdev,
++ u16 *x, u16 *y, u16 *z)
+ {
+- int xdiff, ydiff;
+-
+- if (tsdev->pendown && z <= TOUCH_PRESSURE) {
+- /* Pen removed, report button release */
+- mrstouch_debug("BTN REL(%d)", z);
+- input_report_key(tsdev->input, BTN_TOUCH, 0);
+- tsdev->pendown = false;
+- }
+-
+- xdiff = abs(x - tsdev->x);
+- ydiff = abs(y - tsdev->y);
+-
+- /*
+- if x and y values changes for XYMOVE_CNT readings it is considered
+- as stylus is moving. This is required to differentiate between stylus
+- movement and jitter
+- */
+- if (x < MIN_X || x > MAX_X || y < MIN_Y || y > MAX_Y) {
+- /* Spurious values, release button if touched and return */
+- if (tsdev->pendown) {
+- mrstouch_debug("BTN REL(%d)", z);
+- input_report_key(tsdev->input, BTN_TOUCH, 0);
+- tsdev->pendown = false;
+- }
+- return;
+- } else if (xdiff >= XMOVE_LIMIT || ydiff >= YMOVE_LIMIT) {
+- tsdev->x = x;
+- tsdev->y = y;
+-
+- input_report_abs(tsdev->input, ABS_X, x);
+- input_report_abs(tsdev->input, ABS_Y, y);
+- input_sync(tsdev->input);
+- }
+-
+-
+- if (!tsdev->pendown && z > TOUCH_PRESSURE) {
+- /* Pen touched, report button touch */
+- mrstouch_debug("BTN TCH(%d, %d, %d)", x, y, z);
+- input_report_key(tsdev->input, BTN_TOUCH, 1);
+- tsdev->pendown = true;
+- }
+-}
+-
++ int err;
++ u16 xm, ym, zm;
+
+-/* Utility to start ADC, used by freescale handler */
+-static int pendet_mask(void)
+-{
+- return intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x02, 0x02);
+-}
++ /* configure Y bias for X channels */
++ err = mrstouch_ts_bias_set(tsdev->asr, MRST_YBIAS); // XXX XBIAS?
++ if (err)
++ goto ipc_error;
+
+-/* Utility to stop ADC, used by freescale handler */
+-static int pendet_umask(void)
+-{
+- return intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x00, 0x02);
+-}
++ msleep(WAIT_ADC_COMPLETION);
+
+-/* Utility to read ADC, used by freescale handler */
+-static int mrstouch_pmic_fs_adc_read(struct mrstouch_dev *tsdev)
+-{
+- int err;
+- u16 x, y, z, result;
+- u16 reg[4];
+- u8 data[4];
++ /* read x+ and x- channels */
++ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, x, &xm);
++ if (err)
++ goto ipc_error;
+
+- result = PMIC_REG_ADCSNS0H + tsdev->asr;
++ /* configure x bias for y channels */
++ err = mrstouch_ts_bias_set(tsdev->asr, MRST_XBIAS); // XXX YBIAS?
++ if (err)
++ goto ipc_error;
+
+- reg[0] = result + 4;
+- reg[1] = result + 5;
+- reg[2] = result + 16;
+- reg[3] = result + 17;
++ msleep(WAIT_ADC_COMPLETION);
+
+- err = intel_scu_ipc_readv(reg, data, 4);
++ /* read y+ and y- channels */
++ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN12, y, &ym);
+ if (err)
+ goto ipc_error;
+
+- x = data[0] << 3; /* Higher 7 bits */
+- x |= data[1] & 0x7; /* Lower 3 bits */
+- x &= 0x3FF;
+-
+- y = data[2] << 3; /* Higher 7 bits */
+- y |= data[3] & 0x7; /* Lower 3 bits */
+- y &= 0x3FF;
++ /* configure z bias for x and y channels */
++ err = mrstouch_ts_bias_set(tsdev->asr, MRST_ZBIAS);
++ if (err)
++ goto ipc_error;
+
+- /* Read Z value */
+- reg[0] = result + 28;
+- reg[1] = result + 29;
++ msleep(WAIT_ADC_COMPLETION);
+
+- err = intel_scu_ipc_readv(reg, data, 4);
++ /* read z+ and z- channels */
++ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, z, &zm);
+ if (err)
+ goto ipc_error;
+
+- z = data[0] << 3; /* Higher 7 bits */
+- z |= data[1] & 0x7; /* Lower 3 bits */
+- z &= 0x3FF;
+-
+-#if defined(MRSTOUCH_PRINT_XYZP)
+- mrstouch_debug("X: %d, Y: %d, Z: %d", x, y, z);
+-#endif
+-
+- if (z >= TOUCH_PRESSURE_FS) {
+- mrstouch_report_xy(tsdev, x, y, TOUCH_PRESSURE - 1); /* Pen Removed */
+- return TOUCH_PRESSURE - 1;
+- } else {
+- mrstouch_report_xy(tsdev, x, y, TOUCH_PRESSURE + 1); /* Pen Touched */
+- return TOUCH_PRESSURE + 1;
+- }
+-
+ return 0;
+
+ ipc_error:
+- dev_err(&tsdev->spi->dev, "ipc error during fs_adc read\n");
++ dev_err(tsdev->dev, "ipc error during adc read\n");
+ return err;
+ }
+
+-/* To handle free scale pmic pendet interrupt */
+-static int pmic0_pendet(void *dev_id)
++
++/*************************** Freescale Interface ************************/
++
++static int mrstouch_fs_adc_read_prepare(struct mrstouch_dev *tsdev)
+ {
+ int err, count;
+ u16 chan;
+- unsigned int touched;
+- struct mrstouch_dev *tsdev = (struct mrstouch_dev *)dev_id;
+ u16 reg[5];
+ u8 data[5];
+
++ /* Stop the ADC */
++ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x00, 0x02);
++ if (err)
++ goto ipc_error;
++
+ chan = PMICADDR0 + tsdev->asr;
+
+ /* Set X BIAS */
+@@ -512,16 +282,65 @@
+
+ msleep(WAIT_ADC_COMPLETION);
+
+- /*Read touch screen channels till pen removed
+- * Freescale reports constant value of z for all points
+- * z is high when screen is not touched and low when touched
+- * Map high z value to not touched and low z value to pen touched
+- */
+- touched = mrstouch_pmic_fs_adc_read(tsdev);
+- while (touched > TOUCH_PRESSURE) {
+- touched = mrstouch_pmic_fs_adc_read(tsdev);
+- msleep(WAIT_ADC_COMPLETION);
+- }
++ return 0;
++
++ipc_error:
++ dev_err(tsdev->dev, "ipc error during %s\n", __func__);
++ return err;
++}
++
++static int mrstouch_fs_adc_read(struct mrstouch_dev *tsdev,
++ u16 *x, u16 *y, u16 *z)
++{
++ int err;
++ u16 result;
++ u16 reg[4];
++ u8 data[4];
++
++ result = PMIC_REG_ADCSNS0H + tsdev->asr;
++
++ reg[0] = result + 4;
++ reg[1] = result + 5;
++ reg[2] = result + 16;
++ reg[3] = result + 17;
++
++ err = intel_scu_ipc_readv(reg, data, 4);
++ if (err)
++ goto ipc_error;
++
++ *x = data[0] << 3; /* Higher 7 bits */
++ *x |= data[1] & 0x7; /* Lower 3 bits */
++ *x &= 0x3FF;
++
++ *y = data[2] << 3; /* Higher 7 bits */
++ *y |= data[3] & 0x7; /* Lower 3 bits */
++ *y &= 0x3FF;
++
++ /* Read Z value */
++ reg[0] = result + 28;
++ reg[1] = result + 29;
++
++ err = intel_scu_ipc_readv(reg, data, 4);
++ if (err)
++ goto ipc_error;
++
++ *z = data[0] << 3; /* Higher 7 bits */
++ *z |= data[1] & 0x7; /* Lower 3 bits */
++ *z &= 0x3FF;
++
++ return 0;
++
++ipc_error:
++ dev_err(tsdev->dev, "ipc error during %s\n", __func__);
++ return err;
++}
++
++static int mrstouch_fs_adc_read_finish(struct mrstouch_dev *tsdev)
++{
++ int err, count;
++ u16 chan;
++ u16 reg[5];
++ u8 data[5];
+
+ /* Clear all TS channels */
+ chan = PMICADDR0 + tsdev->asr;
+@@ -545,319 +364,311 @@
+ if (err)
+ goto ipc_error;
+
++ /* Start ADC */
++ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x02, 0x02);
++ if (err)
++ goto ipc_error;
++
+ return 0;
+
+ ipc_error:
+- dev_err(&tsdev->spi->dev, "ipc error during pendet\n");
++ dev_err(tsdev->dev, "ipc error during %s\n", __func__);
+ return err;
+ }
+
+-
+-/* To enable X, Y and Z bias values
+- * Enables YPYM for X channels and XPXM for Y channels
+- */
+-static int mrstouch_ts_bias_set(uint offset, uint bias)
++static void mrstouch_report_event(struct input_dev *input,
++ unsigned int x, unsigned int y, unsigned int z)
+ {
+- int count;
+- u16 chan, start;
+- u16 reg[4];
+- u8 data[4];
+-
+- chan = PMICADDR0 + offset;
+- start = MRST_TS_CHAN10;
+-
+- for (count = 0; count <= 3; count++) {
+- reg[count] = chan++;
+- data[count] = bias | (start + count);
++ if (z > MRST_PRESSURE_NOMINAL) {
++ /* Pen touched, report button touch and coordinates */
++ input_report_key(input, BTN_TOUCH, 1);
++ input_report_abs(input, ABS_X, x);
++ input_report_abs(input, ABS_Y, y);
++ } else {
++ input_report_key(input, BTN_TOUCH, 0);
+ }
+- return intel_scu_ipc_writev(reg, data, 4);
++
++ input_report_abs(input, ABS_PRESSURE, z);
++ input_sync(input);
+ }
+
+-/* To read touch screen channel values */
+-static int mrstouch_adc_read(struct mrstouch_dev *tsdev)
++/* PENDET interrupt handler */
++static irqreturn_t mrstouch_pendet_irq(int irq, void *dev_id)
+ {
+- int err;
+- u16 xp, xm, yp, ym, zp, zm;
++ struct mrstouch_dev *tsdev = dev_id;
++ u16 x, y, z;
+
+- /* configure Y bias for X channels */
+- err = mrstouch_ts_bias_set(tsdev->asr, YBIAS);
+- if (err)
+- goto ipc_error;
++ // FIXME: should we lower thread priority? Maybe not, we
++ // not spinnig but sleeping...
+
+- msleep(WAIT_ADC_COMPLETION);
++ if (tsdev->read_prepare(tsdev))
++ goto out;
+
+- /* read x+ and x- channels */
+- err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, &xp, &xm);
+- if (err)
+- goto ipc_error;
++ do {
++ if (tsdev->read(tsdev, &x, &y, &z))
++ break;
+
+- /* configure x bias for y channels */
+- err = mrstouch_ts_bias_set(tsdev->asr, XBIAS);
+- if (err)
+- goto ipc_error;
++ mrstouch_report_event(tsdev->input, x, y, z);
++ } while (z > MRST_PRESSURE_NOMINAL);
+
+- msleep(WAIT_ADC_COMPLETION);
++ tsdev->read_finish(tsdev);
+
+- /* read y+ and y- channels */
+- err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN12, &yp, &ym);
+- if (err)
+- goto ipc_error;
+-
+- /* configure z bias for x and y channels */
+- err = mrstouch_ts_bias_set(tsdev->asr, ZBIAS);
+- if (err)
+- goto ipc_error;
++out:
++ return IRQ_HANDLED;
++}
+
+- msleep(WAIT_ADC_COMPLETION);
++/* Utility to read PMIC ID */
++static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
++{
++ int err;
++ u8 r;
+
+- /* read z+ and z- channels */
+- err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, &zp, &zm);
++ err = intel_scu_ipc_ioread8(PMIC_REG_ID1, &r);
+ if (err)
+- goto ipc_error;
+-
+-#if defined(MRSTOUCH_PRINT_XYZP)
+- printk(KERN_INFO "X+: %d, Y+: %d, Z+: %d\n", xp, yp, zp);
+-#endif
+-
+-#if defined(MRSTOUCH_PRINT_XYZM)
+- printk(KERN_INFO "X-: %d, Y-: %d, Z-: %d\n", xm, ym, zm);
+-#endif
+-
+- mrstouch_report_xy(tsdev, xp, yp, zp); /* report x and y to eventX */
+-
+- return zp;
+-
+-ipc_error:
+- dev_err(&tsdev->spi->dev, "ipc error during adc read\n");
+- return err;
+-}
++ return err;
+
+-/* PENDET interrupt handler function for NEC and MAXIM */
+-static void pmic12_pendet(void *data)
+-{
+- unsigned int touched;
+- struct mrstouch_dev *tsdev = (struct mrstouch_dev *)data;
++ *vendor = r & 0x7;
++ *rev = (r >> 3) & 0x7;
+
+- /* read touch screen channels till pen removed */
+- do {
+- touched = mrstouch_adc_read(tsdev);
+- } while (touched > TOUCH_PRESSURE);
++ return 0;
+ }
+
+-/* Handler to process PENDET interrupt */
+-int mrstouch_pendet(void *data)
++/*
++ * Parse ADC channels to find end of the channel configured by other ADC user
++ * NEC and MAXIM requires 4 channels and FreeScale needs 18 channels
++ */
++static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
+ {
+- struct mrstouch_dev *tsdev = (struct mrstouch_dev *)data;
+- while (1) {
+- /* Wait for PENDET interrupt */
+- if (mutex_lock_interruptible(&tsdev->lock)) {
+- msleep(WAIT_ADC_COMPLETION);
+- continue;
+- }
++ int err, i, found;
++ u8 r8;
+
+- if (tsdev->busy)
+- return 0;
++ found = -1;
+
+- tsdev->busy = true;
++ for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
++ if (found >= 0)
++ break;
+
+- if (tsdev->vendor == PMIC_VENDOR_NEC ||
+- tsdev->vendor == PMIC_VENDOR_MAXIM) {
+- /* PENDET must be disabled in NEC before reading ADC */
+- pendet_enable(tsdev,false); /* Disbale PENDET */
+- pmic12_pendet(tsdev);
+- pendet_enable(tsdev, true); /*Enable PENDET */
+- } else if (tsdev->vendor == PMIC_VENDOR_FS) {
+- pendet_umask(); /* Stop ADC */
+- pmic0_pendet(tsdev);
+- pendet_mask(); /* Stop ADC */
+- } else
+- dev_err(&tsdev->spi->dev, "Unsupported touchscreen: %d\n",
+- tsdev->vendor);
++ err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8);
++ if (err)
++ return err;
+
+- tsdev->busy = false;
++ if (r8 == END_OF_CHANNEL) {
++ found = i;
++ break;
++ }
++ }
++ if (found < 0)
++ return 0;
+
++ if (tsdev->vendor == PMIC_VENDOR_FS) {
++ if (found && found > (MRSTOUCH_MAX_CHANNELS - 18))
++ return -ENOSPC;
++ } else {
++ if (found && found > (MRSTOUCH_MAX_CHANNELS - 4))
++ return -ENOSPC;
+ }
+- return 0;
++ return found;
+ }
+
+-/* PENDET interrupt handler */
+-static irqreturn_t pendet_intr_handler(int irq, void *handle)
++
++/*
++ * Writes touch screen channels to ADC address selection registers
++ */
++static int __devinit mrstouch_ts_chan_set(uint offset)
+ {
+- struct mrstouch_dev *tsdev = (struct mrstouch_dev *)handle;
++ u16 chan;
+
+- mutex_unlock(&tsdev->lock);
+- return IRQ_HANDLED;
++ int ret, count;
++
++ chan = PMICADDR0 + offset;
++ for (count = 0; count <= 3; count++) {
++ ret = intel_scu_ipc_iowrite8(chan++, MRST_TS_CHAN10 + count);
++ if (ret)
++ return ret;
++ }
++ return intel_scu_ipc_iowrite8(chan++, END_OF_CHANNEL);
+ }
+
+-/* Intializes input device and registers with input subsystem */
+-static int ts_input_dev_init(struct mrstouch_dev *tsdev, struct spi_device *spi)
++/* Initialize ADC */
++static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
+ {
+- int err = 0;
++ int err, start;
++ u8 ra, rm;
+
+- mrstouch_debug("%s", __func__);
++ err = mrstouch_read_pmic_id(&tsdev->vendor, &tsdev->rev);
++ if (err) {
++ dev_err(tsdev->dev, "Unable to read PMIC id\n");
++ return err;
++ }
+
+- tsdev->input = input_allocate_device();
+- if (!tsdev->input) {
+- dev_err(&tsdev->spi->dev, "Unable to allocate input device.\n");
+- return -EINVAL;
++ switch (tsdev->vendor) {
++ case PMIC_VENDOR_NEC:
++ case PMIC_VENDOR_MAXIM:
++ tsdev->read_prepare = mrstouch_nec_adc_read_prepare;
++ tsdev->read = mrstouch_nec_adc_read;
++ tsdev->read_finish = mrstouch_nec_adc_read_finish;
++ break;
++
++ case PMIC_VENDOR_FS:
++ tsdev->read_prepare = mrstouch_fs_adc_read_prepare;
++ tsdev->read = mrstouch_fs_adc_read;
++ tsdev->read_finish = mrstouch_fs_adc_read_finish;
++ break;
++
++ default:
++ dev_err(tsdev->dev,
++ "Unsupported touchscreen: %d\n", tsdev->vendor);
++ return -ENXIO;
+ }
+
+- tsdev->input->name = "mrst_touchscreen";
+- snprintf(tsdev->phys, sizeof(tsdev->phys),
+- "%s/input0", dev_name(&spi->dev));
+- tsdev->input->phys = tsdev->phys;
+- tsdev->input->dev.parent = &spi->dev;
++ start = mrstouch_chan_parse(tsdev);
++ if (start < 0) {
++ dev_err(tsdev->dev, "Unable to parse channels\n");
++ return start;
++ }
++
++ tsdev->asr = start;
+
+- tsdev->input->id.vendor = tsdev->vendor;
+- tsdev->input->id.version = tsdev->rev;
++ /*
++ * ADC power on, start, enable PENDET and set loop delay
++ * ADC loop delay is set to 4.5 ms approximately
++ * Loop delay more than this results in jitter in adc readings
++ * Setting loop delay to 0 (continous loop) in MAXIM stops PENDET
++ * interrupt generation sometimes.
++ */
+
+- tsdev->input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+- tsdev->input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++ if (tsdev->vendor == PMIC_VENDOR_FS) {
++ ra = 0xE0 | ADC_LOOP_DELAY0;
++ rm = 0x5;
++ } else {
++ /* NEC and MAXIm not consistent with loop delay 0 */
++ ra = 0xE0 | ADC_LOOP_DELAY1;
++ rm = 0x0;
+
+- input_set_abs_params(tsdev->input, ABS_X, MIN_X, MIN_Y, 0, 0);
+- input_set_abs_params(tsdev->input, ABS_Y, MIN_X, MIN_Y, 0, 0);
++ /* configure touch screen channels */
++ err = mrstouch_ts_chan_set(tsdev->asr);
++ if (err)
++ return err;
++ }
+
+- err = input_register_device(tsdev->input);
+- if (err) {
+- dev_err(&tsdev->spi->dev, "unable to register input device\n");
+- input_free_device(tsdev->input);
++ err = intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, ra, 0xE7);
++ if (err)
+ return err;
+- }
+
+- mrstouch_debug("%s", "mrstouch initialized");
++ err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, rm, 0x03);
++ if (err)
++ return err;
+
+ return 0;
+-
+ }
+
++
+ /* Probe function for touch screen driver */
+-static int __devinit mrstouch_probe(struct spi_device *mrstouch_spi)
++static int __devinit mrstouch_probe(struct platform_device *pdev)
+ {
+- int err;
+- unsigned int myirq;
+ struct mrstouch_dev *tsdev;
++ struct input_dev *input;
++ int err;
++ int irq;
+
+- mrstouch_debug("%s(%p)", __func__, mrstouch_spi);
+-
+- mrstouchdevp = NULL;
+- myirq = mrstouch_spi->irq;
+-
+- if (!mrstouch_spi->irq) {
+- dev_err(&mrstouch_spi->dev, "no interrupt assigned\n");
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(&pdev->dev, "no interrupt assigned\n");
+ return -EINVAL;
+ }
+
+ tsdev = kzalloc(sizeof(struct mrstouch_dev), GFP_KERNEL);
+- if (!tsdev) {
+- dev_err(&mrstouch_spi->dev, "unable to allocate memory\n");
+- return -ENOMEM;
++ input = input_allocate_device();
++ if (!tsdev || !input) {
++ dev_err(&pdev->dev, "unable to allocate memory\n");
++ err = -ENOMEM;
++ goto err_free_mem;
+ }
+
+- tsdev->irq = myirq;
+- mrstouchdevp = tsdev;
++ tsdev->dev = &pdev->dev;
++ tsdev->input = input;
++ tsdev->irq = irq;
++
++ snprintf(tsdev->phys, sizeof(tsdev->phys),
++ "%s/input0", dev_name(tsdev->dev));
+
+ err = mrstouch_adc_init(tsdev);
+ if (err) {
+- dev_err(&mrstouch_spi->dev, "ADC init failed\n");
+- goto mrstouch_err_free_mem;
++ dev_err(&pdev->dev, "ADC initialization failed\n");
++ goto err_free_mem;
+ }
+
+- dev_set_drvdata(&mrstouch_spi->dev, tsdev);
+- tsdev->spi = mrstouch_spi;
++ input->name = "mrst_touchscreen";
++ input->phys = tsdev->phys;
++ input->dev.parent = tsdev->dev;
+
+- err = ts_input_dev_init(tsdev, mrstouch_spi);
+- if (err) {
+- dev_err(&tsdev->spi->dev, "ts_input_dev_init failed");
+- goto mrstouch_err_free_mem;
+- }
++ input->id.vendor = tsdev->vendor;
++ input->id.version = tsdev->rev;
++
++ input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
++ input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+- mutex_init(&tsdev->lock);
+- mutex_lock(&tsdev->lock)
++ input_set_abs_params(tsdev->input, ABS_X,
++ MRST_X_MIN, MRST_X_MAX, MRST_X_FUZZ, 0);
++ input_set_abs_params(tsdev->input, ABS_Y,
++ MRST_Y_MIN, MRST_Y_MAX, MRST_Y_FUZZ, 0);
++ input_set_abs_params(tsdev->input, ABS_PRESSURE,
++ MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
+
+- mrstouch_debug("Requesting IRQ-%d", myirq);
+- err = request_irq(myirq, pendet_intr_handler,
+- 0, "mrstouch", tsdev);
++ err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
++ 0, "mrstouch", tsdev);
+ if (err) {
+- dev_err(&tsdev->spi->dev, "unable to allocate irq\n");
+- goto mrstouch_err_free_mem;
++ dev_err(tsdev->dev, "unable to allocate irq\n");
++ goto err_free_mem;
+ }
+
+- tsdev->pendet_thrd = kthread_run(mrstouch_pendet,
+- (void *)tsdev, "pendet handler");
+- if (IS_ERR(tsdev->pendet_thrd)) {
+- dev_err(&tsdev->spi->dev, "kthread_run failed\n");
+- err = PTR_ERR(tsdev->pendet_thrd);
+- goto mrstouch_err_free_mem;
++ err = input_register_device(tsdev->input);
++ if (err) {
++ dev_err(tsdev->dev, "unable to register input device\n");
++ goto err_free_irq;
+ }
+- mrstouch_debug("%s", "Driver initialized");
++
++ dev_set_drvdata(tsdev->dev, tsdev);
+ return 0;
+
+-mrstouch_err_free_mem:
++err_free_irq:
++ free_irq(tsdev->irq, tsdev);
++err_free_mem:
++ input_free_device(input);
+ kfree(tsdev);
+ return err;
+ }
+
+-static int mrstouch_suspend(struct spi_device *spi, pm_message_t msg)
+-{
+- mrstouch_debug("%s", __func__);
+- mrstouchdevp->suspended = 1;
+- return 0;
+-}
+-
+-static int mrstouch_resume(struct spi_device *spi)
++static int __devexit mrstouch_remove(struct platform_device *pdev)
+ {
+- mrstouch_debug("%s", __func__);
+- mrstouchdevp->suspended = 0;
+- return 0;
+-}
++ struct mrstouch_dev *tsdev = dev_get_drvdata(&pdev->dev);
+
+-static int mrstouch_remove(struct spi_device *spi)
+-{
+- mrstouch_debug("%s", __func__);
+- free_irq(mrstouchdevp->irq, mrstouchdevp);
+- input_unregister_device(mrstouchdevp->input);
+- input_free_device(mrstouchdevp->input);
+- if (mrstouchdevp->pendet_thrd)
+- kthread_stop(mrstouchdevp->pendet_thrd);
+- kfree(mrstouchdevp);
++ free_irq(tsdev->irq, tsdev);
++ input_unregister_device(tsdev->input);
++ kfree(tsdev);
+ return 0;
+ }
+
+-static struct spi_driver mrstouch_driver = {
++static struct platform_driver mrstouch_driver = {
+ .driver = {
+ .name = "pmic_touch",
+- .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = mrstouch_probe,
+- .suspend = mrstouch_suspend,
+- .resume = mrstouch_resume,
+- .remove = mrstouch_remove,
++ .remove = __devexit_p(mrstouch_remove),
+ };
+
+-static int __init mrstouch_module_init(void)
++static int __init mrstouch_init(void)
+ {
+- int err;
+-
+- mrstouch_debug("%s", __func__);
+- err = spi_register_driver(&mrstouch_driver);
+- if (err) {
+- mrstouch_debug("%s(%d)", "SPI PENDET failed", err);
+- return -1;
+- }
+-
+- return 0;
++ return platform_driver_register(&mrstouch_driver);
+ }
++module_init(mrstouch_init);
+
+-static void __exit mrstouch_module_exit(void)
++static void __exit mrstouch_exit(void)
+ {
+- mrstouch_debug("%s", __func__);
+- spi_unregister_driver(&mrstouch_driver);
+- return;
++ platform_driver_unregister(&mrstouch_driver);
+ }
+-
+-module_init(mrstouch_module_init);
+-module_exit(mrstouch_module_exit);
++module_exit(mrstouch_exit);
+
+ MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
+ MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
+--- /dev/null
++++ b/drivers/staging/mrst/Kconfig
+@@ -0,0 +1,93 @@
++#
++# Drm device configuration
++#
++# This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++
++config DRM_INTEL_MID
++ tristate "Intel Moorestown/Medfield (load along with IMG driver)"
++ depends on DRM && PCI
++ select FB_CFB_COPYAREA
++ select FB_CFB_FILLRECT
++ select FB_CFB_IMAGEBLIT
++ select DRM_KMS_HELPER
++ help
++ xxxxxxxxxxxxxxxxxxxxxxxxxx
++
++choice
++ prompt "Build IMG kernel service as "
++ depends on DRM_INTEL_MID
++ default DRM_MID_RELEASE
++
++config DRM_MID_RELEASE
++ bool "Release"
++ depends on DRM_INTEL_MID
++ help
++ Build IMG kernel services as release
++
++config DRM_MID_DEBUG
++ bool "Debug"
++ depends on DRM_INTEL_MID
++ help
++ Build IMG kernel services as debug
++
++endchoice
++
++config DRM_MRST
++ tristate "Intel Moorestown"
++ depends on DRM_INTEL_MID && PCI
++ select FB_CFB_COPYAREA
++ select FB_CFB_FILLRECT
++ select FB_CFB_IMAGEBLIT
++ select DRM_KMS_HELPER
++ help
++ Choose this option if you have a Moorestown platform.
++ If M is selected the module will be called mid_gfx.
++
++config DRM_MDFLD
++ tristate "Intel Medfield"
++ depends on DRM_INTEL_MID && PCI
++ select FB_CFB_COPYAREA
++ select FB_CFB_FILLRECT
++ select FB_CFB_IMAGEBLIT
++ select DRM_KMS_HELPER
++ help
++ Choose this option if you have a Medfield platform.
++ If M is selected the module will be called mid_gfx.
++
++config MDFLD_DSI_DPU
++ bool "Support DSI Display Partial Update"
++ depends on DRM_MDFLD
++ default n
++ help
++ xxxxxx
++
++config MDFD_COMMAND_MODE
++ bool "SUPPORT_MIPI_COMMAND_MODE"
++ depends on DRM_MDFLD
++ default y
++ help
++ xxxxxx
++
++config MDFD_DUAL_MIPI
++ bool "SUPPORT_DUAL_MIPI_DISPLAYS"
++ depends on DRM_MDFLD
++ default y
++ help
++ xxxxxx
++
++config MDFD_COMMAND_MODE_2
++ bool "SUPPORT_MIPI_COMMAND_MODE_2"
++ depends on DRM_MDFLD
++ default y
++ help
++ xxxxxx
++
++config MDFD_HDMI
++ bool "SUPPORT_HDMI_DISPLAY"
++ depends on DRM_MDFLD
++ default n
++ help
++ xxxxxx
++
+--- /dev/null
++++ b/drivers/staging/mrst/Makefile
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_DRM_MDFLD) += medfield/
++obj-$(CONFIG_DRM_MRST) += moorestown/
++
++obj-$(CONFIG_DRM_INTEL_MID) += drm_global.o
+--- /dev/null
++++ b/drivers/staging/mrst/drm_global.c
+@@ -0,0 +1,113 @@
++/**************************************************************************
++ *
++ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++#include <drm/drmP.h>
++#include "drm_global.h"
++
++struct drm_global_item {
++ struct mutex mutex;
++ void *object;
++ int refcount;
++};
++
++static struct drm_global_item glob[DRM_GLOBAL_NUM];
++
++int drm_global_init(void)
++{
++ int i;
++
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ mutex_init(&item->mutex);
++ item->object = NULL;
++ item->refcount = 0;
++ }
++ return 0;
++}
++
++void drm_global_release(void)
++{
++ int i;
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ BUG_ON(item->object != NULL);
++ BUG_ON(item->refcount != 0);
++ }
++}
++
++int drm_global_item_ref(struct drm_global_reference *ref)
++{
++ int ret;
++ struct drm_global_item *item = &glob[ref->global_type];
++ void *object;
++
++ mutex_lock(&item->mutex);
++ if (item->refcount == 0) {
++ item->object = kmalloc(ref->size, GFP_KERNEL);
++ if (unlikely(item->object == NULL)) {
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ ref->object = item->object;
++ ret = ref->init(ref);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ++item->refcount;
++ }
++ ref->object = item->object;
++ object = item->object;
++ mutex_unlock(&item->mutex);
++ return 0;
++ out_err:
++ kfree(item->object);
++ mutex_unlock(&item->mutex);
++ item->object = NULL;
++ return ret;
++}
++
++EXPORT_SYMBOL(drm_global_item_ref);
++
++void drm_global_item_unref(struct drm_global_reference *ref)
++{
++ struct drm_global_item *item = &glob[ref->global_type];
++
++ mutex_lock(&item->mutex);
++ BUG_ON(item->refcount == 0);
++ BUG_ON(ref->object != item->object);
++ if (--item->refcount == 0) {
++ ref->release(ref);
++ kfree(item->object);
++ item->object = NULL;
++ }
++ mutex_unlock(&item->mutex);
++}
++
++EXPORT_SYMBOL(drm_global_item_unref);
++
++subsys_initcall(drm_global_init);
++module_exit(drm_global_release);
+--- /dev/null
++++ b/drivers/staging/mrst/drm_global.h
+@@ -0,0 +1,25 @@
++#ifndef __DRM_GLOBAL_H
++#define __DRM_GLOBAL_H
++
++#include <linux/types.h>
++
++enum drm_global_types {
++ DRM_GLOBAL_TTM_MEM = 0,
++ DRM_GLOBAL_TTM_BO,
++ DRM_GLOBAL_TTM_OBJECT,
++ DRM_GLOBAL_NUM
++};
++
++struct drm_global_reference {
++ enum drm_global_types global_type;
++ size_t size;
++void *object;
++ int (*init) (struct drm_global_reference *);
++ void (*release) (struct drm_global_reference *);
++};
++
++extern int drm_global_init(void);
++extern void drm_global_release(void);
++extern int drm_global_item_ref(struct drm_global_reference *ref);
++extern void drm_global_item_unref(struct drm_global_reference *ref);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/lnc_topaz.c
+@@ -0,0 +1,751 @@
++/**
++ * file lnc_topaz.c
++ * TOPAZ I/O operations and IRQ handling
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* include headers */
++/* #define DRM_DEBUG_CODE 2 */
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "lnc_topaz.h"
++#include "psb_powermgmt.h"
++#include "lnc_topaz_hw_reg.h"
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#define TOPAZ_RM_MULTI_MTX_WRITE
++
++/* static function define */
++static int lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd);
++static int lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq);
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
++static int lnc_topaz_dequeue_send(struct drm_device *dev);
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence);
++
++IMG_BOOL lnc_topaz_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ uint32_t clr_flag;
++ struct topaz_private *topaz_priv;
++ uint32_t topaz_stat;
++ uint32_t cur_seq;
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: TOPAZ %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->topaz_hw_busy = REG_READ(0x20D0) & (0x1 << 11);
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat);
++ clr_flag = lnc_topaz_queryirq(dev);
++
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ /* ignore non-SYNC interrupts */
++ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
++ return IMG_TRUE;
++
++ cur_seq = *(uint32_t *)topaz_priv->topaz_sync_addr;
++
++ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
++ cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
++
++ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
++
++ /* save frame skip flag for query */
++ topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);
++
++ topaz_priv->topaz_busy = 1;
++ lnc_topaz_dequeue_send(dev);
++
++ if (drm_topaz_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
++
++ return IMG_TRUE;
++}
++
++static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long irq_flags;
++ int ret = 0;
++ void *cmd;
++ uint32_t tmp;
++ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
++
++ /* FIXME: workaround for HSD 3469585
++ * disable DRAM Self Refresh Mode
++ * by resetting DUNIT.DPMC0
++ */
++ if (IS_MRST(dev)) {
++ uint32_t ui32_reg_value = 0;
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value & (~(0x1 << 7))));
++ }
++
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ /* #.# load fw to driver */
++ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0) {
++ /* FIXME: find a proper return value */
++ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail,"
++ "ensure udevd is configured correctly!\n");
++
++ return -EFAULT;
++ }
++ topaz_priv->topaz_fw_loaded = 1;
++ }
++
++ tmp = atomic_cmpxchg(&dev_priv->topaz_mmu_invaldc, 1, 0);
++ if (tmp == 1)
++ topaz_mmu_flushcache(dev_priv);
++
++ /* # schedule watchdog */
++ /* psb_schedule_watchdog(dev_priv); */
++
++ /* # spin lock irq save [msvdx_lock] */
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++
++ /* # if topaz need to reset, reset it */
++ if (topaz_priv->topaz_needs_reset) {
++ /* #.# reset it */
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
++
++ if (lnc_topaz_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("TOPAZ: reset failed.\n");
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
++
++ /* #.# upload firmware */
++ if (topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ }
++
++ if (!topaz_priv->topaz_busy) {
++ /* # direct map topaz command if topaz is free */
++ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
++ sequence);
++
++ topaz_priv->topaz_busy = 1;
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
++ return ret;
++ }
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
++ sequence);
++ cmd = NULL;
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (cmd == NULL || ret) {
++ DRM_ERROR("TOPAZ: map command for save fialed\n");
++ return ret;
++ }
++
++ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
++ if (ret)
++ DRM_ERROR("TOPAZ: save command failed\n");
++ }
++
++ return ret;
++}
++
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd;
++ unsigned long irq_flags;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
++ sequence);
++
++ topaz_cmd = kzalloc(sizeof(struct lnc_topaz_cmd_queue),
++ GFP_KERNEL);
++ if (topaz_cmd == NULL) {
++ mutex_unlock(&topaz_priv->topaz_mutex);
++ DRM_ERROR("TOPAZ: out of memory....\n");
++ return -ENOMEM;
++ }
++
++ topaz_cmd->cmd = cmd;
++ topaz_cmd->cmd_size = cmd_size;
++ topaz_cmd->sequence = sequence;
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
++ if (!topaz_priv->topaz_busy) {
++ /* topaz_priv->topaz_busy = 1; */
++ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
++ lnc_topaz_dequeue_send(dev);
++ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
++ }
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ return 0;
++}
++
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence = NULL;
++ int ret;
++
++ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, fence);
++ if (ret)
++ return ret;
++
++ /* workaround for interrupt issue */
++ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
++ validate_list, fence_arg, &fence);
++
++ if (fence)
++ ttm_fence_object_unref(&fence);
++
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sync_cmd[3];
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++#if 0
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[LNC_ENGINE_ENCODE];
++ unsigned long irq_flags;
++#endif
++#if LNC_TOPAZ_NO_IRQ
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 10000;
++ uint32_t cur_seq;
++#endif
++
++ /* insert a SYNC command here */
++ topaz_priv->topaz_sync_cmd_seq = (1 << 15) |
++ topaz_priv->topaz_cmd_seq++;
++ sync_cmd[0] = 1 | (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (topaz_priv->topaz_sync_cmd_seq << 16);
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
++ "sync_seq (0x%08x)\n",
++ topaz_priv->topaz_sync_cmd_seq, sync_seq);
++
++ if (drm_topaz_sbuswa)
++ TOPAZ_WAIT_UNTIL_IDLE;
++
++ lnc_mtx_send(dev_priv, sync_cmd);
++
++#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
++ /* # poll topaz register for certain times */
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
++ sync_seq, *sync_p);
++ return -EBUSY;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
++
++ topaz_priv->topaz_busy = 0;
++
++ /* XXX: check psb_fence_handler is suitable for topaz */
++ cur_seq = *sync_p;
++#if 0
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
++ cur_seq,
++ _PSB_FENCE_TYPE_EXE, 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++#endif
++#endif
++ return 0;
++}
++
++int
++lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd)
++{
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ bool is_iomem;
++ int ret;
++ unsigned char *cmd_start, *tmp;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
++ &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
++ return ret;
++ }
++ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + cmd_page_offset;
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *topaz_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
++ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: commit commands failed.\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
++ cmd_size, sequence, copy_cmd);
++
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int
++lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++ unsigned char *command = (unsigned char *) cmd;
++ struct topaz_cmd_header *cur_cmd_header;
++ uint32_t cur_cmd_size, cur_cmd_id;
++ uint32_t codec;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
++
++ while (cmd_size > 0) {
++ cur_cmd_header = (struct topaz_cmd_header *) command;
++ cur_cmd_size = cur_cmd_header->size * 4;
++ cur_cmd_id = cur_cmd_header->id;
++
++ switch (cur_cmd_id) {
++ case MTX_CMDID_SW_NEW_CODEC:
++ codec = *((uint32_t *) cmd + 1);
++
++ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
++ codec_to_string(codec), codec);
++ if (topaz_setup_fw(dev, codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ topaz_priv->topaz_cur_codec = codec;
++ break;
++
++ case MTX_CMDID_SW_ENTER_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ case MTX_CMDID_SW_LEAVE_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++		/* ordinary command */
++ case MTX_CMDID_START_PIC:
++ /* XXX: specially handle START_PIC hw command */
++ CCB_CTRL_SET_QP(dev_priv,
++ *(command + cur_cmd_size - 4));
++ /* strip the QP parameter (it's software arg) */
++ cur_cmd_header->size--;
++ default:
++ cur_cmd_header->seq = 0x7fff &
++ topaz_priv->topaz_cmd_seq++;
++
++ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
++ " seq (0x%04x)\n",
++ cmd_to_string(cur_cmd_id),
++ cur_cmd_size, cur_cmd_header->seq);
++
++ if (drm_topaz_sbuswa && cur_cmd_id != \
++ MTX_CMDID_START_PIC)
++ TOPAZ_WAIT_UNTIL_IDLE;
++
++ ret = lnc_mtx_send(dev_priv, command);
++ if (ret) {
++ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
++ goto out;
++ }
++ break;
++ }
++
++ command += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ }
++ lnc_topaz_sync(dev, sync_seq);
++out:
++ return ret;
++}
++
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
++{
++ struct topaz_cmd_header *cur_cmd_header =
++ (struct topaz_cmd_header *) cmd;
++ uint32_t cmd_size = cur_cmd_header->size;
++ uint32_t read_index, write_index;
++ const uint32_t *cmd_pointer = (uint32_t *) cmd;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ int ret = 0;
++
++ /* <msvdx does> # enable all clock */
++
++ write_index = topaz_priv->topaz_cmd_windex;
++ if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) {
++ int free_space = topaz_priv->topaz_ccb_size - write_index;
++
++ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
++ if (free_space > 0) {
++ struct topaz_cmd_header pad_cmd;
++
++ pad_cmd.id = MTX_CMDID_NULL;
++ pad_cmd.size = free_space;
++ pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
++ " size(%d),seq (0x%04x)\n",
++ pad_cmd.size, pad_cmd.seq);
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
++#else
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ pad_cmd.val);
++ topaz_priv->topaz_cmd_windex++;
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ POLL_WB_SEQ(dev_priv, pad_cmd.seq);
++ ++topaz_priv->topaz_cmd_seq;
++ }
++ POLL_WB_RINDEX(dev_priv, 0);
++ if (ret == 0)
++ topaz_priv->topaz_cmd_windex = 0;
++ else {
++ DRM_ERROR("TOPAZ: poll rindex timeout\n");
++ return ret; /* HW may hang, need reset */
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
++ }
++
++	read_index = CCB_CTRL_RINDEX(dev_priv);/* temporarily use CCB CTRL */
++ write_index = topaz_priv->topaz_cmd_windex;
++
++ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
++ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ while (cmd_size > 0) {
++ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
++ --cmd_size;
++ }
++#else
++ while (cmd_size > 0) {
++ topaz_write_mtx_mem(
++ dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ *cmd_pointer++);
++ topaz_priv->topaz_cmd_windex++;
++ --cmd_size;
++ }
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++#if 0
++ DRM_UDELAY(1000);
++ lnc_topaz_clearirq(dev,
++ lnc_topaz_queryirq(dev));
++ LNC_TRACEL("TOPAZ: after clear, query again\n");
++ lnc_topaz_queryirq(dev_priv);
++#endif
++
++ return ret;
++}
++
++int lnc_topaz_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
++ int ret;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return 0;
++ }
++
++ topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
++ struct lnc_topaz_cmd_queue, head);
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
++ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
++ topaz_cmd->sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
++ ret = -EINVAL;
++ }
++
++ list_del(&topaz_cmd->head);
++ kfree(topaz_cmd->cmd);
++ kfree(topaz_cmd
++ );
++
++ return ret;
++}
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
++{
++ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
++ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
++}
++
++int lnc_check_topaz_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (topaz_priv->topaz_fw_loaded == 0)
++ return 0;
++
++ if (topaz_priv->topaz_busy)
++ return -EBUSY;
++
++ if (topaz_priv->topaz_hw_busy) {
++ PSB_DEBUG_PM("TOPAZ: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0; /* we think it is idle */
++}
++
++int lnc_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ int ret;
++
++ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
++ &topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++static void lnc_topaz_flush_cmd_queue(struct topaz_private *topaz_priv)
++{
++ struct lnc_topaz_cmd_queue *entry, *next;
++
++ /* remind to reset topaz */
++ topaz_priv->topaz_needs_reset = 1;
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return;
++ }
++
++ /* flush all command in queue */
++ list_for_each_entry_safe(entry, next,
++ &topaz_priv->topaz_queue,
++ head) {
++ list_del(&entry->head);
++ kfree(entry->cmd);
++ kfree(entry);
++ }
++
++ return;
++}
++
++void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ lnc_topaz_flush_cmd_queue(topaz_priv);
++}
++
++inline int psb_try_power_down_topaz(struct drm_device *dev)
++{
++ ospm_apm_power_down_topaz(dev);
++ return 0;
++}
++
++void lnc_map_topaz_reg(struct drm_device *dev)
++{
++ unsigned long resource_start;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_TOPAZ(dev)) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ DRM_ERROR("failed to map TOPAZ register address\n");
++ }
++
++ return;
++}
++
++void lnc_unmap_topaz_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if (IS_TOPAZ(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ return;
++}
++
++
++void lnc_topaz_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; */
++
++ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
++ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++void lnc_topaz_disableirq(struct drm_device *dev)
++{
++
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); */
++
++ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/lnc_topaz.h
+@@ -0,0 +1,154 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _LNC_TOPAZ_H_
++#define _LNC_TOPAZ_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#define LNC_TOPAZ_NO_IRQ 0
++#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
++/* Must be equal to IMG_CODEC_NUM */
++#define LNC_IMG_CODEC_NUM_MAX (10)
++
++extern int drm_topaz_pmpolicy;
++
++
++/* XXX: it's a copy of msvdx cmd queue. should have some change? */
++struct lnc_topaz_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++
++/* define structure */
++/* firmware file's info head */
++struct topaz_fwinfo {
++ unsigned int ver:16;
++ unsigned int codec:16;
++
++ unsigned int text_size;
++ unsigned int data_size;
++ unsigned int data_location;
++};
++
++/* firmware data array define */
++struct topaz_codec_fw {
++ uint32_t ver;
++ uint32_t codec;
++
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++
++ struct ttm_buffer_object *text;
++ struct ttm_buffer_object *data;
++};
++
++struct topaz_private {
++ /* current video task */
++ unsigned int pmstate;
++ struct sysfs_dirent *sysfs_pmstate;
++ int frame_skip;
++
++ void *topaz_mtx_reg_state;
++ struct ttm_buffer_object *topaz_mtx_data_mem;
++ uint32_t topaz_cur_codec;
++ uint32_t cur_mtx_data_size;
++ int topaz_needs_reset;
++
++ /*
++ *topaz command queue
++ */
++ spinlock_t topaz_lock;
++ struct mutex topaz_mutex;
++ struct list_head topaz_queue;
++ int topaz_busy; /* 0 means topaz is free */
++ int topaz_fw_loaded;
++
++ /* topaz ccb data */
++ /* XXX: should the addr stored by 32 bits? more compatible way?? */
++ uint32_t topaz_ccb_buffer_addr;
++ uint32_t topaz_ccb_ctrl_addr;
++ uint32_t topaz_ccb_size;
++ uint32_t topaz_cmd_windex;
++ uint16_t topaz_cmd_seq;
++
++ uint32_t stored_initial_qp;
++ uint32_t topaz_dash_access_ctrl;
++
++ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
++ struct ttm_bo_kmap_obj topaz_bo_kmap;
++ void *topaz_ccb_wb;
++ uint32_t topaz_wb_offset;
++ uint32_t *topaz_sync_addr;
++ uint32_t topaz_sync_offset;
++ uint32_t topaz_sync_cmd_seq;
++ uint32_t topaz_mtx_saved;
++
++ /* firmware */
++ struct topaz_codec_fw topaz_fw[LNC_IMG_CODEC_NUM_MAX];
++
++ uint32_t topaz_hw_busy;
++};
++
++/* external function declare */
++/*lnc_topaz.c*/
++extern int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++
++extern IMG_BOOL lnc_topaz_interrupt(IMG_VOID *pvData);
++
++/* lnc_topazinit.c*/
++extern int lnc_wait_topaz_idle(struct drm_device *dev);
++extern int lnc_check_topaz_idle(struct drm_device *dev);
++extern void lnc_unmap_topaz_reg(struct drm_device *dev);
++extern void lnc_map_topaz_reg(struct drm_device *dev);
++
++extern int lnc_topaz_restore_mtx_state(struct drm_device *dev);
++
++extern int lnc_topaz_init(struct drm_device *dev);
++extern int lnc_topaz_uninit(struct drm_device *dev);
++
++extern void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev);
++
++extern void lnc_topaz_enableirq(struct drm_device *dev);
++extern void lnc_topaz_disableirq(struct drm_device *dev);
++
++extern int lnc_topaz_save_mtx_state(struct drm_device *dev);
++
++#define TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \
++do { \
++ topaz_priv->pmstate = new_state; \
++ sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("TOPAZ: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \
++ : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \
++ : "clockgated")); \
++} while (0)
++
++#endif /* _LNC_TOPAZ_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/lnc_topaz_hw_reg.h
+@@ -0,0 +1,787 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _LNC_TOPAZ_HW_REG_H_
++#define _LNC_TOPAZ_HW_REG_H_
++
++#ifdef _PNW_TOPAZ_HW_REG_H_
++#error "pnw_topaz_hw_reg.h shouldn't be included"
++#endif
++
++#include "psb_drv.h"
++#include "img_types.h"
++#include "lnc_topaz.h"
++
++#define LNC_TOPAZ_NO_IRQ 0
++#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
++
++extern int drm_topaz_pmpolicy;
++
++/*
++ * MACROS to insert values into fields within a word. The basename of the
++ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
++ */
++#define MM_WRITE32(base, offset, value) \
++do { \
++ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
++ + base + offset)) = value; \
++} while (0)
++
++#define MM_READ32(base, offset, pointer) \
++do { \
++ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
++ + base + offset)); \
++} while (0)
++
++#define F_MASK(basename) (MASK_##basename)
++#define F_SHIFT(basename) (SHIFT_##basename)
++
++#define F_ENCODE(val, basename) \
++ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
++
++/* MVEA macro */
++#define MVEA_START 0x03000
++
++#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
++#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer);
++
++#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
++#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
++#define F_ENCODE_MVEA(val, basename) \
++ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
++
++/* VLC macro */
++#define TOPAZ_VLC_START 0x05000
++
++/* TOPAZ macro */
++#define TOPAZ_START 0x02000
++
++#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
++#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
++
++#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
++#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
++#define F_ENCODE_TOPAZ(val, basename) \
++ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
++
++/* MTX macro */
++#define MTX_START 0x0
++
++#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
++#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
++
++/* DMAC macro */
++#define DMAC_START 0x0f000
++
++#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
++#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
++
++#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
++#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
++#define F_ENCODE_DMAC(val, basename) \
++ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
++
++
++/* Register CR_IMG_TOPAZ_INTENAB */
++#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
++
++#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
++
++#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
++
++#define MTX_CCBCTRL_ROFF 0
++#define MTX_CCBCTRL_COMPLETE 4
++#define MTX_CCBCTRL_CCBSIZE 8
++#define MTX_CCBCTRL_QP 12
++#define MTX_CCBCTRL_FRAMESKIP 20
++#define MTX_CCBCTRL_INITQP 24
++
++#define TOPAZ_CR_MMU_STATUS 0x001C
++#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
++#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
++#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
++
++#define TOPAZ_CR_MMU_MEM_REQ 0x0020
++#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
++#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
++#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
++
++#define MTX_CR_MTX_KICK 0x0080
++#define MASK_MTX_MTX_KICK 0x0000FFFF
++#define SHIFT_MTX_MTX_KICK 0
++#define REGNUM_MTX_MTX_KICK 0x0080
++
++#define MTX_DATA_MEM_BASE 0x82880000
++
++#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
++#define MASK_MTX_MTX_MCMR 0x00000001
++#define SHIFT_MTX_MTX_MCMR 0
++#define REGNUM_MTX_MTX_MCMR 0x0108
++
++#define MASK_MTX_MTX_MCMID 0x0FF00000
++#define SHIFT_MTX_MTX_MCMID 20
++#define REGNUM_MTX_MTX_MCMID 0x0108
++
++#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
++#define SHIFT_MTX_MTX_MCM_ADDR 2
++#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
++#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
++#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
++#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
++
++#define MASK_MTX_MTX_MCMAI 0x00000002
++#define SHIFT_MTX_MTX_MCMAI 1
++#define REGNUM_MTX_MTX_MCMAI 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
++
++#define MVEA_CR_MVEA_BUSY 0x0018
++#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
++#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
++
++#define MVEA_CR_IMG_MVEA_SRST 0x0000
++#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
++#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
++#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
++#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
++#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
++#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
++#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
++#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
++#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
++#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
++#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
++#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
++#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
++
++#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
++#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
++
++#define TOPAZ_MTX_PC (0x00000005)
++#define PC_START_ADDRESS (0x80900000)
++
++#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
++#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
++
++#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
++#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
++#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
++#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
++#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
++
++#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
++
++#define TOPAZ_CR_MMU_CONTROL0 0x0024
++#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
++#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
++#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
++
++#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
++#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
++#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
++#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
++
++#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
++#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
++#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
++
++#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
++#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
++#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
++
++#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
++#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
++#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
++#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
++
++#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
++#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
++#define TXRPT_WAITONKICK_VALUE 0x8ade0000
++
++#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
++
++#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
++#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
++
++#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
++#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
++
++#define MTX_CR_MTX_SYSC_CDMAA 0x0344
++#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
++#define SHIFT_MTX_CDMAA_ADDRESS 2
++#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
++
++#define MTX_CR_MTX_SYSC_CDMAC 0x0340
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define MASK_MTX_BURSTSIZE 0x07000000
++#define SHIFT_MTX_BURSTSIZE 24
++#define REGNUM_MTX_BURSTSIZE 0x0340
++
++#define MASK_MTX_RNW 0x00020000
++#define SHIFT_MTX_RNW 17
++#define REGNUM_MTX_RNW 0x0340
++
++#define MASK_MTX_ENABLE 0x00010000
++#define SHIFT_MTX_ENABLE 16
++#define REGNUM_MTX_ENABLE 0x0340
++
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
++
++#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
++#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
++#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
++#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
++#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
++#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
++#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
++#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
++#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
++#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
++
++#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
++#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
++#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
++#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
++
++#define MTX_CR_MTX_SYSC_CDMAT 0x0350
++#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
++#define SHIFT_MTX_TRANSFERDATA 0
++#define REGNUM_MTX_TRANSFERDATA 0x0350
++
++#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
++#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
++#define SHIFT_IMG_SOC_TRANSFER_FIN 17
++#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
++
++#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
++#define MASK_IMG_SOC_CNT 0x0000FFFF
++#define SHIFT_IMG_SOC_CNT 0
++#define REGNUM_IMG_SOC_CNT 0x0004
++
++#define MASK_IMG_SOC_EN 0x00010000
++#define SHIFT_IMG_SOC_EN 16
++#define REGNUM_IMG_SOC_EN 0x0004
++
++#define MASK_IMG_SOC_LIST_EN 0x00040000
++#define SHIFT_IMG_SOC_LIST_EN 18
++#define REGNUM_IMG_SOC_LIST_EN 0x0004
++
++#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
++#define MASK_IMG_SOC_PER_HOLD 0x0000007F
++#define SHIFT_IMG_SOC_PER_HOLD 0
++#define REGNUM_IMG_SOC_PER_HOLD 0x0018
++
++#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
++#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
++#define SHIFT_IMG_SOC_START_ADDRESS 0
++#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
++
++#define MASK_IMG_SOC_BSWAP 0x40000000
++#define SHIFT_IMG_SOC_BSWAP 30
++#define REGNUM_IMG_SOC_BSWAP 0x0004
++
++#define MASK_IMG_SOC_PW 0x18000000
++#define SHIFT_IMG_SOC_PW 27
++#define REGNUM_IMG_SOC_PW 0x0004
++
++#define MASK_IMG_SOC_DIR 0x04000000
++#define SHIFT_IMG_SOC_DIR 26
++#define REGNUM_IMG_SOC_DIR 0x0004
++
++#define MASK_IMG_SOC_PI 0x03000000
++#define SHIFT_IMG_SOC_PI 24
++#define REGNUM_IMG_SOC_PI 0x0004
++#define IMG_SOC_PI_1 0x00000002
++#define IMG_SOC_PI_2 0x00000001
++#define IMG_SOC_PI_4 0x00000000
++
++#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
++#define SHIFT_IMG_SOC_TRANSFER_IEN 29
++#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
++
++#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
++ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
++ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
++ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
++ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
++ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
++
++#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
++#define MASK_IMG_SOC_EXT_SA 0x0000000F
++#define SHIFT_IMG_SOC_EXT_SA 0
++#define REGNUM_IMG_SOC_EXT_SA 0x0008
++
++#define MASK_IMG_SOC_ACC_DEL 0xE0000000
++#define SHIFT_IMG_SOC_ACC_DEL 29
++#define REGNUM_IMG_SOC_ACC_DEL 0x0008
++
++#define MASK_IMG_SOC_INCR 0x08000000
++#define SHIFT_IMG_SOC_INCR 27
++#define REGNUM_IMG_SOC_INCR 0x0008
++
++#define MASK_IMG_SOC_BURST 0x07000000
++#define SHIFT_IMG_SOC_BURST 24
++#define REGNUM_IMG_SOC_BURST 0x0008
++
++#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
++((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
++(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
++(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
++
++#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
++#define MASK_IMG_SOC_ADDR 0x007FFFFF
++#define SHIFT_IMG_SOC_ADDR 0
++#define REGNUM_IMG_SOC_ADDR 0x0014
++
++#define SHIFT_TOPAZ_VEC_BUSY 11
++#define MASK_TOPAZ_VEC_BUSY (0x1<<SHIFT_TOPAZ_VEC_BUSY)
++
++#define TOPAZ_MTX_TXRPT_OFFSET 0xc
++#define TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET 0x20D0
++
++#define TOPAZ_GUNIT_READ32(offset) ioread32(dev_priv->vdc_reg + offset)
++#define TOPAZ_READ_BITS(val, basename) \
++ (((val)&MASK_TOPAZ_##basename)>>SHIFT_TOPAZ_##basename)
++
++#define TOPAZ_WAIT_UNTIL_IDLE \
++ do { \
++ uint8_t tmp_poll_number = 0;\
++ uint32_t tmp_reg; \
++ if (topaz_priv->topaz_cmd_windex == WB_CCB_CTRL_RINDEX(dev_priv)) { \
++ tmp_reg = TOPAZ_GUNIT_READ32(TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET);\
++ if (0 != TOPAZ_READ_BITS(tmp_reg, VEC_BUSY)) { \
++ MTX_READ32(TOPAZ_MTX_TXRPT_OFFSET, &tmp_reg);\
++ while ((tmp_reg != 0x8ade0000) && \
++ (tmp_poll_number++ < 10)) \
++ MTX_READ32(0xc, &tmp_reg); \
++ PSB_DEBUG_GENERAL( \
++ "TOPAZ: TXRPT reg remain: %x,poll %d times.\n",\
++ tmp_reg, tmp_poll_number);\
++ } \
++ } \
++ } while (0)
++
++/* **************** DMAC define **************** */
++enum DMAC_eBSwap {
++ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
++ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
++};
++
++enum DMAC_ePW {
++ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
++ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
++ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
++};
++
++enum DMAC_eAccDel {
++ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
++ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
++ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
++ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
++ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
++ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
++ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
++ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
++};
++
++enum DMAC_eBurst {
++ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
++ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
++ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
++ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
++ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
++ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
++ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
++ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
++};
++
++/* codecs topaz supports,shared with user space driver */
++enum drm_lnc_topaz_codec {
++ IMG_CODEC_JPEG = 0,
++ IMG_CODEC_H264_NO_RC,
++ IMG_CODEC_H264_VBR,
++ IMG_CODEC_H264_CBR,
++ IMG_CODEC_H263_NO_RC,
++ IMG_CODEC_H263_VBR,
++ IMG_CODEC_H263_CBR,
++ IMG_CODEC_MPEG4_NO_RC,
++ IMG_CODEC_MPEG4_VBR,
++ IMG_CODEC_MPEG4_CBR,
++ IMG_CODEC_NUM
++};
++
++/* commands for topaz,shared with user space driver */
++enum drm_lnc_topaz_cmd {
++ MTX_CMDID_NULL = 0,
++ MTX_CMDID_DO_HEADER = 1,
++ MTX_CMDID_ENCODE_SLICE = 2,
++ MTX_CMDID_WRITEREG = 3,
++ MTX_CMDID_START_PIC = 4,
++ MTX_CMDID_END_PIC = 5,
++ MTX_CMDID_SYNC = 6,
++ MTX_CMDID_ENCODE_ONE_ROW = 7,
++ MTX_CMDID_FLUSH = 8,
++ MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
++ MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
++ MTX_CMDID_SW_NEW_CODEC = 0x7f
++};
++
++struct topaz_cmd_header {
++ union {
++ struct {
++ unsigned long enable_interrupt:1;
++ unsigned long id:7;
++ unsigned long size:8;
++ unsigned long seq:16;
++ };
++ uint32_t val;
++ };
++};
++
++/* lnc_topazinit.c */
++int lnc_topaz_reset(struct drm_psb_private *dev_priv);
++int topaz_init_fw(struct drm_device *dev);
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value,
++ uint32_t enable);
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val);
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr);
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t addr);
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val);
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout);
++
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++
++/* macros to get/set CCB control data */
++#define WB_CCB_CTRL_RINDEX(dev_priv) \
++(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb))
++
++#define WB_CCB_CTRL_SEQ(dev_priv) \
++(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb\
++ + 1))
++
++#define POLL_WB_RINDEX(dev_priv, value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(100); \
++ } \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define POLL_WB_SEQ(dev_priv, value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (CCB_CTRL_SEQ(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(1000); \
++ } \
++ if (CCB_CTRL_SEQ(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%08x(mtx) vs 0x%08x\n",\
++ WB_CCB_CTRL_SEQ(dev_priv), value); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_QP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_QP)
++
++#define CCB_CTRL_SEQ(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_COMPLETE)
++
++#define CCB_CTRL_FRAMESKIP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_FRAMESKIP)
++
++#define CCB_CTRL_SET_QP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_QP, qp)
++
++#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_INITQP, qp)
++
++
++#define TOPAZ_BEGIN_CCB(dev_priv) \
++ topaz_write_mtx_mem_multiple_setup(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_buffer_addr + \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex * 4)
++
++#define TOPAZ_OUT_CCB(dev_priv, cmd) \
++do { \
++ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex++; \
++} while (0)
++
++#define TOPAZ_END_CCB(dev_priv, kick_count) \
++ topaz_mtx_kick(dev_priv, 1);
++
++static inline char *cmd_to_string(int cmd_id)
++{
++ switch (cmd_id) {
++ case MTX_CMDID_START_PIC:
++ return "MTX_CMDID_START_PIC";
++ case MTX_CMDID_END_PIC:
++ return "MTX_CMDID_END_PIC";
++ case MTX_CMDID_DO_HEADER:
++ return "MTX_CMDID_DO_HEADER";
++ case MTX_CMDID_ENCODE_SLICE:
++ return "MTX_CMDID_ENCODE_SLICE";
++ case MTX_CMDID_SYNC:
++ return "MTX_CMDID_SYNC";
++
++ default:
++ return "Undefined command";
++
++ }
++}
++
++static inline char *codec_to_string(int codec)
++{
++ switch (codec) {
++ case IMG_CODEC_H264_NO_RC:
++ return "H264_NO_RC";
++ case IMG_CODEC_H264_VBR:
++ return "H264_VBR";
++ case IMG_CODEC_H264_CBR:
++ return "H264_CBR";
++ case IMG_CODEC_H263_NO_RC:
++ return "H263_NO_RC";
++ case IMG_CODEC_H263_VBR:
++ return "H263_VBR";
++ case IMG_CODEC_H263_CBR:
++ return "H263_CBR";
++ case IMG_CODEC_MPEG4_NO_RC:
++ return "MPEG4_NO_RC";
++ case IMG_CODEC_MPEG4_VBR:
++ return "MPEG4_VBR";
++ case IMG_CODEC_MPEG4_CBR:
++ return "MPEG4_CBR";
++ default:
++ return "Undefined codec";
++ }
++}
++
++static inline void lnc_topaz_clearirq(struct drm_device *dev,
++ uint32_t clear_topaz)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
++ if (clear_topaz != 0)
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
++
++ /* PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
++}
++
++static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t val, /* iir, */ clear = 0;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
++ /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
++
++ (void) topaz_priv;
++
++ if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
++ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
++ return 0;
++ }
++
++ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
++
++ if (val & (1<<31))
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)topaz_priv->topaz_sync_addr);
++ else
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)topaz_priv->topaz_sync_addr);
++
++ if (val & 0x8) {
++ uint32_t mmu_status, mmu_req;
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
++ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
++
++ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
++ "address=0x%08x,mem req=0x%08x\n",
++ mmu_status, mmu_req);
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
++ }
++
++ if (val & 0x4) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
++ }
++
++ if (val & 0x2) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
++ }
++
++ if (val & 0x1) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
++ }
++
++ return clear;
++}
++
++#endif /* _LNC_TOPAZ_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/lnc_topazinit.c
+@@ -0,0 +1,2062 @@
++/**
++ * file lnc_topazinit.c
++ * TOPAZ initialization and mtx-firmware upload
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* NOTE: (READ BEFORE REFINE CODE)
++ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
++ * measured by word to DMAC.
++ *
++ *
++ *
++ */
++
++/* include headers */
++
++/* #define DRM_DEBUG_CODE 2 */
++
++#include <linux/firmware.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "psb_drv.h"
++#include "lnc_topaz.h"
++#include "lnc_topaz_hw_reg.h"
++#include "psb_powermgmt.h"
++
++/* WARNING: this define is very important */
++#define RAM_SIZE (1024 * 24)
++
++/* register default values
++ * THIS HEADER IS ONLY INCLUDE ONCE*/
++static unsigned long topaz_default_regs[183][3] = {
++ {MVEA_START, 0x00000000, 0x00000000},
++ {MVEA_START, 0x00000004, 0x00000400},
++ {MVEA_START, 0x00000008, 0x00000000},
++ {MVEA_START, 0x0000000C, 0x00000000},
++ {MVEA_START, 0x00000010, 0x00000000},
++ {MVEA_START, 0x00000014, 0x00000000},
++ {MVEA_START, 0x00000018, 0x00000000},
++ {MVEA_START, 0x0000001C, 0x00000000},
++ {MVEA_START, 0x00000020, 0x00000120},
++ {MVEA_START, 0x00000024, 0x00000000},
++ {MVEA_START, 0x00000028, 0x00000000},
++ {MVEA_START, 0x00000100, 0x00000000},
++ {MVEA_START, 0x00000104, 0x00000000},
++ {MVEA_START, 0x00000108, 0x00000000},
++ {MVEA_START, 0x0000010C, 0x00000000},
++ {MVEA_START, 0x0000011C, 0x00000001},
++ {MVEA_START, 0x0000012C, 0x00000000},
++ {MVEA_START, 0x00000180, 0x00000000},
++ {MVEA_START, 0x00000184, 0x00000000},
++ {MVEA_START, 0x00000188, 0x00000000},
++ {MVEA_START, 0x0000018C, 0x00000000},
++ {MVEA_START, 0x00000190, 0x00000000},
++ {MVEA_START, 0x00000194, 0x00000000},
++ {MVEA_START, 0x00000198, 0x00000000},
++ {MVEA_START, 0x0000019C, 0x00000000},
++ {MVEA_START, 0x000001A0, 0x00000000},
++ {MVEA_START, 0x000001A4, 0x00000000},
++ {MVEA_START, 0x000001A8, 0x00000000},
++ {MVEA_START, 0x000001AC, 0x00000000},
++ {MVEA_START, 0x000001B0, 0x00000000},
++ {MVEA_START, 0x000001B4, 0x00000000},
++ {MVEA_START, 0x000001B8, 0x00000000},
++ {MVEA_START, 0x000001BC, 0x00000000},
++ {MVEA_START, 0x000001F8, 0x00000000},
++ {MVEA_START, 0x000001FC, 0x00000000},
++ {MVEA_START, 0x00000200, 0x00000000},
++ {MVEA_START, 0x00000204, 0x00000000},
++ {MVEA_START, 0x00000208, 0x00000000},
++ {MVEA_START, 0x0000020C, 0x00000000},
++ {MVEA_START, 0x00000210, 0x00000000},
++ {MVEA_START, 0x00000220, 0x00000001},
++ {MVEA_START, 0x00000224, 0x0000001F},
++ {MVEA_START, 0x00000228, 0x00000100},
++ {MVEA_START, 0x0000022C, 0x00001F00},
++ {MVEA_START, 0x00000230, 0x00000101},
++ {MVEA_START, 0x00000234, 0x00001F1F},
++ {MVEA_START, 0x00000238, 0x00001F01},
++ {MVEA_START, 0x0000023C, 0x0000011F},
++ {MVEA_START, 0x00000240, 0x00000200},
++ {MVEA_START, 0x00000244, 0x00001E00},
++ {MVEA_START, 0x00000248, 0x00000002},
++ {MVEA_START, 0x0000024C, 0x0000001E},
++ {MVEA_START, 0x00000250, 0x00000003},
++ {MVEA_START, 0x00000254, 0x0000001D},
++ {MVEA_START, 0x00000258, 0x00001F02},
++ {MVEA_START, 0x0000025C, 0x00000102},
++ {MVEA_START, 0x00000260, 0x0000011E},
++ {MVEA_START, 0x00000264, 0x00000000},
++ {MVEA_START, 0x00000268, 0x00000000},
++ {MVEA_START, 0x0000026C, 0x00000000},
++ {MVEA_START, 0x00000270, 0x00000000},
++ {MVEA_START, 0x00000274, 0x00000000},
++ {MVEA_START, 0x00000278, 0x00000000},
++ {MVEA_START, 0x00000280, 0x00008000},
++ {MVEA_START, 0x00000284, 0x00000000},
++ {MVEA_START, 0x00000288, 0x00000000},
++ {MVEA_START, 0x0000028C, 0x00000000},
++ {MVEA_START, 0x00000314, 0x00000000},
++ {MVEA_START, 0x00000318, 0x00000000},
++ {MVEA_START, 0x0000031C, 0x00000000},
++ {MVEA_START, 0x00000320, 0x00000000},
++ {MVEA_START, 0x00000324, 0x00000000},
++ {MVEA_START, 0x00000348, 0x00000000},
++ {MVEA_START, 0x00000380, 0x00000000},
++ {MVEA_START, 0x00000384, 0x00000000},
++ {MVEA_START, 0x00000388, 0x00000000},
++ {MVEA_START, 0x0000038C, 0x00000000},
++ {MVEA_START, 0x00000390, 0x00000000},
++ {MVEA_START, 0x00000394, 0x00000000},
++ {MVEA_START, 0x00000398, 0x00000000},
++ {MVEA_START, 0x0000039C, 0x00000000},
++ {MVEA_START, 0x000003A0, 0x00000000},
++ {MVEA_START, 0x000003A4, 0x00000000},
++ {MVEA_START, 0x000003A8, 0x00000000},
++ {MVEA_START, 0x000003B0, 0x00000000},
++ {MVEA_START, 0x000003B4, 0x00000000},
++ {MVEA_START, 0x000003B8, 0x00000000},
++ {MVEA_START, 0x000003BC, 0x00000000},
++ {MVEA_START, 0x000003D4, 0x00000000},
++ {MVEA_START, 0x000003D8, 0x00000000},
++ {MVEA_START, 0x000003DC, 0x00000000},
++ {MVEA_START, 0x000003E0, 0x00000000},
++ {MVEA_START, 0x000003E4, 0x00000000},
++ {MVEA_START, 0x000003EC, 0x00000000},
++ {MVEA_START, 0x000002D0, 0x00000000},
++ {MVEA_START, 0x000002D4, 0x00000000},
++ {MVEA_START, 0x000002D8, 0x00000000},
++ {MVEA_START, 0x000002DC, 0x00000000},
++ {MVEA_START, 0x000002E0, 0x00000000},
++ {MVEA_START, 0x000002E4, 0x00000000},
++ {MVEA_START, 0x000002E8, 0x00000000},
++ {MVEA_START, 0x000002EC, 0x00000000},
++ {MVEA_START, 0x000002F0, 0x00000000},
++ {MVEA_START, 0x000002F4, 0x00000000},
++ {MVEA_START, 0x000002F8, 0x00000000},
++ {MVEA_START, 0x000002FC, 0x00000000},
++ {MVEA_START, 0x00000300, 0x00000000},
++ {MVEA_START, 0x00000304, 0x00000000},
++ {MVEA_START, 0x00000308, 0x00000000},
++ {MVEA_START, 0x0000030C, 0x00000000},
++ {MVEA_START, 0x00000290, 0x00000000},
++ {MVEA_START, 0x00000294, 0x00000000},
++ {MVEA_START, 0x00000298, 0x00000000},
++ {MVEA_START, 0x0000029C, 0x00000000},
++ {MVEA_START, 0x000002A0, 0x00000000},
++ {MVEA_START, 0x000002A4, 0x00000000},
++ {MVEA_START, 0x000002A8, 0x00000000},
++ {MVEA_START, 0x000002AC, 0x00000000},
++ {MVEA_START, 0x000002B0, 0x00000000},
++ {MVEA_START, 0x000002B4, 0x00000000},
++ {MVEA_START, 0x000002B8, 0x00000000},
++ {MVEA_START, 0x000002BC, 0x00000000},
++ {MVEA_START, 0x000002C0, 0x00000000},
++ {MVEA_START, 0x000002C4, 0x00000000},
++ {MVEA_START, 0x000002C8, 0x00000000},
++ {MVEA_START, 0x000002CC, 0x00000000},
++ {MVEA_START, 0x00000080, 0x00000000},
++ {MVEA_START, 0x00000084, 0x80705700},
++ {MVEA_START, 0x00000088, 0x00000000},
++ {MVEA_START, 0x0000008C, 0x00000000},
++ {MVEA_START, 0x00000090, 0x00000000},
++ {MVEA_START, 0x00000094, 0x00000000},
++ {MVEA_START, 0x00000098, 0x00000000},
++ {MVEA_START, 0x0000009C, 0x00000000},
++ {MVEA_START, 0x000000A0, 0x00000000},
++ {MVEA_START, 0x000000A4, 0x00000000},
++ {MVEA_START, 0x000000A8, 0x00000000},
++ {MVEA_START, 0x000000AC, 0x00000000},
++ {MVEA_START, 0x000000B0, 0x00000000},
++ {MVEA_START, 0x000000B4, 0x00000000},
++ {MVEA_START, 0x000000B8, 0x00000000},
++ {MVEA_START, 0x000000BC, 0x00000000},
++ {MVEA_START, 0x000000C0, 0x00000000},
++ {MVEA_START, 0x000000C4, 0x00000000},
++ {MVEA_START, 0x000000C8, 0x00000000},
++ {MVEA_START, 0x000000CC, 0x00000000},
++ {MVEA_START, 0x000000D0, 0x00000000},
++ {MVEA_START, 0x000000D4, 0x00000000},
++ {MVEA_START, 0x000000D8, 0x00000000},
++ {MVEA_START, 0x000000DC, 0x00000000},
++ {MVEA_START, 0x000000E0, 0x00000000},
++ {MVEA_START, 0x000000E4, 0x00000000},
++ {MVEA_START, 0x000000E8, 0x00000000},
++ {MVEA_START, 0x000000EC, 0x00000000},
++ {MVEA_START, 0x000000F0, 0x00000000},
++ {MVEA_START, 0x000000F4, 0x00000000},
++ {MVEA_START, 0x000000F8, 0x00000000},
++ {MVEA_START, 0x000000FC, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
++};
++
++#define FIRMWARE_NAME "topaz_fw.bin"
++
++/* static function define */
++static int topaz_upload_fw(struct drm_device *dev,
++ enum drm_lnc_topaz_codec codec);
++static inline void topaz_set_default_regs(struct drm_psb_private
++ *dev_priv);
++
++#define UPLOAD_FW_BY_DMA 1
++
++#if UPLOAD_FW_BY_DMA
++static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
++ uint32_t channel, uint32_t src_phy_addr,
++ uint32_t offset, uint32_t dst_addr,
++ uint32_t byte_num, uint32_t is_increment,
++ uint32_t is_write);
++#else
++static void topaz_mtx_upload_by_register(struct drm_device *dev,
++ uint32_t mtx_mem, uint32_t addr,
++ uint32_t size,
++ struct ttm_buffer_object *buf);
++#endif
++
++static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, const uint32_t val);
++static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, uint32_t *ret_val);
++static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
++static void release_mtx_control_from_dash(struct drm_psb_private
++ *dev_priv);
++static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
++static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
++ uint32_t size);
++static void mtx_dma_write(struct drm_device *dev);
++
++
++#define DEBUG_FUNCTION 0
++
++#if DEBUG_FUNCTION
++static int topaz_test_null(struct drm_device *dev, uint32_t seq);
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq);
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
++static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset);
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq);
++#endif
++
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t read_val;
++ uint32_t reg, bank_size, ram_bank_size, ram_id;
++
++ TOPAZ_READ32(0x3c, &reg);
++ reg = 0x0a0a0606;
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMR));
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
++
++ return read_val;
++}
++
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ return;
++}
++
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(1, MTX_MTX_MCMAI) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++}
++
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val)
++{
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++}
++
++
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value, uint32_t mask)
++{
++ uint32_t tmp;
++ uint32_t count = 10000;
++
++ /* # poll topaz register for certain times */
++ while (count) {
++ /* #.# read */
++ MM_READ32(addr, 0, &tmp);
++
++ if (value == (tmp & mask))
++ return 0;
++
++ /* #.# delay and loop */
++ DRM_UDELAY(100);
++ --count;
++ }
++
++ /* # now waiting is timeout, return 1 indicat failed */
++ /* XXX: testsuit means a timeout 10000 */
++
++ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
++ "actual 0x%08x (0x%08x & 0x%08x)\n",
++ addr, value, tmp & mask, tmp, mask);
++
++ return -EBUSY;
++
++}
++
++static ssize_t psb_topaz_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct topaz_private *topaz_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++ pmstate = topaz_priv->pmstate;
++
++ pmstate = topaz_priv->pmstate;
++ spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
++ ret = snprintf(buf, 32, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
++ : ((pmstate == PSB_PMSTATE_POWERDOWN) ? "powerdown"
++ : "clockgated"));
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
++
++
++/* this function finish the first part of initialization, the rest
++ * should be done in topaz_setup_fw
++ */
++int lnc_topaz_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ uint32_t core_id, core_rev;
++ int ret = 0, n;
++ bool is_iomem;
++ struct topaz_private *topaz_priv;
++ void *topaz_bo_virt;
++
++ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
++ topaz_priv = kmalloc(sizeof(struct topaz_private), GFP_KERNEL);
++ if (topaz_priv == NULL)
++ return -1;
++
++ dev_priv->topaz_private = topaz_priv;
++ memset(topaz_priv, 0, sizeof(struct topaz_private));
++
++ /* get device --> drm_device --> drm_psb_private --> topaz_priv
++ * for psb_topaz_pmstate_show: topaz_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ pci_set_drvdata(dev->pdev, dev);
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_topaz_pmstate))
++ DRM_ERROR("TOPAZ: could not create sysfs file\n");
++ topaz_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ NULL,
++#endif
++ "topaz_pmstate");
++
++ topaz_priv = dev_priv->topaz_private;
++
++ /* # initialize comand topaz queueing [msvdx_queue] */
++ INIT_LIST_HEAD(&topaz_priv->topaz_queue);
++ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
++ mutex_init(&topaz_priv->topaz_mutex);
++ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
++ spin_lock_init(&topaz_priv->topaz_lock);
++
++ /* # topaz status init. [msvdx_busy] */
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++ topaz_priv->topaz_fw_loaded = 0;
++ /* FIXME: workaround since JPEG firmware is not ready */
++ topaz_priv->topaz_cur_codec = 1;
++ topaz_priv->cur_mtx_data_size = 0;
++ topaz_priv->topaz_hw_busy = 1;
++
++ topaz_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
++ GFP_KERNEL);
++ if (topaz_priv->topaz_mtx_reg_state == NULL) {
++ DRM_ERROR("TOPAZ: failed to allocate space "
++ "for mtx register\n");
++ return -1;
++ }
++
++ /* # gain write back structure,we may only need 32+4=40DW */
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &(topaz_priv->topaz_bo));
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
++ topaz_priv->topaz_bo->num_pages,
++ &topaz_priv->topaz_bo_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++ return ret;
++ }
++
++ topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
++ &is_iomem);
++ topaz_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
++ topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
++ topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt
++ + 2048);
++ topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
++ + 2048;
++ PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack and SYNC\n");
++ PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
++ topaz_priv->topaz_sync_offset);
++
++ *(topaz_priv->topaz_sync_addr) = ~0; /* reset sync seq */
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
++ "when receiving user space commands\n");
++
++#if 0 /* can't load FW here */
++ /* #.# load fw to driver */
++ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0)
++ return -1;
++
++ topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
++#endif
++ /* <msvdx does> # minimal clock */
++
++ /* <msvdx does> # return 0 */
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
++
++ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
++ core_id, core_rev);
++
++ /* create firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++ }
++
++ ret = ttm_buffer_object_create(bdev,
++ 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ goto out;
++ }
++ topaz_priv->cur_mtx_data_size = 0;
++
++ PSB_DEBUG_INIT("TOPAZ:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ PSB_DEBUG_INIT("TOPAZ:rest MSDVX to disable clock gating\n");
++
++ PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
++
++ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ return 0;
++
++out:
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ if (topaz_priv->topaz_mtx_data_mem != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
++
++ return ret;
++}
++
++int lnc_topaz_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ int n;
++
++ /* flush MMU */
++ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
++ /* topaz_mmu_flushcache (dev_priv); */
++
++ /* # reset TOPAZ chip */
++ lnc_topaz_reset(dev_priv);
++
++ if (NULL == topaz_priv)
++ {
++ DRM_ERROR("TOPAZ: topaz_priv is NULL!\n");
++ return -1;
++ }
++ /* release resources */
++ /* # release write back memory */
++ topaz_priv->topaz_ccb_wb = NULL;
++
++ /* release mtx register save space */
++ kfree(topaz_priv->topaz_mtx_reg_state);
++
++ /* release mtx data memory save space */
++ if (topaz_priv->topaz_mtx_data_mem)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
++
++ /* # release firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++
++ if (topaz_priv) {
++ pci_set_drvdata(dev->pdev, NULL);
++ device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
++ sysfs_put(topaz_priv->sysfs_pmstate);
++ topaz_priv->sysfs_pmstate = NULL;
++
++ kfree(topaz_priv);
++ dev_priv->topaz_private = NULL;
++ }
++
++ return 0;
++}
++
++int lnc_topaz_reset(struct drm_psb_private *dev_priv)
++{
++ struct topaz_private *topaz_priv;
++
++ topaz_priv = dev_priv->topaz_private;
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++ topaz_priv->cur_mtx_data_size = 0;
++ topaz_priv->topaz_cmd_windex = 0;
++ topaz_priv->topaz_needs_reset = 0;
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ return 0;
++}
++
++/* read firmware bin file and load all data into driver */
++int topaz_init_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct firmware *raw = NULL;
++ unsigned char *ptr;
++ int ret = 0;
++ int n;
++ struct topaz_fwinfo *cur_fw;
++ int cur_size;
++ struct topaz_codec_fw *cur_codec;
++ struct ttm_buffer_object **cur_drm_obj;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->stored_initial_qp = 0;
++
++ /* # get firmware */
++ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
++
++ if ((raw == NULL) || (raw->size < sizeof(struct topaz_fwinfo))) {
++ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
++ goto out;
++ }
++
++ ptr = (unsigned char *) raw->data;
++
++ if (!ptr) {
++ DRM_ERROR("TOPAZ: failed to load firmware.\n");
++ goto out;
++ }
++
++ /* # load fw from file */
++ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
++ cur_fw = NULL;
++ /* didn't use the first element */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ cur_fw = (struct topaz_fwinfo *) ptr;
++
++ cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
++ cur_codec->ver = cur_fw->ver;
++ cur_codec->codec = cur_fw->codec;
++ cur_codec->text_size = cur_fw->text_size;
++ cur_codec->data_size = cur_fw->data_size;
++ cur_codec->data_location = cur_fw->data_location;
++
++ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
++ codec_to_string(cur_fw->codec));
++
++ /* #.# handle text section */
++ ptr += sizeof(struct topaz_fwinfo);
++ cur_drm_obj = &cur_codec->text;
++ cur_size = cur_fw->text_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# handle data section */
++ ptr += cur_fw->text_size;
++ cur_drm_obj = &cur_codec->data;
++ cur_size = cur_fw->data_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# validate firmware */
++
++ /* #.# update ptr */
++ ptr += cur_fw->data_size;
++ }
++
++ release_firmware(raw);
++
++ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
++
++ return 0;
++
++out:
++ if (raw) {
++ PSB_DEBUG_GENERAL("release firmware....\n");
++ release_firmware(raw);
++ }
++
++ return -1;
++}
++
++/* setup fw when start a new context */
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t mem_size = RAM_SIZE; /* follow DDK */
++ uint32_t verify_pc;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++#if 0
++ if (codec == topaz_priv->topaz_current_codec) {
++ LNC_TRACEL("TOPAZ: reuse previous codec\n");
++ return 0;
++ }
++#endif
++
++ /* XXX: need to rest topaz? */
++ PSB_DEBUG_GENERAL("XXX: should reset topaz when context change?\n");
++
++ /* XXX: interrupt enable shouldn't be enable here,
++ * this funtion is called when interrupt is enable,
++ * but here, we've no choice since we have to call setup_fw by
++ * manual */
++ /* # upload firmware, clear interruputs and start the firmware
++ * -- from hostutils.c in TestSuits*/
++
++ /* # reset MVEA */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++
++ topaz_mmu_hwsetup(dev_priv);
++
++#if !LNC_TOPAZ_NO_IRQ
++ psb_irq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++#endif
++
++ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
++
++ topaz_set_default_regs(dev_priv);
++
++ /* # reset mtx */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
++
++ /* # upload fw by drm */
++ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
++
++ topaz_upload_fw(dev, codec);
++#if 0
++ /* allocate the space for context save & restore if needed */
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ ret = ttm_buffer_object_create(bdev,
++ topaz_priv->cur_mtx_data_size * 4,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ return -1;
++ }
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
++#endif
++
++ /* XXX: In power save mode, need to save the complete data memory
++ * and restore it. MTX_FWIF.c record the data size */
++ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
++ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
++
++ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
++
++ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
++
++ /* enable auto clock is essential for this driver */
++ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
++ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
++ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
++
++ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
++ verify_pc, PC_START_ADDRESS);
++
++ /* # turn on MTX */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ /* # poll on the interrupt which the firmware will generate */
++ topaz_wait_for_register(dev_priv,
++ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
++ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
++
++ /* # get ccb buffer addr -- file hostutils.c */
++ topaz_priv->topaz_ccb_buffer_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 4);
++ topaz_priv->topaz_ccb_ctrl_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 8);
++ topaz_priv->topaz_ccb_size =
++ topaz_read_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_ctrl_addr +
++ MTX_CCBCTRL_CCBSIZE);
++
++ topaz_priv->topaz_cmd_windex = 0;
++
++ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
++ topaz_priv->topaz_ccb_buffer_addr,
++ topaz_priv->topaz_ccb_ctrl_addr,
++ topaz_priv->topaz_ccb_size);
++
++ /* # write back the initial QP Value */
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
++ topaz_priv->stored_initial_qp);
++
++ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
++ topaz_priv->topaz_wb_offset);
++
++ /* this kick is essential for mtx.... */
++ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x01020304;
++ topaz_mtx_kick(dev_priv, 1);
++ DRM_UDELAY(1000);
++ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
++ " and here it is 0x%08x\n",
++ *((uint32_t *) topaz_priv->topaz_ccb_wb));
++
++ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
++ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
++
++ /* XXX: is there any need to record next cmd num??
++ * we use fence seqence number to record it
++ */
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++
++#if !LNC_TOPAZ_NO_IRQ
++ psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_enableirq(dev);
++#endif
++
++#if 0
++ topaz_mmu_flushcache(dev_priv);
++ topaz_test_null(dev, 0xe1e1);
++ topaz_test_null(dev, 0xe2e2);
++ topaz_test_sync(dev, 0xe2e2, 0x87654321);
++
++ topaz_mmu_test(dev, 0x12345678);
++ topaz_test_null(dev, 0xe3e3);
++ topaz_mmu_test(dev, 0x8764321);
++
++ topaz_test_null(dev, 0xe4e4);
++ topaz_test_null(dev, 0xf3f3);
++#endif
++
++ return 0;
++}
++
++#if UPLOAD_FW_BY_DMA
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++ uint32_t cur_mtx_data_size;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* # refer HLD document */
++
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_priv->topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
++ " data location(%d)\n", codec_to_string(codec), codec,
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size / 4;
++
++ /* setup the MTX to start recieving data:
++ use a register for the transfer which will point to the source
++ (MTX_CR_MTX_SYSC_CDMAT) */
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(text_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size / 4;
++ data_location = cur_codec_fw->data_location;
++
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
++ 0x80900000 + (data_location - 0x82880000));
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(data_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* record this codec's mtx data size for
++ * context save & restore */
++ /* FIXME: since non-root sighting fixed by pre allocated,
++ * only need to correct the buffer size
++ */
++ cur_mtx_data_size = data_size;
++ if (topaz_priv->cur_mtx_data_size != cur_mtx_data_size)
++ topaz_priv->cur_mtx_data_size = cur_mtx_data_size;
++
++ return 0;
++}
++
++#else
++
++void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
++ uint32_t addr, uint32_t size,
++ struct ttm_buffer_object *buf)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t *buf_p;
++ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
++ uint32_t cur_ram_id, ram_addr , ram_id;
++ int map_ret, lp;
++ struct ttm_bo_kmap_obj bo_kmap;
++ bool is_iomem;
++ uint32_t cur_addr;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
++ if (map_ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
++ return;
++ }
++ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
++
++
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
++ debug_reg = 0x0a0a0606;
++ bank_size = (debug_reg & 0xf0000) >> 16;
++ bank_ram_size = 1 << (bank_size + 2);
++
++ bank_count = (debug_reg & 0xf00) >> 8;
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++
++ cur_ram_id = -1;
++ cur_addr = addr;
++ for (lp = 0; lp < size / 4; ++lp) {
++ ram_id = mtx_mem + (cur_addr / bank_ram_size);
++
++ if (cur_ram_id != ram_id) {
++ ram_addr = cur_addr >> 2;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ F_ENCODE(ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMAI));
++
++ cur_ram_id = ram_id;
++ }
++ cur_addr += 4;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
++ *(buf_p + lp));
++
++ topaz_wait_for_register(dev_priv,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++ }
++
++ ttm_bo_kunmap(&bo_kmap);
++
++ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
++ return;
++}
++
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++
++ /* # refer HLD document */
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_priv->topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
++ " data location(0x%08x)\n", codec_to_string(codec),
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_MEMORY_BASE,
++ text_size, cur_codec_fw->text);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size;
++ data_location = cur_codec_fw->data_location;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
++ data_location - 0x82880000, data_size,
++ cur_codec_fw->data);
++
++ return 0;
++}
++
++#endif /* UPLOAD_FW_BY_DMA */
++
++void
++topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
++ uint32_t src_phy_addr, uint32_t offset,
++ uint32_t soc_addr, uint32_t byte_num,
++ uint32_t is_increment, uint32_t is_write)
++{
++ uint32_t dmac_count;
++ uint32_t irq_stat;
++ uint32_t count;
++
++ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
++ /* # check that no transfer is currently in progress and no
++ interrupts are outstanding ?? (why care interrupt) */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
++ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
++ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
++
++ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* clear previous interrupts */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ /* check irq status */
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
++ /* assert(0 == irq_stat); */
++ if (0 != irq_stat)
++ DRM_ERROR("TOPAZ: there is hold up\n");
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
++ (src_phy_addr + offset));
++ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
++ is_write, DMAC_PWIDTH_32_BIT, byte_num);
++ /* generate an interrupt at the end of transfer */
++ count |= MASK_IMG_SOC_TRANSFER_IEN;
++ count |= F_ENCODE(is_write, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
++ is_increment, DMAC_BURST_2));
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with
++ * the enable bit set to kick off the transfer
++ */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
++
++ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
++
++ return;
++}
++
++void topaz_set_default_regs(struct drm_psb_private *dev_priv)
++{
++ int n;
++ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++
++ for (n = 0; n < count; n++)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ topaz_default_regs[n][2]);
++
++}
++
++void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ const uint32_t val)
++{
++ uint32_t tmp;
++ get_mtx_control_from_dash(dev_priv);
++
++ /* put data into MTX_RW_DATA */
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
++
++ /* request a write */
++ tmp = reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ uint32_t *ret_val)
++{
++ uint32_t tmp;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ /* request a write */
++ tmp = (reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ /* read */
++ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
++ ret_val);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ int debug_reg_slave_val;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* GetMTXControlFromDash */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
++ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
++ do {
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ &debug_reg_slave_val);
++ } while ((debug_reg_slave_val & 0x18) != 0);
++
++ /* save access control */
++ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ &topaz_priv->topaz_dash_access_ctrl);
++}
++
++void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* restore access control */
++ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ topaz_priv->topaz_dash_access_ctrl);
++
++ /* release bus */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
++}
++
++void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
++{
++ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
++
++ /* bypass all request while MMU is being configured */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
++
++ /* set MMU hardware at the page table directory */
++ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
++ "into MMU_DIR_LIST0/1\n", pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
++
++ /* setup index register, all pointing to directory bank 0 */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
++
++ /* now enable MMU access for all requestors */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
++}
++
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
++{
++ uint32_t mmu_control;
++
++ if (dev_priv->topaz_disabled)
++ return;
++
++#if 0
++ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
++ " so flush using the master core\n");
++#endif
++ /* XXX: disable interrupt */
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
++
++#if 0
++ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
++ "still operating afterwards even if not cleared,\n"
++ "but may want to replace with MMU_FLUSH?\n");
++#endif
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++
++ /* clear it */
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++}
++
++#if DEBUG_FUNCTION
++
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t sync_cmd[3];
++ struct topaz_cmd_header *cmd_hdr;
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 1000;
++ uint32_t clr_flag;
++
++ cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0];
++
++ /* reset sync area */
++ *sync_p = 0;
++
++ /* insert a SYNC command here */
++ cmd_hdr->id = MTX_CMDID_SYNC;
++ cmd_hdr->size = 3;
++ cmd_hdr->seq = seq;
++
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x,"
++ "sync_seq=0x%08x\n", seq, sync_seq);
++
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout, expect sync seq 0x%08x,"
++ "actual 0x%08x\n", sync_seq, *sync_p);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC succeed, sync seq=0x%08x\n", *sync_p);
++ PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n");
++
++ clr_flag = lnc_topaz_queryirq(dev);
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ bool is_iomem;
++ struct ttm_buffer_object *test_obj;
++ struct ttm_bo_kmap_obj test_kmap;
++ unsigned int *test_adr;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &test_obj);
++ if (ret) {
++ DRM_ERROR("failed create test object buffer\n");
++ return -1;
++ }
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ test_obj->offset, &pfn);
++ if (ret) {
++ DRM_ERROR("failed to get pfn from virtual\n");
++ return -1;
++ }
++
++ PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++ *test_adr = 0xff55;
++ ttm_bo_kunmap(&test_kmap);
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (seq << 16);
++ sync_cmd[1] = test_obj->offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++
++ while (count && *test_adr != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*test_adr != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *test_adr);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr);
++ ttm_bo_kunmap(&test_kmap);
++ ttm_bo_unref(&test_obj);
++
++ return 0;
++}
++
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ struct page *p;
++ uint32_t *v;
++/* uint32_t offset = 0xd0000000; */
++
++ p = alloc_page(GFP_DMA32);
++ if (!p) {
++ DRM_ERROR("Topaz:Failed allocating page\n");
++ return -1;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++ pfn = (offset >> PAGE_SHIFT);
++ kunmap(p);
++
++ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
++ &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ DRM_ERROR("Topaz:Failed inserting mmu page\n");
++ return -1;
++ }
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (0x5b << 16);
++ sync_cmd[1] = pfn << PAGE_SHIFT;
++ sync_cmd[2] = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ v = kmap(p);
++ while (count && *v != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*v != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *v);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
++ kunmap(p);
++
++ return 0;
++}
++
++static int topaz_test_null(struct drm_device *dev, uint32_t seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_cmd_header null_cmd;
++ uint32_t clr_flag;
++
++ /* XXX: here we finished firmware setup....
++ * using a NULL command to verify the
++ * correctness of firmware
++ */
++
++ null_cmd.id = MTX_CMDID_NULL;
++ null_cmd.size = 1;
++ null_cmd.seq = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ DRM_UDELAY(1000); /* wait to finish */
++
++ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
++ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
++ seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
++ WB_CCB_CTRL_RINDEX(dev_priv));
++
++ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
++
++ clr_flag = lnc_topaz_queryirq(dev);
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++
++
++/*
++ * this function will test whether the mmu is correct:
++ * it get a drm_buffer_object and use CMD_SYNC to write
++ * certain value into this buffer.
++ */
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ unsigned long real_pfn;
++ int ret;
++
++ /* topaz_mmu_flush(dev); */
++ topaz_test_sync(dev, 0x55, sync_value);
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ topaz_priv->topaz_sync_offset, &real_pfn);
++ if (ret != 0) {
++ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
++ return;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
++ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
++ topaz_priv->topaz_sync_offset, real_pfn, sync_value);
++}
++
++void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_READ32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ data);
++
++}
++
++void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ *data);
++
++}
++
++#endif
++
++int lnc_topaz_restore_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t reg_val;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (!topaz_priv->topaz_mtx_saved)
++ return -1;
++
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to restore context without "
++ "space allocated, return directly without restore\n");
++ return -1;
++ }
++
++ /* turn on mtx clocks */
++ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
++
++ /* reset mtx */
++ /* FIXME: should use core_write??? */
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++ DRM_UDELAY(6000);
++
++ topaz_mmu_hwsetup(dev_priv);
++ /* upload code, restore mtx data */
++ mtx_dma_write(dev);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
++ /* restore register */
++	/* FIXME: consider putting read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* turn on MTX */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ topaz_priv->topaz_mtx_saved = 0;
++
++ return 0;
++}
++
++int lnc_topaz_save_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_codec_fw *cur_codec_fw;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without space "
++ "allocated, return directly without save\n");
++ return -1;
++ }
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without firmware "
++ "uploaded\n");
++ return -1;
++ }
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
++ TXRPT_WAITONKICK_VALUE,
++ 0xffffffff);
++
++ /* stop mtx */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
++
++	/* FIXME: consider putting read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* save mtx data memory */
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
++
++ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
++ topaz_priv->cur_mtx_data_size);
++
++ /* turn off mtx clocks */
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
++
++ topaz_priv->topaz_mtx_saved = 1;
++
++ return 0;
++}
++
++void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct ttm_buffer_object *target;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* setup mtx DMAC registers to do transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(1, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(size, MTX_LENGTH));
++
++ /* give the DMAC access to the host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ target = topaz_priv->topaz_mtx_data_mem;
++	/* transfer the data */
++	/* FIXME: size is measured in bytes? */
++ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT,
++ size, 0, 1);
++
++ /* wait for it transfer */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++ /* give access back to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
++void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
++ uint32_t soc_addr, uint32_t bytes_num,
++ int increment, int rnw)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t count_reg;
++ uint32_t irq_state;
++
++ /* check no transfer is in progress */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
++ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
++ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
++ "save mtx data\n");
++ /* FIXME: how to handle this error */
++ return;
++ }
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++	/* clear irq state */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
++ if (0 != irq_state) {
++ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
++ return;
++ }
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
++ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
++ DMAC_PWIDTH_32_BIT, rnw,
++ DMAC_PWIDTH_32_BIT, bytes_num);
++ /* generate an interrupt at end of transfer */
++ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
++ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
++ DMAC_BURST_2));
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with the enable
++ * bit set to kick off the transfer */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
++ count_reg | MASK_IMG_SOC_EN);
++}
++
++void mtx_dma_write(struct drm_device *dev)
++{
++ struct topaz_codec_fw *cur_codec_fw;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
++
++ /* upload code */
++	/* setup mtx DMAC registers to receive transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer code */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++	/* setup mtx start receiving data */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
++ (cur_codec_fw->data_location) - 0x82880000);
++
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(topaz_priv->cur_mtx_data_size, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer data */
++ topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem->offset,
++ 0, MTX_CR_MTX_SYSC_CDMAT,
++ topaz_priv->cur_mtx_data_size,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* give access back to Topaz Core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_dsi_dbi.c
+@@ -0,0 +1,1895 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#include "mdfld_dsi_dbi.h"
++#include "mdfld_dsi_dbi_dpu.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
++
++/**
++ * make these MCS command global
++ * we don't need 'movl' everytime we send them.
++ * FIXME: these datas were provided by OEM, we should get them from GCT.
++ **/
++static const u8 mdfld_dbi_mcs_hysteresis[] = {
++ 0x57, /*MCS write_hysteresis*/
++ 0x0f, 0x00, 0x42, 0x00,
++ 0x64, 0x00, 0x8c, 0x00,
++ 0xbf, 0x00, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0x0a, 0x00, 0x38, 0x00,
++ 0x50, 0x00, 0x82, 0x00,
++ 0xab, 0x00, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++};
++
++static const u8 mdfld_dbi_mcs_display_profile[] = {
++ 0x50, /*MCS write_display_profile*/
++ 0x14, 0x28, 0x50,
++ 0x82, 0xc8, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++};
++
++static const u8 mdfld_dbi_mcs_kbbc_profile[] = {
++ 0x60, /*MCS write_kbbc_profile*/
++ 0xcc, 0xff, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++};
++
++static const u8 mdfld_dbi_mcs_gamma_profile[] = {
++ 0x58, /*MCS write_gamma_setting*/
++ 0x11, 0x11, 0x81,
++ 0x88, 0x88, 0x88, 0x88,
++ 0x88, 0x88, 0x88, 0x88,
++};
++
++/**
++ * write hysteresis values.
++ */
++static void mdfld_dbi_write_hysteresis (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++
++ if (pipe == 2) {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)mdfld_dbi_mcs_hysteresis;
++
++ for (i = 0; i < (68 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 65 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * write display profile values.
++ */
++static void mdfld_dbi_write_display_profile (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++
++ if (pipe == 2) {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)mdfld_dbi_mcs_display_profile;
++
++ for (i = 0; i < (20 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 17 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * write KBBC profile values.
++ */
++static void mdfld_dbi_write_kbbc_profile (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++
++ if (pipe == 2) {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)mdfld_dbi_mcs_kbbc_profile;
++
++ for (i = 0; i < (20 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 17 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * write gamma setting.
++ */
++static void mdfld_dbi_write_gamma_setting (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++
++ if (pipe == 2) {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)mdfld_dbi_mcs_gamma_profile;
++
++ for (i = 0; i < (12 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 9 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * Check and see if the generic control or data buffer is empty and ready.
++ */
++void mdfld_dsi_dbi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat)
++{
++ u32 GEN_BF_time_out_count = 0;
++
++ /* Check MIPI Adatper command registers */
++ for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT; GEN_BF_time_out_count++)
++ {
++ if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
++ break;
++ udelay (100);
++ }
++
++ if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
++ DRM_ERROR("mdfld_dsi_dbi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x. \n", gen_fifo_stat_reg);
++}
++
++/**
++ * Manage the DSI MIPI keyboard and display brightness.
++ */
++void mdfld_dsi_dbi_brightness_init (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ }
++
++ /* Set default display backlight value to 85% (0xd8)*/
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = 0xd8;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_display_brightness << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ /* Set minimum brightness setting of CABC function to 20% (0x33)*/
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = 0x33;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_cabc_min_bright << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_hysteresis (dev, pipe);
++
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_display_profile (dev, pipe);
++
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_kbbc_profile (dev, pipe);
++
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_gamma_setting (dev, pipe);
++
++ /* Enable LABC */
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | AMBIENT_LIGHT_SENSE_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON | DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_ctrl_display << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ /* Enable CABC */
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = UI_IMAGE;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_ctrl_cabc << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * Manage the mipi display brightness.
++ */
++void mdfld_dsi_dbi_brightness_control (struct drm_device *dev, int pipe, int level)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ }
++
++ gen_ctrl_val = ((level * 0xff) / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
++
++ PSB_DEBUG_ENTRY("pipe = %d, gen_ctrl_val = %d. \n", pipe, gen_ctrl_val);
++
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_display_brightness << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ /* Set display backlight value */
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ /* Enable LABC */
++ mdfld_dsi_dbi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++ if (level == 0)
++ gen_ctrl_val = 0;
++ else
++ gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | AMBIENT_LIGHT_SENSE_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON | DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
++
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_ctrl_display << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++ msleep(3);
++}
++
++/**
++ * set refreshing area
++ */
++static int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output * dbi_output,
++ u16 x1, u16 y1, u16 x2, u16 y2)
++{
++ struct drm_device * dev = dbi_output->dev;
++ u32 cb_phy = dbi_output->dbi_cb_phy;
++ u8 * cb = (u8*)dbi_output->dbi_cb_addr;
++ u32 * index;
++ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
++ int ret = 0;
++ unsigned long flags;
++
++ /*if failed, try 5 times*/
++ int retry = 5;
++
++update_area_again:
++ /*first thing first, lock up DBI command buffer*/
++ spin_lock_irqsave(&dbi_output->cb_lock, flags);
++
++ index = &dbi_output->cb_write;
++
++ if(*index) {
++ DRM_ERROR("command buffer is in use\n");
++ ret = -EINVAL;
++ goto update_err;
++ }
++
++ /*check if display module is busy*/
++ ret = mdfld_dsi_dbi_cb_ready(dbi_output);
++ if(ret) {
++ DRM_ERROR("Display module is busy\n");
++ goto update_err;
++ }
++
++ /*set column*/
++ *(cb + ((*index)++)) = set_column_address;
++ *(cb + ((*index)++)) = x1 >> 8;
++ *(cb + ((*index)++)) = x1;
++ *(cb + ((*index)++)) = x2 >> 8;
++ *(cb + ((*index)++)) = x2;
++
++ *index = 8;
++
++ /*set page*/
++ *(cb + ((*index)++)) = set_page_addr;
++ *(cb + ((*index)++)) = y1 >> 8;
++ *(cb + ((*index)++)) = y1;
++ *(cb + ((*index)++)) = y2 >> 8;
++ *(cb + ((*index)++)) = y2;
++
++ *index = 16;
++
++ /*write start*/
++ *(cb + ((*index)++)) = write_mem_start;
++ /**
++ * release cb_lock before going to sleep.
++ * NOTE: since cb_write wasn't reset, DSR timer wouldn't be able to
++ * get access to command buffer.
++ * FIXME: this may lead to mode setting failed.
++ */
++ spin_unlock_irqrestore(&dbi_output->cb_lock, flags);
++
++ /*kick off*/
++ REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
++ REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), (cb_phy << CMD_MEM_ADDR_OFFSET) | BIT0 | BIT1);
++
++ /*wait for 3ms*/
++ msleep(3);
++
++ /*lock cb_lock again for cb_write resetting*/
++ spin_lock_irqsave(&dbi_output->cb_lock, flags);
++
++ /*reset write pointer*/
++ *index = 0;
++update_err:
++ spin_unlock_irqrestore(&dbi_output->cb_lock, flags);
++
++ /*if failed, command buffer may be occupied by DSR timer, try it again*/
++ if(ret && retry) {
++ retry--;
++ goto update_area_again;
++ }
++
++ return ret;
++}
++
++/**
++ * set panel's power state
++ */
++static int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output * dbi_output, int mode)
++{
++ struct drm_device * dev = dbi_output->dev;
++ u32 cb_phy = dbi_output->dbi_cb_phy;
++ u8 * cb = (u8*)dbi_output->dbi_cb_addr;
++ u32 * index;
++ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
++ int ret = 0;
++ unsigned long flags;
++
++ /*if failed, try 5 times*/
++ int retry = 5;
++
++update_power_again:
++
++ spin_lock_irqsave(&dbi_output->cb_lock, flags);
++
++ index = &dbi_output->cb_write;
++
++ if(*index) {
++ DRM_ERROR("command buffer is in use\n");
++ ret = -EINVAL;
++ goto set_power_err;
++ }
++
++ /*check if display module is busy*/
++ ret = mdfld_dsi_dbi_cb_ready(dbi_output);
++ if(ret) {
++ DRM_ERROR("Display module is busy\n");
++ goto set_power_err;
++ }
++
++ if(mode == DRM_MODE_DPMS_ON) {
++ /*exit sleep mode*/
++ *(cb + ((*index)++)) = exit_sleep_mode;
++
++ /*set display on*/
++ *index = 8;
++
++ *(cb + ((*index)++)) = set_display_on;
++ } else {
++ /*set display off*/
++ *(cb + ((*index)++)) = set_display_off;
++
++ *index = 8;
++
++ /*enter sleep mode*/
++ *(cb + ((*index)++)) = enter_sleep_mode;
++ }
++
++ spin_unlock_irqrestore(&dbi_output->cb_lock, flags);
++
++ /*wait for 120ms before actually sending out the command*/
++ msleep(120);
++
++ /*kick off*/
++ REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x0101);
++ REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), (cb_phy << CMD_MEM_ADDR_OFFSET) | BIT0 | BIT1);
++
++ /*wait for 5ms*/
++ msleep(5);
++
++ spin_lock_irqsave(&dbi_output->cb_lock, flags);
++
++ /*reset write pointer*/
++ *index = 0;
++
++set_power_err:
++ spin_unlock_irqrestore(&dbi_output->cb_lock, flags);
++
++ /*if failed, command buffer may be occupied by DSR timer, try it again*/
++ if(ret && retry) {
++ retry--;
++ goto update_power_again;
++ }
++
++ return ret;
++}
++
++/**
++ * send a generic DCS command with a parameter list
++ */
++static int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output * dbi_output,
++ u8 dcs,
++ u8 * param,
++ u32 num,
++ u8 data_src)
++{
++ struct drm_device * dev = dbi_output->dev;
++ u32 cb_phy = dbi_output->dbi_cb_phy;
++ u8 * cb = (u8*)dbi_output->dbi_cb_addr;
++ uint32_t * index;
++ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
++ int ret = 0;
++ unsigned long flags;
++ u32 i;
++
++ /*if failed, try 5 times*/
++ int retry = 5;
++
++ PSB_DEBUG_ENTRY("reg_offset %d\n", reg_offset);
++
++ if(!cb) {
++ DRM_ERROR("No command buffer\n");
++ return -EINVAL;
++ }
++
++send_dcs_again:
++ spin_lock_irqsave(&dbi_output->cb_lock, flags);
++
++ index = &dbi_output->cb_write;
++
++ if(*index) {
++ DRM_ERROR("DBI command buffer is not empty. weird\n");
++ ret = -EINVAL;
++ goto send_dcs_out;
++ }
++
++ /*prepare to send a DCS, wait for command buffer ready*/
++ ret = mdfld_dsi_dbi_cb_ready(dbi_output);
++ if(ret) {
++ DRM_ERROR("DBI is busy\n");
++ return -EAGAIN;
++ }
++
++ *(cb + ((*index)++)) = dcs;
++ if(num) {
++ for(i=0; i<num; i++) {
++ *(cb + ((*index)++)) = *(param + i);
++ }
++ }
++
++ spin_unlock_irqrestore(&dbi_output->cb_lock, flags);
++
++ /*wait for 120ms before actually sending out the command*/
++ msleep(120);
++
++ REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), (1 + num));
++ REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), (cb_phy << CMD_MEM_ADDR_OFFSET) | BIT0 | ((data_src == CMD_DATA_SRC_PIPE) ? BIT1 : 0));
++
++ /*wait for 5ms*/
++ msleep(5);
++
++ spin_lock_irqsave(&dbi_output->cb_lock, flags);
++
++ dbi_output->last_cmd = dcs;
++
++ ret = mdfld_dsi_dbi_cmd_sent(dbi_output);
++ if(ret) {
++ DRM_ERROR("Command execution time out\n");
++ }
++
++ /*reset command buffer*/
++ *index = 0;
++
++send_dcs_out:
++ spin_unlock_irqrestore(&dbi_output->cb_lock, flags);
++
++ /*if failed, command buffer may be occupied by DSR timer, try it again*/
++ if(ret && retry) {
++ retry--;
++ goto send_dcs_again;
++ }
++
++ return ret;
++}
++
++
++/**
++ * Update the DBI MIPI Panel Frame Buffer.
++ */
++void mdfld_dsi_dbi_update_fb (struct mdfld_dsi_dbi_output * dbi_output, int pipe)
++{
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_crtc * crtc = dbi_output->base.enc.crtc;
++ struct psb_intel_crtc * psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
++ u8 * cmd_buf = dbi_output->dbi_cb_addr;
++ u32 cmd_phy = dbi_output->dbi_cb_phy;
++ u32 * index;
++ u32 dpll_reg = MRST_DPLL_A;
++ u32 dspcntr_reg = DSPACNTR;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 dsplinoff_reg = DSPALINOFF;
++ u32 dspsurf_reg = DSPASURF;
++ u32 reg_offset = 0;
++
++ /*if mode setting on-going, back off*/
++ if((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
++ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
++ return;
++
++ if(pipe == 2) {
++ dspcntr_reg = DSPCCNTR;
++ pipeconf_reg = PIPECCONF;
++ dsplinoff_reg = DSPCLINOFF;
++ dspsurf_reg = DSPCSURF;
++
++ reg_offset = MIPIC_REG_OFFSET;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return;
++
++ /*check DBI FIFO status*/
++ if(!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset)) & BIT27) ||
++ !(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
++ !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
++ !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE)) {
++ DRM_ERROR("DBI FIFO is busy\n");
++ goto update_fb_out1;
++ }
++
++ if(!spin_trylock(&dbi_output->cb_lock)) {
++ goto update_fb_out1;
++ }
++
++ index = &dbi_output->cb_write;
++
++ /*send write_mem_start cmd*/
++ if(*index) {
++ DRM_ERROR("Command buffer is used by others\n");
++ goto update_fb_out2;
++ }
++
++ /*refresh plane changes*/
++ REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
++ REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
++ REG_READ(dspsurf_reg);
++
++ *(cmd_buf + ((*index)++)) = write_mem_start;
++
++ /**
++ * NOTE: we don't need add a 3ms delay here, dsr update were scheduled
++ * by DSR timer.
++ */
++ REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 1);
++ REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | BIT0 | BIT1);
++
++ dbi_output->dsr_fb_update_done = true;
++
++ *index = 0;
++
++update_fb_out2:
++ spin_unlock(&dbi_output->cb_lock);
++update_fb_out1:
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Enter DSR
++ */
++void mdfld_dsi_dbi_enter_dsr (struct mdfld_dsi_dbi_output * dbi_output, int pipe)
++{
++ u32 reg_val;
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_crtc * crtc = dbi_output->base.enc.crtc;
++ struct psb_intel_crtc * psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
++ u32 dpll_reg = MRST_DPLL_A;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 dspcntr_reg = DSPACNTR;
++ u32 dspbase_reg = DSPABASE;
++ u32 dspsurf_reg = DSPASURF;
++
++ PSB_DEBUG_ENTRY(" \n");
++
++ if(!dbi_output)
++ return;
++
++ if((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
++ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
++ return;
++
++ if(pipe == 2) {
++ dpll_reg = MRST_DPLL_A;
++ pipeconf_reg = PIPECCONF;
++ dspcntr_reg = DSPCCNTR;
++ dspbase_reg = MDFLD_DSPCBASE;
++ dspsurf_reg = DSPCSURF;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /*disable plane*/
++ reg_val = REG_READ(dspcntr_reg);
++ if(!(reg_val & DISPLAY_PLANE_ENABLE)) {
++ REG_WRITE(dspcntr_reg, reg_val & ~DISPLAY_PLANE_ENABLE);
++ REG_READ(dspcntr_reg);
++ }
++
++ /*disable pipe*/
++ reg_val = REG_READ(pipeconf_reg);
++ if(!(reg_val & DISPLAY_PLANE_ENABLE)) {
++ reg_val &= ~DISPLAY_PLANE_ENABLE;
++ reg_val |= (PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF);
++ REG_WRITE(pipeconf_reg, reg_val);
++ REG_READ(pipeconf_reg);
++ mdfldWaitForPipeDisable(dev, pipe);
++ }
++
++ /*disable DPLL*/
++ reg_val = REG_READ(dpll_reg);
++ if(!(reg_val & DPLL_VCO_ENABLE)) {
++ reg_val &= ~DPLL_VCO_ENABLE;
++ REG_WRITE(dpll_reg, reg_val);
++ REG_READ(dpll_reg);
++ udelay(500);
++ }
++
++ /*gate power of DSI DPLL*/
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ /*update mode state to IN_DSR*/
++ dbi_output->mode_flags |= MODE_SETTING_IN_DSR;
++}
++
++#ifndef CONFIG_MDFLD_DSI_DPU
++static void mdfld_dbi_output_exit_dsr (struct mdfld_dsi_dbi_output * dbi_output, int pipe)
++{
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_crtc * crtc = dbi_output->base.enc.crtc;
++ struct psb_intel_crtc * psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
++ u32 reg_val;
++ u32 dpll_reg = MRST_DPLL_A;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 dspcntr_reg = DSPACNTR;
++ u32 reg_offset = 0;
++
++ /*if mode setting on-going, back off*/
++ if((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
++ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
++ return;
++
++ if(pipe == 2) {
++ dpll_reg = MRST_DPLL_A;
++ pipeconf_reg = PIPECCONF;
++ dspcntr_reg = DSPCCNTR;
++
++ reg_offset = MIPIC_REG_OFFSET;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return;
++
++ /*enable DPLL*/
++ reg_val = REG_READ(dpll_reg);
++ if(!(reg_val & DPLL_VCO_ENABLE)) {
++
++ if(reg_val & MDFLD_PWR_GATE_EN) {
++ reg_val &= ~MDFLD_PWR_GATE_EN;
++ REG_WRITE(dpll_reg, reg_val);
++ REG_READ(dpll_reg);
++ udelay(500);
++ }
++
++ reg_val |= DPLL_VCO_ENABLE;
++ REG_WRITE(dpll_reg, reg_val);
++ REG_READ(dpll_reg);
++ udelay(500);
++
++ /*FIXME: add timeout*/
++ while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK));
++ }
++
++ /*enable pipe*/
++ reg_val = REG_READ(pipeconf_reg);
++ if(!(reg_val & PIPEACONF_ENABLE)) {
++ reg_val |= PIPEACONF_ENABLE;
++ REG_WRITE(pipeconf_reg, reg_val);
++ REG_READ(pipeconf_reg);
++ udelay(500);
++ mdfldWaitForPipeEnable(dev, pipe);
++ }
++
++ /*enable plane*/
++ reg_val = REG_READ(dspcntr_reg);
++ if(!(reg_val & DISPLAY_PLANE_ENABLE)) {
++ reg_val |= DISPLAY_PLANE_ENABLE;
++ REG_WRITE(dspcntr_reg, reg_val);
++ REG_READ(dspcntr_reg);
++ udelay(500);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ /*clean IN_DSR flag*/
++ dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
++}
++
++/**
++ * Exit from DSR
++ */
++void mdfld_dsi_dbi_exit_dsr (struct drm_device *dev, u32 update_src)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dsr_info * dsr_info = dev_priv->dbi_dsr_info;
++ struct mdfld_dsi_dbi_output ** dbi_output;
++ int i;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ dbi_output = dsr_info->dbi_outputs;
++ /*for each output, exit dsr*/
++ for(i=0; i<dsr_info->dbi_output_num; i++) {
++ if(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR) {
++ mdfld_dbi_output_exit_dsr(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0);
++ }
++ }
++
++ dev_priv->dsr_fb_update |= update_src;
++
++ /*start timer*/
++ mdfld_dbi_dsr_timer_start(dsr_info);
++}
++
++/*timers for DSR*/
++static void mdfld_dsi_dbi_dsr_timer_func(unsigned long data)
++{
++ struct drm_device * dev = (struct drm_device *)data;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dsr_info * dsr_info = dev_priv->dbi_dsr_info;
++ struct timer_list * dsr_timer = &dsr_info->dsr_timer;
++ struct mdfld_dsi_dbi_output ** dbi_output;
++ unsigned long flags;
++ int i;
++
++ dbi_output = dsr_info->dbi_outputs;
++
++ if (dev_priv->dsr_fb_update)
++ {
++ for(i=0; i<dsr_info->dbi_output_num; i++) {
++ if ((dbi_output[i]->dbi_panel_on)) {
++ dbi_output[i]->dsr_fb_update_done = false;
++ mdfld_dsi_dbi_update_fb(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0);
++
++ if (dev_priv->b_dsr_enable && dbi_output[i]->dsr_fb_update_done)
++ dev_priv->dsr_fb_update &= ~(dev_priv->dsr_fb_update);
++ }
++
++ /*clean IN_DSR flag*/
++ dbi_output[i]->mode_flags &= ~MODE_SETTING_IN_DSR;
++ }
++
++ dsr_info->dsr_idle_count = 0;
++
++ } else {
++ dsr_info->dsr_idle_count++;
++ }
++
++ if (dsr_info->dsr_idle_count > 1) {
++ for(i=0; i<dsr_info->dbi_output_num; i++) {
++ if(!(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR) &&
++ !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING)) {
++ mdfld_dsi_dbi_enter_dsr(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0);
++ }
++ }
++ return;
++ }
++
++ spin_lock_irqsave(&dsr_info->dsr_timer_lock, flags);
++ if(!timer_pending(dsr_timer)){
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dsr_timer);
++ }
++ spin_unlock_irqrestore(&dsr_info->dsr_timer_lock, flags);
++}
++
++static int mdfld_dsi_dbi_dsr_timer_init(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dsr_info * dsr_info = dev_priv->dbi_dsr_info;
++ struct timer_list * dsr_timer = &dsr_info->dsr_timer;
++ unsigned long flags;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ spin_lock_init(&dsr_info->dsr_timer_lock);
++ spin_lock_irqsave(&dsr_info->dsr_timer_lock, flags);
++
++ init_timer(dsr_timer);
++
++ dsr_timer->data = (unsigned long)dev;
++ dsr_timer->function = mdfld_dsi_dbi_dsr_timer_func;
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++
++ spin_unlock_irqrestore(&dsr_info->dsr_timer_lock, flags);
++
++ PSB_DEBUG_ENTRY("successfully\n");
++
++ return 0;
++}
++
++void mdfld_dbi_dsr_timer_start(struct mdfld_dbi_dsr_info * dsr_info)
++{
++ struct timer_list * dsr_timer = &dsr_info->dsr_timer;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dsr_info->dsr_timer_lock, flags);
++ if(!timer_pending(dsr_timer)){
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dsr_timer);
++ }
++ spin_unlock_irqrestore(&dsr_info->dsr_timer_lock, flags);
++}
++
++int mdfld_dbi_dsr_init(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dsr_info * dsr_info = dev_priv->dbi_dsr_info;
++
++ if(!dsr_info || IS_ERR(dsr_info)) {
++ dsr_info = kzalloc(sizeof(struct mdfld_dbi_dsr_info), GFP_KERNEL);
++ if(!dsr_info) {
++ DRM_ERROR("No memory\n");
++ return -ENOMEM;
++ }
++
++ dev_priv->dbi_dsr_info = dsr_info;
++ }
++
++ /*init dsr refresh timer*/
++ mdfld_dsi_dbi_dsr_timer_init(dev);
++
++ PSB_DEBUG_ENTRY("successfully\n");
++
++ return 0;
++}
++
++void mdfld_dbi_dsr_exit(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dsr_info * dsr_info = dev_priv->dbi_dsr_info;
++
++ if(!dsr_info) {
++ return;
++ }
++
++ /*delete dsr timer*/
++ del_timer_sync(&dsr_info->dsr_timer);
++
++ /*free dsr info*/
++ kfree(dsr_info);
++
++ dev_priv->dbi_dsr_info = NULL;
++}
++#endif
++
++static void mdfld_dsi_dbi_save(struct drm_connector * connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++}
++
++static void mdfld_dsi_dbi_restore(struct drm_connector * connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++}
++
++static enum drm_connector_status mdfld_dsi_dbi_detect(struct drm_connector * connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++ return connector_status_connected;
++}
++
++static int mdfld_dsi_dbi_set_property(struct drm_connector * connector,
++ struct drm_property * property,
++ uint64_t value)
++{
++ struct drm_encoder *pEncoder = connector->encoder;
++ struct drm_device * dev = connector->dev;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ struct psb_intel_crtc *pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc);
++ bool bTransitionFromToCentered;
++ uint64_t curValue;
++
++ if (!pPsbCrtc)
++ goto set_prop_error;
++
++ switch (value) {
++ case DRM_MODE_SCALE_FULLSCREEN:
++ break;
++ case DRM_MODE_SCALE_NO_SCALE:
++ break;
++ case DRM_MODE_SCALE_ASPECT:
++ break;
++ default:
++ goto set_prop_error;
++ }
++
++ if (drm_connector_property_get_value(connector, property, &curValue))
++ goto set_prop_error;
++
++ if (curValue == value)
++ goto set_prop_done;
++
++ if (drm_connector_property_set_value(connector, property, value))
++ goto set_prop_error;
++
++ bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
++ (value == DRM_MODE_SCALE_NO_SCALE);
++
++ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
++ pPsbCrtc->saved_mode.vdisplay != 0) {
++ if (bTransitionFromToCentered) {
++ if (!drm_crtc_helper_set_mode(pEncoder->crtc, &pPsbCrtc->saved_mode,
++ pEncoder->crtc->x, pEncoder->crtc->y, pEncoder->crtc->fb))
++ goto set_prop_error;
++ } else {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ pEncHFuncs->mode_set(pEncoder, &pPsbCrtc->saved_mode,
++ &pPsbCrtc->saved_adjusted_mode);
++ }
++ }
++ } else if (!strcmp(property->name, "backlight") && pEncoder) {
++ PSB_DEBUG_ENTRY("backlight level = %d\n", (int)value);
++ if (drm_connector_property_set_value(connector, property, value))
++ goto set_prop_error;
++ else {
++ PSB_DEBUG_ENTRY("set brightness to %d", (int)value);
++ if(dev_priv->dbi_panel_on) {
++ mdfld_dsi_dbi_brightness_control(dev, 0, value);
++ }
++
++ if(dev_priv->dbi_panel_on2) {
++ mdfld_dsi_dbi_brightness_control(dev, 2, value);
++ }
++ }
++ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;*/
++ PSB_DEBUG_ENTRY("DPMS \n");
++ pEncHFuncs->dpms(pEncoder, value);
++ /*pCrtcHFuncs->dpms(pEncoder->crtc, value);*/
++ }
++
++set_prop_done:
++ return 0;
++set_prop_error:
++ return -1;
++}
++
++static void mdfld_dsi_dbi_destroy(struct drm_connector * connector)
++{
++ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if(!dbi_output) {
++ return;
++ }
++
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++
++ kfree(dbi_output);
++}
++
++static int mdfld_dsi_dbi_get_modes(struct drm_connector * connector)
++{
++ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++ struct drm_display_mode * fixed_mode = dbi_output->panel_fixed_mode;
++ struct drm_display_mode * dup_mode = NULL;
++ struct drm_device * dev = dbi_output->dev;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if(fixed_mode) {
++ dup_mode = drm_mode_duplicate(dev, fixed_mode);
++ drm_mode_probed_add(connector, dup_mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++static int mdfld_dsi_dbi_mode_valid(struct drm_connector * connector, struct drm_display_mode * mode)
++{
++ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++ struct drm_display_mode * fixed_mode = dbi_output->panel_fixed_mode;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if(mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ if(mode->flags & DRM_MODE_FLAG_INTERLACE)
++ return MODE_NO_INTERLACE;
++
++ if(fixed_mode) {
++ if(mode->hdisplay > fixed_mode->hdisplay)
++ return MODE_PANEL;
++
++ if(mode->vdisplay > fixed_mode->vdisplay)
++ return MODE_PANEL;
++ }
++
++ return MODE_OK;
++}
++
++static struct drm_encoder * mdfld_dsi_dbi_best_encoder(struct drm_connector * connector)
++{
++ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
++
++ PSB_DEBUG_ENTRY("\n");
++
++ return &psb_output->enc;
++}
++
++static bool mdfld_dsi_dbi_mode_fixup(struct drm_encoder * encoder,
++ struct drm_display_mode * mode,
++ struct drm_display_mode * adjusted_mode)
++{
++ struct psb_intel_output * psb_output = enc_to_psb_intel_output(encoder);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++ struct drm_display_mode * fixed_mode = dbi_output->panel_fixed_mode;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if(fixed_mode) {
++ adjusted_mode->hdisplay = fixed_mode->hdisplay;
++ adjusted_mode->hsync_start = fixed_mode->hsync_start;
++ adjusted_mode->hsync_end = fixed_mode->hsync_end;
++ adjusted_mode->htotal = fixed_mode->htotal;
++ adjusted_mode->vdisplay = fixed_mode->vdisplay;
++ adjusted_mode->vsync_start = fixed_mode->vsync_start;
++ adjusted_mode->vsync_end = fixed_mode->vsync_end;
++ adjusted_mode->vtotal = fixed_mode->vtotal;
++ adjusted_mode->clock = fixed_mode->clock;
++ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
++ }
++
++ return true;
++}
++
++static void mdfld_dsi_adapter_init(struct drm_device * dev, int pipe)
++{
++ u32 reg_offset = ((pipe == 2) ? MIPIC_REG_OFFSET : 0);
++
++ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
++ REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
++ REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
++ REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), 0x000a0014);
++ REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
++ REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
++
++ REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
++ REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
++ REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x000000ff);
++ REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
++ REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), (DBI_DATA_WIDTH_OPT2 << DBI_DATA_WIDTH_POS
++ | ((pipe ? 0x0 : 0x0) << DBI_CHANNEL_NUMBER_POS)
++ | 2));
++ REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
++ REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
++ REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
++ REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
++ REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
++ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
++}
++
++static void mdfld_dsi_dbi_mode_set(struct drm_encoder * encoder,
++ struct drm_display_mode * mode,
++ struct drm_display_mode * adjusted_mode)
++{
++ int ret = 0;
++ struct drm_device * dev = encoder->dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private*)dev->dev_private;
++ struct psb_intel_output * psb_output = enc_to_psb_intel_output(encoder);
++ struct mdfld_dsi_dbi_output * dsi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++
++ /*regs*/
++ u32 mipi_reg = MIPI;
++ u32 dspcntr_reg = DSPACNTR;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 reg_offset = 0;
++
++ /*values*/
++ u32 dspcntr_val = dev_priv->dspcntr;
++ u32 pipeconf_val = dev_priv->pipeconf;
++ u32 h_active_area = mode->hdisplay;
++ u32 v_active_area = mode->vdisplay;
++ u32 mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX);
++
++ PSB_DEBUG_ENTRY("type %s\n", (psb_output->type == INTEL_OUTPUT_MIPI2) ? "MIPI2" : "MIPI");
++ PSB_DEBUG_ENTRY("h %d v %d\n", mode->hdisplay, mode->vdisplay);
++
++ if(psb_output->type == INTEL_OUTPUT_MIPI2) {
++ if(!dev_priv->dbi_panel_on) {
++ DRM_ERROR("bad thing happened\n");
++ return;
++ }
++
++ mipi_reg = MIPI_C;
++ dspcntr_reg = DSPCCNTR;
++ pipeconf_reg = PIPECCONF;
++
++ reg_offset = MIPIC_REG_OFFSET;
++
++ dspcntr_val = dev_priv->dspcntr2;
++ pipeconf_val = dev_priv->pipeconf2;
++ } else {
++ mipi_val |= 0x2; /*two lanes for port A and C respectly*/
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /*set up pipe related registers*/
++ REG_WRITE(mipi_reg, mipi_val);
++ REG_READ(mipi_reg);
++
++ /*setup MIPI adapter + MIPI IP registers*/
++ if(psb_output->type == INTEL_OUTPUT_MIPI2) {
++ mdfld_dsi_adapter_init(dev, 2);
++ } else {
++ mdfld_dsi_adapter_init(dev, 0);
++ }
++
++ msleep(20);
++
++ REG_WRITE(dspcntr_reg, dspcntr_val);
++
++ msleep(20);
++
++ /*send exit_sleep_mode DCS*/
++ ret = mdfld_dsi_dbi_send_dcs(dsi_output, exit_sleep_mode, NULL, 0, CMD_DATA_SRC_SYSTEM_MEM);
++ if(ret) {
++ DRM_ERROR("sent exit_sleep_mode faild\n");
++ goto out_err;
++ }
++
++ /*backlight operation*/
++ if(psb_output->type == INTEL_OUTPUT_MIPI2) {
++ mdfld_dsi_dbi_brightness_init(dev, 2);
++ } else {
++ mdfld_dsi_dbi_brightness_init(dev, 0);
++ }
++
++ mdfld_dsi_dbi_gen_fifo_ready (dev, (MIPIA_GEN_FIFO_STAT_REG + reg_offset), HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++ REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
++ REG_READ(pipeconf_reg);
++
++ /*TODO: this looks ugly, try to move it to CRTC mode setting*/
++ if(psb_output->type == INTEL_OUTPUT_MIPI2) {
++ dev_priv->pipeconf2 |= PIPEACONF_DSR;
++ } else {
++ dev_priv->pipeconf |= PIPEACONF_DSR;
++ }
++
++ PSB_DEBUG_ENTRY("pipeconf %x\n", REG_READ(pipeconf_reg));
++
++ ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0, h_active_area - 1, v_active_area - 1);
++ if(ret) {
++ DRM_ERROR("update area failed\n");
++ goto out_err;
++ }
++
++out_err:
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ if(ret) {
++ DRM_ERROR("mode set failed\n");
++ } else {
++ PSB_DEBUG_ENTRY("mode set done successfully\n");
++ }
++}
++
++static void mdfld_dsi_dbi_set_power(struct drm_encoder * encoder, bool on)
++{
++ int ret = 0;
++ struct psb_intel_output * psb_output = enc_to_psb_intel_output(encoder);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
++
++ PSB_DEBUG_ENTRY("pipe %d : %s, panel on: %s\n",pipe, on ? "On" : "Off", dbi_output->dbi_panel_on ? "True" : "False");
++
++ if(pipe == 2) {
++ if(on) {
++ dev_priv->dual_mipi = true;
++ } else {
++ dev_priv->dual_mipi = false;
++ }
++ } else {
++ if (!on) {
++ dev_priv->dual_mipi = false;
++ }
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if(on) {
++ if(dbi_output->dbi_panel_on)
++ goto out_err;
++
++ ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
++ if(ret) {
++ DRM_ERROR("power on error\n");
++ goto out_err;
++ }
++
++ dbi_output->dbi_panel_on = true;
++ if(pipe == 2) {
++ dev_priv->dbi_panel_on2 = true;
++ } else {
++ dev_priv->dbi_panel_on = true;
++ }
++ } else {
++ if(!dbi_output->dbi_panel_on && !dbi_output->first_boot)
++ goto out_err;
++
++ dbi_output->dbi_panel_on = false;
++ dbi_output->first_boot = false;
++
++ if(pipe == 2) {
++ dev_priv->dbi_panel_on2 = false; /* was 'true': copy-paste from the power-on branch */
++ } else {
++ dev_priv->dbi_panel_on = false; /* was 'true': copy-paste from the power-on branch */
++ }
++
++ ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
++ if(ret) {
++ DRM_ERROR("power off error\n");
++ goto out_err;
++ }
++ }
++
++out_err:
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ if(ret) {
++ DRM_ERROR("failed\n");
++ } else {
++ PSB_DEBUG_ENTRY("successfully\n");
++ }
++}
++
++static void mdfld_dsi_dbi_prepare(struct drm_encoder * encoder) {
++ struct psb_intel_output * psb_output = enc_to_psb_intel_output(encoder);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++
++ PSB_DEBUG_ENTRY("\n");
++
++ dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
++
++ mdfld_dsi_dbi_set_power(encoder, false);
++}
++
++static void mdfld_dsi_dbi_commit(struct drm_encoder * encoder) {
++ struct psb_intel_output * psb_output = enc_to_psb_intel_output(encoder);
++ struct mdfld_dsi_dbi_output * dbi_output = MDFLD_DSI_DBI_OUTPUT(psb_output);
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++
++/*DSI DPU was still on debugging, will remove this option later*/
++#ifdef CONFIG_MDFLD_DSI_DPU
++ struct psb_drm_dpu_rect rect;
++#endif
++
++ PSB_DEBUG_ENTRY("\n");
++
++ mdfld_dsi_dbi_set_power(encoder, true);
++
++ dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
++
++#ifdef CONFIG_MDFLD_DSI_DPU
++ rect.x = rect.y = 0;
++ rect.width = 864;
++ rect.height = 480;
++#endif
++
++ if(psb_output->type == INTEL_OUTPUT_MIPI2) {
++ dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
++#ifdef CONFIG_MDFLD_DSI_DPU
++ /*if dpu enabled report a fullscreen damage*/
++ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
++#endif
++ } else {
++ dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
++
++#ifdef CONFIG_MDFLD_DSI_DPU
++ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
++ /*start dpu timer*/
++ mdfld_dbi_dpu_timer_start(dev_priv->dbi_dpu_info);
++#else
++ mdfld_dbi_dsr_timer_start(dev_priv->dbi_dsr_info);
++#endif
++ }
++}
++
++static void mdfld_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
++{
++ PSB_DEBUG_ENTRY("%s \n", (mode == DRM_MODE_DPMS_ON ? "on":"off"));
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mdfld_dsi_dbi_set_power(encoder, true);
++ else
++ mdfld_dsi_dbi_set_power(encoder, false);
++}
++
++static const struct drm_encoder_helper_funcs mdfld_dsi_helper_funcs = {
++ .dpms = mdfld_dsi_dbi_dpms,
++ .mode_fixup = mdfld_dsi_dbi_mode_fixup,
++ .prepare = mdfld_dsi_dbi_prepare,
++ .mode_set = mdfld_dsi_dbi_mode_set,
++ .commit = mdfld_dsi_dbi_commit,
++};
++
++/*DBI output connector funcs*/
++static const struct drm_connector_funcs mdfld_dsi_dbi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mdfld_dsi_dbi_save,
++ .restore = mdfld_dsi_dbi_restore,
++ .detect = mdfld_dsi_dbi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = mdfld_dsi_dbi_set_property,
++ .destroy = mdfld_dsi_dbi_destroy,
++};
++
++/*DBI output encoder funcs*/
++static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
++ .destroy = drm_encoder_cleanup,
++};
++
++/*DBI connector helper funcs*/
++static const struct drm_connector_helper_funcs mdfld_dsi_dbi_connector_helper_funcs = {
++ .get_modes = mdfld_dsi_dbi_get_modes,
++ .mode_valid = mdfld_dsi_dbi_mode_valid,
++ .best_encoder = mdfld_dsi_dbi_best_encoder,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revisit it. */
++struct drm_display_mode *mdfld_dsi_dbi_get_configuration_mode(struct drm_device *dev, int dsi_num)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++#if MDFLD_GCT_JLIU7
++ u8 panel_index = dev_priv->gct_data.bpi;
++ u8 panel_type = dev_priv->gct_data.pt;
++#endif /* MDFLD_GCT_JLIU7 */
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++ bool use_gct = false;
++ uint32_t Panel_RRate = 0;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++#if MDFLD_GCT_JLIU7
++ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
++ if ((1<<panel_index) & panel_type) /* if non-zero,*/
++ use_gct = true; /*then mipi panel.*/
++#endif /* MDFLD_GCT_JLIU7 */
++
++ if (use_gct) {
++ PSB_DEBUG_ENTRY("gct find MIPI panel. \n");
++
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++
++ PSB_DEBUG_ENTRY("hdisplay is %d\n", mode->hdisplay);
++ PSB_DEBUG_ENTRY("vdisplay is %d\n", mode->vdisplay);
++ PSB_DEBUG_ENTRY("HSS is %d\n", mode->hsync_start);
++ PSB_DEBUG_ENTRY("HSE is %d\n", mode->hsync_end);
++ PSB_DEBUG_ENTRY("htotal is %d\n", mode->htotal);
++ PSB_DEBUG_ENTRY("VSS is %d\n", mode->vsync_start);
++ PSB_DEBUG_ENTRY("VSE is %d\n", mode->vsync_end);
++ PSB_DEBUG_ENTRY("vtotal is %d\n", mode->vtotal);
++ PSB_DEBUG_ENTRY("clock is %d\n", mode->clock);
++ } else {
++ if (dsi_num == 1)
++ {
++#if DSI_TPO_864x480 /*FIXME jliu7 remove it later */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++
++ dev_priv->dpi = true;
++ dev_priv->bpp = 24;
++ dev_priv->videoModeFormat = BURST_MODE;
++ dev_priv->laneCount = 2;
++ dev_priv->channelNumber = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_864x480 /* get from spec. */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++
++ Panel_RRate = 60;
++ dev_priv->dpi = false;
++ dev_priv->bpp = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount = 2;
++ dev_priv->channelNumber = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_480x864 /* get from spec. */
++ mode->hdisplay = 480;
++ mode->vdisplay = 864;
++
++ Panel_RRate = 60;
++ dev_priv->dpi = false;
++ dev_priv->bpp = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount = 2;
++ dev_priv->channelNumber = 0;
++#endif /*FIXME jliu7 remove it later */
++ } else {
++#if DSI_TPO_864x480_2 /*FIXME jliu7 remove it later */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++
++ dev_priv->dpi2 = true;
++ dev_priv->bpp2 = 24;
++ dev_priv->videoModeFormat2 = BURST_MODE;
++ dev_priv->laneCount2 = 2;
++ dev_priv->channelNumber2 = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_864x480_2 /* get from spec. */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++
++ Panel_RRate = 60;
++ dev_priv->dpi2 = false;
++ dev_priv->bpp2 = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount2 = 2;
++ dev_priv->channelNumber2 = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_480x864_2 /* get from spec. */
++ mode->hdisplay = 480;
++ mode->vdisplay = 864;
++
++ Panel_RRate = 60;
++ dev_priv->dpi2 = false;
++ dev_priv->bpp2 = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount2 = 2;
++ dev_priv->channelNumber2 = 0;
++#endif /*FIXME jliu7 remove it later */
++ }
++
++ }
++
++ if (((dsi_num == 1) && !dev_priv->dpi) || ((dsi_num == 2) && !dev_priv->dpi2))
++ {
++
++ mode->hsync_start = mode->hdisplay + 8;
++ mode->hsync_end = mode->hsync_start + 4;
++ mode->htotal = mode->hsync_end + 8;
++ mode->vsync_start = mode->vdisplay + 2;
++ mode->vsync_end = mode->vsync_start + 2;
++ mode->vtotal = mode->vsync_end + 2;
++ mode->clock = (mode->htotal * mode->vtotal * Panel_RRate) / 1000;
++ }
++
++ if (dsi_num == 1)
++ {
++ dev_priv->pixelClock = mode->clock; /*KHz*/
++ dev_priv->HsyncWidth = mode->hsync_end - mode->hsync_start;
++ dev_priv->HbackPorch = mode->htotal - mode->hsync_end;
++ dev_priv->HfrontPorch = mode->hsync_start - mode->hdisplay;
++ dev_priv->HactiveArea = mode->hdisplay;
++ dev_priv->VsyncWidth = mode->vsync_end - mode->vsync_start;
++ dev_priv->VbackPorch = mode->vtotal - mode->vsync_end;
++ dev_priv->VfrontPorch = mode->vsync_start - mode->vdisplay;
++ dev_priv->VactiveArea = mode->vdisplay;
++
++ PSB_DEBUG_ENTRY("pixelClock is %d\n", dev_priv->pixelClock);
++ PSB_DEBUG_ENTRY("HsyncWidth is %d\n", dev_priv->HsyncWidth);
++ PSB_DEBUG_ENTRY("HbackPorch is %d\n", dev_priv->HbackPorch);
++ PSB_DEBUG_ENTRY("HfrontPorch is %d\n", dev_priv->HfrontPorch);
++ PSB_DEBUG_ENTRY("HactiveArea is %d\n", dev_priv->HactiveArea);
++ PSB_DEBUG_ENTRY("VsyncWidth is %d\n", dev_priv->VsyncWidth);
++ PSB_DEBUG_ENTRY("VbackPorch is %d\n", dev_priv->VbackPorch);
++ PSB_DEBUG_ENTRY("VfrontPorch is %d\n", dev_priv->VfrontPorch);
++ PSB_DEBUG_ENTRY("VactiveArea is %d\n", dev_priv->VactiveArea);
++ } else {
++ dev_priv->pixelClock2 = mode->clock; /*KHz*/
++ dev_priv->HsyncWidth2 = mode->hsync_end - mode->hsync_start;
++ dev_priv->HbackPorch2 = mode->htotal - mode->hsync_end;
++ dev_priv->HfrontPorch2 = mode->hsync_start - mode->hdisplay;
++ dev_priv->HactiveArea2 = mode->hdisplay;
++ dev_priv->VsyncWidth2 = mode->vsync_end - mode->vsync_start;
++ dev_priv->VbackPorch2 = mode->vtotal - mode->vsync_end;
++ dev_priv->VfrontPorch2 = mode->vsync_start - mode->vdisplay;
++ dev_priv->VactiveArea2 = mode->vdisplay;
++
++ PSB_DEBUG_ENTRY("pixelClock2 is %d\n", dev_priv->pixelClock2);
++ PSB_DEBUG_ENTRY("HsyncWidth2 is %d\n", dev_priv->HsyncWidth2);
++ PSB_DEBUG_ENTRY("HbackPorch2 is %d\n", dev_priv->HbackPorch2);
++ PSB_DEBUG_ENTRY("HfrontPorch2 is %d\n", dev_priv->HfrontPorch2);
++ PSB_DEBUG_ENTRY("HactiveArea2 is %d\n", dev_priv->HactiveArea2);
++ PSB_DEBUG_ENTRY("VsyncWidth2 is %d\n", dev_priv->VsyncWidth2);
++ PSB_DEBUG_ENTRY("VbackPorch2 is %d\n", dev_priv->VbackPorch2);
++ PSB_DEBUG_ENTRY("VfrontPorch2 is %d\n", dev_priv->VfrontPorch2);
++ PSB_DEBUG_ENTRY("VactiveArea2 is %d\n", dev_priv->VactiveArea2);
++ }
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++static int mdfld_dbi_cb_init(struct mdfld_dsi_dbi_output * output, struct psb_gtt * pg) {
++ uint32_t phy;
++ void * virt_addr = NULL;
++
++ switch(output->channel_num) {
++ case 0:
++ phy = pg->gtt_phys_start - 0x1000;
++ break;
++ case 1:
++ phy = pg->gtt_phys_start - 0x800;
++ break;
++ default:
++ DRM_ERROR("Unsupported channel\n");
++ return -EINVAL;
++ }
++
++ /*mapping*/
++ virt_addr = ioremap_nocache(phy, 0x800);
++ if(!virt_addr) {
++ DRM_ERROR("Map DBI command buffer error\n");
++ return -ENOMEM;
++ }
++
++ output->dbi_cb_phy = phy;
++ output->dbi_cb_addr = virt_addr;
++
++ /*init cb lock*/
++ spin_lock_init(&output->cb_lock);
++
++ PSB_DEBUG_ENTRY("DBI command buffer initailized. phy %x, addr %p\n", phy, virt_addr);
++
++ return 0;
++}
++
++static void mdfld_dbi_cb_destroy(struct mdfld_dsi_dbi_output * output) {
++ PSB_DEBUG_ENTRY("\n");
++
++ if(output && output->dbi_cb_addr) {
++ iounmap(output->dbi_cb_addr);
++ }
++
++}
++
++static int mdfld_dbi_panel_reset(struct mdfld_dsi_dbi_output * output) {
++ unsigned gpio;
++ int ret = 0;
++
++ switch(output->channel_num) {
++ case 0:
++ gpio = 128;
++ break;
++ case 1:
++ gpio = 34;
++ break;
++ default:
++ DRM_ERROR("Invalid output\n");
++ return -EINVAL;
++ }
++
++ ret = gpio_request(gpio, "gfx");
++ if(ret) {
++ DRM_ERROR("gpio_request failed\n");
++ return ret;
++ }
++
++ ret = gpio_direction_output(gpio, 1);
++ if(ret) {
++ DRM_ERROR("gpio_direction_output failed\n");
++ goto gpio_error;
++ }
++
++ gpio_get_value(gpio); /* was hard-coded 128; read back this channel's own reset GPIO */
++
++gpio_error:
++ if(gpio_is_valid(gpio))
++ gpio_free(gpio);
++
++ PSB_DEBUG_ENTRY("Panel reset done\n");
++
++ return ret;
++}
++
++void mdfld_dsi_dbi_init(struct drm_device * dev, struct psb_intel_mode_device * mode_dev, int dsi_num)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct mdfld_dsi_dbi_output * dbi_output = NULL;
++ struct psb_intel_output * psb_output = NULL;
++ struct drm_connector * connector = NULL;
++ struct drm_encoder * encoder = NULL;
++ struct drm_display_mode * fixed_mode = NULL;
++ struct psb_gtt * pg = dev_priv ? (dev_priv->pg) : NULL;
++#ifdef CONFIG_MDFLD_DSI_DPU
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv ? (dev_priv->dbi_dpu_info) : NULL;
++#else
++ struct mdfld_dbi_dsr_info * dsr_info = dev_priv ? (dev_priv->dbi_dsr_info) : NULL;
++#endif
++ int ret;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if(!pg || !mode_dev) {
++ DRM_ERROR("Invalid parameters\n");
++ return;
++ }
++
++ dbi_output = kzalloc(sizeof(struct mdfld_dsi_dbi_output), GFP_KERNEL);
++ if(!dbi_output) {
++ DRM_ERROR("No memory\n");
++ return;
++ }
++
++ if(dsi_num == 1) {
++ dbi_output->channel_num = 0;
++ } else if (dsi_num == 2) {
++ dbi_output->channel_num = 1;
++ } else {
++ DRM_ERROR("only support 2 DSI outputs\n");
++ goto out_err1;
++ }
++
++ dbi_output->dev = dev;
++
++ /*init dbi command buffer*/
++ ret = mdfld_dbi_cb_init(dbi_output, pg);
++ if(ret) {
++ DRM_ERROR("DBI command buffer init error\n");
++ goto out_err1;
++ }
++
++ /*panel reset*/
++ ret = mdfld_dbi_panel_reset(dbi_output);
++ if(ret) {
++ DRM_ERROR("reset panel error\n");
++ goto out_err2;
++ }
++
++ /*TODO: get panel info from DDB*/
++
++ /*get fixed mode*/
++ fixed_mode = mdfld_dsi_dbi_get_configuration_mode(dev, dsi_num);
++ if(fixed_mode) {
++ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
++ } else {
++ DRM_ERROR("No mode found\n");
++ goto out_err2;
++ }
++
++ dbi_output->panel_fixed_mode = fixed_mode;
++ mode_dev->panel_fixed_mode = fixed_mode;
++ mode_dev->panel_fixed_mode2 = fixed_mode;
++
++ /*create drm objects*/
++ psb_output = &dbi_output->base;
++ psb_output->mode_dev = mode_dev;
++ psb_output->type = (dsi_num == 1) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
++
++ connector = &psb_output->base;
++ encoder = &psb_output->enc;
++ drm_connector_init(dev, connector, &mdfld_dsi_dbi_connector_funcs, DRM_MODE_CONNECTOR_MIPI);
++ drm_encoder_init(dev, encoder, &mdfld_dsi_dbi_encoder_funcs, DRM_MODE_ENCODER_MIPI);
++ drm_mode_connector_attach_encoder(connector, encoder);
++ drm_encoder_helper_add(encoder, &mdfld_dsi_helper_funcs);
++ drm_connector_helper_add(connector, &mdfld_dsi_dbi_connector_helper_funcs);
++
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ /*attach properties*/
++ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
++
++ dev_priv->dsr_fb_update = 0;
++ dev_priv->b_dsr_enable = false;
++
++ dbi_output->first_boot = true;
++ dbi_output->mode_flags = MODE_SETTING_IN_ENCODER;
++
++ drm_sysfs_connector_add(connector);
++
++#ifdef CONFIG_MDFLD_DSI_DPU
++ /*add this output to dpu_info*/
++ if(dsi_num == 1) {
++ dpu_info->dbi_outputs[0] = dbi_output;
++ } else {
++ dpu_info->dbi_outputs[1] = dbi_output;
++ }
++
++ dpu_info->dbi_output_num++;
++
++#else /*CONFIG_MDFLD_DSI_DPU*/
++ /*add this output to dsr_info*/
++ if(dsi_num == 1) {
++ dsr_info->dbi_outputs[0] = dbi_output;
++ } else {
++ dsr_info->dbi_outputs[1] = dbi_output;
++ }
++
++ dsr_info->dbi_output_num++;
++#endif
++
++ PSB_DEBUG_ENTRY("successfully\n");
++
++ return;
++
++out_err2:
++ mdfld_dbi_cb_destroy(dbi_output);
++out_err1:
++ if(dbi_output) {
++ kfree(dbi_output);
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_dsi_dbi.h
+@@ -0,0 +1,185 @@
++#ifndef __MDFLD_DSI_DBI_H__
++#define __MDFLD_DSI_DBI_H__
++
++#include <linux/backlight.h>
++#include <linux/version.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_powermgmt.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++
++struct mdfld_dsi_dbi_output {
++ struct psb_intel_output base;
++
++ struct drm_display_mode * panel_fixed_mode;
++
++ /*DBI command buffer lock*/
++ spinlock_t cb_lock;
++
++ /*DBI command buffer address*/
++ uint32_t dbi_cb_phy;
++
++ /*DBI command buffer virtual address*/
++ void * dbi_cb_addr;
++
++ /*command buffer write point*/
++ uint32_t cb_write;
++
++ u8 last_cmd;
++
++ u8 lane_count;
++
++ u8 channel_num;
++
++ struct drm_device * dev;
++
++ /*backlight operations*/
++
++ /*DSR timer*/
++ spinlock_t dsr_timer_lock;
++ struct timer_list dsr_timer;
++ void(*dsi_timer_func)(unsigned long data);
++ u32 dsr_idle_count;
++ bool dsr_fb_update_done;
++
++ /*mode setting flags*/
++ u32 mode_flags;
++
++ /*panel status*/
++ bool dbi_panel_on;
++ bool first_boot;
++};
++
++#define MDFLD_DSI_DBI_OUTPUT(psb_output) container_of(psb_output, struct mdfld_dsi_dbi_output, base)
++
++struct mdfld_dbi_dsr_info {
++ int dbi_output_num;
++ struct mdfld_dsi_dbi_output * dbi_outputs[2];
++
++ spinlock_t dsr_timer_lock;
++ struct timer_list dsr_timer;
++ u32 dsr_idle_count;
++};
++
++#define DBI_CB_TIMEOUT_COUNT 0xffff
++
++/*DCS commands*/
++#define enter_sleep_mode 0x10
++#define exit_sleep_mode 0x11
++#define set_display_off 0x28
++#define set_dispaly_on 0x29
++#define set_column_address 0x2a
++#define set_page_addr 0x2b
++#define write_mem_start 0x2c
++
++/*mdfld DBI registers*/
++#define MIPIA_DEVICE_READY_REG 0xb000
++#define MIPIA_INTR_EN_REG 0xb008
++#define MIPIA_DSI_FUNC_PRG_REG 0xb00c
++#define MIPIA_HS_TX_TIMEOUT_REG 0xb010
++#define MIPIA_LP_RX_TIMEOUT_REG 0xb014
++
++#define MIPIA_TURN_AROUND_TIMEOUT_REG 0xb018
++#define MIPIA_DEVICE_RESET_TIMER_REG 0xb01c
++#define MIPIA_HIGH_LOW_SWITCH_COUNT_REG 0xb044
++#define MIPIA_INIT_COUNT_REG 0xb050
++#define MIPIA_EOT_DISABLE_REG 0xb05c
++#define MIPIA_LP_BYTECLK_REG 0xb060
++#define MIPIA_GEN_FIFO_STAT_REG 0xb074
++#define MIPIA_HS_LS_DBI_ENABLE_REG 0xb078
++#define MIPIA_DPHY_PARAM_REG 0xb080
++#define MIPIA_DBI_BW_CTRL_REG 0xb084
++#define MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG 0xb088
++
++#define MIPIA_CONTROL_REG 0xb104
++#define MIPIA_CMD_ADD_REG 0xb110
++#define MIPIA_CMD_LEN_REG 0xb114
++
++/*offsets*/
++#define CMD_MEM_ADDR_OFFSET 0
++
++#define CMD_DATA_SRC_SYSTEM_MEM 0
++#define CMD_DATA_SRC_PIPE 1
++
++static inline int mdfld_dsi_dbi_fifo_ready(struct mdfld_dsi_dbi_output * dbi_output)
++{
++ struct drm_device * dev = dbi_output->dev;
++ u32 retry = DBI_CB_TIMEOUT_COUNT;
++ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
++ int ret = 0;
++
++ /*query the dbi fifo status*/
++ retry = DBI_CB_TIMEOUT_COUNT;
++ while(retry--) {
++ if(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset)) & BIT27) {
++ break;
++ }
++ }
++
++ if(retry == (u32)-1) { /* post-decrement wraps to (u32)-1 only on exhaustion; '!retry' missed timeouts */
++ DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
++ ret = -EAGAIN;
++ }
++
++ return ret;
++}
++
++static inline int mdfld_dsi_dbi_cmd_sent(struct mdfld_dsi_dbi_output * dbi_output)
++{
++ struct drm_device * dev = dbi_output->dev;
++ u32 retry = DBI_CB_TIMEOUT_COUNT;
++ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
++ int ret = 0;
++
++ /*query the command execution status*/
++ while(retry--) {
++ if(!(REG_READ((MIPIA_CMD_ADD_REG + reg_offset)) & BIT0)) {
++ break;
++ }
++ }
++
++ if(retry == (u32)-1) { /* post-decrement wraps to (u32)-1 only on exhaustion; '!retry' missed timeouts */
++ DRM_ERROR("Timeout waiting for DBI Command status\n");
++ ret = -EAGAIN;
++ }
++
++ return ret;
++}
++
++static inline int mdfld_dsi_dbi_cb_ready(struct mdfld_dsi_dbi_output * dbi_output)
++{
++ int ret = 0;
++
++ /*query the command execution status*/
++ ret = mdfld_dsi_dbi_cmd_sent(dbi_output);
++ if(ret) {
++ DRM_ERROR("Perpheral is busy\n");
++ return -EAGAIN; /* was 'ret = -EAGAIN': a passing FIFO check below then masked this failure */
++ }
++ /*query the dbi fifo status*/
++ ret = mdfld_dsi_dbi_fifo_ready(dbi_output);
++ if(ret) {
++ DRM_ERROR("DBI FIFO is not empty\n");
++ ret = -EAGAIN;
++ }
++
++ return ret;
++}
++
++/*export functions*/
++extern void mdfld_dsi_dbi_output_init(struct drm_device * dev, struct psb_intel_mode_device * mode_dev, int pipe);
++extern void mdfld_dsi_dbi_exit_dsr (struct drm_device *dev, u32 update_src);
++extern void mdfld_dsi_dbi_enter_dsr (struct mdfld_dsi_dbi_output * dbi_output, int pipe);
++extern int mdfld_dbi_dsr_init(struct drm_device * dev);
++extern void mdfld_dbi_dsr_exit(struct drm_device * dev);
++extern void mdfld_dbi_dsr_timer_start(struct mdfld_dbi_dsr_info * dsr_info);
++extern void mdfld_dsi_dbi_init(struct drm_device * dev, struct psb_intel_mode_device * mode_dev, int dsi_num);
++
++#endif /*__MDFLD_DSI_DBI_H__*/
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_dsi_dbi_dpu.c
+@@ -0,0 +1,703 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++ #include "mdfld_dsi_dbi_dpu.h"
++ #include "mdfld_dsi_dbi.h"
++
++/**
++ * NOTE: all mdfld_x_damage funcs should be called while holding dpu_update_lock
++ */
++static int mdfld_cursor_damage(struct mdfld_dbi_dpu_info * dpu_info,
++ mdfld_plane_t plane,
++ struct psb_drm_dpu_rect * damaged_rect)
++{
++ int x, y;
++ int new_x, new_y;
++ struct psb_drm_dpu_rect * rect;
++ struct psb_drm_dpu_rect * pipe_rect;
++ int cursor_size;
++ struct mdfld_cursor_info * cursor;
++ mdfld_plane_t fb_plane;
++
++ if(plane == MDFLD_CURSORA) {
++ cursor = &dpu_info->cursors[0];
++ x = dpu_info->cursors[0].x;
++ y = dpu_info->cursors[0].y;
++ cursor_size = dpu_info->cursors[0].size;
++ pipe_rect = &dpu_info->damage_pipea;
++ fb_plane = MDFLD_PLANEA;
++ } else {
++ cursor = &dpu_info->cursors[1];
++ x = dpu_info->cursors[1].x;
++ y = dpu_info->cursors[1].y;
++ cursor_size = dpu_info->cursors[1].size;
++ pipe_rect = &dpu_info->damage_pipec;
++ fb_plane = MDFLD_PLANEC;
++ }
++ new_x = damaged_rect->x;
++ new_y = damaged_rect->y;
++
++ if((x == new_x) && (y == new_y)) {
++ return 0;
++ }
++
++ rect = &dpu_info->damaged_rects[plane];
++
++ /*move to right*/
++ if(new_x >= x) {
++ if(new_y > y) {
++ rect->x = x;
++ rect->y = y;
++ rect->width = (new_x + cursor_size) - x;
++ rect->height = (new_y + cursor_size) - y;
++ goto cursor_out;
++ } else {
++ rect->x = x;
++ rect->y = new_y;
++ rect->width = (new_x + cursor_size) - x;
++ rect->height = (y - new_y);
++ goto cursor_out;
++ }
++ } else {
++ if(new_y > y) {
++ rect->x = new_x;
++ rect->y = y;
++ rect->width = (x + cursor_size) - new_x;
++ rect->height = new_y - y;
++ goto cursor_out;
++ } else {
++ rect->x = new_x;
++ rect->y = new_y;
++ rect->width = (x + cursor_size) - new_x;
++ rect->height = (y + cursor_size) - new_y;
++ }
++ }
++cursor_out:
++ if(new_x < 0) {
++ cursor->x = 0;
++ } else if (new_x > 864) {
++ cursor->x = 864;
++ } else {
++ cursor->x = new_x;
++ }
++
++ if(new_y < 0) {
++ cursor->y = 0;
++ } else if (new_y > 480) {
++ cursor->y = 480;
++ } else {
++ cursor->y = new_y;
++ }
++
++ /**
++ * FIXME: this is a workaround for cursor plane update, remove it later!
++ */
++ rect->x = 0;
++ rect->y = 0;
++ rect->width = 864;
++ rect->height = 480;
++
++ mdfld_check_boundary(dpu_info, rect);
++
++ mdfld_dpu_region_extent(pipe_rect, rect);
++
++ /*update pending status of dpu_info*/
++ dpu_info->pending |= (1 << plane);
++
++ /*update fb panel as well*/
++ dpu_info->pending |= (1 << fb_plane);
++
++ return 0;
++}
++
++static int mdfld_fb_damage(struct mdfld_dbi_dpu_info * dpu_info,
++ mdfld_plane_t plane,
++ struct psb_drm_dpu_rect * damaged_rect)
++{
++ struct psb_drm_dpu_rect * rect;
++
++ if(plane == MDFLD_PLANEA) {
++ rect = &dpu_info->damage_pipea;
++ } else {
++ rect = &dpu_info->damage_pipec;
++ }
++
++ mdfld_check_boundary(dpu_info, damaged_rect);
++
++ /*add fb damage area to this pipe*/
++ mdfld_dpu_region_extent(rect, damaged_rect);
++
++ /*update pending status of dpu_info*/
++ dpu_info->pending |= (1 << plane);
++ return 0;
++}
++
++/*do nothing here, right now*/
++static int mdfld_overlay_damage(struct mdfld_dbi_dpu_info * dpu_info,
++ mdfld_plane_t plane,
++ struct psb_drm_dpu_rect * damaged_rect)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++ return 0;
++}
++
++int mdfld_dbi_dpu_report_damage(struct drm_device * dev,
++ mdfld_plane_t plane,
++ struct psb_drm_dpu_rect * rect)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++ int ret = 0;
++
++ /*request lock*/
++ spin_lock(&dpu_info->dpu_update_lock);
++
++ switch(plane) {
++ case MDFLD_PLANEA:
++ case MDFLD_PLANEC:
++ mdfld_fb_damage(dpu_info, plane, rect);
++ break;
++ case MDFLD_CURSORA:
++ case MDFLD_CURSORC:
++ mdfld_cursor_damage(dpu_info, plane, rect);
++ break;
++ case MDFLD_OVERLAYA:
++ case MDFLD_OVERLAYC:
++ mdfld_overlay_damage(dpu_info, plane, rect);
++ break;
++ default:
++ DRM_ERROR("Invalid plane type %d\n", plane);
++ ret = -EINVAL;
++ }
++
++ spin_unlock(&dpu_info->dpu_update_lock);
++ return ret;
++}
++
++
++int mdfld_dsi_dbi_dsr_off(struct drm_device * dev, struct psb_drm_dpu_rect * rect)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++
++ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, rect);
++
++ /*if dual display mode*/
++ if(dpu_info->dbi_output_num == 2)
++ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, rect);
++
++ /*force dsi to exit DSR mode*/
++ mdfld_dpu_exit_dsr(dev);
++
++ return 0;
++}
++
++static void mdfld_dpu_cursor_plane_flush(struct mdfld_dbi_dpu_info * dpu_info,
++ mdfld_plane_t plane)
++{
++ struct drm_device * dev = dpu_info->dev;
++ u32 curpos_reg = CURAPOS;
++ u32 curbase_reg = CURABASE;
++ u32 curcntr_reg = CURACNTR;
++ struct mdfld_cursor_info * cursor = &dpu_info->cursors[0];
++
++ if(plane == MDFLD_CURSORC) {
++ curpos_reg = CURCPOS;
++ curbase_reg = CURCBASE;
++ curcntr_reg = CURCCNTR;
++ cursor = &dpu_info->cursors[1];
++ }
++
++ REG_WRITE(curcntr_reg, REG_READ(curcntr_reg));
++ REG_WRITE(curpos_reg, (((cursor->x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) | ((cursor->y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)));
++ REG_WRITE(curbase_reg, REG_READ(curbase_reg));
++}
++
++static void mdfld_dpu_fb_plane_flush(struct mdfld_dbi_dpu_info * dpu_info,
++ mdfld_plane_t plane)
++{
++ u32 pipesrc_reg = PIPEASRC;
++ u32 dspsize_reg = DSPASIZE;
++ u32 dspoff_reg = DSPALINOFF;
++ u32 dspsurf_reg = DSPASURF;
++ u32 dspstride_reg = DSPASTRIDE;
++ u32 stride;
++ struct psb_drm_dpu_rect * rect = &dpu_info->damage_pipea;
++ struct drm_device * dev = dpu_info->dev;
++
++ if(plane == MDFLD_PLANEC) {
++ pipesrc_reg = PIPECSRC;
++ dspsize_reg = DSPCSIZE;
++ dspoff_reg = DSPCLINOFF;
++ dspsurf_reg = DSPCSURF;
++ dspstride_reg = DSPCSTRIDE;
++
++ rect = &dpu_info->damage_pipec;
++ }
++
++ stride = REG_READ(dspstride_reg);
++ /*FIXME: should I do the pipe src update here?*/
++ REG_WRITE(pipesrc_reg, ((rect->width -1) << 16) | (rect->height -1));
++ /*flush plane*/
++ REG_WRITE(dspsize_reg, ((rect->height-1) << 16) | (rect->width - 1));
++ REG_WRITE(dspoff_reg, ((rect->x * 4) + (rect->y * stride)));
++ REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
++
++ /**
++ * TODO: wait for flip finished and restore the pipesrc reg,
++ * or cursor will be show at a wrong position
++ */
++}
++
++static void mdfld_dpu_overlay_plane_flush(struct mdfld_dbi_dpu_info * dpu_info,
++ mdfld_plane_t plane)
++{
++ PSB_DEBUG_ENTRY("\n");
++}
++
++/**
++ * TODO: we are still in dbi normal mode now, will try to use partial mode later.
++ */
++static int mdfld_dbi_prepare_cb(struct mdfld_dsi_dbi_output * dbi_output,
++				struct mdfld_dbi_dpu_info * dpu_info, int pipe)
++{
++ u8 * cb_addr = (u8 *)dbi_output->dbi_cb_addr;
++ u32 * index;
++ struct psb_drm_dpu_rect * rect = pipe ? (&dpu_info->damage_pipec) : (&dpu_info->damage_pipea);
++
++ /*FIXME: locking the command buffer may deadlock, since we already hold the dpu_update_lock*/
++ if(!spin_trylock(&dbi_output->cb_lock)) {
++ DRM_ERROR("lock command buffer failed, try again\n");
++ return -EAGAIN;
++ }
++
++ index = &dbi_output->cb_write;
++
++ if(*index) {
++ DRM_ERROR("DBI command buffer unclean\n");
++ spin_unlock(&dbi_output->cb_lock); return -EAGAIN; /* must not leak cb_lock on this path */
++ }
++
++ /*column address*/
++ *(cb_addr + ((*index)++)) = set_column_address;
++ *(cb_addr + ((*index)++)) = rect->x >> 8;
++ *(cb_addr + ((*index)++)) = rect->x;
++ *(cb_addr + ((*index)++)) = (rect->x + rect->width -1) >> 8;
++ *(cb_addr + ((*index)++)) = (rect->x + rect->width -1);
++
++ *index = 8;
++
++ /*page address*/
++ *(cb_addr + ((*index)++)) = set_page_addr;
++ *(cb_addr + ((*index)++)) = rect->y >> 8;
++ *(cb_addr + ((*index)++)) = rect->y;
++ *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1) >> 8;
++ *(cb_addr + ((*index)++)) = (rect->y + rect->height -1 );
++
++ *index = 16;
++
++ /*write memory*/
++ *(cb_addr + ((*index)++)) = write_mem_start;
++
++ return 0;
++}
++
++static int mdfld_dbi_flush_cb(struct mdfld_dsi_dbi_output * dbi_output, int pipe)
++{
++ u32 cmd_phy;
++ u32 * index;
++ int reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
++ struct drm_device * dev;
++
++ /* NULL check must precede the dereferences (was checked after initializers used dbi_output) */
++ if(!dbi_output || dbi_output->cb_write == 0) {
++ return 0;
++ }
++ cmd_phy = dbi_output->dbi_cb_phy; index = &dbi_output->cb_write; dev = dbi_output->dev;
++
++ REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
++ REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | BIT0 | BIT1);
++
++ *index = 0;
++
++ /*FIXME: unlock command buffer (taken by spin_trylock in mdfld_dbi_prepare_cb)*/
++ spin_unlock(&dbi_output->cb_lock);
++ return 0;
++}
++
++static int mdfld_dpu_update_pipe(struct mdfld_dsi_dbi_output * dbi_output,
++ struct mdfld_dbi_dpu_info * dpu_info, int pipe)
++{
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ mdfld_plane_t cursor_plane = MDFLD_CURSORA;
++ mdfld_plane_t fb_plane = MDFLD_PLANEA;
++ mdfld_plane_t overlay_plane = MDFLD_OVERLAYA;
++ int ret = 0;
++ u32 plane_mask = MDFLD_PIPEA_PLANE_MASK;
++
++ /*damaged rects on this pipe*/
++ if(pipe) {
++ cursor_plane = MDFLD_CURSORC;
++ fb_plane = MDFLD_PLANEC;
++ overlay_plane = MDFLD_OVERLAYC;
++ plane_mask = MDFLD_PIPEC_PLANE_MASK;
++ }
++
++ /*update cursor which assigned to @pipe*/
++ if(dpu_info->pending & (1 << cursor_plane))
++ mdfld_dpu_cursor_plane_flush(dpu_info, cursor_plane);
++
++ /*update fb which assigned to @pipe*/
++ if(dpu_info->pending & (1 << fb_plane))
++ mdfld_dpu_fb_plane_flush(dpu_info, fb_plane);
++
++ /*TODO: update overlay*/
++ if(dpu_info->pending & (1 << overlay_plane))
++ mdfld_dpu_overlay_plane_flush(dpu_info, overlay_plane);
++
++ /*flush damage area to panel fb*/
++ if(dpu_info->pending & plane_mask) {
++ ret = mdfld_dbi_prepare_cb(dbi_output, dpu_info, pipe);
++
++ /**
++ * TODO: remove b_dsr_enable later,
++ * added it so that text console could boot smoothly
++ */
++ /*clean pending flags on this pipe*/
++ if(!ret && dev_priv->b_dsr_enable) {
++ dpu_info->pending &= ~plane_mask;
++
++ /*reset overlay pipe damage rect*/
++ mdfld_dpu_init_damage(dpu_info, pipe);
++ }
++ }
++
++ return ret;
++}
++
++static int mdfld_dpu_update_fb(struct drm_device * dev) {
++ struct drm_crtc * crtc;
++ struct psb_intel_crtc * psb_crtc;
++ struct mdfld_dsi_dbi_output ** dbi_output;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++ bool pipe_updated[2];
++ unsigned long irq_flags;
++ int i;
++ int ret;
++
++ dbi_output = dpu_info->dbi_outputs;
++ pipe_updated[0] = pipe_updated[1] = false;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return -EAGAIN;
++
++ /*try to prevent any new damage reports*/
++ if(!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags)) {
++ return -EAGAIN;
++ }
++
++ for(i=0; i<dpu_info->dbi_output_num; i++) {
++ crtc = dbi_output[i]->base.enc.crtc;
++ psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
++
++ /*if dbi output is in a exclusive state, pipe change won't be updated*/
++ if(dbi_output[i]->dbi_panel_on &&
++ !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING) &&
++ !(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) &&
++ !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
++ ret = mdfld_dpu_update_pipe(dbi_output[i], dpu_info, dbi_output[i]->channel_num ? 2 : 0);
++ if(!ret) {
++ pipe_updated[i] = true;
++ }
++ }
++ }
++
++ for(i=0; i<dpu_info->dbi_output_num; i++) {
++ if(pipe_updated[i]) {
++ mdfld_dbi_flush_cb(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0);
++ }
++ }
++
++ spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output * dbi_output, int pipe)
++{
++ struct drm_device * dev = dbi_output->dev;
++ struct drm_crtc * crtc = dbi_output->base.enc.crtc;
++ struct psb_intel_crtc * psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
++ u32 reg_val;
++ u32 dpll_reg = MRST_DPLL_A;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 dspcntr_reg = DSPACNTR;
++ u32 dspbase_reg = DSPABASE;
++ u32 dspsurf_reg = DSPASURF;
++ u32 reg_offset = 0;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if(!dbi_output) {
++ return 0;
++ }
++
++ /*if mode setting on-going, back off*/
++ if((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
++ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
++ return -EAGAIN;
++
++ if(pipe == 2) {
++ dpll_reg = MRST_DPLL_A;
++ pipeconf_reg = PIPECCONF;
++ dspcntr_reg = DSPCCNTR;
++ dspbase_reg = MDFLD_DSPCBASE;
++ dspsurf_reg = DSPCSURF;
++
++ reg_offset = MIPIC_REG_OFFSET;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return -EAGAIN;
++
++ /*enable DPLL*/
++ reg_val = REG_READ(dpll_reg);
++ if(!(reg_val & DPLL_VCO_ENABLE)) {
++
++ if(reg_val & MDFLD_PWR_GATE_EN) {
++ reg_val &= ~MDFLD_PWR_GATE_EN;
++ REG_WRITE(dpll_reg, reg_val);
++ REG_READ(dpll_reg);
++ udelay(500);
++ }
++
++ reg_val |= DPLL_VCO_ENABLE;
++ REG_WRITE(dpll_reg, reg_val);
++ REG_READ(dpll_reg);
++ udelay(500);
++
++ /*FIXME: add timeout*/
++ while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK));
++ }
++
++ /*enable pipe*/
++ reg_val = REG_READ(pipeconf_reg);
++ if(!(reg_val & PIPEACONF_ENABLE)) {
++ reg_val |= PIPEACONF_ENABLE;
++ REG_WRITE(pipeconf_reg, reg_val);
++ REG_READ(pipeconf_reg);
++ udelay(500);
++ mdfldWaitForPipeEnable(dev, pipe);
++ }
++
++ /*enable plane*/
++ reg_val = REG_READ(dspcntr_reg);
++ if(!(reg_val & DISPLAY_PLANE_ENABLE)) {
++ reg_val |= DISPLAY_PLANE_ENABLE;
++ REG_WRITE(dspcntr_reg, reg_val);
++ REG_READ(dspcntr_reg);
++ udelay(500);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ /*clean IN_DSR flag*/
++ dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
++
++ return 0;
++}
++
++int mdfld_dpu_exit_dsr(struct drm_device * dev)
++{
++ struct mdfld_dsi_dbi_output ** dbi_output;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++ int i;
++
++ dbi_output = dpu_info->dbi_outputs;
++
++ for(i=0; i<dpu_info->dbi_output_num; i++) {
++ /*if this output is not in DSR mode, don't call exit dsr*/
++ if(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR) {
++ __mdfld_dbi_exit_dsr(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0);
++ }
++ }
++
++ /*start dpu timer*/
++ mdfld_dbi_dpu_timer_start(dpu_info);
++ return 0;
++
++}
++
++static int mdfld_dpu_enter_dsr(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++ struct mdfld_dsi_dbi_output ** dbi_output;
++ int i;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ dbi_output = dpu_info->dbi_outputs;
++
++ for(i=0; i<dpu_info->dbi_output_num; i++) {
++ /*if output is off or already in DSR state, don't enter again*/
++ if(dbi_output[i]->dbi_panel_on &&
++ !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
++ mdfld_dsi_dbi_enter_dsr(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0);
++ }
++ }
++
++ return 0;
++}
++
++static void mdfld_dbi_dpu_timer_func(unsigned long data)
++{
++ struct drm_device * dev = (struct drm_device *)data;
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++ struct timer_list * dpu_timer = &dpu_info->dpu_timer;
++ unsigned long flags;
++
++ if(dpu_info->pending) {
++ dpu_info->idle_count = 0;
++
++ /*update panel fb with damaged area*/
++ mdfld_dpu_update_fb(dev);
++ } else {
++ dpu_info->idle_count++;
++ }
++
++ if(dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
++ /*enter dsr*/
++ mdfld_dpu_enter_dsr(dev);
++
++ /*stop timer by return*/
++ return;
++ }
++
++ spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
++ if(!timer_pending(dpu_timer)){
++ dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dpu_timer);
++ }
++ spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
++}
++
++static int mdfld_dbi_dpu_timer_init(struct drm_device * dev, struct mdfld_dbi_dpu_info * dpu_info)
++{
++ struct timer_list * dpu_timer = &dpu_info->dpu_timer;
++ unsigned long flags;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ spin_lock_init(&dpu_info->dpu_timer_lock);
++ spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
++
++ init_timer(dpu_timer);
++
++ dpu_timer->data = (unsigned long)dev;
++ dpu_timer->function = mdfld_dbi_dpu_timer_func;
++ dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
++
++ spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
++
++ PSB_DEBUG_ENTRY("successfully\n");
++
++ return 0;
++}
++
++void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info * dpu_info)
++{
++ struct timer_list * dpu_timer = &dpu_info->dpu_timer;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
++ if(!timer_pending(dpu_timer)){
++ dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dpu_timer);
++ }
++ spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
++}
++
++int mdfld_dbi_dpu_init(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++
++ if(!dpu_info || IS_ERR(dpu_info)) {
++ dpu_info = kzalloc(sizeof(struct mdfld_dbi_dpu_info), GFP_KERNEL);
++ if(!dpu_info) {
++ DRM_ERROR("No memory\n");
++ return -ENOMEM;
++ }
++
++ dev_priv->dbi_dpu_info = dpu_info;
++ }
++
++ dpu_info->dev = dev;
++
++ dpu_info->cursors[0].size = MDFLD_CURSOR_SIZE;
++ dpu_info->cursors[1].size = MDFLD_CURSOR_SIZE;
++
++ /*init dpu_update_lock*/
++ spin_lock_init(&dpu_info->dpu_update_lock);
++
++ /*init dpu refresh timer*/
++ mdfld_dbi_dpu_timer_init(dev, dpu_info);
++
++ /*init pipe damage area*/
++ mdfld_dpu_init_damage(dpu_info, 0);
++ mdfld_dpu_init_damage(dpu_info, 2);
++
++ PSB_DEBUG_ENTRY("successfully\n");
++
++ return 0;
++}
++
++void mdfld_dbi_dpu_exit(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = dev->dev_private;
++ struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info;
++
++ if(!dpu_info) {
++ return;
++ }
++
++ /*delete dpu timer*/
++ del_timer_sync(&dpu_info->dpu_timer);
++
++ /*free dpu info*/
++ kfree(dpu_info);
++
++ dev_priv->dbi_dpu_info = NULL;
++}
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_dsi_dbi_dpu.h
+@@ -0,0 +1,130 @@
++#ifndef __MDFLD_DSI_DBI_DPU_H__
++#define __MDFLD_DSI_DBI_DPU_H__
++
++#include "mdfld_dsi_dbi.h"
++
++typedef enum {
++ MDFLD_PLANEA,
++ MDFLD_PLANEC,
++ MDFLD_CURSORA,
++ MDFLD_CURSORC,
++ MDFLD_OVERLAYA,
++ MDFLD_OVERLAYC,
++ MDFLD_PLANE_NUM,
++} mdfld_plane_t;
++
++#define MDFLD_PIPEA_PLANE_MASK 0x15
++#define MDFLD_PIPEC_PLANE_MASK 0x2A
++
++struct mdfld_cursor_info {
++ int x, y;
++ int size;
++};
++
++#define MDFLD_CURSOR_SIZE 64
++
++/**
++ * enter DSR mode if screen has no update for 2 frames.
++ * TODO: export this as a configuration variable.
++ * (or what's the PRD for this?)
++ */
++#define MDFLD_MAX_IDLE_COUNT 2
++
++struct mdfld_dbi_dpu_info {
++ struct drm_device * dev;
++ /*lock*/
++ spinlock_t dpu_update_lock;
++
++ /*cursor postion*/
++ struct mdfld_cursor_info cursors[2];
++
++ /*damaged area for each plane*/
++ struct psb_drm_dpu_rect damaged_rects[MDFLD_PLANE_NUM];
++
++ /*final damaged area*/
++ struct psb_drm_dpu_rect damage_pipea;
++ struct psb_drm_dpu_rect damage_pipec;
++
++ /*pending*/
++ u32 pending;
++
++ /*dpu timer*/
++ struct timer_list dpu_timer;
++ spinlock_t dpu_timer_lock;
++
++ /*dpu idle count*/
++ u32 idle_count;
++
++ /*dsi outputs*/
++ struct mdfld_dsi_dbi_output * dbi_outputs[2];
++ int dbi_output_num;
++};
++
++static inline int mdfld_dpu_region_extent(struct psb_drm_dpu_rect * origin,
++ struct psb_drm_dpu_rect * rect)
++{
++ int x1, y1, x2, y2;
++
++ /*PSB_DEBUG_ENTRY("rect (%d, %d, %d, %d)\n", rect->x, rect->y, rect->width, rect->height);*/
++
++ x1 = origin->x + origin->width;
++ y1 = origin->y + origin->height;
++
++ x2 = rect->x + rect->width;
++ y2 = rect->y + rect->height;
++
++ origin->x = min(origin->x, rect->x);
++ origin->y = min(origin->y, rect->y);
++ origin->width = max(x1, x2) - origin->x;
++ origin->height = max(y1, y2) - origin->y;
++
++ return 0;
++}
++
++static inline void mdfld_check_boundary(struct mdfld_dbi_dpu_info * dpu_info,
++ struct psb_drm_dpu_rect * rect)
++{
++ if(rect->x < 0)
++ rect->x = 0;
++ if(rect->y < 0)
++ rect->y = 0;
++
++ if((rect->x + rect->width) > 864) {
++ rect->width = 864 - rect->x;
++ }
++
++ if((rect->y + rect->height) > 480) {
++ rect->height = 480 - rect->height;
++ }
++
++ if(!rect->width)
++ rect->width = 1;
++ if(!rect->height)
++ rect->height = 1;
++}
++
++static inline void mdfld_dpu_init_damage(struct mdfld_dbi_dpu_info * dpu_info, int pipe) {
++ struct psb_drm_dpu_rect * rect;
++
++ if(pipe == 0) {
++ rect = &dpu_info->damage_pipea;
++ } else {
++ rect = &dpu_info->damage_pipec;
++ }
++
++ rect->x = 864;
++ rect->y = 480;
++ rect->width = -864;
++ rect->height = -480;
++}
++
++extern int mdfld_dsi_dbi_dsr_off(struct drm_device * dev, struct psb_drm_dpu_rect * rect);
++extern int mdfld_dbi_dpu_report_damage(struct drm_device * dev,
++ mdfld_plane_t plane,
++ struct psb_drm_dpu_rect * rect);
++extern int mdfld_dpu_exit_dsr(struct drm_device * dev);
++extern void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info * dpu_info);
++extern int mdfld_dbi_dpu_init(struct drm_device * dev);
++extern void mdfld_dbi_dpu_exit(struct drm_device * dev);
++
++#endif /*__MDFLD_DSI_DBI_DPU_H__*/
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_hdcp.h
+@@ -0,0 +1,232 @@
++/*
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++
++#ifndef MDFLD_HDCP_H
++#define MDFLD_HDCP_H
++
++#define MAX_HDCP_DEVICES 127
++#define KSV_SIZE 5
++#define V_SIZE 20
++
++#define HDCP_MAX_RETRY_STATUS 1500
++
++#define HDCP_100MS_DELAY 100 //
++
++#if 0
++///////////////////////////////////////////////////////////////////
++//
++// Bit definition
++//
++///////////////////////////////////////////////////////////////////
++#define BIT(x) (1<<x)
++
++#define BIT0 BIT(0)
++#define BIT1 BIT(1)
++#define BIT2 BIT(2)
++#define BIT3 BIT(3)
++#define BIT4 BIT(4)
++#define BIT5 BIT(5)
++#define BIT6 BIT(6)
++#define BIT7 BIT(7)
++#define BIT8 BIT(8)
++#define BIT9 BIT(9)
++#define BIT10 BIT(10)
++#define BIT11 BIT(11)
++#define BIT12 BIT(12)
++#define BIT13 BIT(13)
++#define BIT14 BIT(14)
++#define BIT15 BIT(15)
++#define BIT16 BIT(16)
++#define BIT17 BIT(17)
++#define BIT18 BIT(18)
++#define BIT19 BIT(19)
++#define BIT20 BIT(20)
++#define BIT21 BIT(21)
++#define BIT22 BIT(22)
++#define BIT23 BIT(23)
++#define BIT24 BIT(24)
++#define BIT25 BIT(25)
++#define BIT26 BIT(26)
++#define BIT27 BIT(27)
++#define BIT28 BIT(28)
++#define BIT29 BIT(29)
++#define BIT30 BIT(30)
++#define BIT31 BIT(31)
++#endif
++
++typedef struct _hdcp_priv_data {
++ int enabled;
++ int drmFD;
++ int output_id;
++} hdcp_priv_data_t;
++
++typedef struct _sqword {
++ union {
++ unsigned long long quad_part;//ULONGLONG QuadPart;
++ struct {
++ unsigned long low_part;
++ unsigned long high_part;
++ } u;
++ struct {
++ uint8_t byte[8];
++ };
++ };
++} sqword_t;
++
++// A quadword size member
++//
++typedef struct _hqword {
++ union {
++ struct {
++ uint64_t major_part: 40; // lower 40 bits
++ uint64_t unused1: 24;
++ };
++ struct {
++ unsigned major_part_low: 32; // lower 32 bits
++ unsigned major_part_high: 8; // lower bits
++ unsigned unused2: 24;
++ };
++ struct {
++ uint8_t byte[8];
++ };
++ };
++} hqword_t;
++
++// HDCP related definitions are kept here for common usability between
++// Integrated and External SDVO based HDCP operations
++//I2C Address for HDCP cummnication with Receiver
++#define RX_ADDRESS 0x74000000 // HDCP Port I2C Address (Single Link)
++ // shifted for call back function
++
++//I2C Subaddress Defines - As per the HDCP Spec
++// Downstream spec does not specify which is MSB and LSB?
++#define RX_BKSV_0 0x00 // BKSV[7:0]
++#define RX_BKSV_1 0x01 // BKSV[15:8]
++#define RX_BKSV_2 0x02 // BKSV[23:16]
++#define RX_BKSV_3 0x03 // BKSV[31:24]
++#define RX_BKSV_4 0x04 // BKSV[39:32]
++#define RX_RI_HIGH 0x08 // Ri'[7:0]
++#define RX_RI_LOW 0x09 // Ri'[15:8]
++#define RX_AKSV_0 0x10 // AKSV[7:0]
++#define RX_AKSV_1 0x11 // AKSV[15:8]
++#define RX_AKSV_2 0x12 // AKSV[23:16]
++#define RX_AKSV_3 0x13 // AKSV[31:24]
++#define RX_AKSV_4 0x14 // AKSV[39:32]... write this byte last
++#define RX_AINFO 0x15 // Receiver register to inform it to enable 1.1 features
++#define RX_AN_0 0x18 // An[7:0]
++#define RX_AN_1 0x19 // An[15:8]
++#define RX_AN_2 0x1A // An[23:16]
++#define RX_AN_3 0x1B // An[31:24]
++#define RX_AN_4 0x1C // An[39:32]
++#define RX_AN_5 0x1D // An[47:40]
++#define RX_AN_6 0x1E // An[55:48]
++#define RX_AN_7 0x1F // An[63:56]
++#define RX_VPRIME_H0_0 0x20 // V'[7:0]
++#define RX_VPRIME_H0_1 0x21 // V'[15:8]
++#define RX_VPRIME_H0_2 0x22 // V'[23:16]
++#define RX_VPRIME_H0_3 0x23 // V'[31:24]
++#define RX_VPRIME_H1_0 0x24 // V'[39:32]
++#define RX_VPRIME_H1_1 0x25 // V'[47:40]
++#define RX_VPRIME_H1_2 0x26 // V'[55:48]
++#define RX_VPRIME_H1_3 0x27 // V'[63:56]
++#define RX_VPRIME_H2_0 0x28 // V'[71:64]
++#define RX_VPRIME_H2_1 0x29 // V'[79:72]
++#define RX_VPRIME_H2_2 0x2A // V'[87:80]
++#define RX_VPRIME_H2_3 0x2B // V'[95:88]
++#define RX_VPRIME_H3_0 0x2C // V'[103:96]
++#define RX_VPRIME_H3_1 0x2D // V'[111:104]
++#define RX_VPRIME_H3_2 0x2E // V'[119:112]
++#define RX_VPRIME_H3_3 0x2F // V'[127:120]
++#define RX_VPRIME_H4_0 0x30 // V'[135:128]
++#define RX_VPRIME_H4_1 0x31 // V'[143:136]
++#define RX_VPRIME_H4_2 0x32 // V'[151:144]
++#define RX_VPRIME_H4_3 0x33 // V'[159:152]
++#define RX_BCAPS 0x40 // [7] RSVD, [6] Repeater, [5] Ready, [4] Fast, [3:2] RSVD, [1] Features, [0] Fast_reauthentication
++#define RX_BSTATUS_0 0x41 // [7] MAX_DEVS_EXCEEDED, [6:0] DEVICE_COUNT
++#define RX_BSTATUS_1 0x42 // [15:14] RSVD, [13] HDMI_RSVD, [12] HDMI_MODE, [11] MAX_CASCADE_EXCEEDED, [10:8] DEPTH
++#define RX_KSV_FIFO 0x43
++
++typedef enum _mdfld_hdcp_rx_data_type_enum {
++ RX_TYPE_BKSV_DATA = 0,
++ RX_TYPE_BCAPS = 1,
++ RX_TYPE_BSTATUS = 2,
++ RX_TYPE_REPEATER_KSV_LIST = 3,
++ RX_TYPE_REPEATER_PRIME_V = 4,
++ RX_TYPE_RI_DATA = 5,
++ RX_TYPE_BINFO = 6
++} mdfld_hdcp_rx_data_type_en;
++
++typedef struct _hdcp_bstatus {
++ unsigned device_count : 7; // [6:0] Total Number of Receiver Devices (excluding repeaters) attached
++ unsigned max_devices_exceeded : 1; // [7] Topology Error. Greater than 127 devices attached
++ unsigned repeater_depth : 3; // [10:8] Repeater depth
++ unsigned max_cascade_exceeded : 1; // [11] Topology Error. Greater than 7 levels of Repeater attached
++ unsigned reserved : 20; // [31:12] Reserved for future expansion
++} hdcp_bstatus_t;
++
++//
++// BCAPS
++//
++typedef union _hdcp_rx_bcaps
++{
++ uint8_t value;
++ struct {
++ uint8_t fast_reauthantication : 1; // bit 0
++ uint8_t b1_1features_supported : 1; // bit 1
++ uint8_t reserved : 2; // bi 3:2
++ uint8_t fast_transfer : 1; // bit 4 ( TRUE = transfer speed at 400 kHz FALSE = transfer speed at 100 Khz)
++ uint8_t ksv_fifo_ready : 1; // bit 5
++ uint8_t is_reapeater : 1; // bit 6
++ uint8_t reserved1 : 1; // bit 7
++ };
++}hdcp_rx_bcaps_t;
++
++//
++// BSTATUS
++//
++typedef union _hdcp_rx_bstatus
++{
++ uint16_t value;
++ struct {
++ uint16_t device_count : 7; // bit 6:0
++ uint16_t max_devs_exceeded : 1; // bit 7
++ uint16_t depth : 3; // bit 10:8
++ uint16_t max_cascade_exceeded : 1; // bit 11
++ uint16_t rx_in_hdmi_mode : 1; // bit 12
++ uint16_t rserved : 3; // bit 15:13
++ };
++}hdcp_rx_bstatus_t;
++
++// HDCP authentication step
++typedef enum _hdcp_authentication_step {
++ HDCP_AUTHENTICATION_STEP_NONE = 0,
++ HDCP_AUTHENTICATION_STEP_1 = 1,
++ HDCP_AUTHENTICATION_STEP_2 = 2,
++} hdcp_authentication_step_t;
++
++// KSV_GET
++typedef struct _aksv_get {
++ uint8_t uc_aksv[CP_HDCP_KEY_SELECTION_VECTOR_SIZE];
++} aksv_get_t;
++
++int hdcp_init(void **data, int drmFD, int output_id);
++int hdcp_uninit(void * data);
++#endif /* MDFLD_HDCP_H */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_hdcp_if.h
+@@ -0,0 +1,77 @@
++/*
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++
++#ifndef MDFLD_HDCP_IF_H
++#define MDFLD_HDCP_IF_H
++
++// Constants
++#define CP_HDCP_KEY_SELECTION_VECTOR_SIZE 5
++
++// Protection level (HDCP)
++typedef enum _cp_protection_level_hdcp {
++ CP_PROTECTION_LEVEL_HDCP_OFF = 0,
++ CP_PROTECTION_LEVEL_HDCP_ON = 1,
++} cp_protection_level_hdcp_t;
++
++// Protection type
++typedef enum _cp_protection_type {
++ CP_PROTECTION_TYPE_UNKNOWN = 0x80000000,
++ CP_PROTECTION_TYPE_NONE = 0x00000000,
++ CP_PROTECTION_TYPE_HDCP = 0x00000001,
++ CP_PROTECTION_TYPE_MASK = 0x80000001,
++} cp_protection_type_t;
++
++typedef enum _cp_status {
++ STATUS_UNSUCCESSFUL = 0x80000000,
++ STATUS_SUCCESS = 0x00000000,
++ STATUS_NOT_SUPPORTED = 0x00000001,
++ STATUS_INVALID_DEVICE_REQUEST = 0x00000002,
++ STATUS_REVOKED_HDCP_DEVICE_ATTACHED = 0x00000003,
++ STATUS_DATA_ERROR = 0x00000004,
++ STATUS_PENDING = 0x00000005,
++ STATUS_INVALID_PARAMETER = 0x00000006,
++} cp_status_t;
++
++// KSV
++typedef struct _ksv_t {
++ uint8_t ab_ksv[CP_HDCP_KEY_SELECTION_VECTOR_SIZE];
++} ksv_t;
++
++// HDCP
++typedef struct _hdcp_data {
++ uint32_t ksv_list_length; // Length of the revoked KSV list (set)
++ //ksv_t aksv; // KSV of attached device
++ //ksv_t bksv; // KSV of attached device
++ ksv_t * ksv_list; // List of revoked KSVs (set)
++ int perform_second_step; // True when the second authentication step is requested (get)
++ int is_repeater; // True when a repeater is attached to the connector (get and set)
++} hdcp_data_t;
++
++// CP Parameters
++typedef struct _cp_parameters {
++ uint32_t protect_type_mask; // Protection type mask (get and set)
++ uint32_t level; // Protection level (get and set)
++ hdcp_data_t hdcp; // HDCP specific data (get and set)
++} cp_parameters_t;
++
++extern uint32_t hdcp_set_cp_data(cp_parameters_t* cp);
++extern uint32_t hdcp_get_cp_data(cp_parameters_t* cp);
++#endif /* MDFLD_HDCP_IF_H */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_hdcp_reg.h
+@@ -0,0 +1,231 @@
++/*
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#ifndef MDFLD_HDCP_REG_H
++#define MDFLD_HDCP_REG_H
++
++/* Integrated HDMI specific registers */
++
++#define RESERVED2(x,y) x##y
++#define RESERVED1(x,y) RESERVED2(x,y)
++#define RANDOMNUMBER __LINE__ // __COUNTER__
++#define UNIQUENAME(ValueName) RESERVED1(ValueName, RANDOMNUMBER)
++
++/* TBD: This may change when tested on actual system */
++#define HDCP_MAX_RI_QUERY_COUNT 4
++#define HDCP_MAX_NUM_DWORDS 4 //128 bits
++#define HDCP_MAX_RANDOMNUM_LENGTH 2 //In DWORD => 64 bits
++#define HDCP_MAX_RETRY_DISABLE 2
++/*All sizes are defined in bytes */
++#define HDCP_SIZEOF_AKSV 8
++#define HDCP_SIZEOF_BKSV 8
++#define HDCP_SIZEOF_AN 5
++#define HDCP_SIZEOF_RI 2
++#define HDCP_ENCRYPTED_KEY_SIZE 12 //Akeys, IV and MAC
++#define HDCP_NUM_AKEYS 40
++#define HDCP_NEXT_RI_FRAME 126
++#define HDCP_MAX_RANDOM_NUM_SIZE 4//in dwords
++
++#define HDCP_CONVERT_BIG_ENDIAN(x) (((x&0x000000ff)<<24)|\
++ ((x&0x0000ff00)<<8)|\
++ ((x&0x00ff0000)>>8)|\
++ ((x&0xff000000)>>24))
++
++#define HDCP_MAX_AN_RETRY 100
++
++#define HDCP_AN_LO_INDEX 0
++#define HDCP_AN_HI_INDEX 1
++
++uint32_t hdcp_invalid_an_list[6][2] =
++{
++ {0x881cf9e4, 0x38155bf4},
++ {0xb0e81640, 0xb5cac2ec},
++ {0x514fa3e7, 0x5bbb3806},
++ {0xd1b4923a, 0x6172afbb},
++ {0x0c16fd1c, 0x1b28baf5},
++ {0x00000000, 0x00000000}
++};
++
++/* HDMI HDCP Regs */
++/* HDCP config */
++
++typedef enum _mdfld_hdcp_config_enum {
++ HDCP_Off = 0,
++ HDCP_CAPTURE_AN = 1,
++ HDCP_DECRYPT_KEYS = 2,
++ HDCP_AUTHENTICATE_AND_ENCRYPT = 3,
++ HDCP_UNIQUE_MCH_ID = 5,
++ HDCP_ENCRYPT_KEYS = 6,
++ HDCP_CYPHER_CHECK_MODE = 7
++} mdfld_hdcp_config_en;
++
++#define MDFLD_HDCP_CONFIG_REG 0x61400
++ #define MDFLD_HDCP_CONFIG_PRESERVED_BITS BITRANGE(3,31)
++typedef union _mdfld_hdcp_config {
++ uint32_t value;
++
++ struct{
++ uint32_t hdcp_config : 3; //bit 2:0; uses HDCP_CONFIGURATION_EN
++ uint32_t UNIQUENAME(Reserved): 29;//bit 3:31
++ };
++} mdfld_hdcp_config_t;
++
++
++/* HDCP_STATUS */
++
++#define MDFLD_HDCP_STATUS_REG 0x61448
++ #define MDFLD_HDCP_STATUS_PRESERVED_BITS BITRANGE(24,31)
++typedef union _mdfld_hdcp_status {
++ uint32_t value;
++
++ struct{
++ uint32_t ainfo : 8; //Bit 7:0
++ uint32_t frame_count : 8; //Bit 15:8
++ uint32_t cipher_hdcp_status : 1; //Bit 16
++ uint32_t cipher_an_status : 1; //Bit 17
++ uint32_t cipher_ri_ready_status : 1; //Bit 18
++ uint32_t cipher_ri_match_status : 1; //Bit 19
++ uint32_t cipher_encrypting_status : 1; //Bit 20
++ uint32_t cipher_ready_for_encryption : 1; //Bit 21
++ uint32_t cipher_mch_id_ready : 1; //Bit 22
++ uint32_t cipher_mac_status : 1; //Bit 23
++ uint32_t UNIQUENAME(Reserved) : 8; //Bit 31:24
++ };
++} mdfld_hdcp_status_t;
++
++
++/* HDCP_RI */
++#define MDFLD_HDCP_RECEIVER_RI_REG 0x61418
++ #define MDFLD_HDCP_RECEIVER_RI_PRESERVED_BITS BITRANGE(16,31)
++typedef union _mdfld_hdcp_receiver_ri {
++ uint32_t value;
++
++ struct{
++ uint32_t ri : 16; //bit 15:0
++ uint32_t UNIQUENAME(Reserved) : 16; //bit 31:16
++ };
++} mdfld_hdcp_receiver_ri_t;
++
++
++/* HDCP_BKSV_HI */
++#define MDFLD_HDCP_BKSV_HI_REG 0x6140C
++ #define MDFLD_HDCP_BKSV_HI_PRESERVED_BITS BITRANGE(8,31)
++typedef union _mdfld_hdcp_bksv_hi {
++ uint32_t value;
++
++ struct{
++ uint32_t bksv_hi : 8; //bit 7:0
++ uint32_t UNIQUENAME(Reserved) : 24; //bit 31:8
++ };
++} mdfld_hdcp_bksv_hi_t;
++
++
++/* HDCP_AKEY_HI */
++#define MDFLD_HDCP_AKEY_HI_REG 0x61424
++ #define MDFLD_HDCP_AKEY_HI_PRESERVED_BITS BITRANGE(20,31)
++typedef union _mdfld_hdcp_akey_hi {
++ uint32_t value;
++
++ struct{
++ uint32_t akey_hi : 20; //bit 7:0
++ uint32_t UNIQUENAME(Reserved) : 12; //bit 31:8
++ };
++} mdfld_hdcp_akey_hi_t;
++
++
++
++/* HDCP_REP: Repeator specific register definitions */
++
++/* Repeater Control register */
++typedef enum _mdfld_hdcp_repeater_status_enum {
++ HDCP_REPEATER_STATUS_IDLE = 0,
++ HDCP_REPEATER_STATUS_BUSY = 1,
++ HDCP_REPEATER_STATUS_RDY_NEXT_DATA = 2,
++ HDCP_REPEATER_STATUS_COMPLETE_NO_MATCH = 4,
++ HDCP_REPEATER_STATUS_COMPLETE_MATCH = 12
++} mdfld_hdcp_repeater_status_en;
++
++typedef enum _mdfld_hdcp_repeater_ctrl_enum {
++ HDCP_REPEATER_CTRL_IDLE = 0,
++ HDCP_REPEATER_32BIT_TEXT_IP = 1,
++ HDCP_REPEATER_COMPLETE_SHA1 = 2,
++ HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP = 4,
++ HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP = 5,
++ HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP = 6,
++ HDCP_REPEATER_32BIT_MO_IP = 7
++} mdfld_hdcp_repeater_ctrl_en;
++
++#define MDFLD_HDCP_REP_REG 0x61444
++ #define MDFLD_HDCP_REP_PRESERVED_BITS BITRANGE(8,31)
++typedef union _mdfld_hdcp_rep {
++ uint32_t value;
++
++ struct{
++ uint32_t repeater_present : 1; //bit 0
++ uint32_t repeater_control : 3; //bit 3:1
++ uint32_t UNIQUENAME(Reserved) : 12; //bit 15:4 BUN#: 07ww44#1
++ const uint32_t repeater_status : 4; //bit 19:16
++ uint32_t UNIQUENAME(Reserved) : 12;//bit 31:20
++ };
++} mdfld_hdcp_rep_t;
++
++/* HDCP_BKSV_HI */
++#define MDFLD_HDCP_AKSV_HI_REG 0x61450
++ #define MDFLD_HDCP_AKSV_HI_PRESERVED_BITS BITRANGE(8,31)
++typedef union _mdfld_hdcp_aksv_hi {
++ uint32_t value;
++
++ struct{
++ uint32_t aksv_hi : 8; //bit 7:0
++ uint32_t UNIQUENAME(Reserved) : 24; //bit 31:8
++ };
++} mdfld_hdcp_aksv_hi_t;
++
++typedef union _mdfld_hdcp_aksv {
++ uint8_t byte[8];
++
++ struct {
++ uint32_t low;
++ mdfld_hdcp_aksv_hi_t hi;
++ } aksv;
++} mdfld_hdcp_aksv_t;
++
++/* These holds part of the hash result from the receiver used for repeaters */
++#define MDFLD_HDCP_VPRIME_H0 0x6142C
++#define MDFLD_HDCP_VPRIME_H1 0x61430
++#define MDFLD_HDCP_VPRIME_H2 0x61434
++#define MDFLD_HDCP_VPRIME_H3 0x61438
++#define MDFLD_HDCP_VPRIME_H4 0x6143C
++
++#define MDFLD_HDCP_SHA1_IN 0x61440
++
++
++
++/* Define of registers that don't need register definitions */
++#define MDFLD_HDCP_INIT_REG 0x61404
++#define MDFLD_HDCP_AN_LOW_REG 0x61410
++#define MDFLD_HDCP_AN_HI_REG 0x61414
++#define MDFLD_HDCP_BKSV_LOW_REG 0x61408
++#define MDFLD_HDCP_AKSV_LOW_REG 0x61454
++/* Akey registers */
++#define MDFLD_HDCP_AKEY_LO_REG 0x6141C
++#define MDFLD_HDCP_AKEY_MED_REG 0x61420
++
++#endif /* MDFLD_HDCP_REG_H */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_hdmi_audio.c
+@@ -0,0 +1,199 @@
++/*
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++
++#include "mdfld_hdmi_audio_if.h"
++
++
++/*
++ * Audio register range 0x69000 to 0x69117
++ */
++
++#define IS_HDMI_AUDIO_REG(reg) ((reg >= 0x69000) && (reg < 0x69118))
++
++/**
++ * mdfld_hdmi_audio_write:
++ * used to write into display controller HDMI audio registers.
++ *
++ */
++static int mdfld_hdmi_audio_write (uint32_t reg, uint32_t val)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ int ret = 0;
++
++ if (IS_HDMI_AUDIO_REG(reg)) {
++ REG_WRITE(reg, val);
++ } else {
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/**
++ * mdfld_hdmi_audio_read:
++ * used to get the register value read from display controller HDMI audio registers.
++ *
++ */
++static int mdfld_hdmi_audio_read (uint32_t reg, uint32_t *val)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ int ret = 0;
++
++ if (IS_HDMI_AUDIO_REG(reg)) {
++ *val = REG_READ(reg);
++ } else {
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/**
++ * mdfld_hdmi_audio_rmw:
++ * used to update the masked bits in display controller HDMI audio registers .
++ *
++ */
++static int mdfld_hdmi_audio_rmw (uint32_t reg, uint32_t val, uint32_t mask)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ int ret = 0;
++ uint32_t val_tmp = 0;
++
++ if (IS_HDMI_AUDIO_REG(reg)) {
++ val_tmp = (val & mask) | (REG_READ(reg) & ~mask);
++ REG_WRITE(reg, val_tmp);
++ } else {
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/**
++ * mdfld_hdmi_audio_get_caps:
++ * used to return the HDMI audio capabilities.
++ * e.g. resolution, frame rate.
++ */
++static int mdfld_hdmi_audio_get_caps (enum had_caps_list get_element, void *capabilities) {
++ int ret = 0;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ switch (get_element) {
++ case HAD_GET_ELD:
++ memcpy(capabilities, &(hdmi_priv->eeld),sizeof(hdmi_eeld_t));
++ break;
++ default:
++ break;
++ }
++
++ return ret;
++}
++
++/**
++ * mdfld_hdmi_audio_set_caps:
++ * used to set the HDMI audio capabilities.
++ * e.g. Audio INT.
++ */
++static int mdfld_hdmi_audio_set_caps (enum had_caps_list set_element, void *capabilties) {
++ struct drm_device *dev = hdmi_priv->dev;
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private;
++ int ret = 0;
++ u32 hdmib;
++ u32 int_masks = 0;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ switch (set_element) {
++ case HAD_SET_ENABLE_AUDIO:
++ hdmib = REG_READ(hdmi_priv->hdmib_reg);
++
++ if ((hdmib & HDMIB_PORT_EN) && hdmi_priv->has_hdmi_sink)
++ hdmib |= HDMI_AUDIO_ENABLE;
++
++ REG_WRITE(hdmi_priv->hdmib_reg, hdmib);
++ REG_READ(hdmi_priv->hdmib_reg);
++ break;
++ case HAD_SET_DISABLE_AUDIO:
++ hdmib = REG_READ(hdmi_priv->hdmib_reg) & ~HDMI_AUDIO_ENABLE;
++ REG_WRITE(hdmi_priv->hdmib_reg, hdmib);
++ REG_READ(hdmi_priv->hdmib_reg);
++ break;
++ case HAD_SET_ENABLE_AUDIO_INT:
++ if (*((u32*)capabilties) & HDMI_AUDIO_UNDERRUN)
++ int_masks |= PIPE_HDMI_AUDIO_UNDERRUN;
++
++ if (*((u32*)capabilties) & HDMI_AUDIO_BUFFER_DONE)
++ int_masks |= PIPE_HDMI_AUDIO_BUFFER_DONE;
++
++ if (dev_priv->hdmi_audio_interrupt_mask != int_masks) {
++ dev_priv->hdmi_audio_interrupt_mask |= int_masks;
++ mdfld_irq_enable_hdmi_audio(dev);
++ }
++
++ break;
++ case HAD_SET_DISABLE_AUDIO_INT:
++ if (*((u32*)capabilties) & HDMI_AUDIO_UNDERRUN)
++ int_masks |= PIPE_HDMI_AUDIO_UNDERRUN;
++
++ if (*((u32*)capabilties) & HDMI_AUDIO_BUFFER_DONE)
++ int_masks |= PIPE_HDMI_AUDIO_BUFFER_DONE;
++
++ if (dev_priv->hdmi_audio_interrupt_mask & int_masks) {
++ dev_priv->hdmi_audio_interrupt_mask &= ~int_masks;
++
++ if (dev_priv->hdmi_audio_interrupt_mask)
++ mdfld_irq_enable_hdmi_audio(dev);
++ else
++ mdfld_irq_disable_hdmi_audio(dev);
++ }
++
++ break;
++ default:
++ break;
++ }
++
++ return ret;
++}
++
++static struct hdmi_audio_registers_ops mdfld_hdmi_audio_reg_ops = {
++ .hdmi_audio_read_register = mdfld_hdmi_audio_read,
++ .hdmi_audio_write_register = mdfld_hdmi_audio_write,
++ .hdmi_audio_read_modify = mdfld_hdmi_audio_rmw,
++};
++
++static struct hdmi_audio_query_set_ops mdfld_hdmi_audio_get_set_ops = {
++ .hdmi_audio_get_caps = mdfld_hdmi_audio_get_caps,
++ .hdmi_audio_set_caps = mdfld_hdmi_audio_set_caps,
++};
++
++int intel_hdmi_audio_query_capabilities (had_event_call_back audio_callbacks, struct hdmi_audio_registers_ops **reg_ops,struct hdmi_audio_query_set_ops **query_ops) {
++ int ret = 0;
++
++ *reg_ops = &mdfld_hdmi_audio_reg_ops;
++ *query_ops = &mdfld_hdmi_audio_get_set_ops;
++ hdmi_priv->mdfld_had_event_callbacks = audio_callbacks;
++
++ return ret;
++
++
++}
++EXPORT_SYMBOL(intel_hdmi_audio_query_capabilities);
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_hdmi_audio_if.h
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++
++#ifndef MDFLD_HDMI_AUDIO_IF_H
++#define MDFLD_HDMI_AUDIO_IF_H
++
++/* HDMI AUDIO INTERRUPT TYPE */
++#define HDMI_AUDIO_UNDERRUN (1UL<<0)
++#define HDMI_AUDIO_BUFFER_DONE (1UL<<1)
++
++enum had_caps_list {
++ HAD_GET_ELD = 1,
++ HAD_GET_SAMPLING_FREQ,
++ HAD_GET_DISPLAY_RATE,
++ HAD_GET_HDCP_STATUS,
++ HAD_GET_AUDIO_STATUS,
++ HAD_SET_ENABLE_AUDIO,
++ HAD_SET_DISABLE_AUDIO,
++ HAD_SET_ENABLE_AUDIO_INT,
++ HAD_SET_DISABLE_AUDIO_INT,
++ OTHERS_TBD,
++};
++
++enum had_event_type {
++ HAD_EVENT_HOT_PLUG = 1,
++ HAD_EVENT_HOT_UNPLUG,
++ HAD_EVENT_MODE_CHANGING,
++ HAD_EVENT_PM_CHANGING,
++ HAD_EVENT_AUDIO_BUFFER_DONE,
++ HAD_EVENT_AUDIO_BUFFER_UNDERRUN,
++};
++
++/**
++ * HDMI Display Controller Audio Interface
++ *
++ */
++typedef int (*had_event_call_back)(enum had_event_type event_type, void * ctxt_info);
++
++struct hdmi_audio_registers_ops{
++ int (*hdmi_audio_read_register)(uint32_t reg_addr, uint32_t *data);
++ int (*hdmi_audio_write_register) (uint32_t reg_addr, uint32_t data);
++ int (*hdmi_audio_read_modify)(uint32_t reg_addr, uint32_t data, uint32_t mask);
++};
++
++struct hdmi_audio_query_set_ops{
++ int (*hdmi_audio_get_caps)(enum had_caps_list query_element , void *capabilties);
++ int (*hdmi_audio_set_caps)(enum had_caps_list set_element , void *capabilties);
++};
++extern int intel_hdmi_audio_query_capabilities (had_event_call_back audio_callbacks, struct hdmi_audio_registers_ops **reg_ops,struct hdmi_audio_query_set_ops **query_ops);
++#endif /* MDFLD_HDMI_AUDIO_IF_H */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/mdfld_intel_hdcp.c
+@@ -0,0 +1,1350 @@
++/*
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++
++#ifdef MDFLD_HDCP
++
++#include "mdfld_hdcp_if.h"
++#include "mdfld_hdcp_reg.h"
++#include "mdfld_hdcp.h"
++
++/*
++ *
++ */
++static struct mid_intel_hdmi_priv *hdmi_priv;
++
++/*
++ * IsValidBKSV:
++ * Checks if the BKSV is valid or not.
++ * A valid BKSV is one that contains 20 0's and 20 1's
++ *
++ */
++static int hdcp_is_valid_bksv(uint8_t* buffer,uint32_t size)
++{
++ uint8_t count = 0;
++ int i =0;
++ uint8_t bksv = 0;
++ uint8_t bit = 0;
++ int ret = 0;
++
++ if(buffer == NULL || size != CP_HDCP_KEY_SELECTION_VECTOR_SIZE)
++ return ret;
++
++ while(i<CP_HDCP_KEY_SELECTION_VECTOR_SIZE)
++ {
++ bksv=buffer[i];
++ while(bksv !=0)
++ {
++ bit = (bksv) & 0x01;
++ if(bit)
++ count++;
++ bksv = bksv >> 1;
++ }
++ i++;
++ }
++
++ if(count == 20)
++ ret = 1;
++
++ return ret;
++}
++
++/*
++ * Gets the current status of HDCP
++ *
++ */
++static int hdcp_is_enabled(void)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ mdfld_hdcp_status_t hdcp_status = {0};
++ int ret = 0;
++
++ if(hdmi_priv->is_hdcp_supported)
++ {
++ hdcp_status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++ ret = hdcp_status.cipher_hdcp_status;
++ }
++
++ return ret;
++}
++
++#define HDCP_PRIMARY_I2C_ADDR 0x74
++/*
++ *
++ * Read HDCP device data from i2c link
++ *
++ */
++static int read_hdcp_port_data(uint8_t offset, uint8_t* buffer, int size)
++{
++ struct i2c_msg msgs[] = {
++ {
++ .addr = HDCP_PRIMARY_I2C_ADDR,
++ .flags = 0,
++ .len = 1,
++ .buf = &offset,
++ }, {
++ .addr = HDCP_PRIMARY_I2C_ADDR,
++ .flags = I2C_M_RD,
++ .len = size,
++ .buf = buffer,
++ }
++ };
++
++ if (i2c_transfer(hdmi_priv->hdmi_i2c_adapter, msgs, 2) == 2)
++ return 1;
++
++ return 0;
++}
++
++/* Read device status from i2c link */
++static int read_hdcp_port(uint32_t read_request_type, uint8_t *buffer, int size)
++{
++ int more_blocks_to_read = 0;
++ uint32_t block_offset=0;
++ int ret = 1;
++ uint8_t offset = 0;
++
++ while(1)
++ {
++ switch(read_request_type)
++ {
++ case RX_TYPE_BSTATUS:
++ offset = RX_BSTATUS_0;
++ break;
++ case RX_TYPE_RI_DATA:
++ offset = RX_RI_HIGH;
++ break;
++ case RX_TYPE_BCAPS:
++ offset = RX_BCAPS;
++ break;
++ case RX_TYPE_REPEATER_KSV_LIST:
++ offset = RX_KSV_FIFO;
++ break;
++ case RX_TYPE_BKSV_DATA:
++ offset = RX_BKSV_0;
++ break;
++ case RX_TYPE_REPEATER_PRIME_V:
++ {
++ offset = block_offset + RX_VPRIME_H0_0;
++ buffer += block_offset;
++ size = 4;
++ if(offset < RX_VPRIME_H4_0)
++ {
++ more_blocks_to_read = 1;
++ block_offset += 4;
++ }
++ }
++ break;
++ default:
++ ret = 0;
++ break;
++ }
++
++ if(ret)
++ {
++ if(!read_hdcp_port_data(offset, buffer, size))
++ {
++ //I2C access failed
++ ret = 0;
++ break;
++ }
++
++ //Check whether more blocks are to be read
++ if(!more_blocks_to_read)
++ {
++ break;
++ }
++ else
++ {
++ more_blocks_to_read = 0;
++ }
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ return ret;
++}
++
++/* write to HDCP device through i2c link */
++static int write_hdcp_port(uint8_t offset, uint8_t *buffer, int size)
++{
++ struct i2c_msg msgs[] = {
++ {
++ .addr = HDCP_PRIMARY_I2C_ADDR,
++ .flags = 0,
++ .len = 1,
++ .buf = &offset,
++ }, {
++ .addr = HDCP_PRIMARY_I2C_ADDR,
++ .flags = 0,
++ .len = size,
++ .buf = buffer,
++ }
++ };
++
++ if (i2c_transfer(hdmi_priv->hdmi_i2c_adapter, msgs, 2) == 2)
++ return 1;
++
++ return 0;
++}
++
++/*
++ *
++ * UpdateRepeaterState : Enables/Disables Repeater
++ *
++ */
++static int hdcp_update_repeater_state(int enable)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ mdfld_hdcp_rep_t hdcp_rep_ctrl_reg;
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_rep_ctrl_reg.repeater_present = enable;
++
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++ return 1;
++
++}
++
++/*
++ * EnableHDCP : Enables/Disables HDCP
++ *
++ */
++static int hdcp_enable(int enable)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ mdfld_hdcp_config_t config;
++ mdfld_hdcp_receiver_ri_t receivers_ri;
++ mdfld_hdcp_status_t status;
++ mdfld_hdcp_rep_t hdcp_repeater;
++ uint32_t max_retry = 0;
++ sqword_t hw_an;
++ sqword_t hw_aksv;
++ sqword_t hw_bksv;
++ uint8_t bcaps=0;
++ uint32_t rx_ri = 0;
++ int ret = 0;
++
++ if(enable == 0)
++ {
++ config.value = REG_READ(MDFLD_HDCP_CONFIG_REG);
++ config.hdcp_config = HDCP_Off;
++ REG_WRITE(MDFLD_HDCP_CONFIG_REG,config.value);
++
++ //Check the status of cipher till it get's turned off
++ // Bug #2808007-Delay required is one frame period.
++ // waiting for 2 VBlanks provides this amount of delay
++ max_retry = 0;
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++ while((status.cipher_hdcp_status || status.cipher_encrypting_status) && max_retry< HDCP_MAX_RETRY_DISABLE)
++ {
++ psb_intel_wait_for_vblank(dev);
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++ max_retry++;
++ }
++
++ // Check for cipher time out
++ if(status.cipher_hdcp_status || status.cipher_encrypting_status)
++ {
++ ret = 0;
++ return ret;
++ }
++
++ // clear the repeater specific bits and set the repeater to idle
++ hdcp_repeater.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_repeater.repeater_present = 0;
++ hdcp_repeater.repeater_control = HDCP_REPEATER_CTRL_IDLE;
++ REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_repeater.value);
++
++ max_retry = HDCP_MAX_RETRY_STATUS;//tbd: not yet finalized
++ while(max_retry--)
++ {
++ hdcp_repeater.value = REG_READ(MDFLD_HDCP_REP_REG);
++
++ if(hdcp_repeater.repeater_status == HDCP_REPEATER_STATUS_IDLE)
++ {
++ ret = 1;
++ break;
++ }
++ }
++
++ // Check for cipher time out
++ if(max_retry == 0)
++ {
++ ret = 0;
++ return 0;
++ }
++
++ // Clear the Ri' register
++ // This is a HW issue because of which the Ri' status bit in HDCP_STATUS
++ // register doesn't get cleared.
++ // refer ro https://vthsd.fm.intel.com/hsd/cantiga/sighting/default.aspx?sighting_id=304464
++ // for details
++ REG_WRITE(MDFLD_HDCP_RECEIVER_RI_REG, 0);
++
++ //Disable the port on which HDCP is enabled
++ REG_WRITE(hdmi_priv->hdmib_reg, REG_READ(hdmi_priv->hdmib_reg) & ~HDMIB_HDCP_PORT);
++ }
++ else
++ {
++ //Generate An
++ config.value = REG_READ(MDFLD_HDCP_CONFIG_REG);
++
++ if (config.hdcp_config != HDCP_Off) {
++ config.hdcp_config = HDCP_Off;
++ REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
++ }
++
++ /* used the jiffies as a random number. */
++ REG_WRITE(MDFLD_HDCP_INIT_REG, (uint32_t) jiffies);
++ REG_WRITE(MDFLD_HDCP_INIT_REG, (uint32_t) (jiffies >> 1));
++ udelay (10);
++
++ config.hdcp_config = HDCP_CAPTURE_AN;
++ REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
++
++ //check the status of cipher before reading an
++ max_retry = HDCP_MAX_RETRY_STATUS;//tbd: not yet finalized
++ while(max_retry--)
++ {
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++ if(status.cipher_an_status)
++ {
++ ret = 1;
++ break;
++ }
++ }
++
++ config.hdcp_config = HDCP_Off;
++ REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
++
++ if(max_retry == 0)
++ return 0;//Cipher timeout, was not able to generate An :(
++
++ //Read An
++ hw_an.u.low_part = REG_READ(MDFLD_HDCP_AN_LOW_REG);
++ hw_an.u.high_part = REG_READ(MDFLD_HDCP_AN_HI_REG);
++
++ hw_aksv.u.low_part = REG_READ(MDFLD_HDCP_AKSV_LOW_REG);
++ hw_aksv.u.high_part = REG_READ(MDFLD_HDCP_AKSV_HI_REG);
++ //stHdcpParams.hwAksv.MajorPart_Low = 0x0361f714;//test data
++ //stHdcpParams.hwAksv.MajorPart_High = 0xb7;
++
++ //write An
++ ret = write_hdcp_port(RX_AN_0, hw_an.byte, 8);
++ if(!ret)
++ return 0;
++
++ //write Aksv
++ ret = write_hdcp_port(RX_AKSV_0, hw_aksv.byte, 5);
++ if(!ret)
++ return 0;
++
++ //Read the Bksv from receiver
++ ret =read_hdcp_port(RX_TYPE_BKSV_DATA, &hw_bksv.byte[0], 5);
++ if(ret)
++ {
++ // Validate BKSV
++ ret = hdcp_is_valid_bksv(&hw_bksv.byte[0], 5);
++ }
++
++ if(!ret)
++ return 0;
++
++ //read the BCaps
++ ret = read_hdcp_port(RX_TYPE_BCAPS, &bcaps, 1);
++ if(!ret)
++ return 0;
++
++ // set repeater bit if receiver connected is a repeater
++ if(bcaps & BIT6)
++ {
++ hdcp_update_repeater_state(1);
++ }
++
++ //Write the BKsv into the encoder
++ REG_WRITE(MDFLD_HDCP_BKSV_LOW_REG, hw_bksv.u.low_part);
++ REG_WRITE(MDFLD_HDCP_BKSV_HI_REG, hw_bksv.u.high_part);
++
++
++ //enable HDCP on this port
++ REG_WRITE(hdmi_priv->hdmib_reg, REG_READ(hdmi_priv->hdmib_reg) | HDMIB_HDCP_PORT);
++
++ //TBD :Check the bStatus, for repeater and set HDCP_REP[1]
++ //Set HDCP_CONFIG to 011 = Authenticate and encrypt
++ config.hdcp_config = HDCP_AUTHENTICATE_AND_ENCRYPT;
++ REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
++
++
++ //At this point of time the Km is created
++
++ //Wait for Ri ready
++ max_retry = HDCP_MAX_RETRY_STATUS;//TBD: Not yet finalized
++ while(max_retry--)
++ {
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++ if(status.cipher_ri_ready_status)
++ break;
++ }
++
++ if(max_retry == 0)
++ return 0;//Cipher timeout, was not able to generate An :(
++
++ //Compare the R0 and Ri
++ //Read the Ri' of receiver
++ ret = read_hdcp_port(RX_TYPE_RI_DATA, (uint8_t*)&rx_ri, 2);
++ if(!ret)
++ return 0;
++
++ //TBD:Have some delay before reading the Ri'
++ //Right now using 100 ms, as per the HDCP spec(Refer HDCP SAS for details)
++ mdelay(HDCP_100MS_DELAY);
++
++ //update the HDCP_Ri' register and read the status reg for confirmation
++ receivers_ri.value = REG_READ(MDFLD_HDCP_RECEIVER_RI_REG);
++ receivers_ri.ri = rx_ri;
++ REG_WRITE(MDFLD_HDCP_RECEIVER_RI_REG, receivers_ri.value);
++
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++
++ //SoftbiosDebugMessage(DBG_CRITICAL,"R Prime = %x\n",dwRxRi);
++ //SoftbiosDebugMessage(DBG_CRITICAL,"HDCP_STATUS = %x\n",stStatus.value);
++ ret = status.cipher_ri_match_status;
++ /*if(GEN4INTHDCPCONTROLLER_HasInterruptOccured(pThis,ePort) == TRUE)
++ {
++ bRet = 0;
++ }*/
++ }
++
++ if(!ret)
++ {
++ //TODO: SoftbiosDebugMessage(DBG_CRITICAL," EnableHDCP failed \n");
++ }
++
++ return ret;
++}
++
++/*
++To obtain receiver specific data. The request type
++is from mdfld_hdcp_rx_data_type_en
++*/
++static int hdcp_get_receiver_data(uint8_t* buffer, uint32_t size, uint32_t rx_data_type)
++{
++ int ret = 0;
++
++ if(buffer)
++ {
++ memset(buffer, 0, size);
++ //Get the Data from receiver
++ ret = read_hdcp_port(rx_data_type, buffer, size);
++ }
++
++ // Validate BKSV and Check if its Valid.
++ if(RX_TYPE_BKSV_DATA == rx_data_type)
++ {
++ ret = hdcp_is_valid_bksv(buffer, size);
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * WaitForNextDataReady : Function Waits for encryption ready
++ *
++ */
++static int hdcp_wait_for_next_data_ready(void)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ mdfld_hdcp_rep_t hdcp_rep_reg;
++ uint32_t i = 0;
++ int ret = 0;
++
++ for(i=0;i<HDCP_MAX_RETRY_STATUS;i++)
++ {
++ hdcp_rep_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++
++ if(HDCP_REPEATER_STATUS_RDY_NEXT_DATA == hdcp_rep_reg.repeater_status)
++ {
++ ret = 1;
++ break;
++ }
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * CompareVPrime : This routine compares the vprime
++ * obtained from receiver with the one generated in
++ * transmitter.
++ *
++ */
++static int hdcp_compare_v_prime(uint32_t* buffer_repeater_v_prime, uint8_t size_in_dword)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ uint32_t value = 0;
++ uint8_t* buffer = (uint8_t*)buffer_repeater_v_prime;
++ int ret = 0;
++ mdfld_hdcp_rep_t hdcp_rep_ctrl_reg;
++ uint32_t i = 0;
++
++ //TBD: To be implemented as part of repeater implementation
++ //Set the repeater's vprime in GMCH
++
++ if(size_in_dword == KSV_SIZE)
++ {
++ memcpy(&value,buffer,4);
++ REG_WRITE(MDFLD_HDCP_VPRIME_H0,value);
++
++ buffer += 4;
++ memcpy(&value,buffer,4);
++ REG_WRITE(MDFLD_HDCP_VPRIME_H1,value);
++
++ buffer += 4;
++ memcpy(&value,buffer,4);
++ REG_WRITE(MDFLD_HDCP_VPRIME_H2,value);
++
++ buffer += 4;
++ memcpy(&value,buffer,4);
++ REG_WRITE(MDFLD_HDCP_VPRIME_H3,value);
++
++ buffer += 4;
++ memcpy(&value,buffer,4);
++ REG_WRITE(MDFLD_HDCP_VPRIME_H4,value);
++
++ if(!hdcp_wait_for_next_data_ready())
++ return 0;
++
++ // Set HDCP_REP to do the comparison
++ // Start transmitter's V calculation
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_COMPLETE_SHA1;
++ REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
++
++ for(i=0;i<HDCP_MAX_RETRY_STATUS;i++)
++ {
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++
++ switch(hdcp_rep_ctrl_reg.repeater_status)
++ {
++ case HDCP_REPEATER_STATUS_COMPLETE_MATCH :
++ ret = 1;
++ break;
++ case HDCP_REPEATER_STATUS_COMPLETE_NO_MATCH :
++ ret = 0;
++ break;
++ case HDCP_REPEATER_STATUS_IDLE :
++ //Should not happen
++ ret = 0;
++ break;
++ //default: Not needed
++ }
++
++ if(hdcp_rep_ctrl_reg.repeater_status != HDCP_REPEATER_STATUS_BUSY)
++ {
++ break;
++ }
++ }
++
++ }
++ else
++ {
++ ret = 0;
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * ComputeTransmitterV : This routine computes transmitter's V prime.
++ * As per HDCP spec 1.3 for HDMI/DVI the BStatus register contains data specific to repeater
++ * sink topology in case sink is a repeater. The same interface is used by HDCP over display port
++ * in which case BInfo contains the relevant data. The variable wBTopologyData represents Bstatus
++ * for HDMI/DVI and BInfo for Display Port
++ *
++ */
++static int hdcp_compute_transmitter_v(ksv_t *ksv_list, uint32_t ksv_list_entries, uint16_t b_topology_data)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ uint32_t num_devices = ksv_list_entries;
++// uint32_t lower_num_bytes_for_sha = 0, upper_num_bytes_for_sha = 0; // This has to be in mutiple of 512 bits
++ uint32_t lower_num_bytes_for_sha = 0;
++ uint32_t num_pad_bytes = 0;
++ uint8_t *buffer = NULL;
++ uint8_t *temp_buffer = NULL;
++ mdfld_hdcp_rep_t hdcp_rep_ctrl_reg;
++ uint32_t value = 0;
++ int ret = 1;
++ uint32_t i = 0;
++ uint32_t rem_text_data = 0, num_mo_bytes_left = 8;
++ uint8_t* temp_data_ptr = NULL;
++ sqword_t buffer_len;
++ uint32_t temp_data = 0;
++
++ //Clear SHA hash generator for new V calculation and set the repeater to idle state
++ REG_WRITE(MDFLD_HDCP_SHA1_IN, 0);
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_CTRL_IDLE;
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++ for(i=0; i<HDCP_MAX_RETRY_STATUS; i++)
++ {
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ if(HDCP_REPEATER_CTRL_IDLE == hdcp_rep_ctrl_reg.repeater_status)
++ {
++ ret = 1;
++ break;
++ }
++ }
++ if(i == HDCP_MAX_RETRY_STATUS)
++ {
++ return 0;
++ }
++
++ // Start the SHA buffer creation
++ //To find the number of pad bytes
++ num_pad_bytes = (64 - (ksv_list_entries*KSV_SIZE + 18)%64);
++
++ // Get the number of bytes for SHA
++ lower_num_bytes_for_sha = KSV_SIZE*num_devices + 18 + num_pad_bytes;//multiple of 64 bytes
++
++ buffer = (uint8_t*)kzalloc(lower_num_bytes_for_sha, GFP_KERNEL);
++
++ if(buffer)
++ {
++ //1. Copy the KSV buffer
++ //Note:The data is in little endian format
++ temp_buffer = buffer;
++ memcpy((void*)temp_buffer,(void*)ksv_list,num_devices*KSV_SIZE);
++ temp_buffer += num_devices*KSV_SIZE;
++
++ //2. Copy the b_topology_data
++ memcpy((void*)temp_buffer,(void*)&b_topology_data,2);
++ //The bstatus is copied in little endian format
++ temp_buffer +=2;
++
++ //3. Offset the pointer buffer by 8 bytes
++ // These 8 bytes are zeroed and are place holders for Mo
++ temp_buffer +=8;
++
++ //4. Pad the buffer with extra bytes
++ // No need to pad the beginning of padding bytes by adding
++ // 0x80. HW automatically appends the same while creating
++ // the buffer.
++ //*temp_buffer = (BYTE)0x80;
++ //temp_buffer++;
++ for(i=0;i<num_pad_bytes;i++)
++ {
++ *temp_buffer = (uint8_t)0x00;
++ temp_buffer++;
++ }
++
++ //5. Construct the length byte
++ buffer_len.quad_part = (unsigned long long)(ksv_list_entries*KSV_SIZE + 2 + 8)*8;
++ temp_data_ptr = (uint8_t*)&buffer_len.quad_part;
++ // Store it in big endian form
++ for(i=1; i<=8; i++)
++ {
++ *temp_buffer = *(temp_data_ptr + 8-i);
++ temp_buffer++;
++ }
++
++ //5.Write a random 64 bit value to the buffer
++ //memcpy(temp_buffer,&upper_num_bytes_for_sha,4);
++ //temp_buffer += 4;
++ //memcpy(temp_buffer,&lower_num_bytes_for_sha,4);
++
++ //Now write the data into the SHA
++ temp_buffer = buffer;
++ for(i=0; i<(KSV_SIZE*num_devices + 2)/4; i++)
++ {
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_32BIT_TEXT_IP;
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++
++ //As per HDCP spec sample SHA is in little endian format. But the
++ //data fed to the cipher needs to be in big endian format for it
++ //to compute it correctly
++ memcpy(&value,temp_buffer,4);
++ value = HDCP_CONVERT_BIG_ENDIAN(value);
++ REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
++ temp_buffer += 4;
++
++ if(!hdcp_wait_for_next_data_ready())
++ return 0;
++ }
++
++ //Write the remaining text data with M0
++ //BUN#: 07ww44#1: text input must be aligned to LSB of the SHA1
++ //in register when inputting partial text and partial M0
++ rem_text_data = (KSV_SIZE*num_devices + 2)%4;
++ if(rem_text_data)
++ {
++ // Update the no of Mo bytes
++ num_mo_bytes_left = num_mo_bytes_left - (4-rem_text_data);
++
++ if(!hdcp_wait_for_next_data_ready())
++ return 0;
++
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ switch(rem_text_data)
++ {
++ case 1:
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP;
++ break;
++ case 2:
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP;
++ break;
++ case 3:
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP;
++ break;
++ default:
++ ret = 0;
++ }
++
++ if(!ret)
++ return ret;
++
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++ memcpy(&value,temp_buffer,4);
++
++ // swap the text data in big endian format leaving the Mo data as it is.
++ // As per the bun the LSB should contain the data in big endian format.
++ // since the M0 specific data is all zeros while it's fed to the cipher.
++ // Those bit don't need to be modified
++ temp_data = 0;
++ for(i=0; i<rem_text_data; i++)
++ {
++ temp_data |= ((value & 0xff<<(i*8))>>(i*8))<<((rem_text_data - i-1)*8);
++ }
++ REG_WRITE(MDFLD_HDCP_SHA1_IN, temp_data);
++ temp_buffer += 4;
++ }
++
++ //Write 4 bytes of Mo
++ if(!hdcp_wait_for_next_data_ready())
++ return 0;
++
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_32BIT_MO_IP;
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++ memcpy(&value,temp_buffer,4);
++ REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
++ temp_buffer += 4;
++ num_mo_bytes_left -= 4;
++
++ if(num_mo_bytes_left)
++ {
++ // The remaining Mo + padding bytes need to be added
++ num_pad_bytes = num_pad_bytes - (4-num_mo_bytes_left);
++
++ //Write 4 bytes of Mo
++ if(!hdcp_wait_for_next_data_ready())
++ return 0;
++
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ switch(num_mo_bytes_left)
++ {
++ case 1:
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP;
++ break;
++ case 2:
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP;
++ break;
++ case 3:
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP;
++ break;
++ default:
++ // should never happen
++ ret = 0;
++ }
++
++ if(!ret)
++ return ret;
++
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++ memcpy(&value,temp_buffer,4);
++ //BUN#:07ww44#1
++ temp_data = 0;
++ for(i=0; i<rem_text_data; i++)
++ {
++ temp_data |= ((value & 0xff<<(i*8))>>(i*8))<<((rem_text_data - i-1)*8);
++ }
++ REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
++ temp_buffer += 4;
++ num_mo_bytes_left = 0;
++ }
++
++ //Write the remaining no of bytes
++ // Remaining data = remaining padding data + 64 bits of length data
++ rem_text_data = num_pad_bytes + 8;
++
++ if(rem_text_data%4)
++ {
++ //Should not happen
++ return 0;
++ }
++
++ for(i=0;i<rem_text_data/4;i++)
++ {
++ REG_WRITE(MDFLD_HDCP_SHA1_IN,temp_data);
++ if(!hdcp_wait_for_next_data_ready())
++ return 0;
++
++ hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
++ hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_32BIT_TEXT_IP;
++ REG_WRITE(MDFLD_HDCP_REP_REG,hdcp_rep_ctrl_reg.value);
++ memcpy(&value,temp_buffer,4);
++ // do the Big endian conversion
++ value = HDCP_CONVERT_BIG_ENDIAN(value);
++ REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
++ temp_buffer += 4;
++ }
++ kfree(buffer);
++
++ ret = 1;
++ }
++ else
++ {
++ return 0;
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * SetHDCPEncryptionLevel:
++ *
++ */
++static uint32_t hdcp_set_encryption_level(cp_parameters_t* cp)
++{
++ uint32_t ret = STATUS_UNSUCCESSFUL;
++ int hdcp_enabled = 0;
++ uint32_t ksv_length = 0;
++ ksv_t bksv;
++
++ //Get the hdcp configuration of the port
++ if(hdmi_priv->is_hdcp_supported)
++ {
++ hdcp_enabled = hdcp_is_enabled();
++ if(((cp->level == CP_PROTECTION_LEVEL_HDCP_OFF)&&(!hdcp_enabled))||
++ ((cp->level == CP_PROTECTION_LEVEL_HDCP_ON)&&hdcp_enabled))
++ {
++ ret = STATUS_SUCCESS;
++ }
++
++ if(ret == STATUS_UNSUCCESSFUL)
++ {
++ //Turn off HDCP
++ if (hdcp_enable(0))
++ ret = STATUS_SUCCESS;
++
++ if((cp->level != CP_PROTECTION_LEVEL_HDCP_OFF) && (ret == STATUS_SUCCESS))
++ {
++ // Check if a Revoked device is attached
++ if(cp->hdcp.ksv_list_length)
++ {
++ //Get the current set of BKSV's from the
++ if(hdcp_get_receiver_data((uint8_t*)bksv.ab_ksv, CP_HDCP_KEY_SELECTION_VECTOR_SIZE, RX_TYPE_BKSV_DATA))
++ {
++ for(ksv_length = 0; ksv_length<cp->hdcp.ksv_list_length; ksv_length++)
++ {
++ if(!memcmp(&bksv,
++ &cp->hdcp.ksv_list[ksv_length],
++ CP_HDCP_KEY_SELECTION_VECTOR_SIZE))
++ {
++ ret = STATUS_REVOKED_HDCP_DEVICE_ATTACHED;
++ break;
++
++ }
++ }
++ }
++ }
++
++
++ if(ret == STATUS_SUCCESS)
++ {
++ //Activate the link layer
++ ret = hdcp_enable(1);
++ }
++
++ }
++ }
++ }
++ return ret;
++}
++
++/*
++ *
++ * GetMaxSupportedAttachedDevices: Returns the
++ * max no attached devices supported on repeater
++ *
++ */
++static uint16_t hdcp_get_max_supported_attached_devices(void)
++{
++ //currently return 128 as specified by the HDCP spec
++ return MAX_HDCP_DEVICES;
++}
++
++/*
++ *
++ * ActivateRepeater: Activates receiver mode
++ *
++ */
++static uint32_t hdcp_activate_repeater(cp_parameters_t* cp)
++{
++ uint32_t ret = STATUS_UNSUCCESSFUL;
++ uint16_t device_count = 0;
++ uint16_t get_max_device_supported = 0;
++ uint8_t* ksv_list=NULL;//[MAX_HDCP_DEVICES] = {0};// * KSV_SIZE] = { 0 };
++ uint16_t i = 0, j = 0, k = 0;
++ uint32_t repeater_prime_v[5];
++ hdcp_rx_bcaps_t b_caps;
++ hdcp_rx_bstatus_t b_status;
++ //TBD: TO be enabled for OPM - Vista
++
++ // Init bcaps
++ b_caps.value = 0;
++ b_status.value = 0;
++
++ for(i = 0; i<5; i++)
++ repeater_prime_v[i] = 0;
++
++ for(i=0;i<1;i++)
++ {
++ ksv_list = (uint8_t*)kzalloc(MAX_HDCP_DEVICES*KSV_SIZE, GFP_KERNEL);
++
++ if(!ksv_list)
++ {
++ ret = STATUS_UNSUCCESSFUL;
++ break;
++ }
++
++ //get the receiver bcaps
++ hdcp_get_receiver_data(&b_caps.value, 1, RX_TYPE_BCAPS);
++
++ // Check for repeater caps
++ if(!(b_caps.is_reapeater))
++ {
++ ret = STATUS_INVALID_PARAMETER;
++ break;
++ }
++
++ // Check if the KSV FIFO is ready
++ if(!(b_caps.ksv_fifo_ready))
++ {
++ // The HDCP repeater is not yet ready to return a KSV list.
++ // Per HDCP spec, the repeater has 5 seconds from when KSVs are exchanged
++ // in the first part of the authentication protocol (HDCPActivateLink)
++ // to be ready to report out downstream KSVs.
++ ret = STATUS_PENDING;
++ break;
++ }
++
++ //Read repeater's Bstatus
++ hdcp_get_receiver_data((uint8_t*)&b_status.value, 2, RX_TYPE_BSTATUS);
++
++ // check if max dev limit is exceeded
++ if(b_status.max_devs_exceeded)
++ {
++ ret = STATUS_INVALID_PARAMETER;
++ break;
++ }
++
++ // Check for topology error. This happens when
++ // more than seven levels of video repeater have been cascaded.
++ if(b_status.max_cascade_exceeded)
++ {
++ ret = STATUS_INVALID_PARAMETER;
++ break;
++ }
++
++ device_count = b_status.device_count;
++ if(device_count == 0)
++ {
++ ret = STATUS_SUCCESS;
++ break;
++ }
++
++ get_max_device_supported = hdcp_get_max_supported_attached_devices();
++
++ if(device_count > get_max_device_supported)
++ {
++ ret = STATUS_INVALID_PARAMETER;
++ break;
++ }
++
++ // Update the cipher saying sink supports repeater capabilities
++ if(!hdcp_update_repeater_state(1))
++ {
++ ret = STATUS_UNSUCCESSFUL;
++ break;
++ }
++
++ // Read the KSV list from the repeater
++ if(!hdcp_get_receiver_data(ksv_list, device_count * KSV_SIZE, RX_TYPE_REPEATER_KSV_LIST))
++ {
++ ret = STATUS_UNSUCCESSFUL;
++ break;
++ }
++
++ for(j=0; j<device_count; j++)
++ {
++ for(k=0; k<cp->hdcp.ksv_list_length; k++)
++ {
++ if(0==memcmp(&ksv_list[j* KSV_SIZE], &cp->hdcp.ksv_list[k], CP_HDCP_KEY_SELECTION_VECTOR_SIZE))
++ {
++ ret = STATUS_REVOKED_HDCP_DEVICE_ATTACHED;
++ break;
++ }
++ }
++ }
++
++ if(!hdcp_compute_transmitter_v((ksv_t *)ksv_list,device_count,b_status.value))
++ {
++ ret = STATUS_UNSUCCESSFUL;
++ break;
++ }
++
++ //Get the HDCP receiver's V' value (20 bytes in size)
++ if(!hdcp_get_receiver_data((uint8_t*)repeater_prime_v, KSV_SIZE*4, RX_TYPE_REPEATER_PRIME_V))
++ {
++ ret = STATUS_UNSUCCESSFUL;
++ break;
++ }
++
++
++ if(!hdcp_compare_v_prime(repeater_prime_v, KSV_SIZE))
++ {
++ //set hdcp encryption level to 0
++ hdcp_update_repeater_state(0);
++ hdcp_enable(0);
++ ret = STATUS_UNSUCCESSFUL;
++ }
++ else
++ {
++ ret = STATUS_SUCCESS;
++ }
++ }
++
++ if(ksv_list)
++ {
++ kfree(ksv_list);
++ ksv_list = NULL;
++ }
++
++
++ return ret;
++}
++
++/*
++ * IsHDCPRepeater : Reads the caps register and informs
++ * whether the received is a repeater
++ *
++ */
++static int hdcp_is_repeater(int *is_repeater)
++{
++ int ret = 0;
++ hdcp_rx_bcaps_t b_caps;
++
++ //Init
++ b_caps.value = 0;
++
++ ret = hdcp_get_receiver_data(&b_caps.value, 1, RX_TYPE_BCAPS);
++ if(ret)
++ {
++ *is_repeater = b_caps.is_reapeater;
++ }
++
++ return ret;
++}
++
++/* Get's the current link status */
++static int hdcp_get_link_status(void)
++{
++ struct drm_device *dev = hdmi_priv->dev;
++ int ret = 0;
++ uint32_t rx_ri = 0;
++ mdfld_hdcp_receiver_ri_t receivers_ri;
++ mdfld_hdcp_status_t status;
++ uint32_t max_count = 0;
++
++ max_count = HDCP_MAX_RI_QUERY_COUNT;
++ while(max_count)
++ {
++ max_count--;
++
++ //Read the Ri' of receiver
++ ret = read_hdcp_port(RX_TYPE_RI_DATA,(uint8_t*)&rx_ri,2);
++ if(!ret)
++ break; // I2C access failed
++
++ //update the HDCP_Ri' register and read the status reg for confirmation
++ receivers_ri.value = REG_READ(MDFLD_HDCP_RECEIVER_RI_REG);
++ receivers_ri.ri = rx_ri;
++ REG_WRITE(MDFLD_HDCP_RECEIVER_RI_REG, receivers_ri.value);
++
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++
++ ret = status.cipher_ri_match_status;
++ if(ret)
++ {
++ //Ri and Ri' matches, hence reciver is a authentic one :)
++ break;
++ }
++ else
++ {
++ //The Ri changes every 128th frame.Hence if the Ri check fails
++ //that means the sink has updated the Ri value and that can happen
++ //every 128th frame. In that case we wait for the next frame count.
++ //Wait should be around 16 ms.
++ while((status.frame_count&HDCP_NEXT_RI_FRAME) == HDCP_NEXT_RI_FRAME)
++ {
++ status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
++ }
++ }
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * GetHDCPEncryptionLevel:
++ *
++ */
++static void hdcp_get_encryption_level(cp_parameters_t* cp)
++{
++
++ if(hdcp_is_enabled())
++ {
++ cp->level = CP_PROTECTION_LEVEL_HDCP_ON;
++ }
++ else
++ {
++ cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
++ }
++
++ return ;
++}
++
++/*
++ *
++ * GetCPData: Get Content protection Data
++ * based upon the request from CP
++ *
++ */
++uint32_t hdcp_get_cp_data(cp_parameters_t* cp)
++{
++ uint32_t ret = STATUS_SUCCESS;
++ int is_repeater = 0;
++
++ if((cp->protect_type_mask & CP_PROTECTION_TYPE_HDCP))
++ {
++ //Check whether HDCP is on
++ hdcp_get_encryption_level(cp);
++
++ if (cp->level != CP_PROTECTION_LEVEL_HDCP_OFF)
++ {
++ // see if the link is valid, do it by authenticating
++ if(!hdcp_get_link_status())
++ {
++ // Encryption setting failed; swtich off the encryption
++ cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
++ hdcp_set_encryption_level(cp);
++ ret = STATUS_UNSUCCESSFUL;
++ }
++ }
++ //else
++ //{
++ //HDCP is off
++ //}
++#if 0 //Don't need this for client
++ //Get the BKSv and repeater status. This has to be returned irrespective of
++ //HDCP is ON or Not
++ if(!hdcp_get_receiver_data((uint8_t*)(cp->hdcp.bksv.ab_ksv), CP_HDCP_KEY_SELECTION_VECTOR_SIZE, RX_TYPE_BKSV_DATA))
++ {
++ cp->hdcp.is_repeater = 0;
++ memset(&(cp->hdcp.bksv), 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ ret = STATUS_DATA_ERROR;
++ }
++ else
++ {
++ // This is via opregion. This will return all zeros in production mode
++ // Get the AKSV
++ if(hdcp_get_aksv(&aksv))
++ {
++ memcpy(cp->hdcp.aksv.ab_ksv, aksv.uc_aksv, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ else // if failed return all zeros
++ {
++ memset(&cp->hdcp.aksv, 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ }
++#endif
++ if(ret != STATUS_DATA_ERROR)
++ {
++ if(hdcp_is_repeater(&is_repeater))
++ {
++ cp->hdcp.is_repeater = is_repeater;
++ }
++ else
++ {
++ cp->hdcp.is_repeater = 0;
++ ret = STATUS_DATA_ERROR;
++ }
++ }
++ }
++#if 0
++ else if(cp->protect_type_mask == CP_PROTECTION_TYPE_NONE)// report repeater capability+BKSV for this mask
++ {
++ if(!hdcp_get_receiver_data((uint8_t*)(cp->hdcp.bksv.ab_ksv), CP_HDCP_KEY_SELECTION_VECTOR_SIZE, RX_TYPE_BKSV_DATA))
++ {
++ cp->hdcp.is_repeater = 0;
++ memset(&(cp->hdcp.bksv), 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ else if(hdcp_is_repeater( &is_repeater))
++ {
++ cp->hdcp.is_repeater = is_repeater;
++ }
++ else
++ {
++ cp->hdcp.is_repeater = 0;
++ }
++
++ // Get the AKSV
++ if(hdcp_get_aksv(&aksv))
++ {
++ memcpy(cp->hdcp.aksv.ab_ksv, aksv.uc_aksv, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ else // if failed return all zeros
++ {
++ memset(&cp->hdcp.aksv, 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ }
++#endif
++ else //Invalid mask
++ {
++ //assert(0);
++
++ cp->hdcp.is_repeater = 0;
++ //memset(&(cp->hdcp.bksv), 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++
++ //Note this data needs to be sent irrespective of any unsupported mask
++ if(ret == STATUS_SUCCESS)
++ {
++ cp->protect_type_mask |= CP_PROTECTION_TYPE_HDCP;
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * SetCPData: Enables/Disables Content protection
++ * based upon the request from CP
++ *
++ */
++uint32_t hdcp_set_cp_data(cp_parameters_t* cp)
++{
++ uint32_t ret = STATUS_UNSUCCESSFUL;
++ int is_repeater = 0;
++
++ if(cp->protect_type_mask & CP_PROTECTION_TYPE_HDCP)
++ {
++ // Get Receiver's repeater status
++ // Note:- Reporting back Repeater status in SetCP Data.
++ // This is because the analyzer for CTS, acts as repeater only
++ // when the test is running, so notifying this back to opm in
++ // SetProtectionLevel.
++ if(hdcp_is_repeater(&is_repeater))
++ {
++ cp->hdcp.is_repeater = is_repeater;
++ }
++
++ // Second step flag is if Repeater support needs to be enabled
++ if(cp->hdcp.perform_second_step)
++ {
++ ret = hdcp_activate_repeater(cp);
++ if((ret != STATUS_SUCCESS) && (ret != STATUS_PENDING))
++ {
++ // Encryption setting failed; switch off the encryption
++ cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
++ hdcp_set_encryption_level(cp);
++ }
++ }
++ else
++ {
++ ret = hdcp_set_encryption_level(cp);
++
++ if(ret != STATUS_SUCCESS)
++ {
++ // Encryption setting failed; swtich off the encryption
++ cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
++ hdcp_set_encryption_level(cp);
++ }
++ }
++
++ if(ret == STATUS_SUCCESS)
++ {
++#if 0 //Do need this for client
++ // read the bksv
++ if(!hdcp_get_receiver_data((uint8_t*)(cp->hdcp.bksv.ab_ksv), CP_HDCP_KEY_SELECTION_VECTOR_SIZE, RX_TYPE_BKSV_DATA))
++ {
++ cp->hdcp.is_repeater = 0;
++ memset(&(cp->hdcp.bksv), 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ else
++ {
++ // read aksv
++ if(hdcp_get_aksv(&aksv))
++ {
++ memcpy(cp->hdcp.aksv.ab_ksv,aksv.uc_aksv, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ else // if failed return all zeros
++ {
++ memset(&cp->hdcp.aksv, 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
++ }
++ }
++#endif
++ cp->protect_type_mask |= CP_PROTECTION_TYPE_HDCP;
++ }
++ }
++ else
++ {
++ // No other calls are handled
++ return STATUS_SUCCESS;
++ }
++
++ return ret;
++}
++
++void mdfld_hdcp_init(struct mid_intel_hdmi_priv *p_hdmi_priv)
++{
++ hdmi_priv = p_hdmi_priv;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/msvdx_power.c
+@@ -0,0 +1,164 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: binglin.chen@intel.com>
++ *
++ */
++
++#include "msvdx_power.h"
++#include "psb_msvdx.h"
++#include "psb_drv.h"
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++static PVRSRV_ERROR DevInitMSVDXPart1(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ /* register power operation function */
++ /* FIXME: this should be in part2 init function, but
++ * currently here only OSPM needs IMG device... */
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ MSVDXPrePowerState,
++ MSVDXPostPowerState,
++ MSVDXPreClockSpeedChange,
++ MSVDXPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_ON,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitMSVDXPart1: failed to "
++ "register device with power manager"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevDeInitMSVDX(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++
++ /* should deinit all resource */
++
++ eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ /* version check */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_MSVDX;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_VIDEO;
++
++ psDeviceNode->pfnInitDevice = DevInitMSVDXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitMSVDX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = MSVDXDevInitCompatCheck;
++
++ psDeviceNode->pfnDeviceISR = psb_msvdx_interrupt;
++ psDeviceNode->pvISRData = (IMG_VOID *)gpDrmDevice;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* ask for a change not power on*/
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ MSVDX_NEW_PMSTATE(gpDrmDevice, msvdx_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ psb_msvdx_save_context(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* if ask for change & current status is not on */
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ /* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ MSVDX_NEW_PMSTATE(gpDrmDevice, msvdx_priv, PSB_PMSTATE_POWERUP);
++
++ /* context restore */
++ psb_msvdx_restore_context(gpDrmDevice);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/msvdx_power.h
+@@ -0,0 +1,48 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: binglin.chen@intel.com>
++ *
++ */
++
++#ifndef MSVDX_POWER_H_
++#define MSVDX_POWER_H_
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++extern struct drm_device *gpDrmDevice;
++
++/* function define */
++PVRSRV_ERROR MSVDXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR MSVDXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* power function define */
++PVRSRV_ERROR MSVDXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXInitOSPM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* !MSVDX_POWER_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/pnw_topaz.c
+@@ -0,0 +1,889 @@
++/**
++ * file pnw_topaz.c
++ * TOPAZ I/O operations and IRQ handling
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* include headers */
++/* #define DRM_DEBUG_CODE 2 */
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "pnw_topaz.h"
++#include "psb_powermgmt.h"
++#include "pnw_topaz_hw_reg.h"
++#include "lnc_topaz.h"
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#define TOPAZ_MAX_COMMAND_IN_QUEUE 0x1000
++//#define SYNC_FOR_EACH_COMMAND
++/* static function define */
++static int pnw_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd);
++static int pnw_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq);
++/*static int pnw_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);*/
++static int pnw_topaz_dequeue_send(struct drm_device *dev);
++static int pnw_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence);
++
++static void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t core_id, uint32_t kick_count);
++
++IMG_BOOL pnw_topaz_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ uint32_t clr_flag;
++ struct pnw_topaz_private *topaz_priv;
++ uint32_t topaz_stat;
++ uint32_t cur_seq, cmd_id;
++
++ PSB_DEBUG_IRQ("Got an TopazSC interrupt\n");
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: TOPAZ %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++
++ /*if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }*/
++
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++
++ /*TODO : check if topaz is busy*/
++ /*topaz_priv->topaz_hw_busy = REG_READ(0x20D0) & (0x1 << 11)*/;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat, 0);
++ clr_flag = pnw_topaz_queryirq(dev);
++
++ pnw_topaz_clearirq(dev, clr_flag);
++
++ TOPAZ_MTX_WB_READ32(topaz_priv->topaz_sync_addr, 0, MTX_WRITEBACK_CMDWORD, &cmd_id);
++ cmd_id = (cmd_id & 0x7f); /* CMD ID */
++ if(cmd_id != MTX_CMDID_NULL)
++ return IMG_TRUE;
++
++ TOPAZ_MTX_WB_READ32(topaz_priv->topaz_sync_addr, 0, MTX_WRITEBACK_VALUE, &cur_seq);
++
++ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x(fence)\n",
++ cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
++
++ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
++
++ /* save frame skip flag for query */
++ /*topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);*/
++
++ topaz_priv->topaz_busy = 1;
++ pnw_topaz_dequeue_send(dev);
++
++ if (drm_topaz_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
++
++ return IMG_TRUE;
++}
++
++//#define PSB_DEBUG_GENERAL DRM_ERROR
++static int pnw_submit_encode_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long irq_flags;
++ int ret = 0;
++ void *cmd;
++ uint32_t tmp;
++ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
++
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ /* #.# load fw to driver */
++ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fwsc.bin\n");
++ ret = pnw_topaz_init_fw(dev);
++ if (ret != 0) {
++ /* FIXME: find a proper return value */
++ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fwsc.bin fail,"
++ "ensure udevd is configured correctly!\n");
++
++ return -EFAULT;
++ }
++ topaz_priv->topaz_fw_loaded = 1;
++ }
++
++ tmp = atomic_cmpxchg(&dev_priv->topaz_mmu_invaldc, 1, 0);
++ if (tmp == 1)
++ pnw_topaz_mmu_flushcache(dev_priv);
++
++ /* # schedule watchdog */
++ /* psb_schedule_watchdog(dev_priv); */
++
++ /* # spin lock irq save [msvdx_lock] */
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++
++ /* # if topaz need to reset, reset it */
++ if (topaz_priv->topaz_needs_reset) {
++ /* #.# reset it */
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
++
++ if (pnw_topaz_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("TOPAZ: reset failed.\n");
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
++
++ /* #.# upload firmware */
++ if (pnw_topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ }
++
++ if (!topaz_priv->topaz_busy) {
++ /* # direct map topaz command if topaz is free */
++ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
++ sequence);
++
++ topaz_priv->topaz_busy = 1;
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = pnw_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
++ return ret;
++ }
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
++ sequence);
++ cmd = NULL;
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = pnw_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (cmd == NULL || ret) {
++ DRM_ERROR("TOPAZ: map command for save fialed\n");
++ return ret;
++ }
++
++ ret = pnw_topaz_save_command(dev, cmd, cmd_size, sequence);
++ if (ret)
++ DRM_ERROR("TOPAZ: save command failed\n");
++ }
++
++ return ret;
++}
++
++static int pnw_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pnw_topaz_cmd_queue *topaz_cmd;
++ unsigned long irq_flags;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
++ sequence);
++
++ topaz_cmd = kzalloc(sizeof(struct pnw_topaz_cmd_queue),
++ GFP_KERNEL);
++ if (topaz_cmd == NULL) {
++ mutex_unlock(&topaz_priv->topaz_mutex);
++ DRM_ERROR("TOPAZ: out of memory....\n");
++ return -ENOMEM;
++ }
++
++ topaz_cmd->cmd = cmd;
++ topaz_cmd->cmd_size = cmd_size;
++ topaz_cmd->sequence = sequence;
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
++ if (!topaz_priv->topaz_busy) {
++ /* topaz_priv->topaz_busy = 1; */
++ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
++ pnw_topaz_dequeue_send(dev);
++ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
++ }
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ return 0;
++}
++
++
++int pnw_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence = NULL;
++ int ret;
++
++ PSB_DEBUG_GENERAL("TOPAZ : enter %s cmdsize: %d\n", __FUNCTION__,
++ arg->cmdbuf_size);
++
++ ret = pnw_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, fence);
++ if (ret)
++ return ret;
++
++ /* workaround for interrupt issue */
++ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
++ validate_list, fence_arg, &fence);
++
++ if (fence)
++ ttm_fence_object_unref(&fence);
++
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ PSB_DEBUG_GENERAL("TOPAZ exit %s\n", __FUNCTION__);
++ return 0;
++}
++
++int pnw_wait_on_sync(struct drm_psb_private *dev_priv,
++ uint32_t sync_seq,
++ uint32_t *sync_p)
++{
++ int count = 10000;
++ if (sync_p == NULL)
++ {
++ DRM_ERROR("TOPAZ: pnw_wait_on_sync invalid memory address\n ");
++ return -1;
++ }
++
++ while (count && (sync_seq != *sync_p)) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (sync_seq != *sync_p)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
++ sync_seq, *sync_p);
++ return -EBUSY;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
++ return 0;
++}
++
++int
++pnw_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd)
++{
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ bool is_iomem;
++ int ret;
++ unsigned char *cmd_start, *tmp;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
++ &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
++ return ret;
++ }
++ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + cmd_page_offset;
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *topaz_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
++ ret = pnw_topaz_send(dev, cmd_start, cmd_size, sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: commit commands failed.\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
++ cmd_size, sequence, copy_cmd);
++
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++
++int pnw_topaz_kick_null_cmd(struct drm_psb_private *dev_priv,
++ uint32_t core_id,
++ uint32_t sync_seq,
++ uint8_t irq_enable)
++{
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t cur_free_space;
++ struct topaz_cmd_header cur_cmd_header;
++ int ret;
++
++ POLL_TOPAZ_FREE_FIFO_SPACE(4, 100, 10000, &cur_free_space);
++ if (ret) {
++ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
++ return ret;
++ }
++
++ cur_cmd_header.core = core_id;
++ cur_cmd_header.seq = sync_seq,
++ cur_cmd_header.enable_interrupt = ((irq_enable == 0) ? 0 : 1);
++ cur_cmd_header.id = MTX_CMDID_NULL;
++
++ topaz_priv->topaz_cmd_count %= MAX_TOPAZ_CMD_COUNT;
++ PSB_DEBUG_GENERAL("TOPAZ: free FIFO space %d\n",
++ cur_free_space);
++ PSB_DEBUG_GENERAL("TOPAZ: write 4 words to FIFO:"
++ "0x%08x,0x%08x,0x%08x,0x%08x\n",
++ cur_cmd_header.val,
++ 0,
++ topaz_priv->topaz_sync_offset,
++ cur_cmd_header.seq);
++
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ cur_cmd_header.val);
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ 0);
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ topaz_priv->topaz_sync_offset);
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ sync_seq);
++
++ PSB_DEBUG_GENERAL("TOPAZ: Write back value for NULL CMD is %d\n",
++ sync_seq);
++
++ topaz_mtx_kick(dev_priv, 0, 1);
++
++ return 0;
++}
++
++int
++pnw_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++ unsigned char *command = (unsigned char *) cmd;
++ struct topaz_cmd_header *cur_cmd_header;
++ uint32_t cur_cmd_size = 4, cur_cmd_id, cur_free_space = 0;
++ uint32_t codec;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t reg_off, reg_val, reg_cnt;
++ uint32_t *p_command;
++
++ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
++
++ while (cmd_size > 0) {
++ cur_cmd_header = (struct topaz_cmd_header *) command;
++ cur_cmd_id = cur_cmd_header->id;
++ PSB_DEBUG_GENERAL("TOPAZ: %s: \n", cmd_to_string(cur_cmd_id));
++
++ switch (cur_cmd_id) {
++ case MTX_CMDID_SW_NEW_CODEC:
++ codec = *((uint32_t *) cmd + 1);
++ topaz_priv->frame_h = (uint16_t) ((*((uint32_t *) cmd + 2)) & 0xffff) ;
++ topaz_priv->frame_w = (uint16_t) (((*((uint32_t *) cmd + 2)) & 0xffff0000) >> 16) ;
++ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d), width %d, height %d\n",
++ codec_to_string(codec), codec, topaz_priv->frame_w, topaz_priv->frame_h);
++ if (pnw_topaz_setup_fw(dev, codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++ topaz_priv->topaz_cur_codec = codec;
++ cur_cmd_size = 3;
++ break;
++
++ case MTX_CMDID_SW_ENTER_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ cur_cmd_size = 1;
++ break;
++
++ case MTX_CMDID_SW_LEAVE_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ cur_cmd_size = 1;
++ break;
++
++ case MTX_CMDID_SW_WRITEREG:
++ p_command = (uint32_t *)(command);
++ p_command++;
++ cur_cmd_size = *p_command;
++ p_command++;
++ PSB_DEBUG_GENERAL("TOPAZ: Start to write %d Registers\n", cur_cmd_size);
++ if (cur_cmd_size > (cmd_size / 4))
++ {
++ DRM_ERROR("TOPAZ: Wrong number of write operations.Exceed command buffer.(%d)\n", (int)(cmd_size / 4));
++ goto out;
++
++ }
++
++ for (reg_cnt = 0; reg_cnt < cur_cmd_size; reg_cnt++)
++ {
++ reg_off = *p_command;
++ p_command++;
++ reg_val = *p_command;
++ p_command++;
++ if (reg_off > TOPAZSC_REG_OFF_MAX)
++ DRM_ERROR("TOPAZ: Ignore write (0x%08x) to register 0x%08x\n", reg_val, reg_off);
++ else
++ {
++ PSB_DEBUG_GENERAL("TOPAZ: write (0x%08x) to register 0x%08x\n",
++ reg_val, reg_off);
++ MM_WRITE32(0, reg_off, reg_val);
++ }
++ }
++ cur_cmd_size *= 2; /* Reg_off and reg_val are stored in a pair of words*/
++ cur_cmd_size += 2; /* Header size, 2 words */
++ break;
++ case MTX_CMDID_PAD:
++ /*Ignore this command, which is used to skip some commands in user space*/
++ cur_cmd_size = 4;
++ break;
++ /* ordinary commmand */
++ case MTX_CMDID_START_PIC:
++ case MTX_CMDID_DO_HEADER:
++ case MTX_CMDID_ENCODE_SLICE:
++ case MTX_CMDID_END_PIC:
++ case MTX_CMDID_SETQUANT:
++ case MTX_CMDID_RESET_ENCODE:
++ case MTX_CMDID_ISSUEBUFF:
++ case MTX_CMDID_SETUP:
++ cur_cmd_header->seq = topaz_priv->topaz_cmd_count++;
++ cur_cmd_header->enable_interrupt = 0;
++ cur_cmd_size = 4;
++ if (cur_free_space < cur_cmd_size){
++ POLL_TOPAZ_FREE_FIFO_SPACE(4, 100, 10000, &cur_free_space);
++ if (ret) {
++ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
++ goto out;
++ }
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: free FIFO space %d\n",
++ cur_free_space);
++ PSB_DEBUG_GENERAL("TOPAZ: write 4 words to FIFO:"
++ "0x%08x,0x%08x,0x%08x,0x%08x\n",
++ cur_cmd_header->val,
++ *((uint32_t *)(command) + 1),
++ TOPAZ_MTX_WB_OFFSET(topaz_priv->topaz_wb_offset,
++ cur_cmd_header->core),
++ cur_cmd_header->seq);
++
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ cur_cmd_header->val);
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ *((uint32_t *)(command) + 1));
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ TOPAZ_MTX_WB_OFFSET(topaz_priv->topaz_wb_offset,
++ cur_cmd_header->core));
++ TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
++ cur_cmd_header->seq);
++
++ cur_free_space -= 4;
++ /*topaz_priv->aui32LastSync[cur_cmd_header->seq % 2][cur_cmd_header->core]*/
++ topaz_priv->aui32LastSync[0][cur_cmd_header->core]
++ = cur_cmd_header->seq;
++ topaz_priv->topaz_cmd_count %= MAX_TOPAZ_CMD_COUNT;
++ topaz_mtx_kick(dev_priv, 0, 1);
++#ifdef SYNC_FOR_EACH_COMMAND
++ pnw_wait_on_sync(dev_priv, cur_cmd_header->seq,
++ topaz_priv->topaz_mtx_wb + cur_cmd_header->core * MTX_WRITEBACK_DATASIZE_ROUND + 1);
++#endif
++ break;
++ default:
++ DRM_ERROR("TOPAZ: unsupported command id: %x\n", cur_cmd_id);
++ goto out;
++ }
++
++ /*cur_cmd_size indicate the number of words of current command*/
++ command += cur_cmd_size*4;
++ cmd_size -= cur_cmd_size*4;
++ }
++#if PNW_TOPAZ_NO_IRQ
++ PSB_DEBUG_GENERAL("reset NULL writeback to 0xffffffff,topaz_priv->topaz_sync_addr=0x%p\n",
++ topaz_priv->topaz_sync_addr);
++
++ *((uint32_t *)topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE) = ~0;
++ pnw_topaz_kick_null_cmd(dev_priv, 0, sync_seq, 0);
++
++ pnw_wait_on_sync(dev_priv, sync_seq,
++ topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE);
++
++ PSB_DEBUG_GENERAL("Kicked command with sequence 0x%08x, and polling it, got 0x%08x\n",
++ sync_seq, *(topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE));
++ PSB_DEBUG_GENERAL("Can handle unfence here, but let fence polling do it\n");
++ topaz_priv->topaz_busy = 0;
++#else
++ PSB_DEBUG_GENERAL("Kick command with sequence %x\n", sync_seq);
++ pnw_topaz_kick_null_cmd(dev_priv, 0, sync_seq, 1);
++#endif
++out:
++ return ret;
++}
++
++
++
++#if 0
++static int pnw_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
++{
++ uint32_t cmd_size = 4;/*= cur_cmd_header->size;*/
++ uint32_t read_index, write_index;
++ const uint32_t *cmd_pointer = (uint32_t *) cmd;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ int ret = 0;
++
++ /* <msvdx does> # enable all clock */
++
++ write_index = topaz_priv->topaz_cmd_windex;
++ if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) {
++ int free_space = topaz_priv->topaz_ccb_size - write_index;
++
++ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
++ if (free_space > 0) {
++ struct topaz_cmd_header pad_cmd;
++
++ pad_cmd.id = MTX_CMDID_NULL;
++ /*pad_cmd.size = free_space;*/
++ pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
++ " seq (0x%04x)\n",
++ pad_cmd.seq);
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
++#else
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ pad_cmd.val);
++ topaz_priv->topaz_cmd_windex++;
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ POLL_WB_SEQ(dev_priv, pad_cmd.seq);
++ ++topaz_priv->topaz_cmd_seq;
++ }
++ POLL_WB_RINDEX(dev_priv, 0);
++ if (ret == 0)
++ topaz_priv->topaz_cmd_windex = 0;
++ else {
++ DRM_ERROR("TOPAZ: poll rindex timeout\n");
++ return ret; /* HW may hang, need reset */
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
++ }
++
++ read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */
++ write_index = topaz_priv->topaz_cmd_windex;
++
++ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
++ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ while (cmd_size > 0) {
++ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
++ --cmd_size;
++ }
++#else
++ while (cmd_size > 0) {
++ topaz_write_mtx_mem(
++ dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ *cmd_pointer++);
++ topaz_priv->topaz_cmd_windex++;
++ --cmd_size;
++ }
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++#if 0
++ DRM_UDELAY(1000);
++ pnw_topaz_clearirq(dev,
++ pnw_topaz_queryirq(dev));
++ PNW_TRACEL("TOPAZ: after clear, query again\n");
++ pnw_topaz_queryirq(dev_priv);
++#endif
++
++ return ret;
++}
++#endif
++
++int pnw_topaz_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pnw_topaz_cmd_queue *topaz_cmd = NULL;
++ int ret;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return 0;
++ }
++
++ topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
++ struct pnw_topaz_cmd_queue, head);
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
++ ret = pnw_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
++ topaz_cmd->sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: pnw_topaz_send failed.\n");
++ ret = -EINVAL;
++ }
++
++ list_del(&topaz_cmd->head);
++ kfree(topaz_cmd->cmd);
++ kfree(topaz_cmd
++ );
++
++ return ret;
++}
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t core_id, uint32_t kick_count)
++{
++ PSB_DEBUG_GENERAL("TOPAZ: kick core(%d) mtx count(%d).\n", core_id, kick_count);
++ topaz_set_mtx_target(dev_priv,core_id, 0);
++ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count, core_id);
++ return;
++}
++
++int pnw_check_topaz_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[LNC_ENGINE_ENCODE];
++
++ if (topaz_priv->topaz_fw_loaded == 0)
++ return 0;
++
++ if (topaz_priv->topaz_busy)
++ return -EBUSY;
++
++ if (topaz_priv->topaz_hw_busy) {
++ PSB_DEBUG_PM("TOPAZ: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ if (!list_empty(&fc->ring)) {
++ PSB_DEBUG_PM("TOPAZ: %s, fence is unhandled\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0; /* we think it is idle */
++}
++
++
++int pnw_video_get_core_num(struct drm_device *dev, uint64_t user_pointer)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ int ret;
++
++ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
++ &topaz_priv->topaz_num_cores, sizeof(topaz_priv->topaz_num_cores));
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++
++
++}
++
++int pnw_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ int ret;
++
++ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
++ &topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++static void pnw_topaz_flush_cmd_queue(struct pnw_topaz_private *topaz_priv)
++{
++ struct pnw_topaz_cmd_queue *entry, *next;
++
++ /* remind to reset topaz */
++ topaz_priv->topaz_needs_reset = 1;
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return;
++ }
++
++ /* flush all command in queue */
++ list_for_each_entry_safe(entry, next,
++ &topaz_priv->topaz_queue,
++ head) {
++ list_del(&entry->head);
++ kfree(entry->cmd);
++ kfree(entry);
++ }
++
++ return;
++}
++
++void pnw_topaz_handle_timeout(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct drm_device *dev =
++ container_of((void *)dev_priv, struct drm_device, dev_private);
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (IS_MRST(dev))
++ return lnc_topaz_handle_timeout(fdev);
++ pnw_topaz_flush_cmd_queue(topaz_priv);
++}
++/*
++inline int psb_try_power_down_topaz(struct drm_device *dev)
++{
++ ospm_apm_power_down_topaz(dev);
++ return 0;
++}*/
++
++void pnw_map_topaz_reg(struct drm_device *dev)
++{
++ unsigned long resource_start;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MRST(dev) && !dev_priv->topaz_disabled) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ DRM_ERROR("failed to map TOPAZ register address\n");
++ }
++
++ return;
++}
++
++void pnw_unmap_topaz_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ return;
++}
++
++void pnw_topaz_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask | _PNW_IRQ_TOPAZ_FLAG; */
++
++ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
++
++ /* Only enable the master core IRQ*/
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
++ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT),
++ 0);
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++void pnw_topaz_disableirq(struct drm_device *dev)
++{
++
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask & (~_PNW_IRQ_TOPAZ_FLAG); */
++
++ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0, 0);
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/pnw_topaz.h
+@@ -0,0 +1,150 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PNW_TOPAZ_H_
++#define _PNW_TOPAZ_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#define PNW_TOPAZ_NO_IRQ 0
++#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
++#define MAX_TOPAZ_CORES 2
++
++/*Must be equal to IMG_CODEC_NUM*/
++#define PNW_TOPAZ_CODEC_NUM_MAX (10)
++//#define TOPAZ_PDUMP
++
++extern int drm_topaz_pmpolicy;
++
++/* XXX: it's a copy of msvdx cmd queue. should have some change? */
++struct pnw_topaz_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++/* define structure */
++/* firmware file's info head */
++struct topazsc_fwinfo {
++ unsigned int ver:16;
++ unsigned int codec:16;
++
++ unsigned int text_size;
++ unsigned int data_size;
++ unsigned int data_location;
++};
++
++/* firmware data array define */
++struct pnw_topaz_codec_fw {
++ uint32_t ver;
++ uint32_t codec;
++
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++
++ struct ttm_buffer_object *text;
++ struct ttm_buffer_object *data;
++};
++
++struct pnw_topaz_private {
++ /* current video task */
++ unsigned int pmstate;
++ struct sysfs_dirent *sysfs_pmstate;
++ int frame_skip;
++
++ void *topaz_mtx_reg_state[MAX_TOPAZ_CORES] ;
++ struct ttm_buffer_object *topaz_mtx_data_mem[MAX_TOPAZ_CORES];
++ uint32_t topaz_cur_codec;
++ uint32_t cur_mtx_data_size[MAX_TOPAZ_CORES];
++ int topaz_needs_reset;
++
++ /*
++ *topaz command queue
++ */
++ spinlock_t topaz_lock;
++ struct mutex topaz_mutex;
++ struct list_head topaz_queue;
++ int topaz_busy; /* 0 means topaz is free */
++ int topaz_fw_loaded;
++
++ uint32_t stored_initial_qp;
++ uint32_t topaz_dash_access_ctrl;
++
++ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
++ struct ttm_bo_kmap_obj topaz_bo_kmap;
++ uint32_t *topaz_mtx_wb;
++ uint32_t topaz_wb_offset;
++ uint32_t *topaz_sync_addr;
++ uint32_t topaz_sync_offset;
++ uint32_t topaz_cmd_count;
++ uint32_t topaz_mtx_saved;
++
++
++ /* firmware */
++ struct pnw_topaz_codec_fw topaz_fw[PNW_TOPAZ_CODEC_NUM_MAX * 2];
++
++ uint32_t topaz_hw_busy;
++
++ uint32_t topaz_num_cores;
++ uint32_t aui32LastSync[2][MAX_TOPAZ_CORES + 1]; //!< Last sync value sent to each core
++
++ /*Before load firmware, need to set up jitter according to resolution*/
++ /*The data of MTX_CMDID_SW_NEW_CODEC command contains width and length.*/
++ uint16_t frame_w;
++ uint16_t frame_h;
++};
++
++/* external function declare */
++/*ISR of TopazSC*/
++extern IMG_BOOL pnw_topaz_interrupt(IMG_VOID *pvData);
++
++/*topaz commad handling function*/
++extern int pnw_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++extern int pnw_wait_topaz_idle(struct drm_device *dev);
++extern int pnw_check_topaz_idle(struct drm_device *dev);
++extern void pnw_unmap_topaz_reg(struct drm_device *dev);
++extern void pnw_map_topaz_reg(struct drm_device *dev);
++extern int pnw_topaz_restore_mtx_state(struct drm_device *dev);
++extern void pnw_topaz_enableirq(struct drm_device *dev);
++extern void pnw_topaz_disableirq(struct drm_device *dev);
++
++extern int pnw_topaz_init(struct drm_device *dev);
++extern int pnw_topaz_uninit(struct drm_device *dev);
++extern void pnw_topaz_handle_timeout(struct ttm_fence_device *fdev);
++
++extern int pnw_topaz_save_mtx_state(struct drm_device *dev);
++
++#define PNW_TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \
++do { \
++ topaz_priv->pmstate = new_state; \
++ sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("TOPAZ: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup": "powerdown"); \
++} while (0)
++
++#endif /* _PNW_TOPAZ_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/pnw_topaz_hw_reg.h
+@@ -0,0 +1,1133 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PNW_TOPAZ_HW_REG_H_
++#define _PNW_TOPAZ_HW_REG_H_
++
++#ifdef _LNC_TOPAZ_HW_REG_H_
++#error "lnc_topaz_hw_reg.h shouldn't be included"
++#endif
++
++#include "psb_drv.h"
++#include "img_types.h"
++#include "pnw_topaz.h"
++
++/*
++ * MACROS to insert values into fields within a word. The basename of the
++ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
++ */
++#define MM_WRITE32(base, offset, value) \
++do { \
++ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
++ + base + offset)) = value; \
++} while (0)
++
++#define MM_READ32(base, offset, pointer) \
++do { \
++ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
++ + base + offset)); \
++} while (0)
++
++#define F_MASK(basename) (MASK_##basename)
++#define F_SHIFT(basename) (SHIFT_##basename)
++
++#define F_ENCODE(val, basename) \
++ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
++
++
++#define F_EXTRACT(val,basename) (((val)&(F_MASK(basename)))>>(F_SHIFT(basename)))
++
++/*! The number of TOPAZ cores present in the system */
++#define TOPAZSC_NUM_CORES 2
++
++#define TOPAZSC_REG_OFF_MAX (TOPAZSC_NUM_CORES * 0x10000 + 0x10000)
++#define REG_BASE_MTX 0x04800000
++#define REG_BASE_HOST 0x00000000
++
++#define MTX_CORE_CODE_MEM (0x10)
++#define MTX_CORE_DATA_MEM (0x18)
++
++/* Multicore Regs */
++#define REG_OFFSET_TOPAZ_MULTICORE 0x00000000
++#define REG_OFFSET_TOPAZ_DMAC 0x00001000
++
++#define REG_SIZE_TOPAZ_MULTICORE 0x00001000
++#define REG_SIZE_TOPAZ_DMAC 0x00001000
++
++/* Topaz core registers - Host view */
++#define REG_OFFSET_TOPAZ_CORE_HOST 0x00010000
++#define REG_SIZE_TOPAZ_CORE_HOST 0x00010000
++
++#define REG_OFFSET_TOPAZ_MTX_HOST 0x00000000
++#define REG_OFFSET_TOPAZ_TOPAZ_HOST 0x00002000
++#define REG_OFFSET_TOPAZ_MVEA_HOST 0x00003000
++#define REG_OFFSET_TOPAZ_MVEACMD_HOST 0x00004000
++#define REG_OFFSET_TOPAZ_VLC_HOST 0x00005000
++#define REG_OFFSET_TOPAZ_DEBLOCKER_HOST 0x00006000
++#define REG_OFFSET_TOPAZ_COMMS_HOST 0x00007000
++#define REG_OFFSET_TOPAZ_ESB_HOST 0x00008000
++
++#define REG_SIZE_TOPAZ_MTX_HOST 0x00002000
++#define REG_SIZE_TOPAZ_TOPAZ_HOST 0x00001000
++#define REG_SIZE_TOPAZ_MVEA_HOST 0x00001000
++#define REG_SIZE_TOPAZ_MVEACMD_HOST 0x00001000
++#define REG_SIZE_TOPAZ_VLC_HOST 0x00001000
++#define REG_SIZE_TOPAZ_DEBLOCKER_HOST 0x00001000
++#define REG_SIZE_TOPAZ_COMMS_HOST 0x00001000
++#define REG_SIZE_TOPAZ_ESB_HOST 0x00004000
++
++
++/* Topaz core registers MTX view */
++#define REG_OFFSET_TOPAZ_CORE_MTX 0x00010000 // MUST confirm
++#define REG_SIZE_TOPAZ_CORE_MTX 0x00010000 // MUST confirm
++
++#define REG_OFFSET_TOPAZ_MTX_MTX 0x00000000
++#define REG_OFFSET_TOPAZ_TOPAZ_MTX 0x00000800
++#define REG_OFFSET_TOPAZ_MVEA_MTX 0x00000C00
++#define REG_OFFSET_TOPAZ_MVEACMD_MTX 0x00001000
++#define REG_OFFSET_TOPAZ_VLC_MTX 0x00001400
++#define REG_OFFSET_TOPAZ_DEBLOCKER_MTX 0x00001800
++#define REG_OFFSET_TOPAZ_COMMS_MTX 0x00001C00
++#define REG_OFFSET_TOPAZ_ESB_MTX 0x00002000
++
++#define REG_SIZE_TOPAZ_MTX_MTX 0x00000800
++#define REG_SIZE_TOPAZ_TOPAZ_MTX 0x00000400
++#define REG_SIZE_TOPAZ_MVEA_MTX 0x00000400
++#define REG_SIZE_TOPAZ_MVEACMD_MTX 0x00000400
++#define REG_SIZE_TOPAZ_VLC_MTX 0x00000400
++#define REG_SIZE_TOPAZ_DEBLOCKER_MTX 0x00000400
++#define REG_SIZE_TOPAZ_COMMS_MTX 0x00000400
++#define REG_SIZE_TOPAZ_ESB_MTX 0x00002000
++
++
++/* Register bank addresses - Host View */
++#define REG_START_TOPAZ_MULTICORE_HOST (REG_BASE_HOST + REG_OFFSET_TOPAZ_MULTICORE)
++#define REG_END_TOPAZ_MULTICORE_HOST (REG_START_TOPAZ_MULTICORE_HOST + REG_SIZE_TOPAZ_MULTICORE)
++
++#define REG_START_TOPAZ_DMAC_HOST (REG_BASE_HOST + REG_OFFSET_TOPAZ_DMAC)
++#define REG_END_TOPAZ_DMAC_HOST (REG_START_TOPAZ_DMAC_HOST + REG_SIZE_TOPAZ_DMAC)
++
++#define REG_START_TOPAZ_MTX_HOST(core) (REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_MTX_HOST)
++#define REG_END_TOPAZ_MTX_HOST(core) (REG_START_TOPAZ_MTX_HOST(core) + REG_SIZE_TOPAZ_MTX_HOST)
++
++#define REG_START_TOPAZ_TOPAZ_HOST(core) (REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_TOPAZ_HOST)
++#define REG_END_TOPAZ_TOPAZ_HOST(core) (REG_START_TOPAZ_TOPAZ_HOST(core) + REG_SIZE_TOPAZ_TOPAZ_HOST)
++
++#define REG_START_TOPAZ_MVEA_HOST(core) (REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_MVEA_HOST)
++#define REG_END_TOPAZ_MVEA_HOST(core) (REG_START_TOPAZ_MVEA_HOST(core) + REG_SIZE_TOPAZ_MVEA_HOST)
++
++
++/* Register bank addresses - MTX view */
++#define REG_START_TOPAZ_MULTICORE_MTX (REG_BASE_MTX + REG_OFFSET_TOPAZ_MULTICORE)
++#define REG_END_TOPAZ_MULTICORE_MTX (REG_START_TOPAZ_MULTICORE_MTX + REG_SIZE_TOPAZ_MULTICORE)
++
++#define REG_START_TOPAZ_DMAC_MTX (REG_BASE_MTX + REG_OFFSET_TOPAZ_DMAC)
++#define REG_END_TOPAZ_DMAC_MTX (REG_START_TOPAZ_DMAC_MTX + REG_SIZE_TOPAZ_DMAC)
++
++#define REG_START_TOPAZ_MTX_MTX(core) (REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MTX_MTX)
++#define REG_END_TOPAZ_MTX_MTX(core) (REG_START_TOPAZ_MTX_MTX(core) + REG_SIZE_TOPAZ_MTX_MTX)
++
++#define REG_START_TOPAZ_TOPAZ_MTX(core) (REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_TOPAZ_MTX)
++#define REG_END_TOPAZ_TOPAZ_MTX(core) (REG_START_TOPAZ_TOPAZ_MTX(core) + REG_SIZE_TOPAZ_TOPAZ_MTX)
++
++#define REG_START_TOPAZ_MVEA_MTX(core) (REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MVEA_MTX)
++#define REG_END_TOPAZ_MVEA_MTX(core) (REG_START_TOPAZ_MVEA_MTX(core) + REG_SIZE_TOPAZ_MVEA_MTX)
++
++
++/* Every Topaz core has a 64K address space*/
++#define TOPAZ_CORE_REG_BASE(core) (REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST)
++
++/* MVEA macro */
++#define MVEA_START 0x03000
++
++#ifdef TOPAZ_PDUMP
++#define MVEA_WRITE32(offset, value, core) \
++ do { \
++ MM_WRITE32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, value); \
++ DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_WT %x %x\n", core, offset, value); \
++ } while (0)
++#define MVEA_READ32(offset, pointer, core) \
++ do { \
++ MM_READ32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, pointer); \
++ DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_RD %x %x\n", core, offset, *(uint32_t *)pointer);\
++ } while (0)
++#else
++#define MVEA_WRITE32(offset, value, core) \
++ MM_WRITE32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, value)
++
++#define MVEA_READ32(offset, pointer, core) \
++ MM_READ32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, pointer)
++#endif
++
++#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
++#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
++#define F_ENCODE_MVEA(val, basename) \
++ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
++
++/* VLC macro */
++#define TOPAZ_VLC_START 0x05000
++
++/* TOPAZ macro */
++#define TOPAZ_START 0x02000
++
++#ifdef TOPAZ_PDUMP
++#define TOPAZ_WRITE32(offset, value, core) \
++ do {\
++ MM_WRITE32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, value); \
++ DRM_ERROR("TOPAZ_PDUMP: TOPAZ_CORE %d REG_WT: %x %x\n", core, \
++ offset, value);\
++ } while (0)
++#define TOPAZ_READ32(offset, pointer, core) \
++ do { \
++ MM_READ32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, pointer); \
++ DRM_ERROR("TOPAZ_PDUMP: TOPAZ_CORE %d REG_RD: %x %x\n", core, \
++ offset, *(uint32_t *)pointer);\
++ } while (0)
++#else
++#define TOPAZ_WRITE32(offset, value, core) \
++ MM_WRITE32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, value)
++#define TOPAZ_READ32(offset, pointer, core) \
++ MM_READ32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, pointer)
++#endif
++#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
++#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
++#define F_ENCODE_TOPAZ(val, basename) \
++ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
++
++/* MTX macro */
++#define MTX_START 0x0
++
++#ifdef TOPAZ_PDUMP
++#define MTX_WRITE32(offset, value, core) \
++ do { \
++ MM_WRITE32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, value); \
++ DRM_ERROR("TOPAZ_PDUMP: MTX core %d REG_WT: %x %x\n", core,\
++ offset, value);\
++ } while (0)
++
++
++#define MTX_READ32(offset, pointer, core) \
++ do { \
++ MM_READ32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, pointer); \
++ DRM_ERROR("TOPAZ_PDUMP: MTX core %d REG_RD %x %x\n", core, \
++ offset, *(uint32_t *)pointer); \
++ } while (0);
++#else
++
++#define MTX_WRITE32(offset, value, core) \
++ MM_WRITE32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, value)
++#define MTX_READ32(offset, pointer, core) \
++ MM_READ32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, pointer)
++#endif
++
++/* DMAC macro */
++#define DMAC_START 0x01000
++
++#ifdef TOPAZ_DUMP
++#define DMAC_WRITE32(offset, value) \
++ do { \
++ MM_WRITE32(DMAC_START, offset, value);\
++ DRM_ERROR("TOPAZ_PDUMP: DMAC WT %x %x\n", offset, value);\
++ } while (0);
++
++#define DMAC_READ32(offset, pointer) \
++ do {\
++ MM_READ32(DMAC_START, offset, pointer);\
++ DRM_ERROR("TOPAZ_PDUMP: DMAC RD %x %x\n", offset, *(uint32_t *)pointer); \
++ } while (0)
++#else
++
++#define DMAC_WRITE32(offset, value) \
++ MM_WRITE32(DMAC_START, offset, value)
++
++#define DMAC_READ32(offset, pointer) \
++ MM_READ32(DMAC_START, offset, pointer)
++#endif
++#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
++#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
++#define F_ENCODE_DMAC(val, basename) \
++ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
++
++/* Register CR_IMG_TOPAZ_INTENAB */
++#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
++
++/*(Bit 3 enables fault interrupts caused by the topaz_cores. Bit 4 enables
++ * fault interrupts caused by the DMAC)*/
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000018
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
++
++#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
++
++#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
++
++#define MTX_CCBCTRL_ROFF 0
++#define MTX_CCBCTRL_COMPLETE 4
++#define MTX_CCBCTRL_CCBSIZE 8
++#define MTX_CCBCTRL_QP 12
++#define MTX_CCBCTRL_FRAMESKIP 20
++#define MTX_CCBCTRL_INITQP 24
++
++#define TOPAZ_CR_MMU_STATUS 0x001C
++#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
++#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
++#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
++
++#define TOPAZ_CR_MMU_MEM_REQ 0x0020
++#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
++#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
++#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
++
++/* Register CR_TOPAZ_CMD_FIFO_2 */
++#define TOPAZ_CR_TOPAZ_CMD_FIFO_2 0x005C
++#define MASK_TOPAZ_CR_CMD_FIFO_FLUSH 0x00000001
++#define SHIFT_TOPAZ_CR_CMD_FIFO_FLUSH 0
++#define REGNUM_TOPAZ_CR_CMD_FIFO_FLUSH 0x005C
++
++#define MTX_CR_MTX_KICK 0x0080
++#define MASK_MTX_MTX_KICK 0x0000FFFF
++#define SHIFT_MTX_MTX_KICK 0
++#define REGNUM_MTX_MTX_KICK 0x0080
++
++#define MTX_DATA_MEM_BASE 0x82880000
++
++#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
++#define MASK_MTX_MTX_MCMR 0x00000001
++#define SHIFT_MTX_MTX_MCMR 0
++#define REGNUM_MTX_MTX_MCMR 0x0108
++
++#define MASK_MTX_MTX_MCMID 0x0FF00000
++#define SHIFT_MTX_MTX_MCMID 20
++#define REGNUM_MTX_MTX_MCMID 0x0108
++
++#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
++#define SHIFT_MTX_MTX_MCM_ADDR 2
++#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
++#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
++#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
++#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
++
++#define MASK_MTX_MTX_MCMAI 0x00000002
++#define SHIFT_MTX_MTX_MCMAI 1
++#define REGNUM_MTX_MTX_MCMAI 0x0108
++
++#define MVEA_CR_MVEA_BUSY 0x0018
++#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
++#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
++
++#define MVEA_CR_IMG_MVEA_SRST 0x0000
++#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
++#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
++#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
++#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
++#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
++#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
++#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
++#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
++#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
++#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
++#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
++#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
++#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
++
++#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
++#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
++
++#define TOPAZ_MTX_PC (0x00000005)
++
++#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
++#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
++
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
++
++#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x00000044
++
++#define MASK_TOPAZ_CR_MTX_RAM_BANKS 0x00000F00
++#define SHIFT_TOPAZ_CR_MTX_RAM_BANKS 8
++#define REGNUM_TOPAZ_CR_MTX_RAM_BANKS 0x0044
++
++#define MASK_TOPAZ_CR_MTX_RAM_BANK_SIZE 0x000F0000
++#define SHIFT_TOPAZ_CR_MTX_RAM_BANK_SIZE 16
++#define REGNUM_TOPAZ_CR_MTX_RAM_BANK_SIZE 0x0044
++
++#define MASK_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 0x0F000000
++#define SHIFT_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 24
++#define REGNUM_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 0x0044
++
++#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
++#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
++#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
++#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
++#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
++
++/* Register CR_MTX_RAM_ACCESS_DATA_EXCHANGE */
++#define MTX_CR_MTX_RAM_ACCESS_DATA_EXCHANGE 0x0100
++/* Register CR_MTX_RAM_ACCESS_DATA_TRANSFER */
++#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
++
++#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
++#define MASK_MTX_MTX_MCMR 0x00000001
++#define SHIFT_MTX_MTX_MCMR 0
++#define REGNUM_MTX_MTX_MCMR 0x0108
++
++#define MASK_MTX_MTX_MCMAI 0x00000002
++#define SHIFT_MTX_MTX_MCMAI 1
++#define REGNUM_MTX_MTX_MCMAI 0x0108
++
++#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
++#define SHIFT_MTX_MTX_MCM_ADDR 2
++#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
++
++#define MASK_MTX_MTX_MCMID 0x0FF00000
++#define SHIFT_MTX_MTX_MCMID 20
++#define REGNUM_MTX_MTX_MCMID 0x0108
++
++#define TOPAZ_CR_MMU_CONTROL0 0x0024
++#define MASK_TOPAZ_CR_MMU_BYPASS_DMAC 0x00020000
++#define SHIFT_TOPAZ_CR_MMU_BYPASS_DMAC 17
++#define REGNUM_TOPAZ_CR_MMU_BYPASS_DMAC 0x0024
++
++#define MASK_TOPAZ_CR_MMU_BYPASS 0x00010000
++#define SHIFT_TOPAZ_CR_MMU_BYPASS 16
++#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
++
++#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
++#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
++#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
++#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
++
++#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
++#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
++#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
++
++#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
++#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
++#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
++
++/* Register CR_MMU_BANK_INDEX */
++#define TOPAZ_CR_MMU_BANK_INDEX 0x0040
++#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
++#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
++#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0040
++
++#define MASK_TOPAZ_CR_MMU_BANK_SELECT(i) (0x00000001 << (0 + ((i) * 1)))
++#define SHIFT_TOPAZ_CR_MMU_BANK_SELECT(i) (0 + ((i) * 1))
++#define REGNUM_TOPAZ_CR_MMU_BANK_SELECT(i) 0x0040
++
++#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
++#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
++#define TXRPT_WAITONKICK_VALUE 0x8ade0000
++
++#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
++
++#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
++#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
++
++#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
++#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
++
++#define MTX_CR_MTX_SYSC_CDMAA 0x0344
++#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
++#define SHIFT_MTX_CDMAA_ADDRESS 2
++#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
++
++#define MTX_CR_MTX_SYSC_CDMAC 0x0340
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define MASK_MTX_BURSTSIZE 0x07000000
++#define SHIFT_MTX_BURSTSIZE 24
++#define REGNUM_MTX_BURSTSIZE 0x0340
++
++#define MASK_MTX_RNW 0x00020000
++#define SHIFT_MTX_RNW 17
++#define REGNUM_MTX_RNW 0x0340
++
++#define MASK_MTX_ENABLE 0x00010000
++#define SHIFT_MTX_ENABLE 16
++#define REGNUM_MTX_ENABLE 0x0340
++
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
++
++#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
++#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
++#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
++#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
++#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
++#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
++#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
++#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
++#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
++#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
++
++/*#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
++#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
++#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
++#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040*/
++
++/* Register CR_TOPAZ_HW_CFG */
++#define TOPAZ_CR_TOPAZ_HW_CFG 0x0050
++#define MASK_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0000001F
++#define SHIFT_TOPAZ_CR_NUM_CORES_SUPPORTED 0
++#define REGNUM_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0050
++
++#define MTX_CR_MTX_SYSC_CDMAT 0x0350
++#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
++#define SHIFT_MTX_TRANSFERDATA 0
++#define REGNUM_MTX_TRANSFERDATA 0x0350
++
++#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
++#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
++#define SHIFT_IMG_SOC_TRANSFER_FIN 17
++#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
++
++#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
++#define MASK_IMG_SOC_CNT 0x0000FFFF
++#define SHIFT_IMG_SOC_CNT 0
++#define REGNUM_IMG_SOC_CNT 0x0004
++
++#define MASK_IMG_SOC_EN 0x00010000
++#define SHIFT_IMG_SOC_EN 16
++#define REGNUM_IMG_SOC_EN 0x0004
++
++#define MASK_IMG_SOC_LIST_EN 0x00040000
++#define SHIFT_IMG_SOC_LIST_EN 18
++#define REGNUM_IMG_SOC_LIST_EN 0x0004
++
++#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
++#define MASK_IMG_SOC_PER_HOLD 0x0000007F
++#define SHIFT_IMG_SOC_PER_HOLD 0
++#define REGNUM_IMG_SOC_PER_HOLD 0x0018
++
++#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
++#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
++#define SHIFT_IMG_SOC_START_ADDRESS 0
++#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
++
++#define MASK_IMG_SOC_BSWAP 0x40000000
++#define SHIFT_IMG_SOC_BSWAP 30
++#define REGNUM_IMG_SOC_BSWAP 0x0004
++
++#define MASK_IMG_SOC_PW 0x18000000
++#define SHIFT_IMG_SOC_PW 27
++#define REGNUM_IMG_SOC_PW 0x0004
++
++#define MASK_IMG_SOC_DIR 0x04000000
++#define SHIFT_IMG_SOC_DIR 26
++#define REGNUM_IMG_SOC_DIR 0x0004
++
++#define MASK_IMG_SOC_PI 0x03000000
++#define SHIFT_IMG_SOC_PI 24
++#define REGNUM_IMG_SOC_PI 0x0004
++#define IMG_SOC_PI_1 0x00000002
++#define IMG_SOC_PI_2 0x00000001
++#define IMG_SOC_PI_4 0x00000000
++
++#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
++#define SHIFT_IMG_SOC_TRANSFER_IEN 29
++#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
++
++#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
++ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
++ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
++ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
++ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
++ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
++
++#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
++#define MASK_IMG_SOC_EXT_SA 0x0000000F
++#define SHIFT_IMG_SOC_EXT_SA 0
++#define REGNUM_IMG_SOC_EXT_SA 0x0008
++
++#define MASK_IMG_SOC_ACC_DEL 0xE0000000
++#define SHIFT_IMG_SOC_ACC_DEL 29
++#define REGNUM_IMG_SOC_ACC_DEL 0x0008
++
++#define MASK_IMG_SOC_INCR 0x08000000
++#define SHIFT_IMG_SOC_INCR 27
++#define REGNUM_IMG_SOC_INCR 0x0008
++
++#define MASK_IMG_SOC_BURST 0x07000000
++#define SHIFT_IMG_SOC_BURST 24
++#define REGNUM_IMG_SOC_BURST 0x0008
++
++#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
++((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
++(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
++(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
++
++#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
++#define MASK_IMG_SOC_ADDR 0x007FFFFF
++#define SHIFT_IMG_SOC_ADDR 0
++#define REGNUM_IMG_SOC_ADDR 0x0014
++
++#define SHIFT_TOPAZ_VEC_BUSY 11
++#define MASK_TOPAZ_VEC_BUSY (0x1<<SHIFT_TOPAZ_VEC_BUSY)
++
++#define TOPAZ_MTX_TXRPT_OFFSET 0xc
++#define TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET 0x20D0
++
++#define TOPAZ_GUNIT_READ32(offset) ioread32(dev_priv->vdc_reg + offset)
++#define TOPAZ_READ_BITS(val, basename) \
++ (((val)&MASK_TOPAZ_##basename)>>SHIFT_TOPAZ_##basename)
++
++#define TOPAZ_WAIT_UNTIL_IDLE \
++ do { \
++ uint8_t tmp_poll_number = 0;\
++ uint32_t tmp_reg; \
++ if (topaz_priv->topaz_cmd_windex == WB_CCB_CTRL_RINDEX(dev_priv)) { \
++ tmp_reg = TOPAZ_GUNIT_READ32(TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET);\
++ if (0 != TOPAZ_READ_BITS(tmp_reg, VEC_BUSY)) { \
++ MTX_READ32(TOPAZ_MTX_TXRPT_OFFSET, &tmp_reg);\
++ while ((tmp_reg != 0x8ade0000) && \
++ (tmp_poll_number++ < 10)) \
++ MTX_READ32(0xc, &tmp_reg); \
++ PSB_DEBUG_GENERAL( \
++ "TOPAZ: TXRPT reg remain: %x,poll %d times.\n",\
++ tmp_reg, tmp_poll_number);\
++ } \
++ } \
++ } while (0)
++
++/* Register CR_BUFFER_SIDEBAND */
++#define MVEA_CR_BUFFER_SIDEBAND 0x017C
++#define MASK_MVEA_CR_CURR_MB_SBAND 0x00000003
++#define SHIFT_MVEA_CR_CURR_MB_SBAND 0
++#define REGNUM_MVEA_CR_CURR_MB_SBAND 0x017C
++
++#define MASK_MVEA_CR_ABOVE_PIX_IN_SBAND 0x0000000C
++#define SHIFT_MVEA_CR_ABOVE_PIX_IN_SBAND 2
++#define REGNUM_MVEA_CR_ABOVE_PIX_IN_SBAND 0x017C
++
++#define MASK_MVEA_CR_CURR_PARAM_SBAND 0x00000030
++#define SHIFT_MVEA_CR_CURR_PARAM_SBAND 4
++#define REGNUM_MVEA_CR_CURR_PARAM_SBAND 0x017C
++
++#define MASK_MVEA_CR_BELOW_PARAM_IN_SBAND 0x000000C0
++#define SHIFT_MVEA_CR_BELOW_PARAM_IN_SBAND 6
++#define REGNUM_MVEA_CR_BELOW_PARAM_IN_SBAND 0x017C
++
++#define MASK_MVEA_CR_ABOVE_PARAM_IN_SBAND 0x00000300
++#define SHIFT_MVEA_CR_ABOVE_PARAM_IN_SBAND 8
++#define REGNUM_MVEA_CR_ABOVE_PARAM_IN_SBAND 0x017C
++
++#define MASK_MVEA_CR_REF_SBAND 0x00000C00
++#define SHIFT_MVEA_CR_REF_SBAND 10
++#define REGNUM_MVEA_CR_REF_SBAND 0x017C
++
++#define MASK_MVEA_CR_RECON_SBAND 0x00003000
++#define SHIFT_MVEA_CR_RECON_SBAND 12
++#define REGNUM_MVEA_CR_RECON_SBAND 0x017C
++
++#define MASK_MVEA_CR_ABOVE_PIX_OUT_SBAND 0x0000C000
++#define SHIFT_MVEA_CR_ABOVE_PIX_OUT_SBAND 14
++#define REGNUM_MVEA_CR_ABOVE_PIX_OUT_SBAND 0x017C
++
++#define MASK_MVEA_CR_BELOW_PARAM_OUT_SBAND 0x00030000
++#define SHIFT_MVEA_CR_BELOW_PARAM_OUT_SBAND 16
++#define REGNUM_MVEA_CR_BELOW_PARAM_OUT_SBAND 0x017C
++
++#define MASK_MVEA_CR_ABOVE_PARAM_OUT_SBAND 0x000C0000
++#define SHIFT_MVEA_CR_ABOVE_PARAM_OUT_SBAND 18
++#define REGNUM_MVEA_CR_ABOVE_PARAM_OUT_SBAND 0x017C
++
++/* Register CR_IPE_JITTER_FACTOR */
++#define MVEA_CR_IPE_JITTER_FACTOR 0x0218
++#define MASK_MVEA_CR_IPE_JITTER_FACTOR 0x00000003
++#define SHIFT_MVEA_CR_IPE_JITTER_FACTOR 0
++#define REGNUM_MVEA_CR_IPE_JITTER_FACTOR 0x0218
++
++/* Register CR_MULTICORE_INT_STAT */
++#define TOPAZSC_CR_MULTICORE_INT_STAT 0x0000
++#define MASK_TOPAZSC_CR_INT_STAT_DMAC 0x80000000
++#define SHIFT_TOPAZSC_CR_INT_STAT_DMAC 31
++#define REGNUM_TOPAZSC_CR_INT_STAT_DMAC 0x0000
++
++#define MASK_TOPAZSC_CR_INT_STAT_CORES 0x7FFFFFFF
++#define SHIFT_TOPAZSC_CR_INT_STAT_CORES 0
++#define REGNUM_TOPAZSC_CR_INT_STAT_CORES 0x0000
++
++/* Register CR_MULTICORE_CORE_SEL_0 */
++#define TOPAZSC_CR_MULTICORE_CORE_SEL_0 0x0004
++#define MASK_TOPAZSC_CR_DMAC_CORE_SELECT 0x0000000F
++#define SHIFT_TOPAZSC_CR_DMAC_CORE_SELECT 0
++#define REGNUM_TOPAZSC_CR_DMAC_CORE_SELECT 0x0004
++
++#define MASK_TOPAZSC_CR_WRITES_CORE_ALL 0x80000000
++#define SHIFT_TOPAZSC_CR_WRITES_CORE_ALL 31
++#define REGNUM_TOPAZSC_CR_WRITES_CORE_ALL 0x0004
++
++/* Register CR_MULTICORE_CORE_SEL_1 */
++#define TOPAZSC_CR_MULTICORE_CORE_SEL_1 0x0008
++#define MASK_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0x0000000F
++#define SHIFT_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0
++#define REGNUM_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0x0008
++
++/* Register CR_MULTICORE_RSVD0 */
++#define TOPAZSC_CR_MULTICORE_RSVD0 0x0010
++#define MASK_TOPAZSC_CR_RESERVED0 0xFFFFFFFF
++#define SHIFT_TOPAZSC_CR_RESERVED0 0
++#define REGNUM_TOPAZSC_CR_RESERVED0 0x0010
++
++/* Register CR_MULTICORE_CMD_FIFO_0 */
++#define TOPAZSC_CR_MULTICORE_CMD_FIFO_0 0x0014
++#define MASK_TOPAZSC_CR_CMD_FIFO_WDATA 0xFFFFFFFF
++#define SHIFT_TOPAZSC_CR_CMD_FIFO_WDATA 0
++#define REGNUM_TOPAZSC_CR_CMD_FIFO_WDATA 0x0014
++
++/* Register CR_MULTICORE_CMD_FIFO_1 */
++#define TOPAZSC_CR_MULTICORE_CMD_FIFO_1 0x0018
++#define MASK_TOPAZSC_CR_CMD_FIFO_SPACE 0x000000FF
++#define SHIFT_TOPAZSC_CR_CMD_FIFO_SPACE 0
++#define REGNUM_TOPAZSC_CR_CMD_FIFO_SPACE 0x0018
++
++#define MASK_TOPAZSC_CR_CMD_FIFO_FULL 0x00000100
++#define SHIFT_TOPAZSC_CR_CMD_FIFO_FULL 8
++#define REGNUM_TOPAZSC_CR_CMD_FIFO_FULL 0x0018
++
++/* Register CR_MULTICORE_IDLE_PWR_MAN */
++#define TOPAZSC_CR_MULTICORE_IDLE_PWR_MAN 0x001C
++#define MASK_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0x00000001
++#define SHIFT_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0
++#define REGNUM_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0x001C
++
++
++#define TOPAZ_MULTICORE_START 0x00000000
++
++#ifdef TOPAZ_PDUMP
++#define TOPAZ_MULTICORE_WRITE32(offset, value) \
++ do { \
++ MM_WRITE32(TOPAZ_MULTICORE_START, offset, value); \
++ DRM_ERROR("TOPAZ_PDUMP: MULTICORE, REG_WT %x %x\n", offset, value);\
++ } while (0)
++
++#define TOPAZ_MULTICORE_READ32(offset,pointer) \
++ do { \
++ MM_READ32(TOPAZ_MULTICORE_START, offset, pointer); \
++ DRM_ERROR("TOPAZ_PDUMP: MULTICORE, REG_RD %x %x\n", offset, *(uint32_t *)pointer); \
++ } while (0)
++#else
++#define TOPAZ_MULTICORE_WRITE32(offset, value) \
++ MM_WRITE32(TOPAZ_MULTICORE_START, offset, value)
++#define TOPAZ_MULTICORE_READ32(offset,pointer) \
++ MM_READ32(TOPAZ_MULTICORE_START, offset, pointer)
++#endif
++
++#define MTX_DMA_BURSTSIZE_BYTES 32
++#define MTX_DMA_ALIGNMENT_BYTES 16
++
++#define MTX_DMA_MEMORY_BASE (0x82880000)
++#define PC_START_ADDRESS (0x80900000)
++
++#define MAX_TOPAZ_CMD_COUNT (0x1000) /* max syncStatus value used*/
++
++
++#define MTX_WRITEBACK_DATASIZE_ROUND 4
++
++#define TOPAZ_MTX_WB_READ32(base, core, word, pointer) \
++ do { \
++ *(uint32_t *)(pointer) = *((uint32_t *)(base) + \
++ (core)*MTX_WRITEBACK_DATASIZE_ROUND + (word) ); \
++ } while (0)
++
++#define TOPAZ_MTX_WB_WRITE32(base, core, word, value) \
++ do { \
++ *((uint32_t *)(base) + (core)*MTX_WRITEBACK_DATASIZE_ROUND + (word) ) \
++ = value; \
++ } while (0)
++
++
++#define TOPAZ_MTX_WB_OFFSET(base, core) \
++ ((base) + (core)*MTX_WRITEBACK_DATASIZE_ROUND*4)
++
++#define POLL_TOPAZ_FREE_FIFO_SPACE(word_num, delay, retries, pointer) \
++ do { \
++ uint32_t free_space=0, i; \
++ for (i = 0; i < retries; i++) {\
++ TOPAZ_MULTICORE_READ32(TOPAZSC_CR_MULTICORE_CMD_FIFO_1, &free_space);\
++ free_space &= MASK_TOPAZSC_CR_CMD_FIFO_SPACE; \
++ if (free_space >= word_num) \
++ break; \
++ else \
++ DRM_UDELAY(delay);\
++ }\
++ if ( i >= retries){ \
++ ret = -1;\
++ DRM_ERROR("TOPAZ: poll FIFO free space failed (%d words free)!\n", free_space); \
++ }\
++ else \
++ ret = 0; \
++ *pointer = free_space; \
++ } while (0)
++
++
++/* **************** DMAC define **************** */
++enum DMAC_eBSwap {
++ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
++ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
++};
++
++enum DMAC_ePW {
++ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
++ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
++ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
++};
++
++enum DMAC_eAccDel {
++ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
++ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
++ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
++ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
++ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
++ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
++ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
++ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
++};
++
++enum DMAC_eBurst {
++ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
++ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
++ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
++ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
++ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
++ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
++ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
++ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
++};
++
++/* commands for topaz,shared with user space driver */
++enum drm_pnw_topaz_cmd {
++ MTX_CMDID_NULL = 0,
++ MTX_CMDID_SHUTDOWN = 1,
++ MTX_CMDID_START_PIC = 2,
++ MTX_CMDID_DO_HEADER = 3,
++ MTX_CMDID_ENCODE_SLICE = 4,
++ MTX_CMDID_END_PIC = 5,
++ MTX_CMDID_FLUSH = 6,
++ /*JPEG commands*/
++ MTX_CMDID_SETQUANT = 7,
++ MTX_CMDID_RESET_ENCODE = 8,
++ MTX_CMDID_ISSUEBUFF = 9,
++ MTX_CMDID_SETUP = 10,
++
++ MTX_CMDID_PAD = 0x7a, /*Will be ignored*/
++ MTX_CMDID_SW_WRITEREG = 0x7b,
++ MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
++ MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
++ MTX_CMDID_SW_NEW_CODEC = 0x7f
++};
++
++struct topaz_cmd_header {
++ union {
++ struct {
++ unsigned long id:7;
++ unsigned long enable_interrupt:1;
++ unsigned long core:8;
++ unsigned long seq:16;
++ };
++ uint32_t val;
++ };
++};
++
++/* codecs topaz supports,shared with user space driver */
++enum drm_pnw_topaz_codec {
++ IMG_CODEC_JPEG = 0,
++ IMG_CODEC_H264_NO_RC,
++ IMG_CODEC_H264_VBR,
++ IMG_CODEC_H264_CBR,
++ IMG_CODEC_H263_NO_RC,
++ IMG_CODEC_H263_VBR,
++ IMG_CODEC_H263_CBR,
++ IMG_CODEC_MPEG4_NO_RC,
++ IMG_CODEC_MPEG4_VBR,
++ IMG_CODEC_MPEG4_CBR,
++ IMG_CODEC_NUM
++};
++
++
++typedef enum
++{
++ MTX_WRITEBACK_CMDWORD = 0, //!< Command word of command executed by MTX
++ MTX_WRITEBACK_VALUE = 1, //!< Writeback value returned by command
++ MTX_WRITEBACK_FLAGSWORD_0 = 2, //!< Flags word indicating MTX status (see MTX writeback flags)
++ MTX_WRITEBACK_BITSWRITTEN = 3, //!< number of bits written out by this core
++
++ MTX_WRITEBACK_DATASIZE //!< End marker for enum
++
++} MTX_eWriteBackData;
++
++/* pnw_topazinit.c */
++int pnw_topaz_reset(struct drm_psb_private *dev_priv);
++int pnw_topaz_init_fw(struct drm_device *dev);
++int pnw_topaz_setup_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec);
++int pnw_topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value,
++ uint32_t enable);
++/*void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val);
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr);
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t addr);
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val);*/
++void pnw_topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
++
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int pnw_video_get_core_num(struct drm_device *dev, uint64_t user_pointer);
++int pnw_topaz_kick_null_cmd(struct drm_psb_private *dev_priv,
++ uint32_t core_id,
++ uint32_t sync_req,
++ uint8_t irq_enable);
++int pnw_wait_on_sync(struct drm_psb_private *dev_priv,
++ uint32_t sync_seq,
++ uint32_t *sync_p);
++
++int pnw_video_frameskip(struct drm_device *dev, uint64_t user_pointer);
++
++static inline char *cmd_to_string(int cmd_id)
++{
++ switch (cmd_id) {
++ case MTX_CMDID_START_PIC:
++ return "MTX_CMDID_START_PIC";
++ case MTX_CMDID_END_PIC:
++ return "MTX_CMDID_END_PIC";
++ case MTX_CMDID_DO_HEADER:
++ return "MTX_CMDID_DO_HEADER";
++ case MTX_CMDID_ENCODE_SLICE:
++ return "MTX_CMDID_ENCODE_SLICE";
++ case MTX_CMDID_SW_NEW_CODEC:
++ return "MTX_CMDID_SW_NEW_CODEC";
++ case MTX_CMDID_SETQUANT:
++ return "MTX_CMDID_SETQUANT";
++ case MTX_CMDID_RESET_ENCODE:
++ return "MTX_CMDID_RESET_ENCODE";
++ case MTX_CMDID_ISSUEBUFF:
++ return "MTX_CMDID_ISSUEBUFF";
++ case MTX_CMDID_SETUP:
++ return "MTX_CMDID_SETUP";
++ case MTX_CMDID_SW_WRITEREG:
++ return "MTX_CMDID_SW_WRITEREG";
++ default:
++ return "Undefined command";
++
++ }
++}
++
++static inline char *codec_to_string(int codec)
++{
++ switch (codec) {
++ case IMG_CODEC_JPEG: /* Just guess, is JPEG firmware included in topaz_bin? */
++ return "JPEG";
++ case IMG_CODEC_H264_NO_RC:
++ return "H264_NO_RC";
++ case IMG_CODEC_H264_VBR:
++ return "H264_VBR";
++ case IMG_CODEC_H264_CBR:
++ return "H264_CBR";
++ case IMG_CODEC_H263_NO_RC:
++ return "H263_NO_RC";
++ case IMG_CODEC_H263_VBR:
++ return "H263_VBR";
++ case IMG_CODEC_H263_CBR:
++ return "H263_CBR";
++ case IMG_CODEC_MPEG4_NO_RC:
++ return "MPEG4_NO_RC";
++ case IMG_CODEC_MPEG4_VBR:
++ return "MPEG4_VBR";
++ case IMG_CODEC_MPEG4_CBR:
++ return "MPEG4_CBR";
++ default:
++ return "Undefined codec";
++ }
++}
++
++static inline void pnw_topaz_clearirq(struct drm_device *dev,
++ uint32_t clear_topaz)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
++ if (clear_topaz != 0)
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz, 0);
++
++ /* PSB_WVDC32(_PNW_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
++}
++
++static inline uint32_t pnw_topaz_queryirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t val, /* iir, */ clear = 0;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val, 0);
++ /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
++
++ (void) topaz_priv;
++
++ if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
++ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
++ return 0;
++ }
++
++ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
++
++ if (val & (1<<31))
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ topaz_priv->aui32LastSync[0][0] ,
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *((uint32_t *)topaz_priv->topaz_mtx_wb + MTX_WRITEBACK_VALUE) );
++ else
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ topaz_priv->aui32LastSync[0][0],
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *((uint32_t *)topaz_priv->topaz_mtx_wb + MTX_WRITEBACK_VALUE) );
++
++ if (val & 0x8) {
++ uint32_t mmu_status, mmu_req;
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status, 0);
++ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req, 0);
++
++ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
++ "address=0x%08x,mem req=0x%08x\n",
++ mmu_status, mmu_req);
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
++ }
++
++ if (val & 0x4) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
++ }
++
++ if (val & 0x2) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
++ }
++
++ if (val & 0x1) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
++ }
++
++ return clear;
++}
++
++/*Set whether the write operation take effect on all cores
++ * or only the specific one*/
++static inline void topaz_set_mtx_target(struct drm_psb_private *dev_priv,
++ uint32_t core, uint8_t bTargetAll)
++{
++ TOPAZ_MULTICORE_WRITE32(
++ TOPAZSC_CR_MULTICORE_CORE_SEL_0,
++ F_ENCODE( (bTargetAll), TOPAZSC_CR_WRITES_CORE_ALL) |
++ F_ENCODE(core, TOPAZSC_CR_DMAC_CORE_SELECT));
++
++}
++
++#endif /* _PNW_TOPAZ_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/pnw_topazinit.c
+@@ -0,0 +1,2346 @@
++/**
++ * file pnw_topazinit.c
++ * TOPAZ initialization and mtx-firmware upload
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* NOTE: (READ BEFORE REFINE CODE)
++ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
++ * measured by word to DMAC.
++ *
++ *
++ *
++ */
++
++/* include headers */
++
++/* #define DRM_DEBUG_CODE 2 */
++
++#include <linux/firmware.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "psb_drv.h"
++#include "pnw_topaz.h"
++#include "psb_powermgmt.h"
++#include "pnw_topaz_hw_reg.h"
++
++/* WARNING: this define is very important */
++#define RAM_SIZE (1024 * 24)
++
++#define MEMORY_ONLY 0
++#define MEM_AND_CACHE 1
++#define CACHE_ONLY 2
++
++/* register default values
++ * THIS HEADER IS ONLY INCLUDE ONCE*/
++static unsigned long topaz_default_regs[181][3] = {
++ {MVEA_START, 0x00000000, 0x00000000},
++ {MVEA_START, 0x00000004, 0x00000400},
++ {MVEA_START, 0x00000008, 0x00000000},
++ {MVEA_START, 0x0000000C, 0x00000000},
++ {MVEA_START, 0x00000010, 0x00000000},
++ {MVEA_START, 0x00000014, 0x00000000},
++ {MVEA_START, 0x00000018, 0x00000000},
++ {MVEA_START, 0x0000001C, 0x00000000},
++ {MVEA_START, 0x00000020, 0x00000120},
++ {MVEA_START, 0x00000024, 0x00000000},
++ {MVEA_START, 0x00000028, 0x00000000},
++ {MVEA_START, 0x0000002C, 0x00000000},
++ {MVEA_START, 0x00000030, 0x00000000},
++ {MVEA_START, 0x00000034, 0x00000000},
++ {MVEA_START, 0x00000038, 0x00000000},
++ {MVEA_START, 0x00000100, 0x00000000},
++ {MVEA_START, 0x00000104, 0x00000000},
++ {MVEA_START, 0x00000108, 0x00000000},
++ {MVEA_START, 0x0000010C, 0x00000000},
++ {MVEA_START, 0x0000011C, 0x00000001},
++ {MVEA_START, 0x0000012C, 0x00000000},
++ {MVEA_START, 0x00000130, 0x00000000},
++ {MVEA_START, 0x00000134, 0x00000000},
++ {MVEA_START, 0x00000138, 0x00000000},
++ {MVEA_START, 0x0000013c, 0x00000000},
++ {MVEA_START, 0x00000140, 0x00000000},
++ {MVEA_START, 0x00000144, 0x00000000},
++ {MVEA_START, 0x00000148, 0x00000000},
++ {MVEA_START, 0x0000014c, 0x00000000},
++ {MVEA_START, 0x00000150, 0x00000000},
++ {MVEA_START, 0x00000154, 0x00000000},
++ {MVEA_START, 0x00000158, 0x00000000},
++ {MVEA_START, 0x0000015c, 0x00000000},
++ {MVEA_START, 0x00000160, 0x00000000},
++ {MVEA_START, 0x00000164, 0x00000000},
++ {MVEA_START, 0x00000168, 0x00000000},
++ {MVEA_START, 0x0000016c, 0x00000000},
++ {MVEA_START, 0x00000170, 0x00000000},
++ {MVEA_START, 0x00000174, 0x00000000},
++ {MVEA_START, 0x00000178, 0x00000000},
++ {MVEA_START, 0x0000017c, 0x00000000},
++ {MVEA_START, 0x00000180, 0x00000000},
++ {MVEA_START, 0x00000184, 0x00000000},
++ {MVEA_START, 0x00000188, 0x00000000},
++ {MVEA_START, 0x0000018C, 0x00000000},
++ {MVEA_START, 0x00000190, 0x00000000},
++ {MVEA_START, 0x00000194, 0x00000000},
++ {MVEA_START, 0x00000198, 0x00000000},
++ {MVEA_START, 0x0000019C, 0x00000000},
++ {MVEA_START, 0x000001A0, 0x00000000},
++ {MVEA_START, 0x000001A4, 0x00000000},
++ {MVEA_START, 0x000001A8, 0x00000000},
++ {MVEA_START, 0x000001AC, 0x00000000},
++ {MVEA_START, 0x000001B0, 0x00000000},
++ {MVEA_START, 0x000001B4, 0x00000000},
++ {MVEA_START, 0x000001B8, 0x00000000},
++ {MVEA_START, 0x000001BC, 0x00000000},
++ {MVEA_START, 0x000001C0, 0x00000000},
++ {MVEA_START, 0x000001C4, 0x00000000},
++ {MVEA_START, 0x000001F0, 0x00000000},
++ {MVEA_START, 0x000001F4, 0x00000000},
++ {MVEA_START, 0x000001FC, 0x00000000},
++ {MVEA_START, 0x00000200, 0x00000000},
++ {MVEA_START, 0x00000204, 0x00000000},
++ {MVEA_START, 0x00000208, 0x00000000},
++ {MVEA_START, 0x0000020C, 0x00000000},
++ {MVEA_START, 0x00000210, 0x00000000},
++ {MVEA_START, 0x00000214, 0x00000000},
++ {MVEA_START, 0x00000218, 0x00000000},
++ {MVEA_START, 0x00000264, 0x00000000},
++ {MVEA_START, 0x00000268, 0x00000000},
++ {MVEA_START, 0x0000026C, 0x00000000},
++ {MVEA_START, 0x00000278, 0x00000000},
++ {MVEA_START, 0x00000280, 0x00008000},
++ {MVEA_START, 0x00000284, 0x00000000},
++ {MVEA_START, 0x00000288, 0x00000000},
++ {MVEA_START, 0x0000028C, 0x00000000},
++ {MVEA_START, 0x00000314, 0x00000000},
++ {MVEA_START, 0x00000318, 0x00000000},
++ {MVEA_START, 0x0000031C, 0x00000000},
++ {MVEA_START, 0x00000320, 0x00000000},
++ {MVEA_START, 0x00000324, 0x00000000},
++ {MVEA_START, 0x00000348, 0x00000000},
++ {MVEA_START, 0x00000370, 0x00000000},
++ {MVEA_START, 0x00000374, 0x00000000},
++ {MVEA_START, 0x00000378, 0x00000000},
++ {MVEA_START, 0x0000037c, 0x00000000},
++ {MVEA_START, 0x00000380, 0x00000000},
++ {MVEA_START, 0x00000384, 0x00000000},
++ {MVEA_START, 0x0000038C, 0x00000000},
++ {MVEA_START, 0x00000390, 0x00000000},
++ {MVEA_START, 0x00000394, 0x00000000},
++ {MVEA_START, 0x00000398, 0x00000000},
++ {MVEA_START, 0x0000039C, 0x00000000},
++ {MVEA_START, 0x000003A0, 0x003CFFC2},
++ {MVEA_START, 0x000003B0, 0x00000000},
++ {MVEA_START, 0x000003B4, 0x00000000},
++ {MVEA_START, 0x000003BC, 0x00000000},
++ {MVEA_START, 0x000003D4, 0x00000000},
++ {MVEA_START, 0x000003D8, 0x00000000},
++ {MVEA_START, 0x000003DC, 0x00000000},
++ {MVEA_START, 0x000003E0, 0x00000000},
++ {MVEA_START, 0x000003E4, 0x00000000},
++ {MVEA_START, 0x000003E8, 0x00000000},
++ {MVEA_START, 0x000002D0, 0x00000000},
++ {MVEA_START, 0x000002D4, 0x00000000},
++ {MVEA_START, 0x000002D8, 0x00000000},
++ {MVEA_START, 0x000002DC, 0x00000000},
++ {MVEA_START, 0x000002E0, 0x00000000},
++ {MVEA_START, 0x000002E4, 0x00000000},
++ {MVEA_START, 0x000002E8, 0x00000000},
++ {MVEA_START, 0x000002EC, 0x00000000},
++ {MVEA_START, 0x000002F0, 0x00000000},
++ {MVEA_START, 0x000002F4, 0x00000000},
++ {MVEA_START, 0x000002F8, 0x00000000},
++ {MVEA_START, 0x000002FC, 0x00000000},
++ {MVEA_START, 0x00000300, 0x00000000},
++ {MVEA_START, 0x00000304, 0x00000000},
++ {MVEA_START, 0x00000308, 0x00000000},
++ {MVEA_START, 0x0000030C, 0x00000000},
++ {MVEA_START, 0x00000290, 0x00000000},
++ {MVEA_START, 0x00000294, 0x00000000},
++ {MVEA_START, 0x00000298, 0x00000000},
++ {MVEA_START, 0x0000029C, 0x00000000},
++ {MVEA_START, 0x000002A0, 0x00000000},
++ {MVEA_START, 0x000002A4, 0x00000000},
++ {MVEA_START, 0x000002A8, 0x00000000},
++ {MVEA_START, 0x000002AC, 0x00000000},
++ {MVEA_START, 0x000002B0, 0x00000000},
++ {MVEA_START, 0x000002B4, 0x00000000},
++ {MVEA_START, 0x000002B8, 0x00000000},
++ {MVEA_START, 0x000002BC, 0x00000000},
++ {MVEA_START, 0x000002C0, 0x00000000},
++ {MVEA_START, 0x000002C4, 0x00000000},
++ {MVEA_START, 0x000002C8, 0x00000000},
++ {MVEA_START, 0x000002CC, 0x00000000},
++ {MVEA_START, 0x00000080, 0x00000000},
++ {MVEA_START, 0x00000084, 0x80705700},
++ {MVEA_START, 0x00000088, 0x00000000},
++ {MVEA_START, 0x0000008C, 0x00000000},
++ {MVEA_START, 0x00000090, 0x00000000},
++ {MVEA_START, 0x00000094, 0x00000000},
++ {MVEA_START, 0x00000098, 0x00000000},
++ {MVEA_START, 0x0000009C, 0x00000000},
++ {MVEA_START, 0x000000A0, 0x00000000},
++ {MVEA_START, 0x000000A4, 0x00000000},
++ {MVEA_START, 0x000000A8, 0x00000000},
++ {MVEA_START, 0x000000AC, 0x00000000},
++ {MVEA_START, 0x000000B0, 0x00000000},
++ {MVEA_START, 0x000000B4, 0x00000000},
++ {MVEA_START, 0x000000B8, 0x00000000},
++ {MVEA_START, 0x000000BC, 0x00000000},
++ {MVEA_START, 0x000000C0, 0x00000000},
++ {MVEA_START, 0x000000C4, 0x00000000},
++ {MVEA_START, 0x000000C8, 0x00000000},
++ {MVEA_START, 0x000000CC, 0x00000000},
++ {MVEA_START, 0x000000D0, 0x00000000},
++ {MVEA_START, 0x000000D4, 0x00000000},
++ {MVEA_START, 0x000000D8, 0x00000000},
++ {MVEA_START, 0x000000DC, 0x00000000},
++ {MVEA_START, 0x000000E0, 0x00000000},
++ {MVEA_START, 0x000000E4, 0x00000000},
++ {MVEA_START, 0x000000E8, 0x00000000},
++ {MVEA_START, 0x000000EC, 0x00000000},
++ {MVEA_START, 0x000000F0, 0x00000000},
++ {MVEA_START, 0x000000F4, 0x00000000},
++ {MVEA_START, 0x000000F8, 0x00000000},
++ {MVEA_START, 0x000000FC, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
++};
++
++#define FIRMWARE_NAME "topazsc_fw.bin"
++
++/* static function define */
++static int topaz_upload_fw(struct drm_device *dev,
++ enum drm_pnw_topaz_codec codec,
++ uint32_t core_id);
++static inline void topaz_set_default_regs(struct drm_psb_private
++ *dev_priv);
++
++#define UPLOAD_FW_BY_DMA 1
++
++#if UPLOAD_FW_BY_DMA
++static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
++ uint32_t channel, uint32_t src_phy_addr,
++ uint32_t offset, uint32_t dst_addr,
++ uint32_t byte_num, uint32_t is_increment,
++ uint32_t is_write);
++#else
++static void topaz_mtx_upload_by_register(struct drm_device *dev,
++ uint32_t mtx_mem, uint32_t addr,
++ uint32_t size,
++ struct ttm_buffer_object *buf,
++ uint32_t core);
++#endif
++
++static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t core, uint32_t reg, const uint32_t val);
++static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t core, uint32_t reg, uint32_t *ret_val);
++static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv, uint32_t core);
++static void release_mtx_control_from_dash(struct drm_psb_private *dev_priv, uint32_t core);
++static void pnw_topaz_mmu_hwsetup(struct drm_psb_private *dev_priv, uint32_t core_id);
++static void mtx_dma_read(struct drm_device *dev, uint32_t core, uint32_t source_addr,
++ uint32_t size);
++static void mtx_dma_write(struct drm_device *dev, uint32_t core);
++
++
++#define DEBUG_FUNCTION 0
++
++#if DEBUG_FUNCTION
++static int topaz_test_null(struct drm_device *dev, uint32_t seq);
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq);
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
++static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset);
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq);
++#endif
++
++int pnw_error_dump_reg(struct drm_psb_private *dev_priv, int core_id)
++{
++ uint32_t reg_val;
++ int i;
++ DRM_ERROR("DMA Register value dump:\n");
++ for (i = 0; i < 8; i++) {
++ DMAC_READ32(i*4, &reg_val);
++ DRM_ERROR("DMAC REG%d: 0x%08x\n", i, reg_val);
++ }
++ TOPAZ_MULTICORE_READ32(
++ TOPAZSC_CR_MULTICORE_CORE_SEL_0, &reg_val);
++ DRM_ERROR("TOPAZSC_CR_MULTICORE_CORE_SEL_0 0x%08x\n", reg_val);
++ MTX_READ32(MTX_CR_MTX_SYSC_CDMAA, &reg_val, core_id);
++ DRM_ERROR("MTX_CR_MTX_SYSC_CDMAA 0x%08x\n", reg_val);
++ MTX_READ32(MTX_CR_MTX_SYSC_CDMAC, &reg_val, core_id);
++ DRM_ERROR("MTX_CR_MTX_SYSC_CDMAC 0x%08x\n", reg_val);
++
++ MTX_READ32(0x348, &reg_val, core_id);
++ DRM_ERROR("0x348 0x%08x\n", reg_val);
++ MTX_READ32(0x34c, &reg_val, core_id);
++ DRM_ERROR("0x34c 0x%08x\n", reg_val);
++ MTX_READ32(0x350, &reg_val, core_id);
++ DRM_ERROR("0x350 0x%08x\n", reg_val);
++ for (i = 0; i < 6; i++) {
++ TOPAZ_READ32(0x1c+i*4, &reg_val, core_id);
++ DRM_ERROR("MMU REG %d value 0x%08x\n", i, reg_val);
++ }
++
++ topaz_read_core_reg(dev_priv, core_id, TOPAZ_MTX_PC, &reg_val);
++ DRM_ERROR("PC pointer: 0x%08x\n", reg_val);
++ return 0;
++}
++
++
++#if 0
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t read_val;
++ uint32_t reg, bank_size, ram_bank_size, ram_id;
++
++ /*TODO: need to handle multi cores*/
++ TOPAZ_READ32(0x3c, &reg, 0);
++ reg = 0x0a0a0606;
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ /*TODO: need to handle multi cores*/
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMR),
++ 0);
++
++ /* ?? poll this reg? */
++ pnw_topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ /*TODO: need to handle multi cores*/
++ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val, 0);
++
++ return read_val;
++}
++
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ /*TODO: need to handle multi cores*/
++ TOPAZ_READ32(0x3c, &reg, 0);
++
++ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ /*TODO: need to handle multi cores*/
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR), 0);
++
++ /*TODO: need to handle multi cores*/
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val, 0);
++
++ /* ?? poll this reg? */
++ pnw_topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ return;
++}
++
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ /*TODO: need to handle multi cores*/
++ TOPAZ_READ32(0x3c, &reg, 0);
++
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ /*TODO: need to handle multi cores*/
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(1, MTX_MTX_MCMAI) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR), 0);
++}
++
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val)
++{
++ /*TODO: need to handle multi cores*/
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val, 0);
++}
++
++#endif
++int pnw_topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value, uint32_t mask)
++{
++ uint32_t tmp;
++ uint32_t count = 10000;
++
++ /* # poll topaz register for certain times */
++ while (count) {
++ /* #.# read */
++ MM_READ32(addr, 0, &tmp);
++
++ if (value == (tmp & mask))
++ return 0;
++
++ /* #.# delay and loop */
++ DRM_UDELAY(100);
++ --count;
++ }
++
++ /* # now waiting is timeout, return 1 indicat failed */
++ /* XXX: testsuit means a timeout 10000 */
++
++ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
++ "actual 0x%08x (0x%08x & 0x%08x)\n",
++ addr, value, tmp & mask, tmp, mask);
++
++ return -EBUSY;
++
++}
++
++static ssize_t psb_topaz_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct pnw_topaz_private *topaz_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++ pmstate = topaz_priv->pmstate;
++
++ pmstate = topaz_priv->pmstate;
++ spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
++ ret = snprintf(buf, 64, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown");
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
++
++
++/* this function finish the first part of initialization, the rest
++ * should be done in pnw_topaz_setup_fw
++ */
++int pnw_topaz_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ uint32_t core_id, core_rev;
++ int ret = 0, n;
++ bool is_iomem;
++ struct pnw_topaz_private *topaz_priv;
++ void *topaz_bo_virt;
++
++ PSB_DEBUG_GENERAL("TOPAZ: init topazsc data structures\n");
++ topaz_priv = kmalloc(sizeof(struct pnw_topaz_private), GFP_KERNEL);
++ if (topaz_priv == NULL)
++ return -1;
++
++ dev_priv->topaz_private = topaz_priv;
++ memset(topaz_priv, 0, sizeof(struct pnw_topaz_private));
++
++ /* get device --> drm_device --> drm_psb_private --> topaz_priv
++ * for psb_topaz_pmstate_show: topaz_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ pci_set_drvdata(dev->pdev, dev);
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_topaz_pmstate))
++ DRM_ERROR("TOPAZ: could not create sysfs file\n");
++ topaz_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ NULL,
++#endif
++ "topaz_pmstate");
++
++
++ topaz_priv = dev_priv->topaz_private;
++
++ /* # initialize comand topaz queueing [msvdx_queue] */
++ INIT_LIST_HEAD(&topaz_priv->topaz_queue);
++ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
++ mutex_init(&topaz_priv->topaz_mutex);
++ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
++ spin_lock_init(&topaz_priv->topaz_lock);
++
++ /* # topaz status init. [msvdx_busy] */
++ topaz_priv->topaz_busy = 0;
++ /*Initial topaz_cmd_count should be larger than initial writeback value*/
++ topaz_priv->topaz_cmd_count = 1;
++ topaz_priv->topaz_fw_loaded = 0;
++ /* FIXME: workaround since JPEG firmware is not ready */
++ topaz_priv->topaz_cur_codec = 0;
++ topaz_priv->topaz_hw_busy = 1;
++ /* # gain write back structure,we may only need 32+4=40DW */
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &(topaz_priv->topaz_bo));
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
++ return -1;
++ }
++
++ ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
++ topaz_priv->topaz_bo->num_pages,
++ &topaz_priv->topaz_bo_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++ return -1;
++ }
++
++ TOPAZ_READ32(TOPAZ_CR_TOPAZ_HW_CFG, &topaz_priv->topaz_num_cores, 0);
++
++ topaz_priv->topaz_num_cores = F_EXTRACT(topaz_priv->topaz_num_cores,
++ TOPAZ_CR_NUM_CORES_SUPPORTED);
++ PSB_DEBUG_GENERAL("TOPAZ: number of cores: %d\n", topaz_priv->topaz_num_cores);
++
++ if ( topaz_priv->topaz_num_cores > TOPAZSC_NUM_CORES)
++ {
++ topaz_priv->topaz_num_cores = TOPAZSC_NUM_CORES;
++ DRM_ERROR("TOPAZ: number of cores (%d) exceed TOPAZSC_NUM_CORES (%d)!\n",
++ topaz_priv->topaz_num_cores, TOPAZSC_NUM_CORES);
++ }
++
++ for (n = 0; n < MAX_TOPAZ_CORES; n++)
++ {
++ topaz_priv->topaz_mtx_data_mem[n] = NULL;
++ topaz_priv->topaz_mtx_reg_state[n] = NULL;
++ topaz_priv->cur_mtx_data_size[n] = 0;
++ topaz_priv->topaz_fw[n].text = NULL;
++ topaz_priv->topaz_fw[n].data = NULL;
++ }
++
++ for(n=0; n < topaz_priv->topaz_num_cores; n++)
++ {
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id, n);
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev, n);
++
++ PSB_DEBUG_GENERAL("TOPAZ: core(%d), core_id(%x) core_rev(%x)\n",
++ n, core_id, core_rev);
++
++ topaz_priv->topaz_mtx_reg_state[n] = kmalloc(TOPAZ_MTX_REG_SIZE,
++ GFP_KERNEL);
++ if (topaz_priv->topaz_mtx_reg_state[n] == NULL) {
++ DRM_ERROR("TOPAZ: failed to allocate space "
++ "for mtx register\n");
++ goto out;
++ }
++
++ ret = ttm_buffer_object_create(bdev,
++ 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem[n]);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save of core (%d)\n", n);
++ goto out;
++ }
++
++ }
++
++ topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
++ &is_iomem);
++ topaz_priv->topaz_mtx_wb = (uint32_t *) topaz_bo_virt;
++ topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
++ topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt
++ + 2048);
++ topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
++ + 2048;
++
++ PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack\n");
++ PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
++ topaz_priv->topaz_sync_offset);
++
++ /*topaz_cmd_count starts with 1. Reset writback value with 0*/
++ memset((void *)(topaz_priv->topaz_mtx_wb), 0,
++ topaz_priv->topaz_num_cores * MTX_WRITEBACK_DATASIZE_ROUND);
++ memset((void *)topaz_priv->topaz_sync_addr, 0,
++ MTX_WRITEBACK_DATASIZE_ROUND);
++
++ /*fence sequence number starts with 0. Reset sync seq with ~1*/
++ *(topaz_priv->topaz_sync_addr) = ~0;
++
++ pnw_topaz_mmu_flushcache(dev_priv);
++
++ /* # set up MMU */
++ for(n=0; n < topaz_priv->topaz_num_cores; n++)
++ pnw_topaz_mmu_hwsetup(dev_priv, n);
++
++
++ for(n=0; n < topaz_priv->topaz_num_cores; n++) /* FIXME why use "n < topaz_priv->topaz_num_cores*2"? modify it */
++ {
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
++ n);
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
++ n);
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: Reset MVEA successfully.\n");
++
++#if 0 /* can't load FW here */
++ /* #.# load fw to driver */
++ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
++ ret = pnw_topaz_init_fw(dev);
++ if (ret != 0)
++ return -1;
++
++ pnw_topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
++#endif
++
++ for ( n = 0; n < MAX_TOPAZ_CORES + 1; n++ )
++ {
++ topaz_priv->aui32LastSync[0][n] = ~0;
++ topaz_priv->aui32LastSync[1][n] = ~0;
++ }
++
++ /* create firmware storage */
++ for (n = 0; n < IMG_CODEC_NUM * 2; ++n) {
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++ }
++
++ PSB_DEBUG_INIT("TOPAZ:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ PSB_DEBUG_INIT("TOPAZ:rest MSDVX to disable clock gating\n");
++
++ PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
++
++ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++ PSB_DEBUG_GENERAL("TOPAZ: Exit initialization\n");
++ return 0;
++
++out:
++ for (n = 0; n < IMG_CODEC_NUM*2; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ for ( n = 0; n < MAX_TOPAZ_CORES; n++ )
++ {
++ if (topaz_priv->topaz_mtx_data_mem[n] != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem[n]);
++ if (topaz_priv->topaz_mtx_reg_state[n] != NULL)
++ kfree(topaz_priv->topaz_mtx_reg_state[n]);
++ }
++
++ return ret;
++}
++
++int pnw_topaz_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ int n;
++
++ /* flush MMU */
++ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
++ /* pnw_topaz_mmu_flushcache (dev_priv); */
++
++ if (NULL == topaz_priv)
++ {
++ DRM_ERROR("TOPAZ: topaz_priv is NULL!\n");
++ return -1;
++ }
++
++ /* # reset TOPAZ chip */
++ pnw_topaz_reset(dev_priv);
++
++ /* release resources */
++ /* # release write back memory */
++ topaz_priv->topaz_mtx_wb = NULL;
++
++ for (n = 0; n < topaz_priv->topaz_num_cores; n++)
++ {
++ /* release mtx register save space */
++ kfree(topaz_priv->topaz_mtx_reg_state[n]);
++
++ /* release mtx data memory save space */
++ if (topaz_priv->topaz_mtx_data_mem[n])
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem[n]);
++ }
++ /* # release firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++
++ if (topaz_priv) {
++ pci_set_drvdata(dev->pdev, NULL);
++ device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
++ sysfs_put(topaz_priv->sysfs_pmstate);
++ topaz_priv->sysfs_pmstate = NULL;
++
++ kfree(topaz_priv);
++ dev_priv->topaz_private = NULL;
++ }
++
++ return 0;
++}
++
++int pnw_topaz_reset(struct drm_psb_private *dev_priv)
++{
++ struct pnw_topaz_private *topaz_priv;
++ uint32_t i;
++
++ topaz_priv = dev_priv->topaz_private;
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_count = 0;
++ for (i = 0; i < MAX_TOPAZ_CORES; i++)
++ topaz_priv->cur_mtx_data_size[i] = 0;
++ topaz_priv->topaz_needs_reset = 0;
++
++ memset((void *)(topaz_priv->topaz_mtx_wb), 0,
++ MAX_TOPAZ_CORES * MTX_WRITEBACK_DATASIZE_ROUND);
++
++ for(i=0; i<topaz_priv->topaz_num_cores; i++)
++ {
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
++ i);
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
++ i);
++ }
++
++ /* # set up MMU */
++ for(i=0; i<topaz_priv->topaz_num_cores; i++)
++ pnw_topaz_mmu_hwsetup(dev_priv, i);
++
++ return 0;
++}
++
++static void print_fw(void *prt, int word_size)
++{
++#ifdef TOPAZ_PDUMP
++ int i;
++ uint32_t *p_word = (uint32_t *)prt;
++ DRM_ERROR("FW first %d words: \n", word_size);
++ for (i = 0; i < word_size; i++)
++ {
++ p_word++;
++ DRM_ERROR("0x%08x\t", *p_word);
++ if ( (i+1) % 8 == 0)
++ DRM_ERROR("\n");
++ }
++ return;
++#endif
++}
++/* read firmware bin file and load all data into driver */
++int pnw_topaz_init_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct firmware *raw = NULL;
++ unsigned char *ptr;
++ int ret = 0;
++ int n;
++ struct topazsc_fwinfo *cur_fw;
++ int cur_size;
++ struct pnw_topaz_codec_fw *cur_codec;
++ struct ttm_buffer_object **cur_drm_obj;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->stored_initial_qp = 0;
++
++ /* # get firmware */
++ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
++
++ if ((NULL == raw) || (raw->size < sizeof(struct topazsc_fwinfo))) {
++ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
++ goto out;
++ }
++
++ ptr = (unsigned char *) raw->data;
++
++ if (!ptr) {
++ DRM_ERROR("TOPAZ: failed to load firmware.\n");
++ goto out;
++ }
++
++ /* # load fw from file */
++ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
++ cur_fw = NULL;
++ for (n = 0; n < IMG_CODEC_NUM *2; ++n) {
++ cur_fw = (struct topazsc_fwinfo *) ptr;
++
++ cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
++ cur_codec->ver = cur_fw->ver;
++ cur_codec->codec = cur_fw->codec;
++ cur_codec->text_size = cur_fw->text_size;
++ cur_codec->data_size = cur_fw->data_size;
++ cur_codec->data_location = cur_fw->data_location;
++
++ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
++ codec_to_string(cur_fw->codec));
++
++ /* #.# handle text section */
++ ptr += sizeof(struct topazsc_fwinfo);
++ cur_drm_obj = &cur_codec->text;
++ cur_size = cur_fw->text_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ PSB_DEBUG_GENERAL(" load codec %d, text_size: %d, data_size %d, data_location 08%x\n",
++ cur_codec->codec / 2 ,cur_codec->text_size,cur_codec->data_size, cur_codec->data_location);
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ // print_fw(ptr, 16);
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# handle data section */
++ ptr += cur_fw->text_size;
++ cur_drm_obj = &cur_codec->data;
++ cur_size = cur_fw->data_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ print_fw(ptr, 16);
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# validate firmware */
++
++ /* #.# update ptr */
++ ptr += cur_fw->data_size;
++ }
++
++ release_firmware(raw);
++ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
++
++ return 0;
++
++out:
++ if (raw) {
++ PSB_DEBUG_GENERAL("release firmware....\n");
++ release_firmware(raw);
++ }
++
++ return -1;
++}
++
++/* setup fw when start a new context */
++int pnw_topaz_setup_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t verify_pc;
++ int core_id;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ int i, ret = 0;
++#if 0
++ if (codec == topaz_priv->topaz_current_codec) {
++ PNW_TRACEL("TOPAZ: reuse previous codec\n");
++ return 0;
++ }
++#endif
++
++ /* XXX: need to rest topaz? */
++ PSB_DEBUG_GENERAL("TOPAZ: should reset topaz when context change\n");
++
++ if (topaz_priv->topaz_num_cores > MAX_TOPAZ_CORES)
++ {
++ DRM_ERROR("TOPAZ: Invalid core nubmer %d\n", topaz_priv->topaz_num_cores);
++ return -1;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: Set up mmu for all %d cores\n", topaz_priv->topaz_num_cores);
++ for(core_id=0; core_id<topaz_priv->topaz_num_cores; core_id++)
++ pnw_topaz_mmu_hwsetup(dev_priv, core_id);
++
++ /* # reset MVEA */
++ for(core_id=0; core_id<topaz_priv->topaz_num_cores; core_id++)
++ {
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
++ core_id);
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
++ core_id);
++ }
++
++ /* XXX: interrupt enable shouldn't be enable here,
++ * this funtion is called when interrupt is enable,
++ * but here, we've no choice since we have to call setup_fw by
++ * manual */
++ /* # upload firmware, clear interruputs and start the firmware
++ * -- from hostutils.c in TestSuits*/
++ psb_irq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++
++ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware ....\n");
++
++ /*In DDK, it doesn't set registers with default values*/
++ //topaz_set_default_regs(dev_priv);
++ PSB_DEBUG_GENERAL("TOPAZ: Finish default regs setting\n");
++
++
++ /* start each MTX in turn MUST start with master to enable comms to other cores*/
++ for(core_id = topaz_priv->topaz_num_cores-1; core_id >= 0; core_id--)
++ {
++ topaz_set_mtx_target(dev_priv,core_id, 0);
++ /* # reset mtx */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET), core_id);
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0, core_id);
++
++ /* # upload fw by drm */
++ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware to %d cores\n",
++ topaz_priv->topaz_num_cores);
++
++ topaz_upload_fw(dev, codec, core_id);
++
++ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
++
++ /* D0.5, D0.6 and D0.7 */
++ for (i = 5; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, core_id, 0x1 | (i<<4),
++ 0);
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1.5, D1.6 and D1.7 */
++ for (i = 5; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, core_id, 0x2 | (i<<4),
++ 0);
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address: 0x%08x for core (%d)\n",
++ PC_START_ADDRESS, core_id);
++ topaz_write_core_reg(dev_priv, core_id, TOPAZ_MTX_PC, PC_START_ADDRESS);
++
++ topaz_read_core_reg(dev_priv, core_id, TOPAZ_MTX_PC, &verify_pc);
++
++ PSB_DEBUG_GENERAL("TOPAZ: verify pc address for core (%d): 0x%08x\n",
++ core_id, verify_pc);
++
++ /* enable auto clock is essential for this driver */
++ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE),
++ core_id);
++ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
++ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE),
++ core_id);
++
++ /* flush the command FIFO - only has effect on master MTX */
++ if(core_id == 0)
++ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_CMD_FIFO_2,
++ F_ENCODE(1, TOPAZ_CR_CMD_FIFO_FLUSH ),
++ 0);
++
++ /* clear MTX interrupt */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX),
++ core_id);
++
++ /* put the number of cores in use in the scratch register so is is ready when the firmware wakes up. */
++ TOPAZ_WRITE32(0x100 + (2 << 2), 2, core_id);
++
++ /* # turn on MTX */
++ topaz_set_mtx_target(dev_priv,core_id, 0);
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK,
++ core_id);
++
++ topaz_set_mtx_target(dev_priv,core_id, 0);
++ MTX_WRITE32(MTX_CR_MTX_KICK, 1, core_id);
++ }
++
++ topaz_priv->topaz_cmd_count = 1;
++ /* # poll on the interrupt which the firmware will generate */
++ /*With DDKv186, interrupt would't be generated automatically after
++ * firmware set up*/
++ PSB_DEBUG_GENERAL("TOPAZ: send NULL command to test firmware\n");
++ for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++)
++ {
++ pnw_topaz_kick_null_cmd(dev_priv, core_id,
++ topaz_priv->topaz_cmd_count++, 0);
++#if 1
++ ret = pnw_wait_on_sync(dev_priv,
++ topaz_priv->topaz_cmd_count - 1,
++ topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE);
++ if (0 != ret)
++ pnw_error_dump_reg(dev_priv, core_id);
++#else
++ pnw_topaz_wait_for_register(dev_priv,
++ REG_START_TOPAZ_TOPAZ_HOST(core_id) + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
++ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
++#endif
++ *(topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE) = 0xa5a5a5a5;
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX),
++ core_id);
++ }
++
++
++ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
++
++ memset((void *)(topaz_priv->topaz_mtx_wb), 0,
++ topaz_priv->topaz_num_cores * MTX_WRITEBACK_DATASIZE_ROUND);
++
++ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
++
++ /* XXX: is there any need to record next cmd num??
++ * we use fence seqence number to record it
++ */
++ topaz_priv->topaz_busy = 0;
++
++ for ( core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++ )
++ {
++ MVEA_WRITE32(MVEA_CR_BUFFER_SIDEBAND,
++ F_ENCODE(CACHE_ONLY,MVEA_CR_ABOVE_PARAM_OUT_SBAND) |
++ F_ENCODE(CACHE_ONLY,MVEA_CR_BELOW_PARAM_OUT_SBAND) |
++ F_ENCODE(MEM_AND_CACHE,MVEA_CR_ABOVE_PIX_OUT_SBAND) |
++ F_ENCODE(MEM_AND_CACHE,MVEA_CR_RECON_SBAND) |
++ F_ENCODE(CACHE_ONLY,MVEA_CR_REF_SBAND) |
++ F_ENCODE(CACHE_ONLY,MVEA_CR_ABOVE_PARAM_IN_SBAND) |
++ F_ENCODE(CACHE_ONLY,MVEA_CR_BELOW_PARAM_IN_SBAND) |
++ F_ENCODE(MEMORY_ONLY,MVEA_CR_CURR_PARAM_SBAND) |
++ F_ENCODE(CACHE_ONLY,MVEA_CR_ABOVE_PIX_IN_SBAND) |
++ F_ENCODE(MEMORY_ONLY,MVEA_CR_CURR_MB_SBAND),
++ core_id);
++ MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 3 - 1, core_id);
++ /*In DDK186, it set MVEA_CR_BUFFER_SIDEBAND register according to
++ *the height of source image */
++ // setup the jitter, base it on image size (using the height)
++ if(topaz_priv->frame_h >= 720)// HD
++ {
++ MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 3 - 1, core_id);
++ }
++ else if(topaz_priv->frame_w >= 480) // SD
++ {
++ MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 2 - 1, core_id);
++ }
++ else// others
++ {
++ MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 3 - 1, core_id);
++ }
++ }
++
++
++#if 0
++ pnw_topaz_mmu_flushcache(dev_priv);
++ topaz_test_null(dev, 0xe1e1);
++ topaz_test_null(dev, 0xe2e2);
++ topaz_test_sync(dev, 0xe2e2, 0x87654321);
++
++ topaz_mmu_test(dev, 0x12345678);
++ topaz_test_null(dev, 0xe3e3);
++ topaz_mmu_test(dev, 0x8764321);
++
++ topaz_test_null(dev, 0xe4e4);
++ topaz_test_null(dev, 0xf3f3);
++#endif
++ psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ pnw_topaz_enableirq(dev);
++
++
++ return 0;
++}
++
++#if UPLOAD_FW_BY_DMA
++int topaz_upload_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec, uint32_t core_id)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct pnw_topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++ uint32_t cur_mtx_data_size;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ int ret = 0;
++
++ if (codec >= IMG_CODEC_NUM)
++ {
++ DRM_ERROR("TOPAZ: Invalid codec %d\n", codec);
++ return -1;
++ }
++
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK,
++ core_id);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the master and slave firmware by DMA */
++ if(core_id == 0)
++ cur_codec_fw = &topaz_priv->topaz_fw[codec*2];
++ else
++ cur_codec_fw = &topaz_priv->topaz_fw[codec*2+1];
++
++ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d\n"
++ " data location(0x%x) to core(%d) \n",codec_to_string(codec), codec,
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location, core_id);
++
++ /* # upload text. text_size is byte size*/
++ text_size = cur_codec_fw->text_size / 4;
++ /* adjust transfer sizes of text and data sections to match burst size */
++ text_size = ((text_size*4 + (MTX_DMA_BURSTSIZE_BYTES-1))
++ & ~(MTX_DMA_BURSTSIZE_BYTES-1))/4;
++
++ PSB_DEBUG_GENERAL("TOPAZ: text_size round up to %d\n", text_size);
++ /* setup the MTX to start recieving data:
++ use a register for the transfer which will point to the source
++ (MTX_CR_MTX_SYSC_CDMAT) */
++ /*MTX burst size (4 * 2 * 32bits = 32bytes) should match DMA burst
++ size (2 * 128bits = 32bytes) */
++ /* #.# fill the dst addr */
++
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, MTX_DMA_MEMORY_BASE, core_id);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(4, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(text_size, MTX_LENGTH), core_id);
++
++ /* #.# set DMAC access to host memory via BIF (deserted)*/
++ /* TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, core_id);*/
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT,
++ text_size, core_id, 0);
++
++ /* #.# wait dma finish */
++ ret = pnw_topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ if (ret != 0) {
++ pnw_error_dump_reg(dev_priv, core_id);
++ return -1;
++ }
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ PSB_DEBUG_GENERAL("TOPAZ: firmware text upload complete.\n");
++
++ /* # return access to topaz core (deserted)*/
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, core_id);*/
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size / 4;
++ data_size = ((data_size*4 + (MTX_DMA_BURSTSIZE_BYTES-1))
++ & ~(MTX_DMA_BURSTSIZE_BYTES-1))/4;
++
++ data_location = cur_codec_fw->data_location;
++ data_location = data_location &( ~(MTX_DMA_BURSTSIZE_BYTES-1));
++
++ PSB_DEBUG_GENERAL("TOPAZ: data_size round up to %d\n"
++ "data_location round up to 0x%08x\n",
++ data_size, data_location);
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
++ data_location, core_id);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(4, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(data_size, MTX_LENGTH), core_id);
++ /* #.# set DMAC access to host memory via BIF(deserted) */
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, core_id);*/
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, data_size, core_id, 0);
++
++ /* #.# wait dma finish */
++ ret = pnw_topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ if (ret != 0) {
++ pnw_error_dump_reg(dev_priv, core_id);
++ return -1;
++ }
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ pnw_topaz_mmu_flushcache(dev_priv);
++ PSB_DEBUG_GENERAL("TOPAZ: firmware data upload complete.\n");
++ /* # return access to topaz core(deserted) */
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, core_id);*/
++
++ /* record this codec's mtx data size for
++ * context save & restore */
++ /* FIXME: since non-root sighting fixed by pre allocated,
++ * only need to correct the buffer size
++ */
++ cur_mtx_data_size = cur_codec_fw->data_size / 4;
++ if (topaz_priv->cur_mtx_data_size[core_id] != cur_mtx_data_size)
++ topaz_priv->cur_mtx_data_size[core_id] = cur_mtx_data_size;
++
++ return 0;
++}
++
++#else
++
++void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
++ uint32_t addr, uint32_t size,
++ struct ttm_buffer_object *buf,
++ uint32_t core)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t *buf_p;
++ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
++ uint32_t cur_ram_id, ram_addr , ram_id;
++ int map_ret, lp;
++ struct ttm_bo_kmap_obj bo_kmap;
++ bool is_iomem;
++ uint32_t cur_addr, ui32Size;
++
++ PSB_DEBUG_GENERAL("TOPAZ: mtx upload: mtx_mem(0x%08x) addr(0x%08x)"
++ "size(%d)\n", mtx_mem, addr, size);
++
++ get_mtx_control_from_dash(dev_priv, core);
++
++ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
++ if (map_ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
++ return;
++ }
++ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
++
++
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg, core);
++ debug_reg = 0x0a0a0600;
++ /*bank_size = (debug_reg & 0xf0000) >> 16;
++ bank_ram_size = 1 << (bank_size + 2);*/
++
++ /*Bank size 4096, BanK number 6, Totally ram size:24k*/
++ ui32Size = 0x1 << (F_EXTRACT( debug_reg, TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE ) + 2);
++ /* all other banks */
++ bank_size = 0x1 << (F_EXTRACT( debug_reg, TOPAZ_CR_MTX_RAM_BANK_SIZE ) + 2);
++ /* total RAM size */
++ bank_ram_size = ui32Size + ( bank_size * (F_EXTRACT( debug_reg, TOPAZ_CR_MTX_RAM_BANKS ) -1) );
++
++ bank_count = (debug_reg & 0xf00) >> 8;
++
++ PSB_DEBUG_GENERAL("TOPAZ: bank size %d, bank count %d, ram size %d\n",
++ bank_size, bank_count, bank_ram_size);
++
++ pnw_topaz_wait_for_register(dev_priv,
++ REG_START_TOPAZ_MTX_HOST(core) + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ MASK_MTX_MTX_MTX_MCM_STAT,
++ MASK_MTX_MTX_MTX_MCM_STAT);
++
++ cur_ram_id = -1;
++ cur_addr = addr;
++ for (lp = 0; lp < size / 4; ++lp) {
++ ram_id = mtx_mem + (cur_addr / bank_size);
++
++ if (cur_ram_id != ram_id) {
++ ram_addr = cur_addr >> 2;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMAI),
++ core);
++
++ cur_ram_id = ram_id;
++ }
++ cur_addr += 4;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER,
++ *(buf_p + lp), core);
++
++ pnw_topaz_wait_for_register(dev_priv,
++ MTX_CR_MTX_RAM_ACCESS_STATUS + REG_START_TOPAZ_MTX_HOST(core),
++ MASK_MTX_MTX_MTX_MCM_STAT,
++ MASK_MTX_MTX_MTX_MCM_STAT);
++ }
++
++ release_mtx_control_from_dash(dev_priv, core);
++ ttm_bo_kunmap(&bo_kmap);
++
++ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
++ return;
++}
++
++int topaz_upload_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec, uint32_t core_id)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct pnw_topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* # refer HLD document */
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK,
++ core_id);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the master and slave firmware by DMA */
++ if(core_id == 0)
++ cur_codec_fw = &topaz_priv->topaz_fw[codec*2];
++ else
++ cur_codec_fw = &topaz_priv->topaz_fw[codec*2+1];
++
++ PSB_DEBUG_GENERAL("Topaz:upload codec by MTX reg %s(%d) text sz=%d data sz=%d"
++ " data location(%d) to core(%d) \n",codec_to_string(codec), codec,
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location, core_id);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size;
++
++ topaz_mtx_upload_by_register(dev, MTX_CORE_CODE_MEM,
++ 0,
++ /*PC_START_ADDRESS - MTX_MEMORY_BASE,*/
++ text_size, cur_codec_fw->text, core_id);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size;
++ data_location = cur_codec_fw->data_location;
++
++ topaz_mtx_upload_by_register(dev, MTX_CORE_DATA_MEM,
++ data_location - MTX_DMA_MEMORY_BASE, data_size,
++ cur_codec_fw->data, core_id);
++
++ return 0;
++}
++
++#endif /* UPLOAD_FW_BY_DMA */
++
++void
++topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
++ uint32_t src_phy_addr, uint32_t offset,
++ uint32_t soc_addr, uint32_t byte_num,
++ uint32_t is_increment, uint32_t is_write) /* is_increment is always 0, so use it as core_id for workaround*/
++{
++ uint32_t dmac_count;
++ uint32_t irq_stat;
++ uint32_t count;
++
++ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
++ /* # check that no transfer is currently in progress and no
++ interrupts are outstanding ?? (why care interrupt) */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
++ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
++ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
++
++ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
++
++ /* clear status of any previous interrupts */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++
++ /* check irq status */
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
++ /* assert(0 == irq_stat); */
++ if (0 != irq_stat)
++ DRM_ERROR("TOPAZ: there is hold up\n");
++
++ /*MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, MTX_DMA_MEMORY_BASE, is_increment);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(4, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(byte_num, MTX_LENGTH), is_increment);*/
++
++ /* per hold - allow HW to sort itself out */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 16);
++ /* clear previous interrupts */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
++ (src_phy_addr + offset));
++ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
++ is_write, DMAC_PWIDTH_32_BIT, byte_num);
++ /* generate an interrupt at the end of transfer */
++ /* count |= MASK_IMG_SOC_TRANSFER_IEN; */
++ /*count |= F_ENCODE(is_write, IMG_SOC_DIR);*/
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
++
++ /* Burst : 2 * 128 bits = 32 bytes*/
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
++ 0, DMAC_BURST_2));
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), MTX_CR_MTX_SYSC_CDMAT + REG_START_TOPAZ_MTX_HOST(is_increment)); /* is_increment here is actually core_id*/
++
++ /*Delay 1 ms to make sure DMAC is ready to start transfer.
++ *Otherwise it will failed randomly*/
++ DRM_UDELAY(1000);
++ /*pnw_error_dump_reg(dev_priv, is_increment);*/
++ /* Finally, rewrite the count register with
++ * the enable bit set to kick off the transfer
++ */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
++
++ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
++
++ return;
++}
++
++void topaz_set_default_regs(struct drm_psb_private *dev_priv)
++{
++ int n, i;
++ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++
++ for (i = 0; i < ((struct pnw_topaz_private *)(dev_priv->topaz_private))->topaz_num_cores; i++)
++ for (n = 0; n < count; n++)
++ MM_WRITE32(topaz_default_regs[n][0] + TOPAZ_CORE_REG_BASE(i) ,
++ topaz_default_regs[n][1],
++ topaz_default_regs[n][2]);
++
++}
++
++void topaz_write_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t core,
++ uint32_t reg,
++ const uint32_t val)
++{
++ uint32_t tmp;
++ get_mtx_control_from_dash(dev_priv, core);
++
++ /* put data into MTX_RW_DATA */
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val, core);
++
++ /* request a write */
++ tmp = reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp, core);
++
++ /* wait for operation finished */
++ pnw_topaz_wait_for_register(dev_priv,
++ REG_START_TOPAZ_MTX_HOST(core) +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ release_mtx_control_from_dash(dev_priv, core);
++}
++
++void topaz_read_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t core,
++ uint32_t reg,
++ uint32_t *ret_val)
++{
++ uint32_t tmp;
++
++ get_mtx_control_from_dash(dev_priv, core);
++
++ /* request a write */
++ tmp = (reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp, core);
++
++ /* wait for operation finished */
++ pnw_topaz_wait_for_register(dev_priv,
++ REG_START_TOPAZ_MTX_HOST(core) +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ /* read */
++ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
++ ret_val, core);
++
++ release_mtx_control_from_dash(dev_priv, core);
++}
++
++void get_mtx_control_from_dash(struct drm_psb_private *dev_priv, uint32_t core)
++{
++ int debug_reg_slave_val;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ int count = 0;
++
++
++ /* GetMTXControlFromDash */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
++ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT), core);
++ do {
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ &debug_reg_slave_val, core);
++ count++;
++ } while (((debug_reg_slave_val & 0x18) != 0) && count < 50000);
++
++ if(count >= 50000)
++ PSB_DEBUG_GENERAL("TOPAZ: timeout in get_mtx_control_from_dash\n");
++
++ /* save access control */
++ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ &topaz_priv->topaz_dash_access_ctrl, core);
++}
++
++void release_mtx_control_from_dash(struct drm_psb_private *dev_priv,
++ uint32_t core)
++{
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* restore access control */
++ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ topaz_priv->topaz_dash_access_ctrl, core);
++
++ /* release bus */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE), core);
++}
++
++void pnw_topaz_mmu_hwsetup(struct drm_psb_private *dev_priv, uint32_t core_id)
++{
++ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
++
++ PSB_DEBUG_GENERAL("TOPAZ: core (%d) MMU set up.\n", core_id);
++
++ /* bypass all request while MMU is being configured */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS)
++ | F_ENCODE(1, TOPAZ_CR_MMU_BYPASS_DMAC), core_id);
++
++ /* set MMU hardware at the page table directory */
++ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
++ "into MMU_DIR_LIST0/1\n", pd_addr);
++ /*There's two of these (0) and (1).. only 0 is currently used*/
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr, core_id);
++// TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0, core_id);
++
++ /* setup index register, all pointing to directory bank 0 */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0, core_id);
++
++ /* now enable MMU access for all requestors */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ F_ENCODE(0, TOPAZ_CR_MMU_BYPASS)
++ | F_ENCODE(0, TOPAZ_CR_MMU_BYPASS_DMAC), core_id);
++}
++
++void pnw_topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
++{
++ uint32_t mmu_control;
++
++ if (dev_priv->topaz_disabled)
++ return;
++
++ PSB_DEBUG_GENERAL("TOPAZ: pnw_topaz_mmu_flushcache\n");
++#if 0
++ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
++ " so flush using the master core\n");
++#endif
++ /* XXX: disable interrupt */
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control, 0);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
++ /*mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);*/
++
++#if 0
++ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
++ "still operating afterwards even if not cleared,\n"
++ "but may want to replace with MMU_FLUSH?\n");
++#endif
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control, 0);
++
++ /* clear it */
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
++ //mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control, 0);
++}
++
++#if DEBUG_FUNCTION
++
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t sync_cmd[3];
++ struct topaz_cmd_header *cmd_hdr;
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 1000;
++ uint32_t clr_flag;
++
++ cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0];
++
++ /* reset sync area */
++ *sync_p = 0;
++
++ /* insert a SYNC command here */
++ cmd_hdr->id = MTX_CMDID_SYNC;
++ cmd_hdr->size = 3;
++ cmd_hdr->seq = seq;
++
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x,"
++ "sync_seq=0x%08x\n", seq, sync_seq);
++
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout, expect sync seq 0x%08x,"
++ "actual 0x%08x\n", sync_seq, *sync_p);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC succeed, sync seq=0x%08x\n", *sync_p);
++ PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n");
++
++ clr_flag = pnw_topaz_queryirq(dev);
++ pnw_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ bool is_iomem;
++ struct ttm_buffer_object *test_obj;
++ struct ttm_bo_kmap_obj test_kmap;
++ unsigned int *test_adr;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &test_obj);
++ if (ret) {
++ DRM_ERROR("failed create test object buffer\n");
++ return -1;
++ }
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ test_obj->offset, &pfn);
++ if (ret) {
++ DRM_ERROR("failed to get pfn from virtual\n");
++ return -1;
++ }
++
++ PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++ *test_adr = 0xff55;
++ ttm_bo_kunmap(&test_kmap);
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (seq << 16);
++ sync_cmd[1] = test_obj->offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++
++ while (count && *test_adr != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*test_adr != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *test_adr);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr);
++ ttm_bo_kunmap(&test_kmap);
++ ttm_bo_unref(&test_obj);
++
++ return 0;
++}
++
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ struct page *p;
++ uint32_t *v;
++/* uint32_t offset = 0xd0000000; */
++
++ p = alloc_page(GFP_DMA32);
++ if (!p) {
++ DRM_ERROR("Topaz:Failed allocating page\n");
++ return -1;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++ pfn = (offset >> PAGE_SHIFT);
++ kunmap(p);
++
++ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
++ &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ DRM_ERROR("Topaz:Failed inserting mmu page\n");
++ return -1;
++ }
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (0x5b << 16);
++ sync_cmd[1] = pfn << PAGE_SHIFT;
++ sync_cmd[2] = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ v = kmap(p);
++ while (count && *v != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*v != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *v);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
++ kunmap(p);
++
++ return 0;
++}
++
++static int topaz_test_null(struct drm_device *dev, uint32_t seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_cmd_header null_cmd;
++ uint32_t clr_flag;
++
++ /* XXX: here we finished firmware setup....
++ * using a NULL command to verify the
++ * correctness of firmware
++ */
++
++ null_cmd.id = MTX_CMDID_NULL;
++ null_cmd.size = 1;
++ null_cmd.seq = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ DRM_UDELAY(1000); /* wait to finish */
++
++ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
++ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
++ seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
++ WB_CCB_CTRL_RINDEX(dev_priv));
++
++ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
++
++ clr_flag = pnw_topaz_queryirq(dev);
++ pnw_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++
++
++/*
++ * this function will test whether the mmu is correct:
++ * it get a drm_buffer_object and use CMD_SYNC to write
++ * certain value into this buffer.
++ */
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ unsigned long real_pfn;
++ int ret;
++
++ /* topaz_mmu_flush(dev); */
++ topaz_test_sync(dev, 0x55, sync_value);
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ topaz_priv->topaz_sync_offset, &real_pfn);
++ if (ret != 0) {
++ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
++ return;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
++ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
++ topaz_priv->topaz_sync_offset, real_pfn, sync_value);
++}
++
++void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
++{
++ int n;
++ int count;
++ uint32_t core;
++ struct pnw_topaz_private *topaz_priv = (struct pnw_topaz_private*)dev_priv->topaz_private;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (core = 0; core < topaz_priv->topaz_num_cores; core++)
++ {
++ for (n = 0; n < count; n++, ++data)
++ MM_READ32(topaz_default_regs[n][0] + TOPAZ_CORE_REG_BASE(core) ,
++ topaz_default_regs[n][1],
++ data);
++ }
++}
++
++void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ *data);
++
++}
++
++#endif
++
++int pnw_topaz_restore_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t reg_val, core;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (!topaz_priv->topaz_mtx_saved)
++ return -1;
++
++ if (topaz_priv->topaz_num_cores > MAX_TOPAZ_CORES)
++ {
++ DRM_ERROR("TOPAZ: Invalid core numbers: %d\n", topaz_priv->topaz_num_cores);
++ return -1;
++ }
++
++ for (core = 0; core < topaz_priv->topaz_num_cores; core++)
++ {
++ if (topaz_priv->topaz_mtx_data_mem[core] == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to restore context without "
++ "space allocated, return directly without restore\n");
++ return -1;
++ }
++
++ /* turn on mtx clocks */
++ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val, core);
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE), core);
++
++ /* reset mtx */
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK, core);
++ DRM_UDELAY(6000);
++
++ pnw_topaz_mmu_hwsetup(dev_priv, core);
++ /* upload code, restore mtx data */
++ mtx_dma_write(dev, core);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state[core];
++ /* restore register */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, core, 0x1 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, core, 0x2 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, core, 0x3 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, core, 0x4 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_write_core_reg(dev_priv, core, 0x5 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, core, 0x7 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* turn on MTX */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK, core);
++ }
++ topaz_priv->topaz_mtx_saved = 0;
++
++ return 0;
++}
++
++int pnw_topaz_save_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t *mtx_reg_state;
++ int i;
++ uint32_t core;
++ struct pnw_topaz_codec_fw *cur_codec_fw;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (topaz_priv->topaz_num_cores > MAX_TOPAZ_CORES)
++ {
++ DRM_ERROR("TOPAZ: Invalid core numbers: %d\n", topaz_priv->topaz_num_cores);
++ return -1;
++ }
++
++ for ( core = 0; core < topaz_priv->topaz_num_cores; core++)
++ {
++ if (topaz_priv->topaz_mtx_data_mem[core] == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without space "
++ "allocated, return directly without save\n");
++ return -1;
++ }
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without firmware "
++ "uploaded\n");
++ return -1;
++ }
++
++ pnw_topaz_wait_for_register(dev_priv,
++ REG_START_TOPAZ_MTX_HOST(core) + MTX_CORE_CR_MTX_TXRPT_OFFSET,
++ TXRPT_WAITONKICK_VALUE,
++ 0xffffffff);
++
++ /* stop mtx */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK, core);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state[core];
++
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, core, 0x1 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, core, 0x2 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, core, 0x3 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, core, 0x4 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_read_core_reg(dev_priv, core, 0x5 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, core, 0x7 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* save mtx data memory */
++ if (0 == core)
++ /*master core*/
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec*2];
++ else
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec*2+1];
++
++ mtx_dma_read(dev, core, cur_codec_fw->data_location +
++ PC_START_ADDRESS - MTX_DMA_MEMORY_BASE,
++ topaz_priv->cur_mtx_data_size[core]);
++
++ /* turn off mtx clocks */
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE, core);
++ }
++
++ topaz_priv->topaz_mtx_saved = 1;
++
++ return 0;
++}
++
++void mtx_dma_read(struct drm_device *dev, uint32_t core, uint32_t source_addr, uint32_t size)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct ttm_buffer_object *target;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* setup mtx DMAC registers to do transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr, core);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(4, MTX_BURSTSIZE) |
++ F_ENCODE(1, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(size, MTX_LENGTH), core);
++
++ /* give the DMAC access to the host memory via BIF */
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, 0);*/
++
++ target = topaz_priv->topaz_mtx_data_mem[core];
++ /* transfert the data */
++ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT,
++ size, 0, 1);
++
++ /* wait for it transfer */
++ pnw_topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++ /* give access back to topaz core */
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, 0);*/
++}
++
++void pnw_dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
++ uint32_t soc_addr, uint32_t bytes_num,
++ int increment, int rnw)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t count_reg;
++ uint32_t irq_state;
++
++ /* check no transfer is in progress */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
++ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
++ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
++ "save mtx data\n");
++ /* FIXME: how to handle this error */
++ return;
++ }
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* cleare irq state */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
++ if (0 != irq_state) {
++ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
++ return;
++ }
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
++ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
++ DMAC_PWIDTH_32_BIT, rnw,
++ DMAC_PWIDTH_32_BIT, bytes_num);
++ /* generate an interrupt at end of transfer */
++ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
++ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
++ DMAC_BURST_2));
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with the enable
++ * bit set to kick off the transfer */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
++ count_reg | MASK_IMG_SOC_EN);
++}
++
++void mtx_dma_write(struct drm_device *dev, uint32_t core)
++{
++ struct pnw_topaz_codec_fw *cur_codec_fw;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (core == 0)/*for master core*/
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec * 2];
++ else
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec * 2 + 1];
++
++ /* upload code */
++ /* setup mtx DMAC registers to recieve transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, PC_START_ADDRESS, core);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(4, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH), core);
++
++ /* give DMAC access to host memory */
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, 0);*/
++
++ /* transfer code */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
++ 0, 0);
++ /* wait finished */
++ pnw_topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* setup mtx start recieving data */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, PC_START_ADDRESS +
++ (cur_codec_fw->data_location) - MTX_DMA_MEMORY_BASE, core);
++
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(4, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(topaz_priv->cur_mtx_data_size[core], MTX_LENGTH), core);
++
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);*/
++
++ /* transfer data */
++ topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem[core]->offset,
++ 0, MTX_CR_MTX_SYSC_CDMAT,
++ topaz_priv->cur_mtx_data_size[core],
++ 0, 0);
++ /* wait finished */
++ pnw_topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* give access back to Topaz Core */
++ /*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, 0);*/
++ return;
++}
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_bl.c
+@@ -0,0 +1,270 @@
++/*
++ * psb backlight using HAL
++ *
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Eric Knopp
++ *
++ */
++
++#include <linux/backlight.h>
++#include <linux/version.h>
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_bios.h"
++#include "psb_powermgmt.h"
++
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BRIGHTNESS_MIN_LEVEL 1
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++#define BLC_ADJUSTMENT_MAX 100
++
++#define PSB_BLC_PWM_PRECISION_FACTOR 10
++#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
++#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
++
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
++
++static int psb_brightness;
++static struct backlight_device *psb_backlight_device;
++static u8 blc_brightnesscmd;
++u8 blc_pol;
++u8 blc_type;
++
++
++int psb_set_brightness(struct backlight_device *bd)
++{
++ u32 blc_pwm_ctl;
++ u32 max_pwm_blc;
++
++ struct drm_device *dev =
++ (struct drm_device *)bl_get_data(psb_backlight_device);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ int level = bd->props.brightness;
++
++ DRM_DEBUG("backlight level set to %d\n", level);
++
++ /* Perform value bounds checking */
++ if (level < BRIGHTNESS_MIN_LEVEL)
++ level = BRIGHTNESS_MIN_LEVEL;
++
++ if (IS_POULSBO(dev)) {
++ psb_intel_lvds_set_brightness(dev, level);
++ psb_brightness = level;
++ return 0;
++ }
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ /* Calculate and set the brightness value */
++ max_pwm_blc = REG_READ(BLC_PWM_CTL) >>
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
++ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ /* Adjust the backlight level with the percent in
++ * dev_priv->blc_adj1;
++ */
++ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
++ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
++
++ /* Adjust the backlight level with the percent in
++ * dev_priv->blc_adj2;
++ */
++ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
++ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
++
++
++ if (blc_pol == BLC_POLARITY_INVERSE)
++ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
++
++ /* force PWM bit on */
++ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
++ blc_pwm_ctl);
++
++ /* printk("***backlight brightness = %i\n", level); */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* cache the brightness for later use */
++ psb_brightness = level;
++ return 0;
++}
++
++int psb_get_brightness(struct backlight_device *bd)
++{
++ /* return locally cached var instead of HW read (due to DPST etc.) */
++ return psb_brightness;
++}
++
++struct backlight_ops psb_ops = {
++ .get_brightness = psb_get_brightness,
++ .update_status = psb_set_brightness,
++};
++
++int psb_backlight_init(struct drm_device *dev)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ unsigned long CoreClock;
++ /* u32 bl_max_freq; */
++ /* unsigned long value; */
++ u16 bl_max_freq;
++ uint32_t value;
++ uint32_t clock;
++ uint32_t blc_pwm_precision_factor;
++
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ psb_backlight_device = backlight_device_register("psb-bl",
++ NULL, (void *)dev, &psb_ops);
++#else
++ struct backlight_properties props;
++ memset(&props, 0, sizeof(struct backlight_properties));
++ props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++
++ psb_backlight_device = backlight_device_register("psb-bl",
++ NULL, (void *)dev, &psb_ops, &props);
++#endif
++ if (IS_ERR(psb_backlight_device))
++ return PTR_ERR(psb_backlight_device);
++
++ if (IS_MRST(dev)) {
++ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
++ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
++
++ /* this needs to come from VBT when available */
++ bl_max_freq = 256;
++ /* this needs to be set elsewhere */
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
++
++ if (dev_priv->sku_83)
++ CoreClock = 166;
++ else if (dev_priv->sku_100)
++ CoreClock = 200;
++ else if (dev_priv->sku_100L)
++ CoreClock = 100;
++ else
++ return 1;
++ } else {
++ /* get bl_max_freq and pol from dev_priv*/
++ if (!dev_priv->lvds_bl) {
++ DRM_ERROR("Has no valid LVDS backlight info\n");
++ return 1;
++ }
++ bl_max_freq = dev_priv->lvds_bl->freq;
++ blc_pol = dev_priv->lvds_bl->pol;
++ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
++ blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd;
++ blc_type = dev_priv->lvds_bl->type;
++
++ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
++ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
++
++ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
++ pci_read_config_dword(pci_root, 0xD4, &clock);
++
++ switch (clock & 0x07) {
++ case 0:
++ CoreClock = 100;
++ break;
++ case 1:
++ CoreClock = 133;
++ break;
++ case 2:
++ CoreClock = 150;
++ break;
++ case 3:
++ CoreClock = 178;
++ break;
++ case 4:
++ CoreClock = 200;
++ break;
++ case 5:
++ case 6:
++ case 7:
++ CoreClock = 266; break; /* was falling through into default -> spurious error */
++ default:
++ return 1;
++ }
++ } /*end if(IS_MRST(dev))*/
++
++ value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++ value *= blc_pwm_precision_factor;
++ value /= bl_max_freq;
++ value /= blc_pwm_precision_factor;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ if (IS_MRST(dev)) {
++ if (value >
++ (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
++ return 2;
++ else {
++ REG_WRITE(BLC_PWM_CTL2,
++ (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++ REG_WRITE(BLC_PWM_CTL, value |
++ (value <<
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT));
++ }
++ } else {
++ if (
++ value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
++ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
++ return 2;
++ else {
++ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++ REG_WRITE(BLC_PWM_CTL,
++ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++ (value));
++ }
++ } /*end if(IS_MRST(dev))*/
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
++ psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++ backlight_update_status(psb_backlight_device);
++#endif
++ return 0;
++}
++
++void psb_backlight_exit(void)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ psb_backlight_device->props.brightness = 0;
++ backlight_update_status(psb_backlight_device);
++ backlight_device_unregister(psb_backlight_device);
++#endif
++ return;
++}
++
++#include "psb_bl2.c"
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_bl2.c
+@@ -0,0 +1,165 @@
++/*
++ * psb backlight using HAL
++ *
++ * Copyright (c) 2009 Eric Knopp
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++static int mdfld_brightness;
++static int mdfld_blc_pol;
++static struct backlight_device *mdfld_backlight_device = 0;
++
++int mdfld_set_brightness(struct backlight_device *bd)
++{
++ struct drm_device *dev = 0;
++ DRM_DRIVER_PRIVATE_T *dev_priv = 0;
++#if 0 /* MDFLD_PO_JLIU7 */
++ u32 blc_pwm_ctl;
++ u32 max_pwm_blc;
++
++#endif /* MDFLD_PO_JLIU7 */
++
++ int level = bd->props.brightness;
++
++ PSB_DEBUG_ENTRY("backlight level = 0x%x. \n", level);
++
++ if (mdfld_backlight_device == 0)
++ return 0;
++
++ dev = (struct drm_device *)bl_get_data(mdfld_backlight_device);
++ dev_priv = dev->dev_private;
++
++ /* Perform value bounds checking */
++ if (level < BRIGHTNESS_MIN_LEVEL)
++ level = BRIGHTNESS_MIN_LEVEL;
++
++ /* cache the brightness for later use */
++ mdfld_brightness = level;
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++#if 0 /* MDFLD_PO_JLIU7 */
++/* FIXME_MDFLD JLIU7 REG_READ2 */
++ /* Calculate and set the brightness value */
++ max_pwm_blc = REG_READ2(BLC_PWM_CTL) >>
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
++ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ if (mdfld_blc_pol == BLC_POLARITY_INVERSE)
++ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
++
++/* FIXME_MDFLD JLIU7 REG_READ2 */
++ /* force PWM bit on */
++ REG_WRITE2(BLC_PWM_CTL2, (0x80000000 | REG_READ2(BLC_PWM_CTL2)));
++ REG_WRITE2(BLC_PWM_CTL,
++ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
++ blc_pwm_ctl);
++
++ /* printk("***backlight brightness = %i\n", level); */
++#endif /* MDFLD_PO_JLIU7 */
++
++#if MDFLD_JLIU7_LABC
++ if ((dev_priv->dbi_panel_on) || (dev_priv->dpi_panel_on))
++ mdfld_dsi_brightness_control(dev, 0, level);
++ if ((dev_priv->dbi_panel_on2) || (dev_priv->dpi_panel_on2))
++ mdfld_dsi_brightness_control(dev, 2, level);
++#endif /* MDFLD_JLIU7_LABC */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ return 0;
++}
++
++int mdfld_get_brightness(struct backlight_device *bd)
++{
++ PSB_DEBUG_ENTRY("mdfld_brightness = 0x%x \n", mdfld_brightness);
++
++ /* return locally cached var instead of HW read (due to DPST etc.) */
++ return mdfld_brightness;
++}
++
++struct backlight_ops mdfld_ops = {
++ .get_brightness = mdfld_get_brightness,
++ .update_status = mdfld_set_brightness,
++};
++
++int mdfld_backlight_init(struct drm_device *dev)
++{
++/*FIXME JLIU7_MDFLD check for MDFLD */
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ u16 bl_max_freq;
++#if 0 /* MDFLD_PO_JLIU7 */
++ unsigned long CoreClock;
++ // u32 bl_max_freq;
++ // unsigned long value;
++ uint32_t value;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++#endif /* MDFLD_PO_JLIU7 */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ mdfld_backlight_device = backlight_device_register("psb-bl",
++ NULL, (void *)dev, &mdfld_ops);
++#else
++ struct backlight_properties props;
++ memset(&props, 0, sizeof(struct backlight_properties));
++ props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++
++ mdfld_backlight_device = backlight_device_register("psb-bl",
++ NULL, (void *)dev, &mdfld_ops, &props);
++#endif
++ if (IS_ERR(mdfld_backlight_device))
++ return PTR_ERR(mdfld_backlight_device);
++
++ /* HACK HACK HACK */
++ bl_max_freq = 256; /* this needs to come from VBT when available */
++ mdfld_blc_pol = BLC_POLARITY_NORMAL; /* this needs to be set elsewhere */
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ if (dev_priv->sku_83)
++ CoreClock = 166;
++ else if (dev_priv->sku_100)
++ CoreClock = 200;
++ else if (dev_priv->sku_100L)
++ CoreClock = 100;
++ else
++ return 1;
++
++ value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++ value *= BLC_PWM_PRECISION_FACTOR;
++ value /= bl_max_freq;
++ value /= BLC_PWM_PRECISION_FACTOR;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
++ return 2;
++ else {
++ REG_WRITE2(BLC_PWM_CTL2, (0x80000000 | REG_READ2(BLC_PWM_CTL2)));
++ REG_WRITE2(BLC_PWM_CTL, value |
++ (value << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT));
++ }
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++#endif /* MDFLD_PO_JLIU7 */
++ mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
++ mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++ backlight_update_status(mdfld_backlight_device);
++
++ PSB_DEBUG_ENTRY("\n");
++#endif
++ return 0;
++}
++
++void mdfld_backlight_exit(void)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ PSB_DEBUG_ENTRY("\n");
++
++ mdfld_backlight_device->props.brightness = 0;
++ backlight_update_status(mdfld_backlight_device);
++ backlight_device_unregister(mdfld_backlight_device);
++#endif
++ return;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_buffer.c
+@@ -0,0 +1,379 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++#include "ttm/ttm_placement_common.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_fence_api.h"
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_schedule.h"
++
++#define DRM_MEM_TTM 26
++
++struct drm_psb_ttm_backend {
++ struct ttm_backend base;
++ struct page **pages;
++ unsigned int desired_tile_stride;
++ unsigned int hw_tile_stride;
++ int mem_type;
++ unsigned long offset;
++ unsigned long num_pages;
++};
++
++/*
++ * MSVDX/TOPAZ GPU virtual space looks like this
++ * (We currently use only one MMU context).
++ * PSB_MEM_MMU_START: from 0x40000000, for generic buffers
++ * TTM_PL_CI: from 0xe0000000+half GTT space, for camera/video buffer sharing
++ * TTM_PL_RAR: from TTM_PL_CI, for RAR/video buffer sharing
++ * TTM_PL_TT: from TTM_PL_RAR, for buffers need to mapping into GTT
++ */
++static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man)
++{
++
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct psb_gtt *pg = dev_priv->pg;
++
++ switch (type) {
++ case TTM_PL_SYSTEM:
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_CACHED;
++ break;
++ case DRM_PSB_MEM_MMU:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_MMU_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case TTM_PL_CI:
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->io_offset = dev_priv->ci_region_start;
++ man->io_size = pg->ci_stolen_size;
++ man->gpu_offset = pg->mmu_gatt_start;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ break;
++ case TTM_PL_RAR: /* Unmappable RAR memory */
++ man->io_offset = dev_priv->rar_region_start;
++ man->io_size = pg->rar_stolen_size;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ man->gpu_offset = pg->mmu_gatt_start;
++ break;
++ case TTM_PL_TT: /* Mappable GATT memory */
++ man->io_offset = pg->gatt_start;
++ man->io_size = pg->gatt_pages << PAGE_SHIFT;
++ man->io_addr = NULL;
++#ifdef PSB_WORKING_HOST_MMU_ACCESS
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++#else
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++#endif
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ man->gpu_offset = pg->mmu_gatt_start;
++ break;
++ default:
++ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
++{
++ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
++
++ /* all buffers evicted to system memory */
++ return cur_placement | TTM_PL_FLAG_SYSTEM;
++}
++
++static int psb_invalidate_caches(struct ttm_bo_device *bdev,
++ uint32_t placement)
++{
++ return 0;
++}
++
++static int psb_move_blit(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ BUG();
++ return 0;
++}
++
++/*
++ * Flip destination ttm into GATT,
++ * then blit and subsequently move out again.
++ */
++
++static int psb_move_flip(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg tmp_mem;
++ int ret;
++
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
++
++ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
++ if (ret)
++ return ret;
++ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out_cleanup:
++ if (tmp_mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(tmp_mem.mm_node);
++ tmp_mem.mm_node = NULL;
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int psb_move(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if ((old_mem->mem_type == TTM_PL_RAR) ||
++ (new_mem->mem_type == TTM_PL_RAR)) {
++ ttm_bo_free_old_node(bo);
++ *old_mem = *new_mem;
++ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
++ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
++ int ret = psb_move_flip(bo, evict, interruptible,
++ no_wait, new_mem);
++ if (unlikely(ret != 0)) {
++ if (ret == -ERESTART)
++ return ret;
++ else
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ } else {
++ if (psb_move_blit(bo, evict, no_wait, new_mem))
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ return 0;
++}
++
++static int drm_psb_tbe_populate(struct ttm_backend *backend,
++ unsigned long num_pages,
++ struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = pages;
++ return 0;
++}
++
++static int drm_psb_tbe_unbind(struct ttm_backend *backend)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
++
++ PSB_DEBUG_RENDER("MMU unbind.\n");
++
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++ }
++
++ psb_mmu_remove_pages(pd, psb_be->offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++
++ return 0;
++}
++
++static int drm_psb_tbe_bind(struct ttm_backend *backend,
++ struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
++ int type;
++ int ret = 0;
++
++ psb_be->mem_type = bo_mem->mem_type;
++ psb_be->num_pages = bo_mem->num_pages;
++ psb_be->desired_tile_stride = 0;
++ psb_be->hw_tile_stride = 0;
++ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
++ man->gpu_offset;
++
++ type =
++ (bo_mem->
++ flags & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
++
++ PSB_DEBUG_RENDER("MMU bind.\n");
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
++ gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ }
++
++ ret = psb_mmu_insert_pages(pd, psb_be->pages,
++ psb_be->offset, psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ if (ret)
++ goto out_err;
++
++ return 0;
++out_err:
++ drm_psb_tbe_unbind(backend);
++ return ret;
++
++}
++
++static void drm_psb_tbe_clear(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = NULL;
++ return;
++}
++
++static void drm_psb_tbe_destroy(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ if (backend)
++ kfree(psb_be);
++}
++
++static struct ttm_backend_func psb_ttm_backend = {
++ .populate = drm_psb_tbe_populate,
++ .clear = drm_psb_tbe_clear,
++ .bind = drm_psb_tbe_bind,
++ .unbind = drm_psb_tbe_unbind,
++ .destroy = drm_psb_tbe_destroy,
++};
++
++static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
++{
++ struct drm_psb_ttm_backend *psb_be;
++
++ psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
++ if (!psb_be)
++ return NULL;
++ psb_be->pages = NULL;
++ psb_be->base.func = &psb_ttm_backend;
++ psb_be->base.bdev = bdev;
++ return &psb_be->base;
++}
++
++/*
++ * Use this memory type priority if no eviction is needed.
++ */
++static uint32_t psb_mem_prios[] = {
++ TTM_PL_CI,
++ TTM_PL_RAR,
++ TTM_PL_TT,
++ DRM_PSB_MEM_MMU,
++ TTM_PL_SYSTEM
++};
++
++/*
++ * Use this memory type priority if need to evict.
++ */
++static uint32_t psb_busy_prios[] = {
++ TTM_PL_TT,
++ TTM_PL_CI,
++ TTM_PL_RAR,
++ DRM_PSB_MEM_MMU,
++ TTM_PL_SYSTEM
++};
++
++
++struct ttm_bo_driver psb_ttm_bo_driver = {
++ .mem_type_prio = psb_mem_prios,
++ .mem_busy_prio = psb_busy_prios,
++ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
++ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
++ .create_ttm_backend_entry = &drm_psb_tbe_init,
++ .invalidate_caches = &psb_invalidate_caches,
++ .init_mem_type = &psb_init_mem_type,
++ .evict_flags = &psb_evict_mask,
++ .move = &psb_move,
++ .verify_access = &psb_verify_access,
++ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
++ .sync_obj_wait = &ttm_fence_sync_obj_wait,
++ .sync_obj_flush = &ttm_fence_sync_obj_flush,
++ .sync_obj_unref = &ttm_fence_sync_obj_unref,
++ .sync_obj_ref = &ttm_fence_sync_obj_ref
++};
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_dpst.c
+@@ -0,0 +1,254 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#include "psb_umevents.h"
++#include "psb_dpst.h"
++/**
++ * inform the kernel of the work to be performed and related function.
++ *
++ */
++DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq);
++/**
++ * psb_dpst_notify_change_um - notify user mode of hotplug changes
++ *
++ * @event: dpst event to notify user mode of
++ * @state: dpst state struct to get workqueue from
++ *
++ */
++int psb_dpst_notify_change_um(enum dpst_event_enum event,
++ struct dpst_state *state)
++{
++ if (state == NULL)
++ return IRQ_HANDLED;
++
++ state->dpst_change_wq_data.dev_name_arry_rw_status
++ [state->dpst_change_wq_data.dev_name_write] =
++ DRM_DPST_READY_TO_READ;
++ state->dpst_change_wq_data.dpst_events
++ [state->dpst_change_wq_data.dev_name_write] =
++ event;
++ if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->dpst_change_wq_data.dev_name_write++;
++ if (state->dpst_change_wq_data.dev_name_write ==
++ state->dpst_change_wq_data.dev_name_read) {
++ state->dpst_change_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->dpst_change_wq_data.dev_name_write >
++ DRM_DPST_RING_DEPTH_MAX) {
++ state->dpst_change_wq_data.dev_name_write = 0;
++ state->dpst_change_wq_data.dev_name_write_wrap = 1;
++ }
++ state->dpst_change_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work));
++ return IRQ_HANDLED;
++}
++/*EXPORT_SYMBOL(psb_dpst_notify_change_um); */
++/**
++ *
++ * psb_dpst_create_and_notify_um - create and notify user mode of new dev
++ *
++ * @name: name to give for new event / device
++ * @state: dpst state instances to associate event with
++ *
++ */
++struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
++ struct dpst_state *state)
++{
++ return psb_create_umevent_obj(name, state->list);
++
++}
++/*EXPORT_SYMBOL(psb_dpst_create_and_notify_um); */
++/**
++ * psb_dpst_device_pool_create_and_init - make new hotplug device pool
++ *
++ * @parent_kobj - parent kobject to associate dpst kset with
++ * @state - dpst state instance to associate list with
++ *
++ */
++struct umevent_list *psb_dpst_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct dpst_state *state)
++{
++ struct umevent_list *new_hotplug_dev_list = NULL;
++ new_hotplug_dev_list = psb_umevent_create_list();
++ if (new_hotplug_dev_list)
++ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
++ "psb_dpst");
++
++ state->dpst_wq = create_singlethread_workqueue("dpst-wq");
++
++ if (!state->dpst_wq)
++ return NULL;
++
++ INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq);
++
++ state->dpst_change_wq_data.dev_name_read = 0;
++ state->dpst_change_wq_data.dev_name_write = 0;
++ state->dpst_change_wq_data.dev_name_write_wrap = 0;
++ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
++
++ memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_DPST_RING_DEPTH);
++
++ return new_hotplug_dev_list;
++}
++/*EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init); */
++/**
++ * psb_dpst_init - init dpst subsystem
++ * @parent_kobj - parent kobject to associate dpst state with
++ *
++ */
++struct dpst_state *psb_dpst_init(struct kobject *parent_kobj)
++{
++ struct dpst_state *state;
++ struct umevent_obj *working_umevent;
++
++ state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL);
++ printk(KERN_ALERT "after kzalloc\n");
++ state->list = NULL;
++ state->list = psb_dpst_device_pool_create_and_init(
++ parent_kobj,
++ state);
++ working_umevent =
++ psb_dpst_create_and_notify_um("init",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_INIT_COMPLETE] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("hist_int",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_HIST_INTERRUPT] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("term",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_TERMINATE] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("phase_done",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_PHASE_COMPLETE] = &(working_umevent->head);
++
++ return state;
++}
++/*EXPORT_SYMBOL(psb_dpst_init); */
++/**
++ * psb_dpst_device_pool_destroy - destroy all dpst related resources
++ *
++ * @state: dpst state instance to destroy
++ *
++ */
++void psb_dpst_device_pool_destroy(struct dpst_state *state)
++{
++ int i;
++ struct umevent_list *list;
++ struct umevent_obj *umevent_test;
++ list = state->list;
++ flush_workqueue(state->dpst_wq);
++ destroy_workqueue(state->dpst_wq);
++ for (i = 0; i < DRM_DPST_MAX_NUM_EVENTS; i++) {
++ umevent_test = list_entry(
++ (state->dpst_change_wq_data.dev_umevent_arry[i]),
++ struct umevent_obj, head);
++ state->dpst_change_wq_data.dev_umevent_arry[i] = NULL;
++ }
++ psb_umevent_cleanup(list);
++ kfree(state);
++}
++/*EXPORT_SYMBOL(psb_dpst_device_pool_destroy); */
++/**
++ * psb_dpst_dev_change_wq - change workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_dpst_dev_change_wq(struct work_struct *work)
++{
++ struct dpst_disp_workqueue_data *wq_data;
++ int curr_event_index;
++ wq_data = to_dpst_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++/*EXPORT_SYMBOL(psb_dpst_dev_change_wq); */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_dpst.h
+@@ -0,0 +1,98 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#ifndef _PSB_DPST_H_
++#define _PSB_DPST_H_
++/**
++ * required includes
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * dpst event enumeration
++ *
++ */
++enum dpst_event_enum {
++ DPST_EVENT_INIT_COMPLETE,
++ DPST_EVENT_HIST_INTERRUPT,
++ DPST_EVENT_TERMINATE,
++ DPST_EVENT_PHASE_COMPLETE,
++ DPST_MAX_EVENT
++};
++/**
++ * dpst specific defines
++ *
++ */
++#define DRM_DPST_RING_DEPTH 256
++#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1)
++#define DRM_DPST_READY_TO_READ 1
++#define DRM_DPST_READ_COMPLETE 2
++#define DRM_DPST_MAX_NUM_EVENTS (DPST_MAX_EVENT)
++/**
++ * dpst workqueue data struct.
++ */
++struct dpst_disp_workqueue_data {
++ struct work_struct work;
++ const char *dev_name;
++ int dev_name_write;
++ int dev_name_read;
++ int dev_name_write_wrap;
++ int dev_name_read_write_wrap_ack;
++ enum dpst_event_enum dpst_events[DRM_DPST_RING_DEPTH];
++ int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH];
++ struct umevent_list *hotplug_dev_list;
++ struct list_head *dev_umevent_arry[DRM_DPST_MAX_NUM_EVENTS];
++};
++/**
++ * dpst state structure
++ *
++ */
++struct dpst_state {
++ struct workqueue_struct *dpst_wq;
++ struct dpst_disp_workqueue_data dpst_change_wq_data;
++ struct umevent_list *list;
++};
++/**
++ * main interface function prototytpes for dpst support.
++ *
++ */
++extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj);
++extern int psb_dpst_notify_change_um(enum dpst_event_enum event,
++ struct dpst_state *state);
++extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
++ struct dpst_state *state);
++extern struct umevent_list *psb_dpst_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct dpst_state *state);
++extern void psb_dpst_device_pool_destroy(struct dpst_state *state);
++/**
++ * to go back and forth between work struct and workqueue data
++ *
++ */
++#define to_dpst_disp_workqueue_data(x) \
++ container_of(x, struct dpst_disp_workqueue_data, work)
++
++/**
++ * function prototypes for workqueue implementation
++ *
++ */
++extern void psb_dpst_dev_change_wq(struct work_struct *work);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_drm.h
+@@ -0,0 +1,661 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRM_H_
++#define _PSB_DRM_H_
++
++#if defined(__linux__) && !defined(__KERNEL__)
++#include<stdint.h>
++#include <linux/types.h>
++#include "drm_mode.h"
++#endif
++
++#include "ttm/ttm_fence_user.h"
++#include "ttm/ttm_placement_user.h"
++
++/*
++ * Menlow/MRST graphics driver package version
++ * a.b.c.xxxx
++ * a - Product Family: 5 - Linux
++ * b - Major Release Version: 0 - non-Gallium (Unbuntu);
++ * 1 - Gallium (Moblin2)
++ * c - Hotfix Release
++ * xxxx - Graphics internal build #
++ */
++#define PSB_PACKAGE_VERSION "5.3.0.32L.0014"
++
++#define DRM_PSB_SAREA_MAJOR 0
++#define DRM_PSB_SAREA_MINOR 2
++#define PSB_FIXED_SHIFT 16
++
++
++#define PSB_NUM_PIPE 4
++
++/*
++ * Public memory types.
++ */
++
++#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
++#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
++
++typedef int32_t psb_fixed;
++typedef uint32_t psb_ufixed;
++
++static inline int32_t psb_int_to_fixed(int a)
++{
++ return a * (1 << PSB_FIXED_SHIFT);
++}
++
++static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
++{
++ return a << PSB_FIXED_SHIFT;
++}
++
++/*Status of the command sent to the gfx device.*/
++typedef enum {
++ DRM_CMD_SUCCESS,
++ DRM_CMD_FAILED,
++ DRM_CMD_HANG
++} drm_cmd_status_t;
++
++struct drm_psb_scanout {
++ uint32_t buffer_id; /* DRM buffer object ID */
++ uint32_t rotation; /* Rotation as in RR_rotation definitions */
++ uint32_t stride; /* Buffer stride in bytes */
++ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
++ uint32_t width; /* Buffer width in pixels */
++ uint32_t height; /* Buffer height in lines */
++ int32_t transform[3][3]; /* Buffer composite transform */
++ /* (scaling, rot, reflect) */
++};
++
++#define DRM_PSB_SAREA_OWNERS 16
++#define DRM_PSB_SAREA_OWNER_2D 0
++#define DRM_PSB_SAREA_OWNER_3D 1
++
++#define DRM_PSB_SAREA_SCANOUTS 3
++
++struct drm_psb_sarea {
++ /* Track changes of this data structure */
++
++ uint32_t major;
++ uint32_t minor;
++
++ /* Last context to touch part of hw */
++ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
++
++ /* Definition of front- and rotated buffers */
++ uint32_t num_scanouts;
++ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
++
++ int planeA_x;
++ int planeA_y;
++ int planeA_w;
++ int planeA_h;
++ int planeB_x;
++ int planeB_y;
++ int planeB_w;
++ int planeB_h;
++ /* Number of active scanouts */
++ uint32_t num_active_scanouts;
++};
++
++#define PSB_RELOC_MAGIC 0x67676767
++#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
++#define PSB_RELOC_SHIFT_SHIFT 0
++#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
++#define PSB_RELOC_ALSHIFT_SHIFT 16
++
++#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
++ * buffer
++ */
++
++struct drm_psb_reloc {
++ uint32_t reloc_op;
++ uint32_t where; /* offset in destination buffer */
++ uint32_t buffer; /* Buffer reloc applies to */
++ uint32_t mask; /* Destination format: */
++ uint32_t shift; /* Destination format: */
++ uint32_t pre_add; /* Destination format: */
++ uint32_t background; /* Destination add */
++ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
++ uint32_t arg0; /* Reloc-op dependant */
++ uint32_t arg1;
++};
++
++
++#define PSB_GPU_ACCESS_READ (1ULL << 32)
++#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
++#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
++
++#define PSB_BO_FLAG_COMMAND (1ULL << 52)
++
++#define PSB_ENGINE_2D 0
++#define PSB_ENGINE_VIDEO 1
++#define LNC_ENGINE_ENCODE 5
++
++/*
++ * For this fence class we have a couple of
++ * fence types.
++ */
++
++#define _PSB_FENCE_EXE_SHIFT 0
++#define _PSB_FENCE_FEEDBACK_SHIFT 4
++
++#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
++#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
++
++#define PSB_NUM_ENGINES 6
++
++
++#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
++
++struct drm_psb_extension_rep {
++ int32_t exists;
++ uint32_t driver_ioctl_offset;
++ uint32_t sarea_offset;
++ uint32_t major;
++ uint32_t minor;
++ uint32_t pl;
++};
++
++#define DRM_PSB_EXT_NAME_LEN 128
++
++union drm_psb_extension_arg {
++ char extension[DRM_PSB_EXT_NAME_LEN];
++ struct drm_psb_extension_rep rep;
++};
++
++struct psb_validate_req {
++ uint64_t set_flags;
++ uint64_t clear_flags;
++ uint64_t next;
++ uint64_t presumed_gpu_offset;
++ uint32_t buffer_handle;
++ uint32_t presumed_flags;
++ uint32_t group;
++ uint32_t pad64;
++};
++
++struct psb_validate_rep {
++ uint64_t gpu_offset;
++ uint32_t placement;
++ uint32_t fence_type_mask;
++};
++
++#define PSB_USE_PRESUMED (1 << 0)
++
++struct psb_validate_arg {
++ int handled;
++ int ret;
++ union {
++ struct psb_validate_req req;
++ struct psb_validate_rep rep;
++ } d;
++};
++
++
++#define DRM_PSB_FENCE_NO_USER (1 << 0)
++
++struct psb_ttm_fence_rep {
++ uint32_t handle;
++ uint32_t fence_class;
++ uint32_t fence_type;
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++typedef struct drm_psb_cmdbuf_arg {
++ uint64_t buffer_list; /* List of buffers to validate */
++ uint64_t clip_rects; /* See i915 counterpart */
++ uint64_t scene_arg;
++ uint64_t fence_arg;
++
++ uint32_t ta_flags;
++
++ uint32_t ta_handle; /* TA reg-value pairs */
++ uint32_t ta_offset;
++ uint32_t ta_size;
++
++ uint32_t oom_handle;
++ uint32_t oom_offset;
++ uint32_t oom_size;
++
++ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
++ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
++ uint32_t cmdbuf_size;
++
++ uint32_t reloc_handle; /* Reloc buffer object */
++ uint32_t reloc_offset;
++ uint32_t num_relocs;
++
++ int32_t damage; /* Damage front buffer with cliprects */
++ /* Not implemented yet */
++ uint32_t fence_flags;
++ uint32_t engine;
++
++ /*
++ * Feedback;
++ */
++
++ uint32_t feedback_ops;
++ uint32_t feedback_handle;
++ uint32_t feedback_offset;
++ uint32_t feedback_breakpoints;
++ uint32_t feedback_size;
++} drm_psb_cmdbuf_arg_t;
++
++typedef struct drm_psb_pageflip_arg {
++ uint32_t flip_offset;
++ uint32_t stride;
++} drm_psb_pageflip_arg_t;
++
++typedef enum {
++ LNC_VIDEO_DEVICE_INFO,
++ LNC_VIDEO_GETPARAM_RAR_INFO,
++ LNC_VIDEO_GETPARAM_CI_INFO,
++ LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET,
++ LNC_VIDEO_FRAME_SKIP,
++ LNC_VIDEO_CORE_NUM
++} lnc_getparam_key_t;
++
++struct drm_lnc_video_getparam_arg {
++ lnc_getparam_key_t key;
++ uint64_t arg; /* argument pointer */
++ uint64_t value; /* feed back pointer */
++};
++
++
++/*
++ * Feedback components:
++ */
++
++/*
++ * Vistest component. The number of these in the feedback buffer
++ * equals the number of vistest breakpoints + 1.
++ * This is currently the only feedback component.
++ */
++
++struct drm_psb_vistest {
++ uint32_t vt[8];
++};
++
++struct drm_psb_sizes_arg {
++ uint32_t ta_mem_size;
++ uint32_t mmu_size;
++ uint32_t pds_size;
++ uint32_t rastgeom_size;
++ uint32_t tt_size;
++ uint32_t vram_size;
++};
++
++struct drm_psb_hist_status_arg {
++ uint32_t buf[32];
++};
++
++struct drm_psb_dpst_lut_arg {
++ uint8_t lut[256];
++ int output_id;
++};
++
++struct mrst_timing_info {
++ uint16_t pixel_clock;
++ uint8_t hactive_lo;
++ uint8_t hblank_lo;
++ uint8_t hblank_hi:4;
++ uint8_t hactive_hi:4;
++ uint8_t vactive_lo;
++ uint8_t vblank_lo;
++ uint8_t vblank_hi:4;
++ uint8_t vactive_hi:4;
++ uint8_t hsync_offset_lo;
++ uint8_t hsync_pulse_width_lo;
++ uint8_t vsync_pulse_width_lo:4;
++ uint8_t vsync_offset_lo:4;
++ uint8_t vsync_pulse_width_hi:2;
++ uint8_t vsync_offset_hi:2;
++ uint8_t hsync_pulse_width_hi:2;
++ uint8_t hsync_offset_hi:2;
++ uint8_t width_mm_lo;
++ uint8_t height_mm_lo;
++ uint8_t height_mm_hi:4;
++ uint8_t width_mm_hi:4;
++ uint8_t hborder;
++ uint8_t vborder;
++ uint8_t unknown0:1;
++ uint8_t hsync_positive:1;
++ uint8_t vsync_positive:1;
++ uint8_t separate_sync:2;
++ uint8_t stereo:1;
++ uint8_t unknown6:1;
++ uint8_t interlaced:1;
++} __attribute__((packed));
++
++struct mrst_panel_descriptor_v1{
++ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++ /* 0x61190 if MIPI */
++ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
++ /* Register 0x61210 */
++ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
++ uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
++ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
++ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
++ uint16_t Panel_MIPI_Display_Descriptor;
++ /*16 bits, Defined as follows: */
++ /* if MIPI, 0x0000 if LVDS */
++ /* Bit 0, Type, 2 bits, */
++ /* 0: Type-1, */
++ /* 1: Type-2, */
++ /* 2: Type-3, */
++ /* 3: Type-4 */
++ /* Bit 2, Pixel Format, 4 bits */
++ /* Bit0: 16bpp (not supported in LNC), */
++ /* Bit1: 18bpp loosely packed, */
++ /* Bit2: 18bpp packed, */
++ /* Bit3: 24bpp */
++ /* Bit 6, Reserved, 2 bits, 00b */
++ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++ /* Bit 14, Reserved, 2 bits, 00b */
++} __attribute__ ((packed));
++
++struct mrst_panel_descriptor_v2{
++ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++ /* 0x61190 if MIPI */
++ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
++ /* Register 0x61210 */
++ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
++ uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
++ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
++ uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
++ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
++ uint16_t Panel_MIPI_Display_Descriptor;
++ /*16 bits, Defined as follows: */
++ /* if MIPI, 0x0000 if LVDS */
++ /* Bit 0, Type, 2 bits, */
++ /* 0: Type-1, */
++ /* 1: Type-2, */
++ /* 2: Type-3, */
++ /* 3: Type-4 */
++ /* Bit 2, Pixel Format, 4 bits */
++ /* Bit0: 16bpp (not supported in LNC), */
++ /* Bit1: 18bpp loosely packed, */
++ /* Bit2: 18bpp packed, */
++ /* Bit3: 24bpp */
++ /* Bit 6, Reserved, 2 bits, 00b */
++ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++ /* Bit 14, Reserved, 2 bits, 00b */
++} __attribute__ ((packed));
++
++union mrst_panel_rx{
++ struct{
++ uint16_t NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
++ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
++ uint16_t MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
++ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
++ uint16_t SupportedVideoTransferMode:2; /*0: Non-burst only */
++ /* 1: Burst and non-burst */
++ /* 2/3: Reserved */
++ uint16_t HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
++ uint16_t DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
++ uint16_t ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
++ uint16_t BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
++ uint16_t Rsvd:5;/*5 bits,00000b */
++ } panelrx;
++ uint16_t panel_receiver;
++} __attribute__ ((packed));
++
++struct gct_ioctl_arg{
++ uint8_t bpi; /* boot panel index, number of panel used during boot */
++ uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
++ struct mrst_timing_info DTD; /* timing info for the selected panel */
++ uint32_t Panel_Port_Control;
++ uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint32_t PP_Cycle_Delay;
++ uint16_t Panel_Backlight_Inverter_Descriptor;
++ uint16_t Panel_MIPI_Display_Descriptor;
++} __attribute__ ((packed));
++
++struct mrst_vbt{
++ char Signature[4]; /*4 bytes,"$GCT" */
++ uint8_t Revision; /*1 byte */
++ uint8_t Size; /*1 byte */
++ uint8_t Checksum; /*1 byte,Calculated*/
++ void *mrst_gct;
++} __attribute__ ((packed));
++
++struct mrst_gct_v1{ /* expect this table to change per customer request*/
++ union{ /*8 bits,Defined as follows: */
++ struct{
++ uint8_t PanelType:4; /*4 bits, Bit field for panels*/
++ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
++ /*2 bits,Specifies which of the*/
++ uint8_t BootPanelIndex:2;
++ /* 4 panels to use by default*/
++ uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++ /* the 4 MIPI DSI receivers to use*/
++ } PD;
++ uint8_t PanelDescriptor;
++ };
++ struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
++ union mrst_panel_rx panelrx[4]; /* panel receivers*/
++} __attribute__ ((packed));
++
++struct mrst_gct_v2{ /* expect this table to change per customer request*/
++ union{ /*8 bits,Defined as follows: */
++ struct{
++ uint8_t PanelType:4; /*4 bits, Bit field for panels*/
++ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
++ /*2 bits,Specifies which of the*/
++ uint8_t BootPanelIndex:2;
++ /* 4 panels to use by default*/
++ uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++ /* the 4 MIPI DSI receivers to use*/
++ } PD;
++ uint8_t PanelDescriptor;
++ };
++ struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
++ union mrst_panel_rx panelrx[4]; /* panel receivers*/
++} __attribute__ ((packed));
++
++#define PSB_DC_CRTC_SAVE 0x01
++#define PSB_DC_CRTC_RESTORE 0x02
++#define PSB_DC_OUTPUT_SAVE 0x04
++#define PSB_DC_OUTPUT_RESTORE 0x08
++#define PSB_DC_CRTC_MASK 0x03
++#define PSB_DC_OUTPUT_MASK 0x0C
++
++struct drm_psb_dc_state_arg {
++ uint32_t flags;
++ uint32_t obj_id;
++};
++
++struct drm_psb_mode_operation_arg {
++ uint32_t obj_id;
++ uint16_t operation;
++ struct drm_mode_modeinfo mode;
++ void *data;
++};
++
++struct drm_psb_stolen_memory_arg {
++ uint32_t base;
++ uint32_t size;
++};
++
++/*Display Register Bits*/
++#define REGRWBITS_PFIT_CONTROLS (1 << 0)
++#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
++#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
++#define REGRWBITS_PIPEASRC (1 << 3)
++#define REGRWBITS_PIPEBSRC (1 << 4)
++#define REGRWBITS_VTOTAL_A (1 << 5)
++#define REGRWBITS_VTOTAL_B (1 << 6)
++#ifdef MDFLD_HDCP
++#define REGRWBITS_HDCP (1 << 7)
++#endif
++/*Overlay Register Bits*/
++#define OV_REGRWBITS_OVADD (1 << 0)
++#define OV_REGRWBITS_OGAM_ALL (1 << 1)
++
++struct drm_psb_register_rw_arg {
++ uint32_t b_force_hw_on;
++
++ uint32_t display_read_mask;
++ uint32_t display_write_mask;
++
++ struct {
++ uint32_t pfit_controls;
++ uint32_t pfit_autoscale_ratios;
++ uint32_t pfit_programmed_scale_ratios;
++ uint32_t pipeasrc;
++ uint32_t pipebsrc;
++ uint32_t vtotal_a;
++ uint32_t vtotal_b;
++ #ifdef MDFLD_HDCP
++ uint32_t hdcp_reg;
++ uint32_t hdcp_value;
++ #endif
++ } display;
++
++ uint32_t overlay_read_mask;
++ uint32_t overlay_write_mask;
++
++ struct {
++ uint32_t OVADD;
++ uint32_t OGAMC0;
++ uint32_t OGAMC1;
++ uint32_t OGAMC2;
++ uint32_t OGAMC3;
++ uint32_t OGAMC4;
++ uint32_t OGAMC5;
++ } overlay;
++
++ uint32_t sprite_enable_mask;
++ uint32_t sprite_disable_mask;
++
++ struct {
++ uint32_t dspa_control;
++ uint32_t dspa_key_value;
++ uint32_t dspa_key_mask;
++ uint32_t dspc_control;
++ uint32_t dspc_stride;
++ uint32_t dspc_position;
++ uint32_t dspc_linear_offset;
++ uint32_t dspc_size;
++ uint32_t dspc_surface;
++ } sprite;
++};
++
++struct psb_gtt_mapping_arg {
++ void *hKernelMemInfo;
++ uint32_t offset_pages;
++};
++
++struct drm_psb_getpageaddrs_arg {
++ uint32_t handle;
++ unsigned long *page_addrs;
++ unsigned long gtt_offset;
++};
++
++
++/* Controlling the kernel modesetting buffers */
++
++#define DRM_PSB_KMS_OFF 0x00
++#define DRM_PSB_KMS_ON 0x01
++#define DRM_PSB_VT_LEAVE 0x02
++#define DRM_PSB_VT_ENTER 0x03
++#define DRM_PSB_EXTENSION 0x06
++#define DRM_PSB_SIZES 0x07
++#define DRM_PSB_FUSE_REG 0x08
++#define DRM_PSB_VBT 0x09
++#define DRM_PSB_DC_STATE 0x0A
++#define DRM_PSB_ADB 0x0B
++#define DRM_PSB_MODE_OPERATION 0x0C
++#define DRM_PSB_STOLEN_MEMORY 0x0D
++#define DRM_PSB_REGISTER_RW 0x0E
++#define DRM_PSB_GTT_MAP 0x0F
++#define DRM_PSB_GTT_UNMAP 0x10
++#define DRM_PSB_GETPAGEADDRS 0x11
++/**
++ * NOTE: Add new commands here, but increment
++ * the values below and increment their
++ * corresponding defines where they're
++ * defined elsewhere.
++ */
++#define DRM_PVR_RESERVED1 0x12
++#define DRM_PVR_RESERVED2 0x13
++#define DRM_PVR_RESERVED3 0x14
++#define DRM_PVR_RESERVED4 0x15
++#define DRM_PVR_RESERVED5 0x16
++
++#define DRM_PSB_HIST_ENABLE 0x17
++#define DRM_PSB_HIST_STATUS 0x18
++#define DRM_PSB_UPDATE_GUARD 0x19
++#define DRM_PSB_INIT_COMM 0x1A
++#define DRM_PSB_DPST 0x1B
++#define DRM_PSB_GAMMA 0x1C
++#define DRM_PSB_DPST_BL 0x1D
++
++#define DRM_PVR_RESERVED6 0x1E
++
++
++#ifdef MDFLD_HDCP
++#define DRM_PSB_HDCP_I2C_ACCESS 0x1F
++#endif
++
++#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
++#define DRM_PSB_DPU_QUERY 0x20
++#define DRM_PSB_DPU_DSR_ON 0x21
++#define DRM_PSB_DPU_DSR_OFF 0x22
++
++
++struct psb_drm_dpu_rect {
++ int x, y;
++ int width, height;
++};
++
++struct drm_psb_drv_dsr_off_arg {
++ int screen;
++ struct psb_drm_dpu_rect damage_rect;
++};
++
++
++struct drm_psb_dev_info_arg {
++ uint32_t num_use_attribute_registers;
++};
++#define DRM_PSB_DEVINFO 0x01
++
++#define PSB_MODE_OPERATION_MODE_VALID 0x01
++#define PSB_MODE_OPERATION_SET_DC_BASE 0x02
++
++struct drm_psb_get_pipe_from_crtc_id_arg {
++ /** ID of CRTC being requested **/
++ uint32_t crtc_id;
++
++ /** pipe of requested CRTC **/
++ uint32_t pipe;
++};
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_drv.c
+@@ -0,0 +1,2557 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_fb.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_bios.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "pnw_topaz.h"
++#include <drm/drm_pciids.h>
++#include "pvr_drm_shared.h"
++#include "psb_powermgmt.h"
++#include "img_types.h"
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#if defined(CONFIG_RAR_REGISTER)
++#include "../../rar_register/rar_register.h"
++#include "../../memrar/memrar.h"
++#endif
++
++#include "mdfld_dsi_dbi.h"
++#ifdef CONFIG_MDFLD_DSI_DPU
++#include "mdfld_dsi_dbi_dpu.h"
++#endif
++
++/*IMG headers*/
++#include "pvr_drm_shared.h"
++#include "img_types.h"
++#include "pvr_bridge.h"
++#include "linkage.h"
++
++
++int drm_psb_debug;
++/*EXPORT_SYMBOL(drm_psb_debug); */
++static int drm_psb_trap_pagefaults;
++
++int drm_psb_disable_vsync = 1;
++int drm_psb_no_fb;
++int drm_psb_force_pipeb;
++int drm_idle_check_interval = 5;
++int drm_msvdx_pmpolicy = PSB_PMPOLICY_POWERDOWN;
++int drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM;
++int drm_topaz_sbuswa;
++int drm_psb_ospm = 1;
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++
++MODULE_PARM_DESC(debug, "Enable debug output");
++MODULE_PARM_DESC(no_fb, "Disable FBdev");
++MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
++MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
++MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
++MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
++MODULE_PARM_DESC(ospm, "switch for ospm support");
++MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy btw frames");
++MODULE_PARM_DESC(topaz_pmpolicy, "topaz power managerment policy btw frames");
++MODULE_PARM_DESC(topaz_sbuswa, "WA for topaz sysbus write");
++module_param_named(debug, drm_psb_debug, int, 0600);
++module_param_named(no_fb, drm_psb_no_fb, int, 0600);
++module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
++module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
++module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
++module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
++module_param_named(topaz_sbuswa, drm_topaz_sbuswa, int, 0600);
++module_param_named(ospm, drm_psb_ospm, int, 0600);
++
++static struct pci_device_id pciidlist[] = {
++#ifdef SGX535
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108},
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109},
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
++#endif
++#ifdef SGX540
++ {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++ {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
++#endif
++ {0, 0, 0}
++};
++
++MODULE_DEVICE_TABLE(pci, pciidlist);
++/*
++ * Standard IOCTLs.
++ */
++
++#define DRM_IOCTL_PSB_KMS_OFF \
++ DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON \
++ DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_LEAVE \
++ DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_ENTER \
++ DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION \
++ DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++#define DRM_IOCTL_PSB_SIZES \
++ DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
++ struct drm_psb_sizes_arg)
++#define DRM_IOCTL_PSB_FUSE_REG \
++ DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_PSB_VBT \
++ DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
++ struct gct_ioctl_arg)
++#define DRM_IOCTL_PSB_DC_STATE \
++ DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
++ struct drm_psb_dc_state_arg)
++#define DRM_IOCTL_PSB_ADB \
++ DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_PSB_MODE_OPERATION \
++ DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
++ struct drm_psb_mode_operation_arg)
++#define DRM_IOCTL_PSB_STOLEN_MEMORY \
++ DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
++ struct drm_psb_stolen_memory_arg)
++#define DRM_IOCTL_PSB_REGISTER_RW \
++ DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
++ struct drm_psb_register_rw_arg)
++#define DRM_IOCTL_PSB_GTT_MAP \
++ DRM_IOWR(DRM_PSB_GTT_MAP + DRM_COMMAND_BASE, \
++ struct psb_gtt_mapping_arg)
++#define DRM_IOCTL_PSB_GTT_UNMAP \
++ DRM_IOW(DRM_PSB_GTT_UNMAP + DRM_COMMAND_BASE, \
++ struct psb_gtt_mapping_arg)
++#define DRM_IOCTL_PSB_GETPAGEADDRS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GETPAGEADDRS,\
++ struct drm_psb_getpageaddrs_arg)
++#define DRM_IOCTL_PSB_HIST_ENABLE \
++ DRM_IOWR(DRM_PSB_HIST_ENABLE + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_HIST_STATUS \
++ DRM_IOWR(DRM_PSB_HIST_STATUS + DRM_COMMAND_BASE, \
++ struct drm_psb_hist_status_arg)
++#define DRM_IOCTL_PSB_UPDATE_GUARD \
++ DRM_IOWR(DRM_PSB_UPDATE_GUARD + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_INIT_COMM \
++ DRM_IOWR(DRM_PSB_INIT_COMM + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_DPST \
++ DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_GAMMA \
++ DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
++ struct drm_psb_dpst_lut_arg)
++#define DRM_IOCTL_PSB_DPST_BL \
++ DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
++ DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
++ struct drm_psb_get_pipe_from_crtc_id_arg)
++#ifdef MDFLD_HDCP
++#define DRM_IOCTL_PSB_HDCP_I2C_ACCESS DRM_IOWR(DRM_PSB_HDCP_I2C_ACCESS + DRM_COMMAND_BASE, \
++ struct drm_psb_hdcp_i2c_arg)
++
++#endif
++
++/*pvr ioctls*/
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, \
++ PVRSRV_BRIDGE_PACKAGE)
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, \
++ IMG_UINT32)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++
++/*DPU/DSR stuff*/
++#define DRM_IOCRL_PSB_DPU_QUERY DRM_IOR(DRM_PSB_DPU_QUERY + DRM_COMMAND_BASE, IMG_UINT32)
++#define DRM_IOCRL_PSB_DPU_DSR_ON DRM_IOW(DRM_PSB_DPU_DSR_ON + DRM_COMMAND_BASE, IMG_UINT32)
++//#define DRM_IOCRL_PSB_DPU_DSR_OFF DRM_IOW(DRM_PSB_DPU_DSR_OFF + DRM_COMMAND_BASE, IMG_UINT32)
++#define DRM_IOCRL_PSB_DPU_DSR_OFF DRM_IOW(DRM_PSB_DPU_DSR_OFF + DRM_COMMAND_BASE, struct drm_psb_drv_dsr_off_arg)
++
++/*
++ * TTM execbuf extension.
++ */
++#if defined(PDUMP)
++#define DRM_PSB_CMDBUF (PVR_DRM_DBGDRV_CMD + 1)
++#else
++#define DRM_PSB_CMDBUF (DRM_PSB_DPU_DSR_OFF + 1)
++/* #define DRM_PSB_CMDBUF (DRM_PSB_DPST_BL + 1) */
++#endif
++
++#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
++#define DRM_IOCTL_PSB_CMDBUF \
++ DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
++ struct drm_psb_cmdbuf_arg)
++#define DRM_IOCTL_PSB_SCENE_UNREF \
++ DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
++ struct drm_psb_scene)
++#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION \
++ DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++/*
++ * TTM placement user extension.
++ */
++
++#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
++
++#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_CREATE_UB (TTM_PL_CREATE_UB + DRM_PSB_PLACEMENT_OFFSET)
++
++/*
++ * TTM fence extension.
++ */
++
++#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_CREATE_UB + 1)
++#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
++
++#define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) /*20*/
++/* PSB video extension */
++#define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1)
++
++#define DRM_IOCTL_PSB_TTM_PL_CREATE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
++ union ttm_pl_create_arg)
++#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
++ union ttm_pl_reference_arg)
++#define DRM_IOCTL_PSB_TTM_PL_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
++ struct ttm_pl_reference_req)
++#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
++ struct ttm_pl_synccpu_arg)
++#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
++ struct ttm_pl_waitidle_arg)
++#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
++ union ttm_pl_setstatus_arg)
++#define DRM_IOCTL_PSB_TTM_PL_CREATE_UB \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE_UB,\
++ union ttm_pl_create_ub_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
++ union ttm_fence_signaled_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
++ union ttm_fence_finish_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
++ struct ttm_fence_unref_arg)
++#define DRM_IOCTL_PSB_FLIP \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
++ struct drm_psb_pageflip_arg)
++#define DRM_IOCTL_LNC_VIDEO_GETPARAM \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \
++ struct drm_lnc_video_getparam_arg)
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_sizes_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vbt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
++ struct drm_file *file_priv);
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpst_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++#if 0 /*#ifdef MDFLD_HDCP*/
++static int psb_hdcp_i2c_access_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++#endif
++
++#if MDFLD_JLIU7_DSR
++static int psb_dpu_query_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpu_dsr_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++#endif
++
++#define PSB_IOCTL_DEF(ioctl, func, flags) \
++ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
++
++static struct drm_ioctl_desc psb_ioctls[] = {
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON,
++ psbfb_kms_on_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER,
++ psb_vt_enter_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_MAP,
++ psb_gtt_map_meminfo_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_UNMAP,
++ psb_gtt_unmap_meminfo_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GETPAGEADDRS,
++ psb_getpageaddrs_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++ PSB_IOCTL_DEF(PVR_DRM_DISP_IOCTL, PVRDRM_Dummy_ioctl, 0),
++ PSB_IOCTL_DEF(PVR_DRM_BC_IOCTL, PVRDRM_Dummy_ioctl, 0),
++ PSB_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++ PSB_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_ENABLE,
++ psb_hist_enable_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_STATUS,
++ psb_hist_status_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_UPDATE_GUARD, psb_update_guard_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_INIT_COMM, psb_init_comm_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID, psb_intel_get_pipe_from_crtc_id, 0),
++#if defined(PDUMP)
++ PSB_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
++ /*to be removed later*/
++ /*PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
++ DRM_AUTH),*/
++
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE_UB, psb_pl_ub_create_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
++ psb_fence_signaled_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
++ DRM_AUTH),
++ /*to be removed later */
++ /*PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),*/
++ PSB_IOCTL_DEF(DRM_IOCTL_LNC_VIDEO_GETPARAM,
++ lnc_video_getparam, DRM_AUTH),
++#if 0 /*#ifdef MDFLD_HDCP*/
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDCP_I2C_ACCESS, psb_hdcp_i2c_access_ioctl,
++ DRM_AUTH)
++#endif
++
++#if MDFLD_JLIU7_DSR
++ PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_QUERY, psb_dpu_query_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_DSR_ON, psb_dpu_dsr_on_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_DSR_OFF, psb_dpu_dsr_off_ioctl,
++ DRM_AUTH)
++#endif
++};
++
++static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
++
++static void get_ci_info(struct drm_psb_private *dev_priv)
++{
++ struct pci_dev *pdev;
++
++ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
++ if (pdev == NULL) {
++ /* IF no pci_device we set size & addr to 0, no ci
++ * share buffer can be created */
++ dev_priv->ci_region_start = 0;
++ dev_priv->ci_region_size = 0;
++ printk(KERN_ERR "can't find CI device, no ci share buffer\n");
++ return;
++ }
++
++ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
++ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
++
++ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
++ dev_priv->ci_region_start, dev_priv->ci_region_size);
++
++ pci_dev_put(pdev);
++
++ return;
++}
++
++static void get_rar_info(struct drm_psb_private *dev_priv)
++{
++#if defined(CONFIG_RAR_REGISTER)
++ int ret;
++ dma_addr_t start_addr, end_addr;
++
++ dev_priv->rar_region_start = 0;
++ dev_priv->rar_region_size = 0;
++ end_addr = 0;
++ ret = 0;
++
++ ret = rar_get_address(RAR_TYPE_VIDEO, &start_addr,
++ &end_addr);
++ if (ret) {
++ printk(KERN_ERR "failed to get rar region info\n");
++ return;
++ }
++ dev_priv->rar_region_start = (uint32_t) start_addr;
++ if ((!ret) && (start_addr != 0) && (end_addr != 0))
++ dev_priv->rar_region_size =
++ end_addr - dev_priv->rar_region_start + 1;
++
++#endif
++ return;
++}
++
++static void psb_set_uopt(struct drm_psb_uopt *uopt)
++{
++ return;
++}
++
++static void psb_lastclose(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ return;
++
++ if (!dev->dev_private)
++ return;
++
++ mutex_lock(&dev_priv->cmdbuf_mutex);
++ if (dev_priv->context.buffers) {
++ vfree(dev_priv->context.buffers);
++ dev_priv->context.buffers = NULL;
++ }
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++}
++
++static void psb_do_takedown(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++
++
++ if (dev_priv->have_mem_mmu) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
++ dev_priv->have_mem_mmu = 0;
++ }
++
++ if (dev_priv->have_tt) {
++ ttm_bo_clean_mm(bdev, TTM_PL_TT);
++ dev_priv->have_tt = 0;
++ }
++
++ if (dev_priv->have_camera) {
++ ttm_bo_clean_mm(bdev, TTM_PL_CI);
++ dev_priv->have_camera = 0;
++ }
++ if (dev_priv->have_rar) {
++ ttm_bo_clean_mm(bdev, TTM_PL_RAR);
++ dev_priv->have_rar = 0;
++ }
++
++ psb_msvdx_uninit(dev);
++
++ if (IS_MDFLD(dev))
++ pnw_topaz_uninit(dev);
++ else if (!dev_priv->topaz_disabled)
++ lnc_topaz_uninit(dev);
++}
++
++#define FB_REG06 0xD0810600
++#define FB_TOPAZ_DISABLE BIT0
++#define FB_MIPI_DISABLE BIT11
++#define FB_REG09 0xD0810900
++#define FB_SKU_MASK (BIT12|BIT13|BIT14)
++#define FB_SKU_SHIFT 12
++#define FB_SKU_100 0
++#define FB_SKU_100L 1
++#define FB_SKU_83 2
++#if 1 /* FIXME remove it after PO */
++#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
++#define FB_GFX_CLK_DIVIDE_SHIFT 20
++#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
++#define FB_VED_CLK_DIVIDE_SHIFT 23
++#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
++#define FB_VEC_CLK_DIVIDE_SHIFT 25
++#endif /* FIXME remove it after PO */
++
++
++void mrst_get_fuse_settings(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++ uint32_t fuse_value = 0;
++ uint32_t fuse_value_tmp = 0;
++
++ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
++ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
++
++ if (IS_MDFLD(dev)) {
++ dev_priv->iLVDS_enable = 0;
++ }
++
++ DRM_INFO("internal display is %s\n",
++ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
++
++ if (dev_priv->dev->pci_device == PCI_ID_TOPAZ_DISABLED)
++ dev_priv->topaz_disabled = 1;
++ else
++ dev_priv->topaz_disabled = 0;
++
++ dev_priv->video_device_fuse = fuse_value;
++
++ DRM_INFO("topaz is %s\n",
++ dev_priv->topaz_disabled ? "disabled" : "enabled");
++
++ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
++ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++ DRM_INFO("SKU values is 0x%x. \n", fuse_value);
++ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
++
++ dev_priv->fuse_reg_value = fuse_value;
++
++ switch (fuse_value_tmp) {
++ case FB_SKU_100:
++ DRM_INFO("SKU values is SKU_100. LNC core clk is 200MHz.\n");
++ dev_priv->sku_100 = true;
++ break;
++ case FB_SKU_100L:
++ DRM_INFO("SKU values is SKU_100L. LNC core clk is 100MHz.\n");
++ dev_priv->sku_100L = true;
++ break;
++ case FB_SKU_83:
++ DRM_INFO("SKU values is SKU_83. LNC core clk is 166MHz.\n");
++ dev_priv->sku_83 = true;
++ break;
++ default:
++ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++#if 1 /* FIXME remove it after PO */
++ fuse_value_tmp =
++ (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Gfx clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Gfx clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Gfx clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Gfx clk : core clk = 2:1. \n");
++ break;
++ case 4:
++ DRM_INFO("Gfx clk : core clk = 16:7. \n");
++ break;
++ case 5:
++ DRM_INFO("Gfx clk : core clk = 8:3. \n");
++ break;
++ case 6:
++ DRM_INFO("Gfx clk : core clk = 16:5. \n");
++ break;
++ case 7:
++ DRM_INFO("Gfx clk : core clk = 4:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++ fuse_value_tmp =
++ (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Ved clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Ved clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Ved clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Ved clk : core clk = 2:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++ fuse_value_tmp =
++ (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Vec clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Vec clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Vec clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Vec clk : core clk = 2:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++#endif /* FIXME remove it after PO */
++
++ if (IS_MDFLD(dev)) {
++#if KSEL_BYPASS_83_100_ENABLE
++ dev_priv->ksel = KSEL_BYPASS_83_100;
++#endif /* KSEL_BYPASS_83_100_ENABLE */
++
++#if KSEL_CRYSTAL_19_ENABLED
++ dev_priv->ksel = KSEL_CRYSTAL_19;
++#endif /* KSEL_CRYSTAL_19_ENABLED */
++ }
++
++ return;
++}
++
++bool mrst_get_vbt_data(struct drm_psb_private *dev_priv)
++{
++ struct mrst_vbt *pVBT = &dev_priv->vbt_data;
++ u32 platform_config_address;
++ u8 *pVBT_virtual;
++ u8 bpi;
++ void *pGCT;
++ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++
++ /*get the address of the platform config vbt, B0:D2:F0;0xFC */
++ pci_read_config_dword(pci_gfx_root, 0xFC, &platform_config_address);
++ DRM_INFO("drm platform config address is %x\n",
++ platform_config_address);
++
++ /* check for platform config address == 0. */
++ /* this means fw doesn't support vbt */
++
++ if (platform_config_address == 0) {
++ pVBT->Size = 0;
++ return false;
++ }
++
++ /* get the virtual address of the vbt */
++ pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT));
++
++ memcpy(pVBT, pVBT_virtual, sizeof(*pVBT));
++ iounmap(pVBT_virtual); /* Free virtual address space */
++
++ printk(KERN_ALERT "GCT Revision is %x\n", pVBT->Revision);
++ pVBT->mrst_gct = NULL;
++ pVBT->mrst_gct = ioremap(platform_config_address + sizeof(*pVBT) - 4,
++ pVBT->Size - sizeof(*pVBT) + 4);
++ pGCT = pVBT->mrst_gct;
++
++ switch (pVBT->Revision) {
++ case 0:
++ bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
++ dev_priv->gct_data.bpi = bpi;
++ dev_priv->gct_data.pt =
++ ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
++ memcpy(&dev_priv->gct_data.DTD,
++ &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
++ sizeof(struct mrst_timing_info));
++ dev_priv->gct_data.Panel_Port_Control =
++ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
++ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++ break;
++ case 1:
++ bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
++ dev_priv->gct_data.bpi = bpi;
++ dev_priv->gct_data.pt =
++ ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
++ memcpy(&dev_priv->gct_data.DTD,
++ &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
++ sizeof(struct mrst_timing_info));
++ dev_priv->gct_data.Panel_Port_Control =
++ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
++ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++ break;
++ default:
++ printk(KERN_ALERT "Unknown revision of GCT!\n");
++ pVBT->Size = 0;
++ return false;
++ }
++
++ return true;
++}
++
++static int psb_do_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ uint32_t stolen_gtt;
++ uint32_t tt_start;
++ uint32_t tt_pages;
++
++ int ret = -ENOMEM;
++
++
++ /*
++ * Initialize sequence numbers for the different command
++ * submission mechanisms.
++ */
++
++ dev_priv->sequence[PSB_ENGINE_2D] = 0;
++ dev_priv->sequence[PSB_ENGINE_VIDEO] = 0;
++ dev_priv->sequence[LNC_ENGINE_ENCODE] = 0;
++
++ if (pg->mmu_gatt_start & 0x0FFFFFFF) {
++ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
++ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ stolen_gtt =
++ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
++
++ dev_priv->gatt_free_offset = pg->mmu_gatt_start +
++ (stolen_gtt << PAGE_SHIFT) * 1024;
++
++ if (1 || drm_debug) {
++ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
++ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
++ DRM_INFO("SGX core id = 0x%08x\n", core_id);
++ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
++ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
++ _PSB_CC_REVISION_MAJOR_SHIFT,
++ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
++ _PSB_CC_REVISION_MINOR_SHIFT);
++ DRM_INFO
++ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
++ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
++ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
++ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
++ _PSB_CC_REVISION_DESIGNER_SHIFT);
++ }
++
++ spin_lock_init(&dev_priv->irqmask_lock);
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
++ tt_start = dev_priv->gatt_free_offset - pg->mmu_gatt_start;
++ tt_pages -= tt_start >> PAGE_SHIFT;
++ dev_priv->sizes.ta_mem_size = 0;
++
++
++ if (IS_MRST(dev) &&
++ (dev_priv->ci_region_size != 0) &&
++ !ttm_bo_init_mm(bdev, TTM_PL_CI, pg->ci_start >> PAGE_SHIFT,
++ dev_priv->ci_region_size >> PAGE_SHIFT)) {
++ dev_priv->have_camera = 1;
++ }
++
++ /* since there is always rar region for video, it is ok */
++ if (IS_MRST(dev) &&
++ (dev_priv->rar_region_size != 0) &&
++ !ttm_bo_init_mm(bdev, TTM_PL_RAR, pg->rar_start >> PAGE_SHIFT,
++ dev_priv->rar_region_size >> PAGE_SHIFT)) {
++ dev_priv->have_rar = 1;
++ }
++
++ /* TT region managed by TTM. */
++ if (!ttm_bo_init_mm(bdev, TTM_PL_TT,
++ (pg->rar_start + dev_priv->rar_region_size) >> PAGE_SHIFT,
++ pg->gatt_pages -
++ (pg->ci_start >> PAGE_SHIFT) -
++ ((dev_priv->ci_region_size + dev_priv->rar_region_size)
++ >> PAGE_SHIFT))) {
++
++ dev_priv->have_tt = 1;
++ dev_priv->sizes.tt_size =
++ (tt_pages << PAGE_SHIFT) / (1024 * 1024) / 2;
++ }
++
++ if (!ttm_bo_init_mm(bdev,
++ DRM_PSB_MEM_MMU,
++ 0x00000000,
++ (pg->gatt_start - PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
++ dev_priv->have_mem_mmu = 1;
++ dev_priv->sizes.mmu_size =
++ (pg->gatt_start - PSB_MEM_MMU_START) /
++ (1024*1024);
++ }
++
++
++ PSB_DEBUG_INIT("Init MSVDX\n");
++ psb_msvdx_init(dev);
++
++ DRM_INFO(" IS_MRST(dev) %d, IS_MDFLD(dev) %d\n", IS_MRST(dev), IS_MDFLD(dev));
++ if (IS_MID(dev)) {
++ PSB_DEBUG_INIT("Init Topaz\n");
++ /* for sku100L and sku100M, VEC is disabled in fuses */
++ if (IS_MDFLD(dev))
++ pnw_topaz_init(dev);
++ else if(!dev_priv->topaz_disabled)
++ lnc_topaz_init(dev);
++ else
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ }
++
++ return 0;
++out_err:
++ psb_do_takedown(dev);
++ return ret;
++}
++
++static int psb_intel_opregion_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/
++ u32 opregion_phy;
++ void *base;
++ u32 *lid_state;
++
++ dev_priv->lid_state = NULL;
++
++ pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
++ if (opregion_phy == 0) {
++ DRM_DEBUG("Opregion not supported, won't support lid-switch\n");
++ return -ENOTSUPP;
++ }
++ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
++
++ base = ioremap(opregion_phy, 8*1024);
++ if (!base)
++ return -ENOMEM;
++
++ lid_state = base + 0x01ac;
++
++ DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state);
++
++ dev_priv->lid_state = lid_state;
++ dev_priv->lid_last_state = *lid_state;
++ return 0;
++}
++
++static int psb_driver_unload(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ /*Fristly, unload pvr driver*/
++ PVRSRVDrmUnload(dev);
++
++ if (IS_MDFLD(dev)) {
++ mdfld_backlight_exit(); /*writes minimum value to backlight HW reg */
++ } else {
++ psb_backlight_exit(); /*writes minimum value to backlight HW reg */
++ }
++
++ if (drm_psb_no_fb == 0)
++ psb_modeset_cleanup(dev);
++
++ if (dev_priv) {
++ if (IS_POULSBO(dev))
++ psb_lid_timer_takedown(dev_priv);
++
++#if MDFLD_JLIU7_DSR
++ if (IS_MDFLD(dev)) {
++ if ((!dev_priv->dpi) || (!dev_priv->dpi2))
++ mdfld_dsr_timer_takedown(dev_priv);
++ }
++#endif /* MDFLD_JLIU7_DSR */
++
++ /* psb_watchdog_takedown(dev_priv); */
++ psb_do_takedown(dev);
++
++
++ if (dev_priv->pf_pd) {
++ psb_mmu_free_pagedir(dev_priv->pf_pd);
++ dev_priv->pf_pd = NULL;
++ }
++ if (dev_priv->mmu) {
++ struct psb_gtt *pg = dev_priv->pg;
++
++ down_read(&pg->sem);
++ psb_mmu_remove_pfn_sequence(
++ psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->mmu_gatt_start,
++ pg->vram_stolen_size >> PAGE_SHIFT);
++ if (pg->ci_stolen_size != 0)
++ psb_mmu_remove_pfn_sequence(
++ psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->ci_start,
++ pg->ci_stolen_size >> PAGE_SHIFT);
++ if (pg->rar_stolen_size != 0)
++ psb_mmu_remove_pfn_sequence(
++ psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->rar_start,
++ pg->rar_stolen_size >> PAGE_SHIFT);
++ up_read(&pg->sem);
++ psb_mmu_driver_takedown(dev_priv->mmu);
++ dev_priv->mmu = NULL;
++ }
++ psb_gtt_takedown(dev_priv->pg, 1);
++ if (dev_priv->scratch_page) {
++ __free_page(dev_priv->scratch_page);
++ dev_priv->scratch_page = NULL;
++ }
++ if (dev_priv->has_bo_device) {
++ ttm_bo_device_release(&dev_priv->bdev);
++ dev_priv->has_bo_device = 0;
++ }
++ if (dev_priv->has_fence_device) {
++ ttm_fence_device_release(&dev_priv->fdev);
++ dev_priv->has_fence_device = 0;
++ }
++ if (dev_priv->vdc_reg) {
++ iounmap(dev_priv->vdc_reg);
++ dev_priv->vdc_reg = NULL;
++ }
++ if (dev_priv->sgx_reg) {
++ iounmap(dev_priv->sgx_reg);
++ dev_priv->sgx_reg = NULL;
++ }
++ if (dev_priv->msvdx_reg) {
++ iounmap(dev_priv->msvdx_reg);
++ dev_priv->msvdx_reg = NULL;
++ }
++
++ if (IS_TOPAZ(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ if (dev_priv->tdev)
++ ttm_object_device_release(&dev_priv->tdev);
++
++ if (dev_priv->has_global)
++ psb_ttm_global_release(dev_priv);
++
++ kfree(dev_priv);
++ dev->dev_private = NULL;
++
++ /*destory VBT data*/
++ if (IS_POULSBO(dev))
++ psb_intel_destory_bios(dev);
++ }
++
++ ospm_power_uninit();
++
++ return 0;
++}
++
++
++static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++ struct drm_psb_private *dev_priv;
++ struct ttm_bo_device *bdev;
++ unsigned long resource_start;
++ struct psb_gtt *pg;
++ unsigned long irqflags;
++ int ret = -ENOMEM;
++ uint32_t tt_pages;
++
++ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
++
++ if (IS_MDFLD(dev))
++ DRM_INFO("Run drivers on Medfield platform!\n");
++ else if (IS_MRST(dev))
++ DRM_INFO("Run drivers on Moorestown platform!\n");
++ else
++ DRM_INFO("Run drivers on Poulsbo platform!\n");
++
++ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
++ if (dev_priv == NULL)
++ return -ENOMEM;
++
++ if (IS_MDFLD(dev)) {
++#if MDFLD_WLD_JLIU7
++ dev_priv->num_pipe = PSB_NUM_PIPE;
++#else
++ dev_priv->num_pipe = 3;
++#endif /* MDFLD_WLD_JLIU7 */
++ }
++ else if (IS_MRST(dev))
++ dev_priv->num_pipe = 1;
++ else
++ dev_priv->num_pipe = 2;
++
++ /*init DPST umcomm to NULL*/
++ dev_priv->psb_dpst_state = NULL;
++ dev_priv->psb_hotplug_state = NULL;
++
++ dev_priv->dev = dev;
++ bdev = &dev_priv->bdev;
++
++ ret = psb_ttm_global_init(dev_priv);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_global = 1;
++
++ dev_priv->tdev = ttm_object_device_init
++ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
++ if (unlikely(dev_priv->tdev == NULL))
++ goto out_err;
++
++ mutex_init(&dev_priv->temp_mem);
++ mutex_init(&dev_priv->cmdbuf_mutex);
++ mutex_init(&dev_priv->reset_mutex);
++ INIT_LIST_HEAD(&dev_priv->context.validate_list);
++ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
++
++#if MDFLD_JLIU7_DSR
++ mutex_init(&dev_priv->dsr_mutex);
++#endif /* MDFLD_JLIU7_DSR */
++
++ spin_lock_init(&dev_priv->reloc_lock);
++
++ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
++
++ dev->dev_private = (void *) dev_priv;
++ dev_priv->chipset = chipset;
++ psb_set_uopt(&dev_priv->uopt);
++
++ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
++ /* psb_watchdog_init(dev_priv); */
++ psb_scheduler_init(dev, &dev_priv->scheduler);
++
++
++ PSB_DEBUG_INIT("Mapping MMIO\n");
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MSVDX(dev)) /* Work around for medfield by Li */
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + MRST_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++ else
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + PSB_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++
++ if (!dev_priv->msvdx_reg)
++ goto out_err;
++
++ if (IS_TOPAZ(dev)) {
++ if(IS_MDFLD(dev)) {
++ printk("map topazSC register space\n");
++ dev_priv->topaz_reg =
++ ioremap(resource_start + PNW_TOPAZ_OFFSET,
++ PNW_TOPAZ_SIZE);
++ }
++ else
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ goto out_err;
++ }
++
++ dev_priv->vdc_reg =
++ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
++ if (!dev_priv->vdc_reg)
++ goto out_err;
++
++ if (IS_MID(dev))
++ dev_priv->sgx_reg =
++ ioremap(resource_start + MRST_SGX_OFFSET,
++ PSB_SGX_SIZE);
++ else
++ dev_priv->sgx_reg =
++ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
++
++ if (!dev_priv->sgx_reg)
++ goto out_err;
++
++ if (IS_MID(dev)){
++ mrst_get_fuse_settings(dev);
++ mrst_get_vbt_data(dev_priv);
++ } else {
++ psb_intel_opregion_init(dev);
++ psb_intel_init_bios(dev);
++ }
++
++ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
++
++ if (IS_MRST(dev)) {
++ get_ci_info(dev_priv);
++ get_rar_info(dev_priv);
++ }
++
++ /* Init OSPM support */
++ ospm_power_init(dev);
++
++ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ dev_priv->has_fence_device = 1;
++ ret = ttm_bo_device_init(bdev,
++ dev_priv->mem_global_ref.object,
++ &psb_ttm_bo_driver,
++ DRM_PSB_FILE_PAGE_OFFSET);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_bo_device = 1;
++ ttm_lock_init(&dev_priv->ttm_lock);
++
++ ret = -ENOMEM;
++
++ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
++ if (!dev_priv->scratch_page)
++ goto out_err;
++
++ set_pages_uc(dev_priv->scratch_page, 1);
++
++ dev_priv->pg = psb_gtt_alloc(dev);
++ if (!dev_priv->pg)
++ goto out_err;
++
++ ret = psb_gtt_init(dev_priv->pg, 0);
++ if (ret)
++ goto out_err;
++
++ ret = psb_gtt_mm_init(dev_priv->pg);
++ if (ret)
++ goto out_err;
++
++ dev_priv->mmu = psb_mmu_driver_init((void *)0,
++ drm_psb_trap_pagefaults, 0,
++ dev_priv);
++ if (!dev_priv->mmu)
++ goto out_err;
++
++ pg = dev_priv->pg;
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ /* CI/RAR use the lower half of TT. */
++ pg->ci_start = (tt_pages / 2) << PAGE_SHIFT;
++ pg->rar_start = pg->ci_start + pg->ci_stolen_size;
++
++
++ /*
++ * Make MSVDX/TOPAZ MMU aware of the CI stolen memory area.
++ */
++ if (dev_priv->pg->ci_stolen_size != 0) {
++ down_read(&pg->sem);
++ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ dev_priv->ci_region_start >> PAGE_SHIFT,
++ pg->mmu_gatt_start + pg->ci_start,
++ pg->ci_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++ }
++
++ /*
++ * Make MSVDX/TOPAZ MMU aware of the rar stolen memory area.
++ */
++ if (dev_priv->pg->rar_stolen_size != 0) {
++ down_read(&pg->sem);
++ ret = psb_mmu_insert_pfn_sequence(
++ psb_mmu_get_default_pd(dev_priv->mmu),
++ dev_priv->rar_region_start >> PAGE_SHIFT,
++ pg->mmu_gatt_start + pg->rar_start,
++ pg->rar_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++ }
++
++ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
++ if (!dev_priv->pf_pd)
++ goto out_err;
++
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++
++ spin_lock_init(&dev_priv->sequence_lock);
++
++ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
++
++ ret = psb_do_init(dev);
++ if (ret)
++ return ret;
++
++ /**
++ * Init lid switch timer.
++ * NOTE: must do this after psb_intel_opregion_init
++ * and psb_backlight_init
++ */
++ if (IS_POULSBO(dev) && dev_priv->lid_state)
++ psb_lid_timer_init(dev_priv);
++
++ /*initialize the MSI for MRST*/
++ if (IS_MID(dev)) {
++ if (pci_enable_msi(dev->pdev)) {
++ DRM_ERROR("Enable MSI for MRST failed!\n");
++ } else {
++ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
++ dev->pdev->irq);
++ /* pci_write_config_word(pdev, 0x04, 0x07); */
++ }
++ }
++
++ ret = drm_vblank_init(dev, dev_priv->num_pipe);
++ if (ret)
++ goto out_err;
++
++ /*
++ * Install interrupt handlers prior to powering off SGX or else we will
++ * crash.
++ */
++ dev_priv->vdc_irq_mask = 0;
++ dev_priv->pipestat[0] = 0;
++ dev_priv->pipestat[1] = 0;
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
++ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ drm_irq_install(dev);
++
++ dev->vblank_disable_allowed = 1;
++
++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++
++ dev->driver->get_vblank_counter = psb_get_vblank_counter;
++
++#ifdef CONFIG_MDFLD_DSI_DPU
++ /*init dpu info*/
++ mdfld_dbi_dpu_init(dev);
++#else
++ mdfld_dbi_dsr_init(dev);
++#endif /*CONFIG_MDFLD_DSI_DPU*/
++
++ if (drm_psb_no_fb == 0) {
++ psb_modeset_init(dev);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ psb_fbdev_init(dev);
++ drm_kms_helper_poll_init(dev);
++#else
++ drm_helper_initial_config(dev);
++#endif
++ }
++
++ if (IS_MDFLD(dev)) {
++ /*must be after mrst_get_fuse_settings() */
++ ret = mdfld_backlight_init(dev);
++ if (ret)
++ return ret;
++ } else {
++ /*must be after mrst_get_fuse_settings()*/
++ ret = psb_backlight_init(dev);
++ if (ret)
++ return ret;
++ }
++
++
++ /*Intel drm driver load is done, continue doing pvr load*/
++ DRM_DEBUG("Pvr driver load\n");
++
++ return PVRSRVDrmLoad(dev, chipset);
++out_err:
++ psb_driver_unload(dev);
++ return ret;
++}
++
++int psb_driver_device_is_agp(struct drm_device *dev)
++{
++ return 0;
++}
++
++int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ union drm_psb_extension_arg *arg = data;
++ struct drm_psb_extension_rep *rep = &arg->rep;
++
++ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ /*return the page flipping ioctl offset*/
++ if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FLIP;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ /* return the video rar offset */
++ if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_LNC_VIDEO_GETPARAM;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ rep->exists = 0;
++ return 0;
++}
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct ttm_mem_type_manager *man;
++ int clean;
++ int ret;
++
++ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
++ psb_fpriv(file_priv)->tfile);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ man = &bdev->man[TTM_PL_TT];
++ spin_lock(&bdev->lru_lock);
++ clean = drm_mm_clean(&man->manager);
++ spin_unlock(&bdev->lru_lock);
++ if (unlikely(!clean))
++ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
++
++ ttm_bo_swapout_all(&dev_priv->bdev);
++
++ return 0;
++out_unlock:
++ (void) ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++ return ret;
++}
++
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ return ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_sizes_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_sizes_arg *arg =
++ (struct drm_psb_sizes_arg *) data;
++
++ *arg = dev_priv->sizes;
++ return 0;
++}
++
++static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++
++ *arg = dev_priv->fuse_reg_value;
++ return 0;
++}
++static int psb_vbt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct gct_ioctl_arg *pGCT = data;
++
++ memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
++
++ return 0;
++}
++
++static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
++ struct drm_file *file_priv)
++{
++ uint32_t flags;
++ uint32_t obj_id;
++ struct drm_mode_object *obj;
++ struct drm_connector *connector;
++ struct drm_crtc *crtc;
++ struct drm_psb_dc_state_arg *arg =
++ (struct drm_psb_dc_state_arg *)data;
++
++ if (IS_MID(dev))
++ return 0;
++
++ flags = arg->flags;
++ obj_id = arg->obj_id;
++
++ if (flags & PSB_DC_CRTC_MASK) {
++ obj = drm_mode_object_find(dev, obj_id,
++ DRM_MODE_OBJECT_CRTC);
++ if (!obj) {
++ DRM_DEBUG("Invalid CRTC object.\n");
++ return -EINVAL;
++ }
++
++ crtc = obj_to_crtc(obj);
++
++ mutex_lock(&dev->mode_config.mutex);
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (flags & PSB_DC_CRTC_SAVE)
++ crtc->funcs->save(crtc);
++ else
++ crtc->funcs->restore(crtc);
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++ } else if (flags & PSB_DC_OUTPUT_MASK) {
++ obj = drm_mode_object_find(dev, obj_id,
++ DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ DRM_DEBUG("Invalid connector id.\n");
++ return -EINVAL;
++ }
++
++ connector = obj_to_connector(obj);
++ if (flags & PSB_DC_OUTPUT_SAVE)
++ connector->funcs->save(connector);
++ else
++ connector->funcs->restore(connector);
++
++ return 0;
++ }
++
++ DRM_DEBUG("Bad flags 0x%x\n", flags);
++ return -EINVAL;
++}
++
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ struct backlight_device bd;
++ dev_priv->blc_adj2 = *arg;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ if (IS_MDFLD(dev)) {
++ bd.props.brightness = mdfld_get_brightness(&bd);
++ mdfld_set_brightness(&bd);
++ } else {
++ bd.props.brightness = psb_get_brightness(&bd);
++ psb_set_brightness(&bd);
++ }
++#endif
++ return 0;
++}
++
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ struct backlight_device bd;
++ dev_priv->blc_adj1 = *arg;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ if (IS_MDFLD(dev)) {
++ bd.props.brightness = mdfld_get_brightness(&bd);
++ mdfld_set_brightness(&bd);
++ } else {
++ bd.props.brightness = psb_get_brightness(&bd);
++ psb_set_brightness(&bd);
++ }
++#endif
++ return 0;
++}
++
++static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ u32 irqCtrl = 0;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct dpst_guardband guardband_reg;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++ uint32_t *enable = data;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ if (*enable == 1) {
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_pipe_assignment = 0;
++ ie_hist_cont_reg.histogram_mode_select = DPST_YUV_LUMA_MODE;
++ ie_hist_cont_reg.ie_histogram_enable = 1;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 1;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(irqCtrl | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ /* Wait for two vblanks */
++ } else {
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 0;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_histogram_enable = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
++ PSB_WVDC32(irqCtrl, PIPEASTAT);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_hist_status_arg *hist_status = data;
++ uint32_t *arg = hist_status->buf;
++ u32 iedbr_reg_data = 0;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++ u32 i;
++ int dpst3_bin_threshold_count = 0;
++ uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
++ uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
++ uint32_t segvalue_max_22_bit = 0x3fffff;
++ uint32_t iedbr_busy_bit = 0x80000000;
++ int dpst3_bin_count = 32;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
++ ie_hist_cont_reg.bin_reg_func_select = dpst3_bin_threshold_count;
++ ie_hist_cont_reg.bin_reg_index = 0;
++
++ PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
++
++ for (i = 0; i < dpst3_bin_count; i++) {
++ iedbr_reg_data = PSB_RVDC32(iebdr_reg);
++
++ if (!(iedbr_reg_data & iedbr_busy_bit)) {
++ arg[i] = iedbr_reg_data & segvalue_max_22_bit;
++ } else {
++ i = 0;
++ ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
++ ie_hist_cont_reg.bin_reg_index = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct pci_dev *pdev = NULL;
++ struct device *ddev = NULL;
++ struct kobject *kobj = NULL;
++ uint32_t *arg = data;
++
++ if (*arg == 1) {
++ /*find handle to drm kboject*/
++ pdev = dev->pdev;
++ ddev = &pdev->dev;
++ kobj = &ddev->kobj;
++
++ if (dev_priv->psb_dpst_state == NULL) {
++ /*init dpst kmum comms*/
++ dev_priv->psb_dpst_state = psb_dpst_init(kobj);
++ } else {
++ printk(KERN_ALERT "DPST already initialized\n");
++ }
++
++ psb_irq_enable_dpst(dev);
++ psb_dpst_notify_change_um(DPST_EVENT_INIT_COMPLETE,
++ dev_priv->psb_dpst_state);
++ } else {
++ /*hotplug and dpst destroy examples*/
++ psb_irq_disable_dpst(dev);
++ psb_dpst_notify_change_um(DPST_EVENT_TERMINATE,
++ dev_priv->psb_dpst_state);
++ psb_dpst_device_pool_destroy(dev_priv->psb_dpst_state);
++ dev_priv->psb_dpst_state = NULL;
++ }
++ return 0;
++}
++
++/* return the current mode to the dpst module */
++static int psb_dpst_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ uint32_t x;
++ uint32_t y;
++ uint32_t reg;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ reg = PSB_RVDC32(PIPEASRC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ /* horizontal is the left 16 bits */
++ x = reg >> 16;
++ /* vertical is the right 16 bits */
++ y = reg & 0x0000ffff;
++
++ /* the values are the image size minus one */
++ x+=1;
++ y+=1;
++
++ *arg = (x << 16) | y;
++
++ return 0;
++}
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_dpst_lut_arg *lut_arg = data;
++ struct drm_mode_object *obj;
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i = 0;
++ int32_t obj_id;
++
++ obj_id = lut_arg->output_id;
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ DRM_DEBUG("Invalid Connector object.\n");
++ return -EINVAL;
++ }
++
++ connector = obj_to_connector(obj);
++ crtc = connector->encoder->crtc;
++ psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < 256; i++)
++ psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
++
++ psb_intel_crtc_load_lut(crtc);
++
++ return 0;
++}
++
++static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct dpst_guardband* input = (struct dpst_guardband*) data;
++ struct dpst_guardband reg_data;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ reg_data.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ reg_data.guardband = input->guardband;
++ reg_data.guardband_interrupt_delay = input->guardband_interrupt_delay;
++ /* printk(KERN_ALERT "guardband = %u\ninterrupt delay = %u\n",
++ reg_data.guardband, reg_data.guardband_interrupt_delay); */
++ PSB_WVDC32(reg_data.data, HISTOGRAM_INT_CONTROL);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ uint32_t obj_id;
++ uint16_t op;
++ struct drm_mode_modeinfo *umode;
++ struct drm_display_mode *mode = NULL;
++ struct drm_psb_mode_operation_arg *arg;
++ struct drm_mode_object *obj;
++ struct drm_connector *connector;
++ struct drm_framebuffer * drm_fb;
++ struct psb_framebuffer * psb_fb;
++ struct drm_connector_helper_funcs *connector_funcs;
++ int ret = 0;
++ int resp = MODE_OK;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ arg = (struct drm_psb_mode_operation_arg *)data;
++ obj_id = arg->obj_id;
++ op = arg->operation;
++
++ switch(op) {
++ case PSB_MODE_OPERATION_SET_DC_BASE:
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
++ if(!obj) {
++ DRM_ERROR("Invalid FB id %d\n", obj_id);
++ return -EINVAL;
++ }
++
++ drm_fb = obj_to_fb(obj);
++ psb_fb = to_psb_fb(drm_fb);
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(DSPASURF, psb_fb->offset);
++ REG_READ(DSPASURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ dev_priv->saveDSPASURF = psb_fb->offset;
++ }
++
++ return 0;
++ case PSB_MODE_OPERATION_MODE_VALID:
++ umode = &arg->mode;
++
++ mutex_lock(&dev->mode_config.mutex);
++
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ ret = -EINVAL;
++ goto mode_op_out;
++ }
++
++ connector = obj_to_connector(obj);
++
++ mode = drm_mode_create(dev);
++ if (!mode) {
++ ret = -ENOMEM;
++ goto mode_op_out;
++ }
++
++ /* drm_crtc_convert_umode(mode, umode); */
++ {
++ mode->clock = umode->clock;
++ mode->hdisplay = umode->hdisplay;
++ mode->hsync_start = umode->hsync_start;
++ mode->hsync_end = umode->hsync_end;
++ mode->htotal = umode->htotal;
++ mode->hskew = umode->hskew;
++ mode->vdisplay = umode->vdisplay;
++ mode->vsync_start = umode->vsync_start;
++ mode->vsync_end = umode->vsync_end;
++ mode->vtotal = umode->vtotal;
++ mode->vscan = umode->vscan;
++ mode->vrefresh = umode->vrefresh;
++ mode->flags = umode->flags;
++ mode->type = umode->type;
++ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
++ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++ }
++
++ connector_funcs = (struct drm_connector_helper_funcs *)
++ connector->helper_private;
++
++ if (connector_funcs->mode_valid) {
++ resp = connector_funcs->mode_valid(connector, mode);
++ arg->data = (void *)resp;
++ }
++
++ /*do some clean up work*/
++ if(mode) {
++ drm_mode_destroy(dev, mode);
++ }
++mode_op_out:
++ mutex_unlock(&dev->mode_config.mutex);
++ return ret;
++
++ default:
++ DRM_DEBUG("Unsupported psb mode operation");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_stolen_memory_arg *arg = data;
++
++ arg->base = dev_priv->pg->stolen_base;
++ arg->size = dev_priv->pg->vram_stolen_size;
++
++ return 0;
++}
++
++#if 0 /*#ifdef MDFLD_HDCP*/
++static int psb_hdcp_i2c_access_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_hdcp_i2c_arg *arg = data;
++ int ret = 0;
++
++ if(arg->i2c_read)
++ ret = dev_priv->hdmi_i2c_bus->read_data(dev_priv->hdmi_i2c_adapter,
++ arg->slave_address, arg->data, arg->size);
++ else
++ ret = dev_priv->hdmi_i2c_bus->write_data(dev_priv->hdmi_i2c_adapter,
++ arg->slave_address, arg->data, arg->size);
++
++ return ret;
++}
++#endif
++
++#if MDFLD_JLIU7_DSR
++static int psb_dpu_query_ioctl(struct drm_device *dev, void *arg,
++ struct drm_file *file_priv)
++{
++ IMG_INT *data = (IMG_INT*)arg;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DRM_INFO("dsr query. \n");
++
++ dev_priv->b_dsr_enable = true;
++#if MDFLD_JLIU7_DPU
++ dev_priv->damage_rect_2d_3d.x = 0;
++ dev_priv->damage_rect_2d_3d.y = 0;
++ dev_priv->damage_rect_2d_3d.width = 864;
++ dev_priv->damage_rect_2d_3d.height = 480;
++ dev_priv->b_dpu_enable = true;
++#endif /* MDFLD_JLIU7_DPU */
++ /*DEADBEEF if disabled else F*/
++ *data = MDFLD_DSR_RR | MDFLD_DPU_ENABLE;
++
++ return 0;
++}
++
++static int psb_dpu_dsr_on_ioctl(struct drm_device *dev, void *arg,
++ struct drm_file *file_priv)
++{
++ /*todo: kick in DSR */
++ DRM_INFO("dsr kick in\n");
++ return 0;
++}
++
++#if MDFLD_JLIU7_DPU
++void psb_dpu_combine_rect (struct psb_drm_dpu_rect *d_r_1, struct psb_drm_dpu_rect *d_r_2, struct psb_drm_dpu_rect *d_r_result)
++{
++ u32 x_new, y_new;
++
++ d_r_result->x = d_r_1->x < d_r_2->x ? d_r_1->x : d_r_2->x;
++ d_r_result->y = d_r_1->y < d_r_2->y ? d_r_1->y : d_r_2->y;
++
++ x_new = ((d_r_1->x + d_r_1->width) > (d_r_2->x + d_r_2->width) ? (d_r_1->x + d_r_1->width) : (d_r_2->x + d_r_2->width));
++ y_new = ((d_r_1->y + d_r_1->height) > (d_r_2->y + d_r_2->height) ? (d_r_1->y + d_r_1->height) : (d_r_2->y + d_r_2->height));
++
++ d_r_result->width = x_new - d_r_result->x;
++ d_r_result->height = y_new - d_r_result->y;
++}
++#endif /* MDFLD_JLIU7_DPU */
++
++#ifdef CONFIG_MDFLD_DSI_DPU
++static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *arg,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_drv_dsr_off_arg *dsr_off_arg = (struct drm_psb_drv_dsr_off_arg *) arg;
++ struct psb_drm_dpu_rect rect = dsr_off_arg->damage_rect;
++
++ return mdfld_dsi_dbi_dsr_off(dev, &rect);
++}
++
++#else /*CONFIG_MDFLD_DSI_DPU*/
++
++#if MDFLD_JLIU7_DPU_2
++static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *arg,
++ struct drm_file *file_priv)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if MDFLD_JLIU7_DPU
++ struct drm_psb_drv_dsr_off_arg *dsr_off_arg = (struct drm_psb_drv_dsr_off_arg *) arg;
++ struct psb_drm_dpu_rect d_r_new = dsr_off_arg->damage_rect;
++ struct psb_drm_dpu_rect d_r_old = dev_priv->damage_rect_2d_3d;
++
++// DRM_INFO("exit from DSR, x = %d, y = %d, width = %d, height = %d. \n", d_r_new.x, d_r_new.y, d_r_new.width, d_r_new.height);
++
++ if (dev_priv->b_dpu_enable)
++ psb_dpu_combine_rect (&d_r_new, &d_r_old, &dev_priv->damage_rect_2d_3d);
++#endif /* MDFLD_JLIU7_DPU */
++
++ if ((dev_priv->dsr_fb_update & MDFLD_DSR_2D_3D) != MDFLD_DSR_2D_3D)
++ {
++ mdfld_dsi_dbi_exit_dsr (dev, MDFLD_DSR_2D_3D);
++ }
++
++// DRM_INFO("exit from DSR\n");
++ return 0;
++}
++#else /* MDFLD_JLIU7_DPU_2 */
++static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *arg,
++ struct drm_file *file_priv)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if MDFLD_JLIU7_DPU
++ struct drm_psb_drv_dsr_off_arg *dsr_off_arg = (struct drm_psb_drv_dsr_off_arg *) arg;
++ struct psb_drm_dpu_rect d_r_new = dsr_off_arg->damage_rect;
++ struct psb_drm_dpu_rect d_r_old = dev_priv->damage_rect_2d_3d;
++
++// DRM_INFO("exit from DSR, x = %d, y = %d, width = %d, height = %d. \n", d_r_new.x, d_r_new.y, d_r_new.width, d_r_new.height);
++
++ if (dev_priv->b_dpu_enable)
++ psb_dpu_combine_rect (&d_r_new, &d_r_old, &dev_priv->damage_rect_2d_3d);
++#endif /* MDFLD_JLIU7_DPU */
++
++ if ((dev_priv->dsr_fb_update & MDFLD_DSR_2D_3D) != MDFLD_DSR_2D_3D)
++ {
++ mdfld_dsi_dbi_exit_dsr (dev, MDFLD_DSR_2D_3D);
++ }
++
++// DRM_INFO("exit from DSR\n");
++ return 0;
++}
++#endif /* MDFLD_JLIU7_DPU_2 */
++#endif
++#endif /*CONFIG_MDFLD_DSI_DPU*/
++
++static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_register_rw_arg *arg = data;
++ UHBUsage usage =
++ arg->b_force_hw_on ? OSPM_UHB_FORCE_POWER_ON : OSPM_UHB_ONLY_IF_ON;
++
++ if (arg->display_write_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
++ PSB_WVDC32(arg->display.pfit_controls,
++ PFIT_CONTROL);
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ PSB_WVDC32(arg->display.pfit_autoscale_ratios,
++ PFIT_AUTO_RATIOS);
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ PSB_WVDC32(
++ arg->display.pfit_programmed_scale_ratios,
++ PFIT_PGM_RATIOS);
++ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
++ PSB_WVDC32(arg->display.pipeasrc,
++ PIPEASRC);
++ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
++ PSB_WVDC32(arg->display.pipebsrc,
++ PIPEBSRC);
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
++ PSB_WVDC32(arg->display.vtotal_a,
++ VTOTAL_A);
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
++ PSB_WVDC32(arg->display.vtotal_b,
++ VTOTAL_B);
++#if 0 /*#ifdef MDFLD_HDCP*/
++ if (arg->display_write_mask & REGRWBITS_HDCP)
++ PSB_WVDC32(arg->display.hdcp_value,
++ arg->display.hdcp_reg);
++#endif
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
++ dev_priv->savePFIT_CONTROL =
++ arg->display.pfit_controls;
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ dev_priv->savePFIT_AUTO_RATIOS =
++ arg->display.pfit_autoscale_ratios;
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ dev_priv->savePFIT_PGM_RATIOS =
++ arg->display.pfit_programmed_scale_ratios;
++ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
++ dev_priv->savePIPEASRC = arg->display.pipeasrc;
++ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
++ dev_priv->savePIPEBSRC = arg->display.pipebsrc;
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
++ dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
++ dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
++ }
++ }
++
++ if (arg->display_read_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_CONTROLS)
++ arg->display.pfit_controls =
++ PSB_RVDC32(PFIT_CONTROL);
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ arg->display.pfit_autoscale_ratios =
++ PSB_RVDC32(PFIT_AUTO_RATIOS);
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ arg->display.pfit_programmed_scale_ratios =
++ PSB_RVDC32(PFIT_PGM_RATIOS);
++ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
++ arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
++ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
++ arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
++ arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
++ arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#if 0 /*#ifdef MDFLD_HDCP*/
++ if (arg->display_read_mask & REGRWBITS_HDCP)
++ arg->display.hdcp_value = PSB_RVDC32(arg->display.hdcp_reg);
++#endif
++
++ } else {
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_CONTROLS)
++ arg->display.pfit_controls =
++ dev_priv->savePFIT_CONTROL;
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ arg->display.pfit_autoscale_ratios =
++ dev_priv->savePFIT_AUTO_RATIOS;
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ arg->display.pfit_programmed_scale_ratios =
++ dev_priv->savePFIT_PGM_RATIOS;
++ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
++ arg->display.pipeasrc = dev_priv->savePIPEASRC;
++ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
++ arg->display.pipebsrc = dev_priv->savePIPEBSRC;
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
++ arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
++ arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
++ }
++ }
++
++ if (arg->overlay_write_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
++ PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
++ PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
++ }
++
++ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
++ {
++ PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
++#if MDFLD_JLIU7_DSR
++ if ((((arg->overlay.OVADD & OV_PIPE_SELECT) >> OV_PIPE_SELECT_POS) == OV_PIPE_A) && (!(dev_priv->dsr_fb_update & MDFLD_DSR_OVERLAY_0))) {
++#ifndef CONFIG_MDFLD_DSI_DPU
++ mdfld_dsi_dbi_exit_dsr (dev, MDFLD_DSR_OVERLAY_0);
++#else
++ /*TODO: report overlay damage*/
++#endif
++ }
++
++ if ((((arg->overlay.OVADD & OV_PIPE_SELECT) >> OV_PIPE_SELECT_POS) == OV_PIPE_C) && (!(dev_priv->dsr_fb_update & MDFLD_DSR_OVERLAY_2))) {
++#ifndef CONFIG_MDFLD_DSI_DPU
++ mdfld_dsi_dbi_exit_dsr (dev, MDFLD_DSR_OVERLAY_2);
++#else
++ /*TODO: report overlay damage*/
++#endif
++ }
++#endif /* MDFLD_JLIU7_DSR */
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
++ dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
++ dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
++ dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
++ dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
++ dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
++ dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
++ }
++ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
++ dev_priv->saveOV_OVADD = arg->overlay.OVADD;
++ }
++ }
++
++ if (arg->overlay_read_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
++ arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++ arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ }
++ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
++ arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
++ arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
++ arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
++ arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
++ arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
++ arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
++ arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
++ }
++ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
++ arg->overlay.OVADD = dev_priv->saveOV_OVADD;
++ }
++ }
++
++ if (arg->sprite_enable_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ PSB_WVDC32(0x1F3E, DSPARB);
++ PSB_WVDC32(arg->sprite.dspa_control | PSB_RVDC32(DSPACNTR), DSPACNTR);
++ PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
++ PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
++ PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
++ PSB_RVDC32(DSPASURF);
++ PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
++ PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
++ PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
++ PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
++ PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
++ PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
++ PSB_RVDC32(DSPCSURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++
++ if (arg->sprite_disable_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ PSB_WVDC32(0x3F3E, DSPARB);
++ PSB_WVDC32(0x0, DSPCCNTR);
++ PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
++ PSB_RVDC32(DSPCSURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++
++
++ return 0;
++}
++
++/* always available as we are SIGIO'd */
++static unsigned int psb_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ return POLLIN | POLLRDNORM;
++}
++
++static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
++{
++ DRM_DEBUG("\n");
++ return PVRSRVOpen(dev, priv);
++}
++
++static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct drm_file *file_priv = filp->private_data;
++ struct drm_device *dev = file_priv->minor->dev;
++ unsigned int nr = DRM_IOCTL_NR(cmd);
++ long ret;
++
++ DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);
++
++ /*
++ * The driver private ioctls and TTM ioctls should be
++ * thread-safe.
++ */
++
++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
++ struct drm_ioctl_desc *ioctl =
++ &psb_ioctls[nr - DRM_COMMAND_BASE];
++
++ if (unlikely(ioctl->cmd != cmd)) {
++ DRM_ERROR(
++ "Invalid drm cmnd %d ioctl->cmd %x, cmd %x\n",
++ nr - DRM_COMMAND_BASE, ioctl->cmd, cmd);
++ return -EINVAL;
++ }
++
++ return drm_ioctl(filp, cmd, arg);
++ }
++ /*
++ * Not all old drm ioctls are thread-safe.
++ */
++
++ lock_kernel();
++ ret = drm_ioctl(filp, cmd, arg);
++ unlock_kernel();
++ return ret;
++}
++
++static int psb_blc_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct backlight_device bd;
++ int user_brightness = 0;
++ int final_brightness = 0;
++ int len = 0;
++
++ *start = &buf[offset];
++ *eof = 0;
++
++ if (IS_MDFLD(dev))
++ user_brightness = mdfld_get_brightness(&bd);
++ else
++ user_brightness = psb_get_brightness(&bd);
++
++ final_brightness = (user_brightness * dev_priv->blc_adj1) / 100;
++ final_brightness = (final_brightness * dev_priv->blc_adj2) / 100;
++
++ DRM_INFO("%i\n", final_brightness);
++
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ int len = 0;
++#ifdef OSPM_STAT
++ unsigned long on_time = 0;
++ unsigned long off_time = 0;
++#endif
++
++ *start = &buf[offset];
++ *eof = 0;
++
++#ifdef SUPPORT_ACTIVE_POWER_MANAGEMENT
++ DRM_INFO("GFX D0i3: enabled ");
++#else
++ DRM_INFO("GFX D0i3: disabled ");
++#endif
++
++#ifdef OSPM_STAT
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_ON:
++ DRM_INFO("GFX state:%s\n", "on");
++ break;
++ case PSB_PWR_STATE_OFF:
++ DRM_INFO("GFX state:%s\n", "off");
++ break;
++ default:
++ DRM_INFO("GFX state:%s\n", "unknown");
++ }
++
++ on_time = dev_priv->gfx_on_time * 1000 / HZ;
++ off_time = dev_priv->gfx_off_time * 1000 / HZ;
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_ON:
++ on_time += (jiffies - dev_priv->gfx_last_mode_change) * \
++ 1000 / HZ;
++ break;
++ case PSB_PWR_STATE_OFF:
++ off_time += (jiffies - dev_priv->gfx_last_mode_change) * \
++ 1000 / HZ;
++ break;
++ }
++ DRM_INFO("GFX(count/ms):\n");
++ DRM_INFO("on:%lu/%lu, off:%lu/%lu \n",
++ dev_priv->gfx_on_cnt, on_time, dev_priv->gfx_off_cnt, off_time);
++#endif
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++/* When a client dies:
++ * - Check for and clean up flipped page state
++ */
++void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
++{
++}
++
++static void psb_remove(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ drm_put_dev(dev);
++}
++
++static int psb_proc_init(struct drm_minor *minor)
++{
++ struct proc_dir_entry *ent;
++ struct proc_dir_entry *ent1;
++ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->proc_root,
++ psb_ospm_read, minor);
++ ent1 = create_proc_read_entry(BLC_PROC_ENTRY, 0, minor->proc_root,
++ psb_blc_read, minor);
++
++ if (!ent || !ent1)
++ return -1;
++
++ return 0;
++}
++
++static void psb_proc_cleanup(struct drm_minor *minor)
++{
++ remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root);
++ remove_proc_entry(BLC_PROC_ENTRY, minor->proc_root);
++ return;
++}
++
++static struct drm_driver driver = {
++ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
++ DRIVER_IRQ_VBL | DRIVER_MODESET,
++ .load = psb_driver_load,
++ .unload = psb_driver_unload,
++
++ .ioctls = psb_ioctls,
++ .device_is_agp = psb_driver_device_is_agp,
++ .irq_preinstall = psb_irq_preinstall,
++ .irq_postinstall = psb_irq_postinstall,
++ .irq_uninstall = psb_irq_uninstall,
++ .irq_handler = psb_irq_handler,
++ .enable_vblank = psb_enable_vblank,
++ .disable_vblank = psb_disable_vblank,
++ .get_vblank_counter = psb_get_vblank_counter,
++ .firstopen = NULL,
++ .lastclose = psb_lastclose,
++ .open = psb_driver_open,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .proc_init = psb_proc_init,
++ .proc_cleanup = psb_proc_cleanup,
++ .preclose = psb_driver_preclose,
++ .fops = {
++ .owner = THIS_MODULE,
++ .open = psb_open,
++ .release = psb_release,
++ .unlocked_ioctl = psb_unlocked_ioctl,
++ .mmap = psb_mmap,
++ .poll = psb_poll,
++ .fasync = drm_fasync,
++ .read = drm_read,
++ },
++ .pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pciidlist,
++ .resume = ospm_power_resume,
++ .suspend = ospm_power_suspend,
++ .probe = psb_probe,
++ .remove = psb_remove,
++ },
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = PSB_DRM_DRIVER_DATE,
++ .major = PSB_DRM_DRIVER_MAJOR,
++ .minor = PSB_DRM_DRIVER_MINOR,
++ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
++};
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init psb_init(void)
++{
++ int ret;
++
++ driver.num_ioctls = psb_max_ioctl;
++
++#if defined(MODULE) && defined(CONFIG_NET)
++ psb_kobject_uevent_init();
++#endif
++
++ ret = SYSPVRInit();
++ if (ret != 0)
++ {
++ return ret;
++ }
++
++ ret = drm_init(&driver);
++
++ return ret;
++}
++
++static void __exit psb_exit(void)
++{
++ drm_exit(&driver);
++}
++
++late_initcall(psb_init);
++module_exit(psb_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_drv.h
+@@ -0,0 +1,1322 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRV_H_
++#define _PSB_DRV_H_
++
++#include <linux/version.h>
++
++#include <drm/drmP.h>
++#include "drm_global.h"
++#include "sys_pvr_drm_export.h"
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_schedule.h"
++#include "psb_intel_drv.h"
++#include "psb_hotplug.h"
++#include "psb_dpst.h"
++#include "psb_gtt.h"
++#include "psb_powermgmt.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_fence_driver.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_lock.h"
++
++/*IMG headers*/
++#include "private_data.h"
++#include "pvr_drm.h"
++
++/*Append new drm mode definition here, align with libdrm definition*/
++#define DRM_MODE_SCALE_NO_SCALE 2
++
++extern struct ttm_bo_driver psb_ttm_bo_driver;
++
++enum {
++ CHIP_PSB_8108 = 0,
++ CHIP_PSB_8109 = 1,
++ CHIP_MRST_4100 = 2,
++ CHIP_MDFLD_0130 = 3
++};
++
++
++#define PCI_ID_TOPAZ_DISABLED 0x4101
++
++/*
++ *Hardware bugfixes
++ */
++
++#define FIX_TG_16
++#define FIX_TG_2D_CLOCKGATE
++#define OSPM_STAT
++
++#define DRIVER_NAME "pvrsrvkm"
++#define DRIVER_DESC "drm driver for the Intel GMA500"
++#define DRIVER_AUTHOR "Intel Corporation"
++#define OSPM_PROC_ENTRY "ospm"
++#define BLC_PROC_ENTRY "mrst_blc"
++
++#define PSB_DRM_DRIVER_DATE "2009-03-10"
++#define PSB_DRM_DRIVER_MAJOR 8
++#define PSB_DRM_DRIVER_MINOR 1
++#define PSB_DRM_DRIVER_PATCHLEVEL 0
++
++/*
++ *TTM driver private offsets.
++ */
++
++#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
++
++#define PSB_OBJECT_HASH_ORDER 13
++#define PSB_FILE_OBJECT_HASH_ORDER 12
++#define PSB_BO_HASH_ORDER 12
++
++#define PSB_VDC_OFFSET 0x00000000
++#define PSB_VDC_SIZE 0x000080000
++#define MRST_MMIO_SIZE 0x0000C0000
++#define MDFLD_MMIO_SIZE 0x000100000
++#define PSB_SGX_SIZE 0x8000
++#define PSB_SGX_OFFSET 0x00040000
++#define MRST_SGX_OFFSET 0x00080000
++#define PSB_MMIO_RESOURCE 0
++#define PSB_GATT_RESOURCE 2
++#define PSB_GTT_RESOURCE 3
++#define PSB_GMCH_CTRL 0x52
++#define PSB_BSM 0x5C
++#define _PSB_GMCH_ENABLED 0x4
++#define PSB_PGETBL_CTL 0x2020
++#define _PSB_PGETBL_ENABLED 0x00000001
++#define PSB_SGX_2D_SLAVE_PORT 0x4000
++#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
++#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
++#define PSB_NUM_VALIDATE_BUFFERS 2048
++
++#define PSB_MEM_MMU_START 0x40000000
++
++/*
++ *Flags for external memory type field.
++ */
++
++#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
++#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
++/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
++#define PSB_MSVDX_SIZE 0x10000
++
++#define LNC_TOPAZ_OFFSET 0xA0000
++#define PNW_TOPAZ_OFFSET 0xC0000
++#define LNC_TOPAZ_SIZE 0x10000
++#define PNW_TOPAZ_SIZE 0x30000 /* PNW VXE285 has two cores */
++
++#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
++#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
++#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
++
++/*
++ *PTE's and PDE's
++ */
++
++#define PSB_PDE_MASK 0x003FFFFF
++#define PSB_PDE_SHIFT 22
++#define PSB_PTE_SHIFT 12
++
++#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
++#define PSB_PTE_WO 0x0002 /* Write only */
++#define PSB_PTE_RO 0x0004 /* Read only */
++#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
++
++/*
++ *VDC registers and bits
++ */
++#define PSB_MSVDX_CLOCKGATING 0x2064
++#define PSB_TOPAZ_CLOCKGATING 0x2068
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define PSB_INT_IDENTITY_R 0x20A4
++#define _PSB_PIPEB_EVENT (1<<4)
++#define _PSB_DPST_PIPEB_FLAG (1<<4)
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_DPST_PIPEA_FLAG (1<<6)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_IDENTITY_R 0x20A4
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++
++#define _PSB_MMU_ER_MASK 0x0001FF00
++#define _PSB_MMU_ER_HOST (1 << 16)
++#define GPIOA 0x5010
++#define GPIOB 0x5014
++#define GPIOC 0x5018
++#define GPIOD 0x501c
++#define GPIOE 0x5020
++#define GPIOF 0x5024
++#define GPIOG 0x5028
++#define GPIOH 0x502c
++#define GPIO_CLOCK_DIR_MASK (1 << 0)
++#define GPIO_CLOCK_DIR_IN (0 << 1)
++#define GPIO_CLOCK_DIR_OUT (1 << 1)
++#define GPIO_CLOCK_VAL_MASK (1 << 2)
++#define GPIO_CLOCK_VAL_OUT (1 << 3)
++#define GPIO_CLOCK_VAL_IN (1 << 4)
++#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
++#define GPIO_DATA_DIR_MASK (1 << 8)
++#define GPIO_DATA_DIR_IN (0 << 9)
++#define GPIO_DATA_DIR_OUT (1 << 9)
++#define GPIO_DATA_VAL_MASK (1 << 10)
++#define GPIO_DATA_VAL_OUT (1 << 11)
++#define GPIO_DATA_VAL_IN (1 << 12)
++#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
++
++#define VCLK_DIVISOR_VGA0 0x6000
++#define VCLK_DIVISOR_VGA1 0x6004
++#define VCLK_POST_DIV 0x6010
++
++#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
++#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
++#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
++#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
++#define PSB_COMM_USER_IRQ (1024 >> 2)
++#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
++#define PSB_COMM_FW (2048 >> 2)
++
++#define PSB_UIRQ_VISTEST 1
++#define PSB_UIRQ_OOM_REPLY 2
++#define PSB_UIRQ_FIRE_TA_REPLY 3
++#define PSB_UIRQ_FIRE_RASTER_REPLY 4
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
++#define PSB_LID_DELAY (DRM_HZ / 10)
++
++#if 1 /* MDFLD_JLIU7_DSR */
++#if MDFLD_JLIU7_DPU_2
++#define MDFLD_DSR_2D_3D BIT0
++#define MDFLD_DSR_CURSOR (BIT2 | BIT3)
++#define MDFLD_DSR_CURSOR_0 BIT2
++#define MDFLD_DSR_CURSOR_2 BIT3
++#define MDFLD_DSR_OVERLAY (BIT4 | BIT5)
++#define MDFLD_DSR_OVERLAY_0 BIT4
++#define MDFLD_DSR_OVERLAY_2 BIT5
++#else /* MDFLD_JLIU7_DPU_2 */
++#define MDFLD_DSR_2D_3D_0 BIT0
++#define MDFLD_DSR_2D_3D_2 BIT1
++# define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
++#define MDFLD_DSR_CURSOR_0 BIT2
++#define MDFLD_DSR_CURSOR_2 BIT3
++#define MDFLD_DSR_OVERLAY_0 BIT4
++#define MDFLD_DSR_OVERLAY_2 BIT5
++#endif /* MDFLD_JLIU7_DPU_2 */
++#define MDFLD_DSR_RR 45
++#define MDFLD_DPU_ENABLE BIT31
++#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
++#endif /* MDFLD_JLIU7_DSR */
++
++#define PSB_PWR_STATE_ON 1
++#define PSB_PWR_STATE_OFF 2
++
++#define PSB_PMPOLICY_NOPM 0
++#define PSB_PMPOLICY_CLOCKGATING 1
++#define PSB_PMPOLICY_POWERDOWN 2
++
++#define PSB_PMSTATE_POWERUP 0
++#define PSB_PMSTATE_CLOCKGATED 1
++#define PSB_PMSTATE_POWERDOWN 2
++#define PSB_PCIx_MSI_ADDR_LOC 0x94
++#define PSB_PCIx_MSI_DATA_LOC 0x98
++
++/*
++ *User options.
++ */
++
++struct drm_psb_uopt {
++ int pad; /*keep it here in case we use it in future*/
++};
++
++/**
++ *struct psb_context
++ *
++ *@buffers: array of pre-allocated validate buffers.
++ *@used_buffers: number of buffers in @buffers array currently in use.
++ *@validate_buffer: buffers validated from user-space.
++ *@kern_validate_buffers : buffers validated from kernel-space.
++ *@fence_flags : Fence flags to be used for fence creation.
++ *
++ *This structure is used during execbuf validation.
++ */
++
++struct psb_context {
++ struct psb_validate_buffer *buffers;
++ uint32_t used_buffers;
++ struct list_head validate_list;
++ struct list_head kern_validate_list;
++ uint32_t fence_types;
++ uint32_t val_seq;
++};
++
++struct psb_validate_buffer;
++
++struct psb_msvdx_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++typedef int (*pfn_vsync_handler)(struct drm_device* dev, int pipe);
++
++
++#define MODE_SETTING_IN_CRTC 0x1
++#define MODE_SETTING_IN_ENCODER 0x2
++#define MODE_SETTING_ON_GOING 0x3
++#define MODE_SETTING_IN_DSR 0x4
++
++struct drm_psb_private {
++ void * dbi_dsr_info;
++#ifdef CONFIG_MDFLD_DSI_DPU
++ void * dbi_dpu_info;
++#endif
++ /*
++ *TTM Glue.
++ */
++
++ struct drm_global_reference mem_global_ref;
++ int has_global;
++
++ struct drm_device *dev;
++ struct ttm_object_device *tdev;
++ struct ttm_fence_device fdev;
++ struct ttm_bo_device bdev;
++ struct ttm_lock ttm_lock;
++ struct vm_operations_struct *ttm_vm_ops;
++ int has_fence_device;
++ int has_bo_device;
++
++ unsigned long chipset;
++
++ struct drm_psb_dev_info_arg dev_info;
++ struct drm_psb_uopt uopt;
++
++ struct psb_gtt *pg;
++
++ /*GTT Memory manager*/
++ struct psb_gtt_mm *gtt_mm;
++
++ struct page *scratch_page;
++ uint32_t sequence[PSB_NUM_ENGINES];
++ uint32_t last_sequence[PSB_NUM_ENGINES];
++ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
++
++ struct psb_mmu_driver *mmu;
++ struct psb_mmu_pd *pf_pd;
++
++ uint8_t *sgx_reg;
++ uint8_t *vdc_reg;
++ uint32_t gatt_free_offset;
++
++ /*
++ *MSVDX
++ */
++ uint8_t *msvdx_reg;
++ atomic_t msvdx_mmu_invaldc;
++ void *msvdx_private;
++
++ /*
++ *TOPAZ
++ */
++ uint8_t *topaz_reg;
++ void *topaz_private;
++ uint8_t topaz_disabled;
++ uint32_t video_device_fuse;
++ atomic_t topaz_mmu_invaldc;
++
++ /*
++ *Fencing / irq.
++ */
++
++ uint32_t vdc_irq_mask;
++ u32 pipestat[PSB_NUM_PIPE];
++ bool vblanksEnabledForFlips;
++
++ spinlock_t irqmask_lock;
++ spinlock_t sequence_lock;
++
++ /*
++ *Modesetting
++ */
++ struct psb_intel_mode_device mode_dev;
++
++ struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
++ struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
++ uint32_t num_pipe;
++
++ /*
++ * CI share buffer
++ */
++ unsigned int ci_region_start;
++ unsigned int ci_region_size;
++
++ /*
++ * RAR share buffer;
++ */
++ unsigned int rar_region_start;
++ unsigned int rar_region_size;
++
++ /*
++ *Memory managers
++ */
++
++ int have_camera;
++ int have_rar;
++ int have_tt;
++ int have_mem_mmu;
++ struct mutex temp_mem;
++
++ /*
++ *Relocation buffer mapping.
++ */
++
++ spinlock_t reloc_lock;
++ unsigned int rel_mapped_pages;
++ wait_queue_head_t rel_mapped_queue;
++
++ /*
++ *SAREA
++ */
++ struct drm_psb_sarea *sarea_priv;
++
++ /*
++ *OSPM info
++ */
++ uint32_t ospm_base;
++
++ /*
++ * Sizes info
++ */
++
++ struct drm_psb_sizes_arg sizes;
++
++ uint32_t fuse_reg_value;
++
++ /* vbt (gct) header information*/
++ struct mrst_vbt vbt_data;
++ /* info that is stored from the gct */
++ struct gct_ioctl_arg gct_data;
++
++ /*
++ *LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *lfp_lvds_vbt_mode;
++ struct drm_display_mode *sdvo_lvds_vbt_mode;
++
++ struct bdb_lvds_backlight *lvds_bl; /*LVDS backlight info from VBT*/
++ struct psb_intel_i2c_chan *lvds_i2c_bus;
++
++ /* Feature bits from the VBIOS*/
++ unsigned int int_tv_support:1;
++ unsigned int lvds_dither:1;
++ unsigned int lvds_vbt:1;
++ unsigned int int_crt_support:1;
++ unsigned int lvds_use_ssc:1;
++ int lvds_ssc_freq;
++
++/* MRST private date start */
++/*FIXME JLIU7 need to revisit */
++ bool sku_83;
++ bool sku_100;
++ bool sku_100L;
++ bool sku_bypass;
++ uint32_t iLVDS_enable;
++
++ /* pipe config register value */
++ uint32_t pipeconf;
++ uint32_t pipeconf1;
++ uint32_t pipeconf2;
++
++ /* plane control register value */
++ uint32_t dspcntr;
++ uint32_t dspcntr1;
++ uint32_t dspcntr2;
++
++ /* MRST_DSI private date start */
++ struct work_struct dsi_work;
++
++ /*
++ *MRST DSI info
++ */
++
++ /* The DPI panel power on */
++ bool dpi_panel_on;
++
++ /* The DBI panel power on */
++ bool dbi_panel_on;
++
++ /* The DPI display */
++ bool dpi;
++
++ enum mipi_panel_type panel_make;
++
++ /* Set if MIPI encoder wants to control plane/pipe */
++ bool dsi_plane_pipe_control;
++
++ /* status */
++ uint32_t videoModeFormat:2;
++ uint32_t laneCount:3;
++ uint32_t channelNumber:2;
++ uint32_t status_reserved:25;
++
++ /* dual display - DPI & DBI */
++ bool dual_display;
++
++ /* HS or LP transmission */
++ bool lp_transmission;
++
++ /* configuration phase */
++ bool config_phase;
++
++ /* first boot phase */
++ bool first_boot;
++
++ /* DSI clock */
++ uint32_t RRate;
++ uint32_t DDR_Clock;
++ uint32_t DDR_Clock_Calculated;
++ uint32_t ClockBits;
++
++ /* DBI Buffer pointer */
++ u32 DBI_CB_phys;
++ u8 *p_DBI_commandBuffer;
++ uint32_t DBI_CB_pointer;
++ u8 *p_DBI_dataBuffer_orig;
++ u8 *p_DBI_dataBuffer;
++ uint32_t DBI_DB_pointer;
++
++ /* DSI panel spec */
++ uint32_t pixelClock;
++ uint32_t HsyncWidth;
++ uint32_t HbackPorch;
++ uint32_t HfrontPorch;
++ uint32_t HactiveArea;
++ uint32_t VsyncWidth;
++ uint32_t VbackPorch;
++ uint32_t VfrontPorch;
++ uint32_t VactiveArea;
++ uint32_t bpp:5;
++ uint32_t Reserved:27;
++/* MRST_DSI private date end */
++
++/* MDFLD_DSI private date start */
++ /* dual display - DPI & DBI */
++ bool dual_mipi;
++ uint32_t ksel;
++ uint32_t mipi_lane_config;
++ /*
++ *MRST DSI info
++ */
++ /* The DPI panel power on */
++ bool dpi_panel_on2;
++
++ /* The DBI panel power on */
++ bool dbi_panel_on2;
++
++ /* The DPI display */
++ bool dpi2;
++
++ /* status */
++ uint32_t videoModeFormat2:2;
++ uint32_t laneCount2:3;
++ uint32_t channelNumber2:2;
++ uint32_t status_reserved2:25;
++
++ /* HS or LP transmission */
++ bool lp_transmission2;
++
++ /* configuration phase */
++ bool config_phase2;
++
++ /* DSI clock */
++ uint32_t RRate2;
++ uint32_t DDR_Clock2;
++ uint32_t DDR_Clock_Calculated2;
++ uint32_t ClockBits2;
++
++ /* DBI Buffer pointer */
++ u32 DBI_CB_phys2;
++ u8 *p_DBI_commandBuffer2;
++ uint32_t DBI_CB_pointer2;
++ u8 *p_DBI_dataBuffer_orig2;
++ u8 *p_DBI_dataBuffer2;
++
++ /* DSI panel spec */
++ uint32_t pixelClock2;
++ uint32_t HsyncWidth2;
++ uint32_t HbackPorch2;
++ uint32_t HfrontPorch2;
++ uint32_t HactiveArea2;
++ uint32_t VsyncWidth2;
++ uint32_t VbackPorch2;
++ uint32_t VfrontPorch2;
++ uint32_t VactiveArea2;
++ uint32_t bpp2:5;
++ uint32_t Reserved2:27;
++/* MDFLD_DSI private date end */
++
++ /*
++ *Register state
++ */
++ uint32_t saveDSPACNTR;
++ uint32_t saveDSPBCNTR;
++ uint32_t savePIPEACONF;
++ uint32_t savePIPEBCONF;
++ uint32_t savePIPEASRC;
++ uint32_t savePIPEBSRC;
++ uint32_t saveFPA0;
++ uint32_t saveFPA1;
++ uint32_t saveDPLL_A;
++ uint32_t saveDPLL_A_MD;
++ uint32_t saveHTOTAL_A;
++ uint32_t saveHBLANK_A;
++ uint32_t saveHSYNC_A;
++ uint32_t saveVTOTAL_A;
++ uint32_t saveVBLANK_A;
++ uint32_t saveVSYNC_A;
++ uint32_t saveDSPASTRIDE;
++ uint32_t saveDSPASIZE;
++ uint32_t saveDSPAPOS;
++ uint32_t saveDSPABASE;
++ uint32_t saveDSPASURF;
++ uint32_t saveFPB0;
++ uint32_t saveFPB1;
++ uint32_t saveDPLL_B;
++ uint32_t saveDPLL_B_MD;
++ uint32_t saveHTOTAL_B;
++ uint32_t saveHBLANK_B;
++ uint32_t saveHSYNC_B;
++ uint32_t saveVTOTAL_B;
++ uint32_t saveVBLANK_B;
++ uint32_t saveVSYNC_B;
++ uint32_t saveDSPBSTRIDE;
++ uint32_t saveDSPBSIZE;
++ uint32_t saveDSPBPOS;
++ uint32_t saveDSPBBASE;
++ uint32_t saveDSPBSURF;
++ uint32_t saveVCLK_DIVISOR_VGA0;
++ uint32_t saveVCLK_DIVISOR_VGA1;
++ uint32_t saveVCLK_POST_DIV;
++ uint32_t saveVGACNTRL;
++ uint32_t saveADPA;
++ uint32_t saveLVDS;
++ uint32_t saveDVOA;
++ uint32_t saveDVOB;
++ uint32_t saveDVOC;
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePaletteA[256];
++ uint32_t savePaletteB[256];
++ uint32_t saveBLC_PWM_CTL2;
++ uint32_t saveBLC_PWM_CTL;
++ uint32_t saveCLOCKGATING;
++ uint32_t saveDSPARB;
++ uint32_t saveDSPATILEOFF;
++ uint32_t saveDSPBTILEOFF;
++ uint32_t saveDSPAADDR;
++ uint32_t saveDSPBADDR;
++ uint32_t savePFIT_AUTO_RATIOS;
++ uint32_t savePFIT_PGM_RATIOS;
++ uint32_t savePP_ON_DELAYS;
++ uint32_t savePP_OFF_DELAYS;
++ uint32_t savePP_DIVISOR;
++ uint32_t saveBSM;
++ uint32_t saveVBT;
++ uint32_t saveBCLRPAT_A;
++ uint32_t saveBCLRPAT_B;
++ uint32_t saveDSPALINOFF;
++ uint32_t saveDSPBLINOFF;
++ uint32_t savePERF_MODE;
++ uint32_t saveDSPFW1;
++ uint32_t saveDSPFW2;
++ uint32_t saveDSPFW3;
++ uint32_t saveDSPFW4;
++ uint32_t saveDSPFW5;
++ uint32_t saveDSPFW6;
++ uint32_t saveCHICKENBIT;
++ uint32_t saveDSPACURSOR_CTRL;
++ uint32_t saveDSPBCURSOR_CTRL;
++ uint32_t saveDSPACURSOR_BASE;
++ uint32_t saveDSPBCURSOR_BASE;
++ uint32_t saveDSPACURSOR_POS;
++ uint32_t saveDSPBCURSOR_POS;
++ uint32_t save_palette_a[256];
++ uint32_t save_palette_b[256];
++ uint32_t saveOV_OVADD;
++ uint32_t saveOV_OGAMC0;
++ uint32_t saveOV_OGAMC1;
++ uint32_t saveOV_OGAMC2;
++ uint32_t saveOV_OGAMC3;
++ uint32_t saveOV_OGAMC4;
++ uint32_t saveOV_OGAMC5;
++
++ /* DSI reg save */
++ uint32_t saveDEVICE_READY_REG;
++ uint32_t saveINTR_EN_REG;
++ uint32_t saveDSI_FUNC_PRG_REG;
++ uint32_t saveHS_TX_TIMEOUT_REG;
++ uint32_t saveLP_RX_TIMEOUT_REG;
++ uint32_t saveTURN_AROUND_TIMEOUT_REG;
++ uint32_t saveDEVICE_RESET_REG;
++ uint32_t saveDPI_RESOLUTION_REG;
++ uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
++ uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
++ uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
++ uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
++ uint32_t saveVERT_SYNC_PAD_COUNT_REG;
++ uint32_t saveVERT_BACK_PORCH_COUNT_REG;
++ uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
++ uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
++ uint32_t saveINIT_COUNT_REG;
++ uint32_t saveMAX_RET_PAK_REG;
++ uint32_t saveVIDEO_FMT_REG;
++ uint32_t saveEOT_DISABLE_REG;
++ uint32_t saveLP_BYTECLK_REG;
++ uint32_t saveHS_LS_DBI_ENABLE_REG;
++ uint32_t saveTXCLKESC_REG;
++ uint32_t saveDPHY_PARAM_REG;
++ uint32_t saveMIPI_CONTROL_REG;
++ uint32_t saveMIPI;
++ void (*init_drvIC)(struct drm_device *dev);
++ void (*dsi_prePowerState)(struct drm_device *dev);
++ void (*dsi_postPowerState)(struct drm_device *dev);
++
++ /* DPST Register Save */
++ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
++ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
++
++ /* MSI reg save */
++
++ uint32_t msi_addr;
++ uint32_t msi_data;
++
++ /*
++ *Scheduling.
++ */
++
++ struct mutex reset_mutex;
++ struct psb_scheduler scheduler;
++ struct mutex cmdbuf_mutex;
++ /*uint32_t ta_mem_pages;
++ struct psb_ta_mem *ta_mem;
++ int force_ta_mem_load;*/
++ atomic_t val_seq;
++
++ /*
++ *TODO: change this to be per drm-context.
++ */
++
++ struct psb_context context;
++
++ /*
++ * LID-Switch
++ */
++ spinlock_t lid_lock;
++ struct timer_list lid_timer;
++ struct psb_intel_opregion opregion;
++ u32 *lid_state;
++ u32 lid_last_state;
++
++ /*
++ *Watchdog
++ */
++
++ spinlock_t watchdog_lock;
++ struct timer_list watchdog_timer;
++ struct work_struct watchdog_wq;
++ struct work_struct msvdx_watchdog_wq;
++ struct work_struct topaz_watchdog_wq;
++ int timer_available;
++
++ uint32_t apm_reg;
++ uint16_t apm_base;
++#ifdef OSPM_STAT
++ unsigned char graphics_state;
++ unsigned long gfx_on_time;
++ unsigned long gfx_off_time;
++ unsigned long gfx_last_mode_change;
++ unsigned long gfx_on_cnt;
++ unsigned long gfx_off_cnt;
++#endif
++
++ /*to be removed later*/
++ /*int dri_page_flipping;
++ int current_page;
++#if MDFLD_HDMI_JLIU7
++ int pipe_active[3];
++#else
++ int pipe_active[2];
++#endif
++ int saved_start[2];
++ int saved_offset[2];
++ int saved_stride[2];
++
++ int flip_start[2];
++ int flip_offset[2];
++ int flip_stride[2];*/
++
++
++ /*
++ * Used for modifying backlight from
++ * xrandr -- consider removing and using HAL instead
++ */
++ struct drm_property *backlight_property;
++ uint32_t blc_adj1;
++ uint32_t blc_adj2;
++
++ /*
++ * DPST and Hotplug state
++ */
++
++ struct dpst_state *psb_dpst_state;
++ struct hotplug_state *psb_hotplug_state;
++ pfn_vsync_handler psb_vsync_handler;
++
++#if MDFLD_JLIU7_DPU_2
++#if MDFLD_JLIU7_DSR
++ struct mutex dsr_mutex;
++ bool b_dsr;
++ bool b_dsr_enable;
++ bool dsr_fb_update_done;
++ uint32_t dsr_fb_update;
++ uint32_t dsr_idle_count;
++ /*
++ * DSR TIMER
++ */
++ spinlock_t dsr_lock;
++ struct timer_list dsr_timer;
++#if MDFLD_JLIU7_DPU
++ bool b_dpu_enable;
++ struct psb_drm_dpu_rect damage_rect_2d_3d;
++ uint32_t offset_0;
++ uint32_t offset_2;
++ uint32_t bpp_0;
++ uint32_t bpp_2;
++ uint32_t cursor_addr_0;
++ uint32_t cursor_addr_2;
++ uint32_t cursor_cntr_0;
++ uint32_t cursor_cntr_2;
++ int cursor_0_x0;
++ int cursor_0_y0;
++ int cursor_0_x1;
++ int cursor_0_y1;
++ int cursor_2_x0;
++ int cursor_2_y0;
++ int cursor_2_x1;
++ int cursor_2_y1;
++#endif /* MDFLD_JLIU7_DPU */
++#endif /*FIXME JLIU */
++#else /* MDFLD_JLIU7_DPU_2 */
++#if 1 /* MDFLD_JLIU7_DSR */
++ struct mutex dsr_mutex;
++ bool b_dsr;
++ bool b_dsr_enable;
++ bool dsr_fb_update_done_0;
++ bool dsr_fb_update_done_2;
++ uint32_t dsr_fb_update;
++ uint32_t dsr_idle_count;
++ /*
++ * DSR TIMER
++ */
++ spinlock_t dsr_lock;
++ struct timer_list dsr_timer;
++#if MDFLD_JLIU7_DPU
++ bool b_dpu_enable;
++ bool b_cursor_update_0;
++ bool b_cursor_update_2;
++ struct psb_drm_dpu_rect damage_rect_2d_3d;
++ uint32_t offset_0;
++ uint32_t offset_2;
++ uint32_t bpp_0;
++ uint32_t bpp_2;
++ uint32_t cursor_addr_0;
++ uint32_t cursor_addr_2;
++ uint32_t cursor_cntr_0;
++ uint32_t cursor_cntr_2;
++ int cursor_0_x0;
++ int cursor_0_y0;
++ int cursor_0_x1;
++ int cursor_0_y1;
++ int cursor_2_x0;
++ int cursor_2_y0;
++ int cursor_2_x1;
++ int cursor_2_y1;
++#endif /* MDFLD_JLIU7_DPU */
++#endif /*FIXME JLIU */
++#endif /* MDFLD_JLIU7_DPU_2 */
++
++ bool dsi_device_ready;
++
++#ifdef MDFLD_HDCP
++ uint32_t hdmi_audio_interrupt_mask;
++#endif /* MDFLD_HDCP */
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ /*psb fb dev*/
++ void * fbdev;
++#endif
++};
++
++struct psb_fpriv {
++ struct ttm_object_file *tfile;
++};
++
++struct psb_mmu_driver;
++
++extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
++extern int drm_pick_crtcs(struct drm_device *dev);
++
++
++static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
++{
++ PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv
++ = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
++ return (struct psb_fpriv *) pvr_file_priv->pPriv;
++}
++
++static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
++{
++ return (struct drm_psb_private *) dev->dev_private;
++}
++
++/*
++ *TTM glue. psb_ttm_glue.c
++ */
++
++extern int psb_open(struct inode *inode, struct file *filp);
++extern int psb_release(struct inode *inode, struct file *filp);
++extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
++
++extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp);
++extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos);
++extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos);
++extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_ub_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
++extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
++extern int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++/*
++ *MMU stuff.
++ */
++
++extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv);
++extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
++extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
++ *driver);
++extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
++ uint32_t gtt_start, uint32_t gtt_pages);
++extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults,
++ int invalid_type);
++extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
++extern void psb_mmu_flush(struct psb_mmu_driver *driver);
++extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address,
++ uint32_t num_pages);
++extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
++ uint32_t start_pfn,
++ unsigned long address,
++ uint32_t num_pages, int type);
++extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn);
++
++/*
++ *Enable / disable MMU for different requestors.
++ */
++
++
++extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
++extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type);
++extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride);
++/*
++ *psb_sgx.c
++ */
++
++
++
++extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_reg_submit(struct drm_psb_private *dev_priv,
++ uint32_t *regs, unsigned int cmds);
++
++
++extern void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p);
++extern int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags,
++ uint64_t clr_flags);
++
++
++/*
++ *psb_irq.c
++ */
++
++extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++extern int psb_irq_enable_dpst(struct drm_device *dev);
++extern int psb_irq_disable_dpst(struct drm_device *dev);
++extern void psb_irq_preinstall(struct drm_device *dev);
++extern int psb_irq_postinstall(struct drm_device *dev);
++extern void psb_irq_uninstall(struct drm_device *dev);
++extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
++extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
++extern void psb_irq_turn_on_dpst(struct drm_device *dev);
++extern void psb_irq_turn_off_dpst(struct drm_device *dev);
++
++extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
++extern int psb_vblank_wait2(struct drm_device *dev,unsigned int *sequence);
++extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
++extern int psb_enable_vblank(struct drm_device *dev, int crtc);
++extern void psb_disable_vblank(struct drm_device *dev, int crtc);
++void
++psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
++
++void
++psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
++
++extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
++#ifdef MDFLD_HDCP
++extern int mdfld_irq_enable_hdmi_audio(struct drm_device *dev);
++extern int mdfld_irq_disable_hdmi_audio(struct drm_device *dev);
++#endif /* MDFLD_HDCP */
++
++/*
++ *psb_fence.c
++ */
++
++extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
++
++extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies);
++extern void psb_fence_error(struct drm_device *dev,
++ uint32_t class,
++ uint32_t sequence, uint32_t type, int error);
++extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
++
++/* MSVDX/Topaz stuff */
++extern int lnc_video_frameskip(struct drm_device *dev,
++ uint64_t user_pointer);
++extern int lnc_video_getparam(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_try_power_down_topaz(struct drm_device *dev);
++extern int psb_try_power_down_msvdx(struct drm_device *dev);
++
++
++/*
++ *psb_fb.c
++ */
++extern int psbfb_probed(struct drm_device *dev);
++extern int psbfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern void *psbfb_vdc_reg(struct drm_device* dev);
++
++/*
++ *psb_reset.c
++ */
++
++extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
++extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
++#if MDFLD_JLIU7_DSR
++extern void mdfld_dsr_timer_init(struct drm_psb_private * dev_priv);
++extern void mdfld_dsr_timer_takedown(struct drm_psb_private * dev_priv);
++#endif /* MDFLD_JLIU7_DSR */
++
++/* modesetting */
++extern void psb_modeset_init(struct drm_device *dev);
++extern void psb_modeset_cleanup(struct drm_device *dev);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++/*fbdev*/
++extern int psb_fbdev_init(struct drm_device * dev);
++#endif
++
++/* psb_bl.c */
++int psb_backlight_init(struct drm_device *dev);
++void psb_backlight_exit(void);
++int psb_set_brightness(struct backlight_device *bd);
++int psb_get_brightness(struct backlight_device *bd);
++/* MDFLD supports */
++int mdfld_backlight_init(struct drm_device *dev);
++void mdfld_backlight_exit(void);
++int mdfld_set_brightness(struct backlight_device *bd);
++int mdfld_get_brightness(struct backlight_device *bd);
++
++/*
++ *Debug print bits setting
++ */
++#define PSB_D_GENERAL (1 << 0)
++#define PSB_D_INIT (1 << 1)
++#define PSB_D_IRQ (1 << 2)
++#define PSB_D_ENTRY (1 << 3)
++/* debug the get H/V BP/FP count */
++#define PSB_D_HV (1 << 4)
++#define PSB_D_DBI_BF (1 << 5)
++#define PSB_D_PM (1 << 6)
++#define PSB_D_RENDER (1 << 7)
++#define PSB_D_REG (1 << 8)
++
++#ifndef DRM_DEBUG_CODE
++/* To enable debug printout, set drm_psb_debug in psb_drv.c
++ * to any combination of above print flags.
++ */
++/* #define DRM_DEBUG_CODE 2 */
++#endif
++
++extern int drm_psb_debug;
++extern int drm_psb_no_fb;
++extern int drm_psb_disable_vsync;
++extern int drm_idle_check_interval;
++extern int drm_topaz_sbuswa;
++
++#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
++#define PSB_DEBUG_INIT(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
++#define PSB_DEBUG_IRQ(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
++#define PSB_DEBUG_ENTRY(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_ENTRY, _fmt, ##_arg)
++#define PSB_DEBUG_HV(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_HV, _fmt, ##_arg)
++#define PSB_DEBUG_DBI_BF(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_DBI_BF, _fmt, ##_arg)
++#define PSB_DEBUG_PM(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
++#define PSB_DEBUG_RENDER(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
++#define PSB_DEBUG_REG(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_REG, _fmt, ##_arg)
++
++#if DRM_DEBUG_CODE
++#define PSB_DEBUG(_flag, _fmt, _arg...) \
++ do { \
++ if (unlikely((_flag) & drm_psb_debug)) \
++ printk(KERN_DEBUG \
++ "[psb:0x%02x:%s] " _fmt , _flag, \
++ __func__ , ##_arg); \
++ } while (0)
++#else
++#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
++#endif
++
++/*
++ *Utilities
++ */
++#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
++
++static inline u32 MSG_READ32(uint port, uint offset)
++{
++ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++ outl(0x800000D4, 0xCF8);
++ return inl(0xcfc);
++}
++static inline void MSG_WRITE32(uint port, uint offset, u32 value)
++{
++ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
++ outl(0x800000D4, 0xCF8);
++ outl(value, 0xcfc);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++}
++
++static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int reg_val = ioread32(dev_priv->vdc_reg + (reg));
++ PSB_DEBUG_REG("reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
++ return reg_val;
++}
++
++#define REG_READ(reg) REGISTER_READ(dev, (reg))
++static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
++ uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ if ((reg < 0x70084 || reg >0x70088) && (reg < 0xa000 || reg >0xa3ff))
++ PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
++
++ iowrite32((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
++
++static inline void REGISTER_WRITE16(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
++
++ iowrite16((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
++
++static inline void REGISTER_WRITE8(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
++
++ iowrite8((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
++
++#define PSB_ALIGN_TO(_val, _align) \
++ (((_val) + ((_align) - 1)) & ~((_align) - 1))
++#define PSB_WVDC32(_val, _offs) \
++ iowrite32(_val, dev_priv->vdc_reg + (_offs))
++#define PSB_RVDC32(_offs) \
++ ioread32(dev_priv->vdc_reg + (_offs))
++
++/* #define TRAP_SGX_PM_FAULT 1 */
++#ifdef TRAP_SGX_PM_FAULT
++#define PSB_RSGX32(_offs) \
++({ \
++ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
++ printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
++ __FILE__, __LINE__); \
++ mdelay(1000); \
++ } \
++ ioread32(dev_priv->sgx_reg + (_offs)); \
++})
++#else
++#define PSB_RSGX32(_offs) \
++ ioread32(dev_priv->sgx_reg + (_offs))
++#endif
++
++#define MSVDX_REG_DUMP 0
++#if MSVDX_REG_DUMP
++
++#define PSB_WMSVDX32(_val, _offs) \
++ printk("MSVDX: write %08x to reg 0x%08x\n", (unsigned int)(_val), (unsigned int)(_offs));\
++ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
++#define PSB_RMSVDX32(_offs) \
++ ioread32(dev_priv->msvdx_reg + (_offs))
++
++#else
++
++#define PSB_WMSVDX32(_val, _offs) \
++ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
++#define PSB_RMSVDX32(_offs) \
++ ioread32(dev_priv->msvdx_reg + (_offs))
++
++#endif
++
++#define PSB_ALPL(_val, _base) \
++ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
++#define PSB_ALPLM(_val, _base) \
++ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
++
++#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
++ ((dev)->pci_device == 0x8109))
++
++#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
++#define IS_PENWELL(dev) 0 /* FIXME */
++
++
++#define IS_MDFLD(dev) (((dev)->pci_device & 0xfffc) == 0x0130)
++#define IS_MID(dev) (IS_MRST(dev) || IS_MDFLD(dev))
++
++#define IS_MSVDX(dev) (IS_MRST(dev) || IS_MDFLD(dev))
++#define IS_TOPAZ(dev) ((IS_MRST(dev) && (((dev)->pci_device & 0xfffc) != PCI_ID_TOPAZ_DISABLED)) || IS_MDFLD(dev))
++
++extern int drm_psb_ospm;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_fb.c
+@@ -0,0 +1,1822 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/console.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include "psb_fb.h"
++#include "psb_sgx.h"
++#include "psb_pvr_glue.h"
++
++#include "mdfld_dsi_dbi.h"
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
++{
++ switch (depth) {
++ case 8:
++ var->red.offset = 0;
++ var->green.offset = 0;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 15:
++ var->red.offset = 10;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 5;
++ var->blue.length = 5;
++ var->transp.length = 1;
++ var->transp.offset = 15;
++ break;
++ case 16:
++ var->red.offset = 11;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 24:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 32:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 8;
++ var->transp.offset = 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++struct psbfb_par {
++ struct drm_device *dev;
++ struct psb_framebuffer *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++ /* crtc currently bound to this */
++ uint32_t crtc_ids[2];
++};
++#endif
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle);
++
++static const struct drm_framebuffer_funcs psb_fb_funcs = {
++ .destroy = psb_user_framebuffer_destroy,
++ .create_handle = psb_user_framebuffer_create_handle,
++};
++
++#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
++
++void *psbfb_vdc_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ return dev_priv->vdc_reg;
++}
++/*EXPORT_SYMBOL(psbfb_vdc_reg); */
++
++static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ struct psbfb_par *par = info->par;
++ struct drm_framebuffer *fb = &par->psbfb->base;
++#else
++ struct psb_fbdev * fbdev = info->par;
++ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
++#endif
++ uint32_t v;
++
++ if (!fb)
++ return -ENOMEM;
++
++ if (regno > 255)
++ return 1;
++
++ red = CMAP_TOHW(red, info->var.red.length);
++ blue = CMAP_TOHW(blue, info->var.blue.length);
++ green = CMAP_TOHW(green, info->var.green.length);
++ transp = CMAP_TOHW(transp, info->var.transp.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset) |
++ (transp << info->var.transp.offset);
++
++ if (regno < 16) {
++ switch (fb->bits_per_pixel) {
++ case 16:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ case 24:
++ case 32:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++static struct drm_display_mode *psbfb_find_first_mode(struct
++ fb_var_screeninfo
++ *var,
++ struct fb_info *info,
++ struct drm_crtc
++ *crtc)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_display_mode *drm_mode;
++ struct drm_display_mode *preferred_mode = NULL;
++ struct drm_display_mode *last_mode = NULL;
++ struct drm_connector *connector;
++ int found;
++
++ found = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder && connector->encoder->crtc == crtc) {
++ found = 1;
++ break;
++ }
++ }
++
++ /* found no connector, bail */
++ if (!found)
++ return NULL;
++
++ found = 0;
++ list_for_each_entry(drm_mode, &connector->modes, head) {
++ if (drm_mode->hdisplay == var->xres &&
++ drm_mode->vdisplay == var->yres
++ && drm_mode->clock != 0) {
++ found = 1;
++ last_mode = drm_mode;
++ if (IS_POULSBO(dev)) {
++ if (last_mode->type & DRM_MODE_TYPE_PREFERRED)
++ preferred_mode = last_mode;
++ }
++ }
++ }
++
++ /* No mode matching mode found */
++ if (!found)
++ return NULL;
++
++ if (IS_POULSBO(dev)) {
++ if (preferred_mode)
++ return preferred_mode;
++ else
++ return last_mode;
++ } else {
++ return last_mode;
++ }
++}
++
++static int psbfb_check_var(struct fb_var_screeninfo *var,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_device *dev = par->dev;
++ int ret;
++ int depth;
++ int pitch;
++ int bpp = var->bits_per_pixel;
++
++ if (!psbfb)
++ return -ENOMEM;
++
++ if (!var->pixclock)
++ return -EINVAL;
++
++ /* don't support virtuals for now */
++ if (var->xres_virtual > var->xres)
++ return -EINVAL;
++
++ if (var->yres_virtual > var->yres)
++ return -EINVAL;
++
++ switch (bpp) {
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ /* Check that we can resize */
++ if ((pitch * var->yres) > psbfb->size) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#endif
++ }
++
++ ret = fill_fb_bitfield(var, depth);
++ if (ret)
++ return ret;
++
++#if 1
++ /* Here we walk the output mode list and look for modes. If we haven't
++ * got it, then bail. Not very nice, so this is disabled.
++ * In the set_par code, we create our mode based on the incoming
++ * parameters. Nicer, but may not be desired by some.
++ */
++ {
++ struct drm_crtc *crtc;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++ if (!psbfb_find_first_mode(&info->var, info, crtc))
++ return -EINVAL;
++ }
++ }
++#else
++ (void) i;
++ (void) dev; /* silence warnings */
++ (void) crtc;
++ (void) drm_mode;
++ (void) connector;
++#endif
++
++ return 0;
++}
++
++/* this will let fbcon do the mode init */
++static int psbfb_set_par(struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_device *dev = par->dev;
++ struct fb_var_screeninfo *var = &info->var;
++ /* struct drm_psb_private *dev_priv = dev->dev_private; */
++ struct drm_display_mode *drm_mode;
++ int pitch;
++ int depth;
++ int bpp = var->bits_per_pixel;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!fb)
++ return -ENOMEM;
++
++ PSB_DEBUG_ENTRY("01. \n");
++
++ switch (bpp) {
++ case 8:
++ depth = 8;
++ break;
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ DRM_ERROR("Illegal BPP\n");
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ if ((pitch * var->yres) > (psbfb->size)) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#endif
++ }
++
++ psbfb->offset = 0;
++ fb->width = var->xres;
++ fb->height = var->yres;
++ fb->bits_per_pixel = bpp;
++ fb->pitch = pitch;
++ fb->depth = depth;
++
++ info->fix.line_length = psbfb->base.pitch;
++ info->fix.visual =
++ (psbfb->base.depth ==
++ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
++
++ /* some fbdev's apps don't want these to change */
++ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
++
++#if 0
++ /* relates to resize - disable */
++ info->fix.smem_len = info->fix.line_length * var->yres;
++ info->screen_size = info->fix.smem_len; /* ??? */
++#endif
++
++ /* Should we walk the output's modelist or just create our own ???
++ * For now, we create and destroy a mode based on the incoming
++ * parameters. But there's commented out code below which scans
++ * the output list too.
++ */
++#if 1
++ /* This code is now in the for loop futher down. */
++#endif
++
++ {
++ struct drm_crtc *crtc;
++ int ret;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++#if 1
++ drm_mode =
++ psbfb_find_first_mode(&info->var, info, crtc);
++ if (!drm_mode)
++ DRM_ERROR("No matching mode found\n");
++ psb_intel_crtc->mode_set.mode = drm_mode;
++#endif
++
++#if 0 /* FIXME: TH */
++ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
++#endif
++ DRM_DEBUG
++ ("setting mode on crtc %p with id %u\n",
++ crtc, crtc->base.id);
++ ret =
++ crtc->funcs->
++ set_config(&psb_intel_crtc->mode_set);
++ if (ret) {
++ DRM_ERROR("Failed setting mode\n");
++ return ret;
++ }
++#if 0
++ }
++#endif
++ }
++ DRM_DEBUG("Set par returned OK.\n");
++ return 0;
++ }
++
++ return 0;
++}
++
++void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ cfb_imageblit(info, image);
++}
++
++static void psbfb_onoff(struct fb_info *info, int dpms_mode)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_crtc *crtc;
++ struct drm_encoder *encoder;
++ int i;
++
++ /*
++ * For each CRTC in this fb, find all associated encoders
++ * and turn them off, then turn off the CRTC.
++ */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (dpms_mode == DRM_MODE_DPMS_ON)
++ crtc_funcs->dpms(crtc, dpms_mode);
++
++ /* Found a CRTC on this fb, now find encoders */
++ list_for_each_entry(encoder,
++ &dev->mode_config.encoder_list, head) {
++ if (encoder->crtc == crtc) {
++ struct drm_encoder_helper_funcs
++ *encoder_funcs;
++ encoder_funcs = encoder->helper_private;
++ encoder_funcs->dpms(encoder, dpms_mode);
++ }
++ }
++
++ if (dpms_mode == DRM_MODE_DPMS_OFF)
++ crtc_funcs->dpms(crtc, dpms_mode);
++ }
++}
++
++static int psbfb_blank(int blank_mode, struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++
++ par->dpms_state = blank_mode;
++ PSB_DEBUG_PM("psbfb_blank \n");
++ switch (blank_mode) {
++ case FB_BLANK_UNBLANK:
++ psbfb_onoff(info, DRM_MODE_DPMS_ON);
++ break;
++ case FB_BLANK_NORMAL:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_HSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_VSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
++ break;
++ case FB_BLANK_POWERDOWN:
++ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
++ break;
++ }
++
++ return 0;
++}
++#endif /*KERNEL_VERSION < 2.6.35*/
++
++static int psbfb_kms_off(struct drm_device *dev, int suspend)
++{
++ struct drm_framebuffer *fb = 0;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct psb_framebuffer * psbfb = to_psb_fb(fb);
++#endif
++ DRM_DEBUG("psbfb_kms_off_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct fb_info *info = psbfb->fbdev;
++
++ if (suspend) {
++ fb_set_suspend(info, 1);
++ drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
++ }
++#else
++ struct fb_info *info = fb->fbdev;
++
++ if (suspend) {
++ fb_set_suspend(info, 1);
++ psbfb_blank(FB_BLANK_POWERDOWN, info);
++ }
++#endif
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++ return 0;
++}
++
++int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_off(dev, 0);
++ release_console_sem();
++
++ return ret;
++}
++
++static int psbfb_kms_on(struct drm_device *dev, int resume)
++{
++ struct drm_framebuffer *fb = 0;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct psb_framebuffer * psbfb = to_psb_fb(fb);
++#endif
++
++ DRM_DEBUG("psbfb_kms_on_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct fb_info *info = psbfb->fbdev;
++
++ if (resume) {
++ fb_set_suspend(info, 0);
++ drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
++ }
++#else
++ struct fb_info *info = fb->fbdev;
++
++ if (resume) {
++ fb_set_suspend(info, 0);
++ psbfb_blank(FB_BLANK_UNBLANK, info);
++ }
++#endif
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++}
++
++int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_on(dev, 0);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++ return ret;
++}
++
++void psbfb_suspend(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_off(dev, 1);
++ release_console_sem();
++}
++
++void psbfb_resume(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_on(dev, 1);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++}
++
++static int psbfb_vm_fault(struct vm_area_struct * vma, struct vm_fault * vmf)
++{
++ int page_num = 0;
++ int i;
++ unsigned long address = 0;
++ int ret;
++ unsigned long pfn;
++ struct psb_framebuffer *psbfb = (struct psb_framebuffer *)vma->vm_private_data;
++ struct drm_device * dev = psbfb->base.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ unsigned long phys_addr = (unsigned long)pg->stolen_base;;
++
++ page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++
++ address = (unsigned long)vmf->virtual_address;
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ for(i=0; i<page_num; i++) {
++ pfn = (phys_addr >> PAGE_SHIFT); //phys_to_pfn(phys_addr);
++
++ ret = vm_insert_mixed(vma, address, pfn);
++ if(unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if(unlikely(ret != 0)) {
++ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ return ret;
++ }
++
++ address += PAGE_SIZE;
++ phys_addr += PAGE_SIZE;
++ }
++
++ return VM_FAULT_NOPAGE;
++}
++
++static void psbfb_vm_open(struct vm_area_struct * vma)
++{
++ DRM_DEBUG("vm_open\n");
++}
++
++static void psbfb_vm_close(struct vm_area_struct * vma)
++{
++ DRM_DEBUG("vm_close\n");
++}
++
++static struct vm_operations_struct psbfb_vm_ops = {
++ .fault = psbfb_vm_fault,
++ .open = psbfb_vm_open,
++ .close = psbfb_vm_close
++};
++
++static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++#else
++ struct psb_fbdev * fbdev = info->par;
++ struct psb_framebuffer *psbfb = fbdev->pfb;
++#endif
++ char * fb_screen_base = NULL;
++ struct drm_device * dev = psbfb->base.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++
++ if (!psbfb->addr_space)
++ psbfb->addr_space = vma->vm_file->f_mapping;
++
++ fb_screen_base = (char *)info->screen_base;
++
++ DRM_DEBUG("vm_pgoff 0x%lx, screen base %p vram_addr %p\n", vma->vm_pgoff, fb_screen_base, pg->vram_addr);
++
++ /*if using stolen memory, */
++ if(fb_screen_base == pg->vram_addr) {
++ vma->vm_ops = &psbfb_vm_ops;
++ vma->vm_private_data = (void *)psbfb;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ } else {
++ /*using IMG meminfo, can I use pvrmmap to map it?*/
++
++ }
++
++ return 0;
++}
++
++
++static struct fb_ops psbfb_ops = {
++ .owner = THIS_MODULE,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ .fb_check_var = psbfb_check_var,
++ .fb_set_par = psbfb_set_par,
++ .fb_blank = psbfb_blank,
++#else
++ .fb_check_var = drm_fb_helper_check_var,
++ .fb_set_par = drm_fb_helper_set_par,
++ .fb_blank = drm_fb_helper_blank,
++#endif
++ .fb_setcolreg = psbfb_setcolreg,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_mmap = psbfb_mmap,
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++static struct drm_mode_set panic_mode;
++
++int psbfb_panic(struct notifier_block *n, unsigned long ununsed,
++ void *panic_str)
++{
++ DRM_ERROR("panic occurred, switching back to text console\n");
++ drm_crtc_helper_set_config(&panic_mode);
++
++ return 0;
++}
++/*EXPORT_SYMBOL(psbfb_panic); */
++
++static struct notifier_block paniced = {
++ .notifier_call = psbfb_panic,
++};
++#endif
++
++static struct drm_framebuffer *psb_framebuffer_create
++ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
++ void *mm_private)
++{
++ struct psb_framebuffer *fb;
++ int ret;
++
++ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
++ if (!fb)
++ return NULL;
++
++ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
++
++ if (ret)
++ goto err;
++
++ drm_helper_mode_fill_fb_struct(&fb->base, r);
++
++ fb->pvrBO = mm_private;
++
++ return &fb->base;
++
++err:
++ kfree(fb);
++ return NULL;
++}
++
++static struct drm_framebuffer *psb_user_framebuffer_create
++ (struct drm_device *dev, struct drm_file *filp,
++ struct drm_mode_fb_cmd *r)
++{
++ struct psb_framebuffer *psbfb;
++ struct drm_framebuffer *fb;
++ struct fb_info *info;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hKernelMemInfo = (IMG_HANDLE)r->handle;
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *) dev->dev_private;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct psb_fbdev * fbdev = dev_priv->fbdev;
++#endif
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++ uint32_t offset;
++ uint64_t size;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_ERROR("Cannot get meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++
++ return NULL;
++ }
++
++ DRM_DEBUG("Got Kernel MemInfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++
++ /* JB: TODO not drop, make smarter */
++ size = psKernelMemInfo->ui32AllocSize;
++ if (size < r->height * r->pitch)
++ return NULL;
++
++ /* JB: TODO not drop, refcount buffer */
++ /* return psb_framebuffer_create(dev, r, bo); */
++
++ fb = psb_framebuffer_create(dev, r, (void *)psKernelMemInfo);
++ if (!fb) {
++ DRM_ERROR("failed to allocate fb.\n");
++ return NULL;
++ }
++
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++ psbfb->hKernelMemInfo = hKernelMemInfo;
++
++ DRM_DEBUG("Mapping to gtt..., KernelMemInfo %p\n", psKernelMemInfo);
++
++ /*if not VRAM, map it into tt aperture*/
++ if (psKernelMemInfo->pvLinAddrKM != pg->vram_addr) {
++ ret = psb_gtt_map_meminfo(dev, hKernelMemInfo, &offset);
++ if (ret) {
++ DRM_ERROR("map meminfo for %lx failed\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return NULL;
++ }
++ psbfb->offset = (offset << PAGE_SHIFT);
++ } else {
++ psbfb->offset = 0;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ info = framebuffer_alloc(sizeof(struct psbfb_par), &dev->pdev->dev);
++#else
++ info = framebuffer_alloc(0, &dev->pdev->dev);
++#endif
++ if (!info)
++ return NULL;
++
++ strcpy(info->fix.id, "psbfb");
++
++ info->flags = FBINFO_DEFAULT;
++ info->fbops = &psbfb_ops;
++
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++
++ info->screen_base = psKernelMemInfo->pvLinAddrKM;
++ info->screen_size = size;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++ info->fix.line_length = fb->pitch;
++
++ /* it is called for kms flip, the back buffer has been rendered,
++ * then we should not clear it*/
++#if 0
++ if (is_iomem)
++ memset_io(info->screen_base, 0, size);
++ else
++ memset(info->screen_base, 0, size);
++#endif
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = r->width;
++ info->var.yres = r->height;
++
++ fill_fb_bitfield(&info->var, fb->depth);
++# else /*KERNEL_VERSION > 2.6.35*/
++ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
++ drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, fb->width, fb->height);
++#endif
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ fb->fbdev = info;
++#else
++ psbfb->fbdev = info;
++ fbdev->pfb = psbfb;
++#endif
++
++ return fb;
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++int psbfb_create(struct drm_device *dev, uint32_t fb_width,
++ uint32_t fb_height, uint32_t surface_width,
++ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
++{
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_framebuffer *fb;
++ struct psb_framebuffer *psbfb;
++ struct drm_mode_fb_cmd mode_cmd;
++ struct device *device = &dev->pdev->dev;
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int size, aligned_size, ret;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
++ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
++
++ mode_cmd.bpp = 32;
++ //HW requires pitch to be 64 byte aligned
++ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
++ mode_cmd.depth = 24;
++
++ size = mode_cmd.pitch * mode_cmd.height;
++ aligned_size = ALIGN(size, PAGE_SIZE);
++
++ mutex_lock(&dev->struct_mutex);
++ fb = psb_framebuffer_create(dev, &mode_cmd, NULL);
++ if (!fb) {
++
++ DRM_ERROR("failed to allocate fb.\n");
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++
++ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
++ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
++ if (!info) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++
++ par = info->par;
++ par->psbfb = psbfb;
++
++ strcpy(info->fix.id, "psbfb");
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->fbops = &psbfb_ops;
++
++ info->fix.line_length = fb->pitch;
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++ info->flags = FBINFO_DEFAULT;
++ info->screen_base = (char *)pg->vram_addr;
++ info->screen_size = size;
++ memset(info->screen_base, 0, size);
++
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = fb_width;
++ info->var.yres = fb_height;
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ DRM_DEBUG("fb depth is %d\n", fb->depth);
++ DRM_DEBUG(" pitch is %d\n", fb->pitch);
++ fill_fb_bitfield(&info->var, fb->depth);
++
++ fb->fbdev = info;
++
++ par->dev = dev;
++
++ /* To allow resizing without swapping buffers */
++ printk(KERN_INFO"allocated %dx%d fb\n",
++ psbfb->base.width,
++ psbfb->base.height);
++
++ if (psbfb_p)
++ *psbfb_p = psbfb;
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++out_err1:
++ fb->funcs->destroy(fb);
++out_err0:
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++#else /*KERNEL_VERSION >= 2.6.35*/
++
++static int psbfb_create(struct psb_fbdev * fbdev, struct drm_fb_helper_surface_size * sizes)
++{
++ struct drm_device * dev = fbdev->psb_fb_helper.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ struct fb_info * info;
++ struct drm_framebuffer *fb;
++ struct psb_framebuffer * psbfb;
++ struct drm_mode_fb_cmd mode_cmd;
++ struct device * device = &dev->pdev->dev;
++ int size, aligned_size;
++ int ret;
++
++ mode_cmd.width = sizes->surface_width;
++ mode_cmd.height = sizes->surface_height;
++
++ mode_cmd.bpp = 32;
++ //HW requires pitch to be 64 byte aligned
++ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
++ mode_cmd.depth = 24;
++
++ size = mode_cmd.pitch * mode_cmd.height;
++ aligned_size = ALIGN(size, PAGE_SIZE);
++
++ mutex_lock(&dev->struct_mutex);
++ fb = psb_framebuffer_create(dev, &mode_cmd, NULL);
++ if (!fb) {
++ DRM_ERROR("failed to allocate fb.\n");
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++
++ info = framebuffer_alloc(sizeof(struct psb_fbdev), device);
++ if(!info) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++
++ info->par = fbdev;
++
++ psbfb->fbdev = info;
++
++ fbdev->psb_fb_helper.fb = fb;
++ fbdev->psb_fb_helper.fbdev = info;
++ fbdev->pfb = psbfb;
++
++ strcpy(info->fix.id, "psbfb");
++
++ info->flags = FBINFO_DEFAULT;
++ info->fbops = &psbfb_ops;
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++ info->screen_base = (char *)pg->vram_addr;
++ info->screen_size = size;
++ memset(info->screen_base, 0, size);
++
++ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
++ drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, sizes->fb_width, sizes->fb_height);
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ DRM_DEBUG("fb depth is %d\n", fb->depth);
++ DRM_DEBUG(" pitch is %d\n", fb->pitch);
++
++ printk(KERN_INFO"allocated %dx%d fb\n", psbfb->base.width, psbfb->base.height);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++out_err0:
++ fb->funcs->destroy(fb);
++out_err1:
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno)
++{
++ DRM_DEBUG("%s\n", __FUNCTION__);
++}
++
++static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno)
++{
++ DRM_DEBUG("%s\n", __FUNCTION__);
++}
++
++static int psbfb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
++{
++ struct psb_fbdev * psb_fbdev = (struct psb_fbdev *)helper;
++ int new_fb = 0;
++ int ret;
++
++ DRM_DEBUG("%s\n", __FUNCTION__);
++
++ if(!helper->fb) {
++ ret = psbfb_create(psb_fbdev, sizes);
++ if(ret) {
++ return ret;
++ }
++
++ new_fb = 1;
++ }
++
++ return new_fb;
++}
++
++struct drm_fb_helper_funcs psb_fb_helper_funcs = {
++ .gamma_set = psbfb_gamma_set,
++ .gamma_get = psbfb_gamma_get,
++ .fb_probe = psbfb_probe,
++};
++
++int psb_fbdev_destroy(struct drm_device * dev, struct psb_fbdev * fbdev)
++{
++ struct fb_info * info;
++ struct psb_framebuffer * psbfb = fbdev->pfb;
++
++ if(fbdev->psb_fb_helper.fbdev) {
++ info = fbdev->psb_fb_helper.fbdev;
++ unregister_framebuffer(info);
++ iounmap(info->screen_base);
++ framebuffer_release(info);
++ }
++
++ drm_fb_helper_fini(&fbdev->psb_fb_helper);
++
++ drm_framebuffer_cleanup(&psbfb->base);
++
++ return 0;
++}
++
++int psb_fbdev_init(struct drm_device * dev)
++{
++ struct psb_fbdev * fbdev;
++ struct drm_psb_private * dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ int num_crtc;
++
++ fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
++ if(!fbdev) {
++ DRM_ERROR("no memory\n");
++ return -ENOMEM;
++ }
++
++ dev_priv->fbdev = fbdev;
++ fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
++
++ /*FIXME: how many crtc will MDFL support?*/
++ if(IS_MRST(dev)) {
++ num_crtc = 1;
++ } else {
++ num_crtc = 3;
++ }
++
++ drm_fb_helper_init(dev, &fbdev->psb_fb_helper, num_crtc, INTELFB_CONN_LIMIT);
++
++ drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
++ drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
++ return 0;
++}
++
++void psb_fbdev_fini(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if(!dev_priv->fbdev) {
++ return;
++ }
++
++ psb_fbdev_destroy(dev, dev_priv->fbdev);
++ kfree(dev_priv->fbdev);
++ dev_priv->fbdev = NULL;
++}
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct drm_framebuffer *fb = crtc->fb;
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct drm_connector *connector;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset;
++ unsigned int width, height;
++ int new_fb = 0;
++ int ret, i, conn_count;
++
++ if (!drm_helper_crtc_in_use(crtc))
++ return 0;
++
++ if (!crtc->desired_mode)
++ return 0;
++
++ width = crtc->desired_mode->hdisplay;
++ height = crtc->desired_mode->vdisplay;
++
++ /* is there an fb bound to this crtc already */
++ if (!psb_intel_crtc->mode_set.fb) {
++ ret =
++ psbfb_create(dev, width, height, width, height,
++ &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ } else {
++ fb = psb_intel_crtc->mode_set.fb;
++ if ((fb->width < width) || (fb->height < height))
++ return -EINVAL;
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc == modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count > INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[0] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++
++ par->crtc_count = 1;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++static int psbfb_multi_fb_probe(struct drm_device *dev)
++{
++
++ struct drm_crtc *crtc;
++ int ret = 0;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
++ if (ret)
++ return ret;
++ }
++ return ret;
++}
++
++static int psbfb_single_fb_probe(struct drm_device *dev)
++{
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
++ unsigned int surface_width = 0, surface_height = 0;
++ int new_fb = 0;
++ int crtc_count = 0;
++ int ret, i, conn_count = 0;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset = NULL;
++ struct drm_framebuffer *fb = NULL;
++ struct psb_framebuffer *psbfb = NULL;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ /* first up get a count of crtcs now in use and
++ * new min/maxes width/heights */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (crtc->desired_mode) {
++ fb = crtc->fb;
++ if (crtc->desired_mode->hdisplay <
++ fb_width)
++ fb_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay <
++ fb_height)
++ fb_height =
++ crtc->desired_mode->vdisplay;
++
++ if (crtc->desired_mode->hdisplay >
++ surface_width)
++ surface_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay >
++ surface_height)
++ surface_height =
++ crtc->desired_mode->vdisplay;
++
++ }
++ crtc_count++;
++ }
++ }
++
++ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
++ /* hmm everyone went away - assume VGA cable just fell out
++ and will come back later. */
++ return 0;
++ }
++
++ /* do we have an fb already? */
++ if (list_empty(&dev->mode_config.fb_kernel_list)) {
++ /* create an fb if we don't have one */
++ ret =
++ psbfb_create(dev, fb_width, fb_height, surface_width,
++ surface_height, &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ fb = &psbfb->base;
++ } else {
++ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
++ struct drm_framebuffer, filp_head);
++
++ /* if someone hotplugs something bigger than we have already
++ * allocated, we are pwned. As really we can't resize an
++ * fbdev that is in the wild currently due to fbdev not really
++ * being designed for the lower layers moving stuff around
++ * under it. - so in the grand style of things - punt. */
++ if ((fb->width < surface_width)
++ || (fb->height < surface_height)) {
++ DRM_ERROR
++ ("Framebuffer not large enough to scale"
++ " console onto.\n");
++ return -EINVAL;
++ }
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ crtc_count = 0;
++ /* okay we need to setup new connector sets in the crtcs */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector,
++ &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc ==
++ modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count >
++ INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[crtc_count++] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++ }
++ par->crtc_count = crtc_count;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++int psbfb_probe(struct drm_device *dev)
++{
++ int ret = 0;
++
++ DRM_DEBUG("\n");
++
++ /* something has changed in the lower levels of hell - deal with it
++ here */
++
++ /* two modes : a) 1 fb to rule all crtcs.
++ b) one fb per crtc.
++ two actions 1) new connected device
++ 2) device removed.
++ case a/1 : if the fb surface isn't big enough -
++ resize the surface fb.
++ if the fb size isn't big enough - resize fb into surface.
++ if everything big enough configure the new crtc/etc.
++ case a/2 : undo the configuration
++ possibly resize down the fb to fit the new configuration.
++ case b/1 : see if it is on a new crtc - setup a new fb and add it.
++ case b/2 : teardown the new fb.
++ */
++
++ /* mode a first */
++ /* search for an fb */
++ if (0 /*i915_fbpercrtc == 1 */)
++ ret = psbfb_multi_fb_probe(dev);
++ else
++ ret = psbfb_single_fb_probe(dev);
++
++ return ret;
++}
++/*EXPORT_SYMBOL(psbfb_probe); */
++
++#else /*KERNEL_VERSION >= 2.6.35*/
++
++static void psbfb_output_poll_changed(struct drm_device * dev)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_fbdev * fbdev = (struct psb_fbdev *)dev_priv->fbdev;
++ drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
++}
++#endif
++
++int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
++{
++ struct fb_info *info;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct psb_framebuffer * psbfb = to_psb_fb(fb);
++#endif
++
++ if (drm_psb_no_fb)
++ return 0;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ info = psbfb->fbdev;
++#else
++ info = fb->fbdev;
++#endif
++
++ if (info) {
++ framebuffer_release(info);
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
++ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
++#endif
++ return 0;
++}
++/*EXPORT_SYMBOL(psbfb_remove); */
++
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ /* JB: TODO currently we can't go from a bo to a handle with ttm */
++ (void) file_priv;
++ *handle = 0;
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct drm_device *dev = fb->dev;
++ struct psb_framebuffer *psbfb = to_psb_fb(fb);
++
++ /*ummap gtt pages*/
++ psb_gtt_unmap_meminfo(dev, psbfb->hKernelMemInfo);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ if (fb->fbdev)
++#else
++ if (psbfb->fbdev)
++#endif
++ {
++ psbfb_remove(dev, fb);
++ }
++
++ /* JB: TODO not drop, refcount buffer */
++ drm_framebuffer_cleanup(fb);
++
++ kfree(fb);
++}
++
++static const struct drm_mode_config_funcs psb_mode_funcs = {
++ .fb_create = psb_user_framebuffer_create,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ .output_poll_changed = psbfb_output_poll_changed,
++#else
++ .fb_changed = psbfb_probe,
++#endif
++};
++
++static int psb_create_backlight_property(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *) dev->dev_private;
++ struct drm_property *backlight;
++
++ if (dev_priv->backlight_property)
++ return 0;
++
++ backlight = drm_property_create(dev,
++ DRM_MODE_PROP_RANGE,
++ "backlight",
++ 2);
++ backlight->values[0] = 0;
++ backlight->values[1] = 100;
++
++ dev_priv->backlight_property = backlight;
++
++ return 0;
++}
++
++#if MDFLD_WLD_JLIU7
++void mdfld_wld_init(struct drm_device *dev)
++{
++}
++#endif /* MDFLD_WLD_JLIU7 */
++
++static void psb_setup_outputs(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct drm_connector *connector;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ drm_mode_create_scaling_mode_property(dev);
++
++ psb_create_backlight_property(dev);
++
++ if (IS_MDFLD(dev)) {
++ /* Set up integrated MIPI for MDFLD */
++ mdfld_dsi_dbi_init(dev, &dev_priv->mode_dev,1);
++#ifdef CONFIG_MDFD_DUAL_MIPI
++ mdfld_dsi_dbi_init(dev, &dev_priv->mode_dev,2);
++#endif /* CONFIG_DUAL_MIPI */
++
++#if MDFLD_HDMI_JLIU7
++#ifdef CONFIG_MDFD_HDMI
++ mdfld_hdmi_init(dev, &dev_priv->mode_dev);
++#endif /* CONFIG_MDFD_HDMI */
++#endif /* MDFLD_HDMI_JLIU7 */
++
++#if MDFLD_WLD_JLIU7
++ mdfld_wld_init(dev);
++#endif /* MDFLD_WLD_JLIU7 */
++ } else if (IS_MRST(dev)) {
++ if (dev_priv->iLVDS_enable)
++ /* Set up integrated LVDS for MRST */
++ mrst_lvds_init(dev, &dev_priv->mode_dev);
++ else {
++ /* Set up integrated MIPI for MRST */
++ mrst_dsi_init(dev, &dev_priv->mode_dev);
++ }
++ } else {
++ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
++ psb_intel_sdvo_init(dev, SDVOB);
++ }
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_SDVO);
++ break;
++ case INTEL_OUTPUT_LVDS:
++ PSB_DEBUG_ENTRY("LVDS. \n");
++ if (IS_MRST(dev))
++ crtc_mask = (1 << 0);
++ else
++ crtc_mask = (1 << 1);
++
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_MIPI:
++ PSB_DEBUG_ENTRY("MIPI. \n");
++ crtc_mask = (1 << 0);
++ clone_mask = (1 << INTEL_OUTPUT_MIPI);
++ break;
++ case INTEL_OUTPUT_MIPI2:
++ PSB_DEBUG_ENTRY("MIPI2. \n");
++ crtc_mask = (1 << 2);
++ clone_mask = (1 << INTEL_OUTPUT_MIPI2);
++ break;
++ case INTEL_OUTPUT_HDMI:
++ PSB_DEBUG_ENTRY("HDMI. \n");
++ crtc_mask = (1 << 1);
++ clone_mask = (1 << INTEL_OUTPUT_HDMI);
++ break;
++ case INTEL_OUTPUT_WLD:
++ PSB_DEBUG_ENTRY("WLD. \n");
++ crtc_mask = (1 << 3);
++ clone_mask = (1 << INTEL_OUTPUT_WLD);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++
++#if 0
++{
++ struct drm_encoder *pEncoder = encoder;
++ DRM_INFO("Enter psb_setup_outputs, connector info, type = %d, type_id=%d, base=0x%x, base.id=0x%x. \n", connector->connector_type, connector->connector_type_id, connector->base, connector->base.id);
++ DRM_INFO("Enter psb_setup_outputs, encoder info, base.id=%d, encoder_type=%d, dev=0x%x, base=0x%x, possible_clones=0x%x. \n", pEncoder->base.id, pEncoder->encoder_type, pEncoder->dev, pEncoder->base, pEncoder->possible_clones);
++ DRM_INFO("Enter psb_setup_outputs, encoder info, possible_crtcs=0x%x, crtc=0x%x. \n", pEncoder->possible_crtcs, pEncoder->crtc);
++}
++#endif
++ }
++}
++
++static void *psb_bo_from_handle(struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hKernelMemInfo = (IMG_HANDLE)handle;
++ int ret;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_ERROR("Cannot get meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return NULL;
++ }
++
++ return (void *)psKernelMemInfo;
++}
++
++static size_t psb_bo_size(struct drm_device *dev, void *bof)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO *)bof;
++ return (size_t)psKernelMemInfo->ui32AllocSize;
++}
++
++static size_t psb_bo_offset(struct drm_device *dev, void *bof)
++{
++ struct psb_framebuffer *psbfb
++ = (struct psb_framebuffer *)bof;
++
++ return (size_t)psbfb->offset;
++}
++
++static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
++{
++ return 0;
++}
++
++static int psb_bo_unpin_for_scanout(struct drm_device *dev, void *bo)
++{
++ return 0;
++}
++
++void psb_modeset_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++ int i;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ /* Init mm functions */
++ mode_dev->bo_from_handle = psb_bo_from_handle;
++ mode_dev->bo_size = psb_bo_size;
++ mode_dev->bo_offset = psb_bo_offset;
++ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
++ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_mode_funcs;
++
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++ /* set memory base */
++ /* MRST and PSB should use BAR 2*/
++ pci_read_config_dword(dev->pdev, PSB_BSM, (uint32_t *) &(dev->mode_config.fb_base));
++
++ for (i = 0; i < dev_priv->num_pipe; i++)
++ psb_intel_crtc_init(dev, i, mode_dev);
++
++ psb_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev); */
++}
++
++void psb_modeset_cleanup(struct drm_device *dev)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ drm_mode_config_cleanup(dev);
++#else
++ mutex_lock(&dev->struct_mutex);
++
++ drm_kms_helper_poll_fini(dev);
++ psb_fbdev_fini(dev);
++
++ drm_mode_config_cleanup(dev);
++
++ mutex_unlock(&dev->struct_mutex);
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_fb.h
+@@ -0,0 +1,66 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#ifndef _PSB_FB_H_
++#define _PSB_FB_H_
++
++#include <linux/version.h>
++#include <drm/drmP.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++#include <drm/drm_fb_helper.h>
++#endif
++
++#include "psb_drv.h"
++
++/*IMG Headers*/
++#include "servicesint.h"
++
++struct psb_framebuffer {
++ struct drm_framebuffer base;
++ struct address_space *addr_space;
++ struct ttm_buffer_object *bo;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct fb_info * fbdev;
++#endif
++ /* struct ttm_bo_kmap_obj kmap; */
++ PVRSRV_KERNEL_MEM_INFO *pvrBO;
++ IMG_HANDLE hKernelMemInfo;
++ uint32_t size;
++ uint32_t offset;
++};
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++struct psb_fbdev {
++ struct drm_fb_helper psb_fb_helper;
++ struct psb_framebuffer * pfb;
++};
++#endif
++
++
++#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
++
++
++extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_fence.c
+@@ -0,0 +1,191 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "pnw_topaz.h"
++#include "lnc_topaz.h"
++
++
++static void psb_fence_poll(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t waiting_types)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct drm_device *dev = dev_priv->dev;
++ uint32_t sequence = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++
++ if (unlikely(!dev_priv))
++ return;
++
++ if (waiting_types == 0)
++ return;
++
++ switch (fence_class) {
++ case PSB_ENGINE_VIDEO:
++ sequence = msvdx_priv->msvdx_current_sequence;
++ break;
++ case LNC_ENGINE_ENCODE:
++ if (IS_MDFLD(dev))
++ sequence = *((uint32_t *)
++ ((struct pnw_topaz_private *)dev_priv->topaz_private)->topaz_sync_addr + 1);
++ else
++ sequence = *((uint32_t *)
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_sync_addr);
++ break;
++ default:
++ break;
++ }
++
++ /* DRM_ERROR("Polling fence sequence, got 0x%08x\n", sequence); */
++ ttm_fence_handler(fdev, fence_class, sequence,
++ _PSB_FENCE_TYPE_EXE, 0);
++}
++
++void psb_fence_error(struct drm_device *dev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, int error)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ unsigned long irq_flags;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++
++ BUG_ON(fence_class >= PSB_NUM_ENGINES);
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, fence_class, sequence, type, error);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ uint32_t seq = 0;
++
++ if (!dev_priv)
++ return -EINVAL;
++
++ if (fence_class >= PSB_NUM_ENGINES)
++ return -EINVAL;
++
++ switch (fence_class) {
++ case PSB_ENGINE_VIDEO:
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class]++;
++ spin_unlock(&dev_priv->sequence_lock);
++ break;
++ case LNC_ENGINE_ENCODE:
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class]++;
++ spin_unlock(&dev_priv->sequence_lock);
++ break;
++ default:
++ DRM_ERROR("Unexpected fence class\n");
++ return -EINVAL;
++ }
++
++ *sequence = seq;
++ *timeout_jiffies = jiffies + DRM_HZ * 3;
++
++ return 0;
++}
++
++static void psb_fence_lockup(struct ttm_fence_object *fence,
++ uint32_t fence_types)
++{
++ struct ttm_fence_device *fdev = fence->fdev;
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
++
++ if (fence->fence_class == LNC_ENGINE_ENCODE) {
++ DRM_ERROR("TOPAZ timeout (probable lockup) detected, flush queued cmdbuf");
++
++ write_lock(&fc->lock);
++ pnw_topaz_handle_timeout(fence->fdev);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++ } else if (fence->fence_class == PSB_ENGINE_VIDEO) {
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ DRM_ERROR("MVVDX timeout (probable lockup) detected, flush queued cmdbuf");
++
++ psb_msvdx_flush_cmd_queue(dev);
++
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++
++ msvdx_priv->msvdx_needs_reset = 1;
++ } else
++ DRM_ERROR("Unsupported fence class\n");
++}
++
++void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ psb_fence_poll(fdev, fence_class, fc->waiting_types);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++
++static struct ttm_fence_driver psb_ttm_fence_driver = {
++ .has_irq = NULL,
++ .emit = psb_fence_emit_sequence,
++ .flush = NULL,
++ .poll = psb_fence_poll,
++ .needed_flush = NULL,
++ .wait = NULL,
++ .signaled = NULL,
++ .lockup = psb_fence_lockup,
++};
++
++int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
++ .flush_diff = (1 << 29),
++ .sequence_mask = 0xFFFFFFFF
++ };
++
++ return ttm_fence_device_init(PSB_NUM_ENGINES,
++ dev_priv->mem_global_ref.object,
++ fdev, &fci, 1,
++ &psb_ttm_fence_driver);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_gtt.c
+@@ -0,0 +1,1040 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_pvr_glue.h"
++
++static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
++{
++ struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
++
++ if (!tmp)
++ return NULL;
++
++ init_rwsem(&tmp->sem);
++ tmp->dev = dev;
++
++ return tmp;
++}
++
++void psb_gtt_takedown(struct psb_gtt *pg, int free)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++
++ if (!pg)
++ return;
++
++ if (pg->gtt_map) {
++ iounmap(pg->gtt_map);
++ pg->gtt_map = NULL;
++ }
++ if (pg->initialized) {
++ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl);
++ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++ }
++ if (free)
++ kfree(pg);
++}
++
++int psb_gtt_init(struct psb_gtt *pg, int resume)
++{
++ struct drm_device *dev = pg->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned gtt_pages;
++ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
++ unsigned long rar_stolen_size;
++ unsigned i, num_pages;
++ unsigned pfn_base;
++ uint32_t ci_pages, vram_pages;
++ uint32_t tt_pages;
++ uint32_t *ttm_gtt_map;
++ uint32_t dvmt_mode = 0;
++
++ int ret = 0;
++ uint32_t pte;
++
++ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++
++ pg->initialized = 1;
++
++ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
++
++ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
++ /* fix me: video mmu has hw bug to access 0x0D0000000,
++ * then make gatt start at 0x0e000,0000 */
++ pg->mmu_gatt_start = 0xE0000000;
++ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
++ gtt_pages =
++ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
++ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
++ >> PAGE_SHIFT;
++
++ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
++ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
++
++ /* CI is not included in the stolen size since the TOPAZ MMU bug */
++ ci_stolen_size = dev_priv->ci_region_size;
++ /* Don't add CI & RAR share buffer space
++ * managed by TTM to stolen_size */
++ stolen_size = vram_stolen_size;
++
++ rar_stolen_size = dev_priv->rar_region_size;
++
++ printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
++ pg->gatt_start, pg->gatt_pages/256);
++ printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
++ pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
++ printk(KERN_INFO"Stole memory information \n");
++ printk(KERN_INFO" base in RAM: 0x%x \n", pg->stolen_base);
++ printk(KERN_INFO" size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
++ vram_stolen_size/1024);
++ dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
++ printk(KERN_INFO" the correct size should be: %dM(dvmt mode=%d) \n",
++ (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
++
++ if (ci_stolen_size > 0)
++ printk(KERN_INFO"CI Stole memory: RAM base = 0x%08x, size = %lu M \n",
++ dev_priv->ci_region_start,
++ ci_stolen_size / 1024 / 1024);
++ if (rar_stolen_size > 0)
++ printk(KERN_INFO"RAR Stole memory: RAM base = 0x%08x, size = %lu M \n",
++ dev_priv->rar_region_start,
++ rar_stolen_size / 1024 / 1024);
++
++ if (resume && (gtt_pages != pg->gtt_pages) &&
++ (stolen_size != pg->stolen_size)) {
++ DRM_ERROR("GTT resume error.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ pg->gtt_pages = gtt_pages;
++ pg->stolen_size = stolen_size;
++ pg->vram_stolen_size = vram_stolen_size;
++ pg->ci_stolen_size = ci_stolen_size;
++ pg->rar_stolen_size = rar_stolen_size;
++ pg->gtt_map =
++ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
++ if (!pg->gtt_map) {
++ DRM_ERROR("Failure to map gtt.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
++ if (!pg->vram_addr) {
++ DRM_ERROR("Failure to map stolen base.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ DRM_DEBUG("%s: vram kernel virtual address %p\n", pg->vram_addr);
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ ttm_gtt_map = pg->gtt_map + tt_pages / 2;
++
++ /*
++ * insert vram stolen pages.
++ */
++
++ pfn_base = pg->stolen_base >> PAGE_SHIFT;
++ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base, 0);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, pg->gtt_map + i);
++ }
++
++ /*
++ * Init rest of gtt managed by IMG.
++ */
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ for (; i < tt_pages / 2 - 1; ++i)
++ iowrite32(pte, pg->gtt_map + i);
++
++ /*
++ * insert CI stolen pages
++ */
++
++ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
++ ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, ttm_gtt_map + i);
++ }
++
++ /*
++ * insert RAR stolen pages
++ */
++ if (rar_stolen_size != 0) {
++ pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
++ num_pages = rar_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base,
++ (ttm_gtt_map - pg->gtt_map + i) * 4);
++ for (; i < num_pages + ci_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0);
++ iowrite32(pte, ttm_gtt_map + i);
++ }
++ }
++ /*
++ * Init rest of gtt managed by TTM.
++ */
++
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ PSB_DEBUG_INIT("Initializing the rest of a total "
++ "of %d gtt pages.\n", pg->gatt_pages);
++
++ for (; i < pg->gatt_pages - tt_pages / 2; ++i)
++ iowrite32(pte, ttm_gtt_map + i);
++ (void) ioread32(pg->gtt_map + i - 1);
++
++ return 0;
++
++out_err:
++ psb_gtt_takedown(pg, 0);
++ return ret;
++}
++
++int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type)
++{
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j) {
++ pte =
++ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
++ iowrite32(pte, cur_page++);
++ }
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, IMG_CPU_PHYADDR *pPhysFrames,
++ unsigned offset_pages, unsigned num_pages, int type)
++{
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ //printk("Allocatng IMG GTT mem at %x (pages %d)\n",offset_pages,num_pages);
++ down_read(&pg->sem);
++
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < num_pages; ++j)
++ {
++ pte = psb_gtt_mask_pte( (pPhysFrames++)->uiAddr >> PAGE_SHIFT, type);
++ iowrite32(pte, cur_page++);
++ //printk("PTE %d: %x/%x\n",j,(pPhysFrames-1)->uiAddr,pte);
++ }
++ (void) ioread32(cur_page - 1);
++
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages, unsigned desired_tile_stride,
++ unsigned hw_tile_stride)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
++ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j)
++ iowrite32(pte, cur_page++);
++
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_mm_init(struct psb_gtt *pg)
++{
++ struct psb_gtt_mm *gtt_mm;
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ struct drm_open_hash *ht;
++ struct drm_mm *mm;
++ int ret;
++ uint32_t tt_start;
++ uint32_t tt_size;
++
++ if (!pg || !pg->initialized) {
++ DRM_DEBUG("Invalid gtt struct\n");
++ return -EINVAL;
++ }
++
++ gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
++ if (!gtt_mm)
++ return -ENOMEM;
++
++ spin_lock_init(&gtt_mm->lock);
++
++ ht = &gtt_mm->hash;
++ ret = drm_ht_create(ht, 20);
++ if (ret) {
++ DRM_DEBUG("Create hash table failed(%d)\n", ret);
++ goto err_free;
++ }
++
++ tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
++ tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ mm = &gtt_mm->base;
++
++ /*will use tt_start ~ 128M for IMG TT buffers*/
++ ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
++ if (ret) {
++ DRM_DEBUG("drm_mm_int error(%d)\n", ret);
++ goto err_mm_init;
++ }
++
++ gtt_mm->count = 0;
++
++ dev_priv->gtt_mm = gtt_mm;
++
++ DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
++ (unsigned long)tt_start,
++ (unsigned long)((tt_size / 2) - tt_start));
++ return 0;
++err_mm_init:
++ drm_ht_remove(ht);
++
++err_free:
++ kfree(gtt_mm);
++ return ret;
++}
++
++/**
++ * Delete all hash entries;
++ */
++void psb_gtt_mm_takedown(void)
++{
++ return;
++}
++
++static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry **hentry)
++{
++ struct drm_hash_item *entry;
++ struct psb_gtt_hash_entry *psb_entry;
++ int ret;
++
++ ret = drm_ht_find_item(&mm->hash, tgid, &entry);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry pid=%ld\n", tgid);
++ return ret;
++ }
++
++ psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
++ if (!psb_entry) {
++ DRM_DEBUG("Invalid entry");
++ return -EINVAL;
++ }
++
++ *hentry = psb_entry;
++ return 0;
++}
++
++
++static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry *hentry)
++{
++ struct drm_hash_item *item;
++ int ret;
++
++ if (!hentry) {
++ DRM_DEBUG("Invalid parameters\n");
++ return -EINVAL;
++ }
++
++ item = &hentry->item;
++ item->key = tgid;
++
++ /**
++ * NOTE: drm_ht_insert_item will perform such a check
++ ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
++ if (!ret) {
++ DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
++ return -EAGAIN;
++ }
++ */
++
++ /*Insert the given entry*/
++ ret = drm_ht_insert_item(&mm->hash, item);
++ if (ret) {
++ DRM_DEBUG("Insert failure\n");
++ return ret;
++ }
++
++ mm->count++;
++
++ return 0;
++}
++
++static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry **entry)
++{
++ struct psb_gtt_hash_entry *hentry;
++ int ret;
++
++ /*if the hentry for this tgid exists, just get it and return*/
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
++ if (!ret) {
++ DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n",
++ tgid, hentry);
++ *entry = hentry;
++ spin_unlock(&mm->lock);
++ return 0;
++ }
++ spin_unlock(&mm->lock);
++
++ DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid);
++
++ hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
++ if (!hentry) {
++ DRM_DEBUG("Kmalloc failled\n");
++ return -ENOMEM;
++ }
++
++ ret = drm_ht_create(&hentry->ht, 20);
++ if (ret) {
++ DRM_DEBUG("Create hash table failed\n");
++ return ret;
++ }
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
++ spin_unlock(&mm->lock);
++
++ if (!ret)
++ *entry = hentry;
++
++ return ret;
++}
++
++static struct psb_gtt_hash_entry *
++psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
++{
++ struct psb_gtt_hash_entry *tmp;
++ int ret;
++
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry pid %ld\n", tgid);
++ return NULL;
++ }
++
++ /*remove it from ht*/
++ drm_ht_remove_item(&mm->hash, &tmp->item);
++
++ mm->count--;
++
++ return tmp;
++}
++
++static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
++{
++ struct psb_gtt_hash_entry *entry;
++
++ entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
++
++ if (!entry) {
++ DRM_DEBUG("Invalid entry");
++ return -EINVAL;
++ }
++
++ /*delete ht*/
++ drm_ht_remove(&entry->ht);
++
++ /*free this entry*/
++ kfree(entry);
++ return 0;
++}
++
++static int
++psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct psb_gtt_mem_mapping **hentry)
++{
++ struct drm_hash_item *entry;
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ ret = drm_ht_find_item(ht, key, &entry);
++ if (ret) {
++ DRM_DEBUG("Cannot find key %ld\n", key);
++ return ret;
++ }
++
++ mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
++ if (!mapping) {
++ DRM_DEBUG("Invalid entry\n");
++ return -EINVAL;
++ }
++
++ *hentry = mapping;
++ return 0;
++}
++
++static int
++psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct psb_gtt_mem_mapping *hentry)
++{
++ struct drm_hash_item *item;
++ struct psb_gtt_hash_entry *entry;
++ int ret;
++
++ if (!hentry) {
++ DRM_DEBUG("hentry is NULL\n");
++ return -EINVAL;
++ }
++
++ item = &hentry->item;
++ item->key = key;
++
++ ret = drm_ht_insert_item(ht, item);
++ if (ret) {
++ DRM_DEBUG("insert_item failed\n");
++ return ret;
++ }
++
++ entry = container_of(ht, struct psb_gtt_hash_entry, ht);
++ if (entry)
++ entry->count++;
++
++ return 0;
++}
++
++static int
++psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
++ struct drm_open_hash *ht,
++ u32 key,
++ struct drm_mm_node *node,
++ struct psb_gtt_mem_mapping **entry)
++{
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ if (!node || !ht) {
++ DRM_DEBUG("parameter error\n");
++ return -EINVAL;
++ }
++
++ /*try to get this mem_map */
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
++ if (!ret) {
++ DRM_DEBUG("mapping entry for key %ld exists, entry %p\n",
++ key, mapping);
++ *entry = mapping;
++ spin_unlock(&mm->lock);
++ return 0;
++ }
++ spin_unlock(&mm->lock);
++
++ DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n",
++ key);
++
++ mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
++ if (!mapping) {
++ DRM_DEBUG("kmalloc failed\n");
++ return -ENOMEM;
++ }
++
++ mapping->node = node;
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
++ spin_unlock(&mm->lock);
++
++ if (!ret)
++ *entry = mapping;
++
++ return ret;
++}
++
++static struct psb_gtt_mem_mapping *
++psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key)
++{
++ struct psb_gtt_mem_mapping *tmp;
++ struct psb_gtt_hash_entry *entry;
++ int ret;
++
++ ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
++ if (ret) {
++ DRM_DEBUG("Cannot find key %ld\n", key);
++ return NULL;
++ }
++
++ drm_ht_remove_item(ht, &tmp->item);
++
++ entry = container_of(ht, struct psb_gtt_hash_entry, ht);
++ if (entry)
++ entry->count--;
++
++ return tmp;
++}
++
++static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct drm_mm_node **node)
++{
++ struct psb_gtt_mem_mapping *entry;
++
++ entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
++ if (!entry) {
++ DRM_DEBUG("entry is NULL\n");
++ return -EINVAL;
++ }
++
++ *node = entry->node;
++
++ kfree(entry);
++ return 0;
++}
++
++static int psb_gtt_add_node(struct psb_gtt_mm *mm,
++ u32 tgid,
++ u32 key,
++ struct drm_mm_node *node,
++ struct psb_gtt_mem_mapping **entry)
++{
++ struct psb_gtt_hash_entry *hentry;
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
++ if (ret) {
++ DRM_DEBUG("alloc_insert failed\n");
++ return ret;
++ }
++
++ ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
++ &hentry->ht,
++ key,
++ node,
++ &mapping);
++ if (ret) {
++ DRM_DEBUG("mapping alloc_insert failed\n");
++ return ret;
++ }
++
++ *entry = mapping;
++
++ return 0;
++}
++
++static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
++ u32 tgid,
++ u32 key,
++ struct drm_mm_node **node)
++{
++ struct psb_gtt_hash_entry *hentry;
++ struct drm_mm_node *tmp;
++ int ret;
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry for pid %ld\n", tgid);
++ spin_unlock(&mm->lock);
++ return ret;
++ }
++ spin_unlock(&mm->lock);
++
++ /*remove mapping entry*/
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
++ key,
++ &tmp);
++ if (ret) {
++ DRM_DEBUG("remove_free failed\n");
++ spin_unlock(&mm->lock);
++ return ret;
++ }
++
++ *node = tmp;
++
++ /*check the count of mapping entry*/
++ if (!hentry->count) {
++ DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid);
++ psb_gtt_mm_remove_free_ht_locked(mm, tgid);
++ }
++
++ spin_unlock(&mm->lock);
++
++ return 0;
++}
++
++static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
++ uint32_t pages,
++ uint32_t align,
++ struct drm_mm_node **node)
++{
++ struct drm_mm_node *tmp_node;
++ int ret;
++
++ do {
++ ret = drm_mm_pre_get(&mm->base);
++ if (unlikely(ret)) {
++ DRM_DEBUG("drm_mm_pre_get error\n");
++ return ret;
++ }
++
++ spin_lock(&mm->lock);
++ tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
++ if (unlikely(!tmp_node)) {
++ DRM_DEBUG("No free node found\n");
++ spin_unlock(&mm->lock);
++ break;
++ }
++
++ tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
++ spin_unlock(&mm->lock);
++ } while (!tmp_node);
++
++ if (!tmp_node) {
++ DRM_DEBUG("Node allocation failed\n");
++ return -ENOMEM;
++ }
++
++ *node = tmp_node;
++ return 0;
++}
++
++static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
++{
++ spin_lock(&mm->lock);
++ drm_mm_put_block(node);
++ spin_unlock(&mm->lock);
++}
++
++int psb_gtt_map_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo,
++ uint32_t *offset)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct psb_gtt_mm *mm = dev_priv->gtt_mm;
++ struct psb_gtt *pg = dev_priv->pg;
++ uint32_t size, pages, offset_pages;
++ void *kmem;
++ struct drm_mm_node *node;
++ struct page **page_list;
++ struct psb_gtt_mem_mapping *mapping = NULL;
++ int ret;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
++ hKernelMemInfo);
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
++ psKernelMemInfo, (u32)hKernelMemInfo);
++
++ size = psKernelMemInfo->ui32AllocSize;
++ kmem = psKernelMemInfo->pvLinAddrKM;
++ pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
++ size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ if (!kmem)
++ DRM_DEBUG("kmem is NULL");
++
++ /*get pages*/
++ ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle,
++ &page_list);
++ if (ret) {
++ DRM_DEBUG("get pages error\n");
++ return ret;
++ }
++
++ DRM_DEBUG("get %ld pages\n", pages);
++
++ /*alloc memory in TT apeture*/
++ ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node);
++ if (ret) {
++ DRM_DEBUG("alloc TT memory error\n");
++ goto failed_pages_alloc;
++ }
++
++ /*update psb_gtt_mm*/
++ ret = psb_gtt_add_node(mm,
++ (u32)psb_get_tgid(),
++ (u32)hKernelMemInfo,
++ node,
++ &mapping);
++ if (ret) {
++ DRM_DEBUG("add_node failed");
++ goto failed_add_node;
++ }
++
++ node = mapping->node;
++ offset_pages = node->start;
++
++ DRM_DEBUG("get free node for %ld pages, offset %ld pages",
++ pages, offset_pages);
++
++ /*update gtt*/
++ psb_gtt_insert_pages(pg, page_list,
++ (unsigned)offset_pages,
++ (unsigned)pages,
++ 0,
++ 0,
++ 0);
++
++ *offset = offset_pages;
++ return 0;
++
++failed_add_node:
++ psb_gtt_mm_free_mem(mm, node);
++failed_pages_alloc:
++ kfree(page_list);
++ return ret;
++}
++
++int psb_gtt_unmap_meminfo(struct drm_device *dev, IMG_HANDLE hKernelMemInfo)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm *mm = dev_priv->gtt_mm;
++ struct psb_gtt *pg = dev_priv->pg;
++ uint32_t pages, offset_pages;
++ struct drm_mm_node *node;
++ int ret;
++
++ ret = psb_gtt_remove_node(mm,
++ (u32)psb_get_tgid(),
++ (u32)hKernelMemInfo,
++ &node);
++ if (ret) {
++ DRM_DEBUG("remove node failed\n");
++ return ret;
++ }
++
++ /*remove gtt entries*/
++ offset_pages = node->start;
++ pages = node->size;
++
++ psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0);
++
++
++ /*free tt node*/
++
++ psb_gtt_mm_free_mem(mm, node);
++ return 0;
++}
++
++int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct psb_gtt_mapping_arg *arg
++ = (struct psb_gtt_mapping_arg *)data;
++ uint32_t *offset_pages = &arg->offset_pages;
++
++ DRM_DEBUG("\n");
++
++ return psb_gtt_map_meminfo(dev, arg->hKernelMemInfo, offset_pages);
++}
++
++int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++
++ struct psb_gtt_mapping_arg *arg
++ = (struct psb_gtt_mapping_arg *)data;
++
++ DRM_DEBUG("\n");
++
++ return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
++}
++
++int psb_gtt_map_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId,
++ IMG_CPU_PHYADDR *pPages,
++ unsigned int ui32PagesNum,
++ unsigned int *ui32Offset)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm * mm = dev_priv->gtt_mm;
++ struct psb_gtt * pg = dev_priv->pg;
++
++ uint32_t size, pages, offset_pages;
++ struct drm_mm_node * node = NULL;
++ struct psb_gtt_mem_mapping * mapping = NULL;
++ int ret;
++
++ size = ui32PagesNum * PAGE_SIZE;
++ pages = 0;
++
++ /*alloc memory in TT apeture*/
++ ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
++ if(ret)
++ {
++ DRM_DEBUG("alloc TT memory error\n");
++ goto failed_pages_alloc;
++ }
++
++ /*update psb_gtt_mm*/
++ ret = psb_gtt_add_node(mm,
++ (u32)ui32TaskId,
++ (u32)hHandle,
++ node,
++ &mapping);
++ if(ret)
++ {
++ DRM_DEBUG("add_node failed");
++ goto failed_add_node;
++ }
++
++ node = mapping->node;
++ offset_pages = node->start;
++
++ DRM_DEBUG("get free node for %ld pages, offset %ld pages", pages, offset_pages);
++
++ /*update gtt*/
++ psb_gtt_insert_phys_addresses( pg, pPages, (unsigned)offset_pages, (unsigned)ui32PagesNum, 0 );
++
++ *ui32Offset = offset_pages;
++ return 0;
++
++failed_add_node:
++ psb_gtt_mm_free_mem(mm, node);
++failed_pages_alloc:
++ return ret;
++}
++
++
++int psb_gtt_unmap_pvr_memory(struct drm_device *dev, unsigned int hHandle, unsigned int ui32TaskId)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm * mm = dev_priv->gtt_mm;
++ struct psb_gtt * pg = dev_priv->pg;
++ uint32_t pages, offset_pages;
++ struct drm_mm_node * node;
++ int ret;
++
++ ret = psb_gtt_remove_node(mm,
++ (u32)ui32TaskId,
++ (u32)hHandle,
++ &node);
++ if(ret)
++ {
++ printk("remove node failed\n");
++ return ret;
++ }
++
++ /*remove gtt entries*/
++ offset_pages = node->start;
++ pages = node->size;
++
++ psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0);
++
++ /*free tt node*/
++ psb_gtt_mm_free_mem(mm, node);
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_gtt.h
+@@ -0,0 +1,111 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_GTT_H_
++#define _PSB_GTT_H_
++
++#include <drm/drmP.h>
++
++#include "img_types.h"
++
++struct psb_gtt {
++ struct drm_device *dev;
++ int initialized;
++ uint32_t gatt_start;
++ uint32_t mmu_gatt_start;
++ uint32_t ci_start;
++ uint32_t rar_start;
++ uint32_t gtt_start;
++ uint32_t gtt_phys_start;
++ unsigned gtt_pages;
++ unsigned gatt_pages;
++ uint32_t stolen_base;
++ void *vram_addr;
++ uint32_t pge_ctl;
++ u16 gmch_ctrl;
++ unsigned long stolen_size;
++ unsigned long vram_stolen_size;
++ unsigned long ci_stolen_size;
++ unsigned long rar_stolen_size;
++ uint32_t *gtt_map;
++ struct rw_semaphore sem;
++};
++
++struct psb_gtt_mm {
++ struct drm_mm base;
++ struct drm_open_hash hash;
++ uint32_t count;
++ spinlock_t lock;
++};
++
++struct psb_gtt_hash_entry {
++ struct drm_open_hash ht;
++ uint32_t count;
++ struct drm_hash_item item;
++};
++
++struct psb_gtt_mem_mapping {
++ struct drm_mm_node *node;
++ struct drm_hash_item item;
++};
++
++#if 0
++/*Ioctl args*/
++struct psb_gtt_mapping_arg {
++ IMG_HANDLE hKernelMemInfo;
++};
++#endif
++
++/*Exported functions*/
++extern int psb_gtt_init(struct psb_gtt *pg, int resume);
++extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type);
++extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride);
++
++extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
++extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
++extern int psb_gtt_map_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo,
++ uint32_t *offset);
++extern int psb_gtt_unmap_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo);
++extern int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_gtt_mm_init(struct psb_gtt *pg);
++extern void psb_gtt_mm_takedown(void);
++
++extern int psb_gtt_map_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId,
++ IMG_CPU_PHYADDR *pPages,
++ unsigned int ui32PagesNum,
++ unsigned int *ui32Offset);
++
++extern int psb_gtt_unmap_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_hotplug.c
+@@ -0,0 +1,425 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#include "psb_umevents.h"
++#include "psb_hotplug.h"
++/**
++ * inform the kernel of the work to be performed and related function.
++ *
++ */
++DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq);
++DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq);
++DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq);
++/**
++ * psb_hotplug_notify_change_um - notify user mode of hotplug changes
++ *
++ * @name: name of event to notify user mode of change to
++ * @state: hotplug state to search for event object in
++ *
++ */
++int psb_hotplug_notify_change_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_change_wq_data.dev_name_arry
++ [state->hotplug_change_wq_data.dev_name_write][0]), name);
++ state->hotplug_change_wq_data.dev_name_arry_rw_status
++ [state->hotplug_change_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_change_wq_data.dev_name_write++;
++ if (state->hotplug_change_wq_data.dev_name_write ==
++ state->hotplug_change_wq_data.dev_name_read) {
++ state->hotplug_change_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_change_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_change_wq_data.dev_name_write = 0;
++ state->hotplug_change_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_change_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work));
++ return IRQ_HANDLED;
++}
++/**
++ *
++ * psb_hotplug_create_and_notify_um - create and notify user mode of new dev
++ *
++ * @name: name to give for new event / device
++ * @state: hotplug state to track new event /device in
++ *
++ */
++int psb_hotplug_create_and_notify_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_create_wq_data.dev_name_arry
++ [state->hotplug_create_wq_data.dev_name_write][0]), name);
++ state->hotplug_create_wq_data.dev_name_arry_rw_status
++ [state->hotplug_create_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_create_wq_data.dev_name_write++;
++ if (state->hotplug_create_wq_data.dev_name_write ==
++ state->hotplug_create_wq_data.dev_name_read) {
++ state->hotplug_create_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_create_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_create_wq_data.dev_name_write = 0;
++ state->hotplug_create_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_create_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work));
++ return IRQ_HANDLED;
++}
++/*EXPORT_SYMBOL(psb_hotplug_create_and_notify_um); */
++/**
++ * psb_hotplug_remove_and_notify_um - remove device and notify user mode
++ *
++ * @name: name of event / device to remove
++ * @state: hotplug state to remove event / device from
++ *
++ */
++int psb_hotplug_remove_and_notify_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_remove_wq_data.dev_name_arry
++ [state->hotplug_remove_wq_data.dev_name_write][0]), name);
++ state->hotplug_remove_wq_data.dev_name_arry_rw_status
++ [state->hotplug_remove_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_remove_wq_data.dev_name_write++;
++ if (state->hotplug_remove_wq_data.dev_name_write ==
++ state->hotplug_remove_wq_data.dev_name_read) {
++ state->hotplug_remove_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_remove_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_remove_wq_data.dev_name_write = 0;
++ state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
++ return IRQ_HANDLED;
++}
++/*EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um); */
++/**
++ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
++ *
++ * @parent_kobj: parent kobject to associate hotplug kset with
++ * @state: hotplug state to assocaite workqueues with
++ *
++ */
++struct umevent_list *psb_hotplug_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct hotplug_state *state)
++{
++ struct umevent_list *new_hotplug_dev_list = NULL;
++
++ new_hotplug_dev_list = psb_umevent_create_list();
++ if (new_hotplug_dev_list)
++ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
++ "psb_hotplug");
++
++ state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
++ if (!state->hotplug_wq)
++ return NULL;
++
++ INIT_WORK(&state->hotplug_create_wq_data.work,
++ psb_hotplug_dev_create_wq);
++ INIT_WORK(&state->hotplug_remove_wq_data.work,
++ psb_hotplug_dev_remove_wq);
++ INIT_WORK(&state->hotplug_change_wq_data.work,
++ psb_hotplug_dev_change_wq);
++
++ state->hotplug_create_wq_data.dev_name_read = 0;
++ state->hotplug_create_wq_data.dev_name_write = 0;
++ state->hotplug_create_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ state->hotplug_remove_wq_data.dev_name_read = 0;
++ state->hotplug_remove_wq_data.dev_name_write = 0;
++ state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ state->hotplug_change_wq_data.dev_name_read = 0;
++ state->hotplug_change_wq_data.dev_name_write = 0;
++ state->hotplug_change_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ return new_hotplug_dev_list;
++}
++/*EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init); */
++/**
++ *
++ * psb_hotplug_init - init hotplug subsystem
++ *
++ * @parent_kobj: parent kobject to associate hotplug state with
++ *
++ */
++struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
++{
++ struct hotplug_state *state;
++ state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
++ state->list = NULL;
++ state->list = psb_hotplug_device_pool_create_and_init(
++ parent_kobj,
++ state);
++ return state;
++}
++/**
++ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
++ *
++ * @state: hotplug state to destroy
++ *
++ */
++void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
++{
++ flush_workqueue(state->hotplug_wq);
++ destroy_workqueue(state->hotplug_wq);
++ psb_umevent_cleanup(state->list);
++ kfree(state);
++}
++/*EXPORT_SYMBOL(psb_hotplug_device_pool_destroy); */
++/**
++ * psb_hotplug_dev_create_wq - create workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_create_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ struct umevent_obj *wq_working_hotplug_disp_obj;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++/*EXPORT_SYMBOL(psb_hotplug_dev_create_wq); */
++/**
++ * psb_hotplug_dev_remove_wq - remove workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_remove_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++/*EXPORT_SYMBOL(psb_hotplug_dev_remove_wq); */
++/**
++ * psb_hotplug_dev_change_wq - change workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_change_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ struct umevent_obj *wq_working_hotplug_disp_obj;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++/*EXPORT_SYMBOL(psb_hotplug_dev_change_wq); */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_hotplug.h
+@@ -0,0 +1,90 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#ifndef _PSB_HOTPLUG_H_
++#define _PSB_HOTPLUG_H_
++/**
++ * required includes
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * hotplug specific defines
++ *
++ */
++#define DRM_HOTPLUG_RING_DEPTH 256
++#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
++#define DRM_HOTPLUG_READY_TO_READ 1
++#define DRM_HOTPLUG_READ_COMPLETE 2
++/**
++ * hotplug workqueue data struct.
++ */
++struct hotplug_disp_workqueue_data {
++ struct work_struct work;
++ const char *dev_name;
++ int dev_name_write;
++ int dev_name_read;
++ int dev_name_write_wrap;
++ int dev_name_read_write_wrap_ack;
++ char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
++ int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
++ struct umevent_list *hotplug_dev_list;
++};
++/**
++ * hotplug state structure
++ *
++ */
++struct hotplug_state {
++ struct workqueue_struct *hotplug_wq;
++ struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
++ struct hotplug_disp_workqueue_data hotplug_create_wq_data;
++ struct hotplug_disp_workqueue_data hotplug_change_wq_data;
++ struct umevent_list *list;
++};
++/**
++ * main interface function prototypes for hotplug support.
++ *
++ */
++struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
++extern int psb_hotplug_notify_change_um(const char *name,
++ struct hotplug_state *state);
++extern int psb_hotplug_create_and_notify_um(const char *name,
++ struct hotplug_state *state);
++extern int psb_hotplug_remove_and_notify_um(const char *name,
++ struct hotplug_state *state);
++extern struct umevent_list *psb_hotplug_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct hotplug_state *state);
++extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
++/**
++ * to go back and forth between work struct and workqueue data
++ *
++ */
++#define to_hotplug_disp_workqueue_data(x) \
++ container_of(x, struct hotplug_disp_workqueue_data, work)
++
++/**
++ * function prototypes for workqueue implementation
++ *
++ */
++extern void psb_hotplug_dev_create_wq(struct work_struct *work);
++extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
++extern void psb_hotplug_dev_change_wq(struct work_struct *work);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_bios.c
+@@ -0,0 +1,305 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_bios.h"
++
++
++static void *find_section(struct bdb_header *bdb, int section_id)
++{
++ u8 *base = (u8 *)bdb;
++ int index = 0;
++ u16 total, current_size;
++ u8 current_id;
++
++ /* skip to first section */
++ index += bdb->header_size;
++ total = bdb->bdb_size;
++
++ /* walk the sections looking for section_id */
++ while (index < total) {
++ current_id = *(base + index);
++ index++;
++ current_size = *((u16 *)(base + index));
++ index += 2;
++ if (current_id == section_id)
++ return base + index;
++ index += current_size;
++ }
++
++ return NULL;
++}
++
++static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
++ struct lvds_dvo_timing *dvo_timing)
++{
++ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
++ dvo_timing->hactive_lo;
++ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
++ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
++ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
++ dvo_timing->hsync_pulse_width;
++ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
++ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
++
++ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
++ dvo_timing->vactive_lo;
++ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
++ dvo_timing->vsync_off;
++ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
++ dvo_timing->vsync_pulse_width;
++ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
++ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
++ panel_fixed_mode->clock = dvo_timing->clock * 10;
++ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
++
++ /* Some VBTs have bogus h/vtotal values */
++ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
++ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
++ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
++ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
++
++ drm_mode_set_name(panel_fixed_mode);
++}
++
++static void parse_backlight_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
++ struct bdb_lvds_backlight *lvds_bl;
++ u8 p_type = 0;
++ void *bl_start = NULL;
++ struct bdb_lvds_options *lvds_opts
++ = find_section(bdb, BDB_LVDS_OPTIONS);
++
++ dev_priv->lvds_bl = NULL;
++
++ if (lvds_opts) {
++ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
++ p_type = lvds_opts->panel_type;
++ } else {
++ DRM_DEBUG("no lvds_options\n");
++ return;
++ }
++
++ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
++ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
++
++ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
++ if (!lvds_bl) {
++ DRM_DEBUG("No memory\n");
++ return;
++ }
++
++ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
++
++ dev_priv->lvds_bl = lvds_bl;
++}
++
++/* Try to find integrated panel data */
++static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_lvds_options *lvds_options;
++ struct bdb_lvds_lfp_data *lvds_lfp_data;
++ struct bdb_lvds_lfp_data_entry *entry;
++ struct lvds_dvo_timing *dvo_timing;
++ struct drm_display_mode *panel_fixed_mode;
++
++ /* Defaults if we can't find VBT info */
++ dev_priv->lvds_dither = 0;
++ dev_priv->lvds_vbt = 0;
++
++ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
++ if (!lvds_options)
++ return;
++
++ dev_priv->lvds_dither = lvds_options->pixel_dither;
++ if (lvds_options->panel_type == 0xff)
++ return;
++
++ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
++ if (!lvds_lfp_data)
++ return;
++
++ dev_priv->lvds_vbt = 1;
++
++ entry = &lvds_lfp_data->data[lvds_options->panel_type];
++ dvo_timing = &entry->dvo_timing;
++
++ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
++ GFP_KERNEL);
++
++ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
++
++ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
++
++ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
++ drm_mode_debug_printmodeline(panel_fixed_mode);
++
++ return;
++}
++
++/* Try to find sdvo panel data */
++static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
++ struct lvds_dvo_timing *dvo_timing;
++ struct drm_display_mode *panel_fixed_mode;
++
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++
++ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
++ if (!sdvo_lvds_options)
++ return;
++
++ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
++ if (!dvo_timing)
++ return;
++
++ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
++
++ if (!panel_fixed_mode)
++ return;
++
++ fill_detail_timing_data(panel_fixed_mode,
++ dvo_timing + sdvo_lvds_options->panel_type);
++
++ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
++
++ return;
++}
++
++static void parse_general_features(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_general_features *general;
++
++ /* Set sensible defaults in case we can't find the general block */
++ dev_priv->int_tv_support = 1;
++ dev_priv->int_crt_support = 1;
++
++ general = find_section(bdb, BDB_GENERAL_FEATURES);
++ if (general) {
++ dev_priv->int_tv_support = general->int_tv_support;
++ dev_priv->int_crt_support = general->int_crt_support;
++ dev_priv->lvds_use_ssc = general->enable_ssc;
++
++ if (dev_priv->lvds_use_ssc) {
++ if (IS_I855(dev_priv->dev))
++ dev_priv->lvds_ssc_freq
++ = general->ssc_freq ? 66 : 48;
++ else
++ dev_priv->lvds_ssc_freq
++ = general->ssc_freq ? 100 : 96;
++ }
++ }
++}
++
++/**
++ * psb_intel_init_bios - initialize VBIOS settings & find VBT
++ * @dev: DRM device
++ *
++ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
++ * to appropriate values.
++ *
++ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
++ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
++ * feed an updated VBT back through that, compared to what we'll fetch using
++ * this method of groping around in the BIOS data.
++ *
++ * Returns 0 on success, nonzero on failure.
++ */
++bool psb_intel_init_bios(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pci_dev *pdev = dev->pdev;
++ struct vbt_header *vbt = NULL;
++ struct bdb_header *bdb;
++ u8 __iomem *bios;
++ size_t size;
++ int i;
++
++ bios = pci_map_rom(pdev, &size);
++ if (!bios)
++ return -1;
++
++ /* Scour memory looking for the VBT signature */
++ for (i = 0; i + 4 < size; i++) {
++ if (!memcmp(bios + i, "$VBT", 4)) {
++ vbt = (struct vbt_header *)(bios + i);
++ break;
++ }
++ }
++
++ if (!vbt) {
++ DRM_ERROR("VBT signature missing\n");
++ pci_unmap_rom(pdev, bios);
++ return -1;
++ }
++
++ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++
++ /* Grab useful general definitions */
++ parse_general_features(dev_priv, bdb);
++ parse_lfp_panel_data(dev_priv, bdb);
++ parse_sdvo_panel_data(dev_priv, bdb);
++ parse_backlight_data(dev_priv, bdb);
++
++ pci_unmap_rom(pdev, bios);
++
++ return 0;
++}
++
++/**
++ * Destroy and free VBT data
++ */
++void psb_intel_destory_bios(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_display_mode *sdvo_lvds_vbt_mode =
++ dev_priv->sdvo_lvds_vbt_mode;
++ struct drm_display_mode *lfp_lvds_vbt_mode =
++ dev_priv->lfp_lvds_vbt_mode;
++ struct bdb_lvds_backlight *lvds_bl =
++ dev_priv->lvds_bl;
++
++ /*free sdvo panel mode*/
++ if (sdvo_lvds_vbt_mode) {
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++ kfree(sdvo_lvds_vbt_mode);
++ }
++
++ if (lfp_lvds_vbt_mode) {
++ dev_priv->lfp_lvds_vbt_mode = NULL;
++ kfree(lfp_lvds_vbt_mode);
++ }
++
++ if (lvds_bl) {
++ dev_priv->lvds_bl = NULL;
++ kfree(lvds_bl);
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_bios.h
+@@ -0,0 +1,430 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#ifndef _I830_BIOS_H_
++#define _I830_BIOS_H_
++
++#include <drm/drmP.h>
++
++struct vbt_header {
++ u8 signature[20]; /**< Always starts with 'VBT$' */
++ u16 version; /**< decimal */
++ u16 header_size; /**< in bytes */
++ u16 vbt_size; /**< in bytes */
++ u8 vbt_checksum;
++ u8 reserved0;
++ u32 bdb_offset; /**< from beginning of VBT */
++ u32 aim_offset[4]; /**< from beginning of VBT */
++} __attribute__((packed));
++
++
++struct bdb_header {
++ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
++ u16 version; /**< decimal */
++ u16 header_size; /**< in bytes */
++ u16 bdb_size; /**< in bytes */
++};
++
++/* strictly speaking, this is a "skip" block, but it has interesting info */
++struct vbios_data {
++ u8 type; /* 0 == desktop, 1 == mobile */
++ u8 relstage;
++ u8 chipset;
++ u8 lvds_present:1;
++ u8 tv_present:1;
++ u8 rsvd2:6; /* finish byte */
++ u8 rsvd3[4];
++ u8 signon[155];
++ u8 copyright[61];
++ u16 code_segment;
++ u8 dos_boot_mode;
++ u8 bandwidth_percent;
++ u8 rsvd4; /* popup memory size */
++ u8 resize_pci_bios;
++ u8 rsvd5; /* is crt already on ddc2 */
++} __attribute__((packed));
++
++/*
++ * There are several types of BIOS data blocks (BDBs), each block has
++ * an ID and size in the first 3 bytes (ID in first, size in next 2).
++ * Known types are listed below.
++ */
++#define BDB_GENERAL_FEATURES 1
++#define BDB_GENERAL_DEFINITIONS 2
++#define BDB_OLD_TOGGLE_LIST 3
++#define BDB_MODE_SUPPORT_LIST 4
++#define BDB_GENERIC_MODE_TABLE 5
++#define BDB_EXT_MMIO_REGS 6
++#define BDB_SWF_IO 7
++#define BDB_SWF_MMIO 8
++#define BDB_DOT_CLOCK_TABLE 9
++#define BDB_MODE_REMOVAL_TABLE 10
++#define BDB_CHILD_DEVICE_TABLE 11
++#define BDB_DRIVER_FEATURES 12
++#define BDB_DRIVER_PERSISTENCE 13
++#define BDB_EXT_TABLE_PTRS 14
++#define BDB_DOT_CLOCK_OVERRIDE 15
++#define BDB_DISPLAY_SELECT 16
++/* 17 rsvd */
++#define BDB_DRIVER_ROTATION 18
++#define BDB_DISPLAY_REMOVE 19
++#define BDB_OEM_CUSTOM 20
++#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
++#define BDB_SDVO_LVDS_OPTIONS 22
++#define BDB_SDVO_PANEL_DTDS 23
++#define BDB_SDVO_LVDS_PNP_IDS 24
++#define BDB_SDVO_LVDS_POWER_SEQ 25
++#define BDB_TV_OPTIONS 26
++#define BDB_LVDS_OPTIONS 40
++#define BDB_LVDS_LFP_DATA_PTRS 41
++#define BDB_LVDS_LFP_DATA 42
++#define BDB_LVDS_BACKLIGHT 43
++#define BDB_LVDS_POWER 44
++#define BDB_SKIP 254 /* VBIOS private block, ignore */
++
++struct bdb_general_features {
++ /* bits 1 */
++ u8 panel_fitting:2;
++ u8 flexaim:1;
++ u8 msg_enable:1;
++ u8 clear_screen:3;
++ u8 color_flip:1;
++
++ /* bits 2 */
++ u8 download_ext_vbt:1;
++ u8 enable_ssc:1;
++ u8 ssc_freq:1;
++ u8 enable_lfp_on_override:1;
++ u8 disable_ssc_ddt:1;
++ u8 rsvd8:3; /* finish byte */
++
++ /* bits 3 */
++ u8 disable_smooth_vision:1;
++ u8 single_dvi:1;
++ u8 rsvd9:6; /* finish byte */
++
++ /* bits 4 */
++ u8 legacy_monitor_detect;
++
++ /* bits 5 */
++ u8 int_crt_support:1;
++ u8 int_tv_support:1;
++ u8 rsvd11:6; /* finish byte */
++} __attribute__((packed));
++
++struct bdb_general_definitions {
++ /* DDC GPIO */
++ u8 crt_ddc_gmbus_pin;
++
++ /* DPMS bits */
++ u8 dpms_acpi:1;
++ u8 skip_boot_crt_detect:1;
++ u8 dpms_aim:1;
++ u8 rsvd1:5; /* finish byte */
++
++ /* boot device bits */
++ u8 boot_display[2];
++ u8 child_dev_size;
++
++ /* device info */
++ u8 tv_or_lvds_info[33];
++ u8 dev1[33];
++ u8 dev2[33];
++ u8 dev3[33];
++ u8 dev4[33];
++ /* may be another device block here on some platforms */
++};
++
++struct bdb_lvds_options {
++ u8 panel_type;
++ u8 rsvd1;
++ /* LVDS capabilities, stored in a dword */
++ u8 pfit_mode:2;
++ u8 pfit_text_mode_enhanced:1;
++ u8 pfit_gfx_mode_enhanced:1;
++ u8 pfit_ratio_auto:1;
++ u8 pixel_dither:1;
++ u8 lvds_edid:1;
++ u8 rsvd2:1;
++ u8 rsvd4;
++} __attribute__((packed));
++
++struct bdb_lvds_backlight {
++ u8 type:2;
++ u8 pol:1;
++ u8 gpio:3;
++ u8 gmbus:2;
++ u16 freq;
++ u8 minbrightness;
++ u8 i2caddr;
++ u8 brightnesscmd;
++ /*FIXME: more...*/
++} __attribute__((packed));
++
++/* LFP pointer table contains entries to the struct below */
++struct bdb_lvds_lfp_data_ptr {
++ u16 fp_timing_offset; /* offsets are from start of bdb */
++ u8 fp_table_size;
++ u16 dvo_timing_offset;
++ u8 dvo_table_size;
++ u16 panel_pnp_id_offset;
++ u8 pnp_table_size;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_ptrs {
++ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
++ struct bdb_lvds_lfp_data_ptr ptr[16];
++} __attribute__((packed));
++
++/* LFP data has 3 blocks per entry */
++struct lvds_fp_timing {
++ u16 x_res;
++ u16 y_res;
++ u32 lvds_reg;
++ u32 lvds_reg_val;
++ u32 pp_on_reg;
++ u32 pp_on_reg_val;
++ u32 pp_off_reg;
++ u32 pp_off_reg_val;
++ u32 pp_cycle_reg;
++ u32 pp_cycle_reg_val;
++ u32 pfit_reg;
++ u32 pfit_reg_val;
++ u16 terminator;
++} __attribute__((packed));
++
++struct lvds_dvo_timing {
++ u16 clock; /**< In 10khz */
++ u8 hactive_lo;
++ u8 hblank_lo;
++ u8 hblank_hi:4;
++ u8 hactive_hi:4;
++ u8 vactive_lo;
++ u8 vblank_lo;
++ u8 vblank_hi:4;
++ u8 vactive_hi:4;
++ u8 hsync_off_lo;
++ u8 hsync_pulse_width;
++ u8 vsync_pulse_width:4;
++ u8 vsync_off:4;
++ u8 rsvd0:6;
++ u8 hsync_off_hi:2;
++ u8 h_image;
++ u8 v_image;
++ u8 max_hv;
++ u8 h_border;
++ u8 v_border;
++ u8 rsvd1:3;
++ u8 digital:2;
++ u8 vsync_positive:1;
++ u8 hsync_positive:1;
++ u8 rsvd2:1;
++} __attribute__((packed));
++
++struct lvds_pnp_id {
++ u16 mfg_name;
++ u16 product_code;
++ u32 serial;
++ u8 mfg_week;
++ u8 mfg_year;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_entry {
++ struct lvds_fp_timing fp_timing;
++ struct lvds_dvo_timing dvo_timing;
++ struct lvds_pnp_id pnp_id;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data {
++ struct bdb_lvds_lfp_data_entry data[16];
++} __attribute__((packed));
++
++struct aimdb_header {
++ char signature[16];
++ char oem_device[20];
++ u16 aimdb_version;
++ u16 aimdb_header_size;
++ u16 aimdb_size;
++} __attribute__((packed));
++
++struct aimdb_block {
++ u8 aimdb_id;
++ u16 aimdb_size;
++} __attribute__((packed));
++
++struct vch_panel_data {
++ u16 fp_timing_offset;
++ u8 fp_timing_size;
++ u16 dvo_timing_offset;
++ u8 dvo_timing_size;
++ u16 text_fitting_offset;
++ u8 text_fitting_size;
++ u16 graphics_fitting_offset;
++ u8 graphics_fitting_size;
++} __attribute__((packed));
++
++struct vch_bdb_22 {
++ struct aimdb_block aimdb_block;
++ struct vch_panel_data panels[16];
++} __attribute__((packed));
++
++struct bdb_sdvo_lvds_options {
++ u8 panel_backlight;
++ u8 h40_set_panel_type;
++ u8 panel_type;
++ u8 ssc_clk_freq;
++ u16 als_low_trip;
++ u16 als_high_trip;
++ u8 sclalarcoeff_tab_row_num;
++ u8 sclalarcoeff_tab_row_size;
++ u8 coefficient[8];
++ u8 panel_misc_bits_1;
++ u8 panel_misc_bits_2;
++ u8 panel_misc_bits_3;
++ u8 panel_misc_bits_4;
++} __attribute__((packed));
++
++
++extern bool psb_intel_init_bios(struct drm_device *dev);
++extern void psb_intel_destory_bios(struct drm_device *dev);
++
++/*
++ * Driver<->VBIOS interaction occurs through scratch bits in
++ * GR18 & SWF*.
++ */
++
++/* GR18 bits are set on display switch and hotkey events */
++#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
++#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
++#define GR18_HK_NONE (0x0<<3)
++#define GR18_HK_LFP_STRETCH (0x1<<3)
++#define GR18_HK_TOGGLE_DISP (0x2<<3)
++#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
++#define GR18_HK_POPUP_DISABLED (0x6<<3)
++#define GR18_HK_POPUP_ENABLED (0x7<<3)
++#define GR18_HK_PFIT (0x8<<3)
++#define GR18_HK_APM_CHANGE (0xa<<3)
++#define GR18_HK_MULTIPLE (0xc<<3)
++#define GR18_USER_INT_EN (1<<2)
++#define GR18_A0000_FLUSH_EN (1<<1)
++#define GR18_SMM_EN (1<<0)
++
++/* Set by driver, cleared by VBIOS */
++#define SWF00_YRES_SHIFT 16
++#define SWF00_XRES_SHIFT 0
++#define SWF00_RES_MASK 0xffff
++
++/* Set by VBIOS at boot time and driver at runtime */
++#define SWF01_TV2_FORMAT_SHIFT 8
++#define SWF01_TV1_FORMAT_SHIFT 0
++#define SWF01_TV_FORMAT_MASK 0xffff
++
++#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
++#define SWF10_GTT_OVERRIDE_EN (1<<28)
++#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
++#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
++#define SWF10_OLD_TOGGLE 0x0
++#define SWF10_TOGGLE_LIST_1 0x1
++#define SWF10_TOGGLE_LIST_2 0x2
++#define SWF10_TOGGLE_LIST_3 0x3
++#define SWF10_TOGGLE_LIST_4 0x4
++#define SWF10_PANNING_EN (1<<23)
++#define SWF10_DRIVER_LOADED (1<<22)
++#define SWF10_EXTENDED_DESKTOP (1<<21)
++#define SWF10_EXCLUSIVE_MODE (1<<20)
++#define SWF10_OVERLAY_EN (1<<19)
++#define SWF10_PLANEB_HOLDOFF (1<<18)
++#define SWF10_PLANEA_HOLDOFF (1<<17)
++#define SWF10_VGA_HOLDOFF (1<<16)
++#define SWF10_ACTIVE_DISP_MASK 0xffff
++#define SWF10_PIPEB_LFP2 (1<<15)
++#define SWF10_PIPEB_EFP2 (1<<14)
++#define SWF10_PIPEB_TV2 (1<<13)
++#define SWF10_PIPEB_CRT2 (1<<12)
++#define SWF10_PIPEB_LFP (1<<11)
++#define SWF10_PIPEB_EFP (1<<10)
++#define SWF10_PIPEB_TV (1<<9)
++#define SWF10_PIPEB_CRT (1<<8)
++#define SWF10_PIPEA_LFP2 (1<<7)
++#define SWF10_PIPEA_EFP2 (1<<6)
++#define SWF10_PIPEA_TV2 (1<<5)
++#define SWF10_PIPEA_CRT2 (1<<4)
++#define SWF10_PIPEA_LFP (1<<3)
++#define SWF10_PIPEA_EFP (1<<2)
++#define SWF10_PIPEA_TV (1<<1)
++#define SWF10_PIPEA_CRT (1<<0)
++
++#define SWF11_MEMORY_SIZE_SHIFT 16
++#define SWF11_SV_TEST_EN (1<<15)
++#define SWF11_IS_AGP (1<<14)
++#define SWF11_DISPLAY_HOLDOFF (1<<13)
++#define SWF11_DPMS_REDUCED (1<<12)
++#define SWF11_IS_VBE_MODE (1<<11)
++#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
++#define SWF11_DPMS_MASK 0x07
++#define SWF11_DPMS_OFF (1<<2)
++#define SWF11_DPMS_SUSPEND (1<<1)
++#define SWF11_DPMS_STANDBY (1<<0)
++#define SWF11_DPMS_ON 0
++
++#define SWF14_GFX_PFIT_EN (1<<31)
++#define SWF14_TEXT_PFIT_EN (1<<30)
++#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
++#define SWF14_POPUP_EN (1<<28)
++#define SWF14_DISPLAY_HOLDOFF (1<<27)
++#define SWF14_DISP_DETECT_EN (1<<26)
++#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
++#define SWF14_DRIVER_STATUS (1<<24)
++#define SWF14_OS_TYPE_WIN9X (1<<23)
++#define SWF14_OS_TYPE_WINNT (1<<22)
++/* 21:19 rsvd */
++#define SWF14_PM_TYPE_MASK 0x00070000
++#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
++#define SWF14_PM_ACPI (0x3 << 16)
++#define SWF14_PM_APM_12 (0x2 << 16)
++#define SWF14_PM_APM_11 (0x1 << 16)
++#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
++ /* if GR18 indicates a display switch */
++#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
++#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
++#define SWF14_DS_PIPEB_TV2_EN (1<<13)
++#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
++#define SWF14_DS_PIPEB_LFP_EN (1<<11)
++#define SWF14_DS_PIPEB_EFP_EN (1<<10)
++#define SWF14_DS_PIPEB_TV_EN (1<<9)
++#define SWF14_DS_PIPEB_CRT_EN (1<<8)
++#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
++#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
++#define SWF14_DS_PIPEA_TV2_EN (1<<5)
++#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
++#define SWF14_DS_PIPEA_LFP_EN (1<<3)
++#define SWF14_DS_PIPEA_EFP_EN (1<<2)
++#define SWF14_DS_PIPEA_TV_EN (1<<1)
++#define SWF14_DS_PIPEA_CRT_EN (1<<0)
++ /* if GR18 indicates a panel fitting request */
++#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
++ /* if GR18 indicates an APM change request */
++#define SWF14_APM_HIBERNATE 0x4
++#define SWF14_APM_SUSPEND 0x3
++#define SWF14_APM_STANDBY 0x1
++#define SWF14_APM_RESTORE 0x0
++
++#endif /* _I830_BIOS_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_display.c
+@@ -0,0 +1,2564 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++
++#include <drm/drmP.h>
++#include "psb_fb.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "psb_powermgmt.h"
++
++/*MDFLE_JLIU7 defines */
++static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
++static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
++ struct drm_file *file_priv,
++ uint32_t handle,
++ uint32_t width, uint32_t height);
++int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb);
++static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb);
++static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode);
++/*MDFLE_JLIU7 defines end */
++
++struct psb_intel_clock_t {
++ /* given values */
++ int n;
++ int m1, m2;
++ int p1, p2;
++ /* derived values */
++ int dot;
++ int vco;
++ int m;
++ int p;
++};
++
++struct psb_intel_range_t {
++ int min, max;
++};
++
++struct psb_intel_p2_t {
++ int dot_limit;
++ int p2_slow, p2_fast;
++};
++
++#define INTEL_P2_NUM 2
++
++struct psb_intel_limit_t {
++ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
++ struct psb_intel_p2_t p2;
++};
++
++#define I8XX_DOT_MIN 25000
++#define I8XX_DOT_MAX 350000
++#define I8XX_VCO_MIN 930000
++#define I8XX_VCO_MAX 1400000
++#define I8XX_N_MIN 3
++#define I8XX_N_MAX 16
++#define I8XX_M_MIN 96
++#define I8XX_M_MAX 140
++#define I8XX_M1_MIN 18
++#define I8XX_M1_MAX 26
++#define I8XX_M2_MIN 6
++#define I8XX_M2_MAX 16
++#define I8XX_P_MIN 4
++#define I8XX_P_MAX 128
++#define I8XX_P1_MIN 2
++#define I8XX_P1_MAX 33
++#define I8XX_P1_LVDS_MIN 1
++#define I8XX_P1_LVDS_MAX 6
++#define I8XX_P2_SLOW 4
++#define I8XX_P2_FAST 2
++#define I8XX_P2_LVDS_SLOW 14
++#define I8XX_P2_LVDS_FAST 14 /* No fast option */
++#define I8XX_P2_SLOW_LIMIT 165000
++
++#define I9XX_DOT_MIN 20000
++#define I9XX_DOT_MAX 400000
++#define I9XX_VCO_MIN 1400000
++#define I9XX_VCO_MAX 2800000
++#define I9XX_N_MIN 3
++#define I9XX_N_MAX 8
++#define I9XX_M_MIN 70
++#define I9XX_M_MAX 120
++#define I9XX_M1_MIN 10
++#define I9XX_M1_MAX 20
++#define I9XX_M2_MIN 5
++#define I9XX_M2_MAX 9
++#define I9XX_P_SDVO_DAC_MIN 5
++#define I9XX_P_SDVO_DAC_MAX 80
++#define I9XX_P_LVDS_MIN 7
++#define I9XX_P_LVDS_MAX 98
++#define I9XX_P1_MIN 1
++#define I9XX_P1_MAX 8
++#define I9XX_P2_SDVO_DAC_SLOW 10
++#define I9XX_P2_SDVO_DAC_FAST 5
++#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
++#define I9XX_P2_LVDS_SLOW 14
++#define I9XX_P2_LVDS_FAST 7
++#define I9XX_P2_LVDS_SLOW_LIMIT 112000
++
++#define INTEL_LIMIT_I8XX_DVO_DAC 0
++#define INTEL_LIMIT_I8XX_LVDS 1
++#define INTEL_LIMIT_I9XX_SDVO_DAC 2
++#define INTEL_LIMIT_I9XX_LVDS 3
++
++static const struct psb_intel_limit_t psb_intel_limits[] = {
++ { /* INTEL_LIMIT_I8XX_DVO_DAC */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
++ },
++ { /* INTEL_LIMIT_I8XX_LVDS */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
++ I9XX_P2_SDVO_DAC_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_LVDS */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ /* The single-channel range is 25-112Mhz, and dual-channel
++ * is 80-224Mhz. Prefer single channel as much as possible.
++ */
++ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
++ },
++};
++
++static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ const struct psb_intel_limit_t *limit;
++
++ if (IS_I9XX(dev)) {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
++ } else {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
++ }
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++
++static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
++
++static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++static void psb_intel_clock(struct drm_device *dev, int refclk,
++ struct psb_intel_clock_t *clock)
++{
++ if (IS_I9XX(dev))
++ return i9xx_clock(refclk, clock);
++ else
++ return i8xx_clock(refclk, clock);
++}
++
++/**
++ * Returns whether any output on the specified pipe is of the specified type
++ */
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *l_entry;
++
++ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
++ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(l_entry);
++ if (psb_intel_output->type == type)
++ return true;
++ }
++ }
++ return false;
++}
++
++#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
++/**
++ * Returns whether the given set of divisors are valid for a given refclk with
++ * the given connectors.
++ */
++
++static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
++ struct psb_intel_clock_t *clock)
++{
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++
++ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
++ INTELPllInvalid("p1 out of range\n");
++ if (clock->p < limit->p.min || limit->p.max < clock->p)
++ INTELPllInvalid("p out of range\n");
++ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
++ INTELPllInvalid("m2 out of range\n");
++ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
++ INTELPllInvalid("m1 out of range\n");
++ if (clock->m1 <= clock->m2)
++ INTELPllInvalid("m1 <= m2\n");
++ if (clock->m < limit->m.min || limit->m.max < clock->m)
++ INTELPllInvalid("m out of range\n");
++ if (clock->n < limit->n.min || limit->n.max < clock->n)
++ INTELPllInvalid("n out of range\n");
++ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
++ INTELPllInvalid("vco out of range\n");
++ /* XXX: We may need to be checking "Dot clock"
++ * depending on the multiplier, connector, etc.,
++ * rather than just a single range.
++ */
++ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
++ INTELPllInvalid("dot out of range\n");
++
++ return true;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given
++ * refclk, or FALSE. The returned values represent the clock equation:
++ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
++ */
++static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
++ int refclk,
++ struct psb_intel_clock_t *best_clock)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_clock_t clock;
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++ int err = target;
++
++ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
++ /*
++ * For LVDS, if the panel is on, just rely on its current
++ * settings for dual-channel. We haven't figured out how to
++ * reliably set up different single/dual channel state, if we
++ * even can.
++ */
++ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++ LVDS_CLKB_POWER_UP)
++ clock.p2 = limit->p2.p2_fast;
++ else
++ clock.p2 = limit->p2.p2_slow;
++ } else {
++ if (target < limit->p2.dot_limit)
++ clock.p2 = limit->p2.p2_slow;
++ else
++ clock.p2 = limit->p2.p2_fast;
++ }
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
++ clock.m1++) {
++ for (clock.m2 = limit->m2.min;
++ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
++ clock.m2++) {
++ for (clock.n = limit->n.min;
++ clock.n <= limit->n.max; clock.n++) {
++ for (clock.p1 = limit->p1.min;
++ clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ psb_intel_clock(dev, refclk, &clock);
++
++ if (!psb_intel_PLL_is_valid
++ (crtc, &clock))
++ continue;
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ }
++ }
++
++ return err != target;
++}
++
++void psb_intel_wait_for_vblank(struct drm_device *dev)
++{
++ /* Wait for 20ms, i.e. one cycle at 50hz. */
++ udelay(20000);
++}
++
++int psb_intel_pipe_set_base(struct drm_crtc *crtc,
++ int x, int y, struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ unsigned long Start, Offset;
++ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ u32 dspcntr;
++ int ret = 0;
++
++ if (IS_MDFLD(dev))
++ return mdfld__intel_pipe_set_base(crtc, x, y, old_fb);
++
++ PSB_DEBUG_ENTRY("\n");
++
++ /* no fb bound */
++ if (!crtc->fb) {
++ DRM_DEBUG("No FB bound\n");
++ return 0;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ if (IS_MRST(dev) && (pipe == 0))
++ dspbase = MRST_DSPABASE;
++
++ Start = mode_dev->bo_offset(dev, psbfb);
++ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
++
++ REG_WRITE(dspstride, crtc->fb->pitch);
++
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++ switch (crtc->fb->bits_per_pixel) {
++ case 8:
++ dspcntr |= DISPPLANE_8BPP;
++ break;
++ case 16:
++ if (crtc->fb->depth == 15)
++ dspcntr |= DISPPLANE_15_16BPP;
++ else
++ dspcntr |= DISPPLANE_16BPP;
++ break;
++ case 24:
++ case 32:
++ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++ break;
++ default:
++ DRM_ERROR("Unknown color depth\n");
++ ret = -EINVAL;
++ goto psb_intel_pipe_set_base_exit;
++ }
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
++ if (IS_I965G(dev) || IS_MRST(dev)) {
++ REG_WRITE(dspbase, Offset);
++ REG_READ(dspbase);
++ REG_WRITE(dspsurf, Start);
++ REG_READ(dspsurf);
++ } else {
++ REG_WRITE(dspbase, Start + Offset);
++ REG_READ(dspbase);
++ }
++
++psb_intel_pipe_set_base_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return ret;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ /* struct drm_i915_private *dev_priv = dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for vblank for the disable to take effect. */
++ psb_intel_wait_for_vblank(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return 0;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return 0;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3F3E);
++}
++
++static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void psb_intel_crtc_commit(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++void psb_intel_encoder_prepare(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
++}
++
++void psb_intel_encoder_commit(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of commit see psb_intel_lvds_commit */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++}
++
++static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ return true;
++}
++
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++ u32 pfit_control;
++
++ /* i830 doesn't have a panel fitter */
++ if (IS_I830(dev))
++ return -1;
++
++ pfit_control = REG_READ(PFIT_CONTROL);
++
++ /* See if the panel fitter is in use */
++ if ((pfit_control & PFIT_ENABLE) == 0)
++ return -1;
++
++ /* 965 can place panel fitter on either pipe */
++ if (IS_I965G(dev) || IS_MID(dev))
++ return (pfit_control >> 29) & 0x3;
++
++ /* older chips can only use pipe 1 */
++ return 1;
++}
++
++static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk;
++ struct psb_intel_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
++ bool ok, is_sdvo = false, is_dvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (!connector->encoder
++ || connector->encoder->crtc != crtc)
++ continue;
++
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_DVO:
++ is_dvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ }
++ }
++
++ if (IS_I9XX(dev))
++ refclk = 96000;
++ else
++ refclk = 48000;
++
++ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
++ &clock);
++ if (!ok) {
++ DRM_ERROR("Couldn't find PLL settings for mode!\n");
++ return 0;
++ }
++
++ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
++
++ dpll = DPLL_VGA_MODE_DIS;
++ if (IS_I9XX(dev)) {
++ if (is_lvds) {
++ dpll |= DPLLB_MODE_LVDS;
++ if (IS_POULSBO(dev))
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ } else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++ if (is_sdvo) {
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ if (IS_I945G(dev) ||
++ IS_I945GM(dev) ||
++ IS_POULSBO(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++ }
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 1)) << 16;
++ switch (clock.p2) {
++ case 5:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
++ break;
++ case 7:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
++ break;
++ case 10:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
++ break;
++ case 14:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
++ break;
++ }
++ if (IS_I965G(dev))
++ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
++ } else {
++ if (is_lvds) {
++ dpll |=
++ (1 << (clock.p1 - 1)) <<
++ DPLL_FPA01_P1_POST_DIV_SHIFT;
++ } else {
++ if (clock.p1 == 2)
++ dpll |= PLL_P1_DIVIDE_BY_TWO;
++ else
++ dpll |=
++ (clock.p1 -
++ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
++ if (clock.p2 == 4)
++ dpll |= PLL_P2_DIVIDE_BY_4;
++ }
++ }
++
++ if (is_tv) {
++ /* XXX: just matching BIOS for now */
++/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
++ dpll |= 3;
++ }
++#if 0
++ else if (is_lvds)
++ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
++#endif
++ else
++ dpll |= PLL_REF_INPUT_DREFCLK;
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dspcntr |= DISPLAY_PLANE_ENABLE;
++ pipeconf |= PIPEACONF_ENABLE;
++ dpll |= DPLL_VCO_ENABLE;
++
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++ drm_mode_debug_printmodeline(mode);
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++ u32 lvds = REG_READ(LVDS);
++
++ lvds |=
++ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
++ LVDS_PIPEB_SELECT;
++ /* Set the B0-B3 data pairs corresponding to
++ * whether we're going to
++ * set the DPLLs for dual-channel mode or not.
++ */
++ if (clock.p2 == 7)
++ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++ else
++ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++
++ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
++ * appropriately here, but we need to look more
++ * thoroughly into how panels behave in the two modes.
++ */
++
++ REG_WRITE(LVDS, lvds);
++ REG_READ(LVDS);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ if (IS_I965G(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ REG_WRITE(dpll_md_reg,
++ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
++ ((sdvo_pixel_multiply -
++ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
++ } else {
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ }
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
++ */
++ REG_WRITE(dspsize_reg,
++ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++ REG_WRITE(dsppos_reg, 0);
++ REG_WRITE(pipesrc_reg,
++ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ psb_intel_wait_for_vblank(dev);
++
++ return 0;
++}
++
++/** Loads the palette/gamma unit for the CRTC with the prepared values */
++void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int palreg = PALETTE_A;
++ int i;
++
++ /* The clocks have to be on to load the palette. */
++ if (!crtc->enabled)
++ return;
++
++ switch (psb_intel_crtc->pipe) {
++ case 0:
++ break;
++ case 1:
++ palreg = PALETTE_B;
++ break;
++ case 2:
++ palreg = PALETTE_C;
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++ break;
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return;
++ }
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ for (i = 0; i < 256; i++) {
++ REG_WRITE(palreg + 4 * i,
++ ((psb_intel_crtc->lut_r[i] +
++ psb_intel_crtc->lut_adj[i]) << 16) |
++ ((psb_intel_crtc->lut_g[i] +
++ psb_intel_crtc->lut_adj[i]) << 8) |
++ (psb_intel_crtc->lut_b[i] +
++ psb_intel_crtc->lut_adj[i]));
++ }
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ for (i = 0; i < 256; i++) {
++ dev_priv->save_palette_a[i] =
++ ((psb_intel_crtc->lut_r[i] +
++ psb_intel_crtc->lut_adj[i]) << 16) |
++ ((psb_intel_crtc->lut_g[i] +
++ psb_intel_crtc->lut_adj[i]) << 8) |
++ (psb_intel_crtc->lut_b[i] +
++ psb_intel_crtc->lut_adj[i]);
++ }
++
++ }
++}
++
++#ifndef CONFIG_X86_MRST
++/**
++ * Save HW states of giving crtc
++ */
++static void psb_intel_crtc_save(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++ int pipeA = (psb_intel_crtc->pipe == 0);
++ uint32_t paletteReg;
++ int i;
++
++ DRM_DEBUG("\n");
++
++ if (!crtc_state) {
++ DRM_DEBUG("No CRTC state found\n");
++ return;
++ }
++
++ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
++ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
++ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
++ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
++ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
++ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
++ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
++ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
++ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
++ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
++ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
++ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
++ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
++
++ /*NOTE: DSPSIZE DSPPOS only for psb*/
++ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
++ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
++
++ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
++
++ DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ crtc_state->saveDSPCNTR,
++ crtc_state->savePIPECONF,
++ crtc_state->savePIPESRC,
++ crtc_state->saveFP0,
++ crtc_state->saveFP1,
++ crtc_state->saveDPLL,
++ crtc_state->saveHTOTAL,
++ crtc_state->saveHBLANK,
++ crtc_state->saveHSYNC,
++ crtc_state->saveVTOTAL,
++ crtc_state->saveVBLANK,
++ crtc_state->saveVSYNC,
++ crtc_state->saveDSPSTRIDE,
++ crtc_state->saveDSPSIZE,
++ crtc_state->saveDSPPOS,
++ crtc_state->saveDSPBASE
++ );
++
++ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++ for (i = 0; i < 256; ++i)
++ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
++}
++
++/**
++ * Restore HW states of giving crtc
++ */
++static void psb_intel_crtc_restore(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_psb_private * dev_priv =
++ (struct drm_psb_private *)dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
++ int pipeA = (psb_intel_crtc->pipe == 0);
++ uint32_t paletteReg;
++ int i;
++
++ DRM_DEBUG("\n");
++
++ if (!crtc_state) {
++ DRM_DEBUG("No crtc state\n");
++ return;
++ }
++
++ DRM_DEBUG(
++ "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
++ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
++ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
++ REG_READ(pipeA ? FPA0 : FPB0),
++ REG_READ(pipeA ? FPA1 : FPB1),
++ REG_READ(pipeA ? DPLL_A : DPLL_B),
++ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
++ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
++ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
++ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
++ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
++ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
++ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
++ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
++ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
++ REG_READ(pipeA ? DSPABASE : DSPBBASE)
++ );
++
++ DRM_DEBUG(
++ "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ crtc_state->saveDSPCNTR,
++ crtc_state->savePIPECONF,
++ crtc_state->savePIPESRC,
++ crtc_state->saveFP0,
++ crtc_state->saveFP1,
++ crtc_state->saveDPLL,
++ crtc_state->saveHTOTAL,
++ crtc_state->saveHBLANK,
++ crtc_state->saveHSYNC,
++ crtc_state->saveVTOTAL,
++ crtc_state->saveVBLANK,
++ crtc_state->saveVSYNC,
++ crtc_state->saveDSPSTRIDE,
++ crtc_state->saveDSPSIZE,
++ crtc_state->saveDSPPOS,
++ crtc_state->saveDSPBASE
++ );
++
++
++#if 0
++ if (drm_helper_crtc_in_use(crtc))
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++
++ if (psb_intel_panel_fitter_pipe(dev) == psb_intel_crtc->pipe) {
++ REG_WRITE(PFIT_CONTROL, crtc_state->savePFITCTRL);
++ DRM_DEBUG("write pfit_controle: %x\n", REG_READ(PFIT_CONTROL));
++ }
++#endif
++
++ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
++ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
++ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
++ REG_READ(pipeA ? DPLL_A : DPLL_B);
++ DRM_DEBUG("write dpll: %x\n",
++ REG_READ(pipeA ? DPLL_A : DPLL_B));
++ udelay(150);
++ }
++
++ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
++ REG_READ(pipeA ? FPA0 : FPB0);
++
++ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
++ REG_READ(pipeA ? FPA1 : FPB1);
++
++ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
++ REG_READ(pipeA ? DPLL_A : DPLL_B);
++ udelay(150);
++
++ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
++ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
++ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
++ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
++ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
++ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
++ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
++
++ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
++ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
++
++ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
++ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
++ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++
++ psb_intel_wait_for_vblank(dev);
++
++ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++ for (i = 0; i < 256; ++i)
++ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
++}
++#endif
++
++static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
++ struct drm_file *file_priv,
++ uint32_t handle,
++ uint32_t width, uint32_t height)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
++ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
++ uint32_t temp;
++ size_t addr = 0;
++ uint32_t page_offset;
++ size_t size;
++ void *bo;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ if (IS_MDFLD(dev))
++ return mdfld_intel_crtc_cursor_set(crtc, file_priv, handle, width, height);
++
++ /* if we want to turn of the cursor ignore width and height */
++ if (!handle) {
++ DRM_DEBUG("cursor off\n");
++ /* turn off the cursor */
++ temp = 0;
++ temp |= CURSOR_MODE_DISABLE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, 0);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo) {
++ mode_dev->bo_unpin_for_scanout(dev,
++ psb_intel_crtc->
++ cursor_bo);
++ psb_intel_crtc->cursor_bo = NULL;
++ }
++
++ return 0;
++ }
++
++ /* Currently we only support 64x64 cursors */
++ if (width != 64 || height != 64) {
++ DRM_ERROR("we currently only support 64x64 cursors\n");
++ return -EINVAL;
++ }
++
++ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
++ if (!bo)
++ return -ENOENT;
++
++ ret = mode_dev->bo_pin_for_scanout(dev, bo);
++ if (ret)
++ return ret;
++ size = mode_dev->bo_size(dev, bo);
++ if (size < width * height * 4) {
++ DRM_ERROR("buffer is to small\n");
++ return -ENOMEM;
++ }
++
++ /*insert this bo into gtt*/
++ DRM_DEBUG("%s: map meminfo for hw cursor. handle %x\n",
++ __func__, handle);
++
++ ret = psb_gtt_map_meminfo(dev, (IMG_HANDLE)handle, &page_offset);
++ if (ret) {
++ DRM_ERROR("Can not map meminfo to GTT. handle 0x%x\n", handle);
++ return ret;
++ }
++
++ addr = page_offset << PAGE_SHIFT;
++
++ if (IS_POULSBO(dev))
++ addr += pg->stolen_base;
++
++ psb_intel_crtc->cursor_addr = addr;
++
++ temp = 0;
++ /* set the pipe for the cursor */
++ temp |= (pipe << 28);
++ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, addr);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
++ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
++ psb_intel_crtc->cursor_bo = bo;
++ }
++
++ return 0;
++}
++
++static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t temp = 0;
++ uint32_t adder;
++
++ if (IS_MDFLD(dev))
++ return mdfld_intel_crtc_cursor_move(crtc, x, y);
++
++ if (x < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ x = -x;
++ }
++ if (y < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ y = -y;
++ }
++
++ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++ adder = psb_intel_crtc->cursor_addr;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
++ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ return 0;
++}
++
++static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++ u16 *green, u16 *blue, uint32_t size)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int i;
++
++ if (size != 256)
++ return;
++
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = red[i] >> 8;
++ psb_intel_crtc->lut_g[i] = green[i] >> 8;
++ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++}
++
++/* Returns the clock of the currently programmed mode of the given pipe. */
++static int psb_intel_crtc_clock_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ u32 dpll;
++ u32 fp;
++ struct psb_intel_clock_t clock;
++ bool is_lvds;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
++ else
++ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
++ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ dpll = (pipe == 0) ?
++ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
++
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = (pipe == 0) ?
++ dev_priv->saveFPA0 :
++ dev_priv->saveFPB0;
++ else
++ fp = (pipe == 0) ?
++ dev_priv->saveFPA1 :
++ dev_priv->saveFPB1;
++
++ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
++ }
++
++ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
++ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++
++ if (is_lvds) {
++ clock.p1 =
++ ffs((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT);
++ clock.p2 = 14;
++
++ if ((dpll & PLL_REF_INPUT_MASK) ==
++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++ /* XXX: might not be 66MHz */
++ i8xx_clock(66000, &clock);
++ } else
++ i8xx_clock(48000, &clock);
++ } else {
++ if (dpll & PLL_P1_DIVIDE_BY_TWO)
++ clock.p1 = 2;
++ else {
++ clock.p1 =
++ ((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
++ }
++ if (dpll & PLL_P2_DIVIDE_BY_4)
++ clock.p2 = 4;
++ else
++ clock.p2 = 2;
++
++ i8xx_clock(48000, &clock);
++ }
++
++ /* XXX: It would be nice to validate the clocks, but we can't reuse
++ * i830PllIsValid() because it relies on the xf86_config connector
++ * configuration being accurate, which it isn't necessarily.
++ */
++
++ return clock.dot;
++}
++
++/** Returns the currently programmed mode of the given pipe. */
++struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ struct drm_display_mode *mode;
++ int htot;
++ int hsync;
++ int vtot;
++ int vsync;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
++ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
++ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
++ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ htot = (pipe == 0) ?
++ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
++ hsync = (pipe == 0) ?
++ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
++ vtot = (pipe == 0) ?
++ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
++ vsync = (pipe == 0) ?
++ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
++ }
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
++ mode->hdisplay = (htot & 0xffff) + 1;
++ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
++ mode->hsync_start = (hsync & 0xffff) + 1;
++ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
++ mode->vdisplay = (vtot & 0xffff) + 1;
++ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
++ mode->vsync_start = (vsync & 0xffff) + 1;
++ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++#ifndef CONFIG_X86_MRST
++ kfree(psb_intel_crtc->crtc_state);
++#endif
++ drm_crtc_cleanup(crtc);
++ kfree(psb_intel_crtc);
++}
++
++static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
++ .dpms = psb_intel_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = psb_intel_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs;
++
++const struct drm_crtc_funcs psb_intel_crtc_funcs = {
++#ifndef CONFIG_X86_MRST
++ .save = psb_intel_crtc_save,
++ .restore = psb_intel_crtc_restore,
++#endif
++ .cursor_set = psb_intel_crtc_cursor_set,
++ .cursor_move = psb_intel_crtc_cursor_move,
++ .gamma_set = psb_intel_crtc_gamma_set,
++ .set_config = drm_crtc_helper_set_config,
++ .destroy = psb_intel_crtc_destroy,
++};
++
++
++void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i;
++ uint16_t *r_base, *g_base, *b_base;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ /* We allocate a extra array of drm_connector pointers
++ * for fbdev after the crtc */
++ psb_intel_crtc =
++ kzalloc(sizeof(struct psb_intel_crtc) +
++ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
++ GFP_KERNEL);
++ if (psb_intel_crtc == NULL)
++ return;
++
++#ifndef CONFIG_X86_MRST
++ psb_intel_crtc->crtc_state =
++ kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
++ if (!psb_intel_crtc->crtc_state) {
++ DRM_INFO("Crtc state error: No memory\n");
++ kfree(psb_intel_crtc);
++ return;
++ }
++#endif
++
++ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
++
++ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
++ psb_intel_crtc->pipe = pipe;
++ psb_intel_crtc->plane = pipe;
++
++ r_base = psb_intel_crtc->base.gamma_store;
++ g_base = r_base + 256;
++ b_base = g_base + 256;
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = i;
++ psb_intel_crtc->lut_g[i] = i;
++ psb_intel_crtc->lut_b[i] = i;
++ r_base[i] = i << 8;
++ g_base[i] = i << 8;
++ b_base[i] = i << 8;
++
++ psb_intel_crtc->lut_adj[i] = 0;
++ }
++
++ psb_intel_crtc->mode_dev = mode_dev;
++ psb_intel_crtc->cursor_addr = 0;
++
++ if (IS_MID(dev)) {
++ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
++ } else {
++ drm_crtc_helper_add(&psb_intel_crtc->base,
++ &psb_intel_helper_funcs);
++ }
++
++ /* Setup the array of drm_connector pointer array */
++ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
++ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
++ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
++ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = &psb_intel_crtc->base;
++ dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = &psb_intel_crtc->base;
++ psb_intel_crtc->mode_set.connectors =
++ (struct drm_connector **) (psb_intel_crtc + 1);
++ psb_intel_crtc->mode_set.num_connectors = 0;
++}
++
++int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
++ struct drm_mode_object *drmmode_obj;
++ struct psb_intel_crtc *crtc;
++
++ if (!dev_priv) {
++ DRM_ERROR("called with no initialization\n");
++ return -EINVAL;
++ }
++
++ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
++ DRM_MODE_OBJECT_CRTC);
++
++ if (!drmmode_obj) {
++ DRM_ERROR("no such CRTC id\n");
++ return -EINVAL;
++ }
++
++ crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
++ pipe_from_crtc_id->pipe = crtc->pipe;
++
++ return 0;
++}
++
++struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
++{
++ struct drm_crtc *crtc = NULL;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ if (psb_intel_crtc->pipe == pipe)
++ break;
++ }
++ return crtc;
++}
++
++int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
++{
++ int index_mask = 0;
++ struct drm_connector *connector;
++ int entry = 0;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ if (type_mask & (1 << psb_intel_output->type))
++ index_mask |= (1 << entry);
++ entry++;
++ }
++ return index_mask;
++}
++
++#if 0 /* JB: Should be per device */
++static void psb_intel_setup_outputs(struct drm_device *dev)
++{
++ struct drm_connector *connector;
++
++ psb_intel_crt_init(dev);
++
++ /* Set up integrated LVDS */
++ if (IS_MOBILE(dev) && !IS_I830(dev))
++ psb_intel_lvds_init(dev);
++
++ if (IS_I9XX(dev)) {
++ psb_intel_sdvo_init(dev, SDVOB);
++ psb_intel_sdvo_init(dev, SDVOC);
++ } else
++ psb_intel_dvo_init(dev);
++
++ if (IS_I9XX(dev) && !IS_I915G(dev))
++ psb_intel_tv_init(dev);
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_DVO:
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_LVDS:
++ crtc_mask = (1 << 1);
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++#endif
++
++#if 0 /* JB: Rework framebuffer code into something none device specific */
++static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct psb_intel_framebuffer *psb_intel_fb =
++ to_psb_intel_framebuffer(fb);
++ struct drm_device *dev = fb->dev;
++
++ if (fb->fbdev)
++ intelfb_remove(dev, fb);
++
++ drm_framebuffer_cleanup(fb);
++ drm_gem_object_unreference(fb->mm_private);
++
++ kfree(psb_intel_fb);
++}
++
++static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ struct drm_gem_object *object = fb->mm_private;
++
++ return drm_gem_handle_create(file_priv, object, handle);
++}
++
++static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
++ .destroy = psb_intel_user_framebuffer_destroy,
++ .create_handle = psb_intel_user_framebuffer_create_handle,
++};
++
++struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
++ struct drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++
++ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
++ if (!psb_intel_fb)
++ return NULL;
++
++ if (!drm_framebuffer_init(dev,
++ &psb_intel_fb->base,
++ &psb_intel_fb_funcs))
++ return NULL;
++
++ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
++
++ return &psb_intel_fb->base;
++}
++
++
++static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
++ drm_device
++ *dev,
++ struct
++ drm_file
++ *filp,
++ struct
++ drm_mode_fb_cmd
++ *mode_cmd)
++{
++ struct drm_gem_object *obj;
++
++ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
++ if (!obj)
++ return NULL;
++
++ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
++}
++
++static int psb_intel_insert_new_fb(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct drm_framebuffer *fb,
++ struct drm_mode_fb_cmd *mode_cmd)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++ struct drm_gem_object *obj;
++ struct drm_crtc *crtc;
++
++ psb_intel_fb = to_psb_intel_framebuffer(fb);
++
++ mutex_lock(&dev->struct_mutex);
++ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++
++ if (!obj) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
++ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc->fb == fb) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
++ }
++ }
++ return 0;
++}
++
++static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
++ .resize_fb = psb_intel_insert_new_fb,
++ .fb_create = psb_intel_user_framebuffer_create,
++ .fb_changed = intelfb_probe,
++};
++#endif
++
++#if 0 /* Should be per device */
++void psb_intel_modeset_init(struct drm_device *dev)
++{
++ int num_pipe;
++ int i;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
++
++ if (IS_I965G(dev)) {
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
++ } else {
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++ }
++
++ /* set memory base */
++ /* MRST and PSB should use BAR 2*/
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 2);
++
++ if (IS_MOBILE(dev) || IS_I9XX(dev))
++ num_pipe = 2;
++ else
++ num_pipe = 1;
++ DRM_DEBUG("%d display pipe%s available.\n",
++ num_pipe, num_pipe > 1 ? "s" : "");
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i);
++
++ psb_intel_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev); */
++}
++#endif
++
++void psb_intel_modeset_cleanup(struct drm_device *dev)
++{
++ drm_mode_config_cleanup(dev);
++}
++
++
++/* current intel driver doesn't take advantage of encoders
++ always give back the encoder for the connector
++*/
++struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ return &psb_intel_output->enc;
++}
++
++/* MRST_PLATFORM start */
++
++#if DUMP_REGISTER
++void dump_dc_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dc_registers\n");
++
++
++ if (0x80000000 & REG_READ(0x70008)) {
++ for (i = 0x20a0; i < 0x20af; i += 4) {
++ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0xf014; i < 0xf047; i += 4) {
++ DRM_INFO("jliu7 pipe A dpll register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x60000; i < 0x6005f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe A timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61140; i < 0x61143; i += 4) {
++ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61180; i < 0x6123F; i += 4) {
++ DRM_INFO
++ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61254; i < 0x612AB; i += 4) {
++ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70000; i < 0x70047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE A control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70180; i < 0x7020b; i += 4) {
++ DRM_INFO("jliu7 display A control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71400; i < 0x71403; i += 4) {
++ DRM_INFO
++ ("jliu7 VGA Display Plane Control register=0x%x,"
++ "value=%x\n", i, (unsigned int) REG_READ(i));
++ }
++ }
++
++ if (0x80000000 & REG_READ(0x71008)) {
++ for (i = 0x61000; i < 0x6105f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe B timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71000; i < 0x71047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE B control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71180; i < 0x7120b; i += 4) {
++ DRM_INFO("jliu7 display B control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++ }
++#if 0
++ for (i = 0x70080; i < 0x700df; i += 4) {
++ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++#endif
++
++}
++
++void dump_dsi_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dsi_registers\n");
++
++ for (i = 0xb000; i < 0xb064; i += 4) {
++ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ i = 0xb104;
++ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++}
++#endif /* DUMP_REGISTER */
++
++
++struct mrst_limit_t {
++ struct psb_intel_range_t dot, m, p1;
++};
++
++struct mrst_clock_t {
++ /* derived values */
++ int dot;
++ int m;
++ int p1;
++};
++
++#define MRST_LIMIT_LVDS_100L 0
++#define MRST_LIMIT_LVDS_83 1
++#define MRST_LIMIT_LVDS_100 2
++
++#define MRST_DOT_MIN 19750
++#define MRST_DOT_MAX 120000
++#define MRST_M_MIN_100L 20
++#define MRST_M_MIN_100 10
++#define MRST_M_MIN_83 12
++#define MRST_M_MAX_100L 34
++#define MRST_M_MAX_100 17
++#define MRST_M_MAX_83 20
++#define MRST_P1_MIN 2
++#define MRST_P1_MAX_0 7
++#define MRST_P1_MAX_1 8
++
++static const struct mrst_limit_t mrst_limits[] = {
++ { /* MRST_LIMIT_LVDS_100L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++ { /* MRST_LIMIT_LVDS_83L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
++ },
++ { /* MRST_LIMIT_LVDS_100 */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++};
++
++#define MRST_M_MIN 10
++static const u32 mrst_m_converts[] = {
++ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
++ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
++ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
++};
++
++#define COUNT_MAX 0x10000000
++void mrstWaitForPipeDisable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ if (IS_MRST(dev)) {
++ psb_intel_wait_for_vblank(dev);
++ return;
++ }
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 0)
++ break;
++ }
++
++ PSB_DEBUG_ENTRY("cout = %d. \n", count);
++}
++
++void mrstWaitForPipeEnable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ if (IS_MRST(dev)) {
++ psb_intel_wait_for_vblank(dev);
++ return;
++ }
++
++ /* Wait for for the pipe enable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 1)
++ break;
++ }
++
++ PSB_DEBUG_ENTRY("cout = %d. \n", count);
++}
++
++static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
++{
++ const struct mrst_limit_t *limit;
++ struct drm_device *dev = crtc->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
++ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
++ if (dev_priv->sku_100L)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
++ if (dev_priv->sku_83)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
++ if (dev_priv->sku_100)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
++ } else {
++ limit = NULL;
++ PSB_DEBUG_ENTRY("mrst_limit Wrong display type. \n");
++ }
++
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void mrst_clock(int refclk, struct mrst_clock_t *clock)
++{
++ clock->dot = (refclk * clock->m) / (14 * clock->p1);
++}
++
++void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
++{
++ PSB_DEBUG_ENTRY
++ ("%s: dotclock = %d, m = %d, p1 = %d. \n",
++ prefix, clock->dot, clock->m, clock->p1);
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE. Divisor values are the actual divisors for
++ */
++static bool
++mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++ struct mrst_clock_t *best_clock)
++{
++ struct mrst_clock_t clock;
++ const struct mrst_limit_t *limit = mrst_limit(crtc);
++ int err = target;
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ mrst_clock(refclk, &clock);
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
++
++ return err != target;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++ PSB_DEBUG_ENTRY("mode = %d, pipe = %d \n", mode, pipe);
++
++ if (IS_MDFLD(dev))
++ return mdfld_crtc_dpms(crtc, mode);
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++ if (dev_priv->iLVDS_enable || \
++ !dev_priv->dsi_plane_pipe_control) {
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++ if (dev_priv->iLVDS_enable || \
++ !dev_priv->dsi_plane_pipe_control) {
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for for the pipe disable to take effect. */
++ mrstWaitForPipeDisable(dev);
++ }
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++
++ break;
++ }
++
++#if DUMP_REGISTER
++ dump_dc_registers(dev);
++#endif /* DUMP_REGISTER */
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3FFF);
++ REG_WRITE(DSPFW1, 0x3F88080A);
++ REG_WRITE(DSPFW2, 0x0b060808);
++ REG_WRITE(DSPFW3, 0x0);
++ REG_WRITE(DSPFW4, 0x08030404);
++ REG_WRITE(DSPFW5, 0x04040404);
++ REG_WRITE(DSPFW6, 0x78);
++ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
++ /* Must write Bit 14 of the Chicken Bit Register */
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static int mrst_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk = 0;
++ struct mrst_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
++ bool ok, is_sdvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ bool is_mipi = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct psb_intel_output *psb_intel_output = NULL;
++ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
++ struct drm_encoder *encoder;
++
++ PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
++
++ if (IS_MDFLD(dev))
++ return mdfld_crtc_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ memcpy(&psb_intel_crtc->saved_mode,
++ mode,
++ sizeof(struct drm_display_mode));
++ memcpy(&psb_intel_crtc->saved_adjusted_mode,
++ adjusted_mode,
++ sizeof(struct drm_display_mode));
++
++ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
++
++ if (encoder->crtc != crtc)
++ continue;
++
++ psb_intel_output = enc_to_psb_intel_output(encoder);
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ case INTEL_OUTPUT_MIPI:
++ is_mipi = true;
++ break;
++ }
++ }
++
++ if (is_lvds | is_mipi) {
++ /*FIXME JLIU7 Get panel power delay parameters from
++ config data */
++ /*REG_WRITE(0x61208, 0x25807d0); */
++ /*REG_WRITE(0x6120c, 0x1f407d0); */
++ /*REG_WRITE(0x61210, 0x270f04); */
++ }
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ REG_WRITE(pipesrc_reg,
++ ((mode->crtc_hdisplay - 1) << 16) |
++ (mode->crtc_vdisplay - 1));
++
++ if (psb_intel_output)
++ drm_connector_property_get_value(&psb_intel_output->base,
++ dev->mode_config.scaling_mode_property, &scalingType);
++
++ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
++ /* Moorestown doesn't have register support for centering so
++ * we need to mess with the h/vblank and h/vsync start and
++ * ends to get centering */
++ int offsetX = 0, offsetY = 0;
++
++ offsetX = (adjusted_mode->crtc_hdisplay -
++ mode->crtc_hdisplay) / 2;
++ offsetY = (adjusted_mode->crtc_vdisplay -
++ mode->crtc_vdisplay) / 2;
++
++ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg,
++ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
++ REG_WRITE(hsync_reg,
++ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
++ REG_WRITE(vblank_reg,
++ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
++ REG_WRITE(vsync_reg,
++ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
++ } else {
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ }
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr |= DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
++ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
++
++ if (is_mipi)
++ goto mrst_crtc_mode_set_exit;
++
++ if (dev_priv->sku_100L)
++ refclk = 100000;
++ else if (dev_priv->sku_83)
++ refclk = 166000;
++ else if (dev_priv->sku_100)
++ refclk = 200000;
++
++ dpll = 0; /*BIT16 = 0 for 100MHz reference */
++
++ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
++
++ if (!ok) {
++ PSB_DEBUG_ENTRY("mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
++ } else {
++ PSB_DEBUG_ENTRY("mrst_crtc_mode_set pixel clock = %d,"
++ "m = %x, p1 = %x. \n", clock.dot, clock.m,
++ clock.p1);
++ }
++
++ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
++
++ dpll |= DPLL_VGA_MODE_DIS;
++
++
++ dpll |= DPLL_VCO_ENABLE;
++
++ if (is_lvds)
++ dpll |= DPLLA_MODE_LVDS;
++ else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++
++ if (is_sdvo) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 2)) << 17;
++
++ dpll |= DPLL_VCO_ENABLE;
++
++ mrstPrintPll("chosen", &clock);
++
++#if 0
++ if (!xf86ModesEqual(mode, adjusted_mode)) {
++ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
++ "Adjusted mode for pipe %c:\n",
++ pipe == 0 ? 'A' : 'B');
++ xf86PrintModeline(pScrn->scrnIndex, mode);
++ }
++ i830PrintPll("chosen", &clock);
++#endif
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++
++ /*lvdsport = 0x803003c0;*/
++ /*lvdsport = 0x813003c0;*/
++ lvdsport = dev_priv->gct_data.Panel_Port_Control;
++
++ /*REG_WRITE(LVDS, lvdsport); */
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe enable to take effect. */
++ mrstWaitForPipeEnable(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++ psb_intel_wait_for_vblank(dev);
++
++mrst_crtc_mode_set_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
++ .dpms = mrst_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = mrst_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++/* MRST_PLATFORM end */
++
++#include "psb_intel_display2.c"
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_display.h
+@@ -0,0 +1,25 @@
++/* copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#ifndef _INTEL_DISPLAY_H_
++#define _INTEL_DISPLAY_H_
++
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_display2.c
+@@ -0,0 +1,1304 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include "mdfld_dsi_dbi.h"
++#ifdef CONFIG_MDFLD_DSI_DPU
++#include "mdfld_dsi_dbi_dpu.h"
++#endif
++
++/* MDFLD_PLATFORM start */
++void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
++{
++ int count, temp;
++ u32 pipeconf_reg = PIPEACONF;
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 1:
++ pipeconf_reg = PIPEBCONF;
++ break;
++ case 2:
++ pipeconf_reg = PIPECCONF;
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return;
++ }
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_PIPE_STATE) == 0)
++ break;
++ }
++
++ PSB_DEBUG_ENTRY("cout = %d. \n", count);
++}
++
++void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
++{
++ int count, temp;
++ u32 pipeconf_reg = PIPEACONF;
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 1:
++ pipeconf_reg = PIPEBCONF;
++ break;
++ case 2:
++ pipeconf_reg = PIPECCONF;
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return;
++ }
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe enable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_PIPE_STATE) == 1)
++ break;
++ }
++
++ PSB_DEBUG_ENTRY("cout = %d. \n", count);
++}
++
++
++static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
++ struct drm_file *file_priv,
++ uint32_t handle,
++ uint32_t width, uint32_t height)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt * pg = dev_priv->pg;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t control = CURACNTR;
++ uint32_t base = CURABASE;
++ uint32_t temp;
++ size_t addr = 0;
++ uint32_t page_offset;
++ size_t size;
++ void *bo;
++ int ret;
++#if MDFLD_JLIU7_DPU
++ uint32_t *cursor_addr = &dev_priv->cursor_addr_0;
++ uint32_t *cursor_cntr = &dev_priv->cursor_cntr_0;
++#endif /* MDFLD_JLIU7_DPU */
++
++ DRM_DEBUG("\n");
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 1:
++ control = CURBCNTR;
++ base = CURBBASE;
++ break;
++ case 2:
++ control = CURCCNTR;
++ base = CURCBASE;
++#if MDFLD_JLIU7_DPU
++ cursor_addr = &dev_priv->cursor_addr_2;
++ cursor_cntr = &dev_priv->cursor_cntr_2;
++#endif /* MDFLD_JLIU7_DPU */
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++ break;
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return -EINVAL;
++ }
++
++#if 1 // MDFLD_HDMI_JLIU7_HACKS2 /* Debug HDMI - Can't enalbe HDMI */
++ if (pipe != 0)
++ return 0;
++#endif /* MDFLD_HDMI_JLIU7_HACKS2 */ /* Debug HDMI - Can't enalbe HDMI */
++ /* if we want to turn of the cursor ignore width and height */
++ if (!handle) {
++ DRM_DEBUG("cursor off\n");
++ /* turn off the cursor */
++ temp = 0;
++ temp |= CURSOR_MODE_DISABLE;
++
++#if MDFLD_JLIU7_DPU
++ *cursor_cntr = temp;
++#endif /* MDFLD_JLIU7_DPU */
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, 0);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo) {
++ mode_dev->bo_unpin_for_scanout(dev,
++ psb_intel_crtc->
++ cursor_bo);
++ psb_intel_crtc->cursor_bo = NULL;
++ }
++
++ return 0;
++ }
++
++ /* Currently we only support 64x64 cursors */
++ if (width != 64 || height != 64) {
++ DRM_ERROR("we currently only support 64x64 cursors\n");
++ return -EINVAL;
++ }
++
++ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
++ if (!bo)
++ return -ENOENT;
++
++ ret = mode_dev->bo_pin_for_scanout(dev, bo);
++ if (ret)
++ return ret;
++ size = mode_dev->bo_size(dev, bo);
++ if (size < width * height * 4) {
++ DRM_ERROR("buffer is to small\n");
++ return -ENOMEM;
++ }
++
++ /*insert this bo into gtt*/
++// DRM_INFO("%s: map meminfo for hw cursor. handle %x, pipe = %d \n", __FUNCTION__, handle, pipe);
++
++ ret = psb_gtt_map_meminfo(dev, (IMG_HANDLE)handle, &page_offset);
++ if(ret) {
++ DRM_ERROR("Can not map meminfo to GTT. handle 0x%x\n", handle);
++ return ret;
++ }
++
++ addr = page_offset << PAGE_SHIFT;
++
++ if(IS_POULSBO(dev)) {
++ addr += pg->stolen_base;
++ }
++
++ psb_intel_crtc->cursor_addr = addr;
++
++ temp = 0;
++ /* set the pipe for the cursor */
++ temp |= (pipe << 28);
++ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++#if MDFLD_JLIU7_DPU
++ *cursor_cntr = temp;
++ *cursor_addr = addr;
++#endif /* MDFLD_JLIU7_DPU */
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, addr);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
++ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
++ psb_intel_crtc->cursor_bo = bo;
++ }
++
++ return 0;
++}
++
++static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++ struct drm_device *dev = crtc->dev;
++#ifndef CONFIG_MDFLD_DSI_DPU
++#if MDFLD_JLIU7_DSR
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++#endif /* MDFLD_JLIU7_DSR */
++#else
++ struct psb_drm_dpu_rect rect;
++#endif
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t pos = CURAPOS;
++ uint32_t base = CURABASE;
++ uint32_t temp = 0;
++ uint32_t addr;
++
++ switch (pipe) {
++ case 0:
++#ifndef CONFIG_MDFLD_DSI_DPU
++#if MDFLD_JLIU7_DSR
++#if MDFLD_JLIU7_DPU
++ dev_priv->cursor_0_x0 = x;
++ dev_priv->cursor_0_y0 = y;
++#endif /* MDFLD_JLIU7_DPU */
++ if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_0))
++ mdfld_dsi_dbi_exit_dsr (dev, MDFLD_DSR_CURSOR_0);
++#endif /* MDFLD_JLIU7_DSR */
++#else /*CONFIG_MDFLD_DSI_DPU*/
++ rect.x = x;
++ rect.y = y;
++
++ mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORA, &rect);
++ mdfld_dpu_exit_dsr(dev);
++#endif
++ break;
++ case 1:
++ pos = CURBPOS;
++ base = CURBBASE;
++ break;
++ case 2:
++#ifndef CONFIG_MDFLD_DSI_DPU
++#if MDFLD_JLIU7_DSR
++#if MDFLD_JLIU7_DPU
++ dev_priv->cursor_2_x0 = x;
++ dev_priv->cursor_2_y0 = y;
++#endif /* MDFLD_JLIU7_DPU */
++ if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_2))
++ mdfld_dsi_dbi_exit_dsr (dev, MDFLD_DSR_CURSOR_2);
++#endif /* MDFLD_JLIU7_DSR */
++
++#else /*CONFIG_MDFLD_DSI_DPU*/
++ mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORC, &rect);
++ mdfld_dpu_exit_dsr(dev);
++#endif
++ pos = CURCPOS;
++ base = CURCBASE;
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++ break;
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return -EINVAL;
++ }
++
++#if MDFLD_JLIU7_DPU
++ if (pipe != 1)
++ return 0;
++#endif /* MDFLD_JLIU7_DPU */
++#if 1 // MDFLD_HDMI_JLIU7_HACKS2 /* Debug HDMI - Can't enalbe HDMI */
++ if (pipe != 0)
++ return 0;
++#endif /* MDFLD_HDMI_JLIU7_HACKS2 */ /* Debug HDMI - Can't enalbe HDMI */
++ if (x < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ x = -x;
++ }
++ if (y < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ y = -y;
++ }
++
++ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++ addr = psb_intel_crtc->cursor_addr;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(pos, temp);
++ REG_WRITE(base, addr);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ return 0;
++}
++
++int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ unsigned long Start, Offset;
++ int dspbase = DSPABASE;
++ int dspsurf = DSPASURF;
++ int dspstride = DSPASTRIDE;
++ int dspcntr_reg = DSPACNTR;
++ u32 dspcntr;
++ int ret = 0;
++#if MDFLD_JLIU7_DPU
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 *p_offset = &dev_priv->offset_0;
++ u32 *p_bpp = &dev_priv->bpp_0;
++#endif /* MDFLD_JLIU7_DPU */
++
++ PSB_DEBUG_ENTRY("pipe = 0x%x. \n", pipe);
++
++ /* no fb bound */
++ if (!crtc->fb) {
++ PSB_DEBUG_ENTRY("No FB bound\n");
++ return 0;
++ }
++
++ switch (pipe) {
++ case 0:
++ if (IS_MID(dev))
++ dspbase = MRST_DSPABASE;
++ break;
++ case 1:
++ dspbase = DSPBBASE;
++ dspsurf = DSPBSURF;
++ dspstride = DSPBSTRIDE;
++ dspcntr_reg = DSPBCNTR;
++ if (IS_MDFLD(dev))
++ dspbase = MRST_DSPBBASE;
++ break;
++ case 2:
++ dspbase = DSPCBASE;
++ dspsurf = DSPCSURF;
++ dspstride = DSPCSTRIDE;
++ dspcntr_reg = DSPCCNTR;
++#if MDFLD_JLIU7_DPU
++ p_offset = &dev_priv->offset_2;
++ p_bpp = &dev_priv->bpp_2;
++#endif /* MDFLD_JLIU7_DPU */
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++ break;
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return -EINVAL;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ Start = mode_dev->bo_offset(dev, psbfb);
++ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
++#if MDFLD_JLIU7_DPU
++ *p_offset = Offset;
++ *p_bpp = crtc->fb->bits_per_pixel / 8;
++#endif /* MDFLD_JLIU7_DPU */
++
++ REG_WRITE(dspstride, crtc->fb->pitch);
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++ switch (crtc->fb->bits_per_pixel) {
++ case 8:
++ dspcntr |= DISPPLANE_8BPP;
++ break;
++ case 16:
++ if (crtc->fb->depth == 15)
++ dspcntr |= DISPPLANE_15_16BPP;
++ else
++ dspcntr |= DISPPLANE_16BPP;
++ break;
++ case 24:
++ case 32:
++ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++ break;
++ default:
++ DRM_ERROR("Unknown color depth\n");
++ ret = -EINVAL;
++ goto psb_intel_pipe_set_base_exit;
++ }
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ PSB_DEBUG_ENTRY("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
++
++ if (IS_I965G(dev) || IS_MID(dev)) {
++ REG_WRITE(dspbase, Offset);
++ REG_READ(dspbase);
++ REG_WRITE(dspsurf, Start);
++ REG_READ(dspsurf);
++ } else {
++ REG_WRITE(dspbase, Start + Offset);
++ REG_READ(dspbase);
++ }
++
++psb_intel_pipe_set_base_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return ret;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = MRST_DPLL_A;
++ int dspcntr_reg = DSPACNTR;
++ int dspbase_reg = MRST_DSPABASE;
++ int pipeconf_reg = PIPEACONF;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 pipeconf = dev_priv->pipeconf;
++ u32 dspcntr = dev_priv->dspcntr;
++ u32 temp;
++ bool enabled;
++ int timeout = 0;
++
++ PSB_DEBUG_ENTRY("mode = %d, pipe = %d \n", mode, pipe);
++
++/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
++/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 1:
++ dpll_reg = DPLL_B;
++ dspcntr_reg = DSPBCNTR;
++ dspbase_reg = MRST_DSPBBASE;
++ pipeconf_reg = PIPEBCONF;
++ pipeconf = dev_priv->pipeconf1;
++ dspcntr = dev_priv->dspcntr1;
++ if (IS_MDFLD(dev))
++ dpll_reg = MDFLD_DPLL_B;
++ break;
++ case 2:
++ dpll_reg = MRST_DPLL_A;
++ dspcntr_reg = DSPCCNTR;
++ dspbase_reg = MDFLD_DSPCBASE;
++ pipeconf_reg = PIPECCONF;
++ pipeconf = dev_priv->pipeconf2;
++ dspcntr = dev_priv->dspcntr2;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++ break;
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return;
++ }
++
++#if (DSI_TPO_864x480 || DSI_TPO_864x480_2) /* get from spec. */
++ switch (mode) {
++ case DRM_MODE_DPMS_OFF:
++ return;
++ }
++#endif //HACKS_JLIU7
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ /* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
++ if (temp & MDFLD_PWR_GATE_EN) {
++ temp &= ~MDFLD_PWR_GATE_EN;
++ REG_WRITE(dpll_reg, temp);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++ }
++
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++
++ /* wait for DSI PLL to lock */
++ while ((timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
++ udelay(150);
++ timeout ++;
++ }
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0) {
++ REG_WRITE(pipeconf_reg, pipeconf);
++
++ /* Wait for for the pipe enable to take effect. */
++ mdfldWaitForPipeEnable(dev, pipe);
++ }
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ if (pipe != 1)
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ /* FIXME_JLIU7 MDFLD_PO revisit */
++ /* Wait for vblank for the disable to take effect */
++// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ temp &= ~PIPEACONF_ENABLE;
++ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
++ REG_WRITE(pipeconf_reg, temp);
++// REG_WRITE(pipeconf_reg, 0);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe disable to take effect. */
++ mdfldWaitForPipeDisable(dev, pipe);
++ }
++
++ temp = REG_READ(dpll_reg);
++ if (temp & DPLL_VCO_ENABLE) {
++ if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
++ || (pipe == 1)){
++ temp &= ~(DPLL_VCO_ENABLE);
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to turn off. */
++ /* FIXME_MDFLD PO may need more delay */
++ udelay(500);
++ }
++ }
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ if (!(temp & MDFLD_PWR_GATE_EN)) {
++ /* gating power of DPLL */
++ REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(5000);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++ break;
++ }
++
++#if DUMP_REGISTER
++ dump_dc_registers(dev);
++#endif /* DUMP_REGISTER */
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++#if MDFLD_PO_WATERMARK
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3FFF);
++ REG_WRITE(DSPFW1, 0x3F88080A);
++ REG_WRITE(DSPFW2, 0x0b060808);
++ REG_WRITE(DSPFW3, 0x0);
++ REG_WRITE(DSPFW4, 0x08030404);
++ REG_WRITE(DSPFW5, 0x04040404);
++ REG_WRITE(DSPFW6, 0x78);
++ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
++ /* Must write Bit 14 of the Chicken Bit Register */
++#endif /* MDFLD_PO_WATERMARK */
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++#define MDFLD_LIMT_DPLL_19 0
++#define MDFLD_LIMT_DPLL_25 1
++#define MDFLD_LIMT_DPLL_83 2
++#define MDFLD_LIMT_DPLL_100 3
++#define MDFLD_LIMT_DSIPLL_19 4
++#define MDFLD_LIMT_DSIPLL_25 5
++#define MDFLD_LIMT_DSIPLL_83 6
++#define MDFLD_LIMT_DSIPLL_100 7
++
++#define MDFLD_DOT_MIN 19750 /* FIXME_MDFLD JLIU7 need to find out min & max for MDFLD */
++#define MDFLD_DOT_MAX 120000
++#define MDFLD_DPLL_M_MIN_19 113
++#define MDFLD_DPLL_M_MAX_19 155
++#define MDFLD_DPLL_P1_MIN_19 2
++#define MDFLD_DPLL_P1_MAX_19 10
++#define MDFLD_DPLL_M_MIN_25 101
++#define MDFLD_DPLL_M_MAX_25 130
++#define MDFLD_DPLL_P1_MIN_25 2
++#define MDFLD_DPLL_P1_MAX_25 10
++#define MDFLD_DPLL_M_MIN_83 64
++#define MDFLD_DPLL_M_MAX_83 64
++#define MDFLD_DPLL_P1_MIN_83 2
++#define MDFLD_DPLL_P1_MAX_83 2
++#define MDFLD_DPLL_M_MIN_100 64
++#define MDFLD_DPLL_M_MAX_100 64
++#define MDFLD_DPLL_P1_MIN_100 2
++#define MDFLD_DPLL_P1_MAX_100 2
++#define MDFLD_DSIPLL_M_MIN_19 131
++#define MDFLD_DSIPLL_M_MAX_19 175
++#define MDFLD_DSIPLL_P1_MIN_19 3
++#define MDFLD_DSIPLL_P1_MAX_19 8
++#define MDFLD_DSIPLL_M_MIN_25 97
++#define MDFLD_DSIPLL_M_MAX_25 140
++#define MDFLD_DSIPLL_P1_MIN_25 3
++#define MDFLD_DSIPLL_P1_MAX_25 9
++#define MDFLD_DSIPLL_M_MIN_83 33
++#define MDFLD_DSIPLL_M_MAX_83 92
++#define MDFLD_DSIPLL_P1_MIN_83 2
++#define MDFLD_DSIPLL_P1_MAX_83 3
++#define MDFLD_DSIPLL_M_MIN_100 97
++#define MDFLD_DSIPLL_M_MAX_100 140
++#define MDFLD_DSIPLL_P1_MIN_100 3
++#define MDFLD_DSIPLL_P1_MAX_100 9
++
++static const struct mrst_limit_t mdfld_limits[] = {
++ { /* MDFLD_LIMT_DPLL_19 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
++ .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
++ },
++ { /* MDFLD_LIMT_DPLL_25 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
++ .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
++ },
++ { /* MDFLD_LIMT_DPLL_83 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
++ .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
++ },
++ { /* MDFLD_LIMT_DPLL_100 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
++ .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
++ },
++ { /* MDFLD_LIMT_DSIPLL_19 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
++ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
++ },
++ { /* MDFLD_LIMT_DSIPLL_25 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
++ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
++ },
++ { /* MDFLD_LIMT_DSIPLL_83 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
++ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
++ },
++ { /* MDFLD_LIMT_DSIPLL_100 */
++ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++ .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
++ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
++ },
++};
++
++#define MDFLD_M_MIN 21
++#define MDFLD_M_MAX 180
++static const u32 mdfld_m_converts[] = {
++/* M configuration table from 9-bit LFSR table */
++ 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
++ 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
++ 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
++ 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
++ 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
++ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
++ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
++ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
++ 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
++ 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
++ 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
++ 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
++ 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
++ 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
++ 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
++ 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
++};
++
++static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
++{
++ const struct mrst_limit_t *limit;
++ struct drm_device *dev = crtc->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
++ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
++ if ((dev_priv->ksel == KSEL_CRYSTAL_19) || (dev_priv->ksel == KSEL_BYPASS_19))
++ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
++ else if (dev_priv->ksel == KSEL_BYPASS_25)
++ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
++ else if ((dev_priv->ksel == KSEL_BYPASS_83_100) && dev_priv->sku_83)
++ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
++ else if ((dev_priv->ksel == KSEL_BYPASS_83_100) && (dev_priv->sku_100L || dev_priv->sku_100))
++ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
++ } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
++ if ((dev_priv->ksel == KSEL_CRYSTAL_19) || (dev_priv->ksel == KSEL_BYPASS_19))
++ limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
++ else if (dev_priv->ksel == KSEL_BYPASS_25)
++ limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
++ else if ((dev_priv->ksel == KSEL_BYPASS_83_100) && dev_priv->sku_83)
++ limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
++ else if ((dev_priv->ksel == KSEL_BYPASS_83_100) && (dev_priv->sku_100L || dev_priv->sku_100))
++ limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
++ } else {
++ limit = NULL;
++ PSB_DEBUG_ENTRY("mdfld_limit Wrong display type. \n");
++ }
++
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
++{
++ clock->dot = (refclk * clock->m) / clock->p1;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE. Divisor values are the actual divisors for
++ */
++static bool
++mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++ struct mrst_clock_t *best_clock)
++{
++ struct mrst_clock_t clock;
++ const struct mrst_limit_t *limit = mdfld_limit(crtc);
++ int err = target;
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ PSB_DEBUG_ENTRY("mdfldFindBestPLL target = %d,"
++ "m_min = %d, m_max = %d, p_min = %d, p_max = %d. \n", target, limit->m.min, limit->m.max, limit->p1.min, limit->p1.max);
++
++ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ mdfld_clock(refclk, &clock);
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ PSB_DEBUG_ENTRY("mdfldFindBestPLL target = %d,"
++ "m = %d, p = %d. \n", target, best_clock->m, best_clock->p1);
++ PSB_DEBUG_ENTRY("mdfldFindBestPLL err = %d.\n", err);
++
++ return err != target;
++}
++
++static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = MRST_FPA0;
++ int dpll_reg = MRST_DPLL_A;
++ int dspcntr_reg = DSPACNTR;
++ int pipeconf_reg = PIPEACONF;
++ int htot_reg = HTOTAL_A;
++ int hblank_reg = HBLANK_A;
++ int hsync_reg = HSYNC_A;
++ int vtot_reg = VTOTAL_A;
++ int vblank_reg = VBLANK_A;
++ int vsync_reg = VSYNC_A;
++ int dspsize_reg = DSPASIZE;
++ int dsppos_reg = DSPAPOS;
++ int pipesrc_reg = PIPEASRC;
++ u32 *pipeconf = &dev_priv->pipeconf;
++ u32 *dspcntr = &dev_priv->dspcntr;
++ int refclk = 0;
++ int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp = 0;
++ struct mrst_clock_t clock;
++ bool ok;
++ u32 dpll = 0, fp = 0;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct psb_intel_output *psb_intel_output = NULL;
++ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
++ struct drm_encoder *encoder;
++ int timeout = 0;
++
++ PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 1:
++ fp_reg = FPB0;
++ dpll_reg = DPLL_B;
++ dspcntr_reg = DSPBCNTR;
++ pipeconf_reg = PIPEBCONF;
++ htot_reg = HTOTAL_B;
++ hblank_reg = HBLANK_B;
++ hsync_reg = HSYNC_B;
++ vtot_reg = VTOTAL_B;
++ vblank_reg = VBLANK_B;
++ vsync_reg = VSYNC_B;
++ dspsize_reg = DSPBSIZE;
++ dsppos_reg = DSPBPOS;
++ pipesrc_reg = PIPEBSRC;
++ pipeconf = &dev_priv->pipeconf1;
++ dspcntr = &dev_priv->dspcntr1;
++ if (IS_MDFLD(dev)) {
++ fp_reg = MDFLD_DPLL_DIV0;
++ dpll_reg = MDFLD_DPLL_B;
++ }
++ break;
++ case 2:
++ dpll_reg = MRST_DPLL_A;
++ dspcntr_reg = DSPCCNTR;
++ pipeconf_reg = PIPECCONF;
++ htot_reg = HTOTAL_C;
++ hblank_reg = HBLANK_C;
++ hsync_reg = HSYNC_C;
++ vtot_reg = VTOTAL_C;
++ vblank_reg = VBLANK_C;
++ vsync_reg = VSYNC_C;
++ dspsize_reg = DSPCSIZE;
++ dsppos_reg = DSPCPOS;
++ pipesrc_reg = PIPECSRC;
++ pipeconf = &dev_priv->pipeconf2;
++ dspcntr = &dev_priv->dspcntr2;
++ break;
++#if MDFLD_WLD_JLIU7
++ case 3:
++ break;
++#endif /* MDFLD_WLD_JLIU7 */
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return 0;
++ }
++
++ PSB_DEBUG_ENTRY("adjusted_hdisplay = %d\n",
++ adjusted_mode->hdisplay);
++ PSB_DEBUG_ENTRY("adjusted_vdisplay = %d\n",
++ adjusted_mode->vdisplay);
++ PSB_DEBUG_ENTRY("adjusted_hsync_start = %d\n",
++ adjusted_mode->hsync_start);
++ PSB_DEBUG_ENTRY("adjusted_hsync_end = %d\n",
++ adjusted_mode->hsync_end);
++ PSB_DEBUG_ENTRY("adjusted_htotal = %d\n",
++ adjusted_mode->htotal);
++ PSB_DEBUG_ENTRY("adjusted_vsync_start = %d\n",
++ adjusted_mode->vsync_start);
++ PSB_DEBUG_ENTRY("adjusted_vsync_end = %d\n",
++ adjusted_mode->vsync_end);
++ PSB_DEBUG_ENTRY("adjusted_vtotal = %d\n",
++ adjusted_mode->vtotal);
++ PSB_DEBUG_ENTRY("adjusted_clock = %d\n",
++ adjusted_mode->clock);
++ PSB_DEBUG_ENTRY("hdisplay = %d\n",
++ mode->hdisplay);
++ PSB_DEBUG_ENTRY("vdisplay = %d\n",
++ mode->vdisplay);
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
++ memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
++
++ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
++
++ if (encoder->crtc != crtc)
++ continue;
++
++ psb_intel_output = enc_to_psb_intel_output(encoder);
++
++ PSB_DEBUG_ENTRY("output->type = 0x%x \n", psb_intel_output->type);
++
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ case INTEL_OUTPUT_MIPI:
++ is_mipi = true;
++ break;
++ case INTEL_OUTPUT_MIPI2:
++ is_mipi2 = true;
++ break;
++ case INTEL_OUTPUT_HDMI:
++ is_hdmi = true;
++ break;
++#if MDFLD_WLD_JLIU7
++ case INTEL_OUTPUT_WLD:
++ return 0;
++#endif /* MDFLD_WLD_JLIU7 */
++ }
++ }
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
++ */
++ REG_WRITE(dspsize_reg,
++ ((mode->crtc_vdisplay - 1) << 16) | (mode->crtc_hdisplay - 1));
++ REG_WRITE(dsppos_reg, 0);
++ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
++
++ if (psb_intel_output)
++ drm_connector_property_get_value(&psb_intel_output->base,
++ dev->mode_config.scaling_mode_property, &scalingType);
++
++ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
++ /*Moorestown doesn't have register support for centering so we need to
++ mess with the h/vblank and h/vsync start and ends to get centering*/
++ int offsetX = 0, offsetY = 0;
++
++ offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
++ offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
++
++ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
++ } else {
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ }
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ /* setup pipeconf */
++ *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
++
++ /* Set up the display plane register */
++ *dspcntr = REG_READ(dspcntr_reg);
++ *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
++ *dspcntr |= DISPLAY_PLANE_ENABLE;
++/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_BOTTOM; */
++/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_GAMMA_ENABLE; */
++
++ if (is_mipi2)
++ {
++ goto mrst_crtc_mode_set_exit;
++ }
++/* FIXME JLIU7 Add MDFLD HDMI supports */
++/* FIXME_MDFLD JLIU7 DSIPLL clock *= 8? */
++/* FIXME_MDFLD JLIU7 need to revist for dual MIPI supports */
++ clk = adjusted_mode->clock;
++
++ if (is_hdmi) {
++ if ((dev_priv->ksel == KSEL_CRYSTAL_19) || (dev_priv->ksel == KSEL_BYPASS_19))
++ {
++ refclk = 19200;
++
++ if (is_mipi || is_mipi2)
++ {
++ clk_n = 1, clk_p2 = 8;
++ } else if (is_hdmi) {
++ clk_n = 1, clk_p2 = 10;
++ }
++ } else if (dev_priv->ksel == KSEL_BYPASS_25) {
++ refclk = 25000;
++
++ if (is_mipi || is_mipi2)
++ {
++ clk_n = 1, clk_p2 = 8;
++ } else if (is_hdmi) {
++ clk_n = 1, clk_p2 = 10;
++ }
++ } else if ((dev_priv->ksel == KSEL_BYPASS_83_100) && dev_priv->sku_83) {
++ refclk = 83000;
++
++ if (is_mipi || is_mipi2)
++ {
++ clk_n = 4, clk_p2 = 8;
++ } else if (is_hdmi) {
++ clk_n = 4, clk_p2 = 10;
++ }
++ } else if ((dev_priv->ksel == KSEL_BYPASS_83_100) && (dev_priv->sku_100L || dev_priv->sku_100)) {
++ refclk = 100000;
++ if (is_mipi || is_mipi2)
++ {
++ clk_n = 4, clk_p2 = 8;
++ } else if (is_hdmi) {
++ clk_n = 4, clk_p2 = 10;
++ }
++ }
++
++ if (is_mipi)
++ clk_byte = dev_priv->bpp / 8;
++ else if (is_mipi2)
++ clk_byte = dev_priv->bpp2 / 8;
++
++ clk_tmp = clk * clk_n * clk_p2 * clk_byte;
++
++ PSB_DEBUG_ENTRY("clk = %d, clk_n = %d, clk_p2 = %d. \n", clk, clk_n, clk_p2);
++ PSB_DEBUG_ENTRY("adjusted_mode->clock = %d, clk_tmp = %d. \n", adjusted_mode->clock, clk_tmp);
++
++ ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
++
++ if (!ok) {
++#if 0 /* FIXME JLIU7 */
++ DRM_ERROR("Couldn't find PLL settings for mode!\n");
++ return;
++#endif /* FIXME JLIU7 */
++ DRM_ERROR
++ ("mdfldFindBestPLL fail in mdfld_crtc_mode_set. \n");
++ } else {
++ m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
++
++ PSB_DEBUG_ENTRY("dot clock = %d,"
++ "m = %d, p1 = %d, m_conv = %d. \n", clock.dot, clock.m,
++ clock.p1, m_conv);
++ }
++
++ dpll = REG_READ(dpll_reg);
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ dpll &= ~DPLL_VCO_ENABLE;
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++
++ /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++
++ /* reset M1, N1 & P1 */
++ REG_WRITE(fp_reg, 0);
++ dpll &= ~MDFLD_P1_MASK;
++ REG_WRITE(dpll_reg, dpll);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++ }
++
++ /* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
++ if (dpll & MDFLD_PWR_GATE_EN) {
++ dpll &= ~MDFLD_PWR_GATE_EN;
++ REG_WRITE(dpll_reg, dpll);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++ }
++
++ dpll = 0;
++
++#if 0 /* FIXME revisit later */
++ if ((dev_priv->ksel == KSEL_CRYSTAL_19) || (dev_priv->ksel == KSEL_BYPASS_19) || (dev_priv->ksel == KSEL_BYPASS_25)) {
++ dpll &= ~MDFLD_INPUT_REF_SEL;
++ } else if (dev_priv->ksel == KSEL_BYPASS_83_100) {
++ dpll |= MDFLD_INPUT_REF_SEL;
++ }
++#endif /* FIXME revisit later */
++
++ if (is_hdmi)
++ dpll |= MDFLD_VCO_SEL;
++
++ fp = (clk_n / 2) << 16;
++ fp |= m_conv;
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 2)) << 17;
++
++#if 0 /* 1080p30 & 720p */
++ dpll = 0x00050000;
++ fp = 0x000001be;
++#endif
++#if 0 /* 480p */
++ dpll = 0x02010000;
++ fp = 0x000000d2;
++#endif
++ } else {
++#if DBI_TPO_480x864 /* get from spec. */
++ dpll = 0x00020000;
++ fp = 0x00000156;
++#endif /* DBI_TPO_480x864 */ /* get from spec. */
++
++#if DBI_TPO_864x480 || DSI_TPO_864x480 /* get from spec. */
++ dpll = 0x00800000;
++ fp = 0x000000c1;
++#endif /* DBI_TPO_864x480 */ /* get from spec. */
++
++#if 0 /* single dpi DSI_TPO_864x480 */ /* get from spec. */
++ dpll = 0x00800000;
++ fp = 0x00000044;
++#endif /* DBI_TPO_864x480 */ /* get from spec. */
++}
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++
++ dpll |= DPLL_VCO_ENABLE;
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++
++ /* wait for DSI PLL to lock */
++ while ((timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
++ udelay(150);
++ timeout ++;
++ }
++
++ if (is_mipi)
++ goto mrst_crtc_mode_set_exit;
++
++ PSB_DEBUG_ENTRY("is_mipi = 0x%x \n", is_mipi);
++
++ REG_WRITE(pipeconf_reg, *pipeconf);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe enable to take effect. */
++//FIXME_JLIU7 HDMI mrstWaitForPipeEnable(dev);
++
++ REG_WRITE(dspcntr_reg, *dspcntr);
++ psb_intel_wait_for_vblank(dev);
++
++mrst_crtc_mode_set_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++/* MDFLD_PLATFORM end */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_drv.h
+@@ -0,0 +1,365 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __INTEL_DRV_H__
++#define __INTEL_DRV_H__
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_crtc_helper.h>
++#include <linux/gpio.h>
++
++#if 0 /* Debug HDMI */
++#include "psb_intel_hdmi_i2c.h"
++#endif
++
++/* Temp Switch */
++#define MDFLD_HDMI_JLIU7_DEBUG 1 /* Debug HDMI - Can't enalbe HDMI */
++#define MDFLD_HDMI_JLIU7_DEBUG_1 1 /* Debug HDMI - Can't enalbe HDMI */
++
++/* Switch - don't change before PO */
++#define MDFLD_PO_STATUS 1 /* 0 - before real PO, 1 - need to test it after real PO */
++/* Must be 0 but need to be revisited afte MDFLD PO */
++#define MDFLD_GCT_JLIU7 0
++#define MDFLD_GET_SYNC_BURST 0 /* Consider BURST_MODE when calcaulation H/V sync counts */
++#define MDFLD_HDMI_JLIU7 1
++#define MDFLD_WLD_JLIU7 0
++/* MDFLD FEATURE SWITCHES*/
++#define MDFLD_PO_WATERMARK 0
++#define MDFLD_JLIU7_LABC 1 /* Ambient light based Automatic Brightness Control */
++/* MDFLD MIPI panels only one of them can be set to 1 */
++#ifdef CONFIG_MDFD_COMMAND_MODE
++/* MDFLD DSR SWITCHES*/
++#define MDFLD_JLIU7_DSR 1 /* have to be enable all the time. */
++#define MDFLD_JLIU7_DPU 0
++#define MDFLD_JLIU7_DPU_2 0
++#define DBI_TPO_480x864 0 /* TPO DBI 4.8 MIPI panel */
++#define DBI_TPO_864x480 1 /* TPO DBI 4.8 MIPI panel */
++#define DSI_TPO_864x480 0 /* TPO DBI 4.8 MIPI panel */
++#else
++/* MDFLD DSR SWITCHES*/
++#define MDFLD_JLIU7_DSR 0
++#define MDFLD_JLIU7_DPU 0
++#define MDFLD_JLIU7_DPU_2 0
++#define DBI_TPO_480x864 0 /* TPO DBI 4.8 MIPI panel */
++#define DBI_TPO_864x480 0 /* TPO DBI 4.8 MIPI panel */
++#define DSI_TPO_864x480 1 /* TPO DBI 4.8 MIPI panel */
++#endif
++
++#ifdef CONFIG_MDFD_COMMAND_MODE_2
++/* 2nd MIPI Panel type */
++#define DBI_TPO_480x864_2 0 /* TPO DBI 4.8 MIPI panel */
++#define DBI_TPO_864x480_2 1 /* TPO DBI 4.8 MIPI panel */
++#define DSI_TPO_864x480_2 0 /* TPO DBI 4.8 MIPI panel */
++#else
++#define DBI_TPO_480x864_2 0 /* TPO DBI 4.8 MIPI panel */
++#define DBI_TPO_864x480_2 0 /* TPO DBI 4.8 MIPI panel */
++#define DSI_TPO_864x480_2 1 /* TPO DBI 4.8 MIPI panel */
++#endif
++
++/* MDFLD KSEL only one of them can be set to 1 */
++#define KSEL_CRYSTAL_19_ENABLED 1
++#define KSEL_BYPASS_19_ENABLED 0
++#define KSEL_BYPASS_25_ENABLED 0
++#define KSEL_BYPASS_83_100_ENABLE 0
++
++#define KSEL_CRYSTAL_19 1
++#define KSEL_BYPASS_19 5
++#define KSEL_BYPASS_25 6
++#define KSEL_BYPASS_83_100 7
++/*
++ * MOORESTOWN defines
++ */
++#define DUMP_REGISTER 0
++#define DELAY_TIME1 2000 /* 1000 = 1ms */
++
++/*
++ * Display related stuff
++ */
++
++/* store information about an Ixxx DVO */
++/* The i830->i865 use multiple DVOs with multiple i2cs */
++/* the i915, i945 have a single sDVO i2c bus - which is different */
++#define MAX_OUTPUTS 6
++/* maximum connectors per crtcs in the mode set */
++#define INTELFB_CONN_LIMIT 4
++
++#define INTEL_I2C_BUS_DVO 1
++#define INTEL_I2C_BUS_SDVO 2
++
++/* these are outputs from the chip - integrated only
++ * external chips are via DVO or SDVO output */
++#define INTEL_OUTPUT_UNUSED 0
++#define INTEL_OUTPUT_ANALOG 1
++#define INTEL_OUTPUT_DVO 2
++#define INTEL_OUTPUT_SDVO 3
++#define INTEL_OUTPUT_LVDS 4
++#define INTEL_OUTPUT_TVOUT 5
++#define INTEL_OUTPUT_HDMI 6
++#define INTEL_OUTPUT_MIPI 7
++#define INTEL_OUTPUT_MIPI2 8
++#define INTEL_OUTPUT_WLD 9
++
++#define INTEL_DVO_CHIP_NONE 0
++#define INTEL_DVO_CHIP_LVDS 1
++#define INTEL_DVO_CHIP_TMDS 2
++#define INTEL_DVO_CHIP_TVOUT 4
++
++enum mipi_panel_type {
++ NSC_800X480 = 1,
++ LGE_480X1024 = 2,
++ TPO_864X480 = 3
++};
++
++struct opregion_header {
++ u8 signature[16];
++ u32 size;
++ u32 opregion_ver;
++ u8 bios_ver[32];
++ u8 vbios_ver[16];
++ u8 driver_ver[16];
++ u32 mboxes;
++ u8 reserved[164];
++} __attribute__((packed));
++
++struct opregion_apci {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct opregion_swsci {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct opregion_acpi {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct psb_intel_opregion {
++ struct opregion_header *header;
++ struct opregion_acpi *acpi;
++ struct opregion_swsci *swsci;
++ struct opregion_asle *asle;
++ int enabled;
++};
++
++/**
++ * Hold information useally put on the device driver privates here,
++ * since it needs to be shared across multiple of devices drivers privates.
++*/
++struct psb_intel_mode_device {
++
++ /*
++ * Abstracted memory manager operations
++ */
++ void *(*bo_from_handle) (struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle);
++ size_t(*bo_size) (struct drm_device *dev, void *bo);
++ size_t(*bo_offset) (struct drm_device *dev, void *bo);
++ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
++ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
++
++ /*
++ * Cursor
++ */
++ int cursor_needs_physical;
++
++ /*
++ * LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *panel_fixed_mode2;
++ struct drm_display_mode *vbt_mode; /* if any */
++
++ uint32_t saveBLC_PWM_CTL;
++};
++
++struct psb_intel_i2c_chan {
++ /* for getting at dev. private (mmio etc.) */
++ struct drm_device *drm_dev;
++ u32 reg; /* GPIO reg */
++ struct i2c_adapter adapter;
++ struct i2c_algo_bit_data algo;
++ u8 slave_addr;
++};
++
++struct psb_intel_output {
++ struct drm_connector base;
++
++ struct drm_encoder enc;
++ int type;
++#if 1 /*MDFLD_HDMI_JLIU7*/
++ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
++#endif
++ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
++ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
++ bool load_detect_temp;
++ void *dev_priv;
++
++ struct psb_intel_mode_device *mode_dev;
++
++};
++
++struct psb_intel_crtc_state {
++ uint32_t saveDSPCNTR;
++ uint32_t savePIPECONF;
++ uint32_t savePIPESRC;
++ uint32_t saveDPLL;
++ uint32_t saveFP0;
++ uint32_t saveFP1;
++ uint32_t saveHTOTAL;
++ uint32_t saveHBLANK;
++ uint32_t saveHSYNC;
++ uint32_t saveVTOTAL;
++ uint32_t saveVBLANK;
++ uint32_t saveVSYNC;
++ uint32_t saveDSPSTRIDE;
++ uint32_t saveDSPSIZE;
++ uint32_t saveDSPPOS;
++ uint32_t saveDSPBASE;
++ uint32_t savePalette[256];
++};
++
++struct psb_intel_crtc {
++ struct drm_crtc base;
++ int pipe;
++ int plane;
++ uint32_t cursor_addr;
++ u8 lut_r[256], lut_g[256], lut_b[256];
++ u8 lut_adj[256];
++ struct psb_intel_framebuffer *fbdev_fb;
++ /* a mode_set for fbdev users on this crtc */
++ struct drm_mode_set mode_set;
++
++ /* current bo we scanout from */
++ void *scanout_bo;
++
++ /* current bo we cursor from */
++ void *cursor_bo;
++
++ struct drm_display_mode saved_mode;
++ struct drm_display_mode saved_adjusted_mode;
++
++ struct psb_intel_mode_device *mode_dev;
++
++ /*crtc mode setting flags*/
++ u32 mode_flags;
++
++/*FIXME: Workaround to avoid MRST block.*/
++#ifndef CONFIG_X86_MRST
++ /* Saved Crtc HW states */
++ struct psb_intel_crtc_state *crtc_state;
++#endif
++};
++
++#define to_psb_intel_crtc(x) \
++ container_of(x, struct psb_intel_crtc, base)
++#define to_psb_intel_output(x) \
++ container_of(x, struct psb_intel_output, base)
++#define enc_to_psb_intel_output(x) \
++ container_of(x, struct psb_intel_output, enc)
++#define to_psb_intel_framebuffer(x) \
++ container_of(x, struct psb_intel_framebuffer, base)
++
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name);
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
++extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
++
++extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_crt_init(struct drm_device *dev);
++extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
++extern void psb_intel_dvo_init(struct drm_device *dev);
++extern void psb_intel_tv_init(struct drm_device *dev);
++extern void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
++extern void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mid_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev, int dsi_num);
++extern void mdfld_hdmi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mdfld_wld_init(struct drm_device *dev);
++
++extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
++extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
++extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
++
++extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
++ *connector);
++
++extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc);
++extern void psb_intel_wait_for_vblank(struct drm_device *dev);
++extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
++ int pipe);
++extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
++ int sdvoB);
++extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
++extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
++ int enable);
++extern int intelfb_probe(struct drm_device *dev);
++extern int intelfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
++ *dev, struct
++ drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private);
++extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode);
++extern int psb_intel_lvds_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value);
++extern void psb_intel_lvds_destroy(struct drm_connector *connector);
++extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
++
++#if MDFLD_JLIU7_DSR
++#if MDFLD_JLIU7_DPU_2
++extern void mdfld_dbi_update_fb (struct drm_device *dev);
++#else /* MDFLD_JLIU7_DPU_2 */
++extern void mdfld_dbi_update_fb (struct drm_device *dev, int pipe);
++#endif /* MDFLD_JLIU7_DPU_2 */
++extern void mdfld_dbi_enter_dsr (struct drm_device *dev);
++extern void mdfld_dbi_exit_dsr (struct drm_device *dev, u32 update_src);
++#endif /* MDFLD_JLIU7_DSR */
++#if MDFLD_JLIU7_LABC
++extern void mdfld_dsi_brightness_control (struct drm_device *dev, int pipe, int level);
++#endif /* MDFLD_JLIU7_LABC */
++extern void mdfld_dsi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat);
++extern void mdfld_dsi_dbi_CB_ready (struct drm_device *dev, u32 mipi_command_address_reg, u32 gen_fifo_stat_reg);
++extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
++extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
++extern uint8_t blc_pol;
++extern uint8_t blc_freq;
++
++#endif /* __INTEL_DRV_H__ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_dsi.c
+@@ -0,0 +1,2361 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#include <linux/backlight.h>
++#include <linux/version.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ #include <asm/ipc_defs.h>
++#else
++ #include <asm/intel_scu_ipc.h>
++#endif
++#include <asm/mrst.h>
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_powermgmt.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++/* #define DRM_MODE_CONNECTOR_MIPI 13 */
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_POLARITY_NORMAL 0
++
++#if DUMP_REGISTER
++extern void dump_dsi_registers(struct drm_device *dev);
++#endif /* DUMP_REGISTER */
++void mrst_init_TPO_MIPI(struct drm_device *dev);
++
++uint8_t blc_pol2;
++uint8_t blc_freq2;
++int dsi_backlight; /* restore backlight to this value */
++int dsi_backlight2; /* restore backlight to this value */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++ return BRIGHTNESS_MAX_LEVEL;
++
++/* FIXME jliu7 need to revisit */
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ /* u32 pp_status; */
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DRM_INFO("Enter mrst_dsi_set_power \n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ /* program MIPI DSI controller and Display Controller
++ * set the device ready bit + set 'turn on' bit b048
++ * wait for 100 ms ??
++ * set pipe enable bit */
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++ msleep(100);
++
++ if (dev_priv->panel_make == TPO_864X480)
++ dev_priv->init_drvIC(dev); /* initialize the panel */
++ /* Turn on backlight */
++ REG_WRITE(BLC_PWM_CTL, 0x2faf1fc9);
++ } else {
++ /* set the shutdown bit b048h
++ * de-assert pipe enable
++ * clear device ready bit unless DBI is to be left on */
++ REG_WRITE(BLC_PWM_CTL, 0x2faf0000);
++ REG_WRITE(DPI_CONTROL_REG, 1);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ PSB_DEBUG_ENTRY("%s \n", (mode == DRM_MODE_DPMS_ON ? "on":"off"));
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_dsi_set_power(dev, output, true);
++ else
++ mrst_dsi_set_power(dev, output, false);
++}
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * make backlight to full brightness
++ */
++ dsi_backlight = mrst_dsi_get_max_backlight(dev);
++#endif
++}
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
++ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
++ *REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
++ mrst_dsi_set_power(dev, true);
++ else
++ mrst_dsi_set_power(dev, false);
++#endif
++}
++
++static void mrst_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ mrst_dsi_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_dsi_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ mrst_dsi_get_max_backlight(dev);
++
++ mrst_dsi_set_power(dev, output, true);
++
++#if DUMP_REGISTER
++ dump_dsi_registers(dev);
++#endif /* DUMP_REGISTER */
++}
++
++#if 0
++/* ************************************************************************* *\
++FUNCTION: GetHS_TX_timeoutCount
++DESCRIPTION: In burst mode, value greater than one DPI line Time in byte clock
++ (txbyteclkhs). To timeout this timer 1+ of the
++ above said value is recommended.
++
++ In non-burst mode, Value greater than one DPI frame time
++ in byte clock(txbyteclkhs).
++
++ To timeout this timer 1+ of the above said value is recommended.
++
++\* ************************************************************************* */
++static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
++
++ /* Total pixels need to be transfer per line*/
++ HTotalPixel = (dev_priv->HsyncWidth +
++ dev_priv->HbackPorch +
++ dev_priv->HfrontPorch) *
++ dev_priv->laneCount +
++ dev_priv->HactiveArea;
++
++ /* byte count = (pixel count * bits per pixel) / 8 */
++ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ timeoutCount = HTOT_count + 1;
++#if 1 /*FIXME remove it after power-on */
++ VTOT_count = dev_priv->VactiveArea +
++ dev_priv->VbackPorch +
++ dev_priv->VfrontPorch + dev_priv->VsyncWidth;
++
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++#endif
++ } else {
++ VTOT_count = dev_priv->VactiveArea +
++ dev_priv->VbackPorch +
++ dev_priv->VfrontPorch +
++ dev_priv->VsyncWidth;
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++ }
++
++ return timeoutCount & 0xFFFF;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetLP_RX_timeoutCount
++
++DESCRIPTION: The timeout value is protocol specific. Time out value is
++ calculated from txclkesc(50ns).
++
++ Minimum value =
++ Time to send one Trigger message = 4 X txclkesc
++ [Escape mode entry sequence)
++ + 8-bit trigger message (2x8xtxclkesc)
++ +1 txclksesc [stop_state]
++ = 21 X txclkesc [ 15h]
++
++ Maximum Value =
++ Time to send a long packet with maximum payload data
++ = 4 X txclkesc [Escape mode entry sequence)
++ + 8-bit Low power data transmission Command (2x8xtxclkesc)
++ + packet header [ 4X8X2X txclkesc]
++ +payload [ nX8X2Xtxclkesc]
++ +CRC[2X8X2txclkesc]
++ +1 txclksesc [stop_state]
++ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
++
++\* ************************************************************************* */
++static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0;
++
++ if (dev_priv->config_phase) {
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++ } else {
++ /* For DPI video only mode use the minimum value.*/
++ timeoutCount = 0x15;
++#if 1 /*FIXME remove it after power-on */
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++#endif
++ }
++
++ return timeoutCount;
++}
++#endif /* #if 0 - to avoid warnings */
++
++/* ************************************************************************* *\
++FUNCTION: GetHSA_Count
++
++DESCRIPTION: Shows the horizontal sync value in terms of byte clock
++ (txbyteclkhs)
++ Minimum HSA period should be sufficient to transmit a hsync start short
++ packet(4 bytes)
++ i) For Non-burst Mode with sync pulse, Min value 4 in decimal
++ [plus an optional 6 bytes for a zero payload blanking
++ packet]. But if the value is less than 10 but more
++ than 4, then this count will be added to the HBP s
++ count for one lane.
++ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA,
++ so you can program this to zero. If you program this
++ register, these byte values will be added to HBP.
++ iii) For Burst mode of operation, normally the values
++ programmed in terms of byte clock are based on the
++ principle - time for transfering
++ HSA in Burst mode is the same as in non-bust mode.
++\* ************************************************************************* */
++static u32 GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HSA_count;
++ u32 HSA_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HSA_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HSA_count = HSA_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HSA according to equation:
++ (hsync_width) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (2 * lane number) */
++
++ HSA_count /= (2 * dev_priv->laneCount);
++
++ if (HSA_count < 4) /* minimum value of 4 */
++ HSA_count = 4;
++
++ PSB_DEBUG_ENTRY("HSA_count is %d\n", HSA_count);
++
++ return HSA_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHBP_Count
++
++DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
++ Minimum HBP period should be sufficient to transmit a "hsync end short
++ packet(4 bytes) + Blanking packet overhead(6 bytes) +
++ RGB packet header(4 bytes)"
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transfering HBP
++ in Burst mode is the same as in non-bust mode.
++
++ Min value - 14 in decimal
++ [accounted with zero payload for blanking packet] for one lane.
++ Max value - any value greater than 14 based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HBP_count, HBP_countX8;
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HBP_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HBP_count = HBP_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HBP according to equation:
++ (hsync_backporch) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (2 * lane number) */
++
++ HBP_count /= (2 * dev_priv->laneCount);
++
++ if (HBP_count < 8) /* minimum value of 8 */
++ HBP_count = 8;
++
++ PSB_DEBUG_ENTRY("HBP_count is %d\n", HBP_count);
++
++ return HBP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHFP_Count
++
++DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
++Minimum HFP period should be sufficient to transmit "RGB Data packet
++footer(2 bytes) + Blanking packet overhead(6 bytes)" for non burst mode.
++
++For burst mode, Minimum HFP period should be sufficient to transmit
++"Blanking packet overhead(6 bytes)"
++
++For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transfering HFP
++ in Burst mode is the same as in non-bust mode.
++
++Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++Min value - 6 in decimal for burst mode for one lane.
++
++Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HFP_count, HFP_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HFP_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HFP_count = HFP_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HFP according to equation:
++ (hsync_frontporch) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (2 * lane number) */
++
++ HFP_count /= (2 * dev_priv->laneCount);
++
++ if (HFP_count < 8) /* minimum value of 8 */
++ HFP_count = 8;
++
++ PSB_DEBUG_ENTRY("HFP_count is %d\n", HFP_count);
++
++ return HFP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHAdr_Count
++
++DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
++ In Non Burst Mode, Count equal to RGB word count value
++
++In Burst Mode, RGB pixel packets are time-compressed, leaving more time
++ during a scan line for LP mode (saving power) or for multiplexing
++ other transmissions onto the DSI link. Hence, the count equals the
++ time in txbyteclkhs for sending time compressed RGB pixels plus
++ the time needed for moving to power save mode or the time needed
++ for secondary channel to use the DSI link.
++
++But if the left out time for moving to low power mode is less than
++ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
++ 6txbyteclkhs for a blanking packet with zero payload], then
++ this count will be added to the HFP's count for one lane.
++
++Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++Min value - 6 in decimal for burst mode for one lane.
++
++Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HAdr_count, HAdr_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HAdr_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HAdr_count = HAdr_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HAdr according to equation:
++ (horizontal active) * 24 bpp / (8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (lane number) */
++
++ HAdr_count /= dev_priv->laneCount;
++
++ PSB_DEBUG_ENTRY("HAdr_count is %d\n", HAdr_count);
++
++ return HAdr_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetVSA_Count
++
++DESCRIPTION: Shows the vertical sync value in terms of lines
++
++\* ************************************************************************* */
++static u32 GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VSA_count;
++
++ /* Get the vsync pulse width */
++ VSA_count = dev_priv->VsyncWidth;
++
++ if (VSA_count < 2) /* minimum value of 2 */
++ VSA_count = 2;
++
++ PSB_DEBUG_ENTRY("VSA_count is %d\n", VSA_count);
++
++ return VSA_count;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: GetVBP_Count
++ *
++ * DESCRIPTION: Shows the vertical back porch value in lines.
++ *
++\* ************************************************************************* */
++static u32 GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VBP_count;
++
++ /* Get the Vertical Backporch width */
++ VBP_count = dev_priv->VbackPorch;
++
++ if (VBP_count < 2) /* minimum value of 2 */
++ VBP_count = 2;
++
++ PSB_DEBUG_ENTRY("VBP_count is %d\n", VBP_count);
++
++ return VBP_count;
++}
++/* ************************************************************************* *\
++ * FUNCTION: GetVFP_Count
++ *
++ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
++ *
++\* ************************************************************************* */
++static u32 GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VFP_count;
++
++ /* Get the Vertical Frontporch width */
++ VFP_count = dev_priv->VfrontPorch;
++
++ if (VFP_count < 2) /* minimum value of 2 */
++ VFP_count = 2;
++
++ PSB_DEBUG_ENTRY("VFP_count is %d\n", VFP_count);
++
++ return VFP_count;
++}
++
++#if 0
++/* ************************************************************************* *\
++FUNCTION: GetHighLowSwitchCount
++
++DESCRIPTION: High speed to low power or Low power to high speed switching time
++ in terms byte clock (txbyteclkhs). This value is based on the
++ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
++
++Typical value - Number of byte clocks required to switch from low power mode
++ to high speed mode after "txrequesths" is asserted.
++
++The worst count value among the low to high or high to low switching time
++ in terms of txbyteclkhs has to be programmed in this register.
++
++Usefull Formulae:
++ DDR clock period = 2 times UI
++ txbyteclkhs clock = 8 times UI
++ Tlpx = 1 / txclkesc
++ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
++ (from Standard D-PHY spec)
++
++ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] +
++ 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
++ (from Standard D-PHY spec)
++
++ Ths-trail = 1txbyteclkhs clock [8UI] +
++ 5DDR clock [10UI] + 4 Tlpx [Approx]
++\* ************************************************************************* */
++static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
++
++/* ************************************************************************* *\
++CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
++(from Standard D-PHY spec)
++
++Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
++
++Tlpx = 50 ns, Using max txclkesc (20MHz)
++
++txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
++UI_period = 500 / dev_priv->DDR_Clock; in ns
++
++HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
++ = 9000 / dev_priv->DDR_Clock + 200;
++
++HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
++ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
++ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++/* ************************************************************************* *\
++CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
++(from Standard D-PHY spec)
++
++LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] +
++1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ LP_to_HS = 10 * UI_period + 5 * Tlpx =
++ = 5000 / dev_priv->DDR_Clock + 250;
++
++ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
++ = (5000 / dev_priv->DDR_Clock + 250) /
++ (4000 / dev_priv->DDR_Clock)
++
++ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++ if (HighToLowSwitchCount > LowToHighSwitchCount)
++ HighLowSwitchCount = HighToLowSwitchCount;
++ else
++ HighLowSwitchCount = LowToHighSwitchCount;
++
++ /* FIXME jliu need to fine tune the above formulae and remove the
++ * following after power on */
++ if (HighLowSwitchCount < 0x1f)
++ HighLowSwitchCount = 0x1f;
++
++ return HighLowSwitchCount;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_gen_long_write
++DESCRIPTION:
++\* ************************************************************************* */
++static void mrst_gen_long_write(struct drm_device *dev,
++ u32 *data,
++ u16 wc,
++ u8 vc)
++{
++ u32 gen_data_reg = HS_GEN_DATA_REG;
++ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 date_full_bit = HS_DATA_FIFO_FULL;
++ u32 control_full_bit = HS_CTRL_FIFO_FULL;
++ u16 wc_saved = wc;
++
++ PSB_DEBUG_ENTRY("Enter mrst_gen_long_write \n");
++
++ /* sanity check */
++ if (vc > 4) {
++ DRM_ERROR
++ (KERN_ERR "MIPI Virtual channel Can't greater than 4.\n");
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (0) { /* FIXME JLIU7 check if it is in LP*/
++ gen_data_reg = LP_GEN_DATA_REG;
++ gen_ctrl_reg = LP_GEN_CTRL_REG;
++ date_full_bit = LP_DATA_FIFO_FULL;
++ control_full_bit = LP_CTRL_FIFO_FULL;
++ }
++
++ while (wc >= 4) {
++ /* Check if MIPI IP generic data fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit)
++ == date_full_bit) {
++ /* Do Nothing Here */
++ /* This will make checkpatch work */
++ }
++
++ /* write to data buffer */
++ REG_WRITE(gen_data_reg, *data);
++
++ wc -= 4;
++ data++;
++ }
++
++ switch (wc) {
++ case 1:
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ case 2:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ break;
++ case 3:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ data = (u32 *)((u8 *) data + 2);
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ }
++
++ /* Check if MIPI IP generic control fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit)
++ == control_full_bit) {
++ /* Do Nothing Here */
++ /* This will make Checkpatch work */
++ }
++ /* write to control buffer */
++ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_HIMAX_MIPI_bridge
++DESCRIPTION:
++\* ************************************************************************* */
++static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
++{
++ u32 gen_data[2];
++ u16 wc = 0;
++ u8 vc = 0;
++ u32 gen_data_intel = 0x200105;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* exit sleep mode */
++ wc = 0x5;
++ gen_data[0] = gen_data_intel | (0x11 << 24);
++ gen_data[1] = 0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_pixel_format */
++ gen_data[0] = gen_data_intel | (0x3A << 24);
++ gen_data[1] = 0x77;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Set resolution for (800X480) */
++ wc = 0x8;
++ gen_data[0] = gen_data_intel | (0x2A << 24);
++ gen_data[1] = 0x1F030000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[0] = gen_data_intel | (0x2B << 24);
++ gen_data[1] = 0xDF010000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* System control */
++ wc = 0x6;
++ gen_data[0] = gen_data_intel | (0xEE << 24);
++ gen_data[1] = 0x10FA;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INPUT TIMING FOR TEST PATTERN(800X480) */
++ /* H-size */
++ gen_data[1] = 0x2000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0301;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-size */
++ gen_data[1] = 0xE002;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0103;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-total */
++ gen_data[1] = 0x2004;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0405;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-total */
++ gen_data[1] = 0x0d06;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0207;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x0308;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0009;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x030A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-start */
++ gen_data[1] = 0xD80C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-start */
++ gen_data[1] = 0x230E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RGB domain */
++ gen_data[1] = 0x0027;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INP_FORM Setting */
++ /* set_1 */
++ gen_data[1] = 0x1C10;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x0711;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x0012;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x0013;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x2314;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x0015;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_7 */
++ gen_data[1] = 0x2316;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_8 */
++ gen_data[1] = 0x0017;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_1 */
++ gen_data[1] = 0x0330;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC Setting */
++ /* FRC_set_2 */
++ gen_data[1] = 0x237A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_3 */
++ gen_data[1] = 0x4C7B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_4 */
++ gen_data[1] = 0x037C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_5 */
++ gen_data[1] = 0x3482;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_7 */
++ gen_data[1] = 0x1785;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++#if 0
++ /* FRC_set_8 */
++ gen_data[1] = 0xD08F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++#endif
++
++ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
++ /* out_htotal */
++ gen_data[1] = 0x2090;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0491;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsync */
++ gen_data[1] = 0x0392;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0093;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hstart */
++ gen_data[1] = 0xD894;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0095;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsize */
++ gen_data[1] = 0x2096;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0397;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vtotal */
++ gen_data[1] = 0x0D98;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0299;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsync */
++ gen_data[1] = 0x039A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vstart */
++ gen_data[1] = 0x239C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsize */
++ gen_data[1] = 0xE09E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x019F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_6 */
++ gen_data[1] = 0x9084;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Other setting */
++ gen_data[1] = 0x0526;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RBG domain */
++ gen_data[1] = 0x1177;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* rgbw */
++ /* set_1 */
++ gen_data[1] = 0xD28F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x02D0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x08D1;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x05D2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x24D4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x00D5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x02D7;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00D8;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ gen_data[1] = 0x48F3;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0xD4F2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x3D8E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x60FD;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00B5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x48F4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* inside patten */
++ gen_data[1] = 0x0060;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++#endif
++
++static void mrst_wait_for_INTR_PKT_SENT(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (!(REG_READ(INTR_STAT_REG) & SPL_PKT_SENT))) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: SPL_PKT_SENT_INTERRUPT was not set correctly!\n");
++}
++
++static void mrst_wait_for_PIPEA_DISABLE(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(0x70008) & 0x40000000)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: PIPEA was not disabled!\n");
++}
++
++static void mrst_wait_for_DPI_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && ((REG_READ(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: DPI FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_LP_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ LP_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: LP CMD FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_HS_DATA_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ HS_DATA_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_HS_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ HS_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_NSC_MIPI_bridge
++DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ restore_display_registers. since this function does not
++ acquire the mutex, it is important that the calling function
++ does!
++\* ************************************************************************* */
++void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
++{
++
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ /* Program MIPI IP to 100MHz DSI, Non-Burst mode with sync event,
++ 2 Data Lanes */
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable RGB24*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable all error reporting*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable 2 data lane; video shaping & error reporting */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* HS timeout */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable all virtual channels */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* set output strength to low-drive */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ if (dev_priv->sku_83) {
++ /* set escape clock to divede by 8 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
++ } else if (dev_priv->sku_100L) {
++ /* set escape clock to divede by 16 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++ } else if (dev_priv->sku_100) {
++ /* set escape clock to divede by 32*/
++ /*REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++
++ /*mrst_wait_for_LP_CTRL_FIFO(dev);*/
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ /*REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);*/
++ }
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* CFG_VALID=1; RGB_CLK_EN=1. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
++
++ /*ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);*/
++}
++
++static int mrst_check_mipi_error(struct drm_device *dev)
++{
++ u32 int_status_reg = 0;
++ u32 relevant_error_bits = 0x0fff; /* only care about error bits 0-11 */
++ u32 reported_errors = 0;
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x010524); /* 2-parameter gen short read */
++
++ /* sleep 100 microseconds */
++ udelay(100);
++
++ int_status_reg = REG_READ(INTR_STAT_REG);
++ printk(KERN_ALERT "MIPI Intr Status Reg: 0x%X\n", int_status_reg);
++
++ reported_errors = int_status_reg & relevant_error_bits;
++ if (reported_errors) {
++ printk(KERN_ALERT "MIPI Init sequence reported errs: 0x%X\n",
++ reported_errors);
++ /* Clear the error bits */
++ REG_WRITE(INTR_STAT_REG, reported_errors);
++ return reported_errors;
++ }
++
++ return 0;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mrst_init_TPO_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++\* ************************************************************************* */
++void mrst_init_TPO_MIPI(struct drm_device *dev)
++{
++ /*DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;*/
++
++ DRM_INFO("Enter mrst init TPO MIPI display.\n");
++
++ /* Flip page order */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00008036);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* 0xF0 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5af0);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* Write protection key */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5af1);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xFC */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5afc);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xB7 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x770000b7);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000044);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000529);
++
++ /* 0xB6 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000a0ab6);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xF2 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x081010f2);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x4a070708);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000000c5);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xF8 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x024003f8);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x01030a04);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x0e020220);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000004);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000d29);
++
++ /* 0xE2 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x398fc3e2);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x0000916f);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000629);
++
++ /* 0xB0 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000000b0);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* 0xF4 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x240242f4);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x78ee2002);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2a071050);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x507fee10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x10300710);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00001429);
++
++ /* 0xBA */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x19fe07ba);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x101c0a31);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000010);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xBB */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x28ff07bb);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x24280a31);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000034);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xFB */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535d05fb);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1b1a2130);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x221e180e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x131d2120);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535d0508);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1c1a2131);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x231f160d);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x111b2220);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535c2008);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1f1d2433);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2c251a10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2c34372d);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000023);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00003129);
++
++ /* 0xFA */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x525c0bfa);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1c1c232f);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2623190e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x18212625);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x545d0d0e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1e1d2333);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x26231a10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1a222725);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x545d280f);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x21202635);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x31292013);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x31393d33);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000029);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00003129);
++
++ /* Set DM */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000100f7);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mrst_init_LGE_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++\* ************************************************************************* */
++void mrst_init_LGE_MIPI(struct drm_device *dev)
++{
++ /*DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;*/
++ int i = 0;
++
++ DRM_INFO("Enter mrst init LGE MIPI display.\n");
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(0xb06c, 0x00870123);
++
++ /* LGE 480x1024 Panel Initialization sequence */
++ for (i = 0; i < 10; i++) {
++ /* Panel Characteristics Settings */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb2200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x0ec820);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x7 << 8 | 0x0 << 6);
++
++ /* Panel Driver Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* Display Mode Control */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* Display Mode and Frame Memory write Mode Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x000f0f12);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x9 << 8 | 0x0 << 6);
++
++ /* Display Control (GIP Specific) */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb6200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x40021803);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x3010);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xa << 8 | 0x0 << 6);
++
++ /* Power Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc0200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x1f01);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x6 << 8 | 0x0 << 6);
++
++ /* Power Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x03040407);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x07);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x9 << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x15154412);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x6d04);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xa << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x64);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc6200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x004024);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x7 << 8 | 0x0 << 6);
++
++ /* red */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd0200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd1200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ /* green */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd2200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ /* blue */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ for (i = 0; i < 10; i++) {
++ /* Sleep Out */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x11200105);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x4 << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ udelay(10000);
++
++ for (i = 0; i < 10; i++) {
++ /* Display On */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x29200105);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x4 << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ /*ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);*/
++}
++
++/*enum mipi_panel_type {
++ NSC_800X480 = 0,
++ LGE_480X1024 = 1,
++ TPO_864X480 = 2
++};*/
++
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 dsiFuncPrgValue = 0;
++ u32 SupportedFormat = 0;
++ u32 resolution = 0;
++ u32 mipi_control_val = 0;
++ u32 intr_en_val = 0;
++ u32 turnaround_timeout_val = 0;
++ u32 device_reset_val = 0;
++ u32 init_count_val = 0;
++ u32 hs_tx_timeout_val = 0;
++ u32 lp_rx_timeout_val = 0;
++ u32 high_low_switch_count_val = 0;
++ u32 eot_disable_val = 0;
++ u32 lp_byteclk_val = 0;
++ u32 device_ready_val = 0;
++ /*u32 dpi_control_val = 0;*/
++ u32 vsa_count = 0;
++ u32 vbp_count = 0;
++ u32 vfp_count = 0;
++ u32 hsa_count = 0;
++ u32 hbp_count = 0;
++ u32 hfp_count = 0;
++ u32 haa_count = 0;
++ u32 video_mode_format = 0;
++ u32 max_ret_packet_size = 0;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++ u32 mipi_port;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ switch (dev_priv->bpp) {
++ case 16:
++ SupportedFormat = RGB_565_FMT;
++ break;
++ case 18:
++ SupportedFormat = RGB_666_FMT;
++ break;
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
++ break;
++ }
++
++
++ if (dev_priv->dpi) {
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++
++ if (curValue == DRM_MODE_SCALE_NO_SCALE)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) == (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) > (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++ switch (dev_priv->panel_make) {
++ case NSC_800X480:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x00000001;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = dev_priv->HactiveArea |
++ (dev_priv->VactiveArea << RES_V_POS);
++ SupportedFormat <<= FMT_DPI_POS;
++ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
++ vsa_count = GetVSA_Count(dev, dev_priv);
++ vbp_count = GetVBP_Count(dev, dev_priv);
++ vfp_count = GetVFP_Count(dev, dev_priv);
++ hsa_count = GetHSA_Count(dev, dev_priv);
++ hbp_count = GetHBP_Count(dev, dev_priv);
++ hfp_count = GetHFP_Count(dev, dev_priv);
++ haa_count = GetHAdr_Count(dev, dev_priv);
++ video_mode_format = dev_priv->videoModeFormat;
++ hs_tx_timeout_val = 0x00001000;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x46;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ case TPO_864X480:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x0000000a;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = 0x01e00360;
++ dsiFuncPrgValue = 0x00000202;
++ vsa_count = 0x00000004;
++ vbp_count = 0x00000008;
++ vfp_count = 0x00000008;
++ hsa_count = 0x00000006;
++ hbp_count = 0x0000000f;
++ hfp_count = 0x0000000f;
++ haa_count = 0x00000510;
++ video_mode_format = 0x00000003;
++ hs_tx_timeout_val = 0x00090000;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x00000046;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ case LGE_480X1024:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x00000012;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = 0x040001e0;
++ dsiFuncPrgValue = 0x00000202;
++ vsa_count = 0x00000005;
++ vbp_count = 0x0000000f;
++ vfp_count = 0x0000000f;
++ hsa_count = 0x00000008;
++ hbp_count = 0x00000018;
++ hfp_count = 0x0000000f;
++ haa_count = 0x00000320;
++ video_mode_format = 0x00000003;
++ hs_tx_timeout_val = 0x00ffffff;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x00000016;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ }
++
++ /* set 100 mhz dsi clk based on sku */
++ if (dev_priv->sku_83)
++ mipi_control_val = 0x0018; /* 100 mhz * 1 = 100 mhz */
++ else if (dev_priv->sku_100L)
++ mipi_control_val = 0x0019; /* 50 mhz * 2 = 100 mhz */
++ else if (dev_priv->sku_100)
++ mipi_control_val = 0x0018; /* 100 mhz * 1 = 100 mhz */
++
++ /* wait for PIPE A to disable */
++ mrst_wait_for_PIPEA_DISABLE(dev);
++
++ /* wait for DPI FIFO to clear */
++ mrst_wait_for_DPI_CTRL_FIFO(dev);
++
++ /* should not clear intr status register here, need clear up later */
++#if 0
++ if (REG_READ(INTR_STAT_REG))
++ {
++ REG_WRITE(INTR_STAT_REG, REG_READ(INTR_STAT_REG));
++ }
++#endif
++
++ /* Clear Device Ready Bit */
++ REG_WRITE(DEVICE_READY_REG, 0x00000000);
++
++ /* Enable MIPI Port */
++ mipi_port = MIPI_PORT_EN | MIPI_BORDER_EN;
++
++ /* Enable dithering if required */
++ if (mode_dev->panel_wants_dither)
++ mipi_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(MIPI, mipi_port);
++
++ /* set the lane speed */
++ REG_WRITE(MIPI_CONTROL_REG, mipi_control_val);
++
++ /* Enable all the error interrupt */
++ REG_WRITE(INTR_EN_REG, intr_en_val);
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, turnaround_timeout_val);
++ REG_WRITE(DEVICE_RESET_REG, device_reset_val);
++ REG_WRITE(INIT_COUNT_REG, init_count_val);
++
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, resolution);
++ /*REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);*/
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, vsa_count);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, vbp_count);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, vfp_count);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, hsa_count);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, hbp_count);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, hfp_count);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, haa_count);
++
++ REG_WRITE(VIDEO_FMT_REG, video_mode_format);
++
++ REG_WRITE(HS_TX_TIMEOUT_REG, hs_tx_timeout_val);
++ REG_WRITE(LP_RX_TIMEOUT_REG, lp_rx_timeout_val);
++
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG,
++ high_low_switch_count_val);
++
++ REG_WRITE(EOT_DISABLE_REG, eot_disable_val);
++
++ REG_WRITE(LP_BYTECLK_REG, lp_byteclk_val);
++ REG_WRITE(MAX_RET_PAK_REG, max_ret_packet_size);
++
++ REG_WRITE(DEVICE_READY_REG, device_ready_val);
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++ }
++
++#if 0
++/* JLIU7_TEMP */
++{
++int jliu7_temp = 0;
++DRM_INFO("JLIU7_TEMP 05, f014 = 0x%x \n", REG_READ(MRST_DPLL_A));
++DRM_INFO("JLIU7_TEMP 05, 70008 = 0x%x \n", REG_READ(PIPEACONF));
++DRM_INFO("JLIU7_TEMP 05, b000 = 0x%x \n", REG_READ(DEVICE_READY_REG));
++DRM_INFO("JLIU7_TEMP 05, b004 = 0x%x \n", jliu7_temp = REG_READ(INTR_STAT_REG));
++DRM_INFO("JLIU7_TEMP 05, b048 = 0x%x \n", REG_READ(DPI_CONTROL_REG));
++DRM_INFO("JLIU7_TEMP 05, b074 = 0x%x \n", REG_READ(GEN_FIFO_STAT_REG));
++DRM_INFO("JLIU7_TEMP 05, IER = 0x%x \n", REG_READ(IER));
++DRM_INFO("JLIU7_TEMP 05, IIR = 0x%x \n", REG_READ(IIR));
++DRM_INFO("JLIU7_TEMP 05, IMR = 0x%x \n", REG_READ(IMR));
++DRM_INFO("JLIU7_TEMP 05, ISR = 0x%x \n", REG_READ(ISR));
++if (jliu7_temp)
++{
++// DRM_INFO("JLIU7_TEMP 01a, \n");
++// REG_WRITE(INTR_STAT_REG, jliu7_temp);
++}
++while (!(REG_READ(INTR_STAT_REG) & SPL_PKT_SENT))
++{
++ DRM_INFO("JLIU7_TEMP 01a, \n");
++}
++ REG_WRITE(INTR_STAT_REG, REG_READ(INTR_STAT_REG));
++DRM_INFO("JLIU7_TEMP 05, b004 = 0x%x \n", REG_READ(INTR_STAT_REG));
++}
++#endif
++ if ((REG_READ(INTR_STAT_REG) & SPL_PKT_SENT)) {
++ REG_WRITE(INTR_STAT_REG, SPL_PKT_SENT);
++ }
++ mrst_wait_for_INTR_PKT_SENT(dev);
++
++ if ((dev_priv->panel_make == NSC_800X480) || (dev_priv->panel_make == LGE_480X1024))
++ dev_priv->init_drvIC(dev); /* initialize the mipi panel */
++
++ /* set the dphy settings for 100 mhz */
++ REG_WRITE(0xb080, 0x0b061c04);
++
++ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
++ /* REG_READ(PIPEACONF); */
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ /*udelay(20000);*/
++
++ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ /*udelay(20000);*/
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Detect the MIPI connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the MIPI was actually connected anyway.
++ */
++static enum drm_connector_status mrst_dsi_detect(struct drm_connector
++ *connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of MIPI DDB modes if available.
++ */
++static int mrst_dsi_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
++ panel_fixed_mode = mode_dev->panel_fixed_mode2;
++
++#if MDFLD_HDMI_JLIU7
++ if (psb_intel_output->type == INTEL_OUTPUT_HDMI)
++ DRM_INFO("Enter mrst_dsi_get_modes, wrong output type. \n");
++#endif /* MDFLD_JLIU7 */
++
++/* FIXME get the MIPI DDB modes */
++
++ /* Didn't get an DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
++ .dpms = mrst_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mrst_dsi_prepare,
++ .mode_set = mrst_dsi_mode_set,
++ .commit = mrst_dsi_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ mrst_dsi_connector_helper_funcs = {
++ .get_modes = mrst_dsi_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mrst_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revist it. */
++struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u8 panel_index = dev_priv->gct_data.bpi;
++ u8 panel_type = dev_priv->gct_data.pt;
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++ bool use_gct = false;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
++ if ((1<<panel_index) & panel_type) /* if non-zero,*/
++ use_gct = true; /*then mipi panel.*/
++
++ if (use_gct) {
++ PSB_DEBUG_ENTRY("gct find MIPI panel. \n");
++
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++
++ PSB_DEBUG_ENTRY("hdisplay is %d\n", mode->hdisplay);
++ PSB_DEBUG_ENTRY("vdisplay is %d\n", mode->vdisplay);
++ PSB_DEBUG_ENTRY("HSS is %d\n", mode->hsync_start);
++ PSB_DEBUG_ENTRY("HSE is %d\n", mode->hsync_end);
++ PSB_DEBUG_ENTRY("htotal is %d\n", mode->htotal);
++ PSB_DEBUG_ENTRY("VSS is %d\n", mode->vsync_start);
++ PSB_DEBUG_ENTRY("VSE is %d\n", mode->vsync_end);
++ PSB_DEBUG_ENTRY("vtotal is %d\n", mode->vtotal);
++ PSB_DEBUG_ENTRY("clock is %d\n", mode->clock);
++ } else {
++
++#if 0 /* LGE 480x1024 tentative timings */
++ mode->hdisplay = 480;
++ mode->vdisplay = 1024;
++ mode->hsync_start = 499;
++ mode->hsync_end = 506;
++ mode->htotal = 517;
++ mode->vsync_start = 1039;
++ mode->vsync_end = 1041;
++ mode->vtotal = 1047;
++ mode->clock = 33264;
++#endif
++#if 1 /*FIXME jliu7 remove it later */
++ /* copy from SV - hard coded fixed mode for
++ * DSI TPO 3.8" panel */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec*/
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++ }
++
++ dev_priv->pixelClock = mode->clock; /*KHz*/
++ dev_priv->HsyncWidth = mode->hsync_end - mode->hsync_start;
++ dev_priv->HbackPorch = mode->htotal - mode->hsync_end;
++ dev_priv->HfrontPorch = mode->hsync_start - mode->hdisplay;
++ dev_priv->HactiveArea = mode->hdisplay;
++ dev_priv->VsyncWidth = mode->vsync_end - mode->vsync_start;
++ dev_priv->VbackPorch = mode->vtotal - mode->vsync_end;
++ dev_priv->VfrontPorch = mode->vsync_start - mode->vdisplay;
++ dev_priv->VactiveArea = mode->vdisplay;
++
++ PSB_DEBUG_ENTRY("pixelClock is %d\n", dev_priv->pixelClock);
++ PSB_DEBUG_ENTRY("HsyncWidth is %d\n", dev_priv->HsyncWidth);
++ PSB_DEBUG_ENTRY("HbackPorch is %d\n", dev_priv->HbackPorch);
++ PSB_DEBUG_ENTRY("HfrontPorch is %d\n", dev_priv->HfrontPorch);
++ PSB_DEBUG_ENTRY("HactiveArea is %d\n", dev_priv->HactiveArea);
++ PSB_DEBUG_ENTRY("VsyncWidth is %d\n", dev_priv->VsyncWidth);
++ PSB_DEBUG_ENTRY("VbackPorch is %d\n", dev_priv->VbackPorch);
++ PSB_DEBUG_ENTRY("VfrontPorch is %d\n", dev_priv->VfrontPorch);
++ PSB_DEBUG_ENTRY("VactiveArea is %d\n", dev_priv->VactiveArea);
++
++ /* FIXME jliu7 we only support DPI */
++ dev_priv->dpi = true;
++
++ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
++ dev_priv->laneCount = 2;
++ dev_priv->bpp = 24;
++
++ /* FIXME hard coded for NSC PO. */
++	/* We only support BURST_MODE */
++ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrstDSI_clockInit
++DESCRIPTION:
++
++\* ************************************************************************* */
++static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
++static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
++static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
++#define MIPI_2XCLK_COUNT 0x04
++
++static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
++ u32 i = 0;
++ u32 *p_mipi_2xclk = NULL;
++
++ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
++ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
++
++ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
++
++ dev_priv->RRate = RRate;
++
++	/* ddr clock frequency = (pixel clock frequency * bits per pixel) / (2 * lane count) */
++ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) /
++ dev_priv->laneCount; /* KHz */
++ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
++
++ PSB_DEBUG_ENTRY("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
++
++ if (dev_priv->sku_100)
++ p_mipi_2xclk = sku_100_mipi_2xclk;
++ else if (dev_priv->sku_100L)
++ p_mipi_2xclk = sku_100L_mipi_2xclk;
++ else
++ p_mipi_2xclk = sku_83_mipi_2xclk;
++
++ for (; i < MIPI_2XCLK_COUNT; i++) {
++ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
++ break;
++ }
++
++ if (i == MIPI_2XCLK_COUNT)
++ {
++ PSB_DEBUG_ENTRY("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
++ return false;
++ }
++
++ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
++ dev_priv->ClockBits = i;
++
++ PSB_DEBUG_ENTRY("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
++
++ return true;
++}
++
++void aava_koski_dsi_init(struct drm_device *dev, \
++ struct psb_intel_mode_device *mode_dev);
++
++/**
++ * mrst_dsi_init - setup MIPI connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ aava_koski_dsi_init(dev, mode_dev);
++ return;
++ }
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &mrst_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
++ drm_connector_helper_add(connector,
++ &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_freq = 0xc8;
++
++ mode_dev->panel_wants_dither = false;
++ if (dev_priv->vbt_data.Size != 0x00) {
++ mode_dev->panel_wants_dither = (dev_priv->gct_data.Panel_MIPI_Display_Descriptor & (BIT3 | BIT4));
++ switch (dev_priv->gct_data.bpi) { /* set panel make */
++ case 1:
++ dev_priv->panel_make = NSC_800X480;
++ break;
++ case 2:
++ dev_priv->panel_make = TPO_864X480;
++ break;
++ case 3:
++ dev_priv->panel_make = LGE_480X1024;
++ break;
++ default:
++ DRM_INFO("MIPI: unknown panel type! Setting NSC.\n");
++ dev_priv->panel_make = NSC_800X480; /* assume NSC */
++ }
++ } else {
++ DRM_INFO("MIPI: No GCT! Setting NSC.\n");
++ dev_priv->panel_make = NSC_800X480;
++ }
++
++ /* set panel initialize function */
++ switch (dev_priv->panel_make) {
++ case NSC_800X480:
++ dev_priv->init_drvIC = mrst_init_NSC_MIPI_bridge;
++ break;
++ case TPO_864X480:
++ dev_priv->init_drvIC = mrst_init_TPO_MIPI;
++ break;
++ case LGE_480X1024:
++ dev_priv->init_drvIC = mrst_init_LGE_MIPI;
++ break;
++ }
++
++ /*
++ * MIPI discovery:
++ * 1) check for DDB data
++ * 2) check for VBT data
++	 * 3) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* FIXME change it to true if GET_DDB works */
++ dev_priv->config_phase = false;
++
++ /*
++	 * If we didn't get DDB data, try getting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ } else {
++ /* If we still don't have a mode after all that, give up. */
++ DRM_ERROR
++ ("Found no modes on the mipi, ignoring the MIPI.\n");
++ goto failed_find;
++ }
++
++ if (!mrstDSI_clockInit(dev_priv))
++ {
++ DRM_ERROR("Can't iniitialize MRST DSI clock.\n");
++#if 0 /* FIXME JLIU7 */
++ goto failed_find;
++#endif /* FIXME JLIU7 */
++ }
++
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No MIIP modes found, disabling.\n");
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++#include "psb_intel_dsi2.c"
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_dsi2.c
+@@ -0,0 +1,3583 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#if MDFLD_JLIU7_LABC
++/**
++ * write hysteresis values.
++ */
++static void mdfld_dbi_write_hysteresis (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++ u8 hysteresis[68] = {write_hysteresis, 0x0f, 0x00, 0x42, 0x00,
++ 0x64, 0x00, 0x8c, 0x00,
++ 0xbf, 0x00, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0x0a, 0x00, 0x38, 0x00,
++ 0x50, 0x00, 0x82, 0x00,
++ 0xab, 0x00, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ };
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)hysteresis;
++
++ for (i = 0; i < (68 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 65 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * write display profile values.
++ */
++static void mdfld_dbi_write_display_profile (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++ u8 profile[20] = {write_display_profile, 0x14, 0x28, 0x50,
++ 0x82, 0xc8, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ };
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)profile;
++
++ for (i = 0; i < (20 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 17 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * write KBBC profile values.
++ */
++static void mdfld_dbi_write_kbbc_profile (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++ u8 profile[20] = {write_kbbc_profile, 0xcc, 0xff, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00,
++ };
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)profile;
++
++ for (i = 0; i < (20 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 17 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * write gamma setting.
++ */
++static void mdfld_dbi_write_gamma_setting (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_data_reg = HS_GEN_DATA_REG;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 *p_gen_data_val = 0;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 i = 0;
++ u8 profile[12] = {write_gamma_setting, 0x11, 0x11, 0x81,
++ 0x88, 0x88, 0x88, 0x88,
++ 0x88, 0x88, 0x88, 0x88,
++ };
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ p_gen_data_val = (u32 *)profile;
++
++ for (i = 0; i < (12 / 4); i++) {
++ REG_WRITE(hs_gen_data_reg, *(p_gen_data_val + i));
++ }
++
++ gen_ctrl_val = 9 << WORD_COUNTS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_LONG_WRITE;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * Check and see if the generic control or data buffer is empty and ready.
++ */
++void mdfld_dsi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat)
++{
++ u32 GEN_BF_time_out_count = 0;
++
++	/* Check MIPI Adapter command registers */
++ for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT; GEN_BF_time_out_count++)
++ {
++ if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
++ break;
++ udelay (100);
++ }
++
++ if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
++ DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x. \n", gen_fifo_stat_reg);
++}
++
++/**
++ * Manage the DSI MIPI keyboard and display brightness.
++ */
++void mdfld_dsi_brightness_init (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ }
++
++ /* Set default display backlight value to 85% (0xd8)*/
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = 0xd8;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_display_brightness << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ /* Set minimum brightness setting of CABC function to 20% (0x33)*/
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = 0x33;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_cabc_min_bright << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_hysteresis (dev, pipe);
++
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_display_profile (dev, pipe);
++
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_kbbc_profile (dev, pipe);
++
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ mdfld_dbi_write_gamma_setting (dev, pipe);
++
++ /* Enable LABC */
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | AMBIENT_LIGHT_SENSE_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON | DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_ctrl_display << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ /* Enable CABC */
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ gen_ctrl_val = UI_IMAGE;
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_ctrl_cabc << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++/**
++ * Manage the mipi display brightness.
++ */
++void mdfld_dsi_brightness_control (struct drm_device *dev, int pipe, int level)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 gen_ctrl_val = 0;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++
++ if (pipe == 2)
++ {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ }
++
++ gen_ctrl_val = ((level * 0xff) / BRIGHTNESS_MAX_LEVEL) & 0xff;
++
++ PSB_DEBUG_ENTRY("pipe = %d, gen_ctrl_val = %d. \n", pipe, gen_ctrl_val);
++
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_display_brightness << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ /* Set display backlight value */
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++
++ /* Enable LABC */
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++ if (level == 0)
++ gen_ctrl_val = 0;
++ else
++ gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | AMBIENT_LIGHT_SENSE_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON | DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
++
++ gen_ctrl_val <<= MCS_PARAMETER_POS;
++ gen_ctrl_val |= write_ctrl_display << MCS_COMMANDS_POS;
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++ gen_ctrl_val |= MCS_SHORT_WRITE_1;
++ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
++}
++
++#endif /* MDFLD_JLIU7_LABC */
++
++/**
++ * Check and see if the DBI command buffer is empty and ready.
++ */
++void mdfld_dsi_dbi_CB_ready (struct drm_device *dev, u32 mipi_command_address_reg, u32 gen_fifo_stat_reg)
++{
++ u32 DBI_CB_time_out_count = 0;
++
++	/* Check MIPI Adapter command registers */
++ for (DBI_CB_time_out_count = 0; DBI_CB_time_out_count < DBI_CB_TIME_OUT; DBI_CB_time_out_count++)
++ {
++ if (!(REG_READ(mipi_command_address_reg) & BIT0))
++ break;
++ }
++
++ if (DBI_CB_time_out_count == DBI_CB_TIME_OUT)
++ DRM_ERROR("Timeout waiting for DBI COMMAND status. \n");
++
++ if (!gen_fifo_stat_reg)
++ return;
++
++ /* Check and make sure the MIPI DBI BUFFER is empty. */
++ for (DBI_CB_time_out_count = 0; DBI_CB_time_out_count < DBI_CB_TIME_OUT; DBI_CB_time_out_count++)
++ {
++ if (REG_READ(gen_fifo_stat_reg) & DBI_FIFO_EMPTY)
++ break;
++ }
++
++ if (DBI_CB_time_out_count == DBI_CB_TIME_OUT)
++ DRM_ERROR("Timeout waiting for DBI FIFO empty. \n");
++}
++
++#if MDFLD_JLIU7_DSR
++#if MDFLD_JLIU7_DPU
++/* Currently we only support 64x64 cursors */
++#define CURSOR_SIZE 64
++extern void psb_dpu_combine_rect (struct psb_drm_dpu_rect *d_r_1, struct psb_drm_dpu_rect *d_r_2, struct psb_drm_dpu_rect *d_r_result);
++#if MDFLD_JLIU7_DPU_2
++/**
++ * Get the damaged rect in DBI MIPI Frame Buffer.
++ */
++static int mdfld_dbi_damage_rect (struct drm_device *dev, struct psb_drm_dpu_rect *damage_rect_0, struct psb_drm_dpu_rect *damage_rect_2)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ u32 HactiveArea = dev_priv->HactiveArea;
++ u32 VactiveArea = dev_priv->VactiveArea;
++ u32 offset = dev_priv->offset_0;
++ u32 bpp = dev_priv->bpp_0;
++ int cursor_x0 = dev_priv->cursor_0_x0;
++ int cursor_y0 = dev_priv->cursor_0_y0;
++ int *cursor_x1 = &dev_priv->cursor_0_x1;
++ int *cursor_y1 = &dev_priv->cursor_0_y1;
++ int ret = -1;
++ u32 stride_reg = DSPASTRIDE;
++ u32 DSR_CURSOR = MDFLD_DSR_CURSOR_0;
++ u32 stride_value, pipe_src_value, dsp_size_value, dsp_offset_value;
++ struct psb_drm_dpu_rect damage_rect_2d_3d = dev_priv->damage_rect_2d_3d;
++ struct psb_drm_dpu_rect damage_rect_cursor = {0, 0, 0, 0};
++ struct psb_drm_dpu_rect damage_rect_tmp = {0, 0, 0, 0};
++ struct psb_drm_dpu_rect damage_rect_tmp2 = {0, 0, 0, 0};
++ struct psb_drm_dpu_rect *damage_rect = &damage_rect_tmp2;
++ uint32_t pos = CURAPOS;
++ uint32_t base = CURABASE;
++ uint32_t control = CURACNTR;
++ uint32_t temp = 0;
++ uint32_t addr = dev_priv->cursor_addr_0;
++ uint32_t cursor_cntr = dev_priv->cursor_cntr_0;
++
++#if 0
++ PSB_DEBUG_ENTRY("\n");
++#endif
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return ret;
++
++
++ /* Calculate the cursor damage rect */
++ if (dev_priv->dsr_fb_update & DSR_CURSOR) {
++ if (cursor_x0 < *cursor_x1) {
++ if (cursor_x0 < 0)
++ damage_rect_cursor.x = 0;
++ else
++ damage_rect_cursor.x = cursor_x0;
++
++ damage_rect_cursor.width = *cursor_x1 - cursor_x0 + CURSOR_SIZE;
++ } else {
++ if (*cursor_x1 < 0)
++ damage_rect_cursor.x = 0;
++ else
++ damage_rect_cursor.x = *cursor_x1;
++
++ damage_rect_cursor.width = cursor_x0 - *cursor_x1 + CURSOR_SIZE;
++ }
++
++ if (cursor_y0 < *cursor_y1) {
++ if (cursor_y0 < 0)
++ damage_rect_cursor.y = 0;
++ else
++ damage_rect_cursor.y = cursor_y0;
++
++ damage_rect_cursor.height = *cursor_y1 - cursor_y0 + CURSOR_SIZE;
++ } else {
++ if (*cursor_y1 < 0)
++ damage_rect_cursor.y = 0;
++ else
++ damage_rect_cursor.y = *cursor_y1;
++
++ damage_rect_cursor.height = cursor_y0 - *cursor_y1 + CURSOR_SIZE;
++ }
++
++ ret = 0;
++ } else {
++ addr = 0;
++ cursor_cntr = CURSOR_MODE_DISABLE;
++ }
++
++ if (!ret) {
++ if ((HactiveArea - damage_rect_cursor.x) < CURSOR_SIZE)
++ damage_rect_cursor.x = HactiveArea - CURSOR_SIZE;
++
++ if ((VactiveArea - damage_rect_cursor.y) < CURSOR_SIZE)
++ damage_rect_cursor.y = VactiveArea - CURSOR_SIZE;
++
++ if ((damage_rect_cursor.x + damage_rect_cursor.width) > HactiveArea)
++ damage_rect_cursor.width = HactiveArea - damage_rect_cursor.x;
++
++ if ((damage_rect_cursor.y + damage_rect_cursor.height) > VactiveArea)
++ damage_rect_cursor.height = VactiveArea - damage_rect_cursor.y;
++
++ damage_rect_tmp = damage_rect_cursor;
++ }
++
++ /* Calculate the 2d/3d damage rect */
++ if (dev_priv->dsr_fb_update & MDFLD_DSR_2D_3D) {
++
++ if (!ret)
++ psb_dpu_combine_rect (&damage_rect_cursor, &damage_rect_2d_3d, &damage_rect_tmp);
++ else
++ damage_rect_tmp = damage_rect_2d_3d;
++
++ ret = 0;
++ }
++
++ if (!ret)
++ {
++
++ if (damage_rect_tmp.x < HactiveArea)
++ damage_rect->x = damage_rect_tmp.x;
++ else
++ damage_rect->x = HactiveArea - 1;
++
++ if (damage_rect_tmp.y < VactiveArea)
++ damage_rect->y = damage_rect_tmp.y;
++ else
++ damage_rect->y = VactiveArea - 1;
++
++ if ((damage_rect_tmp.x + damage_rect_tmp.width) > HactiveArea)
++ damage_rect->width = HactiveArea - damage_rect_tmp.x;
++ else
++ damage_rect->width = damage_rect_tmp.width;
++
++ if ((damage_rect_tmp.y + damage_rect_tmp.height) > VactiveArea)
++ damage_rect->height = VactiveArea - damage_rect_tmp.y;
++ else
++ damage_rect->height = damage_rect_tmp.height;
++
++ *cursor_x1 = cursor_x0;
++ *cursor_y1 = cursor_y0;
++
++ if (cursor_x0 < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ cursor_x0 = -cursor_x0;
++ } else {
++ if (cursor_x0 > damage_rect->x)
++ cursor_x0 = cursor_x0 - damage_rect->x;
++ else
++ cursor_x0 = 0;
++ }
++
++ if (cursor_y0 < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ cursor_y0 = -cursor_y0;
++ } else {
++ if (cursor_y0 > damage_rect->y)
++ cursor_y0 = cursor_y0 - damage_rect->y;
++ else
++ cursor_y0 = 0;
++ }
++
++ temp |= ((cursor_x0 & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((cursor_y0 & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++ REG_WRITE(control, cursor_cntr);
++ REG_WRITE(pos, temp);
++ REG_WRITE(base, addr);
++
++ stride_value = REG_READ(stride_reg);
++ pipe_src_value = (damage_rect->height -1 ) | ((damage_rect->width -1) << 16);
++ dsp_size_value = ((damage_rect->height -1 ) << 16) | (damage_rect->width -1);
++ dsp_offset_value = offset + damage_rect->x * bpp + damage_rect->y * stride_value;
++
++ REG_WRITE(PIPEASRC, pipe_src_value);
++ REG_WRITE(DSPASIZE, dsp_size_value);
++ REG_WRITE(DSPALINOFF, dsp_offset_value);
++ REG_WRITE(DSPASURF, REG_READ(DSPASURF));
++
++ REG_WRITE(PIPECSRC, pipe_src_value);
++ REG_WRITE(DSPCSIZE, dsp_size_value);
++ REG_WRITE(DSPCLINOFF, dsp_offset_value);
++ REG_WRITE(DSPCSURF, REG_READ(DSPCSURF));
++
++ if (damage_rect_0)
++ *damage_rect_0 = *damage_rect;
++ if (damage_rect_2)
++ *damage_rect_2 = *damage_rect;
++
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return ret;
++}
++#else /* MDFLD_JLIU7_DPU_2 */
++/**
++ * Get the damaged rect in DBI MIPI Frame Buffer.
++ */
++static int mdfld_dbi_damage_rect (struct drm_device *dev, int pipe, struct psb_drm_dpu_rect *damage_rect)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ u32 HactiveArea = dev_priv->HactiveArea;
++ u32 VactiveArea = dev_priv->VactiveArea;
++ u32 offset = dev_priv->offset_0;
++ u32 bpp = dev_priv->bpp_0;
++ int cursor_x0 = dev_priv->cursor_0_x0;
++ int cursor_y0 = dev_priv->cursor_0_y0;
++ int *cursor_x1 = &dev_priv->cursor_0_x1;
++ int *cursor_y1 = &dev_priv->cursor_0_y1;
++ int ret = -1;
++ u32 dspsurf_reg = DSPASURF;
++ u32 stride_reg = DSPASTRIDE;
++ u32 pipe_src_reg = PIPEASRC;
++ u32 dsp_size_reg = DSPASIZE;
++ u32 dsp_offset_reg = DSPALINOFF;
++ u32 DSR_CURSOR = MDFLD_DSR_CURSOR_0;
++ u32 stride_value, pipe_src_value, dsp_size_value, dsp_offset_value;
++ struct psb_drm_dpu_rect damage_rect_2d_3d = dev_priv->damage_rect_2d_3d;
++ struct psb_drm_dpu_rect damage_rect_cursor = {0, 0, 0, 0};
++ struct psb_drm_dpu_rect damage_rect_tmp = {0, 0, 0, 0};
++ uint32_t pos = CURAPOS;
++ uint32_t base = CURABASE;
++ uint32_t control = CURACNTR;
++ uint32_t temp = 0;
++ uint32_t addr = dev_priv->cursor_addr_0;
++ uint32_t cursor_cntr = dev_priv->cursor_cntr_0;
++ bool *b_cursor_update = &dev_priv->b_cursor_update_0;
++
++#if 0
++ PSB_DEBUG_ENTRY("pipe = %d \n", pipe);
++#endif
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 2:
++ HactiveArea = dev_priv->HactiveArea2;
++ VactiveArea = dev_priv->VactiveArea2;
++ offset = dev_priv->offset_2;
++ bpp = dev_priv->bpp_2;
++ cursor_x0 = dev_priv->cursor_2_x0;
++ cursor_y0 = dev_priv->cursor_2_y0;
++ cursor_x1 = &dev_priv->cursor_2_x1;
++ cursor_y1 = &dev_priv->cursor_2_y1;
++ dspsurf_reg = DSPCSURF;
++ stride_reg = DSPCSTRIDE;
++ pipe_src_reg = PIPECSRC;
++ dsp_size_reg = DSPCSIZE;
++ dsp_offset_reg = DSPCLINOFF;
++ DSR_CURSOR = MDFLD_DSR_CURSOR_2;
++ pos = CURCPOS;
++ base = CURCBASE;
++ control = CURCCNTR;
++ addr = dev_priv->cursor_addr_2;
++ cursor_cntr = dev_priv->cursor_cntr_2;
++ b_cursor_update = &dev_priv->b_cursor_update_2;
++ break;
++ default:
++ DRM_ERROR("%s, Illegal Pipe Number. \n",__FUNCTION__);
++ return ret;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return ret;
++
++
++ /* Calculate the cursor damage rect */
++ if (dev_priv->dsr_fb_update & DSR_CURSOR) {
++ if (cursor_x0 < *cursor_x1) {
++ if (cursor_x0 < 0)
++ damage_rect_cursor.x = 0;
++ else
++ damage_rect_cursor.x = cursor_x0;
++
++ damage_rect_cursor.width = *cursor_x1 - cursor_x0 + CURSOR_SIZE;
++ } else {
++ if (*cursor_x1 < 0)
++ damage_rect_cursor.x = 0;
++ else
++ damage_rect_cursor.x = *cursor_x1;
++
++ damage_rect_cursor.width = cursor_x0 - *cursor_x1 + CURSOR_SIZE;
++ }
++
++ if (cursor_y0 < *cursor_y1) {
++ if (cursor_y0 < 0)
++ damage_rect_cursor.y = 0;
++ else
++ damage_rect_cursor.y = cursor_y0;
++
++ damage_rect_cursor.height = *cursor_y1 - cursor_y0 + CURSOR_SIZE;
++ } else {
++ if (*cursor_y1 < 0)
++ damage_rect_cursor.y = 0;
++ else
++ damage_rect_cursor.y = *cursor_y1;
++
++ damage_rect_cursor.height = cursor_y0 - *cursor_y1 + CURSOR_SIZE;
++ }
++
++ *b_cursor_update = true;
++ ret = 0;
++ } else if (*b_cursor_update) {
++
++ if (*cursor_x1 < 0)
++ damage_rect_cursor.x = 0;
++ else
++ damage_rect_cursor.x = *cursor_x1;
++
++ damage_rect_cursor.width = CURSOR_SIZE;
++
++ if (*cursor_y1 < 0)
++ damage_rect_cursor.y = 0;
++ else
++ damage_rect_cursor.y = *cursor_y1;
++
++ damage_rect_cursor.height = CURSOR_SIZE;
++
++ cursor_x0 = *cursor_x1;
++ cursor_y0 = *cursor_y1;
++ cursor_cntr = 0;
++ ret = 0;
++ }
++
++ if (!ret) {
++ if ((HactiveArea - damage_rect_cursor.x) < CURSOR_SIZE)
++ damage_rect_cursor.x = HactiveArea - CURSOR_SIZE;
++
++ if ((VactiveArea - damage_rect_cursor.y) < CURSOR_SIZE)
++ damage_rect_cursor.y = VactiveArea - CURSOR_SIZE;
++
++ if ((damage_rect_cursor.x + damage_rect_cursor.width) > HactiveArea)
++ damage_rect_cursor.width = HactiveArea - damage_rect_cursor.x;
++
++ if ((damage_rect_cursor.y + damage_rect_cursor.height) > VactiveArea)
++ damage_rect_cursor.height = VactiveArea - damage_rect_cursor.y;
++
++ damage_rect_tmp = damage_rect_cursor;
++ }
++
++ /* Calculate the 2d/3d damage rect */
++ if (dev_priv->dsr_fb_update & MDFLD_DSR_2D_3D) {
++
++ if (!ret)
++ psb_dpu_combine_rect (&damage_rect_cursor, &damage_rect_2d_3d, &damage_rect_tmp);
++ else {
++ cursor_cntr = 0;
++ damage_rect_tmp = damage_rect_2d_3d;
++ }
++
++ ret = 0;
++ }
++
++ if (!ret)
++ {
++ if (damage_rect_tmp.x < HactiveArea)
++ damage_rect->x = damage_rect_tmp.x;
++ else
++ ret = -1;
++
++ if (damage_rect_tmp.y < VactiveArea)
++ damage_rect->y = damage_rect_tmp.y;
++ else
++ ret = -1;
++
++ if (!ret) {
++ if ((damage_rect_tmp.x + damage_rect_tmp.width) > HactiveArea)
++ damage_rect->width = HactiveArea - damage_rect_tmp.x;
++ else
++ damage_rect->width = damage_rect_tmp.width;
++
++
++ if ((damage_rect_tmp.y + damage_rect_tmp.height) > VactiveArea)
++ damage_rect->height = VactiveArea - damage_rect_tmp.y;
++ else
++ damage_rect->height = damage_rect_tmp.height;
++ }
++ }
++
++ if (!ret) {
++ if ((dev_priv->dsr_fb_update & DSR_CURSOR) || *b_cursor_update) {
++ *b_cursor_update = false;
++ *cursor_x1 = cursor_x0;
++ *cursor_y1 = cursor_y0;
++
++ if (cursor_x0 < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ cursor_x0 = -cursor_x0;
++ } else {
++ if (cursor_x0 > damage_rect->x)
++ cursor_x0 = cursor_x0 - damage_rect->x;
++ else
++ cursor_x0 = 0;
++ }
++
++ if (cursor_y0 < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ cursor_y0 = -cursor_y0;
++ } else {
++ if (cursor_y0 > damage_rect->y)
++ cursor_y0 = cursor_y0 - damage_rect->y;
++ else
++ cursor_y0 = 0;
++ }
++
++ temp |= ((cursor_x0 & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((cursor_y0 & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++if (pipe == 0) {
++ REG_WRITE(control, cursor_cntr);
++ REG_WRITE(pos, temp);
++ REG_WRITE(base, addr);
++}
++ }
++
++ stride_value = REG_READ(stride_reg);
++ pipe_src_value = (damage_rect->height -1 ) | ((damage_rect->width -1) << 16);
++ dsp_size_value = ((damage_rect->height -1 ) << 16) | (damage_rect->width -1);
++ dsp_offset_value = offset + damage_rect->x * bpp + damage_rect->y * stride_value;
++
++//DRM_INFO("damage_rect, x_n_2 = %d, y_n_2 = %d, width_n_2 = %d, height_n_2 = %d. \n", damage_rect->x, damage_rect->y, damage_rect->width, damage_rect->height);
++//DRM_INFO ("%s, p_src = 0x%x, d_size = 0x%x, off = 0x%x. \n", __FUNCTION__, pipe_src_value, dsp_size_value, dsp_offset_value);
++ REG_WRITE(pipe_src_reg, pipe_src_value);
++ REG_WRITE(dsp_size_reg, dsp_size_value);
++ REG_WRITE(dsp_offset_reg, dsp_offset_value);
++ REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return ret;
++}
++#endif /* MDFLD_JLIU7_DPU_2 */
++#endif /* MDFLD_JLIU7_DPU */
++
++#if MDFLD_JLIU7_DPU_2
++/**
++ * Update the DBI MIPI Panel Frame Buffer.
++ *
++ * Builds and submits the DBI command sequence that refreshes the panel
++ * frame buffer.  With the damage-rect DPU path enabled this sends
++ * set_column_address + set_page_addr limited to the damaged region
++ * followed by write_mem_start; otherwise a full-frame write_mem_start.
++ * Commands are only submitted when the MIPI command DMA is idle (BIT0
++ * clear in MIPI_COMMAND_ADDRESS_REG), the DBI FIFO is empty, and the
++ * corresponding pipe is enabled.  Display island power is held via
++ * ospm_power_using_hw_begin()/..._end() around all register access.
++ */
++void mdfld_dbi_update_fb (struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ u8 *p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer;
++ u8 *p_DBI_commandBuffer2 = dev_priv->p_DBI_commandBuffer2;
++ u32 DBI_CB_phys = dev_priv->DBI_CB_phys;
++ u32 DBI_CB_phys2 = dev_priv->DBI_CB_phys2;
++ u32 *pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer);
++ bool *update_done = &dev_priv->dsr_fb_update_done;
++#if MDFLD_JLIU7_DPU
++ struct psb_drm_dpu_rect damage_rect = {0, 0, 864, 480};
++ struct psb_drm_dpu_rect damage_rect2 = {0, 0, 864, 480};
++#endif /* MDFLD_JLIU7_DPU */
++#if 0 /* MDFLD_PO_JLIU7 */
++ static u32 count_te = 0;
++ static u32 count_update = 0;
++#endif /* MDFLD_PO_JLIU7 */
++
++#if 0
++ PSB_DEBUG_ENTRY("\n");
++#endif
++
++ /* FIXME_JLIU7 MDFLD_PO */
++ /* disable all the MIPI interrupts at the beginning. */
++ /* enable all the MIPI interrupts at the end. */
++ /* Make sure dbi command & data buffer are empty */
++ if (*pDBI_CB_pointer != 0)
++ {
++ DRM_ERROR("mdfld_dbi_update_fb, dbi command buffer was interrupted before finished. \n");
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return;
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_te ++;
++#endif /* MDFLD_PO_JLIU7 */
++ if ((dev_priv->dbi_panel_on) && (dev_priv->dbi_panel_on2)) {
++ if ((!(REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0))
++ && (REG_READ(GEN_FIFO_STAT_REG) & DBI_FIFO_EMPTY)
++ && (REG_READ(PIPEACONF) & PIPEACONF_ENABLE)
++ && (!(REG_READ(MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET) & BIT0))
++ && (REG_READ(GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET) & DBI_FIFO_EMPTY)
++ && (REG_READ(PIPECCONF) & PIPEACONF_ENABLE)) {
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++#if MDFLD_JLIU7_DPU
++ if (dev_priv->b_dpu_enable) {
++ if (!(mdfld_dbi_damage_rect (dev, &damage_rect, &damage_rect2))) {
++ /* set_column_address */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_column_address;
++
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.x >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.x;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1);
++
++ *pDBI_CB_pointer = 8;
++ /* set_page_addr */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_page_addr;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.y >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.y;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1);
++
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_column_address;
++
++ *pDBI_CB_pointer = 0;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = set_column_address;
++
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect2.x >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect2.x;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect2.x + damage_rect2.width -1) >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect2.x + damage_rect2.width -1);
++
++ *pDBI_CB_pointer = 8;
++ /* set_page_addr */
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = set_page_addr;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect2.y >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect2.y;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect2.y + damage_rect2.height -1) >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect2.y + damage_rect2.height -1);
++
++ *pDBI_CB_pointer = 16;
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + *pDBI_CB_pointer) = write_mem_start;
++ *(p_DBI_commandBuffer2 + *pDBI_CB_pointer) = write_mem_start;
++
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 0x010505);
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET, 0x010505);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, DBI_CB_phys | BIT0 |BIT1);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET, DBI_CB_phys2 | BIT0 |BIT1);
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++ }
++ } else {
++#endif /* MDFLD_JLIU7_DPU */
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + *pDBI_CB_pointer) = write_mem_start;
++ *(p_DBI_commandBuffer2 + *pDBI_CB_pointer) = write_mem_start;
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 0x01);
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET, 0x01);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, DBI_CB_phys | BIT0 |BIT1);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET, DBI_CB_phys2 | BIT0 |BIT1);
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++#if MDFLD_JLIU7_DPU
++ }
++#endif /* MDFLD_JLIU7_DPU */
++
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_update ++;
++#endif /* MDFLD_PO_JLIU7 */
++ }
++ } else if (dev_priv->dbi_panel_on) {
++ if ((!(REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0))
++ && (REG_READ(GEN_FIFO_STAT_REG) & DBI_FIFO_EMPTY)
++ && (REG_READ(PIPEACONF) & PIPEACONF_ENABLE)) {
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++#if MDFLD_JLIU7_DPU
++ if (dev_priv->b_dpu_enable) {
++ if (!(mdfld_dbi_damage_rect (dev, &damage_rect, 0))) {
++ /* set_column_address */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_column_address;
++
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.x >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.x;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1);
++
++ *pDBI_CB_pointer = 8;
++ /* set_page_addr */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_page_addr;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.y >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.y;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1);
++
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_column_address;
++
++ *pDBI_CB_pointer = 16;
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + *pDBI_CB_pointer) = write_mem_start;
++
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 0x010505);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, DBI_CB_phys | BIT0 |BIT1);
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++ }
++
++ } else {
++#endif /* MDFLD_JLIU7_DPU */
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + *pDBI_CB_pointer) = write_mem_start;
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 0x01);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, DBI_CB_phys | BIT0 |BIT1);
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++#if MDFLD_JLIU7_DPU
++ }
++#endif /* MDFLD_JLIU7_DPU */
++ }
++ } else if (dev_priv->dbi_panel_on2) {
++ if ((!(REG_READ(MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET) & BIT0))
++ && (REG_READ(GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET) & DBI_FIFO_EMPTY)
++ && (REG_READ(PIPECCONF) & PIPEACONF_ENABLE)) {
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++#if MDFLD_JLIU7_DPU
++ if (dev_priv->b_dpu_enable) {
++ if (!(mdfld_dbi_damage_rect (dev, &damage_rect, 0))) {
++ /* set_column_address */
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = set_column_address;
++
++ /* FIX: serialize damage_rect (the rect actually computed by
++ * mdfld_dbi_damage_rect above); damage_rect2 was never filled
++ * in this branch and still held full-screen defaults. */
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect.x >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect.x;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1) >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1);
++
++ *pDBI_CB_pointer = 8;
++ /* set_page_addr */
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = set_page_addr;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect.y >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = damage_rect.y;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1) >> 8;
++ *(p_DBI_commandBuffer2 + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1);
++
++ *pDBI_CB_pointer = 16;
++ /* write_mem_start */
++ *(p_DBI_commandBuffer2 + *pDBI_CB_pointer) = write_mem_start;
++
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET, 0x010505);
++ /* FIX: submit the pipe-C buffer's physical address (DBI_CB_phys2),
++ * matching the non-DPU path below; DBI_CB_phys is pipe A's buffer. */
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET, DBI_CB_phys2 | BIT0 |BIT1);
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++ }
++ } else {
++#endif /* MDFLD_JLIU7_DPU */
++ /* write_mem_start */
++ *(p_DBI_commandBuffer2 + *pDBI_CB_pointer) = write_mem_start;
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET, 0x01);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET, DBI_CB_phys2 | BIT0 |BIT1);
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++/* FIX: guard the closing brace like the sibling branches; without the
++ * guard the braces are unbalanced when MDFLD_JLIU7_DPU is 0. */
++#if MDFLD_JLIU7_DPU
++ }
++#endif /* MDFLD_JLIU7_DPU */
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_update ++;
++#endif /* MDFLD_PO_JLIU7 */
++ }
++ }
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ if (((count_te % 200) == 0) || ((count_te % 201) == 0)) {
++ DRM_INFO("count_te = 0x%x, count_update = 0x%x, pipe = 0x%x \n", count_te, count_update, pipe);
++ DRM_INFO("dbi_panel_on = 0x%x, dbi_panel_on2 = 0x%x. \n", dev_priv->dbi_panel_on, dev_priv->dbi_panel_on2);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++#else /* MDFLD_JLIU7_DPU_2 */
++/**
++ * Update the DBI MIPI Panel Frame Buffer (per-pipe variant).
++ *
++ * Same job as the MDFLD_JLIU7_DPU_2 variant above, but parameterized on
++ * @pipe: pipe 0 uses the MIPI-A register block and command buffer, pipe 2
++ * switches every register offset, command buffer, and done-flag to the
++ * MIPI-C (MIPIC_REG_OFFSET) equivalents.  Any other pipe is rejected.
++ * Commands are submitted only when the command DMA is idle, the DBI FIFO
++ * is empty and the pipe is enabled.
++ */
++void mdfld_dbi_update_fb (struct drm_device *dev, int pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ u8 *p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer;
++ u32 DBI_CB_phys = dev_priv->DBI_CB_phys;
++ u32 *pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer);
++ u32 mipi_command_length_reg = MIPI_COMMAND_LENGTH_REG;
++ u32 mipi_command_address_reg = MIPI_COMMAND_ADDRESS_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 pipeconf_reg = PIPEACONF;
++ bool *update_done = &dev_priv->dsr_fb_update_done_0;
++#if MDFLD_JLIU7_DPU
++ struct psb_drm_dpu_rect damage_rect = {0, 0, 864, 480};
++#endif /* MDFLD_JLIU7_DPU */
++#if 0 /* MDFLD_PO_JLIU7 */
++ static u32 count_te = 0;
++ static u32 count_update = 0;
++#endif /* MDFLD_PO_JLIU7 */
++
++#if 0
++ PSB_DEBUG_ENTRY("pipe = %d \n", pipe);
++#endif
++
++ /* Re-point all buffers/registers at the MIPI-C block for pipe 2. */
++ switch (pipe) {
++ case 0:
++ break;
++ case 2:
++ p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer2;
++ DBI_CB_phys = dev_priv->DBI_CB_phys2;
++ pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer2);
++
++ mipi_command_length_reg = MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET;
++ mipi_command_address_reg = MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ pipeconf_reg = PIPECCONF;
++ update_done = &dev_priv->dsr_fb_update_done_2;
++ break;
++ default:
++ DRM_ERROR("mdfld_dbi_update_fb, Illegal Pipe Number. \n");
++ return;
++ }
++
++ /* FIXME_JLIU7 MDFLD_PO */
++ /* disable all the MIPI interrupts at the beginning. */
++ /* enable all the MIPI interrupts at the end. */
++ /* Make sure dbi command & data buffer are empty */
++ /* A non-zero write pointer means a previous submission never completed. */
++ if (*pDBI_CB_pointer != 0)
++ {
++ DRM_ERROR("mdfld_dbi_update_fb, dbi command buffer was interrupted before finished. pipe = %d \n", pipe);
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return;
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_te ++;
++#endif /* MDFLD_PO_JLIU7 */
++ /* Submit only when command DMA is idle, DBI FIFO empty, pipe enabled. */
++ if ((!(REG_READ(mipi_command_address_reg) & BIT0))
++ && (REG_READ(gen_fifo_stat_reg) & DBI_FIFO_EMPTY)
++ && (REG_READ(pipeconf_reg) & PIPEACONF_ENABLE)) {
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++#if MDFLD_JLIU7_DPU
++ if (dev_priv->b_dpu_enable) {
++ /* Damage-rect path: restrict the refresh to the dirty region. */
++ if (!(mdfld_dbi_damage_rect (dev, pipe, &damage_rect))) {
++// DRM_ERROR("mdfld_dbi_update_fb, damaged rect update pipe = %d \n", pipe);
++ /* set_column_address */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_column_address;
++
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.x >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.x;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.x + damage_rect.width -1);
++
++ *pDBI_CB_pointer = 8;
++ /* set_page_addr */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_page_addr;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.y >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = damage_rect.y;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (damage_rect.y + damage_rect.height -1);
++
++ *pDBI_CB_pointer = 16;
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = write_mem_start;
++
++ REG_WRITE(mipi_command_length_reg, 0x010505);
++
++ /* BIT0 kicks the command DMA; BIT1 selects command mode. */
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0 |BIT1);
++
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++ }
++
++ } else {
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = write_mem_start;
++ REG_WRITE(mipi_command_length_reg, 1);
++
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0 |BIT1);
++
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++ }
++#else /* MDFLD_JLIU7_DPU */
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = write_mem_start;
++
++ REG_WRITE(mipi_command_length_reg, 1);
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0 |BIT1);
++
++ udelay(3000);
++ //udelay(5000);
++ *pDBI_CB_pointer = 0;
++ *update_done = true;
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_update ++;
++#endif /* MDFLD_PO_JLIU7 */
++#endif /* MDFLD_JLIU7_DPU */
++ }
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ if (((count_te % 200) == 0) || ((count_te % 201) == 0)) {
++ DRM_INFO("count_te = 0x%x, count_update = 0x%x, pipe = 0x%x \n", count_te, count_update, pipe);
++ DRM_INFO("dbi_panel_on = 0x%x, dbi_panel_on2 = 0x%x. \n", dev_priv->dbi_panel_on, dev_priv->dbi_panel_on2);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++#endif /* MDFLD_JLIU7_DPU_2 */
++#endif /* MDFLD_JLIU7_DPU_2 */
++#endif /*FIXME JLIU */
++
++#if MDFLD_JLIU7_DSR
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * @pipe 0 drives the MIPI-A registers, @pipe 2 the MIPI-C ones; other
++ * pipes are rejected.  @enabled true brings up DPLL -> pipe -> plane in
++ * that order; false tears down plane -> pipe -> DPLL (the DPLL is only
++ * gated once neither PIPEACONF nor PIPECCONF is enabled).
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++void mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ int dpll_reg = MRST_DPLL_A;
++ int dspcntr_reg = DSPACNTR;
++ int dspbase_reg = MRST_DSPABASE;
++ int pipeconf_reg = PIPEACONF;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 pipeconf = dev_priv->pipeconf;
++ u32 dspcntr = dev_priv->dspcntr;
++ u32 temp;
++ int timeout = 0;
++#if 0 /* MDFLD_PO_JLIU7 */
++ static u32 count_te1 = 0;
++ static u32 count_te2 = 0;
++#endif /* MDFLD_PO_JLIU7 */
++
++#if 0
++ PSB_DEBUG_ENTRY("pipe = %d \n", pipe);
++#endif
++
++ switch (pipe) {
++ case 0:
++ break;
++ case 2:
++ /* NOTE(review): dpll_reg is re-assigned to its default MRST_DPLL_A
++ * here — presumably both MIPI pipes share DPLL A; confirm. */
++ dpll_reg = MRST_DPLL_A;
++ dspcntr_reg = DSPCCNTR;
++ dspbase_reg = MDFLD_DSPCBASE;
++ pipeconf_reg = PIPECCONF;
++ pipeconf = dev_priv->pipeconf2;
++ dspcntr = dev_priv->dspcntr2;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ break;
++ case 1:
++ case 3:
++ default:
++ DRM_ERROR("Illegal Pipe Number. \n");
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (enabled) {
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_te1 ++;
++ if ((count_te1 % 40) == 0) {
++ DRM_ERROR("%s, enabled, b_dsr = 0x%x. \n", __FUNCTION__, dev_priv->b_dsr);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ /* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
++ if (temp & MDFLD_PWR_GATE_EN) {
++ temp &= ~MDFLD_PWR_GATE_EN;
++ REG_WRITE(dpll_reg, temp);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++ }
++
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* FIXME_MDFLD PO - change 500 to 1 after PO */
++ udelay(500);
++
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++
++ /* wait for DSI PLL to lock */
++ /* ~20000 * 150us = up to ~3s poll for the lock bit */
++ while ((timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
++ udelay(150);
++ timeout ++;
++ }
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0) {
++ REG_WRITE(pipeconf_reg, pipeconf);
++
++ /* Wait for for the pipe enable to take effect. */
++ mdfldWaitForPipeEnable(dev, pipe);
++ }
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++#if 0 /* MDFLD_JLIU7 DSR */
++ /* Restore display pipe & dphy */
++ /* Restore device states */
++
++ /* Set DSI host into Utra Low Power State */
++ temp = REG_READ(device_ready_reg);
++ temp &= ~ULPS_MASK;
++ temp |= ENTERING_ULPS;
++ REG_WRITE(device_ready_reg, temp);
++
++ temp = REG_READ(mipi_reg);
++ REG_WRITE(mipi_reg, temp);
++
++ /* Set DSI host to exit from Utra Low Power State */
++ temp = REG_READ(device_ready_reg);
++ temp &= ~ULPS_MASK;
++ temp |= EXITING_ULPS;
++ REG_WRITE(device_ready_reg, temp);
++
++#endif /* MDFLD_JLIU7 DSR */
++ } else {
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_te2 ++;
++ if ((count_te2 % 40) == 0) {
++ DRM_ERROR("%s, disabled, b_dsr = 0x%x. \n", __FUNCTION__, dev_priv->b_dsr);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++ /* Drain the HS FIFOs before powering anything down. */
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++#if 0 /* MDFLD_JLIU7 DSR */
++ /* Set DSI host into Utra Low Power State */
++ temp = REG_READ(device_ready_reg);
++ temp &= ~ULPS_MASK;
++ temp |= ENTERING_ULPS;
++ REG_WRITE(device_ready_reg, temp);
++
++ temp = REG_READ(mipi_reg);
++ temp &= ~PASS_FROM_SPHY_TO_AFE;
++ REG_WRITE(mipi_reg, temp);
++
++ /* Saving device states */
++ /* Power gate display pipe & dphy */
++#endif /* MDFLD_JLIU7 DSR */
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ /* FIXME_JLIU7 MDFLD_PO revisit */
++ /* Wait for vblank for the disable to take effect */
++// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ temp &= ~PIPEACONF_ENABLE;
++ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
++ REG_WRITE(pipeconf_reg, temp);
++// REG_WRITE(pipeconf_reg, 0);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe disable to take effect. */
++ mdfldWaitForPipeDisable(dev, pipe);
++ }
++
++ /* Only gate the VCO once both pipes are disabled (it is shared). */
++ temp = REG_READ(dpll_reg);
++ if (temp & DPLL_VCO_ENABLE) {
++ if (!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE)) {
++ temp &= ~(DPLL_VCO_ENABLE);
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to turn off. */
++ /* FIXME_MDFLD PO may need more delay */
++ udelay(500);
++ }
++ }
++
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Enter DSR
++ *
++ * Enters Display Self Refresh: if not already in DSR, waits for each
++ * active DBI panel's command buffer to drain (mdfld_dsi_dbi_CB_ready)
++ * and powers its pipe/plane down via mdfld_dbi_dpms(..., false).
++ * Sets dev_priv->b_dsr before touching hardware so re-entry is a no-op.
++ */
++void mdfld_dbi_enter_dsr (struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++#if 0 /* MDFLD_PO_JLIU7 */
++ static u32 count_te1 = 0;
++#endif /* MDFLD_PO_JLIU7 */
++
++ PSB_DEBUG_ENTRY(" \n");
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_te1 ++;
++#endif /* MDFLD_PO_JLIU7 */
++
++// mutex_lock(&dev_priv->dsr_mutex);
++
++ if (!dev_priv->b_dsr)
++ {
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ return;
++
++ dev_priv->b_dsr = true;
++
++ if (dev_priv->dbi_panel_on)
++ {
++ mdfld_dsi_dbi_CB_ready (dev, MIPI_COMMAND_ADDRESS_REG, GEN_FIFO_STAT_REG);
++ mdfld_dbi_dpms (dev, 0, false);
++ }
++
++ if (dev_priv->dbi_panel_on2)
++ {
++ mdfld_dsi_dbi_CB_ready (dev, MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET, GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET);
++ mdfld_dbi_dpms (dev, 2, false);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++#if 0 /* MDFLD_PO_JLIU7 */
++ if ((count_te1 % 200) == 0) {
++ DRM_ERROR("%s, count_te1 = 0x%x, b_dsr = 0x%x. \n", __FUNCTION__, count_te1,dev_priv->b_dsr);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++
++// mutex_unlock(&dev_priv->dsr_mutex);
++}
++
++/**
++ * Exit from DSR
++ *
++ * Leaves Display Self Refresh: re-enables the pipes of all active DBI
++ * panels via mdfld_dbi_dpms(..., true), records @update_src in
++ * dev_priv->dsr_fb_update so the next TE refresh knows what changed,
++ * and (re)arms the DSR idle timer to fall back into DSR after
++ * MDFLD_DSR_DELAY.  Safe to call when already out of DSR.
++ */
++void mdfld_dbi_exit_dsr (struct drm_device *dev, u32 update_src)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct timer_list * dsr_timer = &dev_priv->dsr_timer;
++ unsigned long irq_flags;
++#if 0 /* MDFLD_PO_JLIU7 */
++ static u32 count_te2 = 0;
++#endif /* MDFLD_PO_JLIU7 */
++
++ PSB_DEBUG_ENTRY("update_src = 0x%x. \n", update_src);
++
++#if 0 /* MDFLD_PO_JLIU7 */
++ count_te2 ++;
++#endif /* MDFLD_PO_JLIU7 */
++
++// mutex_lock(&dev_priv->dsr_mutex);
++
++ if (dev_priv->b_dsr)
++ {
++ if (dev_priv->dbi_panel_on)
++ mdfld_dbi_dpms (dev, 0, true);
++
++ if (dev_priv->dbi_panel_on2)
++ mdfld_dbi_dpms (dev, 2, true);
++
++ dev_priv->b_dsr = false;
++ }
++
++// mutex_unlock(&dev_priv->dsr_mutex);
++
++ /* Accumulate the update source so the TE handler refreshes it. */
++ dev_priv->dsr_fb_update |= update_src;
++#if 0 /* MDFLD_PO_JLIU7 */
++ if ((count_te2 % 200) == 0) {
++ DRM_ERROR("%s, count_te2 = 0x%x, b_dsr = 0x%x. \n", __FUNCTION__, count_te2,dev_priv->b_dsr);
++ }
++#endif /* MDFLD_PO_JLIU7 */
++
++ /* Arm the DSR fallback timer only if it is not already pending. */
++ spin_lock_irqsave(&dev_priv->dsr_lock, irq_flags);
++ if(!timer_pending(dsr_timer)){
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dsr_timer);
++ }
++ spin_unlock_irqrestore(&dev_priv->dsr_lock, irq_flags);
++}
++#endif /*FIXME JLIU */
++
++/*
++ * Poll until the HS (high-speed) generic data FIFO for @pipe is no
++ * longer full, so the caller may safely write the next payload word.
++ * Pipe 2 uses the MIPI-C register block.  Gives up after ~2 seconds
++ * (20000 * 100us) and logs instead of failing hard.
++ */
++static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
++{
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ int timeout = 0;
++
++ if (pipe == 2)
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) &
++ HS_DATA_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
++}
++
++/*
++ * Poll until the HS (high-speed) generic control/command FIFO for @pipe
++ * is no longer full, so the caller may safely queue the next command
++ * header.  Pipe 2 uses the MIPI-C register block.  Gives up after
++ * ~2 seconds (20000 * 100us) and logs instead of failing hard.
++ */
++static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
++{
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ int timeout = 0;
++
++ if (pipe == 2)
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) &
++ HS_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mdfld_init_TPO_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++ *
++ * Sends the TPO panel's vendor initialization sequence as a series of
++ * DCS generic long writes over the HS generic FIFOs.  Each command is
++ * queued as little-endian 32-bit payload words into gen_data_reg, then
++ * fired by writing the header (virtual-channel bits plus byte count in
++ * WORD_COUNTS_POS) to gen_ctrl_reg.  Pipe 2 targets the MIPI-C block.
++ * The leading byte of each first payload word is the DCS/vendor opcode
++ * named in the comment above it (e.g. 0x00008036 = command 0x36).
++\* ************************************************************************* */
++void mdfld_init_TPO_MIPI(struct drm_device *dev, u32 pipe)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 dcsChannelNumber = dev_priv->channelNumber;
++ u32 gen_data_reg = HS_GEN_DATA_REG;
++ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 gen_ctrl_val = GEN_LONG_WRITE;
++
++ DRM_INFO("Enter mrst init TPO MIPI display.\n");
++
++ if (pipe == 2) {
++ dcsChannelNumber = dev_priv->channelNumber2;
++ gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
++ gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
++ }
++
++ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++
++ /* Flip page order */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00008036);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
++
++ /* 0xF0 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x005a5af0);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++ /* Write protection key */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x005a5af1);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++ /* 0xFC */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x005a5afc);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++ /* 0xB7 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x770000b7);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00000044);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
++
++ /* 0xB6 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x000a0ab6);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++ /* 0xF2 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x081010f2);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x4a070708);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x000000c5);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
++
++ /* 0xF8 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x024003f8);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x01030a04);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x0e020220);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00000004);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
++
++ /* 0xE2 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x398fc3e2);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x0000916f);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
++
++ /* 0xB0 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x000000b0);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
++
++ /* 0xF4 */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x240242f4);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x78ee2002);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x2a071050);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x507fee10);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x10300710);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
++
++ /* 0xBA */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x19fe07ba);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x101c0a31);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00000010);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
++
++ /* 0xBB */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x28ff07bb);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x24280a31);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00000034);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
++
++ /* 0xFB */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x535d05fb);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x1b1a2130);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x221e180e);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x131d2120);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x535d0508);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x1c1a2131);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x231f160d);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x111b2220);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x535c2008);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x1f1d2433);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x2c251a10);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x2c34372d);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00000023);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
++
++ /* 0xFA */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x525c0bfa);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x1c1c232f);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x2623190e);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x18212625);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x545d0d0e);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x1e1d2333);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x26231a10);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x1a222725);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x545d280f);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x21202635);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x31292013);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x31393d33);
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x00000029);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
++
++ /* Set DM */
++ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++ REG_WRITE(gen_data_reg, 0x000100f7);
++ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++}
++
++/**
++ * Sets the power state for the dpi panel.
++ */
++static void mdfld_dpi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++#if 0 /*FIXME JLIU7 */
++ u32 pp_status;
++#endif /*FIXME JLIU7 */
++ bool *pdpi_panel_on = &(dev_priv->dpi_panel_on);
++ u32 dpi_control_reg = DPI_CONTROL_REG;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 pwm_clt_reg = BLC_PWM_CTL;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 intr_stat_reg = INTR_STAT_REG;
++ int pipe = 0;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ struct backlight_device bd;
++#endif
++
++ PSB_DEBUG_ENTRY("on = %d, output_type = 0x%x \n", on, output->type);
++
++ if (output->type == INTEL_OUTPUT_MIPI2) {
++ if (on) {
++ dev_priv->dual_mipi = true;
++ } else
++ dev_priv->dual_mipi = false;
++
++ pdpi_panel_on = &(dev_priv->dpi_panel_on2);
++ dpi_control_reg = DPI_CONTROL_REG + MIPIC_REG_OFFSET;
++ pipeconf_reg = PIPECCONF;
++ pwm_clt_reg = BLC_PWM_CTL_C;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ intr_stat_reg = INTR_STAT_REG + MIPIC_REG_OFFSET;
++ pipe = 2;
++ } else if (output->type == INTEL_OUTPUT_MIPI) {
++ if (!on) {
++ dev_priv->dual_mipi = false;
++ }
++ }
++
++ PSB_DEBUG_ENTRY("dpi_panel_on = %d. \n", *pdpi_panel_on);
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ if (!(*pdpi_panel_on))
++ {
++ /* wait for DPI FIFO to clear */
++ while ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY) {
++ /* Do Nothing Here */
++ /* This should make checkpatch work */
++ }
++
++ if ((REG_READ(intr_stat_reg) & SPL_PKT_SENT)) {
++ REG_WRITE(intr_stat_reg, SPL_PKT_SENT);
++ }
++
++ REG_WRITE(dpi_control_reg, DPI_TURN_ON);
++ /* make sure Turn On Packet Sent */
++ while (!(REG_READ(intr_stat_reg) & SPL_PKT_SENT)) {
++ /* Do Nothing Here */
++ }
++
++ if ((REG_READ(intr_stat_reg) & SPL_PKT_SENT)) {
++ REG_WRITE(intr_stat_reg, SPL_PKT_SENT);
++ }
++
++#if DSI_TPO_864x480 || DSI_TPO_864x480_2
++ mdfld_init_TPO_MIPI(dev, pipe);
++#endif /* DSI_TPO_864x480*/
++ *pdpi_panel_on = true;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ bd.props.brightness = mdfld_get_brightness(&bd);
++ mdfld_set_brightness(&bd);
++#endif
++ }
++ }
++ else
++ {
++ if ((*pdpi_panel_on) || dev_priv->first_boot)
++ {
++ mdfld_dsi_brightness_control(dev, pipe, 0);
++#if MDFLD_PO_STATUS /* FIXME_JLIU7 remove after MDFLD_PO */
++
++ /* wait for DPI FIFO to clear */
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, DPI_FIFO_EMPTY);
++
++ if ((REG_READ(intr_stat_reg) & SPL_PKT_SENT)) {
++ REG_WRITE(intr_stat_reg, SPL_PKT_SENT);
++ }
++
++#endif /* MDFLD_PO */
++
++ REG_WRITE(dpi_control_reg, DPI_SHUT_DOWN);
++
++ /* make sure Turn off Packet Sent */
++ mdfld_dsi_gen_fifo_ready (dev, intr_stat_reg, SPL_PKT_SENT);
++
++ *pdpi_panel_on = false;
++ dev_priv->first_boot = false;
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Sets the power state for the dbi panel.
++ */
++static void mdfld_dbi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++#if 0 /*FIXME JLIU7 */
++ u32 pp_status;
++#endif /*FIXME JLIU7 */
++ bool *pdbi_panel_on = &(dev_priv->dbi_panel_on);
++ u8 *p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer;
++ u32 *pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer);
++ u32 DBI_CB_phys = dev_priv->DBI_CB_phys;
++ u32 mipi_command_length_reg = MIPI_COMMAND_LENGTH_REG;
++ u32 mipi_command_address_reg = MIPI_COMMAND_ADDRESS_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 pipeconf = dev_priv->pipeconf;
++ u32 dspcntr = dev_priv->dspcntr;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 disp_cntr_reg = DSPACNTR;
++ int pipe = 0;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ struct backlight_device bd;
++#endif
++
++ PSB_DEBUG_ENTRY("on = %d, output_type = 0x%x \n", on, output->type);
++
++ if (output->type == INTEL_OUTPUT_MIPI2) {
++ if (on) {
++ dev_priv->dual_mipi = true;
++ } else
++ dev_priv->dual_mipi = false;
++
++ pdbi_panel_on = &(dev_priv->dbi_panel_on2);
++ p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer2;
++ DBI_CB_phys = dev_priv->DBI_CB_phys2;
++ pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer2);
++ mipi_command_length_reg = MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET;
++ mipi_command_address_reg = MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ pipeconf = dev_priv->pipeconf2;
++ dspcntr = dev_priv->dspcntr2;
++ pipeconf_reg = PIPECCONF;
++ disp_cntr_reg = DSPCCNTR;
++ pipe = 2;
++ } else if (output->type == INTEL_OUTPUT_MIPI) {
++ if (!on) {
++ dev_priv->dual_mipi = false;
++ }
++ }
++
++ PSB_DEBUG_ENTRY("dbi_panel_on = %d \n", *pdbi_panel_on);
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ if (!(*pdbi_panel_on))
++ {
++ /* FIXME_JLIU7 MDFLD_PO */
++ /* disable all the MIPI interrupts at the beginning. */
++ /* enable all the MIPI interrupts at the end. */
++ /* Make sure dbi command & data buffer are empty */
++ if (*pDBI_CB_pointer != 0)
++ {
++ DRM_ERROR("dbi command buffer was interrupted before finished. \n");
++ }
++
++ mdfld_dsi_dbi_CB_ready (dev, mipi_command_address_reg, gen_fifo_stat_reg);
++
++ /* Wait for 20ms. */
++ udelay(20000);
++
++#if 1 /* MDFLD_PO_JLIU7 */
++ /* exit sleep mode */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = exit_sleep_mode;
++
++ REG_WRITE(mipi_command_length_reg, 1);
++ udelay(5000);
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0);
++
++ /* The host processor must wait five milliseconds after sending exit_sleep_mode command before sending another
++ command. This delay allows the supply voltages and clock circuits to stabilize */
++ udelay(5000);
++
++ mdfld_dsi_dbi_CB_ready (dev, mipi_command_address_reg, gen_fifo_stat_reg);
++
++ *pDBI_CB_pointer = 0;
++
++ /* Wait for 20ms. */
++ udelay(20000);
++#endif /* MDFLD_PO_JLIU7 */
++
++ /* set display on */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_display_on ;
++
++ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
++ REG_WRITE(mipi_command_length_reg, 1);
++ udelay(5000);
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0);
++
++ *pDBI_CB_pointer = 0;
++
++ *pdbi_panel_on = true;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ bd.props.brightness = mdfld_get_brightness(&bd);
++ mdfld_set_brightness(&bd);
++#endif
++ }
++/*FIXME JLIU7 */
++/* Need to figure out how to control the MIPI panel power on sequence*/
++ }
++ else
++ {
++/*FIXME JLIU7 */
++/* Need to figure out how to control the MIPI panel power down sequence*/
++ /*
++ * Only save the current backlight value if we're going from
++ * on to off.
++ */
++ if ((*pdbi_panel_on) || dev_priv->first_boot)
++ {
++#if 1 /* MDFLD_PO_JLIU7 */
++ /* FIXME_JLIU7 MDFLD_PO */
++ /* disable all the MIPI interrupts at the beginning. */
++ /* enable all the MIPI interrupts at the end. */
++ /* Make sure dbi command & data buffer are empty */
++ if (*pDBI_CB_pointer != 0)
++ {
++ DRM_ERROR("dbi command buffer was interrupted before finished. \n");
++ }
++
++ *pdbi_panel_on = false;
++ mdfld_dsi_brightness_control(dev, pipe, 0);
++
++ mdfld_dsi_dbi_CB_ready (dev, mipi_command_address_reg, gen_fifo_stat_reg);
++
++ /* Wait for 20ms. */
++ udelay(20000);
++
++ *pDBI_CB_pointer = 0;
++ /* enter sleep mode */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = enter_sleep_mode;
++
++ REG_WRITE(mipi_command_length_reg, 1);
++ udelay(5000);
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0);
++
++ *pDBI_CB_pointer = 0;
++#endif /* MDFLD_PO_JLIU7 */
++ dev_priv->first_boot = false;
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Sets the power state for the mipi panel.
++ */
++static void mdfld_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ bool dpi = dev_priv->dpi;
++
++ if (output->type == INTEL_OUTPUT_MIPI2)
++ dpi = dev_priv->dpi2;
++
++ if (dpi)
++ mdfld_dpi_set_power(dev, output, on);
++ else
++ mdfld_dbi_set_power(dev, output, on);
++}
++
++static void mdfld_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ PSB_DEBUG_ENTRY("%s \n", (mode == DRM_MODE_DPMS_ON ? "on":"off"));
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mdfld_dsi_set_power(dev, output, true);
++ else
++ mdfld_dsi_set_power(dev, output, false);
++
++ /* XXX: We never power down the DSI pairs. */
++}
++
++static enum drm_connector_status mdfld_dsi_detect(struct drm_connector
++ *connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++ return connector_status_connected;
++}
++
++static int mdfld_dsi_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value)
++{
++
++ struct drm_encoder *pEncoder = connector->encoder;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ struct psb_intel_crtc *pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc);
++ bool bTransitionFromToCentered;
++ uint64_t curValue;
++
++ if (!pPsbCrtc)
++ goto set_prop_error;
++
++ switch (value) {
++ case DRM_MODE_SCALE_FULLSCREEN:
++ break;
++ case DRM_MODE_SCALE_NO_SCALE:
++ break;
++ case DRM_MODE_SCALE_ASPECT:
++ break;
++ default:
++ goto set_prop_error;
++ }
++
++ if (drm_connector_property_get_value(connector, property, &curValue))
++ goto set_prop_error;
++
++ if (curValue == value)
++ goto set_prop_done;
++
++ if (drm_connector_property_set_value(connector, property, value))
++ goto set_prop_error;
++
++ bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
++ (value == DRM_MODE_SCALE_NO_SCALE);
++
++ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
++ pPsbCrtc->saved_mode.vdisplay != 0) {
++ if (bTransitionFromToCentered) {
++ if (!drm_crtc_helper_set_mode(pEncoder->crtc, &pPsbCrtc->saved_mode,
++ pEncoder->crtc->x, pEncoder->crtc->y, pEncoder->crtc->fb))
++ goto set_prop_error;
++ } else {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ pEncHFuncs->mode_set(pEncoder, &pPsbCrtc->saved_mode,
++ &pPsbCrtc->saved_adjusted_mode);
++ }
++ }
++ } else if (!strcmp(property->name, "backlight") && pEncoder) {
++ PSB_DEBUG_ENTRY("backlight \n");
++ if (drm_connector_property_set_value(connector, property, value))
++ goto set_prop_error;
++ else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ struct backlight_device bd;
++ bd.props.brightness = value;
++ mdfld_set_brightness(&bd);
++#endif
++ }
++ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;*/
++ PSB_DEBUG_ENTRY("DPMS \n");
++ pEncHFuncs->dpms(pEncoder, value);
++ /*pCrtcHFuncs->dpms(pEncoder->crtc, value);*/
++ }
++
++set_prop_done:
++ return 0;
++set_prop_error:
++ return -1;
++}
++
++void mdfld_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++#if 0 /* FIXME JLIU7 */
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++#endif /* FIXME JLIU7 */
++
++ PSB_DEBUG_ENTRY("\n");
++
++#if 0 /* FIXME JLIU7 */
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif /* FIXME JLIU7 */
++
++ mdfld_dsi_set_power(dev, output, false);
++}
++
++/* ************************************************************************* *\
++FUNCTION: mdfld_GetHSA_Count
++
++DESCRIPTION: Shows the horizontal sync value in terms of byte clock
++ (txbyteclkhs)
++ Minimum HSA period should be sufficient to transmit a hsync start short
++ packet(4 bytes)
++ i) For Non-burst Mode with sync pulse, Min value - 4 in decimal [plus
++ an optional 6 bytes for a zero payload blanking packet]. But if
++ the value is less than 10 but more than 4, then this count will
++ be added to the HBP's count for one lane.
++ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA, so you
++ can program this to zero. If you program this register, these
++ byte values will be added to HBP.
++ iii) For Burst mode of operation, normally the values programmed in
++ terms of byte clock are based on the principle - time for transferring
++ HSA in Burst mode is the same as in non-burst mode.
++\* ************************************************************************* */
++static u32 mdfld_GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 HSA_count;
++ u32 HSA_countX8;
++ u32 bpp = dev_priv->bpp;
++ u32 HsyncWidth = dev_priv->HsyncWidth;
++ u32 videoModeFormat = dev_priv->videoModeFormat;
++ u32 DDR_Clock = dev_priv->DDR_Clock;
++ u32 DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated;
++ u32 laneCount = dev_priv->laneCount;
++
++ if (dsi_num == 2)
++ {
++ bpp = dev_priv->bpp2;
++ HsyncWidth = dev_priv->HsyncWidth2;
++ videoModeFormat = dev_priv->videoModeFormat2;
++ DDR_Clock = dev_priv->DDR_Clock2;
++ DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated2;
++ laneCount = dev_priv->laneCount2;
++ }
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HSA_countX8 = HsyncWidth * bpp;
++
++#if MDFLD_GET_SYNC_BURST
++ if (videoModeFormat == BURST_MODE)
++ {
++ HSA_countX8 *= DDR_Clock / DDR_Clock_Calculated;
++ }
++#endif /* MDFLD_GET_SYNC_BURST */
++
++ HSA_count = HSA_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HSA according to equation:
++ (hsync_width) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (2 * lane number) */
++
++ HSA_count /= (2 * laneCount);
++
++ if (HSA_count < 4) /* minimum value of 4 */
++ HSA_count = 4;
++
++ PSB_DEBUG_HV("mdfld_HSA_count is %d, for dsi_num %d. \n", HSA_count, dsi_num);
++
++ return HSA_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mdfld_GetHBP_Count
++
++DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
++ Minimum HBP period should be sufficient to transmit a "hsync end short
++ packet(4 bytes) + Blanking packet overhead(6 bytes) + RGB packet header(4 bytes)"
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HBP
++ in Burst mode is the same as in non-burst mode.
++
++ Min value - 14 in decimal [ accounted with zero payload for blanking packet] for one lane.
++ Max value - any value greater than 14 based on DPI resolution
++\* ************************************************************************* */
++static u32 mdfld_GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 HBP_count, HBP_countX8;
++ u32 bpp = dev_priv->bpp;
++ u32 HbackPorch = dev_priv->HbackPorch;
++ u32 videoModeFormat = dev_priv->videoModeFormat;
++ u32 DDR_Clock = dev_priv->DDR_Clock;
++ u32 DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated;
++ u32 laneCount = dev_priv->laneCount;
++
++ if (dsi_num == 2)
++ {
++ bpp = dev_priv->bpp2;
++ HbackPorch = dev_priv->HbackPorch2;
++ videoModeFormat = dev_priv->videoModeFormat2;
++ DDR_Clock = dev_priv->DDR_Clock2;
++ DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated2;
++ laneCount = dev_priv->laneCount2;
++ }
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HBP_countX8 = HbackPorch * bpp;
++
++#if MDFLD_GET_SYNC_BURST
++ if (videoModeFormat == BURST_MODE)
++ {
++ HBP_countX8 *= DDR_Clock / DDR_Clock_Calculated;
++ }
++#endif /* MDFLD_GET_SYNC_BURST */
++
++ HBP_count = HBP_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HBP according to equation:
++ (hsync_backporch) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (2 * lane number) */
++
++ HBP_count /= (2 * laneCount);
++
++ if (HBP_count < 8) /* minimum value of 8 */
++ HBP_count = 8;
++
++ PSB_DEBUG_HV("mdfld_HBP_count is %d, for dsi_num %d. \n", HBP_count, dsi_num);
++
++/* MDFLD_PO_JLIU7 */
++ return 0x0e;
++ return HBP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mdfld_GetHFP_Count
++
++DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
++ Minimum HFP period should be sufficient to transmit "RGB Data packet
++ footer(2 bytes) + Blanking packet overhead(6 bytes)" for non burst mode.
++
++ For burst mode, Minimum HFP period should be sufficient to transmit
++ Blanking packet overhead(6 bytes)"
++
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HFP
++ in Burst mode is the same as in non-burst mode.
++
++ Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++ Min value - 6 in decimal for burst mode for one lane.
++
++ Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 mdfld_GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 HFP_count, HFP_countX8;
++ u32 bpp = dev_priv->bpp;
++ u32 HfrontPorch = dev_priv->HfrontPorch;
++ u32 videoModeFormat = dev_priv->videoModeFormat;
++ u32 DDR_Clock = dev_priv->DDR_Clock;
++ u32 DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated;
++ u32 laneCount = dev_priv->laneCount;
++
++ if (dsi_num == 2)
++ {
++ bpp = dev_priv->bpp2;
++ HfrontPorch = dev_priv->HfrontPorch2;
++ videoModeFormat = dev_priv->videoModeFormat2;
++ DDR_Clock = dev_priv->DDR_Clock2;
++ DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated2;
++ laneCount = dev_priv->laneCount2;
++ }
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HFP_countX8 = HfrontPorch * bpp;
++
++#if MDFLD_GET_SYNC_BURST
++ if (videoModeFormat == BURST_MODE)
++ {
++ HFP_countX8 *= DDR_Clock / DDR_Clock_Calculated;
++ }
++#endif /* MDFLD_GET_SYNC_BURST */
++
++ HFP_count = HFP_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HFP according to equation:
++ (hsync_frontporch) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (2 * lane number) */
++
++ HFP_count /= (2 * laneCount);
++
++ if (HFP_count < 8) /* minimum value of 8 */
++ HFP_count = 8;
++
++ PSB_DEBUG_HV("mdfld_HFP_count is %d, for dsi_num %d. \n", HFP_count, dsi_num);
++
++ return HFP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mdfld_GetHAdr_Count
++
++DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
++ In Non Burst Mode, Count equal to RGB word count value
++
++ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
++ during a scan line for LP mode (saving power) or for multiplexing
++ other transmissions onto the DSI link. Hence, the count equals the
++ time in txbyteclkhs for sending time compressed RGB pixels plus
++ the time needed for moving to power save mode or the time needed
++ for secondary channel to use the DSI link.
++
++ But if the left out time for moving to low power mode is less than
++ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
++ 6txbyteclkhs for a blanking packet with zero payload], then
++ this count will be added to the HFP's count for one lane.
++
++ Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++ Min value - 6 in decimal for burst mode for one lane.
++
++ Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 mdfld_GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 HAdr_count, HAdr_countX8;
++ u32 bpp = dev_priv->bpp;
++ u32 HactiveArea = dev_priv->HactiveArea;
++ u32 videoModeFormat = dev_priv->videoModeFormat;
++ u32 DDR_Clock = dev_priv->DDR_Clock;
++ u32 DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated;
++ u32 laneCount = dev_priv->laneCount;
++
++ if (dsi_num == 2)
++ {
++ bpp = dev_priv->bpp2;
++ HactiveArea = dev_priv->HactiveArea2;
++ videoModeFormat = dev_priv->videoModeFormat2;
++ DDR_Clock = dev_priv->DDR_Clock2;
++ DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated2;
++ laneCount = dev_priv->laneCount2;
++ }
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HAdr_countX8 = HactiveArea * bpp;
++
++#if MDFLD_GET_SYNC_BURST
++ if (videoModeFormat == BURST_MODE)
++ {
++ HAdr_countX8 *= DDR_Clock / DDR_Clock_Calculated;
++ }
++#endif /* MDFLD_GET_SYNC_BURST */
++
++ HAdr_count = HAdr_countX8 / 8;
++
++ /* FIXME_JLIU7 the above formulus is deduced from the MIPI spec. The following
++ equation comes from HW SV. need to double check it. */
++ /* compute HAdr according to equation:
++ (horizontal active) * 24 bpp / (8 bits per lane * 2 lanes)*/
++ /* FIXME_JLIU the lower equation = the upper equation / (lane number) */
++
++ HAdr_count /= laneCount;
++
++ PSB_DEBUG_HV("mdfld_HAdr_count is %d, for dsi_num %d. \n", HAdr_count, dsi_num);
++
++ return HAdr_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mdfld_GetVSA_Count
++
++DESCRIPTION: Shows the vertical sync value in terms of lines
++
++\* ************************************************************************* */
++static u32 mdfld_GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 VSA_count;
++ u32 VsyncWidth = dev_priv->VsyncWidth;
++
++ if (dsi_num == 2)
++ {
++ VsyncWidth = dev_priv->VsyncWidth2;
++ }
++
++ /* Get the vsync pulse width */
++ VSA_count = VsyncWidth;
++
++ if (VSA_count < 2) /* minimum value of 2 */
++ VSA_count = 2;
++
++ PSB_DEBUG_HV("mdfld_VSA_count is %d, for dsi_num %d. \n", VSA_count, dsi_num);
++
++ return VSA_count;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mdfld_GetVBP_Count
++ *
++ * DESCRIPTION: Shows the vertical back porch value in lines.
++ *
++\* ************************************************************************* */
++static u32 mdfld_GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 VBP_count;
++ u32 VbackPorch = dev_priv->VbackPorch;
++
++ if (dsi_num == 2)
++ {
++ VbackPorch = dev_priv->VbackPorch2;
++ }
++
++ /* Get the Vertical Backporch width */
++ VBP_count = VbackPorch;
++
++ if (VBP_count < 2) /* minimum value of 2 */
++ VBP_count = 2;
++
++ PSB_DEBUG_HV("mdfld_VBP_count is %d, for dsi_num %d. \n", VBP_count, dsi_num);
++
++ return VBP_count;
++}
++/* ************************************************************************* *\
++ * FUNCTION: mdfld_GetVFP_Count
++ *
++ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
++ *
++\* ************************************************************************* */
++static u32 mdfld_GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 VFP_count;
++ u32 VfrontPorch = dev_priv->VfrontPorch;
++
++ if (dsi_num == 2)
++ {
++ VfrontPorch = dev_priv->VfrontPorch2;
++ }
++
++ /* Get the Vertical Frontporch width */
++ VFP_count = VfrontPorch;
++
++ if (VFP_count < 2) /* minimum value of 2 */
++ VFP_count = 2;
++
++ PSB_DEBUG_HV("mdfld_VFP_count is %d, for dsi_num %d. \n", VFP_count, dsi_num);
++
++ return VFP_count;
++}
++
++static void mdfld_dpi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++#if 0 /* FIXME_JLIU7 add it later */
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++#endif /* FIXME_JLIU7 add it later */
++ u32 SupportedFormat = 0;
++ u32 resolution = 0;
++
++ u32 bpp = dev_priv->bpp;
++ u32 HactiveArea = dev_priv->HactiveArea;
++ u32 VactiveArea = dev_priv->VactiveArea;
++ u32 videoModeFormat = dev_priv->videoModeFormat;
++ u32 channelNumber = dev_priv->channelNumber;
++ u32 laneCount = dev_priv->laneCount;
++ u32 pipeconf = dev_priv->pipeconf;
++ u32 dspcntr = dev_priv->dspcntr;
++ bool *pdpi_panel_on = &(dev_priv->dpi_panel_on);
++ bool *pdbi_panel_on = &(dev_priv->dbi_panel_on);
++
++ u32 mipi_reg = MIPI;
++ u32 mipi_control_reg = MIPI_CONTROL_REG;
++ u32 intr_en_reg = INTR_EN_REG;
++ u32 turn_around_timeout_reg = TURN_AROUND_TIMEOUT_REG;
++ u32 device_reset_reg = DEVICE_RESET_REG;
++ u32 init_count_reg = INIT_COUNT_REG;
++ u32 dsi_func_prg_reg = DSI_FUNC_PRG_REG;
++ u32 dpi_resolution_reg = DPI_RESOLUTION_REG;
++ u32 vert_sync_pad_count_reg = VERT_SYNC_PAD_COUNT_REG;
++ u32 vert_back_porch_count_reg = VERT_BACK_PORCH_COUNT_REG;
++ u32 vert_front_porch_count_reg = VERT_FRONT_PORCH_COUNT_REG;
++ u32 horiz_sync_pad_count_reg = HORIZ_SYNC_PAD_COUNT_REG;
++ u32 horiz_back_porch_count_reg = HORIZ_BACK_PORCH_COUNT_REG;
++ u32 horiz_front_porch_count_reg = HORIZ_FRONT_PORCH_COUNT_REG;
++ u32 horiz_active_area_count_reg = HORIZ_ACTIVE_AREA_COUNT_REG;
++ u32 video_fmt_reg = VIDEO_FMT_REG;
++ u32 hs_tx_timeout_reg = HS_TX_TIMEOUT_REG;
++ u32 lp_rx_timeout_reg = LP_RX_TIMEOUT_REG;
++ u32 high_low_switch_count_reg = HIGH_LOW_SWITCH_COUNT_REG;
++ u32 eot_disable_reg = EOT_DISABLE_REG;
++ u32 lp_byteclk_reg = LP_BYTECLK_REG;
++ u32 device_ready_reg = DEVICE_READY_REG;
++ u32 dpi_control_reg = DPI_CONTROL_REG;
++ u32 dphy_param_reg = DPHY_PARAM_REG;
++ u32 clk_lane_swt_reg = CLK_LANE_SWT_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 intr_stat_reg = INTR_STAT_REG;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 disp_cntr_reg = DSPACNTR;
++
++ /* default to be dpi on values on pipe A. */
++ /* Enable MIPI Port */
++ u32 mipi_val = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
++ u32 mipi_control_val = 0x00000018;
++ u32 dphy_param_val = 0x150c3408; /* dual dbi - dpi */ /* single = 0x0B14540C; */
++ u32 clk_lane_swt_val = 0x000A0014;
++ u32 intr_en_val = 0xffffffff;
++ u32 turn_around_timeout_val =0x0000001F;
++ u32 device_reset_val = 0x000000ff; /* old value = 0x00000015 may depends on the DSI RX device*/
++ u32 init_count_val = 0x000007d0; /* 0x00000050; Minimum value = 0x000007d0 */
++ u32 dsi_func_prg_val = 0;
++ u32 horiz_sync_pad_count_val = mdfld_GetHSA_Count(dev, dev_priv, 1);
++ u32 horiz_back_porch_count_val = mdfld_GetHBP_Count(dev, dev_priv, 1);
++ u32 horiz_front_porch_count_val = mdfld_GetHFP_Count(dev, dev_priv, 1);
++ u32 horiz_active_area_count_val = mdfld_GetHAdr_Count(dev, dev_priv, 1);
++ u32 VSA_count = mdfld_GetVSA_Count(dev, dev_priv, 1);
++ u32 VBP_count = mdfld_GetVBP_Count(dev, dev_priv, 1);
++ u32 VFP_count = mdfld_GetVFP_Count(dev, dev_priv, 1);
++
++#if 1 /* MDFLD_PO_JLIU7 QCIF_176x144_SLE || DSI_TPO_800x600 || DBI_TPO_480x864 || DSI_TPO_800x480 ||DSI_TPO_864x480 || DBI_TPO_864x480 */
++ u32 hs_tx_timeout_val = 0x3fffff;
++ u32 lp_rx_timeout_val = 0xffff;
++ u32 high_low_switch_count_val = 0x46;
++#else /*JLIU7_PO hard coded for NSC PO */
++ u32 hs_tx_timeout_val = GetHS_TX_timeoutCount(dev_priv);
++ u32 lp_rx_timeout_val = GetLP_RX_timeoutCount(dev_priv);
++ u32 high_low_switch_count_val = GetHighLowSwitchCount(dev_priv);
++#endif /*JLIU7_PO hard coded for NSC PO */
++
++
++/* MDFLD_PO_JLIU7 u32 eot_disable_val = ENABLE_CLOCK_STOPPING; */
++ u32 eot_disable_val = 0;
++ u32 lp_byteclk_val = 0x00000004; /* FIXME JLIU7 for NSC PO */
++ u32 device_ready_val =0x00000001;
++ u32 dpi_control_val = 0x00000002; /* Turn On */
++ u32 timeout = 0;
++ u32 pipe = 0;
++
++ PSB_DEBUG_ENTRY("output_type = 0x%x \n", output->type);
++
++ if (output->type == INTEL_OUTPUT_MIPI2)
++ {
++ /* MIPI_A has to be on before we can enable MIPI_C*/
++ if (!(dev_priv->dpi_panel_on || dev_priv->dbi_panel_on))
++ {
++ DRM_ERROR("mdfld_dpi_mode_set need to enable MIPI0 before enabling MIPI1. \n");
++ return;
++ }
++
++ bpp = dev_priv->bpp2;
++ }
++
++ switch (bpp)
++ {
++ case 16:
++ SupportedFormat = RGB_565_FMT;
++ break;
++ case 18:
++ SupportedFormat = RGB_666_FMT;
++ break;
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ DRM_INFO("mdfld_dpi_mode_set, invalid bpp \n");
++ break;
++ }
++
++ SupportedFormat <<= FMT_DPI_POS;
++ channelNumber <<= DPI_CHANNEL_NUMBER_POS;
++ dsi_func_prg_val = laneCount | SupportedFormat | channelNumber;
++
++ if (output->type == INTEL_OUTPUT_MIPI2)
++ {
++ pipe = 2;
++ HactiveArea = dev_priv->HactiveArea2;
++ VactiveArea = dev_priv->VactiveArea2;
++ videoModeFormat = dev_priv->videoModeFormat2;
++ channelNumber = dev_priv->channelNumber2;
++ laneCount = dev_priv->laneCount2;
++ pipeconf = dev_priv->pipeconf2;
++ dspcntr = dev_priv->dspcntr2;
++ pdpi_panel_on = &(dev_priv->dpi_panel_on2);
++ pdbi_panel_on = &(dev_priv->dbi_panel_on2);
++
++ mipi_reg = MIPI_C;
++ mipi_control_reg = MIPI_CONTROL_REG + MIPIC_REG_OFFSET;
++ intr_en_reg = INTR_EN_REG + MIPIC_REG_OFFSET;
++ device_reset_reg = DEVICE_RESET_REG + MIPIC_REG_OFFSET;
++ turn_around_timeout_reg = TURN_AROUND_TIMEOUT_REG + MIPIC_REG_OFFSET;
++ init_count_reg = INIT_COUNT_REG + MIPIC_REG_OFFSET;
++ dsi_func_prg_reg = DSI_FUNC_PRG_REG + MIPIC_REG_OFFSET;
++ dpi_resolution_reg = DPI_RESOLUTION_REG + MIPIC_REG_OFFSET;
++ vert_sync_pad_count_reg = VERT_SYNC_PAD_COUNT_REG + MIPIC_REG_OFFSET;
++ vert_back_porch_count_reg = VERT_BACK_PORCH_COUNT_REG + MIPIC_REG_OFFSET;
++ vert_front_porch_count_reg = VERT_FRONT_PORCH_COUNT_REG + MIPIC_REG_OFFSET;
++ horiz_sync_pad_count_reg = HORIZ_SYNC_PAD_COUNT_REG + MIPIC_REG_OFFSET;
++ horiz_back_porch_count_reg = HORIZ_BACK_PORCH_COUNT_REG + MIPIC_REG_OFFSET;
++ horiz_front_porch_count_reg = HORIZ_FRONT_PORCH_COUNT_REG + MIPIC_REG_OFFSET;
++ horiz_active_area_count_reg = HORIZ_ACTIVE_AREA_COUNT_REG + MIPIC_REG_OFFSET;
++ video_fmt_reg = VIDEO_FMT_REG + MIPIC_REG_OFFSET;
++ hs_tx_timeout_reg = HS_TX_TIMEOUT_REG + MIPIC_REG_OFFSET;
++ lp_rx_timeout_reg = LP_RX_TIMEOUT_REG + MIPIC_REG_OFFSET;
++ high_low_switch_count_reg = HIGH_LOW_SWITCH_COUNT_REG + MIPIC_REG_OFFSET;
++ eot_disable_reg = EOT_DISABLE_REG + MIPIC_REG_OFFSET;
++ lp_byteclk_reg = LP_BYTECLK_REG + MIPIC_REG_OFFSET;
++ device_ready_reg = DEVICE_READY_REG + MIPIC_REG_OFFSET;
++ dpi_control_reg = DPI_CONTROL_REG + MIPIC_REG_OFFSET;
++ dphy_param_reg = DPHY_PARAM_REG + MIPIC_REG_OFFSET;
++ clk_lane_swt_reg = CLK_LANE_SWT_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++ intr_stat_reg = INTR_STAT_REG + MIPIC_REG_OFFSET;
++
++ pipeconf_reg = PIPECCONF;
++ disp_cntr_reg = DSPCCNTR;
++
++ horiz_sync_pad_count_val = mdfld_GetHSA_Count(dev, dev_priv, 2);
++ horiz_back_porch_count_val = mdfld_GetHBP_Count(dev, dev_priv, 2);
++ horiz_front_porch_count_val = mdfld_GetHFP_Count(dev, dev_priv, 2);
++ horiz_active_area_count_val = mdfld_GetHAdr_Count(dev, dev_priv, 2);
++ VSA_count = mdfld_GetVSA_Count(dev, dev_priv, 2);
++ VBP_count = mdfld_GetVBP_Count(dev, dev_priv, 2);
++ VFP_count = mdfld_GetVFP_Count(dev, dev_priv, 2);
++ } else {
++ mipi_val |= dev_priv->mipi_lane_config;
++ }
++
++ resolution = HactiveArea | (VactiveArea << RES_V_POS);
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++#if 0 /* FIXME_JLIU7 add it later */
++ drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base, dev->mode_config.scaling_mode_property, &curValue);
++
++ if (curValue == DRM_MODE_SCALE_NO_SCALE)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++#endif /* FIXME_JLIU7 add it later */
++
++ /* udelay(20000); */
++ /* wait for PIPE A to disable */
++#if 0 /* FIXME_JLIU7 */
++ while (REG_READ(pipeconf_reg) & PIPECONF_ACTIVE) {
++ /* Do Nothing Here */
++ /* This should make checkpatch work */
++ }
++ /* wait for DPI FIFO to clear */
++ while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY)) {
++ udelay(100);
++ }
++#endif
++
++ /* clear intr status register */
++
++ /* Clear Device Ready Bit */
++ REG_WRITE(device_ready_reg, 0x00000000);
++
++ REG_WRITE(mipi_reg, mipi_val);
++
++ /* FIXME_MDFLD JLIU7 revisit MIPI_CONTROL_REG */
++ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
++ REG_WRITE(mipi_control_reg, mipi_control_val);
++
++ /* Get the value from HW SV */
++ REG_WRITE(dphy_param_reg, dphy_param_val);
++ REG_WRITE(clk_lane_swt_reg, clk_lane_swt_val);
++
++ /* Enable all the error interrupt */
++ REG_WRITE(intr_en_reg, intr_en_val);
++ REG_WRITE(turn_around_timeout_reg, turn_around_timeout_val);
++ REG_WRITE(device_reset_reg, device_reset_val); /* old value = 0x00000015 may depends on the DSI RX device*/
++ REG_WRITE(init_count_reg, init_count_val); /* Minimum value = 0x000007d0 */
++
++ REG_WRITE(dsi_func_prg_reg, dsi_func_prg_val);
++
++ REG_WRITE(vert_sync_pad_count_reg, VSA_count);
++ REG_WRITE(vert_back_porch_count_reg, VBP_count);
++ REG_WRITE(vert_front_porch_count_reg, VFP_count);
++
++ REG_WRITE(horiz_sync_pad_count_reg, horiz_sync_pad_count_val);
++ REG_WRITE(horiz_back_porch_count_reg, horiz_back_porch_count_val);
++ REG_WRITE(horiz_front_porch_count_reg, horiz_front_porch_count_val);
++ REG_WRITE(horiz_active_area_count_reg, horiz_active_area_count_val);
++
++ REG_WRITE(video_fmt_reg, videoModeFormat | COMPLETE_LAST_PCKT);
++
++ REG_WRITE(dpi_resolution_reg, resolution);
++
++ REG_WRITE(hs_tx_timeout_reg, hs_tx_timeout_val);
++ REG_WRITE(lp_rx_timeout_reg, lp_rx_timeout_val);
++ REG_WRITE(high_low_switch_count_reg, high_low_switch_count_val);
++
++ REG_WRITE(eot_disable_reg, eot_disable_val);
++ REG_WRITE(lp_byteclk_reg, lp_byteclk_val);
++
++ REG_WRITE(device_ready_reg, device_ready_val);
++
++ /* wait for DPI FIFO to clear */
++ while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY)) {
++ udelay(100);
++ }
++
++ if ((REG_READ(intr_stat_reg) & SPL_PKT_SENT)) {
++ REG_WRITE(intr_stat_reg, SPL_PKT_SENT);
++ }
++
++ REG_WRITE(dpi_control_reg, dpi_control_val); /* Turn On */
++ udelay(20000);
++ udelay(20000);
++
++ /* make sure Turn On Packet Sent */
++// udelay(20000);
++ while (!(REG_READ(intr_stat_reg) & SPL_PKT_SENT)) {
++ /* Do Nothing Here */
++ }
++
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe enable to take effect. */
++ mdfldWaitForPipeEnable(dev, pipe);
++
++ REG_WRITE(disp_cntr_reg, dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ udelay(20000);
++
++ *pdpi_panel_on = true;
++
++#if DSI_TPO_864x480 || DSI_TPO_864x480_2
++ mdfld_init_TPO_MIPI(dev, pipe);
++#endif /* DSI_TPO_864x480*/
++#if MDFLD_JLIU7_LABC
++ mdfld_dsi_brightness_init (dev, pipe);
++#endif /* MDFLD_JLIU7_LABC */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mdfld_dbi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++#if 0 /* FIXME_JLIU7 add it later */
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++#endif /* FIXME_JLIU7 add it later */
++ u32 DcsPixelFormat = 0;
++ u32 DBI_dataWidth = 0;
++
++ u32 bpp = dev_priv->bpp;
++ u32 HactiveArea = dev_priv->HactiveArea;
++ u32 VactiveArea = dev_priv->VactiveArea;
++ u32 channelNumber = dev_priv->channelNumber;
++ u32 laneCount = dev_priv->laneCount;
++ u32 *pipeconf = &dev_priv->pipeconf;
++ u32 dspcntr = dev_priv->dspcntr;
++ bool *pdbi_panel_on = &(dev_priv->dbi_panel_on);
++ u8 *p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer;
++ u32 DBI_CB_phys = dev_priv->DBI_CB_phys;
++ u32 *pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer);
++ u32 mipi_reg = MIPI;
++ u32 mipi_control_reg = MIPI_CONTROL_REG;
++ u32 intr_en_reg = INTR_EN_REG;
++ u32 turn_around_timeout_reg = TURN_AROUND_TIMEOUT_REG;
++ u32 device_reset_reg = DEVICE_RESET_REG;
++ u32 init_count_reg = INIT_COUNT_REG;
++ u32 dsi_func_prg_reg = DSI_FUNC_PRG_REG;
++ u32 hs_tx_timeout_reg = HS_TX_TIMEOUT_REG;
++ u32 lp_rx_timeout_reg = LP_RX_TIMEOUT_REG;
++ u32 high_low_switch_count_reg = HIGH_LOW_SWITCH_COUNT_REG;
++ u32 eot_disable_reg = EOT_DISABLE_REG;
++ u32 lp_byteclk_reg = LP_BYTECLK_REG;
++ u32 device_ready_reg = DEVICE_READY_REG;
++ u32 dphy_param_reg = DPHY_PARAM_REG;
++ u32 clk_lane_swt_reg = CLK_LANE_SWT_REG;
++ u32 dbi_bw_ctrl_reg = DBI_BW_CTRL_REG;
++ u32 hs_ls_dbi_enable_reg = HS_LS_DBI_ENABLE_REG;
++ u32 mipi_command_length_reg = MIPI_COMMAND_LENGTH_REG;
++ u32 mipi_command_address_reg = MIPI_COMMAND_ADDRESS_REG;
++ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
++ u32 pipeconf_reg = PIPEACONF;
++ u32 disp_cntr_reg = DSPACNTR;
++
++ /* defaut to be dbi values on pipe A. */
++ u32 mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
++ u32 dphy_param_val = 0x150c3408; /* dual dbi - dpi */ /* single 0x180a2b07; */ /*SLE 0x0b14540c;*/ /* HW SIMU 0x0b061a10; */
++ u32 mipi_control_val = 0x00000018;
++ u32 hs_tx_timeout_val = 0x3fffff;
++ u32 lp_rx_timeout_val = 0xffff;
++ u32 turn_around_timeout_val =0x0000001F;
++ u32 device_reset_val = 0x000000ff; /* old value = 0x00000015 may depends on the DSI RX device*/
++ u32 intr_en_val = 0xffffffff;
++/* MDFLD_PO_JLIU7 u32 eot_disable_val = ENABLE_CLOCK_STOPPING; */
++ u32 eot_disable_val = 0;
++ u32 init_count_val = 0x000007d0;
++ u32 dsi_func_prg_val = 0;
++ u32 device_ready_val =0x00000001;
++
++ /* FIXME_JLIU7 need to update the following MIPI adaptor register values */
++ u32 clk_lane_swt_val = 0x000A0014;
++ u32 dbi_bw_ctrl_val = 0x00000400; /* HW SIMU 0x00000820; */
++ u32 hs_ls_dbi_enable_val = 0;
++ u32 high_low_switch_count_val = 0x46;
++ u32 lp_byteclk_val = 0x00000004; /* FIXME JLIU7 for NSC PO */
++ u32 pipe = 0;
++
++ PSB_DEBUG_ENTRY("output_type = 0x%x \n", output->type);
++
++ if (output->type == INTEL_OUTPUT_MIPI2)
++ {
++ /* MIPI_A has to be on before we can enable MIPI_C*/
++ if (!(dev_priv->dpi_panel_on || dev_priv->dbi_panel_on))
++ {
++ DRM_ERROR("mdfld_dbi_mode_set need to enable MIPI0 before enabling MIPI1. \n");
++ return;
++ }
++
++ bpp = dev_priv->bpp2;
++ }
++
++ switch (bpp)
++ {
++ case 16:
++ DcsPixelFormat = DCS_PIXEL_FORMAT_16bbp;
++ break;
++ case 18:
++ DcsPixelFormat = DCS_PIXEL_FORMAT_18bbp;
++ break;
++ case 24:
++ DcsPixelFormat = DCS_PIXEL_FORMAT_24bbp;
++ break;
++ default:
++ DRM_INFO("mdfld_dbi_mode_set, invalid bpp \n");
++ break;
++ }
++
++ if (output->type == INTEL_OUTPUT_MIPI2)
++ {
++ pipe = 2;
++ HactiveArea = dev_priv->HactiveArea2;
++ VactiveArea = dev_priv->VactiveArea2;
++ channelNumber = dev_priv->channelNumber2;
++ laneCount = dev_priv->laneCount2;
++ pipeconf = &dev_priv->pipeconf2;
++ dspcntr = dev_priv->dspcntr2;
++ pdbi_panel_on = &(dev_priv->dbi_panel_on2);
++ p_DBI_commandBuffer = dev_priv->p_DBI_commandBuffer2;
++ DBI_CB_phys = dev_priv->DBI_CB_phys2;
++ pDBI_CB_pointer = &(dev_priv->DBI_CB_pointer2);
++
++ mipi_reg = MIPI_C;
++ mipi_control_reg = MIPI_CONTROL_REG + MIPIC_REG_OFFSET;
++ intr_en_reg = INTR_EN_REG + MIPIC_REG_OFFSET;
++ device_reset_reg = DEVICE_RESET_REG + MIPIC_REG_OFFSET;
++ turn_around_timeout_reg = TURN_AROUND_TIMEOUT_REG + MIPIC_REG_OFFSET;
++ init_count_reg = INIT_COUNT_REG + MIPIC_REG_OFFSET;
++ dsi_func_prg_reg = DSI_FUNC_PRG_REG + MIPIC_REG_OFFSET;
++ hs_tx_timeout_reg = HS_TX_TIMEOUT_REG + MIPIC_REG_OFFSET;
++ lp_rx_timeout_reg = LP_RX_TIMEOUT_REG + MIPIC_REG_OFFSET;
++ high_low_switch_count_reg = HIGH_LOW_SWITCH_COUNT_REG + MIPIC_REG_OFFSET;
++ eot_disable_reg = EOT_DISABLE_REG + MIPIC_REG_OFFSET;
++ lp_byteclk_reg = LP_BYTECLK_REG + MIPIC_REG_OFFSET;
++ device_ready_reg = DEVICE_READY_REG + MIPIC_REG_OFFSET;
++ dphy_param_reg = DPHY_PARAM_REG + MIPIC_REG_OFFSET;
++ clk_lane_swt_reg = CLK_LANE_SWT_REG + MIPIC_REG_OFFSET;
++ dbi_bw_ctrl_reg = DBI_BW_CTRL_REG + MIPIC_REG_OFFSET;
++ hs_ls_dbi_enable_reg = HS_LS_DBI_ENABLE_REG + MIPIC_REG_OFFSET;
++ mipi_command_length_reg = MIPI_COMMAND_LENGTH_REG + MIPIC_REG_OFFSET;
++ mipi_command_address_reg = MIPI_COMMAND_ADDRESS_REG + MIPIC_REG_OFFSET;
++ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
++
++ pipeconf_reg = PIPECCONF;
++ disp_cntr_reg = DSPCCNTR;
++
++ } else {
++ mipi_val |= dev_priv->mipi_lane_config;
++ }
++
++ *pipeconf |= PIPEACONF_DSR;
++ channelNumber <<= DBI_CHANNEL_NUMBER_POS;
++ DBI_dataWidth = DBI_DATA_WIDTH_OPT2 << DBI_DATA_WIDTH_POS;
++ dsi_func_prg_val = laneCount | channelNumber | DBI_dataWidth;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++#if 0 /* FIXME_JLIU7 add it later */
++ drm_connector_property_get_value(&enc_to_psb_intel_output(encoder)->base, dev->mode_config.scaling_mode_property, &curValue);
++
++ if (curValue == DRM_MODE_SCALE_NO_SCALE)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++#endif /* FIXME_JLIU7 add it later */
++
++ /* clear intr status register */
++
++ /* Clear Device Ready Bit */
++ REG_WRITE(device_ready_reg, 0x00000000);
++
++ REG_WRITE(mipi_reg, mipi_val);
++
++ /* FIXME_MDFLD JLIU7 revisit MIPI_CONTROL_REG */
++ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
++ REG_WRITE(mipi_control_reg, mipi_control_val);
++
++ /* Get the value from HW SV */
++ REG_WRITE(dphy_param_reg, dphy_param_val);
++ REG_WRITE(clk_lane_swt_reg, clk_lane_swt_val);
++ REG_WRITE(dbi_bw_ctrl_reg, dbi_bw_ctrl_val);
++ REG_WRITE(hs_ls_dbi_enable_reg, hs_ls_dbi_enable_val);
++
++ /* Enable all the error interrupt */
++ REG_WRITE(intr_en_reg, intr_en_val);
++ REG_WRITE(turn_around_timeout_reg, turn_around_timeout_val);
++ REG_WRITE(device_reset_reg, device_reset_val); /* old value = 0x00000015 may depends on the DSI RX device*/
++ REG_WRITE(init_count_reg, init_count_val); /* Minimum value = 0x000007d0 */
++ REG_WRITE(dsi_func_prg_reg, dsi_func_prg_val);
++ REG_WRITE(hs_tx_timeout_reg, hs_tx_timeout_val);
++ REG_WRITE(lp_rx_timeout_reg, lp_rx_timeout_val);
++ REG_WRITE(high_low_switch_count_reg, high_low_switch_count_val);
++ REG_WRITE(eot_disable_reg, eot_disable_val);
++ REG_WRITE(lp_byteclk_reg, lp_byteclk_val);
++ REG_WRITE(device_ready_reg, device_ready_val);
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ udelay(20000);
++
++ REG_WRITE(disp_cntr_reg, dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ udelay(20000);
++
++ /* FIXME_JLIU7 MDFLD_PO */
++ /* disable all the MIPI interrupts at the beginning. */
++ /* enable all the MIPI interrupts at the end. */
++ /* Make sure dbi command & data buffer are empty */
++ if (*pDBI_CB_pointer != 0)
++ {
++ DRM_ERROR("dbi command buffer was interrupted before finished. \n");
++ }
++
++ mdfld_dsi_dbi_CB_ready (dev, mipi_command_address_reg, gen_fifo_stat_reg);
++ /* Wait for 20ms. */
++ udelay(20000);
++
++ /* exit sleep mode */
++ *p_DBI_commandBuffer = exit_sleep_mode;
++
++ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
++ REG_WRITE(mipi_command_length_reg, 1);
++ udelay(5000);
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0);
++
++ /* The host processor must wait five milliseconds after sending exit_sleep_mode command before sending another
++ command. This delay allows the supply voltages and clock circuits to stabilize */
++ udelay(5000);
++
++ mdfld_dsi_dbi_CB_ready (dev, mipi_command_address_reg, 0);
++ /* Wait for 20ms. */
++ udelay(20000);
++#if MDFLD_JLIU7_LABC
++ mdfld_dsi_brightness_init (dev, pipe);
++ mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++#endif /* MDFLD_JLIU7_LABC */
++
++ REG_WRITE(pipeconf_reg, *pipeconf);
++ REG_READ(pipeconf_reg);
++
++ *pDBI_CB_pointer = 0;
++ /* set_column_address */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_column_address;
++ *((u16 *) (p_DBI_commandBuffer + *pDBI_CB_pointer)) = 0;
++ *pDBI_CB_pointer += 2;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (HactiveArea - 1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = HactiveArea - 1;
++
++
++ *pDBI_CB_pointer = 8;
++ /* set_page_addr */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = set_page_addr;
++ *((u16 *) (p_DBI_commandBuffer + *pDBI_CB_pointer)) = 0;
++ *pDBI_CB_pointer += 2;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = (VactiveArea- 1) >> 8;
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = VactiveArea - 1;
++
++#if 0 /*FIXME JLIU7 */
++ /* write_LUT if needed. */
++#endif /*FIXME JLIU7 */
++
++ *pDBI_CB_pointer = 16;
++ /* write_mem_start */
++ *(p_DBI_commandBuffer + (*pDBI_CB_pointer)++) = write_mem_start;
++
++ REG_WRITE(mipi_command_length_reg, 0x010505);
++ udelay(5000);
++ REG_WRITE(mipi_command_address_reg, DBI_CB_phys | BIT0 |BIT1);
++ udelay(5000);
++
++ /* FIXME_Enable pipe vblank interrupt */
++
++ *pDBI_CB_pointer = 0;
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++#if MDFLD_JLIU7_DSR
++ if (output->type == INTEL_OUTPUT_MIPI) {
++ mdfld_dsr_timer_init(dev_priv);
++ }
++#endif /* MDFLD_JLIU7_DSR */
++
++}
++
++static void mdfld_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ bool dpi = dev_priv->dpi;
++
++ if (output->type == INTEL_OUTPUT_MIPI2)
++ dpi = dev_priv->dpi2;
++
++ if (dpi)
++ mdfld_dpi_mode_set(encoder, mode, adjusted_mode);
++ else
++ mdfld_dbi_mode_set(encoder, mode, adjusted_mode);
++}
++
++void mdfld_dsi_commit( struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++#if 0 /* FIXME_MDFLD JLIU7_PO */
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++#endif /* FIXME_MDFLD JLIU7_PO */
++
++ PSB_DEBUG_ENTRY("Enter mdfld_dsi_commit \n");
++
++#if 0 /* FIXME_MDFLD JLIU7_PO */
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ mrst_dsi_get_max_backlight(dev);
++#endif /* FIXME_MDFLD JLIU7_PO */
++
++ mdfld_dsi_set_power(dev, output, true);
++
++#if DUMP_REGISTER
++ dump_dsi_registers(dev);
++#endif /* DUMP_REGISTER */
++}
++
++static const struct drm_encoder_helper_funcs mdfld_dsi_helper_funcs = {
++ .dpms = mdfld_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mdfld_dsi_prepare,
++ .mode_set = mdfld_dsi_mode_set,
++ .commit = mdfld_dsi_commit,
++};
++
++static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mdfld_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = mdfld_dsi_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++/* ************************************************************************* *\
++FUNCTION: mrstDSI_clockInit
++ `
++DESCRIPTION:
++
++\* ************************************************************************* */
++static u32 mdfld_sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
++static u32 mdfld_sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
++static u32 mdfld_sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
++#define MDFLD_MIPI_2XCLK_COUNT 0x04
++
++static bool mdfldDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv, int dsi_num)
++{
++ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
++ u32 i = 0;
++ u32 *p_mipi_2xclk = NULL;
++ u32 pixelClock, HsyncWidth, HbackPorch, HfrontPorch, HactiveArea, VsyncWidth, VbackPorch, VfrontPorch, VactiveArea, bpp;
++ u32 laneCount, DDR_Clock_Calculated;
++
++ if (dsi_num ==1 ) {
++ laneCount = dev_priv->laneCount;
++ pixelClock = dev_priv->pixelClock;
++ HsyncWidth = dev_priv->HsyncWidth;
++ HbackPorch = dev_priv->HbackPorch;
++ HfrontPorch = dev_priv->HfrontPorch;
++ HactiveArea = dev_priv->HactiveArea;
++ VsyncWidth = dev_priv->VsyncWidth;
++ VbackPorch = dev_priv->VbackPorch;
++ VfrontPorch = dev_priv->VfrontPorch;
++ VactiveArea = dev_priv->VactiveArea;
++ bpp = dev_priv->bpp;
++ } else {
++ laneCount = dev_priv->laneCount2;
++ pixelClock = dev_priv->pixelClock2;
++ HsyncWidth = dev_priv->HsyncWidth2;
++ HbackPorch = dev_priv->HbackPorch2;
++ HfrontPorch = dev_priv->HfrontPorch2;
++ HactiveArea = dev_priv->HactiveArea2;
++ VsyncWidth = dev_priv->VsyncWidth2;
++ VbackPorch = dev_priv->VbackPorch2;
++ VfrontPorch = dev_priv->VfrontPorch2;
++ VactiveArea = dev_priv->VactiveArea2;
++ bpp = dev_priv->bpp2;
++ }
++
++ Htotal = HsyncWidth + HbackPorch + HfrontPorch + HactiveArea;
++ Vtotal = VsyncWidth + VbackPorch + VfrontPorch + VactiveArea;
++
++ RRate = ((pixelClock * 1000) / (Htotal * Vtotal)) + 1;
++
++ /* ddr clock frequence = (pixel clock frequence * bits per pixel)/2*/
++ mipi_2xclk = (pixelClock * bpp) / laneCount; /* KHz */
++
++ if (dsi_num ==1 ) {
++ dev_priv->RRate = RRate;
++ DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
++ } else {
++ dev_priv->RRate2 = RRate;
++ DDR_Clock_Calculated = dev_priv->DDR_Clock_Calculated2 = mipi_2xclk / 2; /* KHz */
++ }
++
++ PSB_DEBUG_ENTRY("RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
++
++ if (dev_priv->sku_100)
++ {
++ p_mipi_2xclk = mdfld_sku_100_mipi_2xclk;
++ }
++ else if (dev_priv->sku_100L)
++ {
++ p_mipi_2xclk = mdfld_sku_100L_mipi_2xclk;
++ }
++ else
++ {
++ p_mipi_2xclk = mdfld_sku_83_mipi_2xclk;
++ }
++
++ for (; i < MDFLD_MIPI_2XCLK_COUNT; i++)
++ {
++ if ((DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
++ break;
++ }
++
++ if (i == MIPI_2XCLK_COUNT)
++ {
++ PSB_DEBUG_ENTRY("the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
++
++ return false;
++ }
++
++
++ if (dsi_num ==1 ) {
++ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
++ dev_priv->ClockBits = i;
++ } else {
++ dev_priv->DDR_Clock2 = p_mipi_2xclk[i] / 2;
++ dev_priv->ClockBits2 = i;
++ }
++
++#if 0 /*JLIU7_PO */
++#if 0 /* FIXME remove it after power on*/
++ mipiControlReg = REG_READ(MIPI_CONTROL_REG) & (~MIPI_2X_CLOCK_BITS);
++ mipiControlReg |= i;
++ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
++#else /* FIXME remove it after power on*/
++ mipiControlReg |= i;
++ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
++#endif /* FIXME remove it after power on*/
++#endif /*JLIU7_PO */
++
++ PSB_DEBUG_ENTRY("mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, DDR_Clock_Calculated);
++
++ return true;
++}
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revist it. */
++struct drm_display_mode *mid_dsi_get_configuration_mode(struct drm_device *dev, int dsi_num)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++#if MDFLD_GCT_JLIU7
++ u8 panel_index = dev_priv->gct_data.bpi;
++ u8 panel_type = dev_priv->gct_data.pt;
++#endif /* MDFLD_GCT_JLIU7 */
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++ bool use_gct = false;
++ uint32_t Panel_RRate = 0;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++#if MDFLD_GCT_JLIU7
++ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
++ if ((1<<panel_index) & panel_type) /* if non-zero,*/
++ use_gct = true; /*then mipi panel.*/
++#endif /* MDFLD_GCT_JLIU7 */
++
++ if (use_gct) {
++ PSB_DEBUG_ENTRY("gct find MIPI panel. \n");
++
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++
++ PSB_DEBUG_ENTRY("hdisplay is %d\n", mode->hdisplay);
++ PSB_DEBUG_ENTRY("vdisplay is %d\n", mode->vdisplay);
++ PSB_DEBUG_ENTRY("HSS is %d\n", mode->hsync_start);
++ PSB_DEBUG_ENTRY("HSE is %d\n", mode->hsync_end);
++ PSB_DEBUG_ENTRY("htotal is %d\n", mode->htotal);
++ PSB_DEBUG_ENTRY("VSS is %d\n", mode->vsync_start);
++ PSB_DEBUG_ENTRY("VSE is %d\n", mode->vsync_end);
++ PSB_DEBUG_ENTRY("vtotal is %d\n", mode->vtotal);
++ PSB_DEBUG_ENTRY("clock is %d\n", mode->clock);
++ } else {
++ if (dsi_num == 1)
++ {
++#if DSI_TPO_864x480 /*FIXME jliu7 remove it later */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++
++ dev_priv->dpi = true;
++ dev_priv->bpp = 24;
++ dev_priv->videoModeFormat = BURST_MODE;
++ dev_priv->laneCount = 2;
++ dev_priv->channelNumber = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_864x480 /* get from spec. */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++
++ Panel_RRate = 60;
++ dev_priv->dpi = false;
++ dev_priv->bpp = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount = 2;
++ dev_priv->channelNumber = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_480x864 /* get from spec. */
++ mode->hdisplay = 480;
++ mode->vdisplay = 864;
++
++ Panel_RRate = 60;
++ dev_priv->dpi = false;
++ dev_priv->bpp = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount = 2;
++ dev_priv->channelNumber = 0;
++#endif /*FIXME jliu7 remove it later */
++ } else {
++#if DSI_TPO_864x480_2 /*FIXME jliu7 remove it later */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++
++ dev_priv->dpi2 = true;
++ dev_priv->bpp2 = 24;
++ dev_priv->videoModeFormat2 = BURST_MODE;
++ dev_priv->laneCount2 = 2;
++ dev_priv->channelNumber2 = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_864x480_2 /* get from spec. */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++
++ Panel_RRate = 60;
++ dev_priv->dpi2 = false;
++ dev_priv->bpp2 = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount2 = 2;
++ dev_priv->channelNumber2 = 0;
++#endif /*FIXME jliu7 remove it later */
++#if DBI_TPO_480x864_2 /* get from spec. */
++ mode->hdisplay = 480;
++ mode->vdisplay = 864;
++
++ Panel_RRate = 60;
++ dev_priv->dpi2 = false;
++ dev_priv->bpp2 = 24;
++
++ /* FIXME hard code values. */
++ dev_priv->laneCount2 = 2;
++ dev_priv->channelNumber2 = 0;
++#endif /*FIXME jliu7 remove it later */
++ }
++
++ }
++
++
++ if (((dsi_num == 1) && !dev_priv->dpi) || ((dsi_num == 2) && !dev_priv->dpi2))
++ {
++
++ mode->hsync_start = mode->hdisplay + 8;
++ mode->hsync_end = mode->hsync_start + 4;
++ mode->htotal = mode->hsync_end + 8;
++ mode->vsync_start = mode->vdisplay + 2;
++ mode->vsync_end = mode->vsync_start + 2;
++ mode->vtotal = mode->vsync_end + 2;
++ mode->clock = (mode->htotal * mode->vtotal * Panel_RRate) / 1000;
++ }
++
++ if (dsi_num == 1)
++ {
++ dev_priv->pixelClock = mode->clock; /*KHz*/
++ dev_priv->HsyncWidth = mode->hsync_end - mode->hsync_start;
++ dev_priv->HbackPorch = mode->htotal - mode->hsync_end;
++ dev_priv->HfrontPorch = mode->hsync_start - mode->hdisplay;
++ dev_priv->HactiveArea = mode->hdisplay;
++ dev_priv->VsyncWidth = mode->vsync_end - mode->vsync_start;
++ dev_priv->VbackPorch = mode->vtotal - mode->vsync_end;
++ dev_priv->VfrontPorch = mode->vsync_start - mode->vdisplay;
++ dev_priv->VactiveArea = mode->vdisplay;
++
++ PSB_DEBUG_ENTRY("pixelClock is %d\n", dev_priv->pixelClock);
++ PSB_DEBUG_ENTRY("HsyncWidth is %d\n", dev_priv->HsyncWidth);
++ PSB_DEBUG_ENTRY("HbackPorch is %d\n", dev_priv->HbackPorch);
++ PSB_DEBUG_ENTRY("HfrontPorch is %d\n", dev_priv->HfrontPorch);
++ PSB_DEBUG_ENTRY("HactiveArea is %d\n", dev_priv->HactiveArea);
++ PSB_DEBUG_ENTRY("VsyncWidth is %d\n", dev_priv->VsyncWidth);
++ PSB_DEBUG_ENTRY("VbackPorch is %d\n", dev_priv->VbackPorch);
++ PSB_DEBUG_ENTRY("VfrontPorch is %d\n", dev_priv->VfrontPorch);
++ PSB_DEBUG_ENTRY("VactiveArea is %d\n", dev_priv->VactiveArea);
++ } else {
++ dev_priv->pixelClock2 = mode->clock; /*KHz*/
++ dev_priv->HsyncWidth2 = mode->hsync_end - mode->hsync_start;
++ dev_priv->HbackPorch2 = mode->htotal - mode->hsync_end;
++ dev_priv->HfrontPorch2 = mode->hsync_start - mode->hdisplay;
++ dev_priv->HactiveArea2 = mode->hdisplay;
++ dev_priv->VsyncWidth2 = mode->vsync_end - mode->vsync_start;
++ dev_priv->VbackPorch2 = mode->vtotal - mode->vsync_end;
++ dev_priv->VfrontPorch2 = mode->vsync_start - mode->vdisplay;
++ dev_priv->VactiveArea2 = mode->vdisplay;
++
++ PSB_DEBUG_ENTRY("pixelClock2 is %d\n", dev_priv->pixelClock2);
++ PSB_DEBUG_ENTRY("HsyncWidth2 is %d\n", dev_priv->HsyncWidth2);
++ PSB_DEBUG_ENTRY("HbackPorch2 is %d\n", dev_priv->HbackPorch2);
++ PSB_DEBUG_ENTRY("HfrontPorch2 is %d\n", dev_priv->HfrontPorch2);
++ PSB_DEBUG_ENTRY("HactiveArea2 is %d\n", dev_priv->HactiveArea2);
++ PSB_DEBUG_ENTRY("VsyncWidth2 is %d\n", dev_priv->VsyncWidth2);
++ PSB_DEBUG_ENTRY("VbackPorch2 is %d\n", dev_priv->VbackPorch2);
++ PSB_DEBUG_ENTRY("VfrontPorch2 is %d\n", dev_priv->VfrontPorch2);
++ PSB_DEBUG_ENTRY("VactiveArea2 is %d\n", dev_priv->VactiveArea2);
++ }
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/**
++ * mdfld_dsi_init - setup MIPI pipe A connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mid_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev, int dsi_num)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_display_mode *panel_fixed_mode;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ PSB_DEBUG_ENTRY("Enter mid_dsi_init2\n");
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &mdfld_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = (dsi_num == 1) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
++
++ drm_encoder_helper_add(encoder, &mdfld_dsi_helper_funcs);
++ drm_connector_helper_add(connector,
++ &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL);
++
++ if (dsi_num == 1)
++ {
++ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_freq = 0xc8;
++
++ /*
++ * MIPI discovery:
++ * 1) check for DDB data
++ * 2) check for VBT data
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* FIXME change it to true if GET_DDB works */
++ dev_priv->config_phase = false;
++ dev_priv->mipi_lane_config = 0x0;
++#if MDFLD_JLIU7_DSR
++ dev_priv->dsr_fb_update = MDFLD_DSR_2D_3D;
++ dev_priv->b_dsr_enable = false;
++#endif /* MDFLD_JLIU7_DSR */
++
++ } else {
++ dsi_backlight2 = BRIGHTNESS_MAX_LEVEL;
++ blc_pol2 = BLC_POLARITY_NORMAL;
++ blc_freq2 = 0xc8;
++
++ /*
++ * MIPI discovery:
++ * 1) check for DDB data
++ * 2) check for VBT data
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* FIXME change it to true if GET_DDB works */
++ dev_priv->config_phase2 = false;
++ dev_priv->mipi_lane_config = 0x2;
++ }
++
++ /*
++ * If we didn't get DDB data, try geting panel timing
++ * from configuration data
++ */
++ panel_fixed_mode = mid_dsi_get_configuration_mode(dev, dsi_num);
++
++ if (dsi_num == 1) {
++ /* GPIO control to reset MIP */
++ gpio_request(128, "gfx");
++ gpio_direction_output(128, 1);
++ __gpio_get_value(128);
++ mode_dev->panel_fixed_mode = panel_fixed_mode;
++ } else {
++ /* GPIO control to reset MIP */
++ gpio_request(34, "gfx");
++ gpio_direction_output(34, 1);
++ __gpio_get_value(128);
++ mode_dev->panel_fixed_mode2 = panel_fixed_mode;
++ }
++
++ if (panel_fixed_mode) {
++ panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
++ } else {
++ /* If we still don't have a mode after all that, give up. */
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ if (!mdfldDSI_clockInit(dev_priv, dsi_num))
++ {
++ DRM_DEBUG("Can't iniitialize MRST DSI clock.\n");
++#if 0 /* FIXME JLIU7 */
++ goto failed_find;
++#endif /* FIXME JLIU7 */
++ }
++
++ if (dsi_num == 1)
++ {
++ /* FIXME_JLIU7 MDFLD_PO do we need to allocate DMA-capable memory? */
++ /* dev_priv->p_DBI_commandBuffer = (u8 *)((u32)kzalloc(DBI_COMMAND_BUFFER_SIZE, GFP_KERNEL) & ALIGNMENT_32BYTE_MASK);*/
++ dev_priv->DBI_CB_phys = pg->gtt_phys_start - 0x1000;
++ dev_priv->p_DBI_commandBuffer = (u8 *)ioremap_nocache(dev_priv->DBI_CB_phys, 0x800);
++ if (!dev_priv->p_DBI_commandBuffer)
++ goto failed_find;
++
++ /*dev_priv->DBI_CB_phys = (u32) virt_to_phys(dev_priv->p_DBI_commandBuffer);*/
++ PSB_DEBUG_DBI_BF("mid_dsi_init2 p_DBI_commandBuffer = 0x%x, DBI_CB_phys = 0x%x. \n", (u32) dev_priv->p_DBI_commandBuffer, dev_priv->DBI_CB_phys);
++ } else {
++ dev_priv->DBI_CB_phys2 = pg->gtt_phys_start - 0x800;
++ dev_priv->p_DBI_commandBuffer2 = (u8 *)ioremap_nocache(dev_priv->DBI_CB_phys2, 0x800);
++ if (!dev_priv->p_DBI_commandBuffer2)
++ goto failed_find;
++
++ /*dev_priv->DBI_CB_phys2 = (u32) virt_to_phys(dev_priv->p_DBI_commandBuffer2);*/
++ PSB_DEBUG_DBI_BF("mid_dsi_init2 p_DBI_commandBuffer2 = 0x%x, DBI_CB_phys2 = 0x%x. \n", (u32) dev_priv->p_DBI_commandBuffer2, dev_priv->DBI_CB_phys2);
++ }
++
++ dev_priv->first_boot = true;
++
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No MIIP modes found, disabling.\n");
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_dsi_aava.c
+@@ -0,0 +1,930 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++/* This enables setting backlights on with a delay at startup,
++ should be removed after resolving issue with backlights going off
++ after setting them on in initial mrst_dsi_set_power call */
++#define AAVA_BACKLIGHT_HACK
++
++#include <linux/version.h>
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++#include <asm/ipc_defs.h>
++#else
++#include <asm/intel_scu_ipc.h>
++#endif
++
++#ifdef AAVA_BACKLIGHT_HACK
++#include <linux/workqueue.h>
++#endif /* AAVA_BACKLIGHT_HACK */
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_powermgmt.h"
++
++/* Debug trace definitions */
++#define DBG_LEVEL 0
++#define AAVA_EV_0_5
++
++#if (DBG_LEVEL > 0)
++#define DBG_TRACE(format, args...) printk(KERN_ERR "%s: " format "\n", \
++ __func__ , ## args)
++#else
++#define DBG_TRACE(format, args...)
++#endif
++
++#define DBG_ERR(format, args...) printk(KERN_ERR "%s: " format "\n", \
++ __func__ , ## args)
++
++#define BRIGHTNESS_MAX_LEVEL 100
++
++#define DRM_MODE_ENCODER_MIPI 5
++
++#define DBG_PRINTS 0
++
++#define NEW_CRAP_SAMPLE_SETTINGS
++
++
++#define VSIZE 480
++#define HSIZE 864
++#define HFP_DOTS 10
++#define HBP_DOTS 10
++#define HSYNC_DOTS 4
++#define VFP_LINES 8
++#define VBP_LINES 8
++#define VSYNC_LINES 4
++
++#define MIPI_LANES 2
++#define MIPI_HACT ((HSIZE * 3) / MIPI_LANES)
++#define MIPI_HFP ((HFP_DOTS * 3) / MIPI_LANES)
++#define MIPI_HBP ((HBP_DOTS * 3) / MIPI_LANES)
++#define MIPI_HSPAD ((HSYNC_DOTS * 3) / MIPI_LANES)
++#define MIPI_VFP VFP_LINES
++#define MIPI_VSPAD VSYNC_LINES
++#define MIPI_VBP VBP_LINES
++
++#define DISP_HPIX (HSIZE - 1)
++#define DISP_VPIX (VSIZE - 1)
++#define DISP_HBLANK_START DISP_HPIX
++#define DISP_HBLANK_END (DISP_HBLANK_START + HFP_DOTS + HSYNC_DOTS + HBP_DOTS - 1)
++#define DISP_HSYNC_START (DISP_HBLANK_START + HFP_DOTS - 1)
++#define DISP_HSYNC_END (DISP_HSYNC_START + HSYNC_DOTS - 1)
++#define DISP_VBLANK_START DISP_VPIX
++#define DISP_VBLANK_END (DISP_VBLANK_START + VFP_LINES + VSYNC_LINES + VBP_LINES - 1)
++#define DISP_VSYNC_START (DISP_VBLANK_START + VFP_LINES - 1)
++#define DISP_VSYNC_END (DISP_VSYNC_START + VSYNC_LINES - 1)
++
++#define MAX_FIFO_WAIT_MS 100
++
++#define MIPI_2XCLK_COUNT 0x04
++#define BLC_POLARITY_NORMAL 0
++
++static unsigned int dphy_reg = 0x0d0a7f06;
++static unsigned int mipi_clock = 0x2;
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void dsi_bl_work_handler(struct work_struct *work);
++DECLARE_DELAYED_WORK(bl_work, dsi_bl_work_handler);
++#endif /* AAVA_BACKLIGHT_HACK */
++
++
++
++static int dsi_wait_hs_data_fifo(struct drm_device *dev)
++{
++ int fifo_wait_time = 0;
++
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL) {
++ if (fifo_wait_time == MAX_FIFO_WAIT_MS) {
++ DBG_ERR("timeout");
++ return -1;
++ }
++ udelay(1000);
++ fifo_wait_time++;
++ }
++ return 0;
++}
++
++static int dsi_wait_hs_ctrl_fifo(struct drm_device *dev)
++{
++ int fifo_wait_time = 0;
++
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL) {
++ if (fifo_wait_time == MAX_FIFO_WAIT_MS) {
++ DBG_ERR("timeout");
++ return -1;
++ }
++ udelay(1000);
++ fifo_wait_time++;
++ }
++ return 0;
++}
++
++static void dsi_set_backlight_state(int state)
++{
++ u8 addr[2], value[2];
++
++ addr[0] = 0x2a;
++ addr[1] = 0x28;
++
++ if (state) {
++ value[0] = 0xaa;
++ #ifdef AAVA_EV_0_5
++ value[1] = 0x30;
++ #else
++ value[1] = 0x60;
++ #endif
++ } else {
++ value[0] = 0x0;
++ value[1] = 0x0;
++ }
++
++ intel_scu_ipc_iowrite8(addr[0], value[0]);
++ intel_scu_ipc_iowrite8(addr[1], value[1]);
++}
++
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void dsi_bl_work_handler(struct work_struct *work)
++{
++ DBG_TRACE("");
++ dsi_set_backlight_state(1);
++}
++#endif /* AAVA_BACKLIGHT_HACK */
++
++
++static void dsi_set_panel_reset_state(int state)
++{
++ if (state) {
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ struct ipc_pmic_reg_data tmp_reg = {0};
++ #if DBG_PRINTS
++ printk("panel_reset_on\n");
++ #endif /* DBG_PRINTS */
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++ #ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++ tmp_reg.pmic_reg_data[0].value = 0x01;
++ #else /* CDK */
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk("panel_reset_on: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value &= 0xbf;
++ #endif /* AAVA_EV_0_5 */
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("panel_reset_on: failed to write pmic reg 0xe6!\n");
++ }
++ #else /*KERNEL_VERSION >= 2.6.34*/
++
++ /*FIXME: if wrong*/
++ u8 addr, value;
++
++ #ifdef AAVA_EV_0_5
++ addr = 0xe6;
++ value = 0x01;
++ #else /*CDK*/
++ addr = 0xf4;
++
++ if(intel_scu_ipc_ioread8(addr, &value)) {
++ printk("panel_reset_on: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++
++ value &= 0xbf;
++ #endif /*AAVA_EV_0_5*/
++ if(intel_scu_ipc_iowrite8(addr, value)) {
++ printk("panel_reset_on: failed to write pmic reg 0xf4!\n");
++ return;
++ }
++ #endif /*LINUX_VERSION_CODE*/
++ /* Minimum active time to trigger reset is 10us */
++ udelay(10);
++ } else {
++
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ struct ipc_pmic_reg_data tmp_reg = {0};
++ #if DBG_PRINTS
++ printk("panel_reset_off\n");
++ #endif /* DBG_PRINTS */
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++ #ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++ tmp_reg.pmic_reg_data[0].value = 0x09;
++ #else /* CDK */
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk("panel_reset_off: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value |= 0x40;
++ #endif /* AAVA_EV_0_5 */
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("panel_reset_off: failed to write pmic reg 0xe6!\n");
++ }
++ #else /*KERNEL_VERSION > 2.6.34*/
++ u8 addr, value;
++
++ #ifdef AAVA_EV_0_5
++ addr = 0xe6;
++ value = 0x09;
++ #else
++ addr = 0xf4;
++
++ if (intel_scu_ipc_ioread8(addr, &value)) {
++ printk("panel_reset_off: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++
++ value |= 0x40;
++ #endif
++ if (intel_scu_ipc_iowrite8(addr, value)) {
++ printk("panel_reset_off: failed to write pmic reg 0xe6!\n");
++ }
++ #endif
++ /* Maximum startup time from reset is 120ms */
++ msleep(120);
++ }
++}
++
++
++static void dsi_init_panel(struct drm_device *dev)
++{
++ DBG_TRACE("");
++
++ /* Flip page order to have correct image orientation */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x00008036);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* Write protection key to allow DM bit setting */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x005a5af1);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* Set DM bit to enable video mode */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x000100f7);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* Write protection keys to allow TCON setting */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x005a5af0);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000329);
++
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x005a5afc);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* Write TCON setting */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++#if 0
++ /* Suggested by TPO, doesn't work */
++ REG_WRITE(0xb068, 0x110000b7);
++ REG_WRITE(0xb068, 0x00000044);
++#else
++ REG_WRITE(0xb068, 0x770000b7);
++ REG_WRITE(0xb068, 0x00000044);
++#endif
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000529);
++}
++
++
++static void dsi_set_ptarget_state(struct drm_device *dev, int state)
++{
++ u32 pp_sts_reg;
++
++ DBG_TRACE("%d", state);
++
++ if (state) {
++ REG_WRITE(PP_CONTROL, (REG_READ(PP_CONTROL) | POWER_TARGET_ON));
++ do {
++ pp_sts_reg = REG_READ(PP_STATUS);
++ } while ((pp_sts_reg & (PP_ON | PP_READY)) == PP_READY);
++ } else {
++ REG_WRITE(PP_CONTROL,
++ (REG_READ(PP_CONTROL) & ~POWER_TARGET_ON));
++ do {
++ pp_sts_reg = REG_READ(PP_STATUS);
++ } while (pp_sts_reg & PP_ON);
++ }
++}
++
++
++static void dsi_send_turn_on_packet(struct drm_device *dev)
++{
++ DBG_TRACE("");
++
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++
++ /* Short delay to wait that display turns on */
++ msleep(10);
++}
++
++
++static void dsi_send_shutdown_packet(struct drm_device *dev)
++{
++ DBG_TRACE("");
++
++ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
++}
++
++
++static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, int state)
++{
++ u32 temp_reg;
++
++ DBG_TRACE("%d", state);
++
++ if (state) {
++ /* Enable pipe */
++ temp_reg = REG_READ(PIPEACONF);
++ temp_reg |= (PIPEACONF_ENABLE);
++ REG_WRITE(PIPEACONF, temp_reg);
++ temp_reg = REG_READ(PIPEACONF);
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ msleep(20);
++
++ /* Enable plane */
++ temp_reg = REG_READ(DSPACNTR);
++ temp_reg |= (DISPLAY_PLANE_ENABLE);
++ REG_WRITE(DSPACNTR, temp_reg);
++ temp_reg = REG_READ(DSPACNTR);
++
++ /* Flush plane change by read/write/read of BASE reg */
++ temp_reg = REG_READ(MRST_DSPABASE);
++ REG_WRITE(MRST_DSPABASE, temp_reg);
++ temp_reg = REG_READ(MRST_DSPABASE);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ msleep(20);
++ } else {
++ /* Disable plane */
++ temp_reg = REG_READ(DSPACNTR);
++ temp_reg &= ~(DISPLAY_PLANE_ENABLE);
++ REG_WRITE(DSPACNTR, temp_reg);
++ temp_reg = REG_READ(DSPACNTR);
++
++ /* Flush plane change by read/write/read of BASE reg */
++ temp_reg = REG_READ(MRST_DSPABASE);
++ REG_WRITE(MRST_DSPABASE, temp_reg);
++ temp_reg = REG_READ(MRST_DSPABASE);
++
++ /* Wait for 20ms for the plane disable to take effect. */
++ msleep(20);
++
++ /* Disable pipe */
++ temp_reg = REG_READ(PIPEACONF);
++ temp_reg &= ~(PIPEACONF_ENABLE);
++ REG_WRITE(PIPEACONF, temp_reg);
++ temp_reg = REG_READ(PIPEACONF);
++
++ /* Wait for 20ms for the pipe disable to take effect. */
++ msleep(20);
++ }
++}
++
++
++static void dsi_set_device_ready_state(struct drm_device *dev, int state)
++{
++ DBG_TRACE("%d", state);
++
++ if (state)
++ REG_WRITE(DEVICE_READY_REG, 0x00000001);
++ else
++ REG_WRITE(DEVICE_READY_REG, 0x00000000);
++}
++
++
++static void dsi_configure_mipi_block(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 color_format = (RGB_888_FMT << FMT_DPI_POS);
++ u32 res = 0;
++
++ DBG_TRACE("");
++
++ /* MIPI clock ratio 1:1 */
++ /* REG_WRITE(MIPI_CONTROL_REG, 0x00000018); */
++ /* REG_WRITE(0xb080, 0x0b061a02); */
++
++ /* MIPI clock ratio 2:1 */
++ /* REG_WRITE(MIPI_CONTROL_REG, 0x00000019); */
++ /* REG_WRITE(0xb080, 0x3f1f1c04); */
++
++ /* MIPI clock ratio 3:1 */
++ /* REG_WRITE(MIPI_CONTROL_REG, 0x0000001a); */
++ /* REG_WRITE(0xb080, 0x091f7f08); */
++
++ /* MIPI clock ratio 4:1 */
++ REG_WRITE(MIPI_CONTROL_REG, (0x00000018 | mipi_clock));
++ REG_WRITE(0xb080, dphy_reg);
++
++ /* Enable all interrupts */
++ REG_WRITE(INTR_EN_REG, 0xffffffff);
++
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
++ REG_WRITE(DEVICE_RESET_REG, 0x000000ff);
++ REG_WRITE(INIT_COUNT_REG, 0x00000fff);
++ REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
++ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
++ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
++ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
++
++ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
++
++ REG_WRITE(DSI_FUNC_PRG_REG, (dev_priv->laneCount | color_format));
++
++ res = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
++ REG_WRITE(DPI_RESOLUTION_REG, res);
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, dev_priv->HsyncWidth);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, dev_priv->HbackPorch);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, dev_priv->HfrontPorch);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, MIPI_HACT);
++
++ /* Enable MIPI Port */
++ REG_WRITE(MIPI, MIPI_PORT_EN);
++}
++
++
++static void dsi_configure_down(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DBG_TRACE("");
++
++ if (!dev_priv->dpi_panel_on) {
++ DBG_TRACE("already off");
++ return;
++ }
++
++ /* Disable backlight */
++ dsi_set_backlight_state(0);
++
++ /* Disable pipe and plane */
++ dsi_set_pipe_plane_enable_state(dev, 0);
++
++ /* Disable PTARGET */
++ dsi_set_ptarget_state(dev, 0);
++
++ /* Send shutdown command, can only be sent if
++ * interface is configured
++ */
++ if (dev_priv->dsi_device_ready)
++ dsi_send_shutdown_packet(dev);
++
++ /* Clear device ready state */
++ dsi_set_device_ready_state(dev, 0);
++
++ /* Set panel to reset */
++ dsi_set_panel_reset_state(1);
++
++ dev_priv->dpi_panel_on = false;
++}
++
++
++static void dsi_configure_up(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DBG_TRACE("");
++
++ if (dev_priv->dpi_panel_on) {
++ DBG_TRACE("already on");
++ return;
++ }
++
++ /* Get panel from reset */
++ dsi_set_panel_reset_state(0);
++
++ /* Set device ready state */
++ dsi_set_device_ready_state(dev, 1);
++
++ /* Send turn on command */
++ dsi_send_turn_on_packet(dev);
++
++ /* Enable PTARGET */
++ dsi_set_ptarget_state(dev, 1);
++
++ /* Initialize panel */
++ dsi_init_panel(dev);
++
++ /* Enable plane and pipe */
++ dsi_set_pipe_plane_enable_state(dev, 1);
++
++ /* Enable backlight */
++ dsi_set_backlight_state(1);
++
++ dev_priv->dpi_panel_on = true;
++}
++
++
++static void dsi_init_drv_ic(struct drm_device *dev)
++{
++ DBG_TRACE("");
++}
++
++
++static void dsi_schedule_work(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DBG_TRACE("");
++
++ schedule_work(&dev_priv->dsi_work);
++}
++
++
++static void dsi_work_handler(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv = container_of(work,
++ struct drm_psb_private, dsi_work);
++
++ DBG_TRACE("");
++
++ dsi_configure_up(dev_priv->dev);
++}
++
++
++static void dsi_init_mipi_config(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ DBG_TRACE("");
++
++ /* Fixed values for TPO display */
++ dev_priv->pixelClock = 33264;
++ dev_priv->HsyncWidth = MIPI_HSPAD;
++ dev_priv->HbackPorch = MIPI_HBP;
++ dev_priv->HfrontPorch = MIPI_HFP;
++ dev_priv->HactiveArea = HSIZE;
++ dev_priv->VsyncWidth = MIPI_VSPAD;
++ dev_priv->VbackPorch = MIPI_VBP;
++ dev_priv->VfrontPorch = MIPI_VFP;
++ dev_priv->VactiveArea = VSIZE;
++ dev_priv->bpp = 24;
++
++ /* video mode */
++ dev_priv->dpi = true;
++
++ /* Set this true since firmware or kboot has enabled display */
++ dev_priv->dpi_panel_on = true;
++
++ /* Set this false to ensure proper initial configuration */
++ dev_priv->dsi_device_ready = false;
++
++ /* 2 lanes */
++ dev_priv->laneCount = MIPI_LANES;
++
++ /* Burst mode */
++ dev_priv->videoModeFormat = BURST_MODE;
++
++ dev_priv->init_drvIC = dsi_init_drv_ic;
++ dev_priv->dsi_prePowerState = dsi_configure_down;
++ dev_priv->dsi_postPowerState = dsi_schedule_work;
++}
++
++
++static struct drm_display_mode *dsi_get_fixed_display_mode(void)
++{
++ struct drm_display_mode *mode;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ /* MiKo, fixed mode for TPO display
++ Note: Using defined values for easier match with ITP scripts
++ and adding 1 since psb_intel_display.c decreases by 1
++ */
++ mode->hdisplay = (DISP_HPIX + 1);
++ mode->vdisplay = (DISP_VPIX + 1);
++ mode->hsync_start = (DISP_HSYNC_START + 1);
++ mode->hsync_end = (DISP_HSYNC_END + 1);
++ mode->htotal = (DISP_HBLANK_END + 1);
++ mode->vsync_start = (DISP_VSYNC_START + 1);
++ mode->vsync_end = (DISP_VSYNC_END + 1);
++ mode->vtotal = (DISP_VBLANK_END + 1);
++ mode->clock = 33264;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++
++/* Encoder funcs */
++static void dsi_encoder_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ uint64_t scale_mode = DRM_MODE_SCALE_FULLSCREEN;
++
++ DBG_TRACE("");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
++ }
++
++ /* Sleep to ensure that the graphics engine is ready
++ * since its mode_set is called before ours
++ */
++ msleep(100);
++
++ /* Only one mode is supported,
++ * so configure only if not yet configured
++ */
++ if (!dev_priv->dsi_device_ready) {
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &scale_mode);
++ if (scale_mode == DRM_MODE_SCALE_CENTER)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (scale_mode == DRM_MODE_SCALE_FULLSCREEN)
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else {
++ DBG_ERR("unsupported scaling");
++ REG_WRITE(PFIT_CONTROL, 0);
++ }
++ dsi_configure_mipi_block(dev);
++ dev_priv->dsi_device_ready = true;
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void dsi_encoder_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++
++ DBG_TRACE("");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
++ }
++
++ dsi_configure_down(dev);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void dsi_encoder_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DBG_TRACE("");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
++ }
++
++ if (!work_pending(&dev_priv->dsi_work))
++ dsi_configure_up(dev);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DBG_TRACE("%s", ((mode == DRM_MODE_DPMS_ON) ? "ON" : "OFF"));
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
++ }
++
++ if (mode == DRM_MODE_DPMS_ON) {
++ if (!work_pending(&dev_priv->dsi_work))
++ dsi_configure_up(dev);
++ } else
++ dsi_configure_down(dev);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++/* Connector funcs */
++static enum drm_connector_status dsi_connector_detect(struct drm_connector
++ *connector)
++{
++ DBG_TRACE("");
++ return connector_status_connected;
++}
++
++
++static int dsi_connector_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_output = to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_output->mode_dev;
++ struct drm_display_mode *mode;
++
++ DBG_TRACE("");
++
++ /* Didn't get an DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++ return 0;
++}
++
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++ DBG_TRACE("");
++}
++
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++ DBG_TRACE("");
++}
++
++
++static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
++ .dpms = dsi_encoder_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = dsi_encoder_prepare,
++ .mode_set = dsi_encoder_mode_set,
++ .commit = dsi_encoder_commit,
++};
++
++static const struct drm_connector_helper_funcs mrst_dsi_connector_helper_funcs = {
++ .get_modes = dsi_connector_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++
++static const struct drm_connector_funcs connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = dsi_connector_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++void aava_koski_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ /* panel_reset(); */
++
++#ifdef AAVA_BACKLIGHT_HACK
++ schedule_delayed_work(&bl_work, 2*HZ);
++#endif /* AAVA_BACKLIGHT_HACK */
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev,
++ &psb_intel_output->base,
++ &connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev,
++ &psb_intel_output->enc,
++ &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
++ drm_connector_helper_add(connector, &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ mode_dev->panel_wants_dither = false;
++
++ dsi_init_mipi_config(dev_priv);
++
++ /* No config phase */
++ dev_priv->config_phase = false;
++
++ /* Get the fixed mode */
++ mode_dev->panel_fixed_mode = dsi_get_fixed_display_mode();
++ if (mode_dev->panel_fixed_mode)
++ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
++ else {
++ DBG_ERR("Fixed mode not available!\n");
++ goto failed_find;
++ }
++ dev_priv->dsi_plane_pipe_control = true;
++ drm_sysfs_connector_add(connector);
++
++ /* Initialize work queue */
++ INIT_WORK(&dev_priv->dsi_work, dsi_work_handler);
++
++ return;
++
++failed_find:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++/*EXPORT_SYMBOL_GPL(aava_koski_dsi_init); */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_hdmi.c
+@@ -0,0 +1,989 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include "psb_intel_drv.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_hdmi_reg.h"
++#include "psb_intel_hdmi_edid.h"
++#include "psb_intel_hdmi.h"
++#ifdef MDFLD_HDCP
++#include "mdfld_hdmi_audio_if.h"
++#endif /* MDFLD_HDCP */
++
++#if MDFLD_HDMI_JLIU7
++/* FIXME_MDFLD HDMI EDID supports */
++
++struct mid_intel_hdmi_priv {
++ u32 hdmib_reg;
++ u32 save_HDMIB;
++ bool has_hdmi_sink;
++ /* Should set this when detect hotplug */
++ bool hdmi_device_connected;
++ struct mdfld_hdmi_i2c *i2c_bus;
++ /* EELD packet holder*/
++ hdmi_eeld_t eeld;
++ u32 hdmi_eeld_size;
++ cea_861b_adb_t lpcm_sad;
++#ifdef MDFLD_HDCP
++ bool is_hdcp_supported;
++ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
++ struct drm_device *dev;
++ had_event_call_back mdfld_had_event_callbacks;
++#endif /* MDFLD_HDCP */
++};
++
++#ifdef MDFLD_HDCP
++extern void mdfld_hdcp_init(struct mid_intel_hdmi_priv *p_hdmi_priv);
++#endif
++
++#if 1 /*FIXME_MDFLD_HDMI remove it later */
++static void mdfld_hdmi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
++ u32 hdmib, hdmi_phy_misc;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ hdmib = REG_READ(hdmi_priv->hdmib_reg) | HDMIB_PORT_EN | HDMIB_PIPE_B_SELECT | HDMIB_NULL_PACKET;
++ hdmi_phy_misc = REG_READ(HDMIPHYMISCCTL) & ~HDMI_PHY_POWER_DOWN;
++
++ REG_WRITE(HDMIPHYMISCCTL, hdmi_phy_misc);
++ REG_WRITE(hdmi_priv->hdmib_reg, hdmib);
++ REG_READ(hdmi_priv->hdmib_reg);
++}
++
++static bool mdfld_hdmi_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
++ PSB_DEBUG_ENTRY("hdisplay = %d, vdisplay = %d. a_hdisplay = %d, a_vdisplay = %d.\n", mode->hdisplay, mode->vdisplay, adjusted_mode->hdisplay, adjusted_mode->vdisplay);
++
++ /* Should never happen!! */
++ if (IS_MID(dev) && psb_intel_crtc->pipe != 1) {
++ printk(KERN_ERR
++ "Only support HDMI on pipe B on MID \n");
++ }
++
++#if 1 /* MDFLD_HDMI_JLIU7_HACKS */
++ if (mode->hdisplay == 864 && mode->vdisplay == 480) {
++ adjusted_mode->hdisplay = 0x500;
++ adjusted_mode->htotal = 0x672;
++ adjusted_mode->hsync_start = 0x56e;
++ adjusted_mode->hsync_end = 0x596;
++ adjusted_mode->vdisplay = 0x2d0;
++ adjusted_mode->vtotal = 0x2ee;
++ adjusted_mode->vsync_start = 0x2d5;
++ adjusted_mode->vsync_end = 0x2da;
++ drm_mode_set_crtcinfo(adjusted_mode,
++ CRTC_INTERLACE_HALVE_V);
++ }
++#endif /* MDFLD_HDMI_JLIU7_HACKS */ /* Debug HDMI - Can't enable HDMI */
++#if 0 // MDFLD_HDMI_JLIU7_HACKS /* Debug HDMI - Can't enable HDMI */
++#if 1 /* 720p - Adeel */
++ adjusted_mode->hdisplay = 0x500;
++ adjusted_mode->htotal = 0x672;
++ adjusted_mode->hsync_start = 0x56e;
++ adjusted_mode->hsync_end = 0x596;
++ adjusted_mode->vdisplay = 0x2d0;
++ adjusted_mode->vtotal = 0x2ee;
++ adjusted_mode->vsync_start = 0x2d5;
++ adjusted_mode->vsync_end = 0x2da;
++#endif
++
++#if 0 /* 1080p - Brian */
++ adjusted_mode->hdisplay = 0x780;
++ adjusted_mode->htotal = 0x898;
++ adjusted_mode->hsync_start = 0x7d8;
++ adjusted_mode->hsync_end = 0x804;
++ adjusted_mode->vdisplay = 0x438;
++ adjusted_mode->vtotal = 0x464;
++ adjusted_mode->vsync_start = 0x43c;
++ adjusted_mode->vsync_end = 0x446;
++#endif
++#if 0 /* 1080p - Adeel */
++ adjusted_mode->hdisplay = 0x780;
++ adjusted_mode->htotal = 0xabe;
++ adjusted_mode->hsync_start = 0x9fe;
++ adjusted_mode->hsync_end = 0xa2a;
++ adjusted_mode->vdisplay = 0x438;
++ adjusted_mode->vtotal = 0x465;
++ adjusted_mode->vsync_start = 0x43c;
++ adjusted_mode->vsync_end = 0x441;
++#endif
++
++
++#if 0 /* 480p - Adeel */
++ adjusted_mode->hdisplay = 0x280;
++ adjusted_mode->htotal = 0x320;
++ adjusted_mode->hsync_start = 0x290;
++ adjusted_mode->hsync_end = 0x2f0;
++ adjusted_mode->vdisplay = 0x1e0;
++ adjusted_mode->vtotal = 0x20d;
++ adjusted_mode->vsync_start = 0x1ea;
++ adjusted_mode->vsync_end = 0x1ec;
++#endif
++
++#if 0 /* 480p - icdk */
++ adjusted_mode->hdisplay = 0x280;
++ adjusted_mode->htotal = 0x35a;
++ adjusted_mode->hsync_start = 0x2e0;
++ adjusted_mode->hsync_end = 0x31e;
++ adjusted_mode->vdisplay = 0x1e0;
++ adjusted_mode->vtotal = 0x20e;
++ adjusted_mode->vsync_start = 0x1ea;
++ adjusted_mode->vsync_end = 0x1ec;
++#endif
++#if 0 /* 720p - Adeel */
++ REG_WRITE(htot_reg, 0x067104ff);
++ REG_WRITE(hblank_reg, 0x067104ff);
++ REG_WRITE(hsync_reg, 0x0595056d);
++ REG_WRITE(vtot_reg, 0x02ed02cf);
++ REG_WRITE(vblank_reg, 0x02ed02cf);
++ REG_WRITE(vsync_reg, 0x02d902d4);
++#endif
++#if 0 /* 1080p - Brian */
++ REG_WRITE(htot_reg, 0x0897077f);
++ REG_WRITE(hblank_reg, 0x0897077f);
++ REG_WRITE(hsync_reg, 0x080307d7);
++ REG_WRITE(vtot_reg, 0x04630437);
++ REG_WRITE(vblank_reg, 0x04630437);
++ REG_WRITE(vsync_reg, 0x0445043b);
++#endif
++
++#if 0 /* 1080p - Adeel */
++ REG_WRITE(htot_reg, 0x0abd077f);
++ REG_WRITE(hblank_reg, 0x0abd077f);
++ REG_WRITE(hsync_reg, 0x0a2909fd);
++ REG_WRITE(vtot_reg, 0x04640437);
++ REG_WRITE(vblank_reg, 0x04640437);
++ REG_WRITE(vsync_reg, 0x0440043b);
++#endif
++
++
++#if 0 /* 480p - Adeel */
++ REG_WRITE(htot_reg, 0x031f027f);
++ REG_WRITE(hblank_reg, 0x031f027f);
++ REG_WRITE(hsync_reg, 0x02ef028f);
++ REG_WRITE(vtot_reg, 0x020c01df);
++ REG_WRITE(vblank_reg, 0x020c01df);
++ REG_WRITE(vsync_reg, 0x01eb01e9);
++#endif
++
++#if 0 /* 480p - icdk */
++ REG_WRITE(htot_reg, 0x0359027f);
++ REG_WRITE(hblank_reg, 0x0359027f);
++ REG_WRITE(hsync_reg, 0x031d02df);
++ REG_WRITE(vtot_reg, 0x020d01df);
++ REG_WRITE(vblank_reg, 0x020d01df);
++ REG_WRITE(vsync_reg, 0x01eb01e9);
++#endif
++ drm_mode_set_crtcinfo(adjusted_mode,
++ CRTC_INTERLACE_HALVE_V);
++#endif /* MDFLD_HDMI_JLIU7_HACKS */ /* Debug HDMI - Can't enable HDMI */
++ return true;
++}
++#endif /*FIXME_MDFLD_HDMI remove it later */
++
++
++static void mdfld_hdmi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
++ u32 hdmib, hdmi_phy_misc;
++
++ PSB_DEBUG_ENTRY("%s \n", mode == DRM_MODE_DPMS_ON ? "on" : "off");
++
++ hdmib = REG_READ(hdmi_priv->hdmib_reg) | HDMIB_PIPE_B_SELECT | HDMIB_NULL_PACKET;
++ hdmi_phy_misc = REG_READ(HDMIPHYMISCCTL);
++
++ if (mode != DRM_MODE_DPMS_ON) {
++ REG_WRITE(hdmi_priv->hdmib_reg, hdmib & ~HDMIB_PORT_EN);
++ REG_WRITE(HDMIPHYMISCCTL, hdmi_phy_misc | HDMI_PHY_POWER_DOWN);
++ } else {
++ REG_WRITE(HDMIPHYMISCCTL, hdmi_phy_misc & ~HDMI_PHY_POWER_DOWN);
++ REG_WRITE(hdmi_priv->hdmib_reg, hdmib | HDMIB_PORT_EN);
++ }
++ REG_READ(hdmi_priv->hdmib_reg);
++}
++
++static void mdfld_hdmi_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *output = to_psb_intel_output(connector);
++ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmib_reg);
++}
++
++static void mdfld_hdmi_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *output = to_psb_intel_output(connector);
++ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ REG_WRITE(hdmi_priv->hdmib_reg, hdmi_priv->save_HDMIB);
++ REG_READ(hdmi_priv->hdmib_reg);
++}
++
++/* HDMI DIP related stuff */
++static int mdfld_hdmi_get_cached_edid_block(struct drm_connector *connector, uint32_t num_block, uint8_t *edid_block, uint32_t size)
++{
++ struct drm_display_info *displayinfo = &(connector->display_info);
++ if (num_block >= MAX_EDID_BLOCKS)
++ {
++ DRM_ERROR("mdfld_hdmi_get_cached_edid_block() - Invalid EDID block\n");
++ return 0;
++ }
++ edid_block = &displayinfo->raw_edid[EDID_BLOCK_SIZE*num_block];
++ return 1;
++}
++
++/////////////////////////////////////////////////////////////////////////
++// INTHDMIENCODER_CreateEELDPacket():
++// This function parses v1.3 base EDID and CEA-861b EDID Timing Extension
++// Version3 and creates EELD (Enhanced EDID Like Data) packet. This EELD data contains
++// audio configuration information and other details read from EDID. This can also contain Vendor specific Data
++//
++/////////////////////////////////////////////////////////////////////////
++static int mdfld_hdmi_create_eeld_packet(struct drm_connector *connector)
++{
++ struct psb_intel_output *output = to_psb_intel_output(connector);
++ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
++ uint8_t ucEdidBlock[128];
++ hdmi_eeld_t *pEEld = NULL;
++ baseedid_1_x_t *pEdid = NULL;
++ ce_edid_t *pCeEdid = NULL;
++ int dwNumOfBytes = 0;
++ int sizeOfCEADataBlock = 0;
++ uint8_t * pDataBlock = NULL;
++ edid_dtd_timing_t *pDTD = NULL;
++ uint8_t *pData = NULL;
++ uint8_t ucDataBlockTag = 0;
++ cea_861b_adb_t *pADB = NULL;
++ uint8_t i = 0;
++ uint8_t j = 0;
++ uint8_t * pSADBlocks = NULL;
++ uint8_t * pCurrentSADBlocks = NULL;
++ uint32_t ulNumSADBytes = 0;
++ //vsdb_byte6_to_byte8_t *pVSDB = NULL;
++ uint32_t ulIndex = 0;
++ //uint8_t b48kHzCADPresent = false;
++
++ pEEld = (hdmi_eeld_t *) &hdmi_priv->eeld;
++
++ // Fill Version info
++ pEEld->cea_edid_rev_id = HDMI_EELD_CEA_EDID_VERSION;
++ pEEld->eld_ver = HDMI_EELD_VERSION;
++
++ // Fill BaseLine ELD length
++ // This is 80 bytes as per EELD proposal
++ pEEld->baseline_eld_length = HDMI_EELD_BASELINE_DATA_LENGTH;
++
++ //Zero out EDID block buffer
++ memset(ucEdidBlock, 0, sizeof(ucEdidBlock));
++
++ // Get Extn EDID
++ if(!mdfld_hdmi_get_cached_edid_block(connector, 1, ucEdidBlock, EDID_BLOCK_SIZE))
++ {
++ return 0;
++ }
++
++ pCeEdid = (ce_edid_t *) ucEdidBlock;
++
++ //allocate memory (48 bytes) for SAD Blocks buffer
++ pSADBlocks = kcalloc(1, 48, GFP_KERNEL);
++
++ if(pSADBlocks == NULL)
++ {
++ DRM_ERROR("mdfld_hdmi_create_eld_packaet() - Failed to allocate mem for pSADBlocks\n");
++ return 0;
++ }
++
++ pCurrentSADBlocks = pSADBlocks;
++
++ // Now pull out data from CEA Extension EDID
++ // If Offset <= 4, we will not have CEA DataBlocks
++ if(pCeEdid->ucDTDOffset > CEA_EDID_HEADER_SZIE)
++ {
++ sizeOfCEADataBlock = pCeEdid->ucDTDOffset - CEA_EDID_HEADER_SZIE;
++
++ pDataBlock = (uint8_t *)pCeEdid;
++
++ // skip header (first 4 bytes) in CEA EDID Timing Extension
++ // and set pointer to start of DataBlocks collection
++ pDataBlock += CEA_EDID_HEADER_SZIE;
++
++ // General Format of CEA Data Block Collection
++ // -----------+--------------------+-----------------------------------------+
++ // |Byte# |bits5-7 | bits 0-4 |
++ // -----------|--------------------+-----------------------------------------+
++ // | 1 | Video Tag |Length = total #of video bytes following |
++ // | | Code |this byte (L1) |
++ // |--------------------+-----------------------------------------+
++ // Video | 2 | CEA Short Video Descriptor 1 |
++ // Data |--------+-----------------------------------------------------|
++ // Block | 3 | CEA Short Video Descriptor 2 |
++ // |--------+-----------------------------------------------------|
++ // | ... | ... |
++ // |--------------------------------------------------------------+
++ // | 1+L1 | CEA Short Video Descriptor L1 |
++ // -----------+--------------------+-----------------------------------------+
++ // | 2+L1 | Audio Tag |Length = total #of audio bytes following |
++ // | | Code |this byte (L2) |
++ // |--------------------+-----------------------------------------+
++ // Audio | 3+L1 | |
++ // Data |--------+ |
++ // Block | 4+L1 | CEA Short Audio Descriptor 1 |
++ // |--------+ |
++ // | 5+L1 | |
++ // |--------------------------------------------------------------+
++ // | ... | |
++ // | | |
++ // | | |
++ // | ... | |
++ // |---------------------------------------------------------------
++ // |L1+L2 | |
++ // |--------| |
++ // |1+L1+L2 | CEA Short Audio Descriptor L2/3 |
++ // |--------| |
++ // |2+L1+L2 | |
++ // -----------+--------------------------------------------------------------+
++ // |3+L1+L2 | Speaker |Length = total #of SA bytes following |
++ // | | Tag Code |this byte (L1) |
++ // Speaker |--------------------------------------------------------------+
++ // Allocation|4+L1+L2 | |
++ // Data |--------| |
++ // Block |5+L1+L2 | Speaker Allocation Data Block Payload(3 bytes) |
++ // |--------| |
++ // |6+L1+L2 | |
++ // -----------+--------------------------------------------------------------+
++ // |7+L1+L2 | VSDB Tag |Length = total #of VSDB bytes following |
++ // | | Code |this byte (L1) |
++ // Vendor |--------------------------------------------------------------+
++ // Specific |8+L1+L2 | |
++ // Data |--------| |
++ // Block |9+L1+L2 | 24-bit IEEE Registration Identifier (LSB first) |
++ // |--------| |
++ // |10+L1+L2| |
++ // |--------------------------------------------------------------+
++ // | ... | Vendor Specific Data block Payload |
++ // -----------+--------------------------------------------------------------+
++
++ while(sizeOfCEADataBlock > 0)
++ {
++ // Get the Size of CEA DataBlock in bytes and TAG
++ dwNumOfBytes = *pDataBlock & CEA_DATABLOCK_LENGTH_MASK;
++ ucDataBlockTag = (*pDataBlock & CEA_DATABLOCK_TAG_MASK) >> 5;
++
++ switch(ucDataBlockTag)
++ {
++ case CEA_AUDIO_DATABLOCK:
++ // move beyond tag/length byte
++ ++pDataBlock;
++ for (i = 0; i < (dwNumOfBytes / 3); ++i, pDataBlock += 3)
++ {
++ pADB = (cea_861b_adb_t*)pDataBlock;
++ switch(pADB->audio_format_code)
++ {
++ // uncompressed audio (Linear PCM)
++ case AUDIO_LPCM:
++ memcpy(&(hdmi_priv->lpcm_sad),pDataBlock,3);
++ //save these blocks
++ memcpy(pCurrentSADBlocks, pDataBlock, 3);
++ // move pointer in SAD blocks buffer
++ pCurrentSADBlocks += 3;
++ // update SADC field
++ pEEld->sadc += 1;
++ break;
++ // compressed audio
++ case AUDIO_AC3:
++ case AUDIO_MPEG1:
++ case AUDIO_MP3:
++ case AUDIO_MPEG2:
++ case AUDIO_AAC:
++ case AUDIO_DTS:
++ case AUDIO_ATRAC:
++ case AUDIO_OBA:
++ case AUDIO_DOLBY_DIGITAL:
++ case AUDIO_DTS_HD:
++ case AUDIO_MAT:
++ case AUDIO_DST:
++ case AUDIO_WMA_PRO:
++ //save these blocks
++ memcpy(pCurrentSADBlocks, pDataBlock, 3);
++ // move pointer in SAD blocks buffer
++ pCurrentSADBlocks += 3;
++ // update SADC field
++ pEEld->sadc += 1;
++ break;
++ }
++ }
++ break;
++
++ case CEA_VENDOR_DATABLOCK:
++ // audio wants data from 6th byte of VSDB onwards
++ //Sighting 94842:
++
++ // | Byte # | bits[7-0] |
++ // |--------------------------------------------------------------------|
++ // | 1-3 |24-bit IEEE Registration Identifier (0x000C03) |
++ // |--------------------------------------------------------------------|
++ // | 4-5 | Source Physical Address |
++ // |--------------------------------------------------------------------|
++ // | 6 |SupportsAI|DC48bit|DC36bit|Dc30bit|DCY444|Rsvd|Rsvd|DVIDual|
++ // |--------------------------------------------------------------------|
++ // | 7 | Max TMDS clock |
++ // |--------------------------------------------------------------------|
++ // | 8 |Latency_Field |I_Latency_Field| Reserved bits 5-0 |
++ // | | _Present | _Present | |
++ // |--------------------------------------------------------------------|
++ // | 9 | Video Latency |
++ // |--------------------------------------------------------------------|
++ // | 10 | Audio Latency |
++ // |--------------------------------------------------------------------|
++ // | 11 | Interlaced Video Latency |
++ // |--------------------------------------------------------------------|
++ // | 12 | Interlaced Audio Latency |
++ // |--------------------------------------------------------------------|
++
++ ++pDataBlock;
++ // move pointer to next CEA Datablock
++ pDataBlock += dwNumOfBytes;
++ break;
++
++ case CEA_SPEAKER_DATABLOCK:
++ pEEld->speaker_allocation_block = *(++pDataBlock);
++ // move pointer to next CEA Datablock
++ pDataBlock += dwNumOfBytes;
++ break;
++
++ default:
++ // Move pointer to next CEA DataBlock
++ pDataBlock += (dwNumOfBytes + 1);
++ }
++ // Decrement size of CEA DataBlock
++ sizeOfCEADataBlock -= (dwNumOfBytes + 1);
++ }
++ }
++
++ //Copy all the saved SAD blocks at the end of ELD
++ //SAD blocks should be written after the Monitor name and VSDB.
++ //See ELD definition in iHDMI.h
++ ulNumSADBytes = (pEEld->sadc) * 3; //Size of each SAD block is 3 bytes
++
++ //DCN 460119: Audio does not play on displays which do not provide SAB in EDID.
++ //Solution: Graphics driver should create a default SAB in ELD with front left and front right
++ //speakers enabled if the display supports basic audio.
++ pDataBlock = (uint8_t *)pCeEdid;
++ if((*(pDataBlock + HDMI_CEA_EXTENSION_BLOCK_BYTE_3) & HDMI_BASIC_AUDIO_SUPPORTED) && (pEEld->speaker_allocation_block == 0))
++ {
++ pEEld->flr = 1;
++ }
++ //End of DCN 460119
++
++ // zero out local buffers
++ memset(ucEdidBlock, 0, sizeof(ucEdidBlock));
++
++ // Get base EDID
++ if(!mdfld_hdmi_get_cached_edid_block(connector, 0, ucEdidBlock, EDID_BLOCK_SIZE))
++ {
++ return 0;
++ }
++
++ pEdid = (baseedid_1_x_t*) ucEdidBlock;
++ pDTD = &pEdid->DTD[1];
++
++ //Update the Manufacturer ID and Product Code here
++ memcpy(pEEld->manufacturer_id,pEdid->ManufacturerID,2);
++ memcpy(pEEld->product_id,pEdid->ProductID,2);
++
++ // Now Fill the monitor string name
++ // Search through DTD blocks, looking for monitor name
++ for (i = 0; i < MAX_BASEEDID_DTD_BLOCKS - 1; ++i, ++pDTD)
++ {
++ // Set a uint8_t pointer to DTD data
++ pData = (uint8_t *)pDTD;
++
++ // Check the Flag (the first two bytes) to determine
++ // if this block is used as descriptor
++ if (pData[0] == 0x00 && pData[1] == 0x00)
++ {
++ // And now check Data Type Tag within this descriptor
++ // Tag = 0xFC, then monitor name stored as ASCII
++ if (pData[3] == 0xFC)
++ {
++ ulIndex = 0;
++ // Copy monitor name
++ for (j = 0; (j < 13) && (pData[j+5] != 0x0A); ++j)
++ {
++ pEEld->mn_sand_sads[ulIndex] = pData[j+5];
++ ulIndex++;
++ }
++ pEEld->mnl = j;
++ break;
++ }
++ }
++ }
++
++ //Check if number of SAD Bytes > 0 and for size within limits of allowed Base line Data size as per EELD spec
++ if((ulNumSADBytes > 0) && (ulNumSADBytes <= 64))
++ {
++ //Copy the SADs immediately after the Monitor Name String
++ memcpy(&pEEld->mn_sand_sads[j], pSADBlocks, ulNumSADBytes);
++ }
++
++
++ // Header = 4, Baseline Data = 60 and Vendor (INTEL) specific = 2
++ // 4 + 60 + 2 = 66
++ hdmi_priv->hdmi_eeld_size = HDMI_EELD_SIZE;
++
++ //free the buffer allocated for SAD blocks
++ kfree(pSADBlocks);
++ pSADBlocks = NULL;
++ pCurrentSADBlocks = NULL;
++ return 1;
++}
++
++static enum drm_connector_status
++mdfld_hdmi_edid_detect(struct drm_connector *connector)
++{
++ struct psb_intel_output *output = to_psb_intel_output(connector);
++ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
++ struct edid *edid = NULL;
++ enum drm_connector_status status = connector_status_disconnected;
++
++#if 1 /* FIXME_JLIU7 HDMI*/
++ char Toshiba_edid [256] =
++ {
++0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x52, 0x62, 0x05, 0x02, 0x00, 0x00, 0x00, 0x00,
++0x00, 0x11, 0x01, 0x03, 0x80, 0x69, 0x3b, 0x78, 0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27,
++0x12, 0x48, 0x4c, 0x20, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
++0x45, 0x00, 0xc4, 0x8e, 0x21, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a, 0x20, 0xe0, 0x2d, 0x10,
++0x10, 0x3e, 0x96, 0x00, 0xc4, 0x8e, 0x21, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54,
++0x53, 0x42, 0x2d, 0x54, 0x56, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd,
++0x00, 0x17, 0x3d, 0x0f, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x03,
++0x02, 0x03, 0x20, 0x77, 0x4a, 0x90, 0x05, 0x04, 0x03, 0x07, 0x02, 0x06, 0x01, 0x20, 0x22, 0x23,
++0x09, 0x07, 0x07, 0x6c, 0x03, 0x0c, 0x00, 0x30, 0x00, 0x00, 0x1e, 0xc0, 0x2b, 0x2b, 0x33, 0x33,
++0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, 0xc4, 0x8e, 0x21, 0x00,
++0x00, 0x1e, 0x8c, 0x0a, 0xa0, 0x14, 0x51, 0xf0, 0x16, 0x00, 0x26, 0x7c, 0x43, 0x00, 0xc4, 0x8e,
++0x21, 0x00, 0x00, 0x98, 0x8c, 0x0a, 0xd0, 0x8a, 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
++0x13, 0x8e, 0x21, 0x00, 0x00, 0x18, 0x8c, 0x0a, 0xa0, 0x14, 0x51, 0xf0, 0x16, 0x00, 0x26, 0x7c,
++0x43, 0x00, 0x13, 0x8e, 0x21, 0x00, 0x00, 0x98, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20,
++0x58, 0x2c, 0x25, 0x00, 0xc4, 0x8e, 0x21, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3
++ };
++#endif
++ PSB_DEBUG_ENTRY("\n");
++
++#if 0
++ if (!psb_intel_output->hdmi_i2c_adapter) {
++ DRM_INFO("Enter mdfld_hdmi_get_modes, i2c_adapter is NULL. \n");
++
++ /* hard-coded the HDMI_I2C_ADAPTER_ID to be 3, Should get from GCT*/
++ psb_intel_output->hdmi_i2c_adapter = i2c_get_adapter(3);
++ }
++
++ if (!psb_intel_output->hdmi_i2c_adapter) {
++ DRM_INFO("Enter mdfld_hdmi_get_modes, no valid i2c_adapter . \n");
++ return ret;
++ }
++
++ edid =
++ drm_get_edid(&psb_intel_output->base,
++ psb_intel_output->hdmi_i2c_adapter);
++
++ hdmi_priv->has_hdmi_sink = false;
++ if (edid) {
++ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
++ status = connector_status_connected;
++ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
++ mdfld_hdmi_create_eeld_packet(connector);
++ }
++
++ intel_output->base.display_info.raw_edid = NULL;
++ kfree(edid);
++ }
++#else
++ edid = (struct edid *)Toshiba_edid;
++
++ hdmi_priv->has_hdmi_sink = false;
++ if (edid) {
++ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
++ status = connector_status_connected;
++ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
++ mdfld_hdmi_create_eeld_packet(connector);
++ }
++ }
++#endif
++ return status;
++}
++
++static enum drm_connector_status mdfld_hdmi_detect(struct drm_connector
++ *connector)
++{
++ PSB_DEBUG_ENTRY("\n");
++
++/* FIXME_HDMI_JLIU7 add a variable in priv data to track the HDMI HPD/connection status. Add a variable to check if an HDMI device is connected. If it is an HDMI device, we need to enable HDMI audio. */
++#if 0 /*FIXME_JLIU7 HDMI */
++ struct psb_intel_output *intel_output = to_psb_intel_output(connector);
++ struct mid_intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
++
++ if(hdmi_priv->has_hdmi_sink && hdmi_priv->hdmi_device_connected) {
++ return mdfld_hdmi_edid_detect(connector);
++ } else {
++ return connector_status_disconnected;
++ }
++#else
++ return mdfld_hdmi_edid_detect(connector);
++#endif
++}
++
++static int mdfld_hdmi_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value)
++{
++ struct drm_encoder *pEncoder = connector->encoder;
++
++ PSB_DEBUG_ENTRY("connector info, type = %d, type_id=%d, base=0x%p, base.id=0x%x. \n", connector->connector_type, connector->connector_type_id, &connector->base, connector->base.id);
++ PSB_DEBUG_ENTRY("encoder info, base.id=%d, encoder_type=%d, dev=0x%p, base=0x%p, possible_clones=0x%x. \n", pEncoder->base.id, pEncoder->encoder_type, pEncoder->dev, &pEncoder->base, pEncoder->possible_clones);
++ PSB_DEBUG_ENTRY("encoder info, possible_crtcs=0x%x, crtc=0x%p. \n", pEncoder->possible_crtcs, pEncoder->crtc);
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ PSB_DEBUG_ENTRY("scaling mode \n");
++ } else if (!strcmp(property->name, "backlight") && pEncoder) {
++ PSB_DEBUG_ENTRY("backlight \n");
++ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
++ PSB_DEBUG_ENTRY("DPMS \n");
++ }
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ struct psb_intel_crtc *pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc);
++ bool bTransitionFromToCentered;
++ uint64_t curValue;
++
++ if (!pPsbCrtc)
++ goto set_prop_error;
++
++ switch (value) {
++ case DRM_MODE_SCALE_FULLSCREEN:
++ break;
++ case DRM_MODE_SCALE_NO_SCALE:
++ break;
++ case DRM_MODE_SCALE_ASPECT:
++ break;
++ default:
++ goto set_prop_error;
++ }
++
++ if (drm_connector_property_get_value(connector, property, &curValue))
++ goto set_prop_error;
++
++ if (curValue == value)
++ goto set_prop_done;
++
++ if (drm_connector_property_set_value(connector, property, value))
++ goto set_prop_error;
++
++ bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
++ (value == DRM_MODE_SCALE_NO_SCALE);
++
++ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
++ pPsbCrtc->saved_mode.vdisplay != 0) {
++ if (bTransitionFromToCentered) {
++ if (!drm_crtc_helper_set_mode(pEncoder->crtc, &pPsbCrtc->saved_mode,
++ pEncoder->crtc->x, pEncoder->crtc->y, pEncoder->crtc->fb))
++ goto set_prop_error;
++ } else {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ pEncHFuncs->mode_set(pEncoder, &pPsbCrtc->saved_mode,
++ &pPsbCrtc->saved_adjusted_mode);
++ }
++ }
++ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private; */
++ pEncHFuncs->dpms(pEncoder, value);
++ /* pCrtcHFuncs->dpms(pEncoder->crtc, value); */
++ }
++
++set_prop_done:
++ return 0;
++set_prop_error:
++ return -1;
++}
++
++/**
++ * Return the list of HDMI DDC modes if available.
++ */
++static int mdfld_hdmi_get_modes(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct edid *edid = NULL;
++ int ret = 0;
++#if 1 /* FIXME_JLIU7 HDMI*/
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
++#endif
++
++#if 1 /* FIXME_JLIU7 HDMI*/
++ char Toshiba_edid [256] =
++ {
++0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x52, 0x62, 0x05, 0x02, 0x00, 0x00, 0x00, 0x00,
++0x00, 0x11, 0x01, 0x03, 0x80, 0x69, 0x3b, 0x78, 0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27,
++0x12, 0x48, 0x4c, 0x20, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
++0x45, 0x00, 0xc4, 0x8e, 0x21, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a, 0x20, 0xe0, 0x2d, 0x10,
++0x10, 0x3e, 0x96, 0x00, 0xc4, 0x8e, 0x21, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54,
++0x53, 0x42, 0x2d, 0x54, 0x56, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd,
++0x00, 0x17, 0x3d, 0x0f, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x03,
++0x02, 0x03, 0x20, 0x77, 0x4a, 0x90, 0x05, 0x04, 0x03, 0x07, 0x02, 0x06, 0x01, 0x20, 0x22, 0x23,
++0x09, 0x07, 0x07, 0x6c, 0x03, 0x0c, 0x00, 0x30, 0x00, 0x00, 0x1e, 0xc0, 0x2b, 0x2b, 0x33, 0x33,
++0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, 0xc4, 0x8e, 0x21, 0x00,
++0x00, 0x1e, 0x8c, 0x0a, 0xa0, 0x14, 0x51, 0xf0, 0x16, 0x00, 0x26, 0x7c, 0x43, 0x00, 0xc4, 0x8e,
++0x21, 0x00, 0x00, 0x98, 0x8c, 0x0a, 0xd0, 0x8a, 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
++0x13, 0x8e, 0x21, 0x00, 0x00, 0x18, 0x8c, 0x0a, 0xa0, 0x14, 0x51, 0xf0, 0x16, 0x00, 0x26, 0x7c,
++0x43, 0x00, 0x13, 0x8e, 0x21, 0x00, 0x00, 0x98, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20,
++0x58, 0x2c, 0x25, 0x00, 0xc4, 0x8e, 0x21, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3
++ };
++#endif
++ PSB_DEBUG_ENTRY("\n");
++
++#if 0
++ if (!psb_intel_output->hdmi_i2c_adapter) {
++ DRM_INFO("Enter mdfld_hdmi_get_modes, i2c_adapter is NULL. \n");
++
++ /* hard-coded the HDMI_I2C_ADAPTER_ID to be 3, Should get from GCT*/
++ psb_intel_output->hdmi_i2c_adapter = i2c_get_adapter(3);
++ }
++
++ if (!psb_intel_output->hdmi_i2c_adapter) {
++ DRM_INFO("Enter mdfld_hdmi_get_modes, no valid i2c_adapter . \n");
++ return ret;
++ }
++
++ edid =
++ drm_get_edid(&psb_intel_output->base,
++ psb_intel_output->hdmi_i2c_adapter);
++#else
++edid = 0;
++#endif
++ if (edid) {
++ drm_mode_connector_update_edid_property(&psb_intel_output->
++ base, edid);
++ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
++ kfree(edid);
++ }
++#if MDFLD_HDMI_JLIU7_DEBUG_1
++ else {
++ DRM_INFO("mdfld_hdmi_get_modes, invalid edid info. \n");
++#if 1 //MDFLD_HDMI_JLIU7_DEBUG_1
++ edid = (struct edid *)Toshiba_edid;
++ connector->display_info.raw_edid = (char *)edid;
++ drm_mode_connector_update_edid_property(&psb_intel_output->
++ base, edid);
++ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
++#if 1 /* FIXME_JLIU7 HDMI*/
++ if (panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ }
++#endif /* FIXME_JLIU7 HDMI*/
++#endif /* MDFLD_HDMI_JLIU7_DEBUG_1 */
++ }
++#endif /* MDFLD_HDMI_JLIU7_DEBUG_1 */
++
++ if (ret)
++ return ret;
++
++ /* Didn't get an EDID, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ return 0;
++}
++
++static int mdfld_hdmi_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++
++ PSB_DEBUG_ENTRY("display info. hdisplay = %d, vdisplay = %d. \n", mode->hdisplay, mode->vdisplay);
++
++#if 0 /* MDFLD_HDMI_JLIU7_DEBUG */
++ if (mode->clock > 165000)
++ return MODE_CLOCK_HIGH;
++ if (mode->clock < 20000)
++ return MODE_CLOCK_HIGH;
++#endif /* MDFLD_HDMI_JLIU7_DEBUG */
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ return MODE_NO_INTERLACE;
++
++ if (mode->hdisplay > 1600)
++ return MODE_PANEL;
++ if (mode->vdisplay > 1080)
++ return MODE_PANEL;
++
++ return MODE_OK;
++}
++
++static const struct drm_encoder_helper_funcs mdfld_hdmi_helper_funcs = {
++ .dpms = mdfld_hdmi_dpms,
++ .mode_fixup = mdfld_hdmi_mode_fixup,
++ .prepare = psb_intel_encoder_prepare,
++ .mode_set = mdfld_hdmi_mode_set,
++ .commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ mdfld_hdmi_connector_helper_funcs = {
++ .get_modes = mdfld_hdmi_get_modes,
++ .mode_valid = mdfld_hdmi_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs mdfld_hdmi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mdfld_hdmi_save,
++ .restore = mdfld_hdmi_restore,
++ .detect = mdfld_hdmi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = mdfld_hdmi_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++void mdfld_hdmi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++#if 0
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++#endif
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct mid_intel_hdmi_priv *hdmi_priv;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
++ sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ hdmi_priv = (struct mid_intel_hdmi_priv *)(psb_intel_output + 1);
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &mdfld_hdmi_connector_funcs,
++ DRM_MODE_CONNECTOR_DVID);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_TMDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_HDMI;
++	/*FIXME: May need to get this somewhere, but CG code seems to have hard-coded it*/
++ hdmi_priv->hdmib_reg = HDMIB_CONTROL;
++ hdmi_priv->has_hdmi_sink = false;
++ psb_intel_output->dev_priv = hdmi_priv;
++
++ drm_encoder_helper_add(encoder, &mdfld_hdmi_helper_funcs);
++ drm_connector_helper_add(connector,
++ &mdfld_hdmi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
++
++#if MDFLD_HDMI_JLIU7_DEBUG_1
++ /* hard-coded the HDMI_I2C_ADAPTER_ID to be 3, Should get from GCT*/
++ psb_intel_output->hdmi_i2c_adapter = i2c_get_adapter(3);
++
++ if (psb_intel_output->hdmi_i2c_adapter) {
++ /* HACKS_JLIU7 */
++ DRM_INFO("Enter mdfld_hdmi_init, i2c_adapter is availabe.\n");
++
++ } else {
++ printk(KERN_ALERT "No ddc adapter available!\n");
++ }
++#ifdef MDFLD_HDCP
++ hdmi_priv->is_hdcp_supported = true;
++ hdmi_priv->hdmi_i2c_adapter = psb_intel_output->hdmi_i2c_adapter;
++ hdmi_priv->dev = dev;
++ mdfld_hdcp_init(hdmi_priv);
++#endif
++#else /* MDFLD_HDMI_JLIU7_DEBUG */
++#ifdef MDFLD_HDCP
++ mdfld_hdcp_init(hdmi_priv, dev_priv);
++ /* Save a copy for user mode access */
++ dev_priv->hdmi_i2c_bus = hdmi_priv->i2c_bus;
++ dev_priv->hdmi_i2c_adapter = psb_intel_output->i2c_bus->hdmi_i2c_adapter;
++#endif
++#endif /* MDFLD_HDMI_JLIU7_DEBUG */
++
++ drm_sysfs_connector_add(connector);
++ return;
++}
++
++/* Merge the mdfld_intel_hdcp.c & mdfld_hdmi_audio.c into this file later */
++
++#ifdef MDFLD_HDCP
++#include "mdfld_intel_hdcp.c"
++#include "mdfld_hdmi_audio.c"
++#endif /* MDFLD_HDCP */
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_hdmi.h
+@@ -0,0 +1,883 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Chunfeng Zhao <chunfeng.zhao@intel.com>
++ */
++
++#ifndef __PSB_INTEL_HDMI_H__
++#define __PSB_INTEL_HDMI_H__
++
++/*
++ * HDMI Parameters
++ */
++
++/* GUID HDMI Parameters */
++#if 0
++//#ifdef DEFINE_GUID
++//#undef DEFINE_GUID
++#define DEFINE_GUID(n,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) const GUID n GUID_SECT = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
++//#endif /* DEFINE_GUID */
++
++#define HDMI_PARAMETERS_GUID "{6FD3BE0E-80F9-4206-86B7-3714FA439634}"
++DEFINE_GUID(GUID_HDMI_PARAMETERS, 0x6fd3be0e, 0x80f9, 0x4206, 0x86, 0xb7, 0x37, 0x14, 0xfa, 0x43, 0x96, 0x34);
++
++#define AVI_INFOFRAME_GUID "{DFCB113B-E54F-49A2-B5E3-78D0C6B4F4CB}"
++DEFINE_GUID(GUID_AVI_INFOFRAME, 0xdfcb113b, 0xe54f, 0x49a2, 0xb5, 0xe3, 0x78, 0xd0, 0xc6, 0xb4, 0xf4, 0xcb);
++#endif
++
++#define HDMI_DEVICE_NAME "ABC_VEND"
++#define HDMI_DEVICE_DESC "XZ05 PC VIDEO"
++
++#define HDMI_MAX_PIXEL_REPETITION 0x04 // On Cantiga only up to 4X pixel repetition is supported
++#define HDMI_HBR_AUDIO_SAMPLE_RATE 192000 // 192kHz is the sample rate corresponding to the HBR audio formats
++#define HDMI_AUDIO_CLOCK_PACKET_RATE 1500 // Audio clock packet rate of 1.5kHz has to be considered while calculating audio BW
++
++#define HDMI_BAR_INFO_LENGTH 8 // 8 bytes of barinfo
++
++// BaseLineDataLength.
++// Total size is in multiple of 4 bytes. i.e, 80/4 = 20
++#define HDMI_EELD_BASELINE_DATA_LENGTH 0x14
++
++// Header = 4, Baseline Data = 80 and Vendor (INTEL) specific = 2 as per EELD spec
++// 4 + 80 + = 84
++#define HDMI_EELD_SIZE 84
++
++//
++// HDMI command types
++//
++typedef enum
++{
++ HDMI_COMMAND_GET,
++ HDMI_COMMAND_SET
++} hdmi_command_t;
++
++#define HDMI_AVI_FLAG_ITCONTENT 0x00800000
++#define HDMI_AVI_FLAG_RGB_QUANT_RANGE 0x00040000
++#define HDMI_AVI_FLAG_SCAN_INFO 0x00000001
++#define HDMI_AVI_FLAG_BAR_INFO 0x00000010
++//
++// CEA-861b definitions
++//
++#define HDMI_CEA_VERSION 0x00
++#define HDMI_ELD_VERSION 0x01
++#define HDMI_EELD_VERSION 0x02
++#define HDMI_BASE_ELD_SIZE 0x0E
++#define HDMI_CEA_EDID_HEADER_SIZE 0x04
++#define HDMI_EELD_CEA_EDID_VERSION 0x03
++
++//
++//Basic Audio support definitions
++//
++
++#define HDMI_BASIC_AUDIO_SUPPORTED 0x40
++#define HDMI_CEA_EXTENSION_BLOCK_BYTE_3 3
++#define HDMI_FL_AND_FR_SPEAKERS_CONNECTED 0x1
++
++//
++// HDMI buffer/information types
++//
++typedef enum {
++ // Non-standard or non-HDMI type
++ HDMI_ELD_TYPE = 0x00, // ELD buffer type
++ HDMI_EELD_TYPE = 0x01, // EELD buffer type
++
++ // Per HDMI Spec, refer Table 2-1 in HDMI EDS
++ // or Table 5-8 in HDMI spec
++ HDMI_VS_TYPE = 0x81, // Vendor-Specific InfoFrame type
++ HDMI_AVI_TYPE = 0x82, // AVI InfoFrame type
++ HDMI_SPD_TYPE = 0x83, // SPD InfoFrame type
++ HDMI_AUDIO_TYPE = 0x84, // Audio InfoFrame type
++ HDMI_MS_TYPE = 0x85, // MPEG Source InfoFrame type
++
++ // Non-standard or non-HDMI types
++ HDMI_PR_PE_TYPE = 0x86, // Pixel Replication & Pixel Encoding(colorimetry) type
++ HDMI_AUDIO_CAPS_TYPE = 0x87, // Encoder Audio Capabilities type
++ HDMI_AUDIO_ENABLE_FLAGS_TYPE = 0x88 // Flags for enabling / disabling audio
++} hdmi_info_type_t;
++
++//
++// InfoFrame Version Information
++//
++typedef enum {
++ HDMI_VS_VERSION = 1, // Vendor-Specific InfoFrame Version 1
++ HDMI_AVI_VERSION = 1, // AVI InfoFrame Version 1
++ HDMI_AVI_VERSION2 = 2, // AVI InfoFrame Version 2
++ HDMI_SPD_VERSION = 1, // SPD InfoFrame Version 1
++ HDMI_AUDIO_VERSION = 1, // Audio InfoFrame Version 1
++ HDMI_MS_VERSION = 1 // MPEG Source InfoFrame Version 1
++} infoframe_version_t;
++
++//
++// InfoFrame Payload Length in bytes
++//
++typedef enum {
++ HDMI_VS_MAX_LENGTH = 27, // Vendor-Specific InfoFrame Payload Length, including IEEE reg ID
++ HDMI_AVI_LENGTH = 13, // AVI InfoFrame Payload Length
++ HDMI_SPD_LENGTH = 25, // SPD InfoFrame Payload Length
++ HDMI_AUDIO_LENGTH = 10, // Audio InfoFrame Payload Length
++ HDMI_MS_LENGTH = 10, // MPEG Source InfoFrame Payload Length
++ HDMI_PR_PE_LENGTH = 4, // Length of PR_PE_TYPE
++ HDMI_AUDIO_CAPS_LENGTH = 4 // Length of AUDIO_CAPS_TYPE
++} infoframe_length_t;
++
++//
++// InfoFrame TOTAL Length in bytes (includes header + payload)
++//
++typedef enum {
++ HDMI_VS_MAX_TOTAL_LENGTH = HDMI_VS_MAX_LENGTH + 4, // Max Total size of Vendor-Specific InfoFrame
++ HDMI_AVI_TOTAL_LENGTH = HDMI_AVI_LENGTH + 4, // Total size of AVI InfoFrame
++ HDMI_SPD_TOTAL_LENGTH = HDMI_SPD_LENGTH + 4, // Total size of SPD InfoFrame
++ HDMI_AUDIO_TOTAL_LENGTH = HDMI_AUDIO_LENGTH + 4, // Total size of Audio InfoFrame
++ HDMI_MS_TOTAL_LENGTH = HDMI_MS_LENGTH + 4, // Total size of MPEG Source InfoFrame
++} infoframe_total_length_t;
++
++
++//
++// Pixel Replication multipliers
++//
++typedef enum {
++ HDMI_PR_ONE = 0, // No repetition (ie., pixel sent once)
++ HDMI_PR_TWO, // Pixel sent 2 times (ie.,repeated once)
++ HDMI_PR_THREE, // Pixel sent 3 times
++ HDMI_PR_FOUR, // Pixel sent 4 times
++ HDMI_PR_FIVE, // Pixel sent 5 times
++ HDMI_PR_SIX, // Pixel sent 6 times
++ HDMI_PR_SEVEN, // Pixel sent 7 times
++ HDMI_PR_EIGHT, // Pixel sent 8 times
++ HDMI_PR_NINE, // Pixel sent 9 times
++ HDMI_PR_TEN // Pixel sent 10 times
++} hdmi_pixel_replication_t;
++
++//
++// Pixel encoding modes
++//
++//typedef typedef enum {
++ // HDMI_RGB256 = 0x01,
++ // HDMI_RGB220 = 0x02,
++ // HDMI_YCrCb422 = 0x04,
++ // HDMI_YCrCb444 = 0x08
++//}HDMI_COLORIMETRY;
++
++//
++// Pixel encoding modes
++//
++typedef enum {
++ HDMI_COLORIMETRY_RGB256 = 0x01,
++ HDMI_COLORIMETRY_RGB220 = 0x02,
++ HDMI_COLORIMETRY_YCrCb422 = 0x04,
++ HDMI_COLORIMETRY_YCrCb444 = 0x08
++} hdmi_colorimetry_t;
++
++//
++// AVI InfoFrame definitions - start
++//
++// Scan Info
++typedef enum {
++ HDMI_AVI_SCAN_NODATA = 0, // No data
++ HDMI_AVI_SCAN_OVERSCAN = 1, // Overscanned (TV)
++ HDMI_AVI_SCAN_UNDERSCAN = 2, // Underscanned (Computer)
++ HDMI_AVI_SCAN_FUTURE = 3 // Future
++} avi_scan_info_t;
++
++// Bar Info
++typedef enum {
++ HDMI_AVI_BAR_INVALID = 0, // Bar data not valid
++ HDMI_AVI_BAR_VALID_VERTICAL = 1, // Vertical Bar data valid
++ HDMI_AVI_BAR_VALID_HORIZONTAL= 2, // Horizontal Bar data valid
++ HDMI_AVI_BAR_VALID_BOTH = 3 // Vertical & Horizontal Bar data valid
++} avi_bar_info_t;
++
++// Active Format Information
++typedef enum {
++ HDMI_AVI_AFI_INVALID = 0, // No data
++ HDMI_AVI_AFI_VALID = 1 // Active Format Information valid
++} avi_fi_info_t;
++
++// AVI Pixel Encoding modes
++typedef enum {
++ HDMI_AVI_RGB_MODE = 0, // RGB pixel encoding mode
++ HDMI_AVI_YCRCB422_MODE = 1, // YCrCb 4:2:2 mode
++ HDMI_AVI_YCRCB444_MODE = 2, // YCrCb 4:4:4 mode
++ HDMI_AVI_FUTURE_MODE = 3 // Future mode
++} avi_encoding_mode_t;
++
++// AVI Active Format Aspect Ratio
++typedef enum {
++ HDMI_AVI_AFAR_SAME = 8, // same as picture aspect ratio
++ HDMI_AVI_AFAR_4_3 = 9, // 4:3 center
++ HDMI_AVI_AFAR_16_9 = 10, // 16:9 center
++ HDMI_AVI_AFAR_14_9 = 11 // 14:9 center
++} avi_afar_info_t;
++
++// AVI Picture Aspect Ratio
++typedef enum {
++ HDMI_AVI_PAR_NODATA = 0, // No Data
++ HDMI_AVI_PAR_4_3 = 1, // 4:3
++ HDMI_AVI_PAR_16_9 = 2, // 16:9
++ HDMI_AVI_PAR_FUTURE = 3 // Future
++} avi_par_info_t;
++
++// AVI Colorimetry Information
++typedef enum {
++ HDMI_AVI_COLOR_NODATA = 0, // No data
++ HDMI_AVI_COLOR_ITU601 = 1, // SMPTE 170M, ITU601
++ HDMI_AVI_COLOR_ITU709 = 2, // ITU709
++ HDMI_AVI_COLOR_FUTURE = 3 // Future
++} avi_color_info_t;
++
++// AVI Non-uniform Picture Scaling Info
++typedef enum {
++ HDMI_AVI_SCALING_NODATA = 0, // No scaling
++ HDMI_AVI_SCALING_HORIZONTAL = 1, // horizontal scaling
++ HDMI_AVI_SCALING_VERTICAL = 2, // vertical scaling
++ HDMI_AVI_SCALING_BOTH = 3 // horizontal & vertical scaling
++} avi_scaling_infp_t;
++
++// AVI RGB Quantization Range
++typedef enum {
++ HDMI_AVI_RGBQUANT_DEFAULT = 0, // Default value
++ HDMI_AVI_RGBQUANT_LIMITED = 1, // Limited Range
++ HDMI_AVI_RGBQUANT_FULL = 2, // Full Range
++ HDMI_AVI_RGBQUANT_FUTURE = 3 // Future use
++} avi_rgbquant_range_t;
++
++// AVI IT Content
++typedef enum {
++ HDMI_AVI_ITC_NODATA = 0, // No Data
++ HDMI_AVI_ITC_ITCONTENT = 1 //IT Content
++} avi_it_content_t;
++
++//
++// AVI InfoFrame definitions - end
++//
++
++//
++// SPD InfoFrame definitions - start
++//
++// SPD InfoFrame Data Byte 25, refer Table-17 in CEA-861b
++typedef enum {
++ HDMI_SPD_SRC_UNKNOWN = 0x00, // unknown
++ HDMI_SPD_SRC_DIGITAL_STB = 0x01, // Digital STB
++ HDMI_SPD_SRC_DVD = 0x02, // DVD
++ HDMI_SPD_SRC_DVHS = 0x03, // D-VHS
++ HDMI_SPD_SRC_HDD_VIDEO = 0x04, // HDD Video
++ HDMI_SPD_SRC_DVC = 0x05, // DVC
++ HDMI_SPD_SRC_DSC = 0x06, // DSC
++ HDMI_SPD_SRC_VCD = 0x07, // Video CD
++ HDMI_SPD_SRC_GAME = 0x08, // Game
++ HDMI_SPD_SRC_PC = 0x09 // PC General
++} spd_src_type_t;
++
++// SPD InfoFrame Vendor Name & Descriptor Length in bytes
++typedef enum {
++ HDMI_SPD_VNAME_LENGTH = 8, // SPD Vendor Name Length in bytes
++ HDMI_SPD_VDESC_LENGTH = 16, // SPD Vendor Descriptor Length in bytes
++} spd_namedesc_length_info_t;
++
++//
++// SPD InfoFrame definitions - end
++//
++
++//
++// InfoFrame Packet Header - generic
++//
++typedef struct _if_header {
++ uint8_t type; // InfoFrame Type
++ uint8_t version; // InfoFrame Version
++ uint8_t length; // InfoFrame Length
++ uint8_t chksum; // Checksum of the InfoFrame
++} if_header_t;
++
++//
++// AVI InfoFrame structure
++//
++typedef union _avi_if {
++ uint8_t avi_buf[HDMI_AVI_TOTAL_LENGTH];
++ #pragma pack(1)
++ struct
++ {
++ if_header_t avi_if_header; // AVI header data
++ union
++ {
++ uint8_t byte1;
++ struct
++ {
++ uint8_t scan_info:2; // scan information
++ uint8_t bar_info :2; // bar information
++ uint8_t format :1; // active format information
++ uint8_t enc_mode :2; // pixel encoding (RGB or YCrCb)
++ uint8_t b1rsvd :1; // reserved
++ };
++ };
++ union
++ {
++ uint8_t byte2;
++ struct
++ {
++ uint8_t afar :4; // Active Format Aspect Ratio
++ uint8_t par :2; // Picture Aspect Ratio
++ uint8_t colorimetry :2; // colorimetry
++ };
++ };
++ union
++ {
++ uint8_t byte3;
++ struct
++ {
++ uint8_t scaling_info :2; // Scaling information
++ uint8_t rgbquant_range :2; // RGB Quantization Range
++ uint8_t ext_colorimetry :3; //Extended Colorimetry
++ uint8_t it_content :1; //IT Content
++ };
++ };
++ union
++ {
++ uint8_t byte4;
++ struct
++ {
++ uint8_t vic :7; // Video Identification code (refer Table 13 in CEA-861b)
++ uint8_t b4rsvd :1; // reserved
++ };
++ };
++ union
++ {
++ uint8_t byte5;
++ struct
++ {
++ uint8_t pr :4; // pixel repetition (refer Table 15 in CEA-861b)
++ uint8_t b5rsvd :4; // reserved
++ };
++ };
++ uint8_t byte6; // end of top bar(lower), set to "00"
++ uint8_t byte7; // end of top bar(upper), set to "00"
++ uint8_t byte8; // start of bottom bar(lower), set to "00"
++ uint8_t byte9; // start of bottom bar(upper), set to "00"
++ uint8_t byte10; // end of left bar(lower), set to "00"
++ uint8_t byte11; // end of left bar(upper), set to "00"
++ uint8_t byte12; // start of right bar(lower), set to "00"
++ uint8_t byte13; // start of right bar(upper), set to "00"
++ };
++ #pragma pack()
++} avi_if_t;
++
++//
++// SPD InfoFrame structure
++//
++typedef union _spd_if {
++ uint8_t spd_buf[HDMI_SPD_TOTAL_LENGTH];
++ #pragma pack(1)
++ struct
++ {
++ if_header_t spd_if_header; // SPD header data
++ uint8_t name[8]; // Vendor Name, 8 characters
++ uint8_t desc[16]; // Product Description, 16 characters
++ uint8_t sdi; // Source Device Information
++ };
++ #pragma pack()
++} spd_if_t;
++
++//
++// Vendor Specific InfoFrame structure
++//
++typedef union _vs_if
++{
++ uint8_t vs_buf[HDMI_VS_MAX_TOTAL_LENGTH];
++ #pragma pack(1)
++ struct
++ {
++ if_header_t vs_if_header; // VS header data
++ uint8_t ieee_reg_id[3]; // 3-byte IEEE registration ID
++ uint8_t pay_load[24]; // Payload bytes
++ };
++ #pragma pack()
++} vs_if_t;
++
++//
++// AVI Infoframe structure for customization
++//
++
++typedef struct _avi_infoframe_custom {
++ //GUID guid; // GUID
++ int32_t command; // Command
++ int32_t flags; // Flags
++ uint32_t type_code; // Type code of AVI Infoframe
++ uint32_t version; // Version of AVI Infoframe
++ uint32_t length; // Length of AVI Info Frame
++ uint8_t r3r0_valid; // Reserved
++ uint8_t it_content; // IT Content
++ uint8_t bar_info[8]; // Reserved
++ int32_t active_format_aspect_ratio;// Reserved
++ int32_t non_uniform_scaling; // Reserved
++ int32_t rgb_ycc_indicator; // Reserved
++ int32_t ext_colorimetry; // Reserved
++ int32_t pixel_factor; // Reserved
++ int32_t bar_info_valid; // Reserved
++ int32_t colorimetry; // Reserved
++ int32_t aspect_ratio; // Reserved
++ int32_t quant_range; // Quantization Range
++ int32_t video_code; // Reserved
++ int32_t scan_info; // Scan Information
++} avi_infoframe_custom_t;
++
++
++//
++// LinearPCM Consolidated Audio Data(CAD) structure
++//
++typedef union _lpcm_cad {
++ uint8_t value;
++ struct {
++ uint8_t maxch_cp_on :3; // Max channels-1 supported with CP turned ON
++ uint8_t maxch_cp_off :3; // Max channels-1 supported with CP turned OFF
++ uint8_t sp_20bit :1; // 20-bit sample support
++ uint8_t sp_24bit :1; // 24-bit sample support
++ };
++} lpcm_cad_t;
++
++//
++// CEA Short Audio Descriptor
++//
++typedef struct _cea_861b_adb {
++#pragma pack(1)
++ union
++ {
++ uint8_t byte1;
++ struct
++ {
++ uint8_t max_channels :3; // Bits[0-2]
++ uint8_t audio_format_code :4; // Bits[3-6], see AUDIO_FORMAT_CODES
++ uint8_t b1reserved :1; // Bit[7] - reserved
++ };
++ };
++ union
++ {
++ uint8_t byte2;
++ struct
++ {
++ uint8_t sp_rate_32kHz :1; // Bit[0] sample rate = 32kHz
++ uint8_t sp_rate_44kHz :1; // Bit[1] sample rate = 44kHz
++ uint8_t sp_rate_48kHz :1; // Bit[2] sample rate = 48kHz
++ uint8_t sp_rate_88kHz :1; // Bit[3] sample rate = 88kHz
++ uint8_t sp_rate_96kHz :1; // Bit[4] sample rate = 96kHz
++ uint8_t sp_rate_176kHz :1; // Bit[5] sample rate = 176kHz
++ uint8_t sp_rate_192kHz :1; // Bit[6] sample rate = 192kHz
++ uint8_t sp_rate_b2reserved :1; // Bit[7] - reserved
++ };
++ };
++ union
++ {
++ uint8_t byte3; // maximum bit rate divided by 8kHz
++ // following is the format of 3rd byte for uncompressed(LPCM) audio
++ struct
++ {
++ uint8_t bit_rate_16bit :1; // Bit[0]
++ uint8_t bit_rate_20bit :1; // Bit[1]
++ uint8_t bit_rate_24bit :1; // Bit[2]
++ uint8_t bit_rate_b3reserved :5; // Bits[3-7]
++ };
++ };
++#pragma pack()
++}cea_861b_adb_t;
++
++//
++// Enhanced EDID Like Data aka EELD structure
++//
++typedef union _hdmi_eeld {
++ uint8_t eeld[HDMI_EELD_SIZE];
++ #pragma pack(1)
++ struct
++ {
++ // Byte[0] = ELD Version Number
++ union
++ {
++ uint8_t byte0;
++ struct
++ {
++ uint8_t reserved:3; // Reserved
++ uint8_t eld_ver:5; // ELD Version Number
++ // 00000b - reserved
++ // 00001b - first rev
++ // 00010b:11111b - reserved for future
++ };
++ };
++
++ // Byte[1] = Vendor Version Field
++ union
++ {
++ uint8_t vendor_version;
++ struct
++ {
++ uint8_t reserved1:3;
++ uint8_t veld_ver:5; // Version number of the ELD extension.
++ // This value is provisioned and unique to each vendor.
++ };
++ };
++
++ // Byte[2] = Baseline Length field
++ uint8_t baseline_eld_length; // Length of the Baseline structure divided by Four.
++
++ // Byte [3] = Reserved for future use
++ uint8_t byte3;
++
++ // Starting of the BaseLine EELD structure
++ // Byte[4] = Monitor Name Length
++ union
++ {
++ uint8_t byte4;
++ struct
++ {
++ uint8_t mnl:5;
++ uint8_t cea_edid_rev_id:3;
++ };
++ };
++
++ // Byte[5] = Capabilities
++ union
++ {
++ uint8_t capabilities;
++ struct
++ {
++ uint8_t hdcp:1; // Indicates HDCP support
++ uint8_t ai_support:1; // Indicates AI support
++ uint8_t connection_type:2; // Indicates Connection type
++ // 00 - HDMI
++ // 01 - DP
++ // 10 -11 Reserved for future connection types
++ uint8_t sadc:4; // Indicates number of 3 bytes Short Audio Descriptors.
++ };
++ };
++
++ // Byte[6] = Audio Synch Delay
++ uint8_t audio_synch_delay; // Amount of time reported by the sink that the video trails audio in milliseconds.
++
++ // Byte[7] = Speaker Allocation Block
++ union
++ {
++ uint8_t speaker_allocation_block;
++ struct
++ {
++ uint8_t flr:1; // Front Left and Right channels
++ uint8_t lfe:1; // Low Frequency Effect channel
++ uint8_t fc:1; // Center transmission channel
++ uint8_t rlr:1; // Rear Left and Right channels
++ uint8_t rc:1; // Rear Center channel
++ uint8_t flrc:1; // Front left and Right of Center transmission channels
++ uint8_t rlrc:1; // Rear left and Right of Center transmission channels
++ uint8_t reserved3:1; // Reserved
++ };
++ };
++
++ // Byte[8 - 15] - 8 Byte port identification value
++ uint8_t port_id_value[8];
++
++ // Byte[16 - 17] - 2 Byte Manufacturer ID
++ uint8_t manufacturer_id[2];
++
++ // Byte[18 - 19] - 2 Byte Product ID
++ uint8_t product_id[2];
++
++ // Byte [20-83] - 64 Bytes of BaseLine Data
++ uint8_t mn_sand_sads[64]; // This will include
++ // - ASCII string of Monitor name
++ // - List of 3 byte SADs
++ // - Zero padding
++
++ // Vendor ELD Block should continue here!
++ // No Vendor ELD block defined as of now.
++ };
++ #pragma pack()
++} hdmi_eeld_t;
++
++//
++// Data structure for misc HDMI data
++//
++typedef struct _misc_hdmi_data {
++ int32_t colorimetry :4; //
++ int32_t pr :4; // pixel repetition value
++ int32_t reserved :24;// reserved bits
++} misc_hdmi_data_t;
++
++//
++// Audio capability structure
++//
++typedef struct _device_audio_caps {
++ int32_t npl_design :8; // max number of audio packets device can
++ // deliver per line
++ int32_t k0 :8; // The overhead(in pixels) per line required
++ // by device for setting up audio packets when
++ // CP is disabled
++ int32_t k1 :8; // The overhead(in pixels) per line required
++ // by device for setting up audio packets when
++ // CP is enabled
++ // Misc data
++ int32_t pr :4; // Pixel Replication value
++ int32_t is_hdcp :1; // Driver, Device and Receiver support HDCP
++ int32_t is_rptr :1; // Receiver is HDCP repeater
++ int32_t reserved :2; // reserved bits
++} device_audio_caps_t;
++
++typedef struct _audio_enable_flags {
++ int32_t is_hdmi_display :1; //1 if HDMI display, 0 if not HDMI display
++ int32_t is_eld_valid :1; //1 if ELD valid, 0 if ELD not valid
++ int32_t reserved1 :30;
++} audio_enable_flags_t;
++
++//
++// Data structure to exchange HDMI data through GetSetParameters interface
++//
++typedef struct _hdmi_parameters {
++ //GUID Guid;
++ hdmi_command_t command;
++ uint8_t type;
++ uint8_t size;
++ union {
++ hdmi_eeld_t eeld_buffer;
++ avi_if_t avi_infoframe;
++ spd_if_t spd_infoframe;
++ vs_if_t vs_infoframe;
++ union {
++ int32_t gen_data;
++ device_audio_caps_t audio_caps;
++ misc_hdmi_data_t misc_data;
++ audio_enable_flags_t fl_audio_enable_flags;
++ };
++ };
++} hdmi_parameters_t;
++
++//
++// Audio format codes
++//
++typedef enum {
++ AUDIO_LPCM = 0x0001, // Linear PCM (eg. IEC60958)
++ AUDIO_AC3 = 0x0002, // AC-3
++ AUDIO_MPEG1 = 0x0003, // MPEG1 (Layers 1 & 2)
++ AUDIO_MP3 = 0x0004, // MP3 (MPEG1 Layer 3)
++ AUDIO_MPEG2 = 0x0005, // MPEG2 (multichannel)
++ AUDIO_AAC = 0x0006, // AAC
++ AUDIO_DTS = 0x0007, // DTS
++ AUDIO_ATRAC = 0x0008, // ATRAC
++ AUDIO_OBA = 0x0009, // One Bit Audio
++ AUDIO_DOLBY_DIGITAL = 0x000A, // Dolby Digital
++ AUDIO_DTS_HD = 0x000B, // DTS-HD
++ AUDIO_MAT = 0x000C, // MAT (MLP)
++ AUDIO_DST = 0x000D, // DST
++ AUDIO_WMA_PRO = 0x000E // WMA Pro
++} audio_format_codes_t;
++
++//
++// Data structure for byte #6 to 8 which has fixed definition
++//
++typedef struct _vsdb_char6_to_char8
++{
++ #pragma pack(1)
++
++ union
++ {
++ uint8_t byte1;
++ struct
++ {
++ uint8_t dvi_dual :1; // Bit[0]
++ uint8_t b1reserved :2; // Bits[1-2]
++ uint8_t dcy444 :1; // Bit[3] YCBCR 4:4:4 in Deep Color modes.
++ uint8_t dc30bit :1; //Bit[4]
++ uint8_t dc36bit :1; //Bit[5]
++ uint8_t dc48bit :1; //Bit[6]
++ uint8_t supports_ai :1; // Bit[7]
++ };
++ };
++
++ uint8_t max_tmds_clock;
++
++ union
++ {
++ uint8_t byte3;
++ struct
++ {
++ uint8_t b3reserved :6; // Bit[0-5] reserved
++ uint8_t i_latency_field_present :1;// Bit[6]
++ uint8_t latency_field_present :1;// Bits[7]
++ };
++ };
++
++ #pragma pack()
++} vsdb_byte6_to_byte8_t;
++
++
++//
++// Gamut metadata structure
++//
++// Note : The data is written in big endian format
++#if 0
++// GUID for calling GBD interface
++// {EEE24BDF-6D30-40bf-9BA2-139F0FFFC797}
++#define DXVA_HDMI13_GBD_P0_GUID "{EEE24BDF-6D30-40BF-9BA2-139F0FFFC797}"
++DEFINE_GUID(GUID_DXVA_HDMI13_GBD_P0, 0xeee24bdf, 0x6d30, 0x40bf, 0x9b, 0xa2, 0x13, 0x9f, 0xf, 0xff, 0xc7, 0x97);
++#endif
++
++#define HDMI_GBD_PKT_TYPE 0x0A
++#define HDMI_GBD_P0_DATA_SIZE 27
++#define HDMI_MAX_VERTICES_DATA 25
++#define HDMI_MAX_FACET_DATA 25
++
++typedef enum {
++ VERTICES_AND_FACETS = 0,
++ RGB_MIN_MAX_RANGE = 1
++} gbd_format_flag_t;
++
++typedef enum {
++ GBD_8BIT_PRECISION = 0,
++ GBD_10BIT_PRECISION = 1,
++ GBD_12BIT_PRECISION = 2
++} gbd_color_precision_t;
++
++typedef enum {
++ RGB_BT709 = 0,
++ XVY_CC601 = 1,
++ XVY_CC709 = 2,
++ RESERVED_COLORSPACE
++} gbd_color_space_t;
++
++typedef enum {
++ MIN_RED_INDEX = 0,
++ MAX_RED_INDEX = 1,
++ MIN_GREEN_INDEX = 2,
++ MAX_GREEN_INDEX = 3,
++ MIN_BLUE_INDEX = 4,
++ MAX_BLUE_INDEX = 5,
++ MAX_RANGE_DATA_INDEX_LIMIT = 6
++} gbd_rgb_range_data_index_t;
++
++//
++// App needs to fill in the data in this structure
++//
++typedef struct _gbd_p0_hdmi_1_3 {
++ uint8_t enable; // Enable/Disable GBD profile sending
++ gbd_format_flag_t format_flag; // uses GBD_FORMAT_FLAG_EN, this defines the gamut data format
++ gbd_color_precision_t color_precision; // uses GBD_COLOR_PRECISION, this is the bit precision of GBD vertex and range data
++ gbd_color_space_t color_space; // uses GBD_COLOR_SPACE_EN, this defines the color space being represented
++
++ union
++ {
++ // If bFormatFlag is 0
++ struct {
++ uint8_t facet_mode; // spec supports 0 alone right now
++ uint16_t num_vertices; // Number of vertices
++ uint16_t num_facets; // Number of faces
++
++ // For 4 vertices of 12bits size is 18
++ // Max possible with 0 facets and 28 bytes of GBD is 28-5=23 bytes
++ uint16_t vertices_data[HDMI_MAX_VERTICES_DATA]; // Vertices data representation
++ uint16_t facets_data[HDMI_MAX_FACET_DATA]; // kept it as input data but to be defined based on future spec
++ } vertices_facets_data;
++
++
++ // If eFormatFlag is 1
++ struct {
++ uint16_t rgb_primary_data[MAX_RANGE_DATA_INDEX_LIMIT];
++ } rgb_range_data;
++ };
++
++} gbd_p0_hdmi_1_3_t;
++
++#define HDMI_GBD_MAX_SEQ_NUM_INDEX 16
++
++// various GBD profiles
++typedef enum {
++ P0_PROFILE = 0,
++ P1_PROFILE = 1,
++ P2_PROFILE = 2,
++ P3_PROFILE = 3,
++ INVALID_PROFILE
++} gbd_profile_type_t;
++
++// various packet transmission options
++typedef enum {
++ INTERMEDIATE_PKT_IN_SEQ = 0,
++ FIRST_PKT_IN_SEQ = 1,
++ LAST_PKT_IN_SEQ = 2,
++ ONLY_PKT_IN_SEQ = 3
++} gbd_pkt_seq_t;
++
++//
++// Packet header defn as per HDMI spec
++//
++typedef struct _gamut_pkt_header {
++ uint8_t pkt_type; // Defines the pkt type
++ union{
++ uint8_t field_byte;
++ struct{
++ uint8_t affected_gamut_info :4; // BIT 3:0
++ uint8_t gbd_profile :3; // BIT 6:4 ; uses GBD_PROFILE_TYPE_EN
++ uint8_t next_field :1; // BIT7
++ };
++ };
++
++ union{
++ uint8_t gbd_seq_info;
++ struct{
++ uint8_t current_gamut_info :4; // BIT 3:0
++ uint8_t packet_seq :2; // BIT 5:4 ; use GBD_PKT_SEQ_EN
++ uint8_t reserved2 :1; // BIT 6
++ uint8_t no_current_gbd :1; // BIT 7
++ };
++ };
++} gamut_pkt_header_t;
++
++//
++// Gamut structure contains data in following format
++//
++typedef struct _gamut_metadata_struct {
++ #pragma pack(1)
++ gamut_pkt_header_t pkt_hdr; // Gamut Metadata header data
++ union
++ {
++ uint8_t byte1;
++ struct
++ {
++ uint8_t gbd_color_space :3;
++ // Note: GBD buffer is formatted based upon the color precision
++ // 8 bit precision : 1 sign bit, 2 bits of integer, 5 bits of fraction
++ // 10 bit precision : 1 sign bit, 2 bits of integer, 7 bits of fraction
++ // 12 bit precision : 1 sign bit, 2 bits of integer, 9 bits of fraction
++ uint8_t gbd_color_precision :2;
++ uint8_t reserved3 :1;
++ uint8_t facet_mode :1; // 0 - No facet info in GBD; 1 - Facet info in GBD
++ uint8_t format_flag :1; // uses GBD_FORMAT_FLAG_EN
++ };
++ };
++
++ // For P0 profile below is the syntax in which data will be filled
++ // If Format is YUV
++ // char 2 : Higher 8 bits of number of vertices
++ // char 3 : Lower 8 bits of number of vertices
++ // char 4 to VSIZE+2 : Vertex data of size VSIZE,
++ // where VSIZE = 3*number of vertices*GBD color precision/8 + 0.99999
++ // char VSIZE+3: Higher 8 bits of number of facets
++ // char VSIZE+4: Lower 8 bits of number of facets
++ // char VSIZE+5 to VSIZE+FSIZE+4 : Facet data
++ // where VSIZE = number of facet data
++ uint8_t gbd_data[HDMI_GBD_P0_DATA_SIZE]; // data will be filled
++
++ #pragma pack()
++} gamut_metadata_st_t;
++
++#endif //__IHDMI_H__
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_hdmi_edid.h
+@@ -0,0 +1,1057 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *
++ */
++/* chunfeng.zhao@intel.com
++ */
++#ifndef PSB_INTEL_HDMI_EDID_H
++#define PSB_INTEL_HDMI_EDID_H
++
++//#include "..\\..\\Common\\Platform.h"
++
++////////////////////////////////////////////
++//
++// Max number of EDID extensions possible
++//
++////////////////////////////////////////////
++#define MAX_EDID_EXTENSIONS 254 //Max EDID blocks minus Block 0
++#define NUM_BASEEDID_STANDARD_TIMING 8
++#define MAX_BASEEDID_DTD_BLOCKS 4
++
++#define MAX_VIC_DEFINED 128
++
++// New Macros for supporting EDID 1.4
++
++// Macros for EDID Revision and Version
++#define EDID_VERSION_1 0x01
++#define EDID_REVISION_4 0x04
++
++// Macros for CVT and GTF related support in Monitor descriptor
++#define EDID14_CVT_TIMING_SUPPORTED 0x04
++#define EDID14_DEFAULT_GTF_SUPPORTED 0x00
++#define EDID14_SECONDARY_GTF_SUPPORTED 0x02
++
++// Macros for display device data block in CEA.
++#define EDID14_DISPLAY_DEVICE_DATA_TAG 0xFF
++#define EDID14_DISPLAY_DEVICE_DATA_CHILD_TAG 0x02
++#define EDID14_DISPLAY_DEVICE_DATA_LENGTH 0x20
++#define EDID14_DISPLAY_PORT_INTERFACE 0x09
++
++// Macros indicating digital interfaces supported by the display.
++#define EDID14_DVI_SUPPORTED 0x01
++#define EDID14_DISPLAY_PORT_SUPPORTED 0x05
++#define EDID14_HDMI_A_SUPPORTED 0x02
++#define EDID14_HDMI_B_SUPPORTED 0x03
++
++#define EDID14_MAX_MONITOR_DESCRIPTORS 0x03
++
++// Macros related to EDID 1.4 Color Bit Depth support
++#define EDID14_COLOR_BIT_DEPTH_UNDEFINED 0x00
++#define EDID14_SIX_BITS_PER_PRIMARY_COLOR 0x06
++#define EDID14_EIGHT_BITS_PER_PRIMARY_COLOR 0x08
++#define EDID14_TEN_BITS_PER_PRIMARY_COLOR 0x0A
++#define EDID14_TWELVE_BITS_PER_PRIMARY_COLOR 0x0C
++#define EDID14_FOURTEEN_BITS_PER_PRIMARY_COLOR 0x0E
++#define EDID14_SIXTEEN_BITS_PER_PRIMARY_COLOR 0x10
++#define EDID14_INVALID_COLOR_BIT_DEPTH 0x07
++
++// Macro for showing Color Bit Depth support for existing displays
++#define EDID_EIGHT_BITS_PER_PRIMARY_COLOR 0x08
++
++// Macro for Established Timings III Block descriptor
++#define EST_TIMINGS_III_BLOCK_TAG 0xF7
++#define EST_TIMINGS_III_BLOCK_DATA_LENGTH 0x06
++
++// Macro for indicating byte length
++#define BYTE_LENGTH 0x08
++
++////////////////////////////////////////////
++//
++// Max number of EDID Blocks
++//
++////////////////////////////////////////////
++#define MAX_EDID_BLOCKS 255 //According to E-EDID Standard doc.
++#define EDID_BLOCK_SIZE 128
++
++// Macros for EDID Revision and Version for EDID 1.3
++#define EDID_VERSION_1_3 0x01
++#define EDID_REVISION_1_3 0x03
++
++////////////////////////////////////////////
++// Base EDID header
++////////////////////////////////////////////
++static const unsigned char BASEEDID_Header[8] = {0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00};
++
++// Display Range Limits Offset Flags.
++// Applicable only from EDID 1.4 onwards
++typedef union _edid_range_limits_flags {
++ uint8_t ucRangeLimitOffsetFlags; // Range Limits Offset Flags
++ struct {
++ uint8_t ucVerticalRateOffset : 2; // Vertical Rate Offset
++ uint8_t ucHorizontalRateOffset : 2; // Horizontal Rate Offset
++ uint8_t ucReserved : 4; // Reserved.
++ };
++} edid_range_limits_flags_t;
++
++////////////////////////////////////////////
++//
++// 18-byte DTD block
++// Refer Table 3.16, 3.17 & 3.18 of
++// EDID spec
++//
++////////////////////////////////////////////
++typedef struct _edid_dtd_timing {
++#pragma pack(1)
++
++ int16_t wPixelClock; // Pixel clock / 10000
++
++ uint8_t ucHA_low; // Lower 8 bits of H. active pixels
++ uint8_t ucHBL_low; // Lower 8 bits of H. blanking
++ union {
++ uint8_t ucHAHBL_high;
++ struct {
++ uint8_t ucHBL_high : 4; // Upper 4 bits of H. blanking
++ uint8_t ucHA_high : 4; // Upper 4 bits of H. active pixels
++ };
++ };
++
++ uint8_t ucVA_low; // Lower 8 bits of V. active lines
++ uint8_t ucVBL_low; // Lower 8 bits of V. blanking
++ union {
++ uint8_t ucVAVBL_high;
++ struct {
++ uint8_t ucVBL_high : 4; // Upper 4 bits of V. blanking
++ uint8_t ucVA_high : 4; // Upper 4 bits of V. active pixels
++ };
++ };
++
++ uint8_t ucHSO_low; // Lower 8 bits of H. sync offset
++ uint8_t ucHSPW_low; // Lower 8 bits of H. sync pulse width
++ union {
++ uint8_t ucVSOVSPW_low;
++ struct {
++ uint8_t ucVSPW_low : 4; // Lower 4 bits of V. sync pulse width
++ uint8_t ucVSO_low : 4; // Lower 4 bits of V. sync offset
++ };
++ };
++ union {
++ uint8_t ucHSVS_high;
++ struct {
++ uint8_t ucVSPW_high : 2; // Upper 2 bits of V. sync pulse width
++ uint8_t ucVSO_high : 2; // Upper 2 bits of V. sync offset
++ uint8_t ucHSPW_high : 2; // Upper 2 bits of H. sync pulse width
++ uint8_t ucHSO_high : 2; // Upper 2 bits of H. sync offset
++ };
++ };
++
++ uint8_t ucHIS_low; // Lower 8 bits of H. image size in mm
++ uint8_t ucVIS_low; // Lower 8 bits of V. image size in mm
++ union {
++ uint8_t ucHISVIS_high;
++ struct {
++ uint8_t ucVIS_high : 4; // Upper 4 bits of V. image size
++ uint8_t ucHIS_high : 4; // Upper 4 bits of H. image size
++ };
++ };
++
++ uint8_t ucHBorder; // H. border in pixels
++ uint8_t ucVBorder; // V. border in pixels
++
++ union {
++ uint8_t ucFlags; // Hsync & Vsync polarity, etc. flags
++ struct {
++ uint8_t ucStereo1 : 1; // Stereo definition with bit[6:5]
++ uint8_t ucHSync_Pol : 1; // Hsync polarity (0: Neg, 1: Pos)
++ uint8_t ucVSync_Pol : 1; // Vsync polarity (0: Neg, 1: Pos)
++ uint8_t ucSync_Conf : 2; // Sync configuration
++ // 00 : Analog composite
++ // 01 : Bipolar analog composite
++ // 00 : Digital composite
++ // 00 : Digital separate
++ uint8_t ucStereo2 : 2; // Stereo definition
++ // 00 : Normal display, no stereo
++ // xx : Stereo definition with bit0
++ uint8_t ucInterlaced : 1; // Interlaced / Non-interlaced
++ // 0 : Non-interlaced
++ // 1 : Interlaced
++ };
++ };
++
++#pragma pack()
++} edid_dtd_timing_t;
++
++
++////////////////////////////////////////////
++//
++// Standard timing identification
++// Refer Table 3.15 of EDID spec
++//
++////////////////////////////////////////////
++typedef union _edid_std_timing {
++ uint16_t usStdTiming;
++
++ struct {
++#pragma pack(1)
++ uint8_t ucHActive; // (HActive/8) - 31;
++ struct {
++ uint8_t ucRefreshRate : 6; // Refresh Rate - 60
++ uint8_t ucAspectRatio : 2; // Aspect ratio (HActive/VActive)
++ // 00: 1:1 Aspect ratio
++ // 01: 4:3 Aspect ratio
++ // 10: 5:4 Aspect ratio
++ // 11: 16:9 Aspect ratio
++ };
++ };
++#pragma pack()
++
++} edid_std_timing_t;
++////////////////////////////////////////////////////////
++// Aspect Ratio def's as per Edid 1.3 Standard Timings
++////////////////////////////////////////////////////////
++#define EDID_STD_ASPECT_RATIO_16_10 0x0
++#define EDID_STD_ASPECT_RATIO_4_3 0x1
++#define EDID_STD_ASPECT_RATIO_5_4 0x2
++#define EDID_STD_ASPECT_RATIO_16_9 0x3
++
++
++////////////////////////////////////////////
++//
++// Monitor range limits
++//
++////////////////////////////////////////////
++typedef struct _monitor_range_limits {
++#pragma pack(1)
++
++ uint8_t ucMin_vert_rate; //Min Vertical Rate,in Hz
++ uint8_t ucMax_vert_rate; //Max Vertical Rate, in Hz
++ uint8_t ucMin_horz_rate; //Min Horizontal Rate, in Hz
++ uint8_t ucMax_horz_rate; //Max Horizontal Rate, in Hz
++ uint8_t ucMax_pixel_clock; //Max Pixel Clock,Value/10 Mhz
++ uint8_t ucTiming_formula_support; //00 - No Secondary Timing Formula Supported
++ //02 - Secondary GTF Curve Supported
++ //In EDID 1.4, this may indicate CVT support as well
++ //If timing_formula_support is 02
++ uint8_t ucReserved; //00h
++ uint8_t ucStart_freq; //Horizontal Freq, Value/2, KHz
++ uint8_t ucByte_C; //C*2
++ uint8_t ucLSB_M; //LSB of M Value
++ uint8_t ucMSB_M; //MSB of M Value
++ uint8_t ucByte_K; //K Value
++ uint8_t ucByte_J; //J*2
++
++#pragma pack()
++} monitor_range_limits_t;
++
++////////////////////////////////////////////
++//
++// Color point
++//
++////////////////////////////////////////////
++typedef struct _color_point {
++#pragma pack(1)
++
++ uint8_t ucWhite_point_index_number_1;
++ uint8_t ucWhite_low_bits_1;
++ uint8_t ucWhite_x_1;
++ uint8_t ucWhite_y_1;
++ uint8_t ucWhite_gamma_1;
++ uint8_t ucWhite_point_index_number_2;
++ uint8_t ucWhite_low_bits_2;
++ uint8_t ucWhite_x_2;
++ uint8_t ucWhite_y_2;
++ uint8_t ucWhite_gamma_2;
++ uint8_t ucByte_15;
++ uint8_t ucByte_16_17[2];
++
++#pragma pack()
++} color_point_t;
++
++////////////////////////////////////////////
++//
++// Monitor description descriptor
++// Refer Table 3.19 & 3.20 of EDID spec
++//
++////////////////////////////////////////////
++#define BASEEDID_MONITORSN_MDDATATYPE 0xFF
++#define BASEEDID_ASCIISTRING_MDDATATYPE 0xFE
++#define BASEEDID_MONITORRANGELIMIT_MDDATATYPE 0xFD
++#define BASEEDID_MONITORNAME_MDDATATYPE 0xFC
++#define BASEEDID_COLORPOINT_MDDATATYPE 0xFB
++#define BASEEDID_STDTIMINGS_MDDATATYPE 0xFA
++
++// Structure definition for Established Timings III monitor block
++typedef struct _est_timings_iii_block {
++#pragma pack(1)
++ // The first byte will show the VESA DMTS Standard Version.
++ // The following six bytes will have the Timings Bit Mask.
++ // Right now only 6 bytes are used for this!!!
++ // Rest is reserved.
++ uint8_t ucVesaDMTVersion; //Byte 0 indicating the VESA DMT Version.
++ uint8_t ucTimingBitMask[6];// Next 6 bytes indicating the Timing Bit Mask Bytes used in Est Timing III.
++ uint8_t bReserved[6];//Next 6 bytes are reserved
++#pragma pack()
++} est_timings_iii_block_t;
++
++typedef struct _monitor_descriptor {
++#pragma pack(1)
++
++ int16_t wFlag; // = 0000 when block is used as descriptor
++ uint8_t ucFlag0; // Reserved
++
++ uint8_t ucDataTypeTag;
++
++ uint8_t ucFlag1; // 00 for descriptor
++
++ union {
++
++ // Monitor S/N (ucDataTypeTag = FF)
++ uint8_t ucMonitorSerialNumber[13];
++
++ // ASCII string (ucDataTypeTag = FE)
++ uint8_t ucASCIIString[13];
++
++ // Monitor range limit (ucDataTypeTag = FD)
++ monitor_range_limits_t MonitorRangeLimits;
++
++ // Monitor name (ucDataTypeTag = FC)
++ uint8_t ucMonitorName[13];
++
++ // Color point (ucDataTypeTag = FB)
++ color_point_t ColorPoint;
++
++ // ESTABLISHED TIMINGS III BLOCK = F7 (Added for EDID 1.4)
++ est_timings_iii_block_t stEstTimingsIIIBlock;
++
++ // Standard timings (ucDataTypeTag = FA)
++ struct {
++ edid_std_timing_t ExtraStdTiming[6];
++ uint8_t ucFixedValueOfA0; // Should be 0xA0
++ };
++
++ // Manufacturer specific value (ucDataTypeTag = 0F-00)
++ uint8_t ucMfgSpecificData[13];
++ };
++
++#pragma pack()
++} monitor_descriptor_t;
++
++////////////////////////////////////////////
++//
++// EDID PnP ID fields
++//
++////////////////////////////////////////////
++typedef union _baseedid_pnpid {
++ uint8_t VendorProductID[10]; // Vendor / Product identification
++
++ struct {
++ uint8_t ManufacturerID[2]; // Bytes 8, 9: Manufacturer ID
++ uint8_t ProductID[2]; // Bytes 10, 11: Product ID
++ uint8_t SerialNumber[4]; // Bytes 12-15: Serial numbers
++ uint8_t WeekOfManufacture; // Byte 16: Week of manufacture
++ uint8_t YearOfManufacture; // Byte 17: Year of manufacture
++ };
++} baseedid_pnpid_t;
++
++//
++// Chromaticity structure
++// Table 3.12 of Base Block for details
++//
++typedef struct _baseedid_chromaticity_block {
++ union{
++ uint8_t RedGreenLowBits; // Byte 1
++ struct{
++ uint8_t ucGreenYLowBits : 2; // bit 1:0
++ uint8_t ucGreenXLowBits : 2; // bit 3:2
++ uint8_t ucRedYLowBits : 2; // bit 5:4
++ uint8_t ucRedXLowBits : 2; // bit 7:6
++ };
++ };
++
++ union{
++ uint8_t ucBlueWhiteLowBits; // Byte 2
++ struct{
++ uint8_t ucWhiteYLowBits : 2; // bit 1:0
++ uint8_t ucWhiteXLowBits : 2; // bit 3:2
++ uint8_t ucBlueYLowBits : 2; // bit 5:4
++ uint8_t ucBlueXLowBits : 2; // bit 7:6
++ };
++ };
++
++ uint8_t ucRedXUpperBits; // bit 9:2 Byte 3
++ uint8_t ucRedYUpperBits; // bit 9:2 Byte 4
++
++ uint8_t ucGreenXUpperBits; // bit 9:2 Byte 5
++ uint8_t ucGreenYUpperBits; // bit 9:2 Byte 6
++
++ uint8_t ucBlueXUpperBits; // bit 9:2 Byte 7
++ uint8_t ucBlueYUpperBits; // bit 9:2 Byte 8
++
++ uint8_t ucWhiteXUpperBits; // bit 9:2 Byte 9
++ uint8_t ucWhiteYUpperBits; // bit 9:2 Byte 10
++} baseedid_chromaticity_block_t;
++
++////////////////////////////////////////////
++//
++// 128-byte EDID 1.x block0 structure
++//
++////////////////////////////////////////////
++typedef struct _baseedid_1_x {
++#pragma pack(1)
++
++ //
++ // Header: 8 bytes (Table 3.3 of EDID spec)
++ char Header[8]; // EDID1.x header "0 FFh FFh FFh FFh FFh FFh 0"
++
++ //
++ // Vendor/Product ID: 10 bytes (Table 3.4, 3.5 & 3.6 of EDID spec)
++ //baseedid_pnpid_t;
++ union {
++ uint8_t VendorProductID[10]; // Vendor / Product identification
++ struct {
++ uint8_t ManufacturerID[2]; // Bytes 8, 9: Manufacturer ID
++ uint8_t ProductID[2]; // Bytes 10, 11: Product ID
++ uint8_t SerialNumber[4]; // Bytes 12-15: Serial numbers
++ uint8_t WeekOfManufacture; // Byte 16: Week of manufacture
++ uint8_t YearOfManufacture; // Byte 17: Year of manufacture
++ };
++ };
++
++ //
++ // EDID structure Version/Revision: 2 bytes (Table 3.7 of EDID spec)
++ uint8_t ucVersion; // EDID version no.
++ uint8_t ucRevision; // EDID revision no.
++
++ //
++ // Basic display parameters & features: 5 bytes (Table 3.8 of EDID spec)
++ union {
++ uint8_t ucVideoInput; // Video input definition (Refer Table 3.9 of EDID spec)
++
++ struct {
++ uint8_t ucSyncInput : 4; // Sync input supported (iff ucDigitInput = 0)
++ uint8_t ucSetup : 1; // Display setup (iff ucDigitInput = 0)
++ uint8_t ucSigLevStd : 2; // Signal level Standard (iff ucDigitInput = 0)
++
++ uint8_t ucDigitInput : 1; // 1: Digital input; 0: Analog input
++ };
++ };
++
++ // Image size (Table 3.10 of EDID spec)
++ uint8_t ucMaxHIS; // Maximum H. image size in cm
++ uint8_t ucMaxVIS; // Maximum V. image size in cm
++
++ // Gamma (display transfer characteristic)
++ uint8_t ucGamma; // Display gamma value [= (gamma*100)-100]
++
++ // Feature support (Table 3.11 of EDID spec)
++ union {
++ uint8_t ucDMPSFeature; // DPMS feature support
++
++ struct {
++ uint8_t ucGTFSupport : 1; // GTF timing support (1: Yes)
++ uint8_t ucPTM : 1; // Preferred timing is 1st DTD (1: Yes) [Must if EDID >= 1.3]
++ uint8_t ucColorSpace : 1; // Use STD color space (1:Yes) [If set ColorChars should match sRGB values in EDID spec Appendix A]
++ uint8_t ucDispType : 2; // Display type
++ // 00: Monochrome
++ // 01: R/G/B color display
++ // 10: Non R/G/B multicolor display
++ // 11: Undefined
++ uint8_t ucActiveOff : 1; // Active off (Display consumes less power/blanks out when it receives an out of range timing)
++ uint8_t ucSuspend : 1; // Suspend (Refer VESA DPMS spec)
++ uint8_t ucStandBy : 1; // Stand-by (Refer VESA DPMS spec)
++ };
++ };
++
++ //
++ // Phosphor or Filter Chromaticity: 10 bytes
++ uint8_t ColorChars[10]; // Color characteristics (Refer Table 3.12 of EDID spec)
++
++ //
++ // Established timings: 3 bytes (Table 3.14 of EDID spec)
++ union {
++ uint8_t EstTiming1;
++ struct {
++ uint8_t bSupports800x600_60 : 1;
++ uint8_t bSupports800x600_56 : 1;
++ uint8_t bSupports640x480_75 : 1;
++ uint8_t bSupports640x480_72 : 1;
++ uint8_t bSupports640x480_67 : 1;
++ uint8_t bSupports640x480_60 : 1;
++ uint8_t bSupports720x400_88 : 1;
++ uint8_t bSupports720x400_70 : 1;
++ };
++ };
++ union {
++ uint8_t EstTiming2;
++ struct {
++ uint8_t bSupports1280x1024_75 : 1;
++ uint8_t bSupports1024x768_75 : 1;
++ uint8_t bSupports1024x768_70 : 1;
++ uint8_t bSupports1024x768_60 : 1;
++ uint8_t bSupports1024x768_87i : 1;
++ uint8_t bSupports832x624_75 : 1;
++ uint8_t bSupports800x600_75 : 1;
++ uint8_t bSupports800x600_72 : 1;
++ };
++ };
++ union {
++ uint8_t MfgTimings;
++ struct {
++ uint8_t bMfgReservedTimings : 7;
++ uint8_t bSupports1152x870_75 : 1;
++ };
++ };
++
++ //
++ // Standard timings: 8 bytes (Table 3.15 of EDID spec)
++ edid_std_timing_t StdTiming[NUM_BASEEDID_STANDARD_TIMING]; // 8 Standard timing support
++
++ //
++ // Detailed timing section - 72 bytes (4*18 bytes)
++ union {
++ edid_dtd_timing_t DTD[MAX_BASEEDID_DTD_BLOCKS]; // Four DTD data blocks
++
++ monitor_descriptor_t MonitorInfo[MAX_BASEEDID_DTD_BLOCKS];
++ };
++
++ uint8_t ucNumExtBlocks; // Number of extension EDID blocks
++ uint8_t ucChecksum; // Checksum of the EDID block
++
++#pragma pack()
++} baseedid_1_x_t;
++
++////////////////////////////////////////////
++//
++// 128-byte EDID 1.4 block0 structure
++// EDID 1.4 block0 structure is different from 1.3 block0
++// That's why this new structure has been added
++// Changes are commented in the structure itself
++//
++////////////////////////////////////////////
++typedef struct _baseedid_1_4 {
++#pragma pack(1)
++
++ //
++ // Header: 8 bytes (Table 3.3 of EDID spec)
++ char Header[8]; // EDID1.x header "0 FFh FFh FFh FFh FFh FFh 0"
++
++ //
++ // Vendor/Product ID: 10 bytes (Table 3.4, 3.5 & 3.6 of EDID spec)
++ union {
++ uint8_t VendorProductID[10]; // Vendor / Product identification
++ struct {
++ uint8_t ManufacturerID[2]; // Bytes 8, 9: Manufacturer ID
++ uint8_t ProductID[2]; // Bytes 10, 11: Product ID
++ uint8_t SerialNumber[4]; // Bytes 12-15: Serial numbers
++ uint8_t WeekOfManufacture; // Byte 16: Week of manufacture
++ uint8_t YearOfManufacture; // Byte 17: Year of manufacture
++ };
++ };
++
++ //
++ // EDID structure Version/Revision: 2 bytes (Table 3.7 of EDID spec)
++ uint8_t ucVersion; // EDID version no.
++ uint8_t ucRevision; // EDID revision no.
++
++ //
++ // Basic display parameters & features: 5 bytes (Table 3.8 of EDID spec)
++ union {
++ uint8_t ucVideoInput; // Video input definition (Refer Table 3.9 of EDID spec)
++
++ struct {
++ uint8_t ucSyncInput : 4; // Sync input supported (iff ucDigitInput = 0)
++ uint8_t ucSetup : 1; // Display setup (iff ucDigitInput = 0)
++ uint8_t ucSigLevStd : 2; // Signal level Standard (iff ucDigitInput = 0)
++
++ uint8_t ucDigitInput : 1; // 1: Digital input; 0: Analog input
++ };
++ // This structure has been introduced to reflect the changes in EDID 1.4 spec
++ // This structure shows new meaning of VIDEO INPUT DEFINITION when input is digital
++ struct {
++ uint8_t ucDigitalVideoInterface : 4; // Digital Video Interface Standard Supported.
++ uint8_t ucColorBitDepth : 3; // Color Bit Depth.
++ // 0 0 0 -- Color Bit Depth is undefined
++ // 0 0 1 -- 6 Bits per Primary Color
++ // 0 1 0 -- 8 Bits per Primary Color
++ // 0 1 1 -- 10 Bits per Primary Color
++ // 1 0 0 -- 12 Bits per Primary Color
++ // 1 0 1 -- 14 Bits per Primary Color
++ // 1 1 0 -- 16 Bits per Primary Color
++ // 1 1 1 -- Reserved (Do Not Use)
++ uint8_t bIsDigitalVideoSignalInterface : 1; // Bit 7
++ };
++ };
++
++ // As per the EDID spec 1.4, the following two fields can be aspect ratios as well.
++ union {
++ uint8_t ucMaxHIS; // Maximum H. image size in cm
++ uint8_t ucARLandscape; // Landscape Aspect ratio as per EDID 1.4 spec
++ };
++ union {
++ uint8_t ucMaxVIS; // Maximum V. image size in cm
++ uint8_t ucARPortrait; // Portrait Aspect ratio as per EDID 1.4 spec
++ };
++
++ // Gamma (display transfer characteristic)
++ uint8_t ucGamma; // Display gamma value [= (gamma*100)-100]
++
++ // Feature support (Table 3.11 of EDID spec)
++ union {
++ uint8_t ucDMPSFeature; // DPMS feature support
++
++ struct {
++ uint8_t ucContinuousDisplay : 1; // Display is continuous or non-continuous (1: Yes)
++ uint8_t ucPTM : 1; // Preferred timing mode indicates native pixel format and native RR.
++ uint8_t ucColorSpace : 1; // Use STD color space (1:Yes) [If set ColorChars should match sRGB values in EDID spec Appendix A]
++ uint8_t ucDispType : 2; // Display type
++ // 00: Monochrome
++ // 01: R/G/B color display
++ // 10: Non R/G/B multicolor display
++ // 11: Undefined
++ uint8_t ucActiveOff : 1; // Active off (Display consumes less power/blanks out when it receives an out of range timing)
++ uint8_t ucSuspend : 1; // Suspend (Refer VESA DPMS spec)
++ uint8_t ucStandBy : 1; // Stand-by (Refer VESA DPMS spec)
++ };
++
++ struct {
++ uint8_t bReserved0 : 1;
++ uint8_t bReserved1 : 1;
++ uint8_t bReserved2 : 1;
++ uint8_t ucColorEncodingFormat : 2; // Supported Color Encoding Format if Video Input is digital
++ // 00: RGB 4:4:4
++ // 01: RGB 4:4:4 & YCrCb 4:4:4
++ // 10: RGB 4:4:4 & YCrCb 4:2:2
++ // 11: RGB 4:4:4 & YCrCb 4:4:4 & YCrCb 4:2:2
++ uint8_t bReserved3 : 1;
++ uint8_t bReserved4 : 1;
++ uint8_t bReserved5 : 1;
++ };
++ };
++
++ //
++ // Phosphor or Filter Chromaticity: 10 bytes
++ uint8_t ColorChars[10]; // Color characteristics (Refer Table 3.12 of EDID spec)
++
++ //
++ // Established timings: 3 bytes (Table 3.14 of EDID spec)
++ union {
++ uint8_t EstTiming1;
++ struct {
++ uint8_t bSupports800x600_60 : 1;
++ uint8_t bSupports800x600_56 : 1;
++ uint8_t bSupports640x480_75 : 1;
++ uint8_t bSupports640x480_72 : 1;
++ uint8_t bSupports640x480_67 : 1;
++ uint8_t bSupports640x480_60 : 1;
++ uint8_t bSupports720x400_88 : 1;
++ uint8_t bSupports720x400_70 : 1;
++ };
++ };
++ union {
++ uint8_t EstTiming2;
++ struct {
++ uint8_t bSupports1280x1024_75 : 1;
++ uint8_t bSupports1024x768_75 : 1;
++ uint8_t bSupports1024x768_70 : 1;
++ uint8_t bSupports1024x768_60 : 1;
++ uint8_t bSupports1024x768_87i : 1;
++ uint8_t bSupports832x624_75 : 1;
++ uint8_t bSupports800x600_75 : 1;
++ uint8_t bSupports800x600_72 : 1;
++ };
++ };
++ union {
++ uint8_t MfgTimings;
++ struct {
++ uint8_t bMfgReservedTimings : 7;
++ uint8_t bSupports1152x870_75 : 1;
++ };
++ };
++
++ //
++ // Standard timings: 8 bytes (Table 3.15 of EDID spec)
++ edid_std_timing_t StdTiming[NUM_BASEEDID_STANDARD_TIMING]; // 8 Standard timing support
++
++ // Detailed timing section - 72 bytes (4*18 bytes)
++ // As per the new spec 1.4, the first Detailed Timing Section should contain the PREFERED TIMING BLOCK
++ edid_dtd_timing_t PreferedTimingMode;
++ // The rest 54 bytes of the Detailed Timing Section.
++ union {
++ edid_dtd_timing_t DTD[MAX_BASEEDID_DTD_BLOCKS - 1]; // Three DTD data blocks
++
++ monitor_descriptor_t MonitorInfo[MAX_BASEEDID_DTD_BLOCKS - 1]; // Three Monitor Descriptor blocks
++ };
++
++ uint8_t ucNumExtBlocks; // Number of extension EDID blocks
++ uint8_t ucChecksum; // Checksum of the EDID block
++
++#pragma pack()
++} baseedid_1_4_t;
++
++
++//*****************************************************
++//*****************************************************
++//
++// DATA STRUCTURES AND DEFINITIONS FOR CE-EXTENSION
++//
++//*****************************************************
++//*****************************************************
++
++/////////////////////////////////
++//
++//CE - Extension Block Structure
++//
++/////////////////////////////////
++typedef struct _ce_edid {
++ uint8_t ucTag;
++ uint8_t ucRevision;
++ uint8_t ucDTDOffset;
++ uint8_t ucCapabilty;
++ uint8_t data[123];
++ uint8_t ucCheckSum;
++} ce_edid_t;
++
++////////////////////////////////////////////
++//
++//CE - Video Capability Data block structure
++//
++////////////////////////////////////////////
++typedef union _video_cap_data_block {
++ uint8_t ucValue;
++ struct
++ {
++ uint8_t ucCEScanBehavior :2; // Indicates scan behavior of CE mode
++ uint8_t ucITScanBehavior :2; // Indicates scan behavior of IT mode
++ uint8_t ucPTScanBehavior :2; // Indicates scan behavior of Preferred mode
++ uint8_t ucQuantRangeSelectable :1; // Indicates if RGB Quantization Range can be overridden
++ uint8_t ucReserved :1;
++ };
++} video_cap_data_block_t;
++
++////////////////////////////////////////////
++//
++//CEA Extn Block Byte3 structure
++//
++////////////////////////////////////////////
++typedef union _cea_ext_capability {
++ uint8_t ucValue;
++ struct
++ {
++ uint8_t ucTotalNativeDTDs :4; // Total number of DTDs in extension block
++ uint8_t ucSupportsYCBCR422 :1; // Indicates support for YCBCR 4:2:2
++ uint8_t ucSupportsYCBCR444 :1; // Indicates support for YCBCR 4:4:4
++ uint8_t ucSupportsBasicAudio :1; // Indicates support for Basic audio
++ uint8_t ucUnderscansITFormats :1; // Indicates underscan behavior of IT formats
++ };
++} cea_ext_capability_t;
++
++////////////////////////////////////////////
++//
++//CE - Video Capability Data block structure
++//
++////////////////////////////////////////////
++typedef enum {
++ FORMAT_NOT_SUPPORTED = 0, // Format is not supported
++ ALWAYS_OVERSCANNED = 1, // Format is always overscanned
++ ALWAYS_UNDERSCANNED = 2, // Format is always underscanned
++ SUPPORTS_OVER_AND_UNDERSCAN = 3 // Sink supports both overscan and underscan
++} cea_scan_behavior_t;
++
++
++/////////////////////////////////
++//
++// #defines required for CE Extn
++//
++/////////////////////////////////
++#define CEA_EXT_TAG 0x02
++#define CEA_EXT_SUPPORTED_VERSION 0x03
++#define CEA_EXT_861_REVISION 0x01
++
++#define CEA_USE_EXTENDED_TAG 0x7
++
++#define CEA_AUDIO_DATABLOCK 0x1
++#define CEA_VIDEO_DATABLOCK 0x2
++#define CEA_VENDOR_DATABLOCK 0x3
++#define CEA_SPEAKER_DATABLOCK 0x4
++#define CEA_VIDEO_CAP_DATABLOCK 0x0
++
++#define CEA_DATABLOCK_TAG_MASK 0xE0
++#define CEA_DATABLOCK_LENGTH_MASK 0x1F
++#define CEA_SHORT_VIDEO_DESCRIPTOR_CODE_MASK 0x7F
++#define CEA_NATIVE_FORMAT_BIT_MASK 0x80
++
++#define CEA_HDMI_IEEE_REG_ID 0x00000C03
++#define CEA_EDID_HEADER_SZIE 0x04
++
++// Extended Data block type
++// This bit definitions are as per CE 861-D spec
++#define CEA_COLORIMETRY_DATABLOCK 0x5
++ #define CE_COLORIMETRY_MD0_MASK BIT0
++ #define CE_COLORIMETRY_MD1_MASK BIT1
++ #define CE_COLORIMETRY_MD2_MASK BIT3
++#if 0
++//==================================================================================
++//==================================================================================
++// DATA Structure definitions for VTB parsing.....
++// Reference VESA Documents are VTB Extension(Release A) & CVT standard version 1.1
++//===================================================================================
++// #defines for VTB-EXT
++//===================================================================================
++
++#define VTB_EXT_TAG 0x10
++#define VTB_EXT_SUPPORTED_VERSION 0x03
++
++#define VTB_MAX_DTD_TIMINGS 6
++#define VTB_MAX_CVT_TIMINGS 40
++#define VTB_MAX_STANDARD_TIMINGS 61
++
++#define VTB_DTD_OFFSET 5
++#define VTB_DTD_SIZE 18
++#define VTB_CVT_SIZE 3
++#define VTB_ST_SIZE 2
++
++// This struct is for VTB Extension block.
++typedef struct _VTB_EXT
++{
++ uint8_t ucTag;
++ uint8_t ucVersion;
++ uint8_t ulNumDTD;
++ uint8_t ulNumCVT;
++ uint8_t ulNumST;
++ uint8_t DATA[122];
++ uint8_t ucChecksum;
++}VTB_EXT, *PVTB_EXT;
++
++// Following struct is for CVT descriptor (Version 1.1)
++typedef struct _VTB_CVT_TIMING
++{
++#pragma pack(1)
++
++ uint8_t ucVA_low; // Lower 8 bits of Vertical size. This Vsize = (vertical active lines/2)-1.
++ // Range for VA lines is 2 to 8192. CVT supports only an even no. of active lines per frame.
++
++ union {
++ uint8_t ucVA_high_AR;
++ struct {
++
++ uint8_t ucReserved00 :2; //Bits 1-0 are reserved and set to 00h
++ uint8_t ucAspectRatio :2; // Aspect Ratio specifier bits.
++ // 00: 4:3 Aspect ratio
++ // 01: 16:9 Aspect ratio
++ // 10: 16:10 Aspect ratio
++ // 11: Undefined (Reserved)
++
++
++ uint8_t ucVA_high :4; // Upper 4 bits of Vertical Size.
++ };
++ };
++
++ union {
++ uint8_t ucRefresh_Rate_Bits;
++ struct {
++
++ uint8_t ucRR_60Hz_RB :1; // When set, indicates 60Hz support with Reduced Blanking.
++ uint8_t ucRR_85Hz :1; // || 85Hz || .
++ uint8_t ucRR_75Hz :1; // || 75Hz || .
++ uint8_t ucRR_60Hz :1; // || 60Hz || .
++ uint8_t ucRR_50Hz :1; // When set, indicates 50Hz Refresh Rate with CRT Blanking supports specified pixel format.
++ uint8_t ucPreferredRefresh_Rate :2; // Preferred Refresh Rate specifier bits.
++ // 00: 50 Hz
++ // 01: 60 Hz (this means either CRT blanking or Reduced Blanking whichever is supported.
++ // If both are supported, then RB is preferred.)
++ // 10: 75 Hz
++ // 11: 85 Hz
++
++ uint8_t ucReserved0 :1; // This bit is reserved and set to '0'.
++
++ };
++ };
++#pragma pack()
++} VTB_CVT_TIMING, *PVTB_CVT_TIMING;
++
++
++// This struct is for storing extracted Info from CVT descriptor....
++// This is defined by author.....not based on CVT specs.
++typedef struct _CVT_INFO
++{
++ ULONG ulYRes;
++ ULONG ulXRes;
++ ULONG ulRRate[5]; //As max 5 Refresh Rates can be supported.
++ BOOLEAN bRed_Blank_Req[5];
++ BOOLEAN bPreferred_RR[5]; //To set flag for Preferred RR
++ ULONG ulNumRates; //Number of Refresh rates Supported. (Max. 5)
++} CVT_INFO, *PCVT_INFO;
++#endif
++// This structure is for storing the Display device Data retrieved from CEA block
++// This is defined as per the Display Device Data Block standard.
++typedef struct _display_device_data {
++#pragma pack (1)
++ union
++ {
++ uint8_t ucTagAndLength; // Data Block Tag and Block Length. should be 0xFF
++ struct
++ {
++ uint8_t ucLength : 5;
++ uint8_t ucTag : 3;
++ };
++ };
++ uint8_t ucChildTag; // Child tag required as per CEA spec should be 0x02
++ union
++ {
++ uint8_t ucInterfaceType;
++ struct
++ {
++ uint8_t ucNumOfChannels : 4;// Number of channels supported
++ uint8_t ucInterfaceCode : 4;// Interface code
++ };
++ };
++ union
++ {
++ uint8_t ucVerAndRel;
++ struct
++ {
++ uint8_t ucRelease : 4;// Release
++ uint8_t ucVersion : 4;// Version.
++ };
++ };
++ uint8_t ucContentProtectionSuppFlag;// Flag indicating support for content protection.
++ union
++ {
++ uint16_t usClockFrequency;// Clock Frequency
++ struct
++ {
++ uint16_t usMinClockFrequency : 6; // First 6 bits indicates Min frequency
++ uint16_t usMaxClockFrequency : 10;// Next 10 bits indicates Max frequency
++ };
++ };
++ union
++ {
++ uint8_t ucNativePixelFormat[4];// Pixel Format
++ struct
++ {
++ uint8_t ucHorizontalPixelCntLower;// Lower byte value of the Horizontal pixel count
++ uint8_t ucHorizontalPixelCntUpper;// Upper byte value of the Horizontal pixel count
++ uint8_t ucVerticalPixelCntLower;// Lower byte value of the vertical pixel count
++ uint8_t ucVerticalPixelCntUpper; // Upper byte value of the vertical pixel count
++ };
++ };
++ uint8_t ucAspectRatio;// Byte indicating Aspect ratio.
++ union
++ {
++ uint8_t ucOrientationAndRotation;
++ struct
++ {
++ uint8_t ucScanDirection : 2;// Scan direction.
++ uint8_t ucZeroPixelLocation : 2;// Zero Pixel Location.
++ uint8_t ucRotationCapability : 2;// Indicates rotation capability
++ uint8_t ucDefaultOrientation : 2;// Default Orientation.
++ };
++ };
++ uint8_t ucSubPixelInfo;// Sub-Pixel Information.
++ uint8_t ucHorizontalPitch;// Horizontal Pitch
++ uint8_t ucVerticalPitch;// Vertical Pitch
++ union
++ {
++ uint8_t ucMiscDisplayCapabilities;
++ struct
++ {
++ uint8_t bReserved : 3;
++ uint8_t ucDeinterlacing : 1;// indicates deinterlacing support
++ uint8_t ucOverdriverNotRecommended : 1;
++ uint8_t ucDirectDrive : 1;// indicates DirectDrive support
++ uint8_t ucDithering : 2;// indicates Dithering support.
++ };
++ };
++ union
++ {
++ uint8_t ucAudioFlags;// Flags indicating Audio details
++ struct
++ {
++ uint8_t bReserved1 : 4;
++ uint8_t ucAudioInputOverride : 1;// Indicates Audio Input Override
++ uint8_t ucSeparateAudioInputs : 1;// Indicates Separate Audio Inputs
++ uint8_t ucAudioInputOnVideoInterface : 1;// Shows whether Audio input is through the video interface.
++ };
++ };
++ union
++ {
++ uint8_t ucAudioDelayFlags; // Audio Delay Flags
++ struct
++ {
++ uint8_t ucAudioDelay : 7;// Absolute offset between the audio and video signals.
++ uint8_t ucAudioSign : 1;// Indicates positive or negative delay.
++ };
++ };
++ union
++ {
++ uint8_t ucFrameRateAndModeConversion;
++ struct
++ {
++ uint8_t ucFrameRateRange : 6;//Device Frame rate Range
++ uint8_t ucFrameRateConversion : 2;//00 - No dedicated rate conversion hardware is provided;
++ //01 - The display provides a single frame buffer
++ //10 - The display provides double-buffering
++ //11- The display provides frame-rate conversion involving interframe interpolation
++ };
++ };
++ uint8_t ucDeviceNativeRate;// Device Native Frame rate
++ union
++ {
++ uint8_t ucColorBitDepth;// Color bit depth
++ struct
++ {
++ uint8_t ucDisplayDeviceColBitDepth : 4; // Color bit depth of the display device
++ uint8_t ucInterfaceColBitDepth : 4;// color bit depth supported by the interface.
++ };
++ };
++ uint8_t ucAddPrimaryChromaticities[8];// Additional Primary Chromaticities.
++ union
++ {
++ uint8_t ucResponseTimeFlags;
++ struct
++ {
++ uint8_t ucResponseTime : 7;// Time for transition.
++ uint8_t ucBlackToWhite : 1;// if 1, then transition from black to white
++ // if 0, then transition from white to black
++ };
++ };
++ union
++ {
++ uint8_t ucOverscanInformation;
++ struct
++ {
++ uint8_t ucVerticalPercentage : 4;// Percentage of Overscan in vertical direction.
++ uint8_t ucHorizontalPercentage : 4;// Percentage of Overscan in horizontal direction.
++ };
++ };
++#pragma pack()
++} display_device_data_t;
++
++//=========================================================================
++//=========================================================================
++// #defines for Block Map Ext.
++//=========================================================================
++//=========================================================================
++#define BLOCK_MAP_EXT_TAG 0xF0
++
++#endif // EDIDSTRUCTS_H
+\ No newline at end of file
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_hdmi_i2c.c
+@@ -0,0 +1,213 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *
++ */
++/* chunfeng.zhao@intel.com
++ */
++
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++#if 0 /* FIXME_JLIU7 HDMI */
++#include "psb_intel_hdmi_i2c.h"
++
++#define MDFLD_HDMI_I2C_ADAPTER_ID 3
++
++static int hdmi_i2c_open(struct i2c_client *c, void *data)
++{
++ /*Do nothing for now, may need to add sync code? */
++ return 0;
++}
++
++static int hdmi_i2c_close(struct i2c_client *c, void *data)
++{
++ /*Do nothing for now, may need to add sync code? */
++ return 0;
++}
++
++static char hdmi_i2c_read_byte_data(struct i2c_client *c, unsigned char adr)
++{
++ return i2c_smbus_read_byte_data(c, adr);
++}
++
++static int hdmi_i2c_write_byte_data(struct i2c_client *c, unsigned char adr, unsigned char data)
++{
++ return i2c_smbus_write_byte_data(c, adr, data);
++}
++
++static int hdmi_i2c_read_data(struct i2c_adapter * adapter, unsigned char adr, unsigned char * data, int size)
++{
++ struct i2c_msg msg = {
++ .addr = adr,.flags = I2C_M_RD,.buf = data,.len = size
++ };
++ return i2c_transfer(adapter, &msg, 1);
++}
++
++static int hdmi_i2c_write_data(struct i2c_adapter * adapter, unsigned char adr, unsigned char *data, int size)
++{
++ struct i2c_msg msg = {
++ .addr = adr,.flags = 0,.buf = data,.len = size
++ };
++ return i2c_transfer(adapter, &msg, 1);
++}
++
++static struct i2c_adapter * hdmi_i2c_get_adapter(struct i2c_client *c)
++{
++ /* For HDMI if not plugged in, then i2c core may not create the client driver */
++ /* Should use the adapter directly */
++ if(c)
++ return c->adapter;
++ else
++ return i2c_get_adapter(MDFLD_HDMI_I2C_ADAPTER_ID);
++}
++
++static struct mdfld_hdmi_i2c hdmi_i2c_bus = {
++ .open = hdmi_i2c_open,
++ .close = hdmi_i2c_close,
++ .read_byte_data = hdmi_i2c_read_byte_data,
++ .write_byte_data = hdmi_i2c_write_byte_data,
++ .read_data = hdmi_i2c_read_data,
++ .write_data = hdmi_i2c_write_data,
++ .get_adapter = hdmi_i2c_get_adapter,
++};
++
++struct mdfld_hdmi_i2c * hdmi_i2c_init()
++{
++ return &hdmi_i2c_bus;
++}
++
++/*
++ * * i2c addresses to scan
++ * 0x28 is from 0x50 >> 1 to remove first bit for ddc address
++ * 0x39 is from 0x73 >> 1 for HDCP address
++ * */
++static unsigned short normal_i2c[] = {0x28, 0x39, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++/* Each client has this additional data */
++struct mdfld_hdmi_i2c_data {
++ struct semaphore data_lock;
++ int data;
++};
++
++static const struct i2c_device_id mdfld_hdmi_id[] = {
++ {"mdfld_hdmi", 0},
++ {}
++};
++
++static int mdfld_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ return 0;
++}
++
++/* This function is called by i2c_detect */
++static int mdfld_detect(struct i2c_client *client, int kind,
++ struct i2c_board_info *info)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ struct mdfld_hdmi_i2c_data *data = NULL;
++ int err = 0;
++
++ /* HDMI i2c is i2c3 with id = 3 */
++ if (adapter->id !=3) {
++ err = -ENODEV;
++ goto error;
++ }
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
++ err = -ENODEV;
++ goto error;
++ }
++
++ data = kmalloc(sizeof(*data), GFP_KERNEL);
++ if (!data) {
++ err = -ENOMEM;
++ goto error;
++ }
++
++ memset(data, 0x00, sizeof(*data));
++
++ i2c_set_clientdata(client, data);
++
++ if (client->addr == 0xA0)
++ hdmi_i2c_bus.ddc_client = client;
++ else
++ hdmi_i2c_bus.hdcp_client = client;
++
++ return 0;
++
++error:
++ if(data != NULL) kfree(data);
++ return err;
++}
++
++static int mdfld_remove(struct i2c_client *client)
++{
++ struct mdfld_hdmi_i2c_data *data = i2c_get_clientdata(client);
++ kfree(data);
++ return 0;
++}
++
++/* This is the driver that will be inserted */
++static struct i2c_driver mdfld_hdmi_i2c_driver = {
++ .driver = {
++ .name = "mdfld_hdmi",
++ },
++ .probe = mdfld_probe,
++ .remove = mdfld_remove,
++ .id_table = mdfld_hdmi_id,
++
++ .class = I2C_CLASS_DDC,
++ .detect = mdfld_detect,
++ .address_data = &addr_data,
++};
++
++
++static int __init mdfld_i2c_init(void)
++{
++ hdmi_i2c_bus.ddc_client = NULL;
++ hdmi_i2c_bus.hdcp_client = NULL;
++
++ return i2c_add_driver(&mdfld_hdmi_i2c_driver);
++}
++
++static void __exit mdfld_i2c_exit(void)
++{
++ i2c_del_driver(&mdfld_hdmi_i2c_driver);
++}
++
++
++MODULE_AUTHOR("Chunfeng Zhao <chunfeng.zhao@intel.com>");
++MODULE_DESCRIPTION("mdfld hdmi i2c client driver");
++MODULE_LICENSE("GPL");
++
++module_init(mdfld_i2c_init);
++module_exit(mdfld_i2c_exit);
++#endif /* FIXME_JLIU7 HDMI */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_hdmi_i2c.h
+@@ -0,0 +1,21 @@
++#ifndef PSB_INTEL_HDMI_I2C_H
++#define PSB_INTEL_HDMI_I2C_H
++
++#if 0 /* FIXME_JLIU7 HDMI */
++struct mdfld_hdmi_i2c {
++ struct i2c_client *ddc_client;
++ struct i2c_client *hdcp_client;
++ int (*open) (struct i2c_client *, void *);
++ int (*close) (struct i2c_client *, void *);
++ char (*read_byte_data)(struct i2c_client *, unsigned char adr);
++ int (*write_byte_data)(struct i2c_client *, unsigned char adr, unsigned char data);
++ int (*read_data)(struct i2c_adapter * adapter, unsigned char adr, unsigned char * data, int size);
++ int (*write_data)(struct i2c_adapter * adapter, unsigned char adr, unsigned char *data, int size);
++ struct i2c_adapter * (*get_adapter)(struct i2c_client *c);
++ struct mutex lock;
++};
++
++extern struct mdfld_hdmi_i2c * hdmi_i2c_init(void);
++
++#endif /* FIXME_JLIU7 HDMI */
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_hdmi_reg.h
+@@ -0,0 +1,130 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *
++ */
++/* chunfeng.zhao@intel.com
++ */
++
++#ifndef PSB_INTEL_HDMI_REG_H
++#define PSB_INTEL_HDMI_REG_H
++
++//////////////////////////////////////////
++//
++// Integrated HDMI specific registers
++//
++/////////////////////////////////////////
++
++#define RESERVED2(x,y) x##y
++#define RESERVED1(x,y) RESERVED2(x,y)
++#define RANDOMNUMBER __LINE__ // __COUNTER__
++#define UNIQUENAME(ValueName) RESERVED1(ValueName, RANDOMNUMBER)
++
++/** Requird for HDMI operation */
++#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
++#define HDMI_BORDER_ENABLE (1 << 7)
++#define HDMI_AUDIO_ENABLE (1 << 6)
++/** New with 965, default is to be set */
++#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
++/** New with 965, default is to be set */
++#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
++#define HDMIB_PCIE_CONCURRENCY (1 << 3)
++#define HDMI_DETECTED (1 << 2)
++/* Bits to be preserved when writing */
++#if 0
++#define HDMIB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
++#define HDMIC_PRESERVE_MASK ((1 << 17) | (1 << 26))
++#endif
++//
++//AUDIO configuration register
++//
++#define MDFLD_AUD_CONFIG_REG 0x69000
++ #define MDFLD_AUD_CONFIG_REG_RESERVED_BITS BITRANGE(31,25)
++typedef union _mdfld_aud_config {
++ uint32_t value;
++
++ struct
++ {
++ const uint32_t disable_ncts : 1; //Bit 0
++ uint32_t lay_out : 1; //Bit 1 (0 - layout 0 1 - layout 1)
++ uint32_t format : 2; /*Bit [3:2]
++ * 00 - l-PCM or IEC 61937
++ * 01 - High bit rate IEC 61937 stream packet)
++ * 10 - Not supported
++ * 11 - Not supported
++ */
++ uint32_t num_audio_ch : 2; /*Bit [5:4]
++ * 00 - 2 channels(stereo)
++ * 01 - 3 or 4 channels
++ * 10 - 5 or 6 channels
++ * 11 - 7 or 8 channels
++ */
++ uint32_t UNIQUENAME(Reserved) : 1; //Bit 6
++ uint32_t b_bit_enabled : 1; /* Bit 7 (0 - B bit set only for sub-packet 0
++ * 1 - B bit set for all valid sub packet)
++ */
++ uint32_t sample_flat_bit : 1; //Bit 8
++ uint32_t validity_bit : 1; //Bit 9 (1 - set V bit in sub-frame 0 - clear V bit(debugging, testing))
++ uint32_t user_bit : 1; //Bit 10 (1 - set U bit in sub frame 0 - clear U bit(default)
++ uint32_t underrun_packet_bit : 1; //Bit 11 (1 - send underrun packet 0 - send null packet)
++ uint32_t UNIQUENAME(Reserved) : 20; //Bit [31:12]
++ };
++} mdfld_aud_config_t;
++
++//
++// Audio control state register
++//
++#define MDFLD_AUD_CNTL_ST_REG 0x69060
++ #define MDFLD_AUD_CNTL_ST_RESERVED_BITS (BITRANGE(14,4) | BITRANGE(31,25))
++// Note => DIP : Data Island Packet
++typedef union _mdfld_aud_cntl {
++ uint32_t value;
++
++ struct
++ {
++ uint32_t dip_ram_access_address :4; // bit 3:0
++ uint32_t UNIQUENAME(Reserved) :11; // bit 14:4
++ uint32_t cp_ready :1; // bit 15
++ uint32_t video_dip_trans_freq :2; // bit 17:16
++ uint32_t dip_buffer_index :3; // bit 20:18
++ uint32_t enable_dip_type :4; // bit 24:21
++ uint32_t UNIQUENAME(Reserved) :7; // bit 31:25
++ };
++
++} mdfld_aud_cntl_t;
++
++
++// HDMI Audio Data Island Packet Data
++//
++#define MDFLD_HDMI_AUDPAC_DATA_REG 0x69114
++
++typedef union _mdfld_hdmi_audpac_data {
++ uint32_t value;
++
++ struct
++ {
++ uint32_t audio_dip_data :32; // bit 31:0
++ };
++} mdfld_hdmi_audpac_data_t;
++
++#endif // PSB_INTEL_HDMI_REG_H
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_i2c.c
+@@ -0,0 +1,172 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/*
++ * Intel GPIO access functions
++ */
++
++#define I2C_RISEFALL_TIME 20
++
++static int get_clock(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_CLOCK_VAL_IN) != 0;
++}
++
++static int get_data(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_DATA_VAL_IN) != 0;
++}
++
++static void set_clock(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, clock_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
++ else
++ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
++ GPIO_CLOCK_VAL_MASK;
++ REG_WRITE(chan->reg, reserved | clock_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++static void set_data(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, data_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
++ else
++ data_bits =
++ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
++ GPIO_DATA_VAL_MASK;
++
++ REG_WRITE(chan->reg, reserved | data_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++/**
++ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * @dev: DRM device
++ * @output: driver specific output device
++ * @reg: GPIO reg to use
++ * @name: name for this bus
++ *
++ * Creates and registers a new i2c bus with the Linux i2c layer, for use
++ * in output probing and control (e.g. DDC or SDVO control functions).
++ *
++ * Possible values for @reg include:
++ * %GPIOA
++ * %GPIOB
++ * %GPIOC
++ * %GPIOD
++ * %GPIOE
++ * %GPIOF
++ * %GPIOG
++ * %GPIOH
++ * see PRM for details on how these different busses are used.
++ */
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name)
++{
++ struct psb_intel_i2c_chan *chan;
++
++ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
++ if (!chan)
++ goto out_free;
++
++ chan->drm_dev = dev;
++ chan->reg = reg;
++ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
++ chan->adapter.owner = THIS_MODULE;
++ chan->adapter.algo_data = &chan->algo;
++ chan->adapter.dev.parent = &dev->pdev->dev;
++ chan->algo.setsda = set_data;
++ chan->algo.setscl = set_clock;
++ chan->algo.getsda = get_data;
++ chan->algo.getscl = get_clock;
++ chan->algo.udelay = 20;
++ chan->algo.timeout = usecs_to_jiffies(2200);
++ chan->algo.data = chan;
++
++ i2c_set_adapdata(&chan->adapter, chan);
++
++ if (i2c_bit_add_bus(&chan->adapter))
++ goto out_free;
++
++ /* JJJ: raise SCL and SDA? */
++ set_data(chan, 1);
++ set_clock(chan, 1);
++ udelay(20);
++
++ return chan;
++
++out_free:
++ kfree(chan);
++ return NULL;
++}
++
++/**
++ * psb_intel_i2c_destroy - unregister and free i2c bus resources
++ * @output: channel to free
++ *
++ * Unregister the adapter from the i2c layer, then free the structure.
++ */
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
++{
++ if (!chan)
++ return;
++
++ i2c_del_adapter(&chan->adapter);
++ kfree(chan);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_lvds.c
+@@ -0,0 +1,1390 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ * Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++/* #include <drm/drm_crtc.h> */
++/* #include <drm/drm_edid.h> */
++#include <drm/drmP.h>
++
++#include "psb_intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_powermgmt.h"
++
++/* MRST defines start */
++uint8_t blc_freq;
++uint8_t blc_minbrightness;
++uint8_t blc_i2caddr;
++uint8_t blc_brightnesscmd;
++int lvds_backlight; /* restore backlight to this value */
++
++u32 CoreClock;
++u32 PWMControlRegFreq;
++
++/**
++ * LVDS I2C backlight control macros
++ */
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_I2C_TYPE 0x01
++#define BLC_PWM_TYPT 0x02
++
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
++#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
++#define PSB_BLC_PWM_PRECISION_FACTOR (10)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++
++struct psb_intel_lvds_priv {
++ /**
++ * Saved LVDO output states
++ */
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t saveLVDS;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePFIT_PGM_RATIOS;
++ uint32_t saveBLC_PWM_CTL;
++};
++
++/* MRST defines end */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ u32 retVal;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ retVal = ((REG_READ(BLC_PWM_CTL) &
++ BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ retVal = ((dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++ return retVal;
++}
++
++/**
++ * Set LVDS backlight level by I2C command
++ */
++static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
++ unsigned int level)
++ {
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
++ u8 out_buf[2];
++ unsigned int blc_i2c_brightness;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = lvds_i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
++ BRIGHTNESS_MASK /
++ BRIGHTNESS_MAX_LEVEL);
++
++ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
++
++ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
++ out_buf[1] = (u8)blc_i2c_brightness;
++
++ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
++ DRM_DEBUG("I2C set brightness.(command, value) (%d, %d)\n",
++ blc_brightnesscmd,
++ blc_i2c_brightness);
++ return 0;
++ }
++
++ DRM_ERROR("I2C transfer error\n");
++ return -1;
++}
++
++
++static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ u32 max_pwm_blc;
++ u32 blc_pwm_duty_cycle;
++
++ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
++
++ /*BLC_PWM_CTL Should be initiated while backlight device init*/
++ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
++
++ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
++
++ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++ (blc_pwm_duty_cycle));
++
++ return 0;
++}
++
++/**
++ * Set LVDS backlight level either by I2C or PWM
++ */
++void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
++{
++ /*u32 blc_pwm_ctl;*/
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ DRM_DEBUG("backlight level is %d\n", level);
++
++ if (!dev_priv->lvds_bl) {
++ DRM_ERROR("NO LVDS Backlight Info\n");
++ return;
++ }
++
++ if (IS_MRST(dev)) {
++ DRM_ERROR(
++ "psb_intel_lvds_set_brightness called...not expected\n");
++ return;
++ }
++
++ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
++ psb_lvds_i2c_set_brightness(dev, level);
++ else
++ psb_lvds_pwm_set_brightness(dev, level);
++}
++
++/**
++ * Sets the backlight level.
++ *
++ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
++ */
++static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ u32 blc_pwm_ctl;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ blc_pwm_ctl =
++ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++ REG_WRITE(BLC_PWM_CTL,
++ (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
++ ~BACKLIGHT_DUTY_CYCLE_MASK;
++ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
++ }
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void psb_intel_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ psb_intel_lvds_set_backlight(dev,
++ output->
++ mode_dev->backlight_duty_cycle);
++ } else {
++ psb_intel_lvds_set_backlight(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ if (mode == DRM_MODE_DPMS_ON)
++ psb_intel_lvds_set_power(dev, output, true);
++ else
++ psb_intel_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void psb_intel_lvds_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_lvds_priv *lvds_priv =
++ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
++
++ if (IS_POULSBO(dev)) {
++ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ lvds_priv->saveLVDS = REG_READ(LVDS);
++ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
++ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
++ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
++
++ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * If the light is off at server startup,
++ * just make it full brightness
++ */
++ if (dev_priv->backlight_duty_cycle == 0)
++ dev_priv->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++ lvds_priv->savePP_ON,
++ lvds_priv->savePP_OFF,
++ lvds_priv->saveLVDS,
++ lvds_priv->savePP_CONTROL,
++ lvds_priv->savePP_CYCLE,
++ lvds_priv->saveBLC_PWM_CTL);
++ }
++}
++
++static void psb_intel_lvds_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ u32 pp_status;
++
++ /*struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;*/
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_lvds_priv *lvds_priv =
++ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
++
++ if (IS_POULSBO(dev)) {
++ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++ lvds_priv->savePP_ON,
++ lvds_priv->savePP_OFF,
++ lvds_priv->saveLVDS,
++ lvds_priv->savePP_CONTROL,
++ lvds_priv->savePP_CYCLE,
++ lvds_priv->saveBLC_PWM_CTL);
++
++ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
++ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
++ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
++ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
++ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
++ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
++ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
++ REG_WRITE(LVDS, lvds_priv->saveLVDS);
++
++ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++ } else {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++ }
++}
++
++int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_display_mode *fixed_mode =
++ psb_intel_output->mode_dev->panel_fixed_mode;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
++ fixed_mode = psb_intel_output->mode_dev->panel_fixed_mode2;
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ return MODE_NO_INTERLACE;
++
++ if (fixed_mode) {
++ if (mode->hdisplay > fixed_mode->hdisplay)
++ return MODE_PANEL;
++ if (mode->vdisplay > fixed_mode->vdisplay)
++ return MODE_PANEL;
++ }
++ return MODE_OK;
++}
++
++bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(encoder->crtc);
++ struct drm_encoder *tmp_encoder;
++ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
++ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
++
++ PSB_DEBUG_ENTRY("type = 0x%x, pipe = %d.\n",psb_intel_output->type, psb_intel_crtc->pipe);
++
++ if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
++ panel_fixed_mode = mode_dev->panel_fixed_mode2;
++
++ /* Should never happen!! */
++ if (IS_MID(dev) && psb_intel_crtc->pipe == 1) {
++ printk(KERN_ERR
++ "Can't support LVDS/MIPI on pipe B on MRST\n");
++ return false;
++ } else if (!IS_MID(dev) && !IS_I965G(dev)
++ && psb_intel_crtc->pipe == 0) {
++ printk(KERN_ERR "Can't support LVDS on pipe A\n");
++ return false;
++ }
++ /* Should never happen!! */
++ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
++ head) {
++ if (tmp_encoder != encoder
++ && tmp_encoder->crtc == encoder->crtc) {
++ printk(KERN_ERR "Can't enable LVDS and another "
++ "encoder on the same pipe\n");
++ return false;
++ }
++ }
++
++ /*
++ * If we have timings from the BIOS for the panel, put them in
++ * to the adjusted mode. The CRTC will be set up for this mode,
++ * with the panel scaling set up to source from the H/VDisplay
++ * of the original mode.
++ */
++ if (panel_fixed_mode != NULL) {
++ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
++ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
++ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
++ adjusted_mode->htotal = panel_fixed_mode->htotal;
++ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
++ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
++ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
++ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
++ adjusted_mode->clock = panel_fixed_mode->clock;
++ drm_mode_set_crtcinfo(adjusted_mode,
++ CRTC_INTERLACE_HALVE_V);
++ }
++
++ /*
++ * XXX: It would be nice to support lower refresh rates on the
++ * panels to reduce power consumption, and perhaps match the
++ * user's requested refresh rate.
++ */
++
++ return true;
++}
++
++static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ psb_intel_lvds_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void psb_intel_lvds_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ psb_intel_lvds_set_power(dev, output, true);
++}
++
++static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
++ encoder->crtc);
++ u32 pfit_control;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++
++ /*
++ * Enable automatic panel scaling so that non-native modes fill the
++ * screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++ if (mode->hdisplay != adjusted_mode->hdisplay ||
++ mode->vdisplay != adjusted_mode->vdisplay)
++ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
++ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
++ HORIZ_INTERP_BILINEAR);
++ else
++ pfit_control = 0;
++
++ if (!IS_I965G(dev)) {
++ if (mode_dev->panel_wants_dither)
++ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
++ } else
++ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
++
++ REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++/**
++ * Detect the LVDS connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the LVDS was actually connected anyway.
++ */
++static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
++ *connector)
++{
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
++ */
++static int psb_intel_lvds_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev =
++ psb_intel_output->mode_dev;
++ int ret = 0;
++
++ if (!IS_MRST(dev))
++ ret = psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (ret)
++ return ret;
++
++ /* Didn't get an EDID, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++/**
++ * psb_intel_lvds_destroy - unregister and free LVDS structures
++ * @connector: connector to free
++ *
++ * Unregister the DDC bus for this connector then free the driver private
++ * structure.
++ */
++void psb_intel_lvds_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++int psb_intel_lvds_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value)
++{
++ struct drm_encoder *pEncoder = connector->encoder;
++
++ PSB_DEBUG_ENTRY("\n");
++
++#if 0
++ PSB_DEBUG_ENTRY("connector info, type = %d, type_id=%d, base=0x%x, base.id=0x%x. \n", connector->connector_type, connector->connector_type_id, connector->base, connector->base.id);
++ PSB_DEBUG_ENTRY("encoder info, base.id=%d, encoder_type=%d, dev=0x%x, base=0x%x, possible_clones=0x%x. \n", pEncoder->base.id, pEncoder->encoder_type, pEncoder->dev, pEncoder->base, pEncoder->possible_clones);
++ PSB_DEBUG_ENTRY("encoder info, possible_crtcs=0x%x, crtc=0x%x. \n", pEncoder->possible_crtcs, pEncoder->crtc);
++#endif
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ struct psb_intel_crtc *pPsbCrtc =
++ to_psb_intel_crtc(pEncoder->crtc);
++ uint64_t curValue;
++
++ PSB_DEBUG_ENTRY("scaling mode \n");
++
++ if (!pPsbCrtc)
++ goto set_prop_error;
++
++ switch (value) {
++ case DRM_MODE_SCALE_FULLSCREEN:
++ break;
++ case DRM_MODE_SCALE_NO_SCALE:
++ break;
++ case DRM_MODE_SCALE_ASPECT:
++ break;
++ default:
++ goto set_prop_error;
++ }
++
++ if (drm_connector_property_get_value(connector,
++ property,
++ &curValue))
++ goto set_prop_error;
++
++ if (curValue == value)
++ goto set_prop_done;
++
++ if (drm_connector_property_set_value(connector,
++ property,
++ value))
++ goto set_prop_error;
++
++ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
++ pPsbCrtc->saved_mode.vdisplay != 0) {
++ if (!drm_crtc_helper_set_mode(pEncoder->crtc,
++ &pPsbCrtc->saved_mode,
++ pEncoder->crtc->x,
++ pEncoder->crtc->y,
++ pEncoder->crtc->fb))
++ goto set_prop_error;
++ }
++ } else if (!strcmp(property->name, "backlight") && pEncoder) {
++ PSB_DEBUG_ENTRY("backlight \n");
++
++ if (drm_connector_property_set_value(connector,
++ property,
++ value))
++ goto set_prop_error;
++ else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ struct backlight_device bd;
++ bd.props.brightness = value;
++ psb_set_brightness(&bd);
++#endif
++ }
++ } else if (!strcmp(property->name, "DPMS") && pEncoder) {
++ struct drm_encoder_helper_funcs *pEncHFuncs = pEncoder->helper_private;
++ /*struct drm_crtc_helper_funcs *pCrtcHFuncs = pEncoder->crtc->helper_private;*/
++ PSB_DEBUG_ENTRY("DPMS \n");
++ pEncHFuncs->dpms(pEncoder, value);
++ /*pCrtcHFuncs->dpms(pEncoder->crtc, value);*/
++ }
++
++set_prop_done:
++ return 0;
++set_prop_error:
++ return -1;
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
++ .dpms = psb_intel_lvds_encoder_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = psb_intel_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_lvds_connector_helper_funcs = {
++ .get_modes = psb_intel_lvds_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = psb_intel_lvds_save,
++ .restore = psb_intel_lvds_restore,
++ .detect = psb_intel_lvds_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
++ .destroy = psb_intel_lvds_enc_destroy,
++};
++
++
++
++/**
++ * psb_intel_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_lvds_priv *lvds_priv;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++ struct drm_crtc *crtc;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ u32 lvds;
++ int pipe;
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
++ if (!lvds_priv) {
++ kfree(psb_intel_output);
++ DRM_DEBUG("LVDS private allocation error\n");
++ return;
++ }
++
++ psb_intel_output->dev_priv = lvds_priv;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc,
++ &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ /*Attach connector properties*/
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ /**
++ * Set up I2C bus
++ * FIXME: distroy i2c_bus when exit
++ */
++ psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
++ GPIOB,
++ "LVDSBLC_B");
++ if (!psb_intel_output->i2c_bus) {
++ dev_printk(KERN_ERR,
++ &dev->pdev->dev, "I2C bus registration failed.\n");
++ goto failed_blc_i2c;
++ }
++ psb_intel_output->i2c_bus->slave_addr = 0x2C;
++ dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* Set up the DDC bus. */
++ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
++ GPIOC,
++ "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ psb_intel_ddc_get_modes(psb_intel_output);
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* Failed to get EDID, what about VBT? do we need this?*/
++ if (mode_dev->vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, mode_dev->vbt_mode);
++
++ if (!mode_dev->panel_fixed_mode)
++ if (dev_priv->lfp_lvds_vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev,
++ dev_priv->lfp_lvds_vbt_mode);
++
++ /*
++ * If we didn't get EDID, try checking if the panel is already turned
++ * on. If so, assume that whatever is currently programmed is the
++ * correct mode.
++ */
++ lvds = REG_READ(LVDS);
++ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
++ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
++
++ if (crtc && (lvds & LVDS_PORT_EN)) {
++ mode_dev->panel_fixed_mode =
++ psb_intel_crtc_mode_get(dev, crtc);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ /* FIXME: detect aopen & mac mini type stuff automatically? */
++ /*
++ * Blacklist machines with BIOSes that list an LVDS panel without
++ * actually having one.
++ */
++ if (IS_I945GM(dev)) {
++ /* aopen mini pc */
++ if (dev->pdev->subsystem_vendor == 0xa0a0) {
++ DRM_DEBUG
++ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ if ((dev->pdev->subsystem_vendor == 0x8086) &&
++ (dev->pdev->subsystem_device == 0x7270)) {
++ /* It's a Mac Mini or Macbook Pro. */
++
++ if (mode_dev->panel_fixed_mode != NULL &&
++ mode_dev->panel_fixed_mode->hdisplay == 800 &&
++ mode_dev->panel_fixed_mode->vdisplay == 600) {
++ DRM_DEBUG
++ ("Suspected Mac Mini, ignoring the LVDS\n");
++ goto failed_find;
++ }
++ }
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++
++ PSB_DEBUG_ENTRY("hdisplay = %d\n",
++ mode_dev->panel_fixed_mode->hdisplay);
++ PSB_DEBUG_ENTRY(" vdisplay = %d\n",
++ mode_dev->panel_fixed_mode->vdisplay);
++ PSB_DEBUG_ENTRY(" hsync_start = %d\n",
++ mode_dev->panel_fixed_mode->hsync_start);
++ PSB_DEBUG_ENTRY(" hsync_end = %d\n",
++ mode_dev->panel_fixed_mode->hsync_end);
++ PSB_DEBUG_ENTRY(" htotal = %d\n",
++ mode_dev->panel_fixed_mode->htotal);
++ PSB_DEBUG_ENTRY(" vsync_start = %d\n",
++ mode_dev->panel_fixed_mode->vsync_start);
++ PSB_DEBUG_ENTRY(" vsync_end = %d\n",
++ mode_dev->panel_fixed_mode->vsync_end);
++ PSB_DEBUG_ENTRY(" vtotal = %d\n",
++ mode_dev->panel_fixed_mode->vtotal);
++ PSB_DEBUG_ENTRY(" clock = %d\n",
++ mode_dev->panel_fixed_mode->clock);
++
++ return;
++
++failed_find:
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++failed_ddc:
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++failed_blc_i2c:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform start */
++
++/*
++ * FIXME need to move to register define head file
++ */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* The max/min PWM frequency in BPCR[31:17] - */
++/* The smallest value that can fit in the 15-bit field is 1
++ * (not 0); the hardware then */
++/* shifts it left by one bit to get the actual 16-bit
++ * value that the 15 bits correspond to. */
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++/**
++ * Calculate PWM control register value.
++ */
++#if 0
++static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
++{
++ unsigned long value = 0;
++ if (blc_freq == 0) {
++ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
++ * Frequency Requested is 0.\n"); */
++ return false;
++ }
++
++ value = (CoreClock * MHz);
++ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
++ value = (value * BLC_PWM_PRECISION_FACTOR);
++ value = (value / blc_freq);
++ value = (value / BLC_PWM_PRECISION_FACTOR);
++
++ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
++ return 0;
++ } else {
++ PWMControlRegFreq = (u32) value;
++ return 1;
++ }
++}
++#endif
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++ } else {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_lvds_set_power(dev, output, true);
++ else
++ mrst_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void mrst_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ u32 lvds_port;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++ /*FIXME JLIU7 Get panel power delay parameters from config data */
++ /*REG_WRITE(0x61208, 0x25807d0); */
++ /*REG_WRITE(0x6120c, 0x1f407d0); */
++ /*REG_WRITE(0x61210, 0x270f04); */
++
++ lvds_port = (REG_READ(LVDS) &
++ (~LVDS_PIPEB_SELECT)) |
++ LVDS_PORT_EN |
++ LVDS_BORDER_EN;
++
++ if (mode_dev->panel_wants_dither)
++ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(LVDS, lvds_port);
++
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++
++ if (curValue == DRM_MODE_SCALE_NO_SCALE)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
++ (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) > (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
++ .dpms = mrst_lvds_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = mrst_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revist it. */
++struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
++ *dev)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ if (dev_priv->vbt_data.Size != 0x00) { /*if non-zero, then use vbt*/
++
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++#if 0
++ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
++ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
++ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
++ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
++ printk(KERN_INFO "htotal is %d\n", mode->htotal);
++ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
++ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
++ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
++ printk(KERN_INFO "clock is %d\n", mode->clock);
++#endif
++ } else {
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec*/
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++ }
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/**
++ * mrst_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct edid *edid;
++ int ret = 0;
++ struct i2c_adapter *i2c_adap;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++
++ PSB_DEBUG_ENTRY("\n");
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
++
++ mode_dev->panel_wants_dither = false;
++ if (dev_priv->vbt_data.Size != 0x00)
++ mode_dev->panel_wants_dither = (dev_priv->gct_data.Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++ i2c_adap = i2c_get_adapter(2);
++ if (i2c_adap == NULL)
++ printk(KERN_ALERT "No ddc adapter available!\n");
++ /* Set up the DDC bus. */
++/* psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
++ GPIOC,
++ "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }*/
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ if (i2c_adap) {
++ edid = drm_get_edid(connector, i2c_adap);
++ if (edid) {
++ drm_mode_connector_update_edid_property(connector, edid);
++ ret = drm_add_edid_modes(connector, edid);
++ kfree(edid);
++ }
++
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++ }
++
++ /*
++	 * If we didn't get EDID, try getting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No LVDS modes found, disabling.\n");
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++
++/* failed_ddc: */
++
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform end */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_modes.c
+@@ -0,0 +1,77 @@
++/*
++ * Copyright (c) 2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <drm/drmP.h>
++#include "psb_intel_drv.h"
++
++/**
++ * psb_intel_ddc_probe
++ *
++ */
++bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
++{
++ u8 out_buf[] = { 0x0, 0x0 };
++ u8 buf[2];
++ int ret;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = 0x50,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = 0x50,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
++ if (ret == 2)
++ return true;
++
++ return false;
++}
++
++/**
++ * psb_intel_ddc_get_modes - get modelist from monitor
++ * @connector: DRM connector device to use
++ *
++ * Fetch the EDID information from @connector using the DDC bus.
++ */
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
++{
++ struct edid *edid;
++ int ret = 0;
++
++ edid =
++ drm_get_edid(&psb_intel_output->base,
++ &psb_intel_output->ddc_bus->adapter);
++ if (edid) {
++ drm_mode_connector_update_edid_property(&psb_intel_output->
++ base, edid);
++ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
++ kfree(edid);
++ }
++ return ret;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_reg.h
+@@ -0,0 +1,1232 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++#ifndef __PSB_INTEL_REG_H__
++#define __PSB_INTEL_REG_H__
++
++#define BLC_PWM_CTL 0x61254
++#define BLC_PWM_CTL2 0x61250
++#define BLC_PWM_CTL_C 0x62254
++#define BLC_PWM_CTL2_C 0x62250
++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
++/**
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
++#define BLM_LEGACY_MODE (1 << 16)
++/**
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
++
++#define I915_GCFGC 0xf0
++#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
++#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
++#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
++#define I915_DISPLAY_CLOCK_MASK (7 << 4)
++
++#define I855_HPLLCC 0xc0
++#define I855_CLOCK_CONTROL_MASK (3 << 0)
++#define I855_CLOCK_133_200 (0 << 0)
++#define I855_CLOCK_100_200 (1 << 0)
++#define I855_CLOCK_100_133 (2 << 0)
++#define I855_CLOCK_166_250 (3 << 0)
++
++/* I830 CRTC registers */
++#define HTOTAL_A 0x60000
++#define HBLANK_A 0x60004
++#define HSYNC_A 0x60008
++#define VTOTAL_A 0x6000c
++#define VBLANK_A 0x60010
++#define VSYNC_A 0x60014
++#define PIPEASRC 0x6001c
++#define BCLRPAT_A 0x60020
++#define VSYNCSHIFT_A 0x60028
++
++#define HTOTAL_B 0x61000
++#define HBLANK_B 0x61004
++#define HSYNC_B 0x61008
++#define VTOTAL_B 0x6100c
++#define VBLANK_B 0x61010
++#define VSYNC_B 0x61014
++#define PIPEBSRC 0x6101c
++#define BCLRPAT_B 0x61020
++#define VSYNCSHIFT_B 0x61028
++
++#define HTOTAL_C 0x62000
++#define HBLANK_C 0x62004
++#define HSYNC_C 0x62008
++#define VTOTAL_C 0x6200c
++#define VBLANK_C 0x62010
++#define VSYNC_C 0x62014
++#define PIPECSRC 0x6201c
++#define BCLRPAT_C 0x62020
++#define VSYNCSHIFT_C 0x62028
++
++#define PP_STATUS 0x61200
++# define PP_ON (1 << 31)
++/**
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++# define PP_READY (1 << 30)
++# define PP_SEQUENCE_NONE (0 << 28)
++# define PP_SEQUENCE_ON (1 << 28)
++# define PP_SEQUENCE_OFF (2 << 28)
++# define PP_SEQUENCE_MASK 0x30000000
++#define PP_CONTROL 0x61204
++# define POWER_TARGET_ON (1 << 0)
++
++#define LVDSPP_ON 0x61208
++#define LVDSPP_OFF 0x6120c
++#define PP_CYCLE 0x61210
++
++#define PFIT_CONTROL 0x61230
++# define PFIT_ENABLE (1 << 31)
++# define PFIT_PIPE_MASK (3 << 29)
++# define PFIT_PIPE_SHIFT 29
++# define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
++# define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
++# define VERT_INTERP_DISABLE (0 << 10)
++# define VERT_INTERP_BILINEAR (1 << 10)
++# define VERT_INTERP_MASK (3 << 10)
++# define VERT_AUTO_SCALE (1 << 9)
++# define HORIZ_INTERP_DISABLE (0 << 6)
++# define HORIZ_INTERP_BILINEAR (1 << 6)
++# define HORIZ_INTERP_MASK (3 << 6)
++# define HORIZ_AUTO_SCALE (1 << 5)
++# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
++
++#define PFIT_PGM_RATIOS 0x61234
++# define PFIT_VERT_SCALE_MASK 0xfff00000
++# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
++
++#define PFIT_AUTO_RATIOS 0x61238
++
++
++#define DPLL_A 0x06014
++#define DPLL_B 0x06018
++# define DPLL_VCO_ENABLE (1 << 31)
++# define DPLL_DVO_HIGH_SPEED (1 << 30)
++# define DPLL_SYNCLOCK_ENABLE (1 << 29)
++# define DPLL_VGA_MODE_DIS (1 << 28)
++# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
++# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
++# define DPLL_MODE_MASK (3 << 26)
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
++# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
++# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++/**
++ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
++ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
++/**
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
++# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
++# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
++ * in DVO non-gang */
++# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
++# define PLL_REF_INPUT_DREFCLK (0 << 13)
++# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
++# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
++ * TVCLKIN */
++# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++# define PLL_REF_INPUT_MASK (3 << 13)
++# define PLL_LOAD_PULSE_PHASE_SHIFT 9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
++
++/**
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++# define SDVO_MULTIPLIER_MASK 0x000000ff
++# define SDVO_MULTIPLIER_SHIFT_HIRES 4
++# define SDVO_MULTIPLIER_SHIFT_VGA 0
++
++/** @defgroup DPLL_MD
++ * @{
++ */
++/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_A_MD 0x0601c
++/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_B_MD 0x06020
++/**
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
++ */
++# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
++# define DPLL_MD_UDI_DIVIDER_SHIFT 24
++/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
++# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
++/**
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
++# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
++/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This is best set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
++# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
++/** @} */
++
++#define DPLL_TEST 0x606c
++# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
++# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
++# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
++# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
++# define DPLLB_TEST_N_BYPASS (1 << 19)
++# define DPLLB_TEST_M_BYPASS (1 << 18)
++# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
++# define DPLLA_TEST_N_BYPASS (1 << 3)
++# define DPLLA_TEST_M_BYPASS (1 << 2)
++# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
++
++#define ADPA 0x61100
++#define ADPA_DAC_ENABLE (1<<31)
++#define ADPA_DAC_DISABLE 0
++#define ADPA_PIPE_SELECT_MASK (1<<30)
++#define ADPA_PIPE_A_SELECT 0
++#define ADPA_PIPE_B_SELECT (1<<30)
++#define ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define ADPA_SETS_HVPOLARITY 0
++#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define ADPA_VSYNC_CNTL_ENABLE 0
++#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define ADPA_HSYNC_CNTL_ENABLE 0
++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define ADPA_VSYNC_ACTIVE_LOW 0
++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define ADPA_HSYNC_ACTIVE_LOW 0
++
++#define FPA0 0x06040
++#define FPA1 0x06044
++#define FPB0 0x06048
++#define FPB1 0x0604c
++# define FP_N_DIV_MASK 0x003f0000
++# define FP_N_DIV_SHIFT 16
++# define FP_M1_DIV_MASK 0x00003f00
++# define FP_M1_DIV_SHIFT 8
++# define FP_M2_DIV_MASK 0x0000003f
++# define FP_M2_DIV_SHIFT 0
++
++
++#define PORT_HOTPLUG_EN 0x61110
++# define SDVOB_HOTPLUG_INT_EN (1 << 26)
++# define SDVOC_HOTPLUG_INT_EN (1 << 25)
++# define TV_HOTPLUG_INT_EN (1 << 18)
++# define CRT_HOTPLUG_INT_EN (1 << 9)
++# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
++
++#define PORT_HOTPLUG_STAT 0x61114
++# define CRT_HOTPLUG_INT_STATUS (1 << 11)
++# define TV_HOTPLUG_INT_STATUS (1 << 10)
++# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
++# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
++# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
++# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
++# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
++# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++
++#define SDVOB 0x61140
++#define SDVOC 0x61160
++#define SDVO_ENABLE (1 << 31)
++#define SDVO_PIPE_B_SELECT (1 << 30)
++#define SDVO_STALL_SELECT (1 << 29)
++#define SDVO_INTERRUPT_ENABLE (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
++#define SDVO_PORT_MULTIPLY_SHIFT 23
++#define SDVO_PHASE_SELECT_MASK (15 << 19)
++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
++#define SDVOC_GANG_MODE (1 << 16)
++#define SDVO_BORDER_ENABLE (1 << 7)
++#define SDVOB_PCIE_CONCURRENCY (1 << 3)
++#define SDVO_DETECTED (1 << 2)
++/* Bits to be preserved when writing */
++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
++#define SDVOC_PRESERVE_MASK (1 << 17)
++
++/** @defgroup LVDS
++ * @{
++ */
++/**
++ * This register controls the LVDS output enable, pipe selection, and data
++ * format selection.
++ *
++ * All of the clock/data pairs are force powered down by power sequencing.
++ */
++#define LVDS 0x61180
++/**
++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++# define LVDS_PORT_EN (1 << 31)
++/** Selects pipe B for LVDS data. Must be set on pre-965. */
++# define LVDS_PIPEB_SELECT (1 << 30)
++
++/** Turns on border drawing to allow centered display. */
++# define LVDS_BORDER_EN (1 << 15)
++
++/**
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
++# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
++# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
++/**
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++# define LVDS_A3_POWER_MASK (3 << 6)
++# define LVDS_A3_POWER_DOWN (0 << 6)
++# define LVDS_A3_POWER_UP (3 << 6)
++/**
++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++# define LVDS_CLKB_POWER_MASK (3 << 4)
++# define LVDS_CLKB_POWER_DOWN (0 << 4)
++# define LVDS_CLKB_POWER_UP (3 << 4)
++
++/**
++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode. The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++# define LVDS_B0B3_POWER_MASK (3 << 2)
++# define LVDS_B0B3_POWER_DOWN (0 << 2)
++# define LVDS_B0B3_POWER_UP (3 << 2)
++
++#define PIPEACONF 0x70008
++#define PIPEACONF_ENABLE (1<<31)
++#define PIPEACONF_DISABLE 0
++#define PIPEACONF_DOUBLE_WIDE (1<<30)
++#define PIPECONF_ACTIVE (1<<30)
++#define I965_PIPECONF_ACTIVE (1<<30)
++#define PIPECONF_DSIPLL_LOCK (1<<29)
++#define PIPEACONF_SINGLE_WIDE 0
++#define PIPEACONF_PIPE_UNLOCKED 0
++#define PIPEACONF_DSR (1<<26)
++#define PIPEACONF_PIPE_LOCKED (1<<25)
++#define PIPEACONF_PALETTE 0
++#define PIPECONF_FORCE_BORDER (1<<25)
++#define PIPEACONF_GAMMA (1<<24)
++#define PIPECONF_PROGRESSIVE (0 << 21)
++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
++#define PIPECONF_PLANE_OFF (1<<19)
++#define PIPECONF_CURSOR_OFF (1<<18)
++
++
++#define PIPEBCONF 0x71008
++#define PIPEBCONF_ENABLE (1<<31)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_DOUBLE_WIDE (1<<30)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_GAMMA (1<<24)
++#define PIPEBCONF_PALETTE 0
++
++#define PIPECCONF 0x72008
++
++#define PIPEBGCMAXRED 0x71010
++#define PIPEBGCMAXGREEN 0x71014
++#define PIPEBGCMAXBLUE 0x71018
++
++#define PIPEASTAT 0x70024
++#define PIPEBSTAT 0x71024
++#define PIPE_VBLANK_CLEAR (1 << 1)
++#define PIPE_VSYNC_CLEAR (1UL<<9)
++#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
++#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18)
++
++
++#define PIPE_VSYNC_ENABL (1UL<<25)
++#define PIPE_HDMI_AUDIO_UNDERRUN (1UL<<26)
++#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL<<27)
++#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | PIPE_HDMI_AUDIO_BUFFER_DONE)
++#define PIPEB_EVENT_MASK (BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT18|BIT17|BIT16)
++#define HISTOGRAM_INT_CONTROL 0x61268
++#define HISTOGRAM_BIN_DATA 0X61264
++#define HISTOGRAM_LOGIC_CONTROL 0x61260
++#define PWM_CONTROL_LOGIC 0x61250
++#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
++#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
++#define PIPE_DPST_EVENT_STATUS (1UL<<7)
++#define HISTOGRAM_INTERRUPT_ENABLE (1UL<<31)
++#define HISTOGRAM_LOGIC_ENABLE (1UL<<31)
++#define PWM_LOGIC_ENABLE (1UL<<31)
++#define PWM_PHASEIN_ENABLE (1UL<<25)
++#define PWM_PHASEIN_INT_ENABLE (1UL<<24)
++#define PWM_PHASEIN_VB_COUNT 0x00001f00
++#define PWM_PHASEIN_INC 0x0000001f
++#define HISTOGRAM_INT_CTRL_CLEAR (1UL<<30)
++#define DPST_YUV_LUMA_MODE 0
++
++struct dpst_ie_histogram_control {
++ union {
++ uint32_t data;
++ struct {
++ uint32_t bin_reg_index:7;
++ uint32_t reserved:4;
++ uint32_t bin_reg_func_select:1;
++ uint32_t sync_to_phase_in:1;
++ uint32_t alt_enhancement_mode:2;
++ uint32_t reserved1:1;
++ uint32_t sync_to_phase_in_count:8;
++ uint32_t histogram_mode_select:1;
++ uint32_t reserved2:4;
++ uint32_t ie_pipe_assignment:1;
++ uint32_t ie_mode_table_enabled:1;
++ uint32_t ie_histogram_enable:1;
++ };
++ };
++};
++
++struct dpst_guardband {
++ union {
++ uint32_t data;
++ struct {
++ uint32_t guardband:22;
++ uint32_t guardband_interrupt_delay:8;
++ uint32_t interrupt_status:1;
++ uint32_t interrupt_enable:1;
++ };
++ };
++};
++
++#define PIPEAFRAMEHIGH 0x70040
++#define PIPEAFRAMEPIXEL 0x70044
++#define PIPEBFRAMEHIGH 0x71040
++#define PIPEBFRAMEPIXEL 0x71044
++#define PIPECFRAMEHIGH 0x72040
++#define PIPECFRAMEPIXEL 0x72044
++#define PIPE_FRAME_HIGH_MASK 0x0000ffff
++#define PIPE_FRAME_HIGH_SHIFT 0
++#define PIPE_FRAME_LOW_MASK 0xff000000
++#define PIPE_FRAME_LOW_SHIFT 24
++#define PIPE_PIXEL_MASK 0x00ffffff
++#define PIPE_PIXEL_SHIFT 0
++
++#define DSPARB 0x70030
++#define DSPFW1 0x70034
++#define DSPFW2 0x70038
++#define DSPFW3 0x7003c
++#define DSPFW4 0x70050
++#define DSPFW5 0x70054
++#define DSPFW6 0x70058
++#define DSPCHICKENBIT 0x70400
++#define DSPACNTR 0x70180
++#define DSPBCNTR 0x71180
++#define DSPCCNTR 0x72180
++#define DISPLAY_PLANE_ENABLE (1<<31)
++#define DISPLAY_PLANE_DISABLE 0
++#define DISPPLANE_GAMMA_ENABLE (1<<30)
++#define DISPPLANE_GAMMA_DISABLE 0
++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
++#define DISPPLANE_8BPP (0x2<<26)
++#define DISPPLANE_15_16BPP (0x4<<26)
++#define DISPPLANE_16BPP (0x5<<26)
++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
++#define DISPPLANE_32BPP (0x7<<26)
++#define DISPPLANE_STEREO_ENABLE (1<<25)
++#define DISPPLANE_STEREO_DISABLE 0
++#define DISPPLANE_SEL_PIPE_MASK (1<<24)
++#define DISPPLANE_SEL_PIPE_POS 24
++#define DISPPLANE_SEL_PIPE_A 0
++#define DISPPLANE_SEL_PIPE_B (1<<24)
++#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
++#define DISPPLANE_SRC_KEY_DISABLE 0
++#define DISPPLANE_LINE_DOUBLE (1<<20)
++#define DISPPLANE_NO_LINE_DOUBLE 0
++#define DISPPLANE_STEREO_POLARITY_FIRST 0
++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++/* plane B only */
++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE 0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
++#define DISPPLANE_BOTTOM (4)
++
++#define DSPABASE 0x70184
++#define DSPALINOFF 0x70184
++#define DSPASTRIDE 0x70188
++
++#define DSPBBASE 0x71184
++#define DSPBLINOFF 0X71184
++#define DSPBADDR DSPBBASE
++#define DSPBSTRIDE 0x71188
++
++#define DSPCBASE 0x72184
++#define DSPCLINOFF 0x72184
++#define DSPCSTRIDE 0x72188
++
++#define DSPAKEYVAL 0x70194
++#define DSPAKEYMASK 0x70198
++
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#define DSPBPOS 0x7118C
++#define DSPBSIZE 0x71190
++#define DSPCPOS 0x7218C
++#define DSPCSIZE 0x72190
++
++#define DSPASURF 0x7019C
++#define DSPATILEOFF 0x701A4
++
++#define DSPBSURF 0x7119C
++#define DSPBTILEOFF 0x711A4
++
++#define DSPCSURF 0x7219C
++#define DSPCTILEOFF 0x721A4
++#define DSPCKEYMAXVAL 0x721A0
++#define DSPCKEYMINVAL 0x72194
++#define DSPCKEYMSK 0x72198
++
++#define VGACNTRL 0x71400
++# define VGA_DISP_DISABLE (1 << 31)
++# define VGA_2X_MODE (1 << 30)
++# define VGA_PIPE_B_SELECT (1 << 29)
++
++/*
++ * Overlay registers
++ */
++#define OV_OVADD 0x30000
++# define OV_PIPE_SELECT (BIT6|BIT7)
++# define OV_PIPE_SELECT_POS 6
++# define OV_PIPE_A 0
++# define OV_PIPE_C 1
++#define OV_OGAMC5 0x30010
++#define OV_OGAMC4 0x30014
++#define OV_OGAMC3 0x30018
++#define OV_OGAMC2 0x3001C
++#define OV_OGAMC1 0x30020
++#define OV_OGAMC0 0x30024
++
++/*
++ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
++ * of video memory available to the BIOS in SWF1.
++ */
++
++#define SWF0 0x71410
++#define SWF1 0x71414
++#define SWF2 0x71418
++#define SWF3 0x7141c
++#define SWF4 0x71420
++#define SWF5 0x71424
++#define SWF6 0x71428
++
++/*
++ * 855 scratch registers.
++ */
++#define SWF00 0x70410
++#define SWF01 0x70414
++#define SWF02 0x70418
++#define SWF03 0x7041c
++#define SWF04 0x70420
++#define SWF05 0x70424
++#define SWF06 0x70428
++
++#define SWF10 SWF0
++#define SWF11 SWF1
++#define SWF12 SWF2
++#define SWF13 SWF3
++#define SWF14 SWF4
++#define SWF15 SWF5
++#define SWF16 SWF6
++
++#define SWF30 0x72414
++#define SWF31 0x72418
++#define SWF32 0x7241c
++
++
++/*
++ * Palette registers
++ */
++#define PALETTE_A 0x0a000
++#define PALETTE_B 0x0a800
++#define PALETTE_C 0x0ac00
++
++#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
++#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
++#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
++
++
++/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
++#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
++#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
++#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
++#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
++
++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
++ (dev)->pci_device == 0x2982 || \
++ (dev)->pci_device == 0x2992 || \
++ (dev)->pci_device == 0x29A2 || \
++ (dev)->pci_device == 0x2A02 || \
++ (dev)->pci_device == 0x2A12)
++
++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
++
++#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
++ (dev)->pci_device == 0x29B2 || \
++ (dev)->pci_device == 0x29D2)
++
++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
++ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
++ IS_MRST(dev))
++
++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
++ IS_I945GM(dev) || IS_I965GM(dev) || \
++ IS_POULSBO(dev) || IS_MRST(dev))
++
++/* Cursor A & B regs */
++#define CURACNTR 0x70080
++#define CURSOR_MODE_DISABLE 0x00
++#define CURSOR_MODE_64_32B_AX 0x07
++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE (1 << 26)
++#define CURABASE 0x70084
++#define CURAPOS 0x70088
++#define CURSOR_POS_MASK 0x007FF
++#define CURSOR_POS_SIGN 0x8000
++#define CURSOR_X_SHIFT 0
++#define CURSOR_Y_SHIFT 16
++#define CURBCNTR 0x700c0
++#define CURBBASE 0x700c4
++#define CURBPOS 0x700c8
++#define CURCCNTR 0x700e0
++#define CURCBASE 0x700e4
++#define CURCPOS 0x700e8
++
++/*
++ * Interrupt Registers
++ */
++#define IER 0x020a0
++#define IIR 0x020a4
++#define IMR 0x020a8
++#define ISR 0x020ac
++
++/*
++ * MOORESTOWN delta registers
++ */
++#define MRST_DPLL_A 0x0f014
++#define MDFLD_DPLL_B 0x0f018
++#define MDFLD_INPUT_REF_SEL (1 << 14)
++#define MDFLD_VCO_SEL (1 << 16)
++#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
++#define MDFLD_PLL_LATCHEN (1 << 28)
++#define MDFLD_PWR_GATE_EN (1 << 30)
++#define MDFLD_P1_MASK (0x1FF << 17)
++#define MRST_FPA0 0x0f040
++#define MRST_FPA1 0x0f044
++#define MDFLD_DPLL_DIV0 0x0f048
++#define MDFLD_DPLL_DIV1 0x0f04c
++#define MRST_PERF_MODE 0x020f4
++
++/* MEDFIELD HDMI registers */
++#define HDMIPHYMISCCTL 0x61134
++# define HDMI_PHY_POWER_DOWN 0x7f
++#define HDMIB_CONTROL 0x61140
++# define HDMIB_PORT_EN (1 << 31)
++# define HDMIB_PIPE_B_SELECT (1 << 30)
++# define HDMIB_NULL_PACKET (1 << 9)
++#define HDMIB_HDCP_PORT (1 << 5)
++
++/* #define LVDS 0x61180 */
++# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
++# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
++# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
++
++#define MIPI 0x61190
++#define MIPI_C 0x62190
++# define MIPI_PORT_EN (1 << 31)
++/** Turns on border drawing to allow centered display. */
++# define SEL_FLOPPED_HSTX (1 << 23)
++# define PASS_FROM_SPHY_TO_AFE (1 << 16)
++# define MIPI_BORDER_EN (1 << 15)
++# define MIPIA_3LANE_MIPIC_1LANE 0x1
++# define MIPIA_2LANE_MIPIC_2LANE 0x2
++# define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
++# define TE_TRIGGER_GPIO_PIN (1 << 3)
++#define MIPI_TE_COUNT 0x61194
++
++/* #define PP_CONTROL 0x61204 */
++# define POWER_DOWN_ON_RESET (1 << 1)
++
++/* #define PFIT_CONTROL 0x61230 */
++# define PFIT_PIPE_SELECT (3 << 29)
++# define PFIT_PIPE_SELECT_SHIFT (29)
++
++/* #define BLC_PWM_CTL 0x61254 */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* #define PIPEACONF 0x70008 */
++#define PIPEACONF_PIPE_STATE (1<<30)
++/* #define DSPACNTR 0x70180 */
++#if 0 /*FIXME JLIU7 need to define the following */
++1000 = 32 - bit RGBX(10 : 10 : 10 : 2)
++pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX
++(16 : 16 : 16 : 16) 16 bit floating point pixel format.
++Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format.
++ Ignore
++ alpha.
++#endif /*FIXME JLIU7 need to define the following */
++
++#define MRST_DSPABASE 0x7019c
++#define MRST_DSPBBASE 0x7119c
++#define MDFLD_DSPCBASE 0x7219c
++
++/*
++ * MOORESTOWN reserved registers
++ */
++#if 0
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#endif
++/*
++ * Moorestown registers.
++ */
++/*===========================================================================
++; General Constants
++;--------------------------------------------------------------------------*/
++#define BIT0 0x00000001
++#define BIT1 0x00000002
++#define BIT2 0x00000004
++#define BIT3 0x00000008
++#define BIT4 0x00000010
++#define BIT5 0x00000020
++#define BIT6 0x00000040
++#define BIT7 0x00000080
++#define BIT8 0x00000100
++#define BIT9 0x00000200
++#define BIT10 0x00000400
++#define BIT11 0x00000800
++#define BIT12 0x00001000
++#define BIT13 0x00002000
++#define BIT14 0x00004000
++#define BIT15 0x00008000
++#define BIT16 0x00010000
++#define BIT17 0x00020000
++#define BIT18 0x00040000
++#define BIT19 0x00080000
++#define BIT20 0x00100000
++#define BIT21 0x00200000
++#define BIT22 0x00400000
++#define BIT23 0x00800000
++#define BIT24 0x01000000
++#define BIT25 0x02000000
++#define BIT26 0x04000000
++#define BIT27 0x08000000
++#define BIT28 0x10000000
++#define BIT29 0x20000000
++#define BIT30 0x40000000
++#define BIT31 0x80000000
++/*===========================================================================
++; MIPI IP registers
++;--------------------------------------------------------------------------*/
++#define MIPIC_REG_OFFSET 0x800
++#define DEVICE_READY_REG 0xb000
++# define ENTERING_ULPS (2 << 1)
++# define EXITING_ULPS (1 << 1)
++# define ULPS_MASK (3 << 1)
++# define BUS_POSSESSION (1 << 3)
++#define INTR_STAT_REG 0xb004
++#define RX_SOT_ERROR BIT0
++#define RX_SOT_SYNC_ERROR BIT1
++#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
++#define RX_LP_TX_SYNC_ERROR BIT4
++#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
++#define RX_FALSE_CONTROL_ERROR BIT6
++#define RX_ECC_SINGLE_BIT_ERROR BIT7
++#define RX_ECC_MULTI_BIT_ERROR BIT8
++#define RX_CHECKSUM_ERROR BIT9
++#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
++#define RX_DSI_VC_ID_INVALID BIT11
++#define TX_FALSE_CONTROL_ERROR BIT12
++#define TX_ECC_SINGLE_BIT_ERROR BIT13
++#define TX_ECC_MULTI_BIT_ERROR BIT14
++#define TX_CHECKSUM_ERROR BIT15
++#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
++#define TX_DSI_VC_ID_INVALID BIT17
++#define HIGH_CONTENTION BIT18
++#define LOW_CONTENTION BIT19
++#define DPI_FIFO_UNDER_RUN BIT20
++#define HS_TX_TIMEOUT BIT21
++#define LP_RX_TIMEOUT BIT22
++#define TURN_AROUND_ACK_TIMEOUT BIT23
++#define ACK_WITH_NO_ERROR BIT24
++#define HS_GENERIC_WR_FIFO_FULL BIT27
++#define LP_GENERIC_WR_FIFO_FULL BIT28
++#define SPL_PKT_SENT BIT30
++#define INTR_EN_REG 0xb008
++#define DSI_FUNC_PRG_REG 0xb00c
++#define DPI_CHANNEL_NUMBER_POS 0x03
++#define DBI_CHANNEL_NUMBER_POS 0x05
++#define FMT_DPI_POS 0x07
++#define FMT_DBI_POS 0x0A
++#define DBI_DATA_WIDTH_POS 0x0D
++/* DPI PIXEL FORMATS */
++#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
++#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
++#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
++ * 666 FORMAT
++ */
++#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
++#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
++#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
++#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
++#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
++#define DBI_NOT_SUPPORTED 0x00 /* command mode
++ * is not supported
++ */
++#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
++#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
++#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
++#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
++#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
++#define HS_TX_TIMEOUT_REG 0xb010
++#define LP_RX_TIMEOUT_REG 0xb014
++#define TURN_AROUND_TIMEOUT_REG 0xb018
++#define DEVICE_RESET_REG 0xb01C
++#define DPI_RESOLUTION_REG 0xb020
++#define RES_V_POS 0x10
++#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
++#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
++#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
++#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
++#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
++#define VERT_SYNC_PAD_COUNT_REG 0xb038
++#define VERT_BACK_PORCH_COUNT_REG 0xb03c
++#define VERT_FRONT_PORCH_COUNT_REG 0xb040
++#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
++#define DPI_CONTROL_REG 0xb048
++#define DPI_SHUT_DOWN BIT0
++#define DPI_TURN_ON BIT1
++#define DPI_COLOR_MODE_ON BIT2
++#define DPI_COLOR_MODE_OFF BIT3
++#define DPI_BACK_LIGHT_ON BIT4
++#define DPI_BACK_LIGHT_OFF BIT5
++#define DPI_LP BIT6
++#define DPI_DATA_REG 0xb04c
++#define DPI_BACK_LIGHT_ON_DATA 0x07
++#define DPI_BACK_LIGHT_OFF_DATA 0x17
++#define INIT_COUNT_REG 0xb050
++#define MAX_RET_PAK_REG 0xb054
++#define VIDEO_FMT_REG 0xb058
++#define COMPLETE_LAST_PCKT BIT2
++#define EOT_DISABLE_REG 0xb05c
++#define ENABLE_CLOCK_STOPPING BIT1
++#define LP_BYTECLK_REG 0xb060
++#define LP_GEN_DATA_REG 0xb064
++#define HS_GEN_DATA_REG 0xb068
++#define LP_GEN_CTRL_REG 0xb06C
++#define HS_GEN_CTRL_REG 0xb070
++#define DCS_CHANNEL_NUMBER_POS 0x06
++#define MCS_COMMANDS_POS 0x8
++#define WORD_COUNTS_POS 0x8
++#define MCS_PARAMETER_POS 0x10
++#define GEN_FIFO_STAT_REG 0xb074
++#define HS_DATA_FIFO_FULL BIT0
++#define HS_DATA_FIFO_HALF_EMPTY BIT1
++#define HS_DATA_FIFO_EMPTY BIT2
++#define LP_DATA_FIFO_FULL BIT8
++#define LP_DATA_FIFO_HALF_EMPTY BIT9
++#define LP_DATA_FIFO_EMPTY BIT10
++#define HS_CTRL_FIFO_FULL BIT16
++#define HS_CTRL_FIFO_HALF_EMPTY BIT17
++#define HS_CTRL_FIFO_EMPTY BIT18
++#define LP_CTRL_FIFO_FULL BIT24
++#define LP_CTRL_FIFO_HALF_EMPTY BIT25
++#define LP_CTRL_FIFO_EMPTY BIT26
++#define DBI_FIFO_EMPTY BIT27
++#define DPI_FIFO_EMPTY BIT28
++#define HS_LS_DBI_ENABLE_REG 0xb078
++#define TXCLKESC_REG 0xb07c
++#define DPHY_PARAM_REG 0xb080
++#define DBI_BW_CTRL_REG 0xb084
++#define CLK_LANE_SWT_REG 0xb088
++/*===========================================================================
++; MIPI Adapter registers
++;--------------------------------------------------------------------------*/
++#define MIPI_CONTROL_REG 0xb104
++#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
++#define MIPI_DATA_ADDRESS_REG 0xb108
++#define MIPI_DATA_LENGTH_REG 0xb10C
++#define MIPI_COMMAND_ADDRESS_REG 0xb110
++#define MIPI_COMMAND_LENGTH_REG 0xb114
++#define MIPI_READ_DATA_RETURN_REG0 0xb118
++#define MIPI_READ_DATA_RETURN_REG1 0xb11C
++#define MIPI_READ_DATA_RETURN_REG2 0xb120
++#define MIPI_READ_DATA_RETURN_REG3 0xb124
++#define MIPI_READ_DATA_RETURN_REG4 0xb128
++#define MIPI_READ_DATA_RETURN_REG5 0xb12C
++#define MIPI_READ_DATA_RETURN_REG6 0xb130
++#define MIPI_READ_DATA_RETURN_REG7 0xb134
++#define MIPI_READ_DATA_VALID_REG 0xb138
++/* DBI COMMANDS */
++#define soft_reset 0x01
++/* ************************************************************************* *\
++The display module performs a software reset.
++Registers are written with their SW Reset default values.
++\* ************************************************************************* */
++#define get_power_mode 0x0a
++/* ************************************************************************* *\
++The display module returns the current power mode
++\* ************************************************************************* */
++#define get_address_mode 0x0b
++/* ************************************************************************* *\
++The display module returns the current status.
++\* ************************************************************************* */
++#define get_pixel_format 0x0c
++/* ************************************************************************* *\
++This command gets the pixel format for the RGB image data
++used by the interface.
++\* ************************************************************************* */
++#define get_display_mode 0x0d
++/* ************************************************************************* *\
++The display module returns the Display Image Mode status.
++\* ************************************************************************* */
++#define get_signal_mode 0x0e
++/* ************************************************************************* *\
++The display module returns the Display Signal Mode.
++\* ************************************************************************* */
++#define get_diagnostic_result 0x0f
++/* ************************************************************************* *\
++The display module returns the self-diagnostic results following
++a Sleep Out command.
++\* ************************************************************************* */
++#define enter_sleep_mode 0x10
++/* ************************************************************************* *\
++This command causes the display module to enter the Sleep mode.
++In this mode, all unnecessary blocks inside the display module are disabled
++except interface communication. This is the lowest power mode
++the display module supports.
++\* ************************************************************************* */
++#define exit_sleep_mode 0x11
++/* ************************************************************************* *\
++This command causes the display module to exit Sleep mode.
++All blocks inside the display module are enabled.
++\* ************************************************************************* */
++#define enter_partial_mode 0x12
++/* ************************************************************************* *\
++This command causes the display module to enter the Partial Display Mode.
++The Partial Display Mode window is described by the set_partial_area command.
++\* ************************************************************************* */
++#define enter_normal_mode 0x13
++/* ************************************************************************* *\
++This command causes the display module to enter the Normal mode.
++Normal Mode is defined as Partial Display mode and Scroll mode are off
++\* ************************************************************************* */
++#define exit_invert_mode 0x20
++/* ************************************************************************* *\
++This command causes the display module to stop inverting the image data on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define enter_invert_mode 0x21
++/* ************************************************************************* *\
++This command causes the display module to invert the image data only on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_gamma_curve 0x26
++/* ************************************************************************* *\
++This command selects the desired gamma curve for the display device.
++Four fixed gamma curves are defined in section DCS spec.
++\* ************************************************************************* */
++#define set_display_off 0x28
++/* ************************************************************************* *\
++This command causes the display module to stop displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_display_on 0x29
++/* ************************************************************************* *\
++This command causes the display module to start displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_column_address 0x2a
++/* ************************************************************************* *\
++This command defines the column extent of the frame memory accessed by the
++hostprocessor with the read_memory_continue and write_memory_continue commands.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_page_addr 0x2b
++/* ************************************************************************* *\
++This command defines the page extent of the frame memory accessed by the host
++processor with the write_memory_continue and read_memory_continue command.
++No status bits are changed.
++\* ************************************************************************* */
++#define write_mem_start 0x2c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module s frame memory starting at the pixel location specified by
++preceding set_column_address and set_page_address commands.
++\* ************************************************************************* */
++#define set_partial_area 0x30
++/* ************************************************************************* *\
++This command defines the Partial Display mode s display area.
++There are two parameters associated with
++this command, the first defines the Start Row (SR) and the second the End Row
++(ER). SR and ER refer to the Frame Memory Line Pointer.
++\* ************************************************************************* */
++#define set_scroll_area 0x33
++/* ************************************************************************* *\
++This command defines the display modules Vertical Scrolling Area.
++\* ************************************************************************* */
++#define set_tear_off 0x34
++/* ************************************************************************* *\
++This command turns off the display modules Tearing Effect output signal on
++the TE signal line.
++\* ************************************************************************* */
++#define set_tear_on 0x35
++/* ************************************************************************* *\
++This command turns on the display modules Tearing Effect output signal
++on the TE signal line.
++\* ************************************************************************* */
++#define set_address_mode 0x36
++/* ************************************************************************* *\
++This command sets the data order for transfers from the host processor to
++display modules frame memory,bits B[7:5] and B3, and from the display
++modules frame memory to the display device, bits B[2:0] and B4.
++\* ************************************************************************* */
++#define set_scroll_start 0x37
++/* ************************************************************************* *\
++This command sets the start of the vertical scrolling area in the frame memory.
++The vertical scrolling area is fully defined when this command is used with
++the set_scroll_area command The set_scroll_start command has one parameter,
++the Vertical Scroll Pointer. The VSP defines the line in the frame memory
++that is written to the display device as the first line of the vertical
++scroll area.
++\* ************************************************************************* */
++#define exit_idle_mode 0x38
++/* ************************************************************************* *\
++This command causes the display module to exit Idle mode.
++\* ************************************************************************* */
++#define enter_idle_mode 0x39
++/* ************************************************************************* *\
++This command causes the display module to enter Idle Mode.
++In Idle Mode, color expression is reduced. Colors are shown on the display
++device using the MSB of each of the R, G and B color components in the frame
++memory
++\* ************************************************************************* */
++#define set_pixel_format 0x3a
++/* ************************************************************************* *\
++This command sets the pixel format for the RGB image data used by the interface.
++Bits D[6:4] DPI Pixel Format Definition
++Bits D[2:0] DBI Pixel Format Definition
++Bits D7 and D3 are not used.
++\* ************************************************************************* */
++ #define DCS_PIXEL_FORMAT_3bbp 0x1
++ #define DCS_PIXEL_FORMAT_8bbp 0x2
++ #define DCS_PIXEL_FORMAT_12bbp 0x3
++ #define DCS_PIXEL_FORMAT_16bbp 0x5
++ #define DCS_PIXEL_FORMAT_18bbp 0x6
++ #define DCS_PIXEL_FORMAT_24bbp 0x7
++#define write_mem_cont 0x3c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module's frame memory continuing from the pixel location following the
++previous write_memory_continue or write_memory_start command.
++\* ************************************************************************* */
++#define set_tear_scanline 0x44
++/* ************************************************************************* *\
++This command turns on the display modules Tearing Effect output signal on the
++TE signal line when the display module reaches line N.
++\* ************************************************************************* */
++#define get_scanline 0x45
++/* ************************************************************************* *\
++The display module returns the current scanline, N, used to update the
++display device. The total number of scanlines on a display device is
++defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
++the first line of V Sync and is denoted as Line 0.
++When in Sleep Mode, the value returned by get_scanline is undefined.
++\* ************************************************************************* */
++
++/* MCS or Generic COMMANDS */
++/* MCS/generic data type */
++#define GEN_SHORT_WRITE_0 0x03 /* generic short write, no parameters */
++#define GEN_SHORT_WRITE_1 0x13 /* generic short write, 1 parameters */
++#define GEN_SHORT_WRITE_2 0x23 /* generic short write, 2 parameters */
++#define GEN_READ_0 0x04 /* generic read, no parameters */
++#define GEN_READ_1 0x14 /* generic read, 1 parameters */
++#define GEN_READ_2 0x24 /* generic read, 2 parameters */
++#define GEN_LONG_WRITE 0x29 /* generic long write */
++#define MCS_SHORT_WRITE_0 0x05 /* MCS short write, no parameters */
++#define MCS_SHORT_WRITE_1 0x15 /* MCS short write, 1 parameters */
++#define MCS_READ 0x06 /* MCS read, no parameters */
++#define MCS_LONG_WRITE 0x39 /* MCS long write */
++/* MCS/generic commands */
++#define write_display_profile 0x50
++#define write_display_brightness 0x51
++#define write_ctrl_display 0x53
++#define write_ctrl_cabc 0x55
++ #define UI_IMAGE 0x01
++ #define STILL_IMAGE 0x02
++ #define MOVING_IMAGE 0x03
++#define write_hysteresis 0x57
++#define write_gamma_setting 0x58
++#define write_cabc_min_bright 0x5e
++#define write_kbbc_profile 0x60
++/* ************************************************************************* *\
++This command is used to control ambient light, panel backlight brightness and
++gamma settings.
++\* ************************************************************************* */
++#define BRIGHT_CNTL_BLOCK_ON BIT5
++#define AMBIENT_LIGHT_SENSE_ON BIT4
++#define DISPLAY_DIMMING_ON BIT3
++#define BACKLIGHT_ON BIT2
++#define DISPLAY_BRIGHTNESS_AUTO BIT1
++#define GAMMA_AUTO BIT0
++
++/* DCS Interface Pixel Formats */
++#define DCS_PIXEL_FORMAT_3BPP 0x1
++#define DCS_PIXEL_FORMAT_8BPP 0x2
++#define DCS_PIXEL_FORMAT_12BPP 0x3
++#define DCS_PIXEL_FORMAT_16BPP 0x5
++#define DCS_PIXEL_FORMAT_18BPP 0x6
++#define DCS_PIXEL_FORMAT_24BPP 0x7
++/* ONE PARAMETER READ DATA */
++#define addr_mode_data 0xfc
++#define diag_res_data 0x00
++#define disp_mode_data 0x23
++#define pxl_fmt_data 0x77
++#define pwr_mode_data 0x74
++#define sig_mode_data 0x00
++/* TWO PARAMETERS READ DATA */
++#define scanline_data1 0xff
++#define scanline_data2 0xff
++#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
++ * with Sync Pulse
++ */
++#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
++ * with Sync events
++ */
++#define BURST_MODE 0x03 /* Burst Mode */
++#define DBI_COMMAND_BUFFER_SIZE 0x240 /* 0x32 */ /* 0x120 */ /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define DBI_CB_TIME_OUT 0xFFFF
++#define GEN_FB_TIME_OUT 2000
++#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
++#define SKU_83 0x01
++#define SKU_100 0x02
++#define SKU_100L 0x04
++#define SKU_BYPASS 0x08
++#if 0
++/* ************************************************************************* *\
++DSI command data structure
++\* ************************************************************************* */
++union DSI_LONG_PACKET_HEADER {
++ u32 DSI_longPacketHeader;
++ struct {
++ u8 dataID;
++ u16 wordCount;
++ u8 ECC;
++ };
++#if 0 /*FIXME JLIU7 */
++ struct {
++ u8 DT:6;
++ u8 VC:2;
++ };
++#endif /*FIXME JLIU7 */
++};
++
++union MIPI_ADPT_CMD_LNG_REG {
++ u32 commnadLengthReg;
++ struct {
++ u8 command0;
++ u8 command1;
++ u8 command2;
++ u8 command3;
++ };
++};
++
++struct SET_COLUMN_ADDRESS_DATA {
++ u8 command;
++ u16 SC; /* Start Column */
++ u16 EC; /* End Column */
++};
++
++struct SET_PAGE_ADDRESS_DATA {
++ u8 command;
++ u16 SP; /* Start Page */
++ u16 EP; /* End Page */
++};
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_sdvo.c
+@@ -0,0 +1,1408 @@
++/*
++ * Copyright (c) 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/delay.h>
++/* #include <drm/drm_crtc.h> */
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_sdvo_regs.h"
++
++struct psb_intel_sdvo_priv {
++ struct psb_intel_i2c_chan *i2c_bus;
++ int slaveaddr;
++ int output_device;
++
++ u16 active_outputs;
++
++ struct psb_intel_sdvo_caps caps;
++ int pixel_clock_min, pixel_clock_max;
++
++ int save_sdvo_mult;
++ u16 save_active_outputs;
++ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
++ struct psb_intel_sdvo_dtd save_output_dtd[16];
++ u32 save_SDVOX;
++ u8 in_out_map[4];
++
++ u8 by_input_wiring;
++ u32 active_device;
++};
++
++/**
++ * Writes the SDVOB or SDVOC with the given value, but always writes both
++ * SDVOB and SDVOC to work around apparent hardware issues (according to
++ * comments in the BIOS).
++ */
++void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
++ u32 val)
++{
++ struct drm_device *dev = psb_intel_output->base.dev;
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 bval = val, cval = val;
++ int i;
++
++ if (sdvo_priv->output_device == SDVOB)
++ cval = REG_READ(SDVOC);
++ else
++ bval = REG_READ(SDVOB);
++ /*
++ * Write the registers twice for luck. Sometimes,
++ * writing them only once doesn't appear to 'stick'.
++ * The BIOS does this too. Yay, magic
++ */
++ for (i = 0; i < 2; i++) {
++ REG_WRITE(SDVOB, bval);
++ REG_READ(SDVOB);
++ REG_WRITE(SDVOC, cval);
++ REG_READ(SDVOC);
++ }
++}
++
++static bool psb_intel_sdvo_read_byte(
++ struct psb_intel_output *psb_intel_output,
++ u8 addr, u8 *ch)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 out_buf[2];
++ u8 buf[2];
++ int ret;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = 0;
++
++ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
++ if (ret == 2) {
++ /* DRM_DEBUG("got back from addr %02X = %02x\n",
++ * out_buf[0], buf[0]);
++ */
++ *ch = buf[0];
++ return true;
++ }
++
++ DRM_DEBUG("i2c transfer returned %d\n", ret);
++ return false;
++}
++
++static bool psb_intel_sdvo_write_byte(
++ struct psb_intel_output *psb_intel_output,
++ int addr, u8 ch)
++{
++ u8 out_buf[2];
++ struct i2c_msg msgs[] = {
++ {
++ .addr = psb_intel_output->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = ch;
++
++ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
++ return true;
++ return false;
++}
++
++#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
++/** Mapping of command numbers to names, for debug output */
++const static struct _sdvo_cmd_name {
++ u8 cmd;
++ char *name;
++} sdvo_cmd_names[] = {
++SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
++
++#define SDVO_NAME(dev_priv) \
++ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
++#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
++
++static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
++ u8 cmd,
++ void *args,
++ int args_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++
++ if (1) {
++ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
++ for (i = 0; i < args_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ for (i = 0;
++ i <
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
++ i++) {
++ if (cmd == sdvo_cmd_names[i].cmd) {
++ printk("(%s)", sdvo_cmd_names[i].name);
++ break;
++ }
++ }
++ if (i ==
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
++ printk("(%02X)", cmd);
++ printk("\n");
++ }
++
++ for (i = 0; i < args_len; i++) {
++ psb_intel_sdvo_write_byte(psb_intel_output,
++ SDVO_I2C_ARG_0 - i,
++ ((u8 *) args)[i]);
++ }
++
++ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
++}
++
++static const char *cmd_status_names[] = {
++ "Power on",
++ "Success",
++ "Not supported",
++ "Invalid arg",
++ "Pending",
++ "Target not specified",
++ "Scaling not supported"
++};
++
++static u8 psb_intel_sdvo_read_response(
++ struct psb_intel_output *psb_intel_output,
++ void *response, int response_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++ u8 status;
++ u8 retry = 50;
++
++ while (retry--) {
++ /* Read the command response */
++ for (i = 0; i < response_len; i++) {
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_RETURN_0 + i,
++ &((u8 *) response)[i]);
++ }
++
++ /* read the return status */
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_CMD_STATUS,
++ &status);
++
++ if (1) {
++ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
++ for (i = 0; i < response_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++ printk(KERN_INFO"(%s)",
++ cmd_status_names[status]);
++ else
++ printk(KERN_INFO"(??? %d)", status);
++ printk("\n");
++ }
++
++ if (status != SDVO_CMD_STATUS_PENDING)
++ return status;
++
++ mdelay(50);
++ }
++
++ return status;
++}
++
++int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
++{
++ if (mode->clock >= 100000)
++ return 1;
++ else if (mode->clock >= 50000)
++ return 2;
++ else
++ return 4;
++}
++
++/**
++ * Don't check status code from this as it switches the bus back to the
++ * SDVO chips which defeats the purpose of doing a bus switch in the first
++ * place.
++ */
++void psb_intel_sdvo_set_control_bus_switch(
++ struct psb_intel_output *psb_intel_output,
++ u8 target)
++{
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++ &target,
++ 1);
++}
++
++static bool psb_intel_sdvo_set_target_input(
++ struct psb_intel_output *psb_intel_output,
++ bool target_0, bool target_1)
++{
++ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
++ u8 status;
++
++ if (target_0 && target_1)
++ return SDVO_CMD_STATUS_NOTSUPP;
++
++ if (target_1)
++ targets.target_1 = 1;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
++ &targets, sizeof(targets));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++/**
++ * Return whether each input is trained.
++ *
++ * This function is making an assumption about the layout of the response,
++ * which should be checked against the docs.
++ */
++static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
++ *psb_intel_output, bool *input_1,
++ bool *input_2)
++{
++ struct psb_intel_sdvo_get_trained_inputs_response response;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &response,
++ sizeof(response));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ *input_1 = response.input0_trained;
++ *input_2 = response.input1_trained;
++ return true;
++}
++
++static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 *outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, outputs,
++ sizeof(*outputs));
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
++ &outputs, sizeof(outputs));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
++ *psb_intel_output, int mode)
++{
++ u8 status, state = SDVO_ENCODER_STATE_ON;
++
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ state = SDVO_ENCODER_STATE_ON;
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ state = SDVO_ENCODER_STATE_STANDBY;
++ break;
++ case DRM_MODE_DPMS_SUSPEND:
++ state = SDVO_ENCODER_STATE_SUSPEND;
++ break;
++ case DRM_MODE_DPMS_OFF:
++ state = SDVO_ENCODER_STATE_OFF;
++ break;
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
++ sizeof(state));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
++ *psb_intel_output,
++ int *clock_min,
++ int *clock_max)
++{
++ struct psb_intel_sdvo_pixel_clock_range clocks;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
++ 0);
++
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
++ sizeof(clocks));
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ /* Convert the values from units of 10 kHz to kHz. */
++ *clock_min = clocks.min * 10;
++ *clock_max = clocks.max * 10;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_target_output(
++ struct psb_intel_output *psb_intel_output,
++ u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
++ &outputs, sizeof(outputs));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
++ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_get_input_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++#if 0
++static bool psb_intel_sdvo_get_output_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++#endif
++static bool psb_intel_sdvo_set_timing(
++ struct psb_intel_output *psb_intel_output,
++ u8 cmd,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
++ sizeof(dtd->part1));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
++ sizeof(dtd->part2));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_input_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++
++static bool psb_intel_sdvo_set_output_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++
++#if 0
++static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
++ *psb_intel_output,
++ struct psb_intel_sdvo_dtd
++ *dtd)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
++ NULL, 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++#endif
++
++static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output)
++{
++ u8 response, status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_CLOCK_RATE_MULT,
++ NULL,
++ 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS) {
++ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
++ return SDVO_CLOCK_RATE_MULT_1X;
++ } else {
++ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
++ }
++
++ return response;
++}
++
++static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output, u8 val)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_CLOCK_RATE_MULT,
++ &val,
++ 1);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
++ u32 in0outputmask,
++ u32 in1outputmask)
++{
++ u8 byArgs[4];
++ u8 status;
++ int i;
++ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
++
++ /* Make all fields of the args/ret to zero */
++ memset(byArgs, 0, sizeof(byArgs));
++
++ /* Fill up the arguement values; */
++ byArgs[0] = (u8) (in0outputmask & 0xFF);
++ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
++ byArgs[2] = (u8) (in1outputmask & 0xFF);
++ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
++
++
++ /*save inoutmap arg here*/
++ for (i = 0; i < 4; i++)
++ sdvo_priv->in_out_map[i] = byArgs[0];
++
++ psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
++ status = psb_intel_sdvo_read_response(output, NULL, 0);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++ return true;
++}
++
++
++static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
++{
++ u32 dwCurrentSDVOIn0 = 0;
++ u32 dwCurrentSDVOIn1 = 0;
++ u32 dwDevMask = 0;
++
++
++ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
++
++ /* Please DO NOT change the following code. */
++ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
++ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
++ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
++ switch (sdvo_priv->active_device) {
++ case SDVO_DEVICE_LVDS:
++ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
++ break;
++ case SDVO_DEVICE_TMDS:
++ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
++ break;
++ case SDVO_DEVICE_TV:
++ dwDevMask =
++ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
++ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
++ SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
++ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
++ break;
++ case SDVO_DEVICE_CRT:
++ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
++ break;
++ }
++ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
++ } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
++ switch (sdvo_priv->active_device) {
++ case SDVO_DEVICE_LVDS:
++ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
++ break;
++ case SDVO_DEVICE_TMDS:
++ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
++ break;
++ case SDVO_DEVICE_TV:
++ dwDevMask =
++ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
++ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
++ SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
++ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
++ break;
++ case SDVO_DEVICE_CRT:
++ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
++ break;
++ }
++ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
++ }
++
++ psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
++ dwCurrentSDVOIn1);
++}
++
++
++static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
++ * device will be told of the multiplier during mode_set.
++ */
++ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
++ return true;
++}
++
++static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct drm_crtc *crtc = encoder->crtc;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_output *psb_intel_output =
++ enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u16 width, height;
++ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
++ u16 h_sync_offset, v_sync_offset;
++ u32 sdvox;
++ struct psb_intel_sdvo_dtd output_dtd;
++ int sdvo_pixel_multiply;
++
++ if (!mode)
++ return;
++
++ psb_intel_sdvo_set_target_output(psb_intel_output, 0);
++
++ width = mode->crtc_hdisplay;
++ height = mode->crtc_vdisplay;
++
++ /* do some mode translations */
++ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
++ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
++
++ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
++ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
++
++ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
++ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
++
++ output_dtd.part1.clock = mode->clock / 10;
++ output_dtd.part1.h_active = width & 0xff;
++ output_dtd.part1.h_blank = h_blank_len & 0xff;
++ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
++ ((h_blank_len >> 8) & 0xf);
++ output_dtd.part1.v_active = height & 0xff;
++ output_dtd.part1.v_blank = v_blank_len & 0xff;
++ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
++ ((v_blank_len >> 8) & 0xf);
++
++ output_dtd.part2.h_sync_off = h_sync_offset;
++ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
++ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
++ (v_sync_len & 0xf);
++ output_dtd.part2.sync_off_width_high =
++ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
++ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
++
++ output_dtd.part2.dtd_flags = 0x18;
++ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
++ output_dtd.part2.dtd_flags |= 0x2;
++ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
++ output_dtd.part2.dtd_flags |= 0x4;
++
++ output_dtd.part2.sdvo_flags = 0;
++ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
++ output_dtd.part2.reserved = 0;
++
++ /* Set the output timing to the screen */
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ sdvo_priv->active_outputs);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
++
++ /* We would like to use i830_sdvo_create_preferred_input_timing() to
++ * provide the device with a timing it can support, if it supports that
++ * feature. However, presumably we would need to adjust the CRTC to
++ * output the preferred timing, and we don't support that currently.
++ */
++#if 0
++ success =
++ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output,
++ clock,
++ width,
++ height);
++ if (success) {
++ struct psb_intel_sdvo_dtd *input_dtd;
++
++ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
++ &input_dtd);
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
++ }
++#else
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
++#endif
++
++ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
++ case 1:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_1X);
++ break;
++ case 2:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_2X);
++ break;
++ case 4:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_4X);
++ break;
++ }
++
++ /* Set the SDVO control regs. */
++ if (0 /*IS_I965GM(dev) */) {
++ sdvox = SDVO_BORDER_ENABLE;
++ } else {
++ sdvox = REG_READ(sdvo_priv->output_device);
++ switch (sdvo_priv->output_device) {
++ case SDVOB:
++ sdvox &= SDVOB_PRESERVE_MASK;
++ break;
++ case SDVOC:
++ sdvox &= SDVOC_PRESERVE_MASK;
++ break;
++ }
++ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
++ }
++ if (psb_intel_crtc->pipe == 1)
++ sdvox |= SDVO_PIPE_B_SELECT;
++
++ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
++
++#if 0
++ if (IS_I965G(dev)) {
++ /* done in crtc_mode_set as the dpll_md reg must be written
++ * early */
++ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
++ /* done in crtc_mode_set as it lives inside the
++ * dpll register */
++ } else {
++ sdvox |=
++ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++ }
++#endif
++
++ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
++
++ psb_intel_sdvo_set_iomap(psb_intel_output);
++}
++
++static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *psb_intel_output =
++ enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 temp;
++
++ if (mode != DRM_MODE_DPMS_ON) {
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(
++ psb_intel_output,
++ mode);
++
++ if (mode == DRM_MODE_DPMS_OFF) {
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) != 0) {
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp &
++ ~SDVO_ENABLE);
++ }
++ }
++ } else {
++ bool input1, input2;
++ int i;
++ u8 status;
++
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) == 0)
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp | SDVO_ENABLE);
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output,
++ &input1,
++ &input2);
++
++
++ /* Warn if the device reported failure to sync.
++ * A lot of SDVO devices fail to notify of sync, but it's
++ * a given it the status is a success, we succeeded.
++ */
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(
++ psb_intel_output,
++ mode);
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->active_outputs);
++ }
++ return;
++}
++
++static void psb_intel_sdvo_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ /*int o;*/
++
++ sdvo_priv->save_sdvo_mult =
++ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
++ psb_intel_sdvo_get_active_outputs(psb_intel_output,
++ &sdvo_priv->save_active_outputs);
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output,
++ true,
++ false);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output,
++ false,
++ true);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++#if 0
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_get_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++#endif
++
++ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
++
++ /*TODO: save the in_out_map state*/
++}
++
++static void psb_intel_sdvo_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ /*int o;*/
++ int i;
++ bool input1, input2;
++ u8 status;
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++
++#if 0
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_set_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++#endif
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ sdvo_priv->save_sdvo_mult);
++
++ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
++
++ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output,
++ &input1,
++ &input2);
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->save_active_outputs);
++
++ /*TODO: restore in_out_map*/
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_IN_OUT_MAP,
++ sdvo_priv->in_out_map,
++ 4);
++
++ psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++}
++
++static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ if (sdvo_priv->pixel_clock_min > mode->clock)
++ return MODE_CLOCK_LOW;
++
++ if (sdvo_priv->pixel_clock_max < mode->clock)
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++static bool psb_intel_sdvo_get_capabilities(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_caps *caps)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_DEVICE_CAPS,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ caps,
++ sizeof(*caps));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
++{
++ struct drm_connector *connector = NULL;
++ struct psb_intel_output *iout = NULL;
++ struct psb_intel_sdvo_priv *sdvo;
++
++ /* find the sdvo connector */
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ iout = to_psb_intel_output(connector);
++
++ if (iout->type != INTEL_OUTPUT_SDVO)
++ continue;
++
++ sdvo = iout->dev_priv;
++
++ if (sdvo->output_device == SDVOB && sdvoB)
++ return connector;
++
++ if (sdvo->output_device == SDVOC && !sdvoB)
++ return connector;
++
++ }
++
++ return NULL;
++}
++
++int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output;
++ DRM_DEBUG("\n");
++
++ if (!connector)
++ return 0;
++
++ psb_intel_output = to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ &response,
++ 2);
++
++ if (response[0] != 0)
++ return 1;
++
++ return 0;
++}
++
++void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL,
++ 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ if (on) {
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ &response,
++ 2);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ } else {
++ response[0] = 0;
++ response[1] = 0;
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL,
++ 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++}
++
++static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
++ *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ATTACHED_DISPLAYS,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
++ if ((response[0] != 0) || (response[1] != 0))
++ return connector_status_connected;
++ else
++ return connector_status_disconnected;
++}
++
++static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ /* set the bus switch and get the modes */
++ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
++ SDVO_CONTROL_BUS_DDC2);
++ psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (list_empty(&connector->probed_modes))
++ return 0;
++ return 1;
++#if 0
++ /* Mac mini hack. On this device, I get DDC through the analog, which
++ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
++ * but it does load-detect as connected. So, just steal the DDC bits
++ * from analog when we fail at finding it the right way.
++ */
++ /* TODO */
++ return NULL;
++
++ return NULL;
++#endif
++}
++
++static void psb_intel_sdvo_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
++ .dpms = psb_intel_sdvo_dpms,
++ .mode_fixup = psb_intel_sdvo_mode_fixup,
++ .prepare = psb_intel_encoder_prepare,
++ .mode_set = psb_intel_sdvo_mode_set,
++ .commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = psb_intel_sdvo_save,
++ .restore = psb_intel_sdvo_restore,
++ .detect = psb_intel_sdvo_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = psb_intel_sdvo_destroy,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_sdvo_connector_helper_funcs = {
++ .get_modes = psb_intel_sdvo_get_modes,
++ .mode_valid = psb_intel_sdvo_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
++ .destroy = psb_intel_sdvo_enc_destroy,
++};
++
++
++void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
++{
++ struct drm_connector *connector;
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_sdvo_priv *sdvo_priv;
++ struct psb_intel_i2c_chan *i2cbus = NULL;
++ int connector_type;
++ u8 ch[0x40];
++ int i;
++ int encoder_type, output_id;
++
++ psb_intel_output =
++ kcalloc(sizeof(struct psb_intel_output) +
++ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ connector = &psb_intel_output->base;
++
++ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
++ DRM_MODE_CONNECTOR_Unknown);
++ drm_connector_helper_add(connector,
++ &psb_intel_sdvo_connector_helper_funcs);
++ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
++ psb_intel_output->type = INTEL_OUTPUT_SDVO;
++
++ connector->interlace_allowed = 0;
++ connector->doublescan_allowed = 0;
++
++ /* setup the DDC bus. */
++ if (output_device == SDVOB)
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
++ else
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
++
++ if (!i2cbus)
++ goto err_connector;
++
++ sdvo_priv->i2c_bus = i2cbus;
++
++ if (output_device == SDVOB) {
++ output_id = 1;
++ sdvo_priv->by_input_wiring = SDVOB_IN0;
++ sdvo_priv->i2c_bus->slave_addr = 0x38;
++ } else {
++ output_id = 2;
++ sdvo_priv->i2c_bus->slave_addr = 0x39;
++ }
++
++ sdvo_priv->output_device = output_device;
++ psb_intel_output->i2c_bus = i2cbus;
++ psb_intel_output->dev_priv = sdvo_priv;
++
++
++ /* Read the regs to test if we can talk to the device */
++ for (i = 0; i < 0x40; i++) {
++ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
++ DRM_DEBUG("No SDVO device found on SDVO%c\n",
++ output_device == SDVOB ? 'B' : 'C');
++ goto err_i2c;
++ }
++ }
++
++ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
++
++ memset(&sdvo_priv->active_outputs, 0,
++ sizeof(sdvo_priv->active_outputs));
++
++ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
++ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
++ sdvo_priv->active_device = SDVO_DEVICE_CRT;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
++ sdvo_priv->active_device = SDVO_DEVICE_CRT;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
++ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
++ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else {
++ unsigned char bytes[2];
++
++ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
++ DRM_DEBUG
++ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
++ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
++ goto err_i2c;
++ }
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
++ encoder_type);
++ drm_encoder_helper_add(&psb_intel_output->enc,
++ &psb_intel_sdvo_helper_funcs);
++ connector->connector_type = connector_type;
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ drm_sysfs_connector_add(connector);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
++ &sdvo_priv->pixel_clock_min,
++ &sdvo_priv->
++ pixel_clock_max);
++
++
++ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
++ "clock range %dMHz - %dMHz, "
++ "input 1: %c, input 2: %c, "
++ "output 1: %c, output 2: %c\n",
++ SDVO_NAME(sdvo_priv),
++ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
++ sdvo_priv->caps.device_rev_id,
++ sdvo_priv->pixel_clock_min / 1000,
++ sdvo_priv->pixel_clock_max / 1000,
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
++ /* check currently supported outputs */
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
++
++ psb_intel_output->ddc_bus = i2cbus;
++
++ return;
++
++err_i2c:
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++err_connector:
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++
++ return;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_intel_sdvo_regs.h
+@@ -0,0 +1,338 @@
++/*
++ * SDVO command definitions and structures.
++ *
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#define SDVO_OUTPUT_FIRST (0)
++#define SDVO_OUTPUT_TMDS0 (1 << 0)
++#define SDVO_OUTPUT_RGB0 (1 << 1)
++#define SDVO_OUTPUT_CVBS0 (1 << 2)
++#define SDVO_OUTPUT_SVID0 (1 << 3)
++#define SDVO_OUTPUT_YPRPB0 (1 << 4)
++#define SDVO_OUTPUT_SCART0 (1 << 5)
++#define SDVO_OUTPUT_LVDS0 (1 << 6)
++#define SDVO_OUTPUT_TMDS1 (1 << 8)
++#define SDVO_OUTPUT_RGB1 (1 << 9)
++#define SDVO_OUTPUT_CVBS1 (1 << 10)
++#define SDVO_OUTPUT_SVID1 (1 << 11)
++#define SDVO_OUTPUT_YPRPB1 (1 << 12)
++#define SDVO_OUTPUT_SCART1 (1 << 13)
++#define SDVO_OUTPUT_LVDS1 (1 << 14)
++#define SDVO_OUTPUT_LAST (14)
++
++struct psb_intel_sdvo_caps {
++ u8 vendor_id;
++ u8 device_id;
++ u8 device_rev_id;
++ u8 sdvo_version_major;
++ u8 sdvo_version_minor;
++ unsigned int sdvo_inputs_mask:2;
++ unsigned int smooth_scaling:1;
++ unsigned int sharp_scaling:1;
++ unsigned int up_scaling:1;
++ unsigned int down_scaling:1;
++ unsigned int stall_support:1;
++ unsigned int pad:1;
++ u16 output_flags;
++} __attribute__ ((packed));
++
++/** This matches the EDID DTD structure, more or less */
++struct psb_intel_sdvo_dtd {
++ struct {
++ u16 clock; /**< pixel clock, in 10kHz units */
++ u8 h_active; /**< lower 8 bits (pixels) */
++ u8 h_blank; /**< lower 8 bits (pixels) */
++ u8 h_high; /**< upper 4 bits each h_active, h_blank */
++ u8 v_active; /**< lower 8 bits (lines) */
++ u8 v_blank; /**< lower 8 bits (lines) */
++ u8 v_high; /**< upper 4 bits each v_active, v_blank */
++ } part1;
++
++ struct {
++ u8 h_sync_off;
++ /**< lower 8 bits, from hblank start */
++ u8 h_sync_width;/**< lower 8 bits (pixels) */
++ /** lower 4 bits each vsync offset, vsync width */
++ u8 v_sync_off_width;
++ /**
++ * 2 high bits of hsync offset, 2 high bits of hsync width,
++ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
++ */
++ u8 sync_off_width_high;
++ u8 dtd_flags;
++ u8 sdvo_flags;
++ /** bits 6-7 of vsync offset at bits 6-7 */
++ u8 v_sync_off_high;
++ u8 reserved;
++ } part2;
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_pixel_clock_range {
++ u16 min; /**< pixel clock, in 10kHz units */
++ u16 max; /**< pixel clock, in 10kHz units */
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_preferred_input_timing_args {
++ u16 clock;
++ u16 width;
++ u16 height;
++} __attribute__ ((packed));
++
++/* I2C registers for SDVO */
++#define SDVO_I2C_ARG_0 0x07
++#define SDVO_I2C_ARG_1 0x06
++#define SDVO_I2C_ARG_2 0x05
++#define SDVO_I2C_ARG_3 0x04
++#define SDVO_I2C_ARG_4 0x03
++#define SDVO_I2C_ARG_5 0x02
++#define SDVO_I2C_ARG_6 0x01
++#define SDVO_I2C_ARG_7 0x00
++#define SDVO_I2C_OPCODE 0x08
++#define SDVO_I2C_CMD_STATUS 0x09
++#define SDVO_I2C_RETURN_0 0x0a
++#define SDVO_I2C_RETURN_1 0x0b
++#define SDVO_I2C_RETURN_2 0x0c
++#define SDVO_I2C_RETURN_3 0x0d
++#define SDVO_I2C_RETURN_4 0x0e
++#define SDVO_I2C_RETURN_5 0x0f
++#define SDVO_I2C_RETURN_6 0x10
++#define SDVO_I2C_RETURN_7 0x11
++#define SDVO_I2C_VENDOR_BEGIN 0x20
++
++/* Status results */
++#define SDVO_CMD_STATUS_POWER_ON 0x0
++#define SDVO_CMD_STATUS_SUCCESS 0x1
++#define SDVO_CMD_STATUS_NOTSUPP 0x2
++#define SDVO_CMD_STATUS_INVALID_ARG 0x3
++#define SDVO_CMD_STATUS_PENDING 0x4
++#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
++#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
++
++/* SDVO commands, argument/result registers */
++
++#define SDVO_CMD_RESET 0x01
++
++/** Returns a struct psb_intel_sdvo_caps */
++#define SDVO_CMD_GET_DEVICE_CAPS 0x02
++
++#define SDVO_CMD_GET_FIRMWARE_REV 0x86
++# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
++# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
++# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
++
++/**
++ * Reports which inputs are trained (managed to sync).
++ *
++ * Devices must have trained within 2 vsyncs of a mode change.
++ */
++#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
++struct psb_intel_sdvo_get_trained_inputs_response {
++ unsigned int input0_trained:1;
++ unsigned int input1_trained:1;
++ unsigned int pad:6;
++} __attribute__ ((packed));
++
++/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
++#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
++
++/**
++ * Sets the current set of active outputs.
++ *
++ * Takes a struct psb_intel_sdvo_output_flags.
++ * Must be preceded by a SET_IN_OUT_MAP
++ * on multi-output devices.
++ */
++#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
++
++/**
++ * Returns the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Returns two struct psb_intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_GET_IN_OUT_MAP 0x06
++
++/**
++ * Sets the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Takes two struct i380_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_SET_IN_OUT_MAP 0x07
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
++ */
++#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
++ */
++#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags.
++ */
++#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
++ * interrupts enabled.
++ */
++#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
++
++#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
++struct psb_intel_sdvo_get_interrupt_event_source_response {
++ u16 interrupt_status;
++ unsigned int ambient_light_interrupt:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Selects which input is affected by future input commands.
++ *
++ * Commands affected include SET_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
++ */
++#define SDVO_CMD_SET_TARGET_INPUT 0x10
++struct psb_intel_sdvo_set_target_input_args {
++ unsigned int target_1:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
++ * future output commands.
++ *
++ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
++ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
++ */
++#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
++
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
++/* Part 1 */
++# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
++# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
++# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
++# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
++# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
++# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
++/* Part 2 */
++# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
++# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
++# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
++# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
++# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
++# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
++# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
++# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
++# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
++# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
++# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
++# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
++# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
++# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
++
++/**
++ * Generates a DTD based on the given width, height, and flags.
++ *
++ * This will be supported by any device supporting scaling or interlaced
++ * modes.
++ */
++#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
++
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
++
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
++
++/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
++#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
++
++/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
++/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
++# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
++# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
++# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
++
++#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
++
++#define SDVO_CMD_GET_TV_FORMAT 0x28
++
++#define SDVO_CMD_SET_TV_FORMAT 0x29
++
++#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
++#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
++#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
++# define SDVO_ENCODER_STATE_ON (1 << 0)
++# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
++# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
++# define SDVO_ENCODER_STATE_OFF (1 << 3)
++
++#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
++
++#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
++# define SDVO_CONTROL_BUS_PROM 0x0
++# define SDVO_CONTROL_BUS_DDC1 0x1
++# define SDVO_CONTROL_BUS_DDC2 0x2
++# define SDVO_CONTROL_BUS_DDC3 0x3
++
++/* SDVO Bus & SDVO Inputs wiring details*/
++/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no) */
++/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no) */
++/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no) */
++/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no) */
++#define SDVOB_IN0 0x01
++#define SDVOB_IN1 0x02
++#define SDVOC_IN0 0x04
++#define SDVOC_IN1 0x08
++
++#define SDVO_DEVICE_NONE 0x00
++#define SDVO_DEVICE_CRT 0x01
++#define SDVO_DEVICE_TV 0x02
++#define SDVO_DEVICE_LVDS 0x04
++#define SDVO_DEVICE_TMDS 0x08
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_irq.c
+@@ -0,0 +1,675 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "pnw_topaz.h"
++#include "psb_intel_reg.h"
++#include "psb_powermgmt.h"
++
++/*
++ * Video display controller interrupt.
++ */
++
++/**
++ * TODO:
++ * Re-Enable vdc interrupt due to some overwrite
++ * This could be removed later, and display class should handle this interrupt
++ */
++#if 0
++static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
++#ifdef PSB_FIXME
++ atomic_inc(&dev->vbl_received);
++#endif
++ PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_CLEAR, PIPEASTAT);
++ drm_handle_vblank(dev, 0);
++
++ if( dev_priv->psb_vsync_handler != NULL)
++ (*dev_priv->psb_vsync_handler)(dev,0);
++ }
++
++ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
++#ifdef PSB_FIXME
++ atomic_inc(&dev->vbl_received2);
++#endif
++ PSB_WVDC32(PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_CLEAR, PIPEBSTAT);
++ drm_handle_vblank(dev, 1);
++
++ if( dev_priv->psb_vsync_handler != NULL)
++ (*dev_priv->psb_vsync_handler)(dev,1);
++ }
++}
++#endif
++
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
++{
++ struct drm_device *dev = (struct drm_device *) arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ uint32_t vdc_stat, sgx_int = 0, msvdx_int = 0, topaz_int = 0;
++ int handled = 0;
++
++ spin_lock(&dev_priv->irqmask_lock);
++
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++
++ if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
++ PSB_DEBUG_IRQ("Got SGX interrupt\n");
++ sgx_int = 1;
++ }
++ if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
++ PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
++ msvdx_int = 1;
++ }
++
++ if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
++ PSB_DEBUG_IRQ("Got TOPAZ interrupt\n");
++ topaz_int = 1;
++ }
++
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ spin_unlock(&dev_priv->irqmask_lock);
++
++ /*if (msvdx_int && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {*/
++ if (msvdx_int && (IS_MDFLD(dev)
++ || ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))) {
++ psb_msvdx_interrupt(dev);
++ handled = 1;
++ }
++
++ if ((IS_MDFLD(dev) && topaz_int)) {
++ pnw_topaz_interrupt(dev);
++ handled = 1;
++ } else if (IS_MID(dev) && topaz_int &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ /* sometimes, even topaz power down, IIR
++ * may still have topaz bit set
++ */
++ lnc_topaz_interrupt(dev);
++ handled = 1;
++ }
++
++ if (sgx_int) {
++ if (SYSPVRServiceSGXInterrupt(dev) != 0)
++ handled = 1;
++ }
++
++ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
++ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
++ DRM_READMEMORYBARRIER();
++
++ if (!handled)
++ return IRQ_NONE;
++
++
++ return IRQ_HANDLED;
++}
++
++void psb_irq_preinstall(struct drm_device *dev)
++{
++ psb_irq_preinstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++/**
++ * FIXME: should I remove display irq enable here??
++ */
++void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (IS_POULSBO(dev))
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++ if (dev->vblank_enabled[0])
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++ if (dev->vblank_enabled[1])
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
++ }
++ }
++ if (hw_islands & OSPM_GRAPHICS_ISLAND)
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
++
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
++ if (IS_MID(dev) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
++
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
++ if (IS_MID(dev) && ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++int psb_irq_postinstall(struct drm_device *dev)
++{
++ return psb_irq_postinstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands)
++{
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ if (true/*powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)*/) {
++ if (IS_POULSBO(dev))
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++ if (dev->vblank_enabled[0]) {
++ if (IS_MID(dev))
++ psb_enable_pipestat(dev_priv, 0,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ else
++ psb_enable_pipestat(dev_priv, 0,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ } else
++ psb_disable_pipestat(dev_priv, 0,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++
++ if (dev->vblank_enabled[1]) {
++ if (IS_MID(dev))
++ psb_enable_pipestat(dev_priv, 1,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ else
++ psb_enable_pipestat(dev_priv, 1,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ } else
++ psb_disable_pipestat(dev_priv, 1,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ }
++ }
++
++ if (IS_MID(dev) && !dev_priv->topaz_disabled)
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
++ if (true/*powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_ENC_ISLAND)*/)
++ lnc_topaz_enableirq(dev);
++
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
++ if (true/*powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)*/)
++ psb_msvdx_enableirq(dev);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++ return 0;
++}
++
++void psb_irq_uninstall(struct drm_device *dev)
++{
++ psb_irq_uninstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ if (true/*powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)*/) {
++ if (IS_POULSBO(dev))
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++ if (dev->vblank_enabled[0])
++ psb_disable_pipestat(dev_priv, 0,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ if (dev->vblank_enabled[1])
++ psb_disable_pipestat(dev_priv, 1,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ }
++ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
++ _PSB_IRQ_MSVDX_FLAG |
++ _LNC_IRQ_TOPAZ_FLAG;
++ }
++ /*TODO: remove following code*/
++ if (hw_islands & OSPM_GRAPHICS_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
++
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && IS_MRST(dev))
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
++
++ if ((hw_islands & OSPM_VIDEO_ENC_ISLAND) && IS_MRST(dev))
++ dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
++
++ /*These two registers are safe even if display island is off*/
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ wmb();
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
++
++ if (IS_MID(dev) && !dev_priv->topaz_disabled)
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
++ if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ lnc_topaz_disableirq(dev);
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
++ if (ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ psb_msvdx_disableirq(dev);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++void psb_irq_turn_on_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ PSB_WVDC32(BIT31, HISTOGRAM_LOGIC_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ PSB_WVDC32(BIT31, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE | PWM_PHASEIN_INT_ENABLE,
++ PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ PSB_WVDC32(pipea_stat | PIPE_DPST_EVENT_STATUS, PIPEASTAT);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,HISTOGRAM_INT_CONTROL);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE, PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++}
++
++int psb_irq_enable_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ /* enable DPST */
++ dev_priv->vdc_irq_mask |= _PSB_DPST_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ psb_irq_turn_on_dpst(dev);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ return 0;
++}
++
++void psb_irq_turn_off_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat & ~PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg & ~(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++}
++
++int psb_irq_disable_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ dev_priv->vdc_irq_mask &= ~_PSB_DPST_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat & ~PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg & ~(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++ return 0;
++}
++
++
++
++#ifdef PSB_FIXME
++static int psb_vblank_do_wait(struct drm_device *dev,
++ unsigned int *sequence, atomic_t *counter)
++{
++ unsigned int cur_vblank;
++ int ret = 0;
++ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
++ (((cur_vblank = atomic_read(counter))
++ - *sequence) <= (1 << 23)));
++ *sequence = cur_vblank;
++
++ return ret;
++}
++#endif
++
++
++/* Called from drm generic code, passed 'crtc' which
++ * we use as a pipe index
++ */
++int psb_enable_vblank(struct drm_device *dev, int pipe)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 pipeconf = 0;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false)) {
++ pipeconf = REG_READ(pipeconf_reg);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ if (!(pipeconf & PIPEACONF_ENABLE))
++ return -EINVAL;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false)) {
++ drm_psb_disable_vsync = 0;
++ if (pipe == 0)
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++ else
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ if (IS_MID(dev)) {
++ psb_enable_pipestat(dev_priv, pipe,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ } else
++ psb_enable_pipestat(dev_priv, pipe,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++ return 0;
++}
++
++/* Called from drm generic code, passed 'crtc' which
++ * we use as a pipe index
++ */
++void psb_disable_vblank(struct drm_device *dev, int pipe)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false)) {
++ if (pipe == 0)
++ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
++ else
++ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ psb_disable_pipestat(dev_priv, pipe,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++static inline u32
++psb_pipestat(int pipe)
++{
++ if (pipe == 0)
++ return PIPEASTAT;
++ if (pipe == 1)
++ return PIPEBSTAT;
++ BUG();
++}
++
++void
++psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
++{
++ if ((dev_priv->pipestat[pipe] & mask) != mask) {
++ u32 reg = psb_pipestat(pipe);
++ dev_priv->pipestat[pipe] |= mask;
++ /* Enable the interrupt, clear any pending status */
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ u32 writeVal = PSB_RVDC32(reg);
++ writeVal |= (mask | (mask >> 16));
++ PSB_WVDC32(writeVal, reg);
++ (void) PSB_RVDC32(reg);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++}
++
++void
++psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
++{
++ if ((dev_priv->pipestat[pipe] & mask) != 0) {
++ u32 reg = psb_pipestat(pipe);
++ dev_priv->pipestat[pipe] &= ~mask;
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ u32 writeVal = PSB_RVDC32(reg);
++ writeVal &= ~mask;
++ PSB_WVDC32(writeVal, reg);
++ (void) PSB_RVDC32(reg);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++}
++
++/**
++ * psb_pipe_enabled - check if a pipe is enabled
++ * @dev: DRM device
++ * @pipe: pipe to check
++ *
++ * Reading certain registers when the pipe is disabled can hang the chip.
++ * Use this routine to make sure the PLL is running and the pipe is active
++ * before reading such registers if unsure.
++ */
++static int
++psb_pipe_enabled(struct drm_device *dev, int pipe)
++{
++ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
++ int ret = 0;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false)) {
++ ret = (REG_READ(pipeconf) & PIPEACONF_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ return ret;
++}
++
++/* Called from drm generic code, passed a 'crtc', which
++ * we use as a pipe index
++ */
++u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
++{
++ unsigned long high_frame;
++ unsigned long low_frame;
++ u32 high1, high2, low;
++ u32 count = 0;
++
++ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
++ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false))
++ return 0;
++
++ if (!psb_pipe_enabled(dev, pipe)) {
++ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++ goto psb_get_vblank_counter_exit;
++ }
++
++ /*
++ * High & low register fields aren't synchronized, so make sure
++ * we get a low value that's stable across two reads of the high
++ * register.
++ */
++ do {
++ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++ PIPE_FRAME_LOW_SHIFT);
++ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ } while (high1 != high2);
++
++ count = (high1 << 8) | low;
++
++psb_get_vblank_counter_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return count;
++}
++
++#ifdef MDFLD_HDCP
++int mdfld_irq_enable_hdmi_audio(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ u32 reg_val = 0, mask = 0;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ reg_val = REG_READ(PIPEBCONF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ if (!(reg_val & PIPEACONF_ENABLE))
++ return -EINVAL;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ /* enable HDMI audio interrupt*/
++ dev_priv->vdc_irq_mask |= _PSB_PIPEB_EVENT;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ reg_val = PSB_RVDC32(PIPEBSTAT);
++ reg_val &= ~PIPE_HDMI_AUDIO_INT_MASK;
++ mask = dev_priv->hdmi_audio_interrupt_mask;
++ reg_val |= (mask | (mask >> 16));
++ PSB_WVDC32(reg_val, PIPEBSTAT);
++ (void) PSB_RVDC32(PIPEBSTAT);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ return 0;
++}
++
++int mdfld_irq_disable_hdmi_audio(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ u32 reg_val = 0;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ reg_val = PSB_RVDC32(PIPEBSTAT);
++ reg_val &= ~PIPE_HDMI_AUDIO_INT_MASK;
++ PSB_WVDC32(reg_val, PIPEBSTAT);
++ (void) PSB_RVDC32(PIPEBSTAT);
++
++ /* Disable PIPEB event only if no PIPEB event is enabled. */
++ if (!(reg_val & PIPEB_EVENT_MASK)) {
++ dev_priv->vdc_irq_mask &= ~_PSB_PIPEB_EVENT;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ return 0;
++}
++#endif /* MDFLD_HDCP */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_irq.h
+@@ -0,0 +1,49 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _SYSIRQ_H_
++#define _SYSIRQ_H_
++
++#include <drm/drmP.h>
++
++bool sysirq_init(struct drm_device *dev);
++void sysirq_uninit(struct drm_device *dev);
++
++void psb_irq_preinstall(struct drm_device *dev);
++int psb_irq_postinstall(struct drm_device *dev);
++void psb_irq_uninstall(struct drm_device *dev);
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++
++void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
++void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
++
++int psb_irq_enable_dpst(struct drm_device *dev);
++int psb_irq_disable_dpst(struct drm_device *dev);
++void psb_irq_turn_on_dpst(struct drm_device *dev);
++void psb_irq_turn_off_dpst(struct drm_device *dev);
++int psb_enable_vblank(struct drm_device *dev, int pipe);
++void psb_disable_vblank(struct drm_device *dev, int pipe);
++u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
++
++#endif //_SYSIRQ_H_
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_mmu.c
+@@ -0,0 +1,1010 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++
++/*
++ * Code for the SGX MMU:
++ */
++
++/*
++ * clflush on one processor only:
++ * clflush should apparently flush the cache line on all processors in an
++ * SMP system.
++ */
++
++/*
++ * kmap atomic:
++ * The usage of the slots must be completely encapsulated within a spinlock, and
++ * no other functions that may be using the locks for other purposed may be
++ * called from within the locked region.
++ * Since the slots are per processor, this will guarantee that we are the only
++ * user.
++ */
++
++/*
++ * TODO: Inserting ptes from an interrupt handler:
++ * This may be desirable for some SGX functionality where the GPU can fault in
++ * needed pages. For that, we need to make an atomic insert_pages function, that
++ * may fail.
++ * If it fails, the caller need to insert the page using a workqueue function,
++ * but on average it should be fast.
++ */
++
++struct psb_mmu_driver {
++ /* protects driver- and pd structures. Always take in read mode
++ * before taking the page table spinlock.
++ */
++ struct rw_semaphore sem;
++
++ /* protects page tables, directory tables and pt tables.
++ * and pt structures.
++ */
++ spinlock_t lock;
++
++ atomic_t needs_tlbflush;
++
++ uint8_t __iomem *register_map;
++ struct psb_mmu_pd *default_pd;
++ /*uint32_t bif_ctrl;*/
++ int has_clflush;
++ int clflush_add;
++ unsigned long clflush_mask;
++
++ struct drm_psb_private *dev_priv;
++};
++
++struct psb_mmu_pd;
++
++struct psb_mmu_pt {
++ struct psb_mmu_pd *pd;
++ uint32_t index;
++ uint32_t count;
++ struct page *p;
++ uint32_t *v;
++};
++
++struct psb_mmu_pd {
++ struct psb_mmu_driver *driver;
++ int hw_context;
++ struct psb_mmu_pt **tables;
++ struct page *p;
++ struct page *dummy_pt;
++ struct page *dummy_page;
++ uint32_t pd_mask;
++ uint32_t invalid_pde;
++ uint32_t invalid_pte;
++};
++
++static inline uint32_t psb_mmu_pt_index(uint32_t offset)
++{
++ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
++}
++
++static inline uint32_t psb_mmu_pd_index(uint32_t offset)
++{
++ return offset >> PSB_PDE_SHIFT;
++}
++
++#if defined(CONFIG_X86)
++static inline void psb_clflush(void *addr)
++{
++ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
++}
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{
++ if (!driver->has_clflush)
++ return;
++
++ mb();
++ psb_clflush(addr);
++ mb();
++}
++#else
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{;
++}
++
++#endif
++
++static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
++ int force)
++{
++ if (atomic_read(&driver->needs_tlbflush) || force) {
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MSVDX(driver->dev_priv->dev))
++ atomic_set( \
++ &driver->dev_priv->topaz_mmu_invaldc, 1);
++ }
++ }
++ atomic_set(&driver->needs_tlbflush, 0);
++}
++
++static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
++{
++ down_write(&driver->sem);
++ psb_mmu_flush_pd_locked(driver, force);
++ up_write(&driver->sem);
++}
++
++void psb_mmu_flush(struct psb_mmu_driver *driver)
++{
++ down_write(&driver->sem);
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MSVDX(driver->dev_priv->dev))
++ atomic_set(&driver->dev_priv->topaz_mmu_invaldc, 1);
++ }
++
++ up_write(&driver->sem);
++}
++
++void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
++{
++ ttm_tt_cache_flush(&pd->p, 1);
++ down_write(&pd->driver->sem);
++ wmb();
++ psb_mmu_flush_pd_locked(pd->driver, 1);
++ pd->hw_context = hw_context;
++ up_write(&pd->driver->sem);
++
++}
++
++static inline unsigned long psb_pd_addr_end(unsigned long addr,
++ unsigned long end)
++{
++
++ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
++ return (addr < end) ? addr : end;
++}
++
++static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults, int invalid_type)
++{
++ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
++ uint32_t *v;
++ int i;
++
++ if (!pd)
++ return NULL;
++
++ pd->p = alloc_page(GFP_DMA32);
++ if (!pd->p)
++ goto out_err1;
++ pd->dummy_pt = alloc_page(GFP_DMA32);
++ if (!pd->dummy_pt)
++ goto out_err2;
++ pd->dummy_page = alloc_page(GFP_DMA32);
++ if (!pd->dummy_page)
++ goto out_err3;
++
++ if (!trap_pagefaults) {
++ pd->invalid_pde =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
++ invalid_type);
++ pd->invalid_pte =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
++ invalid_type);
++ } else {
++ pd->invalid_pde = 0;
++ pd->invalid_pte = 0;
++ }
++
++ v = kmap(pd->dummy_pt);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pte;
++
++ kunmap(pd->dummy_pt);
++
++ v = kmap(pd->p);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pde;
++
++ kunmap(pd->p);
++
++ clear_page(kmap(pd->dummy_page));
++ kunmap(pd->dummy_page);
++
++ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
++ if (!pd->tables)
++ goto out_err4;
++
++ pd->hw_context = -1;
++ pd->pd_mask = PSB_PTE_VALID;
++ pd->driver = driver;
++
++ return pd;
++
++out_err4:
++ __free_page(pd->dummy_page);
++out_err3:
++ __free_page(pd->dummy_pt);
++out_err2:
++ __free_page(pd->p);
++out_err1:
++ kfree(pd);
++ return NULL;
++}
++
++void psb_mmu_free_pt(struct psb_mmu_pt *pt)
++{
++ __free_page(pt->p);
++ kfree(pt);
++}
++
++void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_driver *driver = pd->driver;
++ struct psb_mmu_pt *pt;
++ int i;
++
++ down_write(&driver->sem);
++ if (pd->hw_context != -1)
++ psb_mmu_flush_pd_locked(driver, 1);
++
++ /* Should take the spinlock here, but we don't need to do that
++ since we have the semaphore in write mode. */
++
++ for (i = 0; i < 1024; ++i) {
++ pt = pd->tables[i];
++ if (pt)
++ psb_mmu_free_pt(pt);
++ }
++
++ vfree(pd->tables);
++ __free_page(pd->dummy_page);
++ __free_page(pd->dummy_pt);
++ __free_page(pd->p);
++ kfree(pd);
++ up_write(&driver->sem);
++}
++
++static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
++ void *v;
++ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
++ uint32_t clflush_count = PAGE_SIZE / clflush_add;
++ spinlock_t *lock = &pd->driver->lock;
++ uint8_t *clf;
++ uint32_t *ptes;
++ int i;
++
++ if (!pt)
++ return NULL;
++
++ pt->p = alloc_page(GFP_DMA32);
++ if (!pt->p) {
++ kfree(pt);
++ return NULL;
++ }
++
++ spin_lock(lock);
++
++ v = kmap_atomic(pt->p, KM_USER0);
++ clf = (uint8_t *) v;
++ ptes = (uint32_t *) v;
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ *ptes++ = pd->invalid_pte;
++
++
++#if defined(CONFIG_X86)
++ if (pd->driver->has_clflush && pd->hw_context != -1) {
++ mb();
++ for (i = 0; i < clflush_count; ++i) {
++ psb_clflush(clf);
++ clf += clflush_add;
++ }
++ mb();
++ }
++#endif
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ pt->count = 0;
++ pt->pd = pd;
++ pt->index = 0;
++
++ return pt;
++}
++
++struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ uint32_t *v;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ while (!pt) {
++ spin_unlock(lock);
++ pt = psb_mmu_alloc_pt(pd);
++ if (!pt)
++ return NULL;
++ spin_lock(lock);
++
++ if (pd->tables[index]) {
++ spin_unlock(lock);
++ psb_mmu_free_pt(pt);
++ spin_lock(lock);
++ pt = pd->tables[index];
++ continue;
++ }
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ pd->tables[index] = pt;
++ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
++ pt->index = index;
++ kunmap_atomic((void *) v, KM_USER0);
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver, (void *) &v[index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ if (!pt) {
++ spin_unlock(lock);
++ return NULL;
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
++{
++ struct psb_mmu_pd *pd = pt->pd;
++ uint32_t *v;
++
++ kunmap_atomic(pt->v, KM_USER0);
++ if (pt->count == 0) {
++ v = kmap_atomic(pd->p, KM_USER0);
++ v[pt->index] = pd->invalid_pde;
++ pd->tables[pt->index] = NULL;
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver,
++ (void *) &v[pt->index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ kunmap_atomic(pt->v, KM_USER0);
++ spin_unlock(&pd->driver->lock);
++ psb_mmu_free_pt(pt);
++ return;
++ }
++ spin_unlock(&pd->driver->lock);
++}
++
++static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
++ unsigned long addr, uint32_t pte)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pte;
++}
++
++static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
++ unsigned long addr)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
++}
++
++#if 0
++static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset)
++{
++ uint32_t *v;
++ uint32_t pfn;
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pde page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pd_index(mmu_offset)];
++ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
++ kunmap_atomic(v, KM_USER0);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ v = ioremap(pfn & 0xFFFFF000, 4096);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pte page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pt_index(mmu_offset)];
++ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
++ iounmap(v);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ return pfn >> PAGE_SHIFT;
++}
++
++static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset,
++ uint32_t gtt_pages)
++{
++ uint32_t start;
++ uint32_t next;
++
++ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
++ mmu_offset, gtt_pages);
++ down_read(&pd->driver->sem);
++ start = psb_mmu_check_pte_locked(pd, mmu_offset);
++ mmu_offset += PAGE_SIZE;
++ gtt_pages -= 1;
++ while (gtt_pages--) {
++ next = psb_mmu_check_pte_locked(pd, mmu_offset);
++ if (next != start + 1) {
++ printk(KERN_INFO
++ "Ptes out of order: 0x%08x, 0x%08x.\n",
++ start, next);
++ }
++ start = next;
++ mmu_offset += PAGE_SIZE;
++ }
++ up_read(&pd->driver->sem);
++}
++
++#endif
++
++void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset, uint32_t gtt_start,
++ uint32_t gtt_pages)
++{
++ uint32_t *v;
++ uint32_t start = psb_mmu_pd_index(mmu_offset);
++ struct psb_mmu_driver *driver = pd->driver;
++ int num_pages = gtt_pages;
++
++ down_read(&driver->sem);
++ spin_lock(&driver->lock);
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ v += start;
++
++ while (gtt_pages--) {
++ *v++ = gtt_start | pd->pd_mask;
++ gtt_start += PAGE_SIZE;
++ }
++
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(&driver->lock);
++
++ if (pd->hw_context != -1)
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++
++ up_read(&pd->driver->sem);
++ psb_mmu_flush_pd(pd->driver, 0);
++}
++
++struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ down_read(&driver->sem);
++ pd = driver->default_pd;
++ up_read(&driver->sem);
++
++ return pd;
++}
++
++/* Returns the physical address of the PD shared by sgx/msvdx */
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ pd = psb_mmu_get_default_pd(driver);
++ return page_to_pfn(pd->p) << PAGE_SHIFT;
++}
++
++void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
++{
++ psb_mmu_free_pagedir(driver->default_pd);
++ kfree(driver);
++}
++
++struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv)
++{
++ struct psb_mmu_driver *driver;
++
++ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
++
++ if (!driver)
++ return NULL;
++ driver->dev_priv = dev_priv;
++
++ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
++ invalid_type);
++ if (!driver->default_pd)
++ goto out_err1;
++
++ spin_lock_init(&driver->lock);
++ init_rwsem(&driver->sem);
++ down_write(&driver->sem);
++ driver->register_map = registers;
++ atomic_set(&driver->needs_tlbflush, 1);
++
++ driver->has_clflush = 0;
++
++#if defined(CONFIG_X86)
++ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
++ uint32_t tfms, misc, cap0, cap4, clflush_size;
++
++ /*
++ * clflush size is determined at kernel setup for x86_64
++ * but not for i386. We have to do it here.
++ */
++
++ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
++ clflush_size = ((misc >> 8) & 0xff) * 8;
++ driver->has_clflush = 1;
++ driver->clflush_add =
++ PAGE_SIZE * clflush_size / sizeof(uint32_t);
++ driver->clflush_mask = driver->clflush_add - 1;
++ driver->clflush_mask = ~driver->clflush_mask;
++ }
++#endif
++
++ up_write(&driver->sem);
++ return driver;
++
++out_err1:
++ kfree(driver);
++ return NULL;
++}
++
++#if defined(CONFIG_X86)
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long clflush_add = pd->driver->clflush_add;
++ unsigned long clflush_mask = pd->driver->clflush_mask;
++
++ if (!pd->driver->has_clflush) {
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ return;
++ }
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++ mb();
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_clflush(&pt->v
++ [psb_mmu_pt_index(addr)]);
++ } while (addr +=
++ clflush_add,
++ (addr & clflush_mask) < next);
++
++ psb_mmu_pt_unmap_unlock(pt);
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ mb();
++}
++#else
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ ttm_tt_cache_flush(&pd->p, num_pages);
++}
++#endif
++
++void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages)
++{
++ struct psb_mmu_pt *pt;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt)
++ goto out;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return;
++}
++
++void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
++ uint32_t num_pages, uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ /* Make sure we only need to flush this processor's cache */
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++}
++
++int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
++ unsigned long address, uint32_t num_pages,
++ int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte = psb_mmu_mask_pte(start_pfn++, type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++
++int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ if (hw_tile_stride) {
++ if (num_pages % desired_tile_stride != 0)
++ return -EINVAL;
++ rows = num_pages / desired_tile_stride;
++ } else {
++ desired_tile_stride = num_pages;
++ }
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte =
++ psb_mmu_mask_pte(page_to_pfn(*pages++),
++ type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++ address += row_add;
++ }
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++#if 0 /*comented out, only used in mmu test now*/
++void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver,
++ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++
++void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
++ uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++#endif
++int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn)
++{
++ int ret;
++ struct psb_mmu_pt *pt;
++ uint32_t tmp;
++ spinlock_t *lock = &pd->driver->lock;
++
++ down_read(&pd->driver->sem);
++ pt = psb_mmu_pt_map_lock(pd, virtual);
++ if (!pt) {
++ uint32_t *v;
++
++ spin_lock(lock);
++ v = kmap_atomic(pd->p, KM_USER0);
++ tmp = v[psb_mmu_pd_index(virtual)];
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
++ !(pd->invalid_pte & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = 0;
++ *pfn = pd->invalid_pte >> PAGE_SHIFT;
++ goto out;
++ }
++ tmp = pt->v[psb_mmu_pt_index(virtual)];
++ if (!(tmp & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ } else {
++ ret = 0;
++ *pfn = tmp >> PAGE_SHIFT;
++ }
++ psb_mmu_pt_unmap_unlock(pt);
++out:
++ up_read(&pd->driver->sem);
++ return ret;
++}
++#if 0
++void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
++{
++ struct page *p;
++ unsigned long pfn;
++ int ret = 0;
++ struct psb_mmu_pd *pd;
++ uint32_t *v;
++ uint32_t *vmmu;
++
++ pd = driver->default_pd;
++ if (!pd)
++ printk(KERN_WARNING "Could not get default pd\n");
++
++
++ p = alloc_page(GFP_DMA32);
++
++ if (!p) {
++ printk(KERN_WARNING "Failed allocating page\n");
++ return;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++
++ pfn = (offset >> PAGE_SHIFT);
++
++ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ printk(KERN_WARNING "Failed inserting mmu page\n");
++ goto out_err1;
++ }
++
++ /* Ioremap the page through the GART aperture */
++
++ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ if (!vmmu) {
++ printk(KERN_WARNING "Failed ioremapping page\n");
++ goto out_err2;
++ }
++
++ /* Read from the page with mmu disabled. */
++ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
++
++ /* Enable the mmu for host accesses and read again. */
++ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
++
++ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
++ ioread32(vmmu));
++ *v = 0x15243705;
++ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
++ ioread32(vmmu));
++ iowrite32(0x16243355, vmmu);
++ (void) ioread32(vmmu);
++ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
++
++ printk(KERN_INFO "Int stat is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
++ printk(KERN_INFO "Fault is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_FAULT));
++
++ /* Disable MMU for host accesses and clear page fault register */
++ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
++ iounmap(vmmu);
++out_err2:
++ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
++out_err1:
++ kunmap(p);
++ __free_page(p);
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_msvdx.c
+@@ -0,0 +1,1099 @@
++/**************************************************************************
++ * MSVDX I/O operations and IRQ handling
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "psb_powermgmt.h"
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#ifndef list_first_entry
++#define list_first_entry(ptr, type, member) \
++ list_entry((ptr)->next, type, member)
++#endif
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size);
++
++static int psb_msvdx_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
++ int ret = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ if (list_empty(&msvdx_priv->msvdx_queue)) {
++ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
++ msvdx_priv->msvdx_busy = 0;
++ return -EINVAL;
++ }
++ msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
++ struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
++ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ list_del(&msvdx_cmd->head);
++ kfree(msvdx_cmd->cmd);
++ kfree(msvdx_cmd);
++
++ return ret;
++}
++
++static int psb_msvdx_map_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int ret = 0;
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ unsigned long cmd_size_remaining;
++ struct ttm_bo_kmap_obj cmd_kmap, regio_kmap;
++ void *cmd, *tmp, *cmd_start;
++ bool is_iomem;
++
++ /* command buffers may not exceed page boundary */
++ if (cmd_size + cmd_page_offset > PAGE_SIZE)
++ return -EINVAL;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
++ return ret;
++ }
++
++ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
++ + cmd_page_offset;
++ cmd = cmd_start;
++ cmd_size_remaining = cmd_size;
++
++ while (cmd_size_remaining > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ uint32_t mmu_ptd = 0, tmp = 0;
++ struct psb_msvdx_deblock_queue *msvdx_deblock;
++ unsigned long irq_flags;
++
++ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
++ " cur_cmd_id = %02x fence = %08x\n",
++ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
++ if ((cur_cmd_size % sizeof(uint32_t))
++ || (cur_cmd_size > cmd_size_remaining)) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ switch (cur_cmd_id) {
++ case VA_MSGID_RENDER:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
++
++ break;
++
++ case DXVA_MSGID_OOLD:
++ MEMIO_WRITE_FIELD(cmd, FW_DXVA_OOLD_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd, FW_DXVA_OOLD_MMUPTD, mmu_ptd);
++
++ PSB_DEBUG_GENERAL("MSVDX:Get oold cmd\n");
++
++ break;
++
++ case VA_MSGID_DEBLOCK:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_DXVA_DEBLOCK_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd,
++ FW_DXVA_DEBLOCK_MMUPTD,
++ mmu_ptd);
++
++ /* printk("Got deblock msg\n"); */
++ /* Deblock message is followed by 32 */
++ /* bytes of deblock params */
++ msvdx_deblock = kmalloc(
++ sizeof(struct psb_msvdx_deblock_queue),
++ GFP_KERNEL);
++
++ if (msvdx_deblock == NULL) {
++ DRM_ERROR("DEBLOCK QUE: Out of memory...\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ memcpy(&msvdx_deblock->dbParams, cmd + 16, 32);
++
++ ret = ttm_bo_kmap(
++ (struct ttm_buffer_object *)
++ msvdx_deblock->dbParams.handle,
++ 0,
++ (msvdx_deblock->dbParams.buffer_size +
++ PAGE_SIZE - 1) >> PAGE_SHIFT,
++ &regio_kmap);
++
++ /* printk("deblock regio buffer size is 0x%x\n",
++ msvdx_deblock->dbParams.buffer_size); */
++
++ if (likely(!ret)) {
++ msvdx_deblock->dbParams.pPicparams = kmalloc(
++ msvdx_deblock->dbParams.buffer_size,
++ GFP_KERNEL);
++
++ if (msvdx_deblock->dbParams.pPicparams != NULL)
++ memcpy(
++ msvdx_deblock->dbParams.pPicparams,
++ regio_kmap.virtual,
++ msvdx_deblock->dbParams.buffer_size);
++ ttm_bo_kunmap(&regio_kmap);
++ }
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_deblock->head,
++ &msvdx_priv->deblock_queue);
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock,
++ irq_flags);
++
++ cmd += 32;
++ cmd_size_remaining -= 32;
++ break;
++
++
++ default:
++ /* Msg not supported */
++ ret = -EINVAL;
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ cmd += cur_cmd_size;
++ cmd_size_remaining -= cur_cmd_size;
++ }
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
++
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ DRM_ERROR("MSVDX: fail to callc,ret=:%d\n", ret);
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *msvdx_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
++ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
++ unsigned long irq_flags;
++ int ret = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int offset = 0;
++
++ /* psb_schedule_watchdog(dev_priv); */
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ if (msvdx_priv->msvdx_needs_reset) {
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
++ if (psb_msvdx_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("MSVDX: Reset failed\n");
++ return ret;
++ }
++ msvdx_priv->msvdx_needs_reset = 0;
++ msvdx_priv->msvdx_busy = 0;
++
++ psb_msvdx_init(dev);
++
++ /* restore vec local mem if needed */
++ if (msvdx_priv->vec_local_mem_saved) {
++ for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
++ PSB_WMSVDX32(msvdx_priv->vec_local_mem_data[offset],
++ VEC_LOCAL_MEM_OFFSET + offset * 4);
++
++ msvdx_priv->vec_local_mem_saved = 0;
++ }
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!msvdx_priv->msvdx_fw_loaded) {
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
++
++ ret = psb_setup_fw(dev);
++ if (ret) {
++ DRM_ERROR("MSVDX:fail to load FW\n");
++ /* FIXME: find a proper return value */
++ return -EFAULT;
++ }
++ msvdx_priv->msvdx_fw_loaded = 1;
++
++ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!msvdx_priv->msvdx_busy) {
++ msvdx_priv->msvdx_busy = 1;
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
++ sequence);
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ return ret;
++ }
++ } else {
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ void *cmd = NULL;
++
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ /* queue the command to be sent when the h/w is ready */
++ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
++ sequence);
++ msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
++ GFP_KERNEL);
++ if (msvdx_cmd == NULL) {
++ DRM_ERROR("MSVDXQUE: Out of memory...\n");
++ return -ENOMEM;
++ }
++
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ kfree(msvdx_cmd
++ );
++ return ret;
++ }
++ msvdx_cmd->cmd = cmd;
++ msvdx_cmd->cmd_size = cmd_size;
++ msvdx_cmd->sequence = sequence;
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
++ if (!msvdx_priv->msvdx_busy) {
++ msvdx_priv->msvdx_busy = 1;
++ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
++ psb_msvdx_dequeue_send(dev);
++ }
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ return ret;
++}
++
++int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence;
++ int ret;
++
++ /*
++ * Check this. Doesn't seem right. Have fencing done AFTER command
++ * submission and make sure drm_psb_idle idles the MSVDX completely.
++ */
++ ret =
++ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, NULL);
++ if (ret)
++ return ret;
++
++
++ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
++ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
++ arg->fence_flags, validate_list, fence_arg,
++ &fence);
++
++ ttm_fence_object_unref(&fence);
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size)
++{
++ int ret = 0;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ while (cmd_size > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ if (cur_cmd_size > cmd_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
++ cmd_size, (unsigned long)cur_cmd_size);
++ goto out;
++ }
++
++ /* Send the message to h/w */
++ ret = psb_mtx_send(dev_priv, cmd);
++ if (ret) {
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++ cmd += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ if (cur_cmd_id == VA_MSGID_DEBLOCK) {
++ cmd += 32;
++ cmd_size -= 32;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ return ret;
++}
++
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
++{
++ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
++ const uint32_t *p_msg = (uint32_t *) msg;
++ uint32_t msg_num, words_free, ridx, widx, buf_size, buf_offset;
++ int ret = 0;
++
++ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
++
++ /* we need clocks enabled before we touch VEC local ram */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
++
++ if (msg_num > buf_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
++ goto out;
++ }
++
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
++ /*0x2000 is VEC Local Ram offset*/
++ buf_offset =
++ (PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
++
++ /* message would wrap, need to send a pad message */
++ if (widx + msg_num > buf_size) {
++ /* Shouldn't happen for a PAD message itself */
++ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
++ == FWRK_MSGID_PADDING);
++
++ /* if the read pointer is at zero then we must wait for it to
++ * change otherwise the write pointer will equal the read
++ * pointer,which should only happen when the buffer is empty
++ *
++ * This will only happens if we try to overfill the queue,
++ * queue management should make
++ * sure this never happens in the first place.
++ */
++ BUG_ON(0 == ridx);
++ if (0 == ridx) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
++ goto out;
++ }
++
++ /* Send a pad message */
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
++ (buf_size - widx) << 2);
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
++ FWRK_MSGID_PADDING);
++ psb_mtx_send(dev_priv, pad_msg);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ }
++
++ if (widx >= ridx)
++ words_free = buf_size - (widx - ridx);
++ else
++ words_free = ridx - widx;
++
++ BUG_ON(msg_num > words_free);
++ if (msg_num > words_free) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
++ goto out;
++ }
++ while (msg_num > 0) {
++ PSB_WMSVDX32(*p_msg++, buf_offset + (widx << 2));
++ msg_num--;
++ widx++;
++ if (buf_size == widx)
++ widx = 0;
++ }
++
++ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++ /* Make sure clocks are enabled before we kick */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* signal an interrupt to let the mtx know there is a new message */
++ /* PSB_WMSVDX32(1, MSVDX_MTX_KICKI); */
++ PSB_WMSVDX32(1, MSVDX_MTX_KICK);
++
++ /* Read MSVDX Register several times in case Idle signal assert */
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++
++
++out:
++ return ret;
++}
++
++static int psb_msvdx_towpass_deblock(struct drm_device *dev,
++ uint32_t *pPicparams)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t cmd_size, cmd_count = 0;
++ uint32_t cmd_id, reg, value, wait, tmp, read = 0, ret = 0;
++
++ cmd_size = *pPicparams++;
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: deblock get cmd size %d\n", cmd_size);
++ /* printk("MSVDX DEBLOCK: deblock get cmd size %d\n", cmd_size); */
++
++ do {
++ cmd_id = (*pPicparams) & 0xf0000000;
++ reg = (*pPicparams++) & 0x0fffffff;
++ switch (cmd_id) {
++ case MSVDX_DEBLOCK_REG_SET: {
++ value = *pPicparams++;
++ PSB_WMSVDX32(value, reg);
++ cmd_count += 2;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_GET: {
++ read = PSB_RMSVDX32(reg);
++ cmd_count += 1;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_POLLn: {
++ value = *pPicparams++;
++ wait = 0;
++
++ do {
++ tmp = PSB_RMSVDX32(reg);
++ } while ((wait++ < 20000) && (value > tmp));
++
++ if (wait >= 20000) {
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: polln cmd space time out!\n");
++ goto finish_deblock;
++ }
++ cmd_count += 2;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_POLLx: {
++ wait = 0;
++
++ do {
++ tmp = PSB_RMSVDX32(reg);
++ } while ((wait++ < 20000) && (read > tmp));
++
++ if (wait >= 20000) {
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: pollx cmd space time out!\n");
++ goto finish_deblock;
++ }
++
++ cmd_count += 1;
++ break;
++ }
++ default:
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: get error cmd_id: 0x%x!\n",
++ cmd_id);
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: execute cmd num is %d\n",
++ cmd_count);
++ /* printk("MSVDX DEBLOCK: get error cmd_id: 0x%x!\n",
++ cmd_id); */
++ /* printk("MSVDX DEBLOCK: execute cmd num is %d\n",
++ cmd_count); */
++ goto finish_deblock;
++ }
++ } while (cmd_count < cmd_size);
++
++
++finish_deblock:
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: execute cmd num is %d\n", cmd_count);
++ return ret;
++}
++
++/*
++ * MSVDX MTX interrupt
++ */
++static void psb_msvdx_mtx_interrupt(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ static uint32_t buf[128]; /* message buffer */
++ uint32_t ridx, widx, buf_size, buf_offset;
++ uint32_t num, ofs; /* message num and offset */
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
++
++ /* Are clocks enabled - If not enable before
++ * attempting to read from VLR
++ */
++ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
++ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interupt set\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++ }
++
++loop: /* just for coding style check */
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
++
++ /* Get out of here if nothing */
++ if (ridx == widx)
++ goto done;
++
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) & ((1 << 16) - 1);
++ /*0x2000 is VEC Local Ram offset*/
++ buf_offset =
++ (PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16) + 0x2000;
++
++ ofs = 0;
++ buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
++
++ /* round to nearest word */
++ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
++
++ if (++ridx >= buf_size)
++ ridx = 0;
++
++ for (ofs++; ofs < num; ofs++) {
++ buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
++
++ if (++ridx >= buf_size)
++ ridx = 0;
++ }
++
++ /* Update the Read index */
++ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
++
++ if (msvdx_priv->msvdx_needs_reset)
++ goto loop;
++
++ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
++ /* FIXME error recovery handle is not implement yet! Need to queue pending msg for error recovery */
++ case VA_MSGID_CMD_HW_PANIC:
++ case VA_MSGID_CMD_FAILED: {
++ /* For VXD385 firmware, fence value is not validate here */
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_FENCE_VALUE);
++ uint32_t fault = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_IRQSTATUS);
++ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
++ uint32_t diff = 0;
++
++ (void) fault;
++ if (msg_id == VA_MSGID_CMD_HW_PANIC)
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++ else
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++
++ msvdx_priv->msvdx_needs_reset = 1;
++
++ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
++ diff = msvdx_priv->msvdx_current_sequence
++ - dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (diff > 0x0FFFFFFF)
++ msvdx_priv->msvdx_current_sequence++;
++
++ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
++ "assuming %08x\n",
++ msvdx_priv->msvdx_current_sequence);
++ } else {
++ msvdx_priv->msvdx_current_sequence = fence;
++ }
++
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
++
++ /* Flush the command queue */
++ psb_msvdx_flush_cmd_queue(dev);
++
++ goto done;
++ }
++ case VA_MSGID_CMD_COMPLETED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t flags = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FLAGS);
++
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
++ "FenceID: %08x, flags: 0x%x\n",
++ fence, flags);
++
++ msvdx_priv->msvdx_current_sequence = fence;
++
++ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
++
++ if (flags & FW_VA_RENDER_HOST_INT) {
++ /*Now send the next command from the msvdx cmd queue */
++ psb_msvdx_dequeue_send(dev);
++ goto done;
++ }
++
++ break;
++ }
++ case VA_MSGID_CMD_COMPLETED_BATCH: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_NO_TICKS);
++ (void)tickcnt;
++ /* we have the fence value in the message */
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
++ " FenceID: %08x, TickCount: %08x\n",
++ fence, tickcnt);
++ msvdx_priv->msvdx_current_sequence = fence;
++
++ break;
++ }
++ case VA_MSGID_ACK:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
++ break;
++
++ case VA_MSGID_TEST1:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
++ break;
++
++ case VA_MSGID_TEST2:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
++ break;
++ /* Don't need to do anything with these messages */
++
++ /* Penwell deblock is not implemented here */
++ case VA_MSGID_DEBLOCK_REQUIRED: {
++ uint32_t ctxid = MEMIO_READ_FIELD(buf,
++ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
++ struct psb_msvdx_deblock_queue *msvdx_deblock;
++
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
++ " Context=%08x\n", ctxid);
++ if (list_empty(&msvdx_priv->deblock_queue)) {
++ PSB_DEBUG_GENERAL(
++ "DEBLOCKQUE: deblock param list is empty\n");
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ goto done;
++ }
++ msvdx_deblock = list_first_entry(&msvdx_priv->deblock_queue,
++ struct psb_msvdx_deblock_queue, head);
++
++ if (0) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: by pass \n");
++ /* try to unblock rendec */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ list_del(&msvdx_deblock->head);
++ goto done;
++ }
++
++
++ if (ctxid != msvdx_deblock->dbParams.ctxid) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: wrong ctxid, may "
++ "caused by multiple context since "
++ "it's not supported yet\n");
++ /* try to unblock rendec */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ list_del(&msvdx_deblock->head);
++ goto done;
++ }
++
++ if (msvdx_deblock->dbParams.pPicparams) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: start deblocking\n");
++ /* printk("MSVDX DEBLOCK: start deblocking\n"); */
++
++ if (psb_msvdx_towpass_deblock(dev,
++ msvdx_deblock->dbParams.pPicparams)) {
++
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: deblock fail!\n");
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ }
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: deblock abort!\n");
++ /* printk("MSVDX DEBLOCK: deblock abort!\n"); */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ }
++
++ list_del(&msvdx_deblock->head);
++ kfree(msvdx_deblock);
++ break;
++ }
++ default:
++ DRM_ERROR("ERROR: msvdx Unknown message from MTX \n");
++ goto done;
++ }
++
++done:
++ /* we get a frame/slice done, try to save some power*/
++ if (drm_msvdx_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
++
++ DRM_MEMORYBARRIER(); /* TBD check this... */
++}
++
++
++/*
++ * MSVDX interrupt.
++ */
++IMG_BOOL psb_msvdx_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ struct msvdx_private *msvdx_priv;
++ uint32_t msvdx_stat;
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: msvdx %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND) && IS_MRST(dev)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }
++
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ msvdx_priv = dev_priv->msvdx_private;
++
++ msvdx_priv->msvdx_hw_busy = REG_READ(0x20D0) & (0x1 << 9);
++
++ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++
++ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
++ /*Ideally we should we should never get to this */
++ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x\n", msvdx_stat);
++
++ /* Pause MMU */
++ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
++ MSVDX_MMU_CONTROL0);
++ DRM_WRITEMEMORYBARRIER();
++
++ /* Clear this interupt bit only */
++ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
++ MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ msvdx_priv->msvdx_needs_reset = 1;
++ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
++ PSB_DEBUG_IRQ
++ ("MSVDX: msvdx_stat: 0x%x(MTX)\n", msvdx_stat);
++
++ /* Clear all interupt bits */
++ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ psb_msvdx_mtx_interrupt(dev);
++ }
++
++ return IMG_TRUE;
++}
++
++
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle)
++{
++ int tmp;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ *msvdx_lockup = 0;
++ *msvdx_idle = 1;
++
++#if 0
++ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
++ "last_sequence:%d and last_submitted_sequence :%d\n",
++ msvdx_priv->msvdx_current_sequence,
++ msvdx_priv->msvdx_last_sequence,
++ dev_priv->sequence[PSB_ENGINE_VIDEO]);
++#endif
++
++ tmp = msvdx_priv->msvdx_current_sequence -
++ dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (tmp > 0x0FFFFFFF) {
++ if (msvdx_priv->msvdx_current_sequence ==
++ msvdx_priv->msvdx_last_sequence) {
++ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
++ msvdx_priv->msvdx_current_sequence);
++ *msvdx_lockup = 1;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXTimer: "
++ "msvdx responded fine so far\n");
++ msvdx_priv->msvdx_last_sequence =
++ msvdx_priv->msvdx_current_sequence;
++ *msvdx_idle = 0;
++ }
++ }
++}
++
++int psb_check_msvdx_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ if (msvdx_priv->msvdx_fw_loaded == 0)
++ return 0;
++
++ if (msvdx_priv->msvdx_busy) {
++ PSB_DEBUG_PM("MSVDX: psb_check_msvdx_idle returns busy\n");
++ return -EBUSY;
++ }
++
++ if (msvdx_priv->msvdx_hw_busy) {
++ PSB_DEBUG_PM("MSVDX: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++int lnc_video_getparam(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_lnc_video_getparam_arg *arg = data;
++ int ret = 0;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++#if defined(CONFIG_MRST_RAR_HANDLER)
++ struct RAR_buffer rar_buf;
++ size_t rar_status;
++#endif
++ void *rar_handler;
++ uint32_t offset = 0;
++ uint32_t device_info = 0;
++ uint32_t rar_ci_info[2];
++
++ switch (arg->key) {
++ case LNC_VIDEO_GETPARAM_RAR_INFO:
++ rar_ci_info[0] = dev_priv->rar_region_start;
++ rar_ci_info[1] = dev_priv->rar_region_size;
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &rar_ci_info[0],
++ sizeof(rar_ci_info));
++ break;
++ case LNC_VIDEO_GETPARAM_CI_INFO:
++ rar_ci_info[0] = dev_priv->ci_region_start;
++ rar_ci_info[1] = dev_priv->ci_region_size;
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &rar_ci_info[0],
++ sizeof(rar_ci_info));
++ break;
++ case LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET:
++ ret = copy_from_user(&rar_handler,
++ (void __user *)((unsigned long)arg->arg),
++ sizeof(rar_handler));
++ if (ret)
++ break;
++
++#if defined(CONFIG_MRST_RAR_HANDLER)
++ rar_buf.info.handle = (__u32)rar_handler;
++ rar_buf.bus_address = (dma_addr_t)dev_priv->rar_region_start;
++ rar_status = 1;
++
++ rar_status = rar_handle_to_bus(&rar_buf, 1);
++ if (rar_status != 1) {
++ DRM_ERROR("MSVDX:rar_handle_to_bus failed\n");
++ ret = -1;
++ break;
++ }
++ rar_status = rar_release(&rar_buf, 1);
++ if (rar_status != 1)
++ DRM_ERROR("MSVDX:rar_release failed\n");
++
++ offset = (uint32_t) rar_buf.bus_address - dev_priv->rar_region_start;
++ PSB_DEBUG_GENERAL("MSVDX:RAR handler %p, bus address=0x%08x,"
++ "RAR region=0x%08x\n",
++ rar_handler,
++ (uint32_t)rar_buf.bus_address,
++ dev_priv->rar_region_start);
++#endif
++ ret = copy_to_user((void __user *)((unsigned long)arg->value),
++ &offset,
++ sizeof(offset));
++ break;
++ case LNC_VIDEO_FRAME_SKIP:
++ if(IS_MRST(dev))
++ ret = lnc_video_frameskip(dev, arg->value);
++ else if(IS_MDFLD(dev)) /* Medfield should not call it */
++ ret = -EFAULT;
++ break;
++ case LNC_VIDEO_DEVICE_INFO:
++ device_info = 0xffff & dev_priv->video_device_fuse;
++ device_info |= (0xffff & dev->pci_device) << 16;
++
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &device_info, sizeof(device_info));
++ break;
++ case LNC_VIDEO_CORE_NUM: {
++ /* ret = pnw_video_get_core_num(dev, arg->value); */
++ int n = 2;
++ if(IS_MRST(dev))
++ ret = -EFAULT;
++ else if(IS_MDFLD(dev)) /* Medfield has two cores for encode */
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &n, sizeof(unsigned long));
++ }
++ break;
++
++ default:
++ ret = -EFAULT;
++ break;
++ }
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++inline int psb_try_power_down_msvdx(struct drm_device *dev)
++{
++ ospm_apm_power_down_msvdx(dev);
++ return 0;
++}
++
++int psb_msvdx_save_context(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int offset = 0;
++
++ msvdx_priv->msvdx_needs_reset = 1;
++
++ for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
++ msvdx_priv->vec_local_mem_data[offset] =
++ PSB_RMSVDX32(VEC_LOCAL_MEM_OFFSET + offset * 4);
++
++ msvdx_priv->vec_local_mem_saved = 1;
++
++ return 0;
++}
++
++int psb_msvdx_restore_context(struct drm_device *dev)
++{
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_msvdx.h
+@@ -0,0 +1,785 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_MSVDX_H_
++#define _PSB_MSVDX_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#if defined(CONFIG_MRST_RAR_HANDLER)
++#include "../../memrar/memrar.h"
++#endif
++
++extern int drm_msvdx_pmpolicy;
++
++typedef enum
++{
++ PSB_DMAC_BSWAP_NO_SWAP = 0x0, //!< No byte swapping will be performed.
++ PSB_DMAC_BSWAP_REVERSE = 0x1, //!< Byte order will be reversed.
++
++} DMAC_eBSwap;
++
++typedef enum
++{
++ PSB_DMAC_DIR_MEM_TO_PERIPH = 0x0, //!< Data from memory to peripheral.
++ PSB_DMAC_DIR_PERIPH_TO_MEM = 0x1, //!< Data from peripheral to memory.
++
++} DMAC_eDir;
++
++typedef enum
++{
++ PSB_DMAC_ACC_DEL_0 = 0x0, //!< Access delay zero clock cycles
++ PSB_DMAC_ACC_DEL_256 = 0x1, //!< Access delay 256 clock cycles
++ PSB_DMAC_ACC_DEL_512 = 0x2, //!< Access delay 512 clock cycles
++ PSB_DMAC_ACC_DEL_768 = 0x3, //!< Access delay 768 clock cycles
++ PSB_DMAC_ACC_DEL_1024 = 0x4, //!< Access delay 1024 clock cycles
++ PSB_DMAC_ACC_DEL_1280 = 0x5, //!< Access delay 1280 clock cycles
++ PSB_DMAC_ACC_DEL_1536 = 0x6, //!< Access delay 1536 clock cycles
++ PSB_DMAC_ACC_DEL_1792 = 0x7, //!< Access delay 1792 clock cycles
++
++} DMAC_eAccDel;
++
++typedef enum
++{
++ PSB_DMAC_INCR_OFF = 0, //!< Static peripheral address.
++ PSB_DMAC_INCR_ON = 1 //!< Incrementing peripheral address.
++
++} DMAC_eIncr;
++
++typedef enum
++{
++ PSB_DMAC_BURST_0 = 0x0, //!< burst size of 0
++ PSB_DMAC_BURST_1 = 0x1, //!< burst size of 1
++ PSB_DMAC_BURST_2 = 0x2, //!< burst size of 2
++ PSB_DMAC_BURST_3 = 0x3, //!< burst size of 3
++ PSB_DMAC_BURST_4 = 0x4, //!< burst size of 4
++ PSB_DMAC_BURST_5 = 0x5, //!< burst size of 5
++ PSB_DMAC_BURST_6 = 0x6, //!< burst size of 6
++ PSB_DMAC_BURST_7 = 0x7, //!< burst size of 7
++
++} DMAC_eBurst;
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset,
++ uint32_t value,
++ uint32_t enable);
++
++IMG_BOOL psb_msvdx_interrupt(IMG_VOID *pvData);
++
++int psb_msvdx_init(struct drm_device *dev);
++int psb_msvdx_uninit(struct drm_device *dev);
++int psb_msvdx_reset(struct drm_psb_private *dev_priv);
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle);
++int psb_setup_fw(struct drm_device *dev);
++int psb_check_msvdx_idle(struct drm_device *dev);
++int psb_wait_msvdx_idle(struct drm_device *dev);
++int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++int psb_msvdx_save_context(struct drm_device *dev);
++int psb_msvdx_restore_context(struct drm_device *dev);
++
++bool
++psb_host_second_pass(struct drm_device *dev,
++ uint32_t ui32OperatingModeCmd,
++ void *pvParamBase,
++ uint32_t PicWidthInMbs,
++ uint32_t FrameHeightInMbs,
++ uint32_t ui32DeblockSourceY,
++ uint32_t ui32DeblockSourceUV);
++
++/* Non-Optimal Invalidation is not default */
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++
++#define FW_VA_RENDER_HOST_INT 0x00004000
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
++ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define POULSBO_D0 0x5
++#define POULSBO_D1 0x6
++#define PSB_REVID_OFFSET 0x8
++
++#define MTX_CODE_BASE (0x80900000)
++#define MTX_DATA_BASE (0x82880000)
++#define PC_START_ADDRESS (0x80900000)
++
++#define MTX_CORE_CODE_MEM (0x10)
++#define MTX_CORE_DATA_MEM (0x18)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
++ (0x00010000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
++ (0x00100000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
++ (0x01000000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
++ (0x10000000)
++
++#define clk_enable_all \
++(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define clk_enable_auto \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define msvdx_sw_reset_all \
++(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
++
++#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
++ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
++#define MTX_PC MTX_INTERNAL_REG(0, 5)
++
++#define RENDEC_A_SIZE (4 * 1024 * 1024)
++#define RENDEC_B_SIZE (1024 * 1024)
++
++#define MEMIO_READ_FIELD(vpMem, field) \
++ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & field##_MASK) >> field##_SHIFT))
++
++#define MEMIO_WRITE_FIELD(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & (field##_TYPE)~field##_MASK) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
++
++#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
++
++#define REGIO_READ_FIELD(reg_val, reg, field) \
++ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
++
++#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) & ~(reg##_##field##_MASK)) | \
++ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
++
++#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
++
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
++ (0x00000001)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000002)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000004)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
++ (0x00000008)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
++ (0x00000010)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
++ (0x00000020)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
++ (0x00000040)
++
++#define clk_enable_all \
++ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++/* MTX registers */
++#define MSVDX_MTX_ENABLE (0x0000)
++#define MSVDX_MTX_KICKI (0x0088)
++#define MSVDX_MTX_KICK (0x0080)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
++#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
++#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
++#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
++#define MSVDX_MTX_SOFT_RESET (0x0200)
++#define MTX_CORE_CR_MTX_SYSC_CDMAS0_OFFSET (0x0348)
++#define MTX_CORE_CR_MTX_SYSC_CDMAA_OFFSET (0x0344)
++#define MTX_CORE_CR_MTX_SYSC_CDMAT_OFFSET (0x0350)
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_OFFSET (0x0340)
++
++/* MSVDX registers */
++#define MSVDX_CONTROL (0x0600)
++#define MSVDX_INTERRUPT_CLEAR (0x060C)
++#define MSVDX_INTERRUPT_STATUS (0x0608)
++#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
++#define MSVDX_CORE_REV (0x0640)
++#define MSVDX_MMU_CONTROL0 (0x0680)
++#define MSVDX_MMU_MEM_REQ (0x06D0)
++#define MSVDX_MTX_RAM_BANK (0x06F0)
++#define MSVDX_MTX_DEBUG MSVDX_MTX_RAM_BANK
++#define MSVDX_MAN_CLK_ENABLE (0x0620)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_OFFSET (0x0600)
++#define MSVDX_CORE_CR_MMU_BANK_INDEX_OFFSET (0x0688)
++#define MSVDX_CORE_CR_MMU_DIR_LIST_BASE_OFFSET (0x0694)
++#define MSVDX_CORE_CR_MMU_CONTROL0_OFFSET MSVDX_MMU_CONTROL0
++
++/* RENDEC registers */
++#define MSVDX_RENDEC_CONTROL0 (0x0868)
++#define MSVDX_RENDEC_CONTROL1 (0x086C)
++#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
++#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
++#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
++#define MSVDX_RENDEC_READ_DATA (0x0898)
++#define MSVDX_RENDEC_CONTEXT0 (0x0950)
++#define MSVDX_RENDEC_CONTEXT1 (0x0954)
++#define MSVDX_RENDEC_CONTEXT2 (0x0958)
++#define MSVDX_RENDEC_CONTEXT3 (0x095C)
++#define MSVDX_RENDEC_CONTEXT4 (0x0960)
++#define MSVDX_RENDEC_CONTEXT5 (0x0964)
++
++/* DMAC registers */
++#define DMAC_DMAC_SETUP_OFFSET (0x0500)
++#define DMAC_DMAC_COUNT_OFFSET (0x0504)
++#define DMAC_DMAC_PERIPH_OFFSET (0x0508)
++#define DMAC_DMAC_IRQ_STAT_OFFSET (0x050C)
++#define DMAC_DMAC_PERIPHERAL_ADDR_OFFSET (0x0514)
++
++/* DMAC control */
++#define PSB_DMAC_VALUE_COUNT(BSWAP,PW,DIR,PERIPH_INCR,COUNT) \
++ \
++ (((BSWAP) & DMAC_DMAC_COUNT_BSWAP_LSBMASK) << DMAC_DMAC_COUNT_BSWAP_SHIFT) | \
++ (((PW) & DMAC_DMAC_COUNT_PW_LSBMASK) << DMAC_DMAC_COUNT_PW_SHIFT) | \
++ (((DIR) & DMAC_DMAC_COUNT_DIR_LSBMASK) << DMAC_DMAC_COUNT_DIR_SHIFT) | \
++ (((PERIPH_INCR) & DMAC_DMAC_COUNT_PI_LSBMASK) << DMAC_DMAC_COUNT_PI_SHIFT) | \
++ (((COUNT) & DMAC_DMAC_COUNT_CNT_LSBMASK) << DMAC_DMAC_COUNT_CNT_SHIFT)
++
++#define PSB_DMAC_VALUE_PERIPH_PARAM(ACC_DEL,INCR,BURST) \
++ \
++ (((ACC_DEL) & DMAC_DMAC_PERIPH_ACC_DEL_LSBMASK) << DMAC_DMAC_PERIPH_ACC_DEL_SHIFT) | \
++ (((INCR) & DMAC_DMAC_PERIPH_INCR_LSBMASK) << DMAC_DMAC_PERIPH_INCR_SHIFT) | \
++ (((BURST) & DMAC_DMAC_PERIPH_BURST_LSBMASK) << DMAC_DMAC_PERIPH_BURST_SHIFT)
++
++
++/* CMD */
++#define MSVDX_CMDS_END_SLICE_PICTURE (0x1404)
++
++/*
++ * This defines the MSVDX communication buffer
++ */
++#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
++/*!< Host buffer size (in 32-bit words) */
++#define NUM_WORDS_HOST_BUF (100)
++/*!< MTX buffer size (in 32-bit words) */
++#define NUM_WORDS_MTX_BUF (100)
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++
++#define MSVDX_COMMS_AREA_ADDR (0x02fe0)
++
++#define MSVDX_COMMS_CORE_WTD (MSVDX_COMMS_AREA_ADDR - 0x08)
++#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
++#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
++#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
++#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
++#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
++#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
++#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
++#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
++#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
++#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
++#define MSVDX_COMMS_TO_MTX_BUF \
++ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
++
++/*
++#define MSVDX_COMMS_AREA_END \
++ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
++*/
++#define MSVDX_COMMS_AREA_END 0x03000
++
++#if (MSVDX_COMMS_AREA_END != 0x03000)
++#error
++#endif
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
++
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
++
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
++
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
++
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
++
++#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_MASK (0x00000004)
++#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_LSBMASK (0x00000001)
++#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_SHIFT (2)
++
++#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_MASK (0x00000003)
++#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_LSBMASK (0x00000003)
++#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_SHIFT (0)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
++
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
++
++#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_MASK (0x00000300)
++#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_SHIFT (8)
++
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_BURSTSIZE_MASK (0x07000000)
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_BURSTSIZE_SHIFT (24)
++
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_RNW_MASK (0x00020000)
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_RNW_SHIFT (17)
++
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_ENABLE_MASK (0x00010000)
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_ENABLE_SHIFT (16)
++
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_LENGTH_MASK (0x0000FFFF)
++#define MTX_CORE_CR_MTX_SYSC_CDMAC_LENGTH_SHIFT (0)
++
++#define MSVDX_CORE_CR_MSVDX_CONTROL_DMAC_CH0_SELECT_MASK (0x00001000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_DMAC_CH0_SELECT_SHIFT (12)
++
++#define MSVDX_CORE_CR_MMU_CONTROL0_CR_MMU_INVALDC_MASK (0x00000008)
++#define MSVDX_CORE_CR_MMU_CONTROL0_CR_MMU_INVALDC_SHIFT (3)
++
++#define DMAC_DMAC_COUNT_BSWAP_LSBMASK (0x00000001)
++#define DMAC_DMAC_COUNT_BSWAP_SHIFT (30)
++
++#define DMAC_DMAC_COUNT_PW_LSBMASK (0x00000003)
++#define DMAC_DMAC_COUNT_PW_SHIFT (27)
++
++#define DMAC_DMAC_COUNT_DIR_LSBMASK (0x00000001)
++#define DMAC_DMAC_COUNT_DIR_SHIFT (26)
++
++#define DMAC_DMAC_COUNT_PI_LSBMASK (0x00000003)
++#define DMAC_DMAC_COUNT_PI_SHIFT (24)
++
++#define DMAC_DMAC_COUNT_CNT_LSBMASK (0x0000FFFF)
++#define DMAC_DMAC_COUNT_CNT_SHIFT (0)
++
++#define DMAC_DMAC_PERIPH_ACC_DEL_LSBMASK (0x00000007)
++#define DMAC_DMAC_PERIPH_ACC_DEL_SHIFT (29)
++
++#define DMAC_DMAC_PERIPH_INCR_LSBMASK (0x00000001)
++#define DMAC_DMAC_PERIPH_INCR_SHIFT (27)
++
++#define DMAC_DMAC_PERIPH_BURST_LSBMASK (0x00000007)
++#define DMAC_DMAC_PERIPH_BURST_SHIFT (24)
++
++#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_MASK (0x007FFFFF)
++#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_LSBMASK (0x007FFFFF)
++#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_SHIFT (0)
++
++#define DMAC_DMAC_COUNT_EN_MASK (0x00010000)
++#define DMAC_DMAC_COUNT_EN_SHIFT (16)
++
++#define DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK (0x00020000)
++
++/* Start of parser specific Host->MTX messages. */
++#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
++
++/* Start of parser specific MTX->Host messages. */
++#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
++
++#define FWRK_MSGID_PADDING (0)
++
++#define FWRK_GENMSG_SIZE_TYPE uint8_t
++#define FWRK_GENMSG_SIZE_MASK (0xFF)
++#define FWRK_GENMSG_SIZE_SHIFT (0)
++#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
++#define FWRK_GENMSG_ID_TYPE uint8_t
++#define FWRK_GENMSG_ID_MASK (0xFF)
++#define FWRK_GENMSG_ID_SHIFT (0)
++#define FWRK_GENMSG_ID_OFFSET (0x0001)
++#define FWRK_PADMSG_SIZE (2)
++
++/* Deblock CMD_ID */
++#define MSVDX_DEBLOCK_REG_SET 0x10000000
++#define MSVDX_DEBLOCK_REG_GET 0x20000000
++#define MSVDX_DEBLOCK_REG_POLLn 0x30000000
++#define MSVDX_DEBLOCK_REG_POLLx 0x40000000
++
++/* vec local MEM save/restore */
++#define VEC_LOCAL_MEM_BYTE_SIZE (4 * 1024)
++#define VEC_LOCAL_MEM_OFFSET 0x2000
++
++/* This type defines the framework specified message ids */
++enum {
++ /* ! Sent by the DXVA driver on the host to the mtx firmware.
++ */
++ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
++ VA_MSGID_RENDER,
++ VA_MSGID_DEBLOCK,
++ DXVA_MSGID_OOLD,
++
++ /* Test Messages */
++ VA_MSGID_TEST1,
++ VA_MSGID_TEST2,
++
++ /*! Sent by the mtx firmware to itself.
++ */
++ VA_MSGID_RENDER_MC_INTERRUPT,
++
++ /*! Sent by the DXVA firmware on the MTX to the host.
++ */
++ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
++ VA_MSGID_CMD_COMPLETED_BATCH,
++ VA_MSGID_DEBLOCK_REQUIRED,
++ VA_MSGID_TEST_RESPONCE,
++ VA_MSGID_ACK,
++
++ VA_MSGID_CMD_FAILED,
++ VA_MSGID_CMD_UNSUPPORTED,
++ VA_MSGID_CMD_HW_PANIC,
++};
++
++/* Deblock parameters */
++struct DEBLOCKPARAMS {
++ uint32_t handle; /* struct ttm_buffer_object * of REGIO */
++ uint32_t buffer_size;
++ uint32_t ctxid;
++
++ uint32_t *pPicparams;
++ struct ttm_bo_kmap_obj *regio_kmap; /* virtual of regio */
++ uint32_t pad[3];
++};
++
++struct psb_msvdx_deblock_queue {
++
++ struct list_head head;
++ struct DEBLOCKPARAMS dbParams;
++};
++
++/* MSVDX private structure */
++struct msvdx_private {
++ int msvdx_needs_reset;
++
++ unsigned int pmstate;
++
++ struct sysfs_dirent *sysfs_pmstate;
++
++ uint32_t msvdx_current_sequence;
++ uint32_t msvdx_last_sequence;
++
++ /*
++ *MSVDX Rendec Memory
++ */
++ struct ttm_buffer_object *ccb0;
++ uint32_t base_addr0;
++ struct ttm_buffer_object *ccb1;
++ uint32_t base_addr1;
++
++ struct ttm_buffer_object *fw;
++ uint32_t is_load;
++ uint32_t mtx_mem_size;
++
++ /*
++ *msvdx command queue
++ */
++ spinlock_t msvdx_lock;
++ struct mutex msvdx_mutex;
++ struct list_head msvdx_queue;
++ int msvdx_busy;
++ int msvdx_fw_loaded;
++ void *msvdx_fw;
++ int msvdx_fw_size;
++
++ struct list_head deblock_queue; /* deblock parameter list */
++
++ uint32_t msvdx_hw_busy;
++
++ uint32_t *vec_local_mem_data;
++ uint32_t vec_local_mem_size;
++ uint32_t vec_local_mem_saved;
++ uint32_t psb_dash_access_ctrl;
++};
++
++#define REGISTER(__group__, __reg__ ) (__group__##_##__reg__##_OFFSET)
++/* MSVDX Firmware interface */
++#define FW_VA_INIT_SIZE (8)
++#define FW_VA_DEBUG_TEST2_SIZE (4)
++
++/* FW_VA_DEBUG_TEST2 MSG_SIZE */
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
++
++/* FW_VA_DEBUG_TEST2 ID */
++#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
++#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
++
++/* FW_VA_CMD_FAILED FENCE_VALUE */
++#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_FAILED IRQSTATUS */
++#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
++#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
++#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FENCE_VALUE */
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FLAGS */
++#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
++#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
++#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED NO_TICKS */
++#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
++#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
++
++/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
++
++/* FW_VA_INIT GLOBAL_PTD */
++#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
++#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
++#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
++#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
++
++/* FW_VA_RENDER FENCE_VALUE */
++#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
++#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_RENDER MMUPTD */
++#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
++#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
++#define FW_VA_RENDER_MMUPTD_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_ADDRESS */
++#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
++#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
++#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_SIZE */
++#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
++#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
++#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
++#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
++
++/* FW_VA_RENDER FLAGS */
++#define FW_VA_RENDER_FLAGS_TYPE uint32_t
++#define FW_VA_RENDER_FLAGS_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_FLAGS_OFFSET (0x001C)
++#define FW_VA_RENDER_FLAGS_SHIFT (0)
++
++
++ /* FW_DXVA_DEBLOCK MSG_SIZE */
++#define FW_DXVA_DEBLOCK_MSG_SIZE_ALIGNMENT (1)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_TYPE uint8_t
++#define FW_DXVA_DEBLOCK_MSG_SIZE_MASK (0xFF)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_LSBMASK (0xFF)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_OFFSET (0x0000)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK ID */
++#define FW_DXVA_DEBLOCK_ID_ALIGNMENT (1)
++#define FW_DXVA_DEBLOCK_ID_TYPE uint8_t
++#define FW_DXVA_DEBLOCK_ID_MASK (0xFF)
++#define FW_DXVA_DEBLOCK_ID_LSBMASK (0xFF)
++#define FW_DXVA_DEBLOCK_ID_OFFSET (0x0001)
++#define FW_DXVA_DEBLOCK_ID_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK FENCE_VALUE */
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_ALIGNMENT (4)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_TYPE uint32_t
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_OFFSET (0x0008)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK MMUPTD */
++#define FW_DXVA_DEBLOCK_MMUPTD_ALIGNMENT (4)
++#define FW_DXVA_DEBLOCK_MMUPTD_TYPE uint32_t
++#define FW_DXVA_DEBLOCK_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_MMUPTD_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_MMUPTD_OFFSET (0x000C)
++#define FW_DXVA_DEBLOCK_MMUPTD_SHIFT (0)
++
++#define FW_DXVA_OOLD_SIZE (40)
++
++/* FW_DXVA_OOLD FENCE_VALUE */
++#define FW_DXVA_OOLD_FENCE_VALUE_ALIGNMENT (4)
++#define FW_DXVA_OOLD_FENCE_VALUE_TYPE uint32_t
++#define FW_DXVA_OOLD_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_DXVA_OOLD_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_OOLD_FENCE_VALUE_OFFSET (0x0008)
++#define FW_DXVA_OOLD_FENCE_VALUE_SHIFT (0)
++
++/* FW_DXVA_OOLD MMUPTD */
++#define FW_DXVA_OOLD_MMUPTD_ALIGNMENT (4)
++#define FW_DXVA_OOLD_MMUPTD_TYPE IMG_UINT32
++#define FW_DXVA_OOLD_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_DXVA_OOLD_MMUPTD_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_OOLD_MMUPTD_OFFSET (0x0004)
++#define FW_DXVA_OOLD_MMUPTD_SHIFT (0)
++
++#define FW_VA_LAST_SLICE_OF_EXT_DMA 0x00001000
++
++static inline void psb_msvdx_clearirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long mtx_int = 0;
++
++ PSB_DEBUG_IRQ("MSVDX: clear IRQ\n");
++
++ /* Clear MTX interrupt */
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++}
++
++
++static inline void psb_msvdx_disableirq(struct drm_device *dev)
++{
++ /* nothing */
++}
++
++
++static inline void psb_msvdx_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long enables = 0;
++
++ PSB_DEBUG_IRQ("MSVDX: enable MSVDX MTX IRQ\n");
++ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
++}
++
++#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state) \
++do { \
++ msvdx_priv->pmstate = new_state; \
++ sysfs_notify_dirent(msvdx_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("MSVDX: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup" \
++ : ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown" \
++ : "clockgated")); \
++} while (0)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_msvdxinit.c
+@@ -0,0 +1,1063 @@
++/**************************************************************************
++ * psb_msvdxinit.c
++ * MSVDX initialization and mtx-firmware upload
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include <linux/firmware.h>
++
++#define MSVDX_REG (dev_priv->msvdx_reg)
++#define UPLOAD_FW_BY_DMA 1
++#define STACKGUARDWORD ( 0x10101010 )
++uint8_t psb_rev_id;
++/*MSVDX FW header*/
++struct msvdx_fw {
++ uint32_t ver;
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++};
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset, uint32_t value, uint32_t enable)
++{
++ uint32_t tmp;
++ uint32_t poll_cnt = 10000;
++ while (poll_cnt) {
++ tmp = PSB_RMSVDX32(offset);
++ if (value == (tmp & enable)) /* All the bits are reset */
++ return 0; /* So exit */
++
++ /* Wait a bit */
++ DRM_UDELAY(1000);
++ poll_cnt--;
++ }
++ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
++ " expecting %08x (mask %08x), got %08x\n",
++ offset, value, enable, tmp);
++
++ return 1;
++}
++
++int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++ uint32_t mtx_int = 0;
++
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
++ /* Required value */
++ mtx_int,
++ /* Enabled bits */
++ mtx_int);
++
++ if (ret) {
++ DRM_ERROR("MSVDX: Error Mtx did not return"
++ " int within a resonable time\n");
++ return ret;
++ }
++
++ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
++
++ /* Got it so clear the bit */
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++
++ return ret;
++}
++
++void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
++ const uint32_t core_reg, const uint32_t val)
++{
++ uint32_t reg = 0;
++
++ /* Put data in MTX_RW_DATA */
++ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
++
++ /* DREADY is set to 0 and request a write */
++ reg = core_reg;
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_RNW, 0);
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_DREADY, 0);
++ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
++
++ psb_wait_for_register(dev_priv,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++}
++
++#if UPLOAD_FW_BY_DMA
++
++static void psb_get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int count = 0;
++ uint32_t reg_val = 0;
++
++ REGIO_WRITE_FIELD(reg_val, MSVDX_MTX_DEBUG, MTX_DBG_IS_SLAVE, 1);
++ REGIO_WRITE_FIELD(reg_val, MSVDX_MTX_DEBUG, MTX_DBG_GPIO_IN, 0x02);
++ PSB_WMSVDX32(reg_val, MSVDX_MTX_DEBUG);
++
++ do
++ {
++ reg_val = PSB_RMSVDX32(MSVDX_MTX_DEBUG);
++ count++;
++ } while (((reg_val & 0x18) != 0) && count < 50000);
++
++ if(count >= 50000)
++ PSB_DEBUG_GENERAL("TOPAZ: timeout in get_mtx_control_from_dash\n");
++
++ /* Save the access control register...*/
++ msvdx_priv->psb_dash_access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++}
++
++static void psb_release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* restore access control */
++ PSB_WMSVDX32(msvdx_priv->psb_dash_access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++ /* release bus */
++ PSB_WMSVDX32(0x4, MSVDX_MTX_DEBUG);
++}
++
++
++
++static void psb_upload_fw(struct drm_psb_private *dev_priv,
++ uint32_t address, const unsigned int words)
++{
++ uint32_t reg_val=0;
++ uint32_t cmd;
++ uint32_t uCountReg, offset, mmu_ptd;
++ uint32_t size = (words*4 ); /* byte count */
++ uint32_t dma_channel = 0; /* Setup a Simple DMA for Ch0 */
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ PSB_DEBUG_GENERAL("MSVDX: Upload firmware by DMA\n");
++ psb_get_mtx_control_from_dash(dev_priv);
++
++ // dma transfers to/from the mtx have to be 32-bit aligned and in multiples of 32 bits
++ PSB_WMSVDX32(address, REGISTER(MTX_CORE, CR_MTX_SYSC_CDMAA));
++
++ REGIO_WRITE_FIELD_LITE(reg_val, MTX_CORE_CR_MTX_SYSC_CDMAC, BURSTSIZE, 4 );// burst size in multiples of 64 bits (allowed values are 2 or 4)
++ REGIO_WRITE_FIELD_LITE(reg_val, MTX_CORE_CR_MTX_SYSC_CDMAC, RNW, 0); // false means write to mtx mem, true means read from mtx mem
++ REGIO_WRITE_FIELD_LITE(reg_val, MTX_CORE_CR_MTX_SYSC_CDMAC, ENABLE, 1); // begin transfer
++ REGIO_WRITE_FIELD_LITE(reg_val, MTX_CORE_CR_MTX_SYSC_CDMAC, LENGTH, words ); // This specifies the transfer size of the DMA operation in terms of 32-bit words
++ PSB_WMSVDX32(reg_val, REGISTER(MTX_CORE, CR_MTX_SYSC_CDMAC));
++
++ // toggle channel 0 usage between mtx and other msvdx peripherals
++ {
++ reg_val = PSB_RMSVDX32(REGISTER( MSVDX_CORE, CR_MSVDX_CONTROL));
++ REGIO_WRITE_FIELD(reg_val, MSVDX_CORE_CR_MSVDX_CONTROL, DMAC_CH0_SELECT, 0 );
++ PSB_WMSVDX32(reg_val, REGISTER( MSVDX_CORE, CR_MSVDX_CONTROL));
++ }
++
++
++ /* Clear the DMAC Stats */
++ PSB_WMSVDX32(0 , REGISTER(DMAC, DMAC_IRQ_STAT ) + dma_channel);
++
++ offset = msvdx_priv->fw->offset;
++ /* use bank 0 */
++ cmd = 0;
++ PSB_WMSVDX32(cmd, REGISTER(MSVDX_CORE, CR_MMU_BANK_INDEX));
++
++ /* Write PTD to mmu base 0*/
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ PSB_WMSVDX32(mmu_ptd, REGISTER( MSVDX_CORE, CR_MMU_DIR_LIST_BASE) + 0);
++
++ /* Invalidate */
++ reg_val = PSB_RMSVDX32(REGISTER(MSVDX_CORE, CR_MMU_CONTROL0));
++ reg_val &= ~0xf;
++ REGIO_WRITE_FIELD(reg_val, MSVDX_CORE_CR_MMU_CONTROL0, CR_MMU_INVALDC, 1 );
++ PSB_WMSVDX32(reg_val, REGISTER(MSVDX_CORE, CR_MMU_CONTROL0 ));
++
++ PSB_WMSVDX32(offset, REGISTER(DMAC, DMAC_SETUP ) + dma_channel);
++
++ /* Only use a single dma - assert that this is valid */
++ if( (size / 4 ) >= (1<<15) ) {
++ DRM_ERROR("psb: DMA size beyond limited, aboart firmware uploading\n");
++ return;
++ }
++
++
++ uCountReg = PSB_DMAC_VALUE_COUNT(PSB_DMAC_BSWAP_NO_SWAP,
++ 0, /* 32 bits */
++ PSB_DMAC_DIR_MEM_TO_PERIPH,
++ 0,
++ (size / 4 ) );
++ /* Set the number of bytes to dma*/
++ PSB_WMSVDX32(uCountReg, REGISTER(DMAC, DMAC_COUNT ) + dma_channel);
++
++ cmd = PSB_DMAC_VALUE_PERIPH_PARAM(PSB_DMAC_ACC_DEL_0, PSB_DMAC_INCR_OFF, PSB_DMAC_BURST_2);
++ PSB_WMSVDX32(cmd, REGISTER(DMAC, DMAC_PERIPH ) + dma_channel);
++
++ /* Set destination port for dma */
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, DMAC_DMAC_PERIPHERAL_ADDR, ADDR, MTX_CORE_CR_MTX_SYSC_CDMAT_OFFSET);
++ PSB_WMSVDX32(cmd, REGISTER(DMAC, DMAC_PERIPHERAL_ADDR ) + dma_channel);
++
++
++ /* Finally, rewrite the count register with the enable bit set*/
++ PSB_WMSVDX32(uCountReg | DMAC_DMAC_COUNT_EN_MASK, REGISTER(DMAC, DMAC_COUNT ) + dma_channel);
++
++ /* Wait for all to be done */
++ if(psb_wait_for_register(dev_priv,
++ REGISTER(DMAC, DMAC_IRQ_STAT ) + dma_channel,
++ DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK,
++ DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK )) {
++ psb_release_mtx_control_from_dash(dev_priv);
++ return;
++ }
++
++ /* Assert that the MTX DMA port is all done as well */
++ if(psb_wait_for_register(dev_priv, REGISTER(MTX_CORE, CR_MTX_SYSC_CDMAS0), 1, 1)) {
++ psb_release_mtx_control_from_dash(dev_priv);
++ return;
++ }
++
++ psb_release_mtx_control_from_dash(dev_priv);
++ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
++}
++
++#else
++
++static void psb_upload_fw(struct drm_psb_private *dev_priv,
++ const uint32_t data_mem, uint32_t ram_bank_size,
++ uint32_t address, const unsigned int words,
++ const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++
++ PSB_DEBUG_GENERAL("MSVDX: Upload firmware by register interface\n");
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ ram_id = data_mem + (address / ram_bank_size);
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ PSB_WMSVDX32(data[loop],
++ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++
++ /* Wait for MCMSTAT to become idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++ }
++ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++}
++
++#endif
++
++static int psb_verify_fw(struct drm_psb_private *dev_priv,
++ const uint32_t ram_bank_size,
++ const uint32_t data_mem, uint32_t address,
++ const uint32_t words, const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++ int ret = 0;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ uint32_t tmp;
++ ram_id = data_mem + (address / ram_bank_size);
++
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMR, 1);
++
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ /* Wait for MCMSTAT to become idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++ if (data[loop] != tmp) {
++ DRM_ERROR("psb: Firmware validation fails"
++ " at index=%08x\n", loop);
++ ret = 1;
++ break;
++ }
++ }
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ return ret;
++}
++
++static int msvdx_get_fw_bo(struct drm_device *dev,
++ const struct firmware **raw, uint8_t *name)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int rc, fw_size;
++ void *ptr = NULL;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ rc = request_firmware(raw, name, &dev->pdev->dev);
++ if (rc < 0) {
++ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
++ name, rc);
++ return 1;
++ }
++
++ if ((*raw)->size < sizeof(struct msvdx_fw)) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return 1;
++ }
++
++ ptr = (void *) ((*raw))->data;
++
++ if (!ptr) {
++ DRM_ERROR("MSVDX: Failed to load %s\n", name);
++ return 1;
++ }
++
++ /* another sanity check... */
++ fw_size = sizeof(struct msvdx_fw) +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
++ if ((*raw)->size != fw_size) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return 1;
++ }
++
++ rc = ttm_bo_kmap(msvdx_priv->fw, 0, (msvdx_priv->fw)->num_pages, &tmp_kmap);
++ if (rc) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", rc);
++ ttm_bo_unref(&msvdx_priv->fw);
++ return 1;
++ }
++ else {
++ uint32_t *last_word;
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0xcdcdcdcd, msvdx_priv->mtx_mem_size);
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem),
++ ptr + sizeof(struct msvdx_fw),
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size);
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem) + (((struct msvdx_fw *) ptr)->data_location - 0x82880000),
++ (void *)ptr + sizeof(struct msvdx_fw) + sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size,
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size);
++
++ last_word = (uint32_t *) (ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem) + msvdx_priv->mtx_mem_size -4);
++ /* Write a known value to last word in mtx memory */
++ /* Useful for detection of stack overrun */
++ *last_word = STACKGUARDWORD;
++ }
++ ttm_bo_kunmap(&tmp_kmap);
++ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
++ PSB_DEBUG_GENERAL("MSVDX: Load firmware into BO successfully\n");
++ release_firmware(*raw);
++ return rc;
++}
++
++
++static uint32_t *msvdx_get_fw(struct drm_device *dev,
++ const struct firmware **raw, uint8_t *name)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int rc, fw_size;
++ int *ptr = NULL;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ rc = request_firmware(raw, name, &dev->pdev->dev);
++ if (rc < 0) {
++ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
++ name, rc);
++ return NULL;
++ }
++
++ if ((*raw)->size < sizeof(struct msvdx_fw)) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++
++ ptr = (int *) ((*raw))->data;
++
++ if (!ptr) {
++ DRM_ERROR("MSVDX: Failed to load %s\n", name);
++ return NULL;
++ }
++
++ /* another sanity check... */
++ fw_size = sizeof(struct msvdx_fw) +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
++ if ((*raw)->size != fw_size) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++ msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL);
++ if (msvdx_priv->msvdx_fw == NULL)
++ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
++ else {
++ memcpy(msvdx_priv->msvdx_fw, ptr, fw_size);
++ msvdx_priv->msvdx_fw_size = fw_size;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
++ release_firmware(*raw);
++
++ return msvdx_priv->msvdx_fw;
++}
++
++int psb_setup_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++
++ uint32_t ram_bank_size;
++ struct msvdx_fw *fw;
++ uint32_t *fw_ptr = NULL;
++ uint32_t *text_ptr = NULL;
++ uint32_t *data_ptr = NULL;
++ const struct firmware *raw = NULL;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* todo : Assert the clock is on - if not turn it on to upload code */
++ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* Reset MTX */
++ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
++ MSVDX_MTX_SOFT_RESET);
++
++ /* Initialises Communication control area to 0 */
++/*
++ if (psb_rev_id >= POULSBO_D1) {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
++ " or later revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
++ " or earlier revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ }
++*/
++
++ if(IS_MDFLD(dev))
++ PSB_WMSVDX32(5, MSVDX_COMMS_CORE_WTD);
++ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
++ PSB_WMSVDX32(0, MSVDX_COMMS_OFFSET_FLAGS);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ /* read register bank size */
++ {
++ uint32_t bank_size, reg;
++ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
++ bank_size =
++ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
++ CR_MTX_RAM_BANK_SIZE);
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
++ ram_bank_size);
++
++ /* if FW already loaded from storage */
++ if (msvdx_priv->msvdx_fw)
++ fw_ptr = msvdx_priv->msvdx_fw;
++ else {
++ if(IS_MRST(dev)) {
++ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
++ PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd\n");
++ }
++ else if (IS_MDFLD(dev)) {
++ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw_mfld.bin");
++ PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw_mfld.bin by udevd\n");
++ }
++ else
++ DRM_ERROR("MSVDX:HW is neither mrst nor mfld\n");
++ }
++
++ if (!fw_ptr) {
++ DRM_ERROR("MSVDX:load msvdx_fw.bin failed,is udevd running?\n");
++ ret = 1;
++ goto out;
++ }
++
++ if (!msvdx_priv->is_load) {/* Load firmware into BO */
++ PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd into BO\n");
++ if(IS_MRST(dev))
++ ret = msvdx_get_fw_bo(dev, &raw, "msvdx_fw.bin");
++ else if(IS_MDFLD(dev))
++ ret = msvdx_get_fw_bo(dev, &raw, "msvdx_fw_mfld.bin");
++ else
++ DRM_ERROR("MSVDX:HW is neither mrst nor mfld\n");
++ msvdx_priv->is_load = 1;
++ }
++
++
++ fw = (struct msvdx_fw *) fw_ptr;
++ if (fw->ver != 0x02) {
++ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
++ "got version=%02x expected version=%02x\n",
++ fw->ver, 0x02);
++ ret = 1;
++ goto out;
++ }
++
++ text_ptr =
++ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
++ data_ptr = text_ptr + fw->text_size;
++
++ if (fw->text_size == 2858)
++ PSB_DEBUG_GENERAL(
++ "MSVDX: FW ver 1.00.10.0187 of SliceSwitch variant\n");
++ else if (fw->text_size == 3021)
++ PSB_DEBUG_GENERAL(
++ "MSVDX: FW ver 1.00.10.0187 of FrameSwitch variant\n");
++ else if (fw->text_size == 2841)
++ PSB_DEBUG_GENERAL("MSVDX: FW ver 1.00.10.0788\n");
++ else if (fw->text_size == 3147)
++ PSB_DEBUG_GENERAL("MSVDX: FW ver BUILD_DXVA_FW1.00.10.1042 of SliceSwitch variant\n");
++ else if (fw->text_size == 3097)
++ PSB_DEBUG_GENERAL("MSVDX: FW ver BUILD_DXVA_FW1.00.10.0963.02.0011 of FrameSwitch variant\n");
++ else
++ PSB_DEBUG_GENERAL("MSVDX: FW ver unknown\n");
++
++
++ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
++ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
++ fw->data_location);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
++ *text_ptr);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
++ *data_ptr);
++
++ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
++#if UPLOAD_FW_BY_DMA
++ psb_upload_fw(dev_priv, 0, msvdx_priv->mtx_mem_size/4);
++#else
++ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
++ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
++ text_ptr);
++ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
++ fw->data_location - MTX_DATA_BASE, fw->data_size,
++ data_ptr);
++#endif
++#if 0
++ /* todo : Verify code upload possibly only in debug */
++ ret = psb_verify_fw(dev_priv, ram_bank_size,
++ MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_CODE_BASE,
++ fw->text_size, text_ptr);
++ if (ret) {
++ /* Firmware code upload failed */
++ ret = 1;
++ goto out;
++ }
++
++ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
++ fw->data_location - MTX_DATA_BASE,
++ fw->data_size, data_ptr);
++ if (ret) {
++ /* Firmware data upload failed */
++ ret = 1;
++ goto out;
++ }
++#else
++ (void)psb_verify_fw;
++#endif
++ /* -- Set starting PC address */
++ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
++
++ /* -- Turn on the thread */
++ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
++
++ /* Wait for the signature value to be written back */
++ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
++ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
++ 0xffffffff /* Enabled bits */);
++ if (ret) {
++ DRM_ERROR("MSVDX: firmware fails to initialize.\n");
++ goto out;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
++ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
++ MSVDX_COMMS_AREA_ADDR);
++#if 0
++
++ /* Send test message */
++ {
++ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
++
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
++ FW_VA_DEBUG_TEST2_SIZE);
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
++ VA_MSGID_TEST2);
++
++ ret = psb_mtx_send(dev_priv, msg_buf);
++ if (ret) {
++ DRM_ERROR("psb: MSVDX sending fails.\n");
++ goto out;
++ }
++
++ /* Wait for Mtx to ack this message */
++ psb_poll_mtx_irq(dev_priv);
++
++ }
++#endif
++out:
++
++ return ret;
++}
++
++
++static void psb_free_ccb(struct ttm_buffer_object **ccb)
++{
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++}
++
++/**
++ * Reset chip and disable interrupts.
++ * Return 0 success, 1 failure
++ */
++int psb_msvdx_reset(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++
++ if(IS_PENWELL(dev_priv->dev)) {
++ int loop;
++ /* Enable Clocks */
++ PSB_DEBUG_GENERAL("Enabling clocks\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* Always pause the MMU as the core may be still active when resetting. It is very bad to have memory
++ activity at the same time as a reset - Very Very bad */
++ PSB_WMSVDX32(2, MSVDX_MMU_CONTROL0);
++
++ for(loop = 0; loop < 50; loop++)
++ ret = psb_wait_for_register(dev_priv, MSVDX_MMU_MEM_REQ, 0,
++ 0xff);
++ if(ret)
++ return ret;
++ }
++ /* Issue software reset */
++ /* PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL); */
++ PSB_WMSVDX32(MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK, MSVDX_CONTROL);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
++ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
++
++ if (!ret) {
++ /* Clear interrupt enabled flag */
++ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
++
++ /* Clear any pending interrupt flags */
++ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
++ }
++
++ /* mutex_destroy(&msvdx_priv->msvdx_mutex); */
++
++ return ret;
++}
++
++static int psb_allocate_ccb(struct drm_device *dev,
++ struct ttm_buffer_object **ccb,
++ uint32_t *base_addr, unsigned long size)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++
++ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
++
++ ret = ttm_buffer_object_create(bdev, size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
++ NULL, ccb);
++ if (ret) {
++ DRM_ERROR("MSVDX:failed to allocate CCB.\n");
++ *ccb = NULL;
++ return 1;
++ }
++
++ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++ return 1;
++ }
++/*
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ RENDEC_A_SIZE);
++*/
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ size);
++ ttm_bo_kunmap(&tmp_kmap);
++
++ *base_addr = (*ccb)->offset;
++ return 0;
++}
++
++static ssize_t psb_msvdx_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct msvdx_private *msvdx_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ msvdx_priv = dev_priv->msvdx_private;
++ pmstate = msvdx_priv->pmstate;
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, flags);
++ ret = snprintf(buf, 64, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup"
++ : ((pmstate == PSB_PMSTATE_POWERDOWN) ? "powerdown"
++ : "clockgated"));
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL);
++
++int psb_msvdx_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t clk_gate_ctrl = clk_enable_all; */
++ uint32_t cmd;
++ int ret;
++ struct msvdx_private *msvdx_priv;
++
++ if (!dev_priv->msvdx_private) {
++ msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL);
++ if (msvdx_priv == NULL)
++ goto err_exit;
++
++ dev_priv->msvdx_private = msvdx_priv;
++ memset(msvdx_priv, 0, sizeof(struct msvdx_private));
++
++ /* get device --> drm_device --> drm_psb_private --> msvdx_priv
++ * for psb_msvdx_pmstate_show: msvdx_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ /* pci_set_drvdata(dev->pdev, dev); */
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_msvdx_pmstate))
++ DRM_ERROR("MSVDX: could not create sysfs file\n");
++ msvdx_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ NULL,
++#endif
++ "msvdx_pmstate");
++ }
++
++ msvdx_priv = dev_priv->msvdx_private;
++ if (!msvdx_priv->ccb0) { /* one for the first time */
++ /* Initialize msvdx command queueing */
++ INIT_LIST_HEAD(&msvdx_priv->msvdx_queue);
++ INIT_LIST_HEAD(&msvdx_priv->deblock_queue);
++ mutex_init(&msvdx_priv->msvdx_mutex);
++ spin_lock_init(&msvdx_priv->msvdx_lock);
++ /*figure out the stepping */
++ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
++ }
++
++ msvdx_priv->vec_local_mem_size = VEC_LOCAL_MEM_BYTE_SIZE;
++ if (!msvdx_priv->vec_local_mem_data) {
++ msvdx_priv->vec_local_mem_data =
++ kmalloc(msvdx_priv->vec_local_mem_size, GFP_KERNEL);
++ memset(msvdx_priv->vec_local_mem_data, 0, msvdx_priv->vec_local_mem_size);
++ }
++
++ msvdx_priv->msvdx_busy = 0;
++ msvdx_priv->msvdx_hw_busy = 1;
++
++ /* Enable Clocks */
++ PSB_DEBUG_GENERAL("Enabling clocks\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++
++ /* Enable MMU by removing all bypass bits */
++ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
++
++ /* move firmware loading to the place receiving first command buffer */
++
++ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
++ /* Allocate device virtual memory as required by rendec.... */
++ if (!msvdx_priv->ccb0) {
++ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb0,
++ &msvdx_priv->base_addr0,
++ RENDEC_A_SIZE);
++ if (ret) {
++ PSB_DEBUG_GENERAL("Allocate Rendec A fail\n");
++ goto err_exit;
++ }
++ }
++
++ if (!msvdx_priv->ccb1) {
++ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb1,
++ &msvdx_priv->base_addr1,
++ RENDEC_B_SIZE);
++ if (ret)
++ goto err_exit;
++ }
++
++ if(!msvdx_priv->fw) {
++ uint32_t core_rev;
++
++ core_rev = PSB_RMSVDX32(MSVDX_CORE_REV);
++
++ if( (core_rev&0xffffff ) < 0x020000 )
++ msvdx_priv->mtx_mem_size = 16*1024;
++ else
++ msvdx_priv->mtx_mem_size = 32*1024;
++
++ PSB_DEBUG_INIT("MSVDX: MTX mem size is 0x%08xbytes allocate firmware BO size 0x%08x\n", msvdx_priv->mtx_mem_size,
++ msvdx_priv->mtx_mem_size + 4096);
++
++ ret = ttm_buffer_object_create(&dev_priv->bdev, msvdx_priv->mtx_mem_size + 4096, /* DMA may run over a page */
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &msvdx_priv->fw);
++
++ if (ret) {
++ PSB_DEBUG_GENERAL("Allocate firmware BO fail\n");
++ goto err_exit;
++ }
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
++ msvdx_priv->base_addr0, msvdx_priv->base_addr1);
++
++ PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
++ PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
++
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_DECODE_START_SIZE, 0);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_W, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_R, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_EXTERNAL_MEMORY, 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
++
++ cmd = 0x00101010;
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
++ 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
++
++ /* PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:defer firmware loading to the"
++ " place when receiving user space commands\n");
++
++ msvdx_priv->msvdx_fw_loaded = 0; /* need to load firmware */
++
++ psb_msvdx_clearirq(dev);
++ psb_msvdx_enableirq(dev);
++
++ if (IS_MSVDX(dev)) {
++ PSB_DEBUG_INIT("MSDVX:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
++ }
++
++ {
++ cmd = 0;
++ cmd = PSB_RMSVDX32(0x818); /* VEC_SHIFTREG_CONTROL */
++ REGIO_WRITE_FIELD(cmd,
++ VEC_SHIFTREG_CONTROL,
++ SR_MASTER_SELECT,
++ 1); /* Host */
++ PSB_WMSVDX32(cmd, 0x818);
++ }
++
++#if 0
++ ret = psb_setup_fw(dev);
++ if (ret)
++ goto err_exit;
++ /* Send Initialisation message to firmware */
++ if (0) {
++ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
++ FW_VA_INIT_SIZE);
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
++
++ /* Need to set this for all but A0 */
++ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
++ psb_get_default_pd_addr(dev_priv->mmu));
++
++ ret = psb_mtx_send(dev_priv, msg_init);
++ if (ret)
++ goto err_exit;
++
++ psb_poll_mtx_irq(dev_priv);
++ }
++#endif
++
++ return 0;
++
++err_exit:
++ DRM_ERROR("MSVDX: initialization failed\n");
++ if (msvdx_priv && msvdx_priv->ccb0)
++ psb_free_ccb(&msvdx_priv->ccb0);
++ if (msvdx_priv && msvdx_priv->ccb1)
++ psb_free_ccb(&msvdx_priv->ccb1);
++ kfree(dev_priv->msvdx_private);
++
++ return 1;
++}
++
++int psb_msvdx_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* Reset MSVDX chip */
++ psb_msvdx_reset(dev_priv);
++
++ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
++ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
++
++ if (NULL == msvdx_priv)
++ {
++ DRM_ERROR("MSVDX: psb_msvdx_uninit: msvdx_priv is NULL!\n");
++ return -1;
++ }
++
++ if (msvdx_priv->ccb0)
++ psb_free_ccb(&msvdx_priv->ccb0);
++ if (msvdx_priv->ccb1)
++ psb_free_ccb(&msvdx_priv->ccb1);
++ if (msvdx_priv->msvdx_fw)
++ kfree(msvdx_priv->msvdx_fw
++ );
++ if (msvdx_priv->vec_local_mem_data)
++ kfree(msvdx_priv->vec_local_mem_data);
++
++ if (msvdx_priv) {
++ /* pci_set_drvdata(dev->pdev, NULL); */
++ device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate);
++ sysfs_put(msvdx_priv->sysfs_pmstate);
++ msvdx_priv->sysfs_pmstate = NULL;
++
++ kfree(msvdx_priv);
++ dev_priv->msvdx_private = NULL;
++ }
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_powermgmt.c
+@@ -0,0 +1,890 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ */
++
++#include "psb_powermgmt.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <linux/mutex.h>
++#include "lnc_topaz_hw_reg.h"
++
++extern IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++extern IMG_UINT32 gui32MRSTMSVDXDeviceID;
++extern IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++struct drm_device *gpDrmDevice = NULL;
++static struct mutex g_ospm_mutex;
++static bool gbSuspendInProgress = false;
++static bool gbResumeInProgress = false;
++static int g_hw_power_status_mask;
++static atomic_t g_display_access_count;
++static atomic_t g_graphics_access_count;
++static atomic_t g_videoenc_access_count;
++static atomic_t g_videodec_access_count;
++
++void ospm_power_island_up(int hw_islands);
++void ospm_power_island_down(int hw_islands);
++static bool gbSuspended = false;
++
++void ospm_apm_power_down_msvdx(struct drm_device *dev)
++{
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ goto out;
++
++ if (atomic_read(&g_videodec_access_count))
++ goto out;
++ if (psb_check_msvdx_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ psb_msvdx_save_context(dev);
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
++
++void ospm_apm_power_down_topaz(struct drm_device *dev)
++{
++ return; /* todo for OSPM */
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ goto out;
++ if (atomic_read(&g_videoenc_access_count))
++ goto out;
++ if (lnc_check_topaz_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ lnc_topaz_save_mtx_state(dev);
++ lnc_unmap_topaz_reg(dev);
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
++/*
++ * ospm_power_init
++ *
++ * Description: Initialize this ospm power management module
++ */
++void ospm_power_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ gpDrmDevice = dev;
++
++ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
++ pci_read_config_dword(pci_root, 0xD4, &dev_priv->ospm_base);
++ dev_priv->ospm_base &= 0xffff;
++
++ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
++ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
++
++ gpDrmDevice = dev;
++ mutex_init(&g_ospm_mutex);
++ g_hw_power_status_mask = OSPM_ALL_ISLANDS;
++ atomic_set(&g_display_access_count, 0);
++ atomic_set(&g_graphics_access_count, 0);
++ atomic_set(&g_videoenc_access_count, 0);
++ atomic_set(&g_videodec_access_count, 0);
++
++#ifdef OSPM_STAT
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_on_time = 0;
++ dev_priv->gfx_off_time = 0;
++#endif
++}
++
++/*
++ * ospm_power_uninit
++ *
++ * Description: Uninitialize this ospm power management module
++ */
++void ospm_power_uninit(void)
++{
++ mutex_destroy(&g_ospm_mutex);
++}
++/*
++ * save_display_registers
++ *
++ * Description: We are going to suspend so save current display
++ * register state.
++ */
++static int save_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_crtc * crtc;
++ struct drm_connector * connector;
++ int i;
++
++ /* Display arbitration control + watermarks */
++ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
++ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
++ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
++ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
++ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
++ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
++ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
++ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
++
++ if (IS_MRST(dev)) {
++ /* Pipe & plane A info */
++ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
++ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
++ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
++ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
++ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
++ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
++ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
++ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
++ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
++ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
++ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
++ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
++ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
++ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
++ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
++ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
++ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
++ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
++
++ /*save cursor regs*/
++ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
++ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
++ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
++
++ /*save palette (gamma) */
++ for (i = 0; i < 256; i++)
++ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2));
++
++ /*save performance state*/
++ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
++
++ /* LVDS state */
++ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
++ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
++ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
++ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
++ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
++ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
++ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
++ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
++ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
++ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
++
++ /* HW overlay */
++ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
++ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++
++ } else { /*PSB*/
++ /*save crtc and output state*/
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if(drm_helper_crtc_in_use(crtc)) {
++ crtc->funcs->save(crtc);
++ }
++ }
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ connector->funcs->save(connector);
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++ }
++
++ /* Interrupt state */
++ /*
++ * Handled in psb_irq.c
++ */
++
++ return 0;
++}
++/*
++ * restore_display_registers
++ *
++ * Description: We are going to resume so restore display register state.
++ */
++static int restore_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_crtc * crtc;
++ struct drm_connector * connector;
++ unsigned long i, pp_stat;
++
++ /* Display arbitration + watermarks */
++ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
++ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
++ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
++ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
++ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
++ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
++ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
++ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
++
++ /*make sure VGA plane is off. it initializes to on after reset!*/
++ PSB_WVDC32(0x80000000, VGACNTRL);
++
++ if (IS_MRST(dev)) {
++ /* set the plls */
++ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
++ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
++ /* Actually enable it */
++ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
++ DRM_UDELAY(150);
++
++ /* Restore mode */
++ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
++ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
++ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
++ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
++ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
++ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
++ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
++ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
++
++ /*restore performance mode*/
++ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
++
++ /*enable the pipe*/
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++
++ /*set up the plane*/
++ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
++ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
++ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
++
++ /* Enable the plane */
++ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
++ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
++
++ /*Enable Cursor A*/
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
++
++ /* restore palette (gamma) */
++ /*DRM_UDELAY(50000); */
++ for (i = 0; i < 256; i++)
++ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2));
++
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
++ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
++ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
++ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
++ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
++ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
++ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
++
++ /*wait for cycle delay*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x08000000);
++
++ DRM_UDELAY(999);
++ /*wait for panel power up*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x10000000);
++
++ /* restore HW overlay */
++ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
++
++ } else { /*PSB*/
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if(drm_helper_crtc_in_use(crtc))
++ crtc->funcs->restore(crtc);
++ }
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ connector->funcs->restore(connector);
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++ }
++
++
++ /*Interrupt state*/
++ /*
++ * Handled in psb_irq.c
++ */
++
++ return 0;
++}
++/*
++ * powermgmt_suspend_display
++ *
++ * Description: Suspend the display hardware saving state and disabling
++ * as necessary.
++ */
++static void ospm_suspend_display(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int pp_stat;
++
++ if (!(g_hw_power_status_mask & OSPM_DISPLAY_ISLAND))
++ return;
++
++ printk(KERN_ALERT "ospm_suspend_display\n");
++
++ save_display_registers(dev);
++
++ /*shutdown the panel*/
++ PSB_WVDC32(0, PP_CONTROL);
++
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x80000000);
++
++ /*turn off the plane*/
++ PSB_WVDC32(0x58000000, DSPACNTR);
++ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
++ /*wait ~4 ticks*/
++ msleep(4);
++
++ /*turn off pipe*/
++ PSB_WVDC32(0x0, PIPEACONF);
++ /*wait ~8 ticks*/
++ msleep(8);
++
++ /*turn off PLLs*/
++ PSB_WVDC32(0, MRST_DPLL_A);
++
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++}
++#if 0
++/*
++ * powermgmt_suspend_graphics
++ *
++ * Description: Suspend the graphics hardware saving state and disabling
++ * as necessary.
++ */
++void ospm_suspend_graphics()
++{
++ if (!(g_hw_power_status_mask & OSPM_GRAPHICS_ISLAND))
++ return;
++
++// mutex_lock(&g_ospm_mutex);
++
++ gbSuspendInProgress = true;
++ printk(KERN_ALERT "ospm_suspend_graphics - SGX D0i3\n");
++
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ ospm_power_island_down(OSPM_GRAPHICS_ISLAND);
++
++// gbSuspendInProgress = false;
++
++// mutex_unlock(&g_ospm_mutex);
++}
++#endif
++/*
++ * ospm_resume_display
++ *
++ * Description: Resume the display hardware restoring state and enabling
++ * as necessary.
++ */
++static void ospm_resume_display(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ if (g_hw_power_status_mask & OSPM_DISPLAY_ISLAND)
++ return;
++
++ printk(KERN_ALERT "ospm_resume_display\n");
++
++ /* turn on the display power island */
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ pci_write_config_word(pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ /* Don't reinitialize the GTT as it is unnecessary. The gtt is
++ * stored in memory so it will automatically be restored. All
++ * we need to do is restore the PGETBL_CTL which we already do
++ * above.
++ */
++ /*psb_gtt_init(dev_priv->pg, 1);*/
++
++ restore_display_registers(dev);
++}
++#if 0
++/*
++ * ospm_suspend_pci
++ *
++ * Description: Suspend the pci device saving state and disabling
++ * as necessary.
++ */
++static void ospm_suspend_pci(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++ int bsm, vbt;
++
++ if (gbSuspended)
++ return;
++
++ printk(KERN_ALERT "ospm_suspend_pci\n");
++
++ pci_save_state(pdev);
++ pci_read_config_dword(pci_gfx_root, 0x5C, &bsm);
++ dev_priv->saveBSM = bsm;
++ pci_read_config_dword(pci_gfx_root, 0xFC, &vbt);
++ dev_priv->saveVBT = vbt;
++ pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
++ pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
++
++ pci_disable_device(pdev);
++ pci_set_power_state(pdev, PCI_D3hot);
++
++ gbSuspended = true;
++}
++
++/*
++ * ospm_resume_pci
++ *
++ * Description: Resume the pci device restoring state and enabling
++ * as necessary.
++ */
++static bool ospm_resume_pci(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++ int ret = 0;
++
++ if (!gbSuspended)
++ return true;
++
++ printk(KERN_ALERT "ospm_resume_pci\n");
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ pci_write_config_dword(pci_gfx_root, 0x5c, dev_priv->saveBSM);
++ pci_write_config_dword(pci_gfx_root, 0xFC, dev_priv->saveVBT);
++ /* retoring MSI address and data in PCIx space */
++ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
++ pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
++ ret = pci_enable_device(pdev);
++
++ if (ret != 0)
++ printk(KERN_ALERT "ospm_resume_pci: pci_enable_device failed: %d\n", ret);
++ else
++ gbSuspended = false;
++
++ return !gbSuspended;
++}
++#endif
++/*
++ * ospm_power_suspend
++ *
++ * Description: OSPM is telling our driver to suspend so save state
++ * and power down all hardware.
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ int ret = 0;
++ int graphics_access_count;
++ int videoenc_access_count;
++ int videodec_access_count;
++ int display_access_count;
++ struct drm_device *dev = pci_get_drvdata(pdev);
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (!gbSuspended) {
++ graphics_access_count = atomic_read(&g_graphics_access_count);
++ videoenc_access_count = atomic_read(&g_videoenc_access_count);
++ videodec_access_count = atomic_read(&g_videodec_access_count);
++ display_access_count = atomic_read(&g_display_access_count);
++
++ if (graphics_access_count ||
++ videoenc_access_count ||
++ videodec_access_count ||
++ display_access_count)
++ ret = -EBUSY;
++
++ if (!ret) {
++ gbSuspendInProgress = true;
++
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ ospm_suspend_display(gpDrmDevice);
++ PVRSRVDriverSuspend(dev, state);
++ gbSuspended = true;
++
++ gbSuspendInProgress = false;
++ } else {
++ printk(KERN_ALERT "ospm_power_suspend: device busy: graphics %d videoenc %d videodec %d display %d\n", graphics_access_count, videoenc_access_count, videodec_access_count, display_access_count);
++ }
++ }
++
++
++ mutex_unlock(&g_ospm_mutex);
++ return ret;
++}
++
++/*
++ * ospm_power_island_up
++ *
++ * Description: Restore power to the specified island(s) (powergating)
++ */
++void ospm_power_island_up(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_sts = 0;
++ u32 pwr_mask = 0;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ if (IS_MID(gpDrmDevice) &&
++ (hw_islands & (OSPM_GRAPHICS_ISLAND | OSPM_VIDEO_ENC_ISLAND |
++ OSPM_VIDEO_DEC_ISLAND))) {
++ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++ pwr_mask = 0;
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_OFF) {
++ dev_priv->gfx_off_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_on_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++
++ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
++ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ g_hw_power_status_mask |= hw_islands;
++}
++
++/*
++ * ospm_power_resume
++ */
++int ospm_power_resume(struct pci_dev *pdev)
++{
++ mutex_lock(&g_ospm_mutex);
++
++ printk(KERN_ALERT "FIXME-RAJESH: ospm_power_resume \n");
++
++ PVRSRVDriverResume(gpDrmDevice);
++ ospm_resume_display(gpDrmDevice->pdev);
++ psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++
++ gbSuspended = false;
++ mutex_unlock(&g_ospm_mutex);
++
++ return 0;
++}
++
++
++/*
++ * ospm_power_island_down
++ *
++ * Description: Cut power to the specified island(s) (powergating)
++ */
++void ospm_power_island_down(int islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_mask = 0;
++ u32 pwr_sts = 0;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ g_hw_power_status_mask &= ~islands;
++
++ if (islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_ON) {
++ dev_priv->gfx_on_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_OFF;
++ dev_priv->gfx_off_cnt++;
++ }
++ #endif
++ }
++ if (islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++ if (pwr_cnt) {
++ pwr_cnt |= inl(dev_priv->apm_base);
++ outl(pwr_cnt, dev_priv->apm_base);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (islands & OSPM_DISPLAY_ISLAND) {
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++}
++
++
++/*
++ * ospm_power_is_hw_on
++ *
++ * Description: do an instantaneous check for if the specified islands
++ * are on. Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall. Otherwise, use
++ * ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands)
++{
++ return ((g_hw_power_status_mask & hw_islands) == hw_islands) ? true:false;
++}
++
++/*
++ * ospm_power_using_hw_begin
++ *
++ * Description: Notify PowerMgmt module that you will be accessing the
++ * specified island's hw so don't power it off. If force_on is true,
++ * this will power on the specified island if it is off.
++ * Otherwise, this will return false and the caller is expected to not
++ * access the hw.
++ *
++ * NOTE *** If this is called from and interrupt handler or other atomic
++ * context, then it will return false if we are in the middle of a
++ * power state transition and the caller will be expected to handle that
++ * even if force_on is set to true.
++ */
++bool ospm_power_using_hw_begin(int hw_island, bool force_on)
++{
++ bool ret = true;
++ bool island_is_off = false;
++ bool b_atomic = (in_interrupt() || in_atomic());
++ struct pci_dev *pdev = gpDrmDevice->pdev;
++ IMG_UINT32 deviceID = 0;
++
++ if (!b_atomic)
++ mutex_lock(&g_ospm_mutex);
++
++ island_is_off = hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask);
++
++ if (b_atomic && (gbSuspendInProgress || gbResumeInProgress || gbSuspended) && force_on && island_is_off)
++ ret = false;
++
++ if (ret && island_is_off && !force_on)
++ ret = false;
++
++ if (ret && island_is_off && force_on) {
++ gbResumeInProgress = true;
++
++ if (ret) {
++ switch(hw_island)
++ {
++ case OSPM_DISPLAY_ISLAND:
++ deviceID = gui32MRSTDisplayDeviceID;
++ ospm_resume_display(pdev);
++ break;
++ case OSPM_GRAPHICS_ISLAND:
++ deviceID = gui32SGXDeviceID;
++ if (g_hw_power_status_mask & OSPM_GRAPHICS_ISLAND) {
++ /*printk(KERN_ALERT "FIXME-RAJESH: Powering Up SGX - already ON \n"); */
++ }
++ else {
++ if(gbSuspended){
++ /*printk(KERN_ALERT "FIXME-RAJESH: PowerOnSystemWithDevice \n"); */
++ PVRSRVDriverResume(gpDrmDevice);
++ } else {
++ ospm_power_island_up(OSPM_GRAPHICS_ISLAND);
++ psb_irq_preinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ psb_irq_postinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ }
++ }
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ /*
++ * PVR Active Power Management will reinitialise
++ * SGX before use.
++ */
++ if (IS_MRST(gpDrmDevice))
++ {
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_restore_mtx_state(gpDrmDevice);
++ }
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ /*
++ * PVR Active Power Management will reinitialise
++ * SGX before use.
++ */
++ if (IS_MRST(gpDrmDevice))
++ {
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ psb_msvdx_restore_context(gpDrmDevice);
++ }
++ break;
++
++ default:
++ break;
++ }
++
++ }
++
++ if (!ret)
++ printk(KERN_ALERT "ospm_power_using_hw_begin: forcing on %d failed\n", hw_island);
++
++ gbResumeInProgress = false;
++ }
++
++ if (ret) {
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_inc(&g_graphics_access_count);
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_inc(&g_videoenc_access_count);
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_inc(&g_videodec_access_count);
++ break;
++ case OSPM_DISPLAY_ISLAND:
++ atomic_inc(&g_display_access_count);
++ break;
++ }
++ }
++
++ if (!b_atomic)
++ mutex_unlock(&g_ospm_mutex);
++
++ return ret;
++}
++
++
++/*
++ * ospm_power_using_hw_end
++ *
++ * Description: Notify PowerMgmt module that you are done accessing the
++ * specified island's hw so feel free to power it off. Note that this
++ * function doesn't actually power off the islands.
++ */
++void ospm_power_using_hw_end(int hw_island)
++{
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_dec(&g_graphics_access_count);
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_dec(&g_videoenc_access_count);
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_dec(&g_videodec_access_count);
++ break;
++ case OSPM_DISPLAY_ISLAND:
++ atomic_dec(&g_display_access_count);
++ break;
++ }
++
++ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
++ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
++ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
++ WARN_ON(atomic_read(&g_display_access_count) < 0);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_powermgmt.h
+@@ -0,0 +1,85 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ */
++#ifndef _PSB_POWERMGMT_H_
++#define _PSB_POWERMGMT_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++//extern int psb_check_msvdx_idle(struct drm_device *dev);
++//extern int lnc_check_topaz_idle(struct drm_device *dev);
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, bool force_on);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++void ospm_suspend_graphics(void);
++#endif /*_PSB_POWERMGMT_H_*/
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_pvr_glue.c
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "psb_pvr_glue.h"
++
++/**
++ * FIXME: should NOT use these file under env/linux directly
++ */
++#include "mm.h"
++
++int psb_get_meminfo_by_handle(IMG_HANDLE hKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ PVRSRV_PER_PROCESS_DATA *psPerProc = IMG_NULL;
++ PVRSRV_ERROR eError;
++
++ psPerProc = PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID *)&psKernelMemInfo,
++ hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ DRM_ERROR("Cannot find kernel meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return -EINVAL;
++ }
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ DRM_DEBUG("Got Kernel MemInfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return 0;
++}
++
++IMG_UINT32 psb_get_tgid(void)
++{
++ return OSGetCurrentProcessIDKM();
++}
++
++int psb_get_pages_by_mem_handle(IMG_HANDLE hOSMemHandle, struct page ***pages)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ struct page **page_list;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ALLOC_PAGES) {
++ DRM_ERROR("MemArea type is not LINUX_MEM_AREA_ALLOC_PAGES\n");
++ return -EINVAL;
++ }
++
++ page_list = psLinuxMemArea->uData.sPageList.pvPageList;
++ if (!page_list) {
++ DRM_DEBUG("Page List is NULL\n");
++ return -ENOMEM;
++ }
++
++ *pages = page_list;
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_pvr_glue.h
+@@ -0,0 +1,26 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "psb_drv.h"
++#include "services_headers.h"
++
++extern int psb_get_meminfo_by_handle(IMG_HANDLE hKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++extern IMG_UINT32 psb_get_tgid(void);
++extern int psb_get_pages_by_mem_handle(IMG_HANDLE hOSMemHandle,
++ struct page ***pages);
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_reg.h
+@@ -0,0 +1,570 @@
++/**************************************************************************
++ *
++ * Copyright (c) (2005-2007) Imagination Technologies Limited.
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
++ *
++ **************************************************************************/
++
++#ifndef _PSB_REG_H_
++#define _PSB_REG_H_
++
++#define PSB_CR_CLKGATECTL 0x0000
++#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
++#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
++#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
++#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
++#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
++#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
++#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
++#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
++#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
++#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
++
++#define PSB_CR_CORE_ID 0x0010
++#define _PSB_CC_ID_ID_SHIFT (16)
++#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
++#define _PSB_CC_ID_CONFIG_SHIFT (0)
++#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
++
++#define PSB_CR_CORE_REVISION 0x0014
++#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
++#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
++#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
++#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
++#define _PSB_CC_REVISION_MINOR_SHIFT (8)
++#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
++#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
++#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
++
++#define PSB_CR_SOFT_RESET 0x0080
++#define _PSB_CS_RESET_TSP_RESET (1 << 6)
++#define _PSB_CS_RESET_ISP_RESET (1 << 5)
++#define _PSB_CS_RESET_USE_RESET (1 << 4)
++#define _PSB_CS_RESET_TA_RESET (1 << 3)
++#define _PSB_CS_RESET_DPM_RESET (1 << 2)
++#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
++#define _PSB_CS_RESET_BIF_RESET (1 << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
++
++#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
++
++#define PSB_CR_EVENT_STATUS2 0x0118
++
++#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
++#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
++
++#define PSB_CR_EVENT_STATUS 0x012C
++
++#define PSB_CR_EVENT_HOST_ENABLE 0x0130
++
++#define PSB_CR_EVENT_HOST_CLEAR 0x0134
++#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
++#define _PSB_CE_TA_DPM_FAULT (1 << 28)
++#define _PSB_CE_TWOD_COMPLETE (1 << 27)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
++#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
++#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
++#define _PSB_CE_SW_EVENT (1 << 14)
++#define _PSB_CE_TA_FINISHED (1 << 13)
++#define _PSB_CE_TA_TERMINATE (1 << 12)
++#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
++#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
++
++
++#define PSB_USE_OFFSET_MASK 0x0007FFFF
++#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
++#define PSB_CR_USE_CODE_BASE0 0x0A0C
++#define PSB_CR_USE_CODE_BASE1 0x0A10
++#define PSB_CR_USE_CODE_BASE2 0x0A14
++#define PSB_CR_USE_CODE_BASE3 0x0A18
++#define PSB_CR_USE_CODE_BASE4 0x0A1C
++#define PSB_CR_USE_CODE_BASE5 0x0A20
++#define PSB_CR_USE_CODE_BASE6 0x0A24
++#define PSB_CR_USE_CODE_BASE7 0x0A28
++#define PSB_CR_USE_CODE_BASE8 0x0A2C
++#define PSB_CR_USE_CODE_BASE9 0x0A30
++#define PSB_CR_USE_CODE_BASE10 0x0A34
++#define PSB_CR_USE_CODE_BASE11 0x0A38
++#define PSB_CR_USE_CODE_BASE12 0x0A3C
++#define PSB_CR_USE_CODE_BASE13 0x0A40
++#define PSB_CR_USE_CODE_BASE14 0x0A44
++#define PSB_CR_USE_CODE_BASE15 0x0A48
++#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
++#define _PSB_CUC_BASE_DM_SHIFT (25)
++#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
++#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
++#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
++#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
++#define _PSB_CUC_DM_VERTEX (0)
++#define _PSB_CUC_DM_PIXEL (1)
++#define _PSB_CUC_DM_RESERVED (2)
++#define _PSB_CUC_DM_EDM (3)
++
++#define PSB_CR_PDS_EXEC_BASE 0x0AB8
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
++
++#define PSB_CR_EVENT_KICKER 0x0AC4
++#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
++
++#define PSB_CR_EVENT_KICK 0x0AC8
++#define _PSB_CE_KICK_NOW (1 << 0)
++
++
++#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
++
++#define PSB_CR_BIF_CTRL 0x0C00
++#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
++#define _PSB_CB_CTRL_INVALDC (1 << 3)
++#define _PSB_CB_CTRL_FLUSH (1 << 2)
++
++#define PSB_CR_BIF_INT_STAT 0x0C04
++
++#define PSB_CR_BIF_FAULT 0x0C08
++#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
++#define _PSB_CBI_STAT_FAULT_SHIFT (0)
++#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
++#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
++#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
++#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
++#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
++#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
++#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
++#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
++#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
++#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
++
++#define PSB_CR_BIF_BANK0 0x0C78
++
++#define PSB_CR_BIF_BANK1 0x0C7C
++
++#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
++
++#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
++
++#define PSB_CR_2D_SOCIF 0x0E18
++#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
++#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
++#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
++
++#define PSB_CR_2D_BLIT_STATUS 0x0E04
++#define _PSB_C2B_STATUS_BUSY (1 << 24)
++#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
++#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
++
++/*
++ * 2D defs.
++ */
++
++/*
++ * 2D Slave Port Data : Block Header's Object Type
++ */
++
++#define PSB_2D_CLIP_BH (0x00000000)
++#define PSB_2D_PAT_BH (0x10000000)
++#define PSB_2D_CTRL_BH (0x20000000)
++#define PSB_2D_SRC_OFF_BH (0x30000000)
++#define PSB_2D_MASK_OFF_BH (0x40000000)
++#define PSB_2D_RESERVED1_BH (0x50000000)
++#define PSB_2D_RESERVED2_BH (0x60000000)
++#define PSB_2D_FENCE_BH (0x70000000)
++#define PSB_2D_BLIT_BH (0x80000000)
++#define PSB_2D_SRC_SURF_BH (0x90000000)
++#define PSB_2D_DST_SURF_BH (0xA0000000)
++#define PSB_2D_PAT_SURF_BH (0xB0000000)
++#define PSB_2D_SRC_PAL_BH (0xC0000000)
++#define PSB_2D_PAT_PAL_BH (0xD0000000)
++#define PSB_2D_MASK_SURF_BH (0xE0000000)
++#define PSB_2D_FLUSH_BH (0xF0000000)
++
++/*
++ * Clip Definition block (PSB_2D_CLIP_BH)
++ */
++#define PSB_2D_CLIPCOUNT_MAX (1)
++#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
++#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
++#define PSB_2D_CLIPCOUNT_SHIFT (0)
++/* clip rectangle min & max */
++#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_XMAX_SHIFT (12)
++#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_XMIN_SHIFT (0)
++/* clip rectangle offset */
++#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_YMAX_SHIFT (12)
++#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_YMIN_SHIFT (0)
++
++/*
++ * Pattern Control (PSB_2D_PAT_BH)
++ */
++#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
++#define PSB_2D_PAT_HEIGHT_SHIFT (0)
++#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
++#define PSB_2D_PAT_WIDTH_SHIFT (5)
++#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
++#define PSB_2D_PAT_YSTART_SHIFT (10)
++#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
++#define PSB_2D_PAT_XSTART_SHIFT (15)
++
++/*
++ * 2D Control block (PSB_2D_CTRL_BH)
++ */
++/* Present Flags */
++#define PSB_2D_SRCCK_CTRL (0x00000001)
++#define PSB_2D_DSTCK_CTRL (0x00000002)
++#define PSB_2D_ALPHA_CTRL (0x00000004)
++/* Colour Key Colour (SRC/DST)*/
++#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_COL_CLRMASK (0x00000000)
++#define PSB_2D_CK_COL_SHIFT (0)
++/* Colour Key Mask (SRC/DST)*/
++#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
++#define PSB_2D_CK_MASK_SHIFT (0)
++/* Alpha Control (Alpha/RGB)*/
++#define PSB_2D_GBLALPHA_MASK (0x000FF000)
++#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
++#define PSB_2D_GBLALPHA_SHIFT (12)
++#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
++#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
++#define PSB_2D_SRCALPHA_OP_SHIFT (20)
++#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
++#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
++#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
++#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
++#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
++#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
++#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
++#define PSB_2D_SRCALPHA_INVERT (0x00800000)
++#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
++#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
++#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
++#define PSB_2D_DSTALPHA_OP_SHIFT (24)
++#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
++#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
++#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
++#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
++#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
++#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
++#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
++#define PSB_2D_DSTALPHA_INVERT (0x08000000)
++#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
++
++#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
++#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
++#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
++#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
++
++/*
++ *Source Offset (PSB_2D_SRC_OFF_BH)
++ */
++#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
++#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
++
++/*
++ * Mask Offset (PSB_2D_MASK_OFF_BH)
++ */
++#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
++#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
++
++/*
++ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
++ */
++
++/*
++ *Blit Rectangle (PSB_2D_BLIT_BH)
++ */
++
++#define PSB_2D_ROT_MASK (3<<25)
++#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
++#define PSB_2D_ROT_NONE (0<<25)
++#define PSB_2D_ROT_90DEGS (1<<25)
++#define PSB_2D_ROT_180DEGS (2<<25)
++#define PSB_2D_ROT_270DEGS (3<<25)
++
++#define PSB_2D_COPYORDER_MASK (3<<23)
++#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
++#define PSB_2D_COPYORDER_TL2BR (0<<23)
++#define PSB_2D_COPYORDER_BR2TL (1<<23)
++#define PSB_2D_COPYORDER_TR2BL (2<<23)
++#define PSB_2D_COPYORDER_BL2TR (3<<23)
++
++#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
++#define PSB_2D_DSTCK_DISABLE (0x00000000)
++#define PSB_2D_DSTCK_PASS (0x00200000)
++#define PSB_2D_DSTCK_REJECT (0x00400000)
++
++#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
++#define PSB_2D_SRCCK_DISABLE (0x00000000)
++#define PSB_2D_SRCCK_PASS (0x00080000)
++#define PSB_2D_SRCCK_REJECT (0x00100000)
++
++#define PSB_2D_CLIP_ENABLE (0x00040000)
++
++#define PSB_2D_ALPHA_ENABLE (0x00020000)
++
++#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
++#define PSB_2D_PAT_MASK (0x00010000)
++#define PSB_2D_USE_PAT (0x00010000)
++#define PSB_2D_USE_FILL (0x00000000)
++/*
++ * Tungsten Graphics note on rop codes: If rop A and rop B are
++ * identical, the mask surface will not be read and need not be
++ * set up.
++ */
++
++#define PSB_2D_ROP3B_MASK (0x0000FF00)
++#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
++#define PSB_2D_ROP3B_SHIFT (8)
++/* rop code A */
++#define PSB_2D_ROP3A_MASK (0x000000FF)
++#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
++#define PSB_2D_ROP3A_SHIFT (0)
++
++#define PSB_2D_ROP4_MASK (0x0000FFFF)
++/*
++ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
++ * Fill Colour RGBA8888
++ */
++#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
++#define PSB_2D_FILLCOLOUR_SHIFT (0)
++/*
++ * DWORD1: (Always Present)
++ * X Start (Dest)
++ * Y Start (Dest)
++ */
++#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
++#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSTART_SHIFT (12)
++#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
++#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSTART_SHIFT (0)
++/*
++ * DWORD2: (Always Present)
++ * X Size (Dest)
++ * Y Size (Dest)
++ */
++#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
++#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSIZE_SHIFT (12)
++#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
++#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSIZE_SHIFT (0)
++
++/*
++ * Source Surface (PSB_2D_SRC_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
++#define PSB_2D_SRC_1_PAL (0x00000000)
++#define PSB_2D_SRC_2_PAL (0x00008000)
++#define PSB_2D_SRC_4_PAL (0x00010000)
++#define PSB_2D_SRC_8_PAL (0x00018000)
++#define PSB_2D_SRC_8_ALPHA (0x00020000)
++#define PSB_2D_SRC_4_ALPHA (0x00028000)
++#define PSB_2D_SRC_332RGB (0x00030000)
++#define PSB_2D_SRC_4444ARGB (0x00038000)
++#define PSB_2D_SRC_555RGB (0x00040000)
++#define PSB_2D_SRC_1555ARGB (0x00048000)
++#define PSB_2D_SRC_565RGB (0x00050000)
++#define PSB_2D_SRC_0888ARGB (0x00058000)
++#define PSB_2D_SRC_8888ARGB (0x00060000)
++#define PSB_2D_SRC_8888UYVY (0x00068000)
++#define PSB_2D_SRC_RESERVED (0x00070000)
++#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
++
++
++#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_SRC_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_SRC_ADDR_SHIFT (2)
++#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Pattern Surface (PSB_2D_PAT_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
++#define PSB_2D_PAT_1_PAL (0x00000000)
++#define PSB_2D_PAT_2_PAL (0x00008000)
++#define PSB_2D_PAT_4_PAL (0x00010000)
++#define PSB_2D_PAT_8_PAL (0x00018000)
++#define PSB_2D_PAT_8_ALPHA (0x00020000)
++#define PSB_2D_PAT_4_ALPHA (0x00028000)
++#define PSB_2D_PAT_332RGB (0x00030000)
++#define PSB_2D_PAT_4444ARGB (0x00038000)
++#define PSB_2D_PAT_555RGB (0x00040000)
++#define PSB_2D_PAT_1555ARGB (0x00048000)
++#define PSB_2D_PAT_565RGB (0x00050000)
++#define PSB_2D_PAT_0888ARGB (0x00058000)
++#define PSB_2D_PAT_8888ARGB (0x00060000)
++
++#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_PAT_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_PAT_ADDR_SHIFT (2)
++#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Destination Surface (PSB_2D_DST_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_DST_FORMAT_MASK (0x00078000)
++#define PSB_2D_DST_332RGB (0x00030000)
++#define PSB_2D_DST_4444ARGB (0x00038000)
++#define PSB_2D_DST_555RGB (0x00040000)
++#define PSB_2D_DST_1555ARGB (0x00048000)
++#define PSB_2D_DST_565RGB (0x00050000)
++#define PSB_2D_DST_0888ARGB (0x00058000)
++#define PSB_2D_DST_8888ARGB (0x00060000)
++#define PSB_2D_DST_8888AYUV (0x00070000)
++
++#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_DST_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_DST_ADDR_SHIFT (2)
++#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Mask Surface (PSB_2D_MASK_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_MASK_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_MASK_ADDR_SHIFT (2)
++#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Source Palette (PSB_2D_SRC_PAL_BH)
++ */
++
++#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
++#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_SRCPAL_BYTEALIGN (1024)
++
++/*
++ * Pattern Palette (PSB_2D_PAT_PAL_BH)
++ */
++
++#define PSB_2D_PATPAL_ADDR_SHIFT (0)
++#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_PATPAL_BYTEALIGN (1024)
++
++/*
++ * Rop3 Codes (2 LS bytes)
++ */
++
++#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
++#define PSB_2D_ROP3_PATCOPY (0xF0F0)
++#define PSB_2D_ROP3_WHITENESS (0xFFFF)
++#define PSB_2D_ROP3_BLACKNESS (0x0000)
++#define PSB_2D_ROP3_SRC (0xCC)
++#define PSB_2D_ROP3_PAT (0xF0)
++#define PSB_2D_ROP3_DST (0xAA)
++
++
++/*
++ * Sizes.
++ */
++
++#define PSB_SCENE_HW_COOKIE_SIZE 16
++#define PSB_TA_MEM_HW_COOKIE_SIZE 16
++
++/*
++ * Scene stuff.
++ */
++
++#define PSB_NUM_HW_SCENES 2
++
++/*
++ * Scheduler completion actions.
++ */
++
++#define PSB_RASTER_BLOCK 0
++#define PSB_RASTER 1
++#define PSB_RETURN 2
++#define PSB_TA 3
++
++
++/*Power management*/
++#define PSB_PUNIT_PORT 0x04
++#define PSB_APMBA 0x7a
++#define PSB_APM_CMD 0x0
++#define PSB_APM_STS 0x04
++#define PSB_PWRGT_GFX_MASK 0x3
++#define PSB_PWRGT_VID_ENC_MASK 0x30
++#define PSB_PWRGT_VID_DEC_MASK 0xc
++
++#define PSB_PM_SSC 0x20
++#define PSB_PM_SSS 0x30
++#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_reset.c
+@@ -0,0 +1,330 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "pnw_topaz.h"
++#include <linux/spinlock.h>
++
++
++void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ if (dev_priv->timer_available && !timer_pending(wt)) {
++ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
++ add_timer(wt);
++ }
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++
++static void psb_watchdog_func(unsigned long data)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
++ int msvdx_lockup;
++ int msvdx_idle;
++ unsigned long irq_flags;
++
++ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
++
++ if (msvdx_lockup) {
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
++ irq_flags);
++ if (msvdx_lockup)
++ schedule_work(&dev_priv->msvdx_watchdog_wq);
++ }
++ if (!msvdx_idle)
++ psb_schedule_watchdog(dev_priv);
++}
++
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ struct list_head *list, *next;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /*Flush the msvdx cmd queue and signal all fences in the queue */
++ list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
++ msvdx_cmd =
++ list_entry(list, struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
++ msvdx_cmd->sequence);
++ msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++ list_del(list);
++ kfree(msvdx_cmd->cmd);
++ kfree(msvdx_cmd
++ );
++ }
++}
++
++static void psb_msvdx_reset_wq(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ mutex_lock(&msvdx_priv->msvdx_mutex);
++ msvdx_priv->msvdx_needs_reset = 1;
++ msvdx_priv->msvdx_current_sequence++;
++ PSB_DEBUG_GENERAL
++ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
++ msvdx_priv->msvdx_current_sequence);
++
++ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ psb_msvdx_flush_cmd_queue(scheduler->dev);
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++
++ psb_schedule_watchdog(dev_priv);
++ mutex_unlock(&msvdx_priv->msvdx_mutex);
++}
++
++void psb_watchdog_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->watchdog_lock);
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ init_timer(wt);
++ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
++ wt->data = (unsigned long) dev_priv;
++ wt->function = &psb_watchdog_func;
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
++{
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++ (void) del_timer_sync(&dev_priv->watchdog_timer);
++}
++
++static void psb_lid_timer_func(unsigned long data)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
++ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
++ struct timer_list *lid_timer = &dev_priv->lid_timer;
++ unsigned long irq_flags;
++ u32 *lid_state = dev_priv->lid_state;
++ u32 pp_status;
++
++ if (*lid_state == dev_priv->lid_last_state)
++ goto lid_timer_schedule;
++
++ if ((*lid_state) & 0x01) {
++ /*lid state is open*/
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ /*FIXME: should be backlight level before*/
++ psb_intel_lvds_set_brightness(dev, 100);
++ } else {
++ psb_intel_lvds_set_brightness(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++ }
++ /* printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__); */
++
++ dev_priv->lid_last_state = *lid_state;
++
++lid_timer_schedule:
++ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++ if (!timer_pending(lid_timer)) {
++ lid_timer->expires = jiffies + PSB_LID_DELAY;
++ add_timer(lid_timer);
++ }
++ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *lid_timer = &dev_priv->lid_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->lid_lock);
++ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++
++ init_timer(lid_timer);
++
++ lid_timer->data = (unsigned long)dev_priv;
++ lid_timer->function = psb_lid_timer_func;
++ lid_timer->expires = jiffies + PSB_LID_DELAY;
++
++ add_timer(lid_timer);
++ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
++{
++ del_timer_sync(&dev_priv->lid_timer);
++}
++
++#if MDFLD_JLIU7_DSR
++#if MDFLD_JLIU7_DPU_2
++static void mdfld_dsr_timer_func(unsigned long data)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
++ struct drm_device * dev = (struct drm_device *)dev_priv->dev;
++ struct timer_list * dsr_timer = &dev_priv->dsr_timer;
++ unsigned long irq_flags;
++
++// PSB_DEBUG_ENTRY("dsr_idle_count = %d\n", dev_priv->dsr_idle_count);
++
++ if (dev_priv->dsr_fb_update)
++ {
++ if ((dev_priv->dbi_panel_on) || (dev_priv->dbi_panel_on2))
++ {
++ dev_priv->dsr_fb_update_done = false;
++ mdfld_dbi_update_fb (dev);
++
++ if (dev_priv->b_dsr_enable && dev_priv->dsr_fb_update_done)
++ dev_priv->dsr_fb_update &= ~(MDFLD_DSR_2D_3D | MDFLD_DSR_CURSOR | MDFLD_DSR_OVERLAY);
++ }
++
++ dev_priv->dsr_idle_count = 0;
++ }
++ else
++ {
++ dev_priv->dsr_idle_count++;
++ }
++
++ if (dev_priv->dsr_idle_count > 1)
++ {
++ mdfld_dbi_enter_dsr (dev);
++ return;
++ }
++
++ spin_lock_irqsave(&dev_priv->dsr_lock, irq_flags);
++ if(!timer_pending(dsr_timer)){
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dsr_timer);
++ }
++ spin_unlock_irqrestore(&dev_priv->dsr_lock, irq_flags);
++}
++#else /* MDFLD_JLIU7_DPU_2 */
++static void mdfld_dsr_timer_func(unsigned long data)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
++ struct drm_device * dev = (struct drm_device *)dev_priv->dev;
++ struct timer_list * dsr_timer = &dev_priv->dsr_timer;
++ unsigned long irq_flags;
++
++// PSB_DEBUG_ENTRY("dsr_idle_count = %d\n", dev_priv->dsr_idle_count);
++
++ if (dev_priv->dsr_fb_update)
++ {
++ if ((dev_priv->dbi_panel_on) && (dev_priv->dsr_fb_update & (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_CURSOR_0 | MDFLD_DSR_OVERLAY_0)))
++ {
++ dev_priv->dsr_fb_update_done_0 = false;
++ mdfld_dbi_update_fb (dev, 0);
++
++ if (dev_priv->b_dsr_enable && dev_priv->dsr_fb_update_done_0)
++ dev_priv->dsr_fb_update &= ~(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_CURSOR_0 | MDFLD_DSR_OVERLAY_0);
++ }
++
++ if ((dev_priv->dbi_panel_on2) && (dev_priv->dsr_fb_update & (MDFLD_DSR_2D_3D_2 | MDFLD_DSR_CURSOR_2 | MDFLD_DSR_OVERLAY_2)))
++ {
++ dev_priv->dsr_fb_update_done_2 = false;
++ mdfld_dbi_update_fb (dev, 2);
++
++ if (dev_priv->b_dsr_enable && dev_priv->dsr_fb_update_done_2)
++ dev_priv->dsr_fb_update &= ~(MDFLD_DSR_2D_3D_2 | MDFLD_DSR_CURSOR_2 | MDFLD_DSR_OVERLAY_2);
++ }
++
++ dev_priv->dsr_idle_count = 0;
++ }
++ else
++ {
++ dev_priv->dsr_idle_count++;
++ }
++
++ if (dev_priv->dsr_idle_count > 1)
++ {
++ mdfld_dbi_enter_dsr (dev);
++ return;
++ }
++
++ spin_lock_irqsave(&dev_priv->dsr_lock, irq_flags);
++ if(!timer_pending(dsr_timer)){
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++ add_timer(dsr_timer);
++ }
++ spin_unlock_irqrestore(&dev_priv->dsr_lock, irq_flags);
++}
++#endif /* MDFLD_JLIU7_DPU_2 */
++
++void mdfld_dsr_timer_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list * dsr_timer = &dev_priv->dsr_timer;
++ unsigned long irq_flags;
++
++ PSB_DEBUG_ENTRY("\n");
++
++ spin_lock_init(&dev_priv->dsr_lock);
++ spin_lock_irqsave(&dev_priv->dsr_lock, irq_flags);
++
++ init_timer(dsr_timer);
++
++ dsr_timer->data = (unsigned long)dev_priv;
++ dsr_timer->function = mdfld_dsr_timer_func;
++ dsr_timer->expires = jiffies + MDFLD_DSR_DELAY;
++
++ add_timer(dsr_timer);
++ spin_unlock_irqrestore(&dev_priv->dsr_lock, irq_flags);
++}
++
++void mdfld_dsr_timer_takedown(struct drm_psb_private * dev_priv)
++{
++ del_timer_sync(&dev_priv->dsr_timer);
++}
++
++#endif /*FIXME JLIU */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_schedule.c
+@@ -0,0 +1,70 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "ttm/ttm_execbuf_util.h"
++
++
++static void psb_powerdown_topaz(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, topaz_suspend_wq.work);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) scheduler->dev->dev_private;
++
++ if (!dev_priv->topaz_disabled) {
++ if (!mutex_trylock(&scheduler->topaz_power_mutex))
++ return;
++
++ psb_try_power_down_topaz(scheduler->dev);
++ mutex_unlock(&scheduler->topaz_power_mutex);
++ }
++}
++
++static void psb_powerdown_msvdx(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, msvdx_suspend_wq.work);
++
++ if (!mutex_trylock(&scheduler->msvdx_power_mutex))
++ return;
++
++ psb_try_power_down_msvdx(scheduler->dev);
++ mutex_unlock(&scheduler->msvdx_power_mutex);
++}
++
++int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler)
++{
++ memset(scheduler, 0, sizeof(*scheduler));
++ scheduler->dev = dev;
++ mutex_init(&scheduler->topaz_power_mutex);
++ mutex_init(&scheduler->msvdx_power_mutex);
++
++ INIT_DELAYED_WORK(&scheduler->topaz_suspend_wq,
++ &psb_powerdown_topaz);
++ INIT_DELAYED_WORK(&scheduler->msvdx_suspend_wq,
++ &psb_powerdown_msvdx);
++
++ return 0;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_schedule.h
+@@ -0,0 +1,81 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ **************************************************************************/
++#ifndef _PSB_SCHEDULE_H_
++#define _PSB_SCHEDULE_H_
++
++#include <drm/drmP.h>
++
++struct psb_context;
++
++enum psb_task_type {
++ psb_flip_task
++};
++
++struct drm_psb_private;
++
++/*struct psb_scheduler_seq {
++ uint32_t sequence;
++ int reported;
++};*/
++
++struct psb_scheduler {
++ struct drm_device *dev;
++ /*struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
++ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
++ struct mutex task_wq_mutex;*/
++ struct mutex topaz_power_mutex;
++ struct mutex msvdx_power_mutex;
++ /*spinlock_t lock;
++ struct list_head hw_scenes;
++ struct list_head ta_queue;
++ struct list_head raster_queue;
++ struct list_head hp_raster_queue;
++ struct list_head task_done_queue;
++ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
++ struct psb_task *feedback_task;
++ int ta_state;
++ struct psb_hw_scene *pending_hw_scene;
++ uint32_t pending_hw_scene_seq;
++ struct delayed_work wq*/;
++ struct delayed_work topaz_suspend_wq;
++ struct delayed_work msvdx_suspend_wq;
++ /*struct psb_scene_pool *pool;
++ uint32_t idle_count;
++ int idle;
++ wait_queue_head_t idle_queue;
++ unsigned long ta_end_jiffies;
++ unsigned long total_ta_jiffies;
++ unsigned long raster_end_jiffies;
++ unsigned long total_raster_jiffies;*/
++};
++
++/*#define PSB_RF_FIRE_TA (1 << 0)
++#define PSB_RF_OOM (1 << 1)
++#define PSB_RF_OOM_REPLY (1 << 2)
++#define PSB_RF_TERMINATE (1 << 3)
++#define PSB_RF_TA_DONE (1 << 4)
++#define PSB_RF_FIRE_RASTER (1 << 5)
++#define PSB_RF_RASTER_DONE (1 << 6)
++#define PSB_RF_DEALLOC (1 << 7)
++*/
++
++extern int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_setup.c
+@@ -0,0 +1,36 @@
++/*
++ * Copyright (c) 2009, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include "psb_intel_drv.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/* Fixed name */
++#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
++#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
++
++#include "psb_intel_i2c.c"
++#include "psb_intel_sdvo.c"
++#include "psb_intel_modes.c"
++#include "psb_intel_lvds.c"
++#include "psb_intel_dsi.c"
++#include "psb_intel_dsi2.c"
++#include "psb_intel_display.c"
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_sgx.c
+@@ -0,0 +1,936 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "pnw_topaz.h"
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_userobj_api.h"
++#include "ttm/ttm_placement_common.h"
++#include "psb_sgx.h"
++#include "psb_intel_reg.h"
++#include "psb_powermgmt.h"
++
++
++static inline int psb_same_page(unsigned long offset,
++ unsigned long offset2)
++{
++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
++}
++
++static inline unsigned long psb_offset_end(unsigned long offset,
++ unsigned long end)
++{
++ offset = (offset + PAGE_SIZE) & PAGE_MASK;
++ return (end < offset) ? end : offset;
++}
++
++static void psb_idle_engine(struct drm_device *dev, int engine);
++
++struct psb_dstbuf_cache {
++ unsigned int dst;
++ struct ttm_buffer_object *dst_buf;
++ unsigned long dst_offset;
++ uint32_t *dst_page;
++ unsigned int dst_page_offset;
++ struct ttm_bo_kmap_obj dst_kmap;
++ bool dst_is_iomem;
++};
++
++struct psb_validate_buffer {
++ struct ttm_validate_buffer base;
++ struct psb_validate_req req;
++ int ret;
++ struct psb_validate_arg __user *user_val_arg;
++ uint32_t flags;
++ uint32_t offset;
++ int po_correct;
++};
++
++static int psb_check_presumed(struct psb_validate_req *req,
++ struct ttm_buffer_object *bo,
++ struct psb_validate_arg __user *data,
++ int *presumed_ok)
++{
++ struct psb_validate_req __user *user_req = &(data->d.req);
++
++ *presumed_ok = 0;
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
++ return 0;
++
++ if (bo->offset == req->presumed_gpu_offset) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
++ &user_req->presumed_flags);
++}
++
++
++static void psb_unreference_buffers(struct psb_context *context)
++{
++ struct ttm_validate_buffer *entry, *next;
++ struct psb_validate_buffer *vbuf;
++ struct list_head *list = &context->validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++
++ list = &context->kern_validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++}
++
++
++static int psb_lookup_validate_buffer(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_validate_buffer *item)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++
++ item->user_val_arg =
++ (struct psb_validate_arg __user *) (unsigned long) data;
++
++ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
++ sizeof(item->req)) != 0)) {
++ DRM_ERROR("Lookup copy fault.\n");
++ return -EFAULT;
++ }
++
++ item->base.bo =
++ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
++
++ if (unlikely(item->base.bo == NULL)) {
++ DRM_ERROR("Bo lookup fault.\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int psb_reference_buffers(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_context *context)
++{
++ struct psb_validate_buffer *item;
++ int ret;
++
++ while (likely(data != 0)) {
++ if (unlikely(context->used_buffers >=
++ PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Too many buffers "
++ "on validate list.\n");
++ ret = -EINVAL;
++ goto out_err0;
++ }
++
++ item = &context->buffers[context->used_buffers];
++
++ ret = psb_lookup_validate_buffer(file_priv, data, item);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ item->base.reserved = 0;
++ list_add_tail(&item->base.head, &context->validate_list);
++ context->used_buffers++;
++ data = item->req.next;
++ }
++ return 0;
++
++out_err0:
++ psb_unreference_buffers(context);
++ return ret;
++}
++
++static int
++psb_placement_fence_type(struct ttm_buffer_object *bo,
++ uint64_t set_val_flags,
++ uint64_t clr_val_flags,
++ uint32_t new_fence_class,
++ uint32_t *new_fence_type)
++{
++ int ret;
++ uint32_t n_fence_type;
++ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
++ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
++ struct ttm_fence_object *old_fence;
++ uint32_t old_fence_type;
++
++ if (unlikely
++ (!(set_val_flags &
++ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
++ DRM_ERROR
++ ("GPU access type (read / write) is not indicated.\n");
++ return -EINVAL;
++ }
++
++ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ switch (new_fence_class) {
++ default:
++ n_fence_type = _PSB_FENCE_TYPE_EXE;
++ }
++
++ *new_fence_type = n_fence_type;
++ old_fence = (struct ttm_fence_object *) bo->sync_obj;
++ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
++
++ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
++ ((n_fence_type ^ old_fence_type) &
++ old_fence_type))) {
++ ret = ttm_bo_wait(bo, 0, 1, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ bo->proposed_flags = (bo->proposed_flags | set_flags)
++ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
++
++ return 0;
++}
++
++int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags, uint64_t clr_flags)
++{
++ struct psb_validate_buffer *item;
++ uint32_t cur_fence_type;
++ int ret;
++
++ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Out of free validation buffer entries for "
++ "kernel buffer validation.\n");
++ return -ENOMEM;
++ }
++
++ item = &context->buffers[context->used_buffers];
++ item->user_val_arg = NULL;
++ item->base.reserved = 0;
++
++ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0)) {
++ ttm_bo_unreserve(bo);
++ goto out_unlock;
++ }
++
++ item->base.bo = ttm_bo_reference(bo);
++ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
++ item->base.reserved = 1;
++
++ list_add_tail(&item->base.head, &context->kern_validate_list);
++ context->used_buffers++;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ context->fence_types |= cur_fence_type;
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++
++static int psb_validate_buffer_list(struct drm_file *file_priv,
++ uint32_t fence_class,
++ struct psb_context *context,
++ int *po_correct)
++{
++ struct psb_validate_buffer *item;
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct psb_validate_req *req;
++ uint32_t fence_types = 0;
++ uint32_t cur_fence_type;
++ struct ttm_validate_buffer *entry;
++ struct list_head *list = &context->validate_list;
++
++ *po_correct = 1;
++
++ list_for_each_entry(entry, list, head) {
++ item =
++ container_of(entry, struct psb_validate_buffer, base);
++ bo = entry->bo;
++ item->ret = 0;
++ req = &item->req;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo,
++ req->set_flags,
++ req->clear_flags,
++ fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ fence_types |= cur_fence_type;
++ entry->new_sync_obj_arg = (void *)
++ (unsigned long) cur_fence_type;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ mutex_unlock(&bo->mutex);
++
++ ret =
++ psb_check_presumed(&item->req, bo, item->user_val_arg,
++ &item->po_correct);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ if (unlikely(!item->po_correct))
++ *po_correct = 0;
++
++ item++;
++ }
++
++ context->fence_types |= fence_types;
++
++ return 0;
++out_err:
++ mutex_unlock(&bo->mutex);
++ item->ret = ret;
++ return ret;
++}
++
++static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
++{
++ if (dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++ dst_cache->dst_buf = NULL;
++ dst_cache->dst = ~0;
++}
++
++static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
++ struct psb_validate_buffer *buffers,
++ unsigned int dst,
++ unsigned long dst_offset)
++{
++ int ret;
++
++ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
++
++ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
++ psb_clear_dstbuf_cache(dst_cache);
++ dst_cache->dst = dst;
++ dst_cache->dst_buf = buffers[dst].base.bo;
++ }
++
++ if (unlikely
++ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
++ DRM_ERROR("Relocation destination out of bounds.\n");
++ return -EINVAL;
++ }
++
++ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
++ NULL == dst_cache->dst_page) {
++ if (NULL != dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++
++ ret =
++ ttm_bo_kmap(dst_cache->dst_buf,
++ dst_offset >> PAGE_SHIFT, 1,
++ &dst_cache->dst_kmap);
++ if (ret) {
++ DRM_ERROR("Could not map destination buffer for "
++ "relocation.\n");
++ return ret;
++ }
++
++ dst_cache->dst_page =
++ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
++ &dst_cache->dst_is_iomem);
++ dst_cache->dst_offset = dst_offset & PAGE_MASK;
++ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
++ }
++ return 0;
++}
++
++static int psb_apply_reloc(struct drm_psb_private *dev_priv,
++ uint32_t fence_class,
++ const struct drm_psb_reloc *reloc,
++ struct psb_validate_buffer *buffers,
++ int num_buffers,
++ struct psb_dstbuf_cache *dst_cache,
++ int no_wait, int interruptible)
++{
++ uint32_t val;
++ uint32_t background;
++ unsigned int index;
++ int ret;
++ unsigned int shift;
++ unsigned int align_shift;
++ struct ttm_buffer_object *reloc_bo;
++
++
++ PSB_DEBUG_GENERAL("Reloc type %d\n"
++ "\t where 0x%04x\n"
++ "\t buffer 0x%04x\n"
++ "\t mask 0x%08x\n"
++ "\t shift 0x%08x\n"
++ "\t pre_add 0x%08x\n"
++ "\t background 0x%08x\n"
++ "\t dst_buffer 0x%08x\n"
++ "\t arg0 0x%08x\n"
++ "\t arg1 0x%08x\n",
++ reloc->reloc_op,
++ reloc->where,
++ reloc->buffer,
++ reloc->mask,
++ reloc->shift,
++ reloc->pre_add,
++ reloc->background,
++ reloc->dst_buffer, reloc->arg0, reloc->arg1);
++
++ if (unlikely(reloc->buffer >= num_buffers)) {
++ DRM_ERROR("Illegal relocation buffer %d.\n",
++ reloc->buffer);
++ return -EINVAL;
++ }
++
++ if (buffers[reloc->buffer].po_correct)
++ return 0;
++
++ if (unlikely(reloc->dst_buffer >= num_buffers)) {
++ DRM_ERROR
++ ("Illegal destination buffer for relocation %d.\n",
++ reloc->dst_buffer);
++ return -EINVAL;
++ }
++
++ ret =
++ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
++ reloc->where << 2);
++ if (ret)
++ return ret;
++
++ reloc_bo = buffers[reloc->buffer].base.bo;
++
++ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
++ DRM_ERROR("Illegal relocation offset add.\n");
++ return -EINVAL;
++ }
++
++ switch (reloc->reloc_op) {
++ case PSB_RELOC_OP_OFFSET:
++ val = reloc_bo->offset + reloc->pre_add;
++ break;
++ default:
++ DRM_ERROR("Unimplemented relocation.\n");
++ return -EINVAL;
++ }
++
++ shift =
++ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
++ align_shift =
++ (reloc->
++ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
++
++ val = ((val >> align_shift) << shift);
++ index = reloc->where - dst_cache->dst_page_offset;
++
++ background = reloc->background;
++ val = (background & ~reloc->mask) | (val & reloc->mask);
++ dst_cache->dst_page[index] = val;
++
++ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
++ reloc->dst_buffer, index,
++ dst_cache->dst_page[index]);
++
++ return 0;
++}
++
++static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
++ unsigned int num_pages)
++{
++ int ret = 0;
++
++ spin_lock(&dev_priv->reloc_lock);
++ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
++ dev_priv->rel_mapped_pages += num_pages;
++ ret = 1;
++ }
++ spin_unlock(&dev_priv->reloc_lock);
++ return ret;
++}
++
++static int psb_fixup_relocs(struct drm_file *file_priv,
++ uint32_t fence_class,
++ unsigned int num_relocs,
++ unsigned int reloc_offset,
++ uint32_t reloc_handle,
++ struct psb_context *context,
++ int no_wait, int interruptible)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_buffer_object *reloc_buffer = NULL;
++ unsigned int reloc_num_pages;
++ unsigned int reloc_first_page;
++ unsigned int reloc_last_page;
++ struct psb_dstbuf_cache dst_cache;
++ struct drm_psb_reloc *reloc;
++ struct ttm_bo_kmap_obj reloc_kmap;
++ bool reloc_is_iomem;
++ int count;
++ int ret = 0;
++ int registered = 0;
++ uint32_t num_buffers = context->used_buffers;
++
++ if (num_relocs == 0)
++ return 0;
++
++ memset(&dst_cache, 0, sizeof(dst_cache));
++ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
++
++ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
++ if (!reloc_buffer)
++ goto out;
++
++ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
++ DRM_ERROR("Relocation buffer was not on validate list.\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ reloc_first_page = reloc_offset >> PAGE_SHIFT;
++ reloc_last_page =
++ (reloc_offset +
++ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
++ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
++ reloc_offset &= ~PAGE_MASK;
++
++ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
++ DRM_ERROR("Relocation buffer is too large\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
++ (registered =
++ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
++
++ if (ret == -EINTR) {
++ ret = -ERESTART;
++ goto out;
++ }
++ if (ret) {
++ DRM_ERROR("Error waiting for space to map "
++ "relocation buffer.\n");
++ goto out;
++ }
++
++ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
++ reloc_num_pages, &reloc_kmap);
++
++ if (ret) {
++ DRM_ERROR("Could not map relocation buffer.\n"
++ "\tReloc buffer id 0x%08x.\n"
++ "\tReloc first page %d.\n"
++ "\tReloc num pages %d.\n",
++ reloc_handle, reloc_first_page, reloc_num_pages);
++ goto out;
++ }
++
++ reloc = (struct drm_psb_reloc *)
++ ((unsigned long)
++ ttm_kmap_obj_virtual(&reloc_kmap,
++ &reloc_is_iomem) + reloc_offset);
++
++ for (count = 0; count < num_relocs; ++count) {
++ ret = psb_apply_reloc(dev_priv, fence_class,
++ reloc, context->buffers,
++ num_buffers, &dst_cache,
++ no_wait, interruptible);
++ if (ret)
++ goto out1;
++ reloc++;
++ }
++
++out1:
++ ttm_bo_kunmap(&reloc_kmap);
++out:
++ if (registered) {
++ spin_lock(&dev_priv->reloc_lock);
++ dev_priv->rel_mapped_pages -= reloc_num_pages;
++ spin_unlock(&dev_priv->reloc_lock);
++ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
++ }
++
++ psb_clear_dstbuf_cache(&dst_cache);
++ if (reloc_buffer)
++ ttm_bo_unref(&reloc_buffer);
++ return ret;
++}
++
++void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ int ret;
++ struct ttm_fence_object *fence;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ uint32_t handle;
++
++ ret = ttm_fence_user_create(fdev, tfile,
++ engine, fence_types,
++ TTM_FENCE_FLAG_EMIT, &fence, &handle);
++ if (ret) {
++
++ /*
++ * Fence creation failed.
++ * Fall back to synchronous operation and idle the engine.
++ */
++
++ psb_idle_engine(dev, engine);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++
++ /*
++ * Communicate to user-space that
++ * fence creation has failed and that
++ * the engine is idle.
++ */
++
++ fence_arg->handle = ~0;
++ fence_arg->error = ret;
++ }
++
++ ttm_eu_backoff_reservation(list);
++ if (fence_p)
++ *fence_p = NULL;
++ return;
++ }
++
++ ttm_eu_fence_buffer_objects(list, fence);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++ fence_arg->handle = handle;
++ fence_arg->fence_class = ttm_fence_class(fence);
++ fence_arg->fence_type = ttm_fence_types(fence);
++ fence_arg->signaled_types = info.signaled_types;
++ fence_arg->error = 0;
++ } else {
++ ret =
++ ttm_ref_object_base_unref(tfile, handle,
++ ttm_fence_type);
++ BUG_ON(ret);
++ }
++
++ if (fence_p)
++ *fence_p = fence;
++ else if (fence)
++ ttm_fence_object_unref(&fence);
++}
++
++
++#if 0
++static int psb_dump_page(struct ttm_buffer_object *bo,
++ unsigned int page_offset, unsigned int num)
++{
++ struct ttm_bo_kmap_obj kmobj;
++ int is_iomem;
++ uint32_t *p;
++ int ret;
++ unsigned int i;
++
++ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
++ if (ret)
++ return ret;
++
++ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
++ for (i = 0; i < num; ++i)
++ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
++
++ ttm_bo_kunmap(&kmobj);
++ return 0;
++}
++#endif
++
++static void psb_idle_engine(struct drm_device *dev, int engine)
++{
++ /*Fix me add video engile support*/
++ return;
++}
++
++static int psb_handle_copyback(struct drm_device *dev,
++ struct psb_context *context,
++ int ret)
++{
++ int err = ret;
++ struct ttm_validate_buffer *entry;
++ struct psb_validate_arg arg;
++ struct list_head *list = &context->validate_list;
++
++ if (ret) {
++ ttm_eu_backoff_reservation(list);
++ ttm_eu_backoff_reservation(&context->kern_validate_list);
++ }
++
++
++ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
++ list_for_each_entry(entry, list, head) {
++ struct psb_validate_buffer *vbuf =
++ container_of(entry, struct psb_validate_buffer,
++ base);
++ arg.handled = 1;
++ arg.ret = vbuf->ret;
++ if (!arg.ret) {
++ struct ttm_buffer_object *bo = entry->bo;
++ mutex_lock(&bo->mutex);
++ arg.d.rep.gpu_offset = bo->offset;
++ arg.d.rep.placement = bo->mem.flags;
++ arg.d.rep.fence_type_mask =
++ (uint32_t) (unsigned long)
++ entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ }
++
++ if (__copy_to_user(vbuf->user_val_arg,
++ &arg, sizeof(arg)))
++ err = -EFAULT;
++
++ if (arg.ret)
++ break;
++ }
++ }
++
++ return err;
++}
++
++int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_cmdbuf_arg *arg = data;
++ int ret = 0;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct ttm_buffer_object *cmd_buffer = NULL;
++ struct psb_ttm_fence_rep fence_arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++ int engine;
++ int po_correct;
++ struct psb_context *context;
++ unsigned num_buffers;
++
++ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (arg->engine == PSB_ENGINE_VIDEO) {
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return -EBUSY;
++ } else if (arg->engine == LNC_ENGINE_ENCODE) {
++ if (dev_priv->topaz_disabled)
++ return -ENODEV;
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return -EBUSY;
++ }
++
++
++ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++
++ context = &dev_priv->context;
++ context->used_buffers = 0;
++ context->fence_types = 0;
++ BUG_ON(!list_empty(&context->validate_list));
++ BUG_ON(!list_empty(&context->kern_validate_list));
++
++ if (unlikely(context->buffers == NULL)) {
++ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
++ sizeof(*context->buffers));
++ if (unlikely(context->buffers == NULL)) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++ }
++
++ ret = psb_reference_buffers(file_priv,
++ arg->buffer_list,
++ context);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
++
++ ret = ttm_eu_reserve_buffers(&context->validate_list,
++ context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ engine = arg->engine;
++ ret = psb_validate_buffer_list(file_priv, engine,
++ context, &po_correct);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ if (!po_correct) {
++ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
++ arg->reloc_offset,
++ arg->reloc_handle, context, 0, 1);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ }
++
++ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
++ if (unlikely(cmd_buffer == NULL)) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ switch (arg->engine) {
++ case PSB_ENGINE_VIDEO:
++ if (arg->cmdbuf_size == (16 + 32)) {
++ /* Identify deblock msg cmdbuf */
++ /* according to cmdbuf_size */
++ struct ttm_bo_kmap_obj cmd_kmap;
++ struct ttm_buffer_object *deblock;
++ uint32_t *cmd;
++ bool is_iomem;
++
++ /* write regIO BO's address after deblcok msg */
++ ret = ttm_bo_kmap(cmd_buffer, 0, 1, &cmd_kmap);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ cmd = (uint32_t *)(ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + 16);
++ deblock = ttm_buffer_object_lookup(tfile,
++ (uint32_t)(*cmd));
++ *cmd = (uint32_t)deblock;
++ ttm_bo_kunmap(&cmd_kmap);
++ }
++
++ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case LNC_ENGINE_ENCODE:
++ if (IS_MRST(dev))
++ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++ else
++ ret = pnw_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++
++
++ default:
++ DRM_ERROR
++ ("Unimplemented command submission mechanism (%x).\n",
++ arg->engine);
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ ret = copy_to_user((void __user *)
++ ((unsigned long) arg->fence_arg),
++ &fence_arg, sizeof(fence_arg));
++ }
++
++out_err4:
++ if (cmd_buffer)
++ ttm_bo_unref(&cmd_buffer);
++out_err3:
++ ret = psb_handle_copyback(dev, context, ret);
++out_err2:
++ psb_unreference_buffers(context);
++out_err1:
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++out_err0:
++ ttm_read_unlock(&dev_priv->ttm_lock);
++
++ if (arg->engine == PSB_ENGINE_VIDEO)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++
++ if (arg->engine == LNC_ENGINE_ENCODE)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++
++ return ret;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_sgx.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ **/
++#ifndef _PSB_SGX_H_
++#define _PSB_SGX_H_
++
++extern int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ struct ttm_fence_object *fence);
++
++extern int drm_idle_check_interval;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_socket.c
+@@ -0,0 +1,379 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2004 Novell, Inc. All rights reserved.
++ * Copyright (C) 2004 IBM, Inc. All rights reserved.
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * Licensed under the GNU GPL v2.
++ *
++ * Authors:
++ * Robert Love <rml@novell.com>
++ * Kay Sievers <kay.sievers@vrfy.org>
++ * Arjan van de Ven <arjanv@redhat.com>
++ * Greg Kroah-Hartman <greg@kroah.com>
++ *
++ * Notes:
++ * Adapted from existing kobj event socket code to enable
++ * mutlicast usermode communication for gfx driver to mutiple
++ * usermode threads via different socket broadcast groups.
++ * Original kobject uevent code does not allow for different
++ * broadcast groups. Due to the frequency of usermode events
++ * generated by some gfx subsystems it is necessary to open
++ * a new dedicated socket with multicast group support. In
++ * the future it is hoped that this code can be removed
++ * and either a new netlink protocol type added for graphics
++ * or conversely to simply enable group routing to be leveraged
++ * on the existing kobject uevent infrastructure.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/kobject.h>
++#include <linux/module.h>
++#include <linux/socket.h>
++#include <linux/skbuff.h>
++#include <linux/netlink.h>
++#include <net/sock.h>
++#include "psb_umevents.h"
++
++#define NETLINK_PSB_KOBJECT_UEVENT 31
++
++u64 psb_uevent_seqnum;
++char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
++static DEFINE_SPINLOCK(sequence_lock);
++#if defined(CONFIG_NET)
++static struct sock *uevent_sock;
++#endif
++
++/* the strings here must match the enum in include/linux/kobject.h */
++static const char *psb_kobject_actions[] = {
++ [KOBJ_ADD] = "add",
++ [KOBJ_REMOVE] = "remove",
++ [KOBJ_CHANGE] = "change",
++ [KOBJ_MOVE] = "move",
++ [KOBJ_ONLINE] = "online",
++ [KOBJ_OFFLINE] = "offline",
++};
++
++/**
++ * psb_kobject_action_type - translate action string to numeric type
++ *
++ * @buf: buffer containing the action string, newline is ignored
++ * @count: length of buffer
++ * @type: pointer to the location to store the action type
++ *
++ * Returns 0 if the action string was recognized.
++ */
++int psb_kobject_action_type(const char *buf, size_t count,
++ enum kobject_action *type)
++{
++ enum kobject_action action;
++ int ret = -EINVAL;
++
++ if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
++ count--;
++
++ if (!count)
++ goto out;
++
++ for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) {
++ if (strncmp(psb_kobject_actions[action], buf, count) != 0)
++ continue;
++ if (psb_kobject_actions[action][count] != '\0')
++ continue;
++ *type = action;
++ ret = 0;
++ break;
++ }
++out:
++ return ret;
++}
++
++/**
++ * psb_kobject_uevent_env - send an uevent with environmental data
++ *
++ * @action: action that is happening
++ * @kobj: struct kobject that the action is happening to
++ * @envp_ext: pointer to environmental data
++ *
++ * Returns 0 if psb_kobject_uevent_env() is completed with success or the
++ * corresponding error when it fails.
++ */
++int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
++ char *envp_ext[], int dst_group_id)
++{
++ struct kobj_uevent_env *env;
++ const char *action_string = psb_kobject_actions[action];
++ const char *devpath = NULL;
++ const char *subsystem;
++ struct kobject *top_kobj;
++ struct kset *kset;
++ struct kset_uevent_ops *uevent_ops;
++ u64 seq;
++ int i = 0;
++ int retval = 0;
++
++ pr_debug("kobject: '%s' (%p): %s\n",
++ kobject_name(kobj), kobj, __func__);
++
++ /* search the kset we belong to */
++ top_kobj = kobj;
++ while (!top_kobj->kset && top_kobj->parent)
++ top_kobj = top_kobj->parent;
++
++ if (!top_kobj->kset) {
++ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
++ "without kset!\n", kobject_name(kobj), kobj,
++ __func__);
++ return -EINVAL;
++ }
++
++ kset = top_kobj->kset;
++ uevent_ops = (struct kset_uevent_ops *)kset->uevent_ops;
++
++ /* skip the event, if uevent_suppress is set*/
++ if (kobj->uevent_suppress) {
++ pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
++ "caused the event to drop!\n",
++ kobject_name(kobj), kobj, __func__);
++ return 0;
++ }
++ /* skip the event, if the filter returns zero. */
++ if (uevent_ops && uevent_ops->filter)
++ if (!uevent_ops->filter(kset, kobj)) {
++ pr_debug("kobject: '%s' (%p): %s: filter function "
++ "caused the event to drop!\n",
++ kobject_name(kobj), kobj, __func__);
++ return 0;
++ }
++
++ /* originating subsystem */
++ if (uevent_ops && uevent_ops->name)
++ subsystem = uevent_ops->name(kset, kobj);
++ else
++ subsystem = kobject_name(&kset->kobj);
++ if (!subsystem) {
++ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
++ "event to drop!\n", kobject_name(kobj), kobj,
++ __func__);
++ return 0;
++ }
++
++ /* environment buffer */
++ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
++ if (!env)
++ return -ENOMEM;
++
++ /* complete object path */
++ devpath = kobject_get_path(kobj, GFP_KERNEL);
++ if (!devpath) {
++ retval = -ENOENT;
++ goto exit;
++ }
++
++ /* default keys */
++ retval = add_uevent_var(env, "ACTION=%s", action_string);
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env, "DEVPATH=%s", devpath);
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
++ if (retval)
++ goto exit;
++
++ /* keys passed in from the caller */
++ if (envp_ext) {
++ for (i = 0; envp_ext[i]; i++) {
++ retval = add_uevent_var(env, "%s", envp_ext[i]);
++ if (retval)
++ goto exit;
++ }
++ }
++
++ /* let the kset specific function add its stuff */
++ if (uevent_ops && uevent_ops->uevent) {
++ retval = uevent_ops->uevent(kset, kobj, env);
++ if (retval) {
++ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
++ "%d\n", kobject_name(kobj), kobj,
++ __func__, retval);
++ goto exit;
++ }
++ }
++
++ /*
++ * Mark "add" and "remove" events in the object to ensure proper
++ * events to userspace during automatic cleanup. If the object did
++ * send an "add" event, "remove" will automatically be generated by
++ * the core, if not already done by the caller.
++ */
++ if (action == KOBJ_ADD)
++ kobj->state_add_uevent_sent = 1;
++ else if (action == KOBJ_REMOVE)
++ kobj->state_remove_uevent_sent = 1;
++
++ /* we will send an event, so request a new sequence number */
++ spin_lock(&sequence_lock);
++ seq = ++psb_uevent_seqnum;
++ spin_unlock(&sequence_lock);
++ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
++ if (retval)
++ goto exit;
++
++#if defined(CONFIG_NET)
++ /* send netlink message */
++ if (uevent_sock) {
++ struct sk_buff *skb;
++ size_t len;
++
++ /* allocate message with the maximum possible size */
++ len = strlen(action_string) + strlen(devpath) + 2;
++ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
++ if (skb) {
++ char *scratch;
++
++ /* add header */
++ scratch = skb_put(skb, len);
++ sprintf(scratch, "%s@%s", action_string, devpath);
++
++ /* copy keys to our continuous event payload buffer */
++ for (i = 0; i < env->envp_idx; i++) {
++ len = strlen(env->envp[i]) + 1;
++ scratch = skb_put(skb, len);
++ strcpy(scratch, env->envp[i]);
++ }
++
++ NETLINK_CB(skb).dst_group = dst_group_id;
++ retval = netlink_broadcast(uevent_sock, skb, 0,
++ dst_group_id,
++ GFP_KERNEL);
++
++ /* ENOBUFS should be handled in userspace */
++ if (retval == -ENOBUFS)
++ retval = 0;
++ } else
++ retval = -ENOMEM;
++ }
++#endif
++
++ /* call psb_uevent_helper, usually only enabled during early boot */
++ if (psb_uevent_helper[0]) {
++ char *argv[3];
++
++ argv[0] = psb_uevent_helper;
++ argv[1] = (char *)subsystem;
++ argv[2] = NULL;
++ retval = add_uevent_var(env, "HOME=/");
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env,
++ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
++ if (retval)
++ goto exit;
++
++ retval = call_usermodehelper(argv[0], argv,
++ env->envp, UMH_WAIT_EXEC);
++ }
++
++exit:
++ kfree(devpath);
++ kfree(env);
++ return retval;
++}
++/*EXPORT_SYMBOL_GPL(psb_kobject_uevent_env); */
++
++/**
++ * psb_kobject_uevent - notify userspace by sending an uevent
++ *
++ * @action: action that is happening
++ * @kobj: struct kobject that the action is happening to
++ *
++ * Returns 0 if psb_kobject_uevent() is completed with success or the
++ * corresponding error when it fails.
++ */
++int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action,
++ int dst_group_id)
++{
++ return psb_kobject_uevent_env(kobj, action, NULL, dst_group_id);
++}
++/*EXPORT_SYMBOL_GPL(psb_kobject_uevent); */
++
++/**
++ * psb_add_uevent_var - add key value string to the environment buffer
++ * @env: environment buffer structure
++ * @format: printf format for the key=value pair
++ *
++ * Returns 0 if environment variable was added successfully or -ENOMEM
++ * if no space was available.
++ */
++int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
++{
++ va_list args;
++ int len;
++
++ if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
++ WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n");
++ return -ENOMEM;
++ }
++
++ va_start(args, format);
++ len = vsnprintf(&env->buf[env->buflen],
++ sizeof(env->buf) - env->buflen,
++ format, args);
++ va_end(args);
++
++ if (len >= (sizeof(env->buf) - env->buflen)) {
++ WARN(1,
++ KERN_ERR "psb_add_uevent_var: buffer size too small\n");
++ return -ENOMEM;
++ }
++
++ env->envp[env->envp_idx++] = &env->buf[env->buflen];
++ env->buflen += len + 1;
++ return 0;
++}
++/*EXPORT_SYMBOL_GPL(psb_add_uevent_var);*/
++
++#if defined(CONFIG_NET)
++int __init psb_kobject_uevent_init(void)
++{
++ /* This should be the 15, but 3 seems to work better. Why? WHY!? */
++ /* uevent_sock = netlink_kernel_create(&init_net,
++ NETLINK_PSB_KOBJECT_UEVENT,
++ DRM_GFX_SOCKET_GROUPS,
++ NULL, NULL, THIS_MODULE); */
++ uevent_sock = netlink_kernel_create(&init_net,
++ NETLINK_PSB_KOBJECT_UEVENT,
++ 0x1, /* 3 is for hotplug & dpst */
++ NULL, NULL, THIS_MODULE);
++
++ if (!uevent_sock) {
++ printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n");
++ return -ENODEV;
++ }
++ netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV);
++
++ return 0;
++}
++
++#ifndef MODULE
++postcore_initcall(psb_kobject_uevent_init);
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_ttm_glue.c
+@@ -0,0 +1,353 @@
++/**************************************************************************
++ * Copyright (c) 2008, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include <linux/io.h>
++
++/*IMG Headers*/
++#include "private_data.h"
++
++extern int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma);
++
++static struct vm_operations_struct psb_ttm_vm_ops;
++
++/**
++ * NOTE: driver_private of drm_file is now a PVRSRV_FILE_PRIVATE_DATA struct
++ * pPriv in PVRSRV_FILE_PRIVATE_DATA contains the original psb_fpriv;
++ */
++int psb_open(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ struct psb_fpriv *psb_fp;
++ PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ ret = drm_open(inode, filp);
++ if (unlikely(ret))
++ return ret;
++
++ psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
++
++ if (unlikely(psb_fp == NULL))
++ goto out_err0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ DRM_DEBUG("is_master %d\n", file_priv->is_master ? 1 : 0);
++
++ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
++ PSB_FILE_OBJECT_HASH_ORDER);
++ if (unlikely(psb_fp->tfile == NULL))
++ goto out_err1;
++
++ pvr_file_priv = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
++ if (!pvr_file_priv) {
++ DRM_ERROR("drm file private is NULL\n");
++ goto out_err1;
++ }
++
++ pvr_file_priv->pPriv = psb_fp;
++
++ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
++ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
++
++ return 0;
++
++out_err1:
++ kfree(psb_fp);
++out_err0:
++ (void) drm_release(inode, filp);
++ return ret;
++}
++
++int psb_release(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct psb_fpriv *psb_fp;
++ struct drm_psb_private *dev_priv;
++ int ret;
++ uint32_t ui32_reg_value = 0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ psb_fp = psb_fpriv(file_priv);
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ttm_object_file_release(&psb_fp->tfile);
++ kfree(psb_fp);
++
++ if (IS_MRST(dev_priv->dev))
++ {
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 10);
++ /* FIXME: workaround for HSD3469585
++ * re-enable DRAM Self Refresh Mode
++ * by setting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value | (0x1 << 7)));
++ }
++
++ if (IS_MRST(dev_priv->dev))
++ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 10);
++
++ ret = drm_release(inode, filp);
++
++ return ret;
++}
++
++int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++
++ return ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
++ &psb_priv(dev)->ttm_lock, data);
++
++}
++
++int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++
++}
++
++int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
++
++}
++
++int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ return ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
++ &dev_priv->bdev, &dev_priv->ttm_lock, data);
++
++}
++
++int psb_pl_ub_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ return ttm_pl_ub_create_ioctl(psb_fpriv(file_priv)->tfile,
++ &dev_priv->bdev, &dev_priv->ttm_lock, data);
++
++}
++/**
++ * psb_ttm_fault - Wrapper around the ttm fault method.
++ *
++ * @vma: The struct vm_area_struct as in the vm fault() method.
++ * @vmf: The struct vm_fault as in the vm fault() method.
++ *
++ * Since ttm_fault() will reserve buffers while faulting,
++ * we need to take the ttm read lock around it, as this driver
++ * relies on the ttm_lock in write mode to exclude all threads from
++ * reserving and thus validating buffers in aperture- and memory shortage
++ * situations.
++ */
++
++static int psb_ttm_fault(struct vm_area_struct *vma,
++ struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct drm_psb_private *dev_priv =
++ container_of(bo->bdev, struct drm_psb_private, bdev);
++ int ret;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
++
++ ttm_read_unlock(&dev_priv->ttm_lock);
++ return ret;
++}
++
++/**
++ * if vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET call directly to
++ * PVRMMap
++ */
++int psb_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ int ret;
++
++ if (vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET ||
++ vma->vm_pgoff > 2 * DRM_PSB_FILE_PAGE_OFFSET)
++ return PVRMMap(filp, vma);
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
++ dev_priv->ttm_vm_ops = (struct vm_operations_struct *)vma->vm_ops;
++ psb_ttm_vm_ops = *vma->vm_ops;
++ psb_ttm_vm_ops.fault = &psb_ttm_fault;
++ }
++
++ vma->vm_ops = &psb_ttm_vm_ops;
++
++ return 0;
++}
++
++ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
++}
++
++ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
++}
++
++int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++
++ if (capable(CAP_SYS_ADMIN))
++ return 0;
++
++ if (unlikely(!file_priv->authenticated))
++ return -EPERM;
++
++ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
++{
++ return ttm_mem_global_init(ref->object);
++}
++
++static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
++{
++ ttm_mem_global_release(ref->object);
++}
++
++int psb_ttm_global_init(struct drm_psb_private *dev_priv)
++{
++ struct drm_global_reference *global_ref;
++ int ret;
++
++ global_ref = &dev_priv->mem_global_ref;
++ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
++ global_ref->size = sizeof(struct ttm_mem_global);
++ global_ref->init = &psb_ttm_mem_global_init;
++ global_ref->release = &psb_ttm_mem_global_release;
++
++ ret = drm_global_item_ref(global_ref);
++ if (unlikely(ret != 0)) {
++ DRM_ERROR("Failed referencing a global TTM memory object.\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++void psb_ttm_global_release(struct drm_psb_private *dev_priv)
++{
++ drm_global_item_unref(&dev_priv->mem_global_ref);
++}
++
++int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_getpageaddrs_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ struct ttm_tt *ttm;
++ struct page **tt_pages;
++ unsigned long i, num_pages;
++ unsigned long *p = arg->page_addrs;
++ int ret = 0;
++
++ bo = ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
++ arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for getpageaddrs.\n");
++ return -EINVAL;
++ }
++
++ arg->gtt_offset = bo->offset;
++ ttm = bo->ttm;
++ num_pages = ttm->num_pages;
++ tt_pages = ttm->pages;
++
++ for (i = 0; i < num_pages; i++)
++ p[i] = (unsigned long)page_to_phys(tt_pages[i]);
++
++ return ret;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_umevents.c
+@@ -0,0 +1,485 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * define sysfs operations supported by umevent objects.
++ *
++ */
++static struct sysfs_ops umevent_obj_sysfs_ops = {
++ .show = psb_umevent_attr_show,
++ .store = psb_umevent_attr_store,
++};
++/**
++ * define the data attributes we will expose through sysfs.
++ *
++ */
++static struct umevent_attribute data_0 =
++ __ATTR(data_0_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_1 =
++ __ATTR(data_1_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_2 =
++ __ATTR(data_2_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_3 =
++ __ATTR(data_3_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_4 =
++ __ATTR(data_4_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_5 =
++ __ATTR(data_5_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_6 =
++ __ATTR(data_6_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_7 =
++ __ATTR(data_7_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++/**
++ * define the structure used to seed our ktype.
++ *
++ */
++static struct attribute *umevent_obj_default_attrs[] = {
++ &data_0.attr,
++ &data_1.attr,
++ &data_2.attr,
++ &data_3.attr,
++ &data_4.attr,
++ &data_5.attr,
++ &data_6.attr,
++ &data_7.attr,
++ NULL, /* need to NULL terminate the list of attributes */
++};
++/**
++ * specify the ktype for our kobjects.
++ *
++ */
++static struct kobj_type umevent_obj_ktype = {
++ .sysfs_ops = &umevent_obj_sysfs_ops,
++ .release = psb_umevent_obj_release,
++ .default_attrs = umevent_obj_default_attrs,
++};
++/**
++ * psb_umevent_attr_show - default kobject show function
++ *
++ * @kobj: kobject associated with the show operation
++ * @attr: attribute being requested
++ * @buf: pointer to the return buffer
++ *
++ */
++ssize_t psb_umevent_attr_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buf)
++{
++ struct umevent_attribute *attribute;
++ struct umevent_obj *any_umevent_obj;
++ attribute = to_umevent_attr(attr);
++ any_umevent_obj = to_umevent_obj(kobj);
++ if (!attribute->show)
++ return -EIO;
++
++ return attribute->show(any_umevent_obj, attribute, buf);
++}
++/**
++ * psb_umevent_attr_store - default kobject store function
++ *
++ * @kobj: kobject associated with the store operation
++ * @attr: attribute being requested
++ * @buf: input data to write to attribute
++ * @len: character count
++ *
++ */
++ssize_t psb_umevent_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len)
++{
++ struct umevent_attribute *attribute;
++ struct umevent_obj *any_umevent_obj;
++ attribute = to_umevent_attr(attr);
++ any_umevent_obj = to_umevent_obj(kobj);
++ if (!attribute->store)
++ return -EIO;
++
++ return attribute->store(any_umevent_obj, attribute, buf, len);
++}
++/**
++ * psb_umevent_obj_release - kobject release function
++ *
++ * @kobj: kobject to be released.
++ */
++void psb_umevent_obj_release(struct kobject *kobj)
++{
++ struct umevent_obj *any_umevent_obj;
++ any_umevent_obj = to_umevent_obj(kobj);
++ kfree(any_umevent_obj);
++}
++/**
++ * psb_umevent_attr_show_imp - attribute show implementation
++ *
++ * @any_umevent_obj: kobject managed data to read from
++ * @attr: attribute being requested
++ * @buf: pointer to the return buffer
++ *
++ */
++ssize_t psb_umevent_attr_show_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ char *buf)
++{
++ int var;
++
++ if (strcmp(attr->attr.name, "data_0_val") == 0)
++ var = any_umevent_obj->data_0_val;
++ else if (strcmp(attr->attr.name, "data_1_val") == 0)
++ var = any_umevent_obj->data_1_val;
++ else if (strcmp(attr->attr.name, "data_2_val") == 0)
++ var = any_umevent_obj->data_2_val;
++ else if (strcmp(attr->attr.name, "data_3_val") == 0)
++ var = any_umevent_obj->data_3_val;
++ else if (strcmp(attr->attr.name, "data_4_val") == 0)
++ var = any_umevent_obj->data_4_val;
++ else if (strcmp(attr->attr.name, "data_5_val") == 0)
++ var = any_umevent_obj->data_5_val;
++ else if (strcmp(attr->attr.name, "data_6_val") == 0)
++ var = any_umevent_obj->data_6_val;
++ else
++ var = any_umevent_obj->data_7_val;
++
++ return sprintf(buf, "%d\n", var);
++}
++/**
++ * psb_umevent_attr_store_imp - attribute store implementation
++ *
++ * @any_umevent_obj: kobject managed data to write to
++ * @attr: attribute being requested
++ * @buf: input data to write to attribute
++ * @count: character count
++ *
++ */
++ssize_t psb_umevent_attr_store_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count)
++{
++ int var;
++
++ sscanf(buf, "%du", &var);
++ if (strcmp(attr->attr.name, "data_0_val") == 0)
++ any_umevent_obj->data_0_val = var;
++ else if (strcmp(attr->attr.name, "data_1_val") == 0)
++ any_umevent_obj->data_1_val = var;
++ else if (strcmp(attr->attr.name, "data_2_val") == 0)
++ any_umevent_obj->data_2_val = var;
++ else if (strcmp(attr->attr.name, "data_3_val") == 0)
++ any_umevent_obj->data_3_val = var;
++ else if (strcmp(attr->attr.name, "data_4_val") == 0)
++ any_umevent_obj->data_4_val = var;
++ else if (strcmp(attr->attr.name, "data_5_val") == 0)
++ any_umevent_obj->data_5_val = var;
++ else if (strcmp(attr->attr.name, "data_6_val") == 0)
++ any_umevent_obj->data_6_val = var;
++ else
++ any_umevent_obj->data_7_val = var;
++ return count;
++}
++/**
++ * psb_create_umevent_obj - create and track new event objects
++ *
++ * @name: name to give to new sysfs / kobject entry
++ * @list: event object list to track the kobject in
++ */
++struct umevent_obj *psb_create_umevent_obj(const char *name,
++ struct umevent_list
++ *list)
++{
++ struct umevent_obj *new_umevent_obj;
++ int retval;
++ new_umevent_obj = kzalloc(sizeof(*new_umevent_obj),
++ GFP_KERNEL);
++ if (!new_umevent_obj)
++ return NULL;
++
++ new_umevent_obj->kobj.kset = list->umevent_disp_pool;
++ retval = kobject_init_and_add(&new_umevent_obj->kobj,
++ &umevent_obj_ktype, NULL,
++ "%s", name);
++ if (retval) {
++ kobject_put(&new_umevent_obj->kobj);
++ return NULL;
++ }
++ psb_umevent_add_to_list(list, new_umevent_obj);
++ return new_umevent_obj;
++}
++/*EXPORT_SYMBOL(psb_create_umevent_obj); */
++/**
++ * psb_umevent_notify - info user mode of a new device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify(struct umevent_obj *notify_disp_obj)
++{
++ kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD);
++}
++/*EXPORT_SYMBOL(psb_umevent_notify); */
++/**
++ * psb_umevent_notify_change - notify user mode of a change to a device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj)
++{
++ kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
++}
++/*EXPORT_SYMBOL(psb_umevent_notify_change); */
++/**
++ * psb_umevent_notify_change_gfxsock - notify user mode of a change to a device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj,
++ int dst_group_id)
++{
++ psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE, dst_group_id);
++}
++/*EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock); */
++/**
++ * psb_destroy_umevent_obj - decrement ref count on event so kernel can kill it
++ *
++ * @any_umevent_obj: event object to destroy
++ *
++ */
++void psb_destroy_umevent_obj(struct umevent_obj
++ *any_umevent_obj)
++{
++ kobject_put(&any_umevent_obj->kobj);
++}
++/**
++ *
++ * psb_umevent_init - init the event pool
++ *
++ * @parent_kobj: parent kobject to associate new kset with
++ * @new_umevent_list: event list to associate kset with
++ * @name: name to give to new sysfs entry
++ *
++ */
++int psb_umevent_init(struct kobject *parent_kobj,
++ struct umevent_list *new_umevent_list,
++ const char *name)
++{
++ psb_umevent_init_list(new_umevent_list);
++ new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL,
++ parent_kobj);
++ if (!new_umevent_list->umevent_disp_pool)
++ return -ENOMEM;
++
++ return 0;
++}
++/*EXPORT_SYMBOL(psb_umevent_init); */
++/**
++ *
++ * psb_umevent_cleanup - cleanup all event objects
++ *
++ * @kill_list: list of events to destroy
++ *
++ */
++void psb_umevent_cleanup(struct umevent_list *kill_list)
++{
++ psb_umevent_destroy_list(kill_list);
++}
++/*EXPORT_SYMBOL(psb_umevent_cleanup); */
++/**
++ * psb_umevent_add_to_list - add an event to the event list
++ *
++ * @list: list to add the event to
++ * @umevent_obj_to_add: event to add
++ *
++ */
++void psb_umevent_add_to_list(struct umevent_list *list,
++ struct umevent_obj *umevent_obj_to_add)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&list->list_lock, flags);
++ list_add(&umevent_obj_to_add->head, &list->head);
++ spin_unlock_irqrestore(&list->list_lock, flags);
++}
++/**
++ * psb_umevent_init_list - initialize event list
++ *
++ * @list: list to initialize
++ *
++ */
++void psb_umevent_init_list(struct umevent_list *list)
++{
++ spin_lock_init(&list->list_lock);
++ INIT_LIST_HEAD(&list->head);
++}
++/**
++ * psb_umevent_create_list - allocate an event list
++ *
++ */
++struct umevent_list *psb_umevent_create_list()
++{
++ struct umevent_list *new_umevent_list;
++ new_umevent_list = NULL;
++ new_umevent_list = kmalloc(sizeof(struct umevent_list),
++ GFP_ATOMIC);
++ return new_umevent_list;
++}
++/*EXPORT_SYMBOL(psb_umevent_create_list); */
++/**
++ * psb_umevent_destroy_list - destroy a list and clean up all mem
++ *
++ * @list: list to destroy and clean up after
++ *
++ */
++void psb_umevent_destroy_list(struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr;
++ struct list_head *node;
++ struct list_head *node_kill;
++ int i;
++ i = 0;
++ node = NULL;
++ node_kill = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj,
++ head);
++ node_kill = node;
++ node = umevent_obj_curr->head.next;
++ psb_destroy_umevent_obj(umevent_obj_curr);
++ umevent_obj_curr = NULL;
++ list_del(node_kill);
++ i++;
++ }
++ kset_unregister(list->umevent_disp_pool);
++ kfree(list);
++}
++/**
++ * psb_umevent_remove_from_list - remove an event from tracking list
++ *
++ * @list: list to remove the event from
++ * @disp_to_remove: name of event to remove.
++ *
++ */
++void psb_umevent_remove_from_list(struct umevent_list *list,
++ const char *disp_to_remove)
++{
++ struct umevent_obj *umevent_obj_curr = NULL;
++ struct list_head *node = NULL;
++ struct list_head *node_kill = NULL;
++ int i = 0;
++ int found_match = 0;
++ i = 0;
++ node = NULL;
++ node_kill = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj, head);
++ if (strcmp(umevent_obj_curr->kobj.name,
++ disp_to_remove) == 0) {
++ found_match = 1;
++ break;
++ }
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ if (found_match == 1) {
++ node_kill = node;
++ node = umevent_obj_curr->head.next;
++ psb_destroy_umevent_obj(umevent_obj_curr);
++ umevent_obj_curr = NULL;
++ list_del(node_kill);
++ }
++}
++/*EXPORT_SYMBOL(psb_umevent_remove_from_list); */
++/**
++ * psb_umevent_find_obj - find an event in a tracking list
++ *
++ * @name: name of the event to find
++ * @list: list to find the event in
++ *
++ */
++struct umevent_obj *psb_umevent_find_obj(const char *name,
++ struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr = NULL;
++ struct list_head *node = NULL;
++ struct list_head *node_find = NULL;
++ int i = 0;
++ int found_match = 0;
++ i = 0;
++ node = NULL;
++ node_find = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj, head);
++ if (strcmp(umevent_obj_curr->kobj.name,
++ name) == 0) {
++ found_match = 1;
++ break;
++ }
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ if (found_match == 1)
++ return umevent_obj_curr;
++
++ return NULL;
++}
++/*EXPORT_SYMBOL(psb_umevent_find_obj); */
++/**
++ * psb_umevent_debug_dump_list - debug list dump
++ *
++ * @list: list to dump
++ *
++ */
++void psb_umevent_debug_dump_list(struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr;
++ unsigned long flags;
++ struct list_head *node;
++ int i;
++ spin_lock_irqsave(&list->list_lock, flags);
++ i = 0;
++ node = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj,
++ head);
++ /*TBD: DUMP ANY REQUIRED VALUES WITH PRINTK*/
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ spin_unlock_irqrestore(&list->list_lock, flags);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/psb_umevents.h
+@@ -0,0 +1,159 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#ifndef _PSB_UMEVENT_H_
++#define _PSB_UMEVENT_H_
++/**
++ * required includes
++ *
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <drm/drmP.h>
++#include <drm/drm_core.h>
++#include <drm/drm_pciids.h>
++#include <linux/spinlock.h>
++/**
++ * event groups for routing to different user mode threads
++ *
++ */
++#define DRM_DPST_SOCKET_GROUP_ID 1
++#define DRM_HOTPLUG_SOCKET_GROUP_ID 2
++#define DRM_HDMI_AUDIO_SOCKET_GROUP 4
++#define DRM_HDMI_HDCP_SOCKET_GROUP 8
++#define DRM_GFX_SOCKET_GROUPS 15
++/**
++ * event structure managed by kobjects
++ *
++ */
++struct umevent_obj {
++ struct kobject kobj;
++ struct list_head head;
++ int data_0_val;
++ int data_1_val;
++ int data_2_val;
++ int data_3_val;
++ int data_4_val;
++ int data_5_val;
++ int data_6_val;
++ int data_7_val;
++};
++/**
++ * event tracking list element
++ *
++ */
++struct umevent_list{
++ struct list_head head;
++ struct kset *umevent_disp_pool;
++ spinlock_t list_lock;
++};
++/**
++ * to go back and forth between kobjects and their main container
++ *
++ */
++#define to_umevent_obj(x) \
++ container_of(x, struct umevent_obj, kobj)
++
++/**
++ * event attributes exposed via sysfs
++ *
++ */
++struct umevent_attribute {
++ struct attribute attr;
++ ssize_t (*show)(struct umevent_obj *any_umevent_obj,
++ struct umevent_attribute *attr, char *buf);
++ ssize_t (*store)(struct umevent_obj *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count);
++};
++/**
++ * to go back and forth between the attribute passed to us by the OS
++ * and the umevent_attribute
++ *
++ */
++#define to_umevent_attr(x) \
++ container_of(x, struct umevent_attribute, \
++ attr)
++
++/**
++ * umevent function prototypes
++ *
++ */
++extern struct umevent_obj *psb_create_umevent_obj(const char *name,
++ struct umevent_list
++ *list);
++extern ssize_t psb_umevent_attr_show(struct kobject *kobj,
++ struct attribute *attr, char *buf);
++extern ssize_t psb_umevent_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len);
++extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ char *buf);
++extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count);
++extern void psb_umevent_cleanup(struct umevent_list *kill_list);
++extern int psb_umevent_init(struct kobject *parent_kobj,
++ struct umevent_list *new_umevent_list,
++ const char *name);
++extern void psb_umevent_init_list(struct umevent_list *list);
++extern void psb_umevent_debug_dump_list(struct umevent_list *list);
++extern void psb_umevent_add_to_list(struct umevent_list *list,
++ struct umevent_obj
++ *umevent_obj_to_add);
++extern void psb_umevent_destroy_list(struct umevent_list *list);
++extern struct umevent_list *psb_umevent_create_list(void);
++extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj);
++extern void psb_umevent_obj_release(struct kobject *kobj);
++extern void psb_umevent_remove_from_list(struct umevent_list *list,
++ const char *disp_to_remove);
++extern void psb_umevent_workqueue_dispatch(int work_type, const char *name,
++ struct umevent_list *list);
++extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj);
++extern void psb_umevent_notify_change_gfxsock(struct umevent_obj
++ *notify_disp_obj,
++ int dst_group_id);
++extern struct umevent_obj *psb_umevent_find_obj(const char *name,
++ struct umevent_list
++ *list);
++/**
++ * socket function prototypes
++ *
++ */
++extern int psb_kobject_uevent(struct kobject *kobj,
++ enum kobject_action action, int dst_group_id);
++extern int psb_kobject_uevent_env(struct kobject *kobj,
++ enum kobject_action action,
++ char *envp[], int dst_group_id);
++int psb_add_uevent_var(struct kobj_uevent_env *env,
++ const char *format, ...)
++ __attribute__((format (printf, 2, 3)));
++int psb_kobject_action_type(const char *buf,
++ size_t count, enum kobject_action *type);
++
++#if defined(CONFIG_NET)
++int psb_kobject_uevent_init(void);
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/topaz_power.c
+@@ -0,0 +1,229 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Author:binglin.chen@intel.com
++ */
++
++#include "topaz_power.h"
++#include "lnc_topaz.h"
++#include "pnw_topaz.h"
++#include "psb_drv.h"
++#include "services_headers.h"
++#include "sysconfig.h"
++
++static PVRSRV_ERROR DevInitTOPAZPart1(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ /* register power operation function */
++ /* FIXME: this should be in part2 init function, but
++ * currently here only OSPM needs IMG device... */
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ TOPAZPrePowerState,
++ TOPAZPostPowerState,
++ TOPAZPreClockSpeedChange,
++ TOPAZPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_ON,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitTOPAZPart1: failed to "
++ "register device with power manager"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevDeInitTOPAZ(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++
++ /* should deinit all resource */
++
++ eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ /* version check */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_TOPAZ;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_VIDEO;
++
++ psDeviceNode->pfnInitDevice = DevInitTOPAZPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitTOPAZ;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = TOPAZDevInitCompatCheck;
++
++ if (IS_MRST(gpDrmDevice))
++ psDeviceNode->pfnDeviceISR = lnc_topaz_interrupt;
++ else
++ psDeviceNode->pfnDeviceISR = pnw_topaz_interrupt;
++ psDeviceNode->pvISRData = (IMG_VOID *)gpDrmDevice;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* ask for a change not power on*/
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ if (IS_MRST(gpDrmDevice))
++ {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ /* context save require irq disable first */
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_save_mtx_state(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ lnc_unmap_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++ else
++ {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ PNW_TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ /* context save require irq disable first */
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ pnw_topaz_save_mtx_state(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ pnw_unmap_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* if ask for change & current status is not on */
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ if (IS_MRST(gpDrmDevice))
++ {
++ /* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERUP);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ lnc_map_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++
++ /* context restore */
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_restore_mtx_state(gpDrmDevice);
++ psb_irq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ psb_irq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ }
++ else
++ {/* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
++ PNW_TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERUP);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ pnw_map_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++
++ /* context restore */
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ pnw_topaz_restore_mtx_state(gpDrmDevice);
++ psb_irq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ psb_irq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/topaz_power.h
+@@ -0,0 +1,53 @@
++/*
++** topaz_power.h
++** Login : <binglin.chen@intel.com>
++** Started on Mon Nov 16 13:31:42 2009 brady
++**
++** Copyright (C) 2009 brady
++** This program is free software; you can redistribute it and/or modify
++** it under the terms of the GNU General Public License as published by
++** the Free Software Foundation; either version 2 of the License, or
++** (at your option) any later version.
++**
++** This program is distributed in the hope that it will be useful,
++** but WITHOUT ANY WARRANTY; without even the implied warranty of
++** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++** GNU General Public License for more details.
++**
++** You should have received a copy of the GNU General Public License
++** along with this program; if not, write to the Free Software
++** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#ifndef TOPAZ_POWER_H_
++#define TOPAZ_POWER_H_
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++extern struct drm_device *gpDrmDevice;
++
++/* function define */
++PVRSRV_ERROR TOPAZRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR TOPAZDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* power function define */
++PVRSRV_ERROR TOPAZPrePowerState(
++ IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPostPowerState(
++ IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPreClockSpeedChange(
++ IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPostClockSpeedChange(
++ IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZInitOSPM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* !TOPAZ_POWER_H_ */
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_agp_backend.c
+@@ -0,0 +1,144 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ * Keith Packard.
++ */
++
++#include "ttm_bo_driver.h"
++#ifdef TTM_HAS_AGP
++#include "ttm_placement_common.h"
++#include <linux/agp_backend.h>
++#include <asm/agp.h>
++#include <linux/io.h>
++
++struct ttm_agp_backend {
++ struct ttm_backend backend;
++ struct agp_memory *mem;
++ struct agp_bridge_data *bridge;
++};
++
++static int ttm_agp_populate(struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct page **cur_page, **last_page = pages + num_pages;
++ struct agp_memory *mem;
++
++ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++ if (unlikely(mem == NULL))
++ return -ENOMEM;
++
++ mem->page_count = 0;
++ for (cur_page = pages; cur_page < last_page; ++cur_page) {
++ struct page *page = *cur_page;
++ if (!page)
++ page = dummy_read_page;
++
++ #if 0
++ mem->memory[mem->page_count++] =
++ phys_to_gart(page_to_phys(page));
++ #endif
++ }
++ agp_be->mem = mem;
++ return 0;
++}
++
++static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
++ int ret;
++
++ mem->is_flushed = 1;
++ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
++
++ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
++ if (ret)
++ printk(KERN_ERR "AGP Bind memory failed.\n");
++
++ return ret;
++}
++
++static int ttm_agp_unbind(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem->is_bound)
++ return agp_unbind_memory(agp_be->mem);
++ else
++ return 0;
++}
++
++static void ttm_agp_clear(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++
++ if (mem) {
++ ttm_agp_unbind(backend);
++ agp_free_memory(mem);
++ }
++ agp_be->mem = NULL;
++}
++
++static void ttm_agp_destroy(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem)
++ ttm_agp_clear(backend);
++ kfree(agp_be);
++}
++
++static struct ttm_backend_func ttm_agp_func = {
++ .populate = ttm_agp_populate,
++ .clear = ttm_agp_clear,
++ .bind = ttm_agp_bind,
++ .unbind = ttm_agp_unbind,
++ .destroy = ttm_agp_destroy,
++};
++
++struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge)
++{
++ struct ttm_agp_backend *agp_be;
++
++ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
++ if (!agp_be)
++ return NULL;
++
++ agp_be->mem = NULL;
++ agp_be->bridge = bridge;
++ agp_be->backend.func = &ttm_agp_func;
++ agp_be->backend.bdev = bdev;
++ return &agp_be->backend;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_bo.c
+@@ -0,0 +1,1730 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/jiffies.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++
++#define TTM_ASSERT_LOCKED(param)
++#define TTM_DEBUG(fmt, arg...)
++#define TTM_BO_HASH_ORDER 13
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
++static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
++
++static inline uint32_t ttm_bo_type_flags(unsigned type)
++{
++ uint32_t return_type = 1 << (type);
++ return return_type;
++}
++
++static void ttm_bo_release_list(struct kref *list_kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(list_kref, struct ttm_buffer_object, list_kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ BUG_ON(atomic_read(&bo->list_kref.refcount));
++ BUG_ON(atomic_read(&bo->kref.refcount));
++ BUG_ON(atomic_read(&bo->cpu_writers));
++ BUG_ON(bo->sync_obj != NULL);
++ BUG_ON(bo->mem.mm_node != NULL);
++ BUG_ON(!list_empty(&bo->lru));
++ BUG_ON(!list_empty(&bo->ddestroy));
++
++ if (bo->ttm)
++ ttm_tt_destroy(bo->ttm);
++ if (bo->destroy)
++ bo->destroy(bo);
++ else {
++ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
++ kfree(bo);
++ }
++}
++
++int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
++{
++
++ if (interruptible) {
++ int ret = 0;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
++ }
++ return 0;
++}
++
++static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++
++ BUG_ON(!list_empty(&bo->lru));
++
++ man = &bdev->man[bo->mem.mem_type];
++ list_add_tail(&bo->lru, &man->lru);
++ kref_get(&bo->list_kref);
++
++ if (bo->ttm != NULL) {
++ list_add_tail(&bo->swap, &bdev->swap_lru);
++ kref_get(&bo->list_kref);
++ }
++ }
++}
++
++/*
++ * Call with bdev->lru_lock and bdev->global->swap_lock held..
++ */
++
++static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
++{
++ int put_count = 0;
++
++ if (!list_empty(&bo->swap)) {
++ list_del_init(&bo->swap);
++ ++put_count;
++ }
++ if (!list_empty(&bo->lru)) {
++ list_del_init(&bo->lru);
++ ++put_count;
++ }
++
++ /*
++ * TODO: Add a driver hook to delete from
++ * driver-specific LRU's here.
++ */
++
++ return put_count;
++}
++
++int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (use_sequence && bo->seq_valid &&
++ (sequence - bo->val_seq < (1 << 31))) {
++ return -EAGAIN;
++ }
++
++ if (no_wait)
++ return -EBUSY;
++
++ spin_unlock(&bdev->lru_lock);
++ ret = ttm_bo_wait_unreserved(bo, interruptible);
++ spin_lock(&bdev->lru_lock);
++
++ if (unlikely(ret))
++ return ret;
++ }
++
++ if (use_sequence) {
++ bo->val_seq = sequence;
++ bo->seq_valid = true;
++ } else {
++ bo->seq_valid = false;
++ }
++
++ return 0;
++}
++
++static void ttm_bo_ref_bug(struct kref *list_kref)
++{
++ BUG();
++}
++
++int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int put_count = 0;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
++ sequence);
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ return ret;
++}
++
++void ttm_bo_unreserve(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ spin_lock(&bdev->lru_lock);
++ ttm_bo_add_to_lru(bo);
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ spin_unlock(&bdev->lru_lock);
++}
++
++/*
++ * Call bo->mutex locked.
++ */
++
++static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ uint32_t page_flags = 0;
++
++ TTM_ASSERT_LOCKED(&bo->mutex);
++ bo->ttm = NULL;
++
++ switch (bo->type) {
++ case ttm_bo_type_device:
++ case ttm_bo_type_kernel:
++ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++ page_flags, bdev->dummy_read_page);
++ if (unlikely(bo->ttm == NULL))
++ ret = -ENOMEM;
++ break;
++ case ttm_bo_type_user:
++ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++ page_flags | TTM_PAGE_FLAG_USER,
++ bdev->dummy_read_page);
++ if (unlikely(bo->ttm == NULL)) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ ret = ttm_tt_set_user(bo->ttm, current,
++ bo->buffer_start, bo->num_pages);
++ if (unlikely(ret != 0))
++ ttm_tt_destroy(bo->ttm);
++ break;
++ default:
++ printk(KERN_ERR "Illegal buffer object type\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool evict, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
++ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
++ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
++ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
++ int ret = 0;
++
++ if (old_is_pci || new_is_pci ||
++ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
++ ttm_bo_unmap_virtual(bo);
++
++ /*
++ * Create and bind a ttm if required.
++ */
++
++ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ goto out_err;
++
++ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
++ if (ret)
++ return ret;
++
++ if (mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(bo->ttm, mem);
++ if (ret)
++ goto out_err;
++ }
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++
++ *old_mem = *mem;
++ mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, mem->flags,
++ TTM_PL_MASK_MEMTYPE);
++ goto moved;
++ }
++
++ }
++
++ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
++ else if (bdev->driver->move)
++ ret = bdev->driver->move(bo, evict, interruptible,
++ no_wait, mem);
++ else
++ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
++
++ if (ret)
++ goto out_err;
++
++moved:
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
++ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
++ if (ret)
++ printk(KERN_ERR "Can not flush read caches\n");
++ }
++
++ ttm_flag_masked(&bo->priv_flags,
++ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++ if (bo->mem.mm_node)
++ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
++ bdev->man[bo->mem.mem_type].gpu_offset;
++
++ return 0;
++
++out_err:
++ new_man = &bdev->man[bo->mem.mem_type];
++ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
++ ttm_tt_unbind(bo->ttm);
++ ttm_tt_destroy(bo->ttm);
++ bo->ttm = NULL;
++ }
++
++ return ret;
++}
++
++static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
++ bool allow_errors)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ if (bo->sync_obj) {
++ if (bdev->nice_mode) {
++ unsigned long _end = jiffies + 3 * HZ;
++ int ret;
++ do {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret && allow_errors)
++ return ret;
++
++ } while (ret && !time_after_eq(jiffies, _end));
++
++ if (bo->sync_obj) {
++ bdev->nice_mode = false;
++ printk(KERN_ERR "Detected probable GPU lockup. "
++ "Evicting buffer.\n");
++ }
++ }
++ if (bo->sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ }
++ return 0;
++}
++
++/**
++ * If bo idle, remove from delayed- and lru lists, and unref.
++ * If not idle, and already on delayed list, do nothing.
++ * If not idle, and not on delayed list, put on delayed list,
++ * up the list_kref and schedule a delayed list check.
++ */
++
++static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ mutex_lock(&bo->mutex);
++
++ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
++ bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++
++ if (bo->sync_obj && remove_all)
++ (void)ttm_bo_expire_sync_obj(bo, false);
++
++ if (!bo->sync_obj) {
++ int put_count;
++
++ if (bo->ttm)
++ ttm_tt_unbind(bo->ttm);
++ spin_lock(&bdev->lru_lock);
++ if (!list_empty(&bo->ddestroy)) {
++ list_del_init(&bo->ddestroy);
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++ }
++ if (bo->mem.mm_node) {
++ drm_mm_put_block(bo->mem.mm_node);
++ bo->mem.mm_node = NULL;
++ }
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++ mutex_unlock(&bo->mutex);
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++
++ return;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bo->ddestroy)) {
++ spin_unlock(&bdev->lru_lock);
++ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bo->ddestroy)) {
++ kref_get(&bo->list_kref);
++ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
++ }
++ spin_unlock(&bdev->lru_lock);
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ } else
++ spin_unlock(&bdev->lru_lock);
++
++ mutex_unlock(&bo->mutex);
++ return;
++}
++
++/**
++ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
++ * encountered buffers.
++ */
++
++static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
++{
++ struct ttm_buffer_object *entry, *nentry;
++ struct list_head *list, *next;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ list_for_each_safe(list, next, &bdev->ddestroy) {
++ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
++ nentry = NULL;
++
++ /*
++ * Protect the next list entry from destruction while we
++ * unlock the lru_lock.
++ */
++
++ if (next != &bdev->ddestroy) {
++ nentry = list_entry(next, struct ttm_buffer_object,
++ ddestroy);
++ kref_get(&nentry->list_kref);
++ }
++ kref_get(&entry->list_kref);
++
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_cleanup_refs(entry, remove_all);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++
++ if (nentry) {
++ bool next_onlist = !list_empty(next);
++ kref_put(&nentry->list_kref, ttm_bo_release_list);
++
++ /*
++ * Someone might have raced us and removed the
++ * next entry from the list. We don't bother restarting
++ * list traversal.
++ */
++
++ if (!next_onlist)
++ break;
++ }
++ }
++ ret = !list_empty(&bdev->ddestroy);
++ spin_unlock(&bdev->lru_lock);
++
++ return ret;
++}
++
++static void ttm_bo_delayed_workqueue(struct work_struct *work)
++{
++ struct ttm_bo_device *bdev =
++ container_of(work, struct ttm_bo_device, wq.work);
++
++ if (ttm_bo_delayed_delete(bdev, false)) {
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ }
++}
++
++static void ttm_bo_release(struct kref *kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(kref, struct ttm_buffer_object, kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ if (likely(bo->vm_node != NULL)) {
++ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
++ drm_mm_put_block(bo->vm_node);
++ }
++ write_unlock(&bdev->vm_lock);
++ ttm_bo_cleanup_refs(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ write_lock(&bdev->vm_lock);
++}
++
++void ttm_bo_unref(struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo = *p_bo;
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ *p_bo = NULL;
++ write_lock(&bdev->vm_lock);
++ kref_put(&bo->kref, ttm_bo_release);
++ write_unlock(&bdev->vm_lock);
++}
++
++static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
++ bool interruptible, bool no_wait)
++{
++ int ret = 0;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg evict_mem;
++
++ if (bo->mem.mem_type != mem_type)
++ goto out;
++
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret && ret != -ERESTART) {
++ printk(KERN_ERR "Failed to expire sync object before "
++ "buffer eviction.\n");
++ goto out;
++ }
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++
++ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ if (unlikely(ret != 0 && ret != -ERESTART)) {
++ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ }
++
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed to find memory space for "
++ "buffer 0x%p eviction.\n", bo);
++ goto out;
++ }
++
++ ret = ttm_bo_handle_move_mem(bo,
++ &evict_mem,
++ true,
++ interruptible,
++ no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Buffer eviction failed\n");
++ goto out;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (evict_mem.mm_node) {
++ drm_mm_put_block(evict_mem.mm_node);
++ evict_mem.mm_node = NULL;
++ }
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++out:
++ return ret;
++}
++
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ */
++static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ uint32_t mem_type,
++ bool interruptible, bool no_wait)
++{
++ struct drm_mm_node *node;
++ struct ttm_buffer_object *entry;
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ struct list_head *lru;
++ unsigned long num_pages = mem->num_pages;
++ int put_count = 0;
++ int ret;
++
++retry_pre_get:
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret != 0))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ do {
++ node = drm_mm_search_free(&man->manager, num_pages,
++ mem->page_alignment, 1);
++ if (node)
++ break;
++
++ lru = &man->lru;
++ if (list_empty(lru))
++ break;
++
++ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++
++ ret = ttm_bo_reserve_locked(entry,
++ interruptible,
++ no_wait,
++ false,
++ 0);
++
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(entry);
++
++ spin_unlock(&bdev->lru_lock);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++
++ mutex_lock(&entry->mutex);
++ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
++ mutex_unlock(&entry->mutex);
++
++ ttm_bo_unreserve(entry);
++
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ if (ret)
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ } while (1);
++
++ if (!node) {
++ spin_unlock(&bdev->lru_lock);
++ return -ENOMEM;
++ }
++
++ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ goto retry_pre_get;
++ }
++
++ spin_unlock(&bdev->lru_lock);
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ return 0;
++}
++
++static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
++ bool disallow_fixed,
++ uint32_t mem_type,
++ uint32_t mask, uint32_t *res_mask)
++{
++ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
++ return false;
++
++ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
++ return false;
++
++ if ((mask & man->available_caching) == 0)
++ return false;
++ if (mask & man->default_caching)
++ cur_flags |= man->default_caching;
++ else if (mask & TTM_PL_FLAG_CACHED)
++ cur_flags |= TTM_PL_FLAG_CACHED;
++ else if (mask & TTM_PL_FLAG_WC)
++ cur_flags |= TTM_PL_FLAG_WC;
++ else
++ cur_flags |= TTM_PL_FLAG_UNCACHED;
++
++ *res_mask = cur_flags;
++ return true;
++}
++
++/**
++ * Creates space for memory region @mem according to its type.
++ *
++ * This function first searches for free space in compatible memory types in
++ * the priority order defined by the driver. If free space isn't found, then
++ * ttm_bo_mem_force_space is attempted in priority order to evict and find
++ * space.
++ */
++int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ uint32_t num_prios = bdev->driver->num_mem_type_prio;
++ const uint32_t *prios = bdev->driver->mem_type_prio;
++ uint32_t i;
++ uint32_t mem_type = TTM_PL_SYSTEM;
++ uint32_t cur_flags = 0;
++ bool type_found = false;
++ bool type_ok = false;
++ bool has_eagain = false;
++ struct drm_mm_node *node = NULL;
++ int ret;
++
++ mem->mm_node = NULL;
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ type_ok = ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type, mem->proposed_flags,
++ &cur_flags);
++
++ if (!type_ok)
++ continue;
++
++ if (mem_type == TTM_PL_SYSTEM)
++ break;
++
++ if (man->has_type && man->use_type) {
++ type_found = true;
++ do {
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ node = drm_mm_search_free(&man->manager,
++ mem->num_pages,
++ mem->page_alignment,
++ 1);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ break;
++ }
++ node = drm_mm_get_block_atomic(node,
++ mem->num_pages,
++ mem->
++ page_alignment);
++ spin_unlock(&bdev->lru_lock);
++ } while (!node);
++ }
++ if (node)
++ break;
++ }
++
++ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (!type_found)
++ return -EINVAL;
++
++ num_prios = bdev->driver->num_mem_busy_prio;
++ prios = bdev->driver->mem_busy_prio;
++
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ if (!man->has_type)
++ continue;
++
++ if (!ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type,
++ mem->proposed_flags, &cur_flags))
++ continue;
++
++ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
++ interruptible, no_wait);
++
++ if (ret == 0 && mem->mm_node) {
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (ret == -ERESTART)
++ has_eagain = true;
++ }
++
++ ret = (has_eagain) ? -ERESTART : -ENOMEM;
++ return ret;
++}
++
++/*
++ * Call bo->mutex locked.
++ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
++ */
++
++static int ttm_bo_busy(struct ttm_buffer_object *bo)
++{
++ void *sync_obj = bo->sync_obj;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++
++ if (sync_obj) {
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ return 1;
++ }
++ return 0;
++}
++
++int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
++ return -EBUSY;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->cpu_writers) == 0);
++
++ if (ret == -ERESTARTSYS)
++ ret = -ERESTART;
++
++ return ret;
++}
++
++/*
++ * bo->mutex locked.
++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
++ */
++
++int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
++ bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ struct ttm_mem_reg mem;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ /*
++ * FIXME: It's possible to pipeline buffer moves.
++ * Have the driver move function wait for idle when necessary,
++ * instead of doing it here.
++ */
++
++ ttm_bo_busy(bo);
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret)
++ return ret;
++
++ mem.num_pages = bo->num_pages;
++ mem.size = mem.num_pages << PAGE_SHIFT;
++ mem.proposed_flags = new_mem_flags;
++ mem.page_alignment = bo->mem.page_alignment;
++
++ /*
++ * Determine where to move the buffer.
++ */
++
++ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
++ if (ret)
++ goto out_unlock;
++
++ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
++
++out_unlock:
++ if (ret && mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(mem.mm_node);
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
++{
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
++ return 0;
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
++ return 0;
++
++ return 1;
++}
++
++int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait)
++{
++ int ret;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++ bo->mem.proposed_flags = bo->proposed_flags;
++
++ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
++ (unsigned long)bo->mem.proposed_flags,
++ (unsigned long)bo->mem.flags);
++
++ /*
++ * Check whether we need to move buffer.
++ */
++
++ if (!ttm_bo_mem_compat(&bo->mem)) {
++ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
++ interruptible, no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed moving buffer. "
++ "Proposed placement 0x%08x\n",
++ bo->mem.proposed_flags);
++ if (ret == -ENOMEM)
++ printk(KERN_ERR "Out of aperture space or "
++ "DRM memory quota.\n");
++ return ret;
++ }
++ }
++
++ /*
++ * We might need to add a TTM.
++ */
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ return ret;
++ }
++ /*
++ * Validation has succeeded, move the access and other
++ * non-mapping-related flag bits from the proposed flags to
++ * the active flags
++ */
++
++ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
++ ~TTM_PL_MASK_MEMTYPE);
++
++ return 0;
++}
++
++int
++ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags)
++{
++ uint32_t new_mask = set_flags | clr_flags;
++
++ if ((bo->type == ttm_bo_type_user) &&
++ (clr_flags & TTM_PL_FLAG_CACHED)) {
++ printk(KERN_ERR
++ "User buffers require cache-coherent memory.\n");
++ return -EINVAL;
++ }
++
++ if (!capable(CAP_SYS_ADMIN)) {
++ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
++ printk(KERN_ERR "Need to be root to modify"
++ " NO_EVICT status.\n");
++ return -EINVAL;
++ }
++
++ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
++ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++ printk(KERN_ERR "Incompatible memory specification"
++ " for NO_EVICT buffer.\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *))
++{
++ int ret = 0;
++ unsigned long num_pages;
++
++ size += buffer_start & ~PAGE_MASK;
++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (num_pages == 0) {
++ printk(KERN_ERR "Illegal buffer object size.\n");
++ return -EINVAL;
++ }
++ bo->destroy = destroy;
++
++ mutex_init(&bo->mutex);
++ mutex_lock(&bo->mutex);
++ kref_init(&bo->kref);
++ kref_init(&bo->list_kref);
++ atomic_set(&bo->cpu_writers, 0);
++ atomic_set(&bo->reserved, 1);
++ init_waitqueue_head(&bo->event_queue);
++ INIT_LIST_HEAD(&bo->lru);
++ INIT_LIST_HEAD(&bo->ddestroy);
++ INIT_LIST_HEAD(&bo->swap);
++ bo->bdev = bdev;
++ bo->type = type;
++ bo->num_pages = num_pages;
++ bo->mem.mem_type = TTM_PL_SYSTEM;
++ bo->mem.num_pages = bo->num_pages;
++ bo->mem.mm_node = NULL;
++ bo->mem.page_alignment = page_alignment;
++ bo->buffer_start = buffer_start & PAGE_MASK;
++ bo->priv_flags = 0;
++ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
++ bo->seq_valid = false;
++ bo->persistant_swap_storage = persistant_swap_storage;
++ bo->acc_size = acc_size;
++
++ ret = ttm_bo_check_placement(bo, flags, 0ULL);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ /*
++ * If no caching attributes are set, accept any form of caching.
++ */
++
++ if ((flags & TTM_PL_MASK_CACHING) == 0)
++ flags |= TTM_PL_MASK_CACHING;
++
++ bo->proposed_flags = flags;
++ bo->mem.proposed_flags = flags;
++
++ /*
++ * For ttm_bo_type_device buffers, allocate
++ * address space from the device.
++ */
++
++ if (bo->type == ttm_bo_type_device) {
++ ret = ttm_bo_setup_vm(bo);
++ if (ret)
++ goto out_err;
++ }
++
++ ret = ttm_buffer_object_validate(bo, interruptible, false);
++ if (ret)
++ goto out_err;
++
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return 0;
++
++out_err:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ return ret;
++}
++
++static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
++ unsigned long num_pages)
++{
++ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
++ PAGE_MASK;
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++
++ size_t acc_size =
++ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
++
++ if (unlikely(bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
++ page_alignment, buffer_start,
++ interruptible,
++ persistant_swap_storage, acc_size, NULL);
++ if (likely(ret == 0))
++ *p_bo = bo;
++
++ return ret;
++}
++
++static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
++ uint32_t mem_type, bool allow_errors)
++{
++ int ret;
++
++ mutex_lock(&bo->mutex);
++
++ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
++ if (ret)
++ goto out;
++
++ if (bo->mem.mem_type == mem_type)
++ ret = ttm_bo_evict(bo, mem_type, false, false);
++
++ if (ret) {
++ if (allow_errors)
++ goto out;
++ else {
++ ret = 0;
++ printk(KERN_ERR "Cleanup eviction failed\n");
++ }
++ }
++
++out:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
++ struct list_head *head,
++ unsigned mem_type, bool allow_errors)
++{
++ struct ttm_buffer_object *entry;
++ int ret;
++ int put_count;
++
++ /*
++ * Can't use standard list traversal since we're unlocking.
++ */
++
++ spin_lock(&bdev->lru_lock);
++
++ while (!list_empty(head)) {
++ entry = list_first_entry(head, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
++ put_count = ttm_bo_del_from_lru(entry);
++ spin_unlock(&bdev->lru_lock);
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++ BUG_ON(ret);
++ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
++ ttm_bo_unreserve(entry);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++
++ spin_unlock(&bdev->lru_lock);
++
++ return 0;
++}
++
++int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ int ret = -EINVAL;
++
++ if (mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
++ return ret;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Trying to take down uninitialized "
++ "memory manager type %u\n", mem_type);
++ return ret;
++ }
++
++ man->use_type = false;
++ man->has_type = false;
++
++ ret = 0;
++ if (mem_type > 0) {
++ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
++
++ spin_lock(&bdev->lru_lock);
++ if (drm_mm_clean(&man->manager))
++ drm_mm_takedown(&man->manager);
++ else
++ ret = -EBUSY;
++ spin_unlock(&bdev->lru_lock);
++ }
++
++ return ret;
++}
++
++int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++
++ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
++ mem_type);
++ return -EINVAL;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Memory type %u has not been initialized.\n",
++ mem_type);
++ return 0;
++ }
++
++ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
++}
++
++int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size)
++{
++ int ret = -EINVAL;
++ struct ttm_mem_type_manager *man;
++
++ if (type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", type);
++ return ret;
++ }
++
++ man = &bdev->man[type];
++ if (man->has_type) {
++ printk(KERN_ERR
++ "Memory manager already initialized for type %d\n",
++ type);
++ return ret;
++ }
++
++ ret = bdev->driver->init_mem_type(bdev, type, man);
++ if (ret)
++ return ret;
++
++ ret = 0;
++ if (type != TTM_PL_SYSTEM) {
++ if (!p_size) {
++ printk(KERN_ERR "Zero size memory manager type %d\n",
++ type);
++ return ret;
++ }
++ ret = drm_mm_init(&man->manager, p_offset, p_size);
++ if (ret)
++ return ret;
++ }
++ man->has_type = true;
++ man->use_type = true;
++ man->size = p_size;
++
++ INIT_LIST_HEAD(&man->lru);
++
++ return 0;
++}
++
++int ttm_bo_device_release(struct ttm_bo_device *bdev)
++{
++ int ret = 0;
++ unsigned i = TTM_NUM_MEM_TYPES;
++ struct ttm_mem_type_manager *man;
++
++ while (i--) {
++ man = &bdev->man[i];
++ if (man->has_type) {
++ man->use_type = false;
++ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
++ ret = -EBUSY;
++ printk(KERN_ERR "DRM memory manager type %d "
++ "is not clean.\n", i);
++ }
++ man->has_type = false;
++ }
++ }
++
++ if (!cancel_delayed_work(&bdev->wq))
++ flush_scheduled_work();
++
++ while (ttm_bo_delayed_delete(bdev, true)) {
++ /* Don't you know you have to do */
++ /* something here otherwise checkpatch will */
++ /* give you error */
++ }
++
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bdev->ddestroy))
++ TTM_DEBUG("Delayed destroy list was clean\n");
++
++ if (list_empty(&bdev->man[0].lru))
++ TTM_DEBUG("Swap list was clean\n");
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
++ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
++ write_lock(&bdev->vm_lock);
++ drm_mm_takedown(&bdev->addr_space_mm);
++ write_unlock(&bdev->vm_lock);
++
++ __free_page(bdev->dummy_read_page);
++ return ret;
++}
++
++/*
++ * This function is intended to be called on drm driver load.
++ * If you decide to call it from firstopen, you must protect the call
++ * from a potentially racing ttm_bo_driver_finish in lastclose.
++ * (This may happen on X server restart).
++ */
++
++int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver, uint64_t file_page_offset)
++{
++ int ret = -EINVAL;
++
++ bdev->dummy_read_page = NULL;
++ rwlock_init(&bdev->vm_lock);
++ spin_lock_init(&bdev->lru_lock);
++
++ bdev->driver = driver;
++ bdev->mem_glob = mem_glob;
++
++ memset(bdev->man, 0, sizeof(bdev->man));
++
++ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
++ if (unlikely(bdev->dummy_read_page == NULL)) {
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++
++ /*
++ * Initialize the system memory buffer type.
++ * Other types need to be driver / IOCTL initialized.
++ */
++ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ bdev->addr_space_rb = RB_ROOT;
++ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
++ bdev->nice_mode = true;
++ INIT_LIST_HEAD(&bdev->ddestroy);
++ INIT_LIST_HEAD(&bdev->swap_lru);
++ bdev->dev_mapping = NULL;
++ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
++ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Could not register buffer object swapout.\n");
++ goto out_err2;
++ }
++ return 0;
++out_err2:
++ ttm_bo_clean_mm(bdev, 0);
++out_err1:
++ __free_page(bdev->dummy_read_page);
++out_err0:
++ return ret;
++}
++
++/*
++ * buffer object vm functions.
++ */
++
++bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
++ if (mem->mem_type == TTM_PL_SYSTEM)
++ return false;
++
++ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
++ return false;
++
++ if (mem->flags & TTM_PL_FLAG_CACHED)
++ return false;
++ }
++ return true;
++}
++
++int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset, unsigned long *bus_size)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ *bus_size = 0;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
++ return -EINVAL;
++
++ if (ttm_mem_reg_is_pci(bdev, mem)) {
++ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
++ *bus_size = mem->num_pages << PAGE_SHIFT;
++ *bus_base = man->io_offset;
++ }
++
++ return 0;
++}
++
++/**
++ * \c Kill all user-space virtual mappings of this buffer object.
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ loff_t offset = (loff_t) bo->addr_space_offset;
++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
++
++ if (!bdev->dev_mapping)
++ return;
++
++ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
++}
++
++static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
++ struct rb_node *parent = NULL;
++ struct ttm_buffer_object *cur_bo;
++ unsigned long offset = bo->vm_node->start;
++ unsigned long cur_offset;
++
++ while (*cur) {
++ parent = *cur;
++ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
++ cur_offset = cur_bo->vm_node->start;
++ if (offset < cur_offset)
++ cur = &parent->rb_left;
++ else if (offset > cur_offset)
++ cur = &parent->rb_right;
++ else
++ BUG();
++ }
++
++ rb_link_node(&bo->vm_rb, parent, cur);
++ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
++}
++
++/**
++ * ttm_bo_setup_vm:
++ *
++ * @bo: the buffer to allocate address space for
++ *
++ * Allocate address space in the drm device so that applications
++ * can mmap the buffer and access the contents. This only
++ * applies to ttm_bo_type_device objects as others are not
++ * placed in the drm device address space.
++ */
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++retry_pre_get:
++ ret = drm_mm_pre_get(&bdev->addr_space_mm);
++ if (unlikely(ret != 0))
++ return ret;
++
++ write_lock(&bdev->vm_lock);
++ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
++ bo->mem.num_pages, 0, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++
++ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
++ bo->mem.num_pages, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ write_unlock(&bdev->vm_lock);
++ goto retry_pre_get;
++ }
++
++ ttm_bo_vm_insert_rb(bo);
++ write_unlock(&bdev->vm_lock);
++ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
++
++ return 0;
++out_unlock:
++ write_unlock(&bdev->vm_lock);
++ return ret;
++}
++
++int ttm_bo_wait(struct ttm_buffer_object *bo,
++ bool lazy, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *sync_obj;
++ void *sync_obj_arg;
++ int ret = 0;
++
++ while (bo->sync_obj) {
++ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ goto out;
++ }
++ if (no_wait) {
++ ret = -EBUSY;
++ goto out;
++ }
++ sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ sync_obj_arg = bo->sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
++ lazy, interruptible);
++
++ mutex_lock(&bo->mutex);
++ if (unlikely(ret != 0)) {
++ driver->sync_obj_unref(&sync_obj);
++ return ret;
++ }
++
++ if (bo->sync_obj == sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ driver->sync_obj_unref(&sync_obj);
++ }
++out:
++ return 0;
++}
++
++void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
++{
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++}
++
++int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
++ bool no_wait)
++{
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (no_wait)
++ return -EBUSY;
++ else if (interruptible) {
++ ret = wait_event_interruptible
++ (bo->event_queue, atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ }
++ }
++ return 0;
++}
++
++int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ /*
++ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
++ * makes sure the lru lists are updated.
++ */
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, true, no_wait);
++ if (unlikely(ret != 0))
++ goto out_err0;
++ atomic_inc(&bo->cpu_writers);
++out_err0:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return ret;
++}
++
++void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
++{
++ if (atomic_dec_and_test(&bo->cpu_writers))
++ wake_up_all(&bo->event_queue);
++}
++
++/**
++ * A buffer object shrink method that tries to swap out the first
++ * buffer object on the bo_global::swap_lru list.
++ */
++
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
++{
++ struct ttm_bo_device *bdev =
++ container_of(shrink, struct ttm_bo_device, shrink);
++ struct ttm_buffer_object *bo;
++ int ret = -EBUSY;
++ int put_count;
++ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
++
++ spin_lock(&bdev->lru_lock);
++ while (ret == -EBUSY) {
++ if (unlikely(list_empty(&bdev->swap_lru))) {
++ spin_unlock(&bdev->lru_lock);
++ return -EBUSY;
++ }
++
++ bo = list_first_entry(&bdev->swap_lru,
++ struct ttm_buffer_object, swap);
++ kref_get(&bo->list_kref);
++
++ /**
++ * Reserve buffer. Since we unlock while sleeping, we need
++ * to re-check that nobody removed us from the swap-list while
++ * we slept.
++ */
++
++ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
++ if (unlikely(ret == -EBUSY)) {
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_wait_unreserved(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++ }
++
++ BUG_ON(ret != 0);
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ /**
++ * Wait for GPU, then move to system cached.
++ */
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (unlikely(ret != 0))
++ goto out;
++
++ if ((bo->mem.flags & swap_placement) != swap_placement) {
++ struct ttm_mem_reg evict_mem;
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++ evict_mem.proposed_flags =
++ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.mem_type = TTM_PL_SYSTEM;
++
++ ret = ttm_bo_handle_move_mem(bo,
++ &evict_mem,
++ true,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ ttm_bo_unmap_virtual(bo);
++
++ /**
++ * Swap out. Buffer will be swapped in again as soon as
++ * anyone tries to access a ttm page.
++ */
++
++ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
++out:
++ mutex_unlock(&bo->mutex);
++
++ /**
++ *
++ * Unreserve without putting on LRU to avoid swapping out an
++ * already swapped buffer.
++ */
++
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ return ret;
++}
++
++void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
++{
++ while (ttm_bo_swapout(&bdev->shrink) == 0) {
++ /* Checkpatch doesn't like it */
++ /* adding something here */
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_bo_api.h
+@@ -0,0 +1,573 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_BO_API_H_
++#define _TTM_BO_API_H_
++
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/mm.h>
++#include <linux/rbtree.h>
++
++struct ttm_bo_device;
++
++struct drm_mm_node;
++
++/**
++ * struct ttm_mem_reg
++ *
++ * @mm_node: Memory manager node.
++ * @size: Requested size of memory region.
++ * @num_pages: Actual size of memory region in pages.
++ * @page_alignment: Page alignment.
++ * @flags: Placement flags.
++ * @proposed_flags: Proposed placement flags.
++ *
++ * Structure indicating the placement and space resources used by a
++ * buffer object.
++ */
++
++struct ttm_mem_reg {
++ struct drm_mm_node *mm_node;
++ unsigned long size;
++ unsigned long num_pages;
++ uint32_t page_alignment;
++ uint32_t mem_type;
++ uint32_t flags;
++ uint32_t proposed_flags;
++};
++
++/**
++ * enum ttm_bo_type
++ *
++ * @ttm_bo_type_device: These are 'normal' buffers that can
++ * be mmapped by user space. Each of these bos occupy a slot in the
++ * device address space, that can be used for normal vm operations.
++ *
++ * @ttm_bo_type_user: These are user-space memory areas that are made
++ * available to the GPU by mapping the buffer pages into the GPU aperture
++ * space. These buffers cannot be mmaped from the device address space.
++ *
++ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
++ * but they cannot be accessed from user-space. For kernel-only use.
++ */
++
++enum ttm_bo_type {
++ ttm_bo_type_device,
++ ttm_bo_type_user,
++ ttm_bo_type_kernel
++};
++
++struct ttm_tt;
++
++/**
++ * struct ttm_buffer_object
++ *
++ * @bdev: Pointer to the buffer object device structure.
++ * @kref: Reference count of this buffer object. When this refcount reaches
++ * zero, the object is put on the delayed delete list.
++ * @list_kref: List reference count of this buffer object. This member is
++ * used to avoid destruction while the buffer object is still on a list.
++ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
++ * keeps one refcount. When this refcount reaches zero,
++ * the object is destroyed.
++ * @proposed_flags: Proposed placement for the buffer. Changed only by the
++ * creator prior to validation as opposed to bo->mem.proposed_flags which is
++ * changed by the implementation prior to a buffer move if it wants to outsmart
++ * the buffer creator / user. This latter happens, for example, at eviction.
++ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
++ * buffers.
++ * @type: The bo type.
++ * @offset: The current GPU offset, which can have different meanings
++ * depending on the memory type. For SYSTEM type memory, it should be 0.
++ * @mem: structure describing current placement.
++ * @val_seq: Sequence of the validation holding the @reserved lock.
++ * Used to avoid starvation when many processes compete to validate the
++ * buffer. This member is protected by the bo_device::lru_lock.
++ * @seq_valid: The value of @val_seq is valid. This value is protected by
++ * the bo_device::lru_lock.
++ * @lru: List head for the lru list.
++ * @ddestroy: List head for the delayed destroy list.
++ * @swap: List head for swap LRU list.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistent shmem object.
++ * @destroy: Destruction function. If NULL, kfree is used.
++ * @sync_obj_arg: Opaque argument to synchronization object function.
++ * @sync_obj: Pointer to a synchronization object.
++ * @priv_flags: Flags describing buffer object internal state.
++ * @event_queue: Queue for processes waiting on buffer object status change.
++ * @mutex: Lock protecting all members with the exception of constant members
++ * and list heads. We should really use a spinlock here.
++ * @num_pages: Actual number of pages.
++ * @ttm: TTM structure holding system pages.
++ * @vm_rb: Rb-tree node used for fast address space lookup of this
++ * buffer object.
++ * @vm_node: Address space manager node.
++ * @addr_space_offset: Address space offset.
++ * @cpu_writers: For synchronization. Number of cpu writers.
++ * @reserved: Deadlock-free lock used for synchronization state transitions.
++ * @acc_size: Accounted size for this object.
++ *
++ * Base class for TTM buffer object, that deals with data placement and CPU
++ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
++ * the driver can usually use the placement offset @offset directly as the
++ * GPU virtual address. For drivers implementing multiple
++ * GPU memory manager contexts, the driver should manage the address space
++ * in these contexts separately and use these objects to get the correct
++ * placement and caching for these GPU maps. This makes it possible to use
++ * these objects for even quite elaborate memory management schemes.
++ * The destroy member, the API visibility of this object makes it possible
++ * to derive driver specific types.
++ */
++
++struct ttm_buffer_object {
++ struct ttm_bo_device *bdev;
++ struct kref kref;
++ struct kref list_kref;
++
++ /*
++ * If there is a possibility that the usage variable is zero,
++ * then dev->struct_mutex should be locked before incrementing it.
++ */
++
++ uint32_t proposed_flags;
++ unsigned long buffer_start;
++ enum ttm_bo_type type;
++ unsigned long offset;
++ struct ttm_mem_reg mem;
++ uint32_t val_seq;
++ bool seq_valid;
++
++ struct list_head lru;
++ struct list_head ddestroy;
++ struct list_head swap;
++
++ struct file *persistant_swap_storage;
++
++ void (*destroy) (struct ttm_buffer_object *);
++
++ void *sync_obj_arg;
++ void *sync_obj;
++
++ uint32_t priv_flags;
++ wait_queue_head_t event_queue;
++ struct mutex mutex;
++ unsigned long num_pages;
++
++ struct ttm_tt *ttm;
++ struct rb_node vm_rb;
++ struct drm_mm_node *vm_node;
++ uint64_t addr_space_offset;
++
++ atomic_t cpu_writers;
++ atomic_t reserved;
++
++ size_t acc_size;
++};
++
++/**
++ * struct ttm_bo_kmap_obj
++ *
++ * @virtual: The current kernel virtual address.
++ * @page: The page when kmap'ing a single page.
++ * @bo_kmap_type: Type of bo_kmap.
++ *
++ * Object describing a kernel mapping. Since a TTM bo may be located
++ * in various memory types with various caching policies, the
++ * mapping can either be an ioremap, a vmap, a kmap or part of a
++ * premapped region.
++ */
++
++struct ttm_bo_kmap_obj {
++ void *virtual;
++ struct page *page;
++ enum {
++ ttm_bo_map_iomap,
++ ttm_bo_map_vmap,
++ ttm_bo_map_kmap,
++ ttm_bo_map_premapped,
++ } bo_kmap_type;
++};
++
++/**
++ * ttm_bo_reference - reference a struct ttm_buffer_object
++ *
++ * @bo: The buffer object.
++ *
++ * Returns a refcounted pointer to a buffer object.
++ */
++
++static inline struct ttm_buffer_object *ttm_bo_reference(
++ struct ttm_buffer_object *bo)
++{
++ kref_get(&bo->kref);
++ return bo;
++}
++
++/**
++ * ttm_bo_wait - wait for buffer idle.
++ *
++ * @bo: The buffer object.
++ * @interruptible: Use interruptible wait.
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * This function must be called with the bo::mutex held, and makes
++ * sure any previous rendering to the buffer is completed.
++ * Note: It might be necessary to block validations before the
++ * wait by reserving the buffer.
++ * Returns -EBUSY if no_wait is true and the buffer is busy.
++ * Returns -ERESTART if interrupted by a signal.
++ */
++extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_buffer_object_validate
++ *
++ * @bo: The buffer object.
++ * @interruptible: Sleep interruptible if sleeping.
++ * @no_wait: Return immediately if the buffer is busy.
++ *
++ * Changes placement and caching policy of the buffer object
++ * according to bo::proposed_flags.
++ * Returns
++ * -EINVAL on invalid proposed_flags.
++ * -ENOMEM on out-of-memory condition.
++ * -EBUSY if no_wait is true and buffer busy.
++ * -ERESTART if interrupted by a signal.
++ */
++extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_unref
++ *
++ * @bo: The buffer object.
++ *
++ * Unreference and clear a pointer to a buffer object.
++ */
++extern void ttm_bo_unref(struct ttm_buffer_object **bo);
++
++/**
++ * ttm_bo_synccpu_write_grab
++ *
++ * @bo: The buffer object:
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * Synchronizes a buffer object for CPU RW access. This means
++ * blocking command submission that affects the buffer and
++ * waiting for buffer idle. This lock is recursive.
++ * Returns
++ * -EBUSY if the buffer is busy and no_wait is true.
++ * -ERESTART if interrupted by a signal.
++ */
++
++extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo,
++ bool no_wait);
++/**
++ * ttm_bo_synccpu_write_release:
++ *
++ * @bo : The buffer object.
++ *
++ * Releases a synccpu lock.
++ */
++extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_buffer_object_init
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep to wait for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistent shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @acc_size: Accounted size for this object.
++ * @destroy: Destroy function. Use NULL for kfree().
++ *
++ * This function initializes a pre-allocated struct ttm_buffer_object.
++ * As this object may be part of a larger structure, this function,
++ * together with the @destroy function,
++ * enables driver-specific objects derived from a ttm_buffer_object.
++ * On successful return, the object kref and list_kref are set to 1.
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
++ */
++
++extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *));
++/**
++ * ttm_buffer_object_create
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep while waiting for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistent shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @p_bo: On successful completion *p_bo points to the created object.
++ *
++ * This function allocates a ttm_buffer_object, and then calls
++ * ttm_buffer_object_init on that object.
++ * The destroy function is set to kfree().
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while waiting for resources.
++ */
++
++extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo);
++
++/**
++ * ttm_bo_check_placement
++ *
++ * @bo: the buffer object.
++ * @set_flags: placement flags to set.
++ * @clr_flags: placement flags to clear.
++ *
++ * Performs minimal validity checking on an intended change of
++ * placement flags.
++ * Returns
++ * -EINVAL: Intended change is invalid or not allowed.
++ */
++
++extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags);
++
++/**
++ * ttm_bo_init_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ * @p_offset: offset for managed area in pages.
++ * @p_size: size managed area in pages.
++ *
++ * Initialize a manager for a given memory type.
++ * Note: if part of driver firstopen, it must be protected from a
++ * potentially racing lastclose.
++ * Returns:
++ * -EINVAL: invalid size or memory type.
++ * -ENOMEM: Not enough memory.
++ * May also return driver-specified errors.
++ */
++
++extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size);
++/**
++ * ttm_bo_clean_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Take down a manager for a given memory type after first walking
++ * the LRU list to evict any buffers left alive.
++ *
++ * Normally, this function is part of lastclose() or unload(), and at that
++ * point there shouldn't be any buffers left created by user-space, since
++ * they should have been removed by the file descriptor release() method.
++ * However, before this function is run, make sure to signal all sync objects,
++ * and verify that the delayed delete queue is empty. The driver must also
++ * make sure that there are no NO_EVICT buffers present in this memory type
++ * when the call is made.
++ *
++ * If this function is part of a VT switch, the caller must make sure that
++ * there are no applications currently validating buffers before this
++ * function is called. The caller can do that by first taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: invalid or uninitialized memory type.
++ * -EBUSY: There are still buffers left in this memory type.
++ */
++
++extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_bo_evict_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Evicts all buffers on the lru list of the memory type.
++ * This is normally part of a VT switch or an
++ * out-of-memory-space-due-to-fragmentation handler.
++ * The caller must make sure that there are no other processes
++ * currently validating buffers, and can do that by taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: Invalid or uninitialized memory type.
++ * -ERESTART: The call was interrupted by a signal while waiting to
++ * evict a buffer.
++ */
++
++extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_kmap_obj_virtual
++ *
++ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
++ * @is_iomem: Pointer to an integer that on return indicates 1 if the
++ * virtual map is io memory, 0 if normal memory.
++ *
++ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
++ * If *is_iomem is 1 on return, the virtual address points to an io memory area,
++ * that should strictly be accessed by the iowriteXX() and similar functions.
++ */
++
++static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
++ bool *is_iomem)
++{
++ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
++ map->bo_kmap_type == ttm_bo_map_premapped);
++ return map->virtual;
++}
++
++/**
++ * ttm_bo_kmap
++ *
++ * @bo: The buffer object.
++ * @start_page: The first page to map.
++ * @num_pages: Number of pages to map.
++ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
++ *
++ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
++ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
++ * used to obtain a virtual address to the data.
++ *
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid range.
++ */
++
++extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
++ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
++
++/**
++ * ttm_bo_kunmap
++ *
++ * @map: Object describing the map to unmap.
++ *
++ * Unmaps a kernel map set up by ttm_bo_kmap.
++ */
++
++extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
++
++#if 0
++#endif
++
++/**
++ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
++ *
++ * @vma: vma as input from the fbdev mmap method.
++ * @bo: The bo backing the address space. The address space will
++ * have the same size as the bo, and start at offset 0.
++ *
++ * This function is intended to be called by the fbdev mmap method
++ * if the fbdev address space is to be backed by a bo.
++ */
++
++extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
++ struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_mmap - mmap out of the ttm device address space.
++ *
++ * @filp: filp as input from the mmap method.
++ * @vma: vma as input from the mmap method.
++ * @bdev: Pointer to the ttm_bo_device with the address space manager.
++ *
++ * This function is intended to be called by the device mmap method.
++ * if the device address space is to be backed by the bo manager.
++ */
++
++extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_io
++ *
++ * @bdev: Pointer to the struct ttm_bo_device.
++ * @filp: Pointer to the struct file attempting to read / write.
++ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
++ * @rbuf: User-space pointer to address of buffer to read into.
++ * Null on write.
++ * @count: Number of bytes to read / write.
++ * @f_pos: Pointer to current file position.
++ * @write: 1 for write, 0 for read.
++ *
++ * This function implements read / write into ttm buffer objects, and is
++ * intended to be called from the fops::read and fops::write method.
++ * Returns:
++ * See man (2) write, man(2) read. In particular, the function may
++ * return -EINTR if interrupted by a signal.
++ */
++
++extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user *wbuf, char __user *rbuf,
++ size_t count, loff_t *f_pos, bool write);
++
++extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_bo_driver.h
+@@ -0,0 +1,864 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_BO_DRIVER_H_
++#define _TTM_BO_DRIVER_H_
++
++#include "ttm_bo_api.h"
++#include "ttm_memory.h"
++#include <drm/drm_mm.h>
++#include "linux/workqueue.h"
++#include "linux/fs.h"
++#include "linux/spinlock.h"
++
++#include <linux/slab.h>
++
++struct ttm_backend;
++
++struct ttm_backend_func {
++ /**
++ * struct ttm_backend_func member populate
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @num_pages: Number of pages to populate.
++ * @pages: Array of pointers to ttm pages.
++ * @dummy_read_page: Page to be used instead of NULL pages in the
++ * array @pages.
++ *
++ * Populate the backend with ttm pages. Depending on the backend,
++ * it may or may not copy the @pages array.
++ */
++ int (*populate) (struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page);
++ /**
++ * struct ttm_backend_func member clear
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * This is an "unpopulate" function. Release all resources
++ * allocated with populate.
++ */
++ void (*clear) (struct ttm_backend *backend);
++
++ /**
++ * struct ttm_backend_func member bind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
++ * memory type and location for binding.
++ *
++ * Bind the backend pages into the aperture in the location
++ * indicated by @bo_mem. This function should be able to handle
++ * differences between aperture- and system page sizes.
++ */
++ int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
++
++ /**
++ * struct ttm_backend_func member unbind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Unbind previously bound backend pages. This function should be
++ * able to handle differences between aperture- and system page sizes.
++ */
++ int (*unbind) (struct ttm_backend *backend);
++
++ /**
++ * struct ttm_backend_func member destroy
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Destroy the backend.
++ */
++ void (*destroy) (struct ttm_backend *backend);
++};
++
++/**
++ * struct ttm_backend
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @flags: For driver use.
++ * @func: Pointer to a struct ttm_backend_func that describes
++ * the backend methods.
++ *
++ */
++
++struct ttm_backend {
++ struct ttm_bo_device *bdev;
++ uint32_t flags;
++ struct ttm_backend_func *func;
++};
++
++#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
++#define TTM_PAGE_FLAG_USER (1 << 1)
++#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
++#define TTM_PAGE_FLAG_WRITE (1 << 3)
++#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
++#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
++
++enum ttm_caching_state {
++ tt_uncached,
++ tt_wc,
++ tt_cached
++};
++
++/**
++ * struct ttm_tt
++ *
++ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
++ * pointer.
++ * @pages: Array of pages backing the data.
++ * @first_himem_page: Himem pages are put last in the page array, which
++ * enables us to run caching attribute changes on only the first part
++ * of the page array containing lomem pages. This is the index of the
++ * first himem page.
++ * @last_lomem_page: Index of the last lomem page in the page array.
++ * @num_pages: Number of pages in the page array.
++ * @bdev: Pointer to the current struct ttm_bo_device.
++ * @be: Pointer to the ttm backend.
++ * @tsk: The task for user ttm.
++ * @start: virtual address for user ttm.
++ * @swap_storage: Pointer to shmem struct file for swap storage.
++ * @caching_state: The current caching state of the pages.
++ * @state: The current binding state of the pages.
++ *
++ * This is a structure holding the pages, caching- and aperture binding
++ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
++ * memory.
++ */
++
++struct ttm_tt {
++ struct page *dummy_read_page;
++ struct page **pages;
++ long first_himem_page;
++ long last_lomem_page;
++ uint32_t page_flags;
++ unsigned long num_pages;
++ struct ttm_bo_device *bdev;
++ struct ttm_backend *be;
++ struct task_struct *tsk;
++ unsigned long start;
++ struct file *swap_storage;
++ enum ttm_caching_state caching_state;
++ enum {
++ tt_bound,
++ tt_unbound,
++ tt_unpopulated,
++ } state;
++};
++
++#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
++#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
++#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
++ before kernel access. */
++#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
++
++/**
++ * struct ttm_mem_type_manager
++ *
++ * @has_type: The memory type has been initialized.
++ * @use_type: The memory type is enabled.
++ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
++ * managed by this memory type.
++ * @gpu_offset: If used, the GPU offset of the first managed page of
++ * fixed memory or the first managed location in an aperture.
++ * @io_offset: The io_offset of the first managed page of IO memory or
++ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
++ * memory, this should be set to NULL.
++ * @io_size: The size of a managed IO region (fixed memory or aperture).
++ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
++ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
++ * @io_addr should be set to NULL.
++ * @size: Size of the managed region.
++ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
++ * as defined in ttm_placement_common.h
++ * @default_caching: The default caching policy used for a buffer object
++ * placed in this memory type if the user doesn't provide one.
++ * @manager: The range manager used for this memory type. FIXME: If the aperture
++ * has a page size different from the underlying system, the granularity
++ * of this manager should take care of this. But the range allocating code
++ * in ttm_bo.c needs to be modified for this.
++ * @lru: The lru list for this memory type.
++ *
++ * This structure is used to identify and manage memory types for a device.
++ * It's set up by the ttm_bo_driver::init_mem_type method.
++ */
++
++struct ttm_mem_type_manager {
++
++ /*
++ * No protection. Constant from start.
++ */
++
++ bool has_type;
++ bool use_type;
++ uint32_t flags;
++ unsigned long gpu_offset;
++ unsigned long io_offset;
++ unsigned long io_size;
++ void *io_addr;
++ uint64_t size;
++ uint32_t available_caching;
++ uint32_t default_caching;
++
++ /*
++ * Protected by the bdev->lru_lock.
++ * TODO: Consider one lru_lock per ttm_mem_type_manager.
++ * Plays ill with list removal, though.
++ */
++
++ struct drm_mm manager;
++ struct list_head lru;
++};
++
++/**
++ * struct ttm_bo_driver
++ *
++ * @mem_type_prio: Priority array of memory types to place a buffer object in
++ * if it fits without evicting buffers from any of these memory types.
++ * @mem_busy_prio: Priority array of memory types to place a buffer object in
++ * if it needs to evict buffers to make room.
++ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
++ * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
++ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
++ * @invalidate_caches: Callback to invalidate read caches when a buffer object
++ * has been evicted.
++ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
++ * structure.
++ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
++ * @move: Callback for a driver to hook in accelerated functions to move
++ * a buffer.
++ * If set to NULL, a potentially slow memcpy() move is used.
++ * @sync_obj_signaled: See ttm_fence_api.h
++ * @sync_obj_wait: See ttm_fence_api.h
++ * @sync_obj_flush: See ttm_fence_api.h
++ * @sync_obj_unref: See ttm_fence_api.h
++ * @sync_obj_ref: See ttm_fence_api.h
++ */
++
++struct ttm_bo_driver {
++ const uint32_t *mem_type_prio;
++ const uint32_t *mem_busy_prio;
++ uint32_t num_mem_type_prio;
++ uint32_t num_mem_busy_prio;
++
++ /**
++ * struct ttm_bo_driver member create_ttm_backend_entry
++ *
++ * @bdev: The buffer object device.
++ *
++ * Create a driver specific struct ttm_backend.
++ */
++
++ struct ttm_backend *(*create_ttm_backend_entry)
++ (struct ttm_bo_device *bdev);
++
++ /**
++ * struct ttm_bo_driver member invalidate_caches
++ *
++ * @bdev: the buffer object device.
++ * @flags: new placement of the rebound buffer object.
++ *
++ * A previously evicted buffer has been rebound in a
++ * potentially new location. Tell the driver that it might
++ * consider invalidating read (texture) caches on the next command
++ * submission as a consequence.
++ */
++
++ int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
++ int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man);
++ /**
++ * struct ttm_bo_driver member evict_flags:
++ *
++ * @bo: the buffer object to be evicted
++ *
++ * Return the bo flags for a buffer which is not mapped to the hardware.
++ * These will be placed in proposed_flags so that when the move is
++ * finished, they'll end up in bo->mem.flags
++ */
++
++ uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
++ /**
++ * struct ttm_bo_driver member move:
++ *
++ * @bo: the buffer to move
++ * @evict: whether this motion is evicting the buffer from
++ * the graphics address space
++ * @interruptible: Use interruptible sleeps if possible when sleeping.
++ * @no_wait: whether this should give up and return -EBUSY
++ * if this move would require sleeping
++ * @new_mem: the new memory region receiving the buffer
++ *
++ * Move a buffer between two memory regions.
++ */
++ int (*move) (struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem);
++
++ /**
++ * struct ttm_bo_driver_member verify_access
++ *
++ * @bo: Pointer to a buffer object.
++ * @filp: Pointer to a struct file trying to access the object.
++ *
++ * Called from the map / write / read methods to verify that the
++ * caller is permitted to access the buffer object.
++ * This member may be set to NULL, which will refuse this kind of
++ * access for all buffer objects.
++ * This function should return 0 if access is granted, -EPERM otherwise.
++ */
++ int (*verify_access) (struct ttm_buffer_object *bo,
++ struct file *filp);
++
++ /**
++ * In case a driver writer dislikes the TTM fence objects,
++ * the driver writer can replace those with sync objects of
++ * his / her own. If it turns out that no driver writer is
++ * using these. I suggest we remove these hooks and plug in
++ * fences directly. The bo driver needs the following functionality:
++ * See the corresponding functions in the fence object API
++ * documentation.
++ */
++
++ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
++ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
++ void (*sync_obj_unref) (void **sync_obj);
++ void *(*sync_obj_ref) (void *sync_obj);
++};
++
++#define TTM_NUM_MEM_TYPES 11
++
++#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
++#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving
++ and needs idling before
++ CPU mapping */
++/**
++ * struct ttm_bo_device - Buffer object driver device-specific data.
++ *
++ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
++ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
++ * @count: Current number of buffer object.
++ * @pages: Current number of pinned pages.
++ * @dummy_read_page: Pointer to a dummy page used for mapping requests
++ * of unpopulated pages.
++ * @shrink: A shrink callback object used for buffer object swap.
++ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
++ * used by a buffer object. This is excluding page arrays and backing pages.
++ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
++ * @man: An array of mem_type_managers.
++ * @addr_space_mm: Range manager for the device address space.
++ * @lru_lock: Spinlock that protects the buffer+device lru lists and
++ * ddestroy lists.
++ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
++ * If a GPU lockup has been detected, this is forced to 0.
++ * @dev_mapping: A pointer to the struct address_space representing the
++ * device address space.
++ * @wq: Work queue structure for the delayed delete workqueue.
++ *
++ */
++
++struct ttm_bo_device {
++
++ /*
++ * Constant after bo device init / atomic.
++ */
++
++ struct ttm_mem_global *mem_glob;
++ struct ttm_bo_driver *driver;
++ struct page *dummy_read_page;
++ struct ttm_mem_shrink shrink;
++
++ size_t ttm_bo_extra_size;
++ size_t ttm_bo_size;
++
++ rwlock_t vm_lock;
++ /*
++ * Protected by the vm lock.
++ */
++ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
++ struct rb_root addr_space_rb;
++ struct drm_mm addr_space_mm;
++
++ /*
++ * Might want to change this to one lock per manager.
++ */
++ spinlock_t lru_lock;
++ /*
++ * Protected by the lru lock.
++ */
++ struct list_head ddestroy;
++ struct list_head swap_lru;
++
++ /*
++ * Protected by load / firstopen / lastclose /unload sync.
++ */
++
++ bool nice_mode;
++ struct address_space *dev_mapping;
++
++ /*
++ * Internal protection.
++ */
++
++ struct delayed_work wq;
++};
++
++/**
++ * ttm_flag_masked
++ *
++ * @old: Pointer to the result and original value.
++ * @new: New value of bits.
++ * @mask: Mask of bits to change.
++ *
++ * Convenience function to change a number of bits identified by a mask.
++ */
++
++static inline uint32_t
++ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
++{
++ *old ^= (*old ^ new) & mask;
++ return *old;
++}
++
++/**
++ * ttm_tt_create
++ *
++ * @bdev: pointer to a struct ttm_bo_device.
++ * @size: Size of the data needed backing.
++ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
++ * @dummy_read_page: See struct ttm_bo_device.
++ *
++ * Create a struct ttm_tt to back data with system memory pages.
++ * No pages are actually allocated.
++ * Returns:
++ * NULL: Out of memory.
++ */
++extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ uint32_t page_flags,
++ struct page *dummy_read_page);
++
++/**
++ * ttm_tt_set_user:
++ *
++ * @ttm: The struct ttm_tt to populate.
++ * @tsk: A struct task_struct for which @start is a valid user-space address.
++ * @start: A valid user-space address.
++ * @num_pages: Size in pages of the user memory area.
++ *
++ * Populate a struct ttm_tt with a user-space memory area after first pinning
++ * the pages backing it.
++ * Returns:
++ * !0: Error.
++ */
++
++extern int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages);
++
++/**
++ * ttm_tt_bind:
++ *
++ * @ttm: The struct ttm_tt containing backing pages.
++ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
++ *
++ * Bind the pages of @ttm to an aperture location identified by @bo_mem
++ */
++extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
++
++/**
++ * ttm_tt_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind, unpopulate and destroy a struct ttm_tt.
++ */
++extern void ttm_tt_destroy(struct ttm_tt *ttm);
++
++/**
++ * ttm_tt_unbind:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind a struct ttm_tt.
++ */
++extern void ttm_tt_unbind(struct ttm_tt *ttm);
++
++/**
++ * ttm_tt_get_page:
++ *
++ * @ttm: The struct ttm_tt.
++ * @index: Index of the desired page.
++ *
++ * Return a pointer to the struct page backing @ttm at page
++ * index @index. If the page is unpopulated, one will be allocated to
++ * populate that index.
++ *
++ * Returns:
++ * NULL on OOM.
++ */
++extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
++
++/**
++ * ttm_tt_cache_flush:
++ *
++ * @pages: An array of pointers to struct page:s to flush.
++ * @num_pages: Number of pages to flush.
++ *
++ * Flush the data of the indicated pages from the cpu caches.
++ * This is used when changing caching attributes of the pages from
++ * cache-coherent.
++ */
++extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
++
++/**
++ * ttm_tt_set_placement_caching:
++ *
++ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
++ * @placement: Flag indicating the desired caching policy.
++ *
++ * This function will change caching policy of any default kernel mappings of
++ * the pages backing @ttm. If changing from cached to uncached or
++ * write-combined, all CPU caches will first be flushed to make sure the
++ * data of the pages hit RAM. This function may be very costly as it involves
++ * global TLB and cache flushes and potential page splitting / combining.
++ */
++extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm,
++ uint32_t placement);
++extern int ttm_tt_swapout(struct ttm_tt *ttm,
++ struct file *persistant_swap_storage);
++
++/*
++ * ttm_bo.c
++ */
++
++/**
++ * ttm_mem_reg_is_pci
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @mem: A valid struct ttm_mem_reg.
++ *
++ * Returns true if the memory described by @mem is PCI memory,
++ * false otherwise.
++ */
++extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem);
++
++/**
++ * ttm_bo_mem_space
++ *
++ * @bo: Pointer to a struct ttm_buffer_object. the data of which
++ * we want to allocate space for.
++ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
++ * up.
++ * @interruptible: Sleep interruptible when sleeping.
++ * @no_wait: Don't sleep waiting for space to become available.
++ *
++ * Allocate memory space for the buffer object pointed to by @bo, using
++ * the placement flags in @mem, potentially evicting other idle buffer objects.
++ * This function may sleep while waiting for space to become available.
++ * Returns:
++ * -EBUSY: No space available (only if no_wait == 1).
++ * -ENOMEM: Could not allocate memory for the buffer object, either due to
++ * fragmentation or concurrent allocators.
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_wait_for_cpu
++ *
++ * @bo: Pointer to a struct ttm_buffer_object.
++ * @no_wait: Don't sleep while waiting.
++ *
++ * Wait until a buffer object is no longer sync'ed for CPU access.
++ * Returns:
++ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++
++extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
++
++/**
++ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @mem: A valid struct ttm_mem_reg describing the buffer object memory.
++ * @bus_base: On return the base of the PCI region
++ * @bus_offset: On return the byte offset into the PCI region
++ * @bus_size: On return the byte size, or zero if not accessible via PCI.
++ *
++ * Returns:
++ * -EINVAL if the buffer object is currently not mappable.
++ * 0 otherwise.
++ */
++
++extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset,
++ unsigned long *bus_size);
++
++extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_device_init
++ *
++ * @bdev: A pointer to a struct ttm_bo_device to initialize.
++ * @mem_global: A pointer to an initialized struct ttm_mem_global.
++ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
++ * @file_page_offset: Offset into the device address space that is available
++ * for buffer data. This ensures compatibility with other users of the
++ * address space.
++ *
++ * Initializes a struct ttm_bo_device:
++ * Returns:
++ * !0: Failure.
++ */
++extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver,
++ uint64_t file_page_offset);
++
++/**
++ * ttm_bo_reserve:
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Sleep interruptible if waiting.
++ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
++ * @use_sequence: If @bo is already reserved, Only sleep waiting for
++ * it to become unreserved if @sequence < (@bo)->sequence.
++ *
++ * Locks a buffer object for validation. (Or prevents other processes from
++ * locking it for validation) and removes it from lru lists, while taking
++ * a number of measures to prevent deadlocks.
++ *
++ * Deadlocks may occur when two processes try to reserve multiple buffers in
++ * different order, either by will or as a result of a buffer being evicted
++ * to make room for a buffer already reserved. (Buffers are reserved before
++ * they are evicted). The following algorithm prevents such deadlocks from
++ * occurring:
++ * 1) Buffers are reserved with the lru spinlock held. Upon successful
++ * reservation they are removed from the lru list. This stops a reserved buffer
++ * from being evicted. However the lru spinlock is released between the time
++ * a buffer is selected for eviction and the time it is reserved.
++ * Therefore a check is made when a buffer is reserved for eviction, that it
++ * is still the first buffer in the lru list, before it is removed from the
++ * list. @check_lru == 1 forces this check. If it fails, the function returns
++ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
++ * the procedure.
++ * 2) Processes attempting to reserve multiple buffers other than for eviction,
++ * (typically execbuf), should first obtain a unique 32-bit
++ * validation sequence number,
++ * and call this function with @use_sequence == 1 and @sequence == the unique
++ * sequence number. If upon call of this function, the buffer object is already
++ * reserved, the validation sequence is checked against the validation
++ * sequence of the process currently reserving the buffer,
++ * and if the current validation sequence is greater than that of the process
++ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
++ * waiting for the buffer to become unreserved, after which it retries
++ * reserving. The caller should, when receiving an -EAGAIN error
++ * release all its buffer reservations, wait for @bo to become unreserved, and
++ * then rerun the validation with the same validation sequence. This procedure
++ * will always guarantee that the process with the lowest validation sequence
++ * will eventually succeed, preventing both deadlocks and starvation.
++ *
++ * Returns:
++ * -EAGAIN: The reservation may cause a deadlock. Release all buffer
++ * reservations, wait for @bo to become unreserved and try again.
++ * (only if use_sequence == 1).
++ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
++ * a signal. Release all buffer reservations and return to user-space.
++ */
++extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence);
++
++/**
++ * ttm_bo_unreserve
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unreserve a previous reservation of @bo.
++ */
++extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_wait_unreserved
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Wait for a struct ttm_buffer_object to become unreserved.
++ * This is typically used in the execbuf code to relax cpu-usage when
++ * a potential deadlock condition backoff.
++ */
++extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
++ bool interruptible);
++
++/**
++ * ttm_bo_block_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Use interruptible sleep when waiting.
++ * @no_wait: Don't sleep, but rather return -EBUSY.
++ *
++ * Block reservation for validation by simply reserving the buffer.
++ * This is intended for single buffer use only without eviction,
++ * and thus needs no deadlock protection.
++ *
++ * Returns:
++ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
++ * -ERESTART: If interruptible == 1 and the process received a
++ * signal while sleeping.
++ */
++extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++
++/**
++ * ttm_bo_unblock_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unblocks reservation leaving lru lists untouched.
++ */
++extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
++
++/*
++ * ttm_bo_util.c
++ */
++
++/**
++ * ttm_bo_move_ttm
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Optimized move function for a buffer object with both old and
++ * new placement backed by a TTM. The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait,
++ struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_move_memcpy
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Fallback move function for a mappable buffer object in mappable memory.
++ * The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait,
++ struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_free_old_node
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Utility function to free an old placement after a successful move.
++ */
++extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_move_accel_cleanup.
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @sync_obj: A sync object that signals when moving is complete.
++ * @sync_obj_arg: An argument to pass to the sync object idle / wait
++ * functions.
++ * @evict: This is an evict move. Don't return until the buffer is idle.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Accelerated move function to be called when an accelerated move
++ * has been scheduled. The function will create a new temporary buffer object
++ * representing the old placement, and put the sync object on both buffer
++ * objects. After that the newly created buffer object is unref'd to be
++ * destroyed when the move is complete. This will help pipeline
++ * buffer moves.
++ */
++
++extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem);
++/**
++ * ttm_io_prot
++ *
++ * @c_state: Caching state.
++ * @tmp: Page protection flag for a normal, cached mapping.
++ *
++ * Utility function that returns the pgprot_t that should be used for
++ * setting up a PTE with the caching model indicated by @c_state.
++ */
++extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
++
++#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#define TTM_HAS_AGP
++#include <linux/agp_backend.h>
++
++/**
++ * ttm_agp_backend_init
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @bridge: The agp bridge this device is sitting on.
++ *
++ * Create a TTM backend that uses the indicated AGP bridge as an aperture
++ * for TT memory. This function uses the linux agpgart interface to
++ * bind and unbind memory backing a ttm_tt.
++ */
++extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge);
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_bo_util.c
+@@ -0,0 +1,546 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include "ttm_pat_compat.h"
++#include <linux/io.h>
++#include <linux/highmem.h>
++#include <linux/wait.h>
++#include <linux/version.h>
++
++void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if (old_mem->mm_node) {
++ spin_lock(&bo->bdev->lru_lock);
++ drm_mm_put_block(old_mem->mm_node);
++ spin_unlock(&bo->bdev->lru_lock);
++ }
++ old_mem->mm_node = NULL;
++}
++
++int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ int ret;
++
++ if (old_mem->mem_type != TTM_PL_SYSTEM) {
++ ttm_tt_unbind(ttm);
++ ttm_bo_free_old_node(bo);
++ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
++ TTM_PL_MASK_MEM);
++ old_mem->mem_type = TTM_PL_SYSTEM;
++ save_flags = old_mem->flags;
++ }
++
++ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (new_mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(ttm, new_mem);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
++
++int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void **virtual)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ void *addr;
++
++ *virtual = NULL;
++ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
++ if (ret || bus_size == 0)
++ return ret;
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++ else {
++ if (mem->flags & TTM_PL_FLAG_WC)
++ addr = ioremap_wc(bus_base + bus_offset, bus_size);
++ else
++ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++ if (!addr)
++ return -ENOMEM;
++ }
++ *virtual = addr;
++ return 0;
++}
++
++void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void *virtual)
++{
++ struct ttm_mem_type_manager *man;
++
++ man = &bdev->man[mem->mem_type];
++
++ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ iounmap(virtual);
++}
++
++static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
++{
++ uint32_t *dstP =
++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
++ uint32_t *srcP =
++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
++
++ int i;
++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
++ iowrite32(ioread32(srcP++), dstP++);
++ return 0;
++}
++
++static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
++ unsigned long page)
++{
++ struct page *d = ttm_tt_get_page(ttm, page);
++ void *dst;
++
++ if (!d)
++ return -ENOMEM;
++
++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
++ dst = kmap(d);
++ if (!dst)
++ return -ENOMEM;
++
++ memcpy_fromio(dst, src, PAGE_SIZE);
++ kunmap(d);
++ return 0;
++}
++
++static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
++ unsigned long page)
++{
++ struct page *s = ttm_tt_get_page(ttm, page);
++ void *src;
++
++ if (!s)
++ return -ENOMEM;
++
++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
++ src = kmap(s);
++ if (!src)
++ return -ENOMEM;
++
++ memcpy_toio(dst, src, PAGE_SIZE);
++ kunmap(s);
++ return 0;
++}
++
++int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg old_copy = *old_mem;
++ void *old_iomap;
++ void *new_iomap;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ unsigned long i;
++ unsigned long page;
++ unsigned long add = 0;
++ int dir;
++
++ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
++ if (ret)
++ return ret;
++ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
++ if (ret)
++ goto out;
++
++ if (old_iomap == NULL && new_iomap == NULL)
++ goto out2;
++ if (old_iomap == NULL && ttm == NULL)
++ goto out2;
++
++ add = 0;
++ dir = 1;
++
++ if ((old_mem->mem_type == new_mem->mem_type) &&
++ (new_mem->mm_node->start <
++ old_mem->mm_node->start + old_mem->mm_node->size)) {
++ dir = -1;
++ add = new_mem->num_pages - 1;
++ }
++
++ for (i = 0; i < new_mem->num_pages; ++i) {
++ page = i * dir + add;
++ if (old_iomap == NULL)
++ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
++ else if (new_iomap == NULL)
++ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
++ else
++ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
++ if (ret)
++ goto out1;
++ }
++ mb();
++out2:
++ ttm_bo_free_old_node(bo);
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
++ ttm_tt_unbind(ttm);
++ ttm_tt_destroy(ttm);
++ bo->ttm = NULL;
++ }
++
++out1:
++ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
++out:
++ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
++ return ret;
++}
++
++/**
++ * ttm_buffer_object_transfer
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
++ * holding the data of @bo with the old placement.
++ *
++ * This is a utility function that may be called after an accelerated move
++ * has been scheduled. A new buffer object is created as a placeholder for
++ * the old data while it's being copied. When that buffer object is idle,
++ * it can be destroyed, releasing the space of the old placement.
++ * Returns:
++ * !0: Failure.
++ */
++
++static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
++ struct ttm_buffer_object **new_obj)
++{
++ struct ttm_buffer_object *fbo;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
++ if (!fbo)
++ return -ENOMEM;
++
++ *fbo = *bo;
++ mutex_init(&fbo->mutex);
++ mutex_lock(&fbo->mutex);
++
++ init_waitqueue_head(&fbo->event_queue);
++ INIT_LIST_HEAD(&fbo->ddestroy);
++ INIT_LIST_HEAD(&fbo->lru);
++
++ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ if (fbo->mem.mm_node)
++ fbo->mem.mm_node->private = (void *)fbo;
++ kref_init(&fbo->list_kref);
++ kref_init(&fbo->kref);
++
++ mutex_unlock(&fbo->mutex);
++
++ *new_obj = fbo;
++ return 0;
++}
++
++pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
++{
++#if defined(__i386__) || defined(__x86_64__)
++ if (caching_flags & TTM_PL_FLAG_WC) {
++ tmp = pgprot_ttm_x86_wc(tmp);
++ } else if (boot_cpu_data.x86 > 3 &&
++ (caching_flags & TTM_PL_FLAG_UNCACHED)) {
++ tmp = pgprot_noncached(tmp);
++ }
++#elif defined(__powerpc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
++ pgprot_val(tmp) |= _PAGE_NO_CACHE;
++ if (caching_flags & TTM_PL_FLAG_UNCACHED)
++ pgprot_val(tmp) |= _PAGE_GUARDED;
++ }
++#endif
++#if defined(__ia64__)
++ if (caching_flags & TTM_PL_FLAG_WC)
++ tmp = pgprot_writecombine(tmp);
++ else
++ tmp = pgprot_noncached(tmp);
++#endif
++#if defined(__sparc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED))
++ tmp = pgprot_noncached(tmp);
++#endif
++ return tmp;
++}
++
++static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
++ unsigned long bus_base,
++ unsigned long bus_offset,
++ unsigned long bus_size,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg *mem = &bo->mem;
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
++ map->bo_kmap_type = ttm_bo_map_premapped;
++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
++ } else {
++ map->bo_kmap_type = ttm_bo_map_iomap;
++ if (mem->flags & TTM_PL_FLAG_WC)
++ map->virtual =
++ ioremap_wc(bus_base + bus_offset,
++ bus_size);
++ else
++ map->virtual =
++ ioremap_nocache(bus_base + bus_offset,
++ bus_size);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
++ unsigned long start_page,
++ unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
++ struct ttm_tt *ttm = bo->ttm;
++ struct page *d;
++ bool do_kmap = false;
++ int i;
++ BUG_ON(!ttm);
++ if (num_pages == 1) {
++ map->page = ttm_tt_get_page(ttm, start_page);
++ do_kmap = (!PageHighMem(map->page) ||
++ (mem->flags & TTM_PL_FLAG_CACHED));
++ }
++
++ if (do_kmap) {
++ /*
++ * We're mapping a single page, and the desired
++ * page protection is consistent with the bo.
++ */
++ map->bo_kmap_type = ttm_bo_map_kmap;
++ map->virtual = kmap(map->page);
++ } else {
++ /* Populate the part we're mapping; */
++ for (i = start_page; i < start_page + num_pages; ++i) {
++ d = ttm_tt_get_page(ttm, i);
++
++ if (!d)
++ return -ENOMEM;
++ }
++
++ /*
++ * We need to use vmap to get the desired page protection
++ * or to make the buffer object look contiguous.
++ */
++ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ map->bo_kmap_type = ttm_bo_map_vmap;
++ map->virtual = vmap(ttm->pages + start_page,
++ num_pages,
++ 0,
++ prot);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++int ttm_bo_kmap(struct ttm_buffer_object *bo,
++ unsigned long start_page, unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ int ret;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ BUG_ON(!list_empty(&bo->swap));
++ map->virtual = NULL;
++
++ if (num_pages > bo->num_pages)
++ return -EINVAL;
++
++ if (start_page > bo->num_pages)
++ return -EINVAL;
++#if 0
++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++ return -EPERM;
++#endif
++ ret = ttm_bo_pci_offset(bo->bdev,
++ &bo->mem,
++ &bus_base,
++ &bus_offset,
++ &bus_size);
++ if (ret)
++ return ret;
++
++ if (bus_size == 0) {
++ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
++ } else {
++ bus_offset += start_page << PAGE_SHIFT;
++ bus_size = num_pages << PAGE_SHIFT;
++
++ return ttm_bo_ioremap(bo,
++ bus_base,
++ bus_offset,
++ bus_size, map);
++ }
++}
++
++void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
++{
++ if (!map->virtual)
++ return;
++ switch (map->bo_kmap_type) {
++ case ttm_bo_map_iomap:
++ iounmap(map->virtual);
++ break;
++ case ttm_bo_map_vmap:
++ vunmap(map->virtual);
++ break;
++ case ttm_bo_map_kmap:
++ kunmap(map->page);
++ break;
++ case ttm_bo_map_premapped:
++ break;
++ default:
++ BUG();
++ }
++ map->virtual = NULL;
++ map->page = NULL;
++}
++
++int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
++ unsigned long dst_offset,
++ unsigned long *pfn, pgprot_t *prot)
++{
++ struct ttm_mem_reg *mem = &bo->mem;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ ret = ttm_bo_pci_offset(bdev,
++ mem,
++ &bus_base,
++ &bus_offset,
++ &bus_size);
++ if (ret)
++ return -EINVAL;
++ if (bus_size != 0)
++ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++ else
++ if (!bo->ttm)
++ return -EINVAL;
++ else
++ *pfn = page_to_pfn(ttm_tt_get_page(
++ bo->ttm,
++ dst_offset >> PAGE_SHIFT));
++
++ *prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ return 0;
++}
++
++int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ struct ttm_buffer_object *old_obj;
++ if (bo->sync_obj)
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = sync_obj_arg;
++ if (evict) {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret)
++ return ret;
++ ttm_bo_free_old_node(bo);
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++ (bo->ttm != NULL)) {
++ ttm_tt_unbind(bo->ttm);
++ ttm_tt_destroy(bo->ttm);
++ bo->ttm = NULL;
++ }
++ } else {
++
++ /* This should help pipeline ordinary buffer moves.
++ *
++ * Hang old buffer memory on a new buffer object,
++ * and leave it to be released when the GPU
++ * operation has completed.
++ */
++ ret = ttm_buffer_object_transfer(bo, &old_obj);
++ if (ret)
++ return ret;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ old_obj->ttm = NULL;
++ else
++ bo->ttm = NULL;
++ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
++ ttm_bo_unreserve(old_obj);
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_bo_vm.c
+@@ -0,0 +1,429 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/mm.h>
++#include <linux/version.h>
++#include <linux/rbtree.h>
++#include <linux/uaccess.h>
++
++#define TTM_BO_VM_NUM_PREFAULT 16
++
++static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
++ unsigned long page_start,
++ unsigned long num_pages)
++{
++ struct rb_node *cur = bdev->addr_space_rb.rb_node;
++ unsigned long cur_offset;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *best_bo = NULL;
++
++ while (likely(cur != NULL)) {
++ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
++ cur_offset = bo->vm_node->start;
++ if (page_start >= cur_offset) {
++ cur = cur->rb_right;
++ best_bo = bo;
++ if (page_start == cur_offset)
++ break;
++ } else
++ cur = cur->rb_left;
++ }
++
++ if (unlikely(best_bo == NULL))
++ return NULL;
++
++ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
++ (page_start + num_pages)))
++ return NULL;
++
++ return best_bo;
++}
++
++static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long page_offset;
++ unsigned long page_last;
++ unsigned long pfn;
++ struct ttm_tt *ttm = NULL;
++ struct page *page;
++ int ret;
++ int i;
++ bool is_iomem;
++ unsigned long address = (unsigned long)vmf->virtual_address;
++ int retval = VM_FAULT_NOPAGE;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ mutex_lock(&bo->mutex);
++
++ /*
++ * Wait for buffer data in transit, due to a pipelined
++ * move.
++ */
++
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
++ ret = ttm_bo_wait(bo, false, true, false);
++ if (unlikely(ret != 0)) {
++ retval = (ret != -ERESTART) ?
++ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++ goto out_unlock;
++ }
++ }
++
++ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (unlikely(ret != 0)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ is_iomem = (bus_size != 0);
++
++ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++
++ if (unlikely(page_offset >= bo->num_pages)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ /*
++ * Strictly, we're not allowed to modify vma->vm_page_prot here,
++ * since the mmap_sem is only held in read mode. However, we
++ * modify only the caching bits of vma->vm_page_prot and
++ * consider those bits protected by
++ * the bo->mutex, as we should be the only writers.
++ * There shouldn't really be any readers of these bits except
++ * within vm_insert_mixed()? fork?
++ *
++ * TODO: Add a list of vmas to the bo, and change the
++ * vma->vm_page_prot when the object changes caching policy, with
++ * the correct locks held.
++ */
++
++ if (is_iomem) {
++ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
++ vma->vm_page_prot);
++ } else {
++ ttm = bo->ttm;
++ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
++ vm_get_page_prot(vma->vm_flags) :
++ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
++ }
++
++ /*
++ * Speculatively prefault a number of pages. Only error on
++ * first page.
++ */
++
++ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
++
++ if (is_iomem)
++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
++ page_offset;
++ else {
++ page = ttm_tt_get_page(ttm, page_offset);
++ if (unlikely(!page && i == 0)) {
++ retval = VM_FAULT_OOM;
++ goto out_unlock;
++ } else if (unlikely(!page)) {
++ break;
++ }
++ pfn = page_to_pfn(page);
++ }
++
++ ret = vm_insert_mixed(vma, address, pfn);
++ /*
++ * Somebody beat us to this PTE or prefaulting to
++ * an already populated PTE, or prefaulting error.
++ */
++
++ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if (unlikely(ret != 0)) {
++ retval =
++ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ goto out_unlock;
++
++ }
++
++ address += PAGE_SIZE;
++ if (unlikely(++page_offset >= page_last))
++ break;
++ }
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return retval;
++}
++
++static void ttm_bo_vm_open(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ (void)ttm_bo_reference(bo);
++}
++
++static void ttm_bo_vm_close(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ ttm_bo_unref(&bo);
++ vma->vm_private_data = NULL;
++}
++
++static struct vm_operations_struct ttm_bo_vm_ops = {
++ .fault = ttm_bo_vm_fault,
++ .open = ttm_bo_vm_open,
++ .close = ttm_bo_vm_close
++};
++
++int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev)
++{
++ struct ttm_bo_driver *driver;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
++ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object to map.\n");
++ ret = -EINVAL;
++ goto out_unref;
++ }
++
++ driver = bo->bdev->driver;
++ if (unlikely(!driver->verify_access)) {
++ ret = -EPERM;
++ goto out_unref;
++ }
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++
++ /*
++ * Note: We're transferring the bo reference to
++ * vma->vm_private_data here.
++ */
++
++ vma->vm_private_data = bo;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ return 0;
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
++{
++ if (vma->vm_pgoff != 0)
++ return -EACCES;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++ vma->vm_private_data = ttm_bo_reference(bo);
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ return 0;
++}
++
++ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user *wbuf, char __user *rbuf, size_t count,
++ loff_t *f_pos, bool write)
++{
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_driver *driver;
++ struct ttm_bo_kmap_obj map;
++ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL))
++ return -EFAULT;
++
++ driver = bo->bdev->driver;
++	if (unlikely(!driver->verify_access)) { ret = -EPERM;
++		goto out_unref; }
++
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ kmap_offset = dev_offset - bo->vm_node->start;
++	if (unlikely(kmap_offset >= bo->num_pages)) {
++ ret = -EFBIG;
++ goto out_unref;
++ }
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ ret = -EINTR;
++ goto out_unref;
++ case -EBUSY:
++ ret = -EAGAIN;
++ goto out_unref;
++ default:
++ goto out_unref;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return -EFBIG;
++
++ *f_pos += io_size;
++
++ return io_size;
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
++ char __user *rbuf, size_t count, loff_t *f_pos,
++ bool write)
++{
++ struct ttm_bo_kmap_obj map;
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ kmap_offset = (*f_pos >> PAGE_SHIFT);
++	if (unlikely(kmap_offset >= bo->num_pages))
++ return -EFBIG;
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ return -EINTR;
++ case -EBUSY:
++ return -EAGAIN;
++ default:
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ return ret;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ *f_pos += io_size;
++
++ return io_size;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_execbuf_util.c
+@@ -0,0 +1,108 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "ttm_execbuf_util.h"
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_eu_backoff_reservation(struct list_head *list)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ if (!entry->reserved)
++ continue;
++
++ entry->reserved = false;
++ ttm_bo_unreserve(bo);
++ }
++}
++
++/*
++ * Reserve buffers for validation.
++ *
++ * If a buffer in the list is marked for CPU access, we back off and
++ * wait for that buffer to become free for GPU access.
++ *
++ * If a buffer is reserved for another validation, the validator with
++ * the highest validation sequence backs off and waits for that buffer
++ * to become unreserved. This prevents deadlocks when validating multiple
++ * buffers in different orders.
++ */
++
++int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
++{
++ struct ttm_validate_buffer *entry;
++ int ret;
++
++retry:
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++
++ entry->reserved = false;
++ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
++ if (ret != 0) {
++ ttm_eu_backoff_reservation(list);
++ if (ret == -EAGAIN) {
++ ret = ttm_bo_wait_unreserved(bo, true);
++ if (unlikely(ret != 0))
++ return ret;
++ goto retry;
++ } else
++ return ret;
++ }
++
++ entry->reserved = true;
++ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
++ ttm_eu_backoff_reservation(list);
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (ret)
++ return ret;
++ goto retry;
++ }
++ }
++ return 0;
++}
++
++void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *old_sync_obj;
++
++ mutex_lock(&bo->mutex);
++ old_sync_obj = bo->sync_obj;
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ entry->reserved = false;
++ if (old_sync_obj)
++ driver->sync_obj_unref(&old_sync_obj);
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_execbuf_util.h
+@@ -0,0 +1,103 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_EXECBUF_UTIL_H_
++#define _TTM_EXECBUF_UTIL_H_
++
++#include "ttm_bo_api.h"
++#include "ttm_fence_api.h"
++#include <linux/list.h>
++
++/**
++ * struct ttm_validate_buffer
++ *
++ * @head: list head for thread-private list.
++ * @bo: refcounted buffer object pointer.
++ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
++ * adding a new sync object.
++ * @reserved: Indicates whether @bo has been reserved for validation.
++ */
++
++struct ttm_validate_buffer {
++ struct list_head head;
++ struct ttm_buffer_object *bo;
++ void *new_sync_obj_arg;
++ bool reserved;
++};
++
++/**
++ * function ttm_eu_backoff_reservation
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ *
++ * Undoes all buffer validation reservations for bos pointed to by
++ * the list entries.
++ */
++
++extern void ttm_eu_backoff_reservation(struct list_head *list);
++
++/**
++ * function ttm_eu_reserve_buffers
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @val_seq: A unique sequence number.
++ *
++ * Tries to reserve bos pointed to by the list entries for validation.
++ * If the function returns 0, all buffers are marked as "unfenced",
++ * taken off the lru lists and are not synced for write CPU usage.
++ *
++ * If the function detects a deadlock due to multiple threads trying to
++ * reserve the same buffers in reverse order, all threads except one will
++ * back off and retry. This function may sleep while waiting for
++ * CPU write reservations to be cleared, and for other threads to
++ * unreserve their buffers.
++ *
++ * This function may return -ERESTART or -EAGAIN if the calling process
++ * receives a signal while waiting. In that case, no buffers on the list
++ * will be reserved upon return.
++ *
++ * Buffers reserved by this function should be unreserved by
++ * a call to either ttm_eu_backoff_reservation() or
++ * ttm_eu_fence_buffer_objects() when command submission is complete or
++ * has failed.
++ */
++
++extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
++
++/**
++ * function ttm_eu_fence_buffer_objects.
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @sync_obj: The new sync object for the buffers.
++ *
++ * This function should be called when command submission is complete, and
++ * it will add a new sync object to bos pointed to by entries on @list.
++ * It also unreserves all buffers, putting them on lru lists.
++ *
++ */
++
++extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_fence.c
+@@ -0,0 +1,607 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_fence_api.h"
++#include "ttm_fence_driver.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++#include <drm/drmP.h>
++
++/*
++ * Simple implementation for now.
++ */
++
++static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ printk(KERN_ERR "GPU lockup dectected on engine %u "
++ "fence type 0x%08x\n",
++ (unsigned int)fence->fence_class, (unsigned int)mask);
++ /*
++ * Give engines some time to idle?
++ */
++
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, mask, -EBUSY);
++ write_unlock(&fc->lock);
++}
++
++/*
++ * Convenience function to be called by fence::wait methods that
++ * need polling.
++ */
++
++int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ uint32_t count = 0;
++ int ret;
++ unsigned long end_jiffies = fence->timeout_jiffies;
++
++ DECLARE_WAITQUEUE(entry, current);
++ add_wait_queue(&fc->fence_queue, &entry);
++
++ ret = 0;
++
++ for (;;) {
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++ if (ttm_fence_object_signaled(fence, mask))
++ break;
++ if (time_after_eq(jiffies, end_jiffies)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ continue;
++ }
++ if (lazy)
++ schedule_timeout(1);
++ else if ((++count & 0x0F) == 0) {
++ __set_current_state(TASK_RUNNING);
++ schedule();
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE :
++ TASK_UNINTERRUPTIBLE);
++ }
++ if (interruptible && signal_pending(current)) {
++ ret = -ERESTART;
++ break;
++ }
++ }
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&fc->fence_queue, &entry);
++ return ret;
++}
++
++/*
++ * Typically called by the IRQ handler.
++ */
++
++void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error)
++{
++ int wake = 0;
++ uint32_t diff;
++ uint32_t relevant_type;
++ uint32_t new_type;
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
++ struct list_head *head;
++ struct ttm_fence_object *fence, *next;
++ bool found = false;
++
++ if (list_empty(&fc->ring))
++ return;
++
++ list_for_each_entry(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff) {
++ found = true;
++ break;
++ }
++ }
++
++ fc->waiting_types &= ~type;
++ head = (found) ? &fence->ring : &fc->ring;
++
++ list_for_each_entry_safe_reverse(fence, next, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++
++ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
++ (unsigned long)fence, fence->sequence,
++ fence->fence_type);
++
++ if (error) {
++ fence->info.error = error;
++ fence->info.signaled_types = fence->fence_type;
++ list_del_init(&fence->ring);
++ wake = 1;
++ break;
++ }
++
++ relevant_type = type & fence->fence_type;
++ new_type = (fence->info.signaled_types | relevant_type) ^
++ fence->info.signaled_types;
++
++ if (new_type) {
++ fence->info.signaled_types |= new_type;
++ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
++ (unsigned long)fence,
++ fence->info.signaled_types);
++
++ if (unlikely(driver->signaled))
++ driver->signaled(fence);
++
++ if (driver->needed_flush)
++ fc->pending_flush |=
++ driver->needed_flush(fence);
++
++ if (new_type & fence->waiting_types)
++ wake = 1;
++ }
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++
++ if (!(fence->fence_type & ~fence->info.signaled_types)) {
++ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
++ (unsigned long)fence);
++ list_del_init(&fence->ring);
++ }
++ }
++
++ /*
++ * Reinstate lost waiting types.
++ */
++
++ if ((fc->waiting_types & type) != type) {
++ head = head->prev;
++ list_for_each_entry(fence, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++ diff =
++ (fc->highest_waiting_sequence -
++ fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff)
++ break;
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++ }
++ }
++
++ if (wake)
++ wake_up_all(&fc->fence_queue);
++}
++
++static void ttm_fence_unring(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
++{
++ unsigned long flags;
++ bool signaled;
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ mask &= fence->fence_type;
++ read_lock_irqsave(&fc->lock, flags);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ read_unlock_irqrestore(&fc->lock, flags);
++ if (!signaled && driver->poll) {
++ write_lock_irqsave(&fc->lock, flags);
++ driver->poll(fence->fdev, fence->fence_class, mask);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ write_unlock_irqrestore(&fc->lock, flags);
++ }
++ return signaled;
++}
++
++int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++ uint32_t saved_pending_flush;
++ uint32_t diff;
++ bool call_flush;
++
++ if (type & ~fence->fence_type) {
++ DRM_ERROR("Flush trying to extend fence type, "
++ "0x%x, 0x%x\n", type, fence->fence_type);
++ return -EINVAL;
++ }
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ fence->waiting_types |= type;
++ fc->waiting_types |= fence->waiting_types;
++ diff = (fence->sequence - fc->highest_waiting_sequence) &
++ fc->sequence_mask;
++
++ if (diff < fc->wrap_diff)
++ fc->highest_waiting_sequence = fence->sequence;
++
++ /*
++ * fence->waiting_types has changed. Determine whether
++ * we need to initiate some kind of flush as a result of this.
++ */
++
++ saved_pending_flush = fc->pending_flush;
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++
++ if (driver->poll)
++ driver->poll(fence->fdev, fence->fence_class,
++ fence->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fence->fdev, fence->fence_class);
++
++ return 0;
++}
++
++/*
++ * Make sure old fence objects are signaled before their fence sequences are
++ * wrapped around and reused.
++ */
++
++void ttm_fence_flush_old(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t sequence)
++{
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ struct ttm_fence_object *fence;
++ unsigned long irq_flags;
++ const struct ttm_fence_driver *driver = fdev->driver;
++ bool call_flush;
++
++ uint32_t diff;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++
++ list_for_each_entry_reverse(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff <= fc->flush_diff)
++ break;
++
++ fence->waiting_types = fence->fence_type;
++ fc->waiting_types |= fence->fence_type;
++
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++ }
++
++ if (driver->poll)
++ driver->poll(fdev, fence_class, fc->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fdev, fence->fence_class);
++
++ /*
++	 * FIXME: Should we implement a wait here for really old fences?
++ */
++
++}
++
++int ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t mask)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ int ret = 0;
++ unsigned long timeout;
++ unsigned long cur_jiffies;
++ unsigned long to_jiffies;
++
++ if (mask & ~fence->fence_type) {
++ DRM_ERROR("Wait trying to extend fence type"
++ " 0x%08x 0x%08x\n", mask, fence->fence_type);
++ BUG();
++ return -EINVAL;
++ }
++
++ if (driver->wait)
++ return driver->wait(fence, lazy, interruptible, mask);
++
++ ttm_fence_object_flush(fence, mask);
++retry:
++ if (!driver->has_irq ||
++ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
++
++ cur_jiffies = jiffies;
++ to_jiffies = fence->timeout_jiffies;
++
++ timeout = (time_after(to_jiffies, cur_jiffies)) ?
++ to_jiffies - cur_jiffies : 1;
++
++ if (interruptible)
++ ret = wait_event_interruptible_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++ else
++ ret = wait_event_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++
++ if (unlikely(ret == -ERESTARTSYS))
++ return -ERESTART;
++
++ if (unlikely(ret == 0)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ goto retry;
++ }
++
++ return 0;
++ }
++
++ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
++}
++
++int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
++ uint32_t fence_class, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long flags;
++ uint32_t sequence;
++ unsigned long timeout;
++ int ret;
++
++ ttm_fence_unring(fence);
++ ret = driver->emit(fence->fdev,
++ fence_class, fence_flags, &sequence, &timeout);
++ if (ret)
++ return ret;
++
++ write_lock_irqsave(&fc->lock, flags);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->waiting_types = 0;
++ fence->info.signaled_types = 0;
++ fence->info.error = 0;
++ fence->sequence = sequence;
++ fence->timeout_jiffies = timeout;
++ if (list_empty(&fc->ring))
++ fc->highest_waiting_sequence = sequence - 1;
++ list_add_tail(&fence->ring, &fc->ring);
++ fc->latest_queued_sequence = sequence;
++ write_unlock_irqrestore(&fc->lock, flags);
++ return 0;
++}
++
++int ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *),
++ struct ttm_fence_object *fence)
++{
++ int ret = 0;
++
++ kref_init(&fence->kref);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->info.signaled_types = 0;
++ fence->waiting_types = 0;
++ fence->sequence = 0;
++ fence->info.error = 0;
++ fence->fdev = fdev;
++ fence->destroy = destroy;
++ INIT_LIST_HEAD(&fence->ring);
++ atomic_inc(&fdev->count);
++
++ if (create_flags & TTM_FENCE_FLAG_EMIT) {
++ ret = ttm_fence_object_emit(fence, create_flags,
++ fence->fence_class, type);
++ }
++
++ return ret;
++}
++
++int ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence)
++{
++ struct ttm_fence_object *fence;
++ int ret;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob,
++ sizeof(*fence),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ return ret;
++ }
++
++ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
++ if (!fence) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev, fence_class, type,
++ create_flags, NULL, fence);
++ if (ret) {
++ ttm_fence_object_unref(&fence);
++ return ret;
++ }
++ *c_fence = fence;
++
++ return 0;
++}
++
++static void ttm_fence_object_destroy(struct kref *kref)
++{
++ struct ttm_fence_object *fence =
++ container_of(kref, struct ttm_fence_object, kref);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ atomic_dec(&fence->fdev->count);
++ if (fence->destroy)
++ fence->destroy(fence);
++ else {
++ ttm_mem_global_free(fence->fdev->mem_glob,
++ sizeof(*fence),
++ false);
++ kfree(fence);
++ }
++}
++
++void ttm_fence_device_release(struct ttm_fence_device *fdev)
++{
++ kfree(fdev->fence_class);
++}
++
++int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver)
++{
++ struct ttm_fence_class_manager *fc;
++ const struct ttm_fence_class_init *fci;
++ int i;
++
++ fdev->mem_glob = mem_glob;
++ fdev->fence_class = kzalloc(num_classes *
++ sizeof(*fdev->fence_class), GFP_KERNEL);
++
++ if (unlikely(!fdev->fence_class))
++ return -ENOMEM;
++
++ fdev->num_classes = num_classes;
++ atomic_set(&fdev->count, 0);
++ fdev->driver = driver;
++
++ for (i = 0; i < fdev->num_classes; ++i) {
++ fc = &fdev->fence_class[i];
++ fci = &init[(replicate_init) ? 0 : i];
++
++ fc->wrap_diff = fci->wrap_diff;
++ fc->flush_diff = fci->flush_diff;
++ fc->sequence_mask = fci->sequence_mask;
++
++ rwlock_init(&fc->lock);
++ INIT_LIST_HEAD(&fc->ring);
++ init_waitqueue_head(&fc->fence_queue);
++ }
++
++ return 0;
++}
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ struct ttm_fence_info tmp;
++ unsigned long irq_flags;
++
++ read_lock_irqsave(&fc->lock, irq_flags);
++ tmp = fence->info;
++ read_unlock_irqrestore(&fc->lock, irq_flags);
++
++ return tmp;
++}
++
++void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
++{
++ struct ttm_fence_object *fence = *p_fence;
++
++ *p_fence = NULL;
++ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
++}
++
++/*
++ * Placement / BO sync object glue.
++ */
++
++bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_signaled(fence, fence_types);
++}
++
++int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
++}
++
++int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_flush(fence, fence_types);
++}
++
++void ttm_fence_sync_obj_unref(void **sync_obj)
++{
++ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
++}
++
++void *ttm_fence_sync_obj_ref(void *sync_obj)
++{
++ return (void *)
++ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_fence_api.h
+@@ -0,0 +1,272 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_API_H_
++#define _TTM_FENCE_API_H_
++
++#include <linux/list.h>
++#include <linux/kref.h>
++
++#define TTM_FENCE_FLAG_EMIT (1 << 0)
++#define TTM_FENCE_TYPE_EXE (1 << 0)
++
++struct ttm_fence_device;
++
++/**
++ * struct ttm_fence_info
++ *
++ * @fence_class: The fence class.
++ * @fence_type: Bitfield indicating types for this fence.
++ * @signaled_types: Bitfield indicating which types are signaled.
++ * @error: Last error reported from the device.
++ *
++ * Used as output from the ttm_fence_get_info
++ */
++
++struct ttm_fence_info {
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++/**
++ * struct ttm_fence_object
++ *
++ * @fdev: Pointer to the fence device struct.
++ * @kref: Holds the reference count of this fence object.
++ * @ring: List head used for the circular list of not-completely
++ * signaled fences.
++ * @info: Data for fast retrieval using the ttm_fence_get_info()
++ * function.
++ * @timeout_jiffies: Absolute jiffies value indicating when this fence
++ * object times out and, if waited on, calls ttm_fence_lockup
++ * to check for and resolve a GPU lockup.
++ * @sequence: Fence sequence number.
++ * @waiting_types: Types currently waited on.
++ * @destroy: Called to free the fence object, when its refcount has
++ * reached zero. If NULL, kfree is used.
++ *
++ * This struct is provided in the driver interface so that drivers can
++ * derive from it and create their own fence implementation. All members
++ * are private to the fence implementation and the fence driver callbacks.
++ * Otherwise a driver may access the derived object using container_of().
++ */
++
++struct ttm_fence_object {
++ struct ttm_fence_device *fdev;
++ struct kref kref;
++ uint32_t fence_class;
++ uint32_t fence_type;
++
++ /*
++ * The below fields are protected by the fence class
++ * manager spinlock.
++ */
++
++ struct list_head ring;
++ struct ttm_fence_info info;
++ unsigned long timeout_jiffies;
++ uint32_t sequence;
++ uint32_t waiting_types;
++ void (*destroy) (struct ttm_fence_object *);
++};
++
++/**
++ * ttm_fence_object_init
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating various actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @destroy: Destroy function. If NULL, kfree() is used.
++ * @fence: The struct ttm_fence_object to initialize.
++ *
++ * Initialize a pre-allocated fence object. This function, together with the
++ * destroy function makes it possible to derive driver-specific fence objects.
++ */
++
++extern int
++ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *fence),
++ struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_create
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating various actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @c_fence: On successful termination, *(@c_fence) will point to the created
++ * fence object.
++ *
++ * Create and initialize a struct ttm_fence_object. The destroy function will
++ * be set to kfree().
++ */
++
++extern int
++ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence);
++
++/**
++ * ttm_fence_object_wait
++ *
++ * @fence: The fence object to wait on.
++ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
++ * @interruptible: Sleep interruptible when waiting.
++ * @type_mask: Wait for the given type_mask to signal.
++ *
++ * Wait for a fence to signal the given type_mask. The function will
++ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
++ *
++ * Returns
++ * -ERESTART if interrupted by a signal.
++ * May return driver-specific error codes if timed-out.
++ */
++
++extern int
++ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t type_mask);
++
++/**
++ * ttm_fence_object_flush
++ *
++ * @fence: The fence object to flush.
++ * @flush_mask: Fence types to flush.
++ *
++ * Make sure that the given fence eventually signals the
++ * types indicated by @flush_mask. Note that this may or may not
++ * map to a CPU or GPU flush.
++ */
++
++extern int
++ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
++
++/**
++ * ttm_fence_get_info
++ *
++ * @fence: The fence object.
++ *
++ * Copy the info block from the fence while holding relevant locks.
++ */
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_ref
++ *
++ * @fence: The fence object.
++ *
++ * Return a ref-counted pointer to the fence object indicated by @fence.
++ */
++
++static inline struct ttm_fence_object *ttm_fence_object_ref(struct
++ ttm_fence_object
++ *fence)
++{
++ kref_get(&fence->kref);
++ return fence;
++}
++
++/**
++ * ttm_fence_object_unref
++ *
++ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
++ *
++ * Unreference the fence object pointed to by *(@p_fence), clearing
++ * *(p_fence).
++ */
++
++extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
++
++/**
++ * ttm_fence_object_signaled
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ * @mask: Type mask to check whether signaled.
++ *
++ * This function checks (without waiting) whether the fence object
++ * pointed to by @fence has signaled the types indicated by @mask,
++ * and returns 1 if true, 0 if false. This function does NOT perform
++ * an implicit fence flush.
++ */
++
++extern bool
++ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
++
++/**
++ * ttm_fence_class
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence class of a
++ * struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
++{
++ return fence->fence_class;
++}
++
++/**
++ * ttm_fence_types
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence types of a
++ * struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
++{
++ return fence->fence_type;
++}
++
++/*
++ * The functions below are wrappers to the above functions, with
++ * similar names but with sync_obj omitted. These wrappers are intended
++ * to be plugged directly into the buffer object driver's sync object
++ * API, if the driver chooses to use ttm_fence_objects as buffer object
++ * sync objects. In the prototypes below, a sync_obj is cast to a
++ * struct ttm_fence_object, whereas a sync_arg is cast to an
++ * uint32_t representing a fence_type argument.
++ */
++
++extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
++extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
++extern void ttm_fence_sync_obj_unref(void **sync_obj);
++extern void *ttm_fence_sync_obj_ref(void *sync_obj);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_fence_driver.h
+@@ -0,0 +1,302 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_DRIVER_H_
++#define _TTM_FENCE_DRIVER_H_
++
++#include <linux/kref.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include "ttm_fence_api.h"
++#include "ttm_memory.h"
++
++/** @file ttm_fence_driver.h
++ *
++ * Definitions needed for a driver implementing the
++ * ttm_fence subsystem.
++ */
++
++/**
++ * struct ttm_fence_class_manager:
++ *
++ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
++ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
++ * @flush_diff: Sequence difference to trigger fence flush.
++ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
++ * seqa as old and needing a flush.
++ * @sequence_mask: Mask of valid bits in a fence sequence.
++ * @lock: Lock protecting this struct as well as fence objects
++ * associated with this struct.
++ * @ring: Circular sequence-ordered list of fence objects.
++ * @pending_flush: Fence types currently needing a flush.
++ * @waiting_types: Fence types that are currently waited for.
++ * @fence_queue: Queue of waiters on fences belonging to this fence class.
++ * @highest_waiting_sequence: Sequence number of the fence with highest
++ * sequence number and that is waited for.
++ * @latest_queued_sequence: Sequence number of the fence latest queued
++ * on the ring.
++ */
++
++struct ttm_fence_class_manager {
++
++ /*
++ * Unprotected constant members.
++ */
++
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++
++ /*
++ * The rwlock protects this structure as well as
++ * the data in all fence objects belonging to this
++ * class. This should be OK as most fence objects are
++ * only read from once they're created.
++ */
++
++ rwlock_t lock;
++ struct list_head ring;
++ uint32_t pending_flush;
++ uint32_t waiting_types;
++ wait_queue_head_t fence_queue;
++ uint32_t highest_waiting_sequence;
++ uint32_t latest_queued_sequence;
++};
++
++/**
++ * struct ttm_fence_device
++ *
++ * @fence_class: Array of fence class managers.
++ * @num_classes: Array dimension of @fence_class.
++ * @count: Current number of fence objects for statistics.
++ * @driver: Driver struct.
++ *
++ * Provided in the driver interface so that the driver can derive
++ * from this struct for its driver_private, and accordingly
++ * access the driver_private from the fence driver callbacks.
++ *
++ * All members except "count" are initialized at creation and
++ * never touched after that. No protection needed.
++ *
++ * This struct is private to the fence implementation and to the fence
++ * driver callbacks, and may otherwise be used by drivers only to
++ * obtain the derived device_private object using container_of().
++ */
++
++struct ttm_fence_device {
++ struct ttm_mem_global *mem_glob;
++ struct ttm_fence_class_manager *fence_class;
++ uint32_t num_classes;
++ atomic_t count;
++ const struct ttm_fence_driver *driver;
++};
++
++/**
++ * struct ttm_fence_class_init
++ *
++ * @wrap_diff: Fence sequence number wrap indicator. If
++ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
++ * considered to be older than sequence2.
++ * @flush_diff: Fence sequence number flush indicator.
++ * If a non-completely-signaled fence has a fence sequence number
++ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
++ * the fence is considered too old and it will be flushed upon the
++ * next call of ttm_fence_flush_old(), to make sure no fences with
++ * stale sequence numbers remains unsignaled. @flush_diff should
++ * be sufficiently less than @wrap_diff.
++ * @sequence_mask: Mask with valid bits of the fence sequence
++ * number set to 1.
++ *
++ * This struct is used as input to ttm_fence_device_init.
++ */
++
++struct ttm_fence_class_init {
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++};
++
++/**
++ * struct ttm_fence_driver
++ *
++ * @has_irq: Called by a potential waiter. Should return 1 if a
++ * fence object with indicated parameters is expected to signal
++ * automatically, and 0 if the fence implementation needs to
++ * repeatedly call @poll to make it signal.
++ * @emit: Make sure a fence with the given parameters is
++ * present in the indicated command stream. Return its sequence number
++ * in "breadcrumb".
++ * @poll: Check and report sequences of the given "fence_class"
++ * that have signaled "types"
++ * @flush: Make sure that the types indicated by the bitfield
++ * ttm_fence_class_manager::pending_flush will eventually
++ * signal. These bits have been put together using the
++ * result from the needed_flush function described below.
++ * @needed_flush: Given the fence_class and fence_types indicated by
++ * "fence", and the last received fence sequence of this
++ * fence class, indicate what types need a fence flush to
++ * signal. Return as a bitfield.
++ * @wait: Set to non-NULL if the driver wants to override the fence
++ * wait implementation. Return 0 on success, -EBUSY on failure,
++ * and -ERESTART if interruptible and a signal is pending.
++ * @signaled: Driver callback that is called whenever a
++ * ttm_fence_object::signaled_types has changed status.
++ * This function is called from atomic context,
++ * with the ttm_fence_class_manager::lock held in write mode.
++ * @lockup: Driver callback that is called whenever a wait has exceeded
++ * the lifetime of a fence object.
++ * If there is a GPU lockup,
++ * this function should, if possible, reset the GPU,
++ * call the ttm_fence_handler with an error status, and
++ * return. If no lockup was detected, simply extend the
++ * fence timeout_jiffies and return. The driver might
++ * want to protect the lockup check with a mutex and cache a
++ * non-locked-up status for a while to avoid an excessive
++ * amount of lockup checks from every waiting thread.
++ */
++
++struct ttm_fence_driver {
++ bool (*has_irq) (struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t flags);
++ int (*emit) (struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags,
++ uint32_t *breadcrumb, unsigned long *timeout_jiffies);
++ void (*flush) (struct ttm_fence_device *fdev, uint32_t fence_class);
++ void (*poll) (struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t types);
++ uint32_t(*needed_flush)
++ (struct ttm_fence_object *fence);
++ int (*wait) (struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask);
++ void (*signaled) (struct ttm_fence_object *fence);
++ void (*lockup) (struct ttm_fence_object *fence, uint32_t fence_types);
++};
++
++/**
++ * function ttm_fence_device_init
++ *
++ * @num_classes: Number of fence classes for this fence implementation.
++ * @mem_global: Pointer to the global memory accounting info.
++ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
++ * @init: Array of initialization info for each fence class.
++ * @replicate_init: Use the first @init initialization info for all classes.
++ * @driver: Driver callbacks.
++ *
++ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
++ * out-of-memory. Otherwise returns 0.
++ */
++extern int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver);
++
++/**
++ * function ttm_fence_device_release
++ *
++ * @fdev: Pointer to the fence device.
++ *
++ * Release all resources held by a fence device. Note that before
++ * this function is called, the caller must have made sure all fence
++ * objects belonging to this fence device are completely signaled.
++ */
++
++extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
++
++/**
++ * ttm_fence_handler - the fence handler.
++ *
++ * @fdev: Pointer to the fence device.
++ * @fence_class: Fence class that signals.
++ * @sequence: Signaled sequence.
++ * @type: Types that signal.
++ * @error: Error from the engine.
++ *
++ * This function signals all fences with a sequence previous to the
++ * @sequence argument, and belonging to @fence_class. The signaled fence
++ * types are provided in @type. If error is non-zero, the error member
++ * of the fence with sequence = @sequence is set to @error. This value
++ * may be reported back to user-space, indicating, for example an illegal
++ * 3D command or illegal mpeg data.
++ *
++ * This function is typically called from the driver::poll method when the
++ * command sequence preceding the fence marker has executed. It should be
++ * called with the ttm_fence_class_manager::lock held in write mode and
++ * may be called from interrupt context.
++ */
++
++extern void
++ttm_fence_handler(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error);
++
++/**
++ * ttm_fence_driver_from_dev
++ *
++ * @fdev: The ttm fence device.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(
++ struct ttm_fence_device *fdev)
++{
++ return fdev->driver;
++}
++
++/**
++ * ttm_fence_driver
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver(struct
++ ttm_fence_object
++ *fence)
++{
++ return ttm_fence_driver_from_dev(fence->fdev);
++}
++
++/**
++ * ttm_fence_fc
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the struct ttm_fence_class_manager for the
++ * fence class of @fence.
++ */
++
++static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
++ ttm_fence_object
++ *fence)
++{
++ return &fence->fdev->fence_class[fence->fence_class];
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_fence_user.c
+@@ -0,0 +1,238 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "ttm_fence_user.h"
++#include "ttm_object.h"
++#include "ttm_fence_driver.h"
++#include "ttm_userobj_api.h"
++
++/**
++ * struct ttm_fence_user_object
++ *
++ * @base: The base object used for user-space visibility and refcounting.
++ *
++ * @fence: The fence object itself.
++ *
++ */
++
++struct ttm_fence_user_object {
++ struct ttm_base_object base;
++ struct ttm_fence_object fence;
++};
++
++static struct ttm_fence_user_object *ttm_fence_user_object_lookup(
++ struct ttm_object_file *tfile,
++ uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_fence_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_fence_user_object, base);
++}
++
++/*
++ * The fence object destructor.
++ */
++
++static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_user_object *ufence =
++ container_of(fence, struct ttm_fence_user_object, fence);
++
++ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++}
++
++/*
++ * The base object destructor. We basically only unreference the
++ * attached fence object.
++ */
++
++static void ttm_fence_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_fence_object *fence;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ ufence = container_of(base, struct ttm_fence_user_object, base);
++ fence = &ufence->fence;
++ ttm_fence_object_unref(&fence);
++}
++
++int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence,
++ uint32_t *user_handle)
++{
++ int ret;
++ struct ttm_fence_object *tmp;
++ struct ttm_fence_user_object *ufence;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob,
++ sizeof(*ufence),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return -ENOMEM;
++
++ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
++ if (unlikely(ufence == NULL)) {
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev,
++ fence_class,
++ fence_types, create_flags,
++ &ttm_fence_user_destroy, &ufence->fence);
++
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ /*
++ * One fence ref is held by the fence ptr we return.
++ * The other one by the base object. Need to up the
++ * fence refcount before we publish this object to
++ * user-space.
++ */
++
++ tmp = ttm_fence_object_ref(&ufence->fence);
++ ret = ttm_base_object_init(tfile, &ufence->base,
++ false, ttm_fence_type,
++ &ttm_fence_user_release, NULL);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ *fence = &ufence->fence;
++ *user_handle = ufence->base.hash.key;
++
++ return 0;
++out_err1:
++ ttm_fence_object_unref(&tmp);
++ tmp = &ufence->fence;
++ ttm_fence_object_unref(&tmp);
++ return ret;
++out_err0:
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++ return ret;
++}
++
++int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_signaled_arg *arg = data;
++ struct ttm_fence_object *fence;
++ struct ttm_fence_info info;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ if (arg->req.flush) {
++ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ info = ttm_fence_get_info(fence);
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++
++out:
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_finish_arg *arg = data;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ struct ttm_fence_object *fence;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ ret = ttm_fence_object_wait(fence,
++ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
++ true, arg->req.fence_type);
++ if (likely(ret == 0)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++ }
++
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++
++ return ret;
++}
++
++int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_fence_unref_arg *arg = data;
++ int ret = 0;
++
++ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
++ return ret;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_fence_user.h
+@@ -0,0 +1,140 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef TTM_FENCE_USER_H
++#define TTM_FENCE_USER_H
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#endif
++
++#define TTM_FENCE_MAJOR 0
++#define TTM_FENCE_MINOR 1
++#define TTM_FENCE_PL 0
++#define TTM_FENCE_DATE "080819"
++
++/**
++ * struct ttm_fence_signaled_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to flush. Input.
++ *
++ * @flush: Boolean. Flush the indicated fence_types. Input.
++ *
++ * Argument to the TTM_FENCE_SIGNALED ioctl.
++ */
++
++struct ttm_fence_signaled_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ int32_t flush;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_fence_rep
++ *
++ * @signaled_types: Fence type that has signaled.
++ *
++ * @fence_error: Command execution error.
++ * Hardware errors that are consequences of the execution
++ * of the command stream preceding the fence are reported
++ * here.
++ *
++ * Output argument to the TTM_FENCE_SIGNALED and
++ * TTM_FENCE_FINISH ioctls.
++ */
++
++struct ttm_fence_rep {
++ uint32_t signaled_types;
++ uint32_t fence_error;
++};
++
++union ttm_fence_signaled_arg {
++ struct ttm_fence_signaled_req req;
++ struct ttm_fence_rep rep;
++};
++
++/*
++ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
++ *
++ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
++#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_fence_finish_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to finish.
++ *
++ * @mode: Wait mode.
++ *
++ * Input to the TTM_FENCE_FINISH ioctl.
++ */
++
++struct ttm_fence_finish_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ uint32_t mode;
++ uint32_t pad64;
++};
++
++union ttm_fence_finish_arg {
++ struct ttm_fence_finish_req req;
++ struct ttm_fence_rep rep;
++};
++
++/**
++ * struct ttm_fence_unref_arg
++ *
++ * @handle: Handle to the fence object.
++ *
++ * Argument to the TTM_FENCE_UNREF ioctl.
++ */
++
++struct ttm_fence_unref_arg {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * Ioctl offsets from extension start.
++ */
++
++#define TTM_FENCE_SIGNALED 0x01
++#define TTM_FENCE_FINISH 0x02
++#define TTM_FENCE_UNREF 0x03
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_lock.c
+@@ -0,0 +1,155 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_lock.h"
++#include <asm/atomic.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_lock_init(struct ttm_lock *lock)
++{
++ init_waitqueue_head(&lock->queue);
++ atomic_set(&lock->write_lock_pending, 0);
++ atomic_set(&lock->readers, 0);
++ lock->kill_takers = false;
++ lock->signal = SIGKILL;
++}
++
++void ttm_read_unlock(struct ttm_lock *lock)
++{
++ if (atomic_dec_and_test(&lock->readers))
++ wake_up_all(&lock->queue);
++}
++
++int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
++{
++ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
++ int ret;
++
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->write_lock_pending) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
++ if (ret)
++ return -ERESTART;
++ }
++
++ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
++ int ret;
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) != -1);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) != -1);
++ if (ret)
++ return -ERESTART;
++ }
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ ttm_read_unlock(lock);
++ return -ERESTART;
++ }
++
++ return 0;
++}
++
++static int __ttm_write_unlock(struct ttm_lock *lock)
++{
++ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
++ return -EINVAL;
++ wake_up_all(&lock->queue);
++ return 0;
++}
++
++static void ttm_write_lock_remove(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
++ int ret;
++
++ *p_base = NULL;
++ ret = __ttm_write_unlock(lock);
++ BUG_ON(ret != 0);
++}
++
++int ttm_write_lock(struct ttm_lock *lock,
++ bool interruptible,
++ struct ttm_object_file *tfile)
++{
++ int ret = 0;
++
++ atomic_inc(&lock->write_lock_pending);
++
++ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) == 0);
++
++ if (ret) {
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++ return -ERESTART;
++ }
++ }
++
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ __ttm_write_unlock(lock);
++ return -ERESTART;
++ }
++
++ /*
++ * Add a base-object, the destructor of which will
++ * make sure the lock is released if the client dies
++ * while holding it.
++ */
++
++ ret = ttm_base_object_init(tfile, &lock->base, false,
++ ttm_lock_type, &ttm_write_lock_remove, NULL);
++ if (ret)
++ (void)__ttm_write_unlock(lock);
++
++ return ret;
++}
++
++int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
++{
++ return ttm_ref_object_base_unref(tfile,
++ lock->base.hash.key, TTM_REF_USAGE);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_lock.h
+@@ -0,0 +1,176 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++/** @file ttm_lock.h
++ * This file implements a simple replacement for the buffer manager use
++ * of the DRM heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode is fast, and
++ * intended for in-kernel use only.
++ * Taking it in write mode is slow.
++ *
++ * The write mode is used only when there is a need to block all
++ * user-space processes from validating buffers.
++ * It's allowed to leave kernel space with the write lock held.
++ * If a user-space process dies while having the write-lock,
++ * it will be released during the file descriptor release.
++ *
++ * The read lock is typically placed at the start of an IOCTL- or
++ * user-space callable function that may end up allocating a memory area.
++ * This includes setstatus, super-ioctls and faults; the latter may move
++ * unmappable regions to mappable. It's a bug to leave kernel space with the
++ * read lock held.
++ *
++ * Both read- and write lock taking is interruptible for low signal-delivery
++ * latency. The locking functions will return -ERESTART if interrupted by a
++ * signal.
++ *
++ * Locking order: The lock should be taken BEFORE any TTM mutexes
++ * or spinlocks.
++ *
++ * Typical usages:
++ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
++ * stops it from being repopulated.
++ * b) out-of-VRAM or out-of-aperture space, in which case the process
++ * receiving the out-of-space notification may take the lock in write mode
++ * and evict all buffers prior to start validating its own buffers.
++ */
++
++#ifndef _TTM_LOCK_H_
++#define _TTM_LOCK_H_
++
++#include "ttm_object.h"
++#include <linux/wait.h>
++#include <asm/atomic.h>
++
++/**
++ * struct ttm_lock
++ *
++ * @base: ttm base object used solely to release the lock if the client
++ * holding the lock dies.
++ * @queue: Queue for processes waiting for lock change-of-status.
++ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
++ * write lock starvation.
++ * @readers: The lock status: A negative number indicates that a write lock is
++ * held. Positive values indicate number of concurrent readers.
++ */
++
++struct ttm_lock {
++ struct ttm_base_object base;
++ wait_queue_head_t queue;
++ atomic_t write_lock_pending;
++ atomic_t readers;
++ bool kill_takers;
++ int signal;
++};
++
++/**
++ * ttm_lock_init
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * Initializes the lock.
++ */
++extern void ttm_lock_init(struct ttm_lock *lock);
++
++/**
++ * ttm_read_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a read lock.
++ */
++
++extern void ttm_read_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_read_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Takes the lock in read mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ */
++
++extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_write_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Takes the lock in write mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ * -ENOMEM: Out of memory when locking.
++ */
++extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_write_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Releases a write lock.
++ * Returns:
++ * -EINVAL If the lock was not held.
++ */
++extern int ttm_write_unlock(struct ttm_lock *lock,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_lock_set_kill
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @val: Boolean whether to kill processes taking the lock.
++ * @signal: Signal to send to the process taking the lock.
++ *
++ * The kill-when-taking-lock functionality is used to kill processes that keep
++ * on using the TTM functionality when its resources has been taken down, for
++ * example when the X server exits. A typical sequence would look like this:
++ * - X server takes lock in write mode.
++ * - ttm_lock_set_kill() is called with @val set to true.
++ * - As part of X server exit, TTM resources are taken down.
++ * - X server releases the lock on file release.
++ * - Another dri client wants to render, takes the lock and is killed.
++ *
++ */
++
++static inline void ttm_lock_set_kill(struct ttm_lock *lock,
++ bool val,
++ int signal)
++{
++ lock->kill_takers = val;
++ if (val)
++ lock->signal = signal;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_memory.c
+@@ -0,0 +1,228 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "ttm_memory.h"
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/mm.h>
++
++#define TTM_MEMORY_ALLOC_RETRIES 4
++
++/**
++ * At this point we only support a single shrink callback.
++ * Extend this if needed, perhaps using a linked list of callbacks.
++ * Note that this function is reentrant:
++ * many threads may try to swap out at any given time.
++ */
++
++static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
++ uint64_t extra)
++{
++ int ret;
++ struct ttm_mem_shrink *shrink;
++ uint64_t target;
++ uint64_t total_target;
++
++ spin_lock(&glob->lock);
++ if (glob->shrink == NULL)
++ goto out;
++
++ if (from_workqueue) {
++ target = glob->swap_limit;
++ total_target = glob->total_memory_swap_limit;
++ } else if (capable(CAP_SYS_ADMIN)) {
++ total_target = glob->emer_total_memory;
++ target = glob->emer_memory;
++ } else {
++ total_target = glob->max_total_memory;
++ target = glob->max_memory;
++ }
++
++ total_target = (extra >= total_target) ? 0 : total_target - extra;
++ target = (extra >= target) ? 0 : target - extra;
++
++ while (glob->used_memory > target ||
++ glob->used_total_memory > total_target) {
++ shrink = glob->shrink;
++ spin_unlock(&glob->lock);
++ ret = shrink->do_shrink(shrink);
++ spin_lock(&glob->lock);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++out:
++ spin_unlock(&glob->lock);
++}
++
++static void ttm_shrink_work(struct work_struct *work)
++{
++ struct ttm_mem_global *glob =
++ container_of(work, struct ttm_mem_global, work);
++
++ ttm_shrink(glob, true, 0ULL);
++}
++
++int ttm_mem_global_init(struct ttm_mem_global *glob)
++{
++ struct sysinfo si;
++ uint64_t mem;
++
++ spin_lock_init(&glob->lock);
++ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
++ INIT_WORK(&glob->work, ttm_shrink_work);
++ init_waitqueue_head(&glob->queue);
++
++ si_meminfo(&si);
++
++ mem = si.totalram - si.totalhigh;
++ mem *= si.mem_unit;
++
++ glob->max_memory = mem >> 1;
++ glob->emer_memory = glob->max_memory + (mem >> 2);
++ glob->swap_limit = glob->max_memory - (mem >> 5);
++ glob->used_memory = 0;
++ glob->used_total_memory = 0;
++ glob->shrink = NULL;
++
++ mem = si.totalram;
++ mem *= si.mem_unit;
++
++ glob->max_total_memory = mem >> 1;
++ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
++ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
++
++ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
++ glob->max_total_memory >> 20);
++ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
++ glob->max_memory >> 20);
++ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
++ glob->swap_limit >> 20);
++
++ return 0;
++}
++
++void ttm_mem_global_release(struct ttm_mem_global *glob)
++{
++ printk(KERN_INFO "Used total memory is %llu bytes.\n",
++ (unsigned long long)glob->used_total_memory);
++ flush_workqueue(glob->swap_queue);
++ destroy_workqueue(glob->swap_queue);
++ glob->swap_queue = NULL;
++}
++
++static inline void ttm_check_swapping(struct ttm_mem_global *glob)
++{
++ bool needs_swapping;
++
++ spin_lock(&glob->lock);
++ needs_swapping = (glob->used_memory > glob->swap_limit ||
++ glob->used_total_memory >
++ glob->total_memory_swap_limit);
++ spin_unlock(&glob->lock);
++
++ if (unlikely(needs_swapping))
++ (void)queue_work(glob->swap_queue, &glob->work);
++
++}
++
++void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem)
++{
++ spin_lock(&glob->lock);
++ glob->used_total_memory -= amount;
++ if (!himem)
++ glob->used_memory -= amount;
++ wake_up_all(&glob->queue);
++ spin_unlock(&glob->lock);
++}
++
++static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem, bool reserve)
++{
++ uint64_t limit;
++ uint64_t lomem_limit;
++ int ret = -ENOMEM;
++
++ spin_lock(&glob->lock);
++
++ if (capable(CAP_SYS_ADMIN)) {
++ limit = glob->emer_total_memory;
++ lomem_limit = glob->emer_memory;
++ } else {
++ limit = glob->max_total_memory;
++ lomem_limit = glob->max_memory;
++ }
++
++ if (unlikely(glob->used_total_memory + amount > limit))
++ goto out_unlock;
++ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
++ goto out_unlock;
++
++ if (reserve) {
++ glob->used_total_memory += amount;
++ if (!himem)
++ glob->used_memory += amount;
++ }
++ ret = 0;
++out_unlock:
++ spin_unlock(&glob->lock);
++ ttm_check_swapping(glob);
++
++ return ret;
++}
++
++int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem)
++{
++ int count = TTM_MEMORY_ALLOC_RETRIES;
++
++ while (unlikely(ttm_mem_global_reserve(glob,
++ memory,
++ himem,
++ true) != 0)) {
++ if (no_wait)
++ return -ENOMEM;
++ if (unlikely(count-- == 0))
++ return -ENOMEM;
++ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
++ }
++
++ return 0;
++}
++
++size_t ttm_round_pot(size_t size)
++{
++ if ((size & (size - 1)) == 0)
++ return size;
++ else if (size > PAGE_SIZE)
++ return PAGE_ALIGN(size);
++ else {
++ size_t tmp_size = 4;
++
++ while (tmp_size < size)
++ tmp_size <<= 1;
++
++ return tmp_size;
++ }
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_memory.h
+@@ -0,0 +1,147 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef TTM_MEMORY_H
++#define TTM_MEMORY_H
++
++#include <linux/workqueue.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++
++/**
++ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
++ *
++ * @do_shrink: The callback function.
++ *
++ * Arguments to the do_shrink functions are intended to be passed using
++ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
++ * and can be accessed using container_of().
++ */
++
++struct ttm_mem_shrink {
++ int (*do_shrink) (struct ttm_mem_shrink *);
++};
++
++/**
++ * struct ttm_mem_global - Global memory accounting structure.
++ *
++ * @shrink: A single callback to shrink TTM memory usage. Extend this
++ * to a linked list to be able to handle multiple callbacks when needed.
++ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
++ * need a separate workqueue since it will spend a lot of time waiting
++ * for the GPU, and this will otherwise block other workqueue tasks(?)
++ * At this point we use only a single-threaded workqueue.
++ * @work: The workqueue callback for the shrink queue.
++ * @queue: Wait queue for processes suspended waiting for memory.
++ * @lock: Lock to protect the @shrink - and the memory accounting members,
++ * that is, essentially the whole structure with some exceptions.
++ * @emer_memory: Lowmem memory limit available for root.
++ * @max_memory: Lowmem memory limit available for non-root.
++ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
++ * @used_memory: Currently used lowmem memory.
++ * @used_total_memory: Currently used total (lowmem + highmem) memory.
++ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
++ * kicks in.
++ * @max_total_memory: Total memory available to non-root processes.
++ * @emer_total_memory: Total memory available to root processes.
++ *
++ * Note that this structure is not per device. It should be global for all
++ * graphics devices.
++ */
++
++struct ttm_mem_global {
++ struct ttm_mem_shrink *shrink;
++ struct workqueue_struct *swap_queue;
++ struct work_struct work;
++ wait_queue_head_t queue;
++ spinlock_t lock;
++ uint64_t emer_memory;
++ uint64_t max_memory;
++ uint64_t swap_limit;
++ uint64_t used_memory;
++ uint64_t used_total_memory;
++ uint64_t total_memory_swap_limit;
++ uint64_t max_total_memory;
++ uint64_t emer_total_memory;
++};
++
++/**
++ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
++ *
++ * @shrink: The object to initialize.
++ * @func: The callback function.
++ */
++
++static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
++ int (*func) (struct ttm_mem_shrink *))
++{
++ shrink->do_shrink = func;
++}
++
++/**
++ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to register with.
++ * @shrink: An initialized struct ttm_mem_shrink object to register.
++ *
++ * Returns:
++ * -EBUSY: There's already a callback registered. (May change).
++ */
++
++static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ if (glob->shrink != NULL) {
++ spin_unlock(&glob->lock);
++ return -EBUSY;
++ }
++ glob->shrink = shrink;
++ spin_unlock(&glob->lock);
++ return 0;
++}
++
++/**
++ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to unregister from.
++ * @shrink: A previously registered struct ttm_mem_shrink object.
++ *
++ */
++
++static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ BUG_ON(glob->shrink != shrink);
++ glob->shrink = NULL;
++ spin_unlock(&glob->lock);
++}
++
++extern int ttm_mem_global_init(struct ttm_mem_global *glob);
++extern void ttm_mem_global_release(struct ttm_mem_global *glob);
++extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem);
++extern void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem);
++extern size_t ttm_round_pot(size_t size);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_object.c
+@@ -0,0 +1,440 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.c
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++/**
++ * struct ttm_object_file
++ *
++ * @tdev: Pointer to the ttm_object_device.
++ *
++ * @lock: Lock that protects the ref_list list and the
++ * ref_hash hash tables.
++ *
++ * @ref_list: List of ttm_ref_objects to be destroyed at
++ * file release.
++ *
++ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
++ * for fast lookup of ref objects given a base object.
++ */
++
++#include "ttm_object.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++
++struct ttm_object_file {
++ struct ttm_object_device *tdev;
++ rwlock_t lock;
++ struct list_head ref_list;
++ struct drm_open_hash ref_hash[TTM_REF_NUM];
++ struct kref refcount;
++};
++
++/**
++ * struct ttm_object_device
++ *
++ * @object_lock: lock that protects the object_hash hash table.
++ *
++ * @object_hash: hash table for fast lookup of object global names.
++ *
++ * @object_count: Per device object count.
++ *
++ * This is the per-device data structure needed for ttm object management.
++ */
++
++struct ttm_object_device {
++ rwlock_t object_lock;
++ struct drm_open_hash object_hash;
++ atomic_t object_count;
++ struct ttm_mem_global *mem_glob;
++};
++
++/**
++ * struct ttm_ref_object
++ *
++ * @hash: Hash entry for the per-file object reference hash.
++ *
++ * @head: List entry for the per-file list of ref-objects.
++ *
++ * @kref: Ref count.
++ *
++ * @obj: Base object this ref object is referencing.
++ *
++ * @ref_type: Type of ref object.
++ *
++ * This is similar to an idr object, but it also has a hash table entry
++ * that allows lookup with a pointer to the referenced object as a key. In
++ * that way, one can easily detect whether a base object is referenced by
++ * a particular ttm_object_file. It also carries a ref count to avoid creating
++ * multiple ref objects if a ttm_object_file references the same base object
++ * more than once.
++ */
++
++struct ttm_ref_object {
++ struct drm_hash_item hash;
++ struct list_head head;
++ struct kref kref;
++ struct ttm_base_object *obj;
++ enum ttm_ref_type ref_type;
++ struct ttm_object_file *tfile;
++};
++
++static inline struct ttm_object_file *
++ttm_object_file_ref(struct ttm_object_file *tfile)
++{
++ kref_get(&tfile->refcount);
++ return tfile;
++}
++
++static void ttm_object_file_destroy(struct kref *kref)
++{
++ struct ttm_object_file *tfile =
++ container_of(kref, struct ttm_object_file, refcount);
++
++ /* printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile); */
++ kfree(tfile);
++}
++
++
++static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
++{
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ kref_put(&tfile->refcount, ttm_object_file_destroy);
++}
++
++
++int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type object_type,
++ void (*refcount_release) (struct ttm_base_object **),
++ void (*ref_obj_release) (struct ttm_base_object *,
++ enum ttm_ref_type ref_type))
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ int ret;
++
++ base->shareable = shareable;
++ base->tfile = ttm_object_file_ref(tfile);
++ base->refcount_release = refcount_release;
++ base->ref_obj_release = ref_obj_release;
++ base->object_type = object_type;
++ write_lock(&tdev->object_lock);
++ kref_init(&base->refcount);
++ ret = drm_ht_just_insert_please(&tdev->object_hash,
++ &base->hash,
++ (unsigned long)base, 31, 0, 0);
++ write_unlock(&tdev->object_lock);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ttm_base_object_unref(&base);
++
++ return 0;
++out_err1:
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++out_err0:
++ return ret;
++}
++
++static void ttm_release_base(struct kref *kref)
++{
++ struct ttm_base_object *base =
++ container_of(kref, struct ttm_base_object, refcount);
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++ write_unlock(&tdev->object_lock);
++ if (base->refcount_release) {
++ ttm_object_file_unref(&base->tfile);
++ base->refcount_release(&base);
++ }
++ write_lock(&tdev->object_lock);
++}
++
++void ttm_base_object_unref(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ /* printk(KERN_INFO "TTM base object unref.\n"); */
++ *p_base = NULL;
++
++ /*
++ * Need to take the lock here to avoid racing with
++ * users trying to look up the object.
++ */
++
++ write_lock(&tdev->object_lock);
++ (void)kref_put(&base->refcount, &ttm_release_base);
++ write_unlock(&tdev->object_lock);
++}
++
++struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
++ uint32_t key)
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ struct ttm_base_object *base;
++ struct drm_hash_item *hash;
++ int ret;
++
++ read_lock(&tdev->object_lock);
++ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
++
++ if (likely(ret == 0)) {
++ base = drm_hash_entry(hash, struct ttm_base_object, hash);
++ kref_get(&base->refcount);
++ }
++ read_unlock(&tdev->object_lock);
++
++ if (unlikely(ret != 0))
++ return NULL;
++
++ if (tfile != base->tfile && !base->shareable) {
++ printk(KERN_ERR "Attempted access of non-shareable object.\n");
++ ttm_base_object_unref(&base);
++ return NULL;
++ }
++
++ return base;
++}
++
++int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++ int ret = -EINVAL;
++
++ if (existed != NULL)
++ *existed = true;
++
++ while (ret == -EINVAL) {
++ read_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, base->hash.key, &hash);
++
++ if (ret == 0) {
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_get(&ref->kref);
++ read_unlock(&tfile->lock);
++ break;
++ }
++
++ read_unlock(&tfile->lock);
++ ret = ttm_mem_global_alloc(mem_glob,
++ sizeof(*ref),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return ret;
++ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++ if (unlikely(ref == NULL)) {
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ return -ENOMEM;
++ }
++
++ ref->hash.key = base->hash.key;
++ ref->obj = base;
++ ref->tfile = tfile;
++ ref->ref_type = ref_type;
++ kref_init(&ref->kref);
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_insert_item(ht, &ref->hash);
++
++ if (likely(ret == 0)) {
++ list_add_tail(&ref->head, &tfile->ref_list);
++ kref_get(&base->refcount);
++ write_unlock(&tfile->lock);
++ if (existed != NULL)
++ *existed = false;
++ break;
++ }
++
++ write_unlock(&tfile->lock);
++ BUG_ON(ret != -EINVAL);
++
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ }
++
++ return ret;
++}
++
++static void ttm_ref_object_release(struct kref *kref)
++{
++ struct ttm_ref_object *ref =
++ container_of(kref, struct ttm_ref_object, kref);
++ struct ttm_base_object *base = ref->obj;
++ struct ttm_object_file *tfile = ref->tfile;
++ struct drm_open_hash *ht;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++
++ ht = &tfile->ref_hash[ref->ref_type];
++ (void)drm_ht_remove_item(ht, &ref->hash);
++ list_del(&ref->head);
++ write_unlock(&tfile->lock);
++
++ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
++ base->ref_obj_release(base, ref->ref_type);
++
++ ttm_base_object_unref(&ref->obj);
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ write_lock(&tfile->lock);
++}
++
++int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key, enum ttm_ref_type ref_type)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ int ret;
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, key, &hash);
++ if (unlikely(ret != 0)) {
++ write_unlock(&tfile->lock);
++ return -EINVAL;
++ }
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_put(&ref->kref, ttm_ref_object_release);
++ write_unlock(&tfile->lock);
++ return 0;
++}
++
++void ttm_object_file_release(struct ttm_object_file **p_tfile)
++{
++ struct ttm_ref_object *ref;
++ struct list_head *list;
++ unsigned int i;
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ write_lock(&tfile->lock);
++
++ /*
++ * Since we release the lock within the loop, we have to
++ * restart it from the beginning each time.
++ */
++
++ while (!list_empty(&tfile->ref_list)) {
++ list = tfile->ref_list.next;
++ ref = list_entry(list, struct ttm_ref_object, head);
++ ttm_ref_object_release(&ref->kref);
++ }
++
++ for (i = 0; i < TTM_REF_NUM; ++i)
++ drm_ht_remove(&tfile->ref_hash[i]);
++
++ write_unlock(&tfile->lock);
++ ttm_object_file_unref(&tfile);
++}
++
++struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
++ unsigned int hash_order)
++{
++ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
++ unsigned int i;
++ unsigned int j = 0;
++ int ret;
++
++ if (unlikely(tfile == NULL))
++ return NULL;
++
++ rwlock_init(&tfile->lock);
++ tfile->tdev = tdev;
++ kref_init(&tfile->refcount);
++ INIT_LIST_HEAD(&tfile->ref_list);
++
++ for (i = 0; i < TTM_REF_NUM; ++i) {
++ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
++ if (ret) {
++ j = i;
++ goto out_err;
++ }
++ }
++
++ return tfile;
++out_err:
++ for (i = 0; i < j; ++i)
++ drm_ht_remove(&tfile->ref_hash[i]);
++
++ kfree(tfile);
++
++ return NULL;
++}
++
++struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
++ *mem_glob,
++ unsigned int hash_order)
++{
++ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
++ int ret;
++
++ if (unlikely(tdev == NULL))
++ return NULL;
++
++ tdev->mem_glob = mem_glob;
++ rwlock_init(&tdev->object_lock);
++ atomic_set(&tdev->object_count, 0);
++ ret = drm_ht_create(&tdev->object_hash, hash_order);
++
++ if (likely(ret == 0))
++ return tdev;
++
++ kfree(tdev);
++ return NULL;
++}
++
++void ttm_object_device_release(struct ttm_object_device **p_tdev)
++{
++ struct ttm_object_device *tdev = *p_tdev;
++
++ *p_tdev = NULL;
++
++ write_lock(&tdev->object_lock);
++ drm_ht_remove(&tdev->object_hash);
++ write_unlock(&tdev->object_lock);
++
++ kfree(tdev);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_object.h
+@@ -0,0 +1,262 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.h
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++#ifndef _TTM_OBJECT_H_
++#define _TTM_OBJECT_H_
++
++#include <linux/list.h>
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include "ttm_memory.h"
++
++/**
++ * enum ttm_ref_type
++ *
++ * Describes what type of reference a ref object holds.
++ *
++ * TTM_REF_USAGE is a simple refcount on a base object.
++ *
++ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
++ * buffer object.
++ *
++ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
++ * buffer object.
++ *
++ */
++
++enum ttm_ref_type {
++ TTM_REF_USAGE,
++ TTM_REF_SYNCCPU_READ,
++ TTM_REF_SYNCCPU_WRITE,
++ TTM_REF_NUM
++};
++
++/**
++ * enum ttm_object_type
++ *
++ * One entry per ttm object type.
++ * Device-specific types should use the
++ * ttm_driver_typex types.
++ */
++
++enum ttm_object_type {
++ ttm_fence_type,
++ ttm_buffer_type,
++ ttm_lock_type,
++ ttm_driver_type0 = 256,
++ ttm_driver_type1
++};
++
++struct ttm_object_file;
++struct ttm_object_device;
++
++/**
++ * struct ttm_base_object
++ *
++ * @hash: hash entry for the per-device object hash.
++ * @type: derived type this object is base class for.
++ * @shareable: Other ttm_object_files can access this object.
++ *
++ * @tfile: Pointer to ttm_object_file of the creator.
++ * NULL if the object was not created by a user request.
++ * (kernel object).
++ *
++ * @refcount: Number of references to this object, not
++ * including the hash entry. A reference to a base object can
++ * only be held by a ref object.
++ *
++ * @refcount_release: A function to be called when there are
++ * no more references to this object. This function should
++ * destroy the object (or make sure destruction eventually happens),
++ * and when it is called, the object has
++ * already been taken out of the per-device hash. The parameter
++ * "base" should be set to NULL by the function.
++ *
++ * @ref_obj_release: A function to be called when a reference object
++ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
++ * this function may, for example, release a lock held by a user-space
++ * process.
++ *
++ * This struct is intended to be used as a base struct for objects that
++ * are visible to user-space. It provides a global name, race-safe
++ * access and refcounting, minimal access contol and hooks for unref actions.
++ */
++
++struct ttm_base_object {
++ struct drm_hash_item hash;
++ enum ttm_object_type object_type;
++ bool shareable;
++ struct ttm_object_file *tfile;
++ struct kref refcount;
++ void (*refcount_release) (struct ttm_base_object **base);
++ void (*ref_obj_release) (struct ttm_base_object *base,
++ enum ttm_ref_type ref_type);
++};
++
++/**
++ * ttm_base_object_init
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @base: The struct ttm_base_object to initialize.
++ * @shareable: This object is shareable with other applcations.
++ * (different @tfile pointers.)
++ * @type: The object type.
++ * @refcount_release: See the struct ttm_base_object description.
++ * @ref_obj_release: See the struct ttm_base_object description.
++ *
++ * Initializes a struct ttm_base_object.
++ */
++
++extern int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type type,
++ void (*refcount_release) (struct ttm_base_object
++ **),
++ void (*ref_obj_release) (struct ttm_base_object
++ *,
++ enum ttm_ref_type
++ ref_type));
++
++/**
++ * ttm_base_object_lookup
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @key: Hash key
++ *
++ * Looks up a struct ttm_base_object with the key @key.
++ * Also verifies that the object is visible to the application, by
++ * comparing the @tfile argument and checking the object shareable flag.
++ */
++
++extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
++ *tfile, uint32_t key);
++
++/**
++ * ttm_base_object_unref
++ *
++ * @p_base: Pointer to a pointer referncing a struct ttm_base_object.
++ *
++ * Decrements the base object refcount and clears the pointer pointed to by
++ * p_base.
++ */
++
++extern void ttm_base_object_unref(struct ttm_base_object **p_base);
++
++/**
++ * ttm_ref_object_add.
++ *
++ * @tfile: A struct ttm_object_file representing the application owning the
++ * ref_object.
++ * @base: The base object to reference.
++ * @ref_type: The type of reference.
++ * @existed: Upon completion, indicates that an identical reference object
++ * already existed, and the refcount was upped on that object instead.
++ *
++ * Adding a ref object to a base object is basically like referencing the
++ * base object, but a user-space application holds the reference. When the
++ * file corresponding to @tfile is closed, all its reference objects are
++ * deleted. A reference object can have different types depending on what
++ * it's intended for. It can be refcounting to prevent object destruction,
++ * When user-space takes a lock, it can add a ref object to that lock to
++ * make sure the lock is released if the application dies. A ref object
++ * will hold a single reference on a base object.
++ */
++extern int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed);
++/**
++ * ttm_ref_object_base_unref
++ *
++ * @key: Key representing the base object.
++ * @ref_type: Ref type of the ref object to be dereferenced.
++ *
++ * Unreference a ref object with type @ref_type
++ * on the base object identified by @key. If there are no duplicate
++ * references, the ref object will be destroyed and the base object
++ * will be unreferenced.
++ */
++extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key,
++ enum ttm_ref_type ref_type);
++
++/**
++ * ttm_object_file_init - initialize a struct ttm_object file
++ *
++ * @tdev: A struct ttm_object device this file is initialized on.
++ * @hash_order: Order of the hash table used to hold the reference objects.
++ *
++ * This is typically called by the file_ops::open function.
++ */
++
++extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
++ *tdev,
++ unsigned int hash_order);
++
++/**
++ * ttm_object_file_release - release data held by a ttm_object_file
++ *
++ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
++ * *p_tfile will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_file.
++ * Typically called from file_ops::release. The caller must
++ * ensure that there are no concurrent users of tfile.
++ */
++
++extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
++
++/**
++ * ttm_object device init - initialize a struct ttm_object_device
++ *
++ * @hash_order: Order of hash table used to hash the base objects.
++ *
++ * This function is typically called on device initialization to prepare
++ * data structures needed for ttm base and ref objects.
++ */
++
++extern struct ttm_object_device *ttm_object_device_init
++ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
++
++/**
++ * ttm_object_device_release - release data held by a ttm_object_device
++ *
++ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
++ * *p_tdev will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_device.
++ * Typically called from driver::unload before the destruction of the
++ * device private data structure.
++ */
++
++extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_pat_compat.c
+@@ -0,0 +1,164 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_pat_compat.h"
++#include <linux/version.h>
++#include <asm/page.h>
++#include <linux/spinlock.h>
++#include <asm/pgtable.h>
++
++#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
++#include <asm/tlbflush.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++
++#ifndef MSR_IA32_CR_PAT
++#define MSR_IA32_CR_PAT 0x0277
++#endif
++
++#ifndef _PAGE_PAT
++#define _PAGE_PAT 0x080
++#endif
++
++static int ttm_has_pat;
++
++/*
++ * Used at resume-time when CPU-s are fired up.
++ */
++
++static void ttm_pat_ipi_handler(void *notused)
++{
++ u32 v1, v2;
++
++ rdmsr(MSR_IA32_CR_PAT, v1, v2);
++ v2 &= 0xFFFFFFF8;
++ v2 |= 0x00000001;
++ wbinvd();
++ wrmsr(MSR_IA32_CR_PAT, v1, v2);
++ wbinvd();
++ __flush_tlb_all();
++}
++
++static void ttm_pat_enable(void)
++{
++ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0)
++ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
++}
++
++void ttm_pat_resume(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ ttm_pat_enable();
++}
++
++static int psb_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ if (action == CPU_ONLINE)
++ ttm_pat_resume();
++
++ return 0;
++}
++
++static struct notifier_block psb_nb = {
++ .notifier_call = psb_cpu_callback,
++ .priority = 1
++};
++
++/*
++ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
++ */
++
++void ttm_pat_init(void)
++{
++ if (likely(ttm_has_pat))
++ return;
++
++ if (!boot_cpu_has(X86_FEATURE_PAT))
++ return;
++
++ ttm_pat_enable();
++
++ if (num_present_cpus() > 1)
++ register_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 1;
++}
++
++void ttm_pat_takedown(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ if (num_present_cpus() > 1)
++ unregister_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 0;
++}
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ if (likely(ttm_has_pat)) {
++ pgprot_val(prot) |= _PAGE_PAT;
++ return prot;
++ } else {
++ return pgprot_noncached(prot);
++ }
++}
++
++#else
++
++void ttm_pat_init(void)
++{
++}
++
++void ttm_pat_takedown(void)
++{
++}
++
++void ttm_pat_resume(void)
++{
++}
++
++#ifdef CONFIG_X86
++#include <asm/pat.h>
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
++
++ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
++}
++#else
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ BUG();
++}
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_pat_compat.h
+@@ -0,0 +1,34 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PAT_COMPAT_
++#define _TTM_PAT_COMPAT_
++#include <asm/page.h>
++#include <asm/pgtable_types.h>
++extern void ttm_pat_init(void);
++extern void ttm_pat_takedown(void);
++extern void ttm_pat_resume(void);
++extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_placement_common.h
+@@ -0,0 +1,91 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PL_COMMON_H_
++#define _TTM_PL_COMMON_H_
++/*
++ * Memory regions for data placement.
++ */
++
++#define TTM_PL_SYSTEM 0
++#define TTM_PL_TT 1
++#define TTM_PL_VRAM 2
++#define TTM_PL_PRIV0 3
++#define TTM_PL_PRIV1 4
++#define TTM_PL_PRIV2 5
++#define TTM_PL_PRIV3 6
++#define TTM_PL_PRIV4 7
++#define TTM_PL_PRIV5 8
++#define TTM_PL_CI 9
++#define TTM_PL_RAR 10
++#define TTM_PL_SWAPPED 15
++
++#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
++#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
++#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
++#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
++#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
++#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
++#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
++#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
++#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
++#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
++#define TTM_PL_FLAG_RAR (1 << TTM_PL_RAR)
++#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
++#define TTM_PL_MASK_MEM 0x0000FFFF
++
++/*
++ * Other flags that affects data placement.
++ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
++ * if available.
++ * TTM_PL_FLAG_SHARED means that another application may
++ * reference the buffer.
++ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
++ * be evicted to make room for other buffers.
++ */
++
++#define TTM_PL_FLAG_CACHED (1 << 16)
++#define TTM_PL_FLAG_UNCACHED (1 << 17)
++#define TTM_PL_FLAG_WC (1 << 18)
++#define TTM_PL_FLAG_SHARED (1 << 20)
++#define TTM_PL_FLAG_NO_EVICT (1 << 21)
++
++#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
++ TTM_PL_FLAG_UNCACHED | \
++ TTM_PL_FLAG_WC)
++
++#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
++
++/*
++ * Access flags to be used for CPU- and GPU- mappings.
++ * The idea is that the TTM synchronization mechanism will
++ * allow concurrent READ access and exclusive write access.
++ * Currently GPU- and CPU accesses are exclusive.
++ */
++
++#define TTM_ACCESS_READ (1 << 0)
++#define TTM_ACCESS_WRITE (1 << 1)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_placement_user.c
+@@ -0,0 +1,468 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_placement_user.h"
++#include "ttm_bo_driver.h"
++#include "ttm_object.h"
++#include "ttm_userobj_api.h"
++#include "ttm_lock.h"
++
++struct ttm_bo_user_object {
++ struct ttm_base_object base;
++ struct ttm_buffer_object bo;
++};
++
++static size_t pl_bo_size;
++
++static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
++{
++ size_t page_array_size =
++ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
++
++ if (unlikely(pl_bo_size == 0)) {
++ pl_bo_size = bdev->ttm_bo_extra_size +
++ ttm_round_pot(sizeof(struct ttm_bo_user_object));
++ }
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_buffer_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_bo_user_object, base);
++}
++
++struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base;
++
++ user_bo = ttm_bo_user_lookup(tfile, handle);
++ if (unlikely(user_bo == NULL))
++ return NULL;
++
++ (void)ttm_bo_reference(&user_bo->bo);
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return &user_bo->bo;
++}
++
++static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
++ kfree(user_bo);
++}
++
++static void ttm_bo_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_buffer_object *bo;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ user_bo = container_of(base, struct ttm_bo_user_object, base);
++ bo = &user_bo->bo;
++ ttm_bo_unref(&bo);
++}
++
++static void ttm_bo_user_ref_release(struct ttm_base_object *base,
++ enum ttm_ref_type ref_type)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(base, struct ttm_bo_user_object, base);
++ struct ttm_buffer_object *bo = &user_bo->bo;
++
++ switch (ref_type) {
++ case TTM_REF_SYNCCPU_WRITE:
++ ttm_bo_synccpu_write_release(bo);
++ break;
++ default:
++ BUG();
++ }
++}
++
++static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
++ struct ttm_pl_rep *rep)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ rep->gpu_offset = bo->offset;
++ rep->bo_size = bo->num_pages << PAGE_SHIFT;
++ rep->map_handle = bo->addr_space_offset;
++ rep->placement = bo->mem.flags;
++ rep->handle = user_bo->base.hash.key;
++ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
++}
++
++int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_arg *arg = data;
++ struct ttm_pl_create_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, req->size,
++ ttm_bo_type_device, flags,
++ req->page_alignment, 0, true,
++ NULL, acc_size, &ttm_bo_user_destroy);
++ ttm_read_unlock(lock);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++out:
++ return 0;
++out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_ub_arg *arg = data;
++ struct ttm_pl_create_ub_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++ bo = &user_bo->bo;
++ ret = ttm_buffer_object_init(bdev,
++ bo,
++ req->size,
++ ttm_bo_type_user,
++ flags,
++ req->page_alignment,
++ req->user_address,
++ true,
++ NULL,
++ acc_size,
++ &ttm_bo_user_destroy);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++ ttm_read_unlock(lock);
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++out:
++ return 0;
++out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ union ttm_pl_reference_arg *arg = data;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ int ret;
++
++ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR "Could not reference buffer object.\n");
++ return -EINVAL;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR
++ "Could not add a reference to buffer object.\n");
++ goto out;
++ }
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++
++out:
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_reference_req *arg = data;
++
++ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
++}
++
++int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_synccpu_arg *arg = data;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ bool existed;
++ int ret;
++
++ switch (arg->op) {
++ case TTM_PL_SYNCCPU_OP_GRAB:
++ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for synccpu.\n");
++ return -EINVAL;
++ }
++ bo = &user_bo->bo;
++ base = &user_bo->base;
++ ret = ttm_bo_synccpu_write_grab(bo,
++ arg->access_mode &
++ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
++ if (unlikely(ret != 0)) {
++ ttm_base_object_unref(&base);
++ goto out;
++ }
++ ret = ttm_ref_object_add(tfile, &user_bo->base,
++ TTM_REF_SYNCCPU_WRITE, &existed);
++ if (existed || ret != 0)
++ ttm_bo_synccpu_write_release(bo);
++ ttm_base_object_unref(&base);
++ break;
++ case TTM_PL_SYNCCPU_OP_RELEASE:
++ ret = ttm_ref_object_base_unref(tfile, arg->handle,
++ TTM_REF_SYNCCPU_WRITE);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++out:
++ return ret;
++}
++
++int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_setstatus_arg *arg = data;
++ struct ttm_pl_setstatus_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_device *bdev;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, req->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for setstatus.\n");
++ return -EINVAL;
++ }
++
++ bdev = bo->bdev;
++
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_check_placement(bo, req->set_placement,
++ req->clr_placement);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
++ & ~req->clr_placement;
++ ret = ttm_buffer_object_validate(bo, true, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ ttm_pl_fill_rep(bo, rep);
++out_err2:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++out_err1:
++ ttm_read_unlock(lock);
++out_err0:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_waitidle_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
++ return -EINVAL;
++ }
++
++ ret =
++ ttm_bo_block_reservation(bo, true,
++ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ if (unlikely(ret != 0))
++ goto out;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo,
++ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
++ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unblock_reservation(bo);
++out:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile)
++{
++ struct ttm_bo_user_object *ubo;
++
++ /*
++ * Check bo subclass.
++ */
++
++ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
++ return -EPERM;
++
++ ubo = container_of(bo, struct ttm_bo_user_object, bo);
++ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
++ return 0;
++
++ return -EPERM;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_placement_user.h
+@@ -0,0 +1,252 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PLACEMENT_USER_H_
++#define _TTM_PLACEMENT_USER_H_
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#else
++#include <linux/kernel.h>
++#endif
++
++#include "ttm_placement_common.h"
++
++#define TTM_PLACEMENT_MAJOR 0
++#define TTM_PLACEMENT_MINOR 1
++#define TTM_PLACEMENT_PL 0
++#define TTM_PLACEMENT_DATE "080819"
++
++/**
++ * struct ttm_pl_create_req
++ *
++ * @size: The buffer object size.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_PL_CREATE ioctl.
++ */
++
++struct ttm_pl_create_req {
++ uint64_t size;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_create_ub_req
++ *
++ * @size: The buffer object size.
++ * @user_address: User-space address of the memory area that
++ * should be used to back the buffer object cast to 64-bit.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_PL_CREATE_UB ioctl.
++ */
++
++struct ttm_pl_create_ub_req {
++ uint64_t size;
++ uint64_t user_address;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_rep
++ *
++ * @gpu_offset: The current offset into the memory region used.
++ * This can be used directly by the GPU if there are no
++ * additional GPU mapping procedures used by the driver.
++ *
++ * @bo_size: Actual buffer object size.
++ *
++ * @map_handle: Offset into the device address space.
++ * Used for map, seek, read, write. This will never change
++ * during the lifetime of an object.
++ *
++ * @placement: Flag indicating the placement status of
++ * the buffer object using the TTM_PL flags above.
++ *
++ * @sync_object_arg: Used for user-space synchronization and
++ * depends on the synchronization model used. If fences are
++ * used, this is the buffer_object::fence_type_mask
++ *
++ * Output from the TTM_PL_CREATE, TTM_PL_REFERENCE and
++ * TTM_PL_SETSTATUS ioctls.
++ */
++
++struct ttm_pl_rep {
++ uint64_t gpu_offset;
++ uint64_t bo_size;
++ uint64_t map_handle;
++ uint32_t placement;
++ uint32_t handle;
++ uint32_t sync_object_arg;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_setstatus_req
++ *
++ * @set_placement: Placement flags to set.
++ *
++ * @clr_placement: Placement flags to clear.
++ *
++ * @handle: The object handle
++ *
++ * Input to the TTM_PL_SETSTATUS ioctl.
++ */
++
++struct ttm_pl_setstatus_req {
++ uint32_t set_placement;
++ uint32_t clr_placement;
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_reference_req
++ *
++ * @handle: The object to put a reference on.
++ *
++ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREF ioctls.
++ */
++
++struct ttm_pl_reference_req {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * ACCESS mode flags for SYNCCPU.
++ *
++ * TTM_PL_SYNCCPU_MODE_READ will guarantee that the GPU is not
++ * writing to the buffer.
++ *
++ * TTM_PL_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
++ * accessing the buffer.
++ *
++ * TTM_PL_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
++ * for GPU accesses to finish but return -EBUSY.
++ *
++ * TTM_PL_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
++ * memory while synchronized for CPU.
++ */
++
++#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
++#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
++#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
++#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
++
++/**
++ * struct ttm_pl_synccpu_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @access_mode: access mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * @op: indicates whether to grab or release the
++ * buffer for cpu usage.
++ *
++ * Input to the TTM_PL_SYNCCPU ioctl.
++ */
++
++struct ttm_pl_synccpu_arg {
++ uint32_t handle;
++ uint32_t access_mode;
++ enum {
++ TTM_PL_SYNCCPU_OP_GRAB,
++ TTM_PL_SYNCCPU_OP_RELEASE
++ } op;
++ uint32_t pad64;
++};
++
++/*
++ * Waiting mode flags for the TTM_PL_WAITIDLE ioctl.
++ *
++ * TTM_PL_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_PL_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
++#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_pl_waitidle_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @mode: wait mode indicated by the
++ * TTM_PL_WAITIDLE_MODE flags.
++ *
++ * Argument to the TTM_PL_WAITIDLE ioctl.
++ */
++
++struct ttm_pl_waitidle_arg {
++ uint32_t handle;
++ uint32_t mode;
++};
++
++union ttm_pl_create_arg {
++ struct ttm_pl_create_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_reference_arg {
++ struct ttm_pl_reference_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_setstatus_arg {
++ struct ttm_pl_setstatus_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_create_ub_arg {
++ struct ttm_pl_create_ub_req req;
++ struct ttm_pl_rep rep;
++};
++
++/*
++ * Ioctl offsets.
++ */
++
++#define TTM_PL_CREATE 0x00
++#define TTM_PL_REFERENCE 0x01
++#define TTM_PL_UNREF 0x02
++#define TTM_PL_SYNCCPU 0x03
++#define TTM_PL_WAITIDLE 0x04
++#define TTM_PL_SETSTATUS 0x05
++#define TTM_PL_CREATE_UB 0x06
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_regman.h
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_REGMAN_H_
++#define _TTM_REGMAN_H_
++
++#include <linux/list.h>
++
++struct ttm_fence_object;
++
++struct ttm_reg {
++ struct list_head head;
++ struct ttm_fence_object *fence;
++ uint32_t fence_type;
++ uint32_t new_fence_type;
++};
++
++struct ttm_reg_manager {
++ struct list_head free;
++ struct list_head lru;
++ struct list_head unfenced;
++
++ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
++ void (*reg_destroy)(struct ttm_reg *reg);
++};
++
++extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
++ const void *data,
++ uint32_t fence_class,
++ uint32_t fence_type,
++ int interruptible,
++ int no_wait,
++ struct ttm_reg **reg);
++
++extern void ttm_regs_fence(struct ttm_reg_manager *regs,
++ struct ttm_fence_object *fence);
++
++extern void ttm_regs_free(struct ttm_reg_manager *manager);
++extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
++extern void ttm_regs_init(struct ttm_reg_manager *manager,
++ int (*reg_reusable)(const struct ttm_reg *,
++ const void *),
++ void (*reg_destroy)(struct ttm_reg *));
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_tt.c
+@@ -0,0 +1,656 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/file.h>
++#include <linux/swap.h>
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++
++static int ttm_tt_swapin(struct ttm_tt *ttm);
++
++#if defined(CONFIG_X86)
++static void ttm_tt_clflush_page(struct page *page)
++{
++ uint8_t *page_virtual;
++ unsigned int i;
++
++ if (unlikely(page == NULL))
++ return;
++
++ page_virtual = kmap_atomic(page, KM_USER0);
++
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ clflush(page_virtual + i);
++
++ kunmap_atomic(page_virtual, KM_USER0);
++}
++
++static void ttm_tt_cache_flush_clflush(struct page *pages[],
++ unsigned long num_pages)
++{
++ unsigned long i;
++
++ mb();
++ for (i = 0; i < num_pages; ++i)
++ ttm_tt_clflush_page(*pages++);
++ mb();
++}
++#else
++static void ttm_tt_ipi_handler(void *null)
++{
++ ;
++}
++#endif
++
++void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined(CONFIG_X86)
++ if (cpu_has_clflush) {
++ ttm_tt_cache_flush_clflush(pages, num_pages);
++ return;
++ }
++#else
++ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
++ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
++#endif
++}
++
++/**
++ * Allocates storage for pointers to the pages that back the ttm.
++ *
++ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
++ */
++static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
++{
++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++ ttm->pages = NULL;
++
++ if (size <= PAGE_SIZE)
++ ttm->pages = kzalloc(size, GFP_KERNEL);
++
++ if (!ttm->pages) {
++ ttm->pages = vmalloc_user(size);
++ if (ttm->pages)
++ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
++ }
++}
++
++static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
++{
++ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
++ vfree(ttm->pages);
++ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
++ } else {
++ kfree(ttm->pages);
++ }
++ ttm->pages = NULL;
++}
++
++static struct page *ttm_tt_alloc_page(void)
++{
++ return alloc_page(GFP_KERNEL | __GFP_ZERO);
++}
++
++static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
++{
++ int write;
++ int dirty;
++ struct page *page;
++ int i;
++ struct ttm_backend *be = ttm->be;
++
++ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
++ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
++ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
++
++ if (be)
++ be->func->clear(be);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = ttm->pages[i];
++ if (page == NULL)
++ continue;
++
++ if (page == ttm->dummy_read_page) {
++ BUG_ON(write);
++ continue;
++ }
++
++ if (write && dirty && !PageReserved(page))
++ set_page_dirty_lock(page);
++
++ ttm->pages[i] = NULL;
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
++ put_page(page);
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ struct page *p;
++ struct ttm_bo_device *bdev = ttm->bdev;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ int ret;
++
++ while (NULL == (p = ttm->pages[index])) {
++ p = ttm_tt_alloc_page();
++
++ if (!p)
++ return NULL;
++
++ if (PageHighMem(p)) {
++ ret = ttm_mem_global_alloc(mem_glob,
++ PAGE_SIZE,
++ false,
++ false,
++ true);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[--ttm->first_himem_page] = p;
++ } else {
++ ret =
++ ttm_mem_global_alloc(mem_glob,
++ PAGE_SIZE,
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[++ttm->last_lomem_page] = p;
++ }
++ }
++ return p;
++out_err:
++ put_page(p);
++ return NULL;
++}
++
++struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ int ret;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return NULL;
++ }
++ return __ttm_tt_get_page(ttm, index);
++}
++
++int ttm_tt_populate(struct ttm_tt *ttm)
++{
++ struct page *page;
++ unsigned long i;
++ struct ttm_backend *be;
++ int ret;
++
++ if (ttm->state != tt_unpopulated)
++ return 0;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ be = ttm->be;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = __ttm_tt_get_page(ttm, i);
++ if (!page)
++ return -ENOMEM;
++ }
++
++ be->func->populate(be, ttm->num_pages, ttm->pages,
++ ttm->dummy_read_page);
++ ttm->state = tt_unbound;
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ if (PageHighMem(p))
++ return 0;
++
++ switch (c_state) {
++ case tt_cached:
++ return set_pages_wb(p, 1);
++ case tt_wc:
++ return set_memory_wc((unsigned long) page_address(p), 1);
++ default:
++ return set_pages_uc(p, 1);
++ }
++}
++#else /* CONFIG_X86 */
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ return 0;
++}
++#endif /* CONFIG_X86 */
++
++/*
++ * Change caching policy for the linear kernel map
++ * for range of pages in a ttm.
++ */
++
++static int ttm_tt_set_caching(struct ttm_tt *ttm,
++ enum ttm_caching_state c_state)
++{
++ int i, j;
++ struct page *cur_page;
++ int ret;
++
++ if (ttm->caching_state == c_state)
++ return 0;
++
++ if (c_state != tt_cached) {
++ ret = ttm_tt_populate(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ if (ttm->caching_state == tt_cached)
++ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ if (likely(cur_page != NULL)) {
++ ret = ttm_tt_set_page_caching(cur_page, c_state);
++ if (unlikely(ret != 0))
++ goto out_err;
++ }
++ }
++
++ ttm->caching_state = c_state;
++
++ return 0;
++
++out_err:
++ for (j = 0; j < i; ++j) {
++ cur_page = ttm->pages[j];
++ if (likely(cur_page != NULL)) {
++ (void)ttm_tt_set_page_caching(cur_page,
++ ttm->caching_state);
++ }
++ }
++
++ return ret;
++}
++
++int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
++{
++ enum ttm_caching_state state;
++
++ if (placement & TTM_PL_FLAG_WC)
++ state = tt_wc;
++ else if (placement & TTM_PL_FLAG_UNCACHED)
++ state = tt_uncached;
++ else
++ state = tt_cached;
++
++ return ttm_tt_set_caching(ttm, state);
++}
++
++static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
++{
++ int i;
++ struct page *cur_page;
++ struct ttm_backend *be = ttm->be;
++
++ if (be)
++ be->func->clear(be);
++ (void)ttm_tt_set_caching(ttm, tt_cached);
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ ttm->pages[i] = NULL;
++ if (cur_page) {
++ if (page_count(cur_page) != 1)
++ printk(KERN_ERR
++ "Erroneous page count. Leaking pages.\n");
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
++ PageHighMem(cur_page));
++ __free_page(cur_page);
++ }
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++void ttm_tt_destroy(struct ttm_tt *ttm)
++{
++ struct ttm_backend *be;
++
++ if (unlikely(ttm == NULL))
++ return;
++
++ be = ttm->be;
++ if (likely(be != NULL)) {
++ be->func->destroy(be);
++ ttm->be = NULL;
++ }
++
++ if (likely(ttm->pages != NULL)) {
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm_tt_free_user_pages(ttm);
++ else
++ ttm_tt_free_alloced_pages(ttm);
++
++ ttm_tt_free_page_directory(ttm);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
++ ttm->swap_storage)
++ fput(ttm->swap_storage);
++
++ kfree(ttm);
++}
++
++int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages)
++{
++ struct mm_struct *mm = tsk->mm;
++ int ret;
++ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
++ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
++
++ BUG_ON(num_pages != ttm->num_pages);
++ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
++
++ /**
++ * Account user pages as lowmem pages for now.
++ */
++
++ ret = ttm_mem_global_alloc(mem_glob,
++ num_pages * PAGE_SIZE,
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ down_read(&mm->mmap_sem);
++ ret = get_user_pages(tsk, mm, start, num_pages,
++ write, 0, ttm->pages, NULL);
++ up_read(&mm->mmap_sem);
++
++ if (ret != num_pages && write) {
++ ttm_tt_free_user_pages(ttm);
++ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
++ return -ENOMEM;
++ }
++
++ ttm->tsk = tsk;
++ ttm->start = start;
++ /*ttm->state = tt_unbound;*/
++ /*In ttm_tt_populate, if it's tt_unbound, it returns directly.
++ * But the bo need to be populated.*/
++ ttm->state = tt_unpopulated;
++
++ return 0;
++}
++
++struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
++ uint32_t page_flags, struct page *dummy_read_page)
++{
++ struct ttm_bo_driver *bo_driver = bdev->driver;
++ struct ttm_tt *ttm;
++
++ if (!bo_driver)
++ return NULL;
++
++ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
++ if (!ttm)
++ return NULL;
++
++ ttm->bdev = bdev;
++
++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++ ttm->caching_state = tt_cached;
++ ttm->page_flags = page_flags;
++
++ ttm->dummy_read_page = dummy_read_page;
++
++ ttm_tt_alloc_page_directory(ttm);
++ if (!ttm->pages) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed allocating page table\n");
++ return NULL;
++ }
++ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
++ if (!ttm->be) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed creating ttm backend entry\n");
++ return NULL;
++ }
++ ttm->state = tt_unpopulated;
++ return ttm;
++}
++
++/**
++ * ttm_tt_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void ttm_tt_unbind(struct ttm_tt *ttm)
++{
++ int ret;
++ struct ttm_backend *be = ttm->be;
++
++ if (ttm->state == tt_bound) {
++ ret = be->func->unbind(be);
++ BUG_ON(ret);
++ }
++ ttm->state = tt_unbound;
++}
++
++/**
++ * ttm_tt_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and marks the
++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
++ * modified by the GPU
++ */
++
++int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
++{
++ int ret = 0;
++ struct ttm_backend *be;
++
++ if (!ttm)
++ return -EINVAL;
++
++ if (ttm->state == tt_bound)
++ return 0;
++
++ be = ttm->be;
++
++ ret = ttm_tt_populate(ttm);
++ if (ret)
++ return ret;
++
++ ret = be->func->bind(be, bo_mem);
++ if (ret) {
++ printk(KERN_ERR "Couldn't bind backend.\n");
++ return ret;
++ }
++
++ ttm->state = tt_bound;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
++ return 0;
++}
++
++static int ttm_tt_swapin(struct ttm_tt *ttm)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++ int ret;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
++ ttm->num_pages);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++ return 0;
++ }
++
++ swap_storage = ttm->swap_storage;
++ BUG_ON(swap_storage == NULL);
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = read_mapping_page(swap_space, i, NULL);
++ if (IS_ERR(from_page))
++ goto out_err;
++ to_page = __ttm_tt_get_page(ttm, i);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ page_cache_release(from_page);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
++ fput(swap_storage);
++ ttm->swap_storage = NULL;
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++
++ return 0;
++out_err:
++ ttm_tt_free_alloced_pages(ttm);
++ return -ENOMEM;
++}
++
++int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++
++ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
++ BUG_ON(ttm->caching_state != tt_cached);
++
++ /*
++ * For user buffers, just unpin the pages, as there should be
++ * vma references.
++ */
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ttm_tt_free_user_pages(ttm);
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ ttm->swap_storage = NULL;
++ return 0;
++ }
++
++ if (!persistant_swap_storage) {
++ swap_storage = shmem_file_setup("ttm swap",
++ ttm->num_pages << PAGE_SHIFT,
++ 0);
++ if (unlikely(IS_ERR(swap_storage))) {
++ printk(KERN_ERR "Failed allocating swap storage.\n");
++ return -ENOMEM;
++ }
++ } else
++ swap_storage = persistant_swap_storage;
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = ttm->pages[i];
++ if (unlikely(from_page == NULL))
++ continue;
++ to_page = read_mapping_page(swap_space, i, NULL);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ set_page_dirty(to_page);
++ mark_page_accessed(to_page);
++ /* unlock_page(to_page); */
++ page_cache_release(to_page);
++ }
++
++ ttm_tt_free_alloced_pages(ttm);
++ ttm->swap_storage = swap_storage;
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ if (persistant_swap_storage)
++ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
++
++ return 0;
++out_err:
++ if (!persistant_swap_storage)
++ fput(swap_storage);
++
++ return -ENOMEM;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/drv/ttm/ttm_userobj_api.h
+@@ -0,0 +1,72 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_USEROBJ_API_H_
++#define _TTM_USEROBJ_API_H_
++
++#include "ttm_placement_user.h"
++#include "ttm_fence_user.h"
++#include "ttm_object.h"
++#include "ttm_fence_api.h"
++#include "ttm_bo_api.h"
++
++struct ttm_lock;
++
++/*
++ * User ioctls.
++ */
++
++extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
++
++extern int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence, uint32_t * user_handle);
++
++extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile,
++ uint32_t handle);
++
++extern int
++ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/medfield/Makefile
+@@ -0,0 +1,176 @@
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++INCDIR=drivers/staging/mrst
++
++include_dirs := \
++ -I$(INCDIR)/pvr/include4 \
++ -I$(INCDIR)/pvr/services4/include \
++ -I$(INCDIR)/pvr/services4/include/env/linux-intel \
++ -I$(INCDIR)/pvr/services4/srvkm/env/linux-intel \
++ -I$(INCDIR)/pvr/services4/srvkm/include \
++ -I$(INCDIR)/pvr/services4/srvkm/bridged \
++ -I$(INCDIR)/pvr/services4/system/include \
++ -I$(INCDIR)/pvr/services4/srvkm/hwdefs \
++ -I$(INCDIR)/pvr/services4/srvkm/bridged/sgx \
++ -I$(INCDIR)/pvr/services4/srvkm/devices/sgx \
++ -I$(INCDIR)/ \
++ -I$(INCDIR)/drv \
++ -I$(INCDIR)/drv/ttm \
++ -Iinclude/linux
++
++ccflags-y += $(include_dirs)
++ccflags-y += -I$(INCDIR)/pvr/services4/system/unified -DSGX540 -DSUPPORT_SGX540 -DSGX_CORE_REV=101
++
++ccflags-y += \
++ -Werror \
++ -DLINUX \
++ -DPVR_BUILD_DIR="\"pc_i686_moorestown_linux\"" \
++ -DSERVICES4 \
++ -D_XOPEN_SOURCE=600 \
++ -DPVR2D_VALIDATE_INPUT_PARAMS \
++ -DDISPLAY_CONTROLLER=mrstlfb \
++ -UDEBUG_LOG_PATH_TRUNCATE \
++ -DSUPPORT_SRVINIT \
++ -DSUPPORT_SGX \
++ -DSUPPORT_PERCONTEXT_PB \
++ -DSUPPORT_LINUX_X86_WRITECOMBINE \
++ -DTRANSFER_QUEUE \
++ -DSUPPORT_DRI_DRM \
++ -DSUPPORT_DRI_DRM_EXT \
++ -DSYS_USING_INTERRUPTS \
++ -DSUPPORT_HW_RECOVERY \
++ -DSUPPORT_ACTIVE_POWER_MANAGEMENT \
++ -DPVR_SECURE_HANDLES \
++ -DUSE_PTHREADS \
++ -DSUPPORT_SGX_EVENT_OBJECT \
++ -DSUPPORT_SGX_HWPERF \
++ -DSUPPORT_SGX_LOW_LATENCY_SCHEDULING \
++ -DSUPPORT_LINUX_X86_PAT \
++ -DPVR_PROC_USE_SEQ_FILE \
++ -DSUPPORT_CACHE_LINE_FLUSH \
++ -DSUPPORT_CPU_CACHED_BUFFERS \
++ -DDEBUG_MESA_OGL_TRACE \
++ -DDISABLE_PM
++
++ccflags-$(CONFIG_DRM_MID_RELEASE) += -DBUILD="\"release\"" -DPVR_BUILD_TYPE="\"release\"" -DRELEASE
++ccflags-$(CONFIG_DRM_MID_DEBUG) += -DBUILD="\"debug\"" -DPVR_BUILD_TYPE="\"debug\"" -DDEBUG
++ccflags-$(CONFIG_PCI_MSI) += -DCONFIG_PCI_MSI
++
++
++ENVDIR = ../pvr/services4/srvkm/env/linux-intel
++COMMONDIR = ../pvr/services4/srvkm/common
++BRIDGEDDIR = ../pvr/services4/srvkm/bridged
++SGXDIR = ../pvr/services4/srvkm/devices/sgx
++FBDEVDIR = ../pvr/services4/3rdparty/linux_framebuffer_mrst
++DRMDRVDIR = ../drv
++SYSCONFIGDIR = ../pvr/services4/system/unified
++
++%.medfield.c: %.c
++ cp $< $@
++
++medfield_gfx-y += $(ENVDIR)/osfunc.medfield.o \
++ $(ENVDIR)/mutils.medfield.o \
++ $(ENVDIR)/mmap.medfield.o \
++ $(ENVDIR)/module.medfield.o \
++ $(ENVDIR)/pdump.medfield.o \
++ $(ENVDIR)/proc.medfield.o \
++ $(ENVDIR)/pvr_bridge_k.medfield.o \
++ $(ENVDIR)/pvr_debug.medfield.o \
++ $(ENVDIR)/mm.medfield.o \
++ $(ENVDIR)/event.medfield.o \
++ $(ENVDIR)/osperproc.medfield.o \
++ $(ENVDIR)/pvr_drm.medfield.o
++
++medfield_gfx-y += $(COMMONDIR)/buffer_manager.medfield.o \
++ $(COMMONDIR)/devicemem.medfield.o \
++ $(COMMONDIR)/deviceclass.medfield.o \
++ $(COMMONDIR)/handle.medfield.o \
++ $(COMMONDIR)/hash.medfield.o \
++ $(COMMONDIR)/metrics.medfield.o \
++ $(COMMONDIR)/pvrsrv.medfield.o \
++ $(COMMONDIR)/queue.medfield.o \
++ $(COMMONDIR)/ra.medfield.o \
++ $(COMMONDIR)/resman.medfield.o \
++ $(COMMONDIR)/power.medfield.o \
++ $(COMMONDIR)/mem.medfield.o \
++ $(COMMONDIR)/pdump_common.medfield.o \
++ $(COMMONDIR)/perproc.medfield.o \
++ $(COMMONDIR)/lists.medfield.o \
++ $(COMMONDIR)/mem_debug.medfield.o
++
++medfield_gfx-y += $(BRIDGEDDIR)/bridged_support.medfield.o \
++ $(BRIDGEDDIR)/bridged_pvr_bridge.medfield.o \
++ $(BRIDGEDDIR)/sgx/bridged_sgx_bridge.medfield.o
++
++medfield_gfx-y += $(SYSCONFIGDIR)/sysconfig-medfield.o \
++ $(SYSCONFIGDIR)/sysutils.medfield.o \
++ $(SYSCONFIGDIR)/sys_pvr_drm_export.medfield.o
++
++medfield_gfx-y += $(SGXDIR)/sgxinit.medfield.o \
++ $(SGXDIR)/sgxpower.medfield.o \
++ $(SGXDIR)/sgxreset.medfield.o \
++ $(SGXDIR)/sgxutils.medfield.o \
++ $(SGXDIR)/sgxkick.medfield.o \
++ $(SGXDIR)/sgxtransfer.medfield.o \
++ $(SGXDIR)/mmu.medfield.o \
++ $(SGXDIR)/pb.medfield.o
++
++medfield_gfx-y += $(FBDEVDIR)/mrstlfb_displayclass.medfield.o \
++ $(FBDEVDIR)/mrstlfb_linux.medfield.o
++
++medfield_gfx-y += $(DRMDRVDIR)/lnc_topaz.medfield.o \
++ $(DRMDRVDIR)/topaz_power.medfield.o \
++ $(DRMDRVDIR)/lnc_topazinit.medfield.o \
++ $(DRMDRVDIR)/pnw_topaz.medfield.o \
++ $(DRMDRVDIR)/pnw_topazinit.medfield.o \
++ $(DRMDRVDIR)/psb_bl.medfield.o \
++ $(DRMDRVDIR)/psb_buffer.medfield.o \
++ $(DRMDRVDIR)/psb_dpst.medfield.o \
++ $(DRMDRVDIR)/psb_drv.medfield.o \
++ $(DRMDRVDIR)/psb_fb.medfield.o \
++ $(DRMDRVDIR)/psb_fence.medfield.o \
++ $(DRMDRVDIR)/psb_gtt.medfield.o \
++ $(DRMDRVDIR)/psb_hotplug.medfield.o \
++ $(DRMDRVDIR)/psb_intel_bios.medfield.o \
++ $(DRMDRVDIR)/psb_intel_display.medfield.o \
++ $(DRMDRVDIR)/psb_intel_i2c.medfield.o \
++ $(DRMDRVDIR)/psb_intel_lvds.medfield.o \
++ $(DRMDRVDIR)/psb_intel_modes.medfield.o \
++ $(DRMDRVDIR)/psb_intel_sdvo.medfield.o \
++ $(DRMDRVDIR)/psb_intel_hdmi.medfield.o \
++ $(DRMDRVDIR)/psb_intel_hdmi_i2c.medfield.o \
++ $(DRMDRVDIR)/psb_mmu.medfield.o \
++ $(DRMDRVDIR)/psb_msvdx.medfield.o \
++ $(DRMDRVDIR)/msvdx_power.medfield.o \
++ $(DRMDRVDIR)/psb_msvdxinit.medfield.o \
++ $(DRMDRVDIR)/psb_reset.medfield.o \
++ $(DRMDRVDIR)/psb_schedule.medfield.o \
++ $(DRMDRVDIR)/psb_sgx.medfield.o \
++ $(DRMDRVDIR)/psb_socket.medfield.o \
++ $(DRMDRVDIR)/psb_ttm_glue.medfield.o \
++ $(DRMDRVDIR)/psb_pvr_glue.medfield.o \
++ $(DRMDRVDIR)/psb_umevents.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_agp_backend.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_bo.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_util.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_vm.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_execbuf_util.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_fence.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_fence_user.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_lock.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_memory.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_object.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_pat_compat.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_placement_user.medfield.o \
++ $(DRMDRVDIR)/ttm/ttm_tt.medfield.o \
++ $(DRMDRVDIR)/psb_intel_dsi.medfield.o \
++ $(DRMDRVDIR)/psb_intel_dsi_aava.medfield.o \
++ $(DRMDRVDIR)/mdfld_dsi_dbi.medfield.o
++
++medfield_gfx-$(CONFIG_MDFLD_DSI_DPU) += $(DRMDRVDIR)/mdfld_dsi_dbi_dpu.medfield.o
++
++medfield_gfx-y += $(DRMDRVDIR)/psb_powermgmt.medfield.o $(DRMDRVDIR)/psb_irq.medfield.o
++
++obj-$(CONFIG_DRM_MDFLD) += medfield_gfx.o
++
+--- /dev/null
++++ b/drivers/staging/mrst/moorestown/Makefile
+@@ -0,0 +1,178 @@
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++INCDIR=drivers/staging/mrst
++
++include_dirs := \
++ -I$(INCDIR)/pvr/include4 \
++ -I$(INCDIR)/pvr/services4/include \
++ -I$(INCDIR)/pvr/services4/include/env/linux-intel \
++ -I$(INCDIR)/pvr/services4/srvkm/env/linux-intel \
++ -I$(INCDIR)/pvr/services4/srvkm/include \
++ -I$(INCDIR)/pvr/services4/srvkm/bridged \
++ -I$(INCDIR)/pvr/services4/system/include \
++ -I$(INCDIR)/pvr/services4/srvkm/hwdefs \
++ -I$(INCDIR)/pvr/services4/srvkm/bridged/sgx \
++ -I$(INCDIR)/pvr/services4/srvkm/devices/sgx \
++ -I$(INCDIR)/ \
++ -I$(INCDIR)/drv \
++ -I$(INCDIR)/drv/ttm \
++ -Iinclude/linux
++
++ccflags-y += $(include_dirs)
++ccflags-y += -I$(INCDIR)/pvr/services4/system/unified -DSGX535 -DSUPPORT_SGX535 -DSGX_CORE_REV=121
++
++ccflags-y += \
++ -Werror \
++ -DLINUX \
++ -DPVR_BUILD_DIR="\"pc_i686_moorestown_linux\"" \
++ -DSERVICES4 \
++ -D_XOPEN_SOURCE=600 \
++ -DPVR2D_VALIDATE_INPUT_PARAMS \
++ -DDISPLAY_CONTROLLER=mrstlfb \
++ -UDEBUG_LOG_PATH_TRUNCATE \
++ -DSUPPORT_SRVINIT \
++ -DSUPPORT_SGX \
++ -DSUPPORT_PERCONTEXT_PB \
++ -DSUPPORT_LINUX_X86_WRITECOMBINE \
++ -DTRANSFER_QUEUE \
++ -DSUPPORT_DRI_DRM \
++ -DSUPPORT_DRI_DRM_EXT \
++ -DSYS_USING_INTERRUPTS \
++ -DSUPPORT_HW_RECOVERY \
++ -DSUPPORT_ACTIVE_POWER_MANAGEMENT \
++ -DPVR_SECURE_HANDLES \
++ -DUSE_PTHREADS \
++ -DSUPPORT_SGX_EVENT_OBJECT \
++ -DSUPPORT_SGX_HWPERF \
++ -DSUPPORT_SGX_LOW_LATENCY_SCHEDULING \
++ -DSUPPORT_LINUX_X86_PAT \
++ -DPVR_PROC_USE_SEQ_FILE \
++ -DSUPPORT_CACHE_LINE_FLUSH \
++ -DSUPPORT_CPU_CACHED_BUFFERS \
++ -DDEBUG_MESA_OGL_TRACE \
++ -DDISABLE_PM
++
++ccflags-$(CONFIG_DRM_MID_RELEASE) += -DBUILD="\"release\"" -DPVR_BUILD_TYPE="\"release\"" -DRELEASE
++ccflags-$(CONFIG_DRM_MID_DEBUG) += -DBUILD="\"debug\"" -DPVR_BUILD_TYPE="\"debug\"" -DDEBUG
++ccflags-$(CONFIG_PCI_MSI) += -DCONFIG_PCI_MSI
++
++
++ENVDIR = ../pvr/services4/srvkm/env/linux-intel
++COMMONDIR = ../pvr/services4/srvkm/common
++BRIDGEDDIR = ../pvr/services4/srvkm/bridged
++SGXDIR = ../pvr/services4/srvkm/devices/sgx
++FBDEVDIR = ../pvr/services4/3rdparty/linux_framebuffer_mrst
++DRMDRVDIR = ../drv
++SYSCONFIGDIR = ../pvr/services4/system/unified
++
++%.mrst.c: %.c
++ cp $< $@
++
++mrst_gfx-y += $(ENVDIR)/osfunc.mrst.o \
++ $(ENVDIR)/mutils.mrst.o \
++ $(ENVDIR)/mmap.mrst.o \
++ $(ENVDIR)/module.mrst.o \
++ $(ENVDIR)/pdump.mrst.o \
++ $(ENVDIR)/proc.mrst.o \
++ $(ENVDIR)/pvr_bridge_k.mrst.o \
++ $(ENVDIR)/pvr_debug.mrst.o \
++ $(ENVDIR)/mm.mrst.o \
++ $(ENVDIR)/event.mrst.o \
++ $(ENVDIR)/osperproc.mrst.o \
++ $(ENVDIR)/pvr_drm.mrst.o
++
++mrst_gfx-y += $(COMMONDIR)/buffer_manager.mrst.o \
++ $(COMMONDIR)/devicemem.mrst.o \
++ $(COMMONDIR)/deviceclass.mrst.o \
++ $(COMMONDIR)/handle.mrst.o \
++ $(COMMONDIR)/hash.mrst.o \
++ $(COMMONDIR)/metrics.mrst.o \
++ $(COMMONDIR)/pvrsrv.mrst.o \
++ $(COMMONDIR)/queue.mrst.o \
++ $(COMMONDIR)/ra.mrst.o \
++ $(COMMONDIR)/resman.mrst.o \
++ $(COMMONDIR)/power.mrst.o \
++ $(COMMONDIR)/mem.mrst.o \
++ $(COMMONDIR)/pdump_common.mrst.o \
++ $(COMMONDIR)/perproc.mrst.o \
++ $(COMMONDIR)/lists.mrst.o \
++ $(COMMONDIR)/mem_debug.mrst.o
++
++mrst_gfx-y += $(BRIDGEDDIR)/bridged_support.mrst.o \
++ $(BRIDGEDDIR)/bridged_pvr_bridge.mrst.o \
++ $(BRIDGEDDIR)/sgx/bridged_sgx_bridge.mrst.o
++
++mrst_gfx-y += $(SYSCONFIGDIR)/sysconfig-moorestown.o \
++ $(SYSCONFIGDIR)/sysutils.mrst.o \
++ $(SYSCONFIGDIR)/sys_pvr_drm_export.mrst.o
++
++mrst_gfx-y += $(SGXDIR)/sgxinit.mrst.o \
++ $(SGXDIR)/sgxpower.mrst.o \
++ $(SGXDIR)/sgxreset.mrst.o \
++ $(SGXDIR)/sgxutils.mrst.o \
++ $(SGXDIR)/sgxkick.mrst.o \
++ $(SGXDIR)/sgxtransfer.mrst.o \
++ $(SGXDIR)/mmu.mrst.o \
++ $(SGXDIR)/pb.mrst.o
++
++mrst_gfx-y += $(FBDEVDIR)/mrstlfb_displayclass.mrst.o \
++ $(FBDEVDIR)/mrstlfb_linux.mrst.o
++
++mrst_gfx-y += $(DRMDRVDIR)/lnc_topaz.mrst.o \
++ $(DRMDRVDIR)/topaz_power.mrst.o \
++ $(DRMDRVDIR)/lnc_topazinit.mrst.o \
++ $(DRMDRVDIR)/pnw_topaz.mrst.o \
++ $(DRMDRVDIR)/pnw_topazinit.mrst.o \
++ $(DRMDRVDIR)/psb_bl.mrst.o \
++ $(DRMDRVDIR)/psb_buffer.mrst.o \
++ $(DRMDRVDIR)/psb_dpst.mrst.o \
++ $(DRMDRVDIR)/psb_drv.mrst.o \
++ $(DRMDRVDIR)/psb_fb.mrst.o \
++ $(DRMDRVDIR)/psb_fence.mrst.o \
++ $(DRMDRVDIR)/psb_gtt.mrst.o \
++ $(DRMDRVDIR)/psb_hotplug.mrst.o \
++ $(DRMDRVDIR)/psb_intel_bios.mrst.o \
++ $(DRMDRVDIR)/psb_intel_display.mrst.o \
++ $(DRMDRVDIR)/psb_intel_i2c.mrst.o \
++ $(DRMDRVDIR)/psb_intel_lvds.mrst.o \
++ $(DRMDRVDIR)/psb_intel_modes.mrst.o \
++ $(DRMDRVDIR)/psb_intel_sdvo.mrst.o \
++ $(DRMDRVDIR)/psb_intel_hdmi.mrst.o \
++ $(DRMDRVDIR)/psb_intel_hdmi_i2c.mrst.o \
++ $(DRMDRVDIR)/psb_mmu.mrst.o \
++ $(DRMDRVDIR)/psb_msvdx.mrst.o \
++ $(DRMDRVDIR)/msvdx_power.mrst.o \
++ $(DRMDRVDIR)/psb_msvdxinit.mrst.o \
++ $(DRMDRVDIR)/psb_reset.mrst.o \
++ $(DRMDRVDIR)/psb_schedule.mrst.o \
++ $(DRMDRVDIR)/psb_sgx.mrst.o \
++ $(DRMDRVDIR)/psb_socket.mrst.o \
++ $(DRMDRVDIR)/psb_ttm_glue.mrst.o \
++ $(DRMDRVDIR)/psb_pvr_glue.mrst.o \
++ $(DRMDRVDIR)/psb_umevents.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_agp_backend.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_bo.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_util.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_vm.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_execbuf_util.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_fence.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_fence_user.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_lock.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_memory.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_object.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_pat_compat.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_placement_user.mrst.o \
++ $(DRMDRVDIR)/ttm/ttm_tt.mrst.o \
++ $(DRMDRVDIR)/psb_intel_dsi_aava.mrst.o \
++ $(DRMDRVDIR)/psb_intel_dsi.mrst.o \
++ $(DRMDRVDIR)/mdfld_dsi_dbi.mrst.o
++
++#FIXME: check this is right for MRST ??
++
++mrst_gfx-$(CONFIG_MDFLD_DSI_DPU) += $(DRMDRVDIR)/mdfld_dsi_dbi_dpu.mrst.o
++
++mrst_gfx-y += $(DRMDRVDIR)/psb_powermgmt.mrst.o $(DRMDRVDIR)/psb_irq.mrst.o
++
++obj-$(CONFIG_DRM_MRST) += mrst_gfx.o
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/COPYING
+@@ -0,0 +1,351 @@
++
++This software is Copyright (C) 2008 Imagination Technologies Ltd.
++ All rights reserved.
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2, which is displayed below.
++
++-------------------------------------------------------------------------
++
++ GNU GENERAL PUBLIC LICENSE
++ Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++ Preamble
++
++ The licenses for most software are designed to take away your
++freedom to share and change it. By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users. This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it. (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.) You can apply it to
++your programs, too.
++
++ When we speak of free software, we are referring to freedom, not
++price. Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++ To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++ For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have. You must make sure that they, too, receive or can get the
++source code. And you must show them these terms so they know their
++rights.
++
++ We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++ Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software. If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++ Finally, any free program is threatened constantly by software
++patents. We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary. To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++ The precise terms and conditions for copying, distribution and
++modification follow.
++
++ GNU GENERAL PUBLIC LICENSE
++ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++ 0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License. The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language. (Hereinafter, translation is included without limitation in
++the term "modification".) Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope. The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++ 1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++ 2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++ a) You must cause the modified files to carry prominent notices
++ stating that you changed the files and the date of any change.
++
++ b) You must cause any work that you distribute or publish, that in
++ whole or in part contains or is derived from the Program or any
++ part thereof, to be licensed as a whole at no charge to all third
++ parties under the terms of this License.
++
++ c) If the modified program normally reads commands interactively
++ when run, you must cause it, when started running for such
++ interactive use in the most ordinary way, to print or display an
++ announcement including an appropriate copyright notice and a
++ notice that there is no warranty (or else, saying that you provide
++ a warranty) and that users may redistribute the program under
++ these conditions, and telling the user how to view a copy of this
++ License. (Exception: if the Program itself is interactive but
++ does not normally print such an announcement, your work based on
++ the Program is not required to print an announcement.)
++
++These requirements apply to the modified work as a whole. If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works. But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++ 3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++ a) Accompany it with the complete corresponding machine-readable
++ source code, which must be distributed under the terms of Sections
++ 1 and 2 above on a medium customarily used for software interchange; or,
++
++ b) Accompany it with a written offer, valid for at least three
++ years, to give any third party, for a charge no more than your
++ cost of physically performing source distribution, a complete
++ machine-readable copy of the corresponding source code, to be
++ distributed under the terms of Sections 1 and 2 above on a medium
++ customarily used for software interchange; or,
++
++ c) Accompany it with the information you received as to the offer
++ to distribute corresponding source code. (This alternative is
++ allowed only for noncommercial distribution and only if you
++ received the program in object code or executable form with such
++ an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it. For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable. However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++
++ 4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License. Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++ 5. You are not required to accept this License, since you have not
++signed it. However, nothing else grants you permission to modify or
++distribute the Program or its derivative works. These actions are
++prohibited by law if you do not accept this License. Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++ 6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions. You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++ 7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License. If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all. For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices. Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++
++ 8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded. In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++ 9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time. Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number. If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation. If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++ 10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission. For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this. Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++ NO WARRANTY
++
++ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++ END OF TERMS AND CONDITIONS
++
++ Appendix: How to Apply These Terms to Your New Programs
++
++ If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++ To do so, attach the following notices to the program. It is safest
++to attach them to the start of each source file to most effectively
++convey the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++ <one line to give the program's name and a brief idea of what it does.>
++ Copyright (C) 19yy <name of author>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++
++Also add information on how to contact you by electronic and paper mail.
++
++If the program is interactive, make it output a short notice like this
++when it starts in an interactive mode:
++
++ Gnomovision version 69, Copyright (C) 19yy name of author
++ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++ This is free software, and you are welcome to redistribute it
++ under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License. Of course, the commands you use may
++be called something other than `show w' and `show c'; they could even be
++mouse-clicks or menu items--whatever suits your program.
++
++You should also get your employer (if you work as a programmer) or your
++school, if any, to sign a "copyright disclaimer" for the program, if
++necessary. Here is a sample; alter the names:
++
++ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
++ `Gnomovision' (which makes passes at compilers) written by James Hacker.
++
++ <signature of Ty Coon>, 1 April 1989
++ Ty Coon, President of Vice
++
++This General Public License does not permit incorporating your program into
++proprietary programs. If your program is a subroutine library, you may
++consider it more useful to permit linking proprietary applications with the
++library. If this is what you want to do, use the GNU Library General
++Public License instead of this License.
++
++-------------------------------------------------------------------------
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/INSTALL
+@@ -0,0 +1,76 @@
++
++SGX Embedded Systems DDK for the Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++This file covers how to build and install the Imagination Technologies
++SGX DDK for the Linux kernel.
++
++
++Build System Environment Variables
++-------------------------------------------
++
++The SGX DDK Build scripts depend on a number of environment variables
++being setup before compilation or installation of DDK software can
++commence:
++
++$DISCIMAGE
++The DDK Build scripts install files to the location specified by the
++DISCIMAGE environment variable, when the make install target is used.
++This should point to the target filesystem.
++$ export DISCIMAGE=/path/to/filesystem
++
++$KERNELDIR
++When building the SGX DDK kernel module, the build needs access
++to the headers of the Linux kernel
++$ export KERNELDIR=/path/to/kernel
++
++$PATH
++If a cross compiler is being used make sure the PATH environment variable
++includes the path to the toolchain
++$ export PATH=$PATH:/path/to/toolchain
++
++$CROSS_COMPILE
++Since the SGX DDK Build scripts are geared toward a cross-compilation
++workflow, the CROSS_COMPILE environment variable needs to be set
++$ export CROSS_COMPILE=toolchain-prefix-
++
++
++Build and Install Instructions
++-------------------------------------------
++
++The SGX DDK configures different target builds within directories under
++eurasiacon/build/linux/.
++
++The supported build targets are:
++
++ all Makes everything
++ clean Removes all intermediate files created by a build.
++ clobber Removes all binaries for all builds as well.
++ install Runs the install script generated by the build.
++
++The following variables may be set on the command line to influence a build.
++
++ BUILD The type of build being performed.
++ Alternatives are release, timing or debug.
++ CFLAGS Build dependent optimisations and debug information flags.
++ SILENT Determines whether text of commands is produced during build.
++
++To build for, change to the appropriate target directory, e.g.:
++$ cd eurasiacon/build/linux/platform/kbuild
++
++Issue the make command:
++$ make BUILD=debug all
++
++The DDK software must be installed by the root user. Become the root user:
++$ su
++
++Install the DDK software:
++$ make install
++
++Become an ordinary user again:
++$ exit
++
++
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/README
+@@ -0,0 +1,48 @@
++
++SGX Embedded Systems DDK for Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++
++About
++-------------------------------------------
++
++This is the Imagination Technologies SGX DDK for the Linux kernel.
++
++
++License
++-------------------------------------------
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2.
++
++The full GNU General Public License version 2 is included in this
++distribution in the file called "COPYING".
++
++
++Build and Install Instructions
++-------------------------------------------
++
++For details see the "INSTALL" file.
++
++To build for, change to the appropriate target directory, e.g.:
++$ cd eurasiacon/build/linux/platform/kbuild
++
++Issue the make command:
++$ make BUILD=debug all
++
++The DDK software must be installed by the root user. Become the root user:
++$ su
++
++Install the DDK software:
++$ make install
++
++Become an ordinary user again:
++$ exit
++
++
++Contact information:
++-------------------------------------------
++
++Imagination Technologies Ltd. <gpl-support@imgtec.com>
++Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/eurasiacon/.gitignore
+@@ -0,0 +1,6 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++binary_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/dbgdrvif.h
+@@ -0,0 +1,298 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED 0x00000001UL
++#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL
++#define DEBUG_CAPMODE_HOTKEY 0x00000004UL
++
++#define DEBUG_OUTMODE_STANDARDDBG 0x00000001UL
++#define DEBUG_OUTMODE_MONO 0x00000002UL
++#define DEBUG_OUTMODE_STREAMENABLE 0x00000004UL
++#define DEBUG_OUTMODE_ASYNC 0x00000008UL
++#define DEBUG_OUTMODE_SGXVGA 0x00000010UL
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
++#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004UL
++
++#define DEBUG_FLAGS_TEXTSTREAM 0x80000000UL
++
++#define DEBUG_LEVEL_0 0x00000001UL
++#define DEBUG_LEVEL_1 0x00000003UL
++#define DEBUG_LEVEL_2 0x00000007UL
++#define DEBUG_LEVEL_3 0x0000000FUL
++#define DEBUG_LEVEL_4 0x0000001FUL
++#define DEBUG_LEVEL_5 0x0000003FUL
++#define DEBUG_LEVEL_6 0x0000007FUL
++#define DEBUG_LEVEL_7 0x000000FFUL
++#define DEBUG_LEVEL_8 0x000001FFUL
++#define DEBUG_LEVEL_9 0x000003FFUL
++#define DEBUG_LEVEL_10 0x000007FFUL
++#define DEBUG_LEVEL_11 0x00000FFFUL
++
++#define DEBUG_LEVEL_SEL0 0x00000001UL
++#define DEBUG_LEVEL_SEL1 0x00000002UL
++#define DEBUG_LEVEL_SEL2 0x00000004UL
++#define DEBUG_LEVEL_SEL3 0x00000008UL
++#define DEBUG_LEVEL_SEL4 0x00000010UL
++#define DEBUG_LEVEL_SEL5 0x00000020UL
++#define DEBUG_LEVEL_SEL6 0x00000040UL
++#define DEBUG_LEVEL_SEL7 0x00000080UL
++#define DEBUG_LEVEL_SEL8 0x00000100UL
++#define DEBUG_LEVEL_SEL9 0x00000200UL
++#define DEBUG_LEVEL_SEL10 0x00000400UL
++#define DEBUG_LEVEL_SEL11 0x00000800UL
++
++#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
++#define DEBUG_SERVICE_CREATESTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2 \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WAITFOREVENT \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++
++typedef enum _DBG_EVENT_ {
++ DBG_EVENT_STREAM_DATA = 1
++} DBG_EVENT;
++
++typedef struct _DBG_IN_CREATESTREAM_ {
++ IMG_UINT32 ui32Pages;
++ IMG_UINT32 ui32CapMode;
++ IMG_UINT32 ui32OutMode;
++ IMG_CHAR *pszName;
++}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
++
++typedef struct _DBG_IN_FINDSTREAM_ {
++ IMG_BOOL bResetStream;
++ IMG_CHAR *pszName;
++}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
++
++typedef struct _DBG_IN_WRITESTRING_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_CHAR *pszString;
++}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
++
++typedef struct _DBG_IN_READSTRING_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32StringLen;
++ IMG_CHAR *pszString;
++} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
++
++typedef struct _DBG_IN_SETDEBUGMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32SampleRate;
++} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
++
++typedef struct _DBG_IN_SETDEBUGOUTMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
++
++typedef struct _DBG_IN_SETDEBUGLEVEL_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
++
++typedef struct _DBG_IN_SETFRAME_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Frame;
++} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
++
++typedef struct _DBG_IN_WRITE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_UINT32 ui32TransferSize;
++ IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE, *PDBG_IN_WRITE;
++
++typedef struct _DBG_IN_READ_ {
++ IMG_VOID *pvStream;
++ IMG_BOOL bReadInitBuffer;
++ IMG_UINT32 ui32OutBufferSize;
++ IMG_UINT8 *pui8OutBuffer;
++} DBG_IN_READ, *PDBG_IN_READ;
++
++typedef struct _DBG_IN_OVERRIDEMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
++
++typedef struct _DBG_IN_ISCAPTUREFRAME_ {
++ IMG_VOID *pvStream;
++ IMG_BOOL bCheckPreviousFrame;
++} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
++
++typedef struct _DBG_IN_SETMARKER_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Marker;
++} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
++
++typedef struct _DBG_IN_WRITE_LF_ {
++ IMG_UINT32 ui32Flags;
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_UINT32 ui32BufferSize;
++ IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
++
++#define WRITELF_FLAGS_RESETBUF 0x00000001UL
++
++typedef struct _DBG_STREAM_ {
++ struct _DBG_STREAM_ *psNext;
++ struct _DBG_STREAM_ *psInitStream;
++ IMG_BOOL bInitPhaseComplete;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32Base;
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32RPtr;
++ IMG_UINT32 ui32WPtr;
++ IMG_UINT32 ui32DataWritten;
++ IMG_UINT32 ui32CapMode;
++ IMG_UINT32 ui32OutMode;
++ IMG_UINT32 ui32DebugLevel;
++ IMG_UINT32 ui32DefaultMode;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32Current;
++ IMG_UINT32 ui32Access;
++ IMG_UINT32 ui32SampleRate;
++ IMG_UINT32 ui32Reserved;
++ IMG_UINT32 ui32Timeout;
++ IMG_UINT32 ui32Marker;
++ IMG_CHAR szName[30];
++} DBG_STREAM,*PDBG_STREAM;
++
++typedef struct _DBGKM_SERVICE_TABLE_ {
++ IMG_UINT32 ui32Size;
++ IMG_VOID * (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages);
++ IMG_VOID (IMG_CALLCONV *pfnDestroyStream) (PDBG_STREAM psStream);
++ IMG_VOID * (IMG_CALLCONV *pfnFindStream) (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteBIN) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadBIN) (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
++ IMG_VOID (IMG_CALLCONV *pfnSetCaptureMode) (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
++ IMG_VOID (IMG_CALLCONV *pfnSetOutputMode) (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
++ IMG_VOID (IMG_CALLCONV *pfnSetDebugLevel) (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
++ IMG_VOID (IMG_CALLCONV *pfnSetFrame) (PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetFrame) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnOverrideMode) (PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
++ IMG_VOID (IMG_CALLCONV *pfnDefaultMode) (PDBG_STREAM psStream);
++ IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteStringCM) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteBINCM) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_VOID (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream,IMG_UINT32 ui32Marker);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetMarker) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnStartInitPhase) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnStopInitPhase) (PDBG_STREAM psStream);
++ IMG_BOOL (IMG_CALLCONV *pfnIsCaptureFrame) (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteLF) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadLF) (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetStreamOffset) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnSetStreamOffset) (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++ IMG_BOOL (IMG_CALLCONV *pfnIsLastCaptureFrame) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent);
++} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/img_defs.h
+@@ -0,0 +1,108 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__IMG_DEFS_H__)
++#define __IMG_DEFS_H__
++
++#include "img_types.h"
++
++typedef enum img_tag_TriStateSwitch
++{
++ IMG_ON = 0x00,
++ IMG_OFF,
++ IMG_IGNORE
++
++} img_TriStateSwitch, * img_pTriStateSwitch;
++
++#define IMG_SUCCESS 0
++
++#define IMG_NO_REG 1
++
++#if defined (NO_INLINE_FUNCS)
++ #define INLINE
++ #define FORCE_INLINE
++#else
++#if defined (__cplusplus)
++ #define INLINE inline
++ #define FORCE_INLINE inline
++#else
++#if !defined(INLINE)
++ #define INLINE __inline
++#endif
++ #define FORCE_INLINE static __inline
++#endif
++#endif
++
++
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++#ifdef __GNUC__
++#define unref__ __attribute__ ((unused))
++#else
++#define unref__
++#endif
++
++#ifndef _TCHAR_DEFINED
++#if defined(UNICODE)
++typedef unsigned short TCHAR, *PTCHAR, *PTSTR;
++#else
++typedef char TCHAR, *PTCHAR, *PTSTR;
++#endif
++#define _TCHAR_DEFINED
++#endif
++
++
++ #if defined(__linux__) || defined(__METAG)
++
++ #define IMG_CALLCONV
++ #define IMG_INTERNAL __attribute__ ((visibility ("hidden")))
++ #define IMG_EXPORT
++ #define IMG_IMPORT
++ #define IMG_RESTRICT __restrict__
++
++ #else
++ #error("define an OS")
++ #endif
++
++#ifndef IMG_ABORT
++ #define IMG_ABORT() abort()
++#endif
++
++#ifndef IMG_MALLOC
++ #define IMG_MALLOC(A) malloc (A)
++#endif
++
++#ifndef IMG_FREE
++ #define IMG_FREE(A) free (A)
++#endif
++
++#define IMG_CONST const
++
++#define IMG_FORMAT_PRINTF(x,y)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/img_types.h
+@@ -0,0 +1,128 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS)
++#define IMG_ADDRSPACE_CPUVADDR_BITS 32
++#endif
++
++#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS)
++#define IMG_ADDRSPACE_PHYSADDR_BITS 32
++#endif
++
++typedef unsigned int IMG_UINT, *IMG_PUINT;
++typedef signed int IMG_INT, *IMG_PINT;
++
++typedef unsigned char IMG_UINT8, *IMG_PUINT8;
++typedef unsigned char IMG_BYTE, *IMG_PBYTE;
++typedef signed char IMG_INT8, *IMG_PINT8;
++typedef char IMG_CHAR, *IMG_PCHAR;
++
++typedef unsigned short IMG_UINT16, *IMG_PUINT16;
++typedef signed short IMG_INT16, *IMG_PINT16;
++typedef unsigned long IMG_UINT32, *IMG_PUINT32;
++typedef signed long IMG_INT32, *IMG_PINT32;
++
++#if !defined(IMG_UINT32_MAX)
++ #define IMG_UINT32_MAX 0xFFFFFFFFUL
++#endif
++
++ #if (defined(LINUX) || defined(__METAG))
++#if !defined(USE_CODE)
++ typedef unsigned long long IMG_UINT64, *IMG_PUINT64;
++ typedef long long IMG_INT64, *IMG_PINT64;
++#endif
++ #else
++
++ #error("define an OS")
++
++ #endif
++
++#if !(defined(LINUX) && defined (__KERNEL__))
++typedef float IMG_FLOAT, *IMG_PFLOAT;
++typedef double IMG_DOUBLE, *IMG_PDOUBLE;
++#endif
++
++typedef enum tag_img_bool
++{
++ IMG_FALSE = 0,
++ IMG_TRUE = 1,
++ IMG_FORCE_ALIGN = 0x7FFFFFFF
++} IMG_BOOL, *IMG_PBOOL;
++
++typedef void IMG_VOID, *IMG_PVOID;
++
++typedef IMG_INT32 IMG_RESULT;
++
++typedef IMG_UINT32 IMG_UINTPTR_T;
++
++typedef IMG_PVOID IMG_HANDLE;
++
++typedef void** IMG_HVOID, * IMG_PHVOID;
++
++typedef IMG_UINT32 IMG_SIZE_T;
++
++#define IMG_NULL 0
++
++
++typedef IMG_PVOID IMG_CPU_VIRTADDR;
++
++typedef struct
++{
++
++ IMG_UINT32 uiAddr;
++#define IMG_CAST_TO_DEVVADDR_UINT(var) (IMG_UINT32)(var)
++
++} IMG_DEV_VIRTADDR;
++
++typedef struct _IMG_CPU_PHYADDR
++{
++
++ IMG_UINTPTR_T uiAddr;
++} IMG_CPU_PHYADDR;
++
++typedef struct _IMG_DEV_PHYADDR
++{
++#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
++
++ IMG_UINTPTR_T uiAddr;
++#else
++ IMG_UINT32 uiAddr;
++ IMG_UINT32 uiHighAddr;
++#endif
++} IMG_DEV_PHYADDR;
++
++typedef struct _IMG_SYS_PHYADDR
++{
++
++ IMG_UINTPTR_T uiAddr;
++} IMG_SYS_PHYADDR;
++
++#include "img_defs.h"
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/ioctldef.h
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF)
++
++#ifndef CTL_CODE
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP 0x00000001
++#define FILE_DEVICE_CD_ROM 0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
++#define FILE_DEVICE_CONTROLLER 0x00000004
++#define FILE_DEVICE_DATALINK 0x00000005
++#define FILE_DEVICE_DFS 0x00000006
++#define FILE_DEVICE_DISK 0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
++#define FILE_DEVICE_FILE_SYSTEM 0x00000009
++#define FILE_DEVICE_INPORT_PORT 0x0000000a
++#define FILE_DEVICE_KEYBOARD 0x0000000b
++#define FILE_DEVICE_MAILSLOT 0x0000000c
++#define FILE_DEVICE_MIDI_IN 0x0000000d
++#define FILE_DEVICE_MIDI_OUT 0x0000000e
++#define FILE_DEVICE_MOUSE 0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010
++#define FILE_DEVICE_NAMED_PIPE 0x00000011
++#define FILE_DEVICE_NETWORK 0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER 0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL 0x00000015
++#define FILE_DEVICE_PARALLEL_PORT 0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017
++#define FILE_DEVICE_PRINTER 0x00000018
++#define FILE_DEVICE_SCANNER 0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a
++#define FILE_DEVICE_SERIAL_PORT 0x0000001b
++#define FILE_DEVICE_SCREEN 0x0000001c
++#define FILE_DEVICE_SOUND 0x0000001d
++#define FILE_DEVICE_STREAMS 0x0000001e
++#define FILE_DEVICE_TAPE 0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
++#define FILE_DEVICE_TRANSPORT 0x00000021
++#define FILE_DEVICE_UNKNOWN 0x00000022
++#define FILE_DEVICE_VIDEO 0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
++#define FILE_DEVICE_WAVE_IN 0x00000025
++#define FILE_DEVICE_WAVE_OUT 0x00000026
++#define FILE_DEVICE_8042_PORT 0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
++#define FILE_DEVICE_BATTERY 0x00000029
++#define FILE_DEVICE_BUS_EXTENDER 0x0000002a
++#define FILE_DEVICE_MODEM 0x0000002b
++#define FILE_DEVICE_VDM 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE 0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access ) ( \
++ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED 0
++#define METHOD_IN_DIRECT 1
++#define METHOD_OUT_DIRECT 2
++#define METHOD_NEITHER 3
++
++#define FILE_ANY_ACCESS 0
++#define FILE_READ_ACCESS ( 0x0001 )
++#define FILE_WRITE_ACCESS ( 0x0002 )
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/pdumpdefs.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++typedef enum _PDUMP_PIXEL_FORMAT_
++{
++ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
++
++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++
++} PDUMP_PIXEL_FORMAT;
++
++typedef enum _PDUMP_MEM_FORMAT_
++{
++ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
++
++ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++} PDUMP_MEM_FORMAT;
++
++typedef enum _PDUMP_POLL_OPERATOR
++{
++ PDUMP_POLL_OPERATOR_EQUAL = 0,
++ PDUMP_POLL_OPERATOR_LESS = 1,
++ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++ PDUMP_POLL_OPERATOR_GREATER = 3,
++ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/pvr_debug.h
+@@ -0,0 +1,127 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++
++#include "img_types.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN (512)
++
++#define DBGPRIV_FATAL 0x01UL
++#define DBGPRIV_ERROR 0x02UL
++#define DBGPRIV_WARNING 0x04UL
++#define DBGPRIV_MESSAGE 0x08UL
++#define DBGPRIV_VERBOSE 0x10UL
++#define DBGPRIV_CALLTRACE 0x20UL
++#define DBGPRIV_ALLOC 0x40UL
++#define DBGPRIV_ALLLEVELS (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE)
++
++
++
++#define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__
++#define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__
++#define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__
++#define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__
++#define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__
++#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__
++#define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__
++
++#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
++#define PVRSRV_NEED_PVR_ASSERT
++#endif
++
++#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
++#define PVRSRV_NEED_PVR_DPF
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
++#define PVRSRV_NEED_PVR_TRACE
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++ #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
++ IMG_UINT32 ui32Line);
++
++ #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
++ #define PVR_DBG_BREAK PVRSRVDebugAssertFail("PVR_DBG_BREAK", 0)
++ #else
++ #define PVR_DBG_BREAK
++ #endif
++
++#else
++
++ #define PVR_ASSERT(EXPR)
++ #define PVR_DBG_BREAK
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++ #define PVR_DPF(X) PVRSRVDebugPrintf X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR *pszFormat,
++ ...);
++
++#else
++
++ #define PVR_DPF(X)
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++ #define PVR_TRACE(X) PVRSRVTrace X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++#else
++
++ #define PVR_TRACE(X)
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/pvrmodule.h
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRMODULE_H_
++#define _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/pvrversion.h
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 5
++#define PVRVERSION_BRANCH 15
++#define PVRVERSION_BUILD 3106
++#define PVRVERSION_STRING "1.5.15.3106"
++#define PVRVERSION_FILE "eurasiacon.pj"
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/regpaths.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __REGPATHS_H__
++#define __REGPATHS_H__
++
++#define POWERVR_REG_ROOT "Drivers\\Display\\PowerVR"
++#define POWERVR_CHIP_KEY "\\SGX1\\"
++
++#define POWERVR_EURASIA_KEY "PowerVREurasia\\"
++
++#define POWERVR_SERVICES_KEY "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
++
++#define PVRSRV_REGISTRY_ROOT POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
++
++
++#define MAX_REG_STRING_SIZE 128
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/services.h
+@@ -0,0 +1,872 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++
++#define PVRSRV_4K_PAGE_SIZE 4096UL
++
++#define PVRSRV_MAX_CMD_SIZE 1024
++
++#define PVRSRV_MAX_DEVICES 16
++
++#define EVENTOBJNAME_MAXLENGTH (50)
++
++#define PVRSRV_MEM_READ (1UL<<0)
++#define PVRSRV_MEM_WRITE (1UL<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT (1UL<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ (1UL<<3)
++#define PVRSRV_MEM_INTERLEAVED (1UL<<4)
++#define PVRSRV_MEM_DUMMY (1UL<<5)
++#define PVRSRV_MEM_EDM_PROTECT (1UL<<6)
++#define PVRSRV_MEM_ZERO (1UL<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1UL<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1UL<<9)
++#define PVRSRV_MEM_NO_RESMAN (1UL<<10)
++#define PVRSRV_MEM_EXPORTED (1UL<<11)
++
++
++#define PVRSRV_HAP_CACHED (1UL<<12)
++#define PVRSRV_HAP_UNCACHED (1UL<<13)
++#define PVRSRV_HAP_WRITECOMBINE (1UL<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY (1UL<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS (1UL<<16)
++#define PVRSRV_HAP_MULTI_PROCESS (1UL<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1UL<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL (1UL<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \
++ |PVRSRV_HAP_SINGLE_PROCESS \
++ |PVRSRV_HAP_MULTI_PROCESS \
++ |PVRSRV_HAP_FROM_EXISTING_PROCESS \
++ |PVRSRV_HAP_NO_CPU_VIRTUAL)
++
++#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED
++#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED
++#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE
++
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24)
++
++#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27)
++
++#define PVRSRV_NO_CONTEXT_LOSS 0
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1
++#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80
++
++
++#define PVRSRV_DEFAULT_DEV_COOKIE (1)
++
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT (1UL<<0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1UL<<1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1UL<<2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1UL<<3)
++#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1UL<<4)
++#define PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT (1UL<<5)
++
++#define PVRSRV_MISC_INFO_RESET_PRESENT (1UL<<31)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200
++
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002
++
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC 0x00000001
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC 0x00000002
++
++typedef enum _PVRSRV_DEVICE_TYPE_
++{
++ PVRSRV_DEVICE_TYPE_UNKNOWN = 0 ,
++ PVRSRV_DEVICE_TYPE_MBX1 = 1 ,
++ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 ,
++
++ PVRSRV_DEVICE_TYPE_M24VA = 3,
++ PVRSRV_DEVICE_TYPE_MVDA2 = 4,
++ PVRSRV_DEVICE_TYPE_MVED1 = 5,
++ PVRSRV_DEVICE_TYPE_MSVDX = 6,
++
++ PVRSRV_DEVICE_TYPE_SGX = 7,
++
++ PVRSRV_DEVICE_TYPE_VGX = 8,
++
++ PVRSRV_DEVICE_TYPE_TOPAZ = 9,
++
++ PVRSRV_DEVICE_TYPE_EXT = 10,
++
++ PVRSRV_DEVICE_TYPE_LAST = 10,
++
++ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_TYPE;
++
++#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) )
++#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) )
++#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 )
++
++#define PVRSRV_UNDEFINED_HEAP_ID (~0LU)
++
++typedef enum
++{
++ IMG_EGL = 0x00000001,
++ IMG_OPENGLES1 = 0x00000002,
++ IMG_OPENGLES2 = 0x00000003,
++ IMG_D3DM = 0x00000004,
++ IMG_SRV_UM = 0x00000005,
++ IMG_OPENVG = 0x00000006,
++ IMG_SRVCLIENT = 0x00000007,
++ IMG_VISTAKMD = 0x00000008,
++ IMG_VISTA3DNODE = 0x00000009,
++ IMG_VISTAMVIDEONODE = 0x0000000A,
++ IMG_VISTAVPBNODE = 0x0000000B,
++ IMG_OPENGL = 0x0000000C,
++ IMG_D3D = 0x0000000D,
++#if defined(SUPPORT_GRAPHICS_HAL)
++ IMG_GRAPHICS_HAL = 0x0000000E
++#endif
++
++} IMG_MODULE_ID;
++
++
++#define APPHINT_MAX_STRING_SIZE 256
++
++typedef enum
++{
++ IMG_STRING_TYPE = 1,
++ IMG_FLOAT_TYPE ,
++ IMG_UINT_TYPE ,
++ IMG_INT_TYPE ,
++ IMG_FLAG_TYPE
++}IMG_DATA_TYPE;
++
++
++typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_DEVICE_IDENTIFIER_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++ IMG_UINT32 ui32DeviceIndex;
++
++} PVRSRV_DEVICE_IDENTIFIER;
++
++
++typedef struct _PVRSRV_CLIENT_DEV_DATA_
++{
++ IMG_UINT32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES];
++ PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++ PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++
++} PVRSRV_CLIENT_DEV_DATA;
++
++
++typedef struct _PVRSRV_CONNECTION_
++{
++ IMG_HANDLE hServices;
++ IMG_UINT32 ui32ProcessID;
++ PVRSRV_CLIENT_DEV_DATA sClientDevData;
++}PVRSRV_CONNECTION;
++
++
++typedef struct _PVRSRV_DEV_DATA_
++{
++ PVRSRV_CONNECTION sConnection;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_MEMUPDATE_
++{
++ IMG_UINT32 ui32UpdateAddr;
++ IMG_UINT32 ui32UpdateVal;
++} PVRSRV_MEMUPDATE;
++
++typedef struct _PVRSRV_HWREG_
++{
++ IMG_UINT32 ui32RegAddr;
++ IMG_UINT32 ui32RegVal;
++} PVRSRV_HWREG;
++
++typedef struct _PVRSRV_MEMBLK_
++{
++ IMG_DEV_VIRTADDR sDevVirtAddr;
++ IMG_HANDLE hOSMemHandle;
++ IMG_HANDLE hOSWrapMem;
++ IMG_HANDLE hBuffer;
++ IMG_HANDLE hResItem;
++ IMG_SYS_PHYADDR *psIntSysPAddr;
++
++} PVRSRV_MEMBLK;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
++
++typedef struct _PVRSRV_CLIENT_MEM_INFO_
++{
++
++ IMG_PVOID pvLinAddr;
++
++
++ IMG_PVOID pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++
++
++
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ IMG_UINT32 ui32Flags;
++
++
++
++
++ IMG_UINT32 ui32ClientFlags;
++
++
++ IMG_SIZE_T ui32AllocSize;
++
++
++
++ struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo;
++
++
++ IMG_HANDLE hMappingInfo;
++
++
++ IMG_HANDLE hKernelMemInfo;
++
++
++ IMG_HANDLE hResItem;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ IMG_UINT64 ui64Stamp;
++ #else
++ IMG_UINT32 dummy1;
++ IMG_UINT32 dummy2;
++ #endif
++#endif
++
++
++
++
++ struct _PVRSRV_CLIENT_MEM_INFO_ *psNext;
++
++} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
++
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++typedef struct _PVRSRV_HEAP_INFO_
++{
++ IMG_UINT32 ui32HeapID;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++ IMG_UINT32 ui32HeapByteSize;
++ IMG_UINT32 ui32Attribs;
++}PVRSRV_HEAP_INFO;
++
++
++
++
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++
++ IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH];
++
++ IMG_HANDLE hOSEventKM;
++
++} PVRSRV_EVENTOBJECT;
++
++typedef struct _PVRSRV_MISC_INFO_
++{
++ IMG_UINT32 ui32StateRequest;
++ IMG_UINT32 ui32StatePresent;
++
++
++ IMG_VOID *pvSOCTimerRegisterKM;
++ IMG_VOID *pvSOCTimerRegisterUM;
++ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
++ IMG_HANDLE hSOCTimerRegisterMappingInfo;
++
++
++ IMG_VOID *pvSOCClockGateRegs;
++ IMG_UINT32 ui32SOCClockGateRegsSize;
++
++
++ IMG_CHAR *pszMemoryStr;
++ IMG_UINT32 ui32MemoryStrLen;
++
++
++ PVRSRV_EVENTOBJECT sGlobalEventObject;
++ IMG_HANDLE hOSGlobalEvent;
++
++
++ IMG_UINT32 aui32DDKVersion[4];
++
++
++
++ IMG_BOOL bCPUCacheFlushAll;
++
++ IMG_BOOL bDeferCPUCacheFlush;
++
++ IMG_PVOID pvRangeAddrStart;
++
++ IMG_PVOID pvRangeAddrEnd;
++
++} PVRSRV_MISC_INFO;
++
++
++typedef enum _PVRSRV_CLIENT_EVENT_
++{
++ PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0,
++} PVRSRV_CLIENT_EVENT;
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(IMG_CONST PVRSRV_CLIENT_EVENT eEvent,
++ PVRSRV_DEV_DATA *psDevData,
++ IMG_PVOID pvData);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 *puiNumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *puiDevIDs);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 uiDevIndex,
++ PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_DEVICE_TYPE eDeviceType);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++#if 1
++IMG_IMPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++
++IMG_IMPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++
++IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hOSEvent,
++ volatile IMG_UINT32 *pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" \
++ ": " logStr " (size = 0x%lx)", ui32Size)), \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo))
++#else
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Attribs,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_HANDLE *phMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hKernelMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_UINT32 ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory2(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_VOID *pvLinAddr,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo,
++ IMG_UINT32 ui32Attribs);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_SYS_PHYADDR sSysPhysAddr,
++ IMG_UINT32 uiSizeInBytes,
++ IMG_PVOID *ppvUserAddr,
++ IMG_UINT32 *puiActualSize,
++ IMG_PVOID *ppvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_PVOID pvUserAddr,
++ IMG_PVOID pvProcess);
++
++typedef enum _PVRSRV_SYNCVAL_MODE_
++{
++ PVRSRV_SYNCVAL_READ = IMG_TRUE,
++ PVRSRV_SYNCVAL_WRITE = IMG_FALSE,
++
++} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
++
++typedef IMG_UINT32 PVRSRV_SYNCVAL;
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID);
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat,
++ DISPLAY_DIMS *psDims);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice,
++ DISPLAY_INFO* psDisplayInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE hDevice,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_UINT32 *pui32SwapChainID,
++ IMG_HANDLE *phSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psDstRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psSrcRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain);
++
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice,
++ BUFFER_INFO *psBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ IMG_BOOL bIsRead,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_PVOID pvAltLinAddr,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_PVOID pvAltLinAddr,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelMemInfo,
++ IMG_DEV_PHYADDR *pPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ IMG_UINT32 ui32Start,
++ IMG_UINT32 ui32Length,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Frame);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CONST IMG_CHAR *pszComment,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_BOOL bContinuous,
++ IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Flags,
++ IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CHAR *pszString,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_BOOL *pbIsCapturing);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CONST IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++
++IMG_IMPORT
++IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegOffset,
++ IMG_BOOL bLastFrame);
++
++IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName);
++IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
++IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, const IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr);
++
++IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
++IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
++IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
++IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void);
++IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale);
++
++
++
++
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
++ const IMG_CHAR *pszAppName,
++ IMG_VOID **ppvState);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
++ IMG_VOID *pvHintState);
++
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID *pvHintState,
++ const IMG_CHAR *pszHintName,
++ IMG_DATA_TYPE eDataType,
++ const IMG_VOID *pvDefault,
++ IMG_VOID *pvReturn);
++
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
++IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
++
++struct _PVRSRV_MUTEX_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE;
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex);
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++
++#if (defined(DEBUG) && defined(__linux__))
++IMG_PVOID PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_PVOID PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_VOID PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
++IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_SIZE_T ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++#endif
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hOSEvent);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyPendingSyncOps(PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelSyncInfo,
++ IMG_UINT32 ui32ModifyFlags,
++ IMG_UINT32 *pui32ReadOpsPending,
++ IMG_UINT32 *pui32WriteOpsPending);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyCompleteSyncOps(PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelSyncInfo,
++ IMG_UINT32 ui32ModifyFlags);
++
++
++#define TIME_NOT_PASSED_UINT32(a,b,c) ((a - b) < c)
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/servicesext.h
+@@ -0,0 +1,648 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#define PVRSRV_LOCKFLG_READONLY (1)
++
++typedef enum _PVRSRV_ERROR_
++{
++ PVRSRV_OK = 0,
++ PVRSRV_ERROR_GENERIC = 1,
++ PVRSRV_ERROR_OUT_OF_MEMORY = 2,
++ PVRSRV_ERROR_TOO_FEW_BUFFERS = 3,
++ PVRSRV_ERROR_SYMBOL_NOT_FOUND = 4,
++ PVRSRV_ERROR_OUT_OF_HSPACE = 5,
++ PVRSRV_ERROR_INVALID_PARAMS = 6,
++ PVRSRV_ERROR_TILE_MAP_FAILED = 7,
++ PVRSRV_ERROR_INIT_FAILURE = 8,
++ PVRSRV_ERROR_CANT_REGISTER_CALLBACK = 9,
++ PVRSRV_ERROR_INVALID_DEVICE = 10,
++ PVRSRV_ERROR_NOT_OWNER = 11,
++ PVRSRV_ERROR_BAD_MAPPING = 12,
++ PVRSRV_ERROR_TIMEOUT = 13,
++ PVRSRV_ERROR_NO_PRIMARY = 14,
++ PVRSRV_ERROR_FLIP_CHAIN_EXISTS = 15,
++ PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA = 16,
++ PVRSRV_ERROR_SCENE_INVALID = 17,
++ PVRSRV_ERROR_STREAM_ERROR = 18,
++ PVRSRV_ERROR_INVALID_INTERRUPT = 19,
++ PVRSRV_ERROR_FAILED_DEPENDENCIES = 20,
++ PVRSRV_ERROR_CMD_NOT_PROCESSED = 21,
++ PVRSRV_ERROR_CMD_TOO_BIG = 22,
++ PVRSRV_ERROR_DEVICE_REGISTER_FAILED = 23,
++ PVRSRV_ERROR_FIFO_SPACE = 24,
++ PVRSRV_ERROR_TA_RECOVERY = 25,
++ PVRSRV_ERROR_INDOSORLOWPOWER = 26,
++ PVRSRV_ERROR_TOOMANYBUFFERS = 27,
++ PVRSRV_ERROR_NOT_SUPPORTED = 28,
++ PVRSRV_ERROR_PROCESSING_BLOCKED = 29,
++
++
++ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE = 31,
++ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE = 32,
++ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS = 33,
++ PVRSRV_ERROR_RETRY = 34,
++
++ PVRSRV_ERROR_DDK_VERSION_MISMATCH = 35,
++ PVRSRV_ERROR_BUILD_MISMATCH = 36,
++ PVRSRV_ERROR_PDUMP_BUF_OVERFLOW,
++
++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_ERROR;
++
++
++typedef enum _PVRSRV_DEVICE_CLASS_
++{
++ PVRSRV_DEVICE_CLASS_3D = 0 ,
++ PVRSRV_DEVICE_CLASS_DISPLAY = 1 ,
++ PVRSRV_DEVICE_CLASS_BUFFER = 2 ,
++ PVRSRV_DEVICE_CLASS_VIDEO = 3 ,
++
++ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_CLASS;
++
++
++
++typedef enum _PVRSRV_SYS_POWER_STATE_
++{
++ PVRSRV_SYS_POWER_STATE_Unspecified = -1,
++ PVRSRV_SYS_POWER_STATE_D0 = 0,
++ PVRSRV_SYS_POWER_STATE_D1 = 1,
++ PVRSRV_SYS_POWER_STATE_D2 = 2,
++ PVRSRV_SYS_POWER_STATE_D3 = 3,
++ PVRSRV_SYS_POWER_STATE_D4 = 4,
++
++ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE;
++
++
++typedef enum _PVRSRV_DEV_POWER_STATE_
++{
++ PVRSRV_DEV_POWER_STATE_DEFAULT = -1,
++ PVRSRV_DEV_POWER_STATE_ON = 0,
++ PVRSRV_DEV_POWER_STATE_IDLE = 1,
++ PVRSRV_DEV_POWER_STATE_OFF = 2,
++
++ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;
++
++
++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++
++typedef enum _PVRSRV_PIXEL_FORMAT_ {
++
++ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0,
++ PVRSRV_PIXEL_FORMAT_RGB565 = 1,
++ PVRSRV_PIXEL_FORMAT_RGB555 = 2,
++ PVRSRV_PIXEL_FORMAT_RGB888 = 3,
++ PVRSRV_PIXEL_FORMAT_BGR888 = 4,
++ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8,
++ PVRSRV_PIXEL_FORMAT_PAL12 = 13,
++ PVRSRV_PIXEL_FORMAT_PAL8 = 14,
++ PVRSRV_PIXEL_FORMAT_PAL4 = 15,
++ PVRSRV_PIXEL_FORMAT_PAL2 = 16,
++ PVRSRV_PIXEL_FORMAT_PAL1 = 17,
++ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18,
++ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19,
++ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20,
++ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21,
++ PVRSRV_PIXEL_FORMAT_YV12 = 22,
++ PVRSRV_PIXEL_FORMAT_I420 = 23,
++ PVRSRV_PIXEL_FORMAT_IMC2 = 25,
++ PVRSRV_PIXEL_FORMAT_XRGB8888,
++ PVRSRV_PIXEL_FORMAT_XBGR8888,
++ PVRSRV_PIXEL_FORMAT_BGRA8888,
++ PVRSRV_PIXEL_FORMAT_XRGB4444,
++ PVRSRV_PIXEL_FORMAT_ARGB8332,
++ PVRSRV_PIXEL_FORMAT_A2RGB10,
++ PVRSRV_PIXEL_FORMAT_A2BGR10,
++ PVRSRV_PIXEL_FORMAT_P8,
++ PVRSRV_PIXEL_FORMAT_L8,
++ PVRSRV_PIXEL_FORMAT_A8L8,
++ PVRSRV_PIXEL_FORMAT_A4L4,
++ PVRSRV_PIXEL_FORMAT_L16,
++ PVRSRV_PIXEL_FORMAT_L6V5U5,
++ PVRSRV_PIXEL_FORMAT_V8U8,
++ PVRSRV_PIXEL_FORMAT_V16U16,
++ PVRSRV_PIXEL_FORMAT_QWVU8888,
++ PVRSRV_PIXEL_FORMAT_XLVU8888,
++ PVRSRV_PIXEL_FORMAT_QWVU16,
++ PVRSRV_PIXEL_FORMAT_D16,
++ PVRSRV_PIXEL_FORMAT_D24S8,
++ PVRSRV_PIXEL_FORMAT_D24X8,
++
++
++ PVRSRV_PIXEL_FORMAT_ABGR16,
++ PVRSRV_PIXEL_FORMAT_ABGR16F,
++ PVRSRV_PIXEL_FORMAT_ABGR32,
++ PVRSRV_PIXEL_FORMAT_ABGR32F,
++ PVRSRV_PIXEL_FORMAT_B10GR11,
++ PVRSRV_PIXEL_FORMAT_GR88,
++ PVRSRV_PIXEL_FORMAT_BGR32,
++ PVRSRV_PIXEL_FORMAT_GR32,
++ PVRSRV_PIXEL_FORMAT_E5BGR9,
++
++
++ PVRSRV_PIXEL_FORMAT_DXT1,
++ PVRSRV_PIXEL_FORMAT_DXT2,
++ PVRSRV_PIXEL_FORMAT_DXT3,
++ PVRSRV_PIXEL_FORMAT_DXT4,
++ PVRSRV_PIXEL_FORMAT_DXT5,
++
++
++ PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++ PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++
++
++ PVRSRV_PIXEL_FORMAT_NV11,
++ PVRSRV_PIXEL_FORMAT_NV12,
++
++
++ PVRSRV_PIXEL_FORMAT_YUY2,
++ PVRSRV_PIXEL_FORMAT_YUV420,
++ PVRSRV_PIXEL_FORMAT_YUV444,
++ PVRSRV_PIXEL_FORMAT_VUY444,
++ PVRSRV_PIXEL_FORMAT_YUYV,
++ PVRSRV_PIXEL_FORMAT_YVYU,
++ PVRSRV_PIXEL_FORMAT_UYVY,
++ PVRSRV_PIXEL_FORMAT_VYUY,
++
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV,
++
++
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B32G32R32,
++ PVRSRV_PIXEL_FORMAT_B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_G32R32,
++ PVRSRV_PIXEL_FORMAT_G32R32F,
++ PVRSRV_PIXEL_FORMAT_G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_D32F,
++ PVRSRV_PIXEL_FORMAT_R32,
++ PVRSRV_PIXEL_FORMAT_R32F,
++ PVRSRV_PIXEL_FORMAT_R32_UINT,
++ PVRSRV_PIXEL_FORMAT_R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16F,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G16R16,
++ PVRSRV_PIXEL_FORMAT_G16R16F,
++ PVRSRV_PIXEL_FORMAT_G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_R16,
++ PVRSRV_PIXEL_FORMAT_R16F,
++ PVRSRV_PIXEL_FORMAT_R16_UINT,
++ PVRSRV_PIXEL_FORMAT_R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_R16_SINT,
++ PVRSRV_PIXEL_FORMAT_R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G8R8,
++ PVRSRV_PIXEL_FORMAT_G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A8,
++ PVRSRV_PIXEL_FORMAT_R8,
++ PVRSRV_PIXEL_FORMAT_R8_UINT,
++ PVRSRV_PIXEL_FORMAT_R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_R8_SINT,
++ PVRSRV_PIXEL_FORMAT_R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B10G11R11,
++ PVRSRV_PIXEL_FORMAT_B10G11R11F,
++
++
++ PVRSRV_PIXEL_FORMAT_X24G8R32,
++ PVRSRV_PIXEL_FORMAT_G8R24,
++ PVRSRV_PIXEL_FORMAT_X8R24,
++ PVRSRV_PIXEL_FORMAT_E5B9G9R9,
++ PVRSRV_PIXEL_FORMAT_R1,
++
++ PVRSRV_PIXEL_FORMAT_BC1,
++ PVRSRV_PIXEL_FORMAT_BC1_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC1_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC2,
++ PVRSRV_PIXEL_FORMAT_BC2_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC2_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC3,
++ PVRSRV_PIXEL_FORMAT_BC3_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC3_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC4,
++ PVRSRV_PIXEL_FORMAT_BC4_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC4_SNORM,
++ PVRSRV_PIXEL_FORMAT_BC5,
++ PVRSRV_PIXEL_FORMAT_BC5_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC5_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_L_F16,
++ PVRSRV_PIXEL_FORMAT_L_F16_REP,
++ PVRSRV_PIXEL_FORMAT_L_F16_A_F16,
++ PVRSRV_PIXEL_FORMAT_A_F16,
++ PVRSRV_PIXEL_FORMAT_B16G16R16F,
++
++ PVRSRV_PIXEL_FORMAT_L_F32,
++ PVRSRV_PIXEL_FORMAT_A_F32,
++ PVRSRV_PIXEL_FORMAT_L_F32_A_F32,
++
++
++ PVRSRV_PIXEL_FORMAT_PVRTC2,
++ PVRSRV_PIXEL_FORMAT_PVRTC4,
++ PVRSRV_PIXEL_FORMAT_PVRTCII2,
++ PVRSRV_PIXEL_FORMAT_PVRTCII4,
++ PVRSRV_PIXEL_FORMAT_PVRTCIII,
++ PVRSRV_PIXEL_FORMAT_PVRO8,
++ PVRSRV_PIXEL_FORMAT_PVRO88,
++ PVRSRV_PIXEL_FORMAT_PT1,
++ PVRSRV_PIXEL_FORMAT_PT2,
++ PVRSRV_PIXEL_FORMAT_PT4,
++ PVRSRV_PIXEL_FORMAT_PT8,
++ PVRSRV_PIXEL_FORMAT_PTW,
++ PVRSRV_PIXEL_FORMAT_PTB,
++ PVRSRV_PIXEL_FORMAT_MONO8,
++ PVRSRV_PIXEL_FORMAT_MONO16,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUYV,
++ PVRSRV_PIXEL_FORMAT_C0_UYVY,
++ PVRSRV_PIXEL_FORMAT_C0_YVYU,
++ PVRSRV_PIXEL_FORMAT_C0_VYUY,
++ PVRSRV_PIXEL_FORMAT_C1_YUYV,
++ PVRSRV_PIXEL_FORMAT_C1_UYVY,
++ PVRSRV_PIXEL_FORMAT_C1_YVYU,
++ PVRSRV_PIXEL_FORMAT_C1_VYUY,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_3P,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_3P,
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10F,
++ PVRSRV_PIXEL_FORMAT_B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK,
++
++ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++} PVRSRV_PIXEL_FORMAT;
++
++typedef enum _PVRSRV_ALPHA_FORMAT_ {
++ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001,
++ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002,
++ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F,
++} PVRSRV_ALPHA_FORMAT;
++
++typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
++ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000,
++ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000,
++ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000,
++} PVRSRV_COLOURSPACE_FORMAT;
++
++
++typedef enum _PVRSRV_ROTATION_ {
++ PVRSRV_ROTATE_0 = 0,
++ PVRSRV_ROTATE_90 = 1,
++ PVRSRV_ROTATE_180 = 2,
++ PVRSRV_ROTATE_270 = 3,
++ PVRSRV_FLIP_Y
++
++} PVRSRV_ROTATION;
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1)
++#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2)
++
++typedef struct _PVRSRV_SYNC_DATA_
++{
++
++ IMG_UINT32 ui32WriteOpsPending;
++ volatile IMG_UINT32 ui32WriteOpsComplete;
++
++
++ IMG_UINT32 ui32ReadOpsPending;
++ volatile IMG_UINT32 ui32ReadOpsComplete;
++
++
++ IMG_UINT32 ui32LastOpDumpVal;
++ IMG_UINT32 ui32LastReadOpDumpVal;
++
++} PVRSRV_SYNC_DATA;
++
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ IMG_HANDLE hMappingInfo;
++
++
++ IMG_HANDLE hKernelSyncInfo;
++
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++
++
++typedef struct PVRSRV_RESOURCE_TAG
++{
++ volatile IMG_UINT32 ui32Lock;
++ IMG_UINT32 ui32ID;
++}PVRSRV_RESOURCE;
++typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
++
++
++typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE);
++typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE);
++
++typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
++typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
++
++
++typedef struct _IMG_RECT_
++{
++ IMG_INT32 x0;
++ IMG_INT32 y0;
++ IMG_INT32 x1;
++ IMG_INT32 y1;
++}IMG_RECT;
++
++typedef struct _IMG_RECT_16_
++{
++ IMG_INT16 x0;
++ IMG_INT16 y0;
++ IMG_INT16 x1;
++ IMG_INT16 y1;
++}IMG_RECT_16;
++
++
++typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_SYS_PHYADDR**,
++ IMG_SIZE_T*,
++ IMG_VOID**,
++ IMG_HANDLE*,
++ IMG_BOOL*);
++
++
++typedef struct DISPLAY_DIMS_TAG
++{
++ IMG_UINT32 ui32ByteStride;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++} DISPLAY_DIMS;
++
++
++typedef struct DISPLAY_FORMAT_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++} DISPLAY_FORMAT;
++
++typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++} DISPLAY_SURF_ATTRIBUTES;
++
++
++typedef struct DISPLAY_MODE_INFO_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++
++ IMG_UINT32 ui32RefreshHZ;
++
++ IMG_UINT32 ui32OEMFlags;
++} DISPLAY_MODE_INFO;
++
++
++
++#define MAX_DISPLAY_NAME_SIZE (50)
++
++typedef struct DISPLAY_INFO_TAG
++{
++ IMG_UINT32 ui32MaxSwapChains;
++
++ IMG_UINT32 ui32MaxSwapChainBuffers;
++
++ IMG_UINT32 ui32MinSwapInterval;
++
++ IMG_UINT32 ui32MaxSwapInterval;
++
++ IMG_UINT32 ui32PhysicalWidthmm;
++ IMG_UINT32 ui32PhysicalHeightmm;
++
++ IMG_CHAR szDisplayName[MAX_DISPLAY_NAME_SIZE];
++
++#if defined(SUPPORT_HW_CURSOR)
++ IMG_UINT16 ui32CursorWidth;
++ IMG_UINT16 ui32CursorHeight;
++#endif
++
++} DISPLAY_INFO;
++
++typedef struct ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++}ACCESS_INFO;
++
++
++typedef struct PVRSRV_CURSOR_SHAPE_TAG
++{
++ IMG_UINT16 ui16Width;
++ IMG_UINT16 ui16Height;
++ IMG_INT16 i16XHot;
++ IMG_INT16 i16YHot;
++
++
++ IMG_VOID* pvMask;
++ IMG_INT16 i16MaskByteStride;
++
++
++ IMG_VOID* pvColour;
++ IMG_INT16 i16ColourByteStride;
++ PVRSRV_PIXEL_FORMAT eColourPixelFormat;
++} PVRSRV_CURSOR_SHAPE;
++
++#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION (1<<3)
++
++typedef struct PVRSRV_CURSOR_INFO_TAG
++{
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_BOOL bVisible;
++
++
++ IMG_INT16 i16XPos;
++ IMG_INT16 i16YPos;
++
++
++ PVRSRV_CURSOR_SHAPE sCursorShape;
++
++
++ IMG_UINT32 ui32Rotation;
++
++} PVRSRV_CURSOR_INFO;
++
++
++typedef struct _PVRSRV_REGISTRY_INFO_
++{
++ IMG_UINT32 ui32DevCookie;
++ IMG_PCHAR pszKey;
++ IMG_PCHAR pszValue;
++ IMG_PCHAR pszBuf;
++ IMG_UINT32 ui32BufSize;
++} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1)
++
++#define MAX_BUFFER_DEVICE_NAME_SIZE (50)
++
++typedef struct BUFFER_INFO_TAG
++{
++ IMG_UINT32 ui32BufferCount;
++ IMG_UINT32 ui32BufferDeviceID;
++ PVRSRV_PIXEL_FORMAT pixelformat;
++ IMG_UINT32 ui32ByteStride;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Flags;
++ IMG_CHAR szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE];
++} BUFFER_INFO;
++
++typedef enum _OVERLAY_DEINTERLACE_MODE_
++{
++ WEAVE=0x0,
++ BOB_ODD,
++ BOB_EVEN,
++ BOB_EVEN_NONINTERLEAVED
++} OVERLAY_DEINTERLACE_MODE;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/sgx_options.h
+@@ -0,0 +1,224 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(DEBUG) || defined (INTERNAL_TEST)
++#define DEBUG_SET_OFFSET OPTIONS_BIT0
++#define OPTIONS_BIT0 0x1
++#else
++#define OPTIONS_BIT0 0x0
++#endif
++
++#if defined(PDUMP) || defined (INTERNAL_TEST)
++#define PDUMP_SET_OFFSET OPTIONS_BIT1
++#define OPTIONS_BIT1 (0x1 << 1)
++#else
++#define OPTIONS_BIT1 0x0
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined (INTERNAL_TEST)
++#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2
++#define OPTIONS_BIT2 (0x1 << 2)
++#else
++#define OPTIONS_BIT2 0x0
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY) || defined (INTERNAL_TEST)
++#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3
++#define OPTIONS_BIT3 (0x1 << 3)
++#else
++#define OPTIONS_BIT3 0x0
++#endif
++
++
++
++#if defined(PVR_SECURE_HANDLES) || defined (INTERNAL_TEST)
++#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
++#define OPTIONS_BIT4 (0x1 << 4)
++#else
++#define OPTIONS_BIT4 0x0
++#endif
++
++#if defined(SGX_BYPASS_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5
++#define OPTIONS_BIT5 (0x1 << 5)
++#else
++#define OPTIONS_BIT5 0x0
++#endif
++
++#if defined(SGX_DMS_AGE_ENABLE) || defined (INTERNAL_TEST)
++#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6
++#define OPTIONS_BIT6 (0x1 << 6)
++#else
++#define OPTIONS_BIT6 0x0
++#endif
++
++#if defined(SGX_FAST_DPM_INIT) || defined (INTERNAL_TEST)
++#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8
++#define OPTIONS_BIT8 (0x1 << 8)
++#else
++#define OPTIONS_BIT8 0x0
++#endif
++
++#if defined(SGX_FEATURE_DCU) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9
++#define OPTIONS_BIT9 (0x1 << 9)
++#else
++#define OPTIONS_BIT9 0x0
++#endif
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10
++#define OPTIONS_BIT10 (0x1 << 10)
++#else
++#define OPTIONS_BIT10 0x0
++#endif
++
++#if defined(SGX_FEATURE_MULTITHREADED_UKERNEL) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET OPTIONS_BIT11
++#define OPTIONS_BIT11 (0x1 << 11)
++#else
++#define OPTIONS_BIT11 0x0
++#endif
++
++
++
++#if defined(SGX_FEATURE_OVERLAPPED_SPM) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET OPTIONS_BIT12
++#define OPTIONS_BIT12 (0x1 << 12)
++#else
++#define OPTIONS_BIT12 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT13
++#define OPTIONS_BIT13 (0x1 << 13)
++#else
++#define OPTIONS_BIT13 0x0
++#endif
++
++#if defined(SGX_SUPPORT_HWPROFILING) || defined (INTERNAL_TEST)
++#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT14
++#define OPTIONS_BIT14 (0x1 << 14)
++#else
++#define OPTIONS_BIT14 0x0
++#endif
++
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) || defined (INTERNAL_TEST)
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT15
++#define OPTIONS_BIT15 (0x1 << 15)
++#else
++#define OPTIONS_BIT15 0x0
++#endif
++
++#if defined(SUPPORT_DISPLAYCONTROLLER_TILING) || defined (INTERNAL_TEST)
++#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT16
++#define OPTIONS_BIT16 (0x1 << 16)
++#else
++#define OPTIONS_BIT16 0x0
++#endif
++
++#if defined(SUPPORT_PERCONTEXT_PB) || defined (INTERNAL_TEST)
++#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT17
++#define OPTIONS_BIT17 (0x1 << 17)
++#else
++#define OPTIONS_BIT17 0x0
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_HWPERF_SET_OFFSET OPTIONS_BIT18
++#define OPTIONS_BIT18 (0x1 << 18)
++#else
++#define OPTIONS_BIT18 0x0
++#endif
++
++
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT19
++#define OPTIONS_BIT19 (0x1 << 19)
++#else
++#define OPTIONS_BIT19 0x0
++#endif
++
++#if defined(SUPPORT_SGX_PRIORITY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT20
++#define OPTIONS_BIT20 (0x1 << 20)
++#else
++#define OPTIONS_BIT20 0x0
++#endif
++
++#if defined(SGX_LOW_LATENCY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING_SET_OFFSET OPTIONS_BIT21
++#define OPTIONS_BIT21 (0x1 << 21)
++#else
++#define OPTIONS_BIT21 0x0
++#endif
++
++#if defined(USE_SUPPORT_NO_TA3D_OVERLAP) || defined (INTERNAL_TEST)
++#define USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET OPTIONS_BIT22
++#define OPTIONS_BIT22 (0x1 << 22)
++#else
++#define OPTIONS_BIT22 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define OPTIONS_HIGHBYTE ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET)
++#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL
++#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
++#else
++#define OPTIONS_HIGHBYTE 0x0
++#endif
++
++
++
++#define SGX_BUILD_OPTIONS \
++ OPTIONS_BIT0 |\
++ OPTIONS_BIT1 |\
++ OPTIONS_BIT2 |\
++ OPTIONS_BIT3 |\
++ OPTIONS_BIT4 |\
++ OPTIONS_BIT5 |\
++ OPTIONS_BIT6 |\
++ OPTIONS_BIT8 |\
++ OPTIONS_BIT9 |\
++ OPTIONS_BIT10 |\
++ OPTIONS_BIT11 |\
++ OPTIONS_BIT12 |\
++ OPTIONS_BIT13 |\
++ OPTIONS_BIT14 |\
++ OPTIONS_BIT15 |\
++ OPTIONS_BIT16 |\
++ OPTIONS_BIT17 |\
++ OPTIONS_BIT18 |\
++ OPTIONS_BIT19 |\
++ OPTIONS_BIT20 |\
++ OPTIONS_BIT21 |\
++ OPTIONS_HIGHBYTE
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/sgxapi_km.h
+@@ -0,0 +1,323 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "sgxdefs.h"
++
++#if defined(__linux__) && !defined(USE_CODE)
++ #if defined(__KERNEL__)
++ #include <asm/unistd.h>
++ #else
++ #include <unistd.h>
++ #endif
++#endif
++
++#define SGX_UNDEFINED_HEAP_ID (~0LU)
++#define SGX_GENERAL_HEAP_ID 0
++#define SGX_TADATA_HEAP_ID 1
++#define SGX_KERNEL_CODE_HEAP_ID 2
++#define SGX_KERNEL_DATA_HEAP_ID 3
++#define SGX_PIXELSHADER_HEAP_ID 4
++#define SGX_VERTEXSHADER_HEAP_ID 5
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7
++#define SGX_SYNCINFO_HEAP_ID 8
++#define SGX_3DPARAMETERS_HEAP_ID 9
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++#define SGX_GENERAL_MAPPING_HEAP_ID 10
++#endif
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_2D_HEAP_ID 11
++#else
++#if defined(FIX_HW_BRN_26915)
++#define SGX_CGBUFFER_HEAP_ID 12
++#endif
++#endif
++#define SGX_MAX_HEAP_ID 13
++
++
++#define SGX_MAX_TA_STATUS_VALS 32
++#define SGX_MAX_3D_STATUS_VALS 3
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++#define SGX_MAX_TA_DST_SYNCS 1
++#define SGX_MAX_TA_SRC_SYNCS 1
++#define SGX_MAX_3D_SRC_SYNCS 4
++#else
++#define SGX_MAX_SRC_SYNCS 4
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9
++
++#define PVRSRV_SGX_HWPERF_INVALID 0x1
++
++#define PVRSRV_SGX_HWPERF_TRANSFER 0x2
++#define PVRSRV_SGX_HWPERF_TA 0x3
++#define PVRSRV_SGX_HWPERF_3D 0x4
++#define PVRSRV_SGX_HWPERF_2D 0x5
++
++#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101
++#define PVRSRV_SGX_HWPERF_MK_TA 0x102
++#define PVRSRV_SGX_HWPERF_MK_3D 0x103
++#define PVRSRV_SGX_HWPERF_MK_2D 0x104
++
++#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28
++#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK ((1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_START (0UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_END (1Ul << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_START (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_END (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_START (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_END (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_START (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_END (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_OFF (0x0)
++#define PVRSRV_SGX_HWPERF_GRAPHICS_ON (1UL << 0)
++#define PVRSRV_SGX_HWPERF_MK_EXECUTION_ON (1UL << 1)
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CB_ENTRY_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32Ordinal;
++ IMG_UINT32 ui32Clocksx16;
++ IMG_UINT32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} PVRSRV_SGX_HWPERF_CB_ENTRY;
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CBDATA_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32StartTimeWraps;
++ IMG_UINT32 ui32StartTime;
++ IMG_UINT32 ui32EndTimeWraps;
++ IMG_UINT32 ui32EndTime;
++ IMG_UINT32 ui32ClockSpeed;
++ IMG_UINT32 ui32TimeMax;
++} PVRSRV_SGX_HWPERF_CBDATA;
++
++
++typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB
++{
++ PVRSRV_SGX_HWPERF_CBDATA* psHWPerfData;
++ IMG_UINT32 ui32ArraySize;
++ IMG_UINT32 ui32DataCount;
++ IMG_UINT32 ui32Time;
++} SGX_MISC_INFO_HWPERF_RETRIEVE_CB;
++#endif
++
++
++typedef struct _CTL_STATUS_
++{
++ IMG_DEV_VIRTADDR sStatusDevAddr;
++ IMG_UINT32 ui32StatusValue;
++} CTL_STATUS;
++
++
++typedef enum _SGX_MISC_INFO_REQUEST_
++{
++ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++ SGX_MISC_INFO_REQUEST_SGXREV,
++ SGX_MISC_INFO_REQUEST_DRIVER_SGXREV,
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ SGX_MISC_INFO_REQUEST_MEMREAD,
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++ SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++#endif
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_MISC_INFO_REQUEST_SET_BREAKPOINT,
++#endif
++ SGX_MISC_INFO_DUMP_DEBUG_INFO,
++ SGX_MISC_INFO_PANIC,
++ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff
++} SGX_MISC_INFO_REQUEST;
++
++
++typedef struct _PVRSRV_SGX_MISCINFO_FEATURES
++{
++ IMG_UINT32 ui32CoreRev;
++ IMG_UINT32 ui32CoreID;
++ IMG_UINT32 ui32DDKVersion;
++ IMG_UINT32 ui32DDKBuild;
++ IMG_UINT32 ui32CoreIdSW;
++ IMG_UINT32 ui32CoreRevSW;
++ IMG_UINT32 ui32BuildOptions;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_UINT32 ui32DeviceMemValue;
++#endif
++} PVRSRV_SGX_MISCINFO_FEATURES;
++
++
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++typedef struct _SGX_BREAKPOINT_INFO
++{
++
++ IMG_BOOL bBPEnable;
++
++
++
++ IMG_UINT32 ui32BPIndex;
++
++ IMG_DEV_VIRTADDR sBPDevVAddr;
++} SGX_BREAKPOINT_INFO;
++#endif
++
++typedef struct _SGX_MISC_INFO_
++{
++ SGX_MISC_INFO_REQUEST eRequest;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_HANDLE hDevMemContext;
++#endif
++ union
++ {
++ IMG_UINT32 reserved;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ IMG_UINT32 ui32SGXClockSpeed;
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_BREAKPOINT_INFO sSGXBreakpointInfo;
++#endif
++#ifdef SUPPORT_SGX_HWPERF
++ IMG_UINT32 ui32NewHWPerfStatus;
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB sRetrieveCB;
++#endif
++ } uData;
++} SGX_MISC_INFO;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_MAX_BLT_SRC_SYNCS 3
++#endif
++
++
++#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256
++
++typedef struct _SGX_KICKTA_DUMPBITMAP_
++{
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Stride;
++ IMG_UINT32 ui32PDUMPFormat;
++ IMG_UINT32 ui32BytesPP;
++ IMG_CHAR pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++} SGX_KICKTA_DUMPBITMAP, *PSGX_KICKTA_DUMPBITMAP;
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16)
++
++typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
++{
++
++ IMG_UINT32 ui32CacheControl;
++
++} PVRSRV_SGX_PDUMP_CONTEXT;
++
++
++typedef struct _SGX_KICKTA_DUMP_ROFF_
++{
++ IMG_HANDLE hKernelMemInfo;
++ IMG_UINT32 uiAllocIndex;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ IMG_PCHAR pszName;
++} SGX_KICKTA_DUMP_ROFF, *PSGX_KICKTA_DUMP_ROFF;
++
++typedef struct _SGX_KICKTA_DUMP_BUFFER_
++{
++ IMG_UINT32 ui32SpaceUsed;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32BufferSize;
++ IMG_UINT32 ui32BackEndLength;
++ IMG_UINT32 uiAllocIndex;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_PVOID pvLinAddr;
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ IMG_HANDLE hCtrlKernelMemInfo;
++ IMG_DEV_VIRTADDR sCtrlDevVAddr;
++#endif
++ IMG_PCHAR pszName;
++} SGX_KICKTA_DUMP_BUFFER, *PSGX_KICKTA_DUMP_BUFFER;
++
++#ifdef PDUMP
++typedef struct _SGX_KICKTA_PDUMP_
++{
++
++ PSGX_KICKTA_DUMPBITMAP psPDumpBitmapArray;
++ IMG_UINT32 ui32PDumpBitmapSize;
++
++
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray;
++ IMG_UINT32 ui32BufferArraySize;
++
++
++ PSGX_KICKTA_DUMP_ROFF psROffArray;
++ IMG_UINT32 ui32ROffArraySize;
++} SGX_KICKTA_PDUMP, *PSGX_KICKTA_PDUMP;
++#endif
++
++#if defined(TRANSFER_QUEUE)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_MAX_2D_BLIT_CMD_SIZE 26
++#define SGX_MAX_2D_SRC_SYNC_OPS 3
++#endif
++#define SGX_MAX_TRANSFER_STATUS_VALS 2
++#define SGX_MAX_TRANSFER_SYNC_OPS 5
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/include4/sgxscript.h
+@@ -0,0 +1,81 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_MAX_INIT_COMMANDS 64
++#define SGX_MAX_DEINIT_COMMANDS 16
++
++typedef enum _SGX_INIT_OPERATION
++{
++ SGX_INIT_OP_ILLEGAL = 0,
++ SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++ SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++ SGX_INIT_OP_HALT
++} SGX_INIT_OPERATION;
++
++typedef union _SGX_INIT_COMMAND
++{
++ SGX_INIT_OPERATION eOp;
++ struct {
++ SGX_INIT_OPERATION eOp;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ } sWriteHWReg;
++#if defined(PDUMP)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ } sPDumpHWReg;
++#endif
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ } sWorkaroundBRN22997;
++#endif
++} SGX_INIT_COMMAND;
++
++typedef struct _SGX_INIT_SCRIPTS_
++{
++ SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++} SGX_INIT_SCRIPTS;
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+@@ -0,0 +1,6 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++binary_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+@@ -0,0 +1,41 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++ifeq ($(SUPPORT_DRI_DRM),1)
++DISPLAY_CONTROLLER_SOURCES_ROOT = $(KBUILDROOT)/$(DISPLAY_CONTROLLER_DIR)
++else
++DISPLAY_CONTROLLER_SOURCES_ROOT = ..
++endif
++
++INCLUDES += -I$(EURASIAROOT)/include4 \
++ -I$(EURASIAROOT)/services4/include \
++ -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++ -I$(EURASIAROOT)/services4/system/include \
++ -I$(EURASIAROOT)/services4/srvkm/env/linux/mrst
++
++SOURCES += $(DISPLAY_CONTROLLER_SOURCES_ROOT)/mrstlfb_displayclass.c \
++ $(DISPLAY_CONTROLLER_SOURCES_ROOT)/mrstlfb_linux.c
++MODULE_CFLAGS += -DPVR_MRST_FB_SET_PAR_ON_INIT
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+@@ -0,0 +1,295 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __MRSTLFB_H__
++#define __MRSTLFB_H__
++
++#include <drm/drmP.h>
++#include "psb_intel_reg.h"
++
++#define MRST_USING_INTERRUPTS
++
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define PSB_INT_IDENTITY_R 0x20A4
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++
++/* IPC message and command defines used to enable/disable mipi panel voltages */
++#define IPC_MSG_PANEL_ON_OFF 0xE9
++#define IPC_CMD_PANEL_ON 1
++#define IPC_CMD_PANEL_OFF 0
++
++typedef void * MRST_HANDLE;
++
++typedef enum tag_mrst_bool
++{
++ MRST_FALSE = 0,
++ MRST_TRUE = 1,
++} MRST_BOOL, *MRST_PBOOL;
++
++typedef IMG_INT (* MRSTLFB_VSYNC_ISR_PFN)(struct drm_device* psDrmDevice, int iPipe);
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++
++
++typedef struct MRSTLFB_BUFFER_TAG
++{
++
++ IMG_UINT32 ui32BufferSize;
++ union {
++
++ IMG_SYS_PHYADDR *psNonCont;
++
++ IMG_SYS_PHYADDR sCont;
++ } uSysAddr;
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++ IMG_CPU_VIRTADDR sCPUVAddr;
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ IMG_BOOL bIsContiguous;
++
++ IMG_BOOL bIsAllocated;
++
++ IMG_UINT32 ui32OwnerTaskID;
++
++ struct MRSTLFB_BUFFER_TAG *psNext;
++} MRSTLFB_BUFFER;
++
++typedef struct MRSTLFB_VSYNC_FLIP_ITEM_TAG
++{
++
++
++
++ MRST_HANDLE hCmdComplete;
++
++ unsigned long ulSwapInterval;
++
++ MRST_BOOL bValid;
++
++ MRST_BOOL bFlipped;
++
++ MRST_BOOL bCmdCompleted;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++} MRSTLFB_VSYNC_FLIP_ITEM;
++
++typedef struct MRSTLFB_SWAPCHAIN_TAG
++{
++
++ unsigned long ulBufferCount;
++
++ MRSTLFB_BUFFER **ppsBuffer;
++
++ MRSTLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++
++
++ unsigned long ulInsertIndex;
++
++
++ unsigned long ulRemoveIndex;
++
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable;
++
++
++ MRST_BOOL bFlushCommands;
++
++
++ unsigned long ulSetFlushStateRefCount;
++
++
++ MRST_BOOL bBlanked;
++
++
++ spinlock_t *psSwapChainLock;
++
++
++ struct drm_driver *psDrmDriver;
++
++
++ struct drm_device *psDrmDev;
++
++ struct MRSTLFB_SWAPCHAIN_TAG *psNext;
++
++ struct MRSTLFB_DEVINFO_TAG *psDevInfo;
++
++} MRSTLFB_SWAPCHAIN;
++
++typedef struct MRSTLFB_FBINFO_TAG
++{
++ unsigned long ulFBSize;
++ unsigned long ulBufferSize;
++ unsigned long ulRoundedBufferSize;
++ unsigned long ulWidth;
++ unsigned long ulHeight;
++ unsigned long ulByteStride;
++
++
++
++ IMG_SYS_PHYADDR sSysAddr;
++ IMG_CPU_VIRTADDR sCPUVAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ PVRSRV_PIXEL_FORMAT ePixelFormat;
++}MRSTLFB_FBINFO;
++
++/**
++ * If DRI is enabled then extending drm_device
++ */
++typedef struct MRSTLFB_DEVINFO_TAG
++{
++ unsigned long ulDeviceID;
++
++ struct drm_device *psDrmDevice;
++
++ MRSTLFB_BUFFER sSystemBuffer;
++
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable;
++
++
++ PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable;
++
++
++ unsigned long ulRefCount;
++
++
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++
++ IMG_UINT32 ui32SwapChainNum;
++
++ IMG_UINT32 ui32SwapChainIdCounter;
++
++
++ void *pvRegs;
++
++
++ MRST_BOOL bFlushCommands;
++
++
++ struct fb_info *psLINFBInfo;
++
++
++ struct notifier_block sLINNotifBlock;
++
++
++ MRST_BOOL bDeviceSuspended;
++
++
++ spinlock_t sSwapChainLock;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sDisplayDevVAddr;
++
++ DISPLAY_INFO sDisplayInfo;
++
++
++ DISPLAY_FORMAT sDisplayFormat;
++
++
++ DISPLAY_DIMS sDisplayDim;
++
++ IMG_UINT32 ui32MainPipe;
++
++} MRSTLFB_DEVINFO;
++
++#if 0
++#define MRSTLFB_PAGE_SIZE 4096
++#define MRSTLFB_PAGE_MASK (MRSTLFB_PAGE_SIZE - 1)
++#define MRSTLFB_PAGE_TRUNC (~MRSTLFB_PAGE_MASK)
++
++#define MRSTLFB_PAGE_ROUNDUP(x) (((x) + MRSTLFB_PAGE_MASK) & MRSTLFB_PAGE_TRUNC)
++#endif
++
++#ifdef DEBUG
++#define DEBUG_PRINTK(x) printk x
++#else
++#define DEBUG_PRINTK(x)
++#endif
++
++#define DISPLAY_DEVICE_NAME "PowerVR Moorestown Linux Display Driver"
++#define DRVNAME "mrstlfb"
++#define DEVNAME DRVNAME
++#define DRIVER_PREFIX DRVNAME
++
++typedef enum _MRST_ERROR_
++{
++ MRST_OK = 0,
++ MRST_ERROR_GENERIC = 1,
++ MRST_ERROR_OUT_OF_MEMORY = 2,
++ MRST_ERROR_TOO_FEW_BUFFERS = 3,
++ MRST_ERROR_INVALID_PARAMS = 4,
++ MRST_ERROR_INIT_FAILURE = 5,
++ MRST_ERROR_CANT_REGISTER_CALLBACK = 6,
++ MRST_ERROR_INVALID_DEVICE = 7,
++ MRST_ERROR_DEVICE_REGISTER_FAILED = 8
++} MRST_ERROR;
++
++
++#ifndef UNREFERENCED_PARAMETER
++#define UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++MRST_ERROR MRSTLFBInit(struct drm_device * dev);
++MRST_ERROR MRSTLFBDeinit(void);
++
++MRST_ERROR MRSTLFBAllocBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, IMG_UINT32 ui32Size, MRSTLFB_BUFFER **ppBuffer);
++MRST_ERROR MRSTLFBFreeBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, MRSTLFB_BUFFER **ppBuffer);
++
++void *MRSTLFBAllocKernelMem(unsigned long ulSize);
++void MRSTLFBFreeKernelMem(void *pvMem);
++MRST_ERROR MRSTLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
++MRST_ERROR MRSTLFBInstallVSyncISR (MRSTLFB_DEVINFO *psDevInfo, MRSTLFB_VSYNC_ISR_PFN pVsyncHandler);
++MRST_ERROR MRSTLFBUninstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo);
++MRST_BOOL MRSTLFBVSyncIHandler(MRSTLFB_SWAPCHAIN *psSwapChain);
++
++void MRSTLFBEnableVSyncInterrupt(MRSTLFB_DEVINFO *psDevInfo);
++void MRSTLFBDisableVSyncInterrupt(MRSTLFB_DEVINFO *psDevInfo);
++
++void MRSTLFBEnableDisplayRegisterAccess(void);
++void MRSTLFBDisableDisplayRegisterAccess(void);
++
++void MRSTLFBFlip(MRSTLFB_DEVINFO *psDevInfo, unsigned long uiAddr);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+@@ -0,0 +1,2092 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++#include <asm/ipc_defs.h>
++#else
++#include <asm/intel_scu_ipc.h>
++#endif
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "mrstlfb.h"
++
++#include "psb_fb.h"
++#include "psb_drv.h"
++#include "psb_powermgmt.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++#error "SUPPORT_DRI_DRM must be set"
++#endif
++
++IMG_UINT32 gui32MRSTDisplayDeviceID;
++
++extern void MRSTLFBVSyncWriteReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset, unsigned long ulValue);
++extern unsigned long MRSTLFBVSyncReadReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset);
++
++PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#ifdef MODESET_640x480
++extern int psb_to_640 (struct fb_info* info);
++#endif
++
++extern void mrst_init_LGE_MIPI(struct drm_device *dev);
++extern void mrst_init_NSC_MIPI_bridge(struct drm_device *dev);
++
++struct psbfb_par {
++ struct drm_device *dev;
++ void *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++
++ uint32_t crtc_ids[2];
++};
++
++extern void* psbfb_vdc_reg(struct drm_device* dev);
++
++static void *gpvAnchor;
++
++
++#define MRSTLFB_COMMAND_COUNT 1
++
++static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = 0;
++
++static MRSTLFB_DEVINFO * GetAnchorPtr(void)
++{
++ return (MRSTLFB_DEVINFO *)gpvAnchor;
++}
++
++static void SetAnchorPtr(MRSTLFB_DEVINFO *psDevInfo)
++{
++ gpvAnchor = (void*)psDevInfo;
++}
++
++
++static void FlushInternalVSyncQueue(MRSTLFB_SWAPCHAIN *psSwapChain)
++{
++ MRSTLFB_VSYNC_FLIP_ITEM *psFlipItem;
++ unsigned long ulMaxIndex;
++ unsigned long i;
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ for(i = 0; i < psSwapChain->ulBufferCount; i++)
++ {
++ if (psFlipItem->bValid == MRST_FALSE)
++ {
++ continue;
++ }
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Flushing swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
++
++ if(psFlipItem->bFlipped == MRST_FALSE)
++ {
++
++ MRSTLFBFlip(psSwapChain->psDevInfo, (unsigned long)psFlipItem->sDevVAddr.uiAddr);
++ }
++
++ if(psFlipItem->bCmdCompleted == MRST_FALSE)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Calling command complete for swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);
++ }
++
++
++ psSwapChain->ulRemoveIndex++;
++
++ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
++ {
++ psSwapChain->ulRemoveIndex = 0;
++ }
++
++
++ psFlipItem->bFlipped = MRST_FALSE;
++ psFlipItem->bCmdCompleted = MRST_FALSE;
++ psFlipItem->bValid = MRST_FALSE;
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ }
++
++ psSwapChain->ulInsertIndex = 0;
++ psSwapChain->ulRemoveIndex = 0;
++}
++
++static void SetFlushStateInternalNoLock(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ MRSTLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
++
++ if (psSwapChain == NULL)
++ {
++ return;
++ }
++
++ if (bFlushState)
++ {
++ if (psSwapChain->ulSetFlushStateRefCount == 0)
++ {
++ MRSTLFBDisableVSyncInterrupt(psDevInfo);
++ psSwapChain->bFlushCommands = MRST_TRUE;
++ FlushInternalVSyncQueue(psSwapChain);
++ }
++ psSwapChain->ulSetFlushStateRefCount++;
++ }
++ else
++ {
++ if (psSwapChain->ulSetFlushStateRefCount != 0)
++ {
++ psSwapChain->ulSetFlushStateRefCount--;
++ if (psSwapChain->ulSetFlushStateRefCount == 0)
++ {
++ psSwapChain->bFlushCommands = MRST_FALSE;
++ MRSTLFBEnableVSyncInterrupt(psDevInfo);
++ }
++ }
++ }
++}
++
++static IMG_VOID SetFlushStateInternal(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++}
++
++static void SetFlushStateExternal(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ if (psDevInfo->bFlushCommands != bFlushState)
++ {
++ psDevInfo->bFlushCommands = bFlushState;
++ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++}
++
++static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
++{
++ MRSTLFB_DEVINFO *psDevInfo = (MRSTLFB_DEVINFO *)hDevice;
++
++ switch (ui32State)
++ {
++ case DC_STATE_FLUSH_COMMANDS:
++ SetFlushStateExternal(psDevInfo, MRST_TRUE);
++ break;
++ case DC_STATE_NO_FLUSH_COMMANDS:
++ SetFlushStateExternal(psDevInfo, MRST_FALSE);
++ break;
++ default:
++ break;
++ }
++
++ return;
++}
++
++static int FrameBufferEvents(struct notifier_block *psNotif,
++ unsigned long event, void *data)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ struct fb_event *psFBEvent = (struct fb_event *)data;
++ MRST_BOOL bBlanked;
++
++
++ if (event != FB_EVENT_BLANK)
++ {
++ return 0;
++ }
++
++ psDevInfo = GetAnchorPtr();
++ psSwapChain = psDevInfo->psSwapChain;
++
++ bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? MRST_TRUE: MRST_FALSE;
++
++ if (bBlanked != psSwapChain->bBlanked)
++ {
++ psSwapChain->bBlanked = bBlanked;
++
++ if (bBlanked)
++ {
++
++ SetFlushStateInternal(psDevInfo, MRST_TRUE);
++ }
++ else
++ {
++
++ SetFlushStateInternal(psDevInfo, MRST_FALSE);
++ }
++ }
++
++ return 0;
++}
++
++
++static MRST_ERROR UnblankDisplay(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++ acquire_console_sem();
++ res = fb_blank(psDevInfo->psLINFBInfo, 0);
++ release_console_sem();
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_blank failed (%d)", res);
++ return (MRST_ERROR_GENERIC);
++ }
++
++ return (MRST_OK);
++}
++
++static MRST_ERROR EnableLFBEventNotification(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++ MRST_ERROR eError;
++
++
++ memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
++
++ psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
++
++ res = fb_register_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_register_client failed (%d)", res);
++
++ return (MRST_ERROR_GENERIC);
++ }
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return eError;
++ }
++
++ return (MRST_OK);
++}
++
++static MRST_ERROR DisableLFBEventNotification(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++
++ res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_unregister_client failed (%d)", res);
++ return (MRST_ERROR_GENERIC);
++ }
++
++ return (MRST_OK);
++}
++
++static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE *phDevice,
++ PVRSRV_SYNC_DATA* psSystemBufferSyncData)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRST_ERROR eError;
++
++ UNREFERENCED_PARAMETER(ui32DeviceID);
++
++ psDevInfo = GetAnchorPtr();
++
++
++ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++
++ *phDevice = (IMG_HANDLE)psDevInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32NumFormats,
++ DISPLAY_FORMAT *psFormat)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !pui32NumFormats)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *pui32NumFormats = 1;
++
++ if(psFormat)
++ {
++ psFormat[0] = psDevInfo->sDisplayFormat;
++ }
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32NumDims,
++ DISPLAY_DIMS *psDim)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !psFormat || !pui32NumDims)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *pui32NumDims = 1;
++
++
++ if(psDim)
++ {
++ psDim[0] = psDevInfo->sDisplayDim;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !phBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++
++ *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !psDCInfo)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *psDCInfo = psDevInfo->sDisplayInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_SYS_PHYADDR **ppsSysAddr,
++ IMG_UINT32 *pui32ByteSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMapInfo,
++ IMG_BOOL *pbIsContiguous)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_BUFFER *psSystemBuffer;
++
++ if(!hDevice)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ if(!hBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ psSystemBuffer = (MRSTLFB_BUFFER *)hBuffer;
++
++ if (!ppsSysAddr)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ if( psSystemBuffer->bIsContiguous )
++ *ppsSysAddr = &psSystemBuffer->uSysAddr.sCont;
++ else
++ *ppsSysAddr = psSystemBuffer->uSysAddr.psNonCont;
++
++ if (!pui32ByteSize)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ *pui32ByteSize = psSystemBuffer->ui32BufferSize;
++
++ if (ppvCpuVAddr)
++ {
++ *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
++ }
++
++ if (phOSMapInfo)
++ {
++ *phOSMapInfo = (IMG_HANDLE)0;
++ }
++
++ if (pbIsContiguous)
++ {
++ *pbIsContiguous = psSystemBuffer->bIsContiguous;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++static MRST_ERROR MRSTLFBEnableSwapChains(MRSTLFB_DEVINFO *psDevInfo)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ if(!psDevInfo->bFlushCommands)
++ MRSTLFBEnableVSyncInterrupt(psDevInfo);
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ if (EnableLFBEventNotification(psDevInfo)!= MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't enable framebuffer event notification\n");
++ }
++
++ return MRST_OK;
++}
++
++static MRST_ERROR MRSTLFBDisableSwapChains(MRSTLFB_DEVINFO *psDevInfo)
++{
++ MRST_ERROR eError;
++ unsigned long ulLockFlags;
++
++ eError = DisableLFBEventNotification(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't disable framebuffer event notification\n");
++ }
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ MRSTLFBDisableVSyncInterrupt(psDevInfo);
++
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psDevInfo->sSystemBuffer.sDevVAddr.uiAddr);
++
++ psDevInfo->psSwapChain = NULL;
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return MRST_OK;
++}
++
++
++static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ PVRSRV_SYNC_DATA **ppsSyncData,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChain,
++ IMG_UINT32 *pui32SwapChainID)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ MRSTLFB_BUFFER **ppsBuffer;
++ MRSTLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED;
++ unsigned long ulLockFlags;
++ struct drm_device* psDrmDev;
++
++ UNREFERENCED_PARAMETER(ui32OEMFlags);
++
++
++ if(!hDevice
++ || !psDstSurfAttrib
++ || !psSrcSurfAttrib
++ || !ppsSyncData
++ || !phSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++ if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
++ {
++ return (PVRSRV_ERROR_TOOMANYBUFFERS);
++ }
++
++
++
++
++
++ if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
++ || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
++ || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
++ || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
++ {
++
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
++ || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
++ || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
++ || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
++ {
++
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++
++ UNREFERENCED_PARAMETER(ui32Flags);
++
++
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_SWAPCHAIN));
++ if(!psSwapChain)
++ {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ ppsBuffer = (MRSTLFB_BUFFER**)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_BUFFER*) * ui32BufferCount);
++ if(!ppsBuffer)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeSwapChain;
++ }
++
++ psVSyncFlips = (MRSTLFB_VSYNC_FLIP_ITEM *)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
++ if (!psVSyncFlips)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeBuffers;
++ }
++
++ psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
++ psSwapChain->ppsBuffer = ppsBuffer;
++ psSwapChain->psVSyncFlips = psVSyncFlips;
++ psSwapChain->ulInsertIndex = 0;
++ psSwapChain->ulRemoveIndex = 0;
++ psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
++ psSwapChain->psSwapChainLock = &psDevInfo->sSwapChainLock;
++
++
++
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ unsigned long bufSize = psDevInfo->sDisplayDim.ui32ByteStride * psDevInfo->sDisplayDim.ui32Height;
++ MRSTLFBAllocBuffer(psDevInfo, bufSize, &ppsBuffer[i]);
++ ppsBuffer[i]->psSyncData = ppsSyncData[i];
++ }
++
++
++ for(i=0; i<ui32BufferCount-1; i++)
++ {
++ ppsBuffer[i]->psNext = ppsBuffer[i+1];
++ }
++
++ ppsBuffer[i]->psNext = ppsBuffer[0];
++
++
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ psVSyncFlips[i].bValid = MRST_FALSE;
++ psVSyncFlips[i].bFlipped = MRST_FALSE;
++ psVSyncFlips[i].bCmdCompleted = MRST_FALSE;
++ }
++
++
++ psDrmDev = psDevInfo->psDrmDevice;
++
++ psSwapChain->psDevInfo = psDevInfo;
++ psSwapChain->psDrmDev = psDrmDev;
++ psSwapChain->psDrmDriver = psDrmDev->driver;
++ psSwapChain->bBlanked = MRST_FALSE;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ psSwapChain->bFlushCommands = psDevInfo->bFlushCommands;
++
++ if (psSwapChain->bFlushCommands)
++ {
++ psSwapChain->ulSetFlushStateRefCount = 1;
++ }
++ else
++ {
++ psSwapChain->ulSetFlushStateRefCount = 0;
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++
++
++
++
++
++ *phSwapChain = (IMG_HANDLE)psSwapChain;
++ *pui32SwapChainID = ++psDevInfo->ui32SwapChainIdCounter;
++ psDevInfo->psSwapChain = psSwapChain;
++
++ if( psDevInfo->ui32SwapChainNum++ == 0)
++ {
++ MRSTLFBEnableSwapChains( psDevInfo );
++ }
++
++ return (PVRSRV_OK);
++
++
++ MRSTLFBFreeKernelMem(psVSyncFlips);
++ErrorFreeBuffers:
++ MRSTLFBFreeKernelMem(ppsBuffer);
++ErrorFreeSwapChain:
++ MRSTLFBFreeKernelMem(psSwapChain);
++
++ return eError;
++}
++
++static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ int i;
++
++
++ if(!hDevice || !hSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++
++
++ FlushInternalVSyncQueue(psSwapChain);
++
++
++ if(--psDevInfo->ui32SwapChainNum == 0)
++ {
++ MRSTLFBDisableSwapChains(psDevInfo);
++ }
++
++ if( psDevInfo->psSwapChain == psSwapChain )
++ psDevInfo->psSwapChain = IMG_NULL;
++
++
++
++ for(i=0; i< psSwapChain->ulBufferCount; i++)
++ {
++ MRSTLFBFreeBuffer(psDevInfo, &psSwapChain->ppsBuffer[i] );
++ }
++ MRSTLFBFreeKernelMem(psSwapChain->psVSyncFlips);
++ MRSTLFBFreeKernelMem(psSwapChain->ppsBuffer);
++ MRSTLFBFreeKernelMem(psSwapChain);
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(psRect);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(psRect);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(ui32CKColour);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(ui32CKColour);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ unsigned long i;
++
++
++ if(!hDevice
++ || !hSwapChain
++ || !pui32BufferCount
++ || !phBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++
++ *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
++
++
++ for(i=0; i<psSwapChain->ulBufferCount; i++)
++ {
++ phBuffer[i] = (IMG_HANDLE)psSwapChain->ppsBuffer[i];
++ }
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ UNREFERENCED_PARAMETER(ui32SwapInterval);
++ UNREFERENCED_PARAMETER(hPrivateTag);
++ UNREFERENCED_PARAMETER(psClipRect);
++
++ if(!hDevice
++ || !hBuffer
++ || (ui32ClipRectCount != 0))
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ unsigned long ulLockFlags;
++
++ if(!hDevice || !hSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ FlushInternalVSyncQueue(psSwapChain);
++
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)(psDevInfo->sSystemBuffer.sDevVAddr.uiAddr));
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ return (PVRSRV_OK);
++}
++
++MRST_BOOL MRSTLFBVSyncIHandler(MRSTLFB_SWAPCHAIN *psSwapChain)
++{
++ IMG_BOOL bStatus = IMG_TRUE;
++ MRSTLFB_VSYNC_FLIP_ITEM *psFlipItem;
++ unsigned long ulMaxIndex;
++ unsigned long ulLockFlags;
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ spin_lock_irqsave(psSwapChain->psSwapChainLock, ulLockFlags);
++
++
++ if (psSwapChain->bFlushCommands)
++ {
++ goto ExitUnlock;
++ }
++
++ while(psFlipItem->bValid)
++ {
++
++ if(psFlipItem->bFlipped)
++ {
++
++ if(!psFlipItem->bCmdCompleted)
++ {
++
++ IMG_BOOL bScheduleMISR;
++
++ bScheduleMISR = IMG_TRUE;
++
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, bScheduleMISR);
++
++
++ psFlipItem->bCmdCompleted = MRST_TRUE;
++ }
++
++
++ psFlipItem->ulSwapInterval--;
++
++
++ if(psFlipItem->ulSwapInterval == 0)
++ {
++
++ psSwapChain->ulRemoveIndex++;
++
++ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
++ {
++ psSwapChain->ulRemoveIndex = 0;
++ }
++
++
++ psFlipItem->bCmdCompleted = MRST_FALSE;
++ psFlipItem->bFlipped = MRST_FALSE;
++
++
++ psFlipItem->bValid = MRST_FALSE;
++ }
++ else
++ {
++
++ break;
++ }
++ }
++ else
++ {
++
++ MRSTLFBFlip(psSwapChain->psDevInfo, (unsigned long)psFlipItem->sDevVAddr.uiAddr);
++
++
++ psFlipItem->bFlipped = MRST_TRUE;
++
++
++ break;
++ }
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ }
++
++ExitUnlock:
++ spin_unlock_irqrestore(psSwapChain->psSwapChainLock, ulLockFlags);
++
++ return bStatus;
++}
++
++#if defined(MRST_USING_INTERRUPTS)
++static int
++MRSTLFBVSyncISR(struct drm_device *psDrmDevice, int iPipe)
++{
++ MRSTLFB_DEVINFO *psDevInfo = GetAnchorPtr();
++
++
++ if(!psDevInfo->psSwapChain)
++ {
++ return (IMG_TRUE);
++ }
++
++ (void) MRSTLFBVSyncIHandler(psDevInfo->psSwapChain);
++ return 0;
++}
++#endif
++
++#if defined(MRST_USING_INTERRUPTS)
++static IMG_BOOL
++MRSTLFBISRHandler(IMG_VOID* pvDevInfo)
++{
++ MRSTLFB_DEVINFO *psDevInfo = (MRSTLFB_DEVINFO *)pvDevInfo;
++#if 0
++#ifdef MRST_USING_INTERRUPTS
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++#endif
++#endif
++ unsigned long vdc_stat;
++ struct drm_psb_private *dev_priv;
++#if defined(SUPPORT_DRI_DRM)
++ uint32_t pipea_stat = 0;
++#endif
++
++ if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but Display HW is power off\n");
++ return IMG_FALSE;
++ }
++
++#if defined(SUPPORT_DRI_DRM)
++ dev_priv = (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ //write back to clear all interrupt status bits and reset interrupts.
++ PSB_WVDC32(pipea_stat, PIPEASTAT);
++
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++ {
++ drm_handle_vblank(psDevInfo->psDrmDevice, 0);
++ }
++#endif
++
++/* Use drm_handle_vblank() as the VSync handler, otherwise kernel would panic if handle
++ * the VSync event again. */
++#if 0
++#ifdef MRST_USING_INTERRUPTS
++
++ psSwapChain = psDevInfo->psSwapChain;
++ vdc_stat = MRSTLFBVSyncReadReg(psDevInfo, PSB_INT_IDENTITY_R);
++
++ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++ {
++ if(!psDevInfo->psSwapChain)
++ {
++ psSwapChain = psDevInfo->psSwapChain;
++ (void) MRSTLFBVSyncIHandler(psSwapChain);
++ }
++ }
++#endif
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ if (vdc_stat & _PSB_DPST_PIPEA_FLAG) {
++
++ /* Check for DPST related interrupts */
++ if((pipea_stat & PIPE_DPST_EVENT_STATUS) &&
++ (dev_priv->psb_dpst_state != NULL)) {
++ uint32_t pwm_reg = 0;
++ uint32_t hist_reg = 0;
++ u32 irqCtrl = 0;
++ struct dpst_guardband guardband_reg;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ /* Determine if this is histogram or pwm interrupt */
++ if(hist_reg & HISTOGRAM_INT_CTRL_CLEAR) {
++ /* Notify UM of histogram interrupt */
++ psb_dpst_notify_change_um(DPST_EVENT_HIST_INTERRUPT,
++ dev_priv->psb_dpst_state);
++
++ /* disable dpst interrupts */
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 0;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_histogram_enable = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
++ PSB_WVDC32(irqCtrl, PIPEASTAT);
++ }
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ if((pwm_reg & PWM_PHASEIN_INT_ENABLE) &&
++ !(pwm_reg & PWM_PHASEIN_ENABLE)) {
++ /* Notify UM of the phase complete */
++ psb_dpst_notify_change_um(DPST_EVENT_PHASE_COMPLETE,
++ dev_priv->psb_dpst_state);
++
++ /* Temporarily get phase mngr ready to generate
++ * another interrupt until this can be moved to
++ * user mode */
++ /* PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
++ PWM_CONTROL_LOGIC); */
++ }
++ }
++ }
++#endif
++ return IMG_TRUE;
++}
++#endif
++
++static IMG_BOOL ProcessFlip(IMG_HANDLE hCmdCookie,
++ IMG_UINT32 ui32DataSize,
++ IMG_VOID *pvData)
++{
++ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_BUFFER *psBuffer;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++#if 0//defined(MRST_USING_INTERRUPTS)
++ MRSTLFB_VSYNC_FLIP_ITEM* psFlipItem;
++#endif
++ unsigned long ulLockFlags;
++
++
++ if(!hCmdCookie || !pvData)
++ {
++ return IMG_FALSE;
++ }
++
++
++ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
++
++ if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
++ {
++ return IMG_FALSE;
++ }
++
++
++ psDevInfo = (MRSTLFB_DEVINFO*)psFlipCmd->hExtDevice;
++
++ psBuffer = (MRSTLFB_BUFFER*)psFlipCmd->hExtBuffer;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++
++ if (psDevInfo->bDeviceSuspended)
++ {
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++ goto ExitTrueUnlock;
++ }
++
++#if 0 //defined(MRST_USING_INTERRUPTS)
++
++ if(psFlipCmd->ui32SwapInterval == 0 || psSwapChain->bFlushCommands == MRST_TRUE || psBuffer == &psDevInfo->sSystemBuffer)
++ {
++#endif
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psBuffer->sDevVAddr.uiAddr);
++
++
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++
++#if 0 //defined(MRST_USING_INTERRUPTS)
++ goto ExitTrueUnlock;
++ }
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulInsertIndex];
++
++
++ if(psFlipItem->bValid == MRST_FALSE)
++ {
++ unsigned long ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ if(psSwapChain->ulInsertIndex == psSwapChain->ulRemoveIndex)
++ {
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psBuffer->sDevVAddr.uiAddr);
++
++ psFlipItem->bFlipped = MRST_TRUE;
++ }
++ else
++ {
++ psFlipItem->bFlipped = MRST_FALSE;
++ }
++
++ psFlipItem->hCmdComplete = (MRST_HANDLE)hCmdCookie;
++ psFlipItem->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
++ psFlipItem->sDevVAddr = psBuffer->sDevVAddr;
++ psFlipItem->bValid = MRST_TRUE;
++
++ psSwapChain->ulInsertIndex++;
++ if(psSwapChain->ulInsertIndex > ulMaxIndex)
++ {
++ psSwapChain->ulInsertIndex = 0;
++ }
++
++ goto ExitTrueUnlock;
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return IMG_FALSE;
++#endif
++
++ExitTrueUnlock:
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return IMG_TRUE;
++}
++
++
++#if defined(PVR_MRST_FB_SET_PAR_ON_INIT)
++static void MRSTFBSetPar(struct fb_info *psLINFBInfo)
++{
++ acquire_console_sem();
++
++ if (psLINFBInfo->fbops->fb_set_par != NULL)
++ {
++ int res;
++
++ res = psLINFBInfo->fbops->fb_set_par(psLINFBInfo);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_set_par failed: %d\n", res);
++
++ }
++ }
++ else
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_set_par not set - HW cursor may not work\n");
++ }
++
++ release_console_sem();
++}
++#endif
++
++
++static int MRSTLFBHandleChangeFB(struct drm_device* dev, struct psb_framebuffer *psbfb)
++{
++ MRSTLFB_DEVINFO *psDevInfo = GetAnchorPtr();
++ int i;
++ struct drm_psb_private * dev_priv;
++ struct psb_gtt * pg;
++
++ if( !psDevInfo->sSystemBuffer.bIsContiguous )
++ MRSTLFBFreeKernelMem( psDevInfo->sSystemBuffer.uSysAddr.psNonCont );
++
++ dev_priv = (struct drm_psb_private *)dev->dev_private;
++ pg = dev_priv->pg;
++
++
++ psDevInfo->sDisplayDim.ui32ByteStride = psbfb->base.pitch;
++ psDevInfo->sDisplayDim.ui32Width = psbfb->base.width;
++ psDevInfo->sDisplayDim.ui32Height = psbfb->base.height;
++
++ psDevInfo->sSystemBuffer.ui32BufferSize = psbfb->size;
++ //psDevInfo->sSystemBuffer.sCPUVAddr = psbfb->pvKMAddr;
++ psDevInfo->sSystemBuffer.sCPUVAddr = pg->vram_addr;
++ //psDevInfo->sSystemBuffer.sDevVAddr.uiAddr = psbfb->offsetGTT;
++ psDevInfo->sSystemBuffer.sDevVAddr.uiAddr = 0;
++ psDevInfo->sSystemBuffer.bIsAllocated = IMG_FALSE;
++
++ if(psbfb->bo )
++ {
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_FALSE;
++ psDevInfo->sSystemBuffer.uSysAddr.psNonCont = MRSTLFBAllocKernelMem( sizeof( IMG_SYS_PHYADDR ) * psbfb->bo->ttm->num_pages);
++ for(i = 0;i < psbfb->bo->ttm->num_pages;++i)
++ {
++ struct page *p = ttm_tt_get_page( psbfb->bo->ttm, i);
++ psDevInfo->sSystemBuffer.uSysAddr.psNonCont[i].uiAddr = page_to_pfn(p) << PAGE_SHIFT;
++
++ }
++ }
++ else
++ {
++
++ //struct drm_device * psDrmDevice = psDevInfo->psDrmDevice;
++ //struct drm_psb_private * dev_priv = (struct drm_psb_private *)psDrmDevice->dev_private;
++ //struct psb_gtt * pg = dev_priv->pg;
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_TRUE;
++ psDevInfo->sSystemBuffer.uSysAddr.sCont.uiAddr = pg->stolen_base;
++ }
++
++ return 0;
++}
++
++static int MRSTLFBFindMainPipe(struct drm_device *dev) {
++ struct drm_crtc *crtc;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++ {
++ if ( drm_helper_crtc_in_use(crtc) )
++ {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ return psb_intel_crtc->pipe;
++ }
++ }
++
++ return 0;
++}
++
++static MRST_ERROR InitDev(MRSTLFB_DEVINFO *psDevInfo)
++{
++ MRST_ERROR eError = MRST_ERROR_GENERIC;
++ struct fb_info *psLINFBInfo;
++ struct drm_device * psDrmDevice = psDevInfo->psDrmDevice;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ struct drm_psb_private * psDrmPrivate = (struct drm_psb_private *)psDrmDevice->dev_private;
++ struct psb_fbdev * psPsbFBDev = (struct psb_fbdev *)psDrmPrivate->fbdev;
++#endif
++ struct drm_framebuffer * psDrmFB;
++ struct psb_framebuffer *psbfb;
++
++
++ int hdisplay;
++ int vdisplay;
++
++ unsigned long FBSize;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ psDrmFB = psPsbFBDev->psb_fb_helper.fb;
++#else
++ psDrmFB = list_first_entry(&psDrmDevice->mode_config.fb_kernel_list,
++ struct drm_framebuffer,
++ filp_head);
++#endif
++ if(!psDrmFB) {
++ printk(KERN_INFO"%s:Cannot find drm FB", __FUNCTION__);
++ return eError;
++ }
++ psbfb = to_psb_fb(psDrmFB);
++
++ hdisplay = psDrmFB->width;
++ vdisplay = psDrmFB->height;
++ FBSize = psDrmFB->pitch * psDrmFB->height;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ psLINFBInfo = (struct fb_info*)psPsbFBDev->psb_fb_helper.fbdev;
++#else
++ psLINFBInfo = (struct fb_info*)psDrmFB->fbdev;
++#endif
++
++#if defined(PVR_MRST_FB_SET_PAR_ON_INIT)
++ MRSTFBSetPar(psLINFBInfo);
++#endif
++
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_TRUE;
++ psDevInfo->sSystemBuffer.bIsAllocated = IMG_FALSE;
++
++ MRSTLFBHandleChangeFB(psDrmDevice, psbfb);
++
++
++ psDevInfo->sDisplayFormat.pixelformat = PVRSRV_PIXEL_FORMAT_ARGB8888;
++ psDevInfo->psLINFBInfo = psLINFBInfo;
++
++
++ psDevInfo->ui32MainPipe = MRSTLFBFindMainPipe(psDevInfo->psDrmDevice);
++
++
++
++
++ psDevInfo->pvRegs = psbfb_vdc_reg(psDevInfo->psDrmDevice);
++
++ if (psDevInfo->pvRegs == NULL)
++ {
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
++ return eError;
++ }
++
++ return MRST_OK;
++}
++
++static void DeInitDev(MRSTLFB_DEVINFO *psDevInfo)
++{
++
++}
++
++MRST_ERROR MRSTLFBInit(struct drm_device * dev)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ //struct drm_psb_private *psDrmPriv = (struct drm_psb_private *)dev->dev_private;
++
++ psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL)
++ {
++ PFN_CMD_PROC pfnCmdProcList[MRSTLFB_COMMAND_COUNT];
++ IMG_UINT32 aui32SyncCountList[MRSTLFB_COMMAND_COUNT][2];
++
++ psDevInfo = (MRSTLFB_DEVINFO *)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_DEVINFO));
++
++ if(!psDevInfo)
++ {
++ return (MRST_ERROR_OUT_OF_MEMORY);
++ }
++
++
++ memset(psDevInfo, 0, sizeof(MRSTLFB_DEVINFO));
++
++
++ SetAnchorPtr((void*)psDevInfo);
++
++ psDevInfo->psDrmDevice = dev;
++ psDevInfo->ulRefCount = 0;
++
++
++ if(InitDev(psDevInfo) != MRST_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++ if(MRSTLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != MRST_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++ if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++ spin_lock_init(&psDevInfo->sSwapChainLock);
++
++ psDevInfo->psSwapChain = 0;
++ psDevInfo->bFlushCommands = MRST_FALSE;
++ psDevInfo->bDeviceSuspended = MRST_FALSE;
++
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 3;
++ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 2;
++ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
++ psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
++
++ strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
++
++
++
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Maximum number of swap chain buffers: %lu\n",
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
++
++
++
++
++ psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
++ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
++ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
++ psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
++ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
++ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
++ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
++ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
++ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
++ psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
++ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
++ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
++ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
++ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
++ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
++ psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
++ psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
++ psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
++
++
++ if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
++ &psDevInfo->sDCJTable,
++ &psDevInfo->ulDeviceID ) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++ printk("Device ID: %lu\n", psDevInfo->ulDeviceID);
++
++#if defined (SYS_USING_INTERRUPTS)
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterSystemISRHandler(MRSTLFBISRHandler,
++ psDevInfo,
++ 0,
++ (IMG_UINT32)psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX "ISR Installation failed\n"));
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++#if 0
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice((IMG_UINT32)psDevInfo->ulDeviceID,
++ MRSTLFBPrePowerState, MRSTLFBPostPowerState,
++ IMG_NULL, IMG_NULL,
++ psDevInfo,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_ON) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++
++
++
++
++
++
++
++
++
++
++
++#if defined (MRST_USING_INTERRUPTS)
++
++ if(MRSTLFBInstallVSyncISR(psDevInfo,MRSTLFBVSyncISR) != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX "ISR Installation failed\n"));
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++
++
++ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
++
++
++ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
++ aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
++
++
++
++
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->ulDeviceID,
++ &pfnCmdProcList[0],
++ aui32SyncCountList,
++ MRSTLFB_COMMAND_COUNT) != PVRSRV_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
++ return (MRST_ERROR_CANT_REGISTER_CALLBACK);
++ }
++
++
++ }
++
++
++ //psDrmPriv->psb_change_fb_handler = MRSTLFBHandleChangeFB;
++
++
++ psDevInfo->ulRefCount++;
++
++
++ return (MRST_OK);
++}
++
++MRST_ERROR MRSTLFBDeinit(void)
++{
++ MRSTLFB_DEVINFO *psDevInfo, *psDevFirst;
++
++ psDevFirst = GetAnchorPtr();
++ psDevInfo = psDevFirst;
++
++
++ if (psDevInfo == NULL)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++
++ psDevInfo->ulRefCount--;
++
++ psDevInfo->psDrmDevice = NULL;
++ if (psDevInfo->ulRefCount == 0)
++ {
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable = &psDevInfo->sPVRJTable;
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->ulDeviceID, MRSTLFB_COMMAND_COUNT) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice((IMG_UINT32)psDevInfo->ulDeviceID,
++ IMG_NULL, IMG_NULL,
++ IMG_NULL, IMG_NULL, IMG_NULL,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_ON) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++#if defined (SYS_USING_INTERRUPTS)
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterSystemISRHandler(IMG_NULL, IMG_NULL, 0,
++ (IMG_UINT32)psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++#endif
++
++#if defined (MRST_USING_INTERRUPTS)
++
++ if(MRSTLFBUninstallVSyncISR(psDevInfo) != MRST_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++#endif
++
++ if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++ DeInitDev(psDevInfo);
++
++
++ MRSTLFBFreeKernelMem(psDevInfo);
++ }
++
++
++ SetAnchorPtr(NULL);
++
++
++ return (MRST_OK);
++}
++
++
++/*
++ * save_display_registers
++ *
++ * Description: We are going to suspend so save current display
++ * register state.
++ */
++static void save_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int i;
++
++ /* Display arbitration control + watermarks */
++ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
++ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
++ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
++ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
++ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
++ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
++ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
++ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
++
++ /* Pipe & plane A info */
++ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
++ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
++ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
++ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
++ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
++ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
++ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
++ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
++ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
++ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
++ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
++ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
++ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
++ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
++ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
++ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
++ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
++ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
++
++ /*save cursor regs*/
++ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
++ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
++ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
++
++ /*save palette (gamma) */
++ for (i = 0; i < 256; i++)
++ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2));
++
++ /*save performance state*/
++ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
++
++ /* LVDS state */
++ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
++ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
++ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
++ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
++ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
++ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
++ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
++ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
++ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
++ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
++
++ /* HW overlay */
++ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
++ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++
++ /* MIPI DSI */
++ dev_priv->saveMIPI = PSB_RVDC32(MIPI);
++ dev_priv->saveDEVICE_READY_REG = PSB_RVDC32(DEVICE_READY_REG);
++ dev_priv->saveINTR_EN_REG = PSB_RVDC32(INTR_EN_REG);
++ dev_priv->saveDSI_FUNC_PRG_REG = PSB_RVDC32(DSI_FUNC_PRG_REG);
++ dev_priv->saveHS_TX_TIMEOUT_REG = PSB_RVDC32(HS_TX_TIMEOUT_REG);
++ dev_priv->saveLP_RX_TIMEOUT_REG = PSB_RVDC32(LP_RX_TIMEOUT_REG);
++ dev_priv->saveTURN_AROUND_TIMEOUT_REG =
++ PSB_RVDC32(TURN_AROUND_TIMEOUT_REG);
++ dev_priv->saveDEVICE_RESET_REG = PSB_RVDC32(DEVICE_RESET_REG);
++ dev_priv->saveDPI_RESOLUTION_REG =
++ PSB_RVDC32(DPI_RESOLUTION_REG);
++ dev_priv->saveHORIZ_SYNC_PAD_COUNT_REG =
++ PSB_RVDC32(HORIZ_SYNC_PAD_COUNT_REG);
++ dev_priv->saveHORIZ_BACK_PORCH_COUNT_REG =
++ PSB_RVDC32(HORIZ_BACK_PORCH_COUNT_REG);
++ dev_priv->saveHORIZ_FRONT_PORCH_COUNT_REG =
++ PSB_RVDC32(HORIZ_FRONT_PORCH_COUNT_REG);
++ dev_priv->saveHORIZ_ACTIVE_AREA_COUNT_REG =
++ PSB_RVDC32(HORIZ_ACTIVE_AREA_COUNT_REG);
++ dev_priv->saveVERT_SYNC_PAD_COUNT_REG =
++ PSB_RVDC32(VERT_SYNC_PAD_COUNT_REG);
++ dev_priv->saveVERT_BACK_PORCH_COUNT_REG =
++ PSB_RVDC32(VERT_BACK_PORCH_COUNT_REG);
++ dev_priv->saveVERT_FRONT_PORCH_COUNT_REG =
++ PSB_RVDC32(VERT_FRONT_PORCH_COUNT_REG);
++ dev_priv->saveHIGH_LOW_SWITCH_COUNT_REG =
++ PSB_RVDC32(HIGH_LOW_SWITCH_COUNT_REG);
++ dev_priv->saveINIT_COUNT_REG = PSB_RVDC32(INIT_COUNT_REG);
++ dev_priv->saveMAX_RET_PAK_REG = PSB_RVDC32(MAX_RET_PAK_REG);
++ dev_priv->saveVIDEO_FMT_REG = PSB_RVDC32(VIDEO_FMT_REG);
++ dev_priv->saveEOT_DISABLE_REG = PSB_RVDC32(EOT_DISABLE_REG);
++ dev_priv->saveLP_BYTECLK_REG = PSB_RVDC32(LP_BYTECLK_REG);
++ dev_priv->saveHS_LS_DBI_ENABLE_REG =
++ PSB_RVDC32(HS_LS_DBI_ENABLE_REG);
++ dev_priv->saveTXCLKESC_REG = PSB_RVDC32(TXCLKESC_REG);
++ dev_priv->saveDPHY_PARAM_REG = PSB_RVDC32(DPHY_PARAM_REG);
++ dev_priv->saveMIPI_CONTROL_REG = PSB_RVDC32(MIPI_CONTROL_REG);
++
++ /* DPST registers */
++ dev_priv->saveHISTOGRAM_INT_CONTROL_REG = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++}
++
++
++/*
++ * restore_display_registers
++ *
++ * Description: We are going to resume so restore display register state.
++ */
++static void restore_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long i, pp_stat;
++
++ /* Display arbitration + watermarks */
++ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
++ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
++ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
++ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
++ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
++ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
++ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
++ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
++
++ /*make sure VGA plane is off. it initializes to on after reset!*/
++ PSB_WVDC32(0x80000000, VGACNTRL);
++
++ /* set the plls */
++ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
++ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
++ /* Actually enable it */
++ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
++ DRM_UDELAY(150);
++
++ /* Restore mode */
++ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
++ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
++ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
++ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
++ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
++ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
++ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
++ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
++
++ /*restore performance mode*/
++ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
++
++ /*enable the pipe*/
++ if (dev_priv->iLVDS_enable)
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++
++ /* set up MIPI */
++ PSB_WVDC32(dev_priv->saveINTR_EN_REG, INTR_EN_REG);
++ PSB_WVDC32(dev_priv->saveDSI_FUNC_PRG_REG, DSI_FUNC_PRG_REG);
++ PSB_WVDC32(dev_priv->saveHS_TX_TIMEOUT_REG, HS_TX_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveLP_RX_TIMEOUT_REG, LP_RX_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveTURN_AROUND_TIMEOUT_REG,
++ TURN_AROUND_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveDEVICE_RESET_REG, DEVICE_RESET_REG);
++ PSB_WVDC32(dev_priv->saveDPI_RESOLUTION_REG,
++ DPI_RESOLUTION_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_SYNC_PAD_COUNT_REG,
++ HORIZ_SYNC_PAD_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_BACK_PORCH_COUNT_REG,
++ HORIZ_BACK_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_FRONT_PORCH_COUNT_REG,
++ HORIZ_FRONT_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_ACTIVE_AREA_COUNT_REG,
++ HORIZ_ACTIVE_AREA_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_SYNC_PAD_COUNT_REG,
++ VERT_SYNC_PAD_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_BACK_PORCH_COUNT_REG,
++ VERT_BACK_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_FRONT_PORCH_COUNT_REG,
++ VERT_FRONT_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHIGH_LOW_SWITCH_COUNT_REG,
++ HIGH_LOW_SWITCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveINIT_COUNT_REG, INIT_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveMAX_RET_PAK_REG, MAX_RET_PAK_REG);
++ PSB_WVDC32(dev_priv->saveVIDEO_FMT_REG, VIDEO_FMT_REG);
++ PSB_WVDC32(dev_priv->saveEOT_DISABLE_REG, EOT_DISABLE_REG);
++ PSB_WVDC32(dev_priv->saveLP_BYTECLK_REG, LP_BYTECLK_REG);
++ PSB_WVDC32(dev_priv->saveHS_LS_DBI_ENABLE_REG,
++ HS_LS_DBI_ENABLE_REG);
++ PSB_WVDC32(dev_priv->saveTXCLKESC_REG, TXCLKESC_REG);
++ PSB_WVDC32(dev_priv->saveDPHY_PARAM_REG, DPHY_PARAM_REG);
++ PSB_WVDC32(dev_priv->saveMIPI_CONTROL_REG, MIPI_CONTROL_REG);
++
++ /*set up the plane*/
++ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
++ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
++ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
++
++ /* Enable the plane */
++ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
++ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
++
++ /*Enable Cursor A*/
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
++
++ /* restore palette (gamma) */
++ /*DRM_UDELAY(50000); */
++ for (i = 0; i < 256; i++)
++ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2));
++
++ if (dev_priv->iLVDS_enable) {
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
++ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
++ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
++ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
++ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
++ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
++ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
++ } else { /* enable MIPI */
++ PSB_WVDC32(MIPI_PORT_EN | MIPI_BORDER_EN, MIPI); /*force on port*/
++ PSB_WVDC32(1, DEVICE_READY_REG);/* force on to re-program */
++ dev_priv->init_drvIC(dev);
++ PSB_WVDC32(dev_priv->saveMIPI, MIPI); /*port 61190h*/
++ PSB_WVDC32(dev_priv->saveDEVICE_READY_REG, DEVICE_READY_REG);
++ if (dev_priv->saveDEVICE_READY_REG)
++ PSB_WVDC32(DPI_TURN_ON, DPI_CONTROL_REG);
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ }
++
++ /*wait for cycle delay*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x08000000);
++
++ DRM_UDELAY(999);
++ /*wait for panel power up*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x10000000);
++
++ /* restore HW overlay */
++ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
++
++ /* DPST registers */
++ PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG, HISTOGRAM_INT_CONTROL);
++ PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG, HISTOGRAM_LOGIC_CONTROL);
++}
++
++MRST_ERROR MRSTLFBAllocBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, IMG_UINT32 ui32Size, MRSTLFB_BUFFER **ppBuffer)
++{
++ IMG_VOID *pvBuf;
++ IMG_UINT32 ulPagesNumber;
++ IMG_UINT32 ulCounter;
++ int i;
++
++ pvBuf = __vmalloc( ui32Size, GFP_KERNEL | __GFP_HIGHMEM, __pgprot((pgprot_val(PAGE_KERNEL ) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) );
++ if( pvBuf == NULL )
++ {
++ return MRST_ERROR_OUT_OF_MEMORY;
++ }
++
++ ulPagesNumber = (ui32Size + PAGE_SIZE -1) / PAGE_SIZE;
++
++ *ppBuffer = MRSTLFBAllocKernelMem( sizeof( MRSTLFB_BUFFER ) );
++ (*ppBuffer)->sCPUVAddr = pvBuf;
++ (*ppBuffer)->ui32BufferSize = ui32Size;
++ (*ppBuffer)->uSysAddr.psNonCont = MRSTLFBAllocKernelMem( sizeof( IMG_SYS_PHYADDR ) * ulPagesNumber);
++ (*ppBuffer)->bIsAllocated = IMG_TRUE;
++ (*ppBuffer)->bIsContiguous = IMG_FALSE;
++ (*ppBuffer)->ui32OwnerTaskID = task_tgid_nr(current);
++
++ i = 0;
++ for(ulCounter = 0; ulCounter < ui32Size; ulCounter += PAGE_SIZE)
++ {
++ (*ppBuffer)->uSysAddr.psNonCont[i++].uiAddr = vmalloc_to_pfn( pvBuf + ulCounter ) << PAGE_SHIFT;
++ }
++
++ psb_gtt_map_pvr_memory( psDevInfo->psDrmDevice,
++ (unsigned int)*ppBuffer,
++ (*ppBuffer)->ui32OwnerTaskID,
++ (IMG_CPU_PHYADDR*) (*ppBuffer)->uSysAddr.psNonCont,
++ ulPagesNumber,
++ (unsigned int *)&(*ppBuffer)->sDevVAddr.uiAddr );
++
++ (*ppBuffer)->sDevVAddr.uiAddr <<= PAGE_SHIFT;
++
++ return MRST_OK;
++}
++
++MRST_ERROR MRSTLFBFreeBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, MRSTLFB_BUFFER **ppBuffer)
++{
++ if( !(*ppBuffer)->bIsAllocated )
++ return MRST_ERROR_INVALID_PARAMS;
++
++ psb_gtt_unmap_pvr_memory( psDevInfo->psDrmDevice,
++ (unsigned int)*ppBuffer,
++ (*ppBuffer)->ui32OwnerTaskID);
++
++ vfree( (*ppBuffer)->sCPUVAddr );
++
++ MRSTLFBFreeKernelMem( (*ppBuffer)->uSysAddr.psNonCont );
++
++ MRSTLFBFreeKernelMem( *ppBuffer);
++
++ *ppBuffer = NULL;
++
++ return MRST_OK;
++}
++
++
++
++PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
++ struct drm_device* dev = psDevInfo->psDrmDevice;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int pp_stat, ret;
++
++ if ((eNewPowerState == eCurrentPowerState) ||
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON))
++ return PVRSRV_OK;
++
++ if (!dev_priv->iLVDS_enable && dev_priv->dsi_prePowerState != NULL)
++ dev_priv->dsi_prePowerState(dev);
++
++ save_display_registers(dev);
++
++ if (dev_priv->iLVDS_enable) {
++ /*shutdown the panel*/
++ PSB_WVDC32(0, PP_CONTROL);
++
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x80000000);
++
++ /*turn off the plane*/
++ PSB_WVDC32(0x58000000, DSPACNTR);
++ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
++ msleep(4);
++
++ /*turn off pipe*/
++ PSB_WVDC32(0x0, PIPEACONF);
++ msleep(8);
++
++ /*turn off PLLs*/
++ PSB_WVDC32(0, MRST_DPLL_A);
++ } else {
++ if (dev_priv->dsi_prePowerState == NULL) {
++ PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
++ PSB_WVDC32(0x0, PIPEACONF);
++ PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
++ while (REG_READ(0x70008) & 0x40000000)
++ ;
++ while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY)
++ ;
++ PSB_WVDC32(0, DEVICE_READY_REG);
++ }
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* turn off mipi panel power */
++ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_OFF, 0, 0);
++ #else /*KERNEL_VERSON >= 2.6.34*/
++ ret = intel_scu_ipc_simple_command(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_OFF);
++ #endif
++ if (ret)
++ printk(KERN_WARNING "IPC 0xE9 failed to turn off pnl pwr. Error is: %x\n", ret);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
++ struct drm_device* dev = psDevInfo->psDrmDevice;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++
++ if ((eNewPowerState == eCurrentPowerState) ||
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ return PVRSRV_OK;
++
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ /* Don't reinitialize the GTT as it is unnecessary. The gtt is
++ * stored in memory so it will automatically be restored. All
++ * we need to do is restore the PGETBL_CTL which we already do
++ * above.
++ */
++ /*psb_gtt_init(dev_priv->pg, 1);*/
++
++ if (!dev_priv->iLVDS_enable) {
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* turn on mipi panel power */
++ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_ON, 0, 0);
++ #else
++ ret = intel_scu_ipc_simple_command(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_ON);
++ #endif
++ if (ret)
++ printk(KERN_WARNING "IPC 0xE9 failed to turn on pnl pwr. Error is: %x\n", ret);
++ msleep(2000); /* wait 2 seconds */
++ }
++
++ restore_display_registers(dev);
++
++ if (!dev_priv->iLVDS_enable && dev_priv->dsi_postPowerState != NULL)
++ dev_priv->dsi_postPowerState(dev);
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+@@ -0,0 +1,206 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#include <linux/pci.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++
++#include <drm/drmP.h>
++
++#include <asm/io.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "pvrmodule.h"
++#include "pvr_drm.h"
++#include "mrstlfb.h"
++#include "kerneldisplay.h"
++#include "psb_irq.h"
++
++#include "psb_drv.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++#error "SUPPORT_DRI_DRM must be set"
++#endif
++
++#define MAKESTRING(x) # x
++
++#if !defined(DISPLAY_CONTROLLER)
++#define DISPLAY_CONTROLLER pvrlfb
++#endif
++
++//#define MAKENAME_HELPER(x, y) x ## y
++//#define MAKENAME2(x, y) MAKENAME_HELPER(x, y)
++//#define MAKENAME(x) MAKENAME2(DISPLAY_CONTROLLER, x)
++
++#define unref__ __attribute__ ((unused))
++
++
++extern int fb_idx;
++
++void *MRSTLFBAllocKernelMem(unsigned long ulSize)
++{
++ return kmalloc(ulSize, GFP_KERNEL);
++}
++
++void MRSTLFBFreeKernelMem(void *pvMem)
++{
++ kfree(pvMem);
++}
++
++
++MRST_ERROR MRSTLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
++{
++ if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
++ {
++ return (MRST_ERROR_INVALID_PARAMS);
++ }
++
++
++ *ppfnFuncTable = PVRGetDisplayClassJTable;
++
++ return (MRST_OK);
++}
++
++static void MRSTLFBVSyncWriteReg(MRSTLFB_DEVINFO *psDevInfo, unsigned long ulOffset, unsigned long ulValue)
++{
++
++ void *pvRegAddr = (void *)(psDevInfo->pvRegs + ulOffset);
++ mb();
++ iowrite32(ulValue, pvRegAddr);
++}
++
++unsigned long MRSTLFBVSyncReadReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset)
++{
++ mb();
++ return ioread32((char *)psDevinfo->pvRegs + ulOffset);
++}
++
++void MRSTLFBEnableVSyncInterrupt(MRSTLFB_DEVINFO * psDevinfo)
++{
++#if defined(MRST_USING_INTERRUPTS)
++
++#if defined(SUPPORT_DRI_DRM)
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) psDevinfo->psDrmDevice->dev_private;
++ dev_priv->vblanksEnabledForFlips = true;
++ psb_enable_vblank(psDevinfo->psDrmDevice, 0);
++
++#else
++
++ unsigned long vdc_irq_mask;
++
++ vdc_irq_mask = ~MRSTLFBVSyncReadReg( psDevinfo, PSB_INT_MASK_R);
++ vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++
++ MRSTLFBVSyncWriteReg(psDevinfo, PSB_INT_MASK_R, ~vdc_irq_mask);
++ MRSTLFBVSyncWriteReg(psDevinfo, PSB_INT_ENABLE_R, vdc_irq_mask);
++
++ {
++ unsigned int writeVal = MRSTLFBVSyncReadReg(psDevinfo, PIPEASTAT);
++ unsigned int mask = PIPE_START_VBLANK_INTERRUPT_ENABLE | PIPE_VBLANK_INTERRUPT_ENABLE;
++
++ writeVal |= (mask | (mask >> 16));
++ MRSTLFBVSyncWriteReg(psDevinfo, PIPEASTAT, writeVal);
++ MRSTLFBVSyncReadReg(psDevinfo, PIPEASTAT);
++ }
++#endif
++#endif
++}
++
++void MRSTLFBDisableVSyncInterrupt(MRSTLFB_DEVINFO * psDevinfo)
++{
++#if defined(MRST_USING_INTERRUPTS)
++ struct drm_device * dev = psDevinfo->psDrmDevice;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) psDevinfo->psDrmDevice->dev_private;
++ dev_priv->vblanksEnabledForFlips = false;
++ //Only turn off if DRM isn't currently using vblanks, otherwise, leave on.
++ if (!dev->vblank_enabled[0])
++ psb_disable_vblank(psDevinfo->psDrmDevice, 0);
++#endif
++}
++
++#if defined(MRST_USING_INTERRUPTS)
++MRST_ERROR MRSTLFBInstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo, MRSTLFB_VSYNC_ISR_PFN pVsyncHandler)
++{
++ //struct drm_psb_private *dev_priv =
++ // (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++ //dev_priv->psb_vsync_handler = pVsyncHandler;
++ return (MRST_OK);
++}
++
++
++MRST_ERROR MRSTLFBUninstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo)
++{
++ //struct drm_psb_private *dev_priv =
++ // (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++ //dev_priv->psb_vsync_handler = NULL;
++ return (MRST_OK);
++}
++#endif
++
++
++void MRSTLFBFlip(MRSTLFB_DEVINFO *psDevInfo, unsigned long uiAddr)
++{
++ int dspbase = (psDevInfo->ui32MainPipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (psDevInfo->ui32MainPipe == 0 ? DSPASURF : DSPBSURF);
++
++ if (IS_MRST(psDevInfo->psDrmDevice)) {
++ MRSTLFBVSyncWriteReg(psDevInfo, dspsurf, uiAddr);
++ } else {
++ MRSTLFBVSyncWriteReg(psDevInfo, dspbase, uiAddr);
++ }
++}
++
++
++int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device unref__ *dev)
++{
++ if(MRSTLFBInit(dev) != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": MRSTLFB_Init: MRSTLFBInit failed\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device unref__ *dev)
++{
++ if(MRSTLFBDeinit() != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX "%s: can't deinit device\n", __FUNCTION__);
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/env/linux-intel/pvr_drm_shared.h
+@@ -0,0 +1,54 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_SHARED_H__)
++#define __PVR_DRM_SHARED_H__
++
++#if defined(SUPPORT_DRI_DRM)
++
++#define PVR_DRM_SRVKM_CMD 0x12
++#define PVR_DRM_DISP_CMD 0x13
++#define PVR_DRM_BC_CMD 0x14
++#define PVR_DRM_IS_MASTER_CMD 0x15
++#define PVR_DRM_UNPRIV_CMD 0x16
++#define PVR_DRM_DBGDRV_CMD 0x1E
++
++#define PVR_DRM_UNPRIV_INIT_SUCCESFUL 0
++#define PVR_DRM_UNPRIV_BUSID_TYPE 1
++#define PVR_DRM_UNPRIV_BUSID_FIELD 2
++
++#define PVR_DRM_BUS_TYPE_PCI 0
++
++#define PVR_DRM_PCI_DOMAIN 0
++#define PVR_DRM_PCI_BUS 1
++#define PVR_DRM_PCI_DEV 2
++#define PVR_DRM_PCI_FUNC 3
++
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+@@ -0,0 +1,54 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_SHARED_H__)
++#define __PVR_DRM_SHARED_H__
++
++#if defined(SUPPORT_DRI_DRM)
++
++#define PVR_DRM_SRVKM_CMD 0x12
++#define PVR_DRM_DISP_CMD 0x13
++#define PVR_DRM_BC_CMD 0x14
++#define PVR_DRM_IS_MASTER_CMD 0x15
++#define PVR_DRM_UNPRIV_CMD 0x16
++#define PVR_DRM_DBGDRV_CMD 0x1E
++
++#define PVR_DRM_UNPRIV_INIT_SUCCESFUL 0
++#define PVR_DRM_UNPRIV_BUSID_TYPE 1
++#define PVR_DRM_UNPRIV_BUSID_FIELD 2
++
++#define PVR_DRM_BUS_TYPE_PCI 0
++
++#define PVR_DRM_PCI_DOMAIN 0
++#define PVR_DRM_PCI_BUS 1
++#define PVR_DRM_PCI_DEV 2
++#define PVR_DRM_PCI_FUNC 3
++
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/kernelbuffer.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_UINT32, IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_UINT32, IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*);
++typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*);
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_OPEN_BC_DEVICE pfnOpenBCDevice;
++ PFN_CLOSE_BC_DEVICE pfnCloseBCDevice;
++ PFN_GET_BC_INFO pfnGetBCInfo;
++ PFN_GET_BC_BUFFER pfnGetBCBuffer;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++
++} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++
++typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32);
++
++typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice;
++ PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice;
++
++} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/kerneldisplay.h
+@@ -0,0 +1,156 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE,
++ DISPLAY_FORMAT*,
++ IMG_UINT32*,
++ DISPLAY_DIMS*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*);
++typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE,
++ IMG_UINT32,
++ DISPLAY_SURF_ATTRIBUTES*,
++ DISPLAY_SURF_ATTRIBUTES*,
++ IMG_UINT32,
++ PVRSRV_SYNC_DATA**,
++ IMG_UINT32,
++ IMG_HANDLE*,
++ IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE,
++ IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_UINT32*,
++ IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_UINT32,
++ IMG_HANDLE,
++ IMG_UINT32,
++ IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(IMG_HANDLE, IMG_HANDLE);
++typedef IMG_VOID (*PFN_QUERY_SWAP_COMMAND_ID)(IMG_HANDLE, IMG_HANDLE, IMG_HANDLE, IMG_HANDLE, IMG_UINT16*, IMG_BOOL*);
++typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_OPEN_DC_DEVICE pfnOpenDCDevice;
++ PFN_CLOSE_DC_DEVICE pfnCloseDCDevice;
++ PFN_ENUM_DC_FORMATS pfnEnumDCFormats;
++ PFN_ENUM_DC_DIMS pfnEnumDCDims;
++ PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer;
++ PFN_GET_DC_INFO pfnGetDCInfo;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain;
++ PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain;
++ PFN_SET_DC_DSTRECT pfnSetDCDstRect;
++ PFN_SET_DC_SRCRECT pfnSetDCSrcRect;
++ PFN_SET_DC_DSTCK pfnSetDCDstColourKey;
++ PFN_SET_DC_SRCCK pfnSetDCSrcColourKey;
++ PFN_GET_DC_BUFFERS pfnGetDCBuffers;
++ PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer;
++ PFN_SWAP_TO_DC_SYSTEM pfnSwapToDCSystem;
++ PFN_SET_DC_STATE pfnSetDCState;
++ PFN_QUERY_SWAP_COMMAND_ID pfnQuerySwapCommandID;
++
++} PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*);
++
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32);
++typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER,
++ PFN_PRE_CLOCKSPEED_CHANGE, PFN_POST_CLOCKSPEED_CHANGE,
++ IMG_HANDLE, PVRSRV_DEV_POWER_STATE, PVRSRV_DEV_POWER_STATE);
++
++typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice;
++ PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice;
++ PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction;
++ PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
++ PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList;
++ PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete;
++ PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler;
++ PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice;
++ PFN_DC_CMD_COMPLETE pfnPVRSRVFreeCmdCompletePacket;
++} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
++
++
++typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
++{
++
++ IMG_HANDLE hExtDevice;
++
++
++ IMG_HANDLE hExtSwapChain;
++
++
++ IMG_HANDLE hExtBuffer;
++
++
++ IMG_HANDLE hPrivateTag;
++
++
++ IMG_UINT32 ui32ClipRectCount;
++
++
++ IMG_RECT *psClipRect;
++
++
++ IMG_UINT32 ui32SwapInterval;
++
++} DISPLAYCLASS_FLIP_COMMAND;
++
++#define DC_FLIP_COMMAND 0
++
++#define DC_STATE_NO_FLUSH_COMMANDS 0
++#define DC_STATE_FLUSH_COMMANDS 1
++
++
++typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
++
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/pvr_bridge.h
+@@ -0,0 +1,1383 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "servicesint.h"
++
++#ifdef __linux__
++
++ #include <linux/ioctl.h>
++
++ #define PVRSRV_IOC_GID 'g'
++ #define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++
++#else
++
++ #error Unknown platform: Cannot define ioctls
++
++ #define PVRSRV_IO(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOW(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOR(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOWR(INDEX) (PVRSRV_IOC_GID + INDEX)
++
++ #define PVRSRV_BRIDGE_BASE PVRSRV_IOC_GID
++#endif
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0UL
++#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
++#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
++#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_FB_STATS PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_RELEASE_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#endif
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_PDUMP_REG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_PDUMP_REGPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_PDUMP_COMMENT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_PDUMP_PDREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
++
++
++#define PVRSRV_KERNEL_MODE_CLIENT 1
++
++typedef struct PVRSRV_BRIDGE_RETURN_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_VOID *pvData;
++
++}PVRSRV_BRIDGE_RETURN;
++
++
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++ IMG_UINT32 ui32BridgeID;
++ IMG_UINT32 ui32Size;
++ IMG_VOID *pvParamIn;
++ IMG_UINT32 ui32InBufferSize;
++ IMG_VOID *pvParamOut;
++ IMG_UINT32 ui32OutBufferSize;
++
++ IMG_HANDLE hKernelServices;
++}PVRSRV_BRIDGE_PACKAGE;
++
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 uiDevIndex;
++ PVRSRV_DEVICE_TYPE eDeviceType;
++
++} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS sDeviceClass;
++} PVRSRV_BRIDGE_IN_ENUMCLASS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS DeviceClass;
++ IMG_VOID* pvDevInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++
++}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemHeap;
++ IMG_UINT32 ui32Attribs;
++ IMG_SIZE_T ui32Size;
++ IMG_SIZE_T ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_PVOID pvLinAddr;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Flags;
++
++} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_SIZE_T ui32QueueSize;
++
++}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++
++}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hMHandle;
++} PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hMHandle;
++} PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR *psDevVAddr;
++ IMG_SIZE_T ui32Size;
++ IMG_SIZE_T ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hKernelServices;
++}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_HANDLE hDstDevMemHeap;
++
++}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceClassBuffer;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ IMG_UINT32 ui32Mask;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_BOOL bIsRead;
++ IMG_UINT32 ui32Value;
++ IMG_UINT32 ui32Mask;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_PVOID pvLinAddr;
++ IMG_PVOID pvAltLinAddr;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Bytes;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_PVOID pvAltLinAddr;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Bytes;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Mask;
++ IMG_UINT32 ui32Flags;
++}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_DEV_PHYADDR *pPages;
++ IMG_UINT32 ui32NumPages;
++ IMG_DEV_VIRTADDR sDevAddr;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32Length;
++ IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Frame;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32StrideInBytes;
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ IMG_UINT32 ui32Size;
++ PDUMP_PIXEL_FORMAT ePixelFormat;
++ PDUMP_MEM_FORMAT eMemFormat;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32Address;
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_READREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
++
++
++typedef struct PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32RegOffset;
++ IMG_BOOL bLastFrame;
++}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
++{
++
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32NumDevices;
++ IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMCLASS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hDevCookie;
++
++}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDeviceKM;
++
++}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++ IMG_VOID *pvLinAddr;
++ IMG_SIZE_T ui32ByteSize;
++ IMG_SIZE_T ui32PageOffset;
++ IMG_BOOL bPhysContig;
++ IMG_UINT32 ui32NumPageTableEntries;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
++
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4
++#define PVRSRV_MAX_DC_CLIP_RECTS 32
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Count;
++ DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ DISPLAY_FORMAT sFormat;
++
++}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Count;
++ DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ DISPLAY_INFO sDisplayInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBuffer;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_UINT32 ui32Flags;
++ DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++ DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++ IMG_UINT32 ui32BufferCount;
++ IMG_UINT32 ui32OEMFlags;
++ IMG_UINT32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSwapChain;
++ IMG_UINT32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++ IMG_RECT sRect;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++ IMG_UINT32 ui32CKColour;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32BufferCount;
++ IMG_HANDLE ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++
++} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hBuffer;
++ IMG_UINT32 ui32SwapInterval;
++ IMG_HANDLE hPrivateTag;
++ IMG_UINT32 ui32ClipRectCount;
++ IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDeviceKM;
++
++} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ BUFFER_INFO sBufferInfo;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_UINT32 ui32BufferIndex;
++
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBuffer;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevMemContext;
++ IMG_UINT32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevMemHeap;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ IMG_UINT64 ui64Stamp;
++#endif
++
++} PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_PVOID pvLinAddr;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_SIZE_T ui32Total;
++ IMG_SIZE_T ui32Free;
++ IMG_SIZE_T ui32LargestBlock;
++
++} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
++
++
++#include "pvrmmap.h"
++typedef struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++
++ IMG_UINT32 ui32ByteOffset;
++
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++} PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bMUnmap;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++ IMG_UINT32 ui32RealByteSize;
++} PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_BOOL bIsCapturing;
++
++} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_SIZE_T ui32Total;
++ IMG_SIZE_T ui32Available;
++
++} PVRSRV_BRIDGE_IN_GET_FB_STATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_SYS_PHYADDR sSysPhysAddr;
++ IMG_UINT32 uiSizeInBytes;
++
++} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
++{
++ IMG_PVOID pvUserAddr;
++ IMG_UINT32 uiActualSize;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvUserAddr;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
++{
++ IMG_PVOID *ppvTbl;
++ IMG_UINT32 uiTblSize;
++
++} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_SYS_PHYADDR sRegsPhysBase;
++ IMG_VOID *pvRegsBase;
++ IMG_PVOID pvProcess;
++ IMG_UINT32 ulNoOfEntries;
++ IMG_PVOID pvTblLinAddr;
++
++} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvProcess;
++ IMG_VOID *pvRegsBase;
++
++} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
++
++typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32StatusAndMask;
++ PVRSRV_ERROR eError;
++
++} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
++
++typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_BOOL bInitSuccesful;
++} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Flags;
++ IMG_SIZE_T ui32Size;
++}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
++{
++ IMG_HANDLE hOSEvent;
++ PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++ IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_UINT32 ui32ReadOpsPending;
++ IMG_UINT32 ui32WriteOpsPending;
++
++} PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS;
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/pvr_bridge_km.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined(__linux__)
++PVRSRV_ERROR LinuxBridgeInit(IMG_VOID);
++IMG_VOID LinuxBridgeDeInit(IMG_VOID);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 uiDevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE *phDevCookie);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_BOOL *pbDestroyed);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared
++ );
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMemKM(" #devCookie ", " #perProc ", " #devMemHeap ", " #flags ", " #size \
++ ", " #alignment "," #memInfo "): " logStr " (size = 0x%;x)", size)),\
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo))
++#else
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo)
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_DIMS *psDim);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM,
++ DISPLAY_INFO *psDisplayInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChain,
++ IMG_UINT32 *pui32SwapChainID);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM,
++ BUFFER_INFO *psBufferInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ IMG_HANDLE *phOSMapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++ IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Free,
++ IMG_SIZE_T *pui32LargestBlock);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Available);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/pvrmmap.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++PVRSRV_ERROR PVRPMapKMem(IMG_HANDLE hModule, IMG_VOID **ppvLinAddr, IMG_VOID *pvLinAddrKM, IMG_HANDLE *phMappingInfo, IMG_HANDLE hMHandle);
++
++
++IMG_BOOL PVRUnMapKMem(IMG_HANDLE hModule, IMG_HANDLE hMappingInfo, IMG_HANDLE hMHandle);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/servicesint.h
+@@ -0,0 +1,276 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT (500)
++
++#define DRIVERNAME_MAXLENGTH (100)
++
++typedef enum _PVRSRV_MEMTYPE_
++{
++ PVRSRV_MEMTYPE_UNKNOWN = 0,
++ PVRSRV_MEMTYPE_DEVICE = 1,
++ PVRSRV_MEMTYPE_DEVICECLASS = 2,
++ PVRSRV_MEMTYPE_WRAPPED = 3,
++ PVRSRV_MEMTYPE_MAPPED = 4,
++}
++PVRSRV_MEMTYPE;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_
++{
++
++ IMG_PVOID pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_SIZE_T ui32AllocSize;
++
++
++ PVRSRV_MEMBLK sMemBlk;
++
++
++ IMG_PVOID pvSysBackupBuffer;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_BOOL bPendingFree;
++
++
++ #if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ IMG_UINT64 ui64Stamp;
++ #else
++ IMG_UINT32 dummy1;
++ IMG_UINT32 dummy2;
++ #endif
++ #endif
++
++
++ struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo;
++
++ PVRSRV_MEMTYPE memType;
++
++} PVRSRV_KERNEL_MEM_INFO;
++
++
++typedef struct _PVRSRV_KERNEL_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM;
++
++
++ IMG_HANDLE hResItem;
++
++
++
++ IMG_UINT32 ui32RefCount;
++
++} PVRSRV_KERNEL_SYNC_INFO;
++
++typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
++{
++
++ IMG_UINT32 ui32ReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ IMG_UINT32 ui32WriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++} PVRSRV_DEVICE_SYNC_OBJECT;
++
++typedef struct _PVRSRV_SYNC_OBJECT
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++ IMG_UINT32 ui32WriteOpsPending;
++ IMG_UINT32 ui32ReadOpsPending;
++
++}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
++
++typedef struct _PVRSRV_COMMAND
++{
++ IMG_SIZE_T ui32CmdSize;
++ IMG_UINT32 ui32DevIndex;
++ IMG_UINT32 CommandType;
++ IMG_UINT32 ui32DstSyncCount;
++ IMG_UINT32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ IMG_SIZE_T ui32DataSize;
++ IMG_UINT32 ui32ProcessID;
++ IMG_VOID *pvData;
++}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
++
++
++typedef struct _PVRSRV_QUEUE_INFO_
++{
++ IMG_VOID *pvLinQueueKM;
++ IMG_VOID *pvLinQueueUM;
++ volatile IMG_SIZE_T ui32ReadOffset;
++ volatile IMG_SIZE_T ui32WriteOffset;
++ IMG_UINT32 *pui32KickerAddrKM;
++ IMG_UINT32 *pui32KickerAddrUM;
++ IMG_SIZE_T ui32QueueSize;
++
++ IMG_UINT32 ui32ProcessID;
++
++ IMG_HANDLE hMemBlock[2];
++
++ struct _PVRSRV_QUEUE_INFO_ *psNextKM;
++}PVRSRV_QUEUE_INFO;
++
++typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*,
++ PVRSRV_COMMAND**,
++ IMG_UINT32,
++ IMG_UINT16,
++ IMG_UINT32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ IMG_UINT32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL);
++
++
++typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
++{
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ IMG_HANDLE hDevMemContext;
++ IMG_HANDLE hExtDevice;
++ IMG_HANDLE hExtBuffer;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++} PVRSRV_DEVICECLASS_BUFFER;
++
++
++typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
++{
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hServices;
++} PVRSRV_CLIENT_DEVICECLASS_INFO;
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetWriteOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ IMG_UINT32 ui32WriteOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ else
++ {
++
++
++
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ return ui32WriteOpsPending;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetReadOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ IMG_UINT32 ui32ReadOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ else
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ return ui32ReadOpsPending;
++}
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo,
++ PVRSRV_COMMAND *psCommand);
++
++
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *sPDDevPAddr);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelMemInfo,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/sgx_bridge.h
+@@ -0,0 +1,477 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)
++
++#if defined(TRANSFER_QUEUE)
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#endif
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_SUBMIT2D PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
++#endif
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++
++#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
++
++#if defined(SUPPORT_SGX_HWPERF)
++#define PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
++#endif
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
++#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
++#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
++#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
++#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++#endif
++
++
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddr;
++}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
++{
++ PVRSRV_ERROR eError;
++ IMG_DEV_PHYADDR DevPAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
++{
++ SGX_INTERNAL_DEVINFO sSGXInternalDevInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
++{
++ SGX_CLIENT_INFO sClientInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_CLIENT_INFO sClientInfo;
++}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_CCB_KICK sCCBKick;
++}PVRSRV_BRIDGE_IN_DOKICK;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES;
++
++
++#if defined(TRANSFER_QUEUE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_TRANSFER_SGX_KICK sKick;
++}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_2D_SGX_KICK sKick;
++} PVRSRV_BRIDGE_IN_SUBMIT2D;
++#endif
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PCHAR pszKey;
++ IMG_PCHAR pszValue;
++}PVRSRV_BRIDGE_IN_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Data;
++}PVRSRV_BRIDGE_OUT_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_MISC_INFO *psMiscInfo;
++}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
++{
++ PVRSRV_ERROR eError;
++ SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo;
++}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_BRIDGE_INIT_INFO sInitInfo;
++}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
++
++
++typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hKernSyncInfo;
++ IMG_BOOL bWaitForComplete;
++}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
++
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_BOOL bLockOnFailure;
++ IMG_UINT32 ui32TotalPBSize;
++}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
++{
++ IMG_HANDLE hKernelMemInfo;
++ IMG_HANDLE hSharedPBDesc;
++ IMG_HANDLE hSharedPBDescKernelMemInfoHandle;
++ IMG_HANDLE hHWPBDescKernelMemInfoHandle;
++ IMG_HANDLE hBlockKernelMemInfoHandle;
++ IMG_HANDLE hHWBlockKernelMemInfoHandle;
++ IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hSharedPBDescKernelMemInfo;
++ IMG_HANDLE hHWPBDescKernelMemInfo;
++ IMG_HANDLE hBlockKernelMemInfo;
++ IMG_HANDLE hHWBlockKernelMemInfo;
++ IMG_UINT32 ui32TotalPBSize;
++ IMG_HANDLE *phKernelMemInfoHandles;
++ IMG_UINT32 ui32KernelMemInfoHandlesCount;
++}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
++
++
++#ifdef PDUMP
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ SGX_KICKTA_DUMP_BUFFER *psBufferArray;
++ IMG_UINT32 ui32BufferArrayLength;
++ IMG_BOOL bDumpPolls;
++} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_UINT32 ui32TAKickCount;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32PDumpFlags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB;
++
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
++
++#define SGX2D_MAX_BLT_CMD_SIZ 256
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32Reg;
++ IMG_BOOL bNew;
++ IMG_UINT32 ui32New;
++ IMG_UINT32 ui32NewReset;
++ IMG_UINT32 ui32CountersReg;
++ IMG_UINT32 ui32Reg2;
++} PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Old;
++ IMG_BOOL bActive;
++ PVRSRV_SGXDEV_DIFF_INFO sDiffs;
++} PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS;
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32ArraySize;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData;
++} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32DataCount;
++ IMG_UINT32 ui32ClockSpeed;
++ IMG_UINT32 ui32HostTimeStamp;
++} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB;
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/sgx_mkif_km.h
+@@ -0,0 +1,334 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGX_MKIF_KM_H__)
++#define __SGX_MKIF_KM_H__
++
++#include "img_types.h"
++#include "servicesint.h"
++#include "sgxapi_km.h"
++
++
++#if defined(SGX_FEATURE_MP)
++ #define SGX_REG_BANK_SHIFT (12)
++ #define SGX_REG_BANK_SIZE (0x4000)
++ #if defined(SGX541)
++ #define SGX_REG_BANK_BASE_INDEX (1)
++ #define SGX_REG_BANK_MASTER_INDEX (SGX_REG_BANK_BASE_INDEX + SGX_FEATURE_MP_CORE_COUNT)
++ #else
++ #define SGX_REG_BANK_BASE_INDEX (2)
++ #define SGX_REG_BANK_MASTER_INDEX (1)
++ #endif
++ #define SGX_MP_CORE_SELECT(x,i) (x + ((i + SGX_REG_BANK_BASE_INDEX) * SGX_REG_BANK_SIZE))
++ #define SGX_MP_MASTER_SELECT(x) (x + (SGX_REG_BANK_MASTER_INDEX * SGX_REG_BANK_SIZE))
++#else
++ #define SGX_MP_CORE_SELECT(x,i) (x)
++#endif
++
++
++typedef struct _SGXMKIF_COMMAND_
++{
++ IMG_UINT32 ui32ServiceAddress;
++ IMG_UINT32 ui32CacheControl;
++ IMG_UINT32 ui32Data[2];
++} SGXMKIF_COMMAND;
++
++
++typedef struct _PVRSRV_SGX_KERNEL_CCB_
++{
++ SGXMKIF_COMMAND asCommands[256];
++} PVRSRV_SGX_KERNEL_CCB;
++
++
++typedef struct _PVRSRV_SGX_CCB_CTL_
++{
++ IMG_UINT32 ui32WriteOffset;
++ IMG_UINT32 ui32ReadOffset;
++} PVRSRV_SGX_CCB_CTL;
++
++
++typedef struct _SGXMKIF_HOST_CTL_
++{
++#if defined(PVRSRV_USSE_EDM_BREAKPOINTS)
++ IMG_UINT32 ui32BreakpointDisable;
++ IMG_UINT32 ui32Continue;
++#endif
++
++ volatile IMG_UINT32 ui32InitStatus;
++ volatile IMG_UINT32 ui32PowerStatus;
++ volatile IMG_UINT32 ui32CleanupStatus;
++#if defined(SUPPORT_HW_RECOVERY)
++ IMG_UINT32 ui32uKernelDetectedLockups;
++ IMG_UINT32 ui32HostDetectedLockups;
++ IMG_UINT32 ui32HWRecoverySampleRate;
++#endif
++ IMG_UINT32 ui32uKernelTimerClock;
++ IMG_UINT32 ui32ActivePowManSampleRate;
++ IMG_UINT32 ui32InterruptFlags;
++ IMG_UINT32 ui32InterruptClearFlags;
++
++
++ IMG_UINT32 ui32NumActivePowerEvents;
++
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_UINT32 ui32HWPerfFlags;
++#endif
++
++
++ IMG_UINT32 ui32TimeWraps;
++} SGXMKIF_HOST_CTL;
++
++#define SGXMKIF_CMDTA_CTRLFLAGS_READY 0x00000001
++typedef struct _SGXMKIF_CMDTA_SHARED_
++{
++ IMG_UINT32 ui32CtrlFlags;
++
++ IMG_UINT32 ui32NumTAStatusVals;
++ IMG_UINT32 ui32Num3DStatusVals;
++
++
++ IMG_UINT32 ui32TATQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui32TATQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr;
++
++
++ IMG_UINT32 ui323DTQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui323DTQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr;
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ IMG_UINT32 ui32NumTASrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTASrcSyncs[SGX_MAX_TA_SRC_SYNCS];
++ IMG_UINT32 ui32NumTADstSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTADstSyncs[SGX_MAX_TA_DST_SYNCS];
++ IMG_UINT32 ui32Num3DSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT as3DSrcSyncs[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ IMG_UINT32 ui32NumSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency;
++
++ CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++ CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++
++} SGXMKIF_CMDTA_SHARED;
++
++#define SGXTQ_MAX_STATUS SGX_MAX_TRANSFER_STATUS_VALS + 2
++
++#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001
++#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002
++#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004
++#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008
++#if defined(SGX_FEATURE_FAST_RENDER_CONTEXT_SWITCH)
++#define SGXMKIF_TQFLAGS_CTXSWITCH 0x00000010
++#endif
++#define SGXMKIF_TQFLAGS_DUMMYTRANSFER 0x00000020
++
++typedef struct _SGXMKIF_TRANSFERCMD_SHARED_
++{
++
++
++ IMG_UINT32 ui32SrcReadOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcReadOpsCompleteDevAddr;
++
++ IMG_UINT32 ui32SrcWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcWriteOpsCompleteDevAddr;
++
++
++
++ IMG_UINT32 ui32DstReadOpPendingVal;
++ IMG_DEV_VIRTADDR sDstReadOpsCompleteDevAddr;
++
++ IMG_UINT32 ui32DstWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sDstWriteOpsCompleteDevAddr;
++
++
++ IMG_UINT32 ui32TASyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui32TASyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr;
++
++
++ IMG_UINT32 ui323DSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui323DSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr;
++
++ IMG_UINT32 ui32NumStatusVals;
++ CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS];
++} SGXMKIF_TRANSFERCMD_SHARED, *PSGXMKIF_TRANSFERCMD_SHARED;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGXMKIF_2DCMD_SHARED_ {
++
++ IMG_UINT32 ui32NumSrcSync;
++ PVRSRV_DEVICE_SYNC_OBJECT sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sDstSyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTASyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT s3DSyncData;
++} SGXMKIF_2DCMD_SHARED, *PSGXMKIF_2DCMD_SHARED;
++#endif
++
++
++typedef struct _SGXMKIF_HWDEVICE_SYNC_LIST_
++{
++ IMG_DEV_VIRTADDR sAccessDevAddr;
++ IMG_UINT32 ui32NumSyncObjects;
++
++ PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1];
++} SGXMKIF_HWDEVICE_SYNC_LIST, *PSGXMKIF_HWDEVICE_SYNC_LIST;
++
++
++#define PVRSRV_USSE_EDM_INIT_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4)
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5)
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0)
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1)
++
++#define PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_MISCINFO_READY 0x1UL
++#define PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES 0x2UL
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++#define PVRSRV_USSE_MISCINFO_MEMREAD 0x4UL
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define PVRSRV_USSE_MISCINFO_MEMREAD_FAIL 0x1UL << 31
++#endif
++#endif
++
++
++#define PVRSRV_CLEANUPCMD_RT 0x1
++#define PVRSRV_CLEANUPCMD_RC 0x2
++#define PVRSRV_CLEANUPCMD_TC 0x3
++#define PVRSRV_CLEANUPCMD_2DC 0x4
++#define PVRSRV_CLEANUPCMD_PB 0x5
++
++#define PVRSRV_POWERCMD_POWEROFF 0x1
++#define PVRSRV_POWERCMD_IDLE 0x2
++#define PVRSRV_POWERCMD_RESUME 0x3
++
++
++#if defined(SGX_FEATURE_BIF_NUM_DIRLISTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM (SGX_FEATURE_BIF_NUM_DIRLISTS - 1)
++#else
++#define SGX_BIF_DIR_LIST_INDEX_EDM (0)
++#endif
++
++#define SGXMKIF_CC_INVAL_BIF_PT 0x1
++#define SGXMKIF_CC_INVAL_BIF_PD 0x2
++#define SGXMKIF_CC_INVAL_BIF_SL 0x4
++#define SGXMKIF_CC_INVAL_DATA 0x8
++
++typedef struct _SGX_MISCINFO_STRUCT_SIZES_
++{
++#if defined (SGX_FEATURE_2D_HARDWARE)
++ IMG_UINT32 ui32Sizeof_2DCMD;
++ IMG_UINT32 ui32Sizeof_2DCMD_SHARED;
++#endif
++ IMG_UINT32 ui32Sizeof_CMDTA;
++ IMG_UINT32 ui32Sizeof_CMDTA_SHARED;
++ IMG_UINT32 ui32Sizeof_TRANSFERCMD;
++ IMG_UINT32 ui32Sizeof_TRANSFERCMD_SHARED;
++ IMG_UINT32 ui32Sizeof_3DREGISTERS;
++ IMG_UINT32 ui32Sizeof_HWPBDESC;
++ IMG_UINT32 ui32Sizeof_HWRENDERCONTEXT;
++ IMG_UINT32 ui32Sizeof_HWRENDERDETAILS;
++ IMG_UINT32 ui32Sizeof_HWRTDATA;
++ IMG_UINT32 ui32Sizeof_HWRTDATASET;
++ IMG_UINT32 ui32Sizeof_HWTRANSFERCONTEXT;
++ IMG_UINT32 ui32Sizeof_HOST_CTL;
++ IMG_UINT32 ui32Sizeof_COMMAND;
++} SGX_MISCINFO_STRUCT_SIZES;
++
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++typedef struct _PVRSRV_SGX_MISCINFO_MEMREAD
++{
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++} PVRSRV_SGX_MISCINFO_MEMREAD;
++#endif
++
++typedef struct _PVRSRV_SGX_MISCINFO_INFO
++{
++ IMG_UINT32 ui32MiscInfoFlags;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVRSRV_SGX_MISCINFO_MEMREAD sSGXMemReadData;
++#endif
++} PVRSRV_SGX_MISCINFO_INFO;
++
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++#define SGXMK_TRACE_BUFFER_SIZE 512
++#endif
++
++#define SGXMKIF_HWPERF_CB_SIZE 0x100
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct _SGXMKIF_HWPERF_CB_ENTRY_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32Ordinal;
++ IMG_UINT32 ui32TimeWraps;
++ IMG_UINT32 ui32Time;
++ IMG_UINT32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} SGXMKIF_HWPERF_CB_ENTRY;
++
++typedef struct _SGXMKIF_HWPERF_CB_
++{
++ IMG_UINT32 ui32Woff;
++ IMG_UINT32 ui32Roff;
++ IMG_UINT32 ui32OrdinalGRAPHICS;
++ IMG_UINT32 ui32OrdinalMK_EXECUTION;
++ SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE];
++} SGXMKIF_HWPERF_CB;
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/include/sgxinfo.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++#include "servicesint.h"
++#include "services.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++
++
++#define SGX_MAX_DEV_DATA 24
++#define SGX_MAX_INIT_MEM_HANDLES 16
++
++
++typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++} SGX_BRIDGE_INFO_FOR_SRVINIT;
++
++
++typedef enum _SGXMKIF_CMD_TYPE_
++{
++ SGXMKIF_CMD_TA = 0,
++ SGXMKIF_CMD_TRANSFER = 1,
++ SGXMKIF_CMD_2D = 2,
++ SGXMKIF_CMD_POWER = 3,
++ SGXMKIF_CMD_CLEANUP = 4,
++ SGXMKIF_CMD_GETMISCINFO = 5,
++ SGXMKIF_CMD_PROCESS_QUEUES = 6,
++ SGXMKIF_CMD_MAX = 7,
++
++ SGXMKIF_CMD_FORCE_I32 = -1,
++
++} SGXMKIF_CMD_TYPE;
++
++
++typedef struct _SGX_BRIDGE_INIT_INFO_
++{
++ IMG_HANDLE hKernelCCBMemInfo;
++ IMG_HANDLE hKernelCCBCtlMemInfo;
++ IMG_HANDLE hKernelCCBEventKickerMemInfo;
++ IMG_HANDLE hKernelSGXHostCtlMemInfo;
++ IMG_HANDLE hKernelSGXTA3DCtlMemInfo;
++ IMG_HANDLE hKernelSGXMiscMemInfo;
++
++ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++
++ SGX_INIT_SCRIPTS sScripts;
++
++ IMG_UINT32 ui32ClientBuildOptions;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ IMG_HANDLE hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_HANDLE hKernelHWPerfCBMemInfo;
++#endif
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ IMG_HANDLE hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ IMG_HANDLE hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ IMG_HANDLE hKernelTmpDPMStateMemInfo;
++#endif
++
++ IMG_UINT32 ui32EDMTaskReg0;
++ IMG_UINT32 ui32EDMTaskReg1;
++
++ IMG_UINT32 ui32ClkGateStatusReg;
++ IMG_UINT32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ IMG_UINT32 ui32MasterClkGateStatusReg;
++ IMG_UINT32 ui32MasterClkGateStatusMask;
++#endif
++
++ IMG_UINT32 ui32CacheControl;
++
++ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA];
++ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++} SGX_BRIDGE_INIT_INFO;
++
++
++typedef struct _SGX_DEVICE_SYNC_LIST_
++{
++ PSGXMKIF_HWDEVICE_SYNC_LIST psHWDeviceSyncList;
++
++ IMG_HANDLE hKernelHWSyncListMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo;
++
++ volatile IMG_UINT32 *pui32Lock;
++
++ struct _SGX_DEVICE_SYNC_LIST_ *psNext;
++
++
++ IMG_UINT32 ui32NumSyncObjects;
++ IMG_HANDLE ahSyncHandles[1];
++} SGX_DEVICE_SYNC_LIST, *PSGX_DEVICE_SYNC_LIST;
++
++
++typedef struct _SGX_INTERNEL_STATUS_UPDATE_
++{
++ CTL_STATUS sCtlStatus;
++ IMG_HANDLE hKernelMemInfo;
++
++ IMG_UINT32 ui32LastStatusUpdateDumpVal;
++} SGX_INTERNEL_STATUS_UPDATE;
++
++
++typedef struct _SGX_CCB_KICK_
++{
++ SGXMKIF_COMMAND sCommand;
++ IMG_HANDLE hCCBKernelMemInfo;
++
++ IMG_UINT32 ui32NumDstSyncObjects;
++ IMG_HANDLE hKernelHWSyncListMemInfo;
++
++
++ IMG_HANDLE *pahDstSyncHandles;
++
++ IMG_UINT32 ui32NumTAStatusVals;
++ IMG_UINT32 ui32Num3DStatusVals;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ SGX_INTERNEL_STATUS_UPDATE asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
++ SGX_INTERNEL_STATUS_UPDATE as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
++#else
++ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++#endif
++
++ IMG_BOOL bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++ IMG_BOOL bTerminateOrAbort;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_BOOL bKickRender;
++#endif
++
++
++ IMG_UINT32 ui32CCBOffset;
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ IMG_UINT32 ui32NumTASrcSyncs;
++ IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
++ IMG_UINT32 ui32NumTADstSyncs;
++ IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
++ IMG_UINT32 ui32Num3DSrcSyncs;
++ IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ IMG_UINT32 ui32NumSrcSyncs;
++ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ IMG_BOOL bTADependency;
++ IMG_HANDLE hTA3DSyncInfo;
++
++ IMG_HANDLE hTASyncInfo;
++ IMG_HANDLE h3DSyncInfo;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++#if defined(NO_HARDWARE)
++ IMG_UINT32 ui32WriteOpsPendingVal;
++#endif
++} SGX_CCB_KICK;
++
++
++#define SGX_KERNEL_USE_CODE_BASE_INDEX 15
++
++
++typedef struct _SGX_CLIENT_INFO_
++{
++ IMG_UINT32 ui32ProcessID;
++ IMG_VOID *pvProcess;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++ IMG_UINT32 asDevData[SGX_MAX_DEV_DATA];
++
++} SGX_CLIENT_INFO;
++
++typedef struct _SGX_INTERNAL_DEVINFO_
++{
++ IMG_UINT32 ui32Flags;
++ IMG_HANDLE hHostCtlKernelMemInfoHandle;
++ IMG_BOOL bForcePTOff;
++} SGX_INTERNAL_DEVINFO;
++
++
++#if defined(TRANSFER_QUEUE)
++typedef struct _PVRSRV_TRANSFER_SGX_KICK_
++{
++ IMG_HANDLE hCCBMemInfo;
++ IMG_UINT32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++
++ IMG_HANDLE hTASyncInfo;
++ IMG_HANDLE h3DSyncInfo;
++
++ IMG_UINT32 ui32NumSrcSync;
++ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ IMG_UINT32 ui32NumDstSync;
++ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ IMG_UINT32 ui32Flags;
++
++ IMG_UINT32 ui32PDumpFlags;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _PVRSRV_2D_SGX_KICK_
++{
++ IMG_HANDLE hCCBMemInfo;
++ IMG_UINT32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++
++ IMG_UINT32 ui32NumSrcSync;
++ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ IMG_HANDLE hDstSyncInfo;
++
++
++ IMG_HANDLE hTASyncInfo;
++
++
++ IMG_HANDLE h3DSyncInfo;
++
++ IMG_UINT32 ui32PDumpFlags;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
++#endif
++#endif
++
++#define PVRSRV_SGX_DIFF_NUM_COUNTERS 9
++
++typedef struct _PVRSRV_SGXDEV_DIFF_INFO_
++{
++ IMG_UINT32 aui32Counters[PVRSRV_SGX_DIFF_NUM_COUNTERS];
++ IMG_UINT32 ui32Time[3];
++ IMG_UINT32 ui32Marker[2];
++} PVRSRV_SGXDEV_DIFF_INFO, *PPVRSRV_SGXDEV_DIFF_INFO;
++
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+@@ -0,0 +1,3409 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "msvdx_bridge.h"
++#endif
++#include "perproc.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#include "pdump_km.h"
++#include "syscommon.h"
++
++#include "bridged_pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "bridged_sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "bridged_vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "bridged_msvdx_bridge.h"
++#endif
++
++#include "env_data.h"
++
++#if defined (__linux__)
++#include "mmap.h"
++#endif
++
++#include "srvkm.h"
++
++PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++#if defined(PVR_SECURE_HANDLES)
++static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap;
++#else
++static IMG_BOOL *pbSharedDeviceMemHeap = (IMG_BOOL*)IMG_NULL;
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
++ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
++ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#endif
++
++
++static IMG_INT
++PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ psEnumDeviceOUT->eError =
++ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++ psEnumDeviceOUT->asDeviceIdentifier);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
++ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
++ psAcquireDevInfoIN->eDeviceType,
++ &hDevCookieInt);
++ if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psAcquireDevInfoOUT->hDevCookie,
++ hDevCookieInt,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_UINT32 i;
++ IMG_BOOL bCreated;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1);
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psCreateDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
++ psPerProc,
++ &hDevMemContextInt,
++ &psCreateDevMemContextOUT->ui32ClientHeapCount,
++ &psCreateDevMemContextOUT->sHeapInfo[0],
++ &bCreated,
++ pbSharedDeviceMemHeap);
++
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ if(bCreated)
++ {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ }
++ else
++ {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if(abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else
++ {
++
++ if(bCreated)
++ {
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDevMemContextOUT->hDevMemContext);
++ }
++ else
++ {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++ }
++#endif
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_BOOL bDestroyed;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psDestroyDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(bDestroyed)
++ {
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ }
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetDevMemHeapInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetDevMemHeapInfoIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
++ hDevMemContextInt,
++ &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
++ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
++ pbSharedDeviceMemHeap);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if(abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else
++ {
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
++IMG_INT
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
++static IMG_INT
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemHeapInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2);
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psAllocDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++ psAllocDeviceMemIN->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVAllocDeviceMemKM(hDevCookieInt,
++ psPerProc,
++ hDevMemHeapInt,
++ psAllocDeviceMemIN->ui32Attribs,
++ psAllocDeviceMemIN->ui32Size,
++ psAllocDeviceMemIN->ui32Alignment,
++ &psMemInfo,
++ "" );
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++#if defined (__linux__)
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
++#else
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
++#endif
++ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
++ 0,
++ sizeof (PVRSRV_CLIENT_SYNC_INFO));
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
++ psAllocDeviceMemOUT->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++ psAllocDeviceMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psAllocDeviceMemOUT->sClientSyncInfo;
++
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#endif
++
++static IMG_INT
++PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_VOID *pvKernelMemInfo;
++
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psFreeDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EXPORT_DEVICEMEM);
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psExportDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_PVOID *)&psKernelMemInfo,
++ psExportDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psExportDeviceMemOUT->eError == PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
++ return 0;
++ }
++
++
++ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hDstDevMemHeap = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEV_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2);
++
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_VOID**)&psSrcKernelMemInfo,
++ psMapDevMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDstDevMemHeap,
++ psMapDevMemIN->hDstDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
++ psSrcKernelMemInfo,
++ hDstDevMemHeap,
++ &psDstKernelMemInfo);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo,
++ 0,
++ sizeof(psMapDevMemOUT->sDstClientMemInfo));
++ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo,
++ 0,
++ sizeof(psMapDevMemOUT->sDstClientSyncInfo));
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
++ psDstKernelMemInfo->pvLinAddrKM;
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
++ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr;
++ psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags;
++ psMapDevMemOUT->sDstClientMemInfo.ui32AllocSize = psDstKernelMemInfo->ui32AllocSize;
++ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle;
++
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
++ psDstKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++ psMapDevMemOUT->psDstKernelSyncInfo = IMG_NULL;
++
++
++ if(psDstKernelMemInfo->psKernelSyncInfo)
++ {
++ psMapDevMemOUT->psDstKernelSyncInfo = psDstKernelMemInfo->psKernelSyncInfo;
++
++ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo,
++ psDstKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID**)&psKernelMemInfo,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++
++static IMG_INT
++PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_HANDLE hOSMapInfo;
++ IMG_HANDLE hDeviceClassBufferInt;
++ IMG_HANDLE hDevMemContextInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2);
++
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, &hDeviceClassBufferInt,
++ &eHandleType,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psMapDevClassMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ switch(eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVMapDeviceClassMemoryKM(psPerProc,
++ hDevMemContextInt,
++ hDeviceClassBufferInt,
++ &psMemInfo,
++ &hOSMapInfo);
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psMapDevClassMemOUT->sClientMemInfo));
++ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
++ 0,
++ sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++ psMapDevClassMemOUT->psKernelSyncInfo = IMG_NULL;
++
++
++ if(psMemInfo->psKernelSyncInfo)
++ {
++ psMapDevClassMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW)
++IMG_INT
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
++static IMG_INT
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_UINT32 ui32PageTableSize = 0;
++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2);
++
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psWrapExtMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psWrapExtMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psWrapExtMemIN->ui32NumPageTableEntries)
++ {
++ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++ * sizeof(IMG_SYS_PHYADDR);
++
++ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (IMG_VOID **)&psSysPAddr, 0,
++ "Page Table"));
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSysPAddr,
++ psWrapExtMemIN->psSysPAddr,
++ ui32PageTableSize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
++
++ return -EFAULT;
++ }
++ }
++
++ psWrapExtMemOUT->eError =
++ PVRSRVWrapExtMemoryKM(hDevCookieInt,
++ psPerProc,
++ hDevMemContextInt,
++ psWrapExtMemIN->ui32ByteSize,
++ psWrapExtMemIN->ui32PageOffset,
++ psWrapExtMemIN->bPhysContig,
++ psSysPAddr,
++ psWrapExtMemIN->pvLinAddr,
++ psWrapExtMemIN->ui32Flags,
++ &psMemInfo);
++ if(psWrapExtMemIN->ui32NumPageTableEntries)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (IMG_VOID *)psSysPAddr, 0);
++
++ }
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psWrapExtMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc);
++
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psGetFreeDeviceMemOUT->eError =
++ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++ &psGetFreeDeviceMemOUT->ui32Total,
++ &psGetFreeDeviceMemOUT->ui32Free,
++ &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++ return 0;
++}
++
++static IMG_INT
++PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapOSMemHandleToMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->ui32MMapOffset,
++ &psMMapDataOUT->ui32ByteOffset,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++
++static IMG_INT
++PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapReleaseMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->bMUnmap,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++
++#ifdef PDUMP
++static IMG_INT
++PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PDumpCommentBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++ psPDumpCommentIN->ui32Flags);
++ return 0;
++}
++
++static IMG_INT
++PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpRegDumpIN->sHWReg.ui32RegVal,
++ psPDumpRegDumpIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,
++ psPDumpRegPolIN->sHWReg.ui32RegVal,
++ psPDumpRegPolIN->ui32Mask,
++ psPDumpRegPolIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemPolIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
++ psPDumpMemPolIN->ui32Offset,
++ psPDumpMemPolIN->ui32Value,
++ psPDumpMemPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ psPDumpMemPolIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemDumpIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpMemDumpIN->pvAltLinAddr,
++ psPDumpMemDumpIN->pvLinAddr,
++ pvMemInfo,
++ psPDumpMemDumpIN->ui32Offset,
++ psPDumpMemDumpIN->ui32Bytes,
++ psPDumpMemDumpIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError =
++ PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++ psPDumpBitmapIN->ui32FileOffset,
++ psPDumpBitmapIN->ui32Width,
++ psPDumpBitmapIN->ui32Height,
++ psPDumpBitmapIN->ui32StrideInBytes,
++ psPDumpBitmapIN->sDevBaseAddr,
++ psPDumpBitmapIN->ui32Size,
++ psPDumpBitmapIN->ePixelFormat,
++ psPDumpBitmapIN->eMemFormat,
++ psPDumpBitmapIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++ psPDumpReadRegIN->ui32FileOffset,
++ psPDumpReadRegIN->ui32Address,
++ psPDumpReadRegIN->ui32Size,
++ psPDumpReadRegIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32PDumpFlags;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ ui32PDumpFlags = 0;
++ if(psPDumpDriverInfoIN->bContinuous)
++ {
++ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++ }
++ psRetOUT->eError =
++ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++ ui32PDumpFlags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++ IMG_VOID *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncDumpIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpSyncDumpIN->pvAltLinAddr,
++ IMG_NULL,
++ ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++ psPDumpSyncDumpIN->ui32Offset,
++ ui32Bytes,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32Offset;
++ IMG_VOID *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncPolIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psPDumpSyncPolIN->bIsRead)
++ {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ }
++ else
++ {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++ ui32Offset,
++ psPDumpSyncPolIN->ui32Value,
++ psPDumpSyncPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpPDRegBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG *psPDumpPDRegDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpPDRegDumpIN->sHWReg.ui32RegVal,
++ PDUMP_PD_UNIQUETAG);
++
++ psRetOUT->eError = PVRSRV_OK;
++ return 0;
++}
++
++static IMG_INT
++PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++ psPDumpCycleCountRegReadIN->bLastFrame);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++ psPDumpPDDevPAddrIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++ psPDumpPDDevPAddrIN->ui32Offset,
++ psPDumpPDDevPAddrIN->sPDDevPAddr,
++ MAKEUNIQUETAG(pvMemInfo),
++ PDUMP_PD_UNIQUETAG);
++ return 0;
++}
++
++static IMG_INT
++PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStartInitPhaseKM();
++
++ return 0;
++}
++
++static IMG_INT
++PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStopInitPhaseKM();
++
++ return 0;
++}
++
++#endif
++
++
++static IMG_INT
++PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++
++ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
++ &psGetMiscInfoIN->sMiscInfo,
++ sizeof(PVRSRV_MISC_INFO));
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0))
++ {
++
++ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0))
++ {
++
++ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0,
++ "Output string buffer"));
++
++ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++
++
++ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = IMG_NULL;
++
++
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
++
++ if(eError != PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
++ return -EFAULT;
++ }
++ }
++ else
++ {
++ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++ }
++
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++ {
++ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++ psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle)
++ {
++
++ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
++ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++
++ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++ psConnectServicesOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
++ PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++ psEnumDispClassOUT->eError =
++ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++ &psEnumDispClassOUT->ui32NumDevices,
++ &psEnumDispClassOUT->ui32DevID[0]);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1);
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenDispClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVOpenDCDeviceKM(psPerProc,
++ psOpenDispClassDeviceIN->ui32DeviceID,
++ hDevCookieInt,
++ &hDispClassInfoInt);
++
++ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenDispClassDeviceOUT->hDeviceKM,
++ hDispClassInfoInt,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassFormatsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++ &psEnumDispClassFormatsOUT->ui32Count,
++ psEnumDispClassFormatsOUT->asFormat);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassDimsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
++ &psEnumDispClassDimsIN->sFormat,
++ &psEnumDispClassDimsOUT->ui32Count,
++ psEnumDispClassDimsOUT->asDim);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hBufferInt;
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1);
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psGetDispClassSysBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt,
++ &hBufferInt);
++
++ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetDispClassSysBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassSysBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVGetDCInfoKM(pvDispClassInfo,
++ &psGetDispClassInfoOUT->sDisplayInfo);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_HANDLE hSwapChainInt;
++ IMG_UINT32 ui32SwapChainID;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1);
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psCreateDispClassSwapChainIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
++ psCreateDispClassSwapChainIN->ui32Flags,
++ &psCreateDispClassSwapChainIN->sDstSurfAttrib,
++ &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
++ psCreateDispClassSwapChainIN->ui32BufferCount,
++ psCreateDispClassSwapChainIN->ui32OEMFlags,
++ &hSwapChainInt,
++ &ui32SwapChainID);
++
++ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psCreateDispClassSwapChainOUT->hSwapChain,
++ hSwapChainInt,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDispClassSwapChainIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDCSwapChainKM(pvSwapChain);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassDstRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassDstRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstRectKM(pvDispClassInfo,
++ pvSwapChain,
++ &psSetDispClassDstRectIN->sRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassSrcRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassSrcRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++ pvSwapChain,
++ &psSetDispClassSrcRectIN->sRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassBuffersIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psGetDispClassBuffersIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVGetDCBuffersKM(pvDispClassInfo,
++ pvSwapChain,
++ &psGetDispClassBuffersOUT->ui32BufferCount,
++ psGetDispClassBuffersOUT->ahBuffer);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
++ {
++ IMG_HANDLE hBufferExt;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hBufferExt,
++ psGetDispClassBuffersOUT->ahBuffer[i],
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassBuffersIN->hSwapChain);
++
++ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChainBuf;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChainBuf,
++ psSwapDispClassBufferIN->hBuffer,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ psSwapDispClassBufferIN->hDeviceKM);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
++ pvSwapChainBuf,
++ psSwapDispClassBufferIN->ui32SwapInterval,
++ psSwapDispClassBufferIN->hPrivateTag,
++ psSwapDispClassBufferIN->ui32ClipRectCount,
++ psSwapDispClassBufferIN->sClipRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassSystemIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSwapDispClassSystemIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ psSwapDispClassSystemIN->hDeviceKM);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ psRetOUT->eError =
++ PVRSRVSwapToDCSystemKM(pvDispClassInfo,
++ pvSwapChain);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1);
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenBufferClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVOpenBCDeviceKM(psPerProc,
++ psOpenBufferClassDeviceIN->ui32DeviceID,
++ hDevCookieInt,
++ &hBufClassInfo);
++ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenBufferClassDeviceOUT->hDeviceKM,
++ hBufClassInfo,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVGetBCInfoKM(pvBufClassInfo,
++ &psGetBufferClassInfoOUT->sBufferInfo);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++ IMG_HANDLE hBufferInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1);
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVGetBCBufferKM(pvBufClassInfo,
++ psGetBufferClassBufferIN->ui32BufferIndex,
++ &hBufferInt);
++
++ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetBufferClassBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetBufferClassBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1);
++
++ psAllocSharedSysMemOUT->eError =
++ PVRSRVAllocSharedSysMemoryKM(psPerProc,
++ psAllocSharedSysMemIN->ui32Flags,
++ psAllocSharedSysMemIN->ui32Size,
++ &psKernelMemInfo);
++ if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psKernelMemInfo,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ IMG_HANDLE hParent;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2);
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ (IMG_VOID **)&psKernelMemInfo,
++ &eHandleType,
++ psMapMemInfoMemIN->hKernelMemInfo);
++ if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ switch (eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVGetParentHandle(psPerProc->psHandleBase,
++ &hParent,
++ psMapMemInfoMemIN->hKernelMemInfo,
++ eHandleType);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ if (hParent == IMG_NULL)
++ {
++ hParent = psMapMemInfoMemIN->hKernelMemInfo;
++ }
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++ psKernelMemInfo->sDevVAddr;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ hParent);
++
++ if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
++ 0,
++ sizeof (PVRSRV_CLIENT_SYNC_INFO));
++ psMapMemInfoMemOUT->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++ psKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
++ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++
++static IMG_INT
++MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
++ PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevMemContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++ psGetMmuPDDevPAddrOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetMmuPDDevPAddrIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if(psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++ BM_GetDeviceNode(hDevMemContextInt)->pfnMMUGetPDDevPAddr(BM_GetMMUContextFromMemContext(hDevMemContextInt));
++ if(psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
++ {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++ }
++ else
++ {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++ }
++ return 0;
++}
++
++
++
++IMG_INT
++DummyBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if !defined(DEBUG)
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++#endif
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID));
++#endif
++ return -ENOTTY;
++}
++
++
++IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++ const IMG_CHAR *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const IMG_CHAR *pszFunctionName)
++{
++ static IMG_UINT32 ui32PrevIndex = ~0UL;
++#if !defined(DEBUG)
++ PVR_UNREFERENCED_PARAMETER(pszIOCName);
++#endif
++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
++ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
++#endif
++
++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
++#endif
++
++
++ if(g_BridgeDispatchTable[ui32Index].pfFunction)
++ {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
++ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)",
++ __FUNCTION__, pszIOCName, ui32Index));
++#endif
++ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++
++ if((ui32PrevIndex != ~0UL) &&
++ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
++ (ui32Index <= ui32PrevIndex)))
++ {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)",
++ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++ ui32Index, pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
++ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
++#endif
++ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++ ui32PrevIndex = ui32Index;
++}
++
++static IMG_INT
++PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ if(!OSProcHasPrivSrvInit() || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++#if defined (__linux__)
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
++#endif
++ psPerProc->bInitProcess = IMG_TRUE;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psPerProc->bInitProcess = IMG_FALSE;
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
++
++ psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++ PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL,
++ (((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)))
++ ? IMG_TRUE : IMG_FALSE);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectWaitIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
++ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1);
++
++ psEventObjectOpenOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectOpenIN->sEventObject.hOSEventKM,
++ psEventObjectOpenIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEventObjectOpenOUT->eError = OSEventObjectOpen(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
++
++ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psEventObjectOpenOUT->hOSEvent,
++ psEventObjectOpenOUT->hOSEvent,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectCloseIN->sEventObject.hOSEventKM,
++ psEventObjectCloseIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectCloseIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++ return 0;
++}
++
++
++typedef struct _MODIFY_SYNC_OP_INFO
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++ IMG_UINT32 ui32ReadOpsPendingSnapShot;
++ IMG_UINT32 ui32WriteOpsPendingSnapShot;
++} MODIFY_SYNC_OP_INFO;
++
++
++static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if (!pvParam)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
++ psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot == psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
++ && (psModSyncOpInfo->ui32ReadOpsPendingSnapShot == psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
++ {
++ goto OpFlushedComplete;
++ }
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush"));
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush timed out"));
++
++ return PVRSRV_ERROR_TIMEOUT;
++
++OpFlushedComplete:
++
++
++ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
++ }
++
++
++ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
++
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++ return PVRSRV_OK;
++}
++
++
++static IMG_INT
++PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
++ PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hKernelSyncInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hKernelSyncInfo;
++
++ if(psKernelSyncInfo->hResItem != IMG_NULL)
++ {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
++ return 0;
++ }
++
++ ASSIGN_AND_EXIT_ON_ERROR(psModifySyncOpsOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MODIFY_SYNC_OP_INFO),
++ (IMG_VOID **)&psModSyncOpInfo, 0,
++ "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
++
++
++ psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
++ psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
++ psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++
++
++ psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ psKernelSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++ psModSyncOpInfo,
++ 0,
++ ModifyCompleteSyncOpsCallBack);
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
++ PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID**)&psKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ if(psKernelSyncInfo->hResItem == IMG_NULL)
++ {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++
++
++
++
++
++
++
++
++
++ eError = ResManFreeResByPtr(psKernelSyncInfo->hResItem);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: ResManFreeResByPtr failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo->hResItem = IMG_NULL;
++
++ return 0;
++}
++
++
++PVRSRV_ERROR
++CommonBridgeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
++
++
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
++#endif
++
++
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW);
++#endif
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, MMU_GetPDDevPAddrBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, PVRSRVInitSrvConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, PVRSRVInitSrvDisconnectBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, PVRSRVEventObjectOpenBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, PVRSRVEventObjectCloseBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW);
++
++#if defined (SUPPORT_SGX)
++ SetSGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_VGX)
++ SetVGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_MSVDX)
++ SetMSVDXDispatchTableEntry();
++#endif
++
++
++
++
++ for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
++ {
++ if(!g_BridgeDispatchTable[i].pfFunction)
++ {
++ g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
++ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++ g_BridgeDispatchTable[i].ui32CallCount = 0;
++ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
++{
++
++ IMG_VOID * psBridgeIn;
++ IMG_VOID * psBridgeOut;
++ BridgeWrapperFunction pfBridgeHandler;
++ IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
++ IMG_INT err = -EFAULT;
++
++#if defined(DEBUG_TRACE_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: %s",
++ __FUNCTION__,
++ g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
++ g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++
++ if(!psPerProc->bInitProcess)
++ {
++ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
++ {
++ if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ else
++ {
++ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ else
++ {
++
++ switch(ui32BridgeID)
++ {
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ }
++ }
++
++
++
++#if defined(__linux__)
++ {
++
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
++ psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++ if(psBridgePackageKM->ui32InBufferSize > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgeIn,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize)
++ != PVRSRV_OK)
++ {
++ goto return_fault;
++ }
++ }
++ }
++#else
++ psBridgeIn = psBridgePackageKM->pvParamIn;
++ psBridgeOut = psBridgePackageKM->pvParamOut;
++#endif
++
++ if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out if range!",
++ __FUNCTION__, ui32BridgeID));
++ goto return_fault;
++ }
++ pfBridgeHandler =
++ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
++ err = pfBridgeHandler(ui32BridgeID,
++ psBridgeIn,
++ psBridgeOut,
++ psPerProc);
++ if(err < 0)
++ {
++ goto return_fault;
++ }
++
++
++#if defined(__linux__)
++
++ if(CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgePackageKM->pvParamOut,
++ psBridgeOut,
++ psBridgePackageKM->ui32OutBufferSize)
++ != PVRSRV_OK)
++ {
++ goto return_fault;
++ }
++#endif
++
++ err = 0;
++return_fault:
++ ReleaseHandleBatch(psPerProc);
++ return err;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+@@ -0,0 +1,231 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X)
++#else
++#define PVRSRV_GET_BRIDGE_ID(X) (X - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
++#endif
++
++#ifndef ENOMEM
++#define ENOMEM 12
++#endif
++#ifndef EFAULT
++#define EFAULT 14
++#endif
++#ifndef ENOTTY
++#define ENOTTY 25
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size);
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size);
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++
++#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \
++ do \
++ { \
++ (error) = (src); \
++ if ((error) != PVRSRV_OK) \
++ { \
++ return (res); \
++ } \
++ } while (error != PVRSRV_OK)
++
++#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \
++ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0)
++
++#if defined (PVR_SECURE_HANDLES)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NewHandleBatch)
++#endif
++static INLINE PVRSRV_ERROR
++NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(!psPerProc->bHandlesBatched);
++
++ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize);
++
++ if (eError == PVRSRV_OK)
++ {
++ psPerProc->bHandlesBatched = IMG_TRUE;
++ }
++
++ return eError;
++}
++
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(CommitHandleBatch)
++#endif
++static INLINE PVRSRV_ERROR
++CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_ASSERT(psPerProc->bHandlesBatched);
++
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase);
++}
++
++
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ReleaseHandleBatch)
++#endif
++static INLINE IMG_VOID
++ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->bHandlesBatched)
++ {
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ PVRSRVReleaseHandleBatch(psPerProc->psHandleBase);
++ }
++}
++#else
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize)
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc)
++#define ReleaseHandleBatch(psPerProc)
++#endif
++
++IMG_INT
++DummyBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++{
++ BridgeWrapperFunction pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ const IMG_CHAR *pszIOCName;
++ const IMG_CHAR *pszFunctionName;
++ IMG_UINT32 ui32CallCount;
++ IMG_UINT32 ui32CopyFromUserTotalBytes;
++ IMG_UINT32 ui32CopyToUserTotalBytes;
++#endif
++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
++
++#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX)
++ #if defined(SUPPORT_VGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD
++ #endif
++#else
++ #if defined(SUPPORT_SGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD
++ #endif
++#endif
++
++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++ const IMG_CHAR *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const IMG_CHAR *pszFunctionName);
++
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
++
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
++{
++ IMG_UINT32 ui32IOCTLCount;
++ IMG_UINT32 ui32TotalCopyFromUserBytes;
++ IMG_UINT32 ui32TotalCopyToUserBytes;
++}PVRSRV_BRIDGE_GLOBAL_STATS;
++
++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++
++PVRSRV_ERROR CommonBridgeInit(IMG_VOID);
++
++IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+@@ -0,0 +1,85 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "servicesint.h"
++#include "bridged_support.h"
++
++
++PVRSRV_ERROR
++PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle)
++{
++ IMG_HANDLE hMHandleInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt,
++ &eHandleType,
++ hMHandle);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ switch(eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++ {
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hMHandleInt;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SYNC_INFO:
++ {
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = psSyncInfo->psSyncDataMemInfoKM;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SOC_TIMER:
++ {
++ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
++ break;
++ }
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
++ break;
++#endif
++ default:
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ return PVRSRV_OK;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SUPPORT_H__
++#define __BRIDGED_SUPPORT_H__
++
++#include "handle.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+@@ -0,0 +1,2511 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++
++#if defined(SUPPORT_SGX)
++
++#include "services.h"
++#include "pvr_debug.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "power.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++
++#if defined(SUPPORT_MSVDX)
++ #include "msvdx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++#include "bridged_sgx_bridge.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++static IMG_INT
++SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++ PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++ psGetClientInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psGetClientInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetClientInfoOUT->eError =
++ SGXGetClientInfoKM(hDevCookieInt,
++ &psGetClientInfoOUT->sClientInfo);
++ return 0;
++}
++
++static IMG_INT
++SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psReleaseClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++ psDevInfo->ui32ClientRefCount--;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++ PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXGetInternalDevInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXGetInternalDevInfoOUT->eError =
++ SGXGetInternalDevInfoKM(hDevCookieInt,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXDoKickBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 i;
++ IMG_INT ret = 0;
++ IMG_UINT32 ui32NumDstSyncs;
++ IMG_HANDLE *phKernelSyncInfoHandles = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psDoKickIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTASyncInfo,
++ psDoKickIN->sCCBKick.hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.h3DSyncInfo,
++ psDoKickIN->sCCBKick.h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ if (psDoKickIN->sCCBKick.ui32NumTASrcSyncs > SGX_MAX_TA_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTASrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumTADstSyncs > SGX_MAX_TA_DST_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTADstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DSrcSyncs > SGX_MAX_3D_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#else
++
++ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects;
++
++ if(ui32NumDstSyncs > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:"
++ " Invalid pasDstSyncHandles pointer", __FUNCTION__));
++ return -EFAULT;
++ }
++
++ psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE),
++ (IMG_VOID **)&phKernelSyncInfoHandles,
++ 0,
++ "Array of Synchronization Info Handles");
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelSyncInfoHandles,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE)) != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++
++ psDoKickIN->sCCBKick.pahDstSyncHandles = phKernelSyncInfoHandles;
++
++ for( i = 0; i < ui32NumDstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXDoKickKM(hDevCookieInt,
++ &psDoKickIN->sCCBKick);
++
++PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT:
++
++ if(phKernelSyncInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE),
++ (IMG_VOID *)phKernelSyncInfoHandles,
++ 0);
++
++ }
++
++ return ret;
++}
++
++
++static IMG_INT
++SGXScheduleProcessQueuesBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psScheduleProcQIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);
++
++ return 0;
++}
++
++
++#if defined(TRANSFER_QUEUE)
++static IMG_INT
++SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_TRANSFER_SGX_KICK *psKick;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psKick = &psSubmitTransferIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmitTransferIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahDstSyncInfo[i],
++ psKick->ahDstSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_INT
++SGXSubmit2DBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_2D_SGX_KICK *psKick;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmit2DIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psKick = &psSubmit2DIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hDstSyncInfo,
++ psKick->hDstSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXSubmit2DKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++#endif
++#endif
++
++
++static IMG_INT
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt = 0;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ SGX_MISC_INFO sMiscInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXGetMiscInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++
++ if (psSGXGetMiscInfoIN->psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD)
++ {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevMemContextInt,
++ psSGXGetMiscInfoIN->psMiscInfo->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ psDeviceNode = hDevCookieInt;
++ PVR_ASSERT(psDeviceNode != IMG_NULL);
++ if (psDeviceNode == IMG_NULL)
++ {
++ return -EFAULT;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++
++ psRetOUT->eError = CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ &sMiscInfo,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++
++#ifdef SUPPORT_SGX_HWPERF
++ if (sMiscInfo.eRequest == SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB)
++ {
++
++ IMG_VOID * pAllocated;
++ IMG_HANDLE hAllocatedHandle;
++ IMG_VOID * psTmpUserData;
++ IMG_UINT32 allocatedSize;
++
++ allocatedSize = (IMG_UINT32)(sMiscInfo.uData.sRetrieveCB.ui32ArraySize * sizeof(PVRSRV_SGX_HWPERF_CBDATA));
++
++ ASSIGN_AND_EXIT_ON_ERROR(psRetOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ &pAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++
++ psTmpUserData = sMiscInfo.uData.sRetrieveCB.psHWPerfData;
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = pAllocated;
++
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, 0);
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ return 0;
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psTmpUserData,
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData,
++ allocatedSize);
++
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = psTmpUserData;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ }
++ else
++#endif
++ {
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ &sMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ return 0;
++}
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++static IMG_INT
++SGXReadDiffCountersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS);
++
++ psSGXReadDiffCountersOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadDiffCountersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadDiffCountersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXReadDiffCountersOUT->eError = SGXReadDiffCountersKM(hDevCookieInt,
++ psSGXReadDiffCountersIN->ui32Reg,
++ &psSGXReadDiffCountersOUT->ui32Old,
++ psSGXReadDiffCountersIN->bNew,
++ psSGXReadDiffCountersIN->ui32New,
++ psSGXReadDiffCountersIN->ui32NewReset,
++ psSGXReadDiffCountersIN->ui32CountersReg,
++ psSGXReadDiffCountersIN->ui32Reg2,
++ &psSGXReadDiffCountersOUT->bActive,
++ &psSGXReadDiffCountersOUT->sDiffs);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXReadHWPerfCBBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated;
++ IMG_HANDLE hAllocatedHandle;
++ IMG_UINT32 ui32AllocatedSize;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);
++
++ psSGXReadHWPerfCBOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
++ sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]);
++ ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ (IMG_VOID **)&psAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++ psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
++ psSGXReadHWPerfCBIN->ui32ArraySize,
++ psAllocated,
++ &psSGXReadHWPerfCBOUT->ui32DataCount,
++ &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
++ &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
++ if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
++ {
++ psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXReadHWPerfCBIN->psHWPerfCBData,
++ psAllocated,
++ ui32AllocatedSize);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ psAllocated,
++ hAllocatedHandle);
++
++
++ return 0;
++}
++#endif
++
++
++static IMG_INT
++SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_ERROR eError;
++ IMG_BOOL bDissociateFailed = IMG_FALSE;
++ IMG_BOOL bLookupFailed = IMG_FALSE;
++ IMG_BOOL bReleaseFailed = IMG_FALSE;
++ IMG_HANDLE hDummy;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXDevInitPart2IN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ {
++ continue;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++ if (bLookupFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ #if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (*phHandle == IMG_NULL)
++ continue;
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ phHandle,
++ *phHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++ if (bReleaseFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ continue;
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++
++ if(bDissociateFailed)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ continue;
++
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle);
++
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
++
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ DevInitSGXPart2KM(psPerProc,
++ hDevCookieInt,
++ &psSGXDevInitPart2IN->sInitInfo);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWRenderContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWRenderContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWRenderContextInt =
++ SGXRegisterHWRenderContextKM(hDevCookieInt,
++ &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr,
++ psPerProc);
++
++ if (hHWRenderContextInt == IMG_NULL)
++ {
++ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWRenderContextOUT->hHWRenderContext,
++ hHWRenderContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWRenderContextInt,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWTransferContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWTransferContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWTransferContextInt =
++ SGXRegisterHWTransferContextKM(hDevCookieInt,
++ &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr,
++ psPerProc);
++
++ if (hHWTransferContextInt == IMG_NULL)
++ {
++ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWTransferContextOUT->hHWTransferContext,
++ hHWTransferContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWTransferContextInt,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_INT
++SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHW2DContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHW2DContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHW2DContextInt =
++ SGXRegisterHW2DContextKM(hDevCookieInt,
++ &psSGXRegHW2DContextIN->sHW2DContextDevVAddr,
++ psPerProc);
++
++ if (hHW2DContextInt == IMG_NULL)
++ {
++ psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHW2DContextOUT->hHW2DContext,
++ hHW2DContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHW2DContextInt,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++
++ return 0;
++}
++#endif
++
++static IMG_INT
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFlushHWRenderTargetIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_VOID *pvSyncInfo;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ ps2DQueryBltsCompleteIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ ps2DQueryBltsCompleteIN->hKernSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ psRetOUT->eError =
++ SGX2DQueryBlitsCompleteKM(psDevInfo,
++ (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++ ps2DQueryBltsCompleteIN->bWaitForComplete);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL;
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++ IMG_UINT32 i;
++ IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4);
++
++ psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++ psSGXFindSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFindSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ psSGXFindSharedPBDescOUT->eError =
++ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSGXFindSharedPBDescIN->bLockOnFailure,
++ psSGXFindSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ &psSharedPBDescKernelMemInfo,
++ &psHWPBDescKernelMemInfo,
++ &psBlockKernelMemInfo,
++ &psHWBlockKernelMemInfo,
++ &ppsSharedPBDescSubKernelMemInfos,
++ &ui32SharedPBDescSubKernelMemInfosCount);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++
++ if(hSharedPBDesc == IMG_NULL)
++ {
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
++ psSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
++ psHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
++ psBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle,
++ psHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
++ psSGXFindSharedPBDescOUT;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
++ ppsSharedPBDescSubKernelMemInfos[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
++ }
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++ if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ IMG_NULL);
++ }
++
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ if(hSharedPBDesc != IMG_NULL)
++ {
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++ }
++ }
++ else
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc);
++ }
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hSharedPBDesc;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hSharedPBDesc,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ IMG_UINT32 ui32KernelMemInfoHandlesCount =
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++ IMG_INT ret = 0;
++ IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1);
++
++ psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++ PVR_ASSERT(ui32KernelMemInfoHandlesCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXAddSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psSharedPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psHWPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++ " Invalid phKernelMemInfos pointer", __FUNCTION__));
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++ (IMG_VOID **)&phKernelMemInfoHandles,
++ 0,
++ "Array of Handles");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelMemInfoHandles,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))
++ != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (IMG_VOID **)&ppsKernelMemInfos,
++ 0,
++ "Array of pointers to Kernel Memory Info");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&ppsKernelMemInfos[i],
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++ }
++
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++
++ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSharedPBDescKernelMemInfo,
++ psHWPBDescKernelMemInfo,
++ psBlockKernelMemInfo,
++ psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ ppsKernelMemInfos,
++ ui32KernelMemInfoHandlesCount);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++ if(phKernelMemInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++ (IMG_VOID *)phKernelMemInfoHandles,
++ 0);
++ }
++ if(ppsKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (IMG_VOID *)ppsKernelMemInfos,
++ 0);
++ }
++
++ if(ret == 0 && eError == PVRSRV_OK)
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc);
++ }
++
++ psSGXAddSharedPBDescOUT->eError = eError;
++
++ return ret;
++}
++
++static IMG_INT
++SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++ PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 i;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXInfoForSrvinitIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ SGXGetInfoForSrvinitKM(hDevCookieInt,
++ &psSGXInfoForSrvinitOUT->sInitInfo);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ PVRSRV_HEAP_INFO *psHeapInfo;
++
++ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++ if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++ if (psHeapInfo->hDevMemHeap != IMG_NULL)
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psHeapInfo->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++ }
++ }
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray,
++ IMG_UINT32 ui32BufferArrayLength,
++ IMG_BOOL bDumpPolls)
++{
++ IMG_UINT32 i;
++
++ for (i=0; i<ui32BufferArrayLength; i++)
++ {
++ PSGX_KICKTA_DUMP_BUFFER psBuffer;
++ PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM;
++ IMG_CHAR * pszName;
++ IMG_HANDLE hUniqueTag;
++ IMG_UINT32 ui32Offset;
++
++ psBuffer = &psBufferArray[i];
++ pszName = psBuffer->pszName;
++ if (!pszName)
++ {
++ pszName = "Nameless buffer";
++ }
++
++ hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
++
++ #if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo);
++ ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr;
++ #else
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM;
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ #endif
++
++ if (psBuffer->ui32Start <= psBuffer->ui32End)
++ {
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32SpaceUsed,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++
++ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32End - psBuffer->ui32Start,
++ 0,
++ hUniqueTag);
++ }
++ else
++ {
++
++
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ 0,
++ hUniqueTag);
++
++ if (bDumpPolls)
++ {
++ PDUMPMEMPOL(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ 0xFFFFFFFF,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ psBuffer->ui32End,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ 0,
++ psBuffer->ui32End,
++ 0,
++ hUniqueTag);
++ }
++ }
++}
++static IMG_INT
++SGXPDumpBufferArrayBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 i;
++ SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++ IMG_UINT32 ui32BufferArrayLength =
++ psPDumpBufferArrayIN->ui32BufferArrayLength;
++ IMG_UINT32 ui32BufferArraySize =
++ ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER);
++ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY);
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32BufferArraySize,
++ (IMG_PVOID *)&psKickTADumpBuffer, 0,
++ "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK)
++ {
++ return -ENOMEM;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psKickTADumpBuffer,
++ psPDumpBufferArrayIN->psBufferArray,
++ ui32BufferArraySize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++ return -EFAULT;
++ }
++
++ for(i = 0; i < ui32BufferArrayLength; i++)
++ {
++ IMG_VOID *pvMemInfo;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo;
++#endif
++ }
++
++ if(eError == PVRSRV_OK)
++ {
++ DumpBufferArray(psPerProc,
++ psKickTADumpBuffer,
++ ui32BufferArrayLength,
++ psPDumpBufferArrayIN->bDumpPolls);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++
++ return 0;
++}
++
++static IMG_INT
++SGXPDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 ui32RegVal = 0;
++#endif
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDump3DSignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed"));
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDump3DSignatureRegisters(psPDump3DSignatureRegistersIN->ui32DumpFrameNum,
++ psPDump3DSignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != IMG_NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++
++static IMG_INT
++SGXPDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpCounterRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpCounterRegisters(psPDumpCounterRegistersIN->ui32DumpFrameNum,
++ psPDumpCounterRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpCounterRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++ return ret;
++}
++
++static IMG_INT
++SGXPDumpTASignatureRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 ui32RegVal = 0;
++#endif
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpTASignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpTASignatureRegisters(psPDumpTASignatureRegistersIN->ui32DumpFrameNum,
++ psPDumpTASignatureRegistersIN->ui32TAKickCount,
++ psPDumpTASignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != IMG_NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++static IMG_INT
++SGXPDumpHWPerfCBBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if defined(SUPPORT_SGX_HWPERF)
++#if defined(__linux__)
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PDumpHWPerfCBKM(&psPDumpHWPerfCBIN->szFileName[0],
++ psPDumpHWPerfCBIN->ui32FileOffset,
++ psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr,
++ psDevInfo->psKernelHWPerfCBMemInfo->ui32AllocSize,
++ psPDumpHWPerfCBIN->ui32PDumpFlags);
++
++ return 0;
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ return 0;
++#endif
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ return -EFAULT;
++#endif
++}
++
++#endif
++
++
++IMG_VOID SetSGXDispatchTableEntry(IMG_VOID)
++{
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++#if defined(TRANSFER_QUEUE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW);
++
++#if defined(SUPPORT_SGX_HWPERF)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS, SGXReadDiffCountersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW);
++#endif
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, SGXPDumpCounterRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW);
++#endif
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+@@ -0,0 +1,42 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SGX_BRIDGE_H__
++#define __BRIDGED_SGX_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++IMG_VOID SetSGXDispatchTableEntry(IMG_VOID);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/buffer_manager.c
+@@ -0,0 +1,2072 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a,b) (a > b ? b : a)
++
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(BM_HEAP);
++DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++DECLARE_LIST_INSERT(BM_HEAP);
++DECLARE_LIST_REMOVE(BM_HEAP);
++
++DECLARE_LIST_FOR_EACH(BM_CONTEXT);
++DECLARE_LIST_ANY_VA(BM_CONTEXT);
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL);
++DECLARE_LIST_INSERT(BM_CONTEXT);
++DECLARE_LIST_REMOVE(BM_CONTEXT);
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
++static IMG_VOID
++BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
++static IMG_BOOL
++BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase);
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++ BM_MAPPING *pMapping,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 dev_vaddr_alignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++static IMG_VOID
++DevMemoryFree (BM_MAPPING *pMapping);
++
++static IMG_BOOL
++AllocMemory (BM_CONTEXT *pBMContext,
++ BM_HEAP *psBMHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_BUF *pBuf)
++{
++ BM_MAPPING *pMapping;
++ IMG_UINTPTR_T uOffset;
++ RA_ARENA *pArena = IMG_NULL;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)",
++ pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf));
++
++
++
++
++ if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
++ return IMG_FALSE;
++ }
++
++
++
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++
++ pArena = psBMHeap->pImportArena;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
++ return IMG_FALSE;
++ }
++
++
++ if (!RA_Alloc(pArena,
++ uSize,
++ IMG_NULL,
++ (IMG_VOID*) &pMapping,
++ uFlags,
++ uDevVAddrAlignment,
++ 0,
++ (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
++ return IMG_FALSE;
++ }
++
++ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++ if(pMapping->CpuVAddr)
++ {
++ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
++ }
++ else
++ {
++ pBuf->CpuVAddr = IMG_NULL;
++ }
++
++ if(uSize == pMapping->uSize)
++ {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ }
++ else
++ {
++ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++ uOffset,
++ uSize,
++ psBMHeap->ui32Attribs,
++ &pBuf->hOSMemHandle)!=PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
++ return IMG_FALSE;
++ }
++ }
++
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
++
++ if(uFlags & PVRSRV_MEM_ZERO)
++ {
++ if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++ else
++ {
++ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ PVR_ASSERT(psDevVAddr != IMG_NULL);
++
++ if (psDevVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr"));
++ return IMG_FALSE;
++ }
++
++
++ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++ uSize,
++ IMG_NULL,
++ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++ uDevVAddrAlignment,
++ psDevVAddr);
++
++
++ pBuf->DevVAddr = *psDevVAddr;
++ }
++ else
++ {
++
++
++
++ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++ uSize,
++ IMG_NULL,
++ 0,
++ uDevVAddrAlignment,
++ &pBuf->DevVAddr);
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (struct _BM_MAPPING_),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED"));
++ return IMG_FALSE;
++ }
++
++
++ pBuf->CpuVAddr = IMG_NULL;
++ pBuf->hOSMemHandle = 0;
++ pBuf->CpuPAddr.uiAddr = 0;
++
++
++ pMapping->CpuVAddr = IMG_NULL;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->DevVAddr = pBuf->DevVAddr;
++ pMapping->psSysAddr = IMG_NULL;
++ pMapping->uSize = uSize;
++ pMapping->hOSMemHandle = 0;
++ }
++
++
++ pMapping->pArena = pArena;
++
++
++ pMapping->pBMHeap = psBMHeap;
++ pBuf->pMapping = pMapping;
++
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping,
++ pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr,
++ pMapping->CpuPAddr.uiAddr,
++ pMapping->uSize));
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf,
++ pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr,
++ uSize));
++
++
++ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++ return IMG_TRUE;
++}
++
++
++static IMG_BOOL
++WrapMemory (BM_HEAP *psBMHeap,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T ui32BaseOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 uFlags,
++ BM_BUF *pBuf)
++{
++ IMG_DEV_VIRTADDR DevVAddr = {0};
++ BM_MAPPING *pMapping;
++ IMG_BOOL bResult;
++ IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++ psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr, uFlags, pBuf));
++
++ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++
++ PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++ uSize += ui32BaseOffset;
++ uSize = HOST_PAGEALIGN (uSize);
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*pMapping),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Mocked-up mapping") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
++ return IMG_FALSE;
++ }
++
++ OSMemSet(pMapping, 0, sizeof (*pMapping));
++
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = psBMHeap;
++
++ if(pvCPUVAddr)
++ {
++ pMapping->CpuVAddr = pvCPUVAddr;
++
++ if (bPhysContig)
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if(OSRegisterMem(pMapping->CpuPAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ else
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr;
++ pMapping->psSysAddr = psAddr;
++
++ if(OSRegisterDiscontigMem(pMapping->psSysAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++ else
++ {
++ if (bPhysContig)
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if(OSReservePhys(pMapping->CpuPAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ else
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++ pMapping->psSysAddr = psAddr;
++
++ if(OSReserveDiscontigPhys(pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%d) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++
++
++ bResult = DevMemoryAlloc(psBMHeap->pBMContext,
++ pMapping,
++ IMG_NULL,
++ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
++ &DevVAddr);
++ if (!bResult)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++ if(!ui32BaseOffset)
++ {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ }
++ else
++ {
++ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++ ui32BaseOffset,
++ (pMapping->uSize-ui32BaseOffset),
++ uFlags,
++ &pBuf->hOSMemHandle)!=PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
++ goto fail_cleanup;
++ }
++ }
++ if(pMapping->CpuVAddr)
++ {
++ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
++ }
++ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);
++
++ if(uFlags & PVRSRV_MEM_ZERO)
++ {
++ if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
++ {
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping, pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf, pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize));
++
++ pBuf->pMapping = pMapping;
++ return IMG_TRUE;
++
++fail_cleanup:
++ if(ui32BaseOffset && pBuf->hOSMemHandle)
++ {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++ }
++
++ if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ {
++ switch(pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++
++ return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
++{
++ IMG_VOID *pvCpuVAddr;
++
++ if(pBuf->CpuVAddr)
++ {
++ OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
++ }
++ else if(pMapping->eCpuMemoryOrigin == hm_contiguous
++ || pMapping->eCpuMemoryOrigin == hm_wrapped)
++ {
++ pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ if(!pvCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32Bytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ }
++ else
++ {
++ IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
++ IMG_SIZE_T ui32CurrentOffset = 0;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++
++ PVR_ASSERT(pBuf->hOSMemHandle);
++
++ while(ui32BytesRemaining > 0)
++ {
++ IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
++ CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
++
++ if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
++ {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining, HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++ }
++
++ pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ if(!pvCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++
++ ui32BytesRemaining -= ui32BlockBytes;
++ ui32CurrentOffset += ui32BlockBytes;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID
++FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
++{
++ BM_MAPPING *pMapping;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
++
++
++ pMapping = pBuf->pMapping;
++
++ if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
++ {
++
++ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
++ }
++ else
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++ pBuf->pMapping = IMG_NULL;
++ }
++ }
++ }
++ else
++ {
++
++ if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
++ {
++ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
++ {
++
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++ }
++ }
++ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++
++ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
++ {
++
++
++
++ PVR_ASSERT(pBuf->ui32ExportCount == 0);
++ RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
++ }
++ }
++ else
++ {
++ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
++ {
++ switch (pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++ }
++
++ if (bFromAllocator)
++ DevMemoryFree (pMapping);
++
++ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++ pBuf->pMapping = IMG_NULL;
++ }
++ }
++
++ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
++
++ }
++ }
++}
++
++PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
++{
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
++ if (!bTestDelete)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++BM_DestroyContext(IMG_HANDLE hBMContext,
++ IMG_BOOL *pbDestroyed)
++{
++ PVRSRV_ERROR eError;
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));
++
++ if (pbDestroyed != IMG_NULL)
++ {
++ *pbDestroyed = IMG_FALSE;
++ }
++
++
++
++ if (pBMContext == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBMContext->ui32RefCount--;
++
++ if (pBMContext->ui32RefCount > 0)
++ {
++
++ return PVRSRV_OK;
++ }
++
++
++
++
++ eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, BM_DestroyContext_AnyCb);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
++#if 0
++
++
++
++
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Cleaning up with ResManFreeSpecial"));
++ if(ResManFreeSpecial() != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeSpecial failed %d",eError));
++ }
++
++#endif
++ return eError;
++ }
++ else
++ {
++
++ eError = ResManFreeResByPtr(pBMContext->hResItem);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
++ return eError;
++ }
++
++
++ if (pbDestroyed != IMG_NULL)
++ {
++ *pbDestroyed = IMG_TRUE;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
++
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ RA_Delete (psBMHeap->pImportArena);
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ BM_CONTEXT *pBMContext = pvParam;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++
++
++ if(List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
++ BM_DestroyContextCallBack_AnyVaCb,
++ psDeviceNode) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (pBMContext->psMMUContext)
++ {
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++ }
++
++
++
++ if (pBMContext->pBufferHash)
++ {
++ HASH_Delete(pBMContext->pBufferHash);
++ }
++
++ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++ {
++
++ psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
++ }
++ else
++ {
++
++ List_BM_CONTEXT_Remove(pBMContext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
++{
++ PRESMAN_CONTEXT hResManContext;
++ hResManContext = va_arg(va, PRESMAN_CONTEXT);
++ if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
++ {
++
++ pBMContext->ui32RefCount++;
++ return pBMContext;
++ }
++ return IMG_NULL;
++}
++
++IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_CONTEXT *pBMContext;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
++ pBMContext = va_arg(va, BM_CONTEXT*);
++ switch(psBMHeap->sDevArena.DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
++ break;
++ }
++ }
++}
++
++IMG_HANDLE
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated)
++{
++ BM_CONTEXT *pBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_BOOL bKernelContext;
++ PRESMAN_CONTEXT hResManContext;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));
++
++ if (psPerProc == IMG_NULL)
++ {
++ bKernelContext = IMG_TRUE;
++ hResManContext = psDeviceNode->hResManContext;
++ }
++ else
++ {
++ bKernelContext = IMG_FALSE;
++ hResManContext = psPerProc->hResManContext;
++ }
++
++ if (pbCreated != IMG_NULL)
++ {
++ *pbCreated = IMG_FALSE;
++ }
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ if (bKernelContext == IMG_FALSE)
++ {
++ IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
++ BM_CreateContext_IncRefCount_AnyVaCb,
++ hResManContext);
++ if (res)
++ {
++ return res;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (struct _BM_CONTEXT_),
++ (IMG_PVOID *)&pBMContext, IMG_NULL,
++ "Buffer Manager Context") != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
++ return IMG_NULL;
++ }
++ OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));
++
++
++ pBMContext->psDeviceNode = psDeviceNode;
++
++
++
++ pBMContext->pBufferHash = HASH_Create(32);
++ if (pBMContext->pBufferHash==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
++ goto cleanup;
++ }
++
++ if(psDeviceNode->pfnMMUInitialise(psDeviceNode,
++ &pBMContext->psMMUContext,
++ psPDDevPAddr) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
++ goto cleanup;
++ }
++
++ if(bKernelContext)
++ {
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
++ psDevMemoryInfo->pBMKernelContext = pBMContext;
++ }
++ else
++ {
++
++
++
++
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++
++ if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
++ goto cleanup;
++ }
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++
++
++
++
++ pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
++
++
++
++
++ List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
++ BM_CreateContext_InsertHeap_ForEachVaCb,
++ psDeviceNode,
++ pBMContext);
++
++
++ List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
++ }
++
++
++ pBMContext->ui32RefCount++;
++
++
++ pBMContext->hResItem = ResManRegisterRes(hResManContext,
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ pBMContext,
++ 0,
++ BM_DestroyContextCallBack);
++ if (pBMContext->hResItem == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
++ goto cleanup;
++ }
++
++ if (pbCreated != IMG_NULL)
++ {
++ *pbCreated = IMG_TRUE;
++ }
++ return (IMG_HANDLE)pBMContext;
++
++cleanup:
++ (IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0);
++
++ return IMG_NULL;
++}
++
++
++IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
++ psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
++ if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
++ {
++
++ return psBMHeap;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_HEAP *psBMHeap;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
++
++ if(!pBMContext)
++ {
++ return IMG_NULL;
++ }
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++
++
++
++
++
++ if(pBMContext->ui32RefCount > 0)
++ {
++ psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
++ BM_CreateHeap_AnyVaCb,
++ psDevMemHeapInfo);
++
++ if (psBMHeap)
++ {
++ return psBMHeap;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_HEAP),
++ (IMG_PVOID *)&psBMHeap, IMG_NULL,
++ "Buffer Manager Heap") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
++ return IMG_NULL;
++ }
++
++ OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));
++
++ psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++ psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
++ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++
++ psBMHeap->pBMContext = pBMContext;
++
++ psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
++ &psBMHeap->sDevArena,
++ &psBMHeap->pVMArena);
++ if (!psBMHeap->pMMUHeap)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
++ goto ErrorExit;
++ }
++
++
++ psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
++ 0, 0, IMG_NULL,
++ psBMHeap->sDevArena.ui32DataPageSize,
++ BM_ImportMemory,
++ BM_FreeMemory,
++ IMG_NULL,
++ psBMHeap);
++ if(psBMHeap->pImportArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
++ goto ErrorExit;
++ }
++
++ if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++
++
++
++
++ psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
++ if(psBMHeap->pLocalDevMemArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
++ goto ErrorExit;
++ }
++ }
++
++
++ List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);
++
++ return (IMG_HANDLE)psBMHeap;
++
++
++ErrorExit:
++
++
++ if (psBMHeap->pMMUHeap != IMG_NULL)
++ {
++ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++ psDeviceNode->pfnMMUFinalise (pBMContext->psMMUContext);
++ }
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++
++ return IMG_NULL;
++}
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
++{
++ BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;
++ PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
++
++ if(psBMHeap)
++ {
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ RA_Delete (psBMHeap->pImportArena);
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
++ return;
++ }
++
++
++ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++
++
++ List_BM_HEAP_Remove(psBMHeap);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));
++ }
++}
++
++
++IMG_BOOL
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++BM_Alloc ( IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 *pui32Flags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *pBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_UINT32 uFlags;
++
++ if (pui32Flags == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
++ PVR_DBG_BREAK;
++ return IMG_FALSE;
++ }
++
++ uFlags = *pui32Flags;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++ uSize, uFlags, uDevVAddrAlignment));
++
++ SysAcquireData(&psSysData);
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ pBMContext = psBMHeap->pBMContext;
++
++ if(uDevVAddrAlignment == 0)
++ {
++ uDevVAddrAlignment = 1;
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_BUF),
++ (IMG_PVOID *)&pBuf, IMG_NULL,
++ "Buffer Manager buffer") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++
++ if (AllocMemory(pBMContext,
++ psBMHeap,
++ psDevVAddr,
++ uSize,
++ uFlags,
++ uDevVAddrAlignment,
++ pBuf) != IMG_TRUE)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
++ return IMG_FALSE;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++ uSize, uFlags, pBuf));
++
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE)pBuf;
++ *pui32Flags = uFlags | psBMHeap->ui32Attribs;
++
++
++ if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ *pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++ return IMG_TRUE;
++}
++
++
++
++#if defined(PVR_LMA)
++static IMG_BOOL
++ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
++{
++ IMG_UINT32 i;
++
++ for (i = 0; i < ui32PageCount; i++)
++ {
++ IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i];
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_BOOL
++ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
++{
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))
++
++#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))
++
++#endif
++
++
++IMG_BOOL
++BM_Wrap ( IMG_HANDLE hDevMemHeap,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Offset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 *pui32Flags,
++ BM_HANDLE *phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *psBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddress;
++ IMG_UINT32 uFlags;
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ psBMContext = psBMHeap->pBMContext;
++
++ uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++ if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
++ {
++ uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++ ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LMA)
++ if (bPhysContig)
++ {
++ if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
++ return IMG_FALSE;
++ }
++ }
++ else
++ {
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++
++ if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
++ return IMG_FALSE;
++ }
++ }
++#endif
++
++ sHashAddress = psSysAddr[0];
++
++
++ sHashAddress.uiAddr += ui32Offset;
++
++
++ pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr);
++
++ if(pBuf)
++ {
++ IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
++
++
++ if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++ pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
++ ui32Size, ui32Offset, sHashAddress.uiAddr));
++
++ pBuf->ui32RefCount++;
++ *phBuf = (BM_HANDLE)pBuf;
++ if(pui32Flags)
++ *pui32Flags = uFlags;
++
++ return IMG_TRUE;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_BUF),
++ (IMG_PVOID *)&pBuf, IMG_NULL,
++ "Buffer Manager buffer") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++
++ if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++
++ return IMG_FALSE;
++ }
++
++
++ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++ {
++
++ PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
++
++ if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
++ {
++ FreeBuf (pBuf, uFlags, IMG_TRUE);
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++ ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr));
++
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE)pBuf;
++ if(pui32Flags)
++ {
++
++ *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
++ }
++
++ return IMG_TRUE;
++}
++
++IMG_VOID BM_Export (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ pBuf->ui32ExportCount++;
++}
++
++IMG_VOID BM_FreeExport (BM_HANDLE hBuf, IMG_UINT32 ui32Flags)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ pBuf->ui32ExportCount--;
++ FreeBuf (pBuf, ui32Flags, IMG_FALSE);
++}
++
++IMG_VOID
++BM_Free (BM_HANDLE hBuf,
++ IMG_UINT32 ui32Flags)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddr;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf));
++ PVR_ASSERT (pBuf!=IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ pBuf->ui32RefCount--;
++
++ if(pBuf->ui32RefCount == 0)
++ {
++ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++ {
++ sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++ HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddr.uiAddr);
++ }
++ FreeBuf (pBuf, ui32Flags, IMG_TRUE);
++ }
++}
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_HandleToCpuVaddr(h=%08X)=%08X",
++ hBuf, pBuf->CpuVAddr));
++ return pBuf->CpuVAddr;
++}
++
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++ if (pBuf == IMG_NULL)
++ {
++ IMG_DEV_VIRTADDR DevVAddr = {0};
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter"));
++ return DevVAddr;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf, pBuf->DevVAddr));
++ return pBuf->DevVAddr;
++}
++
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ IMG_SYS_PHYADDR PhysAddr = {0};
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"));
++ return PhysAddr;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, pBuf->CpuPAddr.uiAddr));
++ return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
++}
++
++IMG_HANDLE
++BM_HandleToOSMemHandle(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_HandleToOSMemHandle(h=%08X)=%08X",
++ hBuf, pBuf->hOSMemHandle));
++ return pBuf->hOSMemHandle;
++}
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++ IMG_UINT32 *pTotalBytes,
++ IMG_UINT32 *pAvailableBytes)
++{
++ if (pAvailableBytes || pTotalBytes || uFlags);
++ return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++ BM_MAPPING *pMapping,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 dev_vaddr_alignment,
++ IMG_DEV_VIRTADDR *pDevVAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ IMG_UINT32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if(uFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++
++ pMapping->uSize *= 2;
++ }
++
++#ifdef PDUMP
++ if(uFlags & PVRSRV_MEM_DUMMY)
++ {
++
++ ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++#endif
++
++
++ if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
++ pMapping->uSize,
++ pActualSize,
++ 0,
++ dev_vaddr_alignment,
++ &(pMapping->DevVAddr)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
++ return IMG_FALSE;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pBMContext->psMMUContext);
++#endif
++
++
++
++ PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr, pMapping->hOSMemHandle, ui32PDumpSize, pMapping->pBMHeap->sDevArena.ui32DataPageSize, (IMG_HANDLE)pMapping);
++
++ switch (pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ case hm_wrapped_virtaddr:
++ case hm_contiguous:
++ {
++ psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
++ pMapping->uSize,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ case hm_env:
++ {
++ psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->uSize,
++ pMapping->CpuVAddr,
++ pMapping->hOSMemHandle,
++ pDevVAddr,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++ break;
++ }
++ case hm_wrapped_scatter:
++ case hm_wrapped_scatter_virtaddr:
++ {
++ psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "Illegal value %d for pMapping->eCpuMemoryOrigin",
++ pMapping->eCpuMemoryOrigin));
++ return IMG_FALSE;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pBMContext->psMMUContext);
++#endif
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID
++DevMemoryFree (BM_MAPPING *pMapping)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ IMG_UINT32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++
++ ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ ui32PSize = pMapping->uSize;
++ }
++
++ PDUMPFREEPAGES(pMapping->pBMHeap,
++ pMapping->DevVAddr,
++ ui32PSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (IMG_HANDLE)pMapping,
++ (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE);
++#endif
++
++ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSize));
++}
++
++static IMG_BOOL
++BM_ImportMemory (IMG_VOID *pH,
++ IMG_SIZE_T uRequestSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase)
++{
++ BM_MAPPING *pMapping;
++ BM_HEAP *pBMHeap = pH;
++ BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++ IMG_BOOL bResult;
++ IMG_SIZE_T uSize;
++ IMG_SIZE_T uPSize;
++ IMG_UINT32 uDevVAddrAlignment = 0;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
++ pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
++
++ PVR_ASSERT (ppsMapping != IMG_NULL);
++ PVR_ASSERT (pBMContext != IMG_NULL);
++
++ if (ppsMapping == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
++ goto fail_exit;
++ }
++
++ uSize = HOST_PAGEALIGN (uRequestSize);
++ PVR_ASSERT (uSize >= uRequestSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_MAPPING),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
++ goto fail_exit;
++ }
++
++ pMapping->hOSMemHandle = 0;
++ pMapping->CpuVAddr = 0;
++ pMapping->DevVAddr.uiAddr = 0;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = pBMHeap;
++ pMapping->ui32Flags = uFlags;
++
++
++ if (pActualSize)
++ {
++ *pActualSize = uSize;
++ }
++
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = pMapping->uSize;
++ }
++
++
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
++
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++
++ if (OSAllocPages(ui32Attribs,
++ uPSize,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ (IMG_VOID **)&pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: OSAllocPages(0x%x) failed",
++ uPSize));
++ goto fail_mapping_alloc;
++ }
++
++
++ pMapping->eCpuMemoryOrigin = hm_env;
++ }
++ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
++
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++
++ PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
++
++ if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
++ uPSize,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ 0,
++ (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
++ goto fail_mapping_alloc;
++ }
++
++
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ if(OSReservePhys(pMapping->CpuPAddr,
++ uPSize,
++ ui32Attribs,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
++ goto fail_dev_mem_alloc;
++ }
++
++
++ pMapping->eCpuMemoryOrigin = hm_contiguous;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
++ goto fail_mapping_alloc;
++ }
++
++
++ bResult = DevMemoryAlloc (pBMContext,
++ pMapping,
++ IMG_NULL,
++ uFlags,
++ uDevVAddrAlignment,
++ &pMapping->DevVAddr);
++ if (!bResult)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_dev_mem_alloc;
++ }
++
++
++
++ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
++
++ *pBase = pMapping->DevVAddr.uiAddr;
++ *ppsMapping = pMapping;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
++ return IMG_TRUE;
++
++fail_dev_mem_alloc:
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ {
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++ {
++ pMapping->uSize /= 2;
++ }
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = pMapping->uSize;
++ }
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (IMG_VOID *)pMapping->CpuVAddr,
++ pMapping->hOSMemHandle);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if(pMapping->CpuVAddr)
++ {
++ OSUnReservePhys(pMapping->CpuVAddr,
++ uPSize,
++ pBMHeap->ui32Attribs,
++ pMapping->hOSMemHandle);
++ }
++ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
++ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++fail_mapping_alloc:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++fail_exit:
++ return IMG_FALSE;
++}
++
++
++static IMG_VOID
++BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
++{
++ BM_HEAP *pBMHeap = h;
++ IMG_SIZE_T uPSize;
++
++ PVR_UNREFERENCED_PARAMETER (_base);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base, psMapping));
++
++ PVR_ASSERT (psMapping != IMG_NULL);
++
++ if (psMapping == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
++ return;
++ }
++
++ DevMemoryFree (psMapping);
++
++
++ if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
++ {
++ psMapping->uSize /= 2;
++ }
++
++ if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = psMapping->uSize;
++ }
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (IMG_VOID *) psMapping->CpuVAddr,
++ psMapping->hOSMemHandle);
++ }
++ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
++
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++ h, _base, psMapping));
++}
++
++IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
++
++ PVR_ASSERT (psMemInfo && psDevPAddr)
++
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap,
++ sDevVPageAddr);
++}
++
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ BM_HEAP *psBMHeap = (BM_HEAP *)hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo"));
++
++ psHeapInfo->hDevMemHeap = hDevMemHeap;
++ psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++ psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++ psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++ return PVRSRV_OK;
++}
++
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
++{
++ BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
++
++ return pBMHeap->pBMContext->psMMUContext;
++}
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
++
++ PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
++
++ return pBMContext->psMMUContext;
++}
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
++
++ return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
++}
++
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
++
++ return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
++}
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
++
++ return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/deviceclass.c
+@@ -0,0 +1,1977 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
++
++#if defined(SUPPORT_MISR_IN_THREAD)
++void OSVSyncMISR(IMG_HANDLE, IMG_BOOL);
++#endif
++
++#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
++IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie,
++ IMG_BOOL bScheduleMISR);
++#endif
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef struct PVRSRV_DC_BUFFER_TAG
++{
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++} PVRSRV_DC_BUFFER;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_TAG
++{
++ IMG_HANDLE hExtSwapChain;
++ IMG_UINT32 ui32SwapChainID;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32RefCount;
++ PVRSRV_QUEUE_INFO *psQueue;
++ PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ IMG_UINT32 ui32BufferCount;
++ PVRSRV_DC_BUFFER *psLastFlipBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psNext;
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ //IMG_HANDLE hResItem;
++} PVRSRV_DC_SWAPCHAIN;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG
++{
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++ IMG_HANDLE hResItem;
++} PVRSRV_DC_SWAPCHAIN_REF;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hExtDevice;
++ PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable;
++ IMG_HANDLE hDevMemContext;
++ PVRSRV_DC_BUFFER sSystemBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared;
++} PVRSRV_DISPLAYCLASS_INFO;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PRESMAN_ITEM hResItem;
++} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
++
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++typedef struct PVRSRV_BC_BUFFER_TAG
++{
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
++} PVRSRV_BC_BUFFER;
++
++
++typedef struct PVRSRV_BUFFERCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hExtDevice;
++ PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable;
++ IMG_HANDLE hDevMemContext;
++
++ IMG_UINT32 ui32BufferCount;
++ PVRSRV_BC_BUFFER *psBuffer;
++
++} PVRSRV_BUFFERCLASS_INFO;
++
++
++typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ IMG_HANDLE hResItem;
++} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
++
++
++static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM)
++{
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++ return psDCPerContextInfo->psDCInfo;
++}
++
++
++static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM)
++{
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++ return psBCPerContextInfo->psBCInfo;
++}
++
++IMG_VOID PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT *pui32DevCount;
++ IMG_UINT32 **ppui32DevID;
++ PVRSRV_DEVICE_CLASS peDeviceClass;
++
++ pui32DevCount = va_arg(va, IMG_UINT*);
++ ppui32DevID = va_arg(va, IMG_UINT32**);
++ peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++
++ if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass)
++ && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT))
++ {
++ (*pui32DevCount)++;
++ if(*ppui32DevID)
++ {
++ *(*ppui32DevID)++ = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++ }
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID )
++{
++
++ IMG_UINT ui32DevCount = 0;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDCKM_ForEachVaCb,
++ &ui32DevCount,
++ &pui32DevID,
++ DeviceClass);
++
++ if(pui32DevCount)
++ {
++ *pui32DevCount = ui32DevCount;
++ }
++ else if(pui32DevID == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters"));
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
++ IMG_UINT32 *pui32DeviceID)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = IMG_NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode = IMG_NULL;
++ SYS_DATA *psSysData;
++
++ /*
++ Registers a third party display class device with services.
++ A private PVRSRV_DISPLAYCLASS_INFO, a copy of the caller's
++ jump table and an external PVRSRV_DEVICE_NODE are allocated,
++ a unique device ID is assigned and the node is added to the
++ system device node list.
++
++ psFuncTable   - SRVKM->DISPLAY jump table (copied, not referenced)
++ pui32DeviceID - optional; receives the allocated device index
++
++ Returns PVRSRV_OK or PVRSRV_ERROR_OUT_OF_MEMORY; all partial
++ allocations are released on failure.
++ */
++
++ SysAcquireData(&psSysData);
++
++ /* allocate the display class info structure */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psDCInfo),
++ (IMG_VOID **)&psDCInfo, IMG_NULL,
++ "Display Class Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet (psDCInfo, 0, sizeof(*psDCInfo));
++
++ /* keep a private copy of the jump table - the caller's copy may not persist */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++ (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL,
++ "Function table for SRVKM->DISPLAY") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++ /* structure copy of the display driver's entry points */
++ *psDCInfo->psFuncTable = *psFuncTable;
++
++ /* allocate the external device node */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++ psDeviceNode->psSysData = psSysData;
++
++ /* allocate a unique device id; also cached in the DC info for flip commands (log previously said "BC" here) */
++ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed to allocate Device ID"));
++ goto ErrorExit;
++ }
++ psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID)
++ {
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++
++ /* inform the OS layer about the new external device */
++ SysRegisterExternalDevice(psDeviceNode);
++
++ /* and add the node to the device list */
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ /* the device node was previously leaked on this path */
++ if(psDeviceNode)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++ }
++
++ if(psDCInfo->psFuncTable)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
++ psDCInfo->psFuncTable = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ /* Unregisters the display class device with index ui32DevIndex. */
++ SysAcquireData(&psSysData);
++ /* locate the device node among external display class devices */
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_DISPLAY);
++ if (!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ /* the display class info hangs off the node's private data */
++
++ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++ /* removal is only legal when no DC API connections remain open */
++
++
++
++ if(psDCInfo->ui32RefCount == 0)
++ {
++ /* unlink from the device list and from the OS layer */
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++
++ SysRemoveExternalDevice(psDeviceNode);
++ /* release the device ID, jump table copy, DC info and node */
++
++
++
++ PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
++ psDCInfo->psFuncTable = IMG_NULL;
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
++
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open", psDCInfo->ui32RefCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
++ IMG_UINT32 *pui32DeviceID)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode = IMG_NULL;
++ SYS_DATA *psSysData;
++
++ /*
++ Registers a third party buffer class device with services.
++ A private PVRSRV_BUFFERCLASS_INFO, a copy of the caller's jump
++ table and an external PVRSRV_DEVICE_NODE are allocated, a
++ unique device ID is assigned and the node is added to the
++ system device node list.
++ psFuncTable   - SRVKM->BUFFER jump table (copied, not referenced)
++ pui32DeviceID - optional; receives the allocated device index
++ Returns PVRSRV_OK or PVRSRV_ERROR_OUT_OF_MEMORY.
++ */
++
++ SysAcquireData(&psSysData);
++
++ /* allocate the buffer class info structure */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCInfo),
++ (IMG_VOID **)&psBCInfo, IMG_NULL,
++ "Buffer Class Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet (psBCInfo, 0, sizeof(*psBCInfo));
++
++ /* keep a private copy of the jump table - the caller's copy may not persist */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL,
++ "Function table for SRVKM->BUFFER") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++ /* structure copy of the buffer driver's entry points */
++ *psBCInfo->psFuncTable = *psFuncTable;
++
++ /* allocate the external device node */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++ psDeviceNode->psSysData = psSysData;
++
++ /* allocate a unique device id; also cached in the BC info */
++ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
++ goto ErrorExit;
++ }
++ psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID)
++ {
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++
++ /* BC devices are not registered with the OS layer, so just list the node */
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ /* the device node was previously leaked on this path */
++ if(psDeviceNode)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++ }
++
++ /* was sizeof(PPVRSRV_BC_SRV2BUFFER_KMJTABLE): pointer size, not the struct it was allocated with */
++ if(psBCInfo->psFuncTable)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
++ psBCInfo->psFuncTable = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++
++PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ /* Unregisters the buffer class device with index ui32DevIndex. */
++ SysAcquireData(&psSysData);
++ /* locate the device node among external buffer class devices */
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_BUFFER);
++
++ if (!psDevNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ /* the buffer class info hangs off the node's private data */
++
++
++ psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice;
++ /* removal is only legal when no BC API connections remain open */
++
++
++
++ if(psBCInfo->ui32RefCount == 0)
++ {
++ /* unlink from the device list */
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDevNode);
++ /* BC devices are never registered with the OS layer, */
++ /* so there is no SysRemoveExternalDevice counterpart here */
++
++
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++ /* release the jump table copy, BC info and node */
++
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
++ psBCInfo->psFuncTable = IMG_NULL;
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
++
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDevNode, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open", psBCInfo->ui32RefCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE hDeviceKM,
++ IMG_BOOL bResManCallback)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++ /* Closes one per-process connection to a display class device. */
++ PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++ /* freeing the resman item runs CloseDCDeviceCallBack */
++
++ eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem);
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ /* Resman callback: drops one DC connection; last close shuts the external device. */
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam;
++ psDCInfo = psDCPerContextInfo->psDCInfo;
++
++ psDCInfo->ui32RefCount--;
++ if(psDCInfo->ui32RefCount == 0)
++ {
++ /* no connections left - close the third party display device */
++ psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
++ /* drop the system buffer sync object if this was its last user */
++ if (--psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++ }
++
++ psDCInfo->hDevMemContext = IMG_NULL;
++ psDCInfo->hExtDevice = IMG_NULL;
++ }
++ /* the per-context tracking structure always goes away */
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ /*
++ Opens a per-process connection to a display class device.  The
++ first open also opens the third party display driver and creates
++ the system buffer sync object; later opens just add a reference.
++ Returns a per-context handle through phDeviceKM.
++ */
++ if(!phDeviceKM || !hDevCookie)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SysAcquireData(&psSysData);
++
++ /* look the device up by ID among external display devices */
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DeviceID,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_DISPLAY);
++ if (!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++
++ /* allocate the per-connection tracking structure */
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psDCPerContextInfo),
++ (IMG_VOID **)&psDCPerContextInfo, IMG_NULL,
++ "Display Class per Context Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++ if(psDCInfo->ui32RefCount++ == 0)
++ {
++ /* first open: bind the kernel memory context ... */
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++ psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++ /* ... create the sync object for the system buffer ... */
++ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++ (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext,
++ &psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
++ psDCInfo->ui32RefCount--;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo), psDCPerContextInfo, IMG_NULL);
++ return eError;
++ }
++
++ /* ... and open the third party display device itself */
++ eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
++ &psDCInfo->hExtDevice,
++ (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
++ psDCInfo->ui32RefCount--;
++ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo), psDCPerContextInfo, IMG_NULL);
++ return eError;
++ }
++
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++ }
++
++ psDCPerContextInfo->psDCInfo = psDCInfo;
++ psDCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++ psDCPerContextInfo,
++ 0,
++ CloseDCDeviceCallBack);
++ *phDeviceKM = (IMG_HANDLE)psDCPerContextInfo;
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ /* Enumerates the pixel formats supported by the display device. */
++ if(!hDeviceKM || !pui32Count || !psFormat)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ /* forward straight to the third party display driver */
++
++ return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat);
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_DIMS *psDim)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ /* Enumerates the dimensions supported for a given format. */
++ if(!hDeviceKM || !pui32Count || !psFormat)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ /* NOTE(review): psDim is not NULL-checked - presumably optional for count-only queries; confirm against the display driver. */
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ /* forward straight to the third party display driver */
++
++ return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM,
++ IMG_HANDLE *phBuffer)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ IMG_HANDLE hExtBuffer;
++ /* Returns a kernel-side handle for the display's system (primary) buffer. */
++ if(!hDeviceKM || !phBuffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ /* ask the display driver for its system buffer handle */
++
++ eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
++ return eError;
++ }
++ /* fill in the kernel-side wrapper for the external buffer */
++
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++ psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
++ /* hand back the wrapper, not the external handle */
++
++ *phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer);
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM,
++ DISPLAY_INFO *psDisplayInfo)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_ERROR eError;
++ /* Queries the display driver's DISPLAY_INFO, clamped to services' limits. */
++ if(!hDeviceKM || !psDisplayInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ /* forward to the third party display driver */
++
++ eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ /* clamp to the number of swap chain buffers services can track */
++ if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++ {
++ psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChainRef)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++ /* Releases one reference to a swap chain via its per-process ref handle. */
++ if(!hSwapChainRef)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psSwapChainRef = hSwapChainRef;
++ /* resman triggers DestroyDCSwapChainRefCallBack */
++ eError = ResManFreeResByPtr(psSwapChainRef->hResItem);
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN *psSwapChain)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo;
++ IMG_UINT32 i;
++
++ /* Final teardown of a swap chain once its last reference has gone. */
++ /* unlink from the DC's shared swap chain list, if it is on it */
++ if( psDCInfo->psDCSwapChainShared )
++ {
++ if( psDCInfo->psDCSwapChainShared == psSwapChain )
++ {
++ psDCInfo->psDCSwapChainShared = psSwapChain->psNext;
++ }
++ else
++ {
++ PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++ psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++ while( psCurrentSwapChain->psNext )
++ {
++ if( psCurrentSwapChain->psNext != psSwapChain )
++ {
++ psCurrentSwapChain = psCurrentSwapChain->psNext;
++ continue;
++ }
++ psCurrentSwapChain->psNext = psSwapChain->psNext;
++ break;
++ }
++ }
++ }
++
++ /* destroy the chain's command queue */
++ PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++ /* destroy the external swap chain in the display driver */
++
++ eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DestroyDCSwapChainCallBack: Failed to destroy DC swap chain"));
++ return eError;
++ }
++ /* drop each buffer's sync object, freeing those with no other users */
++
++ for(i=0; i<psSwapChain->ui32BufferCount; i++)
++ {
++ if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ }
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
++
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF *) pvParam;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ /* Resman callback: drops one reference; the last one destroys the chain. */
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if(--psSwapChainRef->psSwapChain->ui32RefCount == 0)
++ {
++ eError = DestroyDCSwapChain(psSwapChainRef->psSwapChain);
++ }
++ /* the reference object itself always goes away */
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psSwapChainRef, IMG_NULL);
++ return eError;
++}
++
++static PVRSRV_DC_SWAPCHAIN* PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO *psDCInfo,
++ IMG_UINT32 ui32SwapChainID)
++{
++ PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++ /* Linear search of the DC's shared swap chain list by swap chain ID; IMG_NULL if absent. */
++ for(psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++ psCurrentSwapChain;
++ psCurrentSwapChain = psCurrentSwapChain->psNext)
++ {
++ if(psCurrentSwapChain->ui32SwapChainID == ui32SwapChainID)
++ return psCurrentSwapChain;
++ }
++ return IMG_NULL;
++}
++
++static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_DC_SWAPCHAIN *psSwapChain,
++ PVRSRV_DC_SWAPCHAIN_REF **ppsSwapChainRef)
++{
++ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
++
++ /* Allocates a per-process reference to psSwapChain and registers it with resman. */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SWAPCHAIN_REF),
++ (IMG_VOID **)&psSwapChainRef, IMG_NULL,
++ "Display Class Swapchain Reference") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet (psSwapChainRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF));
++ /* each reference pins the underlying swap chain */
++
++ psSwapChain->ui32RefCount++;
++ /* cleanup happens through DestroyDCSwapChainRefCallBack */
++
++ psSwapChainRef->psSwapChain = psSwapChain;
++ psSwapChainRef->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++ psSwapChainRef,
++ 0,
++ &DestroyDCSwapChainRefCallBack);
++ *ppsSwapChainRef = psSwapChainRef;
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChainRef,
++ IMG_UINT32 *pui32SwapChainID)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
++ PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ DISPLAY_INFO sDisplayInfo;
++
++ /*
++ Creates (or, with PVRSRV_CREATE_SWAPCHAIN_QUERY, looks up) a
++ display class swap chain and returns a per-process reference
++ through phSwapChainRef.  On failure all partial resources are
++ released via ErrorExit.
++ */
++ if(!hDeviceKM
++ || !psDstSurfAttrib
++ || !psSrcSurfAttrib
++ || !phSwapChainRef
++ || !pui32SwapChainID)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers"));
++ return PVRSRV_ERROR_TOOMANYBUFFERS;
++ }
++
++ if (ui32BufferCount < 2)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too few buffers"));
++ return PVRSRV_ERROR_TOO_FEW_BUFFERS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++ if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY )
++ {
++ /* query mode: only reference an existing shared chain by ID */
++ psSwapChain = PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID );
++ if( psSwapChain )
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: found query"));
++ eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++ psSwapChain,
++ &psSwapChainRef);
++ if( eError != PVRSRV_OK )
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++ return eError;
++ }
++ *phSwapChainRef = (IMG_HANDLE)psSwapChainRef;
++ return PVRSRV_OK;
++ }
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query"));
++ return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
++ }
++
++ /* allocate and zero the kernel swap chain structure */
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SWAPCHAIN),
++ (IMG_VOID **)&psSwapChain, IMG_NULL,
++ "Display Class Swapchain") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++ OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
++
++ /* command queue carrying this chain's flip commands */
++ eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
++ goto ErrorExit;
++ }
++
++ psSwapChain->psQueue = psQueue;
++
++ /* per-buffer sync objects and kernel-side buffer wrappers */
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++ psDCInfo->hDevMemContext,
++ &psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain"));
++ goto ErrorExit;
++ }
++
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++ psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++ psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++ apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++ }
++
++ psSwapChain->ui32BufferCount = ui32BufferCount;
++ psSwapChain->psDCInfo = psDCInfo;
++
++ eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, &sDisplayInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to get DC info"));
++ /* was "return eError" - leaked the queue, sync objects and chain */
++ goto ErrorExit;
++ }
++
++ /* create the swap chain in the third party display driver */
++ eError = psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++ ui32Flags,
++ psDstSurfAttrib,
++ psSrcSurfAttrib,
++ ui32BufferCount,
++ apsSyncData,
++ ui32OEMFlags,
++ &psSwapChain->hExtSwapChain,
++ &psSwapChain->ui32SwapChainID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
++ goto ErrorExit;
++ }
++
++ eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++ psSwapChain,
++ &psSwapChainRef);
++ if( eError != PVRSRV_OK )
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++ goto ErrorExit;
++ }
++
++ psSwapChain->ui32RefCount = 1;
++ psSwapChain->ui32Flags = ui32Flags;
++
++ /* shared chains are published on the DC's list so other processes can query them */
++ if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED )
++ {
++ if(! psDCInfo->psDCSwapChainShared )
++ {
++ psDCInfo->psDCSwapChainShared = psSwapChain;
++ }
++ else
++ {
++ PVRSRV_DC_SWAPCHAIN *psOldHead = psDCInfo->psDCSwapChainShared;
++ psDCInfo->psDCSwapChainShared = psSwapChain;
++ psSwapChain->psNext = psOldHead;
++ }
++ }
++
++ *pui32SwapChainID = psSwapChain->ui32SwapChainID;
++
++ *phSwapChainRef= (IMG_HANDLE)psSwapChainRef;
++
++ return eError;
++
++ErrorExit:
++ /* psSwapChain is IMG_NULL if its allocation failed - guard the cleanup */
++ if(psSwapChain && psSwapChain->hExtSwapChain)
++ {
++ psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice, psSwapChain->hExtSwapChain);
++ }
++
++ if(psSwapChain)
++ {
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ }
++ }
++ }
++ }
++
++ if(psQueue)
++ {
++ PVRSRVDestroyCommandQueueKM(psQueue);
++ }
++
++ if(psSwapChain)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
++ }
++
++ return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChainRef,
++ IMG_RECT *psRect)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ /* Sets the destination (on-screen) rectangle for the given swap chain. */
++ if(!hDeviceKM || !hSwapChainRef)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++ /* forward to the third party display driver */
++ return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ psRect);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChainRef,
++ IMG_RECT *psRect)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ /* Sets the source (buffer) rectangle for the given swap chain. */
++ if(!hDeviceKM || !hSwapChainRef)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++ /* forward to the third party display driver */
++ return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ psRect);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChainRef,
++ IMG_UINT32 ui32CKColour)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ /* Sets the destination colour key for the given swap chain. */
++ if(!hDeviceKM || !hSwapChainRef)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++ /* forward to the third party display driver */
++ return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ ui32CKColour);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChainRef,
++ IMG_UINT32 ui32CKColour)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ /* Sets the source colour key for the given swap chain. */
++ if(!hDeviceKM || !hSwapChainRef)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++ /* forward to the third party display driver */
++ return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ ui32CKColour);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChainRef,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++
++ /* Returns kernel-side handles for every buffer in hSwapChainRef's swap chain. */
++ if(!hDeviceKM || !hSwapChainRef || !phBuffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++ eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ pui32BufferCount,
++ ahExtBuffer);
++ if (eError != PVRSRV_OK)
++ {
++ /* don't consume pui32BufferCount/ahExtBuffer on failure - they may be uninitialised */
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Failed to get DC buffers"));
++ return eError;
++ }
++ PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ /* pair each external buffer with its kernel-side wrapper */
++ for(i=0; i<*pui32BufferCount; i++)
++ {
++ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i];
++ phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i];
++ }
++ return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_BUFFER *psBuffer;
++ PVRSRV_QUEUE_INFO *psQueue;
++ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ IMG_UINT32 i;
++ IMG_BOOL bAddReferenceToLast = IMG_TRUE;
++ IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND;
++ IMG_UINT32 ui32NumSrcSyncs = 1;
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++ PVRSRV_COMMAND *psCommand;
++ /* Queues a flip to hBuffer on its swap chain and kicks queue processing. */
++ if(!hDeviceKM || !hBuffer || !psClipRect)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ /* NOTE(review): ui32ClipRectCount sizes the command unchecked - presumably validated by the caller; confirm. */
++#if defined(SUPPORT_LMA)
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++#endif
++
++ psBuffer = (PVRSRV_DC_BUFFER*)hBuffer;
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
++ /* let the display driver choose the command ID and dependency policy */
++ if(psDCInfo->psFuncTable->pfnQuerySwapCommandID != IMG_NULL)
++ {
++ psDCInfo->psFuncTable->pfnQuerySwapCommandID(psDCInfo->hExtDevice,
++ psBuffer->psSwapChain->hExtSwapChain,
++ psBuffer->sDeviceClassBuffer.hExtBuffer,
++ hPrivateTag,
++ &ui16SwapCommandID,
++ &bAddReferenceToLast);
++
++ }
++
++#endif
++
++ psQueue = psBuffer->psSwapChain->psQueue;
++
++ /* the flip depends on the buffer being displayed... */
++ apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++ /* ...and optionally on the previously flipped buffer, if different */
++
++ if(bAddReferenceToLast && psBuffer->psSwapChain->psLastFlipBuffer &&
++ psBuffer != psBuffer->psSwapChain->psLastFlipBuffer)
++ {
++ apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++
++
++ ui32NumSrcSyncs++;
++ }
++
++ /* reserve space for the flip command in the chain's queue */
++ eError = PVRSRVInsertCommandKM (psQueue,
++ &psCommand,
++ psDCInfo->ui32DeviceID,
++ ui16SwapCommandID,
++ 0,
++ IMG_NULL,
++ ui32NumSrcSyncs,
++ apsSrcSync,
++ sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount));
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
++ goto Exit;
++ }
++
++ /* populate the flip command in place in the queue */
++ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++
++ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++
++ psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++
++
++ psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++
++
++ psFlipCmd->hPrivateTag = hPrivateTag;
++
++
++ psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++ /* clip rects live immediately after the command structure */
++ psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
++
++ for(i=0; i<ui32ClipRectCount; i++)
++ {
++ psFlipCmd->psClipRect[i] = psClipRect[i];
++ }
++
++
++ psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++ /* submit and kick queue processing */
++ eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command"));
++ goto Exit;
++ }
++
++ /* poll until the command processor accepts the queue or we time out */
++
++
++
++
++
++
++
++
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++ {
++ goto ProcessedQueues;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to process queues"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++
++ProcessedQueues:
++ /* remember the buffer so the next flip can depend on it */
++ psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++ /* out-of-queue-space is reported as a retryable condition */
++ if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++ {
++ eError = PVRSRV_ERROR_RETRY;
++ }
++
++#if defined(SUPPORT_LMA)
++ PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChainRef)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_QUEUE_INFO *psQueue;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PVRSRV_DC_SWAPCHAIN *psSwapChain;
++ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ IMG_UINT32 ui32NumSrcSyncs = 1;
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++ PVRSRV_COMMAND *psCommand;
++ IMG_BOOL bAddReferenceToLast = IMG_TRUE;
++ IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND;
++ /* Queues a flip back to the display's system buffer on the given swap chain. */
++ if(!hDeviceKM || !hSwapChainRef)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if defined(SUPPORT_LMA)
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++#endif
++
++ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++ psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef;
++ psSwapChain = psSwapChainRef->psSwapChain;
++
++ /* the flip is carried on the swap chain's command queue */
++ psQueue = psSwapChain->psQueue;
++
++#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
++ /* let the display driver choose the command ID and dependency policy */
++ if(psDCInfo->psFuncTable->pfnQuerySwapCommandID != IMG_NULL)
++ {
++ psDCInfo->psFuncTable->pfnQuerySwapCommandID(psDCInfo->hExtDevice,
++ psSwapChain->hExtSwapChain,
++ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer,
++ 0,
++ &ui16SwapCommandID,
++ &bAddReferenceToLast);
++
++ }
++
++#endif
++
++ /* the flip depends on the system buffer's sync object... */
++ apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++
++ /* ...and on the previously flipped buffer, unless it is the system buffer itself */
++
++ if(bAddReferenceToLast && psSwapChain->psLastFlipBuffer)
++ {
++
++ if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++
++
++ ui32NumSrcSyncs++;
++ }
++ }
++
++ /* reserve a command slot in the queue */
++ eError = PVRSRVInsertCommandKM (psQueue,
++ &psCommand,
++ psDCInfo->ui32DeviceID,
++ ui16SwapCommandID,
++ 0,
++ IMG_NULL,
++ ui32NumSrcSyncs,
++ apsSrcSync,
++ sizeof(DISPLAYCLASS_FLIP_COMMAND));
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
++ goto Exit;
++ }
++
++ /* populate the flip to the system buffer: no clip rects, interval 1 */
++ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++
++ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++
++ psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++
++
++ psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++
++
++ psFlipCmd->hPrivateTag = IMG_NULL;
++
++
++ psFlipCmd->ui32ClipRectCount = 0;
++
++ psFlipCmd->ui32SwapInterval = 1;
++
++ /* submit and kick queue processing */
++ eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
++ goto Exit;
++ }
++
++ /* poll until the command processor accepts the queue or we time out */
++
++
++
++
++
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++ {
++ goto ProcessedQueues;
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to process queues"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++
++ProcessedQueues:
++ /* next flip can depend on the system buffer */
++ psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++ eError = PVRSRV_OK;
++
++Exit:
++ /* out-of-queue-space is reported as a retryable condition */
++ if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++ {
++ eError = PVRSRV_ERROR_RETRY;
++ }
++
++#if defined(SUPPORT_LMA)
++ PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER pfnISRHandler,
++ IMG_VOID *pvISRHandlerData,
++ IMG_UINT32 ui32ISRSourceMask,
++ IMG_UINT32 ui32DeviceID)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++
++ PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DeviceID,
++ IMG_TRUE);
++
++ if (psDevNode == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get psDevNode"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData;
++
++
++ psDevNode->pfnDeviceISR = pfnISRHandler;
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ IMG_UINT32 ui32State;
++ ui32State = va_arg(va, IMG_UINT32);
++
++ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++ {
++ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
++ if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice)
++ {
++ psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State);
++ }
++ }
++}
++
++
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State)
++{
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVSetDCState_ForEachVaCb,
++ ui32State);
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
++ psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++ psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++ psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++ psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++ psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++#if defined(SUPPORT_MISR_IN_THREAD)
++ psJTable->pfnPVRSRVCmdComplete = OSVSyncMISR;
++#else
++ psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++#endif
++ psJTable->pfnPVRSRVRegisterSystemISRHandler = PVRSRVRegisterSystemISRHandler;
++ psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
++ psJTable->pfnPVRSRVFreeCmdCompletePacket = &PVRSRVFreeCommandCompletePacketKM;
++#endif
++
++ return IMG_TRUE;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE hDeviceKM,
++ IMG_BOOL bResManCallback)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++
++ eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem);
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
++ psBCInfo = psBCPerContextInfo->psBCInfo;
++
++ psBCInfo->ui32RefCount--;
++ if(psBCInfo->ui32RefCount == 0)
++ {
++ IMG_UINT32 i;
++
++
++ psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->ui32DeviceID, psBCInfo->hExtDevice);
++
++ for(i=0; i<psBCInfo->ui32BufferCount; i++)
++ {
++ if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ }
++ }
++ }
++
++
++ if(psBCInfo->psBuffer)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL);
++ psBCInfo->psBuffer = IMG_NULL;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError;
++
++ if(!phDeviceKM || !hDevCookie)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DeviceID,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_BUFFER);
++ if (!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice;
++
++
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCPerContextInfo),
++ (IMG_VOID **)&psBCPerContextInfo, IMG_NULL,
++ "Buffer Class per Context Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++ if(psBCInfo->ui32RefCount++ == 0)
++ {
++ BUFFER_INFO sBufferInfo;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++ psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++
++ eError = psBCInfo->psFuncTable->pfnOpenBCDevice(ui32DeviceID, &psBCInfo->hExtDevice);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
++ return eError;
++ }
++
++
++ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info"));
++ return eError;
++ }
++
++
++ psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount,
++ (IMG_VOID **)&psBCInfo->psBuffer,
++ IMG_NULL,
++ "Array of Buffer Class Buffer");
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
++ return eError;
++ }
++ OSMemSet (psBCInfo->psBuffer,
++ 0,
++ sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
++
++ for(i=0; i<psBCInfo->ui32BufferCount; i++)
++ {
++
++ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++ psBCInfo->hDevMemContext,
++ &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
++ goto ErrorExit;
++ }
++
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++
++
++
++
++ eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice,
++ i,
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData,
++ &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
++ goto ErrorExit;
++ }
++
++
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr;
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext;
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice;
++ }
++ }
++
++ psBCPerContextInfo->psBCInfo = psBCInfo;
++ psBCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++ psBCPerContextInfo,
++ 0,
++ CloseBCDeviceCallBack);
++
++
++ *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++
++ for(i=0; i<psBCInfo->ui32BufferCount; i++)
++ {
++ if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ }
++ }
++ }
++
++
++ if(psBCInfo->psBuffer)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL);
++ psBCInfo->psBuffer = IMG_NULL;
++ }
++
++ return eError;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM,
++ BUFFER_INFO *psBufferInfo)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ PVRSRV_ERROR eError;
++
++ if(!hDeviceKM || !psBufferInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ if(!hDeviceKM || !phBuffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ if(ui32BufferIndex < psBCInfo->ui32BufferCount)
++ {
++ *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex];
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++ psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++ psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++ return IMG_TRUE;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/devicemem.c
+@@ -0,0 +1,1459 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "pvr_bridge_km.h"
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_
++{
++
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo;
++} RESMAN_MAP_DEVICE_MEM_DATA;
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_UINT32 i;
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++
++ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++ }
++
++ for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++ psHeapInfo[i].ui32HeapID = (IMG_UINT32)PVRSRV_UNDEFINED_HEAP_ID;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemContext;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_UINT32 i;
++
++#if !defined(PVR_SECURE_HANDLES)
++ PVR_UNREFERENCED_PARAMETER(pbShared);
++#endif
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++
++ hDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr,
++ psPerProc,
++ pbCreated);
++ if (hDevMemContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++ *phDevMemContext = hDevMemContext;
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_BOOL *pbDestroyed)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ return BM_DestroyContext(hDevMemContext, pbDestroyed);
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemHeap;
++ IMG_UINT32 i;
++
++#if !defined(PVR_SECURE_HANDLES)
++ PVR_UNREFERENCED_PARAMETER(pbShared);
++#endif
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ BM_HANDLE hBuffer;
++
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ *ppsMemInfo = IMG_NULL;
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++
++ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++ bBMError = BM_Alloc (hDevMemHeap,
++ IMG_NULL,
++ ui32Size,
++ &psMemInfo->ui32Flags,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment),
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++
++ psMemInfo->ui32AllocSize = ui32Size;
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ *ppsMemInfo = psMemInfo;
++
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR FreeDeviceMem2(PVRSRV_KERNEL_MEM_INFO *psMemInfo, IMG_BOOL bFromAllocator)
++{
++ BM_HANDLE hBuffer;
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++
++ if (bFromAllocator)
++ BM_Free(hBuffer, psMemInfo->ui32Flags);
++ else
++ BM_FreeExport(hBuffer, psMemInfo->ui32Flags);
++
++
++ if ((psMemInfo->pvSysBackupBuffer) && bFromAllocator)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->ui32AllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL);
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++ }
++
++ if (psMemInfo->ui32RefCount == 0)
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++
++ return(PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ BM_HANDLE hBuffer;
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++
++ BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++ if(psMemInfo->pvSysBackupBuffer)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->ui32AllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL);
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++
++ return(PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo)
++{
++ IMG_HANDLE hSyncDevMemHeap;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ BM_CONTEXT *pBMContext;
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_SYNC_INFO),
++ (IMG_VOID **)&psKernelSyncInfo, IMG_NULL,
++ "Kernel Synchronization Info");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelSyncInfo->ui32RefCount = 0;
++
++
++ pBMContext = (BM_CONTEXT*)hDevMemContext;
++ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++
++ hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap;
++
++
++
++
++ eError = AllocDeviceMem(hDevCookie,
++ hSyncDevMemHeap,
++ PVRSRV_MEM_CACHE_CONSISTENT,
++ sizeof(PVRSRV_SYNC_DATA),
++ sizeof(IMG_UINT32),
++ &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++ if (eError != PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++ psSyncData = psKernelSyncInfo->psSyncData;
++
++ psSyncData->ui32WriteOpsPending = 0;
++ psSyncData->ui32WriteOpsComplete = 0;
++ psSyncData->ui32ReadOpsPending = 0;
++ psSyncData->ui32ReadOpsComplete = 0;
++ psSyncData->ui32LastOpDumpVal = 0;
++ psSyncData->ui32LastReadOpDumpVal = 0;
++
++#if defined(PDUMP)
++ PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM,
++ psKernelSyncInfo->psSyncDataMemInfoKM,
++ 0,
++ psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
++#endif
++
++ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++
++ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL;
++
++
++ *ppsKernelSyncInfo = psKernelSyncInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if (psKernelSyncInfo->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "oops: sync info ref count not zero at destruction"));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++ (IMG_VOID)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
++
++
++ return eError;
++}
++
++
++static IMG_VOID freeWrapped(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ IMG_HANDLE hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem;
++
++
++ if(psMemInfo->sMemBlk.psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
++ psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
++ }
++
++ if(hOSWrapMem)
++ {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++}
++
++
++static PVRSRV_ERROR FreeMemCallBackCommon(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bFromAllocator)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++
++ psMemInfo->ui32RefCount--;
++
++
++ if((psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED) && (bFromAllocator == IMG_TRUE))
++ {
++ IMG_HANDLE hMemInfo = IMG_NULL;
++
++
++ eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &hMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: can't find exported meminfo in the global handle list"));
++ return eError;
++ }
++
++
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ hMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: PVRSRVReleaseHandle failed for exported meminfo"));
++ return eError;
++ }
++ }
++
++
++ if (psMemInfo->ui32RefCount == 0)
++ {
++ switch(psMemInfo->memType)
++ {
++
++ case PVRSRV_MEMTYPE_WRAPPED:
++ freeWrapped(psMemInfo);
++ case PVRSRV_MEMTYPE_DEVICE:
++ if (psMemInfo->psKernelSyncInfo)
++ {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++ case PVRSRV_MEMTYPE_DEVICECLASS:
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: Unknown memType"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++
++ return FreeDeviceMem2(psMemInfo, bFromAllocator);
++}
++
++static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++ return FreeMemCallBackCommon(psMemInfo, ui32Param, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psMemInfo->sMemBlk.hResItem != IMG_NULL)
++ {
++ eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++ }
++ else
++ {
++
++ eError = FreeDeviceMemCallBack(psMemInfo, 0);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_ERROR eError;
++ BM_HEAP *psBMHeap;
++ IMG_HANDLE hDevMemContext;
++
++ if (!hDevMemHeap ||
++ (ui32Size == 0))
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ if (((ui32Size % HOST_PAGESIZE()) != 0) ||
++ ((ui32Alignment % HOST_PAGESIZE()) != 0))
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ eError = AllocDeviceMem(hDevCookie,
++ hDevMemHeap,
++ ui32Flags,
++ ui32Size,
++ ui32Alignment,
++ &psMemInfo);
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++ psMemInfo->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ goto free_mainalloc;
++ }
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++ }
++
++
++ *ppsMemInfo = psMemInfo;
++
++ if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
++ {
++ psMemInfo->sMemBlk.hResItem = IMG_NULL;
++ }
++ else
++ {
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ psMemInfo,
++ 0,
++ FreeDeviceMemCallBack);
++ if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
++ {
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto free_mainalloc;
++ }
++ }
++
++
++ psMemInfo->ui32RefCount++;
++
++ psMemInfo->memType = PVRSRV_MEMTYPE_DEVICE;
++
++ return (PVRSRV_OK);
++
++free_mainalloc:
++ FreeDeviceMem(psMemInfo);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem, psDeviceNode->hResManContext);
++
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++ IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Free,
++ IMG_SIZE_T *pui32LargestBlock)
++{
++
++
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(pui32Total);
++ PVR_UNREFERENCED_PARAMETER(pui32Free);
++ PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
++
++ return PVRSRV_OK;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ return FreeMemCallBackCommon(psMemInfo, ui32Param, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psExtSysPAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++ IMG_HANDLE hDevMemHeap = IMG_NULL;
++ PVRSRV_DEVICE_NODE* psDeviceNode;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ BM_HEAP *psBMHeap;
++ PVRSRV_ERROR eError;
++ IMG_VOID *pvPageAlignedCPUVAddr;
++ IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL;
++ IMG_HANDLE hOSWrapMem = IMG_NULL;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_SIZE_T ui32PageCount = 0;
++ IMG_UINT32 i;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
++ PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++ if (psDeviceNode == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if(pvLinAddr)
++ {
++
++ ui32PageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1);
++
++
++ ui32PageCount = HOST_PAGEALIGN(ui32ByteSize + ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - ui32PageOffset);
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount * sizeof(IMG_SYS_PHYADDR),
++ (IMG_VOID **)&psIntSysPAddr, IMG_NULL,
++ "Array of Page Addresses") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
++ ui32PageCount * ui32HostPageSize,
++ psIntSysPAddr,
++ &hOSWrapMem,
++ (ui32Flags != 0) ? IMG_TRUE : IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase1;
++ }
++
++
++ psExtSysPAddr = psIntSysPAddr;
++
++
++
++ bPhysContig = IMG_FALSE;
++ }
++ else
++ {
++
++ }
++
++
++ psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
++ {
++ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
++ {
++
++ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
++ }
++ else
++ {
++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if(hDevMemHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ErrorExitPhase2;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase2;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32PageOffset,
++ bPhysContig,
++ psExtSysPAddr,
++ IMG_NULL,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExitPhase3;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++ psMemBlock->hOSWrapMem = hOSWrapMem;
++ psMemBlock->psIntSysPAddr = psIntSysPAddr;
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExitPhase4;
++ }
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++ psMemInfo->memType = PVRSRV_MEMTYPE_WRAPPED;
++
++ psMemInfo->ui32RefCount++;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ psMemInfo,
++ 0,
++ UnwrapExtMemoryCallBack);
++
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++
++
++ErrorExitPhase4:
++ if(psMemInfo)
++ {
++ FreeDeviceMem(psMemInfo);
++
++
++
++ psMemInfo = IMG_NULL;
++ }
++
++ErrorExitPhase3:
++ if(psMemInfo)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ }
++
++ErrorExitPhase2:
++ if(psIntSysPAddr)
++ {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ErrorExitPhase1:
++ if(psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL);
++
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if(psMapData->psMemInfo->sMemBlk.psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMapData->psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
++ psMapData->psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
++ }
++
++ psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMapData->psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free sync info"));
++ return eError;
++ }
++ }
++
++ eError = FreeDeviceMem(psMapData->psMemInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free DST meminfo"));
++ return eError;
++ }
++
++
++ eError = FreeMemCallBackCommon(psMapData->psSrcMemInfo, 0, IMG_FALSE);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
++
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ IMG_SIZE_T ui32PageCount, ui32PageOffset;
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
++ IMG_DEV_PHYADDR sDevPAddr;
++ BM_BUF *psBuf;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_VOID *pvPageAlignedCPUVAddr;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = IMG_NULL;
++
++
++ if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ *ppsDstMemInfo = IMG_NULL;
++
++ ui32PageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
++ ui32PageCount = HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - ui32PageOffset);
++
++
++
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount*sizeof(IMG_SYS_PHYADDR),
++ (IMG_VOID **)&psSysPAddr, IMG_NULL,
++ "Array of Page Addresses") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++
++ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++
++ sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(ui32PageOffset);
++ for(i=0; i<ui32PageCount; i++)
++ {
++ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++
++
++ psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);
++
++
++ sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize);
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
++ (IMG_VOID **)&psMapData, IMG_NULL,
++ "Resource Manager Map Data") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDstDevMemHeap,
++ psSrcMemInfo->ui32AllocSize,
++ ui32PageOffset,
++ IMG_FALSE,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExit;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++ psMemBlock->psIntSysPAddr = psSysPAddr;
++
++
++ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ psMemInfo->ui32RefCount++;
++
++
++ psSrcMemInfo->ui32RefCount++;
++
++
++ BM_Export(psSrcMemInfo->sMemBlk.hBuffer);
++
++ psMemInfo->memType = PVRSRV_MEMTYPE_MAPPED;
++
++
++ psMapData->psMemInfo = psMemInfo;
++ psMapData->psSrcMemInfo = psSrcMemInfo;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ psMapData,
++ 0,
++ UnmapDeviceMemoryCallBack);
++
++ *ppsDstMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++
++
++ErrorExit:
++
++ if(psSysPAddr)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL);
++
++ }
++
++ if(psMemInfo)
++ {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ }
++
++ if(psMapData)
++ {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
++
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ return FreeMemCallBackCommon(psMemInfo, ui32Param, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ IMG_HANDLE *phOSMapInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++ IMG_BOOL bPhysContig;
++ BM_CONTEXT *psBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemHeap = IMG_NULL;
++ IMG_SIZE_T ui32ByteSize;
++ IMG_SIZE_T ui32Offset;
++ IMG_SIZE_T ui32PageSize = HOST_PAGESIZE();
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ IMG_UINT32 i;
++
++ if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
++ psDeviceClassBuffer->hExtBuffer,
++ &psSysPAddr,
++ &ui32ByteSize,
++ &pvCPUVAddr,
++ phOSMapInfo,
++ &bPhysContig);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
++ psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
++ {
++ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
++ {
++
++ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
++ }
++ else
++ {
++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if(hDevMemHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ ui32Offset = ((IMG_UINTPTR_T)pvCPUVAddr) & (ui32PageSize - 1);
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - ui32Offset);
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32Offset,
++ bPhysContig,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ psMemInfo,
++ 0,
++ UnmapDeviceClassMemoryCallBack);
++
++ psMemInfo->ui32RefCount++;
++
++ psMemInfo->memType = PVRSRV_MEMTYPE_DEVICECLASS;
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/handle.c
+@@ -0,0 +1,1547 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef PVR_SECURE_HANDLES
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef DEBUG
++#define HANDLE_BLOCK_SIZE 1
++#else
++#define HANDLE_BLOCK_SIZE 256
++#endif
++
++#define HANDLE_HASH_TAB_INIT_SIZE 32
++
++#define DEFAULT_MAX_INDEX_PLUS_ONE 0xfffffffful
++#define DEFAULT_MAX_HANDLE DEFAULT_MAX_INDEX_PLUS_ONE
++
++#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define INDEX_TO_HANDLE(psBase, idx) ((IMG_HANDLE)((idx) + 1))
++#define HANDLE_TO_INDEX(psBase, hand) ((IMG_UINT32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++#define HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define HANDLE_PTR_TO_INDEX(psBase, psHandle) (IMG_UINT32)((psHandle) - ((psBase)->psHandleArray))
++#define HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++ INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++#define ROUND_UP_TO_MULTIPLE(a, b) ((((a) + (b) - 1) / (b)) * (b))
++
++#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0)
++
++#define SET_FLAG(v, f) ((IMG_VOID)((v) |= (f)))
++#define CLEAR_FLAG(v, f) ((IMG_VOID)((v) &= ~(f)))
++#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
++
++#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f)
++
++#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f)
++#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f)
++#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f)
++
++#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE)
++
++#ifdef MIN
++#undef MIN
++#endif
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++
++struct sHandleList
++{
++ IMG_UINT32 ui32Prev;
++ IMG_UINT32 ui32Next;
++ IMG_HANDLE hParent;
++};
++
++enum ePVRSRVInternalHandleFlag
++{
++ INTERNAL_HANDLE_FLAG_NONE = 0x00,
++ INTERNAL_HANDLE_FLAG_BATCHED = 0x01,
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02,
++};
++
++struct sHandle
++{
++
++ PVRSRV_HANDLE_TYPE eType;
++
++
++ IMG_VOID *pvData;
++
++
++ IMG_UINT32 ui32NextIndexPlusOne;
++
++
++ enum ePVRSRVInternalHandleFlag eInternalFlag;
++
++
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++
++
++ IMG_UINT32 ui32Index;
++
++
++ struct sHandleList sChildren;
++
++
++ struct sHandleList sSiblings;
++};
++
++struct _PVRSRV_HANDLE_BASE_
++{
++
++ IMG_HANDLE hBaseBlockAlloc;
++
++
++ IMG_HANDLE hHandBlockAlloc;
++
++
++ struct sHandle *psHandleArray;
++
++
++ HASH_TABLE *psHashTab;
++
++
++ IMG_UINT32 ui32FreeHandCount;
++
++
++ IMG_UINT32 ui32FirstFreeIndex;
++
++
++ IMG_UINT32 ui32MaxIndexPlusOne;
++
++
++ IMG_UINT32 ui32TotalHandCount;
++
++
++ IMG_UINT32 ui32LastFreeIndexPlusOne;
++
++
++ IMG_UINT32 ui32HandBatchSize;
++
++
++ IMG_UINT32 ui32TotalHandCountPreBatch;
++
++
++ IMG_UINT32 ui32FirstBatchIndexPlusOne;
++
++
++ IMG_UINT32 ui32BatchHandAllocFailures;
++
++
++ IMG_BOOL bPurgingEnabled;
++};
++
++enum eHandKey {
++ HAND_KEY_DATA = 0,
++ HAND_KEY_TYPE,
++ HAND_KEY_PARENT,
++ HAND_KEY_LEN
++};
++
++PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL;
++
++typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInit)
++#endif
++static INLINE
++IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
++{
++ psList->ui32Next = ui32Index;
++ psList->ui32Prev = ui32Index;
++ psList->hParent = hParent;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitParentList)
++#endif
++static INLINE
++IMG_VOID InitParentList(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++ HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitChildEntry)
++#endif
++static INLINE
++IMG_VOID InitChildEntry(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, IMG_NULL);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIsEmpty)
++#endif
++static INLINE
++IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList)
++{
++ IMG_BOOL bIsEmpty;
++
++ bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index);
++
++#ifdef DEBUG
++ {
++ IMG_BOOL bIsEmpty2;
++
++ bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index);
++ PVR_ASSERT(bIsEmpty == bIsEmpty2);
++ }
++#endif
++
++ return bIsEmpty;
++}
++
++#ifdef DEBUG
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoChildren)
++#endif
++static INLINE
++IMG_BOOL NoChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sChildren);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoParent)
++#endif
++static INLINE
++IMG_BOOL NoParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings))
++ {
++ PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL);
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL);
++ }
++ return IMG_FALSE;
++}
++#endif
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentHandle)
++#endif
++static INLINE
++IMG_HANDLE ParentHandle(struct sHandle *psHandle)
++{
++ return psHandle->sSiblings.hParent;
++}
++
++#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++ ((struct sHandleList *)((IMG_CHAR *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInsertBefore)
++#endif
++static INLINE
++IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex)
++{
++
++ struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent == IMG_NULL);
++ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase, ui32ParentIndex));
++
++ psEntry->ui32Prev = psIns->ui32Prev;
++ psIns->ui32Prev = ui32EntryIndex;
++ psEntry->ui32Next = ui32InsIndex;
++ psPrevIns->ui32Next = ui32EntryIndex;
++
++ psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(AdoptChild)
++#endif
++static INLINE
++IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild)
++{
++ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++ PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psBase, psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent);
++
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListRemove)
++#endif
++static INLINE
++IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset)
++{
++ if (!HandleListIsEmpty(ui32EntryIndex, psEntry))
++ {
++
++ struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++ struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++
++
++ PVR_ASSERT(psEntry->hParent != IMG_NULL);
++
++ psPrev->ui32Next = psEntry->ui32Next;
++ psNext->ui32Prev = psEntry->ui32Prev;
++
++ HandleListInit(ui32EntryIndex, psEntry, IMG_NULL);
++ }
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(UnlinkFromParent)
++#endif
++static INLINE
++IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIterate)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++ PVR_ASSERT(psHead->hParent != IMG_NULL);
++
++
++ for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; )
++ {
++ struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++
++ struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset);
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psEntry->hParent == psHead->hParent);
++
++ ui32Index = psEntry->ui32Next;
++
++ eError = (*pfnIterFunc)(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(IterateOverChildren)
++#endif
++static INLINE
++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(GetHandleStructure)
++#endif
++static INLINE
++PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ IMG_UINT32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++ struct sHandle *psHandle;
++
++
++ if (!INDEX_IS_VALID(psBase, ui32Index))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%u >= %u)", ui32Index, psBase->ui32TotalHandCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ *ppsHandle = psHandle;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentIfPrivate)
++#endif
++static INLINE
++IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle)
++{
++ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ ParentHandle(psHandle) : IMG_NULL;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitKey)
++#endif
++static INLINE
++IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
++ aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
++ aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
++}
++
++static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->psHandleArray != IMG_NULL)
++ {
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psBase->ui32TotalHandCount * sizeof(struct sHandle),
++ psBase->psHandleArray,
++ psBase->hHandBlockAlloc);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleArray: Error freeing memory (%d)", eError));
++ }
++ else
++ {
++ psBase->psHandleArray = IMG_NULL;
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HAND_KEY aKey;
++ IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++ PVRSRV_ERROR eError;
++
++
++ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
++
++ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ IMG_HANDLE hHandle;
++ hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++ PVR_ASSERT(hHandle != IMG_NULL);
++ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ }
++
++
++ UnlinkFromParent(psBase, psHandle);
++
++
++ eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
++ return eError;
++ }
++
++
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++ if (BATCHED_HANDLE(psHandle) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
++
++ return PVRSRV_OK;
++ }
++
++
++ if (!psBase->bPurgingEnabled)
++ {
++ if (psBase->ui32FreeHandCount == 0)
++ {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = ui32Index;
++ }
++ else
++ {
++
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
++ INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
++ }
++
++ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++
++
++ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++ }
++
++ psBase->ui32FreeHandCount++;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ {
++ return eError;
++ }
++
++ for (i = 0; i < psBase->ui32TotalHandCount; i++)
++ {
++ struct sHandle *psHandle;
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
++ {
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
++ break;
++ }
++
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ {
++ break;
++ }
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_WARNING, "FreeHandleBase: Uncommitted/Unreleased handle batch"));
++ PVRSRVReleaseHandleBatch(psBase);
++ }
++
++
++ eError = FreeAllHandles(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
++ return eError;
++ }
++
++
++ eError = FreeHandleArray(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
++ return eError;
++ }
++
++ if (psBase->psHashTab != IMG_NULL)
++ {
++
++ HASH_Delete(psBase->psHashTab);
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ psBase,
++ psBase->hBaseBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(FindHandle)
++#endif
++static INLINE
++IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++ HAND_KEY aKey;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static PVRSRV_ERROR ReallocMem(IMG_PVOID *ppvMem, IMG_HANDLE *phBlockAlloc, IMG_UINT32 ui32NewSize, IMG_UINT32 ui32OldSize)
++{
++ IMG_VOID *pvOldMem = *ppvMem;
++ IMG_HANDLE hOldBlockAlloc = *phBlockAlloc;
++ IMG_UINT32 ui32CopySize = MIN(ui32NewSize, ui32OldSize);
++ IMG_VOID *pvNewMem = IMG_NULL;
++ IMG_HANDLE hNewBlockAlloc = IMG_NULL;
++ PVRSRV_ERROR eError;
++
++ if (ui32NewSize == ui32OldSize)
++ {
++ return (PVRSRV_OK);
++ }
++
++ if (ui32NewSize != 0)
++ {
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NewSize,
++ &pvNewMem,
++ &hNewBlockAlloc,
++ "Memory Area");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ReallocMem: Couldn't allocate new memory area (%d)", eError));
++ return eError;
++ }
++ }
++
++ if (ui32CopySize != 0)
++ {
++
++ OSMemCopy(pvNewMem, pvOldMem, ui32CopySize);
++ }
++
++ if (ui32OldSize != 0)
++ {
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32OldSize,
++ pvOldMem,
++ hOldBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ReallocMem: Couldn't free old memory area (%d)", eError));
++ }
++ }
++
++ *ppvMem = pvNewMem;
++ *phBlockAlloc = hNewBlockAlloc;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ReallocHandleArray)
++#endif
++static INLINE
++PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32NewCount, IMG_UINT32 ui32OldCount)
++{
++ return ReallocMem((IMG_PVOID *)&psBase->psHandleArray,
++ &psBase->hHandBlockAlloc,
++ ui32NewCount * sizeof(struct sHandle),
++ ui32OldCount * sizeof(struct sHandle));
++}
++
++static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Delta)
++{
++ PVRSRV_ERROR eError;
++ struct sHandle *psHandle;
++ IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE(ui32Delta, HANDLE_BLOCK_SIZE);
++ IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted;
++;
++
++ PVR_ASSERT(ui32Delta != 0);
++
++
++ if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne || ui32NewTotalHandCount <= psBase->ui32TotalHandCount)
++ {
++ ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;
++
++ ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount;
++
++ if (ui32DeltaAdjusted < ui32Delta)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Maximum handle limit reached (%d)", psBase->ui32MaxIndexPlusOne));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++
++ PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta);
++
++
++ eError = ReallocHandleArray(psBase, ui32NewTotalHandCount, psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: ReallocHandleArray failed (%d)", eError));
++ return eError;
++ }
++
++
++ for(psHandle = psBase->psHandleArray + psBase->ui32TotalHandCount;
++ psHandle < psBase->psHandleArray + ui32NewTotalHandCount;
++ psHandle++)
++ {
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++ psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psHandle->ui32NextIndexPlusOne = 0;
++ }
++
++
++ psBase->ui32FreeHandCount += ui32DeltaAdjusted;
++
++ if (psBase->ui32FirstFreeIndex == 0)
++ {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++ }
++ else
++ {
++ if (!psBase->bPurgingEnabled)
++ {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0)
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
++
++ INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = psBase->ui32TotalHandCount + 1;
++ }
++ }
++
++ if (!psBase->bPurgingEnabled)
++ {
++ psBase->ui32LastFreeIndexPlusOne = ui32NewTotalHandCount;
++ }
++
++ psBase->ui32TotalHandCount = ui32NewTotalHandCount;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Free)
++{
++ PVRSRV_ERROR eError;
++
++ if (ui32Free > psBase->ui32FreeHandCount)
++ {
++ IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount;
++ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", ui32FreeHandDelta, ui32Free, eError));
++
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ IMG_UINT32 ui32NewIndex;
++ struct sHandle *psNewHandle = IMG_NULL;
++ IMG_HANDLE hHandle;
++ HAND_KEY aKey;
++ PVRSRV_ERROR eError;
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ PVR_ASSERT(psBase->psHashTab != IMG_NULL);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL);
++ }
++
++ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_WARNING, "AllocHandle: Handle batch size (%u) was too small, allocating additional space", psBase->ui32HandBatchSize));
++ }
++
++
++ eError = EnsureFreeHandles(psBase, 1);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: EnsureFreeHandles failed (%d)", eError));
++ return eError;
++ }
++ PVR_ASSERT(psBase->ui32FreeHandCount != 0)
++
++ if (!psBase->bPurgingEnabled)
++ {
++
++ ui32NewIndex = psBase->ui32FirstFreeIndex;
++
++
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ }
++ else
++ {
++
++ for(ui32NewIndex = psBase->ui32FirstFreeIndex; ui32NewIndex < psBase->ui32TotalHandCount; ui32NewIndex++)
++ {
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ if (HANDLE_STRUCT_IS_FREE(psNewHandle))
++ {
++ break;
++ }
++
++ }
++ psBase->ui32FirstFreeIndex = 0;
++ PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount);
++ }
++ PVR_ASSERT(psNewHandle != IMG_NULL);
++
++
++ hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++
++ if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ psBase->ui32FreeHandCount--;
++
++
++ if (!psBase->bPurgingEnabled)
++ {
++
++ if (psBase->ui32FreeHandCount == 0)
++ {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1));
++
++ psBase->ui32LastFreeIndexPlusOne = 0;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++ else
++ {
++
++ psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ?
++ ui32NewIndex + 1 :
++ psNewHandle->ui32NextIndexPlusOne - 1;
++ }
++ }
++
++
++ psNewHandle->eType = eType;
++ psNewHandle->pvData = pvData;
++ psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psNewHandle->eFlag = eFlag;
++ psNewHandle->ui32Index = ui32NewIndex;
++
++ InitParentList(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoChildren(psBase, psNewHandle));
++#endif
++
++ InitChildEntry(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoParent(psBase, psNewHandle));
++#endif
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psNewHandle->ui32NextIndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++
++ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1;
++
++ SET_BATCHED_HANDLE(psNewHandle);
++ }
++ else
++ {
++ psNewHandle->ui32NextIndexPlusOne = 0;
++ }
++
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ IMG_HANDLE hHandle;
++ PVRSRV_ERROR eError;
++
++ *phHandle = IMG_NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
++ if (hHandle != IMG_NULL)
++ {
++ struct sHandle *psHandle;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
++ return eError;
++ }
++
++
++ if (TEST_FLAG(psHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
++ {
++ *phHandle = hHandle;
++ eError = PVRSRV_OK;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK))
++ {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hParentKey;
++ IMG_HANDLE hHandle;
++
++ *phHandle = IMG_NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ hParent : IMG_NULL;
++
++
++ eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++ if (hHandle != IMG_NULL)
++ {
++ struct sHandle *psCHandle;
++ PVRSRV_ERROR eErr;
++
++ eErr = GetHandleStructure(psBase, &psCHandle, hHandle, eType);
++ if (eErr != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
++ return eErr;
++ }
++
++ PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent);
++
++
++ if (TEST_FLAG(psCHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent)
++ {
++ *phHandle = hHandle;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++ psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++ AdoptChild(psBase, psPHand, psCHand);
++
++ *phHandle = hHandle;
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase))
++ {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ IMG_HANDLE hHandle;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++
++ hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
++ if (hHandle == IMG_NULL)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++ *peType = psHandle->eType;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError));
++ return eError;
++ }
++
++
++ for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; )
++ {
++ eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ *ppvData = psCHand->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError));
++ return eError;
++ }
++
++ *phParent = ParentHandle(psHandle);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", psBase->ui32HandBatchSize));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32BatchSize == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: Invalid batch size (%u)", ui32BatchSize));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = EnsureFreeHandles(psBase, ui32BatchSize);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", eError));
++ return eError;
++ }
++
++ psBase->ui32HandBatchSize = ui32BatchSize;
++
++
++ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0);
++
++ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0);
++
++ PVR_ASSERT(HANDLES_BATCHED(psBase));
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit)
++{
++
++ IMG_UINT32 ui32IndexPlusOne;
++ IMG_BOOL bCommitBatch = bCommit;
++
++ if (!HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: There is no handle batch"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ }
++
++ if (psBase->ui32BatchHandAllocFailures != 0)
++ {
++ if (bCommit)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures."));
++ }
++ bCommitBatch = IMG_FALSE;
++ }
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit);
++
++ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++ while(ui32IndexPlusOne != 0)
++ {
++ struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32IndexPlusOne - 1);
++ IMG_UINT32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne;
++ PVR_ASSERT(BATCHED_HANDLE(psHandle));
++
++ psHandle->ui32NextIndexPlusOne = 0;
++
++ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ PVRSRV_ERROR eError;
++
++
++ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++ else
++ {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ ui32IndexPlusOne = ui32NextIndexPlusOne;
++ }
++
++#ifdef DEBUG
++ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount)
++ {
++ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - psBase->ui32TotalHandCountPreBatch;
++
++ PVR_ASSERT(psBase->ui32TotalHandCount > psBase->ui32TotalHandCountPreBatch);
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", psBase->ui32HandBatchSize, psBase->ui32HandBatchSize + ui32Delta));
++
++ }
++#endif
++
++ psBase->ui32HandBatchSize = 0;
++ psBase->ui32FirstBatchIndexPlusOne = 0;
++ psBase->ui32TotalHandCountPreBatch = 0;
++ psBase->ui32BatchHandAllocFailures = 0;
++
++ if (psBase->ui32BatchHandAllocFailures != 0 && bCommit)
++ {
++ PVR_ASSERT(!bCommitBatch);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE);
++}
++
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ (IMG_VOID) PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE);
++}
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
++{
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (ui32MaxHandle == 0 || ui32MaxHandle >= DEFAULT_MAX_HANDLE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive", 0, DEFAULT_MAX_HANDLE));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (psBase->ui32TotalHandCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set becuase handles have already been allocated"));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->ui32MaxIndexPlusOne = ui32MaxHandle;
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ return psBase->ui32MaxIndexPlusOne;
++}
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ if (psBase->bPurgingEnabled)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVEnableHandlePurging: Purging already enabled"));
++ return PVRSRV_OK;
++ }
++
++
++ if (psBase->ui32TotalHandCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: Handles have already been allocated"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->bPurgingEnabled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ IMG_UINT32 ui32Handle;
++ IMG_UINT32 ui32NewHandCount;
++
++ if (!psBase->bPurgingEnabled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not enabled for this handle base"));
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++ }
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ for (ui32Handle = psBase->ui32TotalHandCount; ui32Handle != 0; ui32Handle--)
++ {
++ struct sHandle *psHandle = HANDLE_TO_HANDLE_PTR(psBase, ui32Handle);
++ if (!HANDLE_STRUCT_IS_FREE(psHandle))
++ {
++ break;
++ }
++ }
++
++ ui32NewHandCount = ROUND_UP_TO_MULTIPLE(ui32Handle, HANDLE_BLOCK_SIZE);
++
++
++ if (ui32NewHandCount >= ui32Handle && ui32NewHandCount <= (psBase->ui32TotalHandCount/2))
++ {
++ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - ui32NewHandCount;
++ PVRSRV_ERROR eError;
++
++
++
++ eError = ReallocHandleArray(psBase, ui32NewHandCount, psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ psBase->ui32TotalHandCount = ui32NewHandCount;
++ psBase->ui32FreeHandCount -= ui32Delta;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ PVRSRV_HANDLE_BASE *psBase;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ (IMG_PVOID *)&psBase,
++ &hBlockAlloc,
++ "Handle Base");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError));
++ return eError;
++ }
++ OSMemSet(psBase, 0, sizeof(*psBase));
++
++
++ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default);
++ if (psBase->psHashTab == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
++ goto failure;
++ }
++
++ psBase->hBaseBlockAlloc = hBlockAlloc;
++
++ psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE;
++
++ *ppsBase = psBase;
++
++ return PVRSRV_OK;
++failure:
++ (IMG_VOID)PVRSRVFreeHandleBase(psBase);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++ eError = FreeHandleBase(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)", eError));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
++
++ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)", eError));
++ goto error;
++ }
++
++ eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)", eError));
++ goto error;
++ }
++
++ return PVRSRV_OK;
++error:
++ (IMG_VOID) PVRSRVHandleDeInit();
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (gpsKernelHandleBase != IMG_NULL)
++ {
++ eError = FreeHandleBase(gpsKernelHandleBase);
++ if (eError == PVRSRV_OK)
++ {
++ gpsKernelHandleBase = IMG_NULL;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleDeInit: FreeHandleBase failed (%d)", eError));
++ }
++ }
++
++ return eError;
++}
++#else
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/hash.c
+@@ -0,0 +1,463 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
++
++#define KEY_TO_INDEX(pHash, key, uSize) \
++ ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define KEY_COMPARE(pHash, pKey1, pKey2) \
++ ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct _BUCKET_
++{
++
++ struct _BUCKET_ *pNext;
++
++
++ IMG_UINTPTR_T v;
++
++
++ IMG_UINTPTR_T k[];
++};
++typedef struct _BUCKET_ BUCKET;
++
++struct _HASH_TABLE_
++{
++
++ BUCKET **ppBucketTable;
++
++
++ IMG_UINT32 uSize;
++
++
++ IMG_UINT32 uCount;
++
++
++ IMG_UINT32 uMinimumSize;
++
++
++ IMG_UINT32 uKeySize;
++
++
++ HASH_FUNC *pfnHashFunc;
++
++
++ HASH_KEY_COMP *pfnKeyComp;
++};
++
++IMG_UINT32
++HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
++{
++ IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++ IMG_UINT32 ui;
++ IMG_UINT32 uHashKey = 0;
++
++ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ {
++ IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
++
++ uHashPart += (uHashPart << 12);
++ uHashPart ^= (uHashPart >> 22);
++ uHashPart += (uHashPart << 4);
++ uHashPart ^= (uHashPart >> 9);
++ uHashPart += (uHashPart << 10);
++ uHashPart ^= (uHashPart >> 2);
++ uHashPart += (uHashPart << 7);
++ uHashPart ^= (uHashPart >> 12);
++
++ uHashKey += uHashPart;
++ }
++
++ return uHashKey;
++}
++
++IMG_BOOL
++HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
++{
++ IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
++ IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++ IMG_UINT32 ui;
++
++ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ {
++ if (*p1++ != *p2++)
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static PVRSRV_ERROR
++_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
++{
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT (pBucket != IMG_NULL);
++ PVR_ASSERT (ppBucketTable != IMG_NULL);
++ PVR_ASSERT (uSize != 0);
++
++ if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++ pBucket->pNext = ppBucketTable[uIndex];
++ ppBucketTable[uIndex] = pBucket;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_Rehash (HASH_TABLE *pHash,
++ BUCKET **ppOldTable, IMG_UINT32 uOldSize,
++ BUCKET **ppNewTable, IMG_UINT32 uNewSize)
++{
++ IMG_UINT32 uIndex;
++ for (uIndex=0; uIndex< uOldSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ pBucket = ppOldTable[uIndex];
++ while (pBucket != IMG_NULL)
++ {
++ BUCKET *pNextBucket = pBucket->pNext;
++ if (_ChainInsert (pHash, pBucket, ppNewTable, uNewSize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ pBucket = pNextBucket;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static IMG_BOOL
++_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
++{
++ if (uNewSize != pHash->uSize)
++ {
++ BUCKET **ppNewTable;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x",
++ pHash->uSize, uNewSize, pHash->uCount));
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof (BUCKET *) * uNewSize,
++ (IMG_PVOID*)&ppNewTable, IMG_NULL,
++ "Hash Table Buckets");
++ if (ppNewTable == IMG_NULL)
++ return IMG_FALSE;
++
++ for (uIndex=0; uIndex<uNewSize; uIndex++)
++ ppNewTable[uIndex] = IMG_NULL;
++
++ if (_Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
++
++ pHash->ppBucketTable = ppNewTable;
++ pHash->uSize = uNewSize;
++ }
++ return IMG_TRUE;
++}
++
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
++{
++ HASH_TABLE *pHash;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(HASH_TABLE),
++ (IMG_VOID **)&pHash, IMG_NULL,
++ "Hash Table") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ pHash->uCount = 0;
++ pHash->uSize = uInitialLen;
++ pHash->uMinimumSize = uInitialLen;
++ pHash->uKeySize = uKeySize;
++ pHash->pfnHashFunc = pfnHashFunc;
++ pHash->pfnKeyComp = pfnKeyComp;
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof (BUCKET *) * pHash->uSize,
++ (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL,
++ "Hash Table Buckets");
++
++ if (pHash->ppBucketTable == IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
++
++ return IMG_NULL;
++ }
++
++ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++ pHash->ppBucketTable[uIndex] = IMG_NULL;
++ return pHash;
++}
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
++{
++ return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
++ &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++IMG_VOID
++HASH_Delete (HASH_TABLE *pHash)
++{
++ if (pHash != IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
++
++ PVR_ASSERT (pHash->uCount==0);
++ if(pHash->uCount != 0)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!"));
++ PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ }
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
++ pHash->ppBucketTable = IMG_NULL;
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
++
++ }
++}
++
++IMG_BOOL
++HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
++{
++ BUCKET *pBucket;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash, pKey, v));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
++ return IMG_FALSE;
++ }
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET) + pHash->uKeySize,
++ (IMG_VOID **)&pBucket, IMG_NULL,
++ "Hash Table entry") != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ pBucket->v = v;
++
++ OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
++ if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ pHash->uCount++;
++
++
++ if (pHash->uCount << 1 > pHash->uSize)
++ {
++
++
++ _Resize (pHash, pHash->uSize << 1);
++ }
++
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v));
++
++ return HASH_Insert_Extended(pHash, &k, v);
++}
++
++IMG_UINTPTR_T
++HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=%08X, pKey=%08X", pHash, pKey));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ IMG_UINTPTR_T v = pBucket->v;
++ (*ppBucket) = pBucket->pNext;
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL);
++
++
++ pHash->uCount--;
++
++
++ if (pHash->uSize > (pHash->uCount << 2) &&
++ pHash->uSize > pHash->uMinimumSize)
++ {
++
++
++ _Resize (pHash,
++ PRIVATE_MAX (pHash->uSize >> 1,
++ pHash->uMinimumSize));
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++ return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k));
++
++ return HASH_Remove_Extended(pHash, &k);
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve_Extended: Hash=%08X, pKey=%08X", pHash,pKey));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ IMG_UINTPTR_T v = pBucket->v;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++ return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash,k));
++ return HASH_Retrieve_Extended(pHash, &k);
++}
++
++#ifdef HASH_TRACE
++IMG_VOID
++HASH_Dump (HASH_TABLE *pHash)
++{
++ IMG_UINT32 uIndex;
++ IMG_UINT32 uMaxLength=0;
++ IMG_UINT32 uEmptyCount=0;
++
++ PVR_ASSERT (pHash != IMG_NULL);
++ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ IMG_UINT32 uLength = 0;
++ if (pHash->ppBucketTable[uIndex] == IMG_NULL)
++ uEmptyCount++;
++ for (pBucket=pHash->ppBucketTable[uIndex];
++ pBucket != IMG_NULL;
++ pBucket = pBucket->pNext)
++ uLength++;
++ uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
++ }
++
++ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
++ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/lists.c
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "lists.h"
++#include "services_headers.h"
++
++IMPLEMENT_LIST_ANY_VA(BM_HEAP)
++IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP)
++IMPLEMENT_LIST_REMOVE(BM_HEAP)
++IMPLEMENT_LIST_INSERT(BM_HEAP)
++
++IMPLEMENT_LIST_ANY_VA(BM_CONTEXT)
++IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL)
++IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT)
++IMPLEMENT_LIST_REMOVE(BM_CONTEXT)
++IMPLEMENT_LIST_INSERT(BM_CONTEXT)
++
++IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
++
++IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV)
++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV)
++IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV)
++
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va)
++{
++ IMG_UINT32 ui32DevIndex;
++ IMG_BOOL bIgnoreClass;
++ PVRSRV_DEVICE_CLASS eDevClass;
++
++ ui32DevIndex = va_arg(va, IMG_UINT32);
++ bIgnoreClass = va_arg(va, IMG_BOOL);
++ if (!bIgnoreClass)
++ {
++ eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++ }
++ else
++ {
++
++
++ eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
++ }
++
++ if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++ {
++ return psDeviceNode;
++ }
++ return IMG_NULL;
++}
++
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va)
++{
++ IMG_UINT32 ui32DeviceIndex;
++
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++
++ if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex)
++ {
++ return psPowerDev;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/mem.c
+@@ -0,0 +1,151 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pvr_bridge_km.h"
++
++
++static PVRSRV_ERROR
++FreeSharedSysMemCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ OSFreePages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ psKernelMemInfo->pvLinAddrKM,
++ psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo,
++ IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
++
++ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++ psKernelMemInfo->ui32Flags = ui32Flags;
++ psKernelMemInfo->ui32AllocSize = ui32Size;
++
++ if(OSAllocPages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ HOST_PAGESIZE(),
++ &psKernelMemInfo->pvLinAddrKM,
++ &psKernelMemInfo->sMemBlk.hOSMemHandle)
++ != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo,
++ 0);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psKernelMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ psKernelMemInfo,
++ 0,
++ FreeSharedSysMemCallBack);
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if(psKernelMemInfo->sMemBlk.hResItem)
++ {
++ eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem);
++ }
++ else
++ {
++ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(!psKernelMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if(psKernelMemInfo->sMemBlk.hResItem)
++ {
++ eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, IMG_NULL);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed"));
++ PVR_DBG_BREAK;
++ return eError;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem = IMG_NULL;
++ }
++
++ return eError;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/mem_debug.c
+@@ -0,0 +1,250 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef MEM_DEBUG_C
++#define MEM_DEBUG_C
++
++#if defined(PVRSRV_DEBUG_OS_MEMORY)
++
++#include "img_types.h"
++#include "services_headers.h"
++
++#if defined (__cplusplus)
++extern "C"
++{
++#endif
++
++#define STOP_ON_ERROR 0
++
++
++
++
++
++
++
++
++
++ IMG_BOOL MemCheck(const IMG_PVOID pvAddr, const IMG_UINT8 ui8Pattern, IMG_SIZE_T uSize)
++ {
++ IMG_UINT8 *pui8Addr;
++ for (pui8Addr = (IMG_UINT8*)pvAddr; uSize > 0; uSize--, pui8Addr++)
++ {
++ if (*pui8Addr != ui8Pattern)
++ {
++ return IMG_FALSE;
++ }
++ }
++ return IMG_TRUE;
++ }
++
++
++
++ IMG_VOID OSCheckMemDebug(IMG_PVOID pvCpuVAddr, IMG_SIZE_T uSize, const IMG_CHAR *pszFileName, const IMG_UINT32 uLine)
++ {
++ OSMEM_DEBUG_INFO const *psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32)pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
++
++
++ if (pvCpuVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : null pointer"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (((IMG_UINT32)pvCpuVAddr&3) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : invalid alignment"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (!MemCheck((IMG_PVOID)psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region before overwritten"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (uSize != psInfo->uSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : supplied size was different to stored size (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, uSize, psInfo->uSize,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : stored size parity error (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, psInfo->uSize, 0x01234567 ^ psInfo->uSizeParityCheck,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++ else
++ {
++
++ uSize = psInfo->uSize;
++ }
++
++
++ if (uSize)
++ {
++ if (!MemCheck((IMG_VOID*)((IMG_UINT32)pvCpuVAddr + uSize), 0xB2, TEST_BUFFER_PADDING_AFTER))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region after overwritten"
++ " - referenced from %s:%d - allocated from %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ }
++ }
++
++
++ if (psInfo->eValid != isAllocated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : not allocated (freed? %d)"
++ " - referenced %s:%d - freed %s:%d",
++ pvCpuVAddr, psInfo->eValid == isFree,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++ }
++
++ IMG_VOID debug_strcpy(IMG_CHAR *pDest, const IMG_CHAR *pSrc)
++ {
++ IMG_SIZE_T i = 0;
++
++ for (; i < 128; i++)
++ {
++ *pDest = *pSrc;
++ if (*pSrc == '\0') break;
++ pDest++;
++ pSrc++;
++ }
++ }
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID *ppvCpuVAddr,
++ IMG_HANDLE *phBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line)
++ {
++ OSMEM_DEBUG_INFO *psInfo;
++
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags,
++ ui32Size + TEST_BUFFER_PADDING,
++ ppvCpuVAddr,
++ phBlockAlloc,
++ pszFilename,
++ ui32Line);
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS, 0xBB, ui32Size);
++ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + ui32Size + TEST_BUFFER_PADDING_STATUS, 0xB2, TEST_BUFFER_PADDING_AFTER);
++
++
++ psInfo = (OSMEM_DEBUG_INFO *)(*ppvCpuVAddr);
++
++ OSMemSet(psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore));
++ debug_strcpy(psInfo->sFileName, pszFilename);
++ psInfo->uLineNo = ui32Line;
++ psInfo->eValid = isAllocated;
++ psInfo->uSize = ui32Size;
++ psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size;
++
++
++ *ppvCpuVAddr = (IMG_PVOID) ((IMG_UINT32)*ppvCpuVAddr)+TEST_BUFFER_PADDING_STATUS;
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++
++ PVR_TRACE(("Allocated pointer (after debug info): 0x%X from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line));
++#endif
++
++ return PVRSRV_OK;
++ }
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID pvCpuVAddr,
++ IMG_HANDLE hBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line)
++ {
++ OSMEM_DEBUG_INFO *psInfo;
++
++
++ OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line);
++
++
++ OSMemSet(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER);
++
++
++ psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32) pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
++
++
++ psInfo->uSize = 0;
++ psInfo->uSizeParityCheck = 0;
++ psInfo->eValid = isFree;
++ psInfo->uLineNo = ui32Line;
++ debug_strcpy(psInfo->sFileName, pszFilename);
++
++ return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags, ui32Size + TEST_BUFFER_PADDING, psInfo, hBlockAlloc, pszFilename, ui32Line);
++ }
++
++#if defined (__cplusplus)
++
++}
++#endif
++
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/metrics.c
+@@ -0,0 +1,160 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "metrics.h"
++
++#if defined(SUPPORT_VGX)
++#include "vgxapi_km.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgxapi_km.h"
++#endif
++
++#if defined(DEBUG) || defined(TIMING)
++
++static volatile IMG_UINT32 *pui32TimerRegister = 0;
++
++#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total
++#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
++#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count
++
++
++Temporal_Data asTimers[PVRSRV_NUM_TIMERS];
++
++
++IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
++{
++ if (!pui32TimerRegister)
++ {
++ static IMG_BOOL bFirstTime = IMG_TRUE;
++
++ if (bFirstTime)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
++
++ bFirstTime = IMG_FALSE;
++ }
++
++ return 0;
++ }
++
++#if defined(__sh__)
++
++ return (0xffffffff-*pui32TimerRegister);
++
++#else
++
++ return 0;
++
++#endif
++}
++
++
++static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
++{
++ IMG_UINT32 ui32Time1, ui32Time2;
++
++ ui32Time1 = PVRSRVTimeNow();
++
++ OSWaitus(1000000);
++
++ ui32Time2 = PVRSRVTimeNow();
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
++
++ return (ui32Time2 - ui32Time1);
++}
++
++
++IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
++{
++ IMG_UINT32 ui32Loop;
++
++ PVR_UNREFERENCED_PARAMETER(pvDevInfo);
++
++ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++ {
++ asTimers[ui32Loop].ui32Total = 0;
++ asTimers[ui32Loop].ui32Count = 0;
++ }
++
++
++ #if defined(__sh__)
++
++
++
++
++
++ *TCR_2 = TIMER_DIVISOR;
++
++
++ *TCOR_2 = *TCNT_2 = (IMG_UINT)0xffffffff;
++
++
++ *TST_REG |= (IMG_UINT8)0x04;
++
++ pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
++
++ #else
++
++ pui32TimerRegister = 0;
++
++ #endif
++
++}
++
++
++IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
++{
++ IMG_UINT32 ui32TicksPerMS, ui32Loop;
++
++ ui32TicksPerMS = PVRSRVGetCPUFreq();
++
++ if (!ui32TicksPerMS)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
++ return;
++ }
++
++ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++ {
++ if (asTimers[ui32Loop].ui32Count & 0x80000000L)
++ {
++ PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
++ }
++ }
++#if 0
++
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
++#endif
++}
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/pdump_common.c
+@@ -0,0 +1,1723 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(PDUMP)
++#include <stdarg.h>
++
++#include "services_headers.h"
++#if defined(SUPPORT_SGX)
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#endif
++#include "pdump_km.h"
++
++#if !defined(PDUMP_TEMP_BUFFER_SIZE)
++#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024L)
++#endif
++
++#if 1
++#define PDUMP_DBG(a) PDumpOSDebugPrintf a
++#else
++#define PDUMP_DBG(a)
++#endif
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++#define PTR_PLUS(t, p, x) ((t *)(((IMG_CHAR *)(p)) + (x)))
++#define VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID, p, x)
++#define VPTR_INC(p, x) (p = VPTR_PLUS(p, x))
++#define MAX_PDUMP_MMU_CONTEXTS (32)
++static IMG_VOID *gpvTempBuffer = IMG_NULL;
++static IMG_HANDLE ghTempBufferBlockAlloc;
++static IMG_UINT16 gui16MMUContextUsage = 0;
++
++
++
++static IMG_VOID *GetTempBuffer(IMG_VOID)
++{
++
++ if (gpvTempBuffer == IMG_NULL)
++ {
++ PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ &gpvTempBuffer,
++ &ghTempBufferBlockAlloc,
++ "PDUMP Temporary Buffer");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed: %d", eError));
++ }
++ }
++
++ return gpvTempBuffer;
++}
++
++static IMG_VOID FreeTempBuffer(IMG_VOID)
++{
++
++ if (gpvTempBuffer != IMG_NULL)
++ {
++ PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ gpvTempBuffer,
++ ghTempBufferBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeTempBuffer: OSFreeMem failed: %d", eError));
++ }
++ else
++ {
++ gpvTempBuffer = IMG_NULL;
++ }
++ }
++}
++
++IMG_VOID PDumpInitCommon(IMG_VOID)
++{
++
++ (IMG_VOID) GetTempBuffer();
++
++
++ PDumpInit();
++}
++
++IMG_VOID PDumpDeInitCommon(IMG_VOID)
++{
++
++ FreeTempBuffer();
++
++
++ PDumpDeInit();
++}
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++
++IMG_BOOL PDumpIsSuspended(IMG_VOID)
++{
++ return PDumpOSIsSuspended();
++}
++
++PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++ PDUMP_DBG(("PDumpRegWithFlagsKM"));
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegKM(IMG_UINT32 ui32Reg,IMG_UINT32 ui32Data)
++{
++ return PDumpRegWithFlagsKM(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Flags)
++{
++
++ #define POLL_DELAY 1000UL
++ #define POLL_COUNT_LONG (2000000000UL / POLL_DELAY)
++ #define POLL_COUNT_SHORT (1000000UL / POLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PollCount;
++
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpRegPolWithFlagsKM"));
++
++ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK) != 0) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK) != 0) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK) != 0))
++ {
++ ui32PollCount = POLL_COUNT_LONG;
++ }
++ else
++ {
++ ui32PollCount = POLL_COUNT_SHORT;
++ }
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n",
++ ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount, POLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask)
++{
++ return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpMallocPages (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_PUINT8 pui8LinAddr;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32NumPages;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++#if defined(LINUX)
++ PVR_ASSERT(hOSMemHandle);
++#else
++
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_MASK)) == 0);
++#endif
++
++ PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (SGX_MMU_PAGE_MASK)) == 0);
++ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_MASK)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %lu\r\n",
++ ui32DevVAddr, ui32NumBytes, ui32PageSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ pui8LinAddr = (IMG_PUINT8) pvLinAddr;
++ ui32Offset = 0;
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ while (ui32NumPages)
++ {
++ ui32NumPages--;
++
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ ui32PageSize,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr / ui32PageSize;
++
++ pui8LinAddr += ui32PageSize;
++ ui32Offset += ui32PageSize;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :SGXMEM:PA_%8.8lX%8.8lX %lu %lu 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ ui32Page * ui32PageSize,
++ ui32PageSize,
++ ui32PageSize,
++ ui32Page * ui32PageSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMallocPageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32PTSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (ui32PTSize - 1)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %lu\r\n", ui32PTSize, SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ {
++
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ IMG_NULL,
++ 0,
++ (IMG_PUINT8) pvLinAddr,
++ SGX_MMU_PAGE_SIZE,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :SGXMEM:PA_%8.8lX%8.8lX 0x%lX %lu 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ ui32Page * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePages (BM_HEAP *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages, ui32PageCounter;
++ IMG_DEV_PHYADDR sDevPAddr;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0);
++ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize - 1)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :SGXMEM:VA_%8.8lX\r\n", sDevVAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++)
++ {
++ if (!bInterleaved || (ui32PageCounter % 2) == 0)
++ {
++ sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr);
++ {
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n", (IMG_UINT32) hUniqueTag, sDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++ else
++ {
++
++ }
++
++ sDevVAddr.uiAddr += ui32PageSize;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32PTSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_UNREFERENCED_PARAMETER(ui32PTSize);
++
++
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (ui32PTSize-1UL)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++
++
++
++
++
++
++
++
++
++ {
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ IMG_NULL,
++ 0,
++ (IMG_PUINT8) pvLinAddr,
++ SGX_MMU_PAGE_SIZE,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n", (IMG_UINT32) hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++
++
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX: SGXMEM:$1\r\n",
++ ui32Reg);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ ui32Reg,
++ (IMG_UINT32) hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PDE_ADDR_ALIGNSHIFT,
++ ui32Data & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#endif
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDReg (IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_HANDLE hUniqueTag)
++{
++ return PDumpPDRegWithFlags(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++}
++
++PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ #define MEMPOLL_DELAY (1000)
++ #define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT8 *pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ PDUMP_GET_SCRIPT_STRING();
++
++
++ PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->ui32AllocSize);
++
++
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "-- POL :SGXMEM:VA_%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ psMemInfo->sDevVAddr.uiAddr + ui32Offset,
++ ui32Value,
++ ui32Mask,
++ eOperator,
++ MEMPOLL_COUNT,
++ MEMPOLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++ pui8LinAddr = psMemInfo->pvLinAddrKM;
++
++
++ pui8LinAddr += ui32Offset;
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ &ui32PageOffset);
++
++
++ sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "POL :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32Value,
++ ui32Mask,
++ eOperator,
++ MEMPOLL_COUNT,
++ MEMPOLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 ui32PageByteOffset;
++ IMG_UINT32 ui32BlockBytes;
++ IMG_UINT8* pui8LinAddr;
++ IMG_UINT8* pui8DataLinAddr = IMG_NULL;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++
++ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++
++ if (!PDumpOSJTInitialised())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes == 0 || PDumpOSIsSuspended())
++ {
++ return PVRSRV_OK;
++ }
++
++
++ if(pvAltLinAddr)
++ {
++ pui8DataLinAddr = pvAltLinAddr;
++ }
++ else if(psMemInfo->pvLinAddrKM)
++ {
++ pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset;
++ }
++ pui8LinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM;
++ sDevVAddr = psMemInfo->sDevVAddr;
++
++
++ sDevVAddr.uiAddr += ui32Offset;
++ pui8LinAddr += ui32Offset;
++
++ PVR_ASSERT(pui8DataLinAddr);
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++
++
++ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pui8DataLinAddr,
++ ui32Bytes,
++ ui32Flags))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0)
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
++ }
++ else
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "-- LDB :SGXMEM:VA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32)hUniqueTag,
++ psMemInfo->sDevVAddr.uiAddr,
++ ui32Offset,
++ ui32Bytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ &ui32PageByteOffset);
++ ui32NumPages = (ui32PageByteOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++
++ while(ui32NumPages)
++ {
++#if 0
++ IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++ CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32CurrentOffset);
++#endif
++ ui32NumPages--;
++
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageByteOffset;
++#if 0
++ if(ui32PageByteOffset)
++ {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++
++ ui32PageByteOffset = 0;
++ }
++#endif
++
++ if (ui32PageByteOffset + ui32Bytes > HOST_PAGESIZE())
++ {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageByteOffset;
++ }
++ else
++ {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++
++
++ ui32PageByteOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ sDevVAddr.uiAddr += ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT32 ui32BlockBytes;
++ IMG_UINT8* pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++ if (!pvLinAddr || !PDumpOSJTInitialised())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSIsSuspended())
++ {
++ return PVRSRV_OK;
++ }
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++ if (bInitialisePages)
++ {
++
++
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pvLinAddr,
++ ui32Bytes,
++ PDUMP_FLAGS_CONTINUOUS))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0)
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
++ }
++ else
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++
++
++
++
++ ui32PageOffset = (IMG_UINT32) pvLinAddr & (HOST_PAGESIZE() - 1);
++ ui32NumPages = (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++ pui8LinAddr = (IMG_UINT8*) pvLinAddr;
++
++ while (ui32NumPages)
++ {
++ ui32NumPages--;
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++
++ if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())
++ {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++ }
++ else
++ {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++
++
++ if (bInitialisePages)
++ {
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ else
++ {
++ for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32))
++ {
++ IMG_UINT32 ui32PTE = *((IMG_UINT32 *) (pui8LinAddr + ui32Offset));
++
++ if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0)
++ {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag2,
++ (ui32PTE & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "OR :SGXMEM:$1 :SGXMEM:$1 0x%8.8lX\r\n", ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$1\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK),
++ (IMG_UINT32) hUniqueTag2,
++ (ui32PTE & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PTE_ADDR_ALIGNSHIFT,
++ ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++#endif
++ }
++ else
++ {
++ PVR_ASSERT((ui32PTE & SGX_MMU_PTE_VALID) == 0UL);
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK),
++ (ui32PTE << SGX_MMU_PTE_ADDR_ALIGNSHIFT),
++ (IMG_UINT32) hUniqueTag2);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++
++
++
++
++ ui32PageOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageByteOffset;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ (IMG_UINT8 *)&sPDDevPAddr,
++ sizeof(IMG_DEV_PHYADDR),
++ PDUMP_FLAGS_CONTINUOUS))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sDevVAddr = psMemInfo->sDevVAddr;
++ ui32PageByteOffset = sDevVAddr.uiAddr & (SGX_MMU_PAGE_MASK);
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++ sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset;
++
++ if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0UL)
++ {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag2,
++ sPDDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :SGXMEM:$2 :SGXMEM:$1 0xFFFFFFFF\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :SGXMEM:$2 :SGXMEM:$1 0x20\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr + 4) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + 4) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ (IMG_UINT32) hUniqueTag2,
++ sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK,
++ sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++#endif
++ }
++ else
++ {
++ PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ sPDDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_MSG_STRING();
++ PDUMP_DBG(("PDumpCommentKM"));
++
++
++ if (!PDumpOSWriteString2("-- ", ui32Flags))
++ {
++ if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszComment);
++ if( (eErr != PVRSRV_OK) &&
++ (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
++ {
++ return eErr;
++ }
++
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ PDumpOSWriteString2(hMsg, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32MsgLen;
++ PDUMP_GET_MSG_STRING();
++
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszString);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ ui32MsgLen = PDumpOSBuflen(hMsg, ui32MaxLen);
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO),
++ (IMG_UINT8 *)hMsg,
++ ui32MsgLen,
++ ui32Flags))
++ {
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpBitmapKM( IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ PDUMP_DATAMASTER_PIXEL,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height,
++ ui32StrideInBytes,
++ eMemFormat);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height,
++ ui32StrideInBytes,
++ eMemFormat);
++#endif
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2( hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n",
++ ui32Address,
++ ui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2( hScript, ui32PDumpFlags);
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
++{
++ IMG_BOOL bFrameDumped;
++
++
++
++ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame + 1);
++ bFrameDumped = PDumpIsCaptureFrameKM();
++ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame);
++
++ return bFrameDumped;
++}
++
++static PVRSRV_ERROR PDumpSignatureRegister (IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 *pui32FileOffset,
++ IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08X 0x%08X %s\r\n",
++ ui32Address,
++ *pui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32Flags);
++ *pui32FileOffset += ui32Size;
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PDumpRegisterRange(IMG_CHAR *pszFileName,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters,
++ IMG_UINT32 *pui32FileOffset,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 i;
++ for (i = 0; i < ui32NumRegisters; i++)
++ {
++ PDumpSignatureRegister(pszFileName, pui32Registers[i], ui32Size, pui32FileOffset, ui32Flags);
++ }
++}
++
++PVRSRV_ERROR PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = 0;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump 3D signature registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_3d.sig", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpTASignatureRegisters (IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32);
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump TA signature registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_ta.sig", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCounterRegisters (IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0UL;
++ ui32FileOffset = 0UL;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu.perf", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegRead(const IMG_UINT32 ui32RegOffset, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpHWPerfCBKM (IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ "SAB :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++ PDUMP_DATAMASTER_EDM,
++#else
++ "SAB :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++#endif
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT8 *pui8LinAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++
++
++ PDUMP_GET_SCRIPT_STRING();
++
++
++ PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->ui32AllocSize);
++
++ pui8LinAddr = psROffMemInfo->pvLinAddrKM;
++ sDevVAddr = psROffMemInfo->sDevVAddr;
++
++
++ pui8LinAddr += ui32ROffOffset;
++ sDevVAddr.uiAddr += ui32ROffOffset;
++
++
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle,
++ ui32ROffOffset,
++ pui8LinAddr,
++ &ui32PageOffset);
++
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "CBP :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32WPosVal,
++ ui32PacketSize,
++ ui32BufferSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpIDLWithFlags"));
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %lu\r\n", ui32Clocks);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
++{
++ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++#endif
++
++
++PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_PVOID pvAltLinAddrUM,
++ IMG_PVOID pvLinAddrUM,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_VOID *pvAddrUM;
++ IMG_VOID *pvAddrKM;
++ IMG_UINT32 ui32BytesDumped;
++ IMG_UINT32 ui32CurrentOffset;
++
++ if (psMemInfo->pvLinAddrKM != IMG_NULL && pvAltLinAddrUM == IMG_NULL)
++ {
++
++ return PDumpMemKM(IMG_NULL,
++ psMemInfo,
++ ui32Offset,
++ ui32Bytes,
++ ui32Flags,
++ hUniqueTag);
++ }
++
++ pvAddrUM = (pvAltLinAddrUM != IMG_NULL) ? pvAltLinAddrUM : ((pvLinAddrUM != IMG_NULL) ? VPTR_PLUS(pvLinAddrUM, ui32Offset) : IMG_NULL);
++
++ pvAddrKM = GetTempBuffer();
++
++
++ PVR_ASSERT(pvAddrUM != IMG_NULL && pvAddrKM != IMG_NULL);
++ if (pvAddrUM == IMG_NULL || pvAddrKM == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE)
++ {
++ PDumpCommentWithFlags(ui32Flags, "Dumping 0x%8.8lx bytes of memory, in blocks of 0x%8.8lx bytes", ui32Bytes, (IMG_UINT32)PDUMP_TEMP_BUFFER_SIZE);
++ }
++
++ ui32CurrentOffset = ui32Offset;
++ for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32BytesToDump = MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);
++
++ eError = OSCopyFromUser(psPerProc,
++ pvAddrKM,
++ pvAddrUM,
++ ui32BytesToDump);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d), eError"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PDumpMemKM(pvAddrKM,
++ psMemInfo,
++ ui32CurrentOffset,
++ ui32BytesToDump,
++ ui32Flags,
++ hUniqueTag);
++
++ if (eError != PVRSRV_OK)
++ {
++
++ if (ui32BytesDumped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError));
++ }
++ PVR_ASSERT(ui32BytesDumped == 0);
++ return eError;
++ }
++
++ VPTR_INC(pvAddrUM, ui32BytesToDump);
++ ui32CurrentOffset += ui32BytesToDump;
++ ui32BytesDumped += ui32BytesToDump;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
++{
++ IMG_UINT32 i;
++
++
++ for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
++ {
++ if((gui16MMUContextUsage & (1U << i)) == 0)
++ {
++
++ gui16MMUContextUsage |= 1U << i;
++ *pui32MMUContextID = i;
++ return PVRSRV_OK;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
++{
++ if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
++ {
++
++ gui16MMUContextUsage &= ~(1U << ui32MMUContextID);
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 *pui32MMUContextID,
++ IMG_UINT32 ui32MMUType,
++ IMG_HANDLE hUniqueTag1,
++ IMG_VOID *pvPDCPUAddr)
++{
++ IMG_UINT8 *pui8LinAddr = (IMG_UINT8 *)pvPDCPUAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32MMUContextID;
++ PVRSRV_ERROR eError;
++
++ eError = _PdumpAllocMMUContext(&ui32MMUContextID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d", eError));
++ return eError;
++ }
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ sDevPAddr.uiAddr &= ~((PVRSRV_4K_PAGE_SIZE) -1);
++
++ PDumpComment("Set MMU Context\r\n");
++
++ PDumpComment("MMU :%s:v%d %d :%s:PA_%8.8lX%8.8lX\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType,
++ pszMemSpace,
++ hUniqueTag1,
++ sDevPAddr.uiAddr);
++
++
++ *pui32MMUContextID = ui32MMUContextID;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 ui32MMUContextID,
++ IMG_UINT32 ui32MMUType)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ PDumpComment("Clear MMU Context for memory space %s\r\n", pszMemSpace);
++
++ PDumpComment("MMU :%s:v%d %d\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType);
++
++ eError = _PdumpFreeMMUContext(ui32MMUContextID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/perproc.c
+@@ -0,0 +1,283 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++#include "osperproc.h"
++
++#define HASH_TAB_INIT_SIZE 32
++
++static HASH_TABLE *psHashTab = IMG_NULL;
++
++static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINTPTR_T uiPerProc;
++
++ PVR_ASSERT(psPerProc != IMG_NULL);
++
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
++ if (uiPerProc == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
++
++ PVR_ASSERT(psPerProc->ui32PID == 0);
++ }
++ else
++ {
++ PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
++ PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
++ }
++
++
++ if (psPerProc->psHandleBase != IMG_NULL)
++ {
++ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
++ return eError;
++ }
++ }
++
++
++ if (psPerProc->hPerProcData != IMG_NULL)
++ {
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
++ return eError;
++ }
++ }
++
++
++ eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError));
++ return eError;
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ psPerProc,
++ psPerProc->hBlockAlloc);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++ return psPerProc;
++}
++
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++
++ if (psPerProc == IMG_NULL)
++ {
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ (IMG_PVOID *)&psPerProc,
++ &hBlockAlloc,
++ "Per Process Data");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError));
++ return eError;
++ }
++ OSMemSet(psPerProc, 0, sizeof(*psPerProc));
++ psPerProc->hBlockAlloc = hBlockAlloc;
++
++ if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto failure;
++ }
++
++ psPerProc->ui32PID = ui32PID;
++ psPerProc->ui32RefCount = 0;
++
++
++ eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psPerProc->hPerProcData,
++ psPerProc,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
++ goto failure;
++ }
++ }
++
++ psPerProc->ui32RefCount++;
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
++ ui32PID, psPerProc->ui32RefCount));
++
++ return eError;
++
++failure:
++ (IMG_VOID)FreePerProcessData(psPerProc);
++ return eError;
++}
++
++
++IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u", ui32PID));
++ }
++ else
++ {
++ psPerProc->ui32RefCount--;
++ if (psPerProc->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVPerProcessDataDisconnect: "
++ "Last close from process 0x%x received", ui32PID));
++
++
++ PVRSRVResManDisconnect(psPerProc->hResManContext, IMG_FALSE);
++
++
++ eError = FreePerProcessData(psPerProc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Error freeing per-process data"));
++ }
++ }
++ }
++
++ eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)", eError));
++ }
++}
++
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
++{
++ PVR_ASSERT(psHashTab == IMG_NULL);
++
++
++ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++ if (psHashTab == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
++{
++
++ if (psHashTab != IMG_NULL)
++ {
++
++ HASH_Delete(psHashTab);
++ psHashTab = IMG_NULL;
++ }
++
++ return PVRSRV_OK;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/power.c
+@@ -0,0 +1,820 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pdump_km.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_INSERT(PVRSRV_POWER_DEV);
++DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV);
++
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
++
++
++static IMG_BOOL gbInitServerRunning = IMG_FALSE;
++static IMG_BOOL gbInitServerRan = IMG_FALSE;
++static IMG_BOOL gbInitSuccessful = IMG_FALSE;
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
++{
++
++ switch(eInitServerState)
++ {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ gbInitServerRunning = bState;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ gbInitServerRan = bState;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ gbInitSuccessful = bState;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetInitServerState : Unknown state %lx", eInitServerState));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
++{
++ IMG_BOOL bReturnVal;
++
++ switch(eInitServerState)
++ {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ bReturnVal = gbInitServerRunning;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ bReturnVal = gbInitServerRan;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ bReturnVal = gbInitSuccessful;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetInitServerState : Unknown state %lx", eInitServerState));
++ bReturnVal = IMG_FALSE;
++ }
++
++ return bReturnVal;
++}
++
++static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
++{
++ return (IMG_BOOL)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++#if !defined(SYS_NO_POWER_LOCK_TIMEOUT)
++ IMG_UINT32 ui32Timeout = 1000000;
++
++#if defined(SUPPORT_LMA)
++ ui32Timeout *= 60;
++#endif
++#endif
++ SysAcquireData(&psSysData);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ eError = SysPowerLockWrap(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++#endif
++ do
++ {
++ eError = OSLockResource(&psSysData->sPowerStateChangeResource,
++ ui32CallerID);
++ if (eError == PVRSRV_OK)
++ {
++ break;
++ }
++ else if (ui32CallerID == ISR_ID)
++ {
++
++
++ eError = PVRSRV_ERROR_RETRY;
++ break;
++ }
++
++ OSWaitus(1);
++#if defined(SYS_NO_POWER_LOCK_TIMEOUT)
++ } while (1);
++#else
++ ui32Timeout--;
++ } while (ui32Timeout > 0);
++#endif
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ if (eError != PVRSRV_OK)
++ {
++ SysPowerLockUnwrap(psSysData);
++ }
++#endif
++ if ((eError == PVRSRV_OK) &&
++ !bSystemPowerEvent &&
++ !_IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++
++ PVRSRVPowerUnlock(ui32CallerID);
++ eError = PVRSRV_ERROR_RETRY;
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID)
++{
++ OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ SysPowerLockUnwrap(gpsSysData);
++#endif
++}
++
++
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bAllDevices;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++
++ bAllDevices = va_arg(va, IMG_BOOL);
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++ {
++ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
++ psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++ {
++ if (psPowerDevice->pfnPrePower != IMG_NULL)
++ {
++
++ eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++
++ eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices,
++ IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
++ PVRSRVDevicePrePowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bAllDevices;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++
++ bAllDevices = va_arg(va, IMG_BOOL);
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++ {
++ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
++ psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++ {
++
++ eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (psPowerDevice->pfnPostPower != IMG_NULL)
++ {
++
++ eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices,
++ IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
++ PVRSRVDevicePostPowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVSetDevicePowerStateCoreKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ #if defined(PDUMP)
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++
++
++
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if(eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++
++ PDUMPSUSPEND();
++ }
++ #endif
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++ PDUMPRESUME();
++ }
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++ PDUMPRESUME();
++ }
++
++Exit:
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ if (_IsSystemStatePowered(eNewSysPowerState))
++ {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ }
++ else
++ {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState)
++ {
++
++ eError = SysSystemPrePowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ return eError;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState)
++ {
++
++ eError = SysSystemPostPowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ if (_IsSystemStatePowered(eNewSysPowerState))
++ {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ }
++ else
++ {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
++ psSysData->eCurrentPowerState, eNewSysPowerState));
++
++ psSysData->eCurrentPowerState = eNewSysPowerState;
++
++Exit:
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ if (_IsSystemStatePowered(eNewSysPowerState) &&
++ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ {
++
++
++
++ PVRSRVCommandCompleteCallbacks();
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ if (pfnPrePower == IMG_NULL &&
++ pfnPostPower == IMG_NULL)
++ {
++ return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++ }
++
++ SysAcquireData(&psSysData);
++
++ eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_POWER_DEV),
++ (IMG_VOID **)&psPowerDevice, IMG_NULL,
++ "Power Device");
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
++ return eError;
++ }
++
++
++ psPowerDevice->pfnPrePower = pfnPrePower;
++ psPowerDevice->pfnPostPower = pfnPostPower;
++ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
++ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
++ psPowerDevice->hDevCookie = hDevCookie;
++ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++
++ List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList), psPowerDevice);
++
++ return (PVRSRV_OK);
++}
++
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDev;
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDev = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDev)
++ {
++ List_PVRSRV_POWER_DEV_Remove(psPowerDev);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV), psPowerDev, IMG_NULL);
++
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ SysAcquireData(&psSysData);
++
++ if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) ||
++ OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
++ {
++ return IMG_FALSE;
++ }
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++ return (psPowerDevice && (psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON))
++ ? IMG_TRUE : IMG_FALSE;
++}
++
++
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ SysAcquireData(&psSysData);
++
++ if (bIdleDevice)
++ {
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%lx", eError));
++ return eError;
++ }
++ }
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++	if (psPowerDevice && psPowerDevice->pfnPreClockSpeedChange)
++ {
++ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
++ bIdleDevice,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePreClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++ if (bIdleDevice && eError != PVRSRV_OK)
++ {
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++
++ return eError;
++}
++
++
++IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange)
++ {
++ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
++ bIdleDevice,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePostClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++
++ if (bIdleDevice)
++ {
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++}
++
++#if 0
++/*
++ * PVRSRVPowerOnSystemWithDevice
++ *
++ * Description: Power on the System if it is off, but instead of powering all
++ * of the devices to their "default" state, only turn on the specified
++ * device index.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerOnSystemWithDevice(IMG_UINT32 ui32DeviceIndex,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_TRUE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ if (!_IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ eError = SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ eError = SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ErrorExit:
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPowerOnSystemWithDevice : FAILED 0x%x", eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/pvrsrv.c
+@@ -0,0 +1,1195 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "handle.h"
++#include "perproc.h"
++#include "pdump_km.h"
++#include "ra.h"
++
++#include "pvrversion.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK);
++
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++
++DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
++
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID)
++{
++ SYS_DEVICE_ID* psDeviceWalker;
++ SYS_DEVICE_ID* psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++
++ while (psDeviceWalker < psDeviceEnd)
++ {
++ if (!psDeviceWalker->bInUse)
++ {
++ psDeviceWalker->bInUse = IMG_TRUE;
++ *pui32DevID = psDeviceWalker->uiID;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
++
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID)
++{
++ SYS_DEVICE_ID* psDeviceWalker;
++ SYS_DEVICE_ID* psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++
++ while (psDeviceWalker < psDeviceEnd)
++ {
++
++ if (
++ (psDeviceWalker->uiID == ui32DevID) &&
++ (psDeviceWalker->bInUse)
++ )
++ {
++ psDeviceWalker->bInUse = IMG_FALSE;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
++
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++#ifndef ReadHWReg
++IMG_EXPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++ return *(volatile IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset);
++}
++#endif
++
++
++#ifndef WriteHWReg
++IMG_EXPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++ PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x",pvLinRegBaseAddr,ui32Offset,ui32Value));
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++}
++#endif
++
++
++#ifndef WriteHWRegs
++IMG_EXPORT
++IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs)
++{
++ while (ui32Count)
++ {
++ WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal);
++ psHWRegs++;
++ ui32Count--;
++ }
++}
++#endif
++
++IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT *pui32DevCount;
++ PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList;
++
++ pui32DevCount = va_arg(va, IMG_UINT*);
++ ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER**);
++
++ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
++ {
++ *(*ppsDevIdList) = psDeviceNode->sDevId;
++ (*ppsDevIdList)++;
++ (*pui32DevCount)++;
++ }
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
++{
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++
++ if (!pui32NumDevices || !psDevIdList)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++
++ for (i=0; i<PVRSRV_MAX_DEVICES; i++)
++ {
++ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++ }
++
++
++ *pui32NumDevices = 0;
++
++
++
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDevicesKM_ForEachVaCb,
++ pui32NumDevices,
++ &psDevIdList);
++
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = ResManInit();
++ if (eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++ eError = PVRSRVPerProcessDataInit();
++ if(eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ eError = PVRSRVHandleInit();
++ if(eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++
++ if(OSAllocMem( PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT) ,
++ (IMG_VOID **)&psSysData->psGlobalEventObject, 0,
++ "Event Object") != PVRSRV_OK)
++ {
++
++ goto Error;
++ }
++
++ if(OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++ return eError;
++
++Error:
++ PVRSRVDeInit(psSysData);
++ return eError;
++}
++
++
++
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ if (psSysData == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed - invalid param"));
++ return;
++ }
++
++
++ if(psSysData->psGlobalEventObject)
++ {
++ OSEventObjectDestroy(psSysData->psGlobalEventObject);
++ OSFreeMem( PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT),
++ psSysData->psGlobalEventObject,
++ 0);
++ psSysData->psGlobalEventObject = IMG_NULL;
++ }
++
++ eError = PVRSRVHandleDeInit();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
++ }
++
++ eError = PVRSRVPerProcessDataDeInit();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
++ }
++
++ ResManDeInit();
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ IMG_UINT32 ui32SOCInterruptBit,
++ IMG_UINT32 *pui32DeviceIndex)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ eError = pfnRegisterDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
++ return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++
++
++
++
++
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->psSysData = psSysData;
++ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++
++ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_TRUE);
++ if(!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++
++
++ eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
++ return eError;
++ }
++
++
++ if(psDeviceNode->pfnInitDevice != IMG_NULL)
++ {
++ eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call"));
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_DEFAULT,
++ KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
++
++ SysAcquireData(&psSysData);
++
++ if (bInitSuccessful)
++ {
++ eError = SysFinalise();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError));
++ return eError;
++ }
++
++
++ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
++ PVRSRVFinaliseSystem_SetPowerState_AnyCb);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
++ PVRSRVFinaliseSystem_CompatCheck_AnyCb);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++
++
++
++
++
++
++
++#if !defined(SUPPORT_PDUMP_DELAYED_INITPHASE_TERMINATION)
++ PDUMPENDINITPHASE();
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ if (psDeviceNode->pfnInitDeviceCompatCheck)
++ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
++ else
++ return PVRSRV_OK;
++}
++
++IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ IMG_UINT32 ui32DevIndex;
++
++ eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE);
++ ui32DevIndex = va_arg(va, IMG_UINT32);
++
++ if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.eDeviceType == eDeviceType) ||
++ (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex))
++ {
++ return psDeviceNode;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32 ui32DevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE *phDevCookie)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVAcquireDeviceDataKM_Match_AnyVaCb,
++ eDeviceType,
++ ui32DevIndex);
++
++
++ if (!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++
++ if (phDevCookie)
++ {
++ *phDevCookie = (IMG_HANDLE)psDeviceNode;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_TRUE);
++
++ if (!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
++ return eError;
++ }
++
++
++
++ eError = ResManFreeResByCriteria(psDeviceNode->hResManContext,
++ RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ IMG_NULL, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call"));
++ return eError;
++ }
++
++
++
++ if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
++ {
++ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
++ return eError;
++ }
++ }
++
++
++
++ PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE);
++ psDeviceNode->hResManContext = IMG_NULL;
++
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++
++ return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries)
++{
++ {
++ IMG_UINT32 uiMaxTime = ui32Tries * ui32Waitus;
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime)
++ {
++ if((*pui32LinMemAddr & ui32Mask) == ui32Value)
++ {
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ } END_LOOP_UNTIL_TIMEOUT();
++ }
++
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++#if defined (USING_ISR_INTERRUPTS)
++
++extern IMG_UINT32 gui32EventStatusServicesByISR;
++
++PVRSRV_ERROR PollForInterruptKM (IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries)
++{
++ IMG_UINT32 uiMaxTime;
++
++ uiMaxTime = ui32Tries * ui32Waitus;
++
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime)
++ {
++ if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value)
++ {
++ gui32EventStatusServicesByISR = 0;
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ return PVRSRV_ERROR_GENERIC;
++}
++#endif
++
++IMG_VOID PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ IMG_CHAR **ppszStr;
++ IMG_UINT32 *pui32StrLen;
++
++ ppszStr = va_arg(va, IMG_CHAR**);
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++
++ if(psBMHeap->pImportArena)
++ {
++ RA_GetStats(psBMHeap->pImportArena,
++ ppszStr,
++ pui32StrLen);
++ }
++
++ if(psBMHeap->pVMArena)
++ {
++ RA_GetStats(psBMHeap->pVMArena,
++ ppszStr,
++ pui32StrLen);
++ }
++}
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT *psBMContext, va_list va)
++{
++
++ IMG_UINT32 *pui32StrLen;
++ IMG_INT32 *pi32Count;
++ IMG_CHAR **ppszStr;
++
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++ pi32Count = va_arg(va, IMG_INT32*);
++ ppszStr = va_arg(va, IMG_CHAR**);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nApplication Context (hDevMemContext) 0x%08X:\n",
++ (IMG_HANDLE)psBMContext);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++ List_BM_HEAP_ForEach_va(psBMContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr,
++ pui32StrLen);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT32 *pui32StrLen;
++ IMG_INT32 *pi32Count;
++ IMG_CHAR **ppszStr;
++
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++ pi32Count = va_arg(va, IMG_INT32*);
++ ppszStr = va_arg(va, IMG_CHAR**);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++
++ if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++ {
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nKernel Context:\n");
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++
++ List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr,
++ pui32StrLen);
++ }
++
++
++ return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.pBMContext,
++ PVRSRVGetMiscInfoKM_BMContext_AnyVaCb,
++ pui32StrLen,
++ pi32Count,
++ ppszStr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
++{
++ SYS_DATA *psSysData;
++
++ if(!psMiscInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psMiscInfo->ui32StatePresent = 0;
++
++
++ if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
++ |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
++ |PVRSRV_MISC_INFO_MEMSTATS_PRESENT
++ |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
++ |PVRSRV_MISC_INFO_DDKVERSION_PRESENT
++ |PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT
++ |PVRSRV_MISC_INFO_RESET_PRESENT))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
++ (psSysData->pvSOCTimerRegisterKM != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++ psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
++ }
++ else
++ {
++ psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
++ }
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
++ (psSysData->pvSOCClockGateRegsBase != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++ psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
++ psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
++ }
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
++ (psMiscInfo->pszMemoryStr != IMG_NULL))
++ {
++ RA_ARENA **ppArena;
++ IMG_CHAR *pszStr;
++ IMG_UINT32 ui32StrLen;
++ IMG_INT32 i32Count;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++
++ ppArena = &psSysData->apsLocalDevMemArena[0];
++ while(*ppArena)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ RA_GetStats(*ppArena,
++ &pszStr,
++ &ui32StrLen);
++
++ ppArena++;
++ }
++
++
++
++ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVGetMiscInfoKM_Device_AnyVaCb,
++ &ui32StrLen,
++ &i32Count,
++ &pszStr);
++
++
++ i32Count = OSSNPrintf(pszStr, 100, "\n\0");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
++ (psSysData->psGlobalEventObject != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++ psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
++ }
++
++
++
++ if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
++ && ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
++ && (psMiscInfo->pszMemoryStr != IMG_NULL))
++ {
++ IMG_CHAR *pszStr;
++ IMG_UINT32 ui32StrLen;
++ IMG_UINT32 ui32LenStrPerNum = 12;
++ IMG_INT32 i32Count;
++ IMG_INT i;
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;
++
++
++ psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
++ psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
++ psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH;
++ psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ for (i=0; i<4; i++)
++ {
++ if (ui32StrLen < ui32LenStrPerNum)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%ld", psMiscInfo->aui32DDKVersion[i]);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ if (i != 3)
++ {
++ i32Count = OSSNPrintf(pszStr, 2, ".");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++ }
++ }
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT) != 0UL)
++ {
++ if(psMiscInfo->bDeferCPUCacheFlush)
++ {
++
++ if(!psMiscInfo->bCPUCacheFlushAll)
++ {
++
++
++
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVGetMiscInfoKM: don't support deferred range flushes"));
++ PVR_DPF((PVR_DBG_MESSAGE," using deferred flush all instead"));
++ }
++
++ psSysData->bFlushAll = IMG_TRUE;
++ }
++ else
++ {
++
++ if(psMiscInfo->bCPUCacheFlushAll)
++ {
++
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = IMG_FALSE;
++ }
++ else
++ {
++
++ OSFlushCPUCacheRangeKM(psMiscInfo->pvRangeAddrStart, psMiscInfo->pvRangeAddrEnd);
++ }
++ }
++ }
++#endif
++
++#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
++ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
++ {
++ PVR_LOG(("User requested OS reset"));
++ OSPanic();
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFBStatsKM(IMG_UINT32 *pui32Total,
++ IMG_UINT32 *pui32Available)
++{
++ IMG_UINT32 ui32Total = 0, i = 0;
++ IMG_UINT32 ui32Available = 0;
++
++ *pui32Total = 0;
++ *pui32Available = 0;
++
++
++ while(BM_ContiguousStatistics(i, &ui32Total, &ui32Available) == IMG_TRUE)
++ {
++ *pui32Total += ui32Total;
++ *pui32Available += ui32Available;
++
++ i++;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ IMG_UINT32 ui32InterruptSource;
++
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
++ goto out;
++ }
++ psSysData = psDeviceNode->psSysData;
++
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
++ if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++ {
++ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++ {
++ bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);
++ }
++
++ SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
++ }
++
++out:
++ return bStatus;
++}
++
++IMG_VOID PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++
++ IMG_BOOL *pbStatus;
++ IMG_UINT32 *pui32InterruptSource;
++ IMG_UINT32 *pui32ClearInterrupts;
++
++ pbStatus = va_arg(va, IMG_BOOL*);
++ pui32InterruptSource = va_arg(va, IMG_UINT32*);
++ pui32ClearInterrupts = va_arg(va, IMG_UINT32*);
++
++
++ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++ {
++ if(*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++ {
++ if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData))
++ {
++
++ *pbStatus = IMG_TRUE;
++ }
++
++ *pui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit;
++ }
++ }
++}
++
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ IMG_UINT32 ui32InterruptSource;
++ IMG_UINT32 ui32ClearInterrupts = 0;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
++ }
++ else
++ {
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL);
++
++
++ if(ui32InterruptSource)
++ {
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVSystemLISR_ForEachVaCb,
++ &bStatus,
++ &ui32InterruptSource,
++ &ui32ClearInterrupts);
++
++ SysClearInterrupts(psSysData, ui32ClearInterrupts);
++ }
++ }
++ return bStatus;
++}
++
++
++IMG_VOID PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
++ {
++ (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
++ }
++}
++
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
++ return;
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVMISR_ForEachCb);
++
++
++ if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED)
++ {
++ PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
++ }
++
++
++ if (psSysData->psGlobalEventObject)
++ {
++ IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
++ if(hOSEventKM)
++ {
++ OSEventObjectSignal(hOSEventKM);
++ }
++ }
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID)
++{
++ return PVRSRVPerProcessDataConnect(ui32PID);
++}
++
++
++IMG_EXPORT
++IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID)
++{
++ PVRSRVPerProcessDataDisconnect(ui32PID);
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer,
++ IMG_SIZE_T *puiBufSize, IMG_BOOL bSave)
++{
++ IMG_SIZE_T uiBytesSaved = 0;
++ IMG_PVOID pvLocalMemCPUVAddr;
++ RA_SEGMENT_DETAILS sSegDetails;
++
++ if (hArena == IMG_NULL)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ sSegDetails.uiSize = 0;
++ sSegDetails.sCpuPhyAddr.uiAddr = 0;
++ sSegDetails.hSegment = 0;
++
++
++ while (RA_GetNextLiveSegment(hArena, &sSegDetails))
++ {
++ if (pbyBuffer == IMG_NULL)
++ {
++
++ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++ }
++ else
++ {
++ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize)
++ {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize));
++
++
++ pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (pvLocalMemCPUVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ if (bSave)
++ {
++
++ OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize));
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ else
++ {
++ IMG_UINT32 uiSize;
++
++ OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize));
++
++ if (uiSize != sSegDetails.uiSize)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error"));
++ }
++ else
++ {
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ }
++
++
++ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++ OSUnMapPhysToLin(pvLocalMemCPUVAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++
++ if (pbyBuffer == IMG_NULL)
++ {
++ *puiBufSize = uiBytesSaved;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/queue.c
+@@ -0,0 +1,1166 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "lists.h"
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++#include "proc.h"
++
++static IMG_INT
++QueuePrintCommands (PVRSRV_QUEUE_INFO * psQueue, IMG_CHAR * buffer, size_t size)
++{
++ off_t off = 0;
++ IMG_INT cmds = 0;
++ IMG_SIZE_T ui32ReadOffset = psQueue->ui32ReadOffset;
++ IMG_SIZE_T ui32WriteOffset = psQueue->ui32WriteOffset;
++ PVRSRV_COMMAND * psCmd;
++
++ while (ui32ReadOffset != ui32WriteOffset)
++ {
++ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++ off = printAppend(buffer, size, off, "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n",
++ psQueue,
++ psCmd,
++ psCmd->ui32ProcessID,
++ psCmd->CommandType,
++ psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex,
++ psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount,
++ psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++ if (cmds == 0)
++ off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++ return off;
++}
++
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void ProcSeqShowQueue(struct seq_file *sfile,void* el)
++{
++ PVRSRV_QUEUE_INFO * psQueue = (PVRSRV_QUEUE_INFO*)el;
++ IMG_INT cmds = 0;
++ IMG_SIZE_T ui32ReadOffset;
++ IMG_SIZE_T ui32WriteOffset;
++ PVRSRV_COMMAND * psCmd;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++ return;
++ }
++
++ ui32ReadOffset = psQueue->ui32ReadOffset;
++ ui32WriteOffset = psQueue->ui32WriteOffset;
++
++ while (ui32ReadOffset != ui32WriteOffset)
++ {
++ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++ seq_printf(sfile, "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n",
++ psQueue,
++ psCmd,
++ psCmd->ui32ProcessID,
++ psCmd->CommandType,
++ psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex,
++ psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount,
++ psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++
++ if (cmds == 0)
++ seq_printf(sfile, "%p <empty>\n", psQueue);
++}
++
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off)
++{
++ PVRSRV_QUEUE_INFO * psQueue;
++ SYS_DATA * psSysData;
++
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM);
++ return psQueue;
++}
++
++#endif
++
++off_t
++QueuePrintQueues (IMG_CHAR * buffer, size_t size, off_t off)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_QUEUE_INFO * psQueue;
++
++ SysAcquireData(&psSysData);
++
++ if (!off)
++ return printAppend (buffer, size, 0,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++
++
++
++ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM)
++ ;
++
++ return psQueue ? QueuePrintCommands (psQueue, buffer, size) : END_OF_FILE;
++}
++#endif
++
++#define GET_SPACE_IN_CMDQ(psQueue) \
++ (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset) \
++ + (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
++ psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
++ (ui32OpsComplete >= ui32OpsPending)
++
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData,
++ IMG_UINT32 i,
++ IMG_BOOL bIsSrc)
++{
++ PVRSRV_SYNC_OBJECT *psSyncObject;
++
++ psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync;
++
++ if (psCmdCompleteData->bInUse)
++ {
++ PVR_LOG(("\t%s %lu: ROC DevVAddr:0x%lX ROP:0x%lx ROC:0x%lx, WOC DevVAddr:0x%lX WOP:0x%lx WOC:0x%lx",
++ bIsSrc ? "SRC" : "DEST", i,
++ psSyncObject[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsPending,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete,
++ psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete));
++ }
++ else
++ {
++ PVR_LOG(("\t%s %lu: (Not in use)", bIsSrc ? "SRC" : "DEST", i));
++ }
++}
++
++
++static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++ {
++ IMG_UINT32 i;
++ SYS_DATA *psSysData;
++ COMMAND_COMPLETE_DATA **ppsCmdCompleteData;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++ SysAcquireData(&psSysData);
++
++ ppsCmdCompleteData = psSysData->ppsCmdCompleteData[psDeviceNode->sDevId.ui32DeviceIndex];
++
++ if (ppsCmdCompleteData != IMG_NULL)
++ {
++ psCmdCompleteData = ppsCmdCompleteData[DC_FLIP_COMMAND];
++
++ PVR_LOG(("Command Complete Data for display device %lu:", psDeviceNode->sDevId.ui32DeviceIndex));
++
++ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount; i++)
++ {
++ QueueDumpCmdComplete(psCmdCompleteData, i, IMG_TRUE);
++ }
++
++ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount; i++)
++ {
++ QueueDumpCmdComplete(psCmdCompleteData, i, IMG_FALSE);
++ }
++ }
++ else
++ {
++ PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ }
++}
++
++
++IMG_VOID QueueDumpDebugInfo(IMG_VOID)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, QueueDumpDebugInfo_ForEachCb);
++}
++
++
++IMG_SIZE_T NearestPower2(IMG_SIZE_T ui32Value)
++{
++ IMG_SIZE_T ui32Temp, ui32Result = 1;
++
++ if(!ui32Value)
++ return 0;
++
++ ui32Temp = ui32Value - 1;
++ while(ui32Temp)
++ {
++ ui32Result <<= 1;
++ ui32Temp >>= 1;
++ }
++
++ return ui32Result;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++ IMG_SIZE_T ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hMemBlock;
++
++ SysAcquireData(&psSysData);
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ (IMG_VOID **)&psQueueInfo, &hMemBlock,
++ "Queue Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
++ goto ErrorExit;
++ }
++ OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
++
++ psQueueInfo->hMemBlock[0] = hMemBlock;
++ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
++ &psQueueInfo->pvLinQueueKM, &hMemBlock,
++ "Command Queue") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
++ goto ErrorExit;
++ }
++
++ psQueueInfo->hMemBlock[1] = hMemBlock;
++ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++
++ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++ psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++
++ if (psSysData->psQueueList == IMG_NULL)
++ {
++ eError = OSCreateResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++
++ if (OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID) != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ psQueueInfo->psNextKM = psSysData->psQueueList;
++ psSysData->psQueueList = psQueueInfo;
++
++ if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ *ppsQueueInfo = psQueueInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if(psQueueInfo)
++ {
++ if(psQueueInfo->pvLinQueueKM)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++ SysAcquireData(&psSysData);
++
++ psQueue = psSysData->psQueueList;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
++ {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
++ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++ goto ErrorExit;
++ }
++
++
++ eError = OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ if(psQueue == psQueueInfo)
++ {
++ psSysData->psQueueList = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ psQueueInfo = IMG_NULL;
++ }
++ else
++ {
++ while(psQueue)
++ {
++ if(psQueue->psNextKM == psQueueInfo)
++ {
++ psQueue->psNextKM = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ psQueueInfo = IMG_NULL;
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if(!psQueue)
++ {
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto ErrorExit;
++ }
++ }
++
++
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++
++ if (psSysData->psQueueList == IMG_NULL)
++ {
++ eError = OSDestroyResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ErrorExit:
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ IMG_SIZE_T ui32ParamSize,
++ IMG_VOID **ppvSpace)
++{
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++
++ ui32ParamSize = (ui32ParamSize+3) & 0xFFFFFFFC;
++
++ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
++ {
++ PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
++ return PVRSRV_ERROR_CMD_TOO_BIG;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
++ {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout == IMG_TRUE)
++ {
++ *ppvSpace = IMG_NULL;
++
++ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++ }
++ else
++ {
++ *ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->ui32WriteOffset);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ IMG_UINT32 ui32DevIndex,
++ IMG_UINT16 CommandType,
++ IMG_UINT32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ IMG_UINT32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ IMG_SIZE_T ui32DataByteSize )
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_COMMAND *psCommand;
++ IMG_SIZE_T ui32CommandSize;
++ IMG_UINT32 i;
++
++
++ ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL;
++
++
++ ui32CommandSize = sizeof(PVRSRV_COMMAND)
++ + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
++ + ui32DataByteSize;
++
++
++ eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++ psCommand->ui32CmdSize = ui32CommandSize;
++ psCommand->ui32DevIndex = ui32DevIndex;
++ psCommand->CommandType = CommandType;
++ psCommand->ui32DstSyncCount = ui32DstSyncCount;
++ psCommand->ui32SrcSyncCount = ui32SrcSyncCount;
++
++
++ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND));
++
++
++ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync)
++ + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync)
++ + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++ psCommand->ui32DataSize = ui32DataByteSize;
++
++
++ for (i=0; i<ui32DstSyncCount; i++)
++ {
++ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++ psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
++ psCommand->psDstSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].ui32ReadOpsPending,
++ psCommand->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ for (i=0; i<ui32SrcSyncCount; i++)
++ {
++ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++ psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
++ psCommand->psSrcSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].ui32ReadOpsPending,
++ psCommand->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++ *ppsCommand = psCommand;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand)
++{
++
++
++
++ if (psCommand->ui32DstSyncCount > 0)
++ {
++ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
++ }
++
++ if (psCommand->ui32SrcSyncCount > 0)
++ {
++ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++ }
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
++ + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++
++ UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData,
++ PVRSRV_COMMAND *psCommand,
++ IMG_BOOL bFlush)
++{
++ PVRSRV_SYNC_OBJECT *psWalkerObj;
++ PVRSRV_SYNC_OBJECT *psEndObj;
++ IMG_UINT32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 ui32WriteOpsComplete;
++ IMG_UINT32 ui32ReadOpsComplete;
++
++
++ psWalkerObj = psCommand->psDstSync;
++ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++ while (psWalkerObj < psEndObj)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++ {
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++
++ psWalkerObj++;
++ }
++
++
++ psWalkerObj = psCommand->psSrcSync;
++ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++ while (psWalkerObj < psEndObj)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++ {
++ if (!bFlush &&
++ SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
++ SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++ psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
++ }
++
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++ psWalkerObj++;
++ }
++
++
++ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++ psCommand->ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psCmdCompleteData = psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->CommandType];
++ if (psCmdCompleteData->bInUse)
++ {
++
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++
++
++ psCmdCompleteData->bInUse = IMG_TRUE;
++
++
++ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++ for (i=0; i<psCommand->ui32DstSyncCount; i++)
++ {
++ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++ for (i=0; i<psCommand->ui32SrcSyncCount; i++)
++ {
++ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++
++
++
++
++
++
++
++
++
++ if (psSysData->ppfnCmdProcList[psCommand->ui32DevIndex][psCommand->CommandType]((IMG_HANDLE)psCmdCompleteData,
++ psCommand->ui32DataSize,
++ psCommand->pvData) == IMG_FALSE)
++ {
++
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (psDeviceNode->bReProcessDeviceCommandComplete &&
++ psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++ {
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bFlush)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_COMMAND *psCommand;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++
++ psSysData->bReProcessQueues = IMG_FALSE;
++
++
++ eError = OSLockResource(&psSysData->sQProcessResource,
++ ui32CallerID);
++ if(eError != PVRSRV_OK)
++ {
++
++ psSysData->bReProcessQueues = IMG_TRUE;
++
++
++ if(ui32CallerID == ISR_ID)
++ {
++ if (bFlush)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Couldn't acquire queue processing lock"));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Queue processing lock-acquire failed when called from the Services driver."));
++ PVR_DPF((PVR_DBG_MESSAGE," This is due to MISR queue processing being interrupted by the Services driver."));
++ }
++
++ return PVRSRV_OK;
++ }
++
++ psQueue = psSysData->psQueueList;
++
++ if(!psQueue)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
++ }
++
++ if (bFlush)
++ {
++ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++ }
++
++ while (psQueue)
++ {
++ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
++ {
++ psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);
++
++ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
++ {
++
++ UPDATE_QUEUE_ROFF(psQueue, psCommand->ui32CmdSize)
++
++ if (bFlush)
++ {
++ continue;
++ }
++ }
++
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (bFlush)
++ {
++ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVProcessQueues_ForEachCb);
++
++
++
++ OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++
++
++ if(psSysData->bReProcessQueues)
++ {
++ return PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
++IMG_INTERNAL
++IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie,
++ IMG_BOOL bScheduleMISR)
++{
++ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++#if defined(SYS_USING_INTERRUPTS)
++ if(bScheduleMISR)
++ {
++ OSScheduleMISR(psSysData);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
++#endif
++}
++
++#endif
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie,
++ IMG_BOOL bScheduleMISR)
++{
++ IMG_UINT32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
++ {
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
++ {
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++#if defined(SYS_USING_INTERRUPTS)
++ if(bScheduleMISR)
++ {
++ OSScheduleMISR(psSysData);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
++#endif
++}
++
++
++IMG_VOID PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++ {
++
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ }
++}
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVCommandCompleteCallbacks_ForEachCb);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
++ IMG_UINT32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ IMG_SIZE_T ui32AllocSize;
++ PFN_CMD_PROC *ppfnCmdProc;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++
++ if(ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++
++ eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ (IMG_VOID **)&psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL,
++ "Internal Queue Info structure");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc queue"));
++ return eError;
++ }
++
++
++ ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++
++ for (i=0; i<ui32CmdCount; i++)
++ {
++ ppfnCmdProc[i] = ppfnCmdProcList[i];
++ }
++
++
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++ eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL,
++ "Array of Pointers for Command Store");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
++ goto ErrorExit;
++ }
++
++ for (i=0; i<ui32CmdCount; i++)
++ {
++
++
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++ IMG_NULL,
++ "Command Complete Data");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d",i));
++ goto ErrorExit;
++ }
++
++
++ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00, ui32AllocSize);
++
++ psCmdCompleteData = psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++
++ psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
++ (((IMG_UINTPTR_T)psCmdCompleteData)
++ + sizeof(COMMAND_COMPLETE_DATA));
++ psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
++ (((IMG_UINTPTR_T)psCmdCompleteData->psDstSync)
++ + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0]));
++
++ psCmdCompleteData->ui32AllocSize = ui32AllocSize;
++ }
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++ {
++ for (i=0; i<ui32CmdCount; i++)
++ {
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++ {
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] = IMG_NULL;
++ }
++ }
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = IMG_NULL;
++ }
++
++ if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++ {
++ ui32AllocSize = ui32CmdCount * sizeof(PFN_CMD_PROC);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = IMG_NULL;
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ IMG_UINT32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++
++
++ if(ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex] == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveCmdProcListKM: Invalid command array"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ else
++ {
++ for(i=0; i<ui32CmdCount; i++)
++ {
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i]->ui32AllocSize,
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++ IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] = IMG_NULL;
++ }
++ }
++
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*),
++ psSysData->ppsCmdCompleteData[ui32DevIndex],
++ IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = IMG_NULL;
++ }
++
++
++ if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ psSysData->ppfnCmdProcList[ui32DevIndex],
++ IMG_NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = IMG_NULL;
++ }
++
++ return PVRSRV_OK;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/ra.c
+@@ -0,0 +1,1871 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#ifdef __linux__
++#include <linux/kernel.h>
++#include "proc.h"
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++#include <stdio.h>
++#endif
++
++#define MINIMUM_HASH_SIZE (64)
++
++#if defined(VALIDATE_ARENA_TEST)
++
++typedef enum RESOURCE_DESCRIPTOR_TAG {
++
++ RESOURCE_SPAN_LIVE = 10,
++ RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_START,
++ IMPORTED_RESOURCE_SPAN_LIVE,
++ IMPORTED_RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_END,
++
++} RESOURCE_DESCRIPTOR;
++
++typedef enum RESOURCE_TYPE_TAG {
++
++ IMPORTED_RESOURCE_TYPE = 20,
++ NON_IMPORTED_RESOURCE_TYPE
++
++} RESOURCE_TYPE;
++
++
++static IMG_UINT32 ui32BoundaryTagID = 0;
++
++IMG_UINT32 ValidateArena(RA_ARENA *pArena);
++#endif
++
++struct _BT_
++{
++ enum bt_type
++ {
++ btt_span,
++ btt_free,
++ btt_live
++ } type;
++
++
++ IMG_UINTPTR_T base;
++ IMG_SIZE_T uSize;
++
++
++ struct _BT_ *pNextSegment;
++ struct _BT_ *pPrevSegment;
++
++ struct _BT_ *pNextFree;
++ struct _BT_ *pPrevFree;
++
++ BM_MAPPING *psMapping;
++
++#if defined(VALIDATE_ARENA_TEST)
++ RESOURCE_DESCRIPTOR eResourceSpan;
++ RESOURCE_TYPE eResourceType;
++
++
++ IMG_UINT32 ui32BoundaryTagID;
++#endif
++
++};
++typedef struct _BT_ BT;
++
++
++struct _RA_ARENA_
++{
++
++ IMG_CHAR *name;
++
++
++ IMG_SIZE_T uQuantum;
++
++
++ IMG_BOOL (*pImportAlloc)(IMG_VOID *,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase);
++ IMG_VOID (*pImportFree) (IMG_VOID *,
++ IMG_UINTPTR_T,
++ BM_MAPPING *psMapping);
++ IMG_VOID (*pBackingStoreFree) (IMG_VOID *, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE);
++
++
++ IMG_VOID *pImportHandle;
++
++
++#define FREE_TABLE_LIMIT 32
++
++
++ BT *aHeadFree [FREE_TABLE_LIMIT];
++
++
++ BT *pHeadSegment;
++ BT *pTailSegment;
++
++
++ HASH_TABLE *pSegmentHash;
++
++#ifdef RA_STATS
++ RA_STATISTICS sStatistics;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE 32
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ struct proc_dir_entry* pProcInfo;
++ struct proc_dir_entry* pProcSegs;
++#else
++ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
++ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
++#endif
++
++ IMG_BOOL bInitProcEntry;
++#endif
++};
++#if defined(ENABLE_RA_DUMP)
++IMG_VOID RA_Dump (RA_ARENA *pArena);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el);
++static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off);
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el);
++static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off);
++
++#else
++static IMG_INT
++RA_DumpSegs(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++static IMG_INT
++RA_DumpInfo(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++IMG_VOID CheckBMFreespace(IMG_VOID);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static IMG_CHAR *ReplaceSpaces(IMG_CHAR * const pS)
++{
++ IMG_CHAR *pT;
++
++ for(pT = pS; *pT != 0; pT++)
++ {
++ if (*pT == ' ' || *pT == '\t')
++ {
++ *pT = '_';
++ }
++ }
++
++ return pS;
++}
++#endif
++
++static IMG_BOOL
++_RequestAllocFail (IMG_VOID *_h,
++ IMG_SIZE_T _uSize,
++ IMG_SIZE_T *_pActualSize,
++ BM_MAPPING **_ppsMapping,
++ IMG_UINT32 _uFlags,
++ IMG_UINTPTR_T *_pBase)
++{
++ PVR_UNREFERENCED_PARAMETER (_h);
++ PVR_UNREFERENCED_PARAMETER (_uSize);
++ PVR_UNREFERENCED_PARAMETER (_pActualSize);
++ PVR_UNREFERENCED_PARAMETER (_ppsMapping);
++ PVR_UNREFERENCED_PARAMETER (_uFlags);
++ PVR_UNREFERENCED_PARAMETER (_pBase);
++
++ return IMG_FALSE;
++}
++
++static IMG_UINT32
++pvr_log2 (IMG_SIZE_T n)
++{
++ IMG_UINT32 l = 0;
++ n>>=1;
++ while (n>0)
++ {
++ n>>=1;
++ l++;
++ }
++ return l;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsertAfter (RA_ARENA *pArena,
++ BT *pInsertionPoint,
++ BT *pBT)
++{
++ PVR_ASSERT (pArena != IMG_NULL);
++ PVR_ASSERT (pInsertionPoint != IMG_NULL);
++
++ if ((pInsertionPoint == IMG_NULL) || (pArena == IMG_NULL))
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentListInsertAfter: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBT->pNextSegment = pInsertionPoint->pNextSegment;
++ pBT->pPrevSegment = pInsertionPoint;
++ if (pInsertionPoint->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pBT;
++ else
++ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
++ pInsertionPoint->pNextSegment = pBT;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++
++ if (pArena->pHeadSegment == IMG_NULL)
++ {
++ pArena->pHeadSegment = pArena->pTailSegment = pBT;
++ pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
++ }
++ else
++ {
++ BT *pBTScan;
++
++ if (pBT->base < pArena->pHeadSegment->base)
++ {
++
++ pBT->pNextSegment = pArena->pHeadSegment;
++ pArena->pHeadSegment->pPrevSegment = pBT;
++ pArena->pHeadSegment = pBT;
++ pBT->pPrevSegment = IMG_NULL;
++ }
++ else
++ {
++
++
++
++
++ pBTScan = pArena->pHeadSegment;
++
++ while ((pBTScan->pNextSegment != IMG_NULL) && (pBT->base >= pBTScan->pNextSegment->base))
++ {
++ pBTScan = pBTScan->pNextSegment;
++ }
++
++ eError = _SegmentListInsertAfter (pArena, pBTScan, pBT);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ }
++ return eError;
++}
++
++static IMG_VOID
++_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
++{
++ if (pBT->pPrevSegment == IMG_NULL)
++ pArena->pHeadSegment = pBT->pNextSegment;
++ else
++ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++ if (pBT->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pBT->pPrevSegment;
++ else
++ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static BT *
++_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
++{
++ BT *pNeighbour;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pNeighbour, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pNeighbour, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pNeighbour->pPrevSegment = pBT;
++ pNeighbour->pNextSegment = pBT->pNextSegment;
++ if (pBT->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pNeighbour;
++ else
++ pBT->pNextSegment->pPrevSegment = pNeighbour;
++ pBT->pNextSegment = pNeighbour;
++
++ pNeighbour->type = btt_free;
++ pNeighbour->uSize = pBT->uSize - uSize;
++ pNeighbour->base = pBT->base + uSize;
++ pNeighbour->psMapping = pBT->psMapping;
++ pBT->uSize = uSize;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ }
++ else if (pNeighbour->pPrevSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++
++ return pNeighbour;
++}
++
++static IMG_VOID
++_FreeListInsert (RA_ARENA *pArena, BT *pBT)
++{
++ IMG_UINT32 uIndex;
++ uIndex = pvr_log2 (pBT->uSize);
++ pBT->type = btt_free;
++ pBT->pNextFree = pArena->aHeadFree [uIndex];
++ pBT->pPrevFree = IMG_NULL;
++ if (pArena->aHeadFree[uIndex] != IMG_NULL)
++ pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++ pArena->aHeadFree [uIndex] = pBT;
++}
++
++static IMG_VOID
++_FreeListRemove (RA_ARENA *pArena, BT *pBT)
++{
++ IMG_UINT32 uIndex;
++ uIndex = pvr_log2 (pBT->uSize);
++ if (pBT->pNextFree != IMG_NULL)
++ pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++ if (pBT->pPrevFree == IMG_NULL)
++ pArena->aHeadFree[uIndex] = pBT->pNextFree;
++ else
++ pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++static BT *
++_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pBT, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_span;
++ pBT->base = base;
++ pBT->uSize = uSize;
++ pBT->psMapping = IMG_NULL;
++
++ return pBT;
++}
++
++static BT *
++_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pBT, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_free;
++ pBT->base = base;
++ pBT->uSize = uSize;
++
++ return pBT;
++}
++
++static BT *
++_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++ PVR_ASSERT (pArena!=IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ pBT = _BuildBT (base, uSize);
++ if (pBT != IMG_NULL)
++ {
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = RESOURCE_SPAN_FREE;
++ pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++#endif
++
++ if (_SegmentListInsert (pArena, pBT) != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: call to _SegmentListInsert failed"));
++ return IMG_NULL;
++ }
++ _FreeListInsert (pArena, pBT);
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount+=uSize;
++ pArena->sStatistics.uFreeResourceCount+=uSize;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ }
++ return pBT;
++}
++
++static BT *
++_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ PVRSRV_ERROR eError;
++ BT *pSpanStart;
++ BT *pSpanEnd;
++ BT *pBT;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResourceSpan: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++ pArena->name, base, uSize));
++
++ pSpanStart = _BuildSpanMarker (base, uSize);
++ if (pSpanStart == IMG_NULL)
++ {
++ goto fail_start;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
++ pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pSpanEnd = _BuildSpanMarker (base + uSize, 0);
++ if (pSpanEnd == IMG_NULL)
++ {
++ goto fail_end;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
++ pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pBT = _BuildBT (base, uSize);
++ if (pBT == IMG_NULL)
++ {
++ goto fail_bt;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ eError = _SegmentListInsert (pArena, pSpanStart);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++ eError = _SegmentListInsertAfter (pArena, pSpanStart, pBT);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++ _FreeListInsert (pArena, pBT);
++
++ eError = _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount+=uSize;
++#endif
++ return pBT;
++
++ fail_SegListInsert:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++ fail_bt:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
++
++ fail_end:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
++
++ fail_start:
++ return IMG_NULL;
++}
++
++static IMG_VOID
++_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
++{
++ BT *pNeighbour;
++ IMG_UINTPTR_T uOrigBase;
++ IMG_SIZE_T uOrigSize;
++
++ PVR_ASSERT (pArena!=IMG_NULL);
++ PVR_ASSERT (pBT!=IMG_NULL);
++
++ if ((pArena == IMG_NULL) || (pBT == IMG_NULL))
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_FreeBT: invalid parameter"));
++ return;
++ }
++
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount--;
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++#endif
++
++ uOrigBase = pBT->base;
++ uOrigSize = pBT->uSize;
++
++
++ pNeighbour = pBT->pPrevSegment;
++ if (pNeighbour!=IMG_NULL
++ && pNeighbour->type == btt_free
++ && pNeighbour->base + pNeighbour->uSize == pBT->base)
++ {
++ _FreeListRemove (pArena, pNeighbour);
++ _SegmentListRemove (pArena, pNeighbour);
++ pBT->base = pNeighbour->base;
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++
++ pNeighbour = pBT->pNextSegment;
++ if (pNeighbour!=IMG_NULL
++ && pNeighbour->type == btt_free
++ && pBT->base + pBT->uSize == pNeighbour->base)
++ {
++ _FreeListRemove (pArena, pNeighbour);
++ _SegmentListRemove (pArena, pNeighbour);
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++
++ if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
++ {
++ IMG_UINTPTR_T uRoundedStart, uRoundedEnd;
++
++
++ uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedStart < pBT->base)
++ {
++ uRoundedStart += pArena->uQuantum;
++ }
++
++
++ uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedEnd > (pBT->base + pBT->uSize))
++ {
++ uRoundedEnd -= pArena->uQuantum;
++ }
++
++ if (uRoundedStart < uRoundedEnd)
++ {
++ pArena->pBackingStoreFree(pArena->pImportHandle, uRoundedStart, uRoundedEnd, (IMG_HANDLE)0);
++ }
++ }
++
++ if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
++ && pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
++ {
++ BT *next = pBT->pNextSegment;
++ BT *prev = pBT->pPrevSegment;
++ _SegmentListRemove (pArena, next);
++ _SegmentListRemove (pArena, prev);
++ _SegmentListRemove (pArena, pBT);
++ pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++ pArena->sStatistics.uExportCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++ pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
++#endif
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++ }
++ else
++ _FreeListInsert (pArena, pBT);
++}
++
++
++static IMG_BOOL
++_AttemptAllocAligned (RA_ARENA *pArena,
++ IMG_SIZE_T uSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *base)
++{
++ IMG_UINT32 uIndex;
++ PVR_ASSERT (pArena!=IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++ if (uAlignment>1)
++ uAlignmentOffset %= uAlignment;
++
++
++
++ uIndex = pvr_log2 (uSize);
++
++#if 0
++
++ if (1u<<uIndex < uSize)
++ uIndex++;
++#endif
++
++ while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
++ uIndex++;
++
++ while (uIndex < FREE_TABLE_LIMIT)
++ {
++ if (pArena->aHeadFree[uIndex]!=IMG_NULL)
++ {
++
++ BT *pBT;
++
++ pBT = pArena->aHeadFree [uIndex];
++ while (pBT!=IMG_NULL)
++ {
++ IMG_UINTPTR_T aligned_base;
++
++ if (uAlignment>1)
++ aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
++ else
++ aligned_base = pBT->base;
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_AttemptAllocAligned: pBT-base=0x%x "
++ "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++ pBT->base, pBT->uSize, aligned_base, uSize));
++
++ if (pBT->base + pBT->uSize >= aligned_base + uSize)
++ {
++ if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
++ {
++ _FreeListRemove (pArena, pBT);
++
++ PVR_ASSERT (pBT->type == btt_free);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++#endif
++
++
++ if (aligned_base > pBT->base)
++ {
++ BT *pNeighbour;
++
++ pNeighbour = _SegmentSplit (pArena, pBT, aligned_base-pBT->base);
++
++ if (pNeighbour==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
++
++ _FreeListInsert (pArena, pBT);
++ return IMG_FALSE;
++ }
++
++ _FreeListInsert (pArena, pBT);
++ #ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++ #endif
++ pBT = pNeighbour;
++ }
++
++
++ if (pBT->uSize > uSize)
++ {
++ BT *pNeighbour;
++ pNeighbour = _SegmentSplit (pArena, pBT, uSize);
++
++ if (pNeighbour==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
++
++ _FreeListInsert (pArena, pBT);
++ return IMG_FALSE;
++ }
++
++ _FreeListInsert (pArena, pNeighbour);
++ #ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
++ #endif
++ }
++
++ pBT->type = btt_live;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pBT->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_LIVE;
++ }
++ else if (pBT->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan = RESOURCE_SPAN_LIVE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++ if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
++ {
++ _FreeBT (pArena, pBT, IMG_FALSE);
++ return IMG_FALSE;
++ }
++
++ if (ppsMapping!=IMG_NULL)
++ *ppsMapping = pBT->psMapping;
++
++ *base = pBT->base;
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));
++
++ }
++ }
++ pBT = pBT->pNextFree;
++ }
++
++ }
++ uIndex++;
++ }
++
++ return IMG_FALSE;
++}
++
++
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++ IMG_UINTPTR_T base,
++ IMG_SIZE_T uSize,
++ BM_MAPPING *psMapping,
++ IMG_SIZE_T uQuantum,
++ IMG_BOOL (*imp_alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping, IMG_UINT32 _flags, IMG_UINTPTR_T *pBase),
++ IMG_VOID (*imp_free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *),
++ IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE),
++ IMG_VOID *pImportHandle)
++{
++ RA_ARENA *pArena;
++ BT *pBT;
++ IMG_INT i;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++ name, base, uSize, imp_alloc, imp_free));
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (*pArena),
++ (IMG_VOID **)&pArena, IMG_NULL,
++ "Resource Arena") != PVRSRV_OK)
++ {
++ goto arena_fail;
++ }
++
++ pArena->name = name;
++ pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ? imp_alloc : _RequestAllocFail;
++ pArena->pImportFree = imp_free;
++ pArena->pBackingStoreFree = backingstore_free;
++ pArena->pImportHandle = pImportHandle;
++ for (i=0; i<FREE_TABLE_LIMIT; i++)
++ pArena->aHeadFree[i] = IMG_NULL;
++ pArena->pHeadSegment = IMG_NULL;
++ pArena->pTailSegment = IMG_NULL;
++ pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount = 0;
++ pArena->sStatistics.uLiveSegmentCount = 0;
++ pArena->sStatistics.uFreeSegmentCount = 0;
++ pArena->sStatistics.uFreeResourceCount = 0;
++ pArena->sStatistics.uTotalResourceCount = 0;
++ pArena->sStatistics.uCumulativeAllocs = 0;
++ pArena->sStatistics.uCumulativeFrees = 0;
++ pArena->sStatistics.uImportCount = 0;
++ pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ if(strcmp(pArena->name,"") != 0)
++ {
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++ IMG_INT ret;
++ IMG_INT (*pfnCreateProcEntry)(const IMG_CHAR *, read_proc_t, write_proc_t, IMG_VOID *);
++
++ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++
++ pfnCreateProcEntry = pArena->bInitProcEntry ? CreateProcEntry : CreatePerProcessProcEntry;
++
++ ret = snprintf(pArena->szProcInfoName, sizeof(pArena->szProcInfoName), "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcInfoName))
++ {
++ (IMG_VOID) pfnCreateProcEntry(ReplaceSpaces(pArena->szProcInfoName), RA_DumpInfo, 0, pArena);
++ }
++ else
++ {
++ pArena->szProcInfoName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
++ }
++
++ ret = snprintf(pArena->szProcSegsName, sizeof(pArena->szProcSegsName), "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcSegsName))
++ {
++ (IMG_VOID) pfnCreateProcEntry(ReplaceSpaces(pArena->szProcSegsName), RA_DumpSegs, 0, pArena);
++ }
++ else
++ {
++ pArena->szProcSegsName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
++ }
++#else
++
++ IMG_INT ret;
++ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
++ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
++ struct proc_dir_entry* (*pfnCreateProcEntrySeq)(const IMG_CHAR *,
++ IMG_VOID*,
++ pvr_next_proc_seq_t,
++ pvr_show_proc_seq_t,
++ pvr_off2element_proc_seq_t,
++ pvr_startstop_proc_seq_t,
++ write_proc_t);
++
++ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++
++ pfnCreateProcEntrySeq = pArena->bInitProcEntry ? CreateProcEntrySeq : CreatePerProcessProcEntrySeq;
++
++ ret = snprintf(szProcInfoName, sizeof(szProcInfoName), "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName))
++ {
++ pArena->pProcInfo = pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName), pArena, NULL,
++ RA_ProcSeqShowInfo, RA_ProcSeqOff2ElementInfo, NULL, NULL);
++ }
++ else
++ {
++ pArena->pProcInfo = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
++ }
++
++ ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName))
++ {
++ pArena->pProcSegs = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL,
++ RA_ProcSeqShowRegs, RA_ProcSeqOff2ElementRegs, NULL, NULL);
++ }
++ else
++ {
++ pArena->pProcSegs = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
++ }
++
++#endif
++
++ }
++#endif
++
++ pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE);
++ if (pArena->pSegmentHash==IMG_NULL)
++ {
++ goto hash_fail;
++ }
++ if (uSize>0)
++ {
++ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++ pBT = _InsertResource (pArena, base, uSize);
++ if (pBT == IMG_NULL)
++ {
++ goto insert_fail;
++ }
++ pBT->psMapping = psMapping;
++
++ }
++ return pArena;
++
++insert_fail:
++ HASH_Delete (pArena->pSegmentHash);
++hash_fail:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
++
++arena_fail:
++ return IMG_NULL;
++}
++
++IMG_VOID
++RA_Delete (RA_ARENA *pArena)
++{
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT(pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
++ return;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Delete: name='%s'", pArena->name));
++
++ for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
++ pArena->aHeadFree[uIndex] = IMG_NULL;
++
++ while (pArena->pHeadSegment != IMG_NULL)
++ {
++ BT *pBT = pArena->pHeadSegment;
++
++ if (pBT->type != btt_free)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: allocations still exist in the arena that is being destroyed"));
++ PVR_DPF ((PVR_DBG_ERROR,"Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
++ }
++
++ _SegmentListRemove (pArena, pBT);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++#endif
++ }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ {
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ IMG_VOID (*pfnRemoveProcEntrySeq)(struct proc_dir_entry*);
++
++ pfnRemoveProcEntrySeq = pArena->bInitProcEntry ? RemoveProcEntrySeq : RemovePerProcessProcEntrySeq;
++
++ if (pArena->pProcInfo != 0)
++ {
++ pfnRemoveProcEntrySeq( pArena->pProcInfo );
++ }
++
++ if (pArena->pProcSegs != 0)
++ {
++ pfnRemoveProcEntrySeq( pArena->pProcSegs );
++ }
++
++#else
++ IMG_VOID (*pfnRemoveProcEntry)(const IMG_CHAR *);
++
++ pfnRemoveProcEntry = pArena->bInitProcEntry ? RemoveProcEntry : RemovePerProcessProcEntry;
++
++ if (pArena->szProcInfoName[0] != 0)
++ {
++ pfnRemoveProcEntry(pArena->szProcInfoName);
++ }
++
++ if (pArena->szProcSegsName[0] != 0)
++ {
++ pfnRemoveProcEntry(pArena->szProcSegsName);
++ }
++
++#endif
++ }
++#endif
++ HASH_Delete (pArena->pSegmentHash);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
++
++}
++
++IMG_BOOL
++RA_TestDelete (RA_ARENA *pArena)
++{
++ PVR_ASSERT(pArena != IMG_NULL);
++
++ if (pArena != IMG_NULL)
++ {
++ while (pArena->pHeadSegment != IMG_NULL)
++ {
++ BT *pBT = pArena->pHeadSegment;
++ if (pBT->type != btt_free)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: detected resource leak!"));
++ PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize));
++
++ uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum;
++ return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL));
++}
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++ IMG_SIZE_T uRequestSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *base)
++{
++ IMG_BOOL bResult;
++ IMG_SIZE_T uSize = uRequestSize;
++
++ PVR_ASSERT (pArena!=IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ if (pActualSize != IMG_NULL)
++ {
++ *pActualSize = uSize;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x",
++ pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset));
++
++
++
++ bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset, base);
++ if (!bResult)
++ {
++ BM_MAPPING *psImportMapping;
++ IMG_UINTPTR_T import_base;
++ IMG_SIZE_T uImportSize = uSize;
++
++
++
++
++ if (uAlignment > pArena->uQuantum)
++ {
++ uImportSize += (uAlignment - 1);
++ }
++
++
++ uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum;
++
++ bResult =
++ pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize,
++ &psImportMapping, uFlags, &import_base);
++ if (bResult)
++ {
++ BT *pBT;
++ pBT = _InsertResourceSpan (pArena, import_base, uImportSize);
++
++ if (pBT == IMG_NULL)
++ {
++
++ pArena->pImportFree(pArena->pImportHandle, import_base,
++ psImportMapping);
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x failed!",
++ pArena->name, uSize));
++
++ return IMG_FALSE;
++ }
++ pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += uImportSize;
++ pArena->sStatistics.uImportCount++;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset,
++ base);
++ if (!bResult)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s' uAlignment failed!",
++ pArena->name));
++ }
++ }
++ }
++#ifdef RA_STATS
++ if (bResult)
++ pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++ pArena->name, uSize, *base, bResult));
++
++
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++ return bResult;
++}
++
++
++#if defined(VALIDATE_ARENA_TEST)
++
++IMG_UINT32 ValidateArena(RA_ARENA *pArena)
++{
++ BT* pSegment;
++ RESOURCE_DESCRIPTOR eNextSpan;
++
++ pSegment = pArena->pHeadSegment;
++
++ if (pSegment == IMG_NULL)
++ {
++ return 0;
++ }
++
++ if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ PVR_ASSERT(pSegment->eResourceSpan == IMPORTED_RESOURCE_SPAN_START);
++
++ while (pSegment->pNextSegment)
++ {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan)
++ {
++ case IMPORTED_RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_END:
++
++ if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++
++ case IMPORTED_RESOURCE_SPAN_START:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++ }
++ else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE) || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE));
++
++ while (pSegment->pNextSegment)
++ {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan)
++ {
++ case RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"ValidateArena ERROR: pSegment->eResourceType unrecognized"));
++
++ PVR_DBG_BREAK;
++ }
++
++ return 0;
++}
++
++#endif
++
++
++IMG_VOID
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore)
++{
++ BT *pBT;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
++ return;
++ }
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Free: name='%s', base=0x%x", pArena->name, base));
++
++ pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base);
++ PVR_ASSERT (pBT != IMG_NULL);
++
++ if (pBT)
++ {
++ PVR_ASSERT (pBT->base == base);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++{
++ IMG_BYTE* p;
++ IMG_BYTE* endp;
++
++ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
++ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize));
++ while ((IMG_UINT32)p & 3)
++ {
++ *p++ = 0xAA;
++ }
++ while (p < (IMG_BYTE*)((IMG_UINT32)endp & 0xfffffffc))
++ {
++ *(IMG_UINT32*)p = 0xAAAAAAAA;
++ p += sizeof(IMG_UINT32);
++ }
++ while (p < endp)
++ {
++ *p++ = 0xAA;
++ }
++ PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize));
++}
++#endif
++ _FreeBT (pArena, pBT, bFreeBackingStore);
++ }
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails)
++{
++ BT *pBT;
++
++ if (psSegDetails->hSegment)
++ {
++ pBT = (BT *)psSegDetails->hSegment;
++ }
++ else
++ {
++ RA_ARENA *pArena = (RA_ARENA *)hArena;
++
++ pBT = pArena->pHeadSegment;
++ }
++
++ while (pBT != IMG_NULL)
++ {
++ if (pBT->type == btt_live)
++ {
++ psSegDetails->uiSize = pBT->uSize;
++ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++ psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment;
++
++ return IMG_TRUE;
++ }
++
++ pBT = pBT->pNextSegment;
++ }
++
++ psSegDetails->uiSize = 0;
++ psSegDetails->sCpuPhyAddr.uiAddr = 0;
++ psSegDetails->hSegment = (IMG_HANDLE)-1;
++
++ return IMG_FALSE;
++}
++
++
++#ifdef USE_BM_FREESPACE_CHECK
++RA_ARENA* pJFSavedArena = IMG_NULL;
++
++IMG_VOID CheckBMFreespace(IMG_VOID)
++{
++ BT *pBT;
++ IMG_BYTE* p;
++ IMG_BYTE* endp;
++
++ if (pJFSavedArena != IMG_NULL)
++ {
++ for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ if (pBT->type == btt_free)
++ {
++ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
++ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc);
++
++ while ((IMG_UINT32)p & 3)
++ {
++ if (*p++ != 0xAA)
++ {
++ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
++ for (;;);
++ break;
++ }
++ }
++ while (p < endp)
++ {
++ if (*(IMG_UINT32*)p != 0xAAAAAAAA)
++ {
++ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
++ for (;;);
++ break;
++ }
++ p += 4;
++ }
++ }
++ }
++ }
++}
++#endif
++
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
++static IMG_CHAR *
++_BTType (IMG_INT eType)
++{
++ switch (eType)
++ {
++ case btt_span: return "span";
++ case btt_free: return "free";
++ case btt_live: return "live";
++ }
++ return "junk";
++}
++#endif
++
++#if defined(ENABLE_RA_DUMP)
++IMG_VOID
++RA_Dump (RA_ARENA *pArena)
++{
++ BT *pBT;
++ PVR_ASSERT (pArena != IMG_NULL);
++ PVR_DPF ((PVR_DBG_MESSAGE,"Arena '%s':", pArena->name));
++ PVR_DPF ((PVR_DBG_MESSAGE," alloc=%08X free=%08X handle=%08X quantum=%d",
++ pArena->pImportAlloc, pArena->pImportFree, pArena->pImportHandle,
++ pArena->uQuantum));
++ PVR_DPF ((PVR_DBG_MESSAGE," segment Chain:"));
++ if (pArena->pHeadSegment != IMG_NULL &&
++ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++ PVR_DPF ((PVR_DBG_MESSAGE," error: head boundary tag has invalid pPrevSegment"));
++ if (pArena->pTailSegment != IMG_NULL &&
++ pArena->pTailSegment->pNextSegment != IMG_NULL)
++ PVR_DPF ((PVR_DBG_MESSAGE," error: tail boundary tag has invalid pNextSegment"));
++
++ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,"\tbase=0x%x size=0x%x type=%s ref=%08X",
++ (IMG_UINT32) pBT->base, pBT->uSize, _BTType (pBT->type),
++ pBT->pRef));
++ }
++
++#ifdef HASH_TRACE
++ HASH_Dump (pArena->pSegmentHash);
++#endif
++}
++#endif
++
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ IMG_INT off = (IMG_INT)el;
++
++ switch (off)
++ {
++ case 1:
++ seq_printf(sfile, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 2:
++ seq_printf(sfile, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 3:
++ seq_printf(sfile,"span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ break;
++ case 4:
++ seq_printf(sfile, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 5:
++ seq_printf(sfile, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 6:
++ seq_printf(sfile, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 7:
++ seq_printf(sfile, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 8:
++ seq_printf(sfile, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 9:
++ seq_printf(sfile, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ break;
++ case 10:
++ seq_printf(sfile, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ break;
++#endif
++ }
++
++}
++
++static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off)
++{
++#ifdef RA_STATS
++ if(off <= 9)
++#else
++ if(off <= 1)
++#endif
++ return (void*)(IMG_INT)(off+1);
++ return 0;
++}
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ BT *pBT = (BT*)el;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
++ return;
++ }
++
++ if (pBT)
++ {
++ seq_printf(sfile, "%08x %8x %4s %08x\n",
++ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
++ (IMG_UINT)pBT->psMapping);
++ }
++}
++
++static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ BT *pBT = 0;
++
++ if(off == 0)
++ return PVR_PROC_SEQ_START_TOKEN;
++
++ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment);
++
++ return (void*)pBT;
++}
++
++
++
++#else
++static IMG_INT
++RA_DumpSegs(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ BT *pBT = 0;
++ IMG_INT len = 0;
++ RA_ARENA *pArena = (RA_ARENA *)data;
++
++ if (count < 80)
++ {
++ *start = (IMG_CHAR *)0;
++ return (0);
++ }
++ *eof = 0;
++ *start = (IMG_CHAR *)1;
++ if (off == 0)
++ {
++ return printAppend(page, count, 0, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
++ }
++ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment)
++ ;
++ if (pBT)
++ {
++ len = printAppend(page, count, 0, "%08x %8x %4s %08x\n",
++ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
++ (IMG_UINT)pBT->psMapping);
++ }
++ else
++ {
++ *eof = 1;
++ }
++ return (len);
++}
++
++static IMG_INT
++RA_DumpInfo(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ IMG_INT len = 0;
++ RA_ARENA *pArena = (RA_ARENA *)data;
++
++ if (count < 80)
++ {
++ *start = (IMG_CHAR *)0;
++ return (0);
++ }
++ *eof = 0;
++ switch (off)
++ {
++ case 0:
++ len = printAppend(page, count, 0, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 1:
++ len = printAppend(page, count, 0, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 2:
++ len = printAppend(page, count, 0, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ break;
++ case 3:
++ len = printAppend(page, count, 0, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 4:
++ len = printAppend(page, count, 0, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 5:
++ len = printAppend(page, count, 0, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 6:
++ len = printAppend(page, count, 0, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 7:
++ len = printAppend(page, count, 0, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 8:
++ len = printAppend(page, count, 0, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ break;
++ case 9:
++ len = printAppend(page, count, 0, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ break;
++#endif
++
++ default:
++ *eof = 1;
++ }
++ *start = (IMG_CHAR *)1;
++ return (len);
++}
++#endif
++#endif
++
++
++#ifdef RA_STATS
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ IMG_CHAR **ppszStr,
++ IMG_UINT32 *pui32StrLen)
++{
++ IMG_CHAR *pszStr = *ppszStr;
++ IMG_UINT32 ui32StrLen = *pui32StrLen;
++ IMG_INT32 i32Count;
++ BT *pBT;
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n",
++ pArena->pImportAlloc,
++ pArena->pImportFree,
++ pArena->pImportHandle,
++ pArena->uQuantum);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ if (pArena->pHeadSegment != IMG_NULL &&
++ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " error: head boundary tag has invalid pPrevSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (pArena->pTailSegment != IMG_NULL &&
++ pArena->pTailSegment->pNextSegment != IMG_NULL)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " error: tail boundary tag has invalid pNextSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%08X\n",
++ (IMG_UINT32) pBT->base,
++ pBT->uSize,
++ _BTType(pBT->type),
++ pBT->psMapping);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ *ppszStr = pszStr;
++ *pui32StrLen = ui32StrLen;
++
++ return PVRSRV_OK;
++}
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/common/resman.c
+@@ -0,0 +1,717 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++
++#ifdef __linux__
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/sched.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#include <linux/hardirq.h>
++#else
++#include <asm/hardirq.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#include <linux/semaphore.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ do { \
++ if (in_interrupt()) { \
++ printk ("ISR cannot take RESMAN mutex\n"); \
++ BUG(); \
++ } \
++ else down (&lock); \
++} while (0)
++#define RELEASE_SYNC_OBJ up (&lock)
++
++#else
++
++#define ACQUIRE_SYNC_OBJ
++#define RELEASE_SYNC_OBJ
++
++#endif
++
++#define RESMAN_SIGNATURE 0x12345678
++
++typedef struct _RESMAN_ITEM_
++{
++#ifdef DEBUG
++ IMG_UINT32 ui32Signature;
++#endif
++ struct _RESMAN_ITEM_ **ppsThis;
++ struct _RESMAN_ITEM_ *psNext;
++
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32ResType;
++
++ IMG_PVOID pvParam;
++ IMG_UINT32 ui32Param;
++
++ RESMAN_FREE_FN pfnFreeResource;
++} RESMAN_ITEM;
++
++
++typedef struct _RESMAN_CONTEXT_
++{
++#ifdef DEBUG
++ IMG_UINT32 ui32Signature;
++#endif
++ struct _RESMAN_CONTEXT_ **ppsThis;
++ struct _RESMAN_CONTEXT_ *psNext;
++
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ RESMAN_ITEM *psResItemList;
++
++} RESMAN_CONTEXT;
++
++
++typedef struct
++{
++ RESMAN_CONTEXT *psContextList;
++
++} RESMAN_LIST, *PRESMAN_LIST;
++
++
++PRESMAN_LIST gpsResList = IMG_NULL;
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM)
++static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, IMG_BOOL, IMG_FALSE)
++static IMPLEMENT_LIST_INSERT(RESMAN_ITEM)
++static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM)
++
++static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT)
++static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT)
++
++
++#define PRINT_RESLIST(x, y, z)
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback);
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bExecuteCallback);
++
++
++#ifdef DEBUG
++ static IMG_VOID ValidateResList(PRESMAN_LIST psResList);
++ #define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++ #define VALIDATERESLIST()
++#endif
++
++
++
++
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID)
++{
++ if (gpsResList == IMG_NULL)
++ {
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*gpsResList),
++ (IMG_VOID **)&gpsResList, IMG_NULL,
++ "Resource Manager List") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ gpsResList->psContextList = IMG_NULL;
++
++
++ VALIDATERESLIST();
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID ResManDeInit(IMG_VOID)
++{
++ if (gpsResList != IMG_NULL)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL);
++ gpsResList = IMG_NULL;
++ }
++}
++
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
++ PRESMAN_CONTEXT *phResManContext)
++{
++ PVRSRV_ERROR eError;
++ PRESMAN_CONTEXT psResManContext;
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext),
++ (IMG_VOID **)&psResManContext, IMG_NULL,
++ "Resource Manager Context");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVResManConnect: ERROR allocating new RESMAN context struct"));
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++ }
++
++#ifdef DEBUG
++ psResManContext->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psResManContext->psResItemList = IMG_NULL;
++ psResManContext->psPerProc = hPerProc;
++
++
++ List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ *phResManContext = psResManContext;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext,
++ IMG_BOOL bKernelContext)
++{
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE);
++
++
++
++ if (!bKernelContext)
++ {
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
++
++
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE);
++ }
++
++
++ PVR_ASSERT(psResManContext->psResItemList == IMG_NULL);
++
++
++ List_RESMAN_CONTEXT_Remove(psResManContext);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), psResManContext, IMG_NULL);
++
++
++
++
++ VALIDATERESLIST();
++
++
++ PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE);
++
++
++ RELEASE_SYNC_OBJ;
++}
++
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource)
++{
++ PRESMAN_ITEM psNewResItem;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++ PVR_ASSERT(ui32ResType != 0);
++
++ if (psResManContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: invalid parameter - psResManContext"));
++ return (PRESMAN_ITEM) IMG_NULL;
++ }
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++ "FreeFunc %08X",
++ psResManContext, ui32ResType, (IMG_UINT32)pvParam,
++ ui32Param, pfnFreeResource));
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
++ IMG_NULL,
++ "Resource Manager Item") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
++ "ERROR allocating new resource item"));
++
++
++ RELEASE_SYNC_OBJ;
++
++ return((PRESMAN_ITEM)IMG_NULL);
++ }
++
++
++#ifdef DEBUG
++ psNewResItem->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psNewResItem->ui32ResType = ui32ResType;
++ psNewResItem->pvParam = pvParam;
++ psNewResItem->ui32Param = ui32Param;
++ psNewResItem->pfnFreeResource = pfnFreeResource;
++ psNewResItem->ui32Flags = 0;
++
++
++ List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return(psNewResItem);
++}
++
++PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM *psResItem)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResItem != IMG_NULL);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X", psResItem));
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ eError = FreeResourceByPtr(psResItem, IMG_TRUE);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return(eError);
++}
++
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++ psResManContext, ui32SearchCriteria, ui32ResType,
++ (IMG_UINT32)pvParam, ui32Param));
++
++
++ eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
++ ui32ResType, pvParam, ui32Param,
++ IMG_TRUE);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM *psResItem,
++ PRESMAN_CONTEXT psNewResManContext)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psResItem != IMG_NULL);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: invalid parameter - psResItem"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ if (psNewResManContext != IMG_NULL)
++ {
++
++ List_RESMAN_ITEM_Remove(psResItem);
++
++
++ List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList, psResItem);
++
++ }
++ else
++ {
++ eError = FreeResourceByPtr(psResItem, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: failed to free resource by pointer"));
++ return eError;
++ }
++ }
++
++ return eError;
++}
++
++IMG_BOOL ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
++{
++ RESMAN_ITEM *psItem;
++
++ psItem = va_arg(va, RESMAN_ITEM*);
++
++ return (IMG_BOOL)(psCurItem == psItem);
++}
++
++
++IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT psResManContext,
++ RESMAN_ITEM *psItem)
++{
++ PVRSRV_ERROR eResult;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++ PVR_ASSERT(psItem != IMG_NULL);
++
++ if ((psItem == IMG_NULL) || (psResManContext == IMG_NULL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManFindResourceByPtr: invalid parameter"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++
++ ACQUIRE_SYNC_OBJ;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psResManContext,
++ psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++
++ if(List_RESMAN_ITEM_IMG_BOOL_Any_va(psResManContext->psResItemList,
++ ResManFindResourceByPtr_AnyVaCb,
++ psItem))
++ {
++ eResult = PVRSRV_OK;
++ }
++ else
++ {
++ eResult = PVRSRV_ERROR_NOT_OWNER;
++ }
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eResult;
++}
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem,
++ IMG_BOOL bExecuteCallback)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psItem != IMG_NULL);
++
++ if (psItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++
++ List_RESMAN_ITEM_Remove(psItem);
++
++
++
++ RELEASE_SYNC_OBJ;
++
++
++ if (bExecuteCallback)
++ {
++ eError = psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
++ }
++ }
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ if(OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR freeing resource list item memory"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return(eError);
++}
++
++IMG_VOID* FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
++{
++ IMG_UINT32 ui32SearchCriteria;
++ IMG_UINT32 ui32ResType;
++ IMG_PVOID pvParam;
++ IMG_UINT32 ui32Param;
++
++ ui32SearchCriteria = va_arg(va, IMG_UINT32);
++ ui32ResType = va_arg(va, IMG_UINT32);
++ pvParam = va_arg(va, IMG_PVOID);
++ ui32Param = va_arg(va, IMG_UINT32);
++
++
++ if(
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) ||
++ (psCurItem->ui32ResType == ui32ResType))
++ &&
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) ||
++ (psCurItem->pvParam == pvParam))
++ &&
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) ||
++ (psCurItem->ui32Param == ui32Param))
++ )
++ {
++ return psCurItem;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bExecuteCallback)
++{
++ PRESMAN_ITEM psCurItem;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++
++
++ while((psCurItem = (PRESMAN_ITEM)
++ List_RESMAN_ITEM_Any_va(psResManContext->psResItemList,
++ FreeResourceByCriteria_AnyVaCb,
++ ui32SearchCriteria,
++ ui32ResType,
++ pvParam,
++ ui32Param)) != IMG_NULL
++ && eError == PVRSRV_OK)
++ {
++ eError = FreeResourceByPtr(psCurItem, bExecuteCallback);
++ }
++
++ return eError;
++}
++
++
++#ifdef DEBUG
++static IMG_VOID ValidateResList(PRESMAN_LIST psResList)
++{
++ PRESMAN_ITEM psCurItem, *ppsThisItem;
++ PRESMAN_CONTEXT psCurContext, *ppsThisContext;
++
++
++ if (psResList == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
++ return;
++ }
++
++ psCurContext = psResList->psContextList;
++ ppsThisContext = &psResList->psContextList;
++
++
++ while(psCurContext != IMG_NULL)
++ {
++
++ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurContext->ppsThis != ppsThisContext)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X",
++ psCurContext, psCurContext->ppsThis,
++ psCurContext->psNext, ppsThisContext));
++ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
++ }
++
++
++ psCurItem = psCurContext->psResItemList;
++ ppsThisItem = &psCurContext->psResItemList;
++ while(psCurItem != IMG_NULL)
++ {
++
++ PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurItem->ppsThis != ppsThisItem)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
++ psCurItem, psCurItem->ppsThis, psCurItem->psNext, ppsThisItem));
++ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++ }
++
++
++ ppsThisItem = &psCurItem->psNext;
++ psCurItem = psCurItem->psNext;
++ }
++
++
++ ppsThisContext = &psCurContext->psNext;
++ psCurContext = psCurContext->psNext;
++ }
++}
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+@@ -0,0 +1,2776 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++#include "sgxconfig.h"
++
++#define UINT32_MAX_VALUE 0xFFFFFFFFUL
++
++#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
++
++typedef struct _MMU_PT_INFO_
++{
++
++ IMG_VOID *hPTPageOSMemHandle;
++ IMG_CPU_VIRTADDR PTPageCpuVAddr;
++ IMG_UINT32 ui32ValidPTECount;
++} MMU_PT_INFO;
++
++struct _MMU_CONTEXT_
++{
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ IMG_VOID *hPDOSMemHandle;
++
++
++ MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
++
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++#if defined(PDUMP)
++ IMG_UINT32 ui32PDumpMMUContextID;
++#endif
++
++ struct _MMU_CONTEXT_ *psNext;
++};
++
++struct _MMU_HEAP_
++{
++
++ MMU_CONTEXT *psMMUContext;
++
++
++
++
++ IMG_UINT32 ui32PDBaseIndex;
++
++ IMG_UINT32 ui32PageTableCount;
++
++ IMG_UINT32 ui32PTETotal;
++
++ IMG_UINT32 ui32PDEPageSizeCtrl;
++
++
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++ IMG_UINT32 ui32DataPageBitWidth;
++
++ IMG_UINT32 ui32DataPageMask;
++
++
++
++
++ IMG_UINT32 ui32PTShift;
++
++ IMG_UINT32 ui32PTBitWidth;
++
++ IMG_UINT32 ui32PTMask;
++
++ IMG_UINT32 ui32PTSize;
++
++ IMG_UINT32 ui32PTECount;
++
++
++
++
++ IMG_UINT32 ui32PDShift;
++
++ IMG_UINT32 ui32PDBitWidth;
++
++ IMG_UINT32 ui32PDMask;
++
++
++
++ RA_ARENA *psVMArena;
++ DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++
++
++#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
++#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
++#endif
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_BOOL bForUnmap,
++ IMG_HANDLE hUniqueTag);
++#endif
++
++#define PAGE_TEST 0
++#if PAGE_TEST
++static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
++#endif
++
++#define PT_DEBUG 0
++#if PT_DEBUG
++static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
++{
++ IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
++ IMG_UINT32 i;
++
++
++ for(i = 0; i < 1024; i += 8)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx\n",
++ p[i + 0], p[i + 1], p[i + 2], p[i + 3],
++ p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
++ }
++}
++
++static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
++{
++ IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
++ IMG_UINT32 i, ui32Count = 0;
++
++
++ for(i = 0; i < 1024; i++)
++ if(p[i] & SGX_MMU_PTE_VALID)
++ ui32Count++;
++
++ if(psPTInfoList->ui32ValidPTECount != ui32Count)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "ui32ValidPTECount: %lu ui32Count: %lu\n",
++ psPTInfoList->ui32ValidPTECount, ui32Count));
++ DumpPT(psPTInfoList);
++ BUG();
++ }
++}
++#else
++static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
++{
++ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
++}
++
++static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
++{
++ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
++}
++#endif
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 ui32RegVal;
++ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++
++
++
++ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++}
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 ui32RegVal;
++ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++ /* BUG FIX: ui32RegVal was previously used uninitialised below; read the
++ live BIF control value first so only the bypass bit is cleared. */
++ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, 0);
++}
++#endif
++
++/* Flag the BIF system-level cache for invalidation (SGX MP cores only). */
++IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++	#if defined(SGX_FEATURE_MP)
++	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
++	#else
++ /* Single-core SGX has no system-level cache; nothing to record. */
++	PVR_UNREFERENCED_PARAMETER(psDevInfo);
++	#endif
++}
++
++/* Flag the BIF page-directory cache (and system cache, if any) for invalidation. */
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
++	#if defined(SGX_FEATURE_SYSTEM_CACHE)
++	MMU_InvalidateSystemLevelCache(psDevInfo);
++	#endif
++}
++
++/* Flag the BIF page-table cache (and system cache, if any) for invalidation. */
++IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT;
++	#if defined(SGX_FEATURE_SYSTEM_CACHE)
++	MMU_InvalidateSystemLevelCache(psDevInfo);
++	#endif
++}
++
++
++static IMG_BOOL
++_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
++ MMU_PT_INFO *psPTInfoList,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ /* Allocate and initialise backing memory for one SGX page table and
++ return its device physical address via psDevPAddr. */
++
++ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++ {
++ /* UMA path: back the page table with OS-allocated pages. */
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
++ &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
++ return IMG_FALSE;
++ }
++
++
++ if(psPTInfoList->PTPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ }
++ else
++ {
++ /* No linear address: resolve physical address via the OS handle. */
++ sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
++ }
++
++ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ /* LMA path: carve one page out of the local device memory arena. */
++
++
++
++
++ if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
++ return IMG_FALSE;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++
++ psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psPTInfoList->hPTPageOSMemHandle);
++ if(!psPTInfoList->PTPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
++ return IMG_FALSE;
++ }
++
++
++ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ #if PAGE_TEST
++ PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
++ #endif
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ {
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 i;
++
++ pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
++
++ for(i=0; i<pMMUHeap->ui32PTECount; i++)
++ {
++ pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++ }
++#else
++ /* No dummy page: start with an empty (all-invalid) page table. */
++ OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
++#endif
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ *psDevPAddr = sDevPAddr;
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
++{
++ /* Release one page table's backing memory (inverse of
++ _AllocPageTableMemory): OS pages for UMA, an arena block for LMA. */
++
++
++ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++ {
++
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ psPTInfoList->PTPageCpuVAddr,
++ psPTInfoList->hPTPageOSMemHandle);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ /* Recover the system physical address before tearing down the mapping. */
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
++
++
++
++ OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psPTInfoList->hPTPageOSMemHandle);
++
++ /* Return the physical page to the local device memory arena. */
++
++
++ RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
++
++
++static IMG_VOID
++_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
++{
++ IMG_UINT32 *pui32PDEntry;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ SYS_DATA *psSysData;
++ MMU_PT_INFO **ppsPTInfoList;
++
++ SysAcquireData(&psSysData);
++
++ /* Tear down one page table of this heap: clear its PDE(s), zero its
++ PTEs, and (when bOSFreePT) free its memory and MMU_PT_INFO record. */
++ ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ /* ui32PTIndex is relative to the heap's base PD index computed above. */
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++#if PT_DEBUG
++ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
++ {
++ DumpPT(ppsPTInfoList[ui32PTIndex]);
++
++ }
++#endif
++
++ /* A table should only be freed once all of its PTEs are invalid. */
++ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
++ }
++
++
++ PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
++ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
++ {
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++ }
++
++ switch(pMMUHeap->psDevArena->DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED :
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++ {
++ /* Shared heap: patch the PDE in every MMU context on the device. */
++ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while(psMMUContext)
++ {
++
++ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++#else
++
++ if(bOSFreePT)
++ {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++ /* NOTE(review): tag pair here is PT/PT but PD/PT below - confirm. */
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT :
++ case DEVICE_MEMORY_HEAP_KERNEL :
++ {
++ /* Per-context/kernel heap: patch only this context's PDE. */
++ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++#else
++
++ if(bOSFreePT)
++ {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
++ return;
++ }
++ }
++
++ /* Drop PTE accounting and release the PT memory and its info struct. */
++ if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
++ {
++ if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
++ {
++ IMG_PUINT32 pui32Tmp;
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
++
++ /* Zero as many PTEs as remain outstanding for this heap. */
++ for(i=0;
++ (i<pMMUHeap->ui32PTETotal) && (i<pMMUHeap->ui32PTECount);
++ i++)
++ {
++ pui32Tmp[i] = 0;
++ }
++
++
++
++ if(bOSFreePT)
++ {
++ _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
++ }
++
++
++
++
++ pMMUHeap->ui32PTETotal -= i;
++ }
++ else
++ {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ if(bOSFreePT)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_PT_INFO),
++ ppsPTInfoList[ui32PTIndex],
++ IMG_NULL);
++ ppsPTInfoList[ui32PTIndex] = IMG_NULL;
++ }
++ }
++ else
++ {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
++}
++
++static IMG_VOID
++_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
++{
++ IMG_UINT32 i;
++ /* Free every page table backing this heap, then invalidate the PD cache. */
++ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
++ {
++ _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
++ }
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++
++static IMG_BOOL
++_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++ IMG_UINT32 ui32PageTableCount;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 i;
++ IMG_UINT32 *pui32PDEntry;
++ MMU_PT_INFO **ppsPTInfoList;
++ SYS_DATA *psSysData;
++ IMG_DEV_VIRTADDR sHighDevVAddr;
++
++ /* Ensure every page table covering [DevVAddr, DevVAddr+ui32Size)
++ exists, allocating memory and wiring PDEs for any that are missing. */
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
++ PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
++#endif
++
++
++ SysAcquireData(&psSysData);
++
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ /* Compute the top of the range; DevVAddr + size + masks may wrap in
++ 32 bits, in which case saturate to UINT32_MAX_VALUE. */
++
++ if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
++ < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
++ {
++
++ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
++ }
++ else
++ {
++ sHighDevVAddr.uiAddr = DevVAddr.uiAddr
++ + ui32Size
++ + pMMUHeap->ui32DataPageMask
++ + pMMUHeap->ui32PTMask;
++ }
++
++ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ui32PageTableCount -= ui32PDIndex;
++
++
++ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PageTableCount);
++ PDUMPCOMMENT("Page directory mods (page count == %08X)", ui32PageTableCount);
++
++
++ for(i=0; i<ui32PageTableCount; i++)
++ {
++ if(ppsPTInfoList[i] == IMG_NULL)
++ {
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_PT_INFO),
++ (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
++ "MMU Page Table Info");
++ if (ppsPTInfoList[i] == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
++ return IMG_FALSE;
++ }
++ OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
++ }
++ /* Allocate backing memory for this PT if it is not yet present. */
++ if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
++ && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
++ {
++ IMG_DEV_PHYADDR sDevPAddr;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 j;
++#else
++
++ PVR_ASSERT(pui32PDEntry[i] == 0);
++#endif
++
++ if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
++ return IMG_FALSE;
++ }
++
++ switch(pMMUHeap->psDevArena->DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED :
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++ {
++ /* Shared heap: point the matching PDE of every context at the new PT. */
++ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while(psMMUContext)
++ {
++
++ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++
++ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->ui32PDEPageSizeCtrl
++ | SGX_MMU_PDE_VALID;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT :
++ case DEVICE_MEMORY_HEAP_KERNEL :
++ {
++ /* Per-context/kernel heap: only this context's PDE is updated. */
++ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->ui32PDEPageSizeCtrl
++ | SGX_MMU_PDE_VALID;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
++ return IMG_FALSE;
++ }
++ }
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++
++
++
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++#endif
++ }
++ else
++ {
++ /* PT already present: its PDE must already be valid. */
++ PVR_ASSERT(pui32PDEntry[i] != 0);
++ }
++ }
++
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
++ #endif
++
++ return IMG_TRUE;
++}
++
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 i;
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ MMU_CONTEXT *psMMUContext;
++ IMG_HANDLE hPDOSMemHandle;
++ SYS_DATA *psSysData;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ /* Create an MMU context with its own page directory and link it on psDevInfo->pvMMUContextList. */
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
++
++ SysAcquireData(&psSysData);
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_CONTEXT),
++ (IMG_VOID **)&psMMUContext, IMG_NULL,
++ "MMU Context");
++ if (psMMUContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
++
++ /* NOTE(review): the error returns below leak psMMUContext - confirm. */
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ psMMUContext->psDevInfo = psDevInfo;
++
++
++ psMMUContext->psDeviceNode = psDeviceNode;
++
++ /* UMA path: no local device memory arena, use OS page allocations. */
++ if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
++ {
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &pvPDCpuVAddr,
++ &hPDOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(pvPDCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++ }
++ sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ #if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyPTPageCpuVAddr,
++ &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(psDevInfo->pvDummyPTPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
++ }
++ psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyDataPageCpuVAddr,
++ &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(psDevInfo->pvDummyDataPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++ }
++ else
++ {
++ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
++ }
++ psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++#endif
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ /* LMA path: carve the PD (and dummy pages) from the local arena. */
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPDOSMemHandle);
++ if(!pvPDCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ #if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyPTPageOSMemHandle);
++ if(!psDevInfo->pvDummyPTPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyDataPageOSMemHandle);
++ if(!psDevInfo->pvDummyDataPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++#endif
++ }
++
++
++ PDUMPCOMMENT("Alloc page directory");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++ if (pvPDCpuVAddr)
++ {
++ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ /* Point every PDE at the dummy PT so all device VAs decode safely. */
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++ pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++ }
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ /* First context: initialise the shared dummy PT and data pages. */
++
++ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
++ for(i=0; i<SGX_MMU_PT_SIZE; i++)
++ {
++ pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++
++ PDUMPCOMMENT("Dummy Page table contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++
++ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
++ for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
++ {
++ pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
++ }
++
++ PDUMPCOMMENT("Dummy Data Page contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ }
++#else
++ /* No dummy page support: PD starts with all entries invalid. */
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++
++ pui32Tmp[i] = 0;
++ }
++#endif
++
++
++ PDUMPCOMMENT("Page directory contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++#if defined(PDUMP)
++ if(PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
++ "SGXMEM",
++ &psMMUContext->ui32PDumpMMUContextID,
++ 2,
++ PDUMP_PT_UNIQUETAG,
++ pvPDCpuVAddr) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ /* Publish the context and its PD device address to the caller. */
++ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++ psMMUContext->sPDDevPAddr = sPDDevPAddr;
++ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++
++ *ppsMMUContext = psMMUContext;
++
++
++ *psPDDevPAddr = sPDDevPAddr;
++
++
++ psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++ psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 *pui32Tmp, i;
++ SYS_DATA *psSysData;
++ MMU_CONTEXT **ppsMMUContext;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
++ MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++#endif
++ /* Tear down the context: scrub+free the PD, unlink, free the struct. */
++ SysAcquireData(&psSysData);
++
++
++ PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, "SGXMEM", psMMUContext->ui32PDumpMMUContextID, 2);
++
++
++ PDUMPCOMMENT("Free page directory");
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#endif
++
++ pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
++
++ /* Zero the PD so the hardware never sees stale entries. */
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++
++ pui32Tmp[i] = 0;
++ }
++
++
++
++
++
++ if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psMMUContext->pvPDCpuVAddr,
++ psMMUContext->hPDOSMemHandle);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ /* Last remaining context: release the shared dummy pages too. */
++ if(!psMMUContextList->psNext)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyPTPageCpuVAddr,
++ psDevInfo->hDummyPTPageOSMemHandle);
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyDataPageCpuVAddr,
++ psDevInfo->hDummyDataPageOSMemHandle);
++ }
++#endif
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psMMUContext->hPDOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psMMUContextList->psNext)
++ {
++
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyPTPageOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyDataPageOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++#endif
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
++
++ /* Unlink this context from the device's MMU context list. */
++ ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
++ while(*ppsMMUContext)
++ {
++ if(*ppsMMUContext == psMMUContext)
++ {
++
++ *ppsMMUContext = psMMUContext->psNext;
++ break;
++ }
++
++
++ ppsMMUContext = &((*ppsMMUContext)->psNext);
++ }
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
++
++}
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
++{
++ IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
++ IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
++ IMG_UINT32 ui32PDEntry;
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++#endif
++ /* Copy the shared heap's PDE range from the owning (kernel) context's */
++ /* PD into this context's PD so both map the same page tables. */
++ pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++ pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++
++
++ PDUMPCOMMENT("Page directory shared heap range copy");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
++ {
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ /* Destination PDEs must not already be populated. */
++ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++#endif
++
++
++ pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
++ if (pui32PDCpuVAddr[ui32PDEntry])
++ {
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ bInvalidateDirectoryCache = IMG_TRUE;
++#endif
++ }
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ if (bInvalidateDirectoryCache)
++ {
++
++
++
++
++ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++ }
++#endif
++}
++
++
++static IMG_VOID
++MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32Tmp;
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++ /* Invalidate PTEs for ui32PageCount data pages starting at sDevVAddr, freeing any page table left empty. */
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for(i=0; i<ui32PageCount; i++)
++ {
++ MMU_PT_INFO **ppsPTInfoList;
++
++ /* PD slot covering the current VA. */
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++
++
++ continue;
++ }
++
++ /* CPU-mapped PT contents (may be IMG_NULL if already freed). */
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++ /* BUG FIX: advance the VA before skipping, as in the !PT case above. */
++ if (!pui32Tmp)
++ {
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++ }
++
++ /* Count must never go negative. */
++ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++ }
++
++ /* If the PT no longer maps anything, free it and clear its PDE now. */
++
++ if (ppsPTInfoList[0] && ppsPTInfoList[0]->ui32ValidPTECount == 0)
++ {
++ _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
++ bInvalidateDirectoryCache = IMG_TRUE;
++ }
++
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++ }
++
++ if(bInvalidateDirectoryCache)
++ {
++ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
++ }
++ else
++ {
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap,
++ sDevVAddr,
++ psMMUHeap->ui32DataPageSize * ui32PageCount,
++ IMG_TRUE,
++ hUniqueTag);
++#endif
++}
++
++
++IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
++ IMG_SIZE_T ui32Start,
++ IMG_SIZE_T ui32End,
++ IMG_HANDLE hUniqueTag)
++{
++ MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
++ IMG_DEV_VIRTADDR Start;
++ /* RA callback: unmap device-VA range [ui32Start, ui32End) and free its PTs. */
++ Start.uiAddr = ui32Start;
++ /* Page count = byte length >> data-page shift (ui32PTShift). */
++ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (ui32End - ui32Start) >> pMMUHeap->ui32PTShift, hUniqueTag);
++}
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_UINT32 ui32ScaleSize;
++ /* Build an MMU_HEAP describing one device-virtual heap and its VM arena. */
++ PVR_ASSERT (psDevArena != IMG_NULL);
++
++ if (psDevArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_HEAP),
++ (IMG_VOID **)&pMMUHeap, IMG_NULL,
++ "MMU Heap");
++ if (pMMUHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
++ return IMG_NULL;
++ }
++
++ pMMUHeap->psMMUContext = psMMUContext;
++ pMMUHeap->psDevArena = psDevArena;
++
++
++ /* Choose PDE page-size control and scale factor from the data page size. */
++
++ switch(pMMUHeap->psDevArena->ui32DataPageSize)
++ {
++ case 0x1000:
++ ui32ScaleSize = 0;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
++ break;
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ case 0x4000:
++ ui32ScaleSize = 2;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
++ break;
++ case 0x10000:
++ ui32ScaleSize = 4;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
++ break;
++ case 0x40000:
++ ui32ScaleSize = 6;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
++ break;
++ case 0x100000:
++ ui32ScaleSize = 8;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
++ break;
++ case 0x400000:
++ ui32ScaleSize = 10;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
++ break;
++#endif
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
++ goto ErrorFreeHeap;
++ }
++
++ /* Derived sizes, masks and shifts for data-page/PT/PD addressing. */
++ pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
++ pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
++ pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
++
++ pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
++ pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
++ pMMUHeap->ui32PTSize = (1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);
++
++ if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
++ {
++ pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
++ }
++ pMMUHeap->ui32PTECount = pMMUHeap->ui32PTSize >> 2;
++
++
++ pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
++ pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
++
++
++ /* Heap base must be data-page and PT aligned unless it fits in page 0. */
++
++
++ if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
++ {
++
++
++
++ PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
++ & (pMMUHeap->ui32DataPageMask
++ | pMMUHeap->ui32PTMask)) == 0);
++ }
++
++
++ pMMUHeap->ui32PTETotal = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
++
++
++ pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;
++
++
++ /* Number of page tables needed to cover the whole heap (round up). */
++
++ pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotal + pMMUHeap->ui32PTECount - 1)
++ >> pMMUHeap->ui32PTBitWidth;
++
++
++ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++ psDevArena->BaseDevVAddr.uiAddr,
++ psDevArena->ui32Size,
++ IMG_NULL,
++ pMMUHeap->ui32DataPageSize,
++ IMG_NULL,
++ IMG_NULL,
++ MMU_FreePageTables,
++ pMMUHeap);
++
++ if (pMMUHeap->psVMArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
++ goto ErrorFreePagetables;
++ }
++
++#if 0
++ /* NOTE(review): dead (#if 0) tiled-heap BIF range setup below. */
++ if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
++ {
++ IMG_UINT32 ui32RegVal;
++ IMG_UINT32 ui32XTileStride;
++
++
++
++
++
++
++ ui32XTileStride = 2;
++
++ ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
++ & ((psDevArena->BaseDevVAddr.uiAddr>>20)
++ << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
++ |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
++ & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
++ << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
++ |(EUR_CR_BIF_TILE0_CFG_MASK
++ & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
++ PDUMPREG(EUR_CR_BIF_TILE0, ui32RegVal);
++ }
++#endif
++
++
++
++ *ppsVMArena = pMMUHeap->psVMArena;
++
++ return pMMUHeap;
++
++
++ErrorFreePagetables:
++ _DeferredFreePageTables (pMMUHeap);
++
++ErrorFreeHeap:
++ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
++
++
++ return IMG_NULL;
++}
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMUHeap)
++{
++ if (pMMUHeap != IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
++
++ if(pMMUHeap->psVMArena)
++ {
++ RA_Delete (pMMUHeap->psVMArena);
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++ _DeferredFreePageTables (pMMUHeap);
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
++
++ }
++}
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMUHeap,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *psDevVAddr)
++{
++ IMG_BOOL bStatus;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++ uSize, uFlags, uDevVAddrAlignment));
++
++
++
++ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ {
++ IMG_UINTPTR_T uiAddr;
++
++ bStatus = RA_Alloc (pMMUHeap->psVMArena,
++ uSize,
++ pActualSize,
++ IMG_NULL,
++ 0,
++ uDevVAddrAlignment,
++ 0,
++ &uiAddr);
++ if(!bStatus)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
++ return bStatus;
++ }
++
++ psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
++ }
++
++ #ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++ #endif
++
++
++ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++
++ #ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++ #endif
++
++ if (!bStatus)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
++ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ {
++
++ RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
++ }
++ }
++
++ return bStatus;
++}
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++ if (pMMUHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
++ return;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap, DevVAddr.uiAddr));
++
++ if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
++ (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
++ {
++ RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
++ return;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't find DevVAddr %08X in a DevArena",DevVAddr.uiAddr));
++}
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_BOOL bForUnmap,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 ui32NumPTEntries;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32PTEntry;
++
++ MMU_PT_INFO **ppsPTInfoList;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTDumpCount;
++
++
++ ui32NumPTEntries = (uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift;
++
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++
++ ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++
++ while(ui32NumPTEntries > 0)
++ {
++ MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
++
++ if(ui32NumPTEntries <= pMMUHeap->ui32PTECount - ui32PTIndex)
++ {
++ ui32PTDumpCount = ui32NumPTEntries;
++ }
++ else
++ {
++ ui32PTDumpCount = pMMUHeap->ui32PTECount - ui32PTIndex;
++ }
++
++ if (psPTInfo)
++ {
++ pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
++ }
++
++
++ ui32NumPTEntries -= ui32PTDumpCount;
++
++
++ ui32PTIndex = 0;
++ }
++
++ PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
++}
++#endif
++
++
++static IMG_VOID
++MMU_MapPage (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_DEV_PHYADDR DevPAddr,
++ IMG_UINT32 ui32MemFlags)
++{
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 ui32MMUFlags = 0;
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++
++
++ if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
++ {
++
++ ui32MMUFlags = 0;
++ }
++ else if(PVRSRV_MEM_READ & ui32MemFlags)
++ {
++
++ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++ }
++ else if(PVRSRV_MEM_WRITE & ui32MemFlags)
++ {
++
++ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++ }
++
++
++ if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
++ {
++ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++ }
++
++#if !defined(FIX_HW_BRN_25503)
++
++ if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
++ {
++ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++ }
++#endif
++
++
++
++
++
++ ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u",
++ DevVAddr.uiAddr,
++ DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
++ ui32Index ));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08lX", pui32Tmp[ui32Index]));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08lX", DevPAddr.uiAddr));
++ }
++
++ PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++#endif
++
++
++ ppsPTInfoList[0]->ui32ValidPTECount++;
++
++
++ pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
++ | SGX_MMU_PTE_VALID
++ | ui32MMUFlags;
++
++ CheckPT(ppsPTInfoList[0]);
++}
++
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ IMG_UINT32 uCount, i;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize)
++ {
++ IMG_SYS_PHYADDR sSysAddr;
++
++ sSysAddr = psSysAddr[i];
++
++
++
++ PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ IMG_UINT32 uCount;
++ IMG_UINT32 ui32VAdvance;
++ IMG_UINT32 ui32PAdvance;
++
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++ pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize));
++
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++#if defined(FIX_HW_BRN_23281)
++ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++
++
++
++ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++ {
++ ui32PAdvance = 0;
++ }
++
++ for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
++ {
++ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += ui32VAdvance;
++ DevPAddr.uiAddr += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uByteSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 i;
++ IMG_UINT32 uOffset = 0;
++ IMG_DEV_VIRTADDR MapDevVAddr;
++ IMG_UINT32 ui32VAdvance;
++ IMG_UINT32 ui32PAdvance;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapShadow: %08X, 0x%x, %08X",
++ MapBaseDevVAddr.uiAddr,
++ uByteSize,
++ CpuVAddr));
++
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++
++ PVR_ASSERT(((IMG_UINT32)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++ PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
++ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++#if defined(FIX_HW_BRN_23281)
++ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++
++
++
++ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++ {
++ ui32PAdvance = 0;
++ }
++
++
++ MapDevVAddr = MapBaseDevVAddr;
++ for (i=0; i<uByteSize; i+=ui32VAdvance)
++ {
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ if(CpuVAddr)
++ {
++ CpuPAddr = OSMapLinToCPUPhys ((IMG_VOID *)((IMG_UINT32)CpuVAddr + uOffset));
++ }
++ else
++ {
++ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++ }
++ DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++ uOffset,
++ (IMG_UINTPTR_T)CpuVAddr + uOffset,
++ CpuPAddr.uiAddr,
++ MapDevVAddr.uiAddr,
++ DevPAddr.uiAddr));
++
++ MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++
++ MapDevVAddr.uiAddr += ui32VAdvance;
++ uOffset += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize;
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32Tmp;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for(i=0; i<ui32PageCount; i++)
++ {
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr,
++ sDevVAddr.uiAddr,
++ i,
++ ui32PDIndex,
++ ui32PTIndex));
++
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++
++
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr,
++ sDevVAddr.uiAddr,
++ i,
++ ui32PDIndex,
++ ui32PTIndex));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08lX", pui32Tmp[ui32PTIndex]));
++ }
++
++
++ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++ }
++
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif
++}
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++ IMG_UINT32 *pui32PageTable;
++ IMG_UINT32 ui32Index;
++ IMG_DEV_PHYADDR sDevPAddr;
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
++ sDevPAddr.uiAddr = 0;
++ return sDevPAddr;
++ }
++
++
++ ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++
++ sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++
++
++ sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
++
++ return sDevPAddr;
++}
++
++
++IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
++{
++ return (pMMUContext->sPDDevPAddr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_DEV_PHYADDR DevPAddr;
++
++
++
++ pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
++
++ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++ pCpuPAddr->uiAddr = DevPAddr.uiAddr;
++ pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++ return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hOSMemHandle = IMG_NULL;
++ IMG_BYTE *pui8MemBlock = IMG_NULL;
++ IMG_SYS_PHYADDR sMemBlockSysPAddr;
++ IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui8MemBlock,
++ &hOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui8MemBlock)
++ {
++ sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++ }
++ else
++ {
++
++ sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++ }
++ }
++ else
++ {
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ 3 * SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 3,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hOSMemHandle);
++ if(!pui8MemBlock)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++
++ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++ psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++ psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++
++
++ psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
++ psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
++
++
++ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++
++ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_SYS_PHYADDR sPDSysPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BIFResetPD,
++ psDevInfo->hBIFResetPDOSMemHandle);
++ }
++ else
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
++ 3 * SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBIFResetPDOSMemHandle);
++
++ sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
++ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
++ IMG_HANDLE hPDPageOSMemHandle = IMG_NULL;
++ IMG_UINT32 *pui32PD = IMG_NULL;
++ IMG_UINT32 *pui32PT = IMG_NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PD,
++ &hPDPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui32PT)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ if(pui32PD)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PD);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDPageOSMemHandle, 0);
++ }
++ sPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ }
++ else
++ {
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE * 2,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(psDevInfo->sBRN22997SysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(psDevInfo->sBRN22997SysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if(!pui32PT)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ pui32PD = pui32PT + 1024;
++ sPDDevPAddr.uiAddr = sPTDevPAddr.uiAddr + 4096;
++ }
++
++ OSMemSet(pui32PD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ psDevInfo->hBRN22997PTPageOSMemHandle = hPTPageOSMemHandle;
++ psDevInfo->hBRN22997PDPageOSMemHandle = hPDPageOSMemHandle;
++ psDevInfo->sBRN22997PTDevPAddr = sPTDevPAddr;
++ psDevInfo->sBRN22997PDDevPAddr = sPDDevPAddr;
++ psDevInfo->pui32BRN22997PD = pui32PD;
++ psDevInfo->pui32BRN22997PT = pui32PT;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ IMG_UINT32 *pui32PD = psDevInfo->pui32BRN22997PD;
++ IMG_UINT32 *pui32PT = psDevInfo->pui32BRN22997PT;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ volatile IMG_UINT32 *pui32HostPort;
++ IMG_UINT32 ui32BIFCtrl;
++
++
++
++
++ pui32HostPort = (volatile IMG_UINT32*)(((IMG_UINT8*)psDevInfo->pvHostPortBaseKM) + SYS_SGX_HOSTPORT_BRN23030_OFFSET);
++
++
++ sDevVAddr.uiAddr = SYS_SGX_HOSTPORT_BASE_DEVVADDR + SYS_SGX_HOSTPORT_BRN23030_OFFSET;
++
++ ui32PDIndex = (sDevVAddr.uiAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (sDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ pui32PD[ui32PDIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sBRN22997PDDevPAddr.uiAddr);
++ PDUMPPDREG(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sBRN22997PDDevPAddr.uiAddr, PDUMP_PD_UNIQUETAG);
++
++
++ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++
++
++ if (pui32HostPort)
++ {
++
++ IMG_UINT32 ui32Tmp;
++ ui32Tmp = *pui32HostPort;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Host Port not present for BRN22997 workaround"));
++ }
++
++
++
++
++
++
++
++ PDUMPCOMMENT("RDW :SGXMEM:v4:%08lX\r\n", sDevVAddr.uiAddr);
++
++ PDUMPCOMMENT("SAB :SGXMEM:v4:%08lX 4 0 hostport.bin", sDevVAddr.uiAddr);
++
++
++ pui32PD[ui32PDIndex] = 0;
++ pui32PT[ui32PTIndex] = 0;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++}
++
++
++IMG_VOID WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ if (psDevInfo->pui32BRN22997PD != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PD,
++ psDevInfo->hBRN22997PDPageOSMemHandle);
++ }
++
++ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PT,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++ }
++ }
++ else
++ {
++ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32BRN22997PT,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++
++
++ RA_Free(psLocalDevMemArena, psDevInfo->sBRN22997SysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++}
++#endif
++
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
++ IMG_UINT32 *pui32PD;
++ IMG_UINT32 *pui32PT = IMG_NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui32PT)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if(!pui32PT)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++
++ psDevInfo->sExtSystemCacheRegsPTSysPAddr = sSysPAddr;
++ }
++
++ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ pui32PD[ui32PDIndex] = (sPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++
++ psDevInfo->pui32ExtSystemCacheRegsPT = pui32PT;
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle = hPTPageOSMemHandle;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 *pui32PD;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ pui32PD[ui32PDIndex] = 0;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32ExtSystemCacheRegsPT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32ExtSystemCacheRegsPT,
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
++ }
++ }
++ else
++ {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32ExtSystemCacheRegsPT,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
++
++ RA_Free(psLocalDevMemArena, psDevInfo->sExtSystemCacheRegsPTSysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++
++#if PAGE_TEST
++static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
++{
++ volatile IMG_UINT32 ui32WriteData;
++ volatile IMG_UINT32 ui32ReadData;
++ volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
++ IMG_INT n;
++ IMG_BOOL bOK=IMG_TRUE;
++
++ ui32WriteData = 0xffffffff;
++
++ for (n=0; n<1024; n++)
++ {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++ PVR_DBG_BREAK;
++ bOK = IMG_FALSE;
++ }
++ }
++
++ ui32WriteData = 0;
++
++ for (n=0; n<1024; n++)
++ {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++ PVR_DBG_BREAK;
++ bOK = IMG_FALSE;
++ }
++ }
++
++ if (bOK)
++ {
++ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
++ }
++}
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++#include "sgxinfokm.h"
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena);
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMU);
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMU,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_UINT32 ui32Size);
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP * pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR * pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext);
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo);
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+@@ -0,0 +1,458 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++
++#ifndef __linux__
++#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
++#endif
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC)
++static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC)
++
++static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL;
++static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL;
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++IMG_EXPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ IMG_BOOL bLockOnFailure,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PVRSRV_ERROR eError;
++
++ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != IMG_NULL)
++ {
++ IMG_UINT32 i;
++ PRESMAN_ITEM psResItem;
++
++ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * psStubPBDesc->ui32SubKernelMemInfosCount,
++ (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
++ IMG_NULL,
++ "Array of Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ExitNotFound;
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ 0);
++
++
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++
++ *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
++ *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
++ *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
++ *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo;
++
++ *ui32SharedPBDescSubKernelMemInfosCount =
++ psStubPBDesc->ui32SubKernelMemInfosCount;
++
++ *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
++
++ for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++ {
++ ppsSharedPBDescSubKernelMemInfos[i] =
++ psStubPBDesc->ppsSubKernelMemInfos[i];
++ }
++
++ psStubPBDesc->ui32RefCount++;
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++ return PVRSRV_OK;
++ }
++
++ eError = PVRSRV_OK;
++ if (bLockOnFailure)
++ {
++ if (psResItemCreateSharedPB == IMG_NULL)
++ {
++ psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ psPerProc,
++ 0,
++ &SGXCleanupSharedPBDescCreateLockCallback);
++
++ if (psResItemCreateSharedPB == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
++ psPerProcCreateSharedPB = psPerProc;
++ }
++ else
++ {
++ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++ }
++ExitNotFound:
++ *phSharedPBDesc = IMG_NULL;
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR
++SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
++{
++
++ IMG_UINT32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie;
++
++
++
++
++ psStubPBDescIn->ui32RefCount--;
++ if (psStubPBDescIn->ui32RefCount == 0)
++ {
++ List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn);
++ for(i=0 ; i<psStubPBDescIn->ui32SubKernelMemInfosCount; i++)
++ {
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
++ psStubPBDescIn->ppsSubKernelMemInfos[i]);
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount,
++ psStubPBDescIn->ppsSubKernelMemInfos,
++ 0);
++ psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL;
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ psStubPBDescIn,
++ 0);
++
++
++
++ SGXCleanupRequest(psDeviceNode,
++ IMG_NULL,
++ PVRSRV_CLEANUPCMD_PB);
++ }
++ return PVRSRV_OK;
++
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++#ifdef DEBUG
++ PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
++ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
++#else
++ PVR_UNREFERENCED_PARAMETER(pvParam);
++#endif
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psPerProcCreateSharedPB = IMG_NULL;
++ psResItemCreateSharedPB = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
++{
++ PVR_ASSERT(hSharedPBDesc != IMG_NULL);
++
++ return ResManFreeResByPtr(hSharedPBDesc);
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
++ PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++ IMG_UINT32 i;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PRESMAN_ITEM psResItem;
++
++
++ if (psPerProcCreateSharedPB != psPerProc)
++ {
++ goto NoAdd;
++ }
++ else
++ {
++ PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL);
++
++ ResManFreeResByPtr(psResItemCreateSharedPB);
++
++ PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL);
++ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
++ }
++
++ psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != IMG_NULL)
++ {
++ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++
++ }
++
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXAddSharedPBDescKM: "
++ "Failed to register existing shared "
++ "PBDesc with the resource manager"));
++ goto NoAddKeepPB;
++ }
++
++
++ psStubPBDesc->ui32RefCount++;
++
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++ eRet = PVRSRV_OK;
++ goto NoAddKeepPB;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ (IMG_VOID **)&psStubPBDesc,
++ 0,
++ "Stub Parameter Buffer Description") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++ "StubPBDesc"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++
++ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * ui32SharedPBDescSubKernelMemInfosCount,
++ (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
++ 0,
++ "Array of Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to alloc "
++ "StubPBDesc->ppsSubKernelMemInfos"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ psStubPBDesc->ui32RefCount = 1;
++ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++ psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo;
++
++ psStubPBDesc->ui32SubKernelMemInfosCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
++ if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
++ != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to dissociate shared PBDesc "
++ "from process"));
++ goto NoAdd;
++ }
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to register shared PBDesc "
++ " with the resource manager"));
++ goto NoAdd;
++ }
++ psStubPBDesc->hDevCookie = hDevCookie;
++
++
++ List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM),
++ psStubPBDesc);
++
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++
++ return PVRSRV_OK;
++
++NoAdd:
++ if(psStubPBDesc)
++ {
++ if(psStubPBDesc->ppsSubKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ psStubPBDesc->ppsSubKernelMemInfos,
++ 0);
++ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ psStubPBDesc,
++ 0);
++
++ }
++
++NoAddKeepPB:
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]);
++ }
++
++ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo);
++
++ return eRet;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++ SGX_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_HANDLE hDevMemContext);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_IMPORT
++PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32Reg,
++ IMG_UINT32 *pui32Old,
++ IMG_BOOL bNew,
++ IMG_UINT32 ui32New,
++ IMG_UINT32 ui32NewReset,
++ IMG_UINT32 ui32CountersReg,
++ IMG_UINT32 ui32Reg2,
++ IMG_BOOL *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs);
++IMG_IMPORT
++PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
++ IMG_UINT32 *pui32DataCount,
++ IMG_UINT32 *pui32ClockSpeed,
++ IMG_UINT32 *pui32HostTimeStamp);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ IMG_BOOL bLockOnFailure,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+@@ -0,0 +1,134 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#include "sgxdefs.h"
++
++#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION 1
++#define DEV_MINOR_VERSION 0
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++ #define SGX_2D_HEAP_BASE 0x00100000
++ #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #else
++ #if defined(FIX_HW_BRN_26915)
++ #define SGX_CGBUFFER_HEAP_BASE 0x00100000
++ #define SGX_CGBUFFER_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #endif
++ #endif
++
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x10000000
++ #define SGX_GENERAL_HEAP_SIZE (0xC8000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0xD8000000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x10000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0xE8000000
++ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0xF5000000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF6000000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0xF8000000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF8400000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0xFA000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0xFF000000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0xFF800000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++#endif
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x01800000
++ #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0x08800000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x04000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0x0C800000
++ #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0x0D800000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0x0F000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++
++#endif
++
++#if !defined(SGX_CORE_IDENTIFIED)
++ #error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+@@ -0,0 +1,352 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include "sgxdefs.h"
++#include "device.h"
++#include "power.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_HOSTPORT_PRESENT 0x00000001UL
++
++
++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
++
++
++typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++
++ IMG_UINT8 ui8VersionMajor;
++ IMG_UINT8 ui8VersionMinor;
++ IMG_UINT32 ui32CoreConfig;
++ IMG_UINT32 ui32CoreFlags;
++
++
++ IMG_PVOID pvRegsBaseKM;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ IMG_PVOID pvHostPortBaseKM;
++
++ IMG_UINT32 ui32HPSize;
++
++ IMG_SYS_PHYADDR sHPSysPAddr;
++#endif
++
++
++ IMG_HANDLE hRegMapping;
++
++
++ IMG_SYS_PHYADDR sRegsPhysBase;
++
++ IMG_UINT32 ui32RegSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ IMG_UINT32 ui32ExtSysCacheRegsSize;
++
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++
++ IMG_UINT32 *pui32ExtSystemCacheRegsPT;
++
++ IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle;
++
++ IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr;
++#endif
++
++
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32uKernelTimerClock;
++
++ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
++
++
++
++ IMG_DEV_PHYADDR sKernelPDDevPAddr;
++
++ IMG_VOID *pvDeviceMemoryHeap;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;
++ PVRSRV_SGX_KERNEL_CCB *psKernelCCB;
++ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;
++ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo;
++ IMG_UINT32 *pui32KernelCCBEventKicker;
++#if defined(PDUMP)
++ IMG_UINT32 ui32KernelCCBEventKickerDumpVal;
++#endif
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo;
++ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++#if defined(SGX_SUPPORT_HWPROFILING)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++ IMG_UINT32 ui32KickTACounter;
++ IMG_UINT32 ui32KickTARenderCounter;
++#if defined(SUPPORT_SGX_HWPERF)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
++ IMG_UINT32 ui32HWGroupRequested;
++ IMG_UINT32 ui32HWReset;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpDPMStateMemInfo;
++#endif
++
++
++ IMG_UINT32 ui32ClientRefCount;
++
++
++ IMG_UINT32 ui32CacheControl;
++
++
++ IMG_UINT32 ui32ClientBuildOptions;
++
++
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++
++
++
++ IMG_VOID *pvMMUContextList;
++
++
++ IMG_BOOL bForcePTOff;
++
++ IMG_UINT32 ui32EDMTaskReg0;
++ IMG_UINT32 ui32EDMTaskReg1;
++
++ IMG_UINT32 ui32ClkGateStatusReg;
++ IMG_UINT32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ IMG_UINT32 ui32MasterClkGateStatusReg;
++ IMG_UINT32 ui32MasterClkGateStatusMask;
++#endif
++ SGX_INIT_SCRIPTS sScripts;
++
++
++ IMG_HANDLE hBIFResetPDOSMemHandle;
++ IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
++ IMG_UINT32 *pui32BIFResetPD;
++ IMG_UINT32 *pui32BIFResetPT;
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++ IMG_HANDLE hBRN22997PTPageOSMemHandle;
++ IMG_HANDLE hBRN22997PDPageOSMemHandle;
++ IMG_DEV_PHYADDR sBRN22997PTDevPAddr;
++ IMG_DEV_PHYADDR sBRN22997PDDevPAddr;
++ IMG_UINT32 *pui32BRN22997PT;
++ IMG_UINT32 *pui32BRN22997PD;
++ IMG_SYS_PHYADDR sBRN22997SysPAddr;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++
++ IMG_HANDLE hTimer;
++
++ IMG_UINT32 ui32TimeStamp;
++#endif
++
++
++ IMG_UINT32 ui32NumResets;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
++
++ IMG_UINT32 ui32Flags;
++
++ #if defined(PDUMP)
++ PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ IMG_VOID *pvDummyPTPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyPTDevPAddr;
++ IMG_HANDLE hDummyPTPageOSMemHandle;
++ IMG_VOID *pvDummyDataPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyDataDevPAddr;
++ IMG_HANDLE hDummyDataPageOSMemHandle;
++#endif
++
++ IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA];
++
++} PVRSRV_SGXDEV_INFO;
++
++
++typedef struct _SGX_TIMING_INFORMATION_
++{
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32HWRecoveryFreq;
++ IMG_BOOL bEnableActivePM;
++ IMG_UINT32 ui32ActivePowManLatencyms;
++ IMG_UINT32 ui32uKernelFreq;
++} SGX_TIMING_INFORMATION;
++
++typedef struct _SGX_DEVICE_MAP_
++{
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_SYS_PHYADDR sRegsSysPBase;
++ IMG_CPU_PHYADDR sRegsCpuPBase;
++ IMG_CPU_VIRTADDR pvRegsCpuVBase;
++ IMG_UINT32 ui32RegsSize;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ IMG_SYS_PHYADDR sHPSysPBase;
++ IMG_CPU_PHYADDR sHPCpuPBase;
++ IMG_UINT32 ui32HPSize;
++#endif
++
++
++ IMG_SYS_PHYADDR sLocalMemSysPBase;
++ IMG_DEV_PHYADDR sLocalMemDevPBase;
++ IMG_CPU_PHYADDR sLocalMemCpuPBase;
++ IMG_UINT32 ui32LocalMemSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++ IMG_UINT32 ui32ExtSysCacheRegsSize;
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++#endif
++
++
++ IMG_UINT32 ui32IRQ;
++
++#if !defined(SGX_DYNAMIC_TIMING_INFO)
++
++ SGX_TIMING_INFORMATION sTimingInfo;
++#endif
++} SGX_DEVICE_MAP;
++
++
++struct _PVRSRV_STUB_PBDESC_
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32TotalPBSize;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
++ IMG_UINT32 ui32SubKernelMemInfosCount;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_STUB_PBDESC *psNext;
++ PVRSRV_STUB_PBDESC **ppsThis;
++};
++
++typedef struct _PVRSRV_SGX_CCB_INFO_
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo;
++ SGXMKIF_COMMAND *psCommands;
++ IMG_UINT32 *pui32WriteOffset;
++ volatile IMG_UINT32 *pui32ReadOffset;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_SGX_CCB_INFO;
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SGXOSTimer(IMG_VOID *pvData);
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags);
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo);
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
++
++PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++IMG_VOID SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo);
++#endif
++
++#if defined(NO_HARDWARE)
++static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32StatusRegister,
++ IMG_UINT32 ui32StatusValue,
++ IMG_UINT32 ui32StatusMask)
++{
++ IMG_UINT32 ui32RegVal;
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++ ui32RegVal &= ~ui32StatusMask;
++ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+@@ -0,0 +1,2228 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++
++#include "sgx_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#include "sgxutils.h"
++#include "pvrversion.h"
++#include "sgx_options.h"
++
++#include "lists.h"
++#include "srvkm.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
++#endif
++
++#define VAR(x) #x
++
++#define CHECK_SIZE(NAME) \
++{ \
++ if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \
++ { \
++ PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \
++ VAR(NAME), \
++ psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \
++ psSGXStructSizes->ui32Sizeof_##NAME )); \
++ bStructSizesFailed = IMG_TRUE; \
++ } \
++}
++
++#if defined (SYS_USING_INTERRUPTS)
++IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
++#endif
++
++IMG_UINT32 gui32EventStatusServicesByISR = 0;
++
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (OSInLISR(psDeviceNode->psSysData))
++ {
++
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ }
++ else
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#else
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++#endif
++}
++
++static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ if (psDevInfo->psKernelCCBInfo != IMG_NULL)
++ {
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++
++ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL;
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ psDevInfo->sScripts = psInitInfo->sScripts;
++
++ psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
++ psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
++ psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
++ psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
++ psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo;
++
++ psDevInfo->psKernelSGXMiscMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ psDevInfo->psKernelTmpRgnHeaderMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ psDevInfo->psKernelTmpDPMStateMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpDPMStateMemInfo;
++#endif
++
++ psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions;
++
++
++ psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes;
++
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGX_CCB_INFO),
++ (IMG_VOID **)&psKernelCCBInfo, 0,
++ "SGX Circular Command Buffer Info");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
++ goto failed_allockernelccb;
++ }
++
++
++ OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
++ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
++ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
++ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
++ psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++ psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++
++
++ OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr,
++ SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0]));
++
++ psDevInfo->bForcePTOff = IMG_FALSE;
++
++ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
++ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg;
++ psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask;
++#endif
++
++
++
++ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
++
++ return PVRSRV_OK;
++
++failed_allockernelccb:
++ DeinitDevInfo(psDevInfo);
++
++ return eError;
++}
++
++
++
++
++static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
++{
++ IMG_UINT32 ui32PC;
++ SGX_INIT_COMMAND *psComm;
++
++ for (ui32PC = 0, psComm = psScript;
++ ui32PC < ui32NumInitCommands;
++ ui32PC++, psComm++)
++ {
++ switch (psComm->eOp)
++ {
++ case SGX_INIT_OP_WRITE_HW_REG:
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++ PDUMPREG(psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++ break;
++ }
++#if defined(PDUMP)
++ case SGX_INIT_OP_PDUMP_HW_REG:
++ {
++ PDUMPREG(psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
++ break;
++ }
++#endif
++ case SGX_INIT_OP_HALT:
++ {
++ return PVRSRV_OK;
++ }
++ case SGX_INIT_OP_ILLEGAL:
++
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++#if defined(PDUMP)
++ static IMG_BOOL bFirstTime = IMG_TRUE;
++#endif
++
++
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n");
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n");
++
++
++ SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++#if defined(EUR_CR_POWER)
++#if defined(SGX531)
++
++
++
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1);
++ PDUMPREG(EUR_CR_POWER, 1);
++#else
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0);
++ PDUMPREG(EUR_CR_POWER, 0);
++#endif
++#endif
++
++
++ *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++ if (bFirstTime)
++ {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 0;
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ }
++#endif
++
++
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n");
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n");
++
++
++ psSGXHostCtl->ui32InitStatus = 0;
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Reset the SGX microkernel initialisation status\n");
++ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++#if defined(SGX_FEATURE_MULTI_EVENT_KICK)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
++ EUR_CR_EVENT_KICK2_NOW_MASK);
++#else
++ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++#endif
++
++#if defined(PDUMP)
++
++
++
++
++
++
++ if (bFirstTime)
++ {
++#if defined(SGX_FEATURE_MULTI_EVENT_KICK)
++ PDUMPREG(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK);
++#else
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 1;
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "First increment of the SGX event kicker value\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(IMG_UINT32),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ PDUMPREG(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK);
++#endif
++ bFirstTime = IMG_FALSE;
++ }
++#endif
++
++#if !defined(NO_HARDWARE)
++
++
++ if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_RETRY;
++ }
++#endif
++
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Wait for the SGX microkernel initialisation to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++
++
++ WorkaroundBRN22997ReadHostPort(psDevInfo);
++#endif
++
++ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
++
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
++ PVRSRV_ERROR eError;
++
++
++ if (psDevInfo->pvRegsBaseKM == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hKernelDevMemContext;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_UINT32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGX Initialisation Part 1");
++
++
++ PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
++#ifdef SGX_CORE_REV
++ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
++#else
++ PDUMPCOMMENT("SGX Core Revision Information: head rtl");
++#endif
++
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is present\r\n");
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n");
++ #endif
++ #endif
++
++
++ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ (IMG_VOID **)&psDevInfo, IMG_NULL,
++ "SGX Device Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
++
++
++ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
++ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
++
++
++ psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
++
++
++ psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
++
++
++ hKernelDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr,
++ IMG_NULL,
++ IMG_NULL);
++ if (hKernelDevMemContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++
++ for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeap;
++
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++
++ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++ break;
++ }
++ }
++ }
++
++ eError = MMU_BIFResetPDAlloc(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ PDUMPCOMMENT("SGX Initialisation Part 2");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++
++
++ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program"));
++ goto failed_init_dev_info;
++ }
++
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++
++ if (psSGXDeviceMap->pvRegsCpuVBase)
++ {
++ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
++ }
++ else
++ {
++
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++ psSGXDeviceMap->ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase,
++ psSGXDeviceMap->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize;
++ psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase;
++ }
++#endif
++
++#if defined (SYS_USING_INTERRUPTS)
++
++
++ psDeviceNode->pvISRData = psDeviceNode;
++
++ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++#endif
++
++
++ psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++
++ eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
++ SGXPrePowerState, SGXPostPowerState,
++ SGXPreClockSpeedChange, SGXPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
++ return eError;
++ }
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ eError = WorkaroundBRN22997Alloc(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to alloc memory for BRN22997 workaround"));
++ return eError;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize;
++ psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase;
++ eError = MMU_MapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to map external system cache registers"));
++ return eError;
++ }
++#endif
++
++
++
++ OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
++ OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
++ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++ PDUMPCOMMENT("Initialise Kernel CCB");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Control");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++ return PVRSRV_OK;
++
++failed_init_dev_info:
++ return eError;
++}
++
++static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Heap;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++
++ if (!psDevInfo)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_HW_RECOVERY)
++ if (psDevInfo->hTimer)
++ {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++ return eError;
++ }
++ psDevInfo->hTimer = IMG_NULL;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers"));
++ return eError;
++ }
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ WorkaroundBRN22997Free(psDevInfo);
++#endif
++
++ MMU_BIFResetPDFree(psDevInfo);
++
++
++
++
++ DeinitDevInfo(psDevInfo);
++
++
++ psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
++ for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
++ {
++ switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
++ {
++ BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
++ }
++ break;
++ }
++ }
++ }
++
++
++ eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
++ return eError;
++ }
++
++
++ eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!"));
++ return eError;
++ }
++
++
++ if (!psSGXDeviceMap->pvRegsCpuVBase)
++ {
++
++ if (psDevInfo->pvRegsBaseKM != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32RegSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ psDevInfo->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++#endif
++
++
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ psDevInfo,
++ 0);
++
++ psDeviceNode->pvDevice = IMG_NULL;
++
++ if (psDeviceMemoryHeap != IMG_NULL)
++ {
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ psDeviceMemoryHeap,
++ 0);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXDumpDebugInfo (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_BOOL bDumpSGXRegs)
++{
++ IMG_UINT ui32RegVal;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ if (bDumpSGXRegs)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%08X", psDevInfo->pvRegsBaseKM));
++ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x%08X", psDevInfo->sRegsPhysBase));
++
++
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ if (ui32RegVal & (EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK | EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK))
++ {
++ PVR_LOG(("DPM out of memory!!"));
++ }
++ PVR_LOG(("EUR_CR_EVENT_STATUS: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ PVR_LOG(("EUR_CR_EVENT_STATUS2: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ PVR_LOG(("EUR_CR_BIF_CTRL: %x", ui32RegVal));
++
++ #if defined(EUR_CR_BIF_BANK0)
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0);
++ PVR_LOG(("EUR_CR_BIF_BANK0: %x", ui32RegVal));
++ #endif
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ PVR_LOG(("EUR_CR_BIF_INT_STAT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_LOG(("EUR_CR_BIF_FAULT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_REQ_STAT);
++ PVR_LOG(("EUR_CR_BIF_MEM_REQ_STAT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL);
++ PVR_LOG(("EUR_CR_CLKGATECTL: %x", ui32RegVal));
++
++ #if defined(EUR_CR_PDS_PC_BASE)
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_PDS_PC_BASE);
++ PVR_LOG(("EUR_CR_PDS_PC_BASE: %x", ui32RegVal));
++ #endif
++
++
++ }
++
++ #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ {
++ IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32LastStatusCode, ui32WriteOffset;
++
++ ui32LastStatusCode = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++ ui32WriteOffset = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++
++ PVR_LOG(("Last SGX microkernel status code: 0x%x", ui32LastStatusCode));
++
++ #if defined(PVRSRV_DUMP_MK_TRACE)
++
++
++ {
++ IMG_UINT32 ui32LoopCounter;
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE;
++ ui32LoopCounter++)
++ {
++ IMG_UINT32 *pui32BufPtr;
++ pui32BufPtr = pui32MKTraceBuffer +
++ (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4);
++ PVR_LOG(("(MKT%u) %08X %08X %08X %08X", ui32LoopCounter,
++ pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0]));
++ }
++ }
++ #endif
++ }
++ #endif
++
++ {
++
++
++ IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psDevInfo->psSGXHostCtl;
++ IMG_UINT32 ui32LoopCounter;
++
++ PVR_LOG(("SGX Host control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer);
++ ui32LoopCounter += 4)
++ {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32HostCtlBuffer),
++ pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1],
++ pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ {
++
++
++ IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32LoopCounter;
++
++ PVR_LOG(("SGX TA/3D control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->ui32AllocSize / sizeof(*pui32TA3DCtlBuffer);
++ ui32LoopCounter += 4)
++ {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer),
++ pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1],
++ pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ QueueDumpDebugInfo();
++}
++
++
++#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
++static
++IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32Component,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Component);
++
++
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++
++
++
++ PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
++ return;
++ }
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++
++ PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
++
++ SGXDumpDebugInfo(psDeviceNode, IMG_TRUE);
++
++
++ PDUMPSUSPEND();
++
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++ }
++
++
++ PDUMPRESUME();
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++
++
++ PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
++}
++#endif
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++IMG_VOID SGXOSTimer(IMG_VOID *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ static IMG_UINT32 ui32EDMTasks = 0;
++ static IMG_UINT32 ui32LockupCounter = 0;
++ static IMG_UINT32 ui32NumResets = 0;
++ IMG_UINT32 ui32CurrentEDMTasks;
++ IMG_BOOL bLockup = IMG_FALSE;
++ IMG_BOOL bPoweredDown;
++
++
++ psDevInfo->ui32TimeStamp++;
++
++#if defined(NO_HARDWARE)
++ bPoweredDown = IMG_TRUE;
++#else
++ bPoweredDown = SGXIsDevicePowered(psDeviceNode) ? IMG_FALSE : IMG_TRUE;
++#endif
++
++
++
++ if (bPoweredDown)
++ {
++ ui32LockupCounter = 0;
++ }
++ else
++ {
++
++ ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
++ if (psDevInfo->ui32EDMTaskReg1 != 0)
++ {
++ ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
++ }
++ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++ (psDevInfo->ui32NumResets == ui32NumResets))
++ {
++ ui32LockupCounter++;
++ if (ui32LockupCounter == 3)
++ {
++ ui32LockupCounter = 0;
++ PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
++
++ bLockup = IMG_TRUE;
++ }
++ }
++ else
++ {
++ ui32LockupCounter = 0;
++ ui32EDMTasks = ui32CurrentEDMTasks;
++ ui32NumResets = psDevInfo->ui32NumResets;
++ }
++ }
++
++ if (bLockup)
++ {
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32HostDetectedLockups ++;
++
++
++ HWRecoveryResetSGX(psDeviceNode, 0, KERNEL_ID);
++ }
++}
++#endif
++
++
++#if defined(SYS_USING_INTERRUPTS)
++
++IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
++{
++ IMG_BOOL bInterruptProcessed = IMG_FALSE;
++
++
++
++ {
++ IMG_UINT32 ui32EventStatus, ui32EventEnable;
++ IMG_UINT32 ui32EventClear = 0;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++
++ if(pvData == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));
++ return bInterruptProcessed;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
++
++
++
++ gui32EventStatusServicesByISR = ui32EventStatus;
++
++
++ ui32EventStatus &= ui32EventEnable;
++
++ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
++ {
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++ }
++
++ if (ui32EventClear)
++ {
++ bInterruptProcessed = IMG_TRUE;
++
++
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++ }
++ }
++
++ return bInterruptProcessed;
++}
++
++
++IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) &&
++ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL))
++ {
++ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
++ }
++
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (psDeviceNode->bReProcessDeviceCommandComplete)
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#endif
++
++ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
++}
++#endif
++
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
++ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pfnInitDevice = DevInitSGXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitSGX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = SGXDevInitCompatCheck;
++
++
++
++ psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++ psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++ psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++ psDeviceNode->pfnMMUCreate = MMU_Create;
++ psDeviceNode->pfnMMUDelete = MMU_Delete;
++ psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++ psDeviceNode->pfnMMUFree = MMU_Free;
++ psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++ psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++ psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++ psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++ psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++ psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++#if defined (SYS_USING_INTERRUPTS)
++
++
++ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++#endif
++
++
++
++ psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE;
++
++
++ psDevMemoryInfo->ui32Flags = 0;
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0,
++ "Array of Device Memory Heap Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID);
++
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "General";
++ psDeviceMemoryHeap->pszBSName = "General BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++#endif
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "TA Data";
++ psDeviceMemoryHeap->pszBSName = "TA Data BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "Kernel Code";
++ psDeviceMemoryHeap->pszBSName = "Kernel Code BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "KernelData";
++ psDeviceMemoryHeap->pszBSName = "KernelData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PIXELSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PixelShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_VERTEXSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "VertexShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSPixelCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSVertexCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "CacheCoherent";
++ psDeviceMemoryHeap->pszBSName = "CacheCoherent BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_3DPARAMETERS_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
++ psDeviceMemoryHeap->pszName = "3DParameters";
++ psDeviceMemoryHeap->pszBSName = "3DParameters BS";
++#if defined(SUPPORT_PERCONTEXT_PB)
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#else
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "GeneralMapping";
++ psDeviceMemoryHeap->pszBSName = "GeneralMapping BS";
++ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410)
++
++
++
++
++
++
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#else
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++#endif
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "2D";
++ psDeviceMemoryHeap->pszBSName = "2D BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++
++#if defined(FIX_HW_BRN_26915)
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_CGBUFFER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_CGBUFFER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_CGBUFFER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "CGBuffer";
++ psDeviceMemoryHeap->pszBSName = "CGBuffer BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++
++ psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++
++
++ psDevInfo->ui32ClientRefCount++;
++
++#if defined(PDUMP)
++
++ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++#endif
++
++
++ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++
++ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_LOG(("SGX panic"));
++ SGXDumpDebugInfo(psDeviceNode, IMG_FALSE);
++ OSPanic();
++}
++
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
++#if !defined(NO_HARDWARE)
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++ IMG_BOOL bStructSizesFailed;
++
++
++ IMG_BOOL bCheckCoreRev;
++ const IMG_UINT32 aui32CoreRevExceptions[] = {
++ 0x10100, 0x10101
++ };
++ const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / (2*sizeof(IMG_UINT32));
++ IMG_UINT i;
++#endif
++
++
++ if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Device not of type SGX"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto chk_exit;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++
++
++ ui32BuildOptions = (SGX_BUILD_OPTIONS);
++ if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions)
++ {
++ ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions;
++ if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
++ "extra options present in client-side driver: (0x%lx). Please check sgx_options.h",
++ psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
++ }
++
++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
++ "extra options present in KM: (0x%lx). Please check sgx_options.h",
++ ui32BuildOptions & ui32BuildOptionsMismatch ));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. [ OK ]"));
++ }
++
++#if !defined (NO_HARDWARE)
++ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXMiscInfoInt->ui32MiscInfoFlags = 0;
++ psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES;
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version"));
++ goto chk_exit;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++ if( (psSGXFeatures->ui32DDKVersion !=
++ ((PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH) ) ||
++ (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD) )
++ {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%ld)/device DDK revision (%ld).",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
++ PVR_DBG_BREAK;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: driver DDK (%ld) and device DDK (%ld) match. [ OK ]",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ }
++
++
++ if (psSGXFeatures->ui32CoreRevSW == 0)
++ {
++
++
++ PVR_LOG(("SGXInit: HW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev));
++ }
++ else
++ {
++
++ bCheckCoreRev = IMG_TRUE;
++ for(i=0; i<ui32NumCoreExceptions; i+=2)
++ {
++ if( (psSGXFeatures->ui32CoreRev==aui32CoreRevExceptions[i]) &&
++ (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) )
++ {
++ PVR_LOG(("SGXInit: HW core rev (%lx), SW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreRevSW));
++ bCheckCoreRev = IMG_FALSE;
++ }
++ }
++
++ if (bCheckCoreRev)
++ {
++ if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%lx) and SW core rev (%lx).",
++ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%lx) and SW core rev (%lx) match. [ OK ]",
++ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ }
++ }
++ }
++
++
++ psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes;
++
++ bStructSizesFailed = IMG_FALSE;
++
++ CHECK_SIZE(HOST_CTL);
++ CHECK_SIZE(COMMAND);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ CHECK_SIZE(2DCMD);
++ CHECK_SIZE(2DCMD_SHARED);
++#endif
++ CHECK_SIZE(CMDTA);
++ CHECK_SIZE(CMDTA_SHARED);
++ CHECK_SIZE(TRANSFERCMD);
++ CHECK_SIZE(TRANSFERCMD_SHARED);
++
++ CHECK_SIZE(3DREGISTERS);
++ CHECK_SIZE(HWPBDESC);
++ CHECK_SIZE(HWRENDERCONTEXT);
++ CHECK_SIZE(HWRENDERDETAILS);
++ CHECK_SIZE(HWRTDATA);
++ CHECK_SIZE(HWRTDATASET);
++ CHECK_SIZE(HWTRANSFERCONTEXT);
++
++ if (bStructSizesFailed == IMG_TRUE)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes."));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. [ OK ]"));
++ }
++
++
++ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
++ if (ui32BuildOptions != (SGX_BUILD_OPTIONS))
++ {
++ ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
++ if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
++ "extra options present in driver: (0x%lx). Please check sgx_options.h",
++ (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch ));
++ }
++
++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
++ "extra options present in microkernel: (0x%lx). Please check sgx_options.h",
++ ui32BuildOptions & ui32BuildOptionsMismatch ));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]"));
++ }
++#endif
++
++ eError = PVRSRV_OK;
++chk_exit:
++#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK)
++ return PVRSRV_OK;
++#else
++ return eError;
++#endif
++}
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SGXMKIF_COMMAND sCommandData;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++ if (! psMemInfo->pvLinAddrKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address."));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
++ psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes;
++
++ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
++
++
++ OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures));
++ OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes));
++
++
++ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode,
++ SGXMKIF_CMD_GETMISCINFO,
++ &sCommandData,
++ KERNEL_ID,
++ 0);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if !defined(NO_HARDWARE)
++ {
++ IMG_BOOL bExit;
++
++ bExit = IMG_FALSE;
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0)
++ {
++ bExit = IMG_TRUE;
++ break;
++ }
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ if (!bExit)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info."));
++ return PVRSRV_ERROR_TIMEOUT;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_HANDLE hDevMemContext)
++{
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ IMG_UINT32 *pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags;
++
++
++ *pui32MiscInfoFlags = 0;
++
++#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
++#endif
++
++ switch(psMiscInfo->eRequest)
++ {
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT:
++ {
++ IMG_UINT32 ui32RegOffset;
++ IMG_UINT32 ui32RegVal;
++ IMG_UINT32 ui32BaseRegOffset;
++ IMG_UINT32 ui32BaseRegVal;
++ IMG_UINT32 ui32MaskRegOffset;
++ IMG_UINT32 ui32MaskRegVal;
++
++ switch(psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex)
++ {
++ case 0:
++ ui32RegOffset = EUR_CR_BREAKPOINT0;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT0_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT0_MASK;
++ break;
++ case 1:
++ ui32RegOffset = EUR_CR_BREAKPOINT1;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT1_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT1_MASK;
++ break;
++ case 2:
++ ui32RegOffset = EUR_CR_BREAKPOINT2;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT2_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT2_MASK;
++ break;
++ case 3:
++ ui32RegOffset = EUR_CR_BREAKPOINT3;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT3_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT3_MASK;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"SGXGetMiscInfoKM: SGX_MISC_INFO_REQUEST_SET_BREAKPOINT invalid BP idx %d", psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable)
++ {
++
++ IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr;
++
++
++ ui32MaskRegVal = EUR_CR_BREAKPOINT0_MASK_REGION_MASK | EUR_CR_BREAKPOINT0_MASK_DM_MASK;
++
++
++ ui32BaseRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_BASE_ADDRESS_MASK;
++
++
++ ui32RegVal = EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK
++ | EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK
++ | EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK;
++ }
++ else
++ {
++
++ ui32RegVal = ui32BaseRegVal = ui32MaskRegVal = 0;
++ }
++
++
++
++
++
++
++
++
++
++
++ return PVRSRV_OK;
++ }
++#endif
++
++ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++ {
++ psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_SGXREV:
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%lx, sw ID 0x%lx, sw Rev 0x%lx\n",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreIdSW,
++ psSGXFeatures->ui32CoreRevSW));
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%lx, DDK build 0x%lx\n",
++ psSGXFeatures->ui32DDKVersion,
++ psSGXFeatures->ui32DDKBuild));
++
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
++ {
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++
++ OSMemSet(psMemInfo->pvLinAddrKM, 0,
++ sizeof(PVRSRV_SGX_MISCINFO_INFO));
++
++ psSGXFeatures->ui32DDKVersion =
++ (PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH;
++ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
++
++
++ psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS);
++
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ case SGX_MISC_INFO_REQUEST_MEMREAD:
++ {
++ PVRSRV_ERROR eError;
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ PVRSRV_SGX_MISCINFO_MEMREAD *psSGXMemReadData;
++
++ psSGXMemReadData = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemReadData;
++
++
++ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD;
++
++
++ if(psMiscInfo->hDevMemContext != IMG_NULL)
++ {
++ SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemReadData->sPDDevPAddr);
++ }
++ else
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if(psMiscInfo->sDevVAddr.uiAddr != 0)
++ {
++ psSGXMemReadData->sDevVAddr = psMiscInfo->sDevVAddr;
++ }
++ else
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++#if !defined SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
++ {
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT ui32MatchingFlags;
++
++
++ if ((psMiscInfo->uData.ui32NewHWPerfStatus & ~(PVRSRV_SGX_HWPERF_GRAPHICS_ON | PVRSRV_SGX_HWPERF_MK_EXECUTION_ON)) != 0)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ ui32MatchingFlags = psMiscInfo->uData.ui32NewHWPerfStatus & psDevInfo->psSGXHostCtl->ui32HWPerfFlags;
++ if((ui32MatchingFlags & PVRSRV_SGX_HWPERF_GRAPHICS_ON) == 0UL)
++ {
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffff;
++ }
++ if((ui32MatchingFlags & PVRSRV_SGX_HWPERF_MK_EXECUTION_ON) == 0UL)
++ {
++ psHWPerfCB->ui32OrdinalMK_EXECUTION = 0xffffffffUL;
++ }
++
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = psMiscInfo->uData.ui32NewHWPerfStatus;
++ #if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX ukernel HWPerf status %lu\n",
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32HWPerfFlags),
++ sizeof(psDevInfo->psSGXHostCtl->ui32HWPerfFlags), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++ {
++
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffffUL;
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags |= PVRSRV_SGX_HWPERF_GRAPHICS_ON;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++ {
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = 0;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++ {
++
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB *psRetrieve = &psMiscInfo->uData.sRetrieveCB;
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT i;
++
++ for (i = 0; psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < psRetrieve->ui32ArraySize; i++)
++ {
++ SGXMKIF_HWPERF_CB_ENTRY *psData = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++
++
++ psRetrieve->psHWPerfData[i].ui32FrameNo = psData->ui32FrameNo;
++ psRetrieve->psHWPerfData[i].ui32Type = (psData->ui32Type & PVRSRV_SGX_HWPERF_TYPE_OP_MASK);
++ psRetrieve->psHWPerfData[i].ui32StartTime = psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32StartTimeWraps = psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32EndTime = psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32EndTimeWraps = psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ psRetrieve->psHWPerfData[i].ui32TimeMax = psDevInfo->ui32uKernelTimerClock;
++ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++ psRetrieve->ui32DataCount = i;
++ psRetrieve->ui32Time = OSClockus();
++ return PVRSRV_OK;
++ }
++#endif
++ case SGX_MISC_INFO_DUMP_DEBUG_INFO:
++ {
++ PVR_LOG(("User requested SGX debug info"));
++
++
++ SGXDumpDebugInfo(psDeviceNode, IMG_FALSE);
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_PANIC:
++ {
++ PVR_LOG(("User requested SGX panic"));
++
++ SGXPanic(psDeviceNode);
++
++ return PVRSRV_OK;
++ }
++
++ default:
++ {
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++}
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_EXPORT
++PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32Reg,
++ IMG_UINT32 *pui32Old,
++ IMG_BOOL bNew,
++ IMG_UINT32 ui32New,
++ IMG_UINT32 ui32NewReset,
++ IMG_UINT32 ui32CountersReg,
++ IMG_UINT32 ui32Reg2,
++ IMG_BOOL *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++ IMG_BOOL bPowered = IMG_FALSE;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++
++ if(bNew)
++ {
++ psDevInfo->ui32HWGroupRequested = ui32New;
++ }
++ psDevInfo->ui32HWReset |= ui32NewReset;
++
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ psDeviceNode->sDevId.ui32DeviceIndex);
++
++ if (psPowerDevice)
++ {
++ bPowered = (IMG_BOOL)(psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON);
++ }
++
++
++
++ *pbActive = bPowered;
++
++
++
++ {
++ IMG_UINT32 ui32rval = 0;
++
++
++ if(bPowered)
++ {
++ IMG_UINT32 i;
++
++
++ *pui32Old = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg);
++
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i)
++ {
++ psDiffs->aui32Counters[i] = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32CountersReg + (i * 4));
++ }
++
++ if(ui32Reg2)
++ {
++ ui32rval = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg2);
++ }
++
++
++
++ if (psDevInfo->ui32HWGroupRequested != *pui32Old)
++ {
++
++ if(psDevInfo->ui32HWReset != 0)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, psDevInfo->ui32HWGroupRequested | psDevInfo->ui32HWReset);
++ psDevInfo->ui32HWReset = 0;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, psDevInfo->ui32HWGroupRequested);
++ }
++ }
++
++ psDiffs->ui32Time[0] = OSClockus();
++ psDiffs->ui32Time[1] = psDevInfo->psSGXHostCtl->ui32TimeWraps;
++ psDiffs->ui32Time[2] = ui32rval;
++
++ psDiffs->ui32Marker[0] = psDevInfo->ui32KickTACounter;
++ psDiffs->ui32Marker[1] = psDevInfo->ui32KickTARenderCounter;
++ }
++
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ SGXTestActivePowerEvent(psDeviceNode, KERNEL_ID);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry,
++ IMG_UINT32 *pui32DataCount,
++ IMG_UINT32 *pui32ClockSpeed,
++ IMG_UINT32 *pui32HostTimeStamp)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
++ i++)
++ {
++ SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
++ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
++ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
++ psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo,
++ psMKPerfEntry->ui32TimeWraps,
++ psMKPerfEntry->ui32Time);
++ OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0],
++ &psMKPerfEntry->ui32Counters[0],
++ sizeof(psMKPerfEntry->ui32Counters));
++
++ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++
++ *pui32DataCount = i;
++ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ *pui32HostTimeStamp = OSClockus();
++
++ return eError;
++}
++#else
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+@@ -0,0 +1,744 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined (PDUMP)
++#include "sgxapi_km.h"
++#include "pdump_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
++ SGXMKIF_CMDTA_SHARED *psTACmd;
++ IMG_UINT32 i;
++#if defined(SUPPORT_SGX_HWPERF)
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ if (psCCBKick->bKickRender)
++ {
++ ++psDevInfo->ui32KickTARenderCounter;
++ }
++ ++psDevInfo->ui32KickTACounter;
++#endif
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
++
++
++ if (psCCBKick->hTA3DSyncInfo)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psCCBKick->bTADependency)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui32TATQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
++ if (psCCBKick->ui32NumTAStatusVals != 0)
++ {
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
++ if (psCCBKick->ui32Num3DStatusVals != 0)
++ {
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs;
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++
++ psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs;
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++
++ psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs;
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++
++ psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#else
++
++ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
++
++ PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->ui32AllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) +
++ (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs)));
++
++ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
++ PDUMPMEM(IMG_NULL,
++ psHWDstSyncListMemInfo,
++ 0,
++ sizeof(SGXMKIF_HWDEVICE_SYNC_LIST),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++#endif
++
++ for (i=0; i<ui32NumDstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++
++ if (psSyncInfo)
++ {
++ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++ #if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData)
++ + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT));
++ IMG_UINT32 ui32WOpsOffset = ui32SyncOffset
++ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal);
++ IMG_UINT32 ui32ROpsOffset = ui32SyncOffset
++ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal);
++
++ PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i);
++
++ PDUMPMEM(IMG_NULL,
++ psHWDstSyncListMemInfo,
++ ui32SyncOffset,
++ sizeof(PVRSRV_DEVICE_SYNC_OBJECT),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psHWDstSyncListMemInfo,
++ ui32WOpsOffset,
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ ui32ModifiedValue = 0;
++ PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psHWDstSyncListMemInfo,
++ ui32ROpsOffset,
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++ #endif
++ }
++ else
++ {
++ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0;
++
++ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0;
++ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0;
++ }
++ }
++ }
++
++
++
++
++ psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ PDUMPCOMMENT("Shared part of TA command\r\n");
++
++ PDUMPMEM(psTACmd,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_CMDTA_SHARED),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT TA-SRC ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT TA-DST ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#else
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#endif
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->asTAStatusUpdate[i].ui32LastStatusUpdateDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->as3DStatusUpdate[i].ui32LastStatusUpdateDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++ }
++#endif
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0);
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++)
++ {
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++
++ if (psSyncInfo)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++#endif
++ }
++ }
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#else
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#endif
++
++ return eError;
++ }
++ else if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if defined(NO_HARDWARE)
++
++
++
++ if (psCCBKick->hTA3DSyncInfo)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++
++ if (psCCBKick->bTADependency)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo;
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#endif
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#else
++
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bTerminateOrAbort)
++ {
++ if (psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
++
++ for (i=0; i<psCCBKick->ui32NumDstSyncObjects; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++ if (psSyncInfo)
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1;
++ }
++ }
++
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo;
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#endif
++ }
++ }
++#endif
++
++ return eError;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+@@ -0,0 +1,453 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGX_TIMING_INFORMATION *psSGXTimingInfo,
++ IMG_HANDLE *phTimer)
++{
++ *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
++ 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
++ if(*phTimer == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ SGX_TIMING_INFORMATION sSGXTimingInfo = {0};
++#else
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++#endif
++ IMG_UINT32 ui32ActivePowManSampleRate;
++ SGX_TIMING_INFORMATION *psSGXTimingInfo;
++
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ psSGXTimingInfo = &sSGXTimingInfo;
++ SysGetSGXTimingInformation(psSGXTimingInfo);
++#else
++ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32OlduKernelFreq;
++
++ if (psDevInfo->hTimer != IMG_NULL)
++ {
++ ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock;
++ if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq)
++ {
++ IMG_HANDLE hNewTimer;
++
++ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer);
++ if (eError == PVRSRV_OK)
++ {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer"));
++ }
++ psDevInfo->hTimer = hNewTimer;
++ }
++ else
++ {
++
++ }
++ }
++ }
++ else
++ {
++ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate =
++ psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
++ }
++#endif
++
++
++ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++ psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++
++
++ psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
++#if defined(PDUMP)
++ PDUMPCOMMENT("Host Control - Microkernel clock");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ if (psSGXTimingInfo->bEnableActivePM)
++ {
++ ui32ActivePowManSampleRate =
++ psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
++
++
++
++
++
++
++
++
++ ui32ActivePowManSampleRate += 1;
++ }
++ else
++ {
++ ui32ActivePowManSampleRate = 0;
++ }
++
++ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate;
++#if defined(PDUMP)
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ #if defined(SUPPORT_HW_RECOVERY)
++ PVRSRV_ERROR eError;
++
++ eError = OSEnableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer"));
++ }
++ #else
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ #endif
++}
++
++
++static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32Register,
++ IMG_UINT32 ui32RegisterValue,
++ IMG_CHAR *pszComment)
++{
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32Register);
++ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
++ PVR_UNREFERENCED_PARAMETER(pszComment);
++
++ #if !defined(NO_HARDWARE)
++ PVR_ASSERT(psDevInfo != IMG_NULL);
++
++
++ if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
++ 0,
++ ui32RegisterValue,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: %s failed.", pszComment));
++ }
++ #endif
++
++ PDUMPCOMMENT(pszComment);
++ PDUMPREGPOL(ui32Register, 0, ui32RegisterValue);
++}
++
++
++PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32PowerCmd, ui32CompleteStatus;
++ SGXMKIF_COMMAND sCommand = {0};
++ IMG_UINT32 ui32Core;
++
++ #if defined(SUPPORT_HW_RECOVERY)
++
++ eError = OSDisableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
++ return eError;
++ }
++ #endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
++ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
++ PDUMPCOMMENT("SGX power off request");
++ }
++ else
++ {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
++ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
++ PDUMPCOMMENT("SGX idle request");
++ }
++
++ sCommand.ui32Data[1] = ui32PowerCmd;
++
++ eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command"));
++ return eError;
++ }
++
++
++ #if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed."));
++ PVR_DBG_BREAK;
++ }
++ #endif
++
++ #if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel.");
++ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ for (ui32Core = 0; ui32Core < SGX_FEATURE_MP_CORE_COUNT; ui32Core++)
++ {
++
++ SGXPollForClockGating(psDevInfo,
++ SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core),
++ psDevInfo->ui32ClkGateStatusMask,
++ "Wait for SGX clock gating");
++ }
++
++ #if defined(SGX_FEATURE_MP)
++
++ SGXPollForClockGating(psDevInfo,
++ psDevInfo->ui32MasterClkGateStatusReg,
++ psDevInfo->ui32MasterClkGateStatusMask,
++ "Wait for SGX master clock gating");
++ #endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++
++ eError = SGXDeinitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %lu", eError));
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32PowerStatus = 0;
++ #if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Reset power status");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
++ return eError;
++ }
++ }
++ else
++ {
++
++
++ SGXMKIF_COMMAND sCommand = {0};
++
++ sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
++ eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %lu", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ SGXStartTimer(psDevInfo);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
++ {
++ if (bIdleDevice)
++ {
++
++ PDUMPSUSPEND();
++
++ eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE,
++ PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK)
++ {
++ PDUMPRESUME();
++ return eError;
++ }
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %luHz",
++ psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;
++
++ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ if (bIdleDevice)
++ {
++ eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_IDLE);
++
++ PDUMPRESUME();
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ else
++ {
++ SGXStartTimer(psDevInfo);
++ }
++
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %luHz to %luHz",
++ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+@@ -0,0 +1,489 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++
++static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bResetBIF,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ IMG_UINT32 ui32SoftResetRegVal;
++
++#if defined(SGX_FEATURE_MP)
++ ui32SoftResetRegVal =
++ EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK;
++#endif
++
++ if (bResetBIF)
++ {
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++ }
++#endif
++
++ ui32SoftResetRegVal =
++
++ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_SOFT_RESET_TA_RESET_MASK |
++ EUR_CR_SOFT_RESET_USE_RESET_MASK |
++ EUR_CR_SOFT_RESET_ISP_RESET_MASK |
++ EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK;
++#endif
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ if (bResetBIF)
++ {
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++ }
++}
++
++
++static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++
++ OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++ if (bPDump)
++ {
++ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++ PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++ }
++
++
++
++}
++
++
++static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ IMG_UINT32 ui32RegVal;
++
++
++#if defined(EUR_CR_BIF_CTRL_INVAL)
++ ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags);
++ }
++#else
++ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++#endif
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++
++
++
++ if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
++ 0,
++ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
++ PVR_DBG_BREAK;
++ }
++
++ if (bPDump)
++ {
++ PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags);
++ }
++ }
++#endif
++}
++
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags)
++{
++ IMG_UINT32 ui32RegVal;
++#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
++ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
++#else
++ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
++#endif
++
++#ifndef PDUMP
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ psDevInfo->ui32NumResets++;
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ if (ui32RegVal & ui32BifFaultMask)
++ {
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++ }
++#endif
++
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags);
++#endif
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#endif
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++ IMG_UINT32 ui32DirList, ui32DirListReg;
++
++ for (ui32DirList = 1;
++ ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
++ ui32DirList++)
++ {
++ ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
++ PDUMPREGWITHFLAGS(ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
++ }
++ }
++#endif
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++
++
++ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++ (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++ (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++#if defined(SGX_FEATURE_MP)
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++ #error SGX_BYPASS_SYSTEM_CACHE not supported
++ #else
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
++ (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++ #endif
++#else
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK;
++ #else
++ #if defined(FIX_HW_BRN_26620)
++ ui32RegVal = 0;
++ #else
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYP_CC_MASK;
++ #endif
++ #endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MNE_CR_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MNE_CR_CTRL, ui32RegVal);
++#endif
++#endif
++
++
++
++
++
++
++ ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++
++ for (;;)
++ {
++ IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ IMG_DEV_VIRTADDR sBifFault;
++ IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++ if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++ {
++ break;
++ }
++
++
++
++
++ sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++ ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
++
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr
++ >>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++ }
++
++
++
++
++ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++
++ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++ #endif
++
++ #if defined(FIX_HW_BRN_23410)
++
++ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++ #endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++ #endif
++
++ {
++ IMG_UINT32 ui32EDMDirListReg;
++
++
++ #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
++ #else
++
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
++ #endif
++
++#if defined(FIX_HW_BRN_28011)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++ }
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++
++ #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
++ #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
++ #endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX"));
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++ ui32RegVal = 0;
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++#endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+@@ -0,0 +1,543 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(TRANSFER_QUEUE)
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "regpaths.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = {0};
++ SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ else
++ {
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ else
++ {
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++
++ psSharedTransferCmd->ui32SrcWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32SrcReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sSrcWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sSrcReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++
++ psSharedTransferCmd->ui32DstWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32DstReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sDstWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sDstReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++
++ if (psKick->ui32NumDstSync > 1 || psKick->ui32NumSrcSync > 1)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Transfer command doesn't support more than 1 sync object per src/dst\ndst: %d, src: %d",
++ psKick->ui32NumDstSync, psKick->ui32NumSrcSync));
++ }
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ PDUMPCOMMENT("Shared part of transfer command\r\n");
++ PDUMPMEM(psSharedTransferCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_TRANSFERCMD_SHARED),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ if((psKick->ui32NumSrcSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[0];
++
++ PDUMPCOMMENT("Hack src surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32SrcWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32SrcReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++ if((psKick->ui32NumDstSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = psKick->ahDstSyncInfo[0];
++
++ PDUMPCOMMENT("Hack dest surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32DstWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32DstReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++
++
++ if((psKick->ui32NumSrcSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING)== 0UL))
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if((psKick->ui32NumDstSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++ }
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++ else if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if defined(NO_HARDWARE)
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0)
++ {
++ IMG_UINT32 i;
++
++
++ for(i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for(i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++#endif
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
++
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = {0};
++ SGXMKIF_2DCMD_SHARED *ps2DCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++
++ PDUMPCOMMENT("Shared part of 2D command\r\n");
++ PDUMPMEM(ps2DCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_2DCMD_SHARED),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ PDUMPCOMMENT("Hack src surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ PDUMPCOMMENT("Hack dest surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++
++#if defined(NO_HARDWARE)
++
++ for(i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ return eError;
++}
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+@@ -0,0 +1,934 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#ifdef __linux__
++#include <linux/tty.h>
++#else
++#include <stdio.h>
++#endif
++#include "psb_powermgmt.h"
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID);
++#endif
++
++
++
++IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32NumActivePowerEvents++;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++ {
++
++
++
++ if (ui32CallerID == ISR_ID)
++ {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ }
++ else
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++ }
++}
++
++
++IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0) &&
++ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0))
++ {
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++
++
++ PDUMPSUSPEND();
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++
++
++
++ eError = SysPowerDownMISR(psDeviceNode, ui32CallerID);
++#else
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ui32CallerID, IMG_FALSE);
++ if (eError == PVRSRV_OK)
++ {
++ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++#endif
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++
++
++ psSGXHostCtl->ui32InterruptClearFlags &= ~PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++ eError = PVRSRV_OK;
++ }
++
++
++ PDUMPRESUME();
++ }
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu", eError));
++ }
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGXAcquireKernelCCBSlot)
++#endif
++static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
++{
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
++ {
++ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ return IMG_NULL;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_SGX_CCB_INFO *psKernelCCB;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SGXMKIF_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++ IMG_VOID *pvDumpCommand;
++ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32CallerID);
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++#endif
++
++ psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++
++ if(!psSGXCommand)
++ {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++
++
++ psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl;
++
++#if defined(PDUMP)
++
++ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++
++ psDevInfo->ui32CacheControl = 0;
++
++
++ *psSGXCommand = *psCommandData;
++
++ if (eCmdType >= SGXMKIF_CMD_MAX)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM: Unknown command type: %d", eCmdType)) ;
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++ }
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ if (psSysData->bFlushAll)
++ {
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = IMG_FALSE;
++ }
++ }
++#endif
++
++ psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType];
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE))
++ {
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff,
++ 0xff,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command\r\n");
++ pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND)));
++
++ PDUMPMEM(pvDumpCommand,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND),
++ sizeof(SGXMKIF_COMMAND),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++
++ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) +
++ offsetof(SGXMKIF_COMMAND, ui32CacheControl),
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++
++ psDevInfo->sPDContext.ui32CacheControl = 0;
++ }
++ }
++#endif
++
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ eError = PollForValueKM (psKernelCCB->pui32ReadOffset,
++ *psKernelCCB->pui32WriteOffset,
++ 0xFF,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT);
++ if (eError != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++#endif
++
++
++
++ *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE))
++ {
++ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff),
++ 0xFF,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ #endif
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF;
++ }
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n");
++ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++ psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n");
++ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags);
++ #else
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags);
++ #endif
++ }
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
++ EUR_CR_EVENT_KICK2_NOW_MASK);
++#else
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++#endif
++
++#if defined(NO_HARDWARE)
++
++ *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++ return eError;
++}
++
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++
++ PDUMPSUSPEND();
++
++ ospm_power_using_hw_begin(OSPM_GRAPHICS_ISLAND, OSPM_UHB_FORCE_POWER_ON);
++
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_ON,
++ ui32CallerID,
++ IMG_TRUE);
++
++ PDUMPRESUME();
++
++ if (eError == PVRSRV_OK)
++ {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
++ }
++ else
++ {
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ if (ui32CallerID == ISR_ID)
++ {
++
++
++
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ eError = PVRSRV_OK;
++ }
++ else
++ {
++
++
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
++ "ui32CallerID:%ld eError:%lu", ui32CallerID, eError));
++ }
++
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++ return eError;
++ }
++
++ eError = SGXScheduleCCBCommand(psDevInfo, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags);
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++
++ if (ui32CallerID != ISR_ID)
++ {
++
++
++
++ SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++
++ return eError;
++}
++
++
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32PowerStatus;
++ SGXMKIF_COMMAND sCommand = {0};
++
++ ui32PowerStatus = psHostCtl->ui32PowerStatus;
++ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ {
++
++ return PVRSRV_OK;
++ }
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %lu", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
++
++
++ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
++ (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ IMG_UINT32 ui32CleanupType)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo = psDeviceNode->pvDevice;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ {
++
++ }
++ else
++ {
++ SGXMKIF_COMMAND sCommand = {0};
++
++ PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resouce clean-up");
++ sCommand.ui32Data[0] = ui32CleanupType;
++ sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command"));
++ PVR_DBG_BREAK;
++ }
++
++
++ #if !defined(NO_HARDWARE)
++ if(PollForValueKM(&psSGXHostCtl->ui32CleanupStatus,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up failed"));
++ PVR_DBG_BREAK;
++ }
++ #endif
++
++ #if defined(PDUMP)
++
++ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++ #endif
++
++ psSGXHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE);
++ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ psSGXDevInfo->ui32CacheControl |= (SGXMKIF_CC_INVAL_BIF_SL | SGXMKIF_CC_INVAL_DATA);
++#else
++ psSGXDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_DATA;
++#endif
++ }
++}
++
++
++typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_RENDER_CONTEXT_CLEANUP;
++
++
++static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWRenderContextDevVAddr,
++ PVRSRV_CLEANUPCMD_RC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
++
++
++static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWTransferContextDevVAddr,
++ PVRSRV_CLEANUPCMD_TC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Render Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ (IMG_VOID *)psCleanup,
++ 0,
++ &SGXCleanupHWRenderContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++ if (psCleanup == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Transfer Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ psCleanup,
++ 0,
++ &SGXCleanupHWTransferContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWTransferContext != IMG_NULL);
++
++ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
++
++ if (psCleanup == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_2D_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHW2DContextDevVAddr,
++ PVRSRV_CLEANUPCMD_2DC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware 2D Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ psCleanup,
++ 0,
++ &SGXCleanupHW2DContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHW2DContext != IMG_NULL);
++
++ if (hHW2DContext == IMG_NULL)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DQuerySyncOpsComplete)
++#endif
++static INLINE
++IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_UINT32 ui32ReadOpsPending,
++ IMG_UINT32 ui32WriteOpsPending)
++{
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ return (IMG_BOOL)(
++ (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) &&
++ (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending)
++ );
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete)
++{
++ IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
++
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++
++ if (!bWaitForComplete)
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++
++ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++
++#if defined(DEBUG)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData));
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
++
++ }
++#endif
++
++ return PVRSRV_ERROR_TIMEOUT;
++}
++
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != 0);
++
++ SGXCleanupRequest(psDeviceNode,
++ &sHWRTDataSetDevVAddr,
++ PVRSRV_CLEANUPCMD_RT);
++}
++
++
++IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32TimeWraps,
++ IMG_UINT32 ui32Time)
++{
++#if defined(EUR_CR_TIMER)
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32TimeWraps);
++ return ui32Time;
++#else
++ IMG_UINT64 ui64Clocks;
++ IMG_UINT32 ui32Clocksx16;
++
++ ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
++ (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
++ ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16);
++
++ return ui32Clocksx16;
++#endif
++}
++
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "perproc.h"
++#include "sgxinfokm.h"
++
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++ ((sizeof(type) <= (psCCBMemInfo)->ui32AllocSize) && \
++ ((psCCBKick)->offset <= (psCCBMemInfo)->ui32AllocSize - sizeof(type)))
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++ ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \
++ (psCCBKick)->offset))
++
++
++IMG_IMPORT
++IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags);
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext);
++#endif
++
++IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32TimeWraps,
++ IMG_UINT32 ui32Time);
++
++IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ IMG_UINT32 ui32CleanupType);
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/env_data.h
+@@ -0,0 +1,66 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++#include <linux/workqueue.h>
++#endif
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
++
++typedef struct _PVR_PCI_DEV_TAG
++{
++ struct pci_dev *psPCIDev;
++ HOST_PCI_INIT_FLAGS ePCIFlags;
++ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
++
++typedef struct _ENV_DATA_TAG
++{
++ IMG_VOID *pvBridgeData;
++ struct pm_dev *psPowerDevice;
++ IMG_BOOL bLISRInstalled;
++ IMG_BOOL bMISRInstalled;
++ IMG_UINT32 ui32IRQ;
++ IMG_VOID *pvISRCookie;
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct workqueue_struct *psWorkQueue;
++#endif
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct work_struct sMISRWork;
++ IMG_VOID *pvMISRData;
++#else
++ struct tasklet_struct sMISRTasklet;
++#endif
++} ENV_DATA;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/env_perproc.h
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __ENV_PERPROC_H__
++#define __ENV_PERPROC_H__
++
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++
++#include "services.h"
++#include "handle.h"
++
++typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
++{
++ IMG_HANDLE hBlockAlloc;
++ struct proc_dir_entry *psProcDir;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ struct list_head sDRMAuthListHead;
++#endif
++} PVRSRV_ENV_PER_PROCESS_DATA;
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/event.c
+@@ -0,0 +1,270 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/system.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "lock.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++ rwlock_t sLock;
++ struct list_head sList;
++
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++ atomic_t sTimeStamp;
++ IMG_UINT32 ui32TimeStampPrevious;
++#if defined(DEBUG)
++ IMG_UINT ui32Stats;
++#endif
++ wait_queue_head_t sWait;
++ struct list_head sList;
++ IMG_HANDLE hResItem;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ (IMG_VOID **)&psEvenObjectList, IMG_NULL,
++ "Linux Event Object List") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++ rwlock_init(&psEvenObjectList->sLock);
++
++ *phEventObjectList = (IMG_HANDLE) psEvenObjectList;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
++{
++
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
++
++ if(psEvenObjectList)
++ {
++ if (!list_empty(&psEvenObjectList->sList))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEvenObjectList, IMG_NULL);
++
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject)
++{
++ if(hOSEventObjectList)
++ {
++ if(hOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
++#endif
++ if(ResManFreeResByPtr(psLinuxEventObject->hResItem) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++ }
++ }
++ return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_del(&psLinuxEventObject->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %u", psLinuxEventObject->ui32Stats));
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
++ (IMG_VOID **)&psLinuxEventObject, IMG_NULL,
++ "Linux Event Object") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
++ psLinuxEventObject->ui32TimeStampPrevious = 0;
++
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats = 0;
++#endif
++ init_waitqueue_head(&psLinuxEventObject->sWait);
++
++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++ psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_EVENT_OBJECT,
++ psLinuxEventObject,
++ 0,
++ &LinuxEventObjectDeleteCallback);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++ *phOSEventObject = psLinuxEventObject;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ struct list_head *psListEntry, *psListEntryTemp, *psList;
++ psList = &psLinuxEventObjectList->sList;
++
++ list_for_each_safe(psListEntry, psListEntryTemp, psList)
++ {
++
++ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
++
++ atomic_inc(&psLinuxEventObject->sTimeStamp);
++ wake_up_interruptible(&psLinuxEventObject->sWait);
++ }
++
++ return PVRSRV_OK;
++
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
++{
++ IMG_UINT32 ui32TimeStamp;
++ DEFINE_WAIT(sWait);
++
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++ IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
++
++ do
++ {
++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
++ ui32TimeStamp = atomic_read(&psLinuxEventObject->sTimeStamp);
++
++ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
++ {
++ break;
++ }
++
++ mutex_unlock(&gPVRSRVLock);
++
++ ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);
++
++ mutex_lock(&gPVRSRVLock);
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats++;
++#endif
++
++
++ } while (ui32TimeOutJiffies);
++
++ finish_wait(&psLinuxEventObject->sWait, &sWait);
++
++ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
++
++ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
++
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/event.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject);
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/linkage.h
+@@ -0,0 +1,61 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LINKAGE_H__
++#define __LINKAGE_H__
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_INT32 PVRSRV_BridgeDispatchKM(struct file *file, IMG_UINT cmd, IMG_UINT32 arg);
++#endif
++
++IMG_VOID PVRDPFInit(IMG_VOID);
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID);
++IMG_VOID PVROSFuncDeInit(IMG_VOID);
++
++#ifdef DEBUG
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++
++#endif
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/lock.h
+@@ -0,0 +1,34 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LOCK_H__
++#define __LOCK_H__
++
++#include <linux/mutex.h>
++
++extern struct mutex gPVRSRVLock;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mm.c
+@@ -0,0 +1,570 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++#include <linux/mutex.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "syscommon.h"
++#include "mutils.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "lock.h"
++
++
++static struct kmem_cache *linux_mem_area_cache;
++
++int linux_mm_init(void)
++{
++ linux_mem_area_cache = kmem_cache_create("img-mm", sizeof(LinuxMemArea), 0, 0, NULL);
++
++ if (!linux_mem_area_cache)
++ {
++ pr_err("%s: failed to allocate kmem_cache", __FUNCTION__);
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++void linux_mm_cleanup(void)
++{
++ if (linux_mem_area_cache)
++ {
++ kmem_cache_destroy(linux_mem_area_cache);
++ linux_mem_area_cache = NULL;
++ }
++}
++
++void *vmalloc_wrapper(u32 bytes, u32 alloc_flags)
++{
++ /*
++ * FIXME: This function creates a memory alias
++ * of a page, with a mismatching PAT type.
++ * This wants to be fixed most likely.
++ */
++ pgprot_t pgprot_flags;
++
++ switch(alloc_flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ pgprot_flags = PAGE_KERNEL;
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ pgprot_flags = PGPROT_WC(PAGE_KERNEL);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ pgprot_flags = PGPROT_UC(PAGE_KERNEL);
++ break;
++ default:
++ WARN(1, "unknown mapping flags=0x%08x", alloc_flags);
++ return NULL;
++ }
++
++
++ return __vmalloc(bytes, GFP_KERNEL | __GFP_HIGHMEM, pgprot_flags);
++}
++
++LinuxMemArea *vmalloc_linux_mem_area(u32 bytes, u32 area_flags)
++{
++ LinuxMemArea *mem_area;
++ void *vptr;
++
++ mem_area = kmem_cache_alloc(linux_mem_area_cache, GFP_KERNEL);
++
++ if (!mem_area)
++ goto failed;
++
++ vptr = vmalloc_wrapper(bytes, area_flags);
++ if (!vptr)
++ goto failed;
++
++ mem_area->eAreaType = LINUX_MEM_AREA_VMALLOC;
++ mem_area->uData.sVmalloc.pvVmallocAddress = vptr;
++ mem_area->ui32ByteSize = bytes;
++ mem_area->ui32AreaFlags = area_flags;
++ mem_area->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&mem_area->sMMapOffsetStructList);
++
++ return mem_area;
++
++failed:
++ pr_err("%s: failed!", __FUNCTION__);
++ if (mem_area)
++ kmem_cache_free(linux_mem_area_cache, mem_area);
++ return NULL;
++}
++
++
++void __iomem *ioremap_wrapper(resource_size_t address,
++ u32 bytes, u32 mapping_flags)
++{
++ void __iomem *cookie;
++
++ switch(mapping_flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ cookie = ioremap_cache(address, bytes);
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ cookie = ioremap_wc(address, bytes);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ cookie = ioremap_nocache(address, bytes);
++ break;
++ default:
++ pr_err("ioremap_wrapper: unknown mapping flags");
++ return NULL;
++ }
++
++ return cookie;
++}
++
++LinuxMemArea *
++ioremap_linux_mem_area(resource_size_t address, u32 bytes, u32 area_flags)
++{
++ LinuxMemArea *mem_area;
++ void __iomem *cookie;
++
++ mem_area = kmem_cache_alloc(linux_mem_area_cache, GFP_KERNEL);
++ if (!mem_area)
++ return NULL;
++
++ cookie = ioremap_wrapper(address, bytes, area_flags);
++ if (!cookie)
++ {
++ kmem_cache_free(linux_mem_area_cache, mem_area);
++ return NULL;
++ }
++
++ mem_area->eAreaType = LINUX_MEM_AREA_IOREMAP;
++ mem_area->uData.sIORemap.pvIORemapCookie = cookie;
++ mem_area->uData.sIORemap.CPUPhysAddr = address;
++ mem_area->ui32ByteSize = bytes;
++ mem_area->ui32AreaFlags = area_flags;
++ mem_area->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&mem_area->sMMapOffsetStructList);
++
++ return mem_area;
++}
++
++
++static IMG_BOOL
++TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, u32 ui32Bytes, IMG_BOOL bPhysContig)
++{
++ u32 ui32;
++ u32 ui32AddrChk;
++ u32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
++
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr)
++ {
++ if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk)))
++ {
++ break;
++ }
++ }
++ if (ui32 == ui32NumPages)
++ {
++ return IMG_FALSE;
++ }
++
++ if (!bPhysContig)
++ {
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, void *pvCPUVAddr, u32 bytes, IMG_BOOL bPhysContig, u32 area_flags)
++{
++ LinuxMemArea *mem_area;
++
++ mem_area = kmem_cache_alloc(linux_mem_area_cache, GFP_KERNEL);
++
++ if (!mem_area)
++ return NULL;
++
++ mem_area->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++ mem_area->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++ mem_area->uData.sExternalKV.bPhysContig = (IMG_BOOL)(bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, bytes, bPhysContig));
++
++ if (mem_area->uData.sExternalKV.bPhysContig)
++ mem_area->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr;
++ else
++ mem_area->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr;
++
++ mem_area->ui32ByteSize = bytes;
++ mem_area->ui32AreaFlags = area_flags;
++ mem_area->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&mem_area->sMMapOffsetStructList);
++
++ return mem_area;
++}
++
++LinuxMemArea *
++NewIOLinuxMemArea(resource_size_t address, u32 bytes, u32 area_flags)
++{
++ LinuxMemArea *mem_area;
++
++ mem_area = kmem_cache_alloc(linux_mem_area_cache, GFP_KERNEL);
++
++ if (!mem_area)
++ return NULL;
++
++ mem_area->eAreaType = LINUX_MEM_AREA_IO;
++ mem_area->uData.sIO.CPUPhysAddr = address;
++ mem_area->ui32ByteSize = bytes;
++ mem_area->ui32AreaFlags = area_flags;
++ mem_area->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&mem_area->sMMapOffsetStructList);
++
++ return mem_area;
++}
++
++
++LinuxMemArea * alloc_pages_linux_mem_area(u32 bytes, u32 area_flags)
++{
++ LinuxMemArea *mem_area;
++ int page_count;
++ struct page **page_list;
++ IMG_HANDLE hBlockPageList;
++ int i;
++ PVRSRV_ERROR eError;
++
++ mem_area = kmem_cache_alloc(linux_mem_area_cache, GFP_KERNEL);
++ if (!mem_area)
++ goto failed_area_alloc;
++
++
++ page_count = RANGE_TO_PAGES(bytes);
++
++ eError = OSAllocMem(0, sizeof(*page_list) * page_count, (void **)&page_list, &hBlockPageList,
++ "Array of pages");
++
++ if(eError != PVRSRV_OK)
++ goto failed_page_list_alloc;
++
++ for(i=0; i<page_count; i++)
++ {
++ page_list[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
++ if(!page_list[i])
++ goto failed_alloc_pages;
++ }
++
++ mem_area->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++ mem_area->uData.sPageList.pvPageList = page_list;
++ mem_area->uData.sPageList.hBlockPageList = hBlockPageList;
++ mem_area->ui32ByteSize = bytes;
++ mem_area->ui32AreaFlags = area_flags;
++ mem_area->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&mem_area->sMMapOffsetStructList);
++
++ return mem_area;
++
++failed_alloc_pages:
++ for(i--; i >= 0; i--)
++ __free_pages(page_list[i], 0);
++
++ OSFreeMem(0, sizeof(*page_list) * page_count, page_list, hBlockPageList);
++ mem_area->uData.sPageList.pvPageList = NULL;
++failed_page_list_alloc:
++ kmem_cache_free(linux_mem_area_cache, mem_area);
++failed_area_alloc:
++ pr_debug("%s: failed", __FUNCTION__);
++
++ return NULL;
++}
++
++
++void free_pages_linux_mem_area(LinuxMemArea *mem_area)
++{
++ u32 page_count;
++ struct page **page_list;
++ IMG_HANDLE hBlockPageList;
++ int i;
++
++ BUG_ON(!mem_area);
++
++
++ page_count = RANGE_TO_PAGES(mem_area->ui32ByteSize);
++ page_list = mem_area->uData.sPageList.pvPageList;
++ hBlockPageList = mem_area->uData.sPageList.hBlockPageList;
++
++ for(i = 0; i < page_count; i++)
++ __free_pages(page_list[i], 0);
++
++
++ OSFreeMem(0, sizeof(*page_list) * page_count, page_list, hBlockPageList);
++ mem_area->uData.sPageList.pvPageList = NULL;
++}
++
++
++struct page* LinuxMemAreaOffsetToPage(LinuxMemArea *mem_area, u32 offset)
++{
++ u32 page_index;
++ u8 *addr;
++
++ switch(mem_area->eAreaType)
++ {
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ page_index = PHYS_TO_PFN(offset);
++ return mem_area->uData.sPageList.pvPageList[page_index];
++
++ case LINUX_MEM_AREA_VMALLOC:
++ addr = mem_area->uData.sVmalloc.pvVmallocAddress;
++ addr += offset;
++ return vmalloc_to_page(addr);
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return LinuxMemAreaOffsetToPage(mem_area->uData.sSubAlloc.psParentLinuxMemArea,
++ mem_area->uData.sSubAlloc.ui32ByteOffset
++ + offset);
++ default:
++ pr_err("%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++ __FUNCTION__, LinuxMemAreaTypeToString(mem_area->eAreaType));
++ return NULL;
++ }
++}
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *parent, u32 offset, u32 bytes)
++{
++ LinuxMemArea *mem_area;
++
++ BUG_ON(offset+bytes > parent->ui32ByteSize);
++
++ mem_area = kmem_cache_alloc(linux_mem_area_cache, GFP_KERNEL);
++ if (!mem_area)
++ return NULL;
++
++ mem_area->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++ mem_area->uData.sSubAlloc.psParentLinuxMemArea = parent;
++ mem_area->uData.sSubAlloc.ui32ByteOffset = offset;
++ mem_area->ui32ByteSize = bytes;
++ mem_area->ui32AreaFlags = parent->ui32AreaFlags;
++ mem_area->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&mem_area->sMMapOffsetStructList);
++
++ return mem_area;
++}
++
++
++void LinuxMemAreaDeepFree(LinuxMemArea *mem_area)
++{
++ /* FIXME: call vfree and co direct, and free the mem area centrally at the end */
++ switch(mem_area->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ vfree(mem_area->uData.sVmalloc.pvVmallocAddress);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ free_pages_linux_mem_area(mem_area);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ iounmap(mem_area->uData.sIORemap.pvIORemapCookie);
++ break;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ case LINUX_MEM_AREA_IO:
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ break;
++ default:
++ pr_debug("%s: Unknown are type (%d)\n",
++ __FUNCTION__, mem_area->eAreaType);
++ break;
++ }
++ kmem_cache_free(linux_mem_area_cache, mem_area);
++}
++
++
++void * LinuxMemAreaToCpuVAddr(LinuxMemArea *mem_area)
++{
++ switch(mem_area->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ return mem_area->uData.sVmalloc.pvVmallocAddress;
++ case LINUX_MEM_AREA_IOREMAP:
++ return mem_area->uData.sIORemap.pvIORemapCookie;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return mem_area->uData.sExternalKV.pvExternalKV;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ char *addr = LinuxMemAreaToCpuVAddr(mem_area->uData.sSubAlloc.psParentLinuxMemArea);
++ if (!addr)
++ return NULL;
++ return addr + mem_area->uData.sSubAlloc.ui32ByteOffset;
++ }
++ default:
++ return NULL;
++ }
++}
++
++
++resource_size_t LinuxMemAreaToCpuPAddr(LinuxMemArea *mem_area, u32 offset)
++{
++ resource_size_t address;
++
++ address = 0;
++
++ switch(mem_area->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ {
++ address = mem_area->uData.sIORemap.CPUPhysAddr;
++ address += offset;
++ break;
++ }
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ {
++ if (mem_area->uData.sExternalKV.bPhysContig)
++ {
++ IMG_CPU_PHYADDR CpuPAddr = SysSysPAddrToCpuPAddr(mem_area->uData.sExternalKV.uPhysAddr.SysPhysAddr);
++ address = CpuPAddr.uiAddr + offset;
++ } else {
++ u32 page_index = PHYS_TO_PFN(offset);
++ IMG_SYS_PHYADDR SysPAddr = mem_area->uData.sExternalKV.uPhysAddr.pSysPhysAddr[page_index];
++ IMG_CPU_PHYADDR CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
++ address = CpuPAddr.uiAddr + ADDR_TO_PAGE_OFFSET(offset);
++ }
++ break;
++ }
++ case LINUX_MEM_AREA_IO:
++ {
++ address = mem_area->uData.sIO.CPUPhysAddr;
++ address += offset;
++ break;
++ }
++ case LINUX_MEM_AREA_VMALLOC:
++ {
++ char *vaddr;
++ vaddr = mem_area->uData.sVmalloc.pvVmallocAddress;
++ vaddr += offset;
++ address = VMallocToPhys(vaddr);
++ break;
++ }
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ {
++ struct page *page;
++ u32 page_index = PHYS_TO_PFN(offset);
++ page = mem_area->uData.sPageList.pvPageList[page_index];
++ address = page_to_phys(page);
++ address += ADDR_TO_PAGE_OFFSET(offset);
++ break;
++ }
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ IMG_CPU_PHYADDR CpuPAddr =
++ OSMemHandleToCpuPAddr(mem_area->uData.sSubAlloc.psParentLinuxMemArea,
++ mem_area->uData.sSubAlloc.ui32ByteOffset
++ + offset);
++
++ address = CpuPAddr.uiAddr;
++ break;
++ }
++ default:
++ pr_debug("%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, mem_area->eAreaType);
++ break;
++ }
++
++ BUG_ON(!address);
++ return address;
++}
++
++
++IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *mem_area)
++{
++ switch(mem_area->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_IO:
++ return IMG_TRUE;
++
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return mem_area->uData.sExternalKV.bPhysContig;
++
++ case LINUX_MEM_AREA_VMALLOC:
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return IMG_FALSE;
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaPhysIsContig(mem_area->uData.sSubAlloc.psParentLinuxMemArea);
++
++ default:
++ pr_debug("%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, mem_area->eAreaType);
++ break;
++ }
++ return IMG_FALSE;
++}
++
++
++const char *
++LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++
++ switch(eMemAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ return "LINUX_MEM_AREA_IOREMAP";
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return "LINUX_MEM_AREA_EXTERNAL_KV";
++ case LINUX_MEM_AREA_IO:
++ return "LINUX_MEM_AREA_IO";
++ case LINUX_MEM_AREA_VMALLOC:
++ return "LINUX_MEM_AREA_VMALLOC";
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return "LINUX_MEM_AREA_SUB_ALLOC";
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return "LINUX_MEM_AREA_ALLOC_PAGES";
++ default:
++ BUG();
++ }
++
++ return "";
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mm.h
+@@ -0,0 +1,198 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++#include <linux/types.h>
++
++#include <asm/io.h>
++
++#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
++#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
++
++#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
++
++#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
++
++static inline u32 VMallocToPhys(void *pCpuVAddr)
++{
++ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr));
++
++}
++
++typedef enum {
++ LINUX_MEM_AREA_IOREMAP,
++ LINUX_MEM_AREA_EXTERNAL_KV,
++ LINUX_MEM_AREA_IO,
++ LINUX_MEM_AREA_VMALLOC,
++ LINUX_MEM_AREA_ALLOC_PAGES,
++ LINUX_MEM_AREA_SUB_ALLOC,
++ LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++ LINUX_MEM_AREA_TYPE eAreaType;
++ union _uData
++ {
++ struct _sIORemap
++ {
++
++ resource_size_t CPUPhysAddr;
++ void *pvIORemapCookie;
++ }sIORemap;
++ struct _sExternalKV
++ {
++
++ IMG_BOOL bPhysContig;
++ union {
++
++ IMG_SYS_PHYADDR SysPhysAddr;
++ IMG_SYS_PHYADDR *pSysPhysAddr;
++ } uPhysAddr;
++ void *pvExternalKV;
++ }sExternalKV;
++ struct _sIO
++ {
++ resource_size_t CPUPhysAddr;
++ }sIO;
++ struct _sVmalloc
++ {
++
++ void *pvVmallocAddress;
++ }sVmalloc;
++ struct _sPageList
++ {
++
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ }sPageList;
++ struct _sSubAlloc
++ {
++
++ LinuxMemArea *psParentLinuxMemArea;
++ u32 ui32ByteOffset;
++ }sSubAlloc;
++ }uData;
++
++ u32 ui32ByteSize;
++
++ u32 ui32AreaFlags;
++
++ IMG_BOOL bMMapRegistered;
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sMMapOffsetStructList;
++};
++
++int linux_mm_init(void);
++
++
++void linux_mm_cleanup(void);
++
++void *vmalloc_wrapper(u32 bytes, u32 alloc_flags);
++
++LinuxMemArea *vmalloc_linux_mem_area(u32 bytes, u32 area_flags);
++
++
++void *ioremap_wrapper(resource_size_t address, u32 bytes, u32 mapping_flags);
++
++
++LinuxMemArea *ioremap_linux_mem_area(resource_size_t address, u32 bytes, u32 area_flags);
++
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, void *pvCPUVAddr, u32 ui32Bytes, IMG_BOOL bPhysContig, u32 ui32AreaFlags);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, u32 ui32ByteOffset);
++
++
++LinuxMemArea *NewIOLinuxMemArea(resource_size_t address, u32 ui32Bytes, u32 ui32AreaFlags);
++
++
++LinuxMemArea *alloc_pages_linux_mem_area(u32 ui32Bytes, u32 ui32AreaFlags);
++
++
++void free_pages_linux_mem_area(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ u32 ui32ByteOffset,
++ u32 ui32Bytes);
++
++
++void LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++void *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++resource_size_t LinuxMemAreaToCpuPAddr(LinuxMemArea *mem_area, u32 offset);
++
++
++#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset))
++
++IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea);
++
++static inline LinuxMemArea *
++LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea)
++{
++ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++ }
++ else
++ {
++ return psLinuxMemArea;
++ }
++}
++
++
++static inline LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
++}
++
++
++const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG)
++const IMG_CHAR *HAPFlagsToString(u32 ui32Flags);
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mmap.c
+@@ -0,0 +1,844 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++#include <linux/mutex.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#include <linux/sched.h>
++#include <asm/current.h>
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "handle.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "bridged_support.h"
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++
++#if !defined(PVR_SECURE_HANDLES)
++#error "The mmap code requires PVR_SECURE_HANDLES"
++#endif
++
++static struct mutex g_sMMapMutex;
++
++static struct kmem_cache *g_psMemmapCache = NULL;
++static LIST_HEAD(g_sMMapAreaList);
++static LIST_HEAD(g_sMMapOffsetStructList);
++
++
++#define FIRST_PHYSICAL_PFN 0
++#define LAST_PHYSICAL_PFN 0x7fffffffUL
++#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
++#define LAST_SPECIAL_PFN 0xffffffffUL
++
++#define MAX_MMAP_HANDLE 0x7fffffffUL
++
++static inline IMG_BOOL
++PFNIsPhysical(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_PHYSICAL_PFN) && (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_BOOL
++PFNIsSpecial(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_SPECIAL_PFN) && (pfn <= LAST_SPECIAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_HANDLE
++MMapOffsetToHandle(IMG_UINT32 pfn)
++{
++ if (PFNIsPhysical(pfn))
++ {
++ PVR_ASSERT(PFNIsPhysical(pfn));
++ return IMG_NULL;
++ }
++
++ return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
++}
++
++static inline IMG_UINT32
++HandleToMMapOffset(IMG_HANDLE hHandle)
++{
++ IMG_UINT32 ulHandle = (IMG_UINT32)hHandle;
++
++ if (PFNIsSpecial(ulHandle))
++ {
++ PVR_ASSERT(PFNIsSpecial(ulHandle));
++ return 0;
++ }
++
++ return ulHandle + FIRST_SPECIAL_PFN;
++}
++
++static inline IMG_BOOL
++LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
++}
++
++static inline IMG_UINT32
++GetCurrentThreadID(IMG_VOID)
++{
++ return (IMG_UINT32)current->pid;
++}
++
++static PKV_OFFSET_STRUCT
++CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++#if defined(DEBUG)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ psOffsetStruct = kmem_cache_alloc(g_psMemmapCache, GFP_KERNEL);
++ if(psOffsetStruct == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++ return IMG_NULL;
++ }
++
++ psOffsetStruct->ui32MMapOffset = ui32Offset;
++
++ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++
++ psOffsetStruct->ui32Mapped = 0;
++
++ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
++
++
++ psOffsetStruct->ui32TID = GetCurrentThreadID();
++
++ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
++
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++ psOffsetStruct->ui32RefCount = 0;
++
++ psOffsetStruct->ui32UserVAddr = 0;
++
++ list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
++
++ return psOffsetStruct;
++}
++
++
++static IMG_VOID
++DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
++{
++ list_del(&psOffsetStruct->sAreaItem);
++
++ if (psOffsetStruct->bOnMMapList)
++ {
++ list_del(&psOffsetStruct->sMMapItem);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++
++ kmem_cache_free(g_psMemmapCache, psOffsetStruct);
++}
++
++
++static inline IMG_VOID
++DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32ByteOffset)
++{
++ IMG_UINT32 ui32PageAlignmentOffset;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++
++ *pui32ByteOffset = ui32PageAlignmentOffset;
++
++ *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++
++PVRSRV_ERROR
++PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ pui32RealByteSize,
++ pui32ByteOffset);
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
++ {
++
++ PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize);
++
++ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ *pui32UserVAddr = 0;
++
++ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
++ {
++ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
++ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
++ }
++ else
++ {
++ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
++ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
++ }
++
++ psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto exit_unlock;
++ }
++
++
++ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
++
++ psOffsetStruct->bOnMMapList = IMG_TRUE;
++
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (psOffsetStruct->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area 0x%p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++
++ psOffsetStruct->ui32RefCount--;
++
++ *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0));
++
++ *pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
++ *pui32RealByteSize = (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle 0x%lx (memory area 0x%p)", __FUNCTION__, hMHandle, psLinuxMemArea));
++
++ eError = PVRSRV_ERROR_GENERIC;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++static inline PKV_OFFSET_STRUCT
++FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_UINT32 ui32TID = GetCurrentThreadID();
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID)
++ {
++
++ if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID)
++ {
++ return psOffsetStruct;
++ }
++ }
++ }
++
++ return IMG_NULL;
++}
++
++
++static IMG_BOOL
++DoMapToUser(LinuxMemArea *psLinuxMemArea,
++ struct vm_area_struct* ps_vma,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32ByteSize;
++
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),
++ ps_vma,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset);
++ }
++
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
++
++#if defined (__sparc__)
++
++#error "SPARC not supported"
++#endif
++
++ if (PFNIsPhysical(ps_vma->vm_pgoff))
++ {
++ IMG_INT result;
++
++ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
++ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff);
++
++
++ result = io_remap_pfn_range(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot);
++
++ if(result == 0)
++ {
++ return IMG_TRUE;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
++ }
++
++ {
++
++ IMG_UINT32 ulVMAPos;
++ IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
++ IMG_UINT32 ui32PA;
++
++
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++
++ if (!pfn_valid(pfn))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%lx", __FUNCTION__, pfn));
++ return IMG_FALSE;
++ }
++ }
++
++
++ ulVMAPos = ps_vma->vm_start;
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn;
++ struct page *psPage;
++ IMG_INT result;
++
++ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++ PVR_ASSERT(pfn_valid(pfn));
++
++ psPage = pfn_to_page(pfn);
++
++ result = vm_insert_page(ps_vma, ulVMAPos, psPage);
++ if(result != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - vm_insert_page failed (%d)", __FUNCTION__, result));
++ return IMG_FALSE;
++ }
++ ulVMAPos += PAGE_SIZE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++MMapVOpenNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++ psOffsetStruct->ui32Mapped++;
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++
++ if (psOffsetStruct->ui32Mapped > 1)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %lu)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
++ PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
++ }
++
++}
++
++
++static void
++MMapVOpen(struct vm_area_struct* ps_vma)
++{
++ mutex_lock(&g_sMMapMutex);
++
++ MMapVOpenNoLock(ps_vma);
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++
++static IMG_VOID
++MMapVCloseNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++ psOffsetStruct->ui32Mapped--;
++ if (psOffsetStruct->ui32Mapped == 0)
++ {
++ if (psOffsetStruct->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct 0x%p has non-zero reference count (ui32RefCount = %lu). User mode address of start of mapping: 0x%lx", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr));
++ }
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ ps_vma->vm_private_data = NULL;
++
++}
++
++static void
++MMapVClose(struct vm_area_struct* ps_vma)
++{
++ mutex_lock(&g_sMMapMutex);
++
++ MMapVCloseNoLock(ps_vma);
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++
++static struct vm_operations_struct MMapIOOps =
++{
++ .open=MMapVOpen,
++ .close=MMapVClose
++};
++
++
++int
++PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ IMG_UINT32 ui32ByteSize;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ int iRetVal = 0;
++
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ mutex_lock(&g_sMMapMutex);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
++ " and ui32ByteSize %ld(0x%08lx)",
++ __FUNCTION__,
++ ps_vma->vm_pgoff,
++ ui32ByteSize, ui32ByteSize));
++
++ psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++#if defined(SUPPORT_DRI_DRM)
++ mutex_unlock(&g_sMMapMutex);
++
++
++ return drm_mmap(pFile, ps_vma);
++#else
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
++ __FUNCTION__, ps_vma->vm_pgoff));
++ iRetVal = -EINVAL;
++#endif
++ goto unlock_and_return;
++ }
++ list_del(&psOffsetStruct->sMMapItem);
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++
++ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
++ ((ps_vma->vm_flags & VM_SHARED) == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
++ __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++
++ ps_vma->vm_flags |= VM_RESERVED;
++ ps_vma->vm_flags |= VM_IO;
++
++
++ ps_vma->vm_flags |= VM_DONTEXPAND;
++
++
++ ps_vma->vm_flags |= VM_DONTCOPY;
++
++ ps_vma->vm_private_data = (void *)psOffsetStruct;
++
++ switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++
++ ps_vma->vm_ops = &MMapIOOps;
++
++ if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
++ {
++ iRetVal = -EAGAIN;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0)
++
++ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
++
++
++ MMapVOpenNoLock(ps_vma);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++ __FUNCTION__, ps_vma->vm_pgoff));
++
++unlock_and_return:
++ if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
++ {
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ mutex_unlock(&g_sMMapMutex);
++
++ return iRetVal;
++}
++
++
++
++
++PVRSRV_ERROR
++PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++#if defined(DEBUG)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ mutex_lock(&g_sMMapMutex);
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++
++ if(psLinuxMemArea->bMMapRegistered)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
++ __FUNCTION__, psLinuxMemArea));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
++
++ psLinuxMemArea->bMMapRegistered = IMG_TRUE;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++
++ mutex_lock(&g_sMMapMutex);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32Mapped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %lu", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++ else
++ {
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
++ }
++
++ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ list_del(&psLinuxMemArea->sMMapItem);
++
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ mutex_unlock(&g_sMMapMutex);
++ return eError;
++}
++
++
++PVRSRV_ERROR
++LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++ IMG_BOOL bWarn = IMG_FALSE;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ mutex_lock(&g_sMMapMutex);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (!bWarn)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
++ bWarn = IMG_TRUE;
++ }
++ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
++ PVR_ASSERT(psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ }
++
++ mutex_unlock(&g_sMMapMutex);
++}
++
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID
++PVRMMapInit(IMG_VOID)
++{
++ mutex_init(&g_sMMapMutex);
++
++ g_psMemmapCache = kmem_cache_create("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0, NULL);
++ if (!g_psMemmapCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ goto error;
++ }
++
++ return;
++
++error:
++ PVRMMapCleanup();
++ return;
++}
++
++
++IMG_VOID
++PVRMMapCleanup(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ if (!list_empty(&g_sMMapAreaList))
++ {
++ LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
++
++ PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
++ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ }
++ }
++ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
++
++ if(g_psMemmapCache)
++ {
++ kmem_cache_destroy(g_psMemmapCache);
++ g_psMemmapCache = NULL;
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mmap.h
+@@ -0,0 +1,102 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include "perproc.h"
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++
++ IMG_UINT32 ui32Mapped;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ LinuxMemArea *psLinuxMemArea;
++
++
++ IMG_UINT32 ui32TID;
++
++
++ IMG_UINT32 ui32PID;
++
++
++ IMG_BOOL bOnMMapList;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sAreaItem;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++IMG_VOID PVRMMapInit(IMG_VOID);
++
++
++IMG_VOID PVRMMapCleanup(IMG_VOID);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize, IMG_UINT32 *pui32UserVAddr);
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr);
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/module.c
+@@ -0,0 +1,747 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ #if defined(LDM_PLATFORM)
++ #define PVR_LDM_PLATFORM_MODULE
++ #define PVR_LDM_MODULE
++ #else
++ #if defined(LDM_PCI)
++ #define PVR_LDM_PCI_MODULE
++ #define PVR_LDM_MODULE
++ #endif
++ #endif
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/mutex.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#include <linux/pci.h>
++#endif
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mm.h"
++#include "mmap.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "private_data.h"
++#include "lock.h"
++#include "linkage.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++#define DRVNAME "pvrsrvkm"
++#define DEVNAME "pvrsrvkm"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static IMG_INT debug = DBGPRIV_WARNING;
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#endif
++
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++/*EXPORT_SYMBOL(PVRGetDisplayClassJTable); */
++/*EXPORT_SYMBOL(PVRGetBufferClassJTable); */
++
++
++#if defined(PVR_LDM_MODULE)
++static struct class *psPvrClass;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++static IMG_INT AssignedMajorNumber;
++
++static IMG_INT PVRSRVOpen(struct inode* pInode, struct file* pFile);
++static IMG_INT PVRSRVRelease(struct inode* pInode, struct file* pFile);
++
++static struct file_operations pvrsrv_fops = {
++ .owner=THIS_MODULE,
++ .unlocked_ioctl=PVRSRV_BridgeDispatchKM,
++ .open=PVRSRVOpen,
++ .release=PVRSRVRelease,
++ .mmap=PVRMMap,
++};
++#endif
++
++struct mutex gPVRSRVLock;
++
++IMG_UINT32 gui32ReleasePID;
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static IMG_UINT32 gPVRPowerLevel;
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#define LDM_DEV struct platform_device
++#define LDM_DRV struct platform_driver
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#define LDM_DEV struct pci_dev
++#define LDM_DRV struct pci_driver
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device);
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
++#endif
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *device);
++static IMG_INT PVRSRVDriverResume(LDM_DEV *device);
++
++#if defined(PVR_LDM_PCI_MODULE)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++ { PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID) },
++ { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .driver = {
++ .name = DRVNAME,
++ },
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .name = DRVNAME,
++ .id_table = powervr_id_table,
++#endif
++ .probe = PVRSRVDriverProbe,
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .remove = PVRSRVDriverRemove,
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .remove = __devexit_p(PVRSRVDriverRemove),
++#endif
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .shutdown = PVRSRVDriverShutdown,
++};
++
++LDM_DEV *gpsPVRLDMDev;
++
++#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE)
++
++static IMG_VOID PVRSRVDeviceRelease(struct device *pDevice)
++{
++ PVR_UNREFERENCED_PARAMETER(pDevice);
++}
++
++static struct platform_device powervr_device = {
++ .name = DEVNAME,
++ .id = -1,
++ .dev = {
++ .release = PVRSRVDeviceRelease
++ }
++};
++
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_INT __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++#if 0
++
++ if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ {
++ gpsPVRLDMDev = pDevice;
++
++ if (SysInitialise() != PVRSRV_OK)
++ {
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++ if (SysAcquireData(&psSysData) == PVRSRV_OK)
++ {
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++ SysDeinitialise(psSysData);
++
++ gpsPVRLDMDev = IMG_NULL;
++ }
++
++#if 0
++ if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++ return 0;
++#endif
++#if defined (PVR_LDM_PCI_MODULE)
++ return;
++#endif
++}
++
++
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *pDevice)
++{
++ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++ (IMG_VOID) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++}
++
++#endif
++
++
++#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
++#else
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
++ printk(KERN_ALERT "PVRSRVDriverSuspend(pDevice=%p)", pDevice);
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice)
++#else
++static IMG_INT PVRSRVDriverResume(LDM_DEV *pDevice)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
++ printk(KERN_ALERT "PVRSRVDriverResume(pDevice=%p)", pDevice);
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++#endif
++
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++ IMG_CHAR data_buffer[2];
++ IMG_UINT32 PVRPowerLevel;
++
++ if (count != sizeof(data_buffer))
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRPowerLevel = data_buffer[0] - '0';
++ if (PVRPowerLevel != gPVRPowerLevel)
++ {
++ if (PVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++ else
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++
++ gPVRPowerLevel = PVRPowerLevel;
++ }
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRPowerLevel);
++}
++
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ IMG_HANDLE hBlockAlloc;
++ IMG_INT iRet = -ENOMEM;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ mutex_lock(&gPVRSRVLock);
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ if (PVRSRVProcessConnect(ui32PID) != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
++ goto err_unlock;
++ }
++#endif
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ (IMG_PVOID *)&psPrivateData,
++ &hBlockAlloc,
++ "File Private Data");
++
++ if(eError != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ psPrivateData->hKernelMemInfo = NULL;
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psPrivateData->psDRMFile = pFile;
++
++ list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
++#endif
++ psPrivateData->ui32OpenPID = ui32PID;
++ psPrivateData->hBlockAlloc = hBlockAlloc;
++ PRIVATE_DATA(pFile) = psPrivateData;
++ iRet = 0;
++err_unlock:
++ mutex_unlock(&gPVRSRVLock);
++ return iRet;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVRelease(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ mutex_lock(&gPVRSRVLock);
++
++ psPrivateData = PRIVATE_DATA(pFile);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ list_del(&psPrivateData->sDRMAuthListItem);
++#endif
++
++
++ gui32ReleasePID = psPrivateData->ui32OpenPID;
++ PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
++ gui32ReleasePID = 0;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ psPrivateData, psPrivateData->hBlockAlloc);
++ PRIVATE_DATA(pFile) = NULL;
++
++ mutex_unlock(&gPVRSRVLock);
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRCore_Init(IMG_VOID)
++#else
++static IMG_INT __init PVRCore_Init(IMG_VOID)
++#endif
++{
++ IMG_INT error;
++#if !defined(PVR_LDM_MODULE)
++ PVRSRV_ERROR eError;
++#else
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ PVRDPFInit();
++#endif
++ PVR_TRACE(("PVRCore_Init"));
++
++ mutex_init(&gPVRSRVLock);
++
++#ifdef DEBUG
++ PVRDebugSetLevel(debug);
++#endif
++
++ if (CreateProcEntries ())
++ {
++ error = -ENOMEM;
++ return error;
++ }
++
++ if (PVROSFuncInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ PVRLinuxMUtilsInit();
++
++ if (linux_mm_init())
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ LinuxBridgeInit();
++
++ PVRMMapInit();
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ if ((error = platform_driver_register(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
++
++ goto init_failed;
++ }
++
++#if defined(MODULE)
++ if ((error = platform_device_register(&powervr_device)) != 0)
++ {
++ platform_driver_unregister(&powervr_driver);
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++ if ((error = pci_register_driver(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++
++#else
++
++ if ((eError = SysInitialise()) != PVRSRV_OK)
++ {
++ error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++ if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
++ {
++ printk("\nAtlas wrapper (FPGA image) version mismatch");
++ error = -ENODEV;
++ }
++#endif
++ goto init_failed;
++ }
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
++
++ error = -EBUSY;
++ goto sys_deinit;
++ }
++
++ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++ psPvrClass = class_create(THIS_MODULE, "pvr");
++
++ if (IS_ERR(psPvrClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
++ error = -EBUSY;
++ goto unregister_device;
++ }
++
++ psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0), NULL, DEVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
++ error = -EBUSY;
++ goto destroy_class;
++ }
++#endif
++
++ return 0;
++
++#if defined(PVR_LDM_MODULE)
++destroy_class:
++ class_destroy(psPvrClass);
++unregister_device:
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME);
++#endif
++#if !defined(SUPPORT_DRI_DRM)
++sys_deinit:
++#endif
++#if defined(PVR_LDM_MODULE)
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++ if (psSysData != IMG_NULL)
++ {
++ SysDeinitialise(psSysData);
++ }
++ }
++#endif
++init_failed:
++ PVRMMapCleanup();
++ linux_mm_cleanup();
++ LinuxBridgeDeInit();
++ PVROSFuncDeInit();
++ RemoveProcEntries();
++
++ return error;
++
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_VOID PVRCore_Cleanup(IMG_VOID)
++#else
++static IMG_VOID __exit PVRCore_Cleanup(IMG_VOID)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRCore_Cleanup"));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LDM_MODULE)
++ device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psPvrClass);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME);
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++
++ SysDeinitialise(psSysData);
++#endif
++
++ PVRMMapCleanup();
++
++ linux_mm_cleanup();
++
++ LinuxBridgeDeInit();
++
++ PVROSFuncDeInit();
++
++ RemoveProcEntries();
++
++ PVR_TRACE(("PVRCore_Cleanup: unloading"));
++}
++
++#if !defined(SUPPORT_DRI_DRM)
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mutils.c
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "mutils.h"
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++#define PAT_LINUX_X86_WC 1
++
++#define PAT_X86_ENTRY_BITS 8
++
++#define PAT_X86_BIT_PWT 1U
++#define PAT_X86_BIT_PCD 2U
++#define PAT_X86_BIT_PAT 4U
++#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT)
++
++static IMG_BOOL g_write_combining_available = IMG_FALSE;
++
++#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0)
++
++static inline IMG_UINT
++pvr_pat_index(pgprotval_t prot_val)
++{
++ IMG_UINT ret = 0;
++ pgprotval_t val = prot_val & _PAGE_CACHE_MASK;
++
++ ret |= PROT_TO_PAT_INDEX(val, PAT);
++ ret |= PROT_TO_PAT_INDEX(val, PCD);
++ ret |= PROT_TO_PAT_INDEX(val, PWT);
++
++ return ret;
++}
++
++static inline IMG_UINT
++pvr_pat_entry(u64 pat, IMG_UINT index)
++{
++ return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK;
++}
++
++static IMG_VOID
++PVRLinuxX86PATProbe(IMG_VOID)
++{
++
++ if (cpu_has_pat)
++ {
++ u64 pat;
++ IMG_UINT pat_index;
++ IMG_UINT pat_entry;
++
++ PVR_TRACE(("%s: PAT available", __FUNCTION__));
++
++ rdmsrl(MSR_IA32_CR_PAT, pat);
++ PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32)));
++ PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat)));
++
++ pat_index = pvr_pat_index(_PAGE_CACHE_WC);
++ PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index));
++
++ pat_entry = pvr_pat_entry(pat, pat_index);
++ PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC));
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC);
++#endif
++ }
++#if defined(DEBUG)
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ if (g_write_combining_available)
++ {
++ PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__));
++ }
++ else
++ {
++ PVR_TRACE(("%s: Write combining not available", __FUNCTION__));
++ }
++#else
++ PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__));
++#endif
++#endif
++}
++
++pgprot_t
++pvr_pgprot_writecombine(pgprot_t prot)
++{
++
++
++ return (g_write_combining_available) ?
++ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot);
++}
++#endif
++
++IMG_VOID
++PVRLinuxMUtilsInit(IMG_VOID)
++{
++#if defined(SUPPORT_LINUX_X86_PAT)
++ PVRLinuxX86PATProbe();
++#endif
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/mutils.h
+@@ -0,0 +1,89 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MUTILS_H__
++#define __IMG_LINUX_MUTILS_H__
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#if !(defined(__i386__))
++#if defined(SUPPORT_LINUX_X86_PAT)
++#undef SUPPORT_LINUX_X86_PAT
++#endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ pgprot_t pvr_pgprot_writecombine(pgprot_t prot);
++ #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv)
++#else
++ #if defined(__arm__) || defined(__sh__)
++ #define PGPROT_WC(pv) pgprot_writecombine(pv)
++ #else
++ #if defined(__i386__)
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #else
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #error Unsupported architecture!
++ #endif
++ #endif
++#endif
++
++#define PGPROT_UC(pv) pgprot_noncached(pv)
++
++#if defined(__i386__)
++ #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes)
++#else
++ #if defined(__arm__)
++ #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
++ #else
++ #define IOREMAP(pa, bytes) ioremap(pa, bytes)
++ #endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ #if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#else
++ #if defined(__arm__)
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#endif
++
++#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
++
++IMG_VOID PVRLinuxMUtilsInit(IMG_VOID);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/osfunc.c
+@@ -0,0 +1,2461 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/system.h>
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++#include <asm/cacheflush.h>
++#endif
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/hugetlb.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++#include <linux/spinlock.h>
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
++ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
++ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
++ defined(PVR_LINUX_USING_WORKQUEUES)
++#include <linux/workqueue.h>
++#endif
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "event.h"
++#include "linkage.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS (100)
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS) || \
++ defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++#if defined(__i386__)
++static void per_cpu_cache_flush(void *arg)
++{
++ PVR_UNREFERENCED_PARAMETER(arg);
++ wbinvd();
++}
++#endif
++
++#if !defined(SUPPORT_CPU_CACHED_BUFFERS)
++static
++#endif
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
++{
++#if defined(__arm__)
++ flush_cache_all();
++#elif defined(__i386__)
++
++ on_each_cpu(per_cpu_cache_flush, NULL, 1);
++#else
++#error "Implement full CPU cache flush for this CPU!"
++#endif
++}
++
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd)
++{
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrEnd);
++
++
++ OSFlushCPUCacheKM();
++}
++
++#endif
++
++#define HOST_ALLOC_MEM_USING_KMALLOC ((IMG_HANDLE)0)
++#define HOST_ALLOC_MEM_USING_VMALLOC ((IMG_HANDLE)1)
++
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ *ppvCpuVAddr = kmalloc(ui32Size, GFP_KERNEL);
++
++ if(*ppvCpuVAddr)
++ {
++ if (phBlockAlloc)
++ {
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC;
++ }
++ }
++ else
++ {
++ if (!phBlockAlloc)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ *ppvCpuVAddr = vmalloc_wrapper(ui32Size, PVRSRV_HAP_CACHED);
++ if (!*ppvCpuVAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ if (hBlockAlloc == HOST_ALLOC_MEM_USING_VMALLOC)
++ {
++ vfree(pvCpuVAddr);
++ }
++ else
++ {
++ kfree(pvCpuVAddr);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PageSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
++
++#if 0
++
++ if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = vmalloc_linux_mem_area(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++
++ psLinuxMemArea = alloc_pages_linux_mem_area(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = vmalloc_linux_mem_area(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++ if(ui32AllocFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED))
++ {
++ OSFlushCPUCacheKM();
++ }
++#endif
++
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++ *phOSMemHandle = psLinuxMemArea;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++ ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
++ __FUNCTION__, ui32AllocFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++ if(!psLinuxMemArea)
++ {
++ *phOSMemHandleRet = NULL;
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ *phOSMemHandleRet = psLinuxMemArea;
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = PVRMMapRegisterArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_register_area;
++ }
++
++ return PVRSRV_OK;
++
++failed_register_area:
++ *phOSMemHandleRet = NULL;
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ return eError;
++}
++
++PVRSRV_ERROR
++OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++ if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++IMG_CPU_PHYADDR
++OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR cpu_addr;
++ BUG_ON(!hOSMemHandle);
++
++ memset(&cpu_addr, 0, sizeof(cpu_addr));
++
++ cpu_addr.uiAddr = LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++
++ return cpu_addr;
++}
++
++
++
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ IMG_UINT8 *Src,*Dst;
++ IMG_INT i;
++
++ Src=(IMG_UINT8 *)pvSrc;
++ Dst=(IMG_UINT8 *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ Dst[i]=Src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMSET)
++ IMG_UINT8 *Buff;
++ IMG_INT i;
++
++ Buff=(IMG_UINT8 *)pvDest;
++ for(i=0;i<ui32Size;i++)
++ {
++ Buff[i]=ui8Value;
++ }
++#else
++ memset(pvDest, (IMG_INT) ui8Value, (size_t) ui32Size);
++#endif
++}
++
++
++IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
++{
++ return (strcpy(pszDest, pszSrc));
++}
++
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
++{
++ va_list argList;
++ IMG_INT32 iCount;
++
++ va_start(argList, pszFormat);
++ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
++ va_end(argList);
++
++ return iCount;
++}
++
++IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process."));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
++ }
++}
++
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
++{
++ psResource->ui32ID = 0;
++ psResource->ui32Lock = 0;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
++{
++ OSBreakResourceLock (psResource, psResource->ui32ID);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
++{
++ ENV_DATA *psEnvData;
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL,
++ "Environment Data") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ &psEnvData->pvBridgeData, IMG_NULL,
++ "Bridge Data") != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++
++ *ppvEnvSpecificData = psEnvData;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
++{
++ ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData;
++
++ PVR_ASSERT(!psEnvData->bMISRInstalled);
++ PVR_ASSERT(!psEnvData->bLISRInstalled);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL);
++ psEnvData->pvBridgeData = IMG_NULL;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
++{
++ schedule();
++}
++
++
++
++IMG_UINT32 OSClockus(IMG_VOID)
++{
++ IMG_UINT32 time, j = jiffies;
++
++ time = j * (1000000 / HZ);
++
++ return time;
++}
++
++
++
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
++{
++ udelay(ui32Timeus);
++}
++
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
++{
++ if (in_interrupt())
++ {
++ return KERNEL_ID;
++ }
++
++ return (IMG_UINT32)task_tgid_nr(current);
++}
++
++
++IMG_UINT32 OSGetPageSize(IMG_VOID)
++{
++#if defined(__sh__)
++ IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
++
++ return (ui32ReturnValue);
++#else
++ return PAGE_SIZE;
++#endif
++}
++
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++ )
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psDeviceNode->psSysData);
++ }
++
++out:
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++}
++
++
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++ psSysData = (SYS_DATA *)dev_id;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVSystemLISR(psSysData);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psSysData);
++ }
++
++out:
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++}
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x", pszISRName, ui32Irq, pvDeviceNode));
++
++ if(request_irq(ui32Irq, DeviceISRWrapper, IRQF_SHARED, pszISRName, pvDeviceNode))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvDeviceNode;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq, pvSysData));
++
++ if(request_irq(ui32Irq, SystemISRWrapper, IRQF_SHARED, "PowerVR", pvSysData))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvSysData;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++static void MISRWrapper(struct work_struct *data)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
++
++ psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
++
++ if (psEnvData->psWorkQueue == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper);
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ destroy_workqueue(psEnvData->psWorkQueue);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++#else
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
++static void MISRWrapper(struct work_struct *data)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper);
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ flush_scheduled_work();
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ schedule_work(&psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++
++
++static void MISRWrapper(unsigned long data)
++{
++ SYS_DATA *psSysData;
++
++ psSysData = (SYS_DATA *)data;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
++
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ tasklet_kill(&psEnvData->sMISRTasklet);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ tasklet_schedule(&psEnvData->sMISRTasklet);
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++#endif
++
++
++IMG_VOID OSPanic(IMG_VOID)
++{
++ BUG();
++}
++
++#define OS_TAS(p) xchg((p), 1)
++PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE *psResource,
++ IMG_UINT32 ui32ID)
++
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(!OS_TAS(&psResource->ui32Lock))
++ psResource->ui32ID = ui32ID;
++ else
++ eError = PVRSRV_ERROR_GENERIC;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource));
++ PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++
++IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
++ ? IMG_TRUE
++ : IMG_FALSE;
++}
++
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID *pvLinAddr)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = (IMG_UINTPTR_T)VMallocToPhys(pvLinAddr);
++
++ return CpuPAddr;
++}
++
++
++IMG_VOID *
++OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ if(phOSMemHandle)
++ {
++ *phOSMemHandle = (IMG_HANDLE)0;
++ }
++
++ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ IMG_VOID *pvIORemapCookie;
++ pvIORemapCookie = ioremap_wrapper(BasePAddr.uiAddr, ui32Bytes, ui32MappingFlags);
++ if(pvIORemapCookie == IMG_NULL)
++ {
++ return NULL;
++ }
++ return pvIORemapCookie;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSReservePhys otherwise)"));
++ return NULL;
++ }
++}
++
++IMG_BOOL
++OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc)
++{
++ PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, ui32Bytes, pvLinAddr));
++
++ PVR_UNREFERENCED_PARAMETER(hPageAlloc);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ iounmap(pvLinAddr);
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSUnReservePhys otherwise)"));
++ return IMG_FALSE;
++ }
++}
++
++static PVRSRV_ERROR
++RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_BOOL bPhysContig,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
++
++ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle)
++{
++ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR
++OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++}
++
++PVRSRV_ERROR
++OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++#if 0
++
++ if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++
++ psLinuxMemArea = ioremap_linux_mem_area(BasePAddr.uiAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++ psLinuxMemArea = NewIOLinuxMemArea(BasePAddr.uiAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = ioremap_linux_mem_area(BasePAddr.uiAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSUnReservePhys(IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr);
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++ IMG_VOID *pvKernLinAddr;
++
++ pvKernLinAddr = kmalloc(ui32Size, GFP_KERNEL);
++
++ if (!pvKernLinAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ *pvLinAddr = pvKernLinAddr;
++
++ psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);
++
++ return PVRSRV_OK;
++#endif
++}
++
++
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__));
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ kfree(pvLinAddr);
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++#if !defined(NO_HARDWARE)
++ return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#else
++ return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#endif
++}
++
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++#if !defined(NO_HARDWARE)
++ writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#else
++ *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++#endif
++}
++
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
++{
++ int err;
++ IMG_UINT32 i;
++ PVR_PCI_DEV *psPVRPCI;
++
++ PVR_TRACE(("OSPCISetDev"));
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL,
++ "PCI Device") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++ return IMG_NULL;
++ }
++
++ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++ psPVRPCI->ePCIFlags = eFlags;
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
++ return IMG_NULL;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_set_master(psPVRPCI->psPCIDev);
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_1 ||
++ psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_2) // Disable MSI for Menlow
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ else if(!psPVRPCI->psPCIDev->msi_enabled)
++ {
++ err = pci_enable_msi(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err));
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ }
++ }
++#else
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel"));
++#endif
++ }
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++
++ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
++}
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++{
++ struct pci_dev *psPCIDev;
++
++ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++ if (psPCIDev == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++ return IMG_NULL;
++ }
++
++ return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ *pui32IRQ = psPVRPCI->psPCIDev->irq;
++
++ return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++ HOST_PCI_ADDR_RANGE_FUNC_LEN,
++ HOST_PCI_ADDR_RANGE_FUNC_START,
++ HOST_PCI_ADDR_RANGE_FUNC_END,
++ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
++static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++ PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ if (ui32Index >= DEVICE_COUNT_RESOURCE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
++ return 0;
++
++ }
++
++ switch (eFunc)
++ {
++ case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_START:
++ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_END:
++ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++ {
++ int err;
++
++ err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
++ return 0;
++ }
++ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++ return 1;
++ }
++ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++ if (psPVRPCI->abPCIResourceInUse[ui32Index])
++ {
++ pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
++ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++ }
++ return 1;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
++ break;
++ }
++
++ return 0;
++}
++
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int i;
++
++ PVR_TRACE(("OSPCIReleaseDev"));
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++ }
++
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++ pci_disable_msi(psPVRPCI->psPCIDev);
++ }
++#endif
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_clear_master(psPVRPCI->psPCIDev);
++ }
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int i;
++ int err;
++
++ PVR_TRACE(("OSPCISuspendDev"));
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ }
++ }
++
++ err = pci_save_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D3hot);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
++ break;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int err;
++ int i;
++
++ PVR_TRACE(("OSPCIResumeDev"));
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D0);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
++ return PVRSRV_ERROR_GENERIC;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_restore_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ pci_set_master(psPVRPCI->psPCIDev);
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ err = pci_request_region(psPVRPCI->psPCIDev, i, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err));
++ }
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++#define OS_MAX_TIMERS 8
++
++typedef struct TIMER_CALLBACK_DATA_TAG
++{
++ IMG_BOOL bInUse;
++ PFN_TIMER_FUNC pfnTimerFunc;
++ IMG_VOID *pvData;
++ struct timer_list sTimer;
++ IMG_UINT32 ui32Delay;
++ IMG_BOOL bActive;
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ struct work_struct sWork;
++#endif
++}TIMER_CALLBACK_DATA;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static struct workqueue_struct *psTimerWorkQueue;
++#endif
++
++static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++DEFINE_MUTEX(sTimerStructLock);
++#else
++static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
++#endif
++
++static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
++{
++ if (!psTimerCBData->bActive)
++ return;
++
++
++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++
++
++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
++
++ OSTimerCallbackBody(psTimerCBData);
++}
++#endif
++
++static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ int res;
++
++ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
++ if (res == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
++ }
++#else
++ OSTimerCallbackBody(psTimerCBData);
++#endif
++}
++
++
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData;
++ IMG_UINT32 ui32i;
++#if !defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ unsigned long ulLockFlags;
++#endif
++
++
++ if(!pfnTimerFunc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
++ return IMG_NULL;
++ }
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_lock(&sTimerStructLock);
++#else
++ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
++#endif
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ psTimerCBData = &sTimers[ui32i];
++ if (!psTimerCBData->bInUse)
++ {
++ psTimerCBData->bInUse = IMG_TRUE;
++ break;
++ }
++ }
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_unlock(&sTimerStructLock);
++#else
++ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
++#endif
++ if (ui32i >= OS_MAX_TIMERS)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
++ return IMG_NULL;
++ }
++
++ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++ psTimerCBData->pvData = pvData;
++ psTimerCBData->bActive = IMG_FALSE;
++
++
++
++
++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++ ? 1
++ : ((HZ * ui32MsTimeout) / 1000);
++
++ init_timer(&psTimerCBData->sTimer);
++
++
++ psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper;
++ psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ return (IMG_HANDLE)(ui32i + 1);
++}
++
++
++static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
++{
++ IMG_UINT32 ui32i = ((IMG_UINT32)hTimer) - 1;
++
++ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
++
++ return &sTimers[ui32i];
++}
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bInUse = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_TRUE;
++
++
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++
++ add_timer(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_FALSE;
++ smp_mb();
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++
++ del_timer_sync(&psTimerCBData->sTimer);
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
++{
++
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(pszName)
++ {
++
++ strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
++ }
++ else
++ {
++
++ static IMG_UINT16 ui16NameIndex = 0;
++ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++ }
++
++ if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++
++}
++
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(psEventObject->hOSEventKM)
++ {
++ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++
++}
++
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectSignal(hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
++{
++ return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE;
++}
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
++{
++ IMG_INT linuxType;
++
++ if (eVerification == PVR_VERIFY_READ)
++ {
++ linuxType = VERIFY_READ;
++ }
++ else
++ {
++ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
++ linuxType = VERIFY_WRITE;
++ }
++
++ return access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++typedef enum _eWrapMemType_
++{
++ WRAP_TYPE_CLEANUP,
++ WRAP_TYPE_GET_USER_PAGES,
++ WRAP_TYPE_FIND_VMA_PAGES,
++ WRAP_TYPE_FIND_VMA_PFN
++} eWrapMemType;
++
++typedef struct _sWrapMemInfo_
++{
++ eWrapMemType eType;
++ IMG_INT iNumPages;
++ struct page **ppsPages;
++ IMG_SYS_PHYADDR *psPhysAddr;
++ IMG_INT iPageOffset;
++ IMG_INT iContiguous;
++#if defined(DEBUG)
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulBeyondEndAddr;
++ struct vm_area_struct *psVMArea;
++#endif
++ IMG_BOOL bWrapWorkaround;
++} sWrapMemInfo;
++
++static IMG_VOID CheckPagesContiguous(sWrapMemInfo *psInfo)
++{
++ IMG_INT i;
++ IMG_UINT32 ui32AddrChk;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ psInfo->iContiguous = 1;
++
++ for (i = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr;
++ i < psInfo->iNumPages;
++ i++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psInfo->psPhysAddr[i].uiAddr != ui32AddrChk)
++ {
++ psInfo->iContiguous = 0;
++ break;
++ }
++ }
++}
++
++static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea, IMG_UINT32 ulCPUVAddr)
++{
++ pgd_t *psPGD;
++ pud_t *psPUD;
++ pmd_t *psPMD;
++ pte_t *psPTE;
++ struct mm_struct *psMM = psVMArea->vm_mm;
++ IMG_UINT32 ulPFN;
++ spinlock_t *psPTLock;
++ struct page *psPage;
++
++ psPGD = pgd_offset(psMM, ulCPUVAddr);
++ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
++ return NULL;
++
++ psPUD = pud_offset(psPGD, ulCPUVAddr);
++ if (pud_none(*psPUD) || pud_bad(*psPUD))
++ return NULL;
++
++ psPMD = pmd_offset(psPUD, ulCPUVAddr);
++ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
++ return NULL;
++
++ psPage = NULL;
++
++ psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock);
++ if ((pte_none(*psPTE) != 0) || (pte_present(*psPTE) == 0) || (pte_write(*psPTE) == 0))
++ goto exit_unlock;
++
++ ulPFN = pte_pfn(*psPTE);
++ if (!pfn_valid(ulPFN))
++ goto exit_unlock;
++
++ psPage = pfn_to_page(ulPFN);
++
++ get_page(psPage);
++
++exit_unlock:
++ pte_unmap_unlock(psPTE, psPTLock);
++
++ return psPage;
++}
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem;
++ IMG_INT i;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ switch (psInfo->eType)
++ {
++ case WRAP_TYPE_CLEANUP:
++ break;
++ case WRAP_TYPE_FIND_VMA_PFN:
++ break;
++ case WRAP_TYPE_GET_USER_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++
++
++ if (!PageReserved(psPage));
++ {
++ SetPageDirty(psPage);
++ }
++ page_cache_release(psPage);
++ }
++ break;
++ }
++ case WRAP_TYPE_FIND_VMA_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[i]);
++ else
++ put_page_testzero(psInfo->ppsPages[i]);
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ if (psInfo->ppsPages != IMG_NULL)
++ {
++ kfree(psInfo->ppsPages);
++ }
++
++ if (psInfo->psPhysAddr != IMG_NULL)
++ {
++ kfree(psInfo->psPhysAddr);
++ }
++
++ kfree(psInfo);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ IMG_UINT32 ulStartAddrOrig = (IMG_UINT32) pvCPUVAddr;
++ IMG_UINT32 ulAddrRangeOrig = (IMG_UINT32) ui32Bytes;
++ IMG_UINT32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulAddrRange;
++ IMG_UINT32 ulBeyondEndAddr;
++ IMG_UINT32 ulAddr;
++ IMG_INT iNumPagesMapped;
++ IMG_INT i;
++ struct vm_area_struct *psVMArea;
++ sWrapMemInfo *psInfo;
++
++
++ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
++ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
++ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
++
++
++ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
++ if (psInfo == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate information structure"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psInfo, 0, sizeof(*psInfo));
++ psInfo->bWrapWorkaround = bWrapWorkaround;
++
++#if defined(DEBUG)
++ psInfo->ulStartAddr = ulStartAddrOrig;
++ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
++#endif
++
++ psInfo->iNumPages = (IMG_INT)(ulAddrRange >> PAGE_SHIFT);
++ psInfo->iPageOffset = (IMG_INT)(ulStartAddrOrig & ~PAGE_MASK);
++
++
++ psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL);
++ if (psInfo->psPhysAddr == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL);
++ if (psInfo->ppsPages == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ down_read(&current->mm->mmap_sem);
++ iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (iNumPagesMapped >= 0)
++ {
++
++ if (iNumPagesMapped != psInfo->iNumPages)
++ {
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, iNumPagesMapped));
++
++
++ for (i = 0; i < iNumPagesMapped; i++)
++ {
++ page_cache_release(psInfo->ppsPages[i]);
++
++ }
++ goto error_free;
++ }
++
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psInfo->ppsPages[i]) << PAGE_SHIFT;
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++
++ }
++
++ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
++
++ goto exit_check;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), trying something else", iNumPagesMapped));
++
++
++ down_read(&current->mm->mmap_sem);
++
++ psVMArea = find_vma(current->mm, ulStartAddrOrig);
++ if (psVMArea == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %lx", ulStartAddrOrig));
++
++ goto error_release_mmap_sem;
++ }
++#if defined(DEBUG)
++ psInfo->psVMArea = psVMArea;
++#endif
++
++
++ if (ulStartAddrOrig < psVMArea->vm_start)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Start address %lx is outside of the region returned by find_vma", ulStartAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if (ulBeyondEndAddrOrig > psVMArea->vm_end)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: End address %lx is outside of the region returned by find_vma", ulBeyondEndAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != (VM_IO | VM_RESERVED))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ struct page *psPage;
++
++ BUG_ON(i >= psInfo->iNumPages);
++
++ psPage = CPUVAddrToPage(psVMArea, ulAddr);
++ if (psPage == NULL)
++ {
++ IMG_INT j;
++
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't lookup page structure for address 0x%lx, trying something else", ulAddr));
++
++
++ for (j = 0; j < i; j++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[j]);
++ else
++ put_page_testzero(psInfo->ppsPages[j]);
++ }
++ break;
++ }
++
++ psInfo->ppsPages[i] = psPage;
++ }
++
++ BUG_ON(i > psInfo->iNumPages);
++ if (i == psInfo->iNumPages)
++ {
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES;
++ }
++ else
++ {
++#if defined(PVR_SECURE_HANDLES)
++
++
++
++ if ((psVMArea->vm_flags & VM_PFNMAP) == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region isn't a raw PFN mapping. Giving up."));
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = ((ulAddr - psVMArea->vm_start) + (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++ BUG_ON(i != psInfo->iNumPages);
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
++
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region can't be locked down"));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Raw PFN mappings not supported. Giving up."));
++ goto error_release_mmap_sem;
++#endif
++ }
++
++ up_read(&current->mm->mmap_sem);
++
++exit_check:
++ CheckPagesContiguous(psInfo);
++
++
++
++ *phOSWrapMem = (IMG_HANDLE)psInfo;
++
++ return PVRSRV_OK;
++
++error_release_mmap_sem:
++ up_read(&current->mm->mmap_sem);
++error_free:
++ psInfo->eType = WRAP_TYPE_CLEANUP;
++ OSReleasePhysPageAddr((IMG_HANDLE)psInfo);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ {
++ IMG_UINT32 ui32i;
++
++ psTimerWorkQueue = create_workqueue("pvr_timer");
++ if (psTimerWorkQueue == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
++ return PVRSRV_ERROR_GENERIC;
++
++ }
++
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
++
++ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
++ }
++ }
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_VOID PVROSFuncDeInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ if (psTimerWorkQueue != NULL)
++ {
++ destroy_workqueue(psTimerWorkQueue);
++ }
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/osperproc.c
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "osperproc.h"
++
++#include "env_perproc.h"
++#include "proc.h"
++
++extern IMG_UINT32 gui32ReleasePID;
++
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ phOsPrivateData,
++ &hBlockAlloc,
++ "Environment per Process Data");
++
++ if (eError != PVRSRV_OK)
++ {
++ *phOsPrivateData = IMG_NULL;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
++ OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));
++
++ psEnvPerProc->hBlockAlloc = hBlockAlloc;
++
++
++ LinuxMMapPerProcessConnect(psEnvPerProc);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (hOsPrivateData == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;
++
++
++ LinuxMMapPerProcessDisconnect(psEnvPerProc);
++
++
++ RemovePerProcessProcDir(psEnvPerProc);
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ hOsPrivateData,
++ psEnvPerProc->hBlockAlloc);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError));
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ return LinuxMMapPerProcessHandleOptions(psHandleBase);
++}
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID)
++{
++ if(!gui32ReleasePID)
++ return NULL;
++ return PVRSRVPerProcessPrivateData(gui32ReleasePID);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pdump.c
+@@ -0,0 +1,662 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined (SUPPORT_SGX)
++#if defined (PDUMP)
++
++#include <asm/atomic.h>
++#include <stdarg.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvrversion.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>
++
++static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
++static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
++static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++static IMG_UINT32 DbgGetFrame (PDBG_STREAM psStream);
++static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++static IMG_UINT32 DbgWrite (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(a,b) (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
++
++
++
++IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
++ "ScriptStream2",
++ "DriverInfoStream"};
++typedef struct PDBG_PDUMP_STATE_TAG
++{
++ PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++ IMG_UINT32 ui32ParamFileNum;
++
++ IMG_CHAR *pszMsg;
++ IMG_CHAR *pszScript;
++ IMG_CHAR *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
++
++#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++
++
++
++IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
++
++static inline IMG_BOOL PDumpSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
++ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
++ if ((!*phScript) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phMsg = (IMG_HANDLE)gsDBGPdumpState.pszMsg;
++ *pui32MaxLen = SZ_MSG_SIZE_MAX;
++ if ((!*phMsg) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *ppszFile = gsDBGPdumpState.pszFile;
++ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
++ if ((!*ppszFile) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteString2(hScript, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
++{
++ IMG_CHAR* pszBuf = hBuf;
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
++{
++ IMG_UINT32 n;
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
++{
++
++}
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
++{
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_CHAR* pszBuf = hBuffer;
++ IMG_UINT32 ui32Count = 0;
++
++ while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
++ {
++ ui32Count++;
++ }
++ return(ui32Count);
++}
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_UINT32 ui32Count = 0;
++ IMG_CHAR* pszBuf = hBuffer;
++
++
++ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
++
++
++ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++ if ((ui32Count >= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count-1] = '\r';
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++}
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream)
++{
++ return (IMG_HANDLE)gsDBGPdumpState.psStream[ePDumpStream];
++}
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream)
++{
++ PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream];
++ return gpfnDbgDrv->pfnGetStreamOffset(psStream);
++}
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID)
++{
++ return gsDBGPdumpState.ui32ParamFileNum;
++}
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ PDBG_STREAM psStream = (PDBG_STREAM)hStream;
++ return PDumpWriteILock(psStream,
++ psui8Data,
++ ui32Size,
++ ui32Flags);
++}
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
++{
++
++ PVR_UNREFERENCED_PARAMETER(hStream);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++}
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID)
++{
++ if(gpfnDbgDrv)
++ {
++ return IMG_TRUE;
++ }
++ return IMG_FALSE;
++}
++
++inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
++
++
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++ else
++ {
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++}
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ *pui32PageOffset = sCpuPAddr.uiAddr & (HOST_PAGESIZE() -1);
++ }
++ else
++ {
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ *pui32PageOffset = (IMG_UINT32)pui8LinAddr & (HOST_PAGESIZE() - 1);
++ }
++}
++
++
++
++IMG_VOID PDumpInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++
++ if (!gpfnDbgDrv)
++ {
++ DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
++
++
++
++
++ if (gpfnDbgDrv == IMG_NULL)
++ {
++ return;
++ }
++
++ if(!gsDBGPdumpState.pszFile)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0,
++ "Filename string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszMsg)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0,
++ "Message string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszScript)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0,
++ "Script string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
++ DEBUG_CAPMODE_FRAMED,
++ DEBUG_OUTMODE_STREAMENABLE,
++ 0,
++ 10);
++
++ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
++ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
++ }
++
++ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
++ PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FILE);
++ PDUMPCOMMENT("Start of Init Phase");
++ }
++
++ return;
++
++init_failed:
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++
++IMG_VOID PDumpDeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++ }
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Start Init Phase");
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Stop Init Phase");
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
++{
++ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++
++IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
++{
++ if (PDumpSuspended())
++ {
++ return IMG_FALSE;
++ }
++ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
++}
++
++PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
++{
++ IMG_UINT32 ui32Stream;
++
++ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
++ {
++ if (gsDBGPdumpState.psStream[ui32Stream])
++ {
++ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame)
++{
++ *pui32Frame = DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++ return PVRSRV_OK;
++}
++
++
++
++static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags);
++}
++
++
++static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Written = 0;
++ IMG_UINT32 ui32Off = 0;
++
++ if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0))
++ {
++ return IMG_TRUE;
++ }
++
++
++
++
++ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
++ {
++ IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++ {
++ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
++ {
++ DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
++ gsDBGPdumpState.ui32ParamFileNum++;
++ }
++ }
++ }
++
++
++ while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF))
++ {
++ ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
++
++
++
++
++ if (ui32Written == 0)
++ {
++ OSReleaseThreadQuanta();
++ }
++
++ if (ui32Written != 0xFFFFFFFF)
++ {
++ ui32Off += ui32Written;
++ ui32Count -= ui32Written;
++ }
++ }
++
++ if (ui32Written == 0xFFFFFFFF)
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
++{
++ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++
++static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream)
++{
++ return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32BytesWritten;
++
++ if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0)
++ {
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ (psStream->ui32Start == 0xFFFFFFFFUL) &&
++ (psStream->ui32End == 0xFFFFFFFFUL) &&
++ psStream->bInitPhaseComplete)
++ {
++ ui32BytesWritten = ui32BCount;
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++ else
++ {
++ if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
++ {
++ IMG_UINT32 ui32DbgFlags;
++
++ ui32DbgFlags = 0;
++ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++ {
++ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++ }
++
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, 1, ui32DbgFlags);
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++
++ return ui32BytesWritten;
++}
++
++
++IMG_VOID PDumpSuspendKM(IMG_VOID)
++{
++ atomic_inc(&gsPDumpSuspended);
++}
++
++IMG_VOID PDumpResumeKM(IMG_VOID)
++{
++ atomic_dec(&gsPDumpSuspended);
++}
++
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/private_data.h
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_PRIVATE_DATA_H_
++#define __INCLUDED_PRIVATE_DATA_H_
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include <linux/list.h>
++#include <drm/drmP.h>
++#endif
++
++typedef struct
++{
++
++ IMG_UINT32 ui32OpenPID;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++
++ IMG_HANDLE hKernelMemInfo;
++#endif
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ struct list_head sDRMAuthListItem;
++
++ struct drm_file *psDRMFile;
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++
++ IMG_UINT64 ui64Stamp;
++#endif
++
++
++ IMG_HANDLE hBlockAlloc;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ IMG_PVOID pPriv;
++#endif
++}
++PVRSRV_FILE_PRIVATE_DATA;
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/proc.c
+@@ -0,0 +1,958 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "linkage.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++
++
++static struct proc_dir_entry * dir;
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++static off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off);
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off);
++#endif
++
++
++static const IMG_CHAR PVRProcDirRoot[] = "pvr";
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file);
++static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos);
++static void pvr_proc_seq_stop (struct seq_file *m, void *v);
++static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos);
++static int pvr_proc_seq_show (struct seq_file *m, void *v);
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
++
++static struct file_operations pvr_proc_operations =
++{
++ .open = pvr_proc_open,
++ .read = seq_read,
++ .write = pvr_proc_write,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++static struct seq_operations pvr_proc_seq_operations =
++{
++ .start = pvr_proc_seq_start,
++ .next = pvr_proc_seq_next,
++ .stop = pvr_proc_seq_stop,
++ .show = pvr_proc_seq_show,
++};
++
++static struct proc_dir_entry* g_pProcQueue;
++static struct proc_dir_entry* g_pProcVersion;
++static struct proc_dir_entry* g_pProcSysNodes;
++
++#ifdef DEBUG
++static struct proc_dir_entry* g_pProcDebugLevel;
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++static struct proc_dir_entry* g_pProcPowerLevel;
++#endif
++
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el);
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off);
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++{
++ IMG_INT n;
++ size_t space = size - (size_t)off;
++ va_list ap;
++
++ PVR_ASSERT(space >= 0);
++
++ va_start (ap, format);
++
++ n = vsnprintf (buffer+off, space, format, ap);
++
++ va_end (ap);
++
++ if (n >= (IMG_INT)space || n < 0)
++ {
++
++ buffer[size - 1] = 0;
++ return (off_t)(size - 1);
++ }
++ else
++ {
++ return (off + (off_t)n);
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
++{
++
++ if(!off)
++ return (void*)2;
++ return NULL;
++}
++
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ if(off == 1)
++ return (void*)2;
++
++ return NULL;
++}
++
++
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file)
++{
++ IMG_INT ret = seq_open(file, &pvr_proc_seq_operations);
++
++ struct seq_file *seq = (struct seq_file*)file->private_data;
++ struct proc_dir_entry* pvr_proc_entry = PDE(inode);
++
++
++ seq->private = pvr_proc_entry->data;
++ return ret;
++}
++
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct proc_dir_entry * dp;
++
++ dp = PDE(inode);
++
++ if (!dp->write_proc)
++ return -EIO;
++
++ return dp->write_proc(file, buffer, count, dp->data);
++}
++
++
++static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_TRUE);
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_FALSE);
++}
++
++static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ (*pos)++;
++ if( handlers->next != NULL)
++ return handlers->next( proc_seq_file, v, *pos );
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ handlers->show( proc_seq_file,v );
++ return 0;
++}
++
++
++
++static struct proc_dir_entry* CreateProcEntryInDirSeq(
++ struct proc_dir_entry *pdir,
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++ return NULL;
++ }
++
++ mode = S_IFREG;
++
++ if (show_handler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file=create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++ PVR_PROC_SEQ_HANDLERS *seq_handlers;
++
++ file->proc_fops = &pvr_proc_operations;
++ file->write_proc = whandler;
++
++
++ file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL);
++ if(file->data)
++ {
++ seq_handlers = (PVR_PROC_SEQ_HANDLERS*)file->data;
++ seq_handlers->next = next_handler;
++ seq_handlers->show = show_handler;
++ seq_handlers->off2element = off2element_handler;
++ seq_handlers->startstop = startstop_handler;
++ seq_handlers->data = data;
++
++ return file;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++ return 0;
++}
++
++
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ )
++{
++ return CreateProcEntrySeq(name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++ NULL);
++}
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ return CreateProcEntryInDirSeq(
++ dir,
++ name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++ NULL
++ );
++}
++
++
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot));
++ return NULL;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data"));
++
++ return NULL;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++ return NULL;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID));
++ return NULL;
++ }
++ }
++ }
++
++ return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler,
++ show_handler,off2element_handler,startstop_handler,whandler);
++}
++
++
++IMG_VOID RemoveProcEntrySeq( struct proc_dir_entry* proc_entry )
++{
++ if (dir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, proc_entry->name));
++
++ remove_proc_entry(proc_entry->name, dir);
++ if( data)
++ kfree( data );
++
++ }
++}
++
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", proc_entry->name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", proc_entry->name, psPerProc->psProcDir->name));
++
++ remove_proc_entry(proc_entry->name, psPerProc->psProcDir);
++ if(data)
++ kfree( data );
++ }
++}
++
++#endif
++
++static IMG_INT pvr_read_proc(IMG_CHAR *page, IMG_CHAR **start, off_t off,
++ IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ pvr_read_proc_t *pprn = (pvr_read_proc_t *)data;
++
++ off_t len = pprn (page, (size_t)count, off);
++
++ if (len == END_OF_FILE)
++ {
++ len = 0;
++ *eof = 1;
++ }
++ else if (!len)
++ {
++ *start = (IMG_CHAR *) 0;
++ }
++ else
++ {
++ *start = (IMG_CHAR *) 1;
++ }
++
++ return len;
++}
++
++
++static IMG_INT CreateProcEntryInDir(struct proc_dir_entry *pdir, const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!pdir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDir: parent directory doesn't exist"));
++
++ return -ENOMEM;
++ }
++
++ mode = S_IFREG;
++
++ if (rhandler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++ file->read_proc = rhandler;
++ file->write_proc = whandler;
++ file->data = data;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name, pdir->name));
++
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot create proc entry %s in %s", name, pdir->name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: /proc/%s doesn't exist", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: no per process data"));
++
++ return -ENOMEM;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++
++ return -ENOMEM;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", PVRProcDirRoot, ui32PID));
++
++ return -ENOMEM;
++ }
++ }
++ }
++
++ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreateProcReadEntry(const IMG_CHAR * name, pvr_read_proc_t handler)
++{
++ struct proc_dir_entry * file;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++ }
++
++ file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (IMG_VOID *)handler);
++
++ if (file)
++ return 0;
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntries(IMG_VOID)
++{
++ dir = proc_mkdir (PVRProcDirRoot, NULL);
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL);
++ g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL);
++ g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL);
++
++ if(!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes)
++#else
++ if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++ CreateProcReadEntry("version", procDumpVersion) ||
++ CreateProcReadEntry("nodes", procDumpSysNodes))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL,
++ ProcSeqShowDebugLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRDebugProcSetLevel);
++ if(!g_pProcDebugLevel)
++#else
++ if (CreateProcEntry ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL,
++ ProcSeqShowPowerLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRProcSetPowerLevel);
++ if(!g_pProcPowerLevel)
++#else
++ if (CreateProcEntry("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#endif
++#endif
++
++ return 0;
++}
++
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name)
++{
++ if (dir)
++ {
++ remove_proc_entry(name, dir);
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR *name)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ remove_proc_entry(name, psPerProc->psProcDir);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", name, psPerProc->psProcDir->name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->psProcDir)
++ {
++ while (psPerProc->psProcDir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name));
++
++ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
++ }
++ RemoveProcEntry(psPerProc->psProcDir->name);
++ }
++}
++
++IMG_VOID RemoveProcEntries(IMG_VOID)
++{
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcDebugLevel );
++#else
++ RemoveProcEntry("debug_level");
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcPowerLevel );
++#else
++ RemoveProcEntry("power_control");
++#endif
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcQueue);
++ RemoveProcEntrySeq(g_pProcVersion);
++ RemoveProcEntrySeq(g_pProcSysNodes);
++#else
++ RemoveProcEntry("queue");
++ RemoveProcEntry("version");
++ RemoveProcEntry("nodes");
++#endif
++
++ while (dir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name));
++
++ RemoveProcEntry(dir->subdir->name);
++ }
++
++ remove_proc_entry(PVRProcDirRoot, NULL);
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString);
++}
++
++#else
++
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ }
++
++	SysAcquireData(&psSysData);
++
++ if (off == 1)
++ {
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ if(strlen(pszSystemVersionString)
++ + strlen("System Version String: \n")
++ + 1 > size)
++ {
++ return 0;
++ }
++ return printAppend(buf, size, 0,
++ "System Version String: %s\n",
++ pszSystemVersionString);
++ }
++
++ return END_OF_FILE;
++}
++
++#endif
++
++
++static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++ switch (deviceType)
++ {
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceType);
++
++ return text;
++ }
++ }
++}
++
++
++static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++ switch (deviceClass)
++ {
++ case PVRSRV_DEVICE_CLASS_3D:
++ {
++ return "3D";
++ }
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ return "display";
++ }
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ return "buffer";
++ }
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceClass);
++ return text;
++ }
++ }
++}
++
++IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ seq_printf( sfile,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++
++}
++
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++
++ return (void*)psDevNode;
++}
++
++#else
++
++static
++off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ off_t len;
++
++
++ if (size < 80)
++ {
++ return 0;
++ }
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++ if (!psDevNode)
++ {
++ return END_OF_FILE;
++ }
++
++ len = printAppend(buf, size, 0,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++ return (len);
++}
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/proc.h
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(IMG_CHAR *, size_t, off_t);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t);
++typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t);
++typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*);
++typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start);
++
++typedef struct _PVR_PROC_SEQ_HANDLERS_ {
++ pvr_next_proc_seq_t *next;
++ pvr_show_proc_seq_t *show;
++ pvr_off2element_proc_seq_t *off2element;
++ pvr_startstop_proc_seq_t *startstop;
++ IMG_VOID *data;
++} PVR_PROC_SEQ_HANDLERS;
++
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off);
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off);
++
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++ __attribute__((format(printf, 4, 5)));
++
++IMG_INT CreateProcEntries(IMG_VOID);
++
++IMG_INT CreateProcReadEntry (const IMG_CHAR * name, pvr_read_proc_t handler);
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemoveProcEntries(IMG_VOID);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ );
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++
++IMG_VOID RemoveProcEntrySeq(struct proc_dir_entry* proc_entry);
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry);
++
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_bridge_k.c
+@@ -0,0 +1,652 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "pvr_bridge_km.h"
++
++#include <linux/mutex.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#include "pvr_drm.h"
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++
++#ifdef MODULE_TEST
++#include "pvr_test_bridge.h"
++#include "kern_test.h"
++#endif
++
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_ProcBridgeStats =0;
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off);
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start);
++
++#else
++static off_t printLinuxBridgeStats(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++extern struct mutex gPVRSRVLock;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++static IMG_UINT64 ui64Stamp;
++#endif
++
++PVRSRV_ERROR
++LinuxBridgeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcBridgeStats = CreateProcReadEntrySeq(
++ "bridge_stats",
++ NULL,
++ ProcSeqNextBridgeStats,
++ ProcSeqShowBridgeStats,
++ ProcSeqOff2ElementBridgeStats,
++ ProcSeqStartstopBridgeStats
++ );
++ iStatus = !g_ProcBridgeStats ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++#endif
++
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++ return CommonBridgeInit();
++}
++
++IMG_VOID
++LinuxBridgeDeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcBridgeStats);
++#else
++ RemoveProcEntry("bridge_stats");
++#endif
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ mutex_lock(&gPVRSRVLock);
++ }
++ else
++ {
++ mutex_unlock(&gPVRSRVLock);
++ }
++}
++
++
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ return (void*)0;
++ }
++
++
++ return (void*)&g_BridgeDispatchTable[off-1];
++}
++
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementBridgeStats(sfile,off);
++}
++
++
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = ( PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ return;
++ }
++
++ seq_printf(sfile,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++}
++
++#else
++
++static off_t
++printLinuxBridgeStats(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++ off_t Ret;
++
++ mutex_lock(&gPVRSRVLock);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ goto unlock_and_return;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 300)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psEntry = &g_BridgeDispatchTable[off-1];
++ Ret = printAppend(buffer, count, 0,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++ mutex_unlock(&gPVRSRVLock);
++ return Ret;
++}
++#endif
++#endif
++
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT
++PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++IMG_INT32
++PVRSRV_BridgeDispatchKM(struct file *pFile, IMG_UINT unref__ ioctlCmd, IMG_UINT32 arg)
++#endif
++{
++ IMG_UINT32 cmd;
++#if !defined(SUPPORT_DRI_DRM)
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++#endif
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_INT err = -EFAULT;
++
++ mutex_lock(&gPVRSRVLock);
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVR_ASSERT(psBridgePackageKM != IMG_NULL);
++#else
++ PVR_UNREFERENCED_PARAMETER(ioctlCmd);
++
++ psBridgePackageKM = &sBridgePackageKM;
++
++ if(!OSAccessOK(PVR_VERIFY_WRITE,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
++ __FUNCTION__));
++
++ goto unlock_and_return;
++ }
++
++
++ if(OSCopyFromUser(IMG_NULL,
++ psBridgePackageKM,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE))
++ != PVRSRV_OK)
++ {
++ goto unlock_and_return;
++ }
++#endif
++
++ cmd = psBridgePackageKM->ui32BridgeID;
++
++#if defined(MODULE_TEST)
++ switch (cmd)
++ {
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM1:
++ {
++ PVRSRV_ERROR eError = MemTest1();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM2:
++ {
++ PVRSRV_ERROR eError = MemTest2();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE:
++ {
++ PVRSRV_ERROR eError = ResourceTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT:
++ {
++ PVRSRV_ERROR eError = EventObjectTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING:
++ {
++ PVRSRV_ERROR eError = MemMappingTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID:
++ {
++ PVRSRV_ERROR eError = ProcessIDTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS:
++ {
++ PVRSRV_ERROR eError = ClockusWaitusTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_TIMER:
++ {
++ PVRSRV_ERROR eError = TimerTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV:
++ {
++ PVRSRV_ERROR eError = PrivSrvTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA:
++ {
++ IMG_UINT32 ui32PID;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ PVRSRV_ERROR eError;
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d", ui32PID);
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++
++ eError = CopyDataTest(psBridgePackageKM->pvParamIn, psBridgePackageKM->pvParamOut, psPerProc);
++
++ *(PVRSRV_ERROR*)psBridgePackageKM->pvParamOut = eError;
++ err = 0;
++ goto unlock_and_return;
++ }
++
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT:
++ {
++ PVRSRV_ERROR eError = PowerMgmtTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ }
++#endif
++
++ if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_PVOID *)&psPerProc,
++ psBridgePackageKM->hKernelServices,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
++ __FUNCTION__, eError));
++ goto unlock_and_return;
++ }
++
++ if(psPerProc->ui32PID != ui32PID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
++ "belonging to process %d", __FUNCTION__, ui32PID,
++ psPerProc->ui32PID));
++ goto unlock_and_return;
++ }
++ }
++ else
++ {
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if(psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++ "Couldn't create per-process data area"));
++ goto unlock_and_return;
++ }
++ }
++
++ psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo "
++ "per file descriptor", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
++ (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(!psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no "
++ "associated MemInfo handle", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ psMapDevMemIN->hKernelMemInfo = psPrivateData->hKernelMemInfo;
++ break;
++ }
++
++ default:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried "
++ "to use privileged service", __FUNCTION__));
++ goto unlock_and_return;
++ }
++ break;
++ }
++ }
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ int authenticated = pFile->authenticated;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (authenticated)
++ {
++ break;
++ }
++
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__));
++ err = -EFAULT;
++ goto unlock_and_return;
++ }
++
++ list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem)
++ {
++ struct drm_file *psDRMFile = psPrivateData->psDRMFile;
++
++ if (pFile->master == psDRMFile->master)
++ {
++ authenticated |= psDRMFile->authenticated;
++ if (authenticated)
++ {
++ break;
++ }
++ }
++ }
++
++ if (!authenticated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__));
++ err = -EPERM;
++ goto unlock_and_return;
++ }
++ break;
++ }
++ default:
++ break;
++ }
++#endif
++
++ err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
++ if(err != PVRSRV_OK)
++ goto unlock_and_return;
++
++ switch(cmd)
++ {
++#if defined(PVR_SECURE_FD_EXPORT)
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT =
++ (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp = ++ui64Stamp;
++#endif
++ break;
++ }
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++ psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp = psPrivateData->ui64Stamp;
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut;
++ psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp = ++ui64Stamp;
++ break;
++ }
++#endif
++
++ default:
++ break;
++ }
++
++unlock_and_return:
++ mutex_unlock(&gPVRSRVLock);
++ return err;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_debug.c
+@@ -0,0 +1,428 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/hardirq.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/tty.h>
++#include <linux/mutex.h>
++#include <stdarg.h>
++#include "img_types.h"
++#include "servicesext.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "linkage.h"
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++#define PVR_MAX_FILEPATH_LEN 256
++
++static IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#endif
++
++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
++
++static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
++
++static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
++
++static struct mutex gsDebugMutexNonIRQ;
++
++static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
++
++#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
++
++static inline void GetBufferLock(unsigned long *pulLockFlags)
++{
++ /* This is broken! */
++ if (USE_SPIN_LOCK)
++ {
++ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
++ }
++ else
++ {
++ mutex_lock(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void ReleaseBufferLock(unsigned long ulLockFlags)
++{
++ /* and this is even more broken */
++ if (USE_SPIN_LOCK)
++ {
++ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
++ }
++ else
++ {
++ mutex_unlock(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
++{
++ if (USE_SPIN_LOCK)
++ {
++ *ppszBuf = gszBufferIRQ;
++ *pui32BufSiz = sizeof(gszBufferIRQ);
++ }
++ else
++ {
++ *ppszBuf = gszBufferNonIRQ;
++ *pui32BufSiz = sizeof(gszBufferNonIRQ);
++ }
++}
++
++static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs)
++{
++ IMG_UINT32 ui32Used;
++ IMG_UINT32 ui32Space;
++ IMG_INT32 i32Len;
++
++ ui32Used = strlen(pszBuf);
++ BUG_ON(ui32Used >= ui32BufSiz);
++ ui32Space = ui32BufSiz - ui32Used;
++
++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
++ pszBuf[ui32BufSiz - 1] = 0;
++
++
++ return (i32Len < 0 || i32Len >= ui32Space);
++}
++
++IMG_VOID PVRDPFInit(IMG_VOID)
++{
++ mutex_init(&gsDebugMutexNonIRQ);
++}
++
++IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
++{
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++ va_end(vaArgs);
++
++}
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++IMG_VOID PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
++{
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
++ BUG();
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
++{
++ va_list VArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(VArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ strncpy(pszBuf, "PVR: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end(VArgs);
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
++{
++ va_list VArgs;
++ IMG_BOOL bTrunc;
++
++ va_start (VArgs, pszFormat);
++
++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
++
++ va_end (VArgs);
++
++ return bTrunc;
++}
++
++IMG_VOID PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFullFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++ const IMG_CHAR *pszFileName = pszFullFileName;
++ IMG_CHAR *pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1));
++ break;
++ }
++ default:
++ {
++ strncpy (pszBuf, "PVR_K:(Unknown message level)", (ui32BufSiz -1));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++ }
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++
++ if (!bTrace)
++ {
++#ifdef DEBUG_LOG_PATH_TRUNCATE
++
++ static IMG_CHAR szFileNameRewrite[PVR_MAX_FILEPATH_LEN];
++
++ IMG_CHAR* pszTruncIter;
++ IMG_CHAR* pszTruncBackInter;
++
++
++ pszFileName = pszFullFileName + strlen(DEBUG_LOG_PATH_TRUNCATE)+1;
++
++
++ strncpy(szFileNameRewrite, pszFileName,PVR_MAX_FILEPATH_LEN);
++
++ if(strlen(szFileNameRewrite) == PVR_MAX_FILEPATH_LEN-1) {
++ IMG_CHAR szTruncateMassage[] = "FILENAME TRUNCATED";
++ strcpy(szFileNameRewrite + (PVR_MAX_FILEPATH_LEN - 1 - strlen(szTruncateMassage)), szTruncateMassage);
++ }
++
++ pszTruncIter = szFileNameRewrite;
++ while(*pszTruncIter++ != 0)
++ {
++ IMG_CHAR* pszNextStartPoint;
++
++ if(
++ !( ( *pszTruncIter == '/' && (pszTruncIter-4 >= szFileNameRewrite) ) &&
++ ( *(pszTruncIter-1) == '.') &&
++ ( *(pszTruncIter-2) == '.') &&
++ ( *(pszTruncIter-3) == '/') )
++ ) continue;
++
++
++ pszTruncBackInter = pszTruncIter - 3;
++ while(*(--pszTruncBackInter) != '/')
++ {
++ if(pszTruncBackInter <= szFileNameRewrite) break;
++ }
++ pszNextStartPoint = pszTruncBackInter;
++
++
++ while(*pszTruncIter != 0)
++ {
++ *pszTruncBackInter++ = *pszTruncIter++;
++ }
++ *pszTruncBackInter = 0;
++
++
++ pszTruncIter = pszNextStartPoint;
++ }
++
++ pszFileName = szFileNameRewrite;
++
++ if(*pszFileName == '/') pszFileName++;
++#endif
++
++#if !defined(__sh__)
++ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ if (BAppend(pszBuf, ui32BufSiz, " [%lu, %s]", ui32Line, pszFileName))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end (vaArgs);
++ }
++}
++
++#endif
++
++#if defined(DEBUG)
++
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
++{
++ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",(IMG_UINT)uDebugLevel);
++
++ gPVRDebugLevel = uDebugLevel;
++}
++
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++#define _PROC_SET_BUFFER_SZ 2
++ IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ];
++
++ if (count != _PROC_SET_BUFFER_SZ)
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRDebugSetLevel(data_buffer[0] - '0');
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRDebugLevel);
++}
++
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_drm.c
+@@ -0,0 +1,306 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SUPPORT_DRI_DRM)
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/mutex.h>
++#include <asm/ioctl.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "pvr_bridge.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "pvrversion.h"
++#include "lock.h"
++#include "linkage.h"
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#define MAKENAME_HELPER(x, y) x ## y
++#define MAKENAME(x, y) MAKENAME_HELPER(x, y)
++
++#define PVR_DRM_NAME "pvrsrvkm"
++#define PVR_DRM_DESC "Imagination Technologies PVR DRM"
++
++#define PVR_PCI_IDS \
++ {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0, 0, 0}
++
++struct pci_dev *gpsPVRLDMDev;
++struct drm_device *gpsPVRDRMDev;
++
++#define PVR_DRM_FILE struct drm_file *
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static struct pci_device_id asPciIdList[] = {
++ PVR_PCI_IDS
++};
++#endif
++
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
++{
++ IMG_INT iRes;
++
++ PVR_TRACE(("PVRSRVDrmLoad"));
++
++ gpsPVRDRMDev = dev;
++ gpsPVRLDMDev = dev->pdev;
++
++#if defined(PDUMP)
++ iRes = dbgdrv_init();
++ if (iRes != 0)
++ {
++ return iRes;
++ }
++#endif
++
++ iRes = PVRCore_Init();
++ if (iRes != 0)
++ {
++ goto exit_dbgdrv_cleanup;
++ }
++
++#if defined(DISPLAY_CONTROLLER)
++ iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(dev);
++ if (iRes != 0)
++ {
++ goto exit_pvrcore_cleanup;
++ }
++#endif
++ return 0;
++
++#if defined(DISPLAY_CONTROLLER)
++exit_pvrcore_cleanup:
++ PVRCore_Cleanup();
++#endif
++exit_dbgdrv_cleanup:
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++ return iRes;
++}
++
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev)
++{
++ PVR_TRACE(("PVRSRVDrmUnload"));
++
++#if defined(DISPLAY_CONTROLLER)
++ PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(dev);
++#endif
++
++ PVRCore_Cleanup();
++
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++
++ return 0;
++}
++
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVOpen(dev, file);
++}
++
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ PVRSRVRelease(dev, file);
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++IMG_INT
++PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRDRMPCIBusIDField(struct drm_device *dev, IMG_UINT32 *pui32Field, IMG_UINT32 ui32FieldType)
++{
++ struct pci_dev *psPCIDev = (struct pci_dev *)dev->pdev;
++
++ switch (ui32FieldType)
++ {
++ case PVR_DRM_PCI_DOMAIN:
++ *pui32Field = pci_domain_nr(psPCIDev->bus);
++ break;
++
++ case PVR_DRM_PCI_BUS:
++ *pui32Field = psPCIDev->bus->number;
++ break;
++
++ case PVR_DRM_PCI_DEV:
++ *pui32Field = PCI_SLOT(psPCIDev->devfn);
++ break;
++
++ case PVR_DRM_PCI_FUNC:
++ *pui32Field = PCI_FUNC(psPCIDev->devfn);
++ break;
++
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ IMG_UINT32 *pui32Args = (IMG_UINT32 *)arg;
++ IMG_UINT32 ui32Cmd = pui32Args[0];
++ IMG_UINT32 ui32Arg1 = pui32Args[1];
++ IMG_UINT32 *pui32OutArg = (IMG_UINT32 *)arg;
++ IMG_INT ret = 0;
++
++ mutex_lock(&gPVRSRVLock);
++
++ switch (ui32Cmd)
++ {
++ case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
++ *pui32OutArg = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_TYPE:
++ *pui32OutArg = PVR_DRM_BUS_TYPE_PCI;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_FIELD:
++ ret = PVRDRMPCIBusIDField(dev, pui32OutArg, ui32Arg1);
++
++ default:
++ ret = -EFAULT;
++ }
++
++ mutex_unlock(&gPVRSRVLock);
++
++ return ret;
++}
++
++#if 0
++struct drm_ioctl_desc sPVRDrmIoctls[] = {
++ DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++ DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++ DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++#if defined(PDUMP)
++ DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++};
++
++static IMG_INT pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
++
++static struct drm_driver sPVRDrmDriver =
++{
++ .driver_features = 0,
++ .dev_priv_size = sizeof(sPVRDrmBuffer),
++ .load = PVRSRVDrmLoad,
++ .unload = PVRSRVDrmUnload,
++ .open = PVRSRVDrmOpen,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .ioctls = sPVRDrmIoctls,
++ .fops =
++ {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .ioctl = drm_ioctl,
++ .mmap = PVRMMap,
++ .poll = drm_poll,
++ .fasync = drm_fasync,
++ },
++ .pci_driver =
++ {
++ .name = PVR_DRM_NAME,
++ .id_table = asPciIdList,
++ },
++
++ .name = PVR_DRM_NAME,
++ .desc = PVR_DRM_DESC,
++ .date = PVR_BUILD_DATE,
++ .major = PVRVERSION_MAJ,
++ .minor = PVRVERSION_MIN,
++ .patchlevel = PVRVERSION_BUILD,
++};
++
++static IMG_INT __init PVRSRVDrmInit(IMG_VOID)
++{
++ IMG_INT iRes;
++ sPVRDrmDriver.num_ioctls = pvr_max_ioctl;
++
++
++ PVRDPFInit();
++
++ iRes = drm_init(&sPVRDrmDriver);
++
++ return iRes;
++}
++
++static IMG_VOID __exit PVRSRVDrmExit(IMG_VOID)
++{
++ drm_exit(&sPVRDrmDriver);
++}
++
++module_init(PVRSRVDrmInit);
++module_exit(PVRSRVDrmExit);
++#endif
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux-intel/pvr_drm.h
+@@ -0,0 +1,80 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_H__)
++#define __PVR_DRM_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y
++#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y)
++
++IMG_INT PVRCore_Init(IMG_VOID);
++IMG_VOID PVRCore_Cleanup(IMG_VOID);
++IMG_INT PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVRelease(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state);
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice);
++
++IMG_INT PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev);
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
++IMG_INT PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++#if defined(DISPLAY_CONTROLLER)
++extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *);
++extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *);
++#endif
++
++#if defined(PDUMP)
++int dbgdrv_init(void);
++void dbgdrv_cleanup(void);
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++#define PVR_DRM_SRVKM_IOCTL _IO(0, PVR_DRM_SRVKM_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL _IO(0, PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL _IO(0, PVR_DRM_UNPRIV_CMD)
++#define PVR_DRM_DBGDRV_IOCTL _IO(0, PVR_DRM_DBGDRV_CMD)
++#endif
++
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/env_data.h
+@@ -0,0 +1,66 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++#include <linux/workqueue.h>
++#endif
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
++
++typedef struct _PVR_PCI_DEV_TAG
++{
++ struct pci_dev *psPCIDev;
++ HOST_PCI_INIT_FLAGS ePCIFlags;
++ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
++
++typedef struct _ENV_DATA_TAG
++{
++ IMG_VOID *pvBridgeData;
++ struct pm_dev *psPowerDevice;
++ IMG_BOOL bLISRInstalled;
++ IMG_BOOL bMISRInstalled;
++ IMG_UINT32 ui32IRQ;
++ IMG_VOID *pvISRCookie;
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct workqueue_struct *psWorkQueue;
++#endif
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct work_struct sMISRWork;
++ IMG_VOID *pvMISRData;
++#else
++ struct tasklet_struct sMISRTasklet;
++#endif
++} ENV_DATA;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __ENV_PERPROC_H__
++#define __ENV_PERPROC_H__
++
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++
++#include "services.h"
++#include "handle.h"
++
++typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
++{
++ IMG_HANDLE hBlockAlloc;
++ struct proc_dir_entry *psProcDir;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ struct list_head sDRMAuthListHead;
++#endif
++} PVRSRV_ENV_PER_PROCESS_DATA;
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/event.c
+@@ -0,0 +1,273 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++ rwlock_t sLock;
++ struct list_head sList;
++
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++ atomic_t sTimeStamp;
++ IMG_UINT32 ui32TimeStampPrevious;
++#if defined(DEBUG)
++ IMG_UINT ui32Stats;
++#endif
++ wait_queue_head_t sWait;
++ struct list_head sList;
++ IMG_HANDLE hResItem;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ (IMG_VOID **)&psEvenObjectList, IMG_NULL,
++ "Linux Event Object List") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++ rwlock_init(&psEvenObjectList->sLock);
++
++ *phEventObjectList = (IMG_HANDLE *) psEvenObjectList;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
++{
++
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
++
++ if(psEvenObjectList)
++ {
++ if (!list_empty(&psEvenObjectList->sList))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEvenObjectList, IMG_NULL);
++
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject)
++{
++ if(hOSEventObjectList)
++ {
++ if(hOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectListDelete: Event object waits: %lu", psLinuxEventObject->ui32Stats));
++#endif
++ if(ResManFreeResByPtr(psLinuxEventObject->hResItem) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++ }
++ }
++ return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_del(&psLinuxEventObject->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %lu", psLinuxEventObject->ui32Stats));
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
++ (IMG_VOID **)&psLinuxEventObject, IMG_NULL,
++ "Linux Event Object") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
++ psLinuxEventObject->ui32TimeStampPrevious = 0;
++
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats = 0;
++#endif
++ init_waitqueue_head(&psLinuxEventObject->sWait);
++
++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++ psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_EVENT_OBJECT,
++ psLinuxEventObject,
++ 0,
++ &LinuxEventObjectDeleteCallback);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++ *phOSEventObject = psLinuxEventObject;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ struct list_head *psListEntry, *psListEntryTemp, *psList;
++ psList = &psLinuxEventObjectList->sList;
++
++ list_for_each_safe(psListEntry, psListEntryTemp, psList)
++ {
++
++ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
++
++ atomic_inc(&psLinuxEventObject->sTimeStamp);
++ wake_up_interruptible(&psLinuxEventObject->sWait);
++ }
++
++ return PVRSRV_OK;
++
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
++{
++ IMG_UINT32 ui32TimeStamp;
++ DEFINE_WAIT(sWait);
++
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++ IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
++
++ do
++ {
++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
++ ui32TimeStamp = atomic_read(&psLinuxEventObject->sTimeStamp);
++
++ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
++ {
++ break;
++ }
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++
++ ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);
++
++ LinuxLockMutex(&gPVRSRVLock);
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats++;
++#endif
++
++
++ } while (ui32TimeOutJiffies);
++
++ finish_wait(&psLinuxEventObject->sWait, &sWait);
++
++ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
++
++ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
++
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/event.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject);
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/linkage.h
+@@ -0,0 +1,61 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LINKAGE_H__
++#define __LINKAGE_H__
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_INT32 PVRSRV_BridgeDispatchKM(struct file *file, IMG_UINT cmd, IMG_UINT32 arg);
++#endif
++
++IMG_VOID PVRDPFInit(IMG_VOID);
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID);
++IMG_VOID PVROSFuncDeInit(IMG_VOID);
++
++#ifdef DEBUG
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++
++#endif
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/lock.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LOCK_H__
++#define __LOCK_H__
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mm.c
+@@ -0,0 +1,2360 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "syscommon.h"
++#include "mutils.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ #include "lists.h"
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++typedef enum {
++ DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ DEBUG_MEM_ALLOC_TYPE_IO,
++ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ DEBUG_MEM_ALLOC_TYPE_COUNT
++}DEBUG_MEM_ALLOC_TYPE;
++
++typedef struct _DEBUG_MEM_ALLOC_REC
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ IMG_VOID *pvKey;
++ IMG_VOID *pvCpuVAddr;
++ IMG_UINT32 ulCpuPAddr;
++ IMG_VOID *pvPrivateData;
++ IMG_UINT32 ui32Bytes;
++ pid_t pid;
++ IMG_CHAR *pszFileName;
++ IMG_UINT32 ui32Line;
++
++ struct _DEBUG_MEM_ALLOC_REC *psNext;
++ struct _DEBUG_MEM_ALLOC_REC **ppsThis;
++}DEBUG_MEM_ALLOC_REC;
++
++static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE)
++static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC)
++
++
++static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static IMG_UINT32 g_SysRAMWaterMark;
++static IMG_UINT32 g_SysRAMHighWaterMark;
++
++static IMG_UINT32 g_IOMemWaterMark;
++static IMG_UINT32 g_IOMemHighWaterMark;
++
++static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ IMG_VOID *pvKey,
++ IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ulCpuPAddr,
++ IMG_VOID *pvPrivateData,
++ IMG_UINT32 ui32Bytes,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line);
++
++static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemoryRecords =0;
++static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off);
++
++#else
++static off_t printMemoryRecords(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++typedef struct _DEBUG_LINUX_MEM_AREA_REC
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32Flags;
++ pid_t pid;
++
++ struct _DEBUG_LINUX_MEM_AREA_REC *psNext;
++ struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis;
++}DEBUG_LINUX_MEM_AREA_REC;
++
++
++static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC)
++
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static PVRSRV_LINUX_MUTEX g_sDebugMutex;
++#endif
++
++static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static IMG_UINT32 g_LinuxMemAreaCount;
++static IMG_UINT32 g_LinuxMemAreaWaterMark;
++static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemArea=0;
++
++static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowMemArea(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off);
++
++#else
++static off_t printLinuxMemAreaRecords(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS))
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start);
++#endif
++#endif
++
++static LinuxKMemCache *psLinuxMemAreaCache;
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++#endif
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
++static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
++static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
++#endif
++
++PVRSRV_ERROR
++LinuxMMInit(IMG_VOID)
++{
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ LinuxInitMutex(&g_sDebugMutex);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemArea = CreateProcReadEntrySeq(
++ "mem_areas",
++ NULL,
++ ProcSeqNextMemArea,
++ ProcSeqShowMemArea,
++ ProcSeqOff2ElementMemArea,
++ ProcSeqStartstopDebugMutex
++ );
++ iStatus = !g_SeqFileMemArea ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++#endif
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemoryRecords =CreateProcReadEntrySeq(
++ "meminfo",
++ NULL,
++ ProcSeqNextMemoryRecords,
++ ProcSeqShowMemoryRecords,
++ ProcSeqOff2ElementMemoryRecords,
++ ProcSeqStartstopDebugMutex
++ );
++
++ iStatus = !g_SeqFileMemoryRecords ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++#endif
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++ psLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
++ if(!psLinuxMemAreaCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++IMG_VOID LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes",
++ __FUNCTION__,
++ psCurrentRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
++ psCurrentRecord->psLinuxMemArea->ui32ByteSize));
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++}
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord)
++
++{
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++ "type=%s "
++ "CpuVAddr=%p "
++ "CpuPAddr=0x%08lx, "
++ "allocated @ file=%s,line=%d",
++ __FUNCTION__,
++ DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
++ psCurrentRecord->pvCpuVAddr,
++ psCurrentRecord->ulCpuPAddr,
++ psCurrentRecord->pszFileName,
++ psCurrentRecord->ui32Line));
++ switch(psCurrentRecord->eAllocType)
++ {
++ case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++ KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++ IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IO:
++
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey, __FILE__, __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++ VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey, __FILE__, __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++ KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
++ break;
++ default:
++ PVR_ASSERT(0);
++ }
++}
++#endif
++
++
++IMG_VOID
++LinuxMMCleanup(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ if(g_LinuxMemAreaCount)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)",
++ __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
++ }
++
++ List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords,
++ LinuxMMCleanup_MemAreas_ForEachCb);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_SeqFileMemArea );
++#else
++ RemoveProcEntry("mem_areas");
++#endif
++ }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++
++
++ List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords,
++ LinuxMMCleanup_MemRecords_ForEachVa);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_SeqFileMemoryRecords );
++#else
++ RemoveProcEntry("meminfo");
++#endif
++
++ }
++#endif
++
++ if(psLinuxMemAreaCache)
++ {
++ KMemCacheDestroyWrapper(psLinuxMemAreaCache);
++ psLinuxMemAreaCache=NULL;
++ }
++}
++
++
++IMG_VOID *
++_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvRet;
++ pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvRet)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ ui32ByteSize,
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ return pvRet;
++}
++
++
++IMG_VOID
++_KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ kfree(pvCpuVAddr);
++}
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static IMG_VOID
++DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ IMG_VOID *pvKey,
++ IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ulCpuPAddr,
++ IMG_VOID *pvPrivateData,
++ IMG_UINT32 ui32Bytes,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++
++ psRecord->eAllocType = eAllocType;
++ psRecord->pvKey = pvKey;
++ psRecord->pvCpuVAddr = pvCpuVAddr;
++ psRecord->ulCpuPAddr = ulCpuPAddr;
++ psRecord->pvPrivateData = pvPrivateData;
++ psRecord->pid = current->pid;
++ psRecord->ui32Bytes = ui32Bytes;
++ psRecord->pszFileName = pszFileName;
++ psRecord->ui32Line = ui32Line;
++
++ List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord);
++
++ g_WaterMarkData[eAllocType] += ui32Bytes;
++ if(g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
++ {
++ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++ }
++
++ if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ g_SysRAMWaterMark += ui32Bytes;
++ if(g_SysRAMWaterMark > g_SysRAMHighWaterMark)
++ {
++ g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++ }
++ }
++ else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ {
++ g_IOMemWaterMark += ui32Bytes;
++ if(g_IOMemWaterMark > g_IOMemHighWaterMark)
++ {
++ g_IOMemHighWaterMark = g_IOMemWaterMark;
++ }
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va)
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ IMG_VOID *pvKey;
++
++ eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE);
++ pvKey = va_arg(va, IMG_VOID*);
++
++ if(psCurrentRecord->eAllocType == eAllocType
++ && psCurrentRecord->pvKey == pvKey)
++ {
++ eAllocType = psCurrentRecord->eAllocType;
++ g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes;
++
++ if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++ else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ {
++ g_IOMemWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++
++ List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++}
++
++
++static IMG_VOID
++DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++ LinuxLockMutex(&g_sDebugMutex);
++
++
++ if(!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(g_MemoryRecords,
++ DebugMemAllocRecordRemove_AnyVaCb,
++ eAllocType,
++ pvKey))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n",
++ __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey,
++ pszFileName, ui32Line));
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++static IMG_CHAR *
++DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++ IMG_CHAR *apszDebugMemoryRecordTypes[] = {
++ "KMALLOC",
++ "VMALLOC",
++ "ALLOC_PAGES",
++ "IOREMAP",
++ "IO",
++ "KMEM_CACHE_ALLOC"
++ };
++ return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++
++
++IMG_VOID *
++_VMallocWrapper(IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AllocFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ pgprot_t PGProtFlags;
++ IMG_VOID *pvRet;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ PGProtFlags = PAGE_KERNEL;
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ PGProtFlags = PGPROT_WC(PAGE_KERNEL);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ PGProtFlags = PGPROT_UC(PAGE_KERNEL);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "VMAllocWrapper: unknown mapping flags=0x%08lx",
++ ui32AllocFlags));
++ dump_stack();
++ return NULL;
++ }
++
++
++ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvRet)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvRet;
++}
++
++
++IMG_VOID
++_VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ vfree(pvCpuVAddr);
++}
++
++
++LinuxMemArea *
++NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_VOID *pvCpuVAddr;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ goto failed;
++ }
++
++ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++ if(!pvCpuVAddr)
++ {
++ goto failed;
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++ ReservePages(pvCpuVAddr, ui32Bytes);
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
++ if(psLinuxMemArea)
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++}
++
++
++IMG_VOID
++FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++ UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
++ psLinuxMemArea->ui32ByteSize);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
++ __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
++ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID
++ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++ IMG_VOID *pvPage;
++ IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_reserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++
++
++static IMG_VOID
++UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++ IMG_VOID *pvPage;
++ IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_unreserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++#endif
++
++
++IMG_VOID *
++_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvIORemapCookie;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
++ return NULL;
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvIORemapCookie)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ pvIORemapCookie,
++ pvIORemapCookie,
++ BasePAddr.uiAddr,
++ NULL,
++ ui32Bytes,
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvIORemapCookie;
++}
++
++
++IMG_VOID
++_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ iounmap(pvIORemapCookie);
++}
++
++
++LinuxMemArea *
++NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_VOID *pvIORemapCookie;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
++ if(!pvIORemapCookie)
++ {
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++static IMG_BOOL
++TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig)
++{
++ IMG_UINT32 ui32;
++ IMG_UINT32 ui32AddrChk;
++ IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
++
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr)
++ {
++ if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk)))
++ {
++ break;
++ }
++ }
++ if (ui32 == ui32NumPages)
++ {
++ return IMG_FALSE;
++ }
++
++ if (!bPhysContig)
++ {
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++ psLinuxMemArea->uData.sExternalKV.bPhysContig = (IMG_BOOL)(bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, ui32Bytes, bPhysContig));
++
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr;
++ }
++ else
++ {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr;
++ }
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *
++NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++ (IMG_VOID *)BasePAddr.uiAddr,
++ 0,
++ BasePAddr.uiAddr,
++ NULL,
++ ui32Bytes,
++ "unknown",
++ 0
++ );
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++ (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, __FILE__, __LINE__);
++#endif
++
++
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *
++NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32PageCount;
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ IMG_INT32 i;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ goto failed_area_alloc;
++ }
++
++ ui32PageCount = RANGE_TO_PAGES(ui32Bytes);
++ eError = OSAllocMem(0, sizeof(*pvPageList) * ui32PageCount, (IMG_VOID **)&pvPageList, &hBlockPageList,
++ "Array of pages");
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_page_list_alloc;
++ }
++
++ for(i=0; i<(IMG_INT32)ui32PageCount; i++)
++ {
++ pvPageList[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
++ if(!pvPageList[i])
++ {
++ goto failed_alloc_pages;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ pvPageList,
++ 0,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ "unknown",
++ 0
++ );
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++ psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++ psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed_alloc_pages:
++ for(i--; i >= 0; i--)
++ {
++ __free_pages(pvPageList[i], 0);
++ }
++ (IMG_VOID) OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList, hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = IMG_NULL;
++failed_page_list_alloc:
++ LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
++
++ return NULL;
++}
++
++
++IMG_VOID
++FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ IMG_UINT32 ui32PageCount;
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ IMG_INT32 i;
++
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
++ pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++ hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList, __FILE__, __LINE__);
++#endif
++
++ for(i=0;i<(IMG_INT32)ui32PageCount;i++)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++ __free_pages(pvPageList[i], 0);
++ }
++
++ (IMG_VOID) OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList, hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = IMG_NULL;
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++struct page*
++LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32PageIndex;
++ IMG_CHAR *pui8Addr;
++
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ return psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++
++ case LINUX_MEM_AREA_VMALLOC:
++ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pui8Addr += ui32ByteOffset;
++ return vmalloc_to_page(pui8Addr);
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++ + ui32ByteOffset);
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++ __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
++ return NULL;
++ }
++}
++
++
++LinuxKMemCache *
++KMemCacheCreateWrapper(IMG_CHAR *pszName,
++ size_t Size,
++ size_t Align,
++ IMG_UINT32 ui32Flags)
++{
++#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
++ ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
++#endif
++ return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ , NULL
++#endif
++ );
++}
++
++
++IMG_VOID
++KMemCacheDestroyWrapper(LinuxKMemCache *psCache)
++{
++ kmem_cache_destroy(psCache);
++}
++
++
++IMG_VOID *
++_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++ gfp_t Flags,
++#else
++ IMG_INT Flags,
++#endif
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvRet;
++
++ pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ pvRet,
++ pvRet,
++ 0,
++ psCache,
++ kmem_cache_size(psCache),
++ pszFileName,
++ ui32Line
++ );
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvRet;
++}
++
++
++IMG_VOID
++_KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ kmem_cache_free(psCache, pvObject);
++}
++
++
++const IMG_CHAR *
++KMemCacheNameWrapper(LinuxKMemCache *psCache)
++{
++ PVR_UNREFERENCED_PARAMETER(psCache);
++
++
++ return "";
++}
++
++
++LinuxMemArea *
++NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++ psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags);
++ }
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++static LinuxMemArea *
++LinuxMemAreaStructAlloc(IMG_VOID)
++{
++#if 0
++ LinuxMemArea *psLinuxMemArea;
++ psLinuxMemArea = kmem_cache_alloc(psLinuxMemAreaCache, GFP_KERNEL);
++ printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea);
++ dump_stack();
++ return psLinuxMemArea;
++#else
++ return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++#endif
++}
++
++
++static IMG_VOID
++LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)
++{
++ KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++
++
++}
++
++
++IMG_VOID
++LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ FreeVMallocLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ FreeIORemapLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ FreeExternalKVLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IO:
++ FreeIOLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ FreeSubLinuxMemArea(psLinuxMemArea);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID
++DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++ const IMG_CHAR *pi8FlagsString;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++ if(g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
++ {
++ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++ }
++ }
++ g_LinuxMemAreaCount++;
++
++
++ psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
++ if(psNewRecord)
++ {
++
++ psNewRecord->psLinuxMemArea = psLinuxMemArea;
++ psNewRecord->ui32Flags = ui32Flags;
++ psNewRecord->pid = current->pid;
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: failed to allocate linux memory area record.",
++ __FUNCTION__));
++ }
++
++
++ pi8FlagsString = HAPFlagsToString(ui32Flags);
++ if(strstr(pi8FlagsString, "UNKNOWN"))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++ __FUNCTION__,
++ ui32Flags,
++ psLinuxMemArea));
++
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++
++IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord,
++ va_list va)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = va_arg(va, LinuxMemArea*);
++ if(psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
++ {
++ return psCurrentRecord;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++
++static DEBUG_LINUX_MEM_AREA_REC *
++DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++
++ return psCurrentRecord;
++}
++
++
++static IMG_VOID
++DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++ }
++ g_LinuxMemAreaCount--;
++
++
++ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++ if(psCurrentRecord)
++ {
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
++ __FUNCTION__, psLinuxMemArea));
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++#endif
++
++
++IMG_VOID *
++LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ case LINUX_MEM_AREA_IOREMAP:
++ return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ IMG_CHAR *pAddr =
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++ if(!pAddr)
++ {
++ return NULL;
++ }
++ return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++ }
++ default:
++ return NULL;
++ }
++}
++
++
++IMG_CPU_PHYADDR
++LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = 0;
++
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ {
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ {
++ CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr);
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ }
++ else
++ {
++ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageIndex];
++
++ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ }
++ break;
++ }
++ case LINUX_MEM_AREA_IO:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_VMALLOC:
++ {
++ IMG_CHAR *pCpuVAddr;
++ pCpuVAddr =
++ (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pCpuVAddr += ui32ByteOffset;
++ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
++ break;
++ }
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ {
++ struct page *page;
++ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ page = psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++ CpuPAddr.uiAddr = page_to_phys(page);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ break;
++ }
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++ + ui32ByteOffset);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++
++ PVR_ASSERT(CpuPAddr.uiAddr);
++ return CpuPAddr;
++}
++
++
++IMG_BOOL
++LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_IO:
++ return IMG_TRUE;
++
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
++
++ case LINUX_MEM_AREA_VMALLOC:
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return IMG_FALSE;
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++ return IMG_FALSE;
++}
++
++
++const IMG_CHAR *
++LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++
++ switch(eMemAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ return "LINUX_MEM_AREA_IOREMAP";
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return "LINUX_MEM_AREA_EXTERNAL_KV";
++ case LINUX_MEM_AREA_IO:
++ return "LINUX_MEM_AREA_IO";
++ case LINUX_MEM_AREA_VMALLOC:
++ return "LINUX_MEM_AREA_VMALLOC";
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return "LINUX_MEM_AREA_SUB_ALLOC";
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return "LINUX_MEM_AREA_ALLOC_PAGES";
++ default:
++ PVR_ASSERT(0);
++ }
++
++ return "";
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&g_sDebugMutex);
++ }
++ else
++ {
++ LinuxUnLockMutex(&g_sDebugMutex);
++ }
++}
++#endif
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++
++IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void*)psRecord;
++}
++
++static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void*)psRecord;
++}
++
++
++static void ProcSeqShowMemArea(struct seq_file *sfile,void* el)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el;
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ seq_printf( sfile,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "Pid",
++ "Flags"
++ );
++#else
++ seq_printf( sfile,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark
++ );
++#endif
++ return;
++ }
++
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize,
++ psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++}
++
++#else
++
++static off_t
++printLinuxMemAreaRecords(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ Ret = printAppend(buffer, count, 0,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "Pid",
++ "Flags"
++ );
++#else
++ Ret = printAppend(buffer, count, 0,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark
++ );
++#endif
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++
++ if(!psRecord)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize,
++ psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++
++IMG_VOID* DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(!psRecord)
++ {
++ seq_printf( sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void*)psRecord;
++}
++
++static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(!psRecord)
++ {
++ seq_printf( sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void*)psRecord;
++}
++
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el;
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile, "\n");
++
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ seq_printf( sfile, "\n");
++
++ seq_printf( sfile, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "PID",
++ "PrivateData",
++ "Filename:Line");
++
++#else
++
++
++ seq_printf( sfile, "<meminfo>\n<meminfo_header>\n");
++ seq_printf( sfile,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile,"\n" );
++
++ seq_printf( sfile,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ seq_printf( sfile, "</meminfo_header>\n");
++
++#endif
++ return;
++ }
++
++ if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ "NULL",
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++ else
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++}
++
++
++
++#else
++
++static off_t
++printMemoryRecords(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(!off)
++ {
++ if(count < 1000)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ Ret = printAppend(buffer, count, 0, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "PID",
++ "PrivateData",
++ "Filename:Line");
++
++#else
++
++
++ Ret = printAppend(buffer, count, 0, "<meminfo>\n<meminfo_header>\n");
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "</meminfo_header>\n");
++
++#endif
++
++ goto unlock_and_return;
++ }
++
++ if(count < 1000)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++ if(!psRecord)
++ {
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(off == 0)
++ {
++ Ret = printAppend(buffer, count, 0, "</meminfo>\n");
++ goto unlock_and_return;
++ }
++#endif
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ "NULL",
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++ else
++ {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const IMG_CHAR *
++HAPFlagsToString(IMG_UINT32 ui32Flags)
++{
++ static IMG_CHAR szFlags[50];
++ IMG_INT32 i32Pos = 0;
++ IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
++ IMG_CHAR *apszCacheTypes[] = {
++ "UNCACHED",
++ "CACHED",
++ "WRITECOMBINE",
++ "UNKNOWN"
++ };
++ IMG_CHAR *apszMapType[] = {
++ "KERNEL_ONLY",
++ "SINGLE_PROCESS",
++ "MULTI_PROCESS",
++ "FROM_EXISTING_PROCESS",
++ "NO_CPU_VIRTUAL",
++ "UNKNOWN"
++ };
++
++
++ if(ui32Flags & PVRSRV_HAP_UNCACHED){
++ ui32CacheTypeIndex=0;
++ }else if(ui32Flags & PVRSRV_HAP_CACHED){
++ ui32CacheTypeIndex=1;
++ }else if(ui32Flags & PVRSRV_HAP_WRITECOMBINE){
++ ui32CacheTypeIndex=2;
++ }else{
++ ui32CacheTypeIndex=3;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
++ }
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY){
++ ui32MapTypeIndex = 0;
++ }else if(ui32Flags & PVRSRV_HAP_SINGLE_PROCESS){
++ ui32MapTypeIndex = 1;
++ }else if(ui32Flags & PVRSRV_HAP_MULTI_PROCESS){
++ ui32MapTypeIndex = 2;
++ }else if(ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS){
++ ui32MapTypeIndex = 3;
++ }else if(ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL){
++ ui32MapTypeIndex = 4;
++ }else{
++ ui32MapTypeIndex = 5;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
++ }
++
++ i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++ if (i32Pos <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)",
++ __FUNCTION__, ui32CacheTypeIndex, i32Pos));
++ szFlags[0] = 0;
++ }
++ else
++ {
++ sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++ }
++
++ return szFlags;
++}
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mm.h
+@@ -0,0 +1,331 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include <asm/io.h>
++
++#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
++#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
++
++#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
++
++#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page)
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot);
++#else
++#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot);
++#endif
++#endif
++
++static inline IMG_UINT32 VMallocToPhys(IMG_VOID *pCpuVAddr)
++{
++ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr));
++
++}
++
++typedef enum {
++ LINUX_MEM_AREA_IOREMAP,
++ LINUX_MEM_AREA_EXTERNAL_KV,
++ LINUX_MEM_AREA_IO,
++ LINUX_MEM_AREA_VMALLOC,
++ LINUX_MEM_AREA_ALLOC_PAGES,
++ LINUX_MEM_AREA_SUB_ALLOC,
++ LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++ LINUX_MEM_AREA_TYPE eAreaType;
++ union _uData
++ {
++ struct _sIORemap
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ IMG_VOID *pvIORemapCookie;
++ }sIORemap;
++ struct _sExternalKV
++ {
++
++ IMG_BOOL bPhysContig;
++ union {
++
++ IMG_SYS_PHYADDR SysPhysAddr;
++ IMG_SYS_PHYADDR *pSysPhysAddr;
++ } uPhysAddr;
++ IMG_VOID *pvExternalKV;
++ }sExternalKV;
++ struct _sIO
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ }sIO;
++ struct _sVmalloc
++ {
++
++ IMG_VOID *pvVmallocAddress;
++ }sVmalloc;
++ struct _sPageList
++ {
++
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ }sPageList;
++ struct _sSubAlloc
++ {
++
++ LinuxMemArea *psParentLinuxMemArea;
++ IMG_UINT32 ui32ByteOffset;
++ }sSubAlloc;
++ }uData;
++
++ IMG_UINT32 ui32ByteSize;
++
++ IMG_UINT32 ui32AreaFlags;
++
++ IMG_BOOL bMMapRegistered;
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sMMapOffsetStructList;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
++typedef kmem_cache_t LinuxKMemCache;
++#else
++typedef struct kmem_cache LinuxKMemCache;
++#endif
++
++
++PVRSRV_ERROR LinuxMMInit(IMG_VOID);
++
++
++IMG_VOID LinuxMMCleanup(IMG_VOID);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *szFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++IMG_VOID _KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++IMG_VOID _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
++#else
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, NULL, 0)
++#endif
++IMG_VOID _IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags);
++
++
++IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++#else
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__)
++#else
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0)
++#endif
++IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache);
++
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes);
++
++
++IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++
++IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr)
++
++IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea);
++
++static inline LinuxMemArea *
++LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea)
++{
++ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++ }
++ else
++ {
++ return psLinuxMemArea;
++ }
++}
++
++
++static inline LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
++}
++
++
++const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags);
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mmap.c
+@@ -0,0 +1,1148 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <linux/sched.h>
++#include <asm/current.h>
++#endif
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "mutex.h"
++#include "handle.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "bridged_support.h"
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++
++#if !defined(PVR_SECURE_HANDLES)
++#error "The mmap code requires PVR_SECURE_HANDLES"
++#endif
++
++static PVRSRV_LINUX_MUTEX g_sMMapMutex;
++
++static LinuxKMemCache *g_psMemmapCache = NULL;
++static LIST_HEAD(g_sMMapAreaList);
++static LIST_HEAD(g_sMMapOffsetStructList);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static IMG_UINT32 g_ui32RegisteredAreas = 0;
++static IMG_UINT32 g_ui32TotalByteSize = 0;
++#endif
++
++
++#if defined(PVR_PROC_USE_SEQ_FILE) && defined(DEBUG_LINUX_MMAP_AREAS)
++static struct proc_dir_entry *g_ProcMMap;
++#endif
++
++#define FIRST_PHYSICAL_PFN 0
++#define LAST_PHYSICAL_PFN 0x7fffffffUL
++#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
++#define LAST_SPECIAL_PFN 0xffffffffUL
++
++#define MAX_MMAP_HANDLE 0x7fffffffUL
++
++static inline IMG_BOOL
++PFNIsPhysical(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_PHYSICAL_PFN) && (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_BOOL
++PFNIsSpecial(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_SPECIAL_PFN) && (pfn <= LAST_SPECIAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_HANDLE
++MMapOffsetToHandle(IMG_UINT32 pfn)
++{
++ if (PFNIsPhysical(pfn))
++ {
++ PVR_ASSERT(PFNIsPhysical(pfn));
++ return IMG_NULL;
++ }
++
++ return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
++}
++
++static inline IMG_UINT32
++HandleToMMapOffset(IMG_HANDLE hHandle)
++{
++ IMG_UINT32 ulHandle = (IMG_UINT32)hHandle;
++
++ if (PFNIsSpecial(ulHandle))
++ {
++ PVR_ASSERT(PFNIsSpecial(ulHandle));
++ return 0;
++ }
++
++ return ulHandle + FIRST_SPECIAL_PFN;
++}
++
++static inline IMG_BOOL
++LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
++}
++
++static inline IMG_UINT32
++GetCurrentThreadID(IMG_VOID)
++{
++
++ return (IMG_UINT32)current->pid;
++}
++
++static PKV_OFFSET_STRUCT
++CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++ if(psOffsetStruct == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++ return IMG_NULL;
++ }
++
++ psOffsetStruct->ui32MMapOffset = ui32Offset;
++
++ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++
++ psOffsetStruct->ui32Mapped = 0;
++
++ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
++
++
++ psOffsetStruct->ui32TID = GetCurrentThreadID();
++
++ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
++
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++ psOffsetStruct->ui32RefCount = 0;
++
++ psOffsetStruct->ui32UserVAddr = 0;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ psOffsetStruct->pszName = pszName;
++#endif
++
++ list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
++
++ return psOffsetStruct;
++}
++
++
++static IMG_VOID
++DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
++{
++ list_del(&psOffsetStruct->sAreaItem);
++
++ if (psOffsetStruct->bOnMMapList)
++ {
++ list_del(&psOffsetStruct->sMMapItem);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++
++ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++}
++
++
++static inline IMG_VOID
++DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32ByteOffset)
++{
++ IMG_UINT32 ui32PageAlignmentOffset;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++
++ *pui32ByteOffset = ui32PageAlignmentOffset;
++
++ *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++
++PVRSRV_ERROR
++PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ pui32RealByteSize,
++ pui32ByteOffset);
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
++ {
++
++ PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize);
++
++ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ *pui32UserVAddr = 0;
++
++ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
++ {
++ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
++ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
++ }
++ else
++ {
++ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
++ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
++ }
++
++ psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto exit_unlock;
++ }
++
++
++ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
++
++ psOffsetStruct->bOnMMapList = IMG_TRUE;
++
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (psOffsetStruct->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area 0x%p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++
++ psOffsetStruct->ui32RefCount--;
++
++ *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0));
++
++ *pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
++ *pui32RealByteSize = (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle 0x%lx (memory area 0x%p)", __FUNCTION__, hMHandle, psLinuxMemArea));
++
++ eError = PVRSRV_ERROR_GENERIC;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++static inline PKV_OFFSET_STRUCT
++FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_UINT32 ui32TID = GetCurrentThreadID();
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID)
++ {
++
++ if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID)
++ {
++ return psOffsetStruct;
++ }
++ }
++ }
++
++ return IMG_NULL;
++}
++
++
++static IMG_BOOL
++DoMapToUser(LinuxMemArea *psLinuxMemArea,
++ struct vm_area_struct* ps_vma,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32ByteSize;
++
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),
++ ps_vma,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset);
++ }
++
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
++
++#if defined (__sparc__)
++
++#error "SPARC not supported"
++#endif
++
++ if (PFNIsPhysical(ps_vma->vm_pgoff))
++ {
++ IMG_INT result;
++
++ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
++ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff);
++
++
++ result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot);
++
++ if(result == 0)
++ {
++ return IMG_TRUE;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
++ }
++
++ {
++
++ IMG_UINT32 ulVMAPos;
++ IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
++ IMG_UINT32 ui32PA;
++
++
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++
++ if (!pfn_valid(pfn))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%lx", __FUNCTION__, pfn));
++ return IMG_FALSE;
++ }
++ }
++
++
++ ulVMAPos = ps_vma->vm_start;
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn;
++ struct page *psPage;
++ IMG_INT result;
++
++ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++ PVR_ASSERT(pfn_valid(pfn));
++
++ psPage = pfn_to_page(pfn);
++
++ result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
++ if(result != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
++ return IMG_FALSE;
++ }
++ ulVMAPos += PAGE_SIZE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++MMapVOpenNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++ psOffsetStruct->ui32Mapped++;
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++
++ if (psOffsetStruct->ui32Mapped > 1)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %lu)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
++ PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
++ }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %ld, ui32Mapped %d",
++ __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_INC_USE_COUNT;
++#endif
++}
++
++
++static void
++MMapVOpen(struct vm_area_struct* ps_vma)
++{
++ LinuxLockMutex(&g_sMMapMutex);
++
++ MMapVOpenNoLock(ps_vma);
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++static IMG_VOID
++MMapVCloseNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, CpuVAddr 0x%p ui32MMapOffset %ld, ui32Mapped %d",
++ __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped));
++#endif
++
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++ psOffsetStruct->ui32Mapped--;
++ if (psOffsetStruct->ui32Mapped == 0)
++ {
++ if (psOffsetStruct->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct 0x%p has non-zero reference count (ui32RefCount = %lu). User mode address of start of mapping: 0x%lx", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr));
++ }
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ ps_vma->vm_private_data = NULL;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_DEC_USE_COUNT;
++#endif
++}
++
++static void
++MMapVClose(struct vm_area_struct* ps_vma)
++{
++ LinuxLockMutex(&g_sMMapMutex);
++
++ MMapVCloseNoLock(ps_vma);
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++static struct vm_operations_struct MMapIOOps =
++{
++ .open=MMapVOpen,
++ .close=MMapVClose
++};
++
++
++int
++PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ IMG_UINT32 ui32ByteSize;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ int iRetVal = 0;
++
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
++ " and ui32ByteSize %ld(0x%08lx)",
++ __FUNCTION__,
++ ps_vma->vm_pgoff,
++ ui32ByteSize, ui32ByteSize));
++
++ psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++#if defined(SUPPORT_DRI_DRM)
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++
++ return drm_mmap(pFile, ps_vma);
++#else
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
++ __FUNCTION__, ps_vma->vm_pgoff));
++ iRetVal = -EINVAL;
++#endif
++ goto unlock_and_return;
++ }
++ list_del(&psOffsetStruct->sMMapItem);
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++
++ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
++ ((ps_vma->vm_flags & VM_SHARED) == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
++ __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++
++ ps_vma->vm_flags |= VM_RESERVED;
++ ps_vma->vm_flags |= VM_IO;
++
++
++ ps_vma->vm_flags |= VM_DONTEXPAND;
++
++
++ ps_vma->vm_flags |= VM_DONTCOPY;
++
++ ps_vma->vm_private_data = (void *)psOffsetStruct;
++
++ switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++
++ ps_vma->vm_ops = &MMapIOOps;
++
++ if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
++ {
++ iRetVal = -EAGAIN;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0)
++
++ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
++
++
++ MMapVOpenNoLock(ps_vma);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++ __FUNCTION__, ps_vma->vm_pgoff));
++
++unlock_and_return:
++ if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
++ {
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return iRetVal;
++}
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&g_sMMapMutex);
++ }
++ else
++ {
++ LinuxUnLockMutex(&g_sMMapMutex);
++ }
++}
++
++
++static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ off--;
++ if (off == 0)
++ {
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++ return (void*)psOffsetStruct;
++ }
++ }
++ }
++ return (void*)0;
++}
++
++static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementMMapRegistrations(sfile,off);
++}
++
++
++static void ProcSeqShowMMapRegistrations(struct seq_file *sfile,void* el)
++{
++ KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32RealByteSize;
++ IMG_UINT32 ui32ByteOffset;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n"
++ "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas,
++ g_ui32TotalByteSize
++ );
++ return;
++ }
++
++ psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++}
++
++#else
++
++static off_t
++PrintMMapRegistrations(IMG_CHAR *buffer, size_t size, off_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ if(!off)
++ {
++ Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n"
++ "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas,
++ g_ui32TotalByteSize
++ );
++
++ goto unlock_and_return;
++ }
++
++ if (size < 135)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(off != 0);
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ off--;
++ if (off == 0)
++ {
++ IMG_UINT32 ui32RealByteSize;
++ IMG_UINT32 ui32ByteOffset;
++
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ Ret = printAppend (buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++ goto unlock_and_return;
++ }
++ }
++ }
++ Ret = END_OF_FILE;
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sMMapMutex);
++ return Ret;
++}
++#endif
++#endif
++
++
++PVRSRV_ERROR
++PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++
++ if(psLinuxMemArea->bMMapRegistered)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
++ __FUNCTION__, psLinuxMemArea));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
++
++ psLinuxMemArea->bMMapRegistered = IMG_TRUE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas++;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32Mapped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %lu", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++ else
++ {
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
++ }
++
++ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ list_del(&psLinuxMemArea->sMMapItem);
++
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas--;
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++ return eError;
++}
++
++
++PVRSRV_ERROR
++LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++ IMG_BOOL bWarn = IMG_FALSE;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (!bWarn)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
++ bWarn = IMG_TRUE;
++ }
++ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
++ PVR_ASSERT(psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ }
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID
++PVRMMapInit(IMG_VOID)
++{
++ LinuxInitMutex(&g_sMMapMutex);
++
++ g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
++ if (!g_psMemmapCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ goto error;
++ }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
++ ProcSeqNextMMapRegistrations,
++ ProcSeqShowMMapRegistrations,
++ ProcSeqOff2ElementMMapRegistrations,
++ ProcSeqStartstopMMapRegistations
++ );
++#else
++ CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++#endif
++ return;
++
++error:
++ PVRMMapCleanup();
++ return;
++}
++
++
++IMG_VOID
++PVRMMapCleanup(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ if (!list_empty(&g_sMMapAreaList))
++ {
++ LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
++
++ PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
++ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ }
++ }
++ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcMMap);
++#else
++ RemoveProcEntry("mmap");
++#endif
++#endif
++
++ if(g_psMemmapCache)
++ {
++ KMemCacheDestroyWrapper(g_psMemmapCache);
++ g_psMemmapCache = NULL;
++ }
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mmap.h
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include "perproc.h"
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++
++ IMG_UINT32 ui32Mapped;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ LinuxMemArea *psLinuxMemArea;
++
++
++ IMG_UINT32 ui32TID;
++
++
++ IMG_UINT32 ui32PID;
++
++
++ IMG_BOOL bOnMMapList;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName;
++#endif
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sAreaItem;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++IMG_VOID PVRMMapInit(IMG_VOID);
++
++
++IMG_VOID PVRMMapCleanup(IMG_VOID);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize, IMG_UINT32 *pui32UserVAddr);
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr);
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/module.c
+@@ -0,0 +1,767 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ #if defined(LDM_PLATFORM)
++ #define PVR_LDM_PLATFORM_MODULE
++ #define PVR_LDM_MODULE
++ #else
++ #if defined(LDM_PCI)
++ #define PVR_LDM_PCI_MODULE
++ #define PVR_LDM_MODULE
++ #endif
++ #endif
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#include <linux/pci.h>
++#endif
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "private_data.h"
++#include "lock.h"
++#include "linkage.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++#define DRVNAME "pvrsrvkm"
++#define DEVNAME "pvrsrvkm"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static IMG_INT debug = DBGPRIV_WARNING;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#else
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Sets the level of debug output (default=0x4)");
++#endif
++#endif
++
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++EXPORT_SYMBOL(PVRGetDisplayClassJTable);
++EXPORT_SYMBOL(PVRGetBufferClassJTable);
++
++
++#if defined(PVR_LDM_MODULE)
++static struct class *psPvrClass;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++static IMG_INT AssignedMajorNumber;
++
++static IMG_INT PVRSRVOpen(struct inode* pInode, struct file* pFile);
++static IMG_INT PVRSRVRelease(struct inode* pInode, struct file* pFile);
++
++static struct file_operations pvrsrv_fops = {
++ .owner=THIS_MODULE,
++ .unlocked_ioctl=PVRSRV_BridgeDispatchKM,
++ .open=PVRSRVOpen,
++ .release=PVRSRVRelease,
++ .mmap=PVRMMap,
++};
++#endif
++
++PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++IMG_UINT32 gui32ReleasePID;
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static IMG_UINT32 gPVRPowerLevel;
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#define LDM_DEV struct platform_device
++#define LDM_DRV struct platform_driver
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#define LDM_DEV struct pci_dev
++#define LDM_DRV struct pci_driver
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device);
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
++#endif
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *device);
++static IMG_INT PVRSRVDriverResume(LDM_DEV *device);
++
++#if defined(PVR_LDM_PCI_MODULE)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++ { PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID) },
++ { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .driver = {
++ .name = DRVNAME,
++ },
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .name = DRVNAME,
++ .id_table = powervr_id_table,
++#endif
++ .probe = PVRSRVDriverProbe,
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .remove = PVRSRVDriverRemove,
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .remove = __devexit_p(PVRSRVDriverRemove),
++#endif
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .shutdown = PVRSRVDriverShutdown,
++};
++
++LDM_DEV *gpsPVRLDMDev;
++
++#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE)
++
++static IMG_VOID PVRSRVDeviceRelease(struct device *pDevice)
++{
++ PVR_UNREFERENCED_PARAMETER(pDevice);
++}
++
++static struct platform_device powervr_device = {
++ .name = DEVNAME,
++ .id = -1,
++ .dev = {
++ .release = PVRSRVDeviceRelease
++ }
++};
++
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_INT __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++#if 0
++
++ if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ {
++ gpsPVRLDMDev = pDevice;
++
++ if (SysInitialise() != PVRSRV_OK)
++ {
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++ if (SysAcquireData(&psSysData) == PVRSRV_OK)
++ {
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++ SysDeinitialise(psSysData);
++
++ gpsPVRLDMDev = IMG_NULL;
++ }
++
++#if 0
++ if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++ return 0;
++#endif
++#if defined (PVR_LDM_PCI_MODULE)
++ return;
++#endif
++}
++
++
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *pDevice)
++{
++ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++ (IMG_VOID) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++}
++
++#endif
++
++
++#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
++#else
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
++ printk(KERN_ALERT "PVRSRVDriverSuspend(pDevice=%p)", pDevice);
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice)
++#else
++static IMG_INT PVRSRVDriverResume(LDM_DEV *pDevice)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
++ printk(KERN_ALERT "PVRSRVDriverResume(pDevice=%p)", pDevice);
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++#endif
++
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++ IMG_CHAR data_buffer[2];
++ IMG_UINT32 PVRPowerLevel;
++
++ if (count != sizeof(data_buffer))
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRPowerLevel = data_buffer[0] - '0';
++ if (PVRPowerLevel != gPVRPowerLevel)
++ {
++ if (PVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++ else
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++
++ gPVRPowerLevel = PVRPowerLevel;
++ }
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRPowerLevel);
++}
++
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ IMG_HANDLE hBlockAlloc;
++ IMG_INT iRet = -ENOMEM;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ if (PVRSRVProcessConnect(ui32PID) != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
++ goto err_unlock;
++ }
++#endif
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ (IMG_PVOID *)&psPrivateData,
++ &hBlockAlloc,
++ "File Private Data");
++
++ if(eError != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ psPrivateData->hKernelMemInfo = NULL;
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psPrivateData->psDRMFile = pFile;
++
++ list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
++#endif
++ psPrivateData->ui32OpenPID = ui32PID;
++ psPrivateData->hBlockAlloc = hBlockAlloc;
++ PRIVATE_DATA(pFile) = psPrivateData;
++ iRet = 0;
++err_unlock:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return iRet;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVRelease(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ psPrivateData = PRIVATE_DATA(pFile);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ list_del(&psPrivateData->sDRMAuthListItem);
++#endif
++
++
++ gui32ReleasePID = psPrivateData->ui32OpenPID;
++ PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
++ gui32ReleasePID = 0;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ psPrivateData, psPrivateData->hBlockAlloc);
++ PRIVATE_DATA(pFile) = NULL;
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRCore_Init(IMG_VOID)
++#else
++static IMG_INT __init PVRCore_Init(IMG_VOID)
++#endif
++{
++ IMG_INT error;
++#if !defined(PVR_LDM_MODULE)
++ PVRSRV_ERROR eError;
++#else
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ PVRDPFInit();
++#endif
++ PVR_TRACE(("PVRCore_Init"));
++
++ LinuxInitMutex(&gPVRSRVLock);
++
++#ifdef DEBUG
++ PVRDebugSetLevel(debug);
++#endif
++
++ if (CreateProcEntries ())
++ {
++ error = -ENOMEM;
++ return error;
++ }
++
++ if (PVROSFuncInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ PVRLinuxMUtilsInit();
++
++ if(LinuxMMInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ LinuxBridgeInit();
++
++ PVRMMapInit();
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ if ((error = platform_driver_register(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
++
++ goto init_failed;
++ }
++
++#if defined(MODULE)
++ if ((error = platform_device_register(&powervr_device)) != 0)
++ {
++ platform_driver_unregister(&powervr_driver);
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++ if ((error = pci_register_driver(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++
++#else
++
++ if ((eError = SysInitialise()) != PVRSRV_OK)
++ {
++ error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++ if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
++ {
++ printk("\nAtlas wrapper (FPGA image) version mismatch");
++ error = -ENODEV;
++ }
++#endif
++ goto init_failed;
++ }
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
++
++ error = -EBUSY;
++ goto sys_deinit;
++ }
++
++ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++ psPvrClass = class_create(THIS_MODULE, "pvr");
++
++ if (IS_ERR(psPvrClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
++ error = -EBUSY;
++ goto unregister_device;
++ }
++
++ psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DEVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
++ error = -EBUSY;
++ goto destroy_class;
++ }
++#endif
++
++ return 0;
++
++#if defined(PVR_LDM_MODULE)
++destroy_class:
++ class_destroy(psPvrClass);
++unregister_device:
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME);
++#endif
++#if !defined(SUPPORT_DRI_DRM)
++sys_deinit:
++#endif
++#if defined(PVR_LDM_MODULE)
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++ if (psSysData != IMG_NULL)
++ {
++ SysDeinitialise(psSysData);
++ }
++ }
++#endif
++init_failed:
++ PVRMMapCleanup();
++ LinuxMMCleanup();
++ LinuxBridgeDeInit();
++ PVROSFuncDeInit();
++ RemoveProcEntries();
++
++ return error;
++
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_VOID PVRCore_Cleanup(IMG_VOID)
++#else
++static IMG_VOID __exit PVRCore_Cleanup(IMG_VOID)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRCore_Cleanup"));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LDM_MODULE)
++ device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psPvrClass);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ if (
++#endif
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME)
++#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ ;
++#else
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++
++ SysDeinitialise(psSysData);
++#endif
++
++ PVRMMapCleanup();
++
++ LinuxMMCleanup();
++
++ LinuxBridgeDeInit();
++
++ PVROSFuncDeInit();
++
++ RemoveProcEntries();
++
++ PVR_TRACE(("PVRCore_Cleanup: unloading"));
++}
++
++#if !defined(SUPPORT_DRI_DRM)
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutex.c
+@@ -0,0 +1,131 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/module.h>
++
++#include <img_defs.h>
++#include <services.h>
++
++#include "mutex.h"
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_lock(psPVRSRVMutex);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_OK;
++ }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ return mutex_trylock(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_unlock(psPVRSRVMutex);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ return (IMG_BOOL)mutex_is_locked(psPVRSRVMutex);
++}
++
++
++#else
++
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ init_MUTEX(&psPVRSRVMutex->sSemaphore);
++ atomic_set(&psPVRSRVMutex->Count, 0);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ down(&psPVRSRVMutex->sSemaphore);
++ atomic_dec(&psPVRSRVMutex->Count);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
++ {
++
++ return PVRSRV_ERROR_GENERIC;
++ }else{
++ atomic_dec(&psPVRSRVMutex->Count);
++ return PVRSRV_OK;
++ }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
++ if(Status == 0)
++ {
++ atomic_dec(&psPVRSRVMutex->Count);
++ }
++
++ return Status;
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ atomic_inc(&psPVRSRVMutex->Count);
++ up(&psPVRSRVMutex->sSemaphore);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ IMG_INT32 iCount;
++
++ iCount = atomic_read(&psPVRSRVMutex->Count);
++
++ return (IMG_BOOL)iCount;
++}
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutex.h
+@@ -0,0 +1,77 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_LINUX_MUTEX_H_
++#define __INCLUDED_LINUX_MUTEX_H_
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++typedef struct mutex PVRSRV_LINUX_MUTEX;
++
++#else
++
++
++typedef struct {
++ struct semaphore sSemaphore;
++
++ atomic_t Count;
++}PVRSRV_LINUX_MUTEX;
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++#define LinuxInitMutex(psPVRSRVMutex) \
++do { \
++ mutex_init(psPVRSRVMutex); \
++} while(0)
++#else
++extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++#endif
++
++extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutils.c
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "mutils.h"
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++#define PAT_LINUX_X86_WC 1
++
++#define PAT_X86_ENTRY_BITS 8
++
++#define PAT_X86_BIT_PWT 1U
++#define PAT_X86_BIT_PCD 2U
++#define PAT_X86_BIT_PAT 4U
++#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT)
++
++static IMG_BOOL g_write_combining_available = IMG_FALSE;
++
++#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0)
++
++static inline IMG_UINT
++pvr_pat_index(pgprotval_t prot_val)
++{
++ IMG_UINT ret = 0;
++ pgprotval_t val = prot_val & _PAGE_CACHE_MASK;
++
++ ret |= PROT_TO_PAT_INDEX(val, PAT);
++ ret |= PROT_TO_PAT_INDEX(val, PCD);
++ ret |= PROT_TO_PAT_INDEX(val, PWT);
++
++ return ret;
++}
++
++static inline IMG_UINT
++pvr_pat_entry(u64 pat, IMG_UINT index)
++{
++ return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK;
++}
++
++static IMG_VOID
++PVRLinuxX86PATProbe(IMG_VOID)
++{
++
++ if (cpu_has_pat)
++ {
++ u64 pat;
++ IMG_UINT pat_index;
++ IMG_UINT pat_entry;
++
++ PVR_TRACE(("%s: PAT available", __FUNCTION__));
++
++ rdmsrl(MSR_IA32_CR_PAT, pat);
++ PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32)));
++ PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat)));
++
++ pat_index = pvr_pat_index(_PAGE_CACHE_WC);
++ PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index));
++
++ pat_entry = pvr_pat_entry(pat, pat_index);
++ PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC));
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC);
++#endif
++ }
++#if defined(DEBUG)
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ if (g_write_combining_available)
++ {
++ PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__));
++ }
++ else
++ {
++ PVR_TRACE(("%s: Write combining not available", __FUNCTION__));
++ }
++#else
++ PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__));
++#endif
++#endif
++}
++
++pgprot_t
++pvr_pgprot_writecombine(pgprot_t prot)
++{
++
++
++ return (g_write_combining_available) ?
++ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot);
++}
++#endif
++
++IMG_VOID
++PVRLinuxMUtilsInit(IMG_VOID)
++{
++#if defined(SUPPORT_LINUX_X86_PAT)
++ PVRLinuxX86PATProbe();
++#endif
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/mutils.h
+@@ -0,0 +1,101 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MUTILS_H__
++#define __IMG_LINUX_MUTILS_H__
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)))
++#if defined(SUPPORT_LINUX_X86_PAT)
++#undef SUPPORT_LINUX_X86_PAT
++#endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ pgprot_t pvr_pgprot_writecombine(pgprot_t prot);
++ #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv)
++#else
++ #if defined(__arm__) || defined(__sh__)
++ #define PGPROT_WC(pv) pgprot_writecombine(pv)
++ #else
++ #if defined(__i386__)
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #else
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #error Unsupported architecture!
++ #endif
++ #endif
++#endif
++
++#define PGPROT_UC(pv) pgprot_noncached(pv)
++
++#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
++ #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes)
++#else
++ #if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
++ #else
++ #define IOREMAP(pa, bytes) ioremap(pa, bytes)
++ #endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ #if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#else
++ #if defined(__arm__)
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE)
++ #else
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, , L_PTE_BUFFERABLE, 1)
++ #endif
++ #endif
++ #endif
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#endif
++
++#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
++
++IMG_VOID PVRLinuxMUtilsInit(IMG_VOID);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+@@ -0,0 +1,2564 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++#include <asm/cacheflush.h>
++#endif
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/hugetlb.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++#include <linux/spinlock.h>
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
++ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
++ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
++ defined(PVR_LINUX_USING_WORKQUEUES)
++#include <linux/workqueue.h>
++#endif
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "event.h"
++#include "linkage.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS (100)
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS) || \
++ defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++#if defined(__i386__)
++static void per_cpu_cache_flush(void *arg)
++{
++ PVR_UNREFERENCED_PARAMETER(arg);
++ wbinvd();
++}
++#endif
++
++#if !defined(SUPPORT_CPU_CACHED_BUFFERS)
++static
++#endif
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
++{
++#if defined(__arm__)
++ flush_cache_all();
++#elif defined(__i386__)
++
++ on_each_cpu(per_cpu_cache_flush, NULL, 1);
++#else
++#error "Implement full CPU cache flush for this CPU!"
++#endif
++}
++
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd)
++{
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrEnd);
++
++
++ OSFlushCPUCacheKM();
++}
++
++#endif
++
++#define HOST_ALLOC_MEM_USING_KMALLOC ((IMG_HANDLE)0)
++#define HOST_ALLOC_MEM_USING_VMALLOC ((IMG_HANDLE)1)
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
++#else
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _KMallocWrapper(ui32Size, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++ if(*ppvCpuVAddr)
++ {
++ if (phBlockAlloc)
++ {
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC;
++ }
++ }
++ else
++ {
++ if (!phBlockAlloc)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED);
++#endif
++ if (!*ppvCpuVAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
++#else
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ if (hBlockAlloc == HOST_ALLOC_MEM_USING_VMALLOC)
++ {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ VFreeWrapper(pvCpuVAddr);
++#endif
++ }
++ else
++ {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ KFreeWrapper(pvCpuVAddr);
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PageSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
++
++#if 0
++
++ if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++
++ psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++ if(ui32AllocFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED))
++ {
++ OSFlushCPUCacheKM();
++ }
++#endif
++
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++ *phOSMemHandle = psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++ ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
++ __FUNCTION__, ui32AllocFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++ if(!psLinuxMemArea)
++ {
++ *phOSMemHandleRet = NULL;
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ *phOSMemHandleRet = psLinuxMemArea;
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = PVRMMapRegisterArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_register_area;
++ }
++
++ return PVRSRV_OK;
++
++failed_register_area:
++ *phOSMemHandleRet = NULL;
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ return eError;
++}
++
++PVRSRV_ERROR
++OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++ if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++IMG_CPU_PHYADDR
++OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++ PVR_ASSERT(hOSMemHandle);
++
++ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++
++
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ IMG_UINT8 *Src,*Dst;
++ IMG_INT i;
++
++ Src=(IMG_UINT8 *)pvSrc;
++ Dst=(IMG_UINT8 *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ Dst[i]=Src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMSET)
++ IMG_UINT8 *Buff;
++ IMG_INT i;
++
++ Buff=(IMG_UINT8 *)pvDest;
++ for(i=0;i<ui32Size;i++)
++ {
++ Buff[i]=ui8Value;
++ }
++#else
++ memset(pvDest, (IMG_INT) ui8Value, (size_t) ui32Size);
++#endif
++}
++
++
++IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
++{
++ return (strcpy(pszDest, pszSrc));
++}
++
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
++{
++ va_list argList;
++ IMG_INT32 iCount;
++
++ va_start(argList, pszFormat);
++ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
++ va_end(argList);
++
++ return iCount;
++}
++
++IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process."));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
++ }
++}
++
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
++{
++ psResource->ui32ID = 0;
++ psResource->ui32Lock = 0;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
++{
++ OSBreakResourceLock (psResource, psResource->ui32ID);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
++{
++ ENV_DATA *psEnvData;
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL,
++ "Environment Data") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ &psEnvData->pvBridgeData, IMG_NULL,
++ "Bridge Data") != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++
++ *ppvEnvSpecificData = psEnvData;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
++{
++ ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData;
++
++ PVR_ASSERT(!psEnvData->bMISRInstalled);
++ PVR_ASSERT(!psEnvData->bLISRInstalled);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL);
++ psEnvData->pvBridgeData = IMG_NULL;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
++{
++ schedule();
++}
++
++
++
++IMG_UINT32 OSClockus(IMG_VOID)
++{
++ IMG_UINT32 time, j = jiffies;
++
++ time = j * (1000000 / HZ);
++
++ return time;
++}
++
++
++
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
++{
++ udelay(ui32Timeus);
++}
++
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
++{
++ if (in_interrupt())
++ {
++ return KERNEL_ID;
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ return (IMG_UINT32)current->pgrp;
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++ return (IMG_UINT32)task_tgid_nr(current);
++#else
++ return (IMG_UINT32)current->tgid;
++#endif
++#endif
++}
++
++
++IMG_UINT32 OSGetPageSize(IMG_VOID)
++{
++#if defined(__sh__)
++ IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
++
++ return (ui32ReturnValue);
++#else
++ return PAGE_SIZE;
++#endif
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psDeviceNode->psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++ psSysData = (SYS_DATA *)dev_id;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVSystemLISR(psSysData);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x", pszISRName, ui32Irq, pvDeviceNode));
++
++ if(request_irq(ui32Irq, DeviceISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , pszISRName, pvDeviceNode))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvDeviceNode;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq, pvSysData));
++
++ if(request_irq(ui32Irq, SystemISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , "PowerVR", pvSysData))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvSysData;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
++
++ psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
++
++ if (psEnvData->psWorkQueue == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ destroy_workqueue(psEnvData->psWorkQueue);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++#else
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ flush_scheduled_work();
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ schedule_work(&psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++
++
++static void MISRWrapper(unsigned long data)
++{
++ SYS_DATA *psSysData;
++
++ psSysData = (SYS_DATA *)data;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
++
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ tasklet_kill(&psEnvData->sMISRTasklet);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ tasklet_schedule(&psEnvData->sMISRTasklet);
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++#endif
++
++#endif
++
++IMG_VOID OSPanic(IMG_VOID)
++{
++ BUG();
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#define OS_TAS(p) xchg((p), 1)
++#else
++#define OS_TAS(p) tas(p)
++#endif
++PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE *psResource,
++ IMG_UINT32 ui32ID)
++
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(!OS_TAS(&psResource->ui32Lock))
++ psResource->ui32ID = ui32ID;
++ else
++ eError = PVRSRV_ERROR_GENERIC;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource));
++ PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++
++IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
++ ? IMG_TRUE
++ : IMG_FALSE;
++}
++
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID *pvLinAddr)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = (IMG_UINTPTR_T)VMallocToPhys(pvLinAddr);
++
++ return CpuPAddr;
++}
++
++
++IMG_VOID *
++OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ if(phOSMemHandle)
++ {
++ *phOSMemHandle = (IMG_HANDLE)0;
++ }
++
++ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ IMG_VOID *pvIORemapCookie;
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(pvIORemapCookie == IMG_NULL)
++ {
++ return NULL;
++ }
++ return pvIORemapCookie;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSReservePhys otherwise)"));
++ return NULL;
++ }
++}
++
++IMG_BOOL
++OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc)
++{
++ PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, ui32Bytes, pvLinAddr));
++
++ PVR_UNREFERENCED_PARAMETER(hPageAlloc);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ IOUnmapWrapper(pvLinAddr);
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSUnReservePhys otherwise)"));
++ return IMG_FALSE;
++ }
++}
++
++static PVRSRV_ERROR
++RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_BOOL bPhysContig,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
++
++ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle)
++{
++ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR
++OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++}
++
++PVRSRV_ERROR
++OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++#if 0
++
++ if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++
++ psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++ psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSUnReservePhys(IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr);
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++ IMG_VOID *pvKernLinAddr;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ pvKernLinAddr = _KMallocWrapper(ui32Size, __FILE__, __LINE__);
++#else
++ pvKernLinAddr = KMallocWrapper(ui32Size);
++#endif
++ if (!pvKernLinAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ *pvLinAddr = pvKernLinAddr;
++
++ psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);
++
++ return PVRSRV_OK;
++#endif
++}
++
++
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__));
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ KFreeWrapper(pvLinAddr);
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++#if !defined(NO_HARDWARE)
++ return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#else
++ return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#endif
++}
++
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++#if !defined(NO_HARDWARE)
++ writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#else
++ *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++#endif
++}
++
++#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
++{
++ int err;
++ IMG_UINT32 i;
++ PVR_PCI_DEV *psPVRPCI;
++
++ PVR_TRACE(("OSPCISetDev"));
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL,
++ "PCI Device") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++ return IMG_NULL;
++ }
++
++ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++ psPVRPCI->ePCIFlags = eFlags;
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
++ return IMG_NULL;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_set_master(psPVRPCI->psPCIDev);
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_1 ||
++ psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_2) // Disable MSI for Menlow
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ else if(!psPVRPCI->psPCIDev->msi_enabled)
++ {
++ err = pci_enable_msi(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err));
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ }
++ }
++#else
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel"));
++#endif
++ }
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++
++ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
++}
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++{
++ struct pci_dev *psPCIDev;
++
++ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++ if (psPCIDev == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++ return IMG_NULL;
++ }
++
++ return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ *pui32IRQ = psPVRPCI->psPCIDev->irq;
++
++ return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++ HOST_PCI_ADDR_RANGE_FUNC_LEN,
++ HOST_PCI_ADDR_RANGE_FUNC_START,
++ HOST_PCI_ADDR_RANGE_FUNC_END,
++ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
++static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++ PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ if (ui32Index >= DEVICE_COUNT_RESOURCE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
++ return 0;
++
++ }
++
++ switch (eFunc)
++ {
++ case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_START:
++ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_END:
++ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++ {
++ int err;
++
++ err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
++ return 0;
++ }
++ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++ return 1;
++ }
++ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++ if (psPVRPCI->abPCIResourceInUse[ui32Index])
++ {
++ pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
++ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++ }
++ return 1;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
++ break;
++ }
++
++ return 0;
++}
++
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int i;
++
++ PVR_TRACE(("OSPCIReleaseDev"));
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++ }
++
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++ pci_disable_msi(psPVRPCI->psPCIDev);
++ }
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_clear_master(psPVRPCI->psPCIDev);
++ }
++#endif
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int i;
++ int err;
++
++ PVR_TRACE(("OSPCISuspendDev"));
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ }
++ }
++
++ err = pci_save_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D3hot);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
++ break;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int err;
++ int i;
++
++ PVR_TRACE(("OSPCIResumeDev"));
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D0);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
++ return PVRSRV_ERROR_GENERIC;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_restore_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ pci_set_master(psPVRPCI->psPCIDev);
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ err = pci_request_region(psPVRPCI->psPCIDev, i, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err));
++ }
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define OS_MAX_TIMERS 8
++
++typedef struct TIMER_CALLBACK_DATA_TAG
++{
++ IMG_BOOL bInUse;
++ PFN_TIMER_FUNC pfnTimerFunc;
++ IMG_VOID *pvData;
++ struct timer_list sTimer;
++ IMG_UINT32 ui32Delay;
++ IMG_BOOL bActive;
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ struct work_struct sWork;
++#endif
++}TIMER_CALLBACK_DATA;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static struct workqueue_struct *psTimerWorkQueue;
++#endif
++
++static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++DEFINE_MUTEX(sTimerStructLock);
++#else
++static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
++#endif
++
++static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
++{
++ if (!psTimerCBData->bActive)
++ return;
++
++
++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++
++
++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
++
++ OSTimerCallbackBody(psTimerCBData);
++}
++#endif
++
++static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ int res;
++
++ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
++ if (res == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
++ }
++#else
++ OSTimerCallbackBody(psTimerCBData);
++#endif
++}
++
++
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData;
++ IMG_UINT32 ui32i;
++#if !defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ unsigned long ulLockFlags;
++#endif
++
++
++ if(!pfnTimerFunc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
++ return IMG_NULL;
++ }
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_lock(&sTimerStructLock);
++#else
++ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
++#endif
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ psTimerCBData = &sTimers[ui32i];
++ if (!psTimerCBData->bInUse)
++ {
++ psTimerCBData->bInUse = IMG_TRUE;
++ break;
++ }
++ }
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_unlock(&sTimerStructLock);
++#else
++ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
++#endif
++ if (ui32i >= OS_MAX_TIMERS)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
++ return IMG_NULL;
++ }
++
++ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++ psTimerCBData->pvData = pvData;
++ psTimerCBData->bActive = IMG_FALSE;
++
++
++
++
++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++ ? 1
++ : ((HZ * ui32MsTimeout) / 1000);
++
++ init_timer(&psTimerCBData->sTimer);
++
++
++ psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper;
++ psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ return (IMG_HANDLE)(ui32i + 1);
++}
++
++
++static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
++{
++ IMG_UINT32 ui32i = ((IMG_UINT32)hTimer) - 1;
++
++ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
++
++ return &sTimers[ui32i];
++}
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bInUse = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_TRUE;
++
++
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++
++ add_timer(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_FALSE;
++ smp_mb();
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++
++ del_timer_sync(&psTimerCBData->sTimer);
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
++{
++
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(pszName)
++ {
++
++ strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
++ }
++ else
++ {
++
++ static IMG_UINT16 ui16NameIndex = 0;
++ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++ }
++
++ if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++
++}
++
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(psEventObject->hOSEventKM)
++ {
++ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++
++}
++
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectSignal(hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
++{
++ return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE;
++}
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
++{
++ IMG_INT linuxType;
++
++ if (eVerification == PVR_VERIFY_READ)
++ {
++ linuxType = VERIFY_READ;
++ }
++ else
++ {
++ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
++ linuxType = VERIFY_WRITE;
++ }
++
++ return access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++typedef enum _eWrapMemType_
++{
++ WRAP_TYPE_CLEANUP,
++ WRAP_TYPE_GET_USER_PAGES,
++ WRAP_TYPE_FIND_VMA_PAGES,
++ WRAP_TYPE_FIND_VMA_PFN
++} eWrapMemType;
++
++typedef struct _sWrapMemInfo_
++{
++ eWrapMemType eType;
++ IMG_INT iNumPages;
++ struct page **ppsPages;
++ IMG_SYS_PHYADDR *psPhysAddr;
++ IMG_INT iPageOffset;
++ IMG_INT iContiguous;
++#if defined(DEBUG)
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulBeyondEndAddr;
++ struct vm_area_struct *psVMArea;
++#endif
++ IMG_BOOL bWrapWorkaround;
++} sWrapMemInfo;
++
++static IMG_VOID CheckPagesContiguous(sWrapMemInfo *psInfo)
++{
++ IMG_INT i;
++ IMG_UINT32 ui32AddrChk;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ psInfo->iContiguous = 1;
++
++ for (i = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr;
++ i < psInfo->iNumPages;
++ i++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psInfo->psPhysAddr[i].uiAddr != ui32AddrChk)
++ {
++ psInfo->iContiguous = 0;
++ break;
++ }
++ }
++}
++
++static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea, IMG_UINT32 ulCPUVAddr)
++{
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))
++ pgd_t *psPGD;
++ pud_t *psPUD;
++ pmd_t *psPMD;
++ pte_t *psPTE;
++ struct mm_struct *psMM = psVMArea->vm_mm;
++ IMG_UINT32 ulPFN;
++ spinlock_t *psPTLock;
++ struct page *psPage;
++
++ psPGD = pgd_offset(psMM, ulCPUVAddr);
++ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
++ return NULL;
++
++ psPUD = pud_offset(psPGD, ulCPUVAddr);
++ if (pud_none(*psPUD) || pud_bad(*psPUD))
++ return NULL;
++
++ psPMD = pmd_offset(psPUD, ulCPUVAddr);
++ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
++ return NULL;
++
++ psPage = NULL;
++
++ psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock);
++ if ((pte_none(*psPTE) != 0) || (pte_present(*psPTE) == 0) || (pte_write(*psPTE) == 0))
++ goto exit_unlock;
++
++ ulPFN = pte_pfn(*psPTE);
++ if (!pfn_valid(ulPFN))
++ goto exit_unlock;
++
++ psPage = pfn_to_page(ulPFN);
++
++ get_page(psPage);
++
++exit_unlock:
++ pte_unmap_unlock(psPTE, psPTLock);
++
++ return psPage;
++#else
++ return NULL;
++#endif
++}
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem;
++ IMG_INT i;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ switch (psInfo->eType)
++ {
++ case WRAP_TYPE_CLEANUP:
++ break;
++ case WRAP_TYPE_FIND_VMA_PFN:
++ break;
++ case WRAP_TYPE_GET_USER_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++
++
++ if (!PageReserved(psPage));
++ {
++ SetPageDirty(psPage);
++ }
++ page_cache_release(psPage);
++ }
++ break;
++ }
++ case WRAP_TYPE_FIND_VMA_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[i]);
++ else
++ put_page_testzero(psInfo->ppsPages[i]);
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ if (psInfo->ppsPages != IMG_NULL)
++ {
++ kfree(psInfo->ppsPages);
++ }
++
++ if (psInfo->psPhysAddr != IMG_NULL)
++ {
++ kfree(psInfo->psPhysAddr);
++ }
++
++ kfree(psInfo);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ IMG_UINT32 ulStartAddrOrig = (IMG_UINT32) pvCPUVAddr;
++ IMG_UINT32 ulAddrRangeOrig = (IMG_UINT32) ui32Bytes;
++ IMG_UINT32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulAddrRange;
++ IMG_UINT32 ulBeyondEndAddr;
++ IMG_UINT32 ulAddr;
++ IMG_INT iNumPagesMapped;
++ IMG_INT i;
++ struct vm_area_struct *psVMArea;
++ sWrapMemInfo *psInfo;
++
++
++ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
++ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
++ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
++
++
++ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
++ if (psInfo == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate information structure"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psInfo, 0, sizeof(*psInfo));
++ psInfo->bWrapWorkaround = bWrapWorkaround;
++
++#if defined(DEBUG)
++ psInfo->ulStartAddr = ulStartAddrOrig;
++ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
++#endif
++
++ psInfo->iNumPages = (IMG_INT)(ulAddrRange >> PAGE_SHIFT);
++ psInfo->iPageOffset = (IMG_INT)(ulStartAddrOrig & ~PAGE_MASK);
++
++
++ psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL);
++ if (psInfo->psPhysAddr == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL);
++ if (psInfo->ppsPages == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ down_read(&current->mm->mmap_sem);
++ iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (iNumPagesMapped >= 0)
++ {
++
++ if (iNumPagesMapped != psInfo->iNumPages)
++ {
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, iNumPagesMapped));
++
++
++ for (i = 0; i < iNumPagesMapped; i++)
++ {
++ page_cache_release(psInfo->ppsPages[i]);
++
++ }
++ goto error_free;
++ }
++
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psInfo->ppsPages[i]) << PAGE_SHIFT;
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++
++ }
++
++ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
++
++ goto exit_check;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), trying something else", iNumPagesMapped));
++
++
++ down_read(&current->mm->mmap_sem);
++
++ psVMArea = find_vma(current->mm, ulStartAddrOrig);
++ if (psVMArea == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %lx", ulStartAddrOrig));
++
++ goto error_release_mmap_sem;
++ }
++#if defined(DEBUG)
++ psInfo->psVMArea = psVMArea;
++#endif
++
++
++ if (ulStartAddrOrig < psVMArea->vm_start)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Start address %lx is outside of the region returned by find_vma", ulStartAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if (ulBeyondEndAddrOrig > psVMArea->vm_end)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: End address %lx is outside of the region returned by find_vma", ulBeyondEndAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != (VM_IO | VM_RESERVED))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ struct page *psPage;
++
++ BUG_ON(i >= psInfo->iNumPages);
++
++ psPage = CPUVAddrToPage(psVMArea, ulAddr);
++ if (psPage == NULL)
++ {
++ IMG_INT j;
++
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't lookup page structure for address 0x%lx, trying something else", ulAddr));
++
++
++ for (j = 0; j < i; j++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[j]);
++ else
++ put_page_testzero(psInfo->ppsPages[j]);
++ }
++ break;
++ }
++
++ psInfo->ppsPages[i] = psPage;
++ }
++
++ BUG_ON(i > psInfo->iNumPages);
++ if (i == psInfo->iNumPages)
++ {
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES;
++ }
++ else
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) && defined(PVR_SECURE_HANDLES)
++
++
++
++ if ((psVMArea->vm_flags & VM_PFNMAP) == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region isn't a raw PFN mapping. Giving up."));
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = ((ulAddr - psVMArea->vm_start) + (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++ BUG_ON(i != psInfo->iNumPages);
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
++
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region can't be locked down"));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Raw PFN mappings not supported. Giving up."));
++ goto error_release_mmap_sem;
++#endif
++ }
++
++ up_read(&current->mm->mmap_sem);
++
++exit_check:
++ CheckPagesContiguous(psInfo);
++
++
++
++ *phOSWrapMem = (IMG_HANDLE)psInfo;
++
++ return PVRSRV_OK;
++
++error_release_mmap_sem:
++ up_read(&current->mm->mmap_sem);
++error_free:
++ psInfo->eType = WRAP_TYPE_CLEANUP;
++ OSReleasePhysPageAddr((IMG_HANDLE)psInfo);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ {
++ IMG_UINT32 ui32i;
++
++ psTimerWorkQueue = create_workqueue("pvr_timer");
++ if (psTimerWorkQueue == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
++ return PVRSRV_ERROR_GENERIC;
++
++ }
++
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
++
++ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
++ }
++ }
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_VOID PVROSFuncDeInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ if (psTimerWorkQueue != NULL)
++ {
++ destroy_workqueue(psTimerWorkQueue);
++ }
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "osperproc.h"
++
++#include "env_perproc.h"
++#include "proc.h"
++
++extern IMG_UINT32 gui32ReleasePID;
++
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ phOsPrivateData,
++ &hBlockAlloc,
++ "Environment per Process Data");
++
++ if (eError != PVRSRV_OK)
++ {
++ *phOsPrivateData = IMG_NULL;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
++ OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));
++
++ psEnvPerProc->hBlockAlloc = hBlockAlloc;
++
++
++ LinuxMMapPerProcessConnect(psEnvPerProc);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (hOsPrivateData == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;
++
++
++ LinuxMMapPerProcessDisconnect(psEnvPerProc);
++
++
++ RemovePerProcessProcDir(psEnvPerProc);
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ hOsPrivateData,
++ psEnvPerProc->hBlockAlloc);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError));
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ return LinuxMMapPerProcessHandleOptions(psHandleBase);
++}
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID)
++{
++ if(!gui32ReleasePID)
++ return NULL;
++ return PVRSRVPerProcessPrivateData(gui32ReleasePID);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/pdump.c
+@@ -0,0 +1,662 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined (SUPPORT_SGX)
++#if defined (PDUMP)
++
++#include <asm/atomic.h>
++#include <stdarg.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvrversion.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>
++
++static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
++static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
++static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++static IMG_UINT32 DbgGetFrame (PDBG_STREAM psStream);
++static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++static IMG_UINT32 DbgWrite (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(a,b) (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
++
++
++
++IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
++ "ScriptStream2",
++ "DriverInfoStream"};
++typedef struct PDBG_PDUMP_STATE_TAG
++{
++ PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++ IMG_UINT32 ui32ParamFileNum;
++
++ IMG_CHAR *pszMsg;
++ IMG_CHAR *pszScript;
++ IMG_CHAR *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
++
++#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++
++
++
++IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
++
++static inline IMG_BOOL PDumpSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
++ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
++ if ((!*phScript) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phMsg = (IMG_HANDLE)gsDBGPdumpState.pszMsg;
++ *pui32MaxLen = SZ_MSG_SIZE_MAX;
++ if ((!*phMsg) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *ppszFile = gsDBGPdumpState.pszFile;
++ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
++ if ((!*ppszFile) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteString2(hScript, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
++{
++ IMG_CHAR* pszBuf = hBuf;
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
++{
++ IMG_UINT32 n;
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
++{
++
++}
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
++{
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_CHAR* pszBuf = hBuffer;
++ IMG_UINT32 ui32Count = 0;
++
++ while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
++ {
++ ui32Count++;
++ }
++ return(ui32Count);
++}
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_UINT32 ui32Count = 0;
++ IMG_CHAR* pszBuf = hBuffer;
++
++
++ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
++
++
++ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++ if ((ui32Count >= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count-1] = '\r';
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++}
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream)
++{
++ return (IMG_HANDLE)gsDBGPdumpState.psStream[ePDumpStream];
++}
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream)
++{
++ PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream];
++ return gpfnDbgDrv->pfnGetStreamOffset(psStream);
++}
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID)
++{
++ return gsDBGPdumpState.ui32ParamFileNum;
++}
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ PDBG_STREAM psStream = (PDBG_STREAM)hStream;
++ return PDumpWriteILock(psStream,
++ psui8Data,
++ ui32Size,
++ ui32Flags);
++}
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
++{
++
++ PVR_UNREFERENCED_PARAMETER(hStream);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++}
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID)
++{
++ if(gpfnDbgDrv)
++ {
++ return IMG_TRUE;
++ }
++ return IMG_FALSE;
++}
++
++inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
++
++
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++ else
++ {
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++}
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ *pui32PageOffset = sCpuPAddr.uiAddr & (HOST_PAGESIZE() -1);
++ }
++ else
++ {
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ *pui32PageOffset = (IMG_UINT32)pui8LinAddr & (HOST_PAGESIZE() - 1);
++ }
++}
++
++
++
++IMG_VOID PDumpInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++
++ if (!gpfnDbgDrv)
++ {
++ DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
++
++
++
++
++ if (gpfnDbgDrv == IMG_NULL)
++ {
++ return;
++ }
++
++ if(!gsDBGPdumpState.pszFile)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0,
++ "Filename string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszMsg)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0,
++ "Message string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszScript)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0,
++ "Script string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
++ DEBUG_CAPMODE_FRAMED,
++ DEBUG_OUTMODE_STREAMENABLE,
++ 0,
++ 10);
++
++ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
++ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
++ }
++
++ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
++ PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FILE);
++ PDUMPCOMMENT("Start of Init Phase");
++ }
++
++ return;
++
++init_failed:
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++
++IMG_VOID PDumpDeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++ }
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Start Init Phase");
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Stop Init Phase");
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
++{
++ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++
++IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
++{
++ if (PDumpSuspended())
++ {
++ return IMG_FALSE;
++ }
++ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
++}
++
++PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
++{
++ IMG_UINT32 ui32Stream;
++
++ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
++ {
++ if (gsDBGPdumpState.psStream[ui32Stream])
++ {
++ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame)
++{
++ *pui32Frame = DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++ return PVRSRV_OK;
++}
++
++
++
++static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags);
++}
++
++
++static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Written = 0;
++ IMG_UINT32 ui32Off = 0;
++
++ if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0))
++ {
++ return IMG_TRUE;
++ }
++
++
++
++
++ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
++ {
++ IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++ {
++ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
++ {
++ DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
++ gsDBGPdumpState.ui32ParamFileNum++;
++ }
++ }
++ }
++
++
++ while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF))
++ {
++ ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
++
++
++
++
++ if (ui32Written == 0)
++ {
++ OSReleaseThreadQuanta();
++ }
++
++ if (ui32Written != 0xFFFFFFFF)
++ {
++ ui32Off += ui32Written;
++ ui32Count -= ui32Written;
++ }
++ }
++
++ if (ui32Written == 0xFFFFFFFF)
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
++{
++ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++
++static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream)
++{
++ return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32BytesWritten;
++
++ if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0)
++ {
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ (psStream->ui32Start == 0xFFFFFFFFUL) &&
++ (psStream->ui32End == 0xFFFFFFFFUL) &&
++ psStream->bInitPhaseComplete)
++ {
++ ui32BytesWritten = ui32BCount;
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++ else
++ {
++ if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
++ {
++ IMG_UINT32 ui32DbgFlags;
++
++ ui32DbgFlags = 0;
++ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++ {
++ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++ }
++
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, 1, ui32DbgFlags);
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++
++ return ui32BytesWritten;
++}
++
++
++IMG_VOID PDumpSuspendKM(IMG_VOID)
++{
++ atomic_inc(&gsPDumpSuspended);
++}
++
++IMG_VOID PDumpResumeKM(IMG_VOID)
++{
++ atomic_dec(&gsPDumpSuspended);
++}
++
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/private_data.h
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_PRIVATE_DATA_H_
++#define __INCLUDED_PRIVATE_DATA_H_
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include <linux/list.h>
++#include <drm/drmP.h>
++#endif
++
++typedef struct
++{
++
++ IMG_UINT32 ui32OpenPID;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++
++ IMG_HANDLE hKernelMemInfo;
++#endif
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ struct list_head sDRMAuthListItem;
++
++ struct drm_file *psDRMFile;
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++
++ IMG_UINT64 ui64Stamp;
++#endif
++
++
++ IMG_HANDLE hBlockAlloc;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ IMG_PVOID pPriv;
++#endif
++}
++PVRSRV_FILE_PRIVATE_DATA;
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/proc.c
+@@ -0,0 +1,970 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "linkage.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++
++
++static struct proc_dir_entry * dir;
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++static off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off);
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off);
++#endif
++
++
++static const IMG_CHAR PVRProcDirRoot[] = "pvr";
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file);
++static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos);
++static void pvr_proc_seq_stop (struct seq_file *m, void *v);
++static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos);
++static int pvr_proc_seq_show (struct seq_file *m, void *v);
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
++
++static struct file_operations pvr_proc_operations =
++{
++ .open = pvr_proc_open,
++ .read = seq_read,
++ .write = pvr_proc_write,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++static struct seq_operations pvr_proc_seq_operations =
++{
++ .start = pvr_proc_seq_start,
++ .next = pvr_proc_seq_next,
++ .stop = pvr_proc_seq_stop,
++ .show = pvr_proc_seq_show,
++};
++
++static struct proc_dir_entry* g_pProcQueue;
++static struct proc_dir_entry* g_pProcVersion;
++static struct proc_dir_entry* g_pProcSysNodes;
++
++#ifdef DEBUG
++static struct proc_dir_entry* g_pProcDebugLevel;
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++static struct proc_dir_entry* g_pProcPowerLevel;
++#endif
++
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el);
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off);
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++{
++ IMG_INT n;
++ size_t space = size - (size_t)off;
++ va_list ap;
++
++ PVR_ASSERT(space >= 0);
++
++ va_start (ap, format);
++
++ n = vsnprintf (buffer+off, space, format, ap);
++
++ va_end (ap);
++
++ if (n >= (IMG_INT)space || n < 0)
++ {
++
++ buffer[size - 1] = 0;
++ return (off_t)(size - 1);
++ }
++ else
++ {
++ return (off + (off_t)n);
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
++{
++
++ if(!off)
++ return (void*)2;
++ return NULL;
++}
++
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ if(off == 1)
++ return (void*)2;
++
++ return NULL;
++}
++
++
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file)
++{
++ IMG_INT ret = seq_open(file, &pvr_proc_seq_operations);
++
++ struct seq_file *seq = (struct seq_file*)file->private_data;
++ struct proc_dir_entry* pvr_proc_entry = PDE(inode);
++
++
++ seq->private = pvr_proc_entry->data;
++ return ret;
++}
++
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct proc_dir_entry * dp;
++
++ dp = PDE(inode);
++
++ if (!dp->write_proc)
++ return -EIO;
++
++ return dp->write_proc(file, buffer, count, dp->data);
++}
++
++
++static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_TRUE);
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_FALSE);
++}
++
++static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ (*pos)++;
++ if( handlers->next != NULL)
++ return handlers->next( proc_seq_file, v, *pos );
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ handlers->show( proc_seq_file,v );
++ return 0;
++}
++
++
++
++static struct proc_dir_entry* CreateProcEntryInDirSeq(
++ struct proc_dir_entry *pdir,
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++ return NULL;
++ }
++
++ mode = S_IFREG;
++
++ if (show_handler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file=create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++ PVR_PROC_SEQ_HANDLERS *seq_handlers;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++
++ file->proc_fops = &pvr_proc_operations;
++ file->write_proc = whandler;
++
++
++ file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL);
++ if(file->data)
++ {
++ seq_handlers = (PVR_PROC_SEQ_HANDLERS*)file->data;
++ seq_handlers->next = next_handler;
++ seq_handlers->show = show_handler;
++ seq_handlers->off2element = off2element_handler;
++ seq_handlers->startstop = startstop_handler;
++ seq_handlers->data = data;
++
++ return file;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++ return 0;
++}
++
++
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ )
++{
++ return CreateProcEntrySeq(name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++ NULL);
++}
++
++struct proc_dir_entry* CreateProcEntrySeq (
++								   const IMG_CHAR * name,
++								   IMG_VOID* data,
++								   pvr_next_proc_seq_t next_handler,
++								   pvr_show_proc_seq_t show_handler,
++								   pvr_off2element_proc_seq_t off2element_handler,
++								   pvr_startstop_proc_seq_t startstop_handler,
++								   write_proc_t whandler
++								  )
++{
++	return CreateProcEntryInDirSeq(
++								   dir,
++								   name,
++								   data,
++								   next_handler,
++								   show_handler,
++								   off2element_handler,
++								   startstop_handler,
++								   whandler
++								  );
++}
++
++
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot));
++ return NULL;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data"));
++
++ return NULL;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++ return NULL;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID));
++ return NULL;
++ }
++ }
++ }
++
++ return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler,
++ show_handler,off2element_handler,startstop_handler,whandler);
++}
++
++
++IMG_VOID RemoveProcEntrySeq( struct proc_dir_entry* proc_entry )
++{
++ if (dir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, proc_entry->name));
++
++ remove_proc_entry(proc_entry->name, dir);
++ if( data)
++ kfree( data );
++
++ }
++}
++
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", proc_entry->name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", proc_entry->name, psPerProc->psProcDir->name));
++
++ remove_proc_entry(proc_entry->name, psPerProc->psProcDir);
++ if(data)
++ kfree( data );
++ }
++}
++
++#endif
++
++static IMG_INT pvr_read_proc(IMG_CHAR *page, IMG_CHAR **start, off_t off,
++ IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ pvr_read_proc_t *pprn = (pvr_read_proc_t *)data;
++
++ off_t len = pprn (page, (size_t)count, off);
++
++ if (len == END_OF_FILE)
++ {
++ len = 0;
++ *eof = 1;
++ }
++ else if (!len)
++ {
++ *start = (IMG_CHAR *) 0;
++ }
++ else
++ {
++ *start = (IMG_CHAR *) 1;
++ }
++
++ return len;
++}
++
++
++static IMG_INT CreateProcEntryInDir(struct proc_dir_entry *pdir, const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!pdir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDir: parent directory doesn't exist"));
++
++ return -ENOMEM;
++ }
++
++ mode = S_IFREG;
++
++ if (rhandler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ file->read_proc = rhandler;
++ file->write_proc = whandler;
++ file->data = data;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name, pdir->name));
++
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot create proc entry %s in %s", name, pdir->name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: /proc/%s doesn't exist", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: no per process data"));
++
++ return -ENOMEM;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++
++ return -ENOMEM;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", PVRProcDirRoot, ui32PID));
++
++ return -ENOMEM;
++ }
++ }
++ }
++
++ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreateProcReadEntry(const IMG_CHAR * name, pvr_read_proc_t handler)
++{
++ struct proc_dir_entry * file;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++ }
++
++ file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (IMG_VOID *)handler);
++
++ if (file)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntries(IMG_VOID)
++{
++ dir = proc_mkdir (PVRProcDirRoot, NULL);
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL);
++ g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL);
++ g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL);
++
++ if(!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes)
++#else
++ if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++ CreateProcReadEntry("version", procDumpVersion) ||
++ CreateProcReadEntry("nodes", procDumpSysNodes))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL,
++ ProcSeqShowDebugLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRDebugProcSetLevel);
++ if(!g_pProcDebugLevel)
++#else
++ if (CreateProcEntry ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL,
++ ProcSeqShowPowerLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRProcSetPowerLevel);
++ if(!g_pProcPowerLevel)
++#else
++ if (CreateProcEntry("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#endif
++#endif
++
++ return 0;
++}
++
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name)
++{
++ if (dir)
++ {
++ remove_proc_entry(name, dir);
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR *name)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ remove_proc_entry(name, psPerProc->psProcDir);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", name, psPerProc->psProcDir->name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->psProcDir)
++ {
++ while (psPerProc->psProcDir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name));
++
++ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
++ }
++ RemoveProcEntry(psPerProc->psProcDir->name);
++ }
++}
++
++IMG_VOID RemoveProcEntries(IMG_VOID)
++{
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcDebugLevel );
++#else
++ RemoveProcEntry("debug_level");
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcPowerLevel );
++#else
++ RemoveProcEntry("power_control");
++#endif
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcQueue);
++ RemoveProcEntrySeq(g_pProcVersion);
++ RemoveProcEntrySeq(g_pProcSysNodes);
++#else
++ RemoveProcEntry("queue");
++ RemoveProcEntry("version");
++ RemoveProcEntry("nodes");
++#endif
++
++ while (dir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name));
++
++ RemoveProcEntry(dir->subdir->name);
++ }
++
++ remove_proc_entry(PVRProcDirRoot, NULL);
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString);
++}
++
++#else
++
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off)
++{
++	SYS_DATA *psSysData;
++
++	if (off == 0)
++	{
++		return printAppend(buf, size, 0,
++							"Version %s (%s) %s\n",
++							PVRVERSION_STRING,
++							PVR_BUILD_TYPE, PVR_BUILD_DIR);
++	}
++
++	SysAcquireData(&psSysData);
++
++	if (off == 1)
++	{
++		IMG_CHAR *pszSystemVersionString = "None";
++
++		if(psSysData->pszVersionString)
++		{
++			pszSystemVersionString = psSysData->pszVersionString;
++		}
++
++		if(strlen(pszSystemVersionString)
++			+ strlen("System Version String: \n")
++			+ 1 > size)
++		{
++			return 0;
++		}
++		return printAppend(buf, size, 0,
++							"System Version String: %s\n",
++							pszSystemVersionString);
++	}
++
++	return END_OF_FILE;
++}
++
++#endif
++
++
++static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++ switch (deviceType)
++ {
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceType);
++
++ return text;
++ }
++ }
++}
++
++
++static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++ switch (deviceClass)
++ {
++ case PVRSRV_DEVICE_CLASS_3D:
++ {
++ return "3D";
++ }
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ return "display";
++ }
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ return "buffer";
++ }
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceClass);
++ return text;
++ }
++ }
++}
++
++IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ seq_printf( sfile,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++
++}
++
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++
++ return (void*)psDevNode;
++}
++
++#else
++
++static
++off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ off_t len;
++
++
++ if (size < 80)
++ {
++ return 0;
++ }
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++ if (!psDevNode)
++ {
++ return END_OF_FILE;
++ }
++
++ len = printAppend(buf, size, 0,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++ return (len);
++}
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/proc.h
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(IMG_CHAR *, size_t, off_t);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t);
++typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t);
++typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*);
++typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start);
++
++typedef struct _PVR_PROC_SEQ_HANDLERS_ {
++ pvr_next_proc_seq_t *next;
++ pvr_show_proc_seq_t *show;
++ pvr_off2element_proc_seq_t *off2element;
++ pvr_startstop_proc_seq_t *startstop;
++ IMG_VOID *data;
++} PVR_PROC_SEQ_HANDLERS;
++
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off);
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off);
++
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++ __attribute__((format(printf, 4, 5)));
++
++IMG_INT CreateProcEntries(IMG_VOID);
++
++IMG_INT CreateProcReadEntry (const IMG_CHAR * name, pvr_read_proc_t handler);
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemoveProcEntries(IMG_VOID);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ );
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++
++IMG_VOID RemoveProcEntrySeq(struct proc_dir_entry* proc_entry);
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry);
++
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+@@ -0,0 +1,651 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "pvr_bridge_km.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#include "pvr_drm.h"
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++
++#ifdef MODULE_TEST
++#include "pvr_test_bridge.h"
++#include "kern_test.h"
++#endif
++
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_ProcBridgeStats =0;
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off);
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start);
++
++#else
++static off_t printLinuxBridgeStats(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++static IMG_UINT64 ui64Stamp;
++#endif
++
++PVRSRV_ERROR
++LinuxBridgeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcBridgeStats = CreateProcReadEntrySeq(
++ "bridge_stats",
++ NULL,
++ ProcSeqNextBridgeStats,
++ ProcSeqShowBridgeStats,
++ ProcSeqOff2ElementBridgeStats,
++ ProcSeqStartstopBridgeStats
++ );
++ iStatus = !g_ProcBridgeStats ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++#endif
++
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++ return CommonBridgeInit();
++}
++
++IMG_VOID
++LinuxBridgeDeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcBridgeStats);
++#else
++ RemoveProcEntry("bridge_stats");
++#endif
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&gPVRSRVLock);
++ }
++ else
++ {
++ LinuxUnLockMutex(&gPVRSRVLock);
++ }
++}
++
++
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ return (void*)0;
++ }
++
++
++ return (void*)&g_BridgeDispatchTable[off-1];
++}
++
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementBridgeStats(sfile,off);
++}
++
++
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = ( PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ return;
++ }
++
++ seq_printf(sfile,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++}
++
++#else
++
++static off_t
++printLinuxBridgeStats(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++ off_t Ret;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ goto unlock_and_return;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 300)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psEntry = &g_BridgeDispatchTable[off-1];
++ Ret = printAppend(buffer, count, 0,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return Ret;
++}
++#endif
++#endif
++
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT
++PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++IMG_INT32
++PVRSRV_BridgeDispatchKM(struct file *pFile, IMG_UINT unref__ ioctlCmd, IMG_UINT32 arg)
++#endif
++{
++ IMG_UINT32 cmd;
++#if !defined(SUPPORT_DRI_DRM)
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++#endif
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_INT err = -EFAULT;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVR_ASSERT(psBridgePackageKM != IMG_NULL);
++#else
++ PVR_UNREFERENCED_PARAMETER(ioctlCmd);
++
++ psBridgePackageKM = &sBridgePackageKM;
++
++ if(!OSAccessOK(PVR_VERIFY_WRITE,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
++ __FUNCTION__));
++
++ goto unlock_and_return;
++ }
++
++
++ if(OSCopyFromUser(IMG_NULL,
++ psBridgePackageKM,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE))
++ != PVRSRV_OK)
++ {
++ goto unlock_and_return;
++ }
++#endif
++
++ cmd = psBridgePackageKM->ui32BridgeID;
++
++#if defined(MODULE_TEST)
++ switch (cmd)
++ {
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM1:
++ {
++ PVRSRV_ERROR eError = MemTest1();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM2:
++ {
++ PVRSRV_ERROR eError = MemTest2();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE:
++ {
++ PVRSRV_ERROR eError = ResourceTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT:
++ {
++ PVRSRV_ERROR eError = EventObjectTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING:
++ {
++ PVRSRV_ERROR eError = MemMappingTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID:
++ {
++ PVRSRV_ERROR eError = ProcessIDTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS:
++ {
++ PVRSRV_ERROR eError = ClockusWaitusTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_TIMER:
++ {
++ PVRSRV_ERROR eError = TimerTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV:
++ {
++ PVRSRV_ERROR eError = PrivSrvTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA:
++ {
++ IMG_UINT32 ui32PID;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ PVRSRV_ERROR eError;
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d", ui32PID);
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++
++ eError = CopyDataTest(psBridgePackageKM->pvParamIn, psBridgePackageKM->pvParamOut, psPerProc);
++
++ *(PVRSRV_ERROR*)psBridgePackageKM->pvParamOut = eError;
++ err = 0;
++ goto unlock_and_return;
++ }
++
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT:
++ {
++ PVRSRV_ERROR eError = PowerMgmtTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ }
++#endif
++
++ if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_PVOID *)&psPerProc,
++ psBridgePackageKM->hKernelServices,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
++ __FUNCTION__, eError));
++ goto unlock_and_return;
++ }
++
++ if(psPerProc->ui32PID != ui32PID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
++ "belonging to process %d", __FUNCTION__, ui32PID,
++ psPerProc->ui32PID));
++ goto unlock_and_return;
++ }
++ }
++ else
++ {
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if(psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++ "Couldn't create per-process data area"));
++ goto unlock_and_return;
++ }
++ }
++
++ psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo "
++ "per file descriptor", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
++ (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(!psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no "
++ "associated MemInfo handle", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ psMapDevMemIN->hKernelMemInfo = psPrivateData->hKernelMemInfo;
++ break;
++ }
++
++ default:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried "
++ "to use privileged service", __FUNCTION__));
++ goto unlock_and_return;
++ }
++ break;
++ }
++ }
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ int authenticated = pFile->authenticated;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (authenticated)
++ {
++ break;
++ }
++
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__));
++ err = -EFAULT;
++ goto unlock_and_return;
++ }
++
++ list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem)
++ {
++ struct drm_file *psDRMFile = psPrivateData->psDRMFile;
++
++ if (pFile->master == psDRMFile->master)
++ {
++ authenticated |= psDRMFile->authenticated;
++ if (authenticated)
++ {
++ break;
++ }
++ }
++ }
++
++ if (!authenticated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__));
++ err = -EPERM;
++ goto unlock_and_return;
++ }
++ break;
++ }
++ default:
++ break;
++ }
++#endif
++
++ err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
++ if(err != PVRSRV_OK)
++ goto unlock_and_return;
++
++ switch(cmd)
++ {
++#if defined(PVR_SECURE_FD_EXPORT)
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT =
++ (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp = ++ui64Stamp;
++#endif
++ break;
++ }
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++ psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp = psPrivateData->ui64Stamp;
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut;
++ psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp = ++ui64Stamp;
++ break;
++ }
++#endif
++
++ default:
++ break;
++ }
++
++unlock_and_return:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return err;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+@@ -0,0 +1,426 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/hardirq.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/tty.h>
++#include <stdarg.h>
++#include "img_types.h"
++#include "servicesext.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "linkage.h"
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++#define PVR_MAX_FILEPATH_LEN 256
++
++static IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#endif
++
++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
++
++static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
++
++static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
++
++static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ;
++
++static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
++
++#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
++
++static inline void GetBufferLock(unsigned long *pulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ {
++ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
++ }
++ else
++ {
++ LinuxLockMutex(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void ReleaseBufferLock(unsigned long ulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ {
++ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
++ }
++ else
++ {
++ LinuxUnLockMutex(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
++{
++ if (USE_SPIN_LOCK)
++ {
++ *ppszBuf = gszBufferIRQ;
++ *pui32BufSiz = sizeof(gszBufferIRQ);
++ }
++ else
++ {
++ *ppszBuf = gszBufferNonIRQ;
++ *pui32BufSiz = sizeof(gszBufferNonIRQ);
++ }
++}
++
++static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs)
++{
++ IMG_UINT32 ui32Used;
++ IMG_UINT32 ui32Space;
++ IMG_INT32 i32Len;
++
++ ui32Used = strlen(pszBuf);
++ BUG_ON(ui32Used >= ui32BufSiz);
++ ui32Space = ui32BufSiz - ui32Used;
++
++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
++ pszBuf[ui32BufSiz - 1] = 0;
++
++
++ return (i32Len < 0 || i32Len >= ui32Space);
++}
++
++IMG_VOID PVRDPFInit(IMG_VOID)
++{
++ LinuxInitMutex(&gsDebugMutexNonIRQ);
++}
++
++IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
++{
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++ va_end(vaArgs);
++
++}
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++IMG_VOID PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
++{
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
++ BUG();
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
++{
++ va_list VArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(VArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ strncpy(pszBuf, "PVR: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end(VArgs);
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
++{
++ va_list VArgs;
++ IMG_BOOL bTrunc;
++
++ va_start (VArgs, pszFormat);
++
++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
++
++ va_end (VArgs);
++
++ return bTrunc;
++}
++
++IMG_VOID PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFullFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++ const IMG_CHAR *pszFileName = pszFullFileName;
++ IMG_CHAR *pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1));
++ break;
++ }
++ default:
++ {
++ strncpy (pszBuf, "PVR_K:(Unknown message level)", (ui32BufSiz -1));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++ }
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++
++ if (!bTrace)
++ {
++#ifdef DEBUG_LOG_PATH_TRUNCATE
++
++ static IMG_CHAR szFileNameRewrite[PVR_MAX_FILEPATH_LEN];
++
++ IMG_CHAR* pszTruncIter;
++ IMG_CHAR* pszTruncBackInter;
++
++
++ pszFileName = pszFullFileName + strlen(DEBUG_LOG_PATH_TRUNCATE)+1;
++
++
++ strncpy(szFileNameRewrite, pszFileName,PVR_MAX_FILEPATH_LEN);
++
++ if(strlen(szFileNameRewrite) == PVR_MAX_FILEPATH_LEN-1) {
++ IMG_CHAR szTruncateMassage[] = "FILENAME TRUNCATED";
++ strcpy(szFileNameRewrite + (PVR_MAX_FILEPATH_LEN - 1 - strlen(szTruncateMassage)), szTruncateMassage);
++ }
++
++ pszTruncIter = szFileNameRewrite;
++ while(*pszTruncIter++ != 0)
++ {
++ IMG_CHAR* pszNextStartPoint;
++
++ if(
++ !( ( *pszTruncIter == '/' && (pszTruncIter-4 >= szFileNameRewrite) ) &&
++ ( *(pszTruncIter-1) == '.') &&
++ ( *(pszTruncIter-2) == '.') &&
++ ( *(pszTruncIter-3) == '/') )
++ ) continue;
++
++
++ pszTruncBackInter = pszTruncIter - 3;
++ while(*(--pszTruncBackInter) != '/')
++ {
++ if(pszTruncBackInter <= szFileNameRewrite) break;
++ }
++ pszNextStartPoint = pszTruncBackInter;
++
++
++ while(*pszTruncIter != 0)
++ {
++ *pszTruncBackInter++ = *pszTruncIter++;
++ }
++ *pszTruncBackInter = 0;
++
++
++ pszTruncIter = pszNextStartPoint;
++ }
++
++ pszFileName = szFileNameRewrite;
++
++ if(*pszFileName == '/') pszFileName++;
++#endif
++
++#if !defined(__sh__)
++ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ if (BAppend(pszBuf, ui32BufSiz, " [%lu, %s]", ui32Line, pszFileName))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end (vaArgs);
++ }
++}
++
++#endif
++
++#if defined(DEBUG)
++
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
++{
++ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",(IMG_UINT)uDebugLevel);
++
++ gPVRDebugLevel = uDebugLevel;
++}
++
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++#define _PROC_SET_BUFFER_SZ 2
++ IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ];
++
++ if (count != _PROC_SET_BUFFER_SZ)
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRDebugSetLevel(data_buffer[0] - '0');
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRDebugLevel);
++}
++
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+@@ -0,0 +1,310 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SUPPORT_DRI_DRM)
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <asm/ioctl.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "pvr_bridge.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "pvrversion.h"
++#include "lock.h"
++#include "linkage.h"
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#define MAKENAME_HELPER(x, y) x ## y
++#define MAKENAME(x, y) MAKENAME_HELPER(x, y)
++
++#define PVR_DRM_NAME "pvrsrvkm"
++#define PVR_DRM_DESC "Imagination Technologies PVR DRM"
++
++#define PVR_PCI_IDS \
++ {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0, 0, 0}
++
++struct pci_dev *gpsPVRLDMDev;
++struct drm_device *gpsPVRDRMDev;
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
++#error "Linux kernel version 2.6.25 or later required for PVR DRM support"
++#endif
++
++#define PVR_DRM_FILE struct drm_file *
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static struct pci_device_id asPciIdList[] = {
++ PVR_PCI_IDS
++};
++#endif
++
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
++{
++ IMG_INT iRes;
++
++ PVR_TRACE(("PVRSRVDrmLoad"));
++
++ gpsPVRDRMDev = dev;
++ gpsPVRLDMDev = dev->pdev;
++
++#if defined(PDUMP)
++ iRes = dbgdrv_init();
++ if (iRes != 0)
++ {
++ return iRes;
++ }
++#endif
++
++ iRes = PVRCore_Init();
++ if (iRes != 0)
++ {
++ goto exit_dbgdrv_cleanup;
++ }
++
++#if defined(DISPLAY_CONTROLLER)
++ iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(dev);
++ if (iRes != 0)
++ {
++ goto exit_pvrcore_cleanup;
++ }
++#endif
++ return 0;
++
++#if defined(DISPLAY_CONTROLLER)
++exit_pvrcore_cleanup:
++ PVRCore_Cleanup();
++#endif
++exit_dbgdrv_cleanup:
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++ return iRes;
++}
++
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev)
++{
++ PVR_TRACE(("PVRSRVDrmUnload"));
++
++#if defined(DISPLAY_CONTROLLER)
++ PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(dev);
++#endif
++
++ PVRCore_Cleanup();
++
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++
++ return 0;
++}
++
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVOpen(dev, file);
++}
++
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ PVRSRVRelease(dev, file);
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++IMG_INT
++PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRDRMPCIBusIDField(struct drm_device *dev, IMG_UINT32 *pui32Field, IMG_UINT32 ui32FieldType)
++{
++ struct pci_dev *psPCIDev = (struct pci_dev *)dev->pdev;
++
++ switch (ui32FieldType)
++ {
++ case PVR_DRM_PCI_DOMAIN:
++ *pui32Field = pci_domain_nr(psPCIDev->bus);
++ break;
++
++ case PVR_DRM_PCI_BUS:
++ *pui32Field = psPCIDev->bus->number;
++ break;
++
++ case PVR_DRM_PCI_DEV:
++ *pui32Field = PCI_SLOT(psPCIDev->devfn);
++ break;
++
++ case PVR_DRM_PCI_FUNC:
++ *pui32Field = PCI_FUNC(psPCIDev->devfn);
++ break;
++
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++	IMG_UINT32 *pui32Args = (IMG_UINT32 *)arg;
++	IMG_UINT32 ui32Cmd = pui32Args[0];
++	IMG_UINT32 ui32Arg1 = pui32Args[1];
++	IMG_UINT32 *pui32OutArg = (IMG_UINT32 *)arg;
++	IMG_INT ret = 0;
++
++	LinuxLockMutex(&gPVRSRVLock);
++
++	switch (ui32Cmd)
++	{
++		case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
++			*pui32OutArg = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0;
++			break;
++
++		case PVR_DRM_UNPRIV_BUSID_TYPE:
++			*pui32OutArg = PVR_DRM_BUS_TYPE_PCI;
++			break;
++
++		case PVR_DRM_UNPRIV_BUSID_FIELD:
++			ret = PVRDRMPCIBusIDField(dev, pui32OutArg, ui32Arg1);
++			break;	/* was missing: fell through to default, clobbering ret with -EFAULT */
++		default:
++			ret = -EFAULT;
++	}
++
++	LinuxUnLockMutex(&gPVRSRVLock);
++
++	return ret;
++}
++
++#if 0
++struct drm_ioctl_desc sPVRDrmIoctls[] = {
++ DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++ DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++ DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++#if defined(PDUMP)
++ DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++};
++
++static IMG_INT pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
++
++static struct drm_driver sPVRDrmDriver =
++{
++ .driver_features = 0,
++ .dev_priv_size = sizeof(sPVRDrmBuffer),
++ .load = PVRSRVDrmLoad,
++ .unload = PVRSRVDrmUnload,
++ .open = PVRSRVDrmOpen,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .ioctls = sPVRDrmIoctls,
++ .fops =
++ {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .ioctl = drm_ioctl,
++ .mmap = PVRMMap,
++ .poll = drm_poll,
++ .fasync = drm_fasync,
++ },
++ .pci_driver =
++ {
++ .name = PVR_DRM_NAME,
++ .id_table = asPciIdList,
++ },
++
++ .name = PVR_DRM_NAME,
++ .desc = PVR_DRM_DESC,
++ .date = PVR_BUILD_DATE,
++ .major = PVRVERSION_MAJ,
++ .minor = PVRVERSION_MIN,
++ .patchlevel = PVRVERSION_BUILD,
++};
++
++static IMG_INT __init PVRSRVDrmInit(IMG_VOID)
++{
++ IMG_INT iRes;
++ sPVRDrmDriver.num_ioctls = pvr_max_ioctl;
++
++
++ PVRDPFInit();
++
++ iRes = drm_init(&sPVRDrmDriver);
++
++ return iRes;
++}
++
++static IMG_VOID __exit PVRSRVDrmExit(IMG_VOID)
++{
++ drm_exit(&sPVRDrmDriver);
++}
++
++module_init(PVRSRVDrmInit);
++module_exit(PVRSRVDrmExit);
++#endif
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+@@ -0,0 +1,80 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_H__)
++#define __PVR_DRM_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y
++#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y)
++
++IMG_INT PVRCore_Init(IMG_VOID);
++IMG_VOID PVRCore_Cleanup(IMG_VOID);
++IMG_INT PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVRelease(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state);
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice);
++
++IMG_INT PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev);
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
++IMG_INT PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++#if defined(DISPLAY_CONTROLLER)
++extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *);
++extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *);
++#endif
++
++#if defined(PDUMP)
++int dbgdrv_init(void);
++void dbgdrv_cleanup(void);
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++#define PVR_DRM_SRVKM_IOCTL _IO(0, PVR_DRM_SRVKM_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL _IO(0, PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL _IO(0, PVR_DRM_UNPRIV_CMD)
++#define PVR_DRM_DBGDRV_IOCTL _IO(0, PVR_DRM_DBGDRV_CMD)
++#endif
++
++#endif
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+@@ -0,0 +1,637 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGX535DEFS_KM_H_
++#define _SGX535DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL 0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000UL
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATESTATUS 0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001UL
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010UL
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100UL
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000UL
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000UL
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000UL
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++#define EUR_CR_CLKGATECTLOVR 0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++#define EUR_CR_CORE_ID 0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFUL
++#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
++#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000UL
++#define EUR_CR_CORE_ID_ID_SHIFT 16
++#define EUR_CR_CORE_REVISION 0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFUL
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00UL
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000UL
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000UL
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++#define EUR_CR_SOFT_RESET 0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001UL
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002UL
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004UL
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008UL
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010UL
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020UL
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040UL
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS2 0x0118
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS 0x012CUL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_ENABLE 0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR 0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_PDS 0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040UL
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++#define EUR_CR_PDS_EXEC_BASE 0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
++#define EUR_CR_EVENT_KICKER 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0UL
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
++#define EUR_CR_EVENT_KICK 0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001UL
++#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
++#define EUR_CR_EVENT_TIMER 0x0ACC
++#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000UL
++#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
++#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFUL
++#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
++#define EUR_CR_PDS_INV0 0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV0_DSC_SHIFT 0
++#define EUR_CR_PDS_INV1 0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV1_DSC_SHIFT 0
++#define EUR_CR_PDS_INV2 0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV2_DSC_SHIFT 0
++#define EUR_CR_PDS_INV3 0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV3_DSC_SHIFT 0
++#define EUR_CR_PDS_INV_CSC 0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001UL
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
++#define EUR_CR_PDS_PC_BASE 0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFUL
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_CTRL 0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001UL
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002UL
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004UL
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008UL
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010UL
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00010000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 16
++#define EUR_CR_BIF_INT_STAT 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFUL
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000UL
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
++#define EUR_CR_BIF_FAULT 0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
++#define EUR_CR_BIF_TILE0 0x0C0C
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE1 0x0C10
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE2 0x0C14
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE3 0x0C18
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE4 0x0C1C
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE5 0x0C20
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE6 0x0C24
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE7 0x0C28
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE8 0x0C2C
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE9 0x0C30
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
++#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 12
++#define EUR_CR_BIF_BANK_SET 0x0C74
++#define EUR_CR_BIF_BANK_SET_SELECT_MASK 0x000003FFUL
++#define EUR_CR_BIF_BANK_SET_SELECT_SHIFT 0
++#define EUR_CR_BIF_BANK0 0x0C78
++#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_BANK1 0x0C7C
++#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_ADT_TTE 0x0C80
++#define EUR_CR_BIF_ADT_TTE_VALUE_MASK 0x000000FFUL
++#define EUR_CR_BIF_ADT_TTE_VALUE_SHIFT 0
++#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1 0x0C94
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_MASK 0x00007000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_SHIFT 12
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_MASK 0x00038000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_SHIFT 15
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2 0x0C98
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_CONFIG 0x0CA0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_MASK 0x0000000FUL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_MASK 0x00000FF0UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT 4
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_MASK 0x00FFF000UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT 12
++#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFUL
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_BANK_STATUS 0x0CB4
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001UL
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002UL
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
++#define EUR_CR_2D_BLIT_STATUS 0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFUL
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000UL
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
++#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EUL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFUL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++#define EUR_CR_2D_SOCIF 0x0E18
++#define EUR_CR_2D_SOCIF_FREESPACE_MASK 0x000000FFUL
++#define EUR_CR_2D_SOCIF_FREESPACE_SHIFT 0
++#define EUR_CR_2D_ALPHA 0x0E1C
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_MASK 0x0000FF00UL
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_SHIFT 8
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_MASK 0x000000FFUL
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_SHIFT 0
++#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFUL
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
++#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000UL
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#define EUR_CR_MNE_CR_CTRL 0x0D00
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000UL
++#define EUR_CR_MNE_CR_CTRL_INVAL 0x0D20
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgx540defs.h
+@@ -0,0 +1,620 @@
++/****************************************************************************
++ Name : sgx540defs.h
++ Author : Autogenerated
++ Copyright : 2009 by Imagination Technologies Limited.
++ All rights reserved. No part of this software, either
++ material or conceptual may be copied or distributed,
++ transmitted, transcribed, stored in a retrieval system or
++ translated into any human or computer language in any form
++ by any means, electronic, mechanical, manual or otherwise,
++ or disclosed to third parties without the express written
++ permission of Imagination Technologies Limited,
++ Home Park Estate, Kings Langley, Hertfordshire,
++ WD4 8LZ, U.K.
++****************************************************************************/
++
++#ifndef _SGX540DEFS_KM_H_
++#define _SGX540DEFS_KM_H_
++
++/* Register EUR_CR_CLKGATECTL */
++#define EUR_CR_CLKGATECTL 0x0000
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000C
++#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0
++#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
++#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300
++#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
++#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000
++#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000
++#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14
++#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000
++#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000
++#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
++/* Register EUR_CR_CLKGATECTL2 */
++#define EUR_CR_CLKGATECTL2 0x0004
++#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003
++#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000C
++#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2
++#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030
++#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0
++#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
++#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300
++#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00
++#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
++#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000
++#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000
++#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
++#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000
++#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000
++#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
++#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000
++#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20
++/* Register EUR_CR_CLKGATESTATUS */
++#define EUR_CR_CLKGATESTATUS 0x0008
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
++#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002
++#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
++#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008
++#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
++#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010
++#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
++#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040
++#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
++#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080
++#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
++#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100
++#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200
++#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9
++#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400
++#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
++#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800
++#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
++#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000
++#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
++#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000
++#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
++#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000
++#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14
++#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000
++#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
++#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000
++#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000
++#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
++#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000
++#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18
++#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000
++#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20
++/* Register EUR_CR_CLKGATECTLOVR */
++#define EUR_CR_CLKGATECTLOVR 0x000C
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
++#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000C
++#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0
++#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
++#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300
++#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
++#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000
++#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
++#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000
++#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
++#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000
++#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18
++/* Register EUR_CR_POWER */
++#define EUR_CR_POWER 0x001C
++#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001
++#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0
++/* Register EUR_CR_CORE_ID */
++#define EUR_CR_CORE_ID 0x0020
++#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFF
++#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
++#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000
++#define EUR_CR_CORE_ID_ID_SHIFT 16
++/* Register EUR_CR_CORE_REVISION */
++#define EUR_CR_CORE_REVISION 0x0024
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FF
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++/* Register EUR_CR_DESIGNER_REV_FIELD1 */
++#define EUR_CR_DESIGNER_REV_FIELD1 0x0028
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++/* Register EUR_CR_DESIGNER_REV_FIELD2 */
++#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++/* Register EUR_CR_SOFT_RESET */
++#define EUR_CR_SOFT_RESET 0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
++#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002
++#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
++#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008
++#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3
++#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010
++#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
++#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040
++#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7
++#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100
++#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8
++#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200
++#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9
++#define EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK 0x00000400
++#define EUR_CR_SOFT_RESET_CACHEL2_RESET_SHIFT 10
++#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800
++#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
++#define EUR_CR_SOFT_RESET_MADD_RESET_MASK 0x00001000
++#define EUR_CR_SOFT_RESET_MADD_RESET_SHIFT 12
++#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000
++#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13
++#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000
++#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15
++#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000
++#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17
++/* Register EUR_CR_EVENT_HOST_ENABLE2 */
++#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++/* Register EUR_CR_EVENT_HOST_CLEAR2 */
++#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++/* Register EUR_CR_EVENT_STATUS2 */
++#define EUR_CR_EVENT_STATUS2 0x0118
++#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010
++#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
++#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008
++#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
++#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004
++#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++/* Register EUR_CR_EVENT_STATUS */
++#define EUR_CR_EVENT_STATUS 0x012C
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++/* Register EUR_CR_EVENT_HOST_ENABLE */
++#define EUR_CR_EVENT_HOST_ENABLE 0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++/* Register EUR_CR_EVENT_HOST_CLEAR */
++#define EUR_CR_EVENT_HOST_CLEAR 0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++/* Register EUR_CR_EVENT_KICK1 */
++#define EUR_CR_EVENT_KICK1 0x0AB0
++#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FF
++#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
++/* Register EUR_CR_PDS_EXEC_BASE */
++#define EUR_CR_PDS_EXEC_BASE 0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
++/* Register EUR_CR_EVENT_KICK2 */
++#define EUR_CR_EVENT_KICK2 0x0AC0
++#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001
++#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
++/* Register EUR_CR_EVENT_KICKER */
++#define EUR_CR_EVENT_KICKER 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
++/* Register EUR_CR_EVENT_KICK */
++#define EUR_CR_EVENT_KICK 0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001
++#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
++/* Register EUR_CR_EVENT_TIMER */
++#define EUR_CR_EVENT_TIMER 0x0ACC
++#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000
++#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
++#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFF
++#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
++/* Register EUR_CR_PDS_INV0 */
++#define EUR_CR_PDS_INV0 0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV0_DSC_SHIFT 0
++/* Register EUR_CR_PDS_INV1 */
++#define EUR_CR_PDS_INV1 0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV1_DSC_SHIFT 0
++/* Register EUR_CR_EVENT_KICK3 */
++#define EUR_CR_EVENT_KICK3 0x0AD8
++#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001
++#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
++/* Register EUR_CR_PDS_INV3 */
++#define EUR_CR_PDS_INV3 0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001
++#define EUR_CR_PDS_INV3_DSC_SHIFT 0
++/* Register EUR_CR_PDS_INV_CSC */
++#define EUR_CR_PDS_INV_CSC 0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
++/* Register EUR_CR_PDS_PC_BASE */
++#define EUR_CR_PDS_PC_BASE 0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x00FFFFFF
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
++/* Register EUR_CR_BIF_CTRL */
++#define EUR_CR_BIF_CTRL 0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++/* Register EUR_CR_BIF_INT_STAT */
++#define EUR_CR_BIF_INT_STAT 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFF
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
++#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000
++#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15
++/* Register EUR_CR_BIF_FAULT */
++#define EUR_CR_BIF_FAULT 0x0C08
++#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0
++#define EUR_CR_BIF_FAULT_SB_SHIFT 4
++#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
++/* Register EUR_CR_BIF_DIR_LIST_BASE0 */
++#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++/* Register EUR_CR_BIF_TA_REQ_BASE */
++#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
++/* Register EUR_CR_BIF_MEM_REQ_STAT */
++#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FF
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++/* Register EUR_CR_BIF_3D_REQ_BASE */
++#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
++/* Register EUR_CR_BIF_ZLS_REQ_BASE */
++#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
++/* Register EUR_CR_2D_BLIT_STATUS */
++#define EUR_CR_2D_BLIT_STATUS 0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFF
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
++/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */
++#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000E
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */
++#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFF
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++/* Table EUR_CR_USE_CODE_BASE */
++/* Register EUR_CR_USE_CODE_BASE */
++#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFF
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
++#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24
++/* Number of entries in table EUR_CR_USE_CODE_BASE */
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++#define EUR_CR_MNE_CR_CTRL 0x0D00
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_SHIFT 16
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_SHIFT 15
++#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_MASK 0x00007800
++#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_SHIFT 11
++#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400
++#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_SHIFT 10
++#define EUR_CR_MNE_CR_CTRL_BYPASS_MASK 0x000003E0
++#define EUR_CR_MNE_CR_CTRL_BYPASS_SHIFT 5
++#define EUR_CR_MNE_CR_CTRL_PAUSE_MASK 0x00000010
++#define EUR_CR_MNE_CR_CTRL_PAUSE_SHIFT 4
++#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_MASK 0x0000000E
++#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT 1
++#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_PDS_MASK (1<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+2)
++#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_USEC_MASK (1<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+1)
++#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK (1<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT)
++#define EUR_CR_MNE_CR_CTRL_INVAL_MASK 0x00000001
++#define EUR_CR_MNE_CR_CTRL_INVAL_SHIFT 0
++#define EUR_CR_MNE_CR_USE_INVAL 0x0D04
++#define EUR_CR_MNE_CR_USE_INVAL_ADDR_MASK 0xFFFFFFFF
++#define EUR_CR_MNE_CR_USE_INVAL_ADDR_SHIFT 0
++#define EUR_CR_MNE_CR_STAT 0x0D08
++#define EUR_CR_MNE_CR_STAT_PAUSED_MASK 0x00000400
++#define EUR_CR_MNE_CR_STAT_PAUSED_SHIFT 10
++#define EUR_CR_MNE_CR_STAT_READS_MASK 0x000003FF
++#define EUR_CR_MNE_CR_STAT_READS_SHIFT 0
++#define EUR_CR_MNE_CR_STAT_STATS 0x0D0C
++#define EUR_CR_MNE_CR_STAT_STATS_RST_MASK 0x000FFFF0
++#define EUR_CR_MNE_CR_STAT_STATS_RST_SHIFT 4
++#define EUR_CR_MNE_CR_STAT_STATS_SEL_MASK 0x0000000F
++#define EUR_CR_MNE_CR_STAT_STATS_SEL_SHIFT 0
++#define EUR_CR_MNE_CR_STAT_STATS_OUT 0x0D10
++#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_MASK 0xFFFFFFFF
++#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_SHIFT 0
++#define EUR_CR_MNE_CR_EVENT_STATUS 0x0D14
++#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_MASK 0x00000001
++#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_SHIFT 0
++#define EUR_CR_MNE_CR_EVENT_CLEAR 0x0D18
++#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_MASK 0x00000001
++#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_SHIFT 0
++
++#endif /* _SGX540DEFS_KM_H_ */
++
++/*****************************************************************************
++ End of file (sgx540defs.h)
++*****************************************************************************/
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define _SGXDEFS_H_
++
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#if defined(SGX520)
++#include "sgx520defs.h"
++#else
++#if defined(SGX530)
++#include "sgx530defs.h"
++#else
++#if defined(SGX535)
++#include "sgx535defs.h"
++#else
++#if defined(SGX535_V1_1)
++#include "sgx535defs.h"
++#else
++#if defined(SGX540)
++#include "sgx540defs.h"
++#else
++#if defined(SGX541)
++#include "sgx541defs.h"
++#else
++#if defined(SGX543)
++#include "sgx543defs.h"
++#else
++#if defined(SGX545)
++#include "sgx545defs.h"
++#else
++#if defined(SGX531)
++#include "sgx531defs.h"
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if defined(SGX541)
++#if SGX_CORE_REV == 100
++#include "sgx541_100mpdefs.h"
++#else
++#include "sgx541mpdefs.h"
++#endif
++#else
++#include "sgxmpdefs.h"
++#endif
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+@@ -0,0 +1,309 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++
++#if defined(SGX520) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX520 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 103
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 125
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX530 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++#endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX531) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX531 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 1111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 112
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 113
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 126
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX535 Core Revision unspecified"
++
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX540) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_25499
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX540 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX541) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_27270
++ #define FIX_HW_BRN_28011
++ #define FIX_HW_BRN_27510
++
++ #else
++ #if SGX_CORE_REV == 101
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX541 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX541 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX543) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX543 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX543 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX545) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_27266
++ #define FIX_HW_BRN_27456
++ #else
++ #if SGX_CORE_REV == 109
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX545 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if !defined(SGX_CORE_DEFINED)
++#if defined (__GNUC__)
++ #warning "sgxerrata.h: SGX Core Version unspecified"
++#else
++ #pragma message("sgxerrata.h: SGX Core Version unspecified")
++#endif
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+@@ -0,0 +1,163 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SGX520)
++ #define SGX_CORE_FRIENDLY_NAME "SGX520"
++ #define SGX_CORE_ID SGX_CORE_ID_520
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX530)
++ #define SGX_CORE_FRIENDLY_NAME "SGX530"
++ #define SGX_CORE_ID SGX_CORE_ID_530
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX535)
++ #define SGX_CORE_FRIENDLY_NAME "SGX535"
++ #define SGX_CORE_ID SGX_CORE_ID_535
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_2D_HARDWARE
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SUPPORT_SGX_GENERAL_MAPPING_HEAP
++#else
++#if defined(SGX540)
++ #define SGX_CORE_FRIENDLY_NAME "SGX540"
++ #define SGX_CORE_ID SGX_CORE_ID_540
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX541)
++ #define SGX_CORE_FRIENDLY_NAME "SGX541"
++ #define SGX_CORE_ID SGX_CORE_ID_541
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX543)
++ #define SGX_CORE_FRIENDLY_NAME "SGX543"
++ #define SGX_CORE_ID SGX_CORE_ID_543
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++ #define SGX_FEATURE_DATA_BREAKPOINTS
++#else
++#if defined(SGX531)
++ #define SGX_CORE_FRIENDLY_NAME "SGX531"
++ #define SGX_CORE_ID SGX_CORE_ID_531
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX545)
++ #define SGX_CORE_FRIENDLY_NAME "SGX545"
++ #define SGX_CORE_ID SGX_CORE_ID_545
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_DXT_TEXTURES
++ #define SGX_FEATURE_VOLUME_TEXTURES
++ #define SGX_FEATURE_HOST_ALLOC_FROM_DPM
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_NUM_USE_PIPES (4)
++ #define SGX_FEATURE_TEXTURESTRIDE_EXTENSION
++ #define SGX_FEATURE_PDS_DATA_INTERLEAVE_2DWORDS
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_ZLS_EXTERNALZ
++ #define SGX_FEATURE_VDM_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_ISP_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_NUM_PDS_PIPES (2)
++ #define SGX_FEATURE_NATIVE_BACKWARD_BLIT
++ #define SGX_FEATURE_MAX_TA_RENDER_TARGETS (512)
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_SECONDARY_REQUIRES_USE_KICK
++ #define SGX_FEATURE_DCU
++
++
++ #define SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(FIX_HW_BRN_22693)
++#undef SGX_FEATURE_AUTOCLOCKGATING
++#endif
++
++#if defined(FIX_HW_BRN_27266)
++#undef SGX_FEATURE_36BIT_MMU
++#endif
++
++#if defined(FIX_HW_BRN_27456)
++#undef SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++#endif
++
++#if defined(FIX_HW_BRN_22934) \
++ || defined(FIX_HW_BRN_25499)
++#undef SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ #if defined(SGX_FEATURE_36BIT_MMU)
++ #error SGX_FEATURE_SYSTEM_CACHE is incompatible with SGX_FEATURE_36BIT_MMU
++ #endif
++ #if defined(FIX_HW_BRN_26620) && !defined(SGX_FEATURE_MULTI_EVENT_KICK)
++ #define SGX_BYPASS_SYSTEM_CACHE
++ #endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if !defined(SGX_FEATURE_MP_CORE_COUNT)
++#error SGX_FEATURE_MP_CORE_COUNT must be defined when SGX_FEATURE_MP is defined
++#endif
++#else
++#define SGX_FEATURE_MP_CORE_COUNT (1)
++#endif
++
++#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING
++#endif
++
++#include "img_types.h"
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+@@ -0,0 +1,79 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT (12)
++#define SGX_MMU_PAGE_SIZE (1UL<<SGX_MMU_PAGE_SHIFT)
++#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1UL)
++
++#define SGX_MMU_PD_SHIFT (10)
++#define SGX_MMU_PD_SIZE (1UL<<SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK (0xFFC00000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PDE_VALID (0x00000001UL)
++#define SGX_MMU_PDE_PAGE_SIZE_4K (0x00000000UL)
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ #define SGX_MMU_PDE_PAGE_SIZE_16K (0x00000002UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_64K (0x00000004UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_256K (0x00000006UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_1M (0x00000008UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_4M (0x0000000AUL)
++ #define SGX_MMU_PDE_PAGE_SIZE_MASK (0x0000000EUL)
++#else
++ #define SGX_MMU_PDE_WRITEONLY (0x00000002UL)
++ #define SGX_MMU_PDE_READONLY (0x00000004UL)
++ #define SGX_MMU_PDE_CACHECONSISTENT (0x00000008UL)
++ #define SGX_MMU_PDE_EDMPROTECT (0x00000010UL)
++#endif
++
++#define SGX_MMU_PT_SHIFT (10)
++#define SGX_MMU_PT_SIZE (1UL<<SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK (0x003FF000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PTE_VALID (0x00000001UL)
++#define SGX_MMU_PTE_WRITEONLY (0x00000002UL)
++#define SGX_MMU_PTE_READONLY (0x00000004UL)
++#define SGX_MMU_PTE_CACHECONSISTENT (0x00000008UL)
++#define SGX_MMU_PTE_EDMPROTECT (0x00000010UL)
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/buffer_manager.h
+@@ -0,0 +1,218 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++#include "perproc.h"
++
++#if defined(__cplusplus)
++extern "C"{
++#endif
++
++typedef struct _BM_HEAP_ BM_HEAP;
++
++struct _BM_MAPPING_
++{
++ enum
++ {
++ hm_wrapped = 1,
++ hm_wrapped_scatter,
++ hm_wrapped_virtaddr,
++ hm_wrapped_scatter_virtaddr,
++ hm_env,
++ hm_contiguous
++ } eCpuMemoryOrigin;
++
++ BM_HEAP *pBMHeap;
++ RA_ARENA *pArena;
++
++ IMG_CPU_VIRTADDR CpuVAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++ IMG_SYS_PHYADDR *psSysAddr;
++ IMG_SIZE_T uSize;
++ IMG_HANDLE hOSMemHandle;
++ IMG_UINT32 ui32Flags;
++};
++
++typedef struct _BM_BUF_
++{
++ IMG_CPU_VIRTADDR *CpuVAddr;
++ IMG_VOID *hOSMemHandle;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++
++ BM_MAPPING *pMapping;
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32ExportCount;
++} BM_BUF;
++
++struct _BM_HEAP_
++{
++ IMG_UINT32 ui32Attribs;
++ BM_CONTEXT *pBMContext;
++ RA_ARENA *pImportArena;
++ RA_ARENA *pLocalDevMemArena;
++ RA_ARENA *pVMArena;
++ DEV_ARENA_DESCRIPTOR sDevArena;
++ MMU_HEAP *pMMUHeap;
++
++ struct _BM_HEAP_ *psNext;
++ struct _BM_HEAP_ **ppsThis;
++};
++
++struct _BM_CONTEXT_
++{
++ MMU_CONTEXT *psMMUContext;
++
++
++ BM_HEAP *psBMHeap;
++
++
++ BM_HEAP *psBMSharedHeap;
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ HASH_TABLE *pBufferHash;
++
++
++ IMG_HANDLE hResItem;
++
++ IMG_UINT32 ui32RefCount;
++
++
++
++ struct _BM_CONTEXT_ *psNext;
++ struct _BM_CONTEXT_ **ppsThis;
++};
++
++
++
++typedef IMG_VOID *BM_HANDLE;
++
++#define BP_POOL_MASK 0x7
++
++#define BP_CONTIGUOUS (1 << 3)
++#define BP_PARAMBUFFER (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS 2
++
++IMG_HANDLE
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated);
++
++
++PVRSRV_ERROR
++BM_DestroyContext (IMG_HANDLE hBMContext,
++ IMG_BOOL *pbCreated);
++
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap);
++
++
++IMG_BOOL
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL
++BM_Alloc (IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 *pui32Flags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf);
++
++IMG_BOOL
++BM_Wrap ( IMG_HANDLE hDevMemHeap,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Offset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 *pui32Flags,
++ BM_HANDLE *phBuf);
++
++IMG_VOID
++BM_Free (BM_HANDLE hBuf,
++ IMG_UINT32 ui32Flags);
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf);
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf);
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf);
++
++IMG_HANDLE
++BM_HandleToOSMemHandle (BM_HANDLE hBuf);
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++ IMG_UINT32 *pTotalBytes,
++ IMG_UINT32 *pAvailableBytes);
++
++
++IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap);
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext);
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap);
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext);
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_VOID BM_Export(BM_HANDLE hBuf);
++
++IMG_VOID BM_FreeExport(BM_HANDLE hBuf, IMG_UINT32 ui32Flags);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/device.h
+@@ -0,0 +1,278 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include "ra.h"
++#include "resman.h"
++
++typedef struct _BM_CONTEXT_ BM_CONTEXT;
++
++typedef struct _MMU_HEAP_ MMU_HEAP;
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE;
++#define DEVICE_MEMORY_HEAP_PERCONTEXT 0
++#define DEVICE_MEMORY_HEAP_KERNEL 1
++#define DEVICE_MEMORY_HEAP_SHARED 2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2
++
++typedef struct _DEVICE_MEMORY_HEAP_INFO_
++{
++
++ IMG_UINT32 ui32HeapID;
++
++
++ IMG_CHAR *pszName;
++
++
++ IMG_CHAR *pszBSName;
++
++
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++
++
++ IMG_UINT32 ui32HeapSize;
++
++
++ IMG_UINT32 ui32Attribs;
++
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ IMG_HANDLE hDevMemHeap;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++} DEVICE_MEMORY_HEAP_INFO;
++
++typedef struct _DEVICE_MEMORY_INFO_
++{
++
++ IMG_UINT32 ui32AddressSpaceSizeLog2;
++
++
++
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_UINT32 ui32HeapCount;
++
++
++ IMG_UINT32 ui32SyncHeapID;
++
++
++ IMG_UINT32 ui32MappingHeapID;
++
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ BM_CONTEXT *pBMKernelContext;
++
++
++ BM_CONTEXT *pBMContext;
++
++} DEVICE_MEMORY_INFO;
++
++
++typedef struct DEV_ARENA_DESCRIPTOR_TAG
++{
++ IMG_UINT32 ui32HeapID;
++
++ IMG_CHAR *pszName;
++
++ IMG_DEV_VIRTADDR BaseDevVAddr;
++
++ IMG_UINT32 ui32Size;
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++
++} DEV_ARENA_DESCRIPTOR;
++
++typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
++
++typedef struct _PVRSRV_DEVICE_NODE_
++{
++ PVRSRV_DEVICE_IDENTIFIER sDevId;
++ IMG_UINT32 ui32RefCount;
++
++
++
++
++ PVRSRV_ERROR (*pfnInitDevice) (IMG_VOID*);
++
++ PVRSRV_ERROR (*pfnDeInitDevice) (IMG_VOID*);
++
++
++ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
++
++
++ PVRSRV_ERROR (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
++ IMG_VOID (*pfnMMUFinalise)(MMU_CONTEXT*);
++ IMG_VOID (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
++ MMU_HEAP* (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**);
++ IMG_VOID (*pfnMMUDelete)(MMU_HEAP*);
++ IMG_BOOL (*pfnMMUAlloc)(MMU_HEAP*pMMU,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++ IMG_VOID (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32);
++ IMG_VOID (*pfnMMUEnable)(MMU_HEAP*);
++ IMG_VOID (*pfnMMUDisable)(MMU_HEAP*);
++ IMG_VOID (*pfnMMUMapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID (*pfnMMUMapShadow)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_VOID (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_DEV_PHYADDR (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++ IMG_DEV_PHYADDR (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
++
++
++ IMG_BOOL (*pfnDeviceISR)(IMG_VOID*);
++
++ IMG_VOID *pvISRData;
++
++ IMG_UINT32 ui32SOCInterruptBit;
++
++ IMG_VOID (*pfnDeviceMISR)(IMG_VOID*);
++
++
++ IMG_VOID (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
++
++ IMG_BOOL bReProcessDeviceCommandComplete;
++
++
++ DEVICE_MEMORY_INFO sDevMemoryInfo;
++
++
++ IMG_VOID *pvDevice;
++ IMG_UINT32 ui32pvDeviceSize;
++
++
++ PRESMAN_CONTEXT hResManContext;
++
++
++ PSYS_DATA psSysData;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++ IMG_UINT32 ui32Flags;
++
++ struct _PVRSRV_DEVICE_NODE_ *psNext;
++ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
++} PVRSRV_DEVICE_NODE;
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ IMG_UINT32 ui32SOCInterruptBit,
++ IMG_UINT32 *pui32DeviceIndex );
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++
++#endif
++
++
++#if defined (USING_ISR_INTERRUPTS)
++PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++#endif
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/handle.h
+@@ -0,0 +1,382 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++typedef enum
++{
++ PVRSRV_HANDLE_TYPE_NONE = 0,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_TYPE_MMAP_INFO,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER
++} PVRSRV_HANDLE_TYPE;
++
++typedef enum
++{
++
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04
++} PVRSRV_HANDLE_ALLOC_FLAG;
++
++struct _PVRSRV_HANDLE_BASE_;
++typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE;
++
++#ifdef PVR_SECURE_HANDLES
++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle);
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize);
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle);
++
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase);
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID);
++
++#else
++
++#define KERNEL_HANDLE_BASE IMG_NULL
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(eFlag);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(eFlag);
++ PVR_UNREFERENCED_PARAMETER(hParent);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandleAnyType)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *peType = PVRSRV_HANDLE_TYPE_NONE;
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(hAncestor);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetParentHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++
++ *phParent = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupAndReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVNewHandleBatch)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(ui32BatchSize);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVCommitHandleBatch)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandleBatch)
++#endif
++static INLINE
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVSetMaxHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(ui32MaxHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetMaxHandle)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return 0;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVEnableHandlePurging)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVPurgeHandles)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ *ppsBase = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFreeHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleDeInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \
++ (IMG_VOID)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag)
++
++#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \
++ (IMG_VOID)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent)
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/hash.h
+@@ -0,0 +1,73 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++typedef struct _HASH_TABLE_ HASH_TABLE;
++
++IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++
++IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
++
++IMG_VOID HASH_Delete (HASH_TABLE *pHash);
++
++IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
++
++IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
++
++IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++#ifdef HASH_TRACE
++IMG_VOID HASH_Dump (HASH_TABLE *pHash);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/lists.h
+@@ -0,0 +1,176 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LISTS_UTILS__
++#define __LISTS_UTILS__
++
++#include <stdarg.h>
++#include "img_types.h"
++
++#define DECLARE_LIST_FOR_EACH(TYPE) \
++IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
++IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\
++{\
++ while(psHead)\
++ {\
++ pfnCallBack(psHead);\
++ psHead = psHead->psNext;\
++ }\
++}
++
++
++#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
++IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
++IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
++{\
++ va_list ap;\
++ while(psHead)\
++ {\
++ va_start(ap, pfnCallBack);\
++ pfnCallBack(psHead, ap);\
++ psHead = psHead->psNext;\
++ va_end(ap);\
++ }\
++}
++
++
++#define DECLARE_LIST_ANY(TYPE) \
++IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY(TYPE) \
++IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\
++{ \
++ IMG_VOID *pResult;\
++ TYPE *psNextNode;\
++ pResult = IMG_NULL;\
++ psNextNode = psHead;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ pResult = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++
++#define DECLARE_LIST_ANY_VA(TYPE) \
++IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA(TYPE) \
++IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ IMG_VOID* pResult = IMG_NULL;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ pResult = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
++{ \
++ RTYPE result;\
++ TYPE *psNextNode;\
++ result = CONTINUE;\
++ psNextNode = psHead;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ result = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ RTYPE result = CONTINUE;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ result = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_REMOVE(TYPE) \
++IMG_VOID List_##TYPE##_Remove(TYPE *psNode)
++
++#define IMPLEMENT_LIST_REMOVE(TYPE) \
++IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\
++{\
++ (*psNode->ppsThis)=psNode->psNext;\
++ if(psNode->psNext)\
++ {\
++ psNode->psNext->ppsThis = psNode->ppsThis;\
++ }\
++}
++
++#define DECLARE_LIST_INSERT(TYPE) \
++IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
++
++#define IMPLEMENT_LIST_INSERT(TYPE) \
++IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
++{\
++ psNewNode->ppsThis = ppsHead;\
++ psNewNode->psNext = *ppsHead;\
++ *ppsHead = psNewNode;\
++ if(psNewNode->psNext)\
++ {\
++ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
++ }\
++}
++
++
++#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/metrics.h
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _METRICS_
++#define _METRICS_
++
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#if defined(DEBUG) || defined(TIMING)
++
++
++typedef struct
++{
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32Stop;
++ IMG_UINT32 ui32Total;
++ IMG_UINT32 ui32Count;
++} Temporal_Data;
++
++extern Temporal_Data asTimers[];
++
++extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID);
++extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo);
++extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID);
++
++
++#define PVRSRV_TIMER_DUMMY 0
++
++#define PVRSRV_TIMER_EXAMPLE_1 1
++#define PVRSRV_TIMER_EXAMPLE_2 2
++
++
++#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1)
++
++#define PVRSRV_TIME_START(X) { \
++ asTimers[X].ui32Count += 1; \
++ asTimers[X].ui32Count |= 0x80000000L; \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ asTimers[X].ui32Stop = 0; \
++ }
++
++#define PVRSRV_TIME_SUSPEND(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ }
++
++#define PVRSRV_TIME_RESUME(X) { \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ }
++
++#define PVRSRV_TIME_STOP(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ asTimers[X].ui32Total += asTimers[X].ui32Stop; \
++ asTimers[X].ui32Count &= 0x7FFFFFFFL; \
++ }
++
++#define PVRSRV_TIME_RESET(X) { \
++ asTimers[X].ui32Start = 0; \
++ asTimers[X].ui32Stop = 0; \
++ asTimers[X].ui32Total = 0; \
++ asTimers[X].ui32Count = 0; \
++ }
++
++
++#if defined(__sh__)
++
++#define TST_REG ((volatile IMG_UINT8 *) (psDevInfo->pvSOCRegsBaseKM))
++
++#define TCOR_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+28))
++#define TCNT_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+32))
++#define TCR_2 ((volatile IMG_UINT16 *)(psDevInfo->pvSOCRegsBaseKM+36))
++
++#define TIMER_DIVISOR 4
++
++#endif
++
++
++
++
++
++#else
++
++
++
++#define PVRSRV_TIME_START(X)
++#define PVRSRV_TIME_SUSPEND(X)
++#define PVRSRV_TIME_RESUME(X)
++#define PVRSRV_TIME_STOP(X)
++#define PVRSRV_TIME_RESET(X)
++
++#define PVRSRVSetupMetricTimers(X)
++#define PVRSRVOutputMetricTotals()
++
++
++
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/osfunc.h
+@@ -0,0 +1,487 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/hardirq.h>
++#include <linux/string.h>
++#endif
++
++
++
++ #define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP
++
++#define KERNEL_ID 0xffffffffL
++#define POWER_MANAGER_ID 0xfffffffeL
++#define ISR_ID 0xfffffffdL
++#define TIMER_ID 0xfffffffcL
++
++
++#define HOST_PAGESIZE OSGetPageSize
++#define HOST_PAGEMASK (~(HOST_PAGESIZE()-1))
++#define HOST_PAGEALIGN(addr) (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK 0xf
++#define PVRSRV_OS_PAGEABLE_HEAP 0x1
++#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2
++
++
++IMG_UINT32 OSClockus(IMG_VOID);
++IMG_SIZE_T OSGetPageSize(IMG_VOID);
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode);
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq);
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData);
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID* pvLinAddr);
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
++IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID);
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd);
++#endif
++
++#if defined(__linux__)
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSRegisterDiscontigMem)
++#endif
++static INLINE PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSUnRegisterDiscontigMem)
++#endif
++static INLINE PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++#endif
++
++
++#if defined(__linux__)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
++{
++#if defined(__linux__)
++ *ppvCpuVAddr = IMG_NULL;
++ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes, ui32Flags, phOSMemHandle);
++#else
++ extern IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr);
++
++
++ return OSReservePhys(SysSysPAddrToCpuPAddr(pBasePAddr[0]), ui32Bytes, ui32Flags, ppvCpuVAddr, phOSMemHandle);
++#endif
++}
++
++static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++#if defined(__linux__)
++ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++#endif
++
++ return PVRSRV_OK;
++}
++#else
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ppvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSUnReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++#endif
++
++PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle);
++
++
++
++#if defined(__linux__)
++PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINTPTR_T ui32ByteOffset,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet);
++PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSGetSubMemHandle)
++#endif
++static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINTPTR_T ui32ByteOffset,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ *phOSMemHandleRet = hOSMemHandle;
++ return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ return PVRSRV_OK;
++}
++#endif
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID);
++IMG_UINT32 OSGetCurrentThreadID( IMG_VOID );
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
++
++PVRSRV_ERROR OSAllocPages_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_UINT32 ui32PageSize, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc);
++PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc);
++
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \
++ (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", size)), \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++
++ #define OSAllocPages(flags, size, pageSize, linAddr, pageAlloc) \
++ (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", size)), \
++ OSAllocPages_Impl(flags, size, pageSize, linAddr, pageAlloc))
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%X)", linAddr)), \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++#else
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++
++ #define OSAllocPages OSAllocPages_Impl
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++#endif
++
++#ifdef PVRSRV_DEBUG_OS_MEMORY
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID *ppvCpuVAddr,
++ IMG_HANDLE *phBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line);
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID pvCpuVAddr,
++ IMG_HANDLE hBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line);
++
++
++ typedef struct
++ {
++ IMG_UINT8 sGuardRegionBefore[8];
++ IMG_CHAR sFileName[128];
++ IMG_UINT32 uLineNo;
++ IMG_SIZE_T uSize;
++ IMG_SIZE_T uSizeParityCheck;
++ enum valid_tag
++ { isFree = 0x277260FF,
++ isAllocated = 0x260511AA
++ } eValid;
++ } OSMEM_DEBUG_INFO;
++
++ #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO))
++ #define TEST_BUFFER_PADDING_AFTER (8)
++ #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER)
++#else
++ #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations
++ #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations
++#endif
++
++#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
++ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl
++ #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl
++#else
++ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc);
++ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSAllocMem_Impl(flags, size, addr, blockAlloc)
++ #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSFreeMem_Impl(flags, size, addr, blockAlloc)
++#endif
++
++
++
++#if defined(__linux__)
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_SIZE_T ui32ByteOffset);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSMemHandleToCpuPAddr)
++#endif
++static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_SIZE_T ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR sCpuPAddr;
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++ sCpuPAddr.uiAddr = 0;
++ return sCpuPAddr;
++}
++#endif
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData);
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData);
++IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...);
++#define OSStringLength(pszString) strlen(pszString)
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
++ PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent);
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM);
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
++
++IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_SIZE_T ui32Size,IMG_HANDLE *phMemBlock);
++IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess);
++IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess);
++
++IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_SIZE_T ui32Size, IMG_HANDLE hMemBlock);
++
++PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie,
++ IMG_SYS_PHYADDR sCPUPhysAddr,
++ IMG_SIZE_T uiSizeInBytes,
++ IMG_UINT32 ui32CacheFlags,
++ IMG_PVOID *ppvUserAddr,
++ IMG_SIZE_T *puiActualSize,
++ IMG_HANDLE hMappingHandle);
++
++PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie,
++ IMG_PVOID pvUserAddr,
++ IMG_PVOID pvProcess);
++
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
++IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
++
++#ifndef OSReadHWReg
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++#endif
++#ifndef OSWriteHWReg
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++#endif
++
++typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer);
++
++PVRSRV_ERROR OSGetSysMemSize(IMG_SIZE_T *pui32Bytes);
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
++ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
++ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++
++struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
++
++IMG_VOID OSPanic(IMG_VOID);
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
++
++typedef enum _img_verify_test
++{
++ PVR_VERIFY_WRITE = 0,
++ PVR_VERIFY_READ
++} IMG_VERIFY_TEST;
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T ui32Bytes);
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
++PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
++
++#if defined(__linux__)
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround);
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSAcquirePhysPageAddr)
++#endif
++static INLINE PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCPUVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(psSysPAddr);
++ PVR_UNREFERENCED_PARAMETER(phOSWrapMem);
++ PVR_UNREFERENCED_PARAMETER(bWrapWorkaround);
++ return PVRSRV_OK;
++}
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReleasePhysPageAddr)
++#endif
++static INLINE PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ PVR_UNREFERENCED_PARAMETER(hOSWrapMem);
++ return PVRSRV_OK;
++}
++#endif
++
++#if defined(__linux__) && defined(__KERNEL__)
++#define OS_SUPPORTS_IN_LISR
++static inline IMG_BOOL OSInLISR(IMG_VOID unref__ *pvSysData)
++{
++ return in_irq();
++}
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/osperproc.h
+@@ -0,0 +1,76 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __OSPERPROC_H__
++#define __OSPERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData);
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessPrivateDataInit)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessPrivateDataDeInit)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessSetHandleOptions)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psHandleBase);
++
++ return PVRSRV_OK;
++}
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/pdump_km.h
+@@ -0,0 +1,451 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++#if (defined(LINUX) && (defined(SUPPORT_SGX) || defined(SUPPORT_MSVDX)))
++
++#define SGX_SUPPORT_COMMON_PDUMP
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++#include <pdump_osfunc.h>
++#endif
++#endif
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define PDUMP_FLAGS_NEVER 0x08000000UL
++#define PDUMP_FLAGS_TOOUT2MEM 0x10000000UL
++#define PDUMP_FLAGS_LASTFRAME 0x20000000UL
++#define PDUMP_FLAGS_RESETLFBUFFER 0x40000000UL
++#define PDUMP_FLAGS_CONTINUOUS 0x80000000UL
++
++#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0
++#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0
++
++#define PDUMP_STREAM_PARAM2 0
++#define PDUMP_STREAM_SCRIPT2 1
++#define PDUMP_STREAM_DRIVERINFO 2
++#define PDUMP_NUM_STREAMS 3
++
++
++#ifndef PDUMP
++#define MAKEUNIQUETAG(hMemInfo) (0)
++#endif
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData,
++ IMG_PVOID pvAltLinAddr,
++ IMG_PVOID pvLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_DEV_PHYADDR *pPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ IMG_UINT32 ui32Start,
++ IMG_UINT32 ui32Length,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2);
++ IMG_VOID PDumpInitCommon(IMG_VOID);
++ IMG_VOID PDumpDeInitCommon(IMG_VOID);
++ IMG_VOID PDumpInit(IMG_VOID);
++ IMG_VOID PDumpDeInit(IMG_VOID);
++ PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID);
++ PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID);
++ IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame);
++ IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
++ IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags);
++
++ PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags);
++ IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ IMG_BOOL PDumpIsSuspended(IMG_VOID);
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP) || !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpRegKM(IMG_UINT32 dwReg,
++ IMG_UINT32 dwData);
++ PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...);
++ PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
++ IMG_CHAR* pszFormat,
++ ...);
++
++ PVRSRV_ERROR PDumpPDReg(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32dwData,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++#else
++ IMG_VOID PDumpRegKM(IMG_UINT32 dwReg,
++ IMG_UINT32 dwData);
++ IMG_VOID PDumpComment(IMG_CHAR* pszFormat, ...);
++ IMG_VOID PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
++ IMG_CHAR* pszFormat,
++ ...);
++
++
++ IMG_VOID PDumpPDReg(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32dwData,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++#endif
++
++ IMG_VOID PDumpMsvdxRegRead(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 dwRegOffset);
++
++ IMG_VOID PDumpMsvdxRegWrite(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 dwRegOffset,
++ const IMG_UINT32 dwData);
++
++ PVRSRV_ERROR PDumpMsvdxRegPol(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 ui32Offset,
++ const IMG_UINT32 ui32CheckFuncIdExt,
++ const IMG_UINT32 ui32RequValue,
++ const IMG_UINT32 ui32Enable,
++ const IMG_UINT32 ui32PollCount,
++ const IMG_UINT32 ui32TimeOut);
++
++ PVRSRV_ERROR PDumpMsvdxWriteRef(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 ui32VLROffset,
++ const IMG_UINT32 ui32Physical );
++
++ IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
++ IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID);
++
++ IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_PUINT32 pui32PhysPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 *pui32MMUContextID,
++ IMG_UINT32 ui32MMUType,
++ IMG_HANDLE hUniqueTag1,
++ IMG_VOID *pvPDCPUAddr);
++ PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 ui32MMUContextID,
++ IMG_UINT32 ui32MMUType);
++
++ PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2);
++
++ IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame);
++
++
++#if defined(LINUX)
++#define COMMON_PDUMP_OS_SUPPORT
++#endif
++
++#if defined (COMMON_PDUMP_OS_SUPPORT) && !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32 ui32Flags);
++
++ PVRSRV_ERROR PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++ PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
++
++ PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved);
++ PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++#else
++ IMG_VOID PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++ IMG_VOID PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++ IMG_VOID PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ IMG_VOID PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32 ui32Flags);
++ IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++ IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++ IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks);
++
++
++ IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved);
++ IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT IMG_VOID PDumpHWPerfCBKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ IMG_VOID PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++#endif
++
++ IMG_VOID PDumpVGXMemToFile(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 uiAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_VOID PDumpSuspendKM(IMG_VOID);
++ IMG_VOID PDumpResumeKM(IMG_VOID);
++
++ #define PDUMPMEMPOL PDumpMemPolKM
++ #define PDUMPMEM PDumpMemKM
++ #define PDUMPMEM2 PDumpMem2KM
++ #define PDUMPMEMUM PDumpMemUM
++ #define PDUMPINIT PDumpInitCommon
++ #define PDUMPDEINIT PDumpDeInitCommon
++ #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM
++ #define PDUMPTESTFRAME PDumpIsCaptureFrameKM
++ #define PDUMPTESTNEXTFRAME PDumpTestNextFrame
++ #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM
++ #define PDUMPREG PDumpRegKM
++ #define PDUMPCOMMENT PDumpComment
++ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
++ #define PDUMPREGPOL PDumpRegPolKM
++ #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM
++ #define PDUMPMALLOCPAGES PDumpMallocPages
++ #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
++ #define PDUMPSETMMUCONTEXT PDumpSetMMUContext
++ #define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
++ #define PDUMPFREEPAGES PDumpFreePages
++ #define PDUMPFREEPAGETABLE PDumpFreePageTable
++ #define PDUMPPDREG PDumpPDReg
++ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
++ #define PDUMPCBP PDumpCBP
++ #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys
++ #define PDUMPENDINITPHASE PDumpStopInitPhaseKM
++ #define PDUMPMSVDXREGWRITE PDumpMsvdxRegWrite
++ #define PDUMPMSVDXREGREAD PDumpMsvdxRegRead
++ #define PDUMPMSVDXPOL PDumpMsvdxRegPol
++ #define PDUMPMSVDXWRITEREF PDumpMsvdxWriteRef
++ #define PDUMPBITMAPKM PDumpBitmapKM
++ #define PDUMPDRIVERINFO PDumpDriverInfoKM
++ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
++ #define PDUMPIDL PDumpIDL
++ #define PDUMPSUSPEND PDumpSuspendKM
++ #define PDUMPRESUME PDumpResumeKM
++
++#else
++ #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
++ #define PDUMPMEMPOL(args...)
++ #define PDUMPMEM(args...)
++ #define PDUMPMEM2(args...)
++ #define PDUMPMEMUM(args...)
++ #define PDUMPINIT(args...)
++ #define PDUMPDEINIT(args...)
++ #define PDUMPISLASTFRAME(args...)
++ #define PDUMPTESTFRAME(args...)
++ #define PDUMPTESTNEXTFRAME(args...)
++ #define PDUMPREGWITHFLAGS(args...)
++ #define PDUMPREG(args...)
++ #define PDUMPCOMMENT(args...)
++ #define PDUMPREGPOL(args...)
++ #define PDUMPREGPOLWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGES(args...)
++ #define PDUMPMALLOCPAGETABLE(args...)
++ #define PDUMPSETMMUCONTEXT(args...)
++ #define PDUMPCLEARMMUCONTEXT(args...)
++ #define PDUMPFREEPAGES(args...)
++ #define PDUMPFREEPAGETABLE(args...)
++ #define PDUMPPDREG(args...)
++ #define PDUMPPDREGWITHFLAGS(args...)
++ #define PDUMPSYNC(args...)
++ #define PDUMPCOPYTOMEM(args...)
++ #define PDUMPWRITE(args...)
++ #define PDUMPCBP(args...)
++ #define PDUMPCOMMENTWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGESPHYS(args...)
++ #define PDUMPENDINITPHASE(args...)
++ #define PDUMPMSVDXREG(args...)
++ #define PDUMPMSVDXREGWRITE(args...)
++ #define PDUMPMSVDXREGREAD(args...)
++ #define PDUMPMSVDXPOLEQ(args...)
++ #define PDUMPMSVDXPOL(args...)
++ #define PDUMPBITMAPKM(args...)
++ #define PDUMPDRIVERINFO(args...)
++ #define PDUMPIDLWITHFLAGS(args...)
++ #define PDUMPIDL(args...)
++ #define PDUMPSUSPEND(args...)
++ #define PDUMPRESUME(args...)
++ #define PDUMPMSVDXWRITEREF(args...)
++ #else
++ #error Compiler not specified
++ #endif
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+@@ -0,0 +1,137 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PDUMP_OSFUNC_H__
++#define __PDUMP_OSFUNC_H__
++
++#include <stdarg.h>
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++#define MAX_PDUMP_STRING_LENGTH (256)
++#define PDUMP_GET_SCRIPT_STRING() \
++ IMG_HANDLE hScript; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_MSG_STRING() \
++ IMG_HANDLE hMsg; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetMessageString(&hMsg, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_FILE_STRING() \
++ IMG_CHAR *pszFileName; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
++ IMG_HANDLE hScript; \
++ IMG_CHAR *pszFileName; \
++ IMG_UINT32 ui32MaxLenScript; \
++ IMG_UINT32 ui32MaxLenFileName; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
++ if(eError != PVRSRV_OK) return eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
++ if(eError != PVRSRV_OK) return eError;
++
++
++
++ PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg, IMG_UINT32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
++
++
++
++
++#define PDUMP_va_list va_list
++#define PDUMP_va_start va_start
++#define PDUMP_va_end va_end
++
++
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream);
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream);
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID);
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags);
++
++IMG_BOOL PDumpOSIsSuspended(IMG_VOID);
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID);
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hDbgStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags);
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags);
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...);
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs);
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/perproc.h
+@@ -0,0 +1,110 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++typedef struct _PVRSRV_PER_PROCESS_DATA_
++{
++ IMG_UINT32 ui32PID;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_CONTEXT hResManContext;
++ IMG_HANDLE hPerProcData;
++ PVRSRV_HANDLE_BASE *psHandleBase;
++#if defined (PVR_SECURE_HANDLES)
++
++ IMG_BOOL bHandlesBatched;
++#endif
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_BOOL bInitProcess;
++
++
++ IMG_HANDLE hOsPrivateData;
++} PVRSRV_PER_PROCESS_DATA;
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID);
++IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID);
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID);
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindPerProcessData)
++#endif
++static INLINE
++PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(IMG_VOID)
++{
++ return PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ return (psPerProc != IMG_NULL) ? psPerProc->hOsPrivateData : IMG_NULL;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVPerProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVPerProcessPrivateData(IMG_UINT32 ui32PID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindPerProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVFindPerProcessPrivateData(IMG_VOID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData());
++}
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/power.h
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++
++typedef struct _PVRSRV_POWER_DEV_TAG_
++{
++ PFN_PRE_POWER pfnPrePower;
++ PFN_POST_POWER pfnPostPower;
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
++ struct _PVRSRV_POWER_DEV_TAG_ *psNext;
++ struct _PVRSRV_POWER_DEV_TAG_ **ppsThis;
++
++} PVRSRV_POWER_DEV;
++
++typedef enum _PVRSRV_INIT_SERVER_STATE_
++{
++ PVRSRV_INIT_SERVER_Unspecified = -1,
++ PVRSRV_INIT_SERVER_RUNNING = 0,
++ PVRSRV_INIT_SERVER_RAN = 1,
++ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
++ PVRSRV_INIT_SERVER_NUM = 3,
++ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
++
++IMG_IMPORT
++IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState);
++
++
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent);
++IMG_IMPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo);
++
++IMG_IMPORT
++IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo);
++
++
++/*
++ * PVRSRVPowerOnSystemWithDevice
++ *
++ * Description: Power on the System if it is off, but instead of powering all
++ * of the devices to their "default" state, only turn on the specified
++ * device index.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerOnSystemWithDevice(IMG_UINT32 ui32DeviceIndex,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/queue.h
+@@ -0,0 +1,119 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \
++ psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++ typedef struct _COMMAND_COMPLETE_DATA_
++ {
++ IMG_BOOL bInUse;
++
++ IMG_UINT32 ui32DstSyncCount;
++ IMG_UINT32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ IMG_UINT32 ui32AllocSize;
++ }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
++
++#if !defined(USE_CODE)
++IMG_VOID QueueDumpDebugInfo(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVProcessQueues (IMG_UINT32 ui32CallerID,
++ IMG_BOOL bFlush);
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#include <linux/seq_file.h>
++off_t
++QueuePrintQueues (IMG_CHAR * buffer, size_t size, off_t off);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off);
++void ProcSeqShowQueue(struct seq_file *sfile,void* el);
++#endif
++
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ IMG_UINT32 ui32DevIndex,
++ IMG_UINT16 CommandType,
++ IMG_UINT32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ IMG_UINT32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ IMG_SIZE_T ui32DataByteSize );
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ IMG_SIZE_T ui32ParamSize,
++ IMG_VOID **ppvSpace);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand);
++
++IMG_IMPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR);
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
++ IMG_UINT32 ui32CmdCount);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ IMG_UINT32 ui32CmdCount);
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/ra.h
+@@ -0,0 +1,155 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++typedef struct _RA_ARENA_ RA_ARENA;
++typedef struct _BM_MAPPING_ BM_MAPPING;
++
++
++
++#define RA_STATS
++
++
++struct _RA_STATISTICS_
++{
++
++ IMG_SIZE_T uSpanCount;
++
++
++ IMG_SIZE_T uLiveSegmentCount;
++
++
++ IMG_SIZE_T uFreeSegmentCount;
++
++
++ IMG_SIZE_T uTotalResourceCount;
++
++
++ IMG_SIZE_T uFreeResourceCount;
++
++
++ IMG_SIZE_T uCumulativeAllocs;
++
++
++ IMG_SIZE_T uCumulativeFrees;
++
++
++ IMG_SIZE_T uImportCount;
++
++
++ IMG_SIZE_T uExportCount;
++};
++typedef struct _RA_STATISTICS_ RA_STATISTICS;
++
++struct _RA_SEGMENT_DETAILS_
++{
++ IMG_SIZE_T uiSize;
++ IMG_CPU_PHYADDR sCpuPhyAddr;
++ IMG_HANDLE hSegment;
++};
++typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++ IMG_UINTPTR_T base,
++ IMG_SIZE_T uSize,
++ BM_MAPPING *psMapping,
++ IMG_SIZE_T uQuantum,
++ IMG_BOOL (*imp_alloc)(IMG_VOID *_h,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase),
++ IMG_VOID (*imp_free) (IMG_VOID *,
++ IMG_UINTPTR_T,
++ BM_MAPPING *),
++ IMG_VOID (*backingstore_free) (IMG_VOID *,
++ IMG_SIZE_T,
++ IMG_SIZE_T,
++ IMG_HANDLE),
++ IMG_VOID *import_handle);
++
++IMG_VOID
++RA_Delete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_TestDelete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize);
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *pBase);
++
++IMG_VOID
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore);
++
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total) \
++{ \
++ if(total<100) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++}
++
++#define UPDATE_SPACE(str, count, total) \
++{ \
++ if(count == -1) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++ else \
++ { \
++ str += count; \
++ total -= count; \
++ } \
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails);
++
++
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ IMG_CHAR **ppszStr,
++ IMG_UINT32 *pui32StrLen);
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/resman.h
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++enum {
++
++ RESMAN_TYPE_SHARED_PB_DESC = 1,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ RESMAN_TYPE_TRANSFER_CONTEXT,
++
++
++
++
++
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++
++
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++
++
++ RESMAN_TYPE_OS_USERMODE_MAPPING,
++
++
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ RESMAN_TYPE_EVENT_OBJECT,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++
++
++ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION
++};
++
++#define RESMAN_CRITERIA_ALL 0x00000000
++#define RESMAN_CRITERIA_RESTYPE 0x00000001
++#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002
++#define RESMAN_CRITERIA_UI32_PARAM 0x00000004
++
++typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
++typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT;
++
++PVRSRV_ERROR ResManInit(IMG_VOID);
++IMG_VOID ResManDeInit(IMG_VOID);
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource);
++
++PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem);
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param);
++
++PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem,
++ PRESMAN_CONTEXT psNewResManContext);
++
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext,
++ PRESMAN_ITEM psItem);
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
++ PRESMAN_CONTEXT *phResManContext);
++IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext,
++ IMG_BOOL bKernelContext);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/services_headers.h
+@@ -0,0 +1,49 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "metrics.h"
++#include "osfunc.h"
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/srvkm/include/srvkm.h
+@@ -0,0 +1,69 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++ #ifdef PVR_DISABLE_LOGGING
++ #define PVR_LOG(X)
++ #else
++ #define PVR_LOG(X) PVRSRVReleasePrintf X
++ #endif
++
++ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat,
++ ...);
++
++ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID);
++ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID);
++
++ IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
++
++ PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
++{\
++ IMG_UINT32 uiOffset, uiStart, uiCurrent, uiNotLastLoop; \
++ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, uiNotLastLoop = 1;\
++ ((uiCurrent - uiStart + uiOffset) < TIMEOUT) || uiNotLastLoop--; \
++ uiCurrent = OSClockus(), \
++ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
++ uiStart = uiCurrent < uiStart ? 0 : uiStart)
++
++#define END_LOOP_UNTIL_TIMEOUT() \
++}
++
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/include/syscommon.h
+@@ -0,0 +1,217 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"
++#include "sysinfo.h"
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)
++#include <asm/io.h>
++#endif
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef struct _SYS_DEVICE_ID_TAG
++{
++ IMG_UINT32 uiID;
++ IMG_BOOL bInUse;
++
++} SYS_DEVICE_ID;
++
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4
++
++typedef struct _SYS_DATA_TAG_
++{
++ IMG_UINT32 ui32NumDevices;
++ SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT];
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++ PVRSRV_POWER_DEV *psPowerDeviceList;
++ PVRSRV_RESOURCE sPowerStateChangeResource;
++ PVRSRV_SYS_POWER_STATE eCurrentPowerState;
++ PVRSRV_SYS_POWER_STATE eFailedPowerState;
++ IMG_UINT32 ui32CurrentOSPowerState;
++ PVRSRV_QUEUE_INFO *psQueueList;
++ PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList;
++ IMG_PVOID pvEnvSpecificData;
++ IMG_PVOID pvSysSpecificData;
++ PVRSRV_RESOURCE sQProcessResource;
++ IMG_VOID *pvSOCRegsBase;
++ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
++ IMG_UINT32 *pvSOCTimerRegisterKM;
++ IMG_VOID *pvSOCClockGateRegsBase;
++ IMG_UINT32 ui32SOCClockGateRegsSize;
++ PFN_CMD_PROC *ppfnCmdProcList[SYS_DEVICE_COUNT];
++
++
++
++ PCOMMAND_COMPLETE_DATA *ppsCmdCompleteData[SYS_DEVICE_COUNT];
++
++
++ IMG_BOOL bReProcessQueues;
++
++ RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS];
++
++ IMG_CHAR *pszVersionString;
++ PVRSRV_EVENTOBJECT *psGlobalEventObject;
++
++ IMG_BOOL bFlushAll;
++
++} SYS_DATA;
++
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID);
++PVRSRV_ERROR SysFinalise(IMG_VOID);
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap);
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits);
++
++PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex);
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData);
++IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData);
++#endif
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize);
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
++#if defined(PVR_LMA)
++IMG_BOOL SysVerifyCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR CpuPAddr);
++IMG_BOOL SysVerifySysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++#endif
++
++extern SYS_DATA* gpsSysData;
++
++#if !defined(USE_CODE)
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysAcquireData)
++#endif
++static INLINE PVRSRV_ERROR SysAcquireData(SYS_DATA **ppsSysData)
++{
++
++ *ppsSysData = gpsSysData;
++
++
++
++
++
++ if (!gpsSysData)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysInitialiseCommon)
++#endif
++static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVInit(psSysData);
++
++ return eError;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysDeinitialiseCommon)
++#endif
++static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData)
++{
++
++ PVRSRVDeInit(psSysData);
++
++ OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++#endif
++
++
++#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__))
++#define SysReadHWReg(p, o) OSReadHWReg(p, o)
++#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
++#else
++static inline IMG_UINT32 SysReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++ return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset);
++}
++
++static inline IMG_VOID SysWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
++}
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/oemfuncs.h
+@@ -0,0 +1,64 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define OEM_EXCHANGE_POWER_STATE (1<<0)
++#define OEM_DEVICE_MEMORY_POWER (1<<1)
++#define OEM_DISPLAY_POWER (1<<2)
++#define OEM_GET_EXT_FUNCS (1<<3)
++
++typedef struct OEM_ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++} OEM_ACCESS_INFO, *POEM_ACCESS_INFO;
++
++
++typedef PVRSRV_ERROR (*PFN_SRV_READREGSTRING)(PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++ PFN_SRV_READREGSTRING pfnOEMReadRegistryString;
++ PFN_SRV_READREGSTRING pfnOEMWriteRegistryString;
++
++} PVRSRV_DC_OEM_JTABLE;
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/ospm_power.c
+@@ -0,0 +1,517 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#include "ospm_power.h"
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "servicesext.h"
++#include "power.h"
++#include "services.h"
++#include "osfunc.h"
++#include <linux/mutex.h>
++
++extern IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++extern IMG_UINT32 gui32MRSTMSVDXDeviceID;
++extern IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++struct drm_device *gpDrmDevice = NULL;
++static struct mutex g_ospm_mutex;
++static bool gbSuspendInProgress = false;
++static bool gbResumeInProgress = false;
++static int g_hw_power_status_mask;
++static atomic_t g_display_access_count;
++static atomic_t g_graphics_access_count;
++static atomic_t g_videoenc_access_count;
++static atomic_t g_videodec_access_count;
++
++/*
++ * ospm_power_init
++ *
++ * Description: Initialize this ospm power management module
++ */
++void ospm_power_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ /* JB Hack */
++ gpDrmDevice = dev;
++ return;
++
++ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
++ pci_read_config_dword(pci_root, 0xD4, &dev_priv->ospm_base);
++ dev_priv->ospm_base &= 0xffff;
++
++ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
++ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
++
++ gpDrmDevice = dev;
++ mutex_init(&g_ospm_mutex);
++ g_hw_power_status_mask = OSPM_ALL_ISLANDS;
++ atomic_set(&g_display_access_count, 0);
++ atomic_set(&g_graphics_access_count, 0);
++ atomic_set(&g_videoenc_access_count, 0);
++ atomic_set(&g_videodec_access_count, 0);
++
++
++#ifdef OSPM_STAT
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_on_time = 0;
++ dev_priv->gfx_off_time = 0;
++#endif
++}
++
++/*
++ * ospm_power_uninit
++ *
++ * Description: Uninitialize this ospm power management module
++ */
++void ospm_power_uninit(void)
++{
++ /* JB Hack */
++ return;
++
++ mutex_destroy(&g_ospm_mutex);
++}
++
++/*
++ * ospm_power_suspend
++ *
++ * Description: OSPM is telling our driver to suspend so save state
++ * and power down all hardware.
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector = NULL;
++ int ret = 0;
++ bool bDisplayOff = false;
++
++ /* JB Hack */
++ return ret;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_graphics_access_count) ||
++ atomic_read(&g_videoenc_access_count) ||
++ atomic_read(&g_videodec_access_count) ||
++ atomic_read(&g_display_access_count))
++ ret = -EBUSY;
++ //SGX will be powered off when idle due to D0i3 support. If we don't wait
++ //for D0i3, then we hit cases where user mode driver gets stuck waiting
++ //for command completion when SGX is powered off.
++ else if (ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ ret = -EBUSY;
++ else if (psb_check_msvdx_idle(dev))
++ ret = -EBUSY;
++ else if (IS_MRST(dev) && !dev_priv->topaz_disabled && lnc_check_topaz_idle(dev))
++ ret = -EBUSY;
++
++ gbSuspendInProgress = true;
++
++ if (!ret) {
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++ bDisplayOff = true;
++ } else if (!atomic_read(&g_display_access_count)) {
++ //At least power down the display
++ PVRSRVSetDevicePowerStateKM(gui32MRSTDisplayDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ bDisplayOff = true;
++ }
++
++ if (bDisplayOff) {
++ //Set dpms status to off so that an "xset dpms force on" from the
++ //OSPM Framework (or elsewhere) actually executes
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ }
++ }
++
++ gbSuspendInProgress = false;
++
++ mutex_unlock(&g_ospm_mutex);
++ return ret;
++}
++
++/*
++ * ospm_power_resume
++ *
++ * Description: OSPM is telling our driver to resume so restore state
++ * and power up necessary hardware.
++ */
++int ospm_power_resume(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector = NULL;
++
++ /* JB Hack */
++ return 0;
++
++ mutex_lock(&g_ospm_mutex);
++ gbResumeInProgress = true;
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0);
++
++ //Set dpms status to on. We should probably only do this for
++ //connectors that were on prior to the suspend, but for Moorestown
++ //we only have one connector so just brute force it.
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_ON;
++ }
++
++ gbResumeInProgress = false;
++ mutex_unlock(&g_ospm_mutex);
++ return 0;
++}
++
++
++/*
++ * ospm_power_island_down
++ *
++ * Description: Cut power to the specified island(s) (powergating)
++ */
++void ospm_power_island_down(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_mask = 0;
++ u32 pwr_sts = 0;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++#if 1 /* MDFLD_JLIU7_DSR */
++ DRM_INFO("%s, hw_islands = 0x%x. \n", __FUNCTION__, hw_islands);
++ if (hw_islands & (OSPM_VIDEO_DEC_ISLAND | OSPM_GRAPHICS_ISLAND)) {
++ dev_priv->dsr_fb_update &= ~hw_islands;
++ }
++#endif /* MDFLD_JLIU7_DSR */
++ /* JB Hack */
++ return;
++
++ g_hw_power_status_mask &= ~hw_islands;
++
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_ON) {
++ dev_priv->gfx_on_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_OFF;
++ dev_priv->gfx_off_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++ if (pwr_cnt) {
++ pwr_cnt |= inl(dev_priv->apm_base);
++ outl(pwr_cnt, dev_priv->apm_base);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++}
++
++/*
++ * ospm_power_island_up
++ *
++ * Description: Restore power to the specified island(s) (powergating)
++ */
++void ospm_power_island_up(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_sts = 0;
++ u32 pwr_mask = 0;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++#if 1 /* MDFLD_JLIU7_DSR */
++ DRM_INFO("%s, hw_islands = 0x%x. \n", __FUNCTION__, hw_islands);
++ if (hw_islands & (OSPM_VIDEO_DEC_ISLAND | OSPM_GRAPHICS_ISLAND)) {
++ dev_priv->dsr_fb_update |= hw_islands;
++ }
++#endif /* MDFLD_JLIU7_DSR */
++
++ /* JB Hack */
++ return;
++
++ if (IS_MRST(gpDrmDevice) &&
++ (hw_islands & (OSPM_GRAPHICS_ISLAND | OSPM_VIDEO_ENC_ISLAND |
++ OSPM_VIDEO_DEC_ISLAND))) {
++ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++ pwr_mask = 0;
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_OFF) {
++ dev_priv->gfx_off_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_on_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++
++ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
++ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ g_hw_power_status_mask |= hw_islands;
++}
++
++/*
++ * ospm_power_using_hw_begin
++ *
++ * Description: Notify PowerMgmt module that you will be accessing the
++ * specified island's hw so don't power it off. If the island is off,
++ * this function will behave differently depending on the type param.
++ *
++ * OSPM_UHB_FORCE_POWER_ON:
++ * Power on the specified island.
++ * OSPM_UHB_IGNORE_POWER_OFF:
++ * Increment the access counters. The caller is expected to power on
++ * the island if necessary.
++ * OSPM_UHB_ONLY_IF_ON:
++ * Return false and the caller is expected to not access the hw.
++ *
++ * NOTE *** If this is called from an interrupt handler or other atomic
++ * context, then it will return false if we are in the middle of a
++ * power state transition and the caller will be expected to handle that
++ * even if type is OSPM_UHB_FORCE_POWER_ON.
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage)
++{
++ bool ret = false;
++ bool b_island_is_off = false;
++ bool b_atomic = (in_interrupt() || in_atomic());
++ bool b_force_on = (usage == OSPM_UHB_FORCE_POWER_ON);
++ bool b_ignore_off = (usage == OSPM_UHB_IGNORE_POWER_OFF);
++ IMG_UINT32 deviceID = 0;
++
++ /* JB Hack */
++ return true; /* NOTE(review): unconditional early return — all code below is dead while this hack is in place */
++
++ if (!b_atomic)
++ mutex_lock(&g_ospm_mutex);
++ else if ((gbSuspendInProgress || gbResumeInProgress) && b_force_on)
++ goto FailExit;
++
++ b_island_is_off = hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask);
++
++ if (b_island_is_off && !b_force_on && !b_ignore_off)
++ goto FailExit;
++
++ if (b_island_is_off && b_force_on) {
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ deviceID = gui32SGXDeviceID;
++ break;
++ case OSPM_DISPLAY_ISLAND:
++ deviceID = gui32MRSTDisplayDeviceID;
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ deviceID = gui32MRSTMSVDXDeviceID;
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ deviceID = gui32MRSTTOPAZDeviceID;
++ break;
++ }
++
++ if (PVRSRVPowerOnSystemWithDevice(deviceID, b_atomic ? ISR_ID : KERNEL_ID, IMG_FALSE) != PVRSRV_OK)
++ goto FailExit;
++ }
++
++ switch(hw_island) /* FIXME(review): cases below have no break (unlike the switch above) — one island increments every later counter; confirm fall-through is intended */
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_inc(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_inc(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_inc(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_inc(&g_display_access_count);
++ }
++
++ ret = true;
++FailExit:
++
++ if (!b_atomic)
++ mutex_unlock(&g_ospm_mutex);
++
++ return ret;
++}
++
++
++/*
++ * ospm_power_using_hw_end
++ *
++ * Description: Notify PowerMgmt module that you are done accessing the
++ * specified island's hw so feel free to power it off. Note that this
++ * function doesn't actually power off the islands.
++ */
++void ospm_power_using_hw_end(int hw_island)
++{
++ /* JB Hack */
++ return; /* NOTE(review): unconditional early return — all code below is dead while this hack is in place */
++
++ switch(hw_island) /* FIXME(review): cases below have no break — one island decrements every later counter; confirm fall-through is intended (matches the inc switch in ospm_power_using_hw_begin) */
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_dec(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_dec(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_dec(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_dec(&g_display_access_count);
++ }
++
++ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
++ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
++ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
++ WARN_ON(atomic_read(&g_display_access_count) < 0);
++}
++
++/*
++ * ospm_power_is_hw_on
++ *
++ * Description: do an instantaneous check for if the specified islands
++ * are on. Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall. Otherwise, use
++ * ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands)
++{
++ /* JB Hack */
++ return true;
++ return ((g_hw_power_status_mask & hw_islands) == hw_islands);
++}
++
++void ospm_apm_power_down_msvdx(struct drm_device *dev)
++{
++ /* JB Hack */
++ return;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videodec_access_count))
++ goto out;
++ if (psb_check_msvdx_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTMSVDXDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
++
++void ospm_apm_power_down_topaz(struct drm_device *dev)
++{
++ /* JB Hack */
++ return;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videoenc_access_count))
++ goto out;
++ if (lnc_check_topaz_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTTOPAZDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/ospm_power.h
+@@ -0,0 +1,79 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _OSPM_POWER_H_
++#define _OSPM_POWER_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage type);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++#endif /*_OSPM_POWER_H_*/
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/psb_powermgmt.h
+@@ -0,0 +1,85 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ */
++#ifndef _PSB_POWERMGMT_H_
++#define _PSB_POWERMGMT_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++//extern int psb_check_msvdx_idle(struct drm_device *dev);
++//extern int lnc_check_topaz_idle(struct drm_device *dev);
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, bool force_on);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++void ospm_suspend_graphics(void);
++#endif /*_PSB_POWERMGMT_H_*/
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sys_pvr_drm_export.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "pvr_drm_shared.h"
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "mmap.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#include "sys_pvr_drm_import.h"
++
++#include "sys_pvr_drm_export.h"
++
++int
++SYSPVRInit(void)
++{
++ PVRDPFInit();
++
++ return 0;
++}
++
++
++int
++SYSPVRLoad(struct drm_device *dev, unsigned long flags)
++{
++ return PVRSRVDrmLoad(dev, flags);
++}
++
++int
++SYSPVROpen(struct drm_device *dev, struct drm_file *pFile)
++{
++ return PVRSRVDrmOpen(dev, pFile);
++}
++
++int
++SYSPVRUnload(struct drm_device *dev)
++{
++ return PVRSRVDrmUnload(dev);
++}
++
++void
++SYSPVRPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVDrmPostClose(dev, file);
++}
++
++int
++SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRSRV_BridgeDispatchKM(dev, arg, pFile);
++}
++
++int
++SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMIsMaster(dev, arg, pFile);
++}
++
++int
++SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMUnprivCmd(dev, arg, pFile);
++}
++
++int
++SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ int ret;
++
++ ret = PVRMMap(pFile, ps_vma);
++ if (ret == -ENOENT)
++ {
++ ret = drm_mmap(pFile, ps_vma);
++ }
++
++ return ret;
++}
++
++int
++SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++#if defined(PDUMP)
++ return dbgdrv_ioctl(dev, arg, pFile);
++#else
++ return -EINVAL;
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sys_pvr_drm_export.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_EXPORT_H__)
++#define __SYS_PVR_DRM_EXPORT_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(__KERNEL__)
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
++
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, IMG_UINT32)
++
++#if defined(PDUMP)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
++#else
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++#endif
++
++int SYSPVRInit(void);
++int SYSPVRLoad(struct drm_device *dev, unsigned long flags);
++int SYSPVROpen(struct drm_device *dev, struct drm_file *pFile);
++int SYSPVRUnload(struct drm_device *dev);
++void SYSPVRPostClose(struct drm_device *dev, struct drm_file *file);
++int SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++int SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev);
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sys_pvr_drm_import.h
+@@ -0,0 +1,44 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_IMPORT_H__)
++#define __SYS_PVR_DRM_IMPORT_H__
++
++#if defined(__KERNEL__)
++#include "psb_drm.h"
++#endif
++
++#define DRM_PSB_PLACEMENT_OFFSET 0x13
++#if 0
++#define DRM_PVR_RESERVED1 0x0D
++#define DRM_PVR_RESERVED2 0x0E
++#define DRM_PVR_RESERVED3 0x0F
++#define DRM_PVR_RESERVED4 0x10
++#define DRM_PVR_RESERVED5 0x11
++#define DRM_PVR_RESERVED6 0x12
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sysconfig.c
+@@ -0,0 +1,1274 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++#include "linux/pci.h"
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pdump_km.h"
++#include "syslocal.h"
++#if defined(SUPPORT_DRI_DRM_EXT)
++#include "env_data.h"
++#include "psb_drv.h"
++#include "psb_powermgmt.h"
++#include "sys_pvr_drm_export.h"
++#include "msvdx_power.h"
++#include "topaz_power.h"
++#endif
++
++
++
++/* Graphics MSI address and data region in PCIx */
++#define MRST_PCIx_MSI_ADDR_LOC 0x94
++#define MRST_PCIx_MSI_DATA_LOC 0x98
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (50)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++#else
++#define DRI_DRM_STATIC static
++#endif
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++IMG_UINT32 gui32MRSTMSVDXDeviceID;
++IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern struct drm_device *gpDrmDevice;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++#endif
++
++#if !defined(NO_HARDWARE)
++IMG_CPU_VIRTADDR gsPoulsboRegsCPUVaddr;
++
++IMG_CPU_VIRTADDR gsPoulsboDisplayRegsCPUVaddr;
++#endif
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++#define POULSBO_ADDR_RANGE_INDEX (MMADR_INDEX - 4)
++#define POULSBO_HP_ADDR_RANGE_INDEX (GMADR_INDEX - 4)
++static PVRSRV_ERROR PCIInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef LDM_PCI
++ psSysSpecData->hSGXPCI = OSPCISetDev((IMG_VOID *)psSysSpecData->psPCIDev, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#else
++ psSysSpecData->hSGXPCI = OSPCIAcquireDev(SYS_SGX_DEV_VENDOR_ID, gpDrmDevice->pci_device, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#endif
++ if (!psSysSpecData->hSGXPCI)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV);
++
++ PVR_TRACE(("PCI memory region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX)));
++ PVR_TRACE(("Host Port region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX)));
++
++
++ if (OSPCIAddrRangeLen(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) < (IS_MDFLD(gpDrmDevice)? POULSBO_MAX_OFFSET:PSB_POULSBO_MAX_OFFSET))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region isn't big enough"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE);
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Host Port region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE);
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PCIDeInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV))
++ {
++ OSPCIReleaseDev(psSysSpecData->hSGXPCI);
++ }
++}
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32BaseAddr = 0;
++ IMG_UINT32 ui32IRQ = 0;
++ IMG_UINT32 ui32HostPortAddr = 0;
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ ui32BaseAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ ui32HostPortAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++
++ PVR_TRACE(("ui32BaseAddr: %p", ui32BaseAddr));
++ PVR_TRACE(("ui32HostPortAddr: %p", ui32HostPortAddr));
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ if (IS_MDFLD(gpDrmDevice))
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + SGX_REGS_OFFSET;
++ else
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + PSB_SGX_REGS_OFFSET;
++
++ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ gsSGXDeviceMap.ui32Flags = SGX_HOSTPORT_PRESENT;
++ gsSGXDeviceMap.sHPSysPBase.uiAddr = ui32HostPortAddr;
++ gsSGXDeviceMap.sHPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sHPSysPBase);
++ if (IS_MDFLD(gpDrmDevice))
++ gsSGXDeviceMap.ui32HPSize = SYS_SGX_HP_SIZE;
++ else
++ gsSGXDeviceMap.ui32HPSize = PSB_SYS_SGX_HP_SIZE;
++#endif
++
++#if defined(MRST_SLAVEPORT)
++
++ gsSGXDeviceMap.sSPSysPBase.uiAddr = ui32BaseAddr + MRST_SGX_SP_OFFSET;
++ gsSGXDeviceMap.sSPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sSPSysPBase);
++ gsSGXDeviceMap.ui32SPSize = SGX_SP_SIZE;
++#endif
++
++
++
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
++ gsSGXDeviceMap.ui32LocalMemSize = 0;
++
++
++ {
++ IMG_SYS_PHYADDR sPoulsboRegsCpuPBase;
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_REGS_OFFSET;
++ gsPoulsboRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_DISPLAY_REGS_OFFSET;
++ gsPoulsboDisplayRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#define VERSION_STR_MAX_LEN_TEMPLATE "SGX revision = 000.000.000"
++static PVRSRV_ERROR SysCreateVersionString(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32MaxStrLen;
++ PVRSRV_ERROR eError;
++ IMG_INT32 i32Count;
++ IMG_CHAR *pszVersionString;
++ IMG_UINT32 ui32SGXRevision = 0;
++ IMG_VOID *pvSGXRegs;
++
++ pvSGXRegs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (pvSGXRegs != IMG_NULL)
++ {
++ ui32SGXRevision = OSReadHWReg(pvSGXRegs, EUR_CR_CORE_REVISION);
++ OSUnMapPhysToLin(pvSGXRegs,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysCreateVersionString: Couldn't map SGX registers"));
++ }
++
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ (IMG_PVOID *)&pszVersionString,
++ IMG_NULL,
++ "Version String");
++ if(eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ i32Count = OSSNPrintf(pszVersionString, ui32MaxStrLen + 1,
++ "SGX revision = %u.%u.%u",
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++ if(i32Count == -1)
++ {
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ pszVersionString,
++ IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psSysData->pszVersionString = pszVersionString;
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID SysFreeVersionString(SYS_DATA *psSysData)
++{
++ if(psSysData->pszVersionString)
++ {
++ IMG_UINT32 ui32MaxStrLen;
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen+1,
++ psSysData->pszVersionString,
++ IMG_NULL);
++ psSysData->pszVersionString = IMG_NULL;
++ }
++}
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++ IMG_UINT32 i = 0;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION* psTimingInfo;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ gpsSysData = &gsSysData;
++ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++
++ PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = (drm_psb_ospm != 0);
++ /*printk(KERN_ERR "SGX APM is %s\n", (drm_psb_ospm != 0)? "enabled":"disabled"); */
++#else
++ psTimingInfo->bEnableActivePM = IMG_FALSE;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ eError = PCIInitDev(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++
++ for(i=0; i<SYS_DEVICE_COUNT; i++)
++ {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = IMG_NULL;
++ gpsSysData->psQueueList = IMG_NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ /* register MSVDX, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, MSVDXRegisterDevice,
++ DEVICE_MSVDX_INTERRUPT, &gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register MSVDXdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MDFLD(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ /* register TOPAZ, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, TOPAZRegisterDevice,
++ DEVICE_TOPAZ_INTERRUPT, &gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register TOPAZdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++
++ while(psDeviceNode)
++ {
++
++ switch(psDeviceNode->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++ {
++ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++#ifdef OEM_CUSTOMISE
++
++#endif
++ }
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ gpsSGXDevNode = psDeviceNode;
++#endif
++ break;
++ }
++ case PVRSRV_DEVICE_TYPE_MSVDX:
++ /* nothing need to do here */
++ break;
++ case PVRSRV_DEVICE_TYPE_TOPAZ:
++ break;
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ }
++
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PDUMP_INIT);
++
++
++ eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_SGX_INITIALISED);
++
++ eError = PVRSRVInitialiseDevice (gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ if (IS_MDFLD(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ eError = PVRSRVInitialiseDevice (gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysEnableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32RegData | ui32Mask);
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData | ui32Mask);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SysEnableInterrupts: Interrupts enabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysDisableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData | ui32Mask);
++
++ PVR_TRACE(("SysDisableInterrupts: Interrupts disabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++#if defined(SYS_USING_INTERRUPTS)
++ eError = OSInstallMISR(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallMISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_MISR_INSTALLED);
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ eError = OSInstallSystemLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallSystemLISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_LISR_INSTALLED);
++#endif
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ SysEnableInterrupts(gpsSysData);
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
++#endif
++ eError = SysCreateVersionString(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
++ {
++ SysDisableInterrupts(psSysData);
++ }
++#endif
++
++#if defined(SYS_USING_INTERRUPTS)
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_LISR_INSTALLED))
++ {
++ eError = OSUninstallSystemLISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallSystemLISR failed"));
++ return eError;
++ }
++ }
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_MISR_INSTALLED))
++ {
++ eError = OSUninstallMISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
++ return eError;
++ }
++ }
++#endif
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_SGX_INITIALISED))
++ {
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ SysFreeVersionString(psSysData);
++
++ PCIDeInitDev(psSysData);
++
++ eError = OSDeInitEnvData(psSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++
++#if !defined(NO_HARDWARE)
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PDUMP_INIT))
++ {
++ PDUMPDEINIT();
++ }
++
++ gpsSysData = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA* psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Devices = 0;
++ IMG_UINT32 ui32Data, ui32DIMMask;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if (ui32Data & POULSBO_THALIA_MASK)
++ {
++ ui32Devices |= DEVICE_SGX_INTERRUPT;
++ }
++
++ if (ui32Data & POULSBO_MSVDX_MASK)
++ {
++ ui32Devices |= DEVICE_MSVDX_INTERRUPT;
++ }
++
++
++ ui32DIMMask = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ ui32DIMMask &= ~(POULSBO_THALIA_MASK | POULSBO_MSVDX_MASK);
++
++
++ if (ui32Data & ui32DIMMask)
++ {
++ ui32Devices |= DEVICE_DISP_INTERRUPT;
++ }
++
++ return (ui32Devices);
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ return 0;
++#endif
++}
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Data;
++ IMG_UINT32 ui32Mask = 0;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if ((ui32ClearBits & DEVICE_SGX_INTERRUPT) &&
++ (ui32Data & POULSBO_THALIA_MASK))
++ {
++ ui32Mask |= POULSBO_THALIA_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_MSVDX_INTERRUPT) &&
++ (ui32Data & POULSBO_MSVDX_MASK))
++ {
++ ui32Mask |= POULSBO_MSVDX_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_DISP_INTERRUPT) &&
++ (ui32Data & POULSBO_VSYNC_PIPEA_VBLANK_MASK))
++ {
++ ui32Mask |= POULSBO_VSYNC_PIPEA_VBLANK_MASK;
++ }
++
++ if (ui32Mask)
++ {
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32Mask);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++#endif
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap)
++{
++ switch(eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++
++ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ psDeviceNode->ui32SOCInterruptBit = DEVICE_DISP_INTERRUPT;
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize)
++{
++ if (ulInSize || pvIn);
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++ {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*)pvOut;
++
++ psOEMJTable->pfnOEMReadRegistryString = IMG_NULL;
++ psOEMJTable->pfnOEMWriteRegistryString = IMG_NULL;
++
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++static PVRSRV_ERROR SysMapInRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch(psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS))
++ {
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in SGX registers\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP))
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sHPCpuPBase,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++ }
++ psDevInfo->ui32HPSize = gsSGXDeviceMap.ui32HPSize;
++ psDevInfo->sHPSysPAddr = gsSGXDeviceMap.sHPSysPBase;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR SysUnmapRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch (psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ if (psDevInfo->pvRegsBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++#endif
++
++ psDevInfo->pvRegsBaseKM = IMG_NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++
++ psDevInfo->pvHostPortBaseKM = IMG_NULL;
++ }
++
++ psDevInfo->ui32HPSize = 0;
++ psDevInfo->sHPSysPAddr.uiAddr = 0;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError= PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ drm_irq_uninstall(gpDrmDevice);
++
++ SysUnmapRegisters();
++
++ //Save some pci state that won't get saved properly by pci_save_state()
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0x5C, &gsSysSpecificData.saveBSM);
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0xFC, &gsSysSpecificData.saveVBT);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, &gsSysSpecificData.msi_addr);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, &gsSysSpecificData.msi_data);
++
++ eError = OSPCISuspendDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSPCISuspendDev failed (%d)", eError));
++ }
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((gpsSysData->eCurrentPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ eError = OSPCIResumeDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSPCIResumeDev failed (%d)", eError));
++ return eError;
++ }
++
++ //Restore some pci state that will not have gotten restored properly by pci_restore_state()
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0x5c, gsSysSpecificData.saveBSM);
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0xFC, gsSysSpecificData.saveVBT);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, gsSysSpecificData.msi_addr);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, gsSysSpecificData.msi_data);
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ drm_irq_install(gpDrmDevice);
++ }
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Remove SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_GRAPHICS_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ }
++
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++/*hshamilt : I'm not sure how this is supposed to be handled. Need an OSPM engineer to verify*/
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore MSVDX power"));
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++// psb_irq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++// psb_irq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore TOPAZ power"));
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++/*hshamilt : I'm not sure how this is supposed to be handled. Need an OSPM engineer to verify*/
++
++
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++#endif
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev)
++{
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ if (gpsSGXDevNode != IMG_NULL)
++ {
++ bStatus = (*gpsSGXDevNode->pfnDeviceISR)(gpsSGXDevNode->pvISRData);
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)gpsSGXDevNode->psSysData);
++ }
++ }
++
++ return bStatus ? 1 : 0;
++}
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sysconfig.h
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME "SGX Medfield"
++
++#define SYS_NO_POWER_LOCK_TIMEOUT
++
++#define SGX_FEATURE_HOST_PORT
++
++#define SYS_SGX_USSE_COUNT (2)
++
++#define POULSBO_REGS_OFFSET 0x00000
++#define POULSBO_REG_SIZE 0x2100
++
++#define SGX_REGS_OFFSET 0x80000
++#define PSB_SGX_REGS_OFFSET 0x40000
++#define SGX_REG_SIZE 0x4000
++#define MSVDX_REGS_OFFSET 0x50000
++
++#ifdef SUPPORT_MSVDX
++#define POULSBO_MAX_OFFSET (MSVDX_REGS_OFFSET + MSVDX_REG_SIZE)
++#else
++#define POULSBO_MAX_OFFSET (SGX_REGS_OFFSET + SGX_REG_SIZE)
++#define PSB_POULSBO_MAX_OFFSET (PSB_SGX_REGS_OFFSET + SGX_REG_SIZE)
++#endif
++
++#define SYS_SGX_DEV_VENDOR_ID 0x8086
++#define PSB_SYS_SGX_DEV_DEVICE_ID_1 0x8108
++#define PSB_SYS_SGX_DEV_DEVICE_ID_2 0x8109
++
++#define SYS_SGX_DEVICE_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0, 0, 0}
++
++
++#define MMADR_INDEX 4
++#define IOPORT_INDEX 5
++#define GMADR_INDEX 6
++#define MMUADR_INDEX 7
++#define FBADR_INDEX 23
++#define FBSIZE_INDEX 24
++
++#define DISPLAY_SURFACE_SIZE (4 * 1024 * 1024)
++
++#define DEVICE_SGX_INTERRUPT (1<<0)
++#define DEVICE_MSVDX_INTERRUPT (1<<1)
++#define DEVICE_DISP_INTERRUPT (1<<2)
++#define DEVICE_TOPAZ_INTERRUPT (1<<3)
++
++#define POULSBO_DISP_MASK (1<<17)
++#define POULSBO_THALIA_MASK (1<<18)
++#define POULSBO_MSVDX_MASK (1<<19)
++#define POULSBO_VSYNC_PIPEA_VBLANK_MASK (1<<7)
++#define POULSBO_VSYNC_PIPEA_EVENT_MASK (1<<6)
++#define POULSBO_VSYNC_PIPEB_VBLANK_MASK (1<<5)
++#define POULSBO_VSYNC_PIPEB_EVENT_MASK (1<<4)
++
++#define POULSBO_DISPLAY_REGS_OFFSET 0x70000
++#define POULSBO_DISPLAY_REG_SIZE 0x2000
++
++#define POULSBO_DISPLAY_A_CONFIG 0x00008
++#define POULSBO_DISPLAY_A_STATUS_SELECT 0x00024
++#define POULSBO_DISPLAY_B_CONFIG 0x01008
++#define POULSBO_DISPLAY_B_STATUS_SELECT 0x01024
++
++#define POULSBO_DISPLAY_PIPE_ENABLE (1<<31)
++#define POULSBO_DISPLAY_VSYNC_STS_EN (1<<25)
++#define POULSBO_DISPLAY_VSYNC_STS (1<<9)
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ #define SYS_SGX_HP_SIZE 0x8000000
++ #define PSB_SYS_SGX_HP_SIZE 0x4000000
++
++ #define SYS_SGX_HOSTPORT_BASE_DEVVADDR 0xD0000000
++ #if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030)
++
++
++
++ #define SYS_SGX_HOSTPORT_BRN23030_OFFSET 0x7C00000
++ #endif
++#endif
++
++
++typedef struct
++{
++ union
++ {
++#if !defined(VISTA)
++ IMG_UINT8 aui8PCISpace[256];
++ IMG_UINT16 aui16PCISpace[128];
++ IMG_UINT32 aui32PCISpace[64];
++#endif
++ struct
++ {
++ IMG_UINT16 ui16VenID;
++ IMG_UINT16 ui16DevID;
++ IMG_UINT16 ui16PCICmd;
++ IMG_UINT16 ui16PCIStatus;
++ }s;
++ }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sysinfo.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US (500000)
++#define WAIT_TRY_COUNT (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++ SYS_DEVICE_SGX = 0,
++
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 4
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sysirq.h
+@@ -0,0 +1,49 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _SYSIRQ_H_
++#define _SYSIRQ_H_
++
++#include <drm/drmP.h>
++
++bool sysirq_init(struct drm_device *dev);
++void sysirq_uninit(struct drm_device *dev);
++
++void psb_irq_preinstall(struct drm_device *dev);
++int psb_irq_postinstall(struct drm_device *dev);
++void psb_irq_uninstall(struct drm_device *dev);
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++
++void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
++void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
++
++int psb_irq_enable_dpst(struct drm_device *dev);
++int psb_irq_disable_dpst(struct drm_device *dev);
++void sysirq_turn_on_dpst(struct drm_device *dev);
++void sysirq_turn_off_dpst(struct drm_device *dev);
++int psb_enable_vblank(struct drm_device *dev, int pipe);
++void psb_disable_vblank(struct drm_device *dev, int pipe);
++u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
++
++#endif //_SYSIRQ_H_
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/syslocal.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++#define SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV 0x00000001
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE 0x00000002
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE 0x00000004
++#if defined(NO_HARDWARE)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_SGX_REGS 0x00000008
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_MSVDX_REGS 0x00000020
++#endif
++#endif
++#define SYS_SPECIFIC_DATA_SGX_INITIALISED 0x00000040
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_MSVDX_INITIALISED 0x00000080
++#endif
++#define SYS_SPECIFIC_DATA_MISR_INSTALLED 0x00000100
++#define SYS_SPECIFIC_DATA_LISR_INSTALLED 0x00000200
++#define SYS_SPECIFIC_DATA_PDUMP_INIT 0x00000400
++#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00000800
++
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS 0x00001000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP 0x00004000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_MSVDX_REGS 0x00008000
++#define SYS_SPECIFIC_DATA_PM_IRQ_DISABLE 0x00010000
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00020000
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++
++ IMG_UINT32 ui32SysSpecificData;
++#ifdef __linux__
++ PVRSRV_PCI_DEV_HANDLE hSGXPCI;
++#endif
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM_EXT)
++ struct pci_dev *psPCIDev;
++#endif
++ /* MSI reg save */
++ uint32_t msi_addr;
++ uint32_t msi_data;
++
++ uint32_t saveBSM;
++ uint32_t saveVBT;
++} SYS_SPECIFIC_DATA;
++
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/medfield/sysutils.c
+@@ -0,0 +1,30 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/oemfuncs.h
+@@ -0,0 +1,72 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define OEM_EXCHANGE_POWER_STATE (1<<0)
++#define OEM_DEVICE_MEMORY_POWER (1<<1)
++#define OEM_DISPLAY_POWER (1<<2)
++#define OEM_GET_EXT_FUNCS (1<<3)
++
++typedef struct OEM_ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++} OEM_ACCESS_INFO, *POEM_ACCESS_INFO;
++
++typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
++ IMG_BYTE *pInBuf,
++ IMG_UINT32 InBufLen,
++ IMG_BYTE *pOutBuf,
++ IMG_UINT32 OutBufLen,
++ IMG_UINT32 *pdwBytesTransferred);
++
++
++typedef PVRSRV_ERROR (*PFN_SRV_READREGSTRING)(PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
++ PFN_SRV_READREGSTRING pfnOEMReadRegistryString;
++ PFN_SRV_READREGSTRING pfnOEMWriteRegistryString;
++
++} PVRSRV_DC_OEM_JTABLE;
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "pvr_drm_shared.h"
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "mmap.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#include "sys_pvr_drm_import.h"
++
++#include "sys_pvr_drm_export.h"
++
++int
++SYSPVRInit(void)
++{
++ PVRDPFInit();
++
++ return 0;
++}
++
++
++int
++SYSPVRLoad(struct drm_device *dev, unsigned long flags)
++{
++ return PVRSRVDrmLoad(dev, flags);
++}
++
++int
++SYSPVROpen(struct drm_device *dev, struct drm_file *pFile)
++{
++ return PVRSRVDrmOpen(dev, pFile);
++}
++
++int
++SYSPVRUnload(struct drm_device *dev)
++{
++ return PVRSRVDrmUnload(dev);
++}
++
++void
++SYSPVRPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVDrmPostClose(dev, file);
++}
++
++int
++SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRSRV_BridgeDispatchKM(dev, arg, pFile);
++}
++
++int
++SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMIsMaster(dev, arg, pFile);
++}
++
++int
++SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMUnprivCmd(dev, arg, pFile);
++}
++
++int
++SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ int ret;
++
++ ret = PVRMMap(pFile, ps_vma);
++ if (ret == -ENOENT)
++ {
++ ret = drm_mmap(pFile, ps_vma);
++ }
++
++ return ret;
++}
++
++int
++SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++#if defined(PDUMP)
++ return dbgdrv_ioctl(dev, arg, pFile);
++#else
++ return -EINVAL;
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_EXPORT_H__)
++#define __SYS_PVR_DRM_EXPORT_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(__KERNEL__)
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
++
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, IMG_UINT32)
++
++#if defined(PDUMP)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
++#else
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++#endif
++
++int SYSPVRInit(void);
++int SYSPVRLoad(struct drm_device *dev, unsigned long flags);
++int SYSPVROpen(struct drm_device *dev, struct drm_file *pFile);
++int SYSPVRUnload(struct drm_device *dev);
++void SYSPVRPostClose(struct drm_device *dev, struct drm_file *file);
++int SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++int SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev);
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+@@ -0,0 +1,45 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_IMPORT_H__)
++#define __SYS_PVR_DRM_IMPORT_H__
++
++#if defined(__KERNEL__)
++#include "psb_drm.h"
++#endif
++
++#define DRM_PSB_PLACEMENT_OFFSET 0x13
++
++#if 0
++#define DRM_PVR_RESERVED1 0x0D
++#define DRM_PVR_RESERVED2 0x0E
++#define DRM_PVR_RESERVED3 0x0F
++#define DRM_PVR_RESERVED4 0x10
++#define DRM_PVR_RESERVED5 0x11
++#define DRM_PVR_RESERVED6 0x12
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sysconfig.c
+@@ -0,0 +1,1203 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++#include "linux/pci.h"
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pdump_km.h"
++#include "syslocal.h"
++#if defined(SUPPORT_DRI_DRM_EXT)
++#include "env_data.h"
++#include "psb_drv.h"
++#include "psb_powermgmt.h"
++#include "sys_pvr_drm_export.h"
++#include "msvdx_power.h"
++#include "topaz_power.h"
++#endif
++
++/* Graphics MSI address and data region in PCIx */
++#define MRST_PCIx_MSI_ADDR_LOC 0x94
++#define MRST_PCIx_MSI_DATA_LOC 0x98
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (50)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++#else
++#define DRI_DRM_STATIC static
++#endif
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++IMG_UINT32 gui32SGXDeviceID;
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++IMG_UINT32 gui32MRSTMSVDXDeviceID;
++IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++extern void ospm_suspend_graphics(void);
++
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern struct drm_device *gpDrmDevice;
++#if defined(SUPPORT_DRI_DRM_EXT)
++static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++#endif
++
++#if !defined(NO_HARDWARE)
++IMG_CPU_VIRTADDR gsPoulsboRegsCPUVaddr;
++
++IMG_CPU_VIRTADDR gsPoulsboDisplayRegsCPUVaddr;
++#endif
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++#define POULSBO_ADDR_RANGE_INDEX (MMADR_INDEX - 4)
++#define POULSBO_HP_ADDR_RANGE_INDEX (GMADR_INDEX - 4)
++static PVRSRV_ERROR PCIInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef LDM_PCI
++ psSysSpecData->hSGXPCI = OSPCISetDev((IMG_VOID *)psSysSpecData->psPCIDev, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#else
++ psSysSpecData->hSGXPCI = OSPCIAcquireDev(SYS_SGX_DEV_VENDOR_ID, gpDrmDevice->pci_device, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#endif
++ if (!psSysSpecData->hSGXPCI)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV);
++
++ PVR_TRACE(("PCI memory region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX)));
++ PVR_TRACE(("Host Port region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX)));
++
++
++ if (OSPCIAddrRangeLen(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) < (IS_MRST(gpDrmDevice)? POULSBO_MAX_OFFSET:PSB_POULSBO_MAX_OFFSET))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region isn't big enough"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE);
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Host Port region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE);
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PCIDeInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV))
++ {
++ OSPCIReleaseDev(psSysSpecData->hSGXPCI);
++ }
++}
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32BaseAddr = 0;
++ IMG_UINT32 ui32IRQ = 0;
++ IMG_UINT32 ui32HostPortAddr = 0;
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ ui32BaseAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ ui32HostPortAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++
++ PVR_TRACE(("ui32BaseAddr: %p", ui32BaseAddr));
++ PVR_TRACE(("ui32HostPortAddr: %p", ui32HostPortAddr));
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ if (IS_MRST(gpDrmDevice))
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + SGX_REGS_OFFSET;
++ else
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + PSB_SGX_REGS_OFFSET;
++
++ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ gsSGXDeviceMap.ui32Flags = SGX_HOSTPORT_PRESENT;
++ gsSGXDeviceMap.sHPSysPBase.uiAddr = ui32HostPortAddr;
++ gsSGXDeviceMap.sHPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sHPSysPBase);
++ if (IS_MRST(gpDrmDevice))
++ gsSGXDeviceMap.ui32HPSize = SYS_SGX_HP_SIZE;
++ else
++ gsSGXDeviceMap.ui32HPSize = PSB_SYS_SGX_HP_SIZE;
++#endif
++
++#if defined(MRST_SLAVEPORT)
++
++ gsSGXDeviceMap.sSPSysPBase.uiAddr = ui32BaseAddr + MRST_SGX_SP_OFFSET;
++ gsSGXDeviceMap.sSPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sSPSysPBase);
++ gsSGXDeviceMap.ui32SPSize = SGX_SP_SIZE;
++#endif
++
++
++
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
++ gsSGXDeviceMap.ui32LocalMemSize = 0;
++
++
++ {
++ IMG_SYS_PHYADDR sPoulsboRegsCpuPBase;
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_REGS_OFFSET;
++ gsPoulsboRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_DISPLAY_REGS_OFFSET;
++ gsPoulsboDisplayRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#define VERSION_STR_MAX_LEN_TEMPLATE "SGX revision = 000.000.000"
++static PVRSRV_ERROR SysCreateVersionString(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32MaxStrLen;
++ PVRSRV_ERROR eError;
++ IMG_INT32 i32Count;
++ IMG_CHAR *pszVersionString;
++ IMG_UINT32 ui32SGXRevision = 0;
++ IMG_VOID *pvSGXRegs;
++
++ pvSGXRegs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (pvSGXRegs != IMG_NULL)
++ {
++ ui32SGXRevision = OSReadHWReg(pvSGXRegs, EUR_CR_CORE_REVISION);
++ OSUnMapPhysToLin(pvSGXRegs,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysCreateVersionString: Couldn't map SGX registers"));
++ }
++
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ (IMG_PVOID *)&pszVersionString,
++ IMG_NULL,
++ "Version String");
++ if(eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ i32Count = OSSNPrintf(pszVersionString, ui32MaxStrLen + 1,
++ "SGX revision = %u.%u.%u",
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++ if(i32Count == -1)
++ {
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ pszVersionString,
++ IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psSysData->pszVersionString = pszVersionString;
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID SysFreeVersionString(SYS_DATA *psSysData)
++{
++ if(psSysData->pszVersionString)
++ {
++ IMG_UINT32 ui32MaxStrLen;
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen+1,
++ psSysData->pszVersionString,
++ IMG_NULL);
++ psSysData->pszVersionString = IMG_NULL;
++ }
++}
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++ IMG_UINT32 i = 0;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION* psTimingInfo;
++ struct drm_psb_private *dev_priv;
++ dev_priv = (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ gpsSysData = &gsSysData;
++ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++
++ PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = (drm_psb_ospm != 0);
++ /*printk(KERN_ERR "SGX APM is %s\n", (drm_psb_ospm != 0)? "enabled":"disabled"); */
++#else
++ psTimingInfo->bEnableActivePM = IMG_FALSE;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ eError = PCIInitDev(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++
++ for(i=0; i<SYS_DEVICE_COUNT; i++)
++ {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = IMG_NULL;
++ gpsSysData->psQueueList = IMG_NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ /* register MSVDX, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, MSVDXRegisterDevice,
++ DEVICE_MSVDX_INTERRUPT, &gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register MSVDXdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MRST(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ /* register TOPAZ, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, TOPAZRegisterDevice,
++ DEVICE_TOPAZ_INTERRUPT, &gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register TOPAZdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++#endif
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++
++ while(psDeviceNode)
++ {
++
++ switch(psDeviceNode->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++ {
++ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++#ifdef OEM_CUSTOMISE
++
++#endif
++ }
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ gpsSGXDevNode = psDeviceNode;
++#endif
++ break;
++ }
++ case PVRSRV_DEVICE_TYPE_MSVDX:
++ /* nothing need to do here */
++ break;
++ case PVRSRV_DEVICE_TYPE_TOPAZ:
++ break;
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ }
++
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PDUMP_INIT);
++
++
++ eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_SGX_INITIALISED);
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ /* Initialize MSVDX and TOPAZ,
++ * to be matched with the PVRSRVRegisterDevice() calling
++ * for MSVDX and TOPAZ
++ */
++ eError = PVRSRVInitialiseDevice (gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ if (IS_MRST(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ eError = PVRSRVInitialiseDevice (gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysEnableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32RegData | ui32Mask);
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData | ui32Mask);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SysEnableInterrupts: Interrupts enabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysDisableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData | ui32Mask);
++
++ PVR_TRACE(("SysDisableInterrupts: Interrupts disabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++#if defined(SYS_USING_INTERRUPTS)
++ eError = OSInstallMISR(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallMISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_MISR_INSTALLED);
++#endif
++
++ eError = SysCreateVersionString(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
++ {
++ SysDisableInterrupts(psSysData);
++ }
++#endif
++
++#if defined(SYS_USING_INTERRUPTS)
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_LISR_INSTALLED))
++ {
++ eError = OSUninstallSystemLISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallSystemLISR failed"));
++ return eError;
++ }
++ }
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_MISR_INSTALLED))
++ {
++ eError = OSUninstallMISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
++ return eError;
++ }
++ }
++#endif
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_SGX_INITIALISED))
++ {
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ SysFreeVersionString(psSysData);
++
++ PCIDeInitDev(psSysData);
++
++ eError = OSDeInitEnvData(psSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++
++#if !defined(NO_HARDWARE)
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PDUMP_INIT))
++ {
++ PDUMPDEINIT();
++ }
++
++ gpsSysData = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA* psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Devices = 0;
++ IMG_UINT32 ui32Data, ui32DIMMask;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if (ui32Data & POULSBO_THALIA_MASK)
++ {
++ ui32Devices |= DEVICE_SGX_INTERRUPT;
++ }
++
++ if (ui32Data & POULSBO_MSVDX_MASK)
++ {
++ ui32Devices |= DEVICE_MSVDX_INTERRUPT;
++ }
++
++
++ ui32DIMMask = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ ui32DIMMask &= ~(POULSBO_THALIA_MASK | POULSBO_MSVDX_MASK);
++
++
++ if (ui32Data & ui32DIMMask)
++ {
++ ui32Devices |= DEVICE_DISP_INTERRUPT;
++ }
++
++ return (ui32Devices);
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ return 0;
++#endif
++}
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Data;
++ IMG_UINT32 ui32Mask = 0;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if ((ui32ClearBits & DEVICE_SGX_INTERRUPT) &&
++ (ui32Data & POULSBO_THALIA_MASK))
++ {
++ ui32Mask |= POULSBO_THALIA_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_MSVDX_INTERRUPT) &&
++ (ui32Data & POULSBO_MSVDX_MASK))
++ {
++ ui32Mask |= POULSBO_MSVDX_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_DISP_INTERRUPT) &&
++ (ui32Data & POULSBO_VSYNC_PIPEA_VBLANK_MASK))
++ {
++ ui32Mask |= POULSBO_VSYNC_PIPEA_VBLANK_MASK;
++ }
++
++ if (ui32Mask)
++ {
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32Mask);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++#endif
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap)
++{
++ switch(eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++
++ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ psDeviceNode->ui32SOCInterruptBit = DEVICE_DISP_INTERRUPT;
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize)
++{
++ if (ulInSize || pvIn);
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++ {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*)pvOut;
++
++ psOEMJTable->pfnOEMReadRegistryString = IMG_NULL;
++ psOEMJTable->pfnOEMWriteRegistryString = IMG_NULL;
++
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++static PVRSRV_ERROR SysMapInRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch(psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS))
++ {
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in SGX registers\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP))
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sHPCpuPBase,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++ }
++ psDevInfo->ui32HPSize = gsSGXDeviceMap.ui32HPSize;
++ psDevInfo->sHPSysPAddr = gsSGXDeviceMap.sHPSysPBase;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR SysUnmapRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch (psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ if (psDevInfo->pvRegsBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++#endif
++
++ psDevInfo->pvRegsBaseKM = IMG_NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++
++ psDevInfo->pvHostPortBaseKM = IMG_NULL;
++ }
++
++ psDevInfo->ui32HPSize = 0;
++ psDevInfo->sHPSysPAddr.uiAddr = 0;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++#if !(defined(NO_HARDWARE) || defined(__linux__))
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError= PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ drm_irq_uninstall(gpDrmDevice);
++
++ SysUnmapRegisters();
++
++ //Save some pci state that won't get saved properly by pci_save_state()
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0x5C, &gsSysSpecificData.saveBSM);
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0xFC, &gsSysSpecificData.saveVBT);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, &gsSysSpecificData.msi_addr);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, &gsSysSpecificData.msi_data);
++
++ eError = OSPCISuspendDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSPCISuspendDev failed (%d)", eError));
++ }
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((gpsSysData->eCurrentPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ eError = OSPCIResumeDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSPCIResumeDev failed (%d)", eError));
++ return eError;
++ }
++
++ //Restore some pci state that will not have gotten restored properly by pci_restore_state()
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0x5c, gsSysSpecificData.saveBSM);
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0xFC, gsSysSpecificData.saveVBT);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, gsSysSpecificData.msi_addr);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, gsSysSpecificData.msi_data);
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ drm_irq_install(gpDrmDevice);
++ }
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Remove SGX power"));
++ /*printk(KERN_ALERT "SysDevicePrePowerState: Remove SGX power - D0i3 "); */
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ ospm_power_island_down(OSPM_GRAPHICS_ISLAND);
++ }
++#if 0
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ }
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++ }
++#if 0
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++ }
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev)
++{
++ IMG_BOOL bStatus = IMG_FALSE;
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ if (gpsSGXDevNode != IMG_NULL)
++ {
++ bStatus = (*gpsSGXDevNode->pfnDeviceISR)(gpsSGXDevNode->pvISRData);
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)gpsSGXDevNode->psSysData);
++ }
++ }
++
++ return bStatus ? 1 : 0;
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sysconfig.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME "SGX Moorestown"
++
++#define SYS_NO_POWER_LOCK_TIMEOUT
++
++#define SGX_FEATURE_HOST_PORT
++
++#define SYS_SGX_USSE_COUNT (2)
++
++#define POULSBO_REGS_OFFSET 0x00000
++#define POULSBO_REG_SIZE 0x2100
++
++#define SGX_REGS_OFFSET 0x80000
++#define PSB_SGX_REGS_OFFSET 0x40000
++#define SGX_REG_SIZE 0x4000
++#define MSVDX_REGS_OFFSET 0x50000
++
++#ifdef SUPPORT_MSVDX
++#define POULSBO_MAX_OFFSET (MSVDX_REGS_OFFSET + MSVDX_REG_SIZE)
++#else
++#define POULSBO_MAX_OFFSET (SGX_REGS_OFFSET + SGX_REG_SIZE)
++#define PSB_POULSBO_MAX_OFFSET (PSB_SGX_REGS_OFFSET + SGX_REG_SIZE)
++#endif
++
++#define SYS_SGX_DEV_VENDOR_ID 0x8086
++#define PSB_SYS_SGX_DEV_DEVICE_ID_1 0x8108
++#define PSB_SYS_SGX_DEV_DEVICE_ID_2 0x8109
++
++#define SYS_SGX_DEVICE_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0, 0, 0}
++
++
++#define MMADR_INDEX 4
++#define IOPORT_INDEX 5
++#define GMADR_INDEX 6
++#define MMUADR_INDEX 7
++#define FBADR_INDEX 23
++#define FBSIZE_INDEX 24
++
++#define DISPLAY_SURFACE_SIZE (4 * 1024 * 1024)
++
++#define DEVICE_SGX_INTERRUPT (1<<0)
++#define DEVICE_MSVDX_INTERRUPT (1<<1)
++#define DEVICE_DISP_INTERRUPT (1<<2)
++#define DEVICE_TOPAZ_INTERRUPT (1<<3)
++
++#define POULSBO_DISP_MASK (1<<17)
++#define POULSBO_THALIA_MASK (1<<18)
++#define POULSBO_MSVDX_MASK (1<<19)
++#define POULSBO_VSYNC_PIPEA_VBLANK_MASK (1<<7)
++#define POULSBO_VSYNC_PIPEA_EVENT_MASK (1<<6)
++#define POULSBO_VSYNC_PIPEB_VBLANK_MASK (1<<5)
++#define POULSBO_VSYNC_PIPEB_EVENT_MASK (1<<4)
++
++#define POULSBO_DISPLAY_REGS_OFFSET 0x70000
++#define POULSBO_DISPLAY_REG_SIZE 0x2000
++
++#define POULSBO_DISPLAY_A_CONFIG 0x00008
++#define POULSBO_DISPLAY_A_STATUS_SELECT 0x00024
++#define POULSBO_DISPLAY_B_CONFIG 0x01008
++#define POULSBO_DISPLAY_B_STATUS_SELECT 0x01024
++
++#define POULSBO_DISPLAY_PIPE_ENABLE (1<<31)
++#define POULSBO_DISPLAY_VSYNC_STS_EN (1<<25)
++#define POULSBO_DISPLAY_VSYNC_STS (1<<9)
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ #define SYS_SGX_HP_SIZE 0x8000000
++ #define PSB_SYS_SGX_HP_SIZE 0x4000000
++
++ #define SYS_SGX_HOSTPORT_BASE_DEVVADDR 0xD0000000
++ #if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030)
++
++
++
++ #define SYS_SGX_HOSTPORT_BRN23030_OFFSET 0x7C00000
++ #endif
++#endif
++
++
++typedef struct
++{
++ union
++ {
++#if !defined(VISTA)
++ IMG_UINT8 aui8PCISpace[256];
++ IMG_UINT16 aui16PCISpace[128];
++ IMG_UINT32 aui32PCISpace[64];
++#endif
++ struct
++ {
++ IMG_UINT16 ui16VenID;
++ IMG_UINT16 ui16DevID;
++ IMG_UINT16 ui16PCICmd;
++ IMG_UINT16 ui16PCIStatus;
++ }s;
++ }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sysinfo.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US (500000)
++#define WAIT_TRY_COUNT (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++ SYS_DEVICE_SGX = 0,
++
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 4
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/syslocal.h
+@@ -0,0 +1,83 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++#define SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV 0x00000001
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE 0x00000002
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE 0x00000004
++#if defined(NO_HARDWARE)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_SGX_REGS 0x00000008
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_MSVDX_REGS 0x00000020
++#endif
++#endif
++#define SYS_SPECIFIC_DATA_SGX_INITIALISED 0x00000040
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_MSVDX_INITIALISED 0x00000080
++#endif
++#define SYS_SPECIFIC_DATA_MISR_INSTALLED 0x00000100
++#define SYS_SPECIFIC_DATA_LISR_INSTALLED 0x00000200
++#define SYS_SPECIFIC_DATA_PDUMP_INIT 0x00000400
++#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00000800
++
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS 0x00001000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP 0x00004000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_MSVDX_REGS 0x00008000
++#define SYS_SPECIFIC_DATA_PM_IRQ_DISABLE 0x00010000
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00020000
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++
++ IMG_UINT32 ui32SysSpecificData;
++#ifdef __linux__
++ PVRSRV_PCI_DEV_HANDLE hSGXPCI;
++#endif
++//#ifdef LDM_PCI
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++ struct pci_dev *psPCIDev;
++#endif
++ /* MSI reg save */
++ uint32_t msi_addr;
++ uint32_t msi_data;
++
++ uint32_t saveBSM;
++ uint32_t saveVBT;
++} SYS_SPECIFIC_DATA;
++
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/moorestown/sysutils.c
+@@ -0,0 +1,30 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/oemfuncs.h
+@@ -0,0 +1,74 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define OEM_EXCHANGE_POWER_STATE (1<<0)
++#define OEM_DEVICE_MEMORY_POWER (1<<1)
++#define OEM_DISPLAY_POWER (1<<2)
++#define OEM_GET_EXT_FUNCS (1<<3)
++
++typedef struct OEM_ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++} OEM_ACCESS_INFO, *POEM_ACCESS_INFO;
++
++typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
++ IMG_BYTE *pInBuf,
++ IMG_UINT32 InBufLen,
++ IMG_BYTE *pOutBuf,
++ IMG_UINT32 OutBufLen,
++ IMG_UINT32 *pdwBytesTransferred);
++
++
++typedef PVRSRV_ERROR (*PFN_SRV_READREGSTRING)(PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++#ifdef CONFIG_DRM_MRST
++ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
++#endif
++ PFN_SRV_READREGSTRING pfnOEMReadRegistryString;
++ PFN_SRV_READREGSTRING pfnOEMWriteRegistryString;
++
++} PVRSRV_DC_OEM_JTABLE;
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/ospm_power.c
+@@ -0,0 +1,517 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#include "ospm_power.h"
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "servicesext.h"
++#include "power.h"
++#include "services.h"
++#include "osfunc.h"
++#include <linux/mutex.h>
++
++extern IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++extern IMG_UINT32 gui32MRSTMSVDXDeviceID;
++extern IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++struct drm_device *gpDrmDevice = NULL;
++static struct mutex g_ospm_mutex;
++static bool gbSuspendInProgress = false;
++static bool gbResumeInProgress = false;
++static int g_hw_power_status_mask;
++static atomic_t g_display_access_count;
++static atomic_t g_graphics_access_count;
++static atomic_t g_videoenc_access_count;
++static atomic_t g_videodec_access_count;
++
++/*
++ * ospm_power_init
++ *
++ * Description: Initialize this ospm power management module
++ */
++void ospm_power_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ /* JB Hack */
++ gpDrmDevice = dev;
++ return;
++
++ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
++ pci_read_config_dword(pci_root, 0xD4, &dev_priv->ospm_base);
++ dev_priv->ospm_base &= 0xffff;
++
++ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
++ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
++
++ gpDrmDevice = dev;
++ mutex_init(&g_ospm_mutex);
++ g_hw_power_status_mask = OSPM_ALL_ISLANDS;
++ atomic_set(&g_display_access_count, 0);
++ atomic_set(&g_graphics_access_count, 0);
++ atomic_set(&g_videoenc_access_count, 0);
++ atomic_set(&g_videodec_access_count, 0);
++
++
++#ifdef OSPM_STAT
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_on_time = 0;
++ dev_priv->gfx_off_time = 0;
++#endif
++}
++
++/*
++ * ospm_power_uninit
++ *
++ * Description: Uninitialize this ospm power management module
++ */
++void ospm_power_uninit(void)
++{
++ /* JB Hack */
++ return;
++
++ mutex_destroy(&g_ospm_mutex);
++}
++
++/*
++ * ospm_power_suspend
++ *
++ * Description: OSPM is telling our driver to suspend so save state
++ * and power down all hardware.
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector = NULL;
++ int ret = 0;
++ bool bDisplayOff = false;
++
++ /* JB Hack */
++ return ret;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_graphics_access_count) ||
++ atomic_read(&g_videoenc_access_count) ||
++ atomic_read(&g_videodec_access_count) ||
++ atomic_read(&g_display_access_count))
++ ret = -EBUSY;
++ //SGX will be powered off when idle due to D0i3 support. If we don't wait
++ //for D0i3, then we hit cases where user mode driver gets stuck waiting
++ //for command completion when SGX is powered off.
++ else if (ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ ret = -EBUSY;
++ else if (psb_check_msvdx_idle(dev))
++ ret = -EBUSY;
++ else if (IS_MRST(dev) && !dev_priv->topaz_disabled && lnc_check_topaz_idle(dev))
++ ret = -EBUSY;
++
++ gbSuspendInProgress = true;
++
++ if (!ret) {
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++ bDisplayOff = true;
++ } else if (!atomic_read(&g_display_access_count)) {
++ //At least power down the display
++ PVRSRVSetDevicePowerStateKM(gui32MRSTDisplayDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ bDisplayOff = true;
++ }
++
++ if (bDisplayOff) {
++ //Set dpms status to off so that an "xset dpms force on" from the
++ //OSPM Framework (or elsewhere) actually executes
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ }
++ }
++
++ gbSuspendInProgress = false;
++
++ mutex_unlock(&g_ospm_mutex);
++ return ret;
++}
++
++/*
++ * ospm_power_resume
++ *
++ * Description: OSPM is telling our driver to resume so restore state
++ * and power up necessary hardware.
++ */
++int ospm_power_resume(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector = NULL;
++
++ /* JB Hack */
++ return 0;
++
++ mutex_lock(&g_ospm_mutex);
++ gbResumeInProgress = true;
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0);
++
++ //Set dpms status to on. We should probably only do this for
++ //connectors that were on prior to the suspend, but for Moorestown
++ //we only have one connector so just brute force it.
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_ON;
++ }
++
++ gbResumeInProgress = false;
++ mutex_unlock(&g_ospm_mutex);
++ return 0;
++}
++
++
++/*
++ * ospm_power_island_down
++ *
++ * Description: Cut power to the specified island(s) (powergating)
++ */
++void ospm_power_island_down(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_mask = 0;
++ u32 pwr_sts = 0;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++#if 1 /* MDFLD_JLIU7_DSR */
++ DRM_INFO("%s, hw_islands = 0x%x. \n", __FUNCTION__, hw_islands);
++ if (hw_islands & (OSPM_VIDEO_DEC_ISLAND | OSPM_GRAPHICS_ISLAND)) {
++ dev_priv->dsr_fb_update &= ~hw_islands;
++ }
++#endif /* MDFLD_JLIU7_DSR */
++ /* JB Hack */
++ return;
++
++ g_hw_power_status_mask &= ~hw_islands;
++
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_ON) {
++ dev_priv->gfx_on_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_OFF;
++ dev_priv->gfx_off_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++ if (pwr_cnt) {
++ pwr_cnt |= inl(dev_priv->apm_base);
++ outl(pwr_cnt, dev_priv->apm_base);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++}
++
++/*
++ * ospm_power_island_up
++ *
++ * Description: Restore power to the specified island(s) (powergating)
++ */
++void ospm_power_island_up(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_sts = 0;
++ u32 pwr_mask = 0;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++#if 1 /* MDFLD_JLIU7_DSR */
++ DRM_INFO("%s, hw_islands = 0x%x. \n", __FUNCTION__, hw_islands);
++ if (hw_islands & (OSPM_VIDEO_DEC_ISLAND | OSPM_GRAPHICS_ISLAND)) {
++ dev_priv->dsr_fb_update |= hw_islands;
++ }
++#endif /* MDFLD_JLIU7_DSR */
++
++ /* JB Hack */
++ return;
++
++ if (IS_MRST(gpDrmDevice) &&
++ (hw_islands & (OSPM_GRAPHICS_ISLAND | OSPM_VIDEO_ENC_ISLAND |
++ OSPM_VIDEO_DEC_ISLAND))) {
++ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++ pwr_mask = 0;
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_OFF) {
++ dev_priv->gfx_off_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_on_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++
++ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
++ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ g_hw_power_status_mask |= hw_islands;
++}
++
++/*
++ * ospm_power_using_hw_begin
++ *
++ * Description: Notify PowerMgmt module that you will be accessing the
++ * specified island's hw so don't power it off. If the island is off,
++ * this function will behave differently depending on the type param.
++ *
++ * OSPM_UHB_FORCE_POWER_ON:
++ * Power on the specified island.
++ * OSPM_UHB_IGNORE_POWER_OFF:
++ * Increment the access counters. The caller is expected to power on
++ * the island if necessary.
++ * OSPM_UHB_ONLY_IF_ON:
++ * Return false and the caller is expected to not access the hw.
++ *
++ * NOTE *** If this is called from and interrupt handler or other atomic
++ * context, then it will return false if we are in the middle of a
++ * power state transition and the caller will be expected to handle that
++ * even if type is OSPM_UHB_FORCE_POWER_ON.
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage)
++{
++ bool ret = false;
++ bool b_island_is_off = false;
++ bool b_atomic = (in_interrupt() || in_atomic());
++ bool b_force_on = (usage == OSPM_UHB_FORCE_POWER_ON);
++ bool b_ignore_off = (usage == OSPM_UHB_IGNORE_POWER_OFF);
++ IMG_UINT32 deviceID = 0;
++
++ /* JB Hack */
++ return true;
++
++ if (!b_atomic)
++ mutex_lock(&g_ospm_mutex);
++ else if ((gbSuspendInProgress || gbResumeInProgress) && b_force_on)
++ goto FailExit;
++
++ b_island_is_off = hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask);
++
++ if (b_island_is_off && !b_force_on && !b_ignore_off)
++ goto FailExit;
++
++ if (b_island_is_off && b_force_on) {
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ deviceID = gui32SGXDeviceID;
++ break;
++ case OSPM_DISPLAY_ISLAND:
++ deviceID = gui32MRSTDisplayDeviceID;
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ deviceID = gui32MRSTMSVDXDeviceID;
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ deviceID = gui32MRSTTOPAZDeviceID;
++ break;
++ }
++
++ if (PVRSRVPowerOnSystemWithDevice(deviceID, b_atomic ? ISR_ID : KERNEL_ID, IMG_FALSE) != PVRSRV_OK)
++ goto FailExit;
++ }
++
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_inc(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_inc(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_inc(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_inc(&g_display_access_count);
++ }
++
++ ret = true;
++FailExit:
++
++ if (!b_atomic)
++ mutex_unlock(&g_ospm_mutex);
++
++ return ret;
++}
++
++
++/*
++ * ospm_power_using_hw_end
++ *
++ * Description: Notify PowerMgmt module that you are done accessing the
++ * specified island's hw so feel free to power it off. Note that this
++ * function doesn't actually power off the islands.
++ */
++void ospm_power_using_hw_end(int hw_island)
++{
++ /* JB Hack */
++ return;
++
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_dec(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_dec(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_dec(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_dec(&g_display_access_count);
++ }
++
++ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
++ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
++ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
++ WARN_ON(atomic_read(&g_display_access_count) < 0);
++}
++
++/*
++ * ospm_power_is_hw_on
++ *
++ * Description: do an instantaneous check for if the specified islands
++ * are on. Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall. Otherwise, use
++ * ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands)
++{
++ /* JB Hack */
++ return true;
++ return ((g_hw_power_status_mask & hw_islands) == hw_islands);
++}
++
++void ospm_apm_power_down_msvdx(struct drm_device *dev)
++{
++ /* JB Hack */
++ return;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videodec_access_count))
++ goto out;
++ if (psb_check_msvdx_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTMSVDXDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
++
++void ospm_apm_power_down_topaz(struct drm_device *dev)
++{
++ /* JB Hack */
++ return;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videoenc_access_count))
++ goto out;
++ if (lnc_check_topaz_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTTOPAZDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/ospm_power.h
+@@ -0,0 +1,79 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _OSPM_POWER_H_
++#define _OSPM_POWER_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage type);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++#endif /*_OSPM_POWER_H_*/
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/psb_powermgmt.h
+@@ -0,0 +1,85 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ */
++#ifndef _PSB_POWERMGMT_H_
++#define _PSB_POWERMGMT_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++//extern int psb_check_msvdx_idle(struct drm_device *dev);
++//extern int lnc_check_topaz_idle(struct drm_device *dev);
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, bool force_on);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++void ospm_suspend_graphics(void);
++#endif /*_PSB_POWERMGMT_H_*/
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sys_pvr_drm_export.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "pvr_drm_shared.h"
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "mmap.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#include "sys_pvr_drm_import.h"
++
++#include "sys_pvr_drm_export.h"
++
++int
++SYSPVRInit(void)
++{
++ PVRDPFInit();
++
++ return 0;
++}
++
++
++int
++SYSPVRLoad(struct drm_device *dev, unsigned long flags)
++{
++ return PVRSRVDrmLoad(dev, flags);
++}
++
++int
++SYSPVROpen(struct drm_device *dev, struct drm_file *pFile)
++{
++ return PVRSRVDrmOpen(dev, pFile);
++}
++
++int
++SYSPVRUnload(struct drm_device *dev)
++{
++ return PVRSRVDrmUnload(dev);
++}
++
++void
++SYSPVRPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVDrmPostClose(dev, file);
++}
++
++int
++SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRSRV_BridgeDispatchKM(dev, arg, pFile);
++}
++
++int
++SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMIsMaster(dev, arg, pFile);
++}
++
++int
++SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMUnprivCmd(dev, arg, pFile);
++}
++
++int
++SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ int ret;
++
++ ret = PVRMMap(pFile, ps_vma);
++ if (ret == -ENOENT)
++ {
++ ret = drm_mmap(pFile, ps_vma);
++ }
++
++ return ret;
++}
++
++int
++SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++#if defined(PDUMP)
++ return dbgdrv_ioctl(dev, arg, pFile);
++#else
++ return -EINVAL;
++#endif
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sys_pvr_drm_export.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_EXPORT_H__)
++#define __SYS_PVR_DRM_EXPORT_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(__KERNEL__)
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
++
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, IMG_UINT32)
++
++#if defined(PDUMP)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
++#else
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++#endif
++
++int SYSPVRInit(void);
++int SYSPVRLoad(struct drm_device *dev, unsigned long flags);
++int SYSPVROpen(struct drm_device *dev, struct drm_file *pFile);
++int SYSPVRUnload(struct drm_device *dev);
++void SYSPVRPostClose(struct drm_device *dev, struct drm_file *file);
++int SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++int SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev);
++
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sys_pvr_drm_import.h
+@@ -0,0 +1,44 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_IMPORT_H__)
++#define __SYS_PVR_DRM_IMPORT_H__
++
++#if defined(__KERNEL__)
++#include "psb_drm.h"
++#endif
++
++#define DRM_PSB_PLACEMENT_OFFSET 0x13
++#if 0
++#define DRM_PVR_RESERVED1 0x0D
++#define DRM_PVR_RESERVED2 0x0E
++#define DRM_PVR_RESERVED3 0x0F
++#define DRM_PVR_RESERVED4 0x10
++#define DRM_PVR_RESERVED5 0x11
++#define DRM_PVR_RESERVED6 0x12
++#endif
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sysconfig-medfield.c
+@@ -0,0 +1,1279 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++#include "linux/pci.h"
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pdump_km.h"
++#include "syslocal.h"
++#if defined(SUPPORT_DRI_DRM_EXT)
++#include "env_data.h"
++#include "psb_drv.h"
++#include "psb_powermgmt.h"
++#include "sys_pvr_drm_export.h"
++#include "msvdx_power.h"
++#include "topaz_power.h"
++#endif
++
++/* Graphics MSI address and data region in PCIx */
++#define MRST_PCIx_MSI_ADDR_LOC 0x94
++#define MRST_PCIx_MSI_DATA_LOC 0x98
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (50)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++#else
++#define DRI_DRM_STATIC static
++#endif
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++IMG_UINT32 gui32MRSTMSVDXDeviceID;
++IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++extern void ospm_suspend_graphics(void);
++
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern struct drm_device *gpDrmDevice;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++#endif
++
++#if !defined(NO_HARDWARE)
++IMG_CPU_VIRTADDR gsPoulsboRegsCPUVaddr;
++
++IMG_CPU_VIRTADDR gsPoulsboDisplayRegsCPUVaddr;
++#endif
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++#define POULSBO_ADDR_RANGE_INDEX (MMADR_INDEX - 4)
++#define POULSBO_HP_ADDR_RANGE_INDEX (GMADR_INDEX - 4)
++static PVRSRV_ERROR PCIInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef LDM_PCI
++ psSysSpecData->hSGXPCI = OSPCISetDev((IMG_VOID *)psSysSpecData->psPCIDev, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#else
++ psSysSpecData->hSGXPCI = OSPCIAcquireDev(SYS_SGX_DEV_VENDOR_ID, gpDrmDevice->pci_device, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#endif
++ if (!psSysSpecData->hSGXPCI)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV);
++
++ PVR_TRACE(("PCI memory region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX)));
++ PVR_TRACE(("Host Port region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX)));
++
++
++ if (OSPCIAddrRangeLen(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) < (IS_MID(gpDrmDevice) ? POULSBO_MAX_OFFSET:PSB_POULSBO_MAX_OFFSET))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region isn't big enough"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE);
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Host Port region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE);
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PCIDeInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV))
++ {
++ OSPCIReleaseDev(psSysSpecData->hSGXPCI);
++ }
++}
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32BaseAddr = 0;
++ IMG_UINT32 ui32IRQ = 0;
++ IMG_UINT32 ui32HostPortAddr = 0;
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ ui32BaseAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ ui32HostPortAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++
++ PVR_TRACE(("ui32BaseAddr: %p", ui32BaseAddr));
++ PVR_TRACE(("ui32HostPortAddr: %p", ui32HostPortAddr));
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ if (IS_MID(gpDrmDevice))
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + SGX_REGS_OFFSET;
++ else
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + PSB_SGX_REGS_OFFSET;
++
++ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ gsSGXDeviceMap.ui32Flags = SGX_HOSTPORT_PRESENT;
++ gsSGXDeviceMap.sHPSysPBase.uiAddr = ui32HostPortAddr;
++ gsSGXDeviceMap.sHPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sHPSysPBase);
++ if (IS_MID(gpDrmDevice))
++ gsSGXDeviceMap.ui32HPSize = SYS_SGX_HP_SIZE;
++ else
++ gsSGXDeviceMap.ui32HPSize = PSB_SYS_SGX_HP_SIZE;
++#endif
++
++#if defined(MRST_SLAVEPORT)
++
++ gsSGXDeviceMap.sSPSysPBase.uiAddr = ui32BaseAddr + MRST_SGX_SP_OFFSET;
++ gsSGXDeviceMap.sSPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sSPSysPBase);
++ gsSGXDeviceMap.ui32SPSize = SGX_SP_SIZE;
++#endif
++
++
++
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
++ gsSGXDeviceMap.ui32LocalMemSize = 0;
++
++
++ {
++ IMG_SYS_PHYADDR sPoulsboRegsCpuPBase;
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_REGS_OFFSET;
++ gsPoulsboRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_DISPLAY_REGS_OFFSET;
++ gsPoulsboDisplayRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#define VERSION_STR_MAX_LEN_TEMPLATE "SGX revision = 000.000.000"
++static PVRSRV_ERROR SysCreateVersionString(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32MaxStrLen;
++ PVRSRV_ERROR eError;
++ IMG_INT32 i32Count;
++ IMG_CHAR *pszVersionString;
++ IMG_UINT32 ui32SGXRevision = 0;
++ IMG_VOID *pvSGXRegs;
++
++ pvSGXRegs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (pvSGXRegs != IMG_NULL)
++ {
++ ui32SGXRevision = OSReadHWReg(pvSGXRegs, EUR_CR_CORE_REVISION);
++ OSUnMapPhysToLin(pvSGXRegs,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysCreateVersionString: Couldn't map SGX registers"));
++ }
++
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ (IMG_PVOID *)&pszVersionString,
++ IMG_NULL,
++ "Version String");
++ if(eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ i32Count = OSSNPrintf(pszVersionString, ui32MaxStrLen + 1,
++ "SGX revision = %u.%u.%u",
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++ if(i32Count == -1)
++ {
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ pszVersionString,
++ IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psSysData->pszVersionString = pszVersionString;
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID SysFreeVersionString(SYS_DATA *psSysData)
++{
++ if(psSysData->pszVersionString)
++ {
++ IMG_UINT32 ui32MaxStrLen;
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen+1,
++ psSysData->pszVersionString,
++ IMG_NULL);
++ psSysData->pszVersionString = IMG_NULL;
++ }
++}
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++ IMG_UINT32 i = 0;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION* psTimingInfo;
++ struct drm_psb_private *dev_priv;
++ dev_priv = (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ gpsSysData = &gsSysData;
++ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++
++ PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = (drm_psb_ospm != 0);
++ /*printk(KERN_ERR "SGX APM is %s\n", (drm_psb_ospm != 0)? "enabled":"disabled"); */
++#else
++ psTimingInfo->bEnableActivePM = IMG_FALSE;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ eError = PCIInitDev(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++
++ for(i=0; i<SYS_DEVICE_COUNT; i++)
++ {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = IMG_NULL;
++ gpsSysData->psQueueList = IMG_NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++#if !defined(SUPPORT_DRI_DRM_EXT) || defined(MDFLD)
++ /* register MSVDX, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, MSVDXRegisterDevice,
++ DEVICE_MSVDX_INTERRUPT, &gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register MSVDXdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MID(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ /* register TOPAZ, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, TOPAZRegisterDevice,
++ DEVICE_TOPAZ_INTERRUPT, &gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register TOPAZdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++#endif
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++
++ while(psDeviceNode)
++ {
++
++ switch(psDeviceNode->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++ {
++ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++#ifdef OEM_CUSTOMISE
++
++#endif
++ }
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ gpsSGXDevNode = psDeviceNode;
++#endif
++ break;
++ }
++ case PVRSRV_DEVICE_TYPE_MSVDX:
++ /* nothing need to do here */
++ break;
++ case PVRSRV_DEVICE_TYPE_TOPAZ:
++ break;
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ }
++
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PDUMP_INIT);
++
++
++ eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_SGX_INITIALISED);
++
++#if defined(CONFIG_MRST) && !defined(SUPPORT_DRI_DRM_EXT)
++ /* Initialize MSVDX and TOPAZ,
++ * to be matched with the PVRSRVRegisterDevice() calling
++ * for MSVDX and TOPAZ
++ */
++ eError = PVRSRVInitialiseDevice (gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ if (IS_MID(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ eError = PVRSRVInitialiseDevice (gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysEnableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32RegData | ui32Mask);
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData | ui32Mask);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SysEnableInterrupts: Interrupts enabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysDisableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData | ui32Mask);
++
++ PVR_TRACE(("SysDisableInterrupts: Interrupts disabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++#if defined(SYS_USING_INTERRUPTS)
++ eError = OSInstallMISR(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallMISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_MISR_INSTALLED);
++
++#if defined(CONFIG_MDFLD) && !defined(SUPPORT_DRI_DRM_EXT)
++ eError = OSInstallSystemLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallSystemLISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_LISR_INSTALLED);
++#endif
++#endif
++
++#if defined(CONFIG_MDFLD) && !defined(SUPPORT_DRI_DRM_EXT)
++ SysEnableInterrupts(gpsSysData);
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
++#endif
++ eError = SysCreateVersionString(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
++ {
++ SysDisableInterrupts(psSysData);
++ }
++#endif
++
++#if defined(SYS_USING_INTERRUPTS)
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_LISR_INSTALLED))
++ {
++ eError = OSUninstallSystemLISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallSystemLISR failed"));
++ return eError;
++ }
++ }
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_MISR_INSTALLED))
++ {
++ eError = OSUninstallMISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
++ return eError;
++ }
++ }
++#endif
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_SGX_INITIALISED))
++ {
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ SysFreeVersionString(psSysData);
++
++ PCIDeInitDev(psSysData);
++
++ eError = OSDeInitEnvData(psSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++
++#if !defined(NO_HARDWARE)
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PDUMP_INIT))
++ {
++ PDUMPDEINIT();
++ }
++
++ gpsSysData = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA* psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Devices = 0;
++ IMG_UINT32 ui32Data, ui32DIMMask;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if (ui32Data & POULSBO_THALIA_MASK)
++ {
++ ui32Devices |= DEVICE_SGX_INTERRUPT;
++ }
++
++ if (ui32Data & POULSBO_MSVDX_MASK)
++ {
++ ui32Devices |= DEVICE_MSVDX_INTERRUPT;
++ }
++
++
++ ui32DIMMask = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ ui32DIMMask &= ~(POULSBO_THALIA_MASK | POULSBO_MSVDX_MASK);
++
++
++ if (ui32Data & ui32DIMMask)
++ {
++ ui32Devices |= DEVICE_DISP_INTERRUPT;
++ }
++
++ return (ui32Devices);
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ return 0;
++#endif
++}
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Data;
++ IMG_UINT32 ui32Mask = 0;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if ((ui32ClearBits & DEVICE_SGX_INTERRUPT) &&
++ (ui32Data & POULSBO_THALIA_MASK))
++ {
++ ui32Mask |= POULSBO_THALIA_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_MSVDX_INTERRUPT) &&
++ (ui32Data & POULSBO_MSVDX_MASK))
++ {
++ ui32Mask |= POULSBO_MSVDX_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_DISP_INTERRUPT) &&
++ (ui32Data & POULSBO_VSYNC_PIPEA_VBLANK_MASK))
++ {
++ ui32Mask |= POULSBO_VSYNC_PIPEA_VBLANK_MASK;
++ }
++
++ if (ui32Mask)
++ {
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32Mask);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++#endif
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap)
++{
++ switch(eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++
++ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ psDeviceNode->ui32SOCInterruptBit = DEVICE_DISP_INTERRUPT;
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize)
++{
++ /* pvIn/ulInSize are unused; reference them explicitly (was empty-body "if (ulInSize || pvIn);"). */
++ PVR_UNREFERENCED_PARAMETER(pvIn); PVR_UNREFERENCED_PARAMETER(ulInSize);
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++ {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*)pvOut;
++
++ psOEMJTable->pfnOEMReadRegistryString = IMG_NULL;
++ psOEMJTable->pfnOEMWriteRegistryString = IMG_NULL;
++
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++static PVRSRV_ERROR SysMapInRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch(psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS))
++ {
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in SGX registers\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP))
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sHPCpuPBase,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++ }
++ psDevInfo->ui32HPSize = gsSGXDeviceMap.ui32HPSize;
++ psDevInfo->sHPSysPAddr = gsSGXDeviceMap.sHPSysPBase;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR SysUnmapRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch (psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ if (psDevInfo->pvRegsBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++#endif
++
++ psDevInfo->pvRegsBaseKM = IMG_NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++
++ psDevInfo->pvHostPortBaseKM = IMG_NULL;
++ }
++
++ psDevInfo->ui32HPSize = 0;
++ psDevInfo->sHPSysPAddr.uiAddr = 0;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError= PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ drm_irq_uninstall(gpDrmDevice);
++
++ SysUnmapRegisters();
++
++ //Save some pci state that won't get saved properly by pci_save_state()
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0x5C, &gsSysSpecificData.saveBSM);
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0xFC, &gsSysSpecificData.saveVBT);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, &gsSysSpecificData.msi_addr);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, &gsSysSpecificData.msi_data);
++
++ eError = OSPCISuspendDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSPCISuspendDev failed (%d)", eError));
++ }
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((gpsSysData->eCurrentPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ eError = OSPCIResumeDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSPCIResumeDev failed (%d)", eError));
++ return eError;
++ }
++
++ //Restore some pci state that will not have gotten restored properly by pci_restore_state()
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0x5c, gsSysSpecificData.saveBSM);
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0xFC, gsSysSpecificData.saveVBT);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, gsSysSpecificData.msi_addr);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, gsSysSpecificData.msi_data);
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ drm_irq_install(gpDrmDevice);
++ }
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Remove SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePostPowerState: Restore SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_GRAPHICS_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ }
++
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++/*hshamilt : I'm not sure how this is supposed to be handled. Need an OSPM engineer to verify*/
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePostPowerState: Restore MSVDX power"));
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++// psb_irq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++// psb_irq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePostPowerState: Restore TOPAZ power"));
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++/*hshamilt : I'm not sure how this is supposed to be handled. Need an OSPM engineer to verify*/
++
++
++#if 0
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++#endif
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev)
++{
++ IMG_BOOL bStatus = IMG_FALSE;
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ if (gpsSGXDevNode != IMG_NULL)
++ {
++ bStatus = (*gpsSGXDevNode->pfnDeviceISR)(gpsSGXDevNode->pvISRData);
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)gpsSGXDevNode->psSysData);
++ }
++ }
++
++ return bStatus ? 1 : 0;
++}
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sysconfig-moorestown.c
+@@ -0,0 +1,1219 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++#include "linux/pci.h"
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pdump_km.h"
++#include "syslocal.h"
++#if defined(SUPPORT_DRI_DRM_EXT)
++#include "env_data.h"
++#include "psb_drv.h"
++#include "psb_powermgmt.h"
++#include "sys_pvr_drm_export.h"
++#include "msvdx_power.h"
++#include "topaz_power.h"
++#endif
++
++/* Graphics MSI address and data region in PCIx */
++#define MRST_PCIx_MSI_ADDR_LOC 0x94
++#define MRST_PCIx_MSI_DATA_LOC 0x98
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (50)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++#else
++#define DRI_DRM_STATIC static
++#endif
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++IMG_UINT32 gui32MRSTMSVDXDeviceID;
++IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++extern void ospm_suspend_graphics(void);
++
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern struct drm_device *gpDrmDevice;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++#endif
++
++#if !defined(NO_HARDWARE)
++IMG_CPU_VIRTADDR gsPoulsboRegsCPUVaddr;
++
++IMG_CPU_VIRTADDR gsPoulsboDisplayRegsCPUVaddr;
++#endif
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++#define POULSBO_ADDR_RANGE_INDEX (MMADR_INDEX - 4)
++#define POULSBO_HP_ADDR_RANGE_INDEX (GMADR_INDEX - 4)
++static PVRSRV_ERROR PCIInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef LDM_PCI
++ psSysSpecData->hSGXPCI = OSPCISetDev((IMG_VOID *)psSysSpecData->psPCIDev, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#else
++ psSysSpecData->hSGXPCI = OSPCIAcquireDev(SYS_SGX_DEV_VENDOR_ID, gpDrmDevice->pci_device, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#endif
++ if (!psSysSpecData->hSGXPCI)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV);
++
++ PVR_TRACE(("PCI memory region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX)));
++ PVR_TRACE(("Host Port region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX)));
++
++
++ if (OSPCIAddrRangeLen(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) < (IS_MID(gpDrmDevice) ? POULSBO_MAX_OFFSET:PSB_POULSBO_MAX_OFFSET))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region isn't big enough"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE);
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Host Port region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE);
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PCIDeInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV))
++ {
++ OSPCIReleaseDev(psSysSpecData->hSGXPCI);
++ }
++}
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32BaseAddr = 0;
++ IMG_UINT32 ui32IRQ = 0;
++ IMG_UINT32 ui32HostPortAddr = 0;
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ ui32BaseAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ ui32HostPortAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++ /* Addresses are IMG_UINT32 values, so print with 0x%x; %p requires a void* argument. */
++ PVR_TRACE(("ui32BaseAddr: 0x%x", ui32BaseAddr));
++ PVR_TRACE(("ui32HostPortAddr: 0x%x", ui32HostPortAddr));
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ if (IS_MID(gpDrmDevice))
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + SGX_REGS_OFFSET;
++ else
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + PSB_SGX_REGS_OFFSET;
++
++ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ gsSGXDeviceMap.ui32Flags = SGX_HOSTPORT_PRESENT;
++ gsSGXDeviceMap.sHPSysPBase.uiAddr = ui32HostPortAddr;
++ gsSGXDeviceMap.sHPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sHPSysPBase);
++ if (IS_MID(gpDrmDevice))
++ gsSGXDeviceMap.ui32HPSize = SYS_SGX_HP_SIZE;
++ else
++ gsSGXDeviceMap.ui32HPSize = PSB_SYS_SGX_HP_SIZE;
++#endif
++
++#if defined(MRST_SLAVEPORT)
++
++ gsSGXDeviceMap.sSPSysPBase.uiAddr = ui32BaseAddr + MRST_SGX_SP_OFFSET;
++ gsSGXDeviceMap.sSPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sSPSysPBase);
++ gsSGXDeviceMap.ui32SPSize = SGX_SP_SIZE;
++#endif
++
++
++
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
++ gsSGXDeviceMap.ui32LocalMemSize = 0;
++
++
++ {
++ IMG_SYS_PHYADDR sPoulsboRegsCpuPBase;
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_REGS_OFFSET;
++ gsPoulsboRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_DISPLAY_REGS_OFFSET;
++ gsPoulsboDisplayRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#define VERSION_STR_MAX_LEN_TEMPLATE "SGX revision = 000.000.000"
++static PVRSRV_ERROR SysCreateVersionString(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32MaxStrLen;
++ PVRSRV_ERROR eError;
++ IMG_INT32 i32Count;
++ IMG_CHAR *pszVersionString;
++ IMG_UINT32 ui32SGXRevision = 0;
++ IMG_VOID *pvSGXRegs;
++
++ pvSGXRegs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (pvSGXRegs != IMG_NULL)
++ {
++ ui32SGXRevision = OSReadHWReg(pvSGXRegs, EUR_CR_CORE_REVISION);
++ OSUnMapPhysToLin(pvSGXRegs,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysCreateVersionString: Couldn't map SGX registers"));
++ }
++
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ (IMG_PVOID *)&pszVersionString,
++ IMG_NULL,
++ "Version String");
++ if(eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ i32Count = OSSNPrintf(pszVersionString, ui32MaxStrLen + 1,
++ "SGX revision = %u.%u.%u",
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++ if(i32Count == -1)
++ {
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ pszVersionString,
++ IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psSysData->pszVersionString = pszVersionString;
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID SysFreeVersionString(SYS_DATA *psSysData)
++{
++ if(psSysData->pszVersionString)
++ {
++ IMG_UINT32 ui32MaxStrLen;
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen+1,
++ psSysData->pszVersionString,
++ IMG_NULL);
++ psSysData->pszVersionString = IMG_NULL;
++ }
++}
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++ IMG_UINT32 i = 0;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION* psTimingInfo;
++ struct drm_psb_private *dev_priv;
++ dev_priv = (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ gpsSysData = &gsSysData;
++ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++
++ PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = (drm_psb_ospm != 0);
++ /*printk(KERN_ERR "SGX APM is %s\n", (drm_psb_ospm != 0)? "enabled":"disabled"); */
++#else
++ psTimingInfo->bEnableActivePM = IMG_FALSE;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ eError = PCIInitDev(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++
++ for(i=0; i<SYS_DEVICE_COUNT; i++)
++ {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = IMG_NULL;
++ gpsSysData->psQueueList = IMG_NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++#if !defined(SUPPORT_DRI_DRM_EXT) || defined(MDFLD)
++ /* register MSVDX, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, MSVDXRegisterDevice,
++ DEVICE_MSVDX_INTERRUPT, &gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register MSVDXdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MID(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ /* register TOPAZ, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, TOPAZRegisterDevice,
++ DEVICE_TOPAZ_INTERRUPT, &gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register TOPAZdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++#endif
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++
++ while(psDeviceNode)
++ {
++
++ switch(psDeviceNode->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++ {
++ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++#ifdef OEM_CUSTOMISE
++
++#endif
++ }
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ gpsSGXDevNode = psDeviceNode;
++#endif
++ break;
++ }
++ case PVRSRV_DEVICE_TYPE_MSVDX:
++ /* nothing need to do here */
++ break;
++ case PVRSRV_DEVICE_TYPE_TOPAZ:
++ break;
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ }
++
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PDUMP_INIT);
++
++
++ eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_SGX_INITIALISED);
++
++#if defined(CONFIG_MRST) && !defined(SUPPORT_DRI_DRM_EXT)
++ /* Initialize MSVDX and TOPAZ,
++ * to be matched with the PVRSRVRegisterDevice() calling
++ * for MSVDX and TOPAZ
++ */
++ eError = PVRSRVInitialiseDevice (gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ if (IS_MID(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ eError = PVRSRVInitialiseDevice (gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysEnableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32RegData | ui32Mask);
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData | ui32Mask);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SysEnableInterrupts: Interrupts enabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static IMG_VOID SysDisableInterrupts(SYS_DATA *psSysData)
++{
++#if !defined(NO_HARDWARE)
++ IMG_UINT32 ui32RegData;
++ IMG_UINT32 ui32Mask;
++ ui32Mask = POULSBO_THALIA_MASK;
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG, ui32RegData & (~ui32Mask));
++
++
++ ui32RegData = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG);
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_MASK_REG, ui32RegData | ui32Mask);
++
++ PVR_TRACE(("SysDisableInterrupts: Interrupts disabled"));
++#endif
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++}
++#endif
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++#if defined(SYS_USING_INTERRUPTS)
++ eError = OSInstallMISR(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallMISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_MISR_INSTALLED);
++
++#if defined(CONFIG_MDFLD) && !defined(SUPPORT_DRI_DRM_EXT)
++ eError = OSInstallSystemLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: OSInstallSystemLISR failed"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_LISR_INSTALLED);
++#endif
++#endif
++
++#if defined(CONFIG_MDFLD) && !defined(SUPPORT_DRI_DRM_EXT)
++ SysEnableInterrupts(gpsSysData);
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
++#endif
++ eError = SysCreateVersionString(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
++ {
++ SysDisableInterrupts(psSysData);
++ }
++#endif
++
++#if defined(SYS_USING_INTERRUPTS)
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_LISR_INSTALLED))
++ {
++ eError = OSUninstallSystemLISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallSystemLISR failed"));
++ return eError;
++ }
++ }
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_MISR_INSTALLED))
++ {
++ eError = OSUninstallMISR(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
++ return eError;
++ }
++ }
++#endif
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_SGX_INITIALISED))
++ {
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ SysFreeVersionString(psSysData);
++
++ PCIDeInitDev(psSysData);
++
++ eError = OSDeInitEnvData(psSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++
++#if !defined(NO_HARDWARE)
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PDUMP_INIT))
++ {
++ PDUMPDEINIT();
++ }
++
++ gpsSysData = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA* psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Devices = 0;
++ IMG_UINT32 ui32Data, ui32DIMMask;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if (ui32Data & POULSBO_THALIA_MASK)
++ {
++ ui32Devices |= DEVICE_SGX_INTERRUPT;
++ }
++
++ if (ui32Data & POULSBO_MSVDX_MASK)
++ {
++ ui32Devices |= DEVICE_MSVDX_INTERRUPT;
++ }
++
++
++ ui32DIMMask = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_ENABLE_REG);
++ ui32DIMMask &= ~(POULSBO_THALIA_MASK | POULSBO_MSVDX_MASK);
++
++
++ if (ui32Data & ui32DIMMask)
++ {
++ ui32Devices |= DEVICE_DISP_INTERRUPT;
++ }
++
++ return (ui32Devices);
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ return 0;
++#endif
++}
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++#if !defined(SUPPORT_DRI_DRM_EXT)
++ IMG_UINT32 ui32Data;
++ IMG_UINT32 ui32Mask = 0;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ ui32Data = OSReadHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG);
++
++ if ((ui32ClearBits & DEVICE_SGX_INTERRUPT) &&
++ (ui32Data & POULSBO_THALIA_MASK))
++ {
++ ui32Mask |= POULSBO_THALIA_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_MSVDX_INTERRUPT) &&
++ (ui32Data & POULSBO_MSVDX_MASK))
++ {
++ ui32Mask |= POULSBO_MSVDX_MASK;
++ }
++
++ if ((ui32ClearBits & DEVICE_DISP_INTERRUPT) &&
++ (ui32Data & POULSBO_VSYNC_PIPEA_VBLANK_MASK))
++ {
++ ui32Mask |= POULSBO_VSYNC_PIPEA_VBLANK_MASK;
++ }
++
++ if (ui32Mask)
++ {
++ OSWriteHWReg(gsPoulsboRegsCPUVaddr, POULSBO_INTERRUPT_IDENTITY_REG, ui32Mask);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++#endif
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap)
++{
++ switch(eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++
++ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ psDeviceNode->ui32SOCInterruptBit = DEVICE_DISP_INTERRUPT;
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize)
++{
++ if (ulInSize || pvIn);
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++ {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*)pvOut;
++
++ psOEMJTable->pfnOEMReadRegistryString = IMG_NULL;
++ psOEMJTable->pfnOEMWriteRegistryString = IMG_NULL;
++
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++static PVRSRV_ERROR SysMapInRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch(psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS))
++ {
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in SGX registers\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP))
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sHPCpuPBase,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++ }
++ psDevInfo->ui32HPSize = gsSGXDeviceMap.ui32HPSize;
++ psDevInfo->sHPSysPAddr = gsSGXDeviceMap.sHPSysPBase;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR SysUnmapRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch (psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ if (psDevInfo->pvRegsBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++#endif
++
++ psDevInfo->pvRegsBaseKM = IMG_NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++
++ psDevInfo->pvHostPortBaseKM = IMG_NULL;
++ }
++
++ psDevInfo->ui32HPSize = 0;
++ psDevInfo->sHPSysPAddr.uiAddr = 0;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++#if !(defined(NO_HARDWARE) || defined(__linux__))
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError= PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ drm_irq_uninstall(gpDrmDevice);
++
++ SysUnmapRegisters();
++
++ //Save some pci state that won't get saved properly by pci_save_state()
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0x5C, &gsSysSpecificData.saveBSM);
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0xFC, &gsSysSpecificData.saveVBT);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, &gsSysSpecificData.msi_addr);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, &gsSysSpecificData.msi_data);
++
++ eError = OSPCISuspendDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSPCISuspendDev failed (%d)", eError));
++ }
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((gpsSysData->eCurrentPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ eError = OSPCIResumeDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSPCIResumeDev failed (%d)", eError));
++ return eError;
++ }
++
++ //Restore some pci state that will not have gotten restored properly by pci_restore_state()
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0x5c, gsSysSpecificData.saveBSM);
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0xFC, gsSysSpecificData.saveVBT);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, gsSysSpecificData.msi_addr);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, gsSysSpecificData.msi_data);
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ drm_irq_install(gpDrmDevice);
++ }
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Remove SGX power"));
++ psb_irq_uninstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ ospm_power_island_down(OSPM_GRAPHICS_ISLAND);
++ }
++#if 0
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++#if defined(SUPPORT_DRI_DRM_EXT)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ }
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++ }
++#if 0
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Restore SGX power"));
++#if defined(SUPPORT_DRI_DRM_EXT)
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND, true))
++ {
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++ }
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev)
++{
++ IMG_BOOL bStatus = IMG_FALSE;
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ if (gpsSGXDevNode != IMG_NULL)
++ {
++ bStatus = (*gpsSGXDevNode->pfnDeviceISR)(gpsSGXDevNode->pvISRData);
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)gpsSGXDevNode->psSysData);
++ }
++ }
++
++ return bStatus ? 1 : 0;
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sysconfig.h
+@@ -0,0 +1,151 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++#include "syscommon.h"
++
++#ifdef CONFIG_DRM_MDFLD
++#define VS_PRODUCT_NAME "SGX Medfield"
++#else
++#define VS_PRODUCT_NAME "SGX Moorestown"
++#endif
++
++#define SYS_NO_POWER_LOCK_TIMEOUT
++
++/*#define SGX_FEATURE_HOST_PORT */
++
++#define SYS_SGX_USSE_COUNT (2)
++
++#define POULSBO_REGS_OFFSET 0x00000
++#define POULSBO_REG_SIZE 0x2100
++
++#define SGX_REGS_OFFSET 0x80000
++#define PSB_SGX_REGS_OFFSET 0x40000
++#define SGX_REG_SIZE 0x4000
++#define MSVDX_REGS_OFFSET 0x50000
++
++#ifdef SUPPORT_MSVDX
++#define POULSBO_MAX_OFFSET (MSVDX_REGS_OFFSET + MSVDX_REG_SIZE)
++#else
++#define POULSBO_MAX_OFFSET (SGX_REGS_OFFSET + SGX_REG_SIZE)
++#define PSB_POULSBO_MAX_OFFSET (PSB_SGX_REGS_OFFSET + SGX_REG_SIZE)
++#endif
++
++#define SYS_SGX_DEV_VENDOR_ID 0x8086
++#define PSB_SYS_SGX_DEV_DEVICE_ID_1 0x8108
++#define PSB_SYS_SGX_DEV_DEVICE_ID_2 0x8109
++
++#define SYS_SGX_DEVICE_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130}, \
++ {0, 0, 0}
++
++
++#define MMADR_INDEX 4
++#define IOPORT_INDEX 5
++#define GMADR_INDEX 6
++#define MMUADR_INDEX 7
++#define FBADR_INDEX 23
++#define FBSIZE_INDEX 24
++
++#define DISPLAY_SURFACE_SIZE (4 * 1024 * 1024)
++
++#define DEVICE_SGX_INTERRUPT (1<<0)
++#define DEVICE_MSVDX_INTERRUPT (1<<1)
++#define DEVICE_DISP_INTERRUPT (1<<2)
++#define DEVICE_TOPAZ_INTERRUPT (1<<3)
++
++#define POULSBO_DISP_MASK (1<<17)
++#define POULSBO_THALIA_MASK (1<<18)
++#define POULSBO_MSVDX_MASK (1<<19)
++#define POULSBO_VSYNC_PIPEA_VBLANK_MASK (1<<7)
++#define POULSBO_VSYNC_PIPEA_EVENT_MASK (1<<6)
++#define POULSBO_VSYNC_PIPEB_VBLANK_MASK (1<<5)
++#define POULSBO_VSYNC_PIPEB_EVENT_MASK (1<<4)
++
++#define POULSBO_DISPLAY_REGS_OFFSET 0x70000
++#define POULSBO_DISPLAY_REG_SIZE 0x2000
++
++#define POULSBO_DISPLAY_A_CONFIG 0x00008
++#define POULSBO_DISPLAY_A_STATUS_SELECT 0x00024
++#define POULSBO_DISPLAY_B_CONFIG 0x01008
++#define POULSBO_DISPLAY_B_STATUS_SELECT 0x01024
++
++#define POULSBO_DISPLAY_PIPE_ENABLE (1<<31)
++#define POULSBO_DISPLAY_VSYNC_STS_EN (1<<25)
++#define POULSBO_DISPLAY_VSYNC_STS (1<<9)
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ #define SYS_SGX_HP_SIZE 0x8000000
++ #define PSB_SYS_SGX_HP_SIZE 0x4000000
++
++ #define SYS_SGX_HOSTPORT_BASE_DEVVADDR 0xD0000000
++ #if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030)
++
++
++
++ #define SYS_SGX_HOSTPORT_BRN23030_OFFSET 0x7C00000
++ #endif
++#endif
++
++
++typedef struct
++{
++ union
++ {
++#if !defined(VISTA)
++ IMG_UINT8 aui8PCISpace[256];
++ IMG_UINT16 aui16PCISpace[128];
++ IMG_UINT32 aui32PCISpace[64];
++#endif
++ struct
++ {
++ IMG_UINT16 ui16VenID;
++ IMG_UINT16 ui16DevID;
++ IMG_UINT16 ui16PCICmd;
++ IMG_UINT16 ui16PCIStatus;
++ }s;
++ }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sysinfo.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US (500000)
++#define WAIT_TRY_COUNT (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++ SYS_DEVICE_SGX = 0,
++
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 4
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sysirq.h
+@@ -0,0 +1,49 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _SYSIRQ_H_
++#define _SYSIRQ_H_
++
++#include <drm/drmP.h>
++
++bool sysirq_init(struct drm_device *dev);
++void sysirq_uninit(struct drm_device *dev);
++
++void psb_irq_preinstall(struct drm_device *dev);
++int psb_irq_postinstall(struct drm_device *dev);
++void psb_irq_uninstall(struct drm_device *dev);
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++
++void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
++void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
++
++int psb_irq_enable_dpst(struct drm_device *dev);
++int psb_irq_disable_dpst(struct drm_device *dev);
++void sysirq_turn_on_dpst(struct drm_device *dev);
++void sysirq_turn_off_dpst(struct drm_device *dev);
++int psb_enable_vblank(struct drm_device *dev, int pipe);
++void psb_disable_vblank(struct drm_device *dev, int pipe);
++u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
++
++#endif //_SYSIRQ_H_
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/syslocal.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++#define SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV 0x00000001
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE 0x00000002
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE 0x00000004
++#if defined(NO_HARDWARE)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_SGX_REGS 0x00000008
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_MSVDX_REGS 0x00000020
++#endif
++#endif
++#define SYS_SPECIFIC_DATA_SGX_INITIALISED 0x00000040
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_MSVDX_INITIALISED 0x00000080
++#endif
++#define SYS_SPECIFIC_DATA_MISR_INSTALLED 0x00000100
++#define SYS_SPECIFIC_DATA_LISR_INSTALLED 0x00000200
++#define SYS_SPECIFIC_DATA_PDUMP_INIT 0x00000400
++#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00000800
++
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS 0x00001000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP 0x00004000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_MSVDX_REGS 0x00008000
++#define SYS_SPECIFIC_DATA_PM_IRQ_DISABLE 0x00010000
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00020000
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++
++ IMG_UINT32 ui32SysSpecificData;
++#ifdef __linux__
++ PVRSRV_PCI_DEV_HANDLE hSGXPCI;
++#endif
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM_EXT)
++ struct pci_dev *psPCIDev;
++#endif
++ /* MSI reg save */
++ uint32_t msi_addr;
++ uint32_t msi_data;
++
++ uint32_t saveBSM;
++ uint32_t saveVBT;
++} SYS_SPECIFIC_DATA;
++
++
++#endif
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/services4/system/unified/sysutils.c
+@@ -0,0 +1,30 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+@@ -0,0 +1,48 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _LINUXSRV_H__
++#define _LINUXSRV_H__
++
++typedef struct tagIOCTL_PACKAGE
++{
++ IMG_UINT32 ui32Cmd;
++ IMG_UINT32 ui32Size;
++ IMG_VOID *pInBuffer;
++ IMG_UINT32 ui32InBufferSize;
++ IMG_VOID *pOutBuffer;
++ IMG_UINT32 ui32OutBufferSize;
++} IOCTL_PACKAGE;
++
++IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
++ IMG_UINT32 ui32ControlCode,
++ IMG_VOID *pInBuffer,
++ IMG_UINT32 ui32InBufferSize,
++ IMG_VOID *pOutBuffer,
++ IMG_UINT32 ui32OutBufferSize,
++ IMG_UINT32 *pui32BytesReturned);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+@@ -0,0 +1,2075 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifdef LINUX
++#include <linux/string.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++
++
++
++#define LAST_FRAME_BUF_SIZE 1024
++
++typedef struct _DBG_LASTFRAME_BUFFER_ {
++ PDBG_STREAM psStream;
++ IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE];
++ IMG_UINT32 ui32BufLen;
++ struct _DBG_LASTFRAME_BUFFER_ *psNext;
++} *PDBG_LASTFRAME_BUFFER;
++
++
++static PDBG_STREAM g_psStreamList = 0;
++static PDBG_LASTFRAME_BUFFER g_psLFBufferList;
++
++static IMG_UINT32 g_ui32LOff = 0;
++static IMG_UINT32 g_ui32Line = 0;
++static IMG_UINT32 g_ui32MonoLines = 25;
++
++static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE;
++static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff;
++static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff;
++
++IMG_VOID * g_pvAPIMutex=IMG_NULL;
++
++extern IMG_UINT32 g_ui32HotKeyFrame;
++extern IMG_BOOL g_bHotKeyPressed;
++extern IMG_BOOL g_bHotKeyRegistered;
++
++IMG_BOOL gbDumpThisFrame = IMG_FALSE;
++
++
++IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
++IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
++PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream);
++
++DBGKM_SERVICE_TABLE g_sDBGKMServices =
++{
++ sizeof (DBGKM_SERVICE_TABLE),
++ ExtDBGDrivCreateStream,
++ ExtDBGDrivDestroyStream,
++ ExtDBGDrivFindStream,
++ ExtDBGDrivWriteString,
++ ExtDBGDrivReadString,
++ ExtDBGDrivWrite,
++ ExtDBGDrivRead,
++ ExtDBGDrivSetCaptureMode,
++ ExtDBGDrivSetOutputMode,
++ ExtDBGDrivSetDebugLevel,
++ ExtDBGDrivSetFrame,
++ ExtDBGDrivGetFrame,
++ ExtDBGDrivOverrideMode,
++ ExtDBGDrivDefaultMode,
++ ExtDBGDrivWrite2,
++ ExtDBGDrivWriteStringCM,
++ ExtDBGDrivWriteCM,
++ ExtDBGDrivSetMarker,
++ ExtDBGDrivGetMarker,
++ ExtDBGDrivStartInitPhase,
++ ExtDBGDrivStopInitPhase,
++ ExtDBGDrivIsCaptureFrame,
++ ExtDBGDrivWriteLF,
++ ExtDBGDrivReadLF,
++ ExtDBGDrivGetStreamOffset,
++ ExtDBGDrivSetStreamOffset,
++ ExtDBGDrivIsLastCaptureFrame,
++ ExtDBGDrivWaitForEvent
++};
++
++
++
++
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size)
++{
++ IMG_VOID * pvRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet=DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, ui32Size);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++void IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDestroyStream(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
++{
++ IMG_VOID * pvRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet=DBGDrivFindStream(pszName, bResetStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteString(psStream, pszString, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivReadString(psStream, pszString, ui32Limit);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, ui32SampleRate);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetOutputMode(psStream, ui32OutMode);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetDebugLevel(psStream, ui32DebugLevel);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetFrame(psStream, ui32Frame);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivGetFrame(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
++{
++ IMG_BOOL bRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ bRet = DBGDrivIsLastCaptureFrame(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return bRet;
++}
++
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
++{
++ IMG_BOOL bRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ bRet = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return bRet;
++}
++
++void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivOverrideMode(psStream, ui32Mode);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDefaultMode(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteStringCM(psStream, pszString, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetMarker(psStream, ui32Marker);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Marker;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Marker = DBGDrivGetMarker(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Marker;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, ui32Flags);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStartInitPhase(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStopInitPhase(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivGetStreamOffset(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetStreamOffset(psStream, ui32StreamOffset);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
++{
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ DBGDrivWaitForEvent(eEvent);
++#else
++ PVR_UNREFERENCED_PARAMETER(eEvent);
++#endif
++}
++
++IMG_UINT32 AtoI(IMG_CHAR *szIn)
++{
++ IMG_INT iLen = 0;
++ IMG_UINT32 ui32Value = 0;
++ IMG_UINT32 ui32Digit=1;
++ IMG_UINT32 ui32Base=10;
++ IMG_INT iPos;
++ IMG_CHAR bc;
++
++
++ while (szIn[iLen] > 0)
++ {
++ iLen ++;
++ }
++
++
++ if (iLen == 0)
++ {
++ return (0);
++ }
++
++
++ iPos=0;
++ while (szIn[iPos] == '0')
++ {
++ iPos++;
++ }
++ if (szIn[iPos] == '\0')
++ {
++ return 0;
++ }
++ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
++ {
++ ui32Base=16;
++ szIn[iPos]='0';
++ }
++
++
++ for (iPos = iLen - 1; iPos >= 0; iPos --)
++ {
++ bc = szIn[iPos];
++
++ if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16)
++ {
++ bc -= 'a' - 0xa;
++ }
++ else
++ if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16)
++ {
++ bc -= 'A' - 0xa;
++ }
++ else
++ if ((bc >= '0') && (bc <= '9'))
++ {
++ bc -= '0';
++ }
++ else
++ return (0);
++
++ ui32Value += (IMG_UINT32)bc * ui32Digit;
++
++ ui32Digit = ui32Digit * ui32Base;
++ }
++ return (ui32Value);
++}
++
++
++IMG_BOOL StreamValid(PDBG_STREAM psStream)
++{
++ PDBG_STREAM psThis;
++
++ psThis = g_psStreamList;
++
++ while (psThis)
++ {
++ if (psStream && (psThis == psStream))
++ {
++ return(IMG_TRUE);
++ }
++ else
++ {
++ psThis = psThis->psNext;
++ }
++ }
++
++ return(IMG_FALSE);
++}
++
++
++void Write(PDBG_STREAM psStream,IMG_UINT8 * pui8Data,IMG_UINT32 ui32InBuffSize)
++{
++
++
++ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
++ {
++ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
++ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
++
++
++ HostMemCopy((IMG_VOID *)(psStream->ui32Base + psStream->ui32WPtr),
++ (IMG_VOID *) pui8Data,
++ ui32B1);
++
++
++ HostMemCopy((IMG_VOID *)psStream->ui32Base,
++ (IMG_VOID *)((IMG_UINT32) pui8Data + ui32B1),
++ ui32B2);
++
++
++ psStream->ui32WPtr = ui32B2;
++ }
++ else
++ {
++ HostMemCopy((IMG_VOID *)(psStream->ui32Base + psStream->ui32WPtr),
++ (IMG_VOID *) pui8Data,
++ ui32InBuffSize);
++
++ psStream->ui32WPtr += ui32InBuffSize;
++
++ if (psStream->ui32WPtr == psStream->ui32Size)
++ {
++ psStream->ui32WPtr = 0;
++ }
++ }
++ psStream->ui32DataWritten += ui32InBuffSize;
++}
++
++
++void MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine)
++{
++ IMG_UINT32 i;
++ IMG_CHAR * pScreen;
++
++ pScreen = (IMG_CHAR *) DBGDRIV_MONOBASE;
++
++ pScreen += g_ui32Line * 160;
++
++
++
++ i=0;
++ do
++ {
++ pScreen[g_ui32LOff + (i*2)] = pszString[i];
++ pScreen[g_ui32LOff + (i*2)+1] = 127;
++ i++;
++ }
++ while ((pszString[i] != 0) && (i < 4096));
++
++ g_ui32LOff += i * 2;
++
++ if (bNewLine)
++ {
++ g_ui32LOff = 0;
++ g_ui32Line++;
++ }
++
++
++
++ if (g_ui32Line == g_ui32MonoLines)
++ {
++ g_ui32Line = g_ui32MonoLines - 1;
++
++ HostMemCopy((IMG_VOID *)DBGDRIV_MONOBASE,(IMG_VOID *)(DBGDRIV_MONOBASE + 160),160 * (g_ui32MonoLines - 1));
++
++ HostMemSet((IMG_VOID *)(DBGDRIV_MONOBASE + (160 * (g_ui32MonoLines - 1))),0,160);
++ }
++}
++
++
++
++void AppendName(IMG_CHAR * pszOut,IMG_CHAR * pszBase,IMG_CHAR * pszName)
++{
++ IMG_UINT32 i;
++ IMG_UINT32 ui32Off;
++
++ i = 0;
++
++ while (pszBase[i] != 0)
++ {
++ pszOut[i] = pszBase[i];
++ i++;
++ }
++
++ ui32Off = i;
++ i = 0;
++
++ while (pszName[i] != 0)
++ {
++ pszOut[ui32Off+i] = pszName[i];
++ i++;
++ }
++
++ pszOut[ui32Off+i] = pszName[i];
++}
++
++
++IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
++ IMG_UINT32 ui32CapMode,
++ IMG_UINT32 ui32OutMode,
++ IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size)
++{
++ PDBG_STREAM psStream;
++ PDBG_STREAM psInitStream;
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ IMG_UINT32 ui32Off;
++ IMG_VOID * pvBase;
++
++
++
++
++ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
++
++ if (psStream)
++ {
++ return ((IMG_VOID *) psStream);
++ }
++
++
++
++ psStream = HostNonPageablePageAlloc(1);
++ psInitStream = HostNonPageablePageAlloc(1);
++ psLFBuffer = HostNonPageablePageAlloc(1);
++ if (
++ (!psStream) ||
++ (!psInitStream) ||
++ (!psLFBuffer)
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
++ return((IMG_VOID *) 0);
++ }
++
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ }
++ else
++ {
++ pvBase = HostPageablePageAlloc(ui32Size);
++ }
++
++ if (!pvBase)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
++ HostNonPageablePageFree(psStream);
++ return((IMG_VOID *) 0);
++ }
++
++
++
++ psStream->psNext = 0;
++ psStream->ui32Flags = ui32Flags;
++ psStream->ui32Base = (IMG_UINT32)pvBase;
++ psStream->ui32Size = ui32Size * 4096UL;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = 0;
++ psStream->ui32CapMode = ui32CapMode;
++ psStream->ui32OutMode = ui32OutMode;
++ psStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psStream->ui32DefaultMode = ui32CapMode;
++ psStream->ui32Start = 0;
++ psStream->ui32End = 0;
++ psStream->ui32Current = 0;
++ psStream->ui32SampleRate = 1;
++ psStream->ui32Access = 0;
++ psStream->ui32Timeout = 0;
++ psStream->ui32Marker = 0;
++ psStream->bInitPhaseComplete = IMG_FALSE;
++
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ }
++ else
++ {
++ pvBase = HostPageablePageAlloc(ui32Size);
++ }
++
++ if (!pvBase)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ HostNonPageablePageFree(psStream);
++ return((IMG_VOID *) 0);
++ }
++
++ psInitStream->psNext = 0;
++ psInitStream->ui32Flags = ui32Flags;
++ psInitStream->ui32Base = (IMG_UINT32)pvBase;
++ psInitStream->ui32Size = ui32Size * 4096UL;
++ psInitStream->ui32RPtr = 0;
++ psInitStream->ui32WPtr = 0;
++ psInitStream->ui32DataWritten = 0;
++ psInitStream->ui32CapMode = ui32CapMode;
++ psInitStream->ui32OutMode = ui32OutMode;
++ psInitStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psInitStream->ui32DefaultMode = ui32CapMode;
++ psInitStream->ui32Start = 0;
++ psInitStream->ui32End = 0;
++ psInitStream->ui32Current = 0;
++ psInitStream->ui32SampleRate = 1;
++ psInitStream->ui32Access = 0;
++ psInitStream->ui32Timeout = 0;
++ psInitStream->ui32Marker = 0;
++ psInitStream->bInitPhaseComplete = IMG_FALSE;
++
++ psStream->psInitStream = psInitStream;
++
++
++ psLFBuffer->psStream = psStream;
++ psLFBuffer->ui32BufLen = 0UL;
++
++ g_bHotkeyMiddump = IMG_FALSE;
++ g_ui32HotkeyMiddumpStart = 0xffffffffUL;
++ g_ui32HotkeyMiddumpEnd = 0xffffffffUL;
++
++
++
++ ui32Off = 0;
++
++ do
++ {
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++ ui32Off++;
++ }
++ while ((pszName[ui32Off] != 0) && (ui32Off < (4096UL - sizeof(DBG_STREAM))));
++
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++
++
++ psStream->psNext = g_psStreamList;
++ g_psStreamList = psStream;
++
++ psLFBuffer->psNext = g_psLFBufferList;
++ g_psLFBufferList = psLFBuffer;
++
++
++ return((IMG_VOID *) psStream);
++}
++
++void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream)
++{
++ PDBG_STREAM psStreamThis;
++ PDBG_STREAM psStreamPrev;
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ PDBG_LASTFRAME_BUFFER psLFThis;
++ PDBG_LASTFRAME_BUFFER psLFPrev;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++
++
++ psStreamThis = g_psStreamList;
++ psStreamPrev = 0;
++
++ while (psStreamThis)
++ {
++ if (psStreamThis == psStream)
++ {
++ if (psStreamPrev)
++ {
++ psStreamPrev->psNext = psStreamThis->psNext;
++ }
++ else
++ {
++ g_psStreamList = psStreamThis->psNext;
++ }
++
++ psStreamThis = 0;
++ }
++ else
++ {
++ psStreamPrev = psStreamThis;
++ psStreamThis = psStreamThis->psNext;
++ }
++ }
++
++ psLFThis = g_psLFBufferList;
++ psLFPrev = 0;
++
++ while (psLFThis)
++ {
++ if (psLFThis == psLFBuffer)
++ {
++ if (psLFPrev)
++ {
++ psLFPrev->psNext = psLFThis->psNext;
++ }
++ else
++ {
++ g_psLFBufferList = psLFThis->psNext;
++ }
++
++ psLFThis = 0;
++ }
++ else
++ {
++ psLFPrev = psLFThis;
++ psLFThis = psLFThis->psNext;
++ }
++ }
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ {
++ DeactivateHotKeys();
++ }
++
++
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ HostNonPageablePageFree((IMG_VOID *)psStream->psInitStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ HostPageablePageFree((IMG_VOID *)psStream->psInitStream->ui32Base);
++ }
++
++ HostNonPageablePageFree(psStream->psInitStream);
++ HostNonPageablePageFree(psStream);
++ HostNonPageablePageFree(psLFBuffer);
++
++ if (g_psStreamList == 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
++ }
++
++ return;
++}
++
++IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
++{
++ PDBG_STREAM psStream;
++ PDBG_STREAM psThis;
++ IMG_UINT32 ui32Off;
++ IMG_BOOL bAreSame;
++
++ psStream = 0;
++
++
++
++ for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext)
++ {
++ bAreSame = IMG_TRUE;
++ ui32Off = 0;
++
++ if (strlen(psThis->szName) == strlen(pszName))
++ {
++ while ((psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && (ui32Off < 128) && bAreSame)
++ {
++ if (psThis->szName[ui32Off] != pszName[ui32Off])
++ {
++ bAreSame = IMG_FALSE;
++ }
++
++ ui32Off++;
++ }
++ }
++ else
++ {
++ bAreSame = IMG_FALSE;
++ }
++
++ if (bAreSame)
++ {
++ psStream = psThis;
++ break;
++ }
++ }
++
++ if(bResetStream && psStream)
++ {
++ static IMG_CHAR szComment[] = "-- Init phase terminated\r\n";
++ psStream->psInitStream->ui32RPtr = 0;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = psStream->psInitStream->ui32DataWritten;
++ if (psStream->bInitPhaseComplete == IMG_FALSE)
++ {
++ if (psStream->ui32Flags & DEBUG_FLAGS_TEXTSTREAM)
++ {
++ DBGDrivWrite2(psStream, (IMG_UINT8 *)szComment, sizeof(szComment) - 1, 0x01);
++ }
++ psStream->bInitPhaseComplete = IMG_TRUE;
++ }
++ }
++
++ return((IMG_VOID *) psStream);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0);
++ }
++ }
++ else
++ {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ {
++ return(0);
++ }
++ }
++ }
++
++ return(DBGDrivWriteString(psStream,pszString,ui32Level));
++
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Len;
++ IMG_UINT32 ui32Space;
++ IMG_UINT32 ui32WPtr;
++ IMG_UINT8 * pui8Buffer;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) == 0)
++ {
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"%s: %s\r\n",psStream->szName, pszString));
++ }
++
++
++
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_MONO)
++ {
++ MonoOut(psStream->szName,IMG_FALSE);
++ MonoOut(": ",IMG_FALSE);
++ MonoOut(pszString,IMG_TRUE);
++ }
++ }
++
++
++
++ if (
++ !(
++ ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) != 0) ||
++ ((psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) != 0)
++ )
++ )
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++ if(ui32Space > 0)
++ {
++ ui32Space--;
++ }
++
++ ui32Len = 0;
++ ui32WPtr = psStream->ui32WPtr;
++ pui8Buffer = (IMG_UINT8 *) psStream->ui32Base;
++
++ while((pszString[ui32Len] != 0) && (ui32Len < ui32Space))
++ {
++ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ {
++ ui32WPtr = 0;
++ }
++ }
++
++ if (ui32Len < ui32Space)
++ {
++
++ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ {
++ ui32WPtr = 0;
++ }
++
++
++ psStream->ui32WPtr = ui32WPtr;
++ psStream->ui32DataWritten+= ui32Len;
++ } else
++ {
++ ui32Len = 0;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32Len)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++
++ return(ui32Len);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
++{
++ IMG_UINT32 ui32OutLen;
++ IMG_UINT32 ui32Len;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT8 *pui8Buff;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++
++
++ pui8Buff = (IMG_UINT8 *) psStream->ui32Base;
++ ui32Offset = psStream->ui32RPtr;
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ {
++ return(0);
++ }
++
++
++
++ ui32Len = 0;
++ while((pui8Buff[ui32Offset] != 0) && (ui32Offset != psStream->ui32WPtr))
++ {
++ ui32Offset++;
++ ui32Len++;
++
++
++
++ if (ui32Offset == psStream->ui32Size)
++ {
++ ui32Offset = 0;
++ }
++ }
++
++ ui32OutLen = ui32Len + 1;
++
++
++
++ if (ui32Len > ui32Limit)
++ {
++ return(0);
++ }
++
++
++
++ ui32Offset = psStream->ui32RPtr;
++ ui32Len = 0;
++
++ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit))
++ {
++ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
++ ui32Offset++;
++ ui32Len++;
++
++
++
++ if (ui32Offset == psStream->ui32Size)
++ {
++ ui32Offset = 0;
++ }
++ }
++
++ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
++
++ psStream->ui32RPtr = ui32Offset + 1;
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ {
++ psStream->ui32RPtr = 0;
++ }
++
++ return(ui32OutLen);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Space;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psMainStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psMainStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psMainStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else if (psMainStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psMainStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ return(0xFFFFFFFFUL);
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
++ {
++ return(0);
++ }
++
++ if (ui32Space < 8)
++ {
++ return(0);
++ }
++
++
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ {
++ ui32InBuffSize = ui32Space - 8;
++ }
++
++
++
++ Write(psStream,(IMG_UINT8 *) &ui32InBuffSize,4);
++ Write(psStream,pui8InBuf,ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else
++ {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ }
++
++ return(DBGDrivWrite2(psStream,pui8InBuf,ui32InBuffSize,ui32Level));
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Space;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psMainStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
++ {
++ return(0);
++ }
++
++
++
++ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
++ {
++
++
++
++ if (ui32Space < 32)
++ {
++ return(0);
++ }
++ }
++ else
++ {
++ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
++ {
++ IMG_UINT32 ui32NewBufSize;
++
++
++
++ ui32NewBufSize = 2 * psStream->ui32Size;
++
++ if (ui32InBuffSize > psStream->ui32Size)
++ {
++ ui32NewBufSize += ui32InBuffSize;
++ }
++
++
++
++ if (!ExpandStreamBuffer(psStream,ui32NewBufSize))
++ {
++ if (ui32Space < 32)
++ {
++ return(0);
++ }
++ }
++
++
++
++ ui32Space = SpaceInStream(psStream);
++ }
++ }
++
++
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ {
++ ui32InBuffSize = ui32Space - 4;
++ }
++
++
++
++ Write(psStream,pui8InBuf,ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Data;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0);
++ }
++
++ if(bReadInitBuffer)
++ {
++ psStream = psMainStream->psInitStream;
++ }
++ else
++ {
++ psStream = psMainStream;
++ }
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ {
++ return(0);
++ }
++
++
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr)
++ {
++ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
++ }
++ else
++ {
++ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
++ }
++
++
++
++ if (ui32Data > ui32OutBuffSize)
++ {
++ ui32Data = ui32OutBuffSize;
++ }
++
++
++
++ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
++ {
++ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
++ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
++
++
++ HostMemCopy((IMG_VOID *) pui8OutBuf,
++ (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr),
++ ui32B1);
++
++
++ HostMemCopy((IMG_VOID *)((IMG_UINT32) pui8OutBuf + ui32B1),
++ (IMG_VOID *)psStream->ui32Base,
++ ui32B2);
++
++
++ psStream->ui32RPtr = ui32B2;
++ }
++ else
++ {
++ HostMemCopy((IMG_VOID *) pui8OutBuf,
++ (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr),
++ ui32Data);
++
++
++ psStream->ui32RPtr += ui32Data;
++
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ {
++ psStream->ui32RPtr = 0;
++ }
++ }
++
++ return(ui32Data);
++}
++
++void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = ui32Mode;
++ psStream->ui32DefaultMode = ui32Mode;
++ psStream->ui32Start = ui32Start;
++ psStream->ui32End = ui32End;
++ psStream->ui32SampleRate = ui32SampleRate;
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ {
++ ActivateHotKeys(psStream);
++ }
++}
++
++void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32OutMode = ui32OutMode;
++}
++
++void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32DebugLevel = ui32DebugLevel;
++}
++
++void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32Current = ui32Frame;
++
++ if ((ui32Frame >= psStream->ui32Start) &&
++ (ui32Frame <= psStream->ui32End) &&
++ (((ui32Frame - psStream->ui32Start) % psStream->ui32SampleRate) == 0))
++ {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ }
++ else
++ {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ }
++
++ if (g_bHotkeyMiddump)
++ {
++ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) &&
++ (ui32Frame <= g_ui32HotkeyMiddumpEnd) &&
++ (((ui32Frame - g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == 0))
++ {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ }
++ else
++ {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ if (psStream->ui32Current > g_ui32HotkeyMiddumpEnd)
++ {
++ g_bHotkeyMiddump = IMG_FALSE;
++ }
++ }
++ }
++
++
++ if (g_bHotKeyRegistered)
++ {
++ g_bHotKeyRegistered = IMG_FALSE;
++
++ PVR_DPF((PVR_DBG_MESSAGE,"Hotkey pressed (%08x)!\n",psStream));
++
++ if (!g_bHotKeyPressed)
++ {
++
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ ((psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY) != 0))
++ {
++ if (!g_bHotkeyMiddump)
++ {
++
++ g_ui32HotkeyMiddumpStart = g_ui32HotKeyFrame + 1;
++ g_ui32HotkeyMiddumpEnd = 0xffffffff;
++ g_bHotkeyMiddump = IMG_TRUE;
++ PVR_DPF((PVR_DBG_MESSAGE,"Sampling every %d frame(s)\n", psStream->ui32SampleRate));
++ }
++ else
++ {
++
++ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame;
++ PVR_DPF((PVR_DBG_MESSAGE,"Turning off sampling\n"));
++ }
++ }
++
++ }
++
++
++
++ if (psStream->ui32Current > g_ui32HotKeyFrame)
++ {
++ g_bHotKeyPressed = IMG_FALSE;
++ }
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++ return(psStream->ui32Current);
++}
++
++IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32NextFrame;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return IMG_FALSE;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ ui32NextFrame = psStream->ui32Current + psStream->ui32SampleRate;
++ if (ui32NextFrame > psStream->ui32End)
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
++{
++ IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1UL : 0UL;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return IMG_FALSE;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++
++ if (g_bHotkeyMiddump)
++ {
++ if ((psStream->ui32Current >= (g_ui32HotkeyMiddumpStart - ui32FrameShift)) &&
++ (psStream->ui32Current <= (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) &&
++ ((((psStream->ui32Current + ui32FrameShift) - g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == 0))
++ {
++ return IMG_TRUE;
++ }
++ }
++ else
++ {
++ if ((psStream->ui32Current >= (psStream->ui32Start - ui32FrameShift)) &&
++ (psStream->ui32Current <= (psStream->ui32End - ui32FrameShift)) &&
++ ((((psStream->ui32Current + ui32FrameShift) - psStream->ui32Start) % psStream->ui32SampleRate) == 0))
++ {
++ return IMG_TRUE;
++ }
++ }
++ }
++ else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current == (g_ui32HotKeyFrame-ui32FrameShift)) && (g_bHotKeyPressed))
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = ui32Mode;
++}
++
++void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = psStream->ui32DefaultMode;
++}
++
++void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32Marker = ui32Marker;
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return 0;
++ }
++
++ return psStream->ui32Marker;
++}
++
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream)
++{
++ PDBG_STREAM psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return 0;
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++ return psStream->ui32DataWritten;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, IMG_UINT32 ui32StreamOffset)
++{
++ PDBG_STREAM psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return;
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++ psStream->ui32DataWritten = ui32StreamOffset;
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(void)
++{
++ return((IMG_UINT32) &g_sDBGKMServices);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ return(0xFFFFFFFFUL);
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++ if (ui32Flags & WRITELF_FLAGS_RESETBUF)
++ {
++
++
++ ui32InBuffSize = (ui32InBuffSize > LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : ui32InBuffSize;
++ HostMemCopy((IMG_VOID *)psLFBuffer->ui8Buffer, (IMG_VOID *)pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen = ui32InBuffSize;
++ }
++ else
++ {
++
++
++ ui32InBuffSize = ((psLFBuffer->ui32BufLen + ui32InBuffSize) > LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - psLFBuffer->ui32BufLen) : ui32InBuffSize;
++ HostMemCopy((IMG_VOID *)(&psLFBuffer->ui8Buffer[psLFBuffer->ui32BufLen]), (IMG_VOID *)pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen += ui32InBuffSize;
++ }
++
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ IMG_UINT32 ui32Data;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++
++
++ ui32Data = (ui32OutBuffSize < psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen;
++
++
++
++ HostMemCopy((IMG_VOID *)pui8OutBuf, (IMG_VOID *)psLFBuffer->ui8Buffer, ui32Data);
++
++ return ui32Data;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream)
++{
++ psStream->bInitPhaseComplete = IMG_FALSE;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream)
++{
++ psStream->bInitPhaseComplete = IMG_TRUE;
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
++{
++ HostWaitForEvent(eEvent);
++}
++#endif
++
++IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
++{
++ IMG_VOID * pvNewBuf;
++ IMG_UINT32 ui32NewSizeInPages;
++ IMG_UINT32 ui32NewWOffset;
++ IMG_UINT32 ui32SpaceInOldBuf;
++
++
++
++ if (psStream->ui32Size >= ui32NewSize)
++ {
++ return IMG_FALSE;
++ }
++
++
++
++ ui32SpaceInOldBuf = SpaceInStream(psStream);
++
++
++
++ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
++ }
++ else
++ {
++ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
++ }
++
++ if (pvNewBuf == IMG_NULL)
++ {
++ return IMG_FALSE;
++ }
++
++
++
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr)
++ {
++
++
++ HostMemCopy((IMG_VOID *)pvNewBuf, (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr), psStream->ui32WPtr - psStream->ui32RPtr);
++ }
++ else
++ {
++ IMG_UINT32 ui32FirstCopySize;
++
++
++
++ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
++
++ HostMemCopy((IMG_VOID *)pvNewBuf, (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr), ui32FirstCopySize);
++
++
++
++ HostMemCopy((IMG_VOID *)((IMG_UINT32)pvNewBuf + ui32FirstCopySize), (IMG_VOID *)psStream->ui32Base, psStream->ui32WPtr);
++ }
++
++
++
++ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
++
++
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++
++
++
++ psStream->ui32Base = (IMG_UINT32)pvNewBuf;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = ui32NewWOffset;
++ psStream->ui32Size = ui32NewSizeInPages * 4096;
++
++ return IMG_TRUE;
++}
++
++IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Space;
++
++ if (psStream->ui32RPtr > psStream->ui32WPtr)
++ {
++ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
++ }
++ else
++ {
++ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
++ }
++
++ return ui32Space;
++}
++
++
++void DestroyAllStreams(void)
++{
++ while (g_psStreamList != IMG_NULL)
++ {
++ DBGDrivDestroyStream(g_psStreamList);
++ }
++ return;
++}
++
++PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++
++ psLFBuffer = g_psLFBufferList;
++
++ while (psLFBuffer)
++ {
++ if (psLFBuffer->psStream == psStream)
++ {
++ break;
++ }
++
++ psLFBuffer = psLFBuffer->psNext;
++ }
++
++ return psLFBuffer;
++}
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+@@ -0,0 +1,116 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRIV_
++#define _DBGDRIV_
++
++#define BUFFER_SIZE 64*PAGESIZE
++
++#define DBGDRIV_VERSION 0x100
++#define MAX_PROCESSES 2
++#define BLOCK_USED 0x01
++#define BLOCK_LOCKED 0x02
++#define DBGDRIV_MONOBASE 0x000B0000
++
++
++extern IMG_VOID * g_pvAPIMutex;
++
++IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
++ IMG_UINT32 ui32CapMode,
++ IMG_UINT32 ui32OutMode,
++ IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Pages);
++IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream);
++IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR *pszName, IMG_BOOL bResetStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream, IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Limit);
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize, IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32Stop, IMG_UINT32 ui32SampleRate);
++IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream, IMG_UINT32 ui32OutMode);
++IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream, IMG_UINT32 ui32DebugLevel);
++IMG_VOID IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode);
++IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream, IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
++
++IMG_VOID DestroyAllStreams(IMG_VOID);
++
++IMG_UINT32 AtoI(IMG_CHAR *szIn);
++
++IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++IMG_VOID HostMemCopy(IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_BOOL StreamValid(PDBG_STREAM psStream);
++IMG_VOID Write(PDBG_STREAM psStream,IMG_UINT8 *pui8Data, IMG_UINT32 ui32InBuffSize);
++IMG_VOID MonoOut(IMG_CHAR *pszString, IMG_BOOL bNewLine);
++
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size);
++IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream);
++IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR *pszName, IMG_BOOL bResetStream);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Limit);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32SampleRate);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream, IMG_UINT32 ui32OutMode);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream, IMG_UINT32 ui32DebugLevel);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode);
++IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+@@ -0,0 +1,58 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOSTFUNC_
++#define _HOSTFUNC_
++
++#define HOST_PAGESIZE (4096)
++#define DBG_MEMORY_INITIALIZER (0xe2)
++
++IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
++
++IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
++IMG_VOID HostPageablePageFree(IMG_VOID * pvBase);
++IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
++IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase);
++
++IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl);
++IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess);
++
++IMG_VOID HostCreateRegDeclStreams(IMG_VOID);
++
++IMG_VOID * HostCreateMutex(IMG_VOID);
++IMG_VOID HostAquireMutex(IMG_VOID * pvMutex);
++IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex);
++IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++IMG_INT32 HostCreateEventObjects(IMG_VOID);
++IMG_VOID HostWaitForEvent(DBG_EVENT eEvent);
++IMG_VOID HostSignalEvent(DBG_EVENT eEvent);
++IMG_VOID HostDestroyEventObjects(IMG_VOID);
++#endif
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#if !defined(LINUX)
++#include <ntddk.h>
++#include <windef.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++
++
++
++
++IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF;
++IMG_BOOL g_bHotKeyPressed = IMG_FALSE;
++IMG_BOOL g_bHotKeyRegistered = IMG_FALSE;
++
++PRIVATEHOTKEYDATA g_PrivateHotKeyData;
++
++
++IMG_VOID ReadInHotKeys(IMG_VOID)
++{
++ g_PrivateHotKeyData.ui32ScanCode = 0x58;
++ g_PrivateHotKeyData.ui32ShiftState = 0x0;
++
++
++
++#if 0
++ if (_RegOpenKey(HKEY_LOCAL_MACHINE,pszRegPath,&hKey) == ERROR_SUCCESS)
++ {
++
++
++ QueryReg(hKey,"ui32ScanCode",&g_PrivateHotKeyData.ui32ScanCode);
++ QueryReg(hKey,"ui32ShiftState",&g_PrivateHotKeyData.ui32ShiftState);
++ }
++#else
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode" , &g_PrivateHotKeyData.ui32ScanCode);
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", &g_PrivateHotKeyData.ui32ShiftState);
++#endif
++}
++
++IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo)
++{
++ PDBG_STREAM psStream;
++
++ PVR_UNREFERENCED_PARAMETER(pInfo);
++
++ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PDUMP Hotkey pressed !\n"));
++
++ psStream = (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream;
++
++ if (!g_bHotKeyPressed)
++ {
++
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++ }
++}
++
++IMG_VOID ActivateHotKeys(PDBG_STREAM psStream)
++{
++
++
++ ReadInHotKeys();
++
++
++
++ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey)
++ {
++ if (g_PrivateHotKeyData.ui32ScanCode != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"Activate HotKey for PDUMP.\n"));
++
++
++
++ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream;
++
++ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, g_PrivateHotKeyData.ui32ShiftState, &g_PrivateHotKeyData.sHotKeyInfo);
++ }
++ else
++ {
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++ }
++}
++
++IMG_VOID DeactivateHotKeys(IMG_VOID)
++{
++ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"Deactivate HotKey.\n"));
++
++ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey);
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++}
++
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOTKEY_
++#define _HOTKEY_
++
++
++typedef struct _hotkeyinfo
++{
++ IMG_UINT8 ui8ScanCode;
++ IMG_UINT8 ui8Type;
++ IMG_UINT8 ui8Flag;
++ IMG_UINT8 ui8Filler1;
++ IMG_UINT32 ui32ShiftState;
++ IMG_UINT32 ui32HotKeyProc;
++ IMG_VOID *pvStream;
++ IMG_UINT32 hHotKey;
++} HOTKEYINFO, *PHOTKEYINFO;
++
++typedef struct _privatehotkeydata
++{
++ IMG_UINT32 ui32ScanCode;
++ IMG_UINT32 ui32ShiftState;
++ HOTKEYINFO sHotKeyInfo;
++} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA;
++
++
++IMG_VOID ReadInHotKeys (IMG_VOID);
++IMG_VOID ActivateHotKeys(PDBG_STREAM psStream);
++IMG_VOID DeactivateHotKeys(IMG_VOID);
++
++IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey);
++IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo);
++IMG_VOID RegisterKeyPressed (IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo);
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+@@ -0,0 +1,371 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#ifdef LINUX
++#include <asm/uaccess.h>
++#endif
++
++#include "img_types.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++
++
++IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_CREATESTREAM psIn;
++ IMG_VOID * *ppvOut;
++ #ifdef LINUX
++ static IMG_CHAR name[32];
++ #endif
++
++ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
++ ppvOut = (IMG_VOID * *) pvOutBuffer;
++
++ #ifdef LINUX
++
++ if(copy_from_user(name, psIn->pszName, 32) != 0)
++ {
++ return IMG_FALSE;
++ }
++
++ *ppvOut = ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, 0, psIn->ui32Pages);
++
++ #else
++ *ppvOut = ExtDBGDrivCreateStream(psIn->pszName, psIn->ui32CapMode, psIn->ui32OutMode, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages);
++ #endif
++
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++
++ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
++
++ ExtDBGDrivDestroyStream(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_FINDSTREAM psParams;
++ IMG_UINT32 * pui32Stream;
++
++ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
++ pui32Stream = (IMG_UINT32 *)pvOutBuffer;
++
++ *pui32Stream = (IMG_UINT32)ExtDBGDrivFindStream(psParams->pszName, psParams->bResetStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITESTRING psParams;
++ IMG_UINT32 * pui32OutLen;
++
++ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivWriteString((PDBG_STREAM) psParams->pvStream,psParams->pszString,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITESTRING psParams;
++ IMG_UINT32 * pui32OutLen;
++
++ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivWriteStringCM((PDBG_STREAM) psParams->pvStream,psParams->pszString,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32OutLen;
++ PDBG_IN_READSTRING psParams;
++
++ psParams = (PDBG_IN_READSTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivReadString(psParams->pvStream,psParams->pszString,psParams->ui32StringLen);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWrite((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWrite2((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteCM((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_READ psInParams;
++
++ psInParams = (PDBG_IN_READ) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivRead((PDBG_STREAM) psInParams->pvStream,psInParams->bReadInitBuffer, psInParams->ui32OutBufferSize,psInParams->pui8OutBuffer);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGMODE psParams;
++
++ psParams = (PDBG_IN_SETDEBUGMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetCaptureMode((PDBG_STREAM) psParams->pvStream,
++ psParams->ui32Mode,
++ psParams->ui32Start,
++ psParams->ui32End,
++ psParams->ui32SampleRate);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGOUTMODE psParams;
++
++ psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetOutputMode((PDBG_STREAM) psParams->pvStream,psParams->ui32Mode);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGLEVEL psParams;
++
++ psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetDebugLevel((PDBG_STREAM) psParams->pvStream,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETFRAME psParams;
++
++ psParams = (PDBG_IN_SETFRAME) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetFrame((PDBG_STREAM) psParams->pvStream,psParams->ui32Frame);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++ IMG_UINT32 * pui32Current;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetFrame(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_ISCAPTUREFRAME psParams;
++ IMG_UINT32 * pui32Current;
++
++ psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivIsCaptureFrame((PDBG_STREAM) psParams->pvStream, psParams->bCheckPreviousFrame);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_OVERRIDEMODE psParams;
++
++ psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
++
++ ExtDBGDrivOverrideMode((PDBG_STREAM) psParams->pvStream,psParams->ui32Mode);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivDefaultMode(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETMARKER psParams;
++
++ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetMarker((PDBG_STREAM) psParams->pvStream, psParams->ui32Marker);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++ IMG_UINT32 * pui32Current;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetMarker(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32Out;
++
++ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
++ pui32Out = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Out = DBGDrivGetServiceTable();
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITE_LF psInParams;
++ IMG_UINT32 * pui32BytesCopied;
++
++ psInParams = (PDBG_IN_WRITE_LF) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteLF(psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32BufferSize,
++ psInParams->ui32Level,
++ psInParams->ui32Flags);
++
++ return IMG_TRUE;
++}
++
++IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_READ psInParams;
++
++ psInParams = (PDBG_IN_READ) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivReadLF((PDBG_STREAM) psInParams->pvStream,psInParams->ui32OutBufferSize,psInParams->pui8OutBuffer);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivWaitForEvent(eEvent);
++
++ return(IMG_TRUE);
++}
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _IOCTL_
++#define _IOCTL_
++
++
++IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivRead(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID*, IMG_VOID *);
++
++IMG_UINT32 (*g_DBGDrivProc[])(IMG_VOID *, IMG_VOID *) =
++{
++ DBGDIOCDrivCreateStream,
++ DBGDIOCDrivDestroyStream,
++ DBGDIOCDrivGetStream,
++ DBGDIOCDrivWriteString,
++ DBGDIOCDrivReadString,
++ DBGDIOCDrivWrite,
++ DBGDIOCDrivRead,
++ DBGDIOCDrivSetCaptureMode,
++ DBGDIOCDrivSetOutMode,
++ DBGDIOCDrivSetDebugLevel,
++ DBGDIOCDrivSetFrame,
++ DBGDIOCDrivGetFrame,
++ DBGDIOCDrivOverrideMode,
++ DBGDIOCDrivDefaultMode,
++ DBGDIOCDrivGetServiceTable,
++ DBGDIOCDrivWrite2,
++ DBGDIOCDrivWriteStringCM,
++ DBGDIOCDrivWriteCM,
++ DBGDIOCDrivSetMarker,
++ DBGDIOCDrivGetMarker,
++ DBGDIOCDrivIsCaptureFrame,
++ DBGDIOCDrivWriteLF,
++ DBGDIOCDrivReadLF,
++ DBGDIOCDrivWaitForEvent
++};
++
++#define MAX_DBGVXD_W32_API (sizeof(g_DBGDrivProc)/sizeof(IMG_UINT32))
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+@@ -0,0 +1,302 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <asm/page.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/hardirq.h>
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "dbgdriv/common/hostfunc.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#define PVR_STRING_TERMINATOR '\0'
++#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
++
++void PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++#if !defined(__sh__)
++ IMG_CHAR *pszLeafName;
++
++ pszLeafName = (char *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ static char szBuffer[256];
++
++ va_start (vaArgs, pszFormat);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (szBuffer, "PVR_K:(Fatal): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (szBuffer, "PVR_K:(Error): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (szBuffer, "PVR_K:(Warning): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (szBuffer, "PVR_K:(Message): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (szBuffer, "PVR_K:(Verbose): ", sizeof(szBuffer));
++ break;
++ }
++ default:
++ {
++ strncpy (szBuffer, "PVR_K:(Unknown message level)", sizeof(szBuffer));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (szBuffer, "PVR_K: ", sizeof(szBuffer));
++ }
++
++ vsnprintf (&szBuffer[strlen(szBuffer)], sizeof(szBuffer), pszFormat, vaArgs);
++
++
++
++ if (!bTrace)
++ {
++ snprintf (&szBuffer[strlen(szBuffer)], sizeof(szBuffer), " [%d, %s]", (int)ui32Line, pszFileName);
++ }
++
++ printk(KERN_INFO "%s\r\n", szBuffer);
++
++ va_end (vaArgs);
++ }
++}
++#endif
++
++IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
++}
++
++IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ unsigned char *src,*dst;
++ int i;
++
++ src=(unsigned char *)pvSrc;
++ dst=(unsigned char *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ dst[i]=src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
++{
++
++ return 0;
++}
++
++IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
++{
++ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++IMG_VOID HostPageablePageFree(IMG_VOID * pvBase)
++{
++ vfree(pvBase);
++}
++
++IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
++{
++ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase)
++{
++ vfree(pvBase);
++}
++
++IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID **ppvMdl)
++{
++
++ return IMG_NULL;
++}
++
++IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess)
++{
++
++}
++
++IMG_VOID HostCreateRegDeclStreams(IMG_VOID)
++{
++
++}
++
++IMG_VOID * HostCreateMutex(IMG_VOID)
++{
++ struct semaphore *psSem;
++
++ psSem = kmalloc(sizeof(*psSem), GFP_KERNEL);
++ if (psSem)
++ {
++ init_MUTEX(psSem);
++ }
++
++ return psSem;
++}
++
++IMG_VOID HostAquireMutex(IMG_VOID * pvMutex)
++{
++ BUG_ON(in_interrupt());
++
++#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS)
++ if (down_trylock((struct semaphore *)pvMutex))
++ {
++ printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n");
++ down((struct semaphore *)pvMutex);
++ }
++#else
++ down((struct semaphore *)pvMutex);
++#endif
++}
++
++IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex)
++{
++ up((struct semaphore *)pvMutex);
++}
++
++IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex)
++{
++ if (pvMutex)
++ {
++ kfree(pvMutex);
++ }
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++#define EVENT_WAIT_TIMEOUT_MS 500
++#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
++
++static int iStreamData;
++static wait_queue_head_t sStreamDataEvent;
++
++IMG_INT32 HostCreateEventObjects(IMG_VOID)
++{
++ init_waitqueue_head(&sStreamDataEvent);
++
++ return 0;
++}
++
++IMG_VOID HostWaitForEvent(DBG_EVENT eEvent)
++{
++ switch(eEvent)
++ {
++ case DBG_EVENT_STREAM_DATA:
++
++ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
++ iStreamData = 0;
++ break;
++ default:
++
++ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
++ break;
++ }
++}
++
++IMG_VOID HostSignalEvent(DBG_EVENT eEvent)
++{
++ switch(eEvent)
++ {
++ case DBG_EVENT_STREAM_DATA:
++ iStreamData = 1;
++ wake_up_interruptible(&sStreamDataEvent);
++ break;
++ default:
++ break;
++ }
++}
++
++IMG_VOID HostDestroyEventObjects(IMG_VOID)
++{
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+@@ -0,0 +1,35 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
++
++MODULE = dbgdrv
++
++INCLUDES =
++
++SOURCES =
++
++include $(EURASIAROOT)/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+@@ -0,0 +1,298 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/kdev_t.h>
++#include <linux/pci.h>
++#include <linux/list.h>
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++#include <linux/version.h>
++
++#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRI_DRM)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(LDM_PCI) && !defined(SUPPORT_DRI_DRM)
++#include <linux/pci.h>
++#endif
++
++#include <asm/uaccess.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include "drmP.h"
++#include "drm.h"
++#endif
++
++#include "img_types.h"
++#include "client/linuxsrv.h"
++#include "dbgdriv/common/ioctl.h"
++#include "dbgdrvif.h"
++#include "dbgdriv/common/dbgdriv.h"
++#include "dbgdriv/common/hostfunc.h"
++#include "pvr_debug.h"
++#include "pvrmodule.h"
++
++#if defined(SUPPORT_DRI_DRM)
++
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#else
++
++#define DRVNAME "dbgdrv"
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++
++#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
++static struct class *psDbgDrvClass;
++#endif
++
++static int AssignedMajorNumber = 0;
++
++long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
++
++static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ return 0;
++}
++
++static struct file_operations dbgdrv_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = dbgdrv_ioctl,
++ .open = dbgdrv_open,
++ .release = dbgdrv_release,
++ .mmap = dbgdrv_mmap,
++};
++
++#endif
++
++void DBGDrvGetServiceTable(void **fn_table)
++{
++ extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
++
++ *fn_table = &g_sDBGKMServices;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++void dbgdrv_cleanup(void)
++#else
++void cleanup_module(void)
++#endif
++{
++#if !defined(SUPPORT_DRI_DRM)
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++ device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psDbgDrvClass);
++#endif
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++#endif
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++ HostDestroyMutex(g_pvAPIMutex);
++ return;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT dbgdrv_init(void)
++#else
++int init_module(void)
++#endif
++{
++#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ int err = -EBUSY;
++#endif
++
++
++ if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL)
++ {
++ return -ENOMEM;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++ (void) HostCreateEventObjects();
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber =
++ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR," unable to get major\n"));
++ goto ErrDestroyEventObjects;
++ }
++
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
++ psDbgDrvClass = class_create(THIS_MODULE, DRVNAME);
++ if (IS_ERR(psDbgDrvClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)",
++ __func__, PTR_ERR(psDbgDrvClass)));
++ goto ErrUnregisterCharDev;
++ }
++
++ psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DRVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)",
++ __func__, PTR_ERR(psDev)));
++ goto ErrDestroyClass;
++ }
++#endif
++#endif
++
++ return 0;
++
++#if !defined(SUPPORT_DRI_DRM)
++ErrDestroyEventObjects:
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++ErrUnregisterCharDev:
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++ErrDestroyClass:
++ class_destroy(psDbgDrvClass);
++#endif
++ return err;
++#endif
++}
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg)
++#endif
++{
++ IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg;
++ char *buffer, *in, *out;
++ unsigned int cmd;
++
++ if((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1)))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
++ return -1;
++ }
++
++ buffer = (char *) HostPageablePageAlloc(1);
++ if(!buffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
++ return -EFAULT;
++ }
++
++ in = buffer;
++ out = buffer + (PAGE_SIZE >>1);
++
++ if(copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0)
++ {
++ goto init_failed;
++ }
++
++ cmd = ((pIP->ui32Cmd >> 2) & 0xFFF) - 0x801;
++
++ if(pIP->ui32Cmd == DEBUG_SERVICE_READ)
++ {
++ IMG_CHAR *ui8Tmp;
++ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
++ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
++
++ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize);
++
++ if(!ui8Tmp)
++ {
++ goto init_failed;
++ }
++
++ *pui32BytesCopied = ExtDBGDrivRead((DBG_STREAM *)psReadInParams->pvStream,
++ psReadInParams->bReadInitBuffer,
++ psReadInParams->ui32OutBufferSize,
++ ui8Tmp);
++
++ if(copy_to_user(psReadInParams->pui8OutBuffer,
++ ui8Tmp,
++ *pui32BytesCopied) != 0)
++ {
++ vfree(ui8Tmp);
++ goto init_failed;
++ }
++
++ vfree(ui8Tmp);
++ }
++ else
++ {
++ (g_DBGDrivProc[cmd])(in, out);
++ }
++
++ if(copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0)
++ {
++ goto init_failed;
++ }
++
++ HostPageablePageFree((IMG_VOID *)buffer);
++ return 0;
++
++init_failed:
++ HostPageablePageFree((IMG_VOID *)buffer);
++ return -EFAULT;
++}
++
++
++void RemoveHotKey(unsigned hHotKey)
++{
++
++}
++
++void DefineHotKey(unsigned ScanCode, unsigned ShiftState, void *pInfo)
++{
++
++}
++
++/*EXPORT_SYMBOL(DBGDrvGetServiceTable); */
+--- /dev/null
++++ b/drivers/staging/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+@@ -0,0 +1,40 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++ifeq ($(SUPPORT_DRI_DRM),1)
++DBGDRV_SOURCES_ROOT = $(KBUILDROOT)/../tools/intern/debug/dbgdriv
++else
++DBGDRV_SOURCES_ROOT = ../..
++endif
++
++INCLUDES += -I$(EURASIAROOT)/include4 \
++ -I$(EURASIAROOT)/tools/intern/debug
++
++SOURCES += $(DBGDRV_SOURCES_ROOT)/linux/main.c \
++ $(DBGDRV_SOURCES_ROOT)/common/dbgdriv.c \
++ $(DBGDRV_SOURCES_ROOT)/common/ioctl.c \
++ $(DBGDRV_SOURCES_ROOT)/linux/hostfunc.c \
++ $(DBGDRV_SOURCES_ROOT)/common/hotkey.c
+--- /dev/null
++++ b/drivers/staging/mrstci/Kconfig
+@@ -0,0 +1,28 @@
++menuconfig VIDEO_MRSTCI
++ bool "Moorestown Langwell Camera Imaging Subsystem support"
++ depends on VIDEO_V4L2 && I2C
++ default y
++
++ ---help---
++ Say Y here to enable selecting the Intel Moorestown Langwell Camera Imaging Subsystem for webcams.
++
++if VIDEO_MRSTCI && VIDEO_V4L2
++
++source "drivers/staging/mrstci/mrstisp/Kconfig"
++
++source "drivers/staging/mrstci/mrstov5630/Kconfig"
++source "drivers/staging/mrstci/mrstov5630_motor/Kconfig"
++
++source "drivers/staging/mrstci/mrsts5k4e1/Kconfig"
++source "drivers/staging/mrstci/mrsts5k4e1_motor/Kconfig"
++
++source "drivers/staging/mrstci/mrstflash/Kconfig"
++
++source "drivers/staging/mrstci/mrstov2650/Kconfig"
++
++source "drivers/staging/mrstci/mrstov9665/Kconfig"
++
++source "drivers/staging/mrstci/mrstmt9d113/Kconfig"
++
++endif # VIDEO_MRSTCI
++
+--- /dev/null
++++ b/drivers/staging/mrstci/Makefile
+@@ -0,0 +1,9 @@
++obj-$(CONFIG_VIDEO_MRST_OV2650) += mrstov2650/
++obj-$(CONFIG_VIDEO_MRST_OV9665) += mrstov9665/
++obj-$(CONFIG_VIDEO_MRST_OV5630) += mrstov5630/
++obj-$(CONFIG_VIDEO_MRST_OV5630_MOTOR) += mrstov5630_motor/
++obj-$(CONFIG_VIDEO_MRST_S5K4E1) += mrsts5k4e1/
++obj-$(CONFIG_VIDEO_MRST_S5K4E1_MOTOR) += mrsts5k4e1_motor/
++obj-$(CONFIG_VIDEO_MRST_FLASH) += mrstflash/
++obj-$(CONFIG_VIDEO_MRST_MT9D113) += mrstmt9d113/
++obj-$(CONFIG_VIDEO_MRST_ISP) += mrstisp/
+--- /dev/null
++++ b/drivers/staging/mrstci/include/ci_isp_common.h
+@@ -0,0 +1,1416 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _CI_ISP_COMMON_H
++#define _CI_ISP_COMMON_H
++
++extern u32 rsz_scaler_bypass;
++extern u32 rsz_upscaler_enable;
++#include "v4l2_jpg_review.h"
++#define CHIP_ID_MARVIN_5_V4_R20 0x20453010
++#define CHIP_ID_MARVIN_5_V4_R11 0x10453010
++#define CHIP_ID_MARVIN_12_V1_R21 0x30153017
++
++/*
++ * MARVIN VI ID defines -> changed to MARVIN_FEATURE_CHIP_ID and moved to
++ * the other chip features in project_settings.h
++ * JPEG compression ratio defines
++ */
++
++#define CI_ISP_JPEG_HIGH_COMPRESSION 1
++#define CI_ISP_JPEG_LOW_COMPRESSION 2
++/* Low Compression / High Quality */
++#define CI_ISP_JPEG_01_PERCENT 3
++#define CI_ISP_JPEG_20_PERCENT 4
++#define CI_ISP_JPEG_30_PERCENT 5
++#define CI_ISP_JPEG_40_PERCENT 6
++/* Mid Compression / Mid Quality */
++#define CI_ISP_JPEG_50_PERCENT 7
++#define CI_ISP_JPEG_60_PERCENT 8
++#define CI_ISP_JPEG_70_PERCENT 9
++#define CI_ISP_JPEG_80_PERCENT 10
++#define CI_ISP_JPEG_90_PERCENT 11
++/* High Compression / Low Quality */
++#define CI_ISP_JPEG_99_PERCENT 12
++
++/* Size of lens shading data table in 16 Bit words */
++#define CI_ISP_DATA_TBL_SIZE 289
++/* Size of lens shading grad table in 16 Bit words */
++#define CI_ISP_GRAD_TBL_SIZE 8
++/* Number of lens shading sectors in x or y direction */
++#define CI_ISP_MAX_LSC_SECTORS 16
++
++/*
++ * Value representing 1.0 for fixed-point values
++ * used by marvin drivers
++ */
++#define CI_ISP_FIXEDPOINT_ONE (0x1000)
++/* JPEG encoding */
++
++enum ci_isp_jpe_enc_mode {
++ /* motion JPEG with header generation */
++ CI_ISP_JPE_LARGE_CONT_MODE = 0x04,
++ /* motion JPEG only first frame with header */
++ CI_ISP_JPE_SHORT_CONT_MODE = 0x02,
++ /* JPEG with single snapshot */
++ CI_ISP_JPE_SINGLE_SHOT = 0x01
++};
++
++/* for demosaic mode */
++enum ci_isp_demosaic_mode {
++ CI_ISP_DEMOSAIC_STANDARD,
++ CI_ISP_DEMOSAIC_ENHANCED
++};
++
++struct ci_isp_window{
++ unsigned short hoffs;
++ unsigned short voffs;
++ unsigned short hsize;
++ unsigned short vsize;
++};
++
++/* scale settings for both self and main resize unit */
++struct ci_isp_scale {
++ u32 scale_hy;
++ u32 scale_hcb;
++ u32 scale_hcr;
++ u32 scale_vy;
++ u32 scale_vc;
++ u16 phase_hy;
++ u16 phase_hc;
++ u16 phase_vy;
++ u16 phase_vc;
++};
++
++/* A Lookup table for the upscale parameter in the self and main scaler */
++struct ci_isp_rsz_lut{
++ u8 rsz_lut[64];
++};
++
++/* flag to set in scalefactor values to enable upscaling */
++#define RSZ_UPSCALE_ENABLE (rsz_upscaler_enable)
++
++
++/*
++ * Flag to set in scalefactor values to bypass the scaler block.
++ * Since this define is also used in calculations of scale factors and
++ * coordinates, it needs to reflect the scale factor precision. In other
++ * words:
++ * RSZ_SCALER_BYPASS = max. scalefactor value> + 1
++ */
++#define RSZ_SCALER_BYPASS (rsz_scaler_bypass)
++
++#define RSZ_FLAGS_MASK (RSZ_UPSCALE_ENABLE | RSZ_SCALER_BYPASS)
++
++/* color settings */
++struct ci_isp_color_settings {
++ u8 contrast;
++ u8 brightness;
++ u8 saturation;
++ u8 hue;
++ u32 flags;
++};
++
++/* color processing chrominance clipping range */
++#define CI_ISP_CPROC_C_OUT_RANGE 0x08
++/* color processing luminance input range (offset processing) */
++#define CI_ISP_CPROC_Y_IN_RANGE 0x04
++/* color processing luminance output clipping range */
++#define CI_ISP_CPROC_Y_OUT_RANGE 0x02
++/* color processing enable */
++#define CI_ISP_CPROC_ENABLE 0x01
++
++/* black level config */
++struct ci_isp_blc_config {
++ int bls_auto;
++ int henable;
++ int venable;
++ u16 hstart;
++ u16 hstop;
++ u16 vstart;
++ u16 vstop;
++ u8 blc_samples;
++ u8 ref_a;
++ u8 ref_b;
++ u8 ref_c;
++ u8 ref_d;
++};
++
++/* black level compensation mean values */
++struct ci_isp_blc_mean {
++ u8 mean_a;
++ u8 mean_b;
++ u8 mean_c;
++ u8 mean_d;
++};
++
++/* BLS window */
++struct ci_isp_bls_window {
++
++ /* En-/disable the measurement window. */
++ int enable_window;
++ /* Horizontal start address. */
++ u16 start_h;
++ /* Horizontal stop address. */
++ u16 stop_h;
++ /* Vertical start address. */
++ u16 start_v;
++ /* Vertical stop address. */
++ u16 stop_v;
++};
++
++/* BLS mean measured values */
++struct ci_isp_bls_measured {
++ /* Mean measured value for Bayer pattern position A. */
++ u16 meas_a;
++ /* Mean measured value for Bayer pattern position B. */
++ u16 meas_b;
++ /* Mean measured value for Bayer pattern position C. */
++ u16 meas_c;
++ /* Mean measured value for Bayer pattern position D. */
++ u16 meas_d;
++};
++
++/*
++ * BLS fixed subtraction values. The values will be subtracted from the sensor
++ * values. Therefore a negative value means addition instead of subtraction
++ */
++struct ci_isp_bls_subtraction {
++ /* Fixed (signed ) subtraction value for Bayer pattern position A. */
++ s16 fixed_a;
++ /* Fixed (signed ) subtraction value for Bayer pattern position B. */
++ s16 fixed_b;
++ /* Fixed (signed ) subtraction value for Bayer pattern position C. */
++ s16 fixed_c;
++ /* Fixed (signed ) subtraction value for Bayer pattern position D. */
++ s16 fixed_d;
++};
++
++/* BLS configuration */
++struct ci_isp_bls_config {
++ /*
++ * Automatic mode activated means that the measured values are
++ * subtracted. Otherwise the fixed subtraction values will be
++ * subtracted.
++ */
++ int enable_automatic;
++ /* En-/disable horizontal accumulation for mean black value. */
++ int disable_h;
++ /*
++ * BLS module versions 4 or higher imply that it is enabled.
++ * En-/disable vertical accumulation for mean black value.
++ */
++ int disable_v;
++ /* Measurement window 1. */
++ struct ci_isp_bls_window isp_bls_window1;
++ /* Measurement window 2. */
++ struct ci_isp_bls_window isp_bls_window2;
++
++ /*
++ * BLS module version 3 and lower do not support a second
++ * measurement window. Therefore the second window has to
++ * be disabled for these versions.
++ */
++
++ /*
++ * Set amount of measured pixels for each Bayer position (A, B,
++ * C and D) to 2^bls_samples.
++ */
++ u8 bls_samples;
++ /* Fixed subtraction values. */
++ struct ci_isp_bls_subtraction bls_subtraction;
++};
++
++/* white balancing modes for the marvin hardware */
++enum ci_isp_awb_mode {
++ CI_ISP_AWB_COMPLETELY_OFF = 0,
++ CI_ISP_AWB_AUTO,
++ CI_ISP_AWB_MAN_MEAS,
++ CI_ISP_AWB_MAN_NOMEAS,
++ CI_ISP_AWB_MAN_PUSH_AUTO,
++ CI_ISP_AWB_ONLY_MEAS
++};
++
++/* white balancing modes for the marvin hardware */
++enum ci_isp_awb_sub_mode {
++ CI_ISP_AWB_SUB_OFF = 0,
++ CI_ISP_AWB_MAN_DAYLIGHT,
++ CI_ISP_AWB_MAN_CLOUDY,
++ CI_ISP_AWB_MAN_SHADE,
++ CI_ISP_AWB_MAN_FLUORCNT,
++ CI_ISP_AWB_MAN_FLUORCNTH,
++ CI_ISP_AWB_MAN_TUNGSTEN,
++ CI_ISP_AWB_MAN_TWILIGHT,
++ CI_ISP_AWB_MAN_SUNSET,
++ CI_ISP_AWB_MAN_FLASH,
++ CI_ISP_AWB_MAN_CIE_D65,
++ CI_ISP_AWB_MAN_CIE_D75,
++ CI_ISP_AWB_MAN_CIE_F2,
++ CI_ISP_AWB_MAN_CIE_F11,
++ CI_ISP_AWB_MAN_CIE_F12,
++ CI_ISP_AWB_MAN_CIE_A,
++ CI_ISP_AWB_AUTO_ON
++};
++
++/*
++ * white balancing gains
++ * xiaolin, typedef ci_sensor_component_gain tsMrvWbGains;
++ * white balancing measurement configuration
++ */
++struct ci_isp_wb_meas_config {
++ /* white balance measurement window (in pixels) */
++ struct ci_isp_window awb_window;
++ /*
++ * only pixels values max_y contribute to WB measurement
++ * (set to 0 to disable this feature)
++ */
++ u8 max_y;
++ /* only pixels values > min_y contribute to WB measurement */
++ u8 ref_cr_MaxR;
++ u8 minY_MaxG;
++ u8 ref_cb_MaxB;
++ /*
++ * Chrominance sum maximum value, only consider pixels with Cb+Cr
++ * smaller than threshold for WB measurements
++ */
++ u8 max_csum;
++
++ /*
++ * Chrominance minimum value, only consider pixels with Cb/Cr each
++ * greater than threshold value for WB measurements
++ */
++ u8 min_c;
++ /*
++ * number of frames+1 used for mean value calculation (frames=0
++ * means 1 Frame)
++ */
++ u8 frames;
++ u8 meas_mode;
++};
++
++/* white balancing measurement configuration limits */
++struct ci_isp_wb_meas_conf_limit {
++ /* maximum value for MinY */
++ u8 min_y_max;
++ /* minimum value for MinY */
++ u8 min_y_min;
++ /* maximum value for MinC */
++ u8 min_c_max;
++ /* minimum value for MinC */
++ u8 min_c_min;
++ /* maximum value for MaxCSum */
++ u8 max_csum_max;
++ /* minimum value for MaxCSum */
++ u8 max_csum_min;
++ /* maximum value for white pixel percentage */
++ u8 white_percent_max;
++ /* minimum value for white pixel percentage */
++ u8 white_percent_min;
++ /*
++ * maximum number of not measured frames until the gain values
++ * will be set to their initial values
++ */
++ u8 error_counter;
++};
++
++/* white balancing HW automatic configuration */
++struct ci_isp_wb_auto_hw_config {
++ /* reference C values */
++ u8 ref_cr;
++ u8 ref_cb;
++ /* lock / unlock settings */
++ u8 unlock_dly;
++ u8 unlock_rng;
++ u8 lock_dly;
++ u8 lock_rng;
++ /* maximum gain step size */
++ u8 step;
++ /* gain limits */
++ u8 max_gain;
++ u8 min_gain;
++};
++
++/* white balancing configuration */
++struct ci_isp_wb_config {
++ /* mode of operation */
++ enum ci_isp_awb_mode mrv_wb_mode;
++ enum ci_isp_awb_sub_mode mrv_wb_sub_mode;
++ /* measurement configuration */
++ struct ci_isp_wb_meas_config mrv_wb_meas_conf;
++ /* HW automatic configuration */
++ struct ci_isp_wb_auto_hw_config mrv_wb_auto_hw_conf;
++ /*
++ * gain values
++ * xiaolin, tsMrvWbGains mrv_wb_gains;
++ * measurement limits
++ */
++ struct ci_isp_wb_meas_conf_limit mrv_wb_meas_conf_limit;
++ /* Pca Damping for awb auto mode */
++ u8 awb_pca_damping;
++ /* PriorExp Damping for awb auto mode */
++ u8 awb_prior_exp_damping;
++ /* Pca Damping for AWB auto push mode */
++ u8 awb_pca_push_damping;
++ /* PriorExp Damping for AWB auto push mode */
++ u8 awb_prior_exp_push_damping;
++ /* Max Y in AWB auto mode */
++ u8 awb_auto_max_y;
++ /* Max Y in AWB auto push mode */
++ u8 awb_push_max_y;
++ /* Max Y in AWB measurement only mode */
++ u8 awb_measure_max_y;
++	/* Distance for underexposure detection */
++ u16 awb_underexp_det;
++	/* Distance for underexposure push detection */
++ u16 awb_push_underexp_det;
++
++};
++
++/* possible AEC modes */
++enum ci_isp_aec_mode {
++ /* AEC turned off */
++ CI_ISP_AEC_OFF,
++ /* AEC measurements based on (almost) the entire picture */
++ CI_ISP_AEC_INTEGRAL,
++ /*
++ * AEC measurements based on a single little square in the center of
++ * the picture
++ */
++ CI_ISP_AEC_SPOT,
++ /*
++ * AEC measurements based on 5 little squares spread over the picture
++ */
++ CI_ISP_AEC_MFIELD5,
++ /*
++ * AEC measurements based on 9 little squares spread over the picture
++ */
++ CI_ISP_AEC_MFIELD9
++};
++
++
++/*
++ * histogram weight 5x5 matrix coefficients
++* (possible values are 1=0x10,15/16=0x0F,14/16,...,1/16,0)
++*/
++struct ci_isp_hist_matrix {
++ u8 weight_00; u8 weight_10; u8 weight_20; u8 weight_30; u8 weight_40;
++ u8 weight_01; u8 weight_11; u8 weight_21; u8 weight_31; u8 weight_41;
++ u8 weight_02; u8 weight_12; u8 weight_22; u8 weight_32; u8 weight_42;
++ u8 weight_03; u8 weight_13; u8 weight_23; u8 weight_33; u8 weight_43;
++ u8 weight_04; u8 weight_14; u8 weight_24; u8 weight_34; u8 weight_44;
++};
++
++/* autoexposure config */
++struct ci_isp_aec_config {
++ /*
++ * Size of 1 window of MARVIN's 5x5 mean luminance
++ * measurement grid and offset of grid
++ */
++ struct ci_isp_window isp_aecmean_lumaWindow;
++ /* Size and offset of histogram window */
++ struct ci_isp_window isp_aechist_calcWindow;
++	/* Weight matrix of histogram */
++ struct ci_isp_hist_matrix isp_aechist_calcWeight;
++ /* possible AEC modes */
++ enum ci_isp_aec_mode advanced_aec_mode;
++};
++
++/* autoexposure mean values */
++struct ci_isp_aec_mean {
++ u8 occ;
++ u8 mean;
++ u8 max;
++ u8 min;
++};
++
++
++
++/* histogram weight 5x5 matrix coefficients
++ * (possible values are 1=0x10,15/16=0x0F,14/16,...,1/16,0)
++ */
++struct tsMrvHistMatrix {
++ u8 weight_00; u8 weight_10; u8 weight_20; u8 weight_30; u8 weight_40;
++ u8 weight_01; u8 weight_11; u8 weight_21; u8 weight_31; u8 weight_41;
++ u8 weight_02; u8 weight_12; u8 weight_22; u8 weight_32; u8 weight_42;
++ u8 weight_03; u8 weight_13; u8 weight_23; u8 weight_33; u8 weight_43;
++ u8 weight_04; u8 weight_14; u8 weight_24; u8 weight_34; u8 weight_44;
++};
++
++/*
++ * vi_dpcl path selector, channel mode
++ * Configuration of the Y/C splitter
++ */
++enum ci_isp_ycs_chn_mode {
++ /*
++	 * 8bit data/Y only output (deprecated, please use CI_ISP_YCS_MVRaw for
++ * new implementations)
++ */
++ CI_ISP_YCS_Y,
++ /* separated 8bit Y, C routed to both main and self path */
++ CI_ISP_YCS_MV_SP,
++ /*
++ * separated 8bit Y, C routed to main path only (self path input
++ * switched off)
++ */
++ CI_ISP_YCS_MV,
++ /*
++ * separated 8bit Y, C routed to self path only (main path input
++ * switched off)
++ */
++ CI_ISP_YCS_SP,
++ /*
++ * raw camera data routed to main path (8 or 16 bits, depends on
++	 * marvin derivative)
++ */
++ CI_ISP_YCS_MVRaw,
++ /* both main and self path input switched off */
++ CI_ISP_YCS_OFF
++};
++
++/* vi_dpcl path selector, main path cross-switch */
++enum ci_isp_dp_switch {
++ /* raw data mode */
++ CI_ISP_DP_RAW,
++ /* JPEG encoding mode */
++ CI_ISP_DP_JPEG,
++ /* main video path only */
++ CI_ISP_DP_MV
++};
++
++/* DMA-read mode selector */
++enum ci_isp_dma_read_mode {
++ /* DMA-read feature deactivated */
++ CI_ISP_DMA_RD_OFF = 0,
++ /* data from the DMA-read block feeds the self path */
++ CI_ISP_DMA_RD_SELF_PATH = 1,
++ /* data from the DMA-read block feeds the Superimpose block */
++ CI_ISP_DMA_RD_SUPERIMPOSE = 2,
++ /* data from the DMA-read block feeds the Image effects path */
++ CI_ISP_DMA_RD_IE_PATH = 3,
++ /* data from the DMA-read block feeds the JPEG encoder directly */
++ CI_ISP_DMA_RD_JPG_ENC = 4
++};
++
++/* ISP path selector */
++enum ci_isp_path {
++ /* Isp path is unknown or invalid */
++ CI_ISP_PATH_UNKNOWN = 0,
++ /* Raw data bypass */
++ CI_ISP_PATH_RAW = 1,
++ /* YCbCr path */
++ CI_ISP_PATH_YCBCR = 2,
++ /* Bayer RGB path */
++ CI_ISP_PATH_BAYER = 3
++};
++
++/* possible autofocus measurement modes */
++enum ci_isp_afm_mode {
++ /* no autofocus measurement */
++ CI_ISP_AFM_OFF,
++ /* use AF hardware to measure sharpness */
++ CI_ISP_AFM_HW,
++ /* use "Tenengrad" algorithm implemented in software */
++ CI_ISP_AFM_SW_TENENGRAD,
++ /*
++ * use "Threshold Squared Gradient" algorithm implemented in software
++ */
++ CI_ISP_AFM_SW_TRESH_SQRT_GRAD,
++ /*
++ * use "Frequency selective weighted median" algorithm implemented in
++ * software
++ */
++ CI_ISP_AFM_SW_FSWMEDIAN,
++ /* use AF hardware and normalize with mean luminance */
++ CI_ISP_AFM_HW_norm,
++ /* use "Tenengrad" algorithm and normalize with mean luminance */
++ CI_ISP_AFM_SW_TENENGRAD_norm,
++ /*
++ * use "Frequency selective weighted median" algorithm and normalize
++ * with mean luminance
++ */
++ CI_ISP_AFM_SW_FSWMEDIAN_norm
++};
++
++/* possible autofocus search strategy modes */
++enum ci_isp_afss_mode {
++ /* no focus searching */
++ CI_ISP_AFSS_OFF,
++ /* scan the full focus range to find the point of best focus */
++ CI_ISP_AFSS_FULL_RANGE,
++ /* use hillclimbing search */
++ CI_ISP_AFSS_HILLCLIMBING,
++ /*
++ * similar to full range search, but with multiple subsequent scans
++ * with
++ */
++ CI_ISP_AFSS_ADAPTIVE_RANGE,
++ /*
++ * decreasing range and step size will be performed. search strategy
++ * suggested by OneLimited for their Helimorph actuator
++ */
++ CI_ISP_AFSS_HELIMORPH_OPT,
++ /*
++ * search strategy optimized for omnivision 2630 module equipped with
++ */
++ CI_ISP_AFSS_OV2630_LPD4_OPT
++ /*
++	 * autofocus lens driven through a LPD4 stepper motor produced by
++ * Nidec Copal USA Corp. of Torrance, CA.
++ */
++};
++
++/* possible bad pixel correction type */
++enum ci_isp_bp_corr_type {
++ /* correction of bad pixel from the table */
++ CI_ISP_BP_CORR_TABLE,
++ /* direct detection and correction */
++ CI_ISP_BP_CORR_DIRECT
++};
++
++/* possible bad pixel replace approach */
++enum ci_isp_bp_corr_rep {
++ /* nearest neighbour approach */
++ CI_ISP_BP_CORR_REP_NB,
++ /* simple bilinear interpolation approach */
++ CI_ISP_BP_CORR_REP_LIN
++};
++
++/* possible bad pixel correction mode */
++enum ci_isp_bp_corr_mode {
++ /* hot pixel correction */
++ CI_ISP_BP_CORR_HOT_EN,
++ /* dead pixel correction */
++ CI_ISP_BP_CORR_DEAD_EN,
++ /* hot and dead pixel correction */
++ CI_ISP_BP_CORR_HOT_DEAD_EN
++};
++
++/* Gamma out curve (independent from the sensor characteristic). */
++#define CI_ISP_GAMMA_OUT_CURVE_ARR_SIZE (17)
++
++struct ci_isp_gamma_out_curve {
++ u16 isp_gamma_y[CI_ISP_GAMMA_OUT_CURVE_ARR_SIZE];
++ u8 gamma_segmentation;
++};
++
++/* configuration of autofocus measurement block */
++struct ci_isp_af_config {
++ /* position and size of measurement window A */
++ struct ci_isp_window wnd_pos_a;
++ /* position and size of measurement window B */
++ struct ci_isp_window wnd_pos_b;
++ /* position and size of measurement window C */
++ struct ci_isp_window wnd_pos_c;
++	/* AF measurement threshold */
++ u32 threshold;
++ /* measurement variable shift (before sum operation) */
++ u32 var_shift;
++};
++
++/* measurement results of autofocus measurement block */
++struct ci_isp_af_meas {
++ /* sharpness value of window A */
++ u32 afm_sum_a;
++ /* sharpness value of window B */
++ u32 afm_sum_b;
++ /* sharpness value of window C */
++ u32 afm_sum_c;
++ /* luminance value of window A */
++ u32 afm_lum_a;
++ /* luminance value of window B */
++ u32 afm_lum_b;
++ /* luminance value of window C */
++ u32 afm_lum_c;
++};
++
++/* configuration for correction of bad pixel block */
++struct ci_isp_bp_corr_config {
++ /* bad pixel correction type */
++ enum ci_isp_bp_corr_type bp_corr_type;
++ /* replace approach */
++ enum ci_isp_bp_corr_rep bp_corr_rep;
++ /* bad pixel correction mode */
++ enum ci_isp_bp_corr_mode bp_corr_mode;
++ /* Absolute hot pixel threshold */
++ u16 bp_abs_hot_thres;
++ /* Absolute dead pixel threshold */
++ u16 bp_abs_dead_thres;
++ /* Hot Pixel deviation Threshold */
++ u16 bp_dev_hot_thres;
++ /* Dead Pixel deviation Threshold */
++ u16 bp_dev_dead_thres;
++};
++
++/* configuration for correction of lens shading */
++struct ci_isp_ls_corr_config {
++ /* correction values of R color part */
++ u16 ls_rdata_tbl[CI_ISP_DATA_TBL_SIZE];
++ /* correction values of G color part */
++ u16 ls_gdata_tbl[CI_ISP_DATA_TBL_SIZE];
++ /* correction values of B color part */
++ u16 ls_bdata_tbl[CI_ISP_DATA_TBL_SIZE];
++ /* multiplication factors of x direction */
++ u16 ls_xgrad_tbl[CI_ISP_GRAD_TBL_SIZE];
++ /* multiplication factors of y direction */
++ u16 ls_ygrad_tbl[CI_ISP_GRAD_TBL_SIZE];
++ /* sector sizes of x direction */
++ u16 ls_xsize_tbl[CI_ISP_GRAD_TBL_SIZE];
++ /* sector sizes of y direction */
++ u16 ls_ysize_tbl[CI_ISP_GRAD_TBL_SIZE];
++
++};
++
++/* configuration for detection of bad pixel block */
++struct ci_isp_bp_det_config {
++ /* abs_dead_thres Absolute dead pixel threshold */
++ u32 bp_dead_thres;
++};
++
++/* new table element */
++struct ci_isp_bp_new_table_elem {
++ /* Bad Pixel vertical address */
++ u16 bp_ver_addr;
++ /* Bad Pixel horizontal address */
++ u16 bp_hor_addr;
++	/* MSB value of fixed pixel (decide if dead or hot) */
++ u8 bp_msb_value;
++};
++
++/* new Bad Pixel table */
++struct ci_isp_bp_new_table {
++ /* Number of possible new detected bad pixel */
++ u32 bp_number;
++ /* Array of Table element */
++ struct ci_isp_bp_new_table_elem bp_new_table_elem[8];
++};
++
++/* image effect modes */
++enum ci_isp_ie_mode {
++ /* no image effect (bypass) */
++ CI_ISP_IE_MODE_OFF,
++ /* Set a fixed chrominance of 128 (neutral grey) */
++ CI_ISP_IE_MODE_GRAYSCALE,
++ /* Luminance and chrominance data is being inverted */
++ CI_ISP_IE_MODE_NEGATIVE,
++ /*
++ * Chrominance is changed to produce a historical like brownish image
++ * color
++ */
++ CI_ISP_IE_MODE_SEPIA,
++ /*
++ * Converting picture to grayscale while maintaining one color
++ * component.
++ */
++ CI_ISP_IE_MODE_COLOR_SEL,
++ /* Edge detection, will look like an relief made of metal */
++ CI_ISP_IE_MODE_EMBOSS,
++ /* Edge detection, will look like a pencil drawing */
++ CI_ISP_IE_MODE_SKETCH
++};
++
++/* image effect color selection */
++enum ci_isp_ie_color_sel {
++ /* in CI_ISP_IE_MODE_COLOR_SEL mode, maintain the red color */
++ CI_ISP_IE_MAINTAIN_RED = 0x04,
++ /* in CI_ISP_IE_MODE_COLOR_SEL mode, maintain the green color */
++ CI_ISP_IE_MAINTAIN_GREEN = 0x02,
++ /* in CI_ISP_IE_MODE_COLOR_SEL mode, maintain the blue color */
++ CI_ISP_IE_MAINTAIN_BLUE = 0x01
++};
++
++/*
++ * image effect 3x3 matrix coefficients (possible values are -8, -4, -2, -1,
++ * 0, 1, 2, 4, 8)
++ */
++struct ci_isp_ie_matrix {
++ s8 coeff_11;
++ s8 coeff_12;
++ s8 coeff_13;
++ s8 coeff_21;
++ s8 coeff_22;
++ s8 coeff_23;
++ s8 coeff_31;
++ s8 coeff_32;
++ s8 coeff_33;
++};
++
++/* image effect configuration struct */
++struct ci_isp_ie_config {
++ /* image effect mode */
++ enum ci_isp_ie_mode mode;
++ u8 color_sel;
++ /* threshold for color selection */
++ u8 color_thres;
++ /* Cb chroma component of 'tint' color for sepia effect */
++ u8 tint_cb;
++ /* Cr chroma component of 'tint' color for sepia effect */
++ u8 tint_cr;
++	/* coefficient matrix for emboss effect */
++ struct ci_isp_ie_matrix mat_emboss;
++	/* coefficient matrix for sketch effect */
++ struct ci_isp_ie_matrix mat_sketch;
++};
++
++/* super impose transparency modes */
++enum ci_isp_si_trans_mode {
++ /* SI transparency mode is unknown (module is switched off) */
++ CI_ISP_SI_TRANS_UNKNOWN = 0,
++ /* SI transparency mode enabled */
++ CI_ISP_SI_TRANS_ENABLE = 1,
++ /* SI transparency mode disabled */
++ CI_ISP_SI_TRANS_DISABLE = 2
++};
++
++/* super impose reference image */
++enum ci_isp_si_ref_image {
++ /* SI reference image is unknown (module is switched off) */
++ CI_ISP_SI_REF_IMG_UNKNOWN = 0,
++ /* SI reference image from sensor */
++ CI_ISP_SI_REF_IMG_SENSOR = 1,
++ /* SI reference image from memory */
++ CI_ISP_SI_REF_IMG_MEMORY = 2
++};
++
++/* super impose configuration struct */
++struct ci_isp_si_config {
++ /* transparency mode on/off */
++ enum ci_isp_si_trans_mode trans_mode;
++ /* reference image from sensor/memory */
++ enum ci_isp_si_ref_image ref_image;
++ /* x offset (coordinate system of the reference image) */
++ u16 offs_x;
++ /* y offset (coordinate system of the reference image) */
++ u16 offs_y;
++ /* Y component of transparent key color */
++ u8 trans_comp_y;
++ /* Cb component of transparent key color */
++ u8 trans_comp_cb;
++ /* Cr component of transparent key color */
++ u8 trans_comp_cr;
++};
++
++/* image stabilisation modes */
++enum ci_isp_is_mode {
++ /* IS mode is unknown (module is switched off) */
++ CI_ISP_IS_MODE_UNKNOWN = 0,
++ /* IS mode enabled */
++ CI_ISP_IS_MODE_ON = 1,
++ /* IS mode disabled */
++ CI_ISP_IS_MODE_OFF = 2
++};
++
++/* image stabilisation configuration struct */
++struct ci_isp_is_config {
++ /* position and size of image stabilisation window */
++ struct ci_isp_window mrv_is_window;
++ /* maximal margin distance for X */
++ u16 max_dx;
++ /* maximal margin distance for Y */
++ u16 max_dy;
++};
++
++/* image stabilisation control struct */
++struct ci_isp_is_ctrl {
++ /* image stabilisation mode on/off */
++ enum ci_isp_is_mode is_mode;
++ /* recenter every frame by ((cur_v_offsxV_OFFS)/(2^RECENTER)) */
++ u8 recenter;
++};
++
++/* for data path switching */
++enum ci_isp_data_path {
++ CI_ISP_PATH_RAW816,
++ CI_ISP_PATH_RAW8,
++ CI_ISP_PATH_JPE,
++ CI_ISP_PATH_OFF,
++ CI_ISP_PATH_ON
++};
++
++/* buffer for memory interface */
++struct ci_isp_bufferOld {
++ u8 *pucbuffer;
++ u32 size;
++ u32 offs;
++ /* not used for Cb and Cr buffers, IRQ offset for */
++ u32 irq_offs_llength;
++ /* stores the malloc pointer address */
++ u8 *pucmalloc_start;
++ /* main buffer and line length for self buffer */
++};
++
++/* buffer for DMA memory interface */
++struct ci_isp_dma_buffer {
++ /*
++ * start of the buffer memory. Note that panning in an larger picture
++ * memory is possible by altering the buffer start address (and
++ * choosing pic_width llength)
++ */
++ u8 *pucbuffer;
++ /* size of the entire picture in bytes */
++ u32 pic_size;
++ /*
++	 * width of the picture area of interest (not necessarily the entire
++ * picture)
++ */
++ u32 pic_width;
++ /* inter-line-increment. This is the amount of bytes between */
++ u32 llength;
++ /* pixels in the same column but on different lines. */
++
++};
++
++/* color format for self picture input/output and DMA input */
++enum ci_isp_mif_col_format {
++ /* YCbCr 4:2:2 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422 = 0,
++ /* YCbCr 4:4:4 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_444 = 1,
++ /* YCbCr 4:2:0 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420 = 2,
++ /* YCbCr 4:0:0 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_400 = 3,
++ /* RGB 565 format */
++ CI_ISP_MIF_COL_FORMAT_RGB_565 = 4,
++ /* RGB 666 format */
++ CI_ISP_MIF_COL_FORMAT_RGB_666 = 5,
++ /* RGB 888 format */
++ CI_ISP_MIF_COL_FORMAT_RGB_888 = 6
++};
++
++/* color range for self picture input of RGB m*/
++enum ci_isp_mif_col_range {
++ mrv_mif_col_range_std = 0,
++ mrv_mif_col_range_full = 1
++};
++
++/* color phase for self picture input of RGB */
++enum ci_isp_mif_col_phase {
++ mrv_mif_col_phase_cosited = 0,
++ mrv_mif_col_phase_non_cosited = 1
++};
++
++/*
++ * picture write/read format
++ * The following examples apply to YCbCr 4:2:2 images, as all modes
++ */
++ enum ci_isp_mif_pic_form {
++ /* planar : separated buffers for Y, Cb and Cr */
++ CI_ISP_MIF_PIC_FORM_PLANAR = 0,
++ /* semi-planar: one buffer for Y and a combined buffer for Cb and Cr */
++ CI_ISP_MIF_PIC_FORM_SEMI_PLANAR = 1,
++ /* interleaved: one buffer for all */
++ CI_ISP_MIF_PIC_FORM_INTERLEAVED = 2
++};
++
++/* self picture operating modes */
++enum ci_isp_mif_sp_mode {
++ /* no rotation, no horizontal or vertical flipping */
++ CI_ISP_MIF_SP_ORIGINAL = 0,
++ /* vertical flipping (no additional rotation) */
++ CI_ISP_MIF_SP_VERTICAL_FLIP = 1,
++ /* horizontal flipping (no additional rotation) */
++ CI_ISP_MIF_SP_HORIZONTAL_FLIP = 2,
++ /* rotation 90 degrees ccw (no additional flipping) */
++ CI_ISP_MIF_SP_ROTATION_090_DEG = 3,
++ /*
++ * rotation 180 degrees ccw (equal to horizontal plus vertical
++ * flipping)
++ */
++ CI_ISP_MIF_SP_ROTATION_180_DEG = 4,
++ /* rotation 270 degrees ccw (no additional flipping) */
++ CI_ISP_MIF_SP_ROTATION_270_DEG = 5,
++ /* rotation 90 degrees ccw plus vertical flipping */
++ CI_ISP_MIF_SP_ROT_090_V_FLIP = 6,
++ /* rotation 270 degrees ccw plus vertical flipping */
++ CI_ISP_MIF_SP_ROT_270_V_FLIP = 7
++};
++
++/* MI burst length settings */
++enum ci_isp_mif_burst_length {
++ /* burst length = 4 */
++ CI_ISP_MIF_BURST_LENGTH_4 = 0,
++ /* burst length = 8 */
++ CI_ISP_MIF_BURST_LENGTH_8 = 1,
++ /* burst length = 16 */
++ CI_ISP_MIF_BURST_LENGTH_16 = 2
++};
++
++
++/* MI apply initial values settings */
++enum ci_isp_mif_init_vals {
++ /* do not set initial values */
++ CI_ISP_MIF_NO_INIT_VALS = 0,
++ /* set initial values for offset registers */
++ CI_ISP_MIF_INIT_OFFS = 1,
++ /* set initial values for base address registers */
++ CI_ISP_MIF_INIT_BASE = 2,
++ /* set initial values for offset and base address registers */
++ CI_ISP_MIF_INIT_OFFSAndBase = 3
++};
++
++/* MI when to update configuration */
++enum ci_isp_conf_update_time {
++ CI_ISP_CFG_UPDATE_FRAME_SYNC = 0,
++ CI_ISP_CFG_UPDATE_IMMEDIATE = 1,
++ CI_ISP_CFG_UPDATE_LATER = 2
++};
++
++/* control register of the MI */
++struct ci_isp_mi_ctrl {
++ /* self picture path output format */
++ enum ci_isp_mif_col_format mrv_mif_sp_out_form;
++ /* self picture path input format */
++ enum ci_isp_mif_col_format mrv_mif_sp_in_form;
++ enum ci_isp_mif_col_range mrv_mif_sp_in_range;
++ enum ci_isp_mif_col_phase mrv_mif_sp_in_phase;
++ /* self picture path write format */
++ enum ci_isp_mif_pic_form mrv_mif_sp_pic_form;
++ /* main picture path write format */
++ enum ci_isp_mif_pic_form mrv_mif_mp_pic_form;
++ /* burst length for chrominance for write port */
++ enum ci_isp_mif_burst_length burst_length_chrom;
++ /* burst length for luminance for write port */
++ enum ci_isp_mif_burst_length burst_length_lum;
++ /* enable updating of the shadow registers */
++ enum ci_isp_mif_init_vals init_vals;
++ /*
++ * for main and self picture to their init values
++ */
++ /* enable change of byte order for write port */
++ int byte_swap_enable;
++ /* enable the last pixel signalization */
++ int last_pixel_enable;
++ /* self picture path operating mode */
++ enum ci_isp_mif_sp_mode mrv_mif_sp_mode;
++ /* enable path */
++ enum ci_isp_data_path main_path;
++ /* enable path */
++ enum ci_isp_data_path self_path;
++ /*
++ * offset counter interrupt generation for fill_mp_y (counted in
++ * bytes)
++ */
++ u32 irq_offs_init;
++
++};
++
++/* buffer for memory interface */
++struct ci_isp_buffer {
++ /* buffer start address */
++ u8 *pucbuffer;
++ /* buffer size (counted in bytes) */
++ u32 size;
++ /* buffer offset count (counted in bytes) */
++ u32 offs;
++};
++
++/* main or self picture path, or DMA configuration */
++struct ci_isp_mi_path_conf {
++ /* Y picture width (counted in pixels) */
++ u32 ypic_width;
++ /* Y picture height (counted in pixels) */
++ u32 ypic_height;
++ /*
++ * line length means the distance from one pixel to the vertically
++ * next
++ */
++ u32 llength;
++ /*
++ * pixel below including the not-used blanking area, etc.
++ * (counted in pixels)
++ */
++ /* Y buffer structure */
++ struct ci_isp_buffer ybuffer;
++ /* Cb buffer structure */
++ struct ci_isp_buffer cb_buffer;
++ /* Cr buffer structure */
++ struct ci_isp_buffer cr_buffer;
++};
++
++/* DMA configuration */
++struct ci_isp_mi_dma_conf {
++ /* start DMA immediately after configuration */
++ int start_dma;
++ /* suppress v_end so that no frame end can be */
++ int frame_end_disable;
++ /*
++ * detected by the following instances
++ * enable change of byte order for read port
++ */
++ int byte_swap_enable;
++ /*
++ * Enables continuous mode. If set the same frame is read back
++ * over and over. A start pulse on dma_start is needed only for the
++ * first time. To stop continuous mode reset this bit (takes
++ * effect after the next frame end) or execute a soft reset.
++ */
++ int continuous_enable;
++ /* DMA input color format */
++ enum ci_isp_mif_col_format mrv_mif_col_format;
++ /* DMA read buffer format */
++ enum ci_isp_mif_pic_form mrv_mif_pic_form;
++ /* burst length for chrominance for read port */
++ enum ci_isp_mif_burst_length burst_length_chrom;
++ /* burst length for luminance for read port */
++ enum ci_isp_mif_burst_length burst_length_lum;
++ /*
++ * Set this to TRUE if the DMA-read data is routed through
++ * the path that is normally used for the live camera
++ * data (e.g. through the image effects module).
++ */
++ int via_cam_path;
++};
++
++/* Public CAC Defines and Typedefs */
++
++/*
++ * configuration of chromatic aberration correction block (given to the
++ * CAC driver)
++ */
++struct ci_isp_cac_config {
++ /* size of the input image in pixels */
++ u16 hsize;
++ u16 vsize;
++ /* offset between image center and optical */
++ s16 hcenter_offset;
++ /* center of the input image in pixels */
++ s16 vcenter_offset;
++ /* maximum red/blue pixel shift in horizontal */
++ u8 hclip_mode;
++ /* and vertical direction, range 0..2 */
++ u8 vclip_mode;
++ /* parameters for radial shift calculation, */
++ u16 ablue;
++ /* 9 bit twos complement with 4 fractional */
++ u16 ared;
++ /* digits, valid range -16..15.9375 */
++ u16 bblue;
++ u16 bred;
++ u16 cblue;
++ u16 cred;
++ /* 0 = square pixel sensor, all other = aspect */
++ float aspect_ratio;
++ /* ratio of non-square pixel sensor */
++
++};
++
++/*
++ * register values of chromatic aberration correction block (delivered by
++ * the CAC driver)
++ */
++struct ci_isp_cac_reg_values {
++ /* maximum red/blue pixel shift in horizontal */
++ u8 hclip_mode;
++ /* and vertical direction, range 0..2 */
++ u8 vclip_mode;
++ /* TRUE=enabled, FALSE=disabled */
++ int cac_enabled;
++ /*
++ * preload value of the horizontal CAC pixel
++ * counter, range 1..4095
++ */
++ u16 hcount_start;
++ /*
++ * preload value of the vertical CAC pixel
++ * counter, range 1..4095
++ */
++ u16 vcount_start;
++ /* parameters for radial shift calculation, */
++ u16 ablue;
++ /* 9 bit twos complement with 4 fractional */
++ u16 ared;
++ /* digits, valid range -16..15.9375 */
++ u16 bblue;
++ u16 bred;
++ u16 cblue;
++ u16 cred;
++ /* horizontal normalization shift, range 0..7 */
++ u8 xnorm_shift;
++ /* horizontal normalization factor, range 16..31 */
++ u8 xnorm_factor;
++ /* vertical normalization shift, range 0..7 */
++ u8 ynorm_shift;
++ /* vertical normalization factor, range 16..31 */
++ u8 ynorm_factor;
++};
++
++struct ci_snapshot_config {
++ /* snapshot flags */
++ u32 flags;
++ /* user zoom factor to use ( Zoomfactor = 1 + (<value>*1024) ) */
++ int user_zoom;
++ /* user width (in pixel) */
++ int user_w;
++ /* user height (in pixel) */
++ int user_h;
++ /* compression ratio for JPEG snapshots */
++ u8 compression_ratio;
++};
++
++struct ci_isp_view_finder_config {
++ /* how to display the viewfinder */
++ u32 flags;
++ /* zoom factor to use ( Zoomfactor = 1 + (<value>*1024) ) */
++ int zoom;
++ /* contrast setting for LCD */
++ int lcd_contrast;
++ /* following settings are only used in VFFLAG_MODE_USER mode */
++
++ /* start pixel of upper left corner on LCD */
++ int x;
++ /* start pixel of upper left corner on LCD */
++ int y;
++ /* width (in pixel) */
++ int w;
++ /* height (in pixel) */
++ int h;
++ /* keeps the aspect ratio by cropping the input to match the output
++ * aspect ratio. */
++ int keep_aspect;
++};
++
++/* ! Number of supported DIP-Switches */
++#define FF_DIPSWITCH_COUNT 10
++
++
++#define CI_ISP_HIST_DATA_BIN_ARR_SIZE 16
++
++struct ci_isp_hist_data_bin {
++ u8 hist_bin[CI_ISP_HIST_DATA_BIN_ARR_SIZE];
++};
++
++#define MRV_MEAN_LUMA_ARR_SIZE_COL 5
++#define MRV_MEAN_LUMA_ARR_SIZE_ROW 5
++#define MRV_MEAN_LUMA_ARR_SIZE \
++ (MRV_MEAN_LUMA_ARR_SIZE_COL*MRV_MEAN_LUMA_ARR_SIZE_ROW)
++
++/* Structure contains a 2-dim 5x5 array
++ * for mean luminance values from 5x5 MARVIN measurement grid.
++ */
++struct ci_isp_mean_luma {
++ u8 mean_luma_block[MRV_MEAN_LUMA_ARR_SIZE_COL][MRV_MEAN_LUMA_ARR_SIZE_ROW];
++};
++
++/* Structure contains bits autostop and exp_meas_mode of isp_exp_ctrl */
++struct ci_isp_exp_ctrl {
++ int auto_stop;
++ int exp_meas_mode;
++ int exp_start;
++} ;
++
++
++struct ci_isp_cfg_flags {
++ /*
++ * following flag triples control the behaviour of the associated
++ * marvin control loops.
++ * For feature XXX, the 3 flags are totally independent and
++ * have the following meaning:
++ * fXXX:
++ * If set, there is any kind of software interaction during runtime
++ * that may lead to a modification of the feature-dependent settings.
++ * For each frame, a feature specific loop control routine is called that
++ * may perform other actions based on feature specific configuration.
++ * If not set, only base settings will be applied during setup, or the
++ * reset values are left unchanged. No control routine will be called
++ * inside the processing loop.
++ * fXXXprint:
++ * If set, some status information will be printed out inside
++ * the processing loop. Status printing is independent of the
++ * other flags regarding this feature.
++ * fXXX_dis:
++ * If set, the feature dependent submodule of the marvin is
++ * disabled or is turned into bypass mode. Note that it is
++ * still possible to set one or more of the other flags too,
++ * but this wouldn't make much sense...
++ * lens shading correction
++ */
++
++ unsigned int lsc:1;
++ unsigned int lscprint:1;
++ unsigned int lsc_dis:1;
++
++ /* bad pixel correction */
++
++ unsigned int bpc:1;
++ unsigned int bpcprint:1;
++ unsigned int bpc_dis:1;
++
++ /* black level correction */
++
++ unsigned int bls:1;
++ /* only fixed values */
++ unsigned int bls_man:1;
++ /* fixed value read from smia interface */
++ unsigned int bls_smia:1;
++ unsigned int blsprint:1;
++ unsigned int bls_dis:1;
++
++ /* (automatic) white balancing
++ * (if automatic or manual can be configured elsewhere) */
++
++ unsigned int awb:1;
++ unsigned int awbprint:1;
++ unsigned int awbprint2:1;
++ unsigned int awb_dis:1;
++
++ /* automatic exposure (and gain) control */
++
++ unsigned int aec:1;
++ unsigned int aecprint:1;
++ unsigned int aec_dis:1;
++ unsigned int aec_sceval:1;
++
++ /* auto focus */
++
++ unsigned int af:1;
++ unsigned int afprint:1;
++ unsigned int af_dis:1;
++
++ /* enable flags for various other components of the marvin */
++
++ /* color processing (brightness, contrast, saturation, hue) */
++ unsigned int cp:1;
++ /* input gamma block */
++ unsigned int gamma:1;
++ /* color conversion matrix */
++ unsigned int cconv:1;
++ /* demosaicing */
++ unsigned int demosaic:1;
++ /* output gamma block */
++ unsigned int gamma2:1;
++ /* ISP de-noise and sharpening filters */
++ unsigned int isp_filters:1;
++ /* Isp CAC */
++ unsigned int cac:1;
++
++ /* demo stuff */
++
++ /* demo: saturation loop enable */
++ unsigned int cp_sat_loop:1;
++ /* demo: contrast loop enable */
++ unsigned int cp_contr_loop:1;
++ /* demo: brightness loop enable */
++ unsigned int cp_bright_loop:1;
++ /* demo: scaler loop enable */
++ unsigned int scaler_loop:1;
++ /* demo: use standard color conversion matrix */
++ unsigned int cconv_basic:1;
++
++ /* demo: use joystick to cycle through the image effect modes */
++ unsigned int cycle_ie_mode:1;
++
++ /* others */
++
++ /* enable continuous autofocus */
++ unsigned int continous_af:1;
++
++ unsigned int bad_pixel_generation:1;
++ /* enable YCbCr full range */
++ unsigned int ycbcr_full_range:1;
++ /* enable YCbCr color phase non cosited */
++ unsigned int ycbcr_non_cosited:1;
++
++};
++
++struct ci_isp_config {
++ struct ci_isp_cfg_flags flags;
++ struct ci_sensor_ls_corr_config lsc_cfg;
++ struct ci_isp_bp_corr_config bpc_cfg;
++ struct ci_isp_bp_det_config bpd_cfg;
++ struct ci_isp_wb_config wb_config;
++ struct ci_isp_cac_config cac_config;
++ struct ci_isp_aec_config aec_cfg;
++ struct ci_isp_window aec_v2_wnd;
++ struct ci_isp_bls_config bls_cfg;
++ struct ci_isp_af_config af_cfg;
++ struct ci_isp_color_settings color;
++ struct ci_isp_ie_config img_eff_cfg;
++ enum ci_isp_demosaic_mode demosaic_mode;
++ u8 demosaic_th;
++ u8 exposure;
++ enum ci_isp_aec_mode advanced_aec_mode;
++ /* what to include in reports; */
++ u32 report_details;
++ /* an or'ed combination of the FF_REPORT_xxx defines */
++ struct ci_isp_view_finder_config view_finder;
++ /* primary snapshot */
++ struct ci_snapshot_config snapshot_a;
++ /* secondary snapshot */
++ struct ci_snapshot_config snapshot_b;
++ /* auto focus measurement mode */
++ enum ci_isp_afm_mode afm_mode;
++ /* auto focus search strategy mode */
++ enum ci_isp_afss_mode afss_mode;
++ int wb_get_gains_from_sensor_driver;
++ u8 filter_level_noise_reduc;
++ u8 filter_level_sharp;
++ u8 jpeg_enc_ratio;
++};
++
++struct ci_isp_mem_info {
++ u32 isp_bar0_pa;
++ u32 isp_bar0_size;
++ u32 isp_bar1_pa;
++ u32 isp_bar1_size;
++};
++
++struct ci_pl_system_config {
++ /* to be removed */
++ struct ci_sensor_config *isi_config;
++ struct ci_sensor_caps *isi_caps;
++ struct ci_sensor_awb_profile *sensor_awb_profile;
++
++ struct ci_isp_config isp_cfg;
++ u32 focus_max;
++ unsigned int isp_hal_enable;
++ struct v4l2_jpg_review_buffer jpg_review;
++ int jpg_review_enable;
++};
++
++/* intel private ioctl code for ci isp hal interface */
++#define BASE BASE_VIDIOC_PRIVATE
++
++#define VIDIOC_SET_SYS_CFG _IOWR('V', BASE + 1, struct ci_pl_system_config)
++#define VIDIOC_SET_JPG_ENC_RATIO _IOWR('V', BASE + 2, int)
++#define VIDIOC_GET_ISP_MEM_INFO _IOWR('V', BASE + 4, struct ci_isp_mem_info)
++
++#include "ci_va.h"
++
++/* support camera flash on CDK */
++struct ci_isp_flash_cmd {
++ int preflash_on;
++ int flash_on;
++ int prelight_on;
++};
++
++struct ci_isp_flash_config {
++ int prelight_off_at_end_of_flash;
++ int vsync_edge_positive;
++ int output_polarity_low_active;
++ int use_external_trigger;
++ u8 capture_delay;
++};
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/include/ci_isp_fmts_common.h
+@@ -0,0 +1,128 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _ISP_FMTS_COMMON_H
++#define _ISP_FMTS_COMMON_H
++
++#define intel_fourcc(d, c, b, a) \
++ (((__u32)(d)<<0)|((__u32)(c)<<8)|((__u32)(b)<<16)|((__u32)(a)<<24))
++
++/* more bayer pattern formats support by ISP */
++
++/* RAW 8-bit */
++#define INTEL_PIX_FMT_RAW08 intel_fourcc('R', 'W', '0', '8')
++/* RAW 10-bit */
++#define INTEL_PIX_FMT_RAW10 intel_fourcc('R', 'W', '1', '0')
++/* RAW 12-bit */
++#define INTEL_PIX_FMT_RAW12 intel_fourcc('R', 'W', '1', '2')
++
++
++/*
++ * various config and info structs concentrated into one struct
++ * for simplification
++ */
++#define FORMAT_FLAGS_DITHER 0x01
++#define FORMAT_FLAGS_PACKED 0x02
++#define FORMAT_FLAGS_PLANAR 0x04
++#define FORMAT_FLAGS_RAW 0x08
++#define FORMAT_FLAGS_CrCb 0x10
++
++struct intel_fmt {
++ char *name;
++ unsigned long fourcc; /* v4l2 format id */
++ int depth;
++ int flags;
++};
++
++static struct intel_fmt fmts[] = {
++ {
++ .name = "565 bpp RGB",
++ .fourcc = V4L2_PIX_FMT_RGB565,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_PACKED,
++ },
++ {
++ .name = "888 bpp BGR",
++ .fourcc = V4L2_PIX_FMT_BGR32,
++ .depth = 32,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:2, packed, YUYV",
++ .fourcc = V4L2_PIX_FMT_YUYV,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_PACKED,
++ },
++ {
++ .name = "4:2:2 planar, YUV422P",
++ .fourcc = V4L2_PIX_FMT_YUV422P,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:0 planar, YUV420",
++ .fourcc = V4L2_PIX_FMT_YUV420,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:0 planar, YVU420",
++ .fourcc = V4L2_PIX_FMT_YVU420,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:0 semi planar, NV12",
++ .fourcc = V4L2_PIX_FMT_NV12,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "Compressed format, JPEG",
++ .fourcc = V4L2_PIX_FMT_JPEG,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "Sequential RGB",
++ .fourcc = INTEL_PIX_FMT_RAW08,
++ .depth = 8,
++ .flags = FORMAT_FLAGS_RAW,
++ },
++ {
++ .name = "Sequential RGB",
++ .fourcc = INTEL_PIX_FMT_RAW10,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_RAW,
++ },
++ {
++ .name = "Sequential RGB",
++ .fourcc = INTEL_PIX_FMT_RAW12,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_RAW,
++ },
++};
++
++static int NUM_FORMATS = sizeof(fmts) / sizeof(struct intel_fmt);
++#endif /* _ISP_FMTS_H */
++
+--- /dev/null
++++ b/drivers/staging/mrstci/include/ci_sensor_common.h
+@@ -0,0 +1,1233 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _SENSOR_COMMON_H
++#define _SENSOR_COMMON_H
++
++#include <media/v4l2-subdev.h>
++
++#define AEC_ALGO_V1 1
++#define AEC_ALGO_V2 2
++#define AEC_ALGO_V3 3
++#define AEC_ALGO_V4 4
++
++#ifndef AEC_ALGO
++#define AEC_ALGO AEC_ALGO_V3 /*AEC_ALGO_V2*/
++#endif
++/*
++ * interface version
++ * please increment the version if you add something new to the interface.
++ * This helps upper layer software to deal with different interface versions.
++ */
++#define SENSOR_INTERFACE_VERSION 4
++#define SENSOR_TYPE_SOC 0
++#define SENSOR_TYPE_RAW 1
++/* Just for current use case */
++#define SENSOR_TYPE_2M 0
++#define SENSOR_TYPE_5M 1
++
++/*
++ * capabilities / configuration
++ */
++
++/* ulBusWidth; */
++/*
++ * to expand to a (possibly higher) resolution in marvin, the LSBs will be set
++ * to zero
++ */
++#define SENSOR_BUSWIDTH_8BIT_ZZ 0x00000001
++/*
++ * to expand to a (possibly higher) resolution in marvin, the LSBs will be
++ * copied from the MSBs
++ */
++#define SENSOR_BUSWIDTH_8BIT_EX 0x00000002
++/*
++ * formerly known as SENSOR_BUSWIDTH_10BIT (at times no marvin derivative was
++ * able to process more than 10 bit)
++ */
++#define SENSOR_BUSWIDTH_10BIT_EX 0x00000004
++#define SENSOR_BUSWIDTH_10BIT_ZZ 0x00000008
++#define SENSOR_BUSWIDTH_12BIT 0x00000010
++
++#define SENSOR_BUSWIDTH_10BIT SENSOR_BUSWIDTH_10BIT_EX
++
++/*
++ * ulMode, operating mode of the image sensor in terms of output data format
++ * and
++ */
++
++/* timing data transmission */
++
++/* YUV-Data with separate h/v sync lines (ITU-R BT.601) */
++#define SENSOR_MODE_BT601 0x00000001
++/* YUV-Data with sync words inside the datastream (ITU-R BT.656) */
++#define SENSOR_MODE_BT656 0x00000002
++/* Bayer data with separate h/v sync lines */
++#define SENSOR_MODE_BAYER 0x00000004
++/*
++ * Any binary data without line/column-structure, (e.g. already JPEG encoded)
++ * h/v sync lines act as data valid signals
++ */
++#define SENSOR_MODE_DATA 0x00000008
++/* RAW picture data with separate h/v sync lines */
++#define SENSOR_MODE_PICT 0x00000010
++/* RGB565 data with separate h/v sync lines */
++#define SENSOR_MODE_RGB565 0x00000020
++/* SMIA conform data stream (see ulSmiaMode for details) */
++#define SENSOR_MODE_SMIA 0x00000040
++/* MIPI conform data stream (see ulMipiMode for details) */
++#define SENSOR_MODE_MIPI 0x00000080
++/*
++ * Bayer data with sync words inside the datastream (similar to ITU-R BT.656)
++ */
++#define SENSOR_MODE_BAY_BT656 0x00000100
++/*
++ * Raw picture data with sync words inside the datastream (similar to ITU-R
++ * BT.656)
++ */
++#define SENSOR_MODE_RAW_BT656 0x00000200
++
++/* ulSmiaMode */
++
++/* compression mode */
++#define SENSOR_SMIA_MODE_COMPRESSED 0x00000001
++/* 8bit to 10 bit decompression */
++#define SENSOR_SMIA_MODE_RAW_8_TO_10_DECOMP 0x00000002
++/* 12 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_12 0x00000004
++/* 10 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_10 0x00000008
++/* 8 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_8 0x00000010
++/* 7 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_7 0x00000020
++/* 6 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_6 0x00000040
++/* RGB 888 Display ready Data */
++#define SENSOR_SMIA_MODE_RGB_888 0x00000080
++/* RGB 565 Display ready Data */
++#define SENSOR_SMIA_MODE_RGB_565 0x00000100
++/* RGB 444 Display ready Data */
++#define SENSOR_SMIA_MODE_RGB_444 0x00000200
++/* YUV420 Data */
++#define SENSOR_SMIA_MODE_YUV_420 0x00000400
++/* YUV422 Data */
++#define SENSOR_SMIA_MODE_YUV_422 0x00000800
++/* SMIA is disabled */
++#define SENSOR_SMIA_OFF 0x80000000
++
++/* ulMipiMode */
++
++/* YUV 420 8-bit */
++#define SENSOR_MIPI_MODE_YUV420_8 0x00000001
++/* YUV 420 10-bit */
++#define SENSOR_MIPI_MODE_YUV420_10 0x00000002
++/* Legacy YUV 420 8-bit */
++#define SENSOR_MIPI_MODE_LEGACY_YUV420_8 0x00000004
++/* YUV 420 8-bit (CSPS) */
++#define SENSOR_MIPI_MODE_YUV420_CSPS_8 0x00000008
++/* YUV 420 10-bit (CSPS) */
++#define SENSOR_MIPI_MODE_YUV420_CSPS_10 0x00000010
++/* YUV 422 8-bit */
++#define SENSOR_MIPI_MODE_YUV422_8 0x00000020
++/* YUV 422 10-bit */
++#define SENSOR_MIPI_MODE_YUV422_10 0x00000040
++/* RGB 444 */
++#define SENSOR_MIPI_MODE_RGB444 0x00000080
++/* RGB 555 */
++#define SENSOR_MIPI_MODE_RGB555 0x00000100
++/* RGB 565 */
++#define SENSOR_MIPI_MODE_RGB565 0x00000200
++/* RGB 666 */
++#define SENSOR_MIPI_MODE_RGB666 0x00000400
++/* RGB 888 */
++#define SENSOR_MIPI_MODE_RGB888 0x00000800
++/* RAW_6 */
++#define SENSOR_MIPI_MODE_RAW_6 0x00001000
++/* RAW_7 */
++#define SENSOR_MIPI_MODE_RAW_7 0x00002000
++/* RAW_8 */
++#define SENSOR_MIPI_MODE_RAW_8 0x00004000
++/* RAW_10 */
++#define SENSOR_MIPI_MODE_RAW_10 0x00008000
++/* RAW_12 */
++#define SENSOR_MIPI_MODE_RAW_12 0x00010000
++/* MIPI is disabled */
++#define SENSOR_MIPI_OFF 0x80000000
++
++/* ulFieldInv; */
++
++#define SENSOR_FIELDINV_NOSWAP 0x00000001
++#define SENSOR_FIELDINV_SWAP 0x00000002
++
++/* ulFieldSel; */
++
++#define SENSOR_FIELDSEL_BOTH 0x00000001
++#define SENSOR_FIELDSEL_EVEN 0x00000002
++#define SENSOR_FIELDSEL_ODD 0x00000004
++
++/* ulYCSeq; */
++
++#define SENSOR_YCSEQ_YCBYCR 0x00000001
++#define SENSOR_YCSEQ_YCRYCB 0x00000002
++#define SENSOR_YCSEQ_CBYCRY 0x00000004
++#define SENSOR_YCSEQ_CRYCBY 0x00000008
++
++/* ulConv422; */
++#if 0
++#define SENSOR_CONV422_COSITED 0x00000001
++#define SENSOR_CONV422_NOCOSITED 0x00000002
++#define SENSOR_CONV422_COLORINT 0x00000004
++#endif
++#define SENSOR_CONV422_COSITED 0x00000001
++#define SENSOR_CONV422_INTER 0x00000002
++#define SENSOR_CONV422_NOCOSITED 0x00000004
++
++/* ulBPat; */
++
++#define SENSOR_BPAT_RGRGGBGB 0x00000001
++#define SENSOR_BPAT_GRGRBGBG 0x00000002
++#define SENSOR_BPAT_GBGBRGRG 0x00000004
++#define SENSOR_BPAT_BGBGGRGR 0x00000008
++
++/* ulHPol; */
++
++/* sync signal pulses high between lines */
++#define SENSOR_HPOL_SYNCPOS 0x00000001
++/* sync signal pulses low between lines */
++#define SENSOR_HPOL_SYNCNEG 0x00000002
++/* reference signal is high as long as sensor puts out line data */
++#define SENSOR_HPOL_REFPOS 0x00000004
++/* reference signal is low as long as sensor puts out line data */
++#define SENSOR_HPOL_REFNEG 0x00000008
++
++/* ulVPol; */
++
++#define SENSOR_VPOL_POS 0x00000001
++#define SENSOR_VPOL_NEG 0x00000002
++
++/* ulEdge; */
++
++#define SENSOR_EDGE_RISING 0x00000001
++#define SENSOR_EDGE_FALLING 0x00000002
++
++/* ulBls; */
++
++/* turns on/off additional black lines at frame start */
++#define SENSOR_BLS_OFF 0x00000001
++#define SENSOR_BLS_TWO_LINES 0x00000002
++/* two lines top and two lines bottom */
++#define SENSOR_BLS_FOUR_LINES 0x00000004
++
++/* ulGamma; */
++
++/* turns on/off gamma correction in the sensor ISP */
++#define SENSOR_GAMMA_ON 0x00000001
++#define SENSOR_GAMMA_OFF 0x00000002
++
++/* ulCConv; */
++
++/* turns on/off color conversion matrix in the sensor ISP */
++#define SENSOR_CCONV_ON 0x00000001
++#define SENSOR_CCONV_OFF 0x00000002
++
++/* ulRes; */
++
++/* 88x72 */
++#define SENSOR_RES_QQCIF 0x00000001
++/* 160x120 */
++#define SENSOR_RES_QQVGA 0x00000002
++/* 176x144 */
++#define SENSOR_RES_QCIF 0x00000004
++/* 320x240 */
++#define SENSOR_RES_QVGA 0x00000008
++/* 352x288 */
++#define SENSOR_RES_CIF 0x00000010
++/* 640x480 */
++#define SENSOR_RES_VGA 0x00000020
++/* 800x600 */
++#define SENSOR_RES_SVGA 0x00000040
++/* 1024x768 */
++#define SENSOR_RES_XGA 0x00000080
++/* 1280x960 max. resolution of OV9640 (QuadVGA) */
++#define SENSOR_RES_XGA_PLUS 0x00000100
++/* 1280x1024 */
++#define SENSOR_RES_SXGA 0x00000200
++/* 1600x1200 */
++#define SENSOR_RES_UXGA 0x00000400
++/* 2048x1536 */
++#define SENSOR_RES_QXGA 0x00000800
++#define SENSOR_RES_QXGA_PLUS 0x00001000
++#define SENSOR_RES_RAWMAX 0x00002000
++/* 4080x1024 */
++#define SENSOR_RES_YUV_HMAX 0x00004000
++/* 1024x4080 */
++#define SENSOR_RES_YUV_VMAX 0x00008000
++#ifdef _DEBUG
++/* depends on further defines (TEST_SIZE_H and TEST_SIZE_V) */
++#define SENSOR_RES_TEST 0x00010000
++
++#define TEST_SIZE_H (2600)
++#define TEST_SIZE_V (2046)
++/* #define TEST_SIZE_V (1950) */
++
++/* #ifdef _DEBUG */
++#endif
++/* 720x480 */
++#define SENSOR_RES_L_AFM 0x00020000
++/* 128x96 */
++#define SENSOR_RES_M_AFM 0x00040000
++/* 64x32 */
++#define SENSOR_RES_S_AFM 0x00080000
++/* 352x240 */
++#define SENSOR_RES_BP1 0x00100000
++/* 2586x2048, quadruple SXGA, 5,3 Mpix */
++#define SENSOR_RES_QSXGA 0x00200000
++/* 2600x2048, max. resolution of M5, 5,32 Mpix */
++#define SENSOR_RES_QSXGA_PLUS 0x00400000
++/* 2600x1950 */
++#define SENSOR_RES_QSXGA_PLUS2 0x00800000
++/* 2686x2048, 5.30M */
++#define SENSOR_RES_QSXGA_PLUS3 0x01000000
++/* 3200x2048, 6.56M */
++#define SENSOR_RES_WQSXGA 0x02000000
++/* 3200x2400, 7.68M */
++#define SENSOR_RES_QUXGA 0x04000000
++/* 3840x2400, 9.22M */
++#define SENSOR_RES_WQUXGA 0x08000000
++/* 4096x3072, 12.59M */
++#define SENSOR_RES_HXGA 0x10000000
++
++/* 2592x1044 replace with SENSOR_RES_QXGA_PLUS */
++/*#define SENSOR_RES_QSXGA_PLUS4 0x10000000*/
++/* 1920x1080 */
++#define SENSOR_RES_1080P 0x20000000
++/* 1280x720 */
++#define SENSOR_RES_720P 0x40000000
++
++/* FIXME 1304x980*/
++#define SENSOR_RES_VGA_PLUS 0x80000000
++#define VGA_PLUS_SIZE_H (1304)
++#define VGA_PLUS_SIZE_V (980)
++
++#define QSXGA_PLUS4_SIZE_H (2592)
++#define QSXGA_PLUS4_SIZE_V (1944)
++#define RES_1080P_SIZE_H (1920)
++#define RES_1080P_SIZE_V (1080)
++#define RES_720P_SIZE_H (1280)
++#define RES_720P_SIZE_V (720)
++#define QQCIF_SIZE_H (88)
++#define QQCIF_SIZE_V (72)
++#define QQVGA_SIZE_H (160)
++#define QQVGA_SIZE_V (120)
++#define QCIF_SIZE_H (176)
++#define QCIF_SIZE_V (144)
++#define QVGA_SIZE_H (320)
++#define QVGA_SIZE_V (240)
++#define CIF_SIZE_H (352)
++#define CIF_SIZE_V (288)
++#define VGA_SIZE_H (640)
++#define VGA_SIZE_V (480)
++#define SVGA_SIZE_H (800)
++#define SVGA_SIZE_V (600)
++#define XGA_SIZE_H (1024)
++#define XGA_SIZE_V (768)
++#define XGA_PLUS_SIZE_H (1280)
++#define XGA_PLUS_SIZE_V (960)
++#define SXGA_SIZE_H (1280)
++#define SXGA_SIZE_V (1024)
++/* will be removed soon */
++#define QSVGA_SIZE_H (1600)
++/* will be removed soon */
++#define QSVGA_SIZE_V (1200)
++#define UXGA_SIZE_H (1600)
++#define UXGA_SIZE_V (1200)
++#define QXGA_SIZE_H (2048)
++#define QXGA_SIZE_V (1536)
++#define QXGA_PLUS_SIZE_H (2592)
++#define QXGA_PLUS_SIZE_V (1944)
++#define RAWMAX_SIZE_H (4096)
++#define RAWMAX_SIZE_V (2048)
++#define YUV_HMAX_SIZE_H (4080)
++#define YUV_HMAX_SIZE_V (1024)
++#define YUV_VMAX_SIZE_H (1024)
++#define YUV_VMAX_SIZE_V (4080)
++#define BP1_SIZE_H (352)
++#define BP1_SIZE_V (240)
++#define L_AFM_SIZE_H (720)
++#define L_AFM_SIZE_V (480)
++#define M_AFM_SIZE_H (128)
++#define M_AFM_SIZE_V (96)
++#define S_AFM_SIZE_H (64)
++#define S_AFM_SIZE_V (32)
++#define QSXGA_SIZE_H (2560)
++#define QSXGA_SIZE_V (2048)
++#define QSXGA_MINUS_SIZE_V (1920)
++#define QSXGA_PLUS_SIZE_H (2600)
++#define QSXGA_PLUS_SIZE_V (2048)
++#define QSXGA_PLUS2_SIZE_H (2600)
++#define QSXGA_PLUS2_SIZE_V (1950)
++#define QUXGA_SIZE_H (3200)
++#define QUXGA_SIZE_V (2400)
++#define SIZE_H_2500 (2500)
++#define QSXGA_PLUS3_SIZE_H (2686)
++#define QSXGA_PLUS3_SIZE_V (2048)
++#define QSXGA_PLUS4_SIZE_V (1944)
++#define WQSXGA_SIZE_H (3200)
++#define WQSXGA_SIZE_V (2048)
++#define WQUXGA_SIZE_H (3200)
++#define WQUXGA_SIZE_V (2400)
++#define HXGA_SIZE_H (4096)
++#define HXGA_SIZE_V (3072)
++
++/* ulBLC; */
++#define SENSOR_DWNSZ_SUBSMPL 0x00000001
++#define SENSOR_DWNSZ_SCAL_BAY 0x00000002
++#define SENSOR_DWNSZ_SCAL_COS 0x00000004
++
++/* Camera BlackLevelCorrection on */
++#define SENSOR_BLC_AUTO 0x00000001
++/* Camera BlackLevelCorrection off */
++#define SENSOR_BLC_OFF 0x00000002
++
++/* ulAGC; */
++
++/* Camera AutoGainControl on */
++#define SENSOR_AGC_AUTO 0x00000001
++/* Camera AutoGainControl off */
++#define SENSOR_AGC_OFF 0x00000002
++
++/* ulAWB; */
++
++/* Camera AutoWhiteBalance on */
++#define SENSOR_AWB_AUTO 0x00000001
++/* Camera AutoWhiteBalance off */
++#define SENSOR_AWB_OFF 0x00000002
++
++/* ulAEC; */
++
++/* Camera AutoExposureControl on */
++#define SENSOR_AEC_AUTO 0x00000001
++/* Camera AutoExposureControl off */
++#define SENSOR_AEC_OFF 0x00000002
++
++/* ulCieProfile; */
++#define ISI_AEC_MODE_STAND 0x00000001
++#define ISI_AEC_MODE_SLOW 0x00000002
++#define ISI_AEC_MODE_FAST 0x00000004
++#define ISI_AEC_MODE_NORMAL 0x00000008
++#define SENSOR_CIEPROF_A 0x00000001
++#define SENSOR_CIEPROF_B 0x00000002
++#define SENSOR_CIEPROF_C 0x00000004
++#define SENSOR_CIEPROF_D50 0x00000008
++#define SENSOR_CIEPROF_D55 0x00000010
++#define SENSOR_CIEPROF_D65 0x00000020
++#define SENSOR_CIEPROF_D75 0x00000040
++#define SENSOR_CIEPROF_E 0x00000080
++#define SENSOR_CIEPROF_FLUOR 0x00000100
++#define SENSOR_CIEPROF_FLUORH 0x00000200
++#define SENSOR_CIEPROF_TUNG 0x00000400
++#define SENSOR_CIEPROF_TWI 0x00000800
++#define SENSOR_CIEPROF_SUN 0x00001000
++#define SENSOR_CIEPROF_FLASH 0x00002000
++#define SENSOR_CIEPROF_SHADE 0x00004000
++#define SENSOR_CIEPROF_DAY 0x00008000
++#define SENSOR_CIEPROF_F1 0x00010000
++#define SENSOR_CIEPROF_F2 0x00020000
++#define SENSOR_CIEPROF_F3 0x00040000
++#define SENSOR_CIEPROF_F4 0x00080000
++#define SENSOR_CIEPROF_F5 0x00100000
++#define SENSOR_CIEPROF_F6 0x00200000
++#define SENSOR_CIEPROF_F7 0x00400000
++#define SENSOR_CIEPROF_F8 0x00800000
++#define SENSOR_CIEPROF_F9 0x01000000
++#define SENSOR_CIEPROF_F10 0x02000000
++#define SENSOR_CIEPROF_F11 0x04000000
++#define SENSOR_CIEPROF_F12 0x08000000
++#define SENSOR_CIEPROF_CLOUDY 0x10000000
++#define SENSOR_CIEPROF_SUNNY 0x20000000
++#define SENSOR_CIEPROF_OLDISS 0x80000000
++#define SENSOR_CIEPROF_DEFAULT 0x00000000
++
++/* ulFlickerFreq */
++
++/* no compensation for flickering environmental illumination */
++#define SENSOR_FLICKER_OFF 0x00000001
++/* compensation for 100Hz flicker frequency (at 50Hz mains frequency) */
++#define SENSOR_FLICKER_100 0x00000002
++/* compensation for 120Hz flicker frequency (at 60Hz mains frequency) */
++#define SENSOR_FLICKER_120 0x00000004
++
++/*
++ * sensor capabilities struct: a struct member may have 0, 1 or several bits
++ * set according to the capabilities of the sensor. All struct members must be
++ * unsigned int and no padding is allowed. Thus, access to the fields is also
++ * possible by means of a field of unsigned int values. Indices for the
++ * field-like access are given below.
++ */
++struct ci_sensor_caps{
++ unsigned int bus_width;
++ unsigned int mode;
++ unsigned int field_inv;
++ unsigned int field_sel;
++ unsigned int ycseq;
++ unsigned int conv422;
++ unsigned int bpat;
++ unsigned int hpol;
++ unsigned int vpol;
++ unsigned int edge;
++ unsigned int bls;
++ unsigned int gamma;
++ unsigned int cconv;
++ unsigned int res;
++ unsigned int dwn_sz;
++ unsigned int blc;
++ unsigned int agc;
++ unsigned int awb;
++ unsigned int aec;
++ /* extension SENSOR version 2 */
++ unsigned int cie_profile;
++
++ /* extension SENSOR version 3 */
++ unsigned int flicker_freq;
++
++ /* extension SENSOR version 4 */
++ unsigned int smia_mode;
++ unsigned int mipi_mode;
++
++ /* Add name here to load shared library */
++ unsigned int type;
++ char name[32];
++
++ struct v4l2_subdev sd;
++};
++
++#define SENSOR_CAP_BUSWIDTH 0
++#define SENSOR_CAP_MODE 1
++#define SENSOR_CAP_FIELDINV 2
++#define SENSOR_CAP_FIELDSEL 3
++#define SENSOR_CAP_YCSEQ 4
++#define SENSOR_CAP_CONV422 5
++#define SENSOR_CAP_BPAT 6
++#define SENSOR_CAP_HPOL 7
++#define SENSOR_CAP_VPOL 8
++#define SENSOR_CAP_EDGE 9
++#define SENSOR_CAP_BLS 10
++#define SENSOR_CAP_GAMMA 11
++#define SENSOR_CAP_CCONF 12
++#define SENSOR_CAP_RES 13
++#define SENSOR_CAP_DWNSZ 14
++#define SENSOR_CAP_BLC 15
++#define SENSOR_CAP_AGC 16
++#define SENSOR_CAP_AWB 17
++#define SENSOR_CAP_AEC 18
++#define SENSOR_CAP_CIEPROFILE 19
++#define SENSOR_CAP_FLICKERFREQ 20
++#define SENSOR_CAP_SMIAMODE 21
++#define SENSOR_CAP_MIPIMODE 22
++#define SENSOR_CAP_AECMODE 23
++
++
++/* size of capabilities array (in number of unsigned int fields) */
++#define SENSOR_CAP_COUNT 24
++
++/*
++ * Sensor configuration struct: same layout as the capabilities struct, but to
++ * configure the sensor all struct members which are supported by the sensor
++ * must have only 1 bit set. Members which are not supported by the sensor
++ * must not have any bits set.
++ */
++#define ci_sensor_config ci_sensor_caps
++
++/* single parameter support */
++
++/* exposure time */
++#define SENSOR_PARM_EXPOSURE 0
++/* index in the AE control table */
++#define SENSOR_PARM_EXPTBL_INDEX 1
++
++/* gain */
++/* overall gain (all components) */
++#define SENSOR_PARM_GAIN 2
++/* component gain of the red pixels */
++#define SENSOR_PARM_CGAIN_R 3
++/* component gain of the green pixels */
++#define SENSOR_PARM_CGAIN_G 4
++/* component gain of the blue pixels */
++#define SENSOR_PARM_CGAIN_B 5
++/*
++ * component gain of the green pixels sharing a bayer line with the red ones
++ */
++#define SENSOR_PARM_CGAINB_GR 6
++/*
++ * component gain of the green pixels sharing a bayer line with the blue ones
++ */
++#define SENSOR_PARM_CGAINB_GB 7
++
++/* blacklevel */
++
++/* black-level adjustment (all components) */
++#define SENSOR_PARM_BLKL 8
++/* component black-level of the red pixels */
++#define SENSOR_PARM_CBLKL_R 9
++/* component black-level of the green pixels */
++#define SENSOR_PARM_CBLKL_G 10
++/* component black-level of the blue pixels */
++#define SENSOR_PARM_CBLKL_B 11
++/*
++ * component black-level of the green pixels sharing a bayer line with the red
++ * ones
++ */
++#define SENSOR_PARM_CBLKLB_GR 12
++/*
++ * component black-level of the green pixels sharing a bayer line with the
++ * blue ones
++ */
++#define SENSOR_PARM_CBLKLB_GB 13
++
++/* resolution & cropping */
++
++/* base resolution in pixel (X) */
++#define SENSOR_PARM_BASERES_X 14
++/* base resolution in pixel (Y) */
++#define SENSOR_PARM_BASERES_Y 15
++/* window top-left pixel (X) */
++#define SENSOR_PARM_WINDOW_X 16
++/* window top-left pixel (Y) */
++#define SENSOR_PARM_WINDOW_Y 17
++/* window width in pixel */
++#define SENSOR_PARM_WINDOW_W 18
++/* window height in pixel */
++#define SENSOR_PARM_WINDOW_H 19
++
++/* frame rate / clock */
++
++/*
++ * frame rate in frames per second, fixed point format, 16 bit fractional part
++ */
++#define SENSOR_PARM_FRAMERATE_FPS 20
++/* frame rate fine adjustment */
++#define SENSOR_PARM_FRAMERATE_PITCH 21
++/* clock divider setting */
++#define SENSOR_PARM_CLK_DIVIDER 22
++/* input clock in Hz. */
++#define SENSOR_PARM_CLK_INPUT 23
++/*
++ * output (pixel-) clock in Hz. Note that for e.g. YUV422-formats, 2 pixel
++ * clock cycles are needed per pixel
++ */
++#define SENSOR_PARM_CLK_PIXEL 24
++
++/* number of parameter IDs */
++
++#define SENSOR_PARM__COUNT 25
++
++/* bit description of the result of the IsiParmInfo routine */
++
++/* parameter can be retrieved from the sensor */
++#define SENSOR_PARMINFO_GET 0x00000001
++/* parameter can be set into the sensor */
++#define SENSOR_PARMINFO_SET 0x00000002
++/* parameter can change at any time during operation */
++#define SENSOR_PARMINFO_VOLATILE 0x00000004
++/* range information available for the parameter in question */
++#define SENSOR_PARMINFO_RANGE 0x00000008
++/* range of possible values is not continuous. */
++#define SENSOR_PARMINFO_DISCRETE 0x00000010
++/* parameter may change after a configuration update. */
++#define SENSOR_PARMINFO_CONFIG 0x00000020
++/* range information may change after a configuration update. */
++#define SENSOR_PARMINFO_RCONFIG 0x00000040
++
++/* multi-camera support */
++#define SENSOR_UNKNOWN_SENSOR_ID (0)
++
++/* structure / type definitions */
++/*
++ * Input gamma correction curve for R, G or B of the sensor. Since this gamma
++ * curve is sensor specific, it will be delivered by the sensor's specific code.
++ * This curve will be programmed into Marvin registers.
++ */
++#define SENSOR_GAMMA_CURVE_ARR_SIZE (17)
++
++struct ci_sensor_gamma_curve{
++ unsigned short isp_gamma_y[SENSOR_GAMMA_CURVE_ARR_SIZE];
++
++ /* if three curves are given separately for RGB */
++ unsigned int gamma_dx0;
++
++ /* only the struct for R holds valid DX values */
++ unsigned int gamma_dx1;
++};
++
++/*
++ * SENSOR fixed point constant values They are represented as signed fixed point
++ * numbers with 12 bit integer and 20 bit fractional part, thus ranging from
++ * -2048.0000000 (0x80000000) to +2047.9999990 (0x7FFFFFFF). In the following
++ * some frequently used constant values are defined.
++ */
++/* - 0.794944 */
++#define SENSOR_FP_M0000_794944 (0xFFF347E9)
++/* - 0.500000 */
++#define SENSOR_FP_M0000_500000 (0xFFF80000)
++/* - 0.404473 */
++#define SENSOR_FP_M0000_404473 (0xFFF98748)
++/* - 0.062227 */
++#define SENSOR_FP_M0000_062227 (0xFFFF011F)
++/* - 0.024891 */
++#define SENSOR_FP_M0000_024891 (0xFFFF9A0C)
++
++/* 0.000000 */
++#define SENSOR_FP_P0000_000000 (0x00000000)
++
++/* + 0.500000 */
++#define SENSOR_FP_P0000_500000 (0x00080000)
++/* + 1.000000 */
++#define SENSOR_FP_P0001_000000 (0x00100000)
++/* + 1.163636 */
++#define SENSOR_FP_P0001_163636 (0x00129E40)
++/* + 1.600778 */
++#define SENSOR_FP_P0001_600778 (0x00199CC9)
++/* + 1.991249 */
++#define SENSOR_FP_P0001_991249 (0x001FDC27)
++/* + 16.000000 */
++#define SENSOR_FP_P0016_000000 (0x01000000)
++/* + 128.000000 */
++#define SENSOR_FP_P0128_000000 (0x08000000)
++/* + 255.000000 */
++#define SENSOR_FP_P0255_000000 (0x0FF00000)
++/* + 256.000000 */
++#define SENSOR_FP_P0256_000000 (0x10000000)
++
++/*
++ * Matrix coefficients used for CrossTalk and/or color conversion. The 9
++ * coefficients are laid out as follows (zero based index):
++ * 0 | 1 | 2
++ * 3 | 4 | 5
++ * 6 | 7 | 8
++ * They are represented as signed fixed point numbers with 12 bit integer and
++ * 20 bit fractional part, thus ranging from -2048.0000000 (0x80000000) to
++ * +2047.9999990 (0x7FFFFFFF).
++ */
++struct ci_sensor_3x3_matrix{
++ int coeff[9];
++};
++
++/*
++ * Matrix coefficients used for CrossTalk and/or color conversion. The 9
++ * coefficients are laid out as follows (zero based index):
++ * 0 | 1 | 2
++ * 3 | 4 | 5
++ * 6 | 7 | 8
++ * They are represented as float numbers
++ */
++struct ci_sensor_3x3_float_matrix{
++ float coeff[9];
++};
++
++struct ci_sensor_3x1_float_matrix{
++ float coeff[3];
++};
++
++struct ci_sensor_4x1_float_matrix{
++ float coeff[4];
++};
++
++struct ci_sensor_3x2_float_matrix{
++ float coeff[6];
++};
++
++struct ci_sensor_2x1_float_matrix{
++ float coeff[2];
++};
++
++struct ci_sensor_2x2_float_matrix{
++ float coeff[4];
++};
++
++struct ci_sensor_1x1_float_matrix{
++ float coeff[1];
++};
++
++struct ci_sensor_gauss_factor{
++ float gauss_factor;
++};
++
++struct isp_pca_values{
++ float pcac1;
++ float pcac2;
++};
++
++/*
++ * CrossTalk offset. In addition to the matrix multiplication an offset can be
++ * added to the pixel values for R, G and B separately. This offset is applied
++ * after the matrix multiplication. The values are arranged as unified, see
++ * above.
++ */
++struct ci_sensor_xtalk_offset{
++ int ct_offset_red;
++ int ct_offset_green;
++ int ct_offset_blue;
++};
++
++struct ci_sensor_xtalk_float_offset{
++ float ct_offset_red;
++ float ct_offset_green;
++ float ct_offset_blue;
++};
++
++/*
++ * white balancing gains There are two green gains: One for the green Bayer
++ * patterns in the red and one for the blue line. In the case the used MARVIN
++ * derivative is not able to apply separate green gains the mean value of both
++ * greens will be used for the green gain. The component gains are represented
++ * as signed fixed point numbers with 12 bit integer and 20 bit fractional
++ * part, thus ranging from -2048.0000000 (0x80000000) to +2047.9999990
++ * (0x7FFFFFFF). Example: +1.0 is represented by 0x00100000.
++ */
++struct ci_sensor_component_gain{
++ float red;
++ float green_r;
++ float green_b;
++ float blue;
++};
++
++/*
++ * white balance values, default is 0x80 for all components. The struct can be
++ * used to provide linear scaling factors to achieve a suitable white balance
++ * for certain lighting conditions.
++ */
++struct ci_sensor_comp_gain{
++ float red;
++ float green;
++ float blue;
++};
++
++/*
++ * cross-talk matrix dependent minimum / maximum red and blue gains
++ */
++struct ci_sensor_component_gain_limits{
++ unsigned short red_lower_limit;
++ unsigned short red_upper_limit;
++ unsigned short blue_lower_limit;
++ unsigned short blue_upper_limit;
++ unsigned int next_cie_higher_temp;
++ unsigned int next_cie_lower_temp;
++};
++
++/*
++* sensor characteristic struct. Is filled in by sensor specific code after
++* main configuration. Features not supported by the sensor driver code
++* will be initialized with default values (1x linear gamma, standard
++* color conversion, cross talk and component gain settings).
++*/
++struct ci_sensor_awb_profile{
++
++ /*
++ * In the case that all 3 gamma curves are identically, just
++ * set all 3 pointers to the same address.
++ */
++
++ /* input gammaR */
++ const struct ci_sensor_gamma_curve *gamma_curve_r;
++
++ /* input gammaG */
++ const struct ci_sensor_gamma_curve *gamma_curve_g;
++
++ /* input gammaB */
++ const struct ci_sensor_gamma_curve *gamma_curve_b;
++
++ /* ColorConversion matrix coefficients */
++ const struct ci_sensor_3x3_float_matrix *color_conv_coeff;
++
++ /* CrossTalk matrix coefficients */
++ const struct ci_sensor_3x3_float_matrix *cross_talk_coeff;
++
++ /* CrossTalk offsets */
++ const struct ci_sensor_xtalk_float_offset *cross_talk_offset;
++ const struct ci_sensor_3x1_float_matrix *svd_mean_value;
++ const struct ci_sensor_3x2_float_matrix *pca_matrix;
++ const struct ci_sensor_2x1_float_matrix *gauss_mean_value;
++ const struct ci_sensor_2x2_float_matrix *covariance_matrix;
++ const struct ci_sensor_gauss_factor *gauss_factor;
++ const struct ci_sensor_2x1_float_matrix *threshold;
++ const struct ci_sensor_1x1_float_matrix *k_factor;
++ const struct ci_sensor_1x1_float_matrix *gexp_middle;
++ const struct ci_sensor_1x1_float_matrix *var_distr_in;
++ const struct ci_sensor_1x1_float_matrix *mean_distr_in;
++ const struct ci_sensor_1x1_float_matrix *var_distr_out;
++ const struct ci_sensor_1x1_float_matrix *mean_distr_out;
++ const struct ci_sensor_component_gain *component_gain;
++ const struct ci_sensor_loc_dist *loc_dist;
++
++};
++
++/*
++ * General purpose window. Normally it is used to describe a WOI (Window Of
++ * Interest) inside the background area (e.g. image data area). The offset
++ * values count from 0 of the background area. The defined point is the upper
++ * left corner of the WOI with the specified width and height.
++ */
++struct ci_sensor_window{
++ unsigned short hoffs;
++ unsigned short voffs;
++ unsigned short hsize;
++ unsigned short vsize;
++};
++
++/*
++ * Image data description. The frame size describes the complete image data
++ * area output of the sensor. This includes dummy, black, dark, visible and
++ * manufacturer specific pixels which could be combined in rows and / or in
++ * columns. The visible window describes the visible pixel area inside the
++ * image data area. In the case the image data area does only contain visible
++ * pixels, the offset values have to be 0 and the horizontal and vertical
++ * sizes are equal to the frame size.
++ */
++struct ci_sensor_image_data_info{
++ unsigned short frame_h_size;
++ unsigned short frame_v_size;
++ struct ci_sensor_window visible_window;
++};
++
++/* black level compensation mean values */
++struct ci_sensor_blc_mean{
++ unsigned char mean_a;
++ unsigned char mean_b;
++ unsigned char mean_c;
++ unsigned char mean_d;
++};
++
++/* autowhitebalance mean values */
++
++struct ci_sensor_awb_mean{
++#if 0
++ unsigned int white;
++ unsigned char mean_y;
++ unsigned char mean_cb;
++ unsigned char mean_cr;
++#else
++ unsigned int white;
++ unsigned char mean_Y__G;
++ unsigned char mean_cb__B;
++ unsigned char mean_cr__R;
++#endif
++};
++
++/* autowhitebalance mean values */
++
++struct ci_sensor_awb_float_mean{
++ unsigned int white;
++ float mean_y;
++ float mean_cb;
++ float mean_cr;
++};
++
++/* autoexposure mean values */
++
++struct ci_sensor_aec_mean{
++ unsigned char occ;
++ unsigned char mean;
++ unsigned char max;
++ unsigned char min;
++};
++
++/* bad pixel element attribute */
++
++enum ci_sensor_bp_corr_attr{
++
++ /* hot pixel */
++ SENSOR_BP_HOT,
++
++ /* dead pixel */
++ SENSOR_BP_DEAD
++};
++
++/* table element */
++
++struct ci_sensor_bp_table_elem{
++
++ /* Bad Pixel vertical address */
++ unsigned short bp_ver_addr;
++
++ /* Bad Pixel horizontal address */
++ unsigned short bp_hor_addr;
++
++ /* Bad pixel type (dead or hot) */
++ enum ci_sensor_bp_corr_attr bp_type;
++};
++
++/* Bad Pixel table */
++
++struct ci_sensor_bp_table{
++
++ /* Number of detected bad pixel */
++ unsigned int bp_number;
++
++ /* Pointer to BP Table */
++ struct ci_sensor_bp_table_elem *bp_table_elem;
++
++ /* Number of Table elements */
++ unsigned int bp_table_elem_num;
++};
++
++#define SENSOR_CTRL_TYPE_INTEGER 1
++#define SENSOR_CTRL_TYPE_BOOLEAN 2
++#define SENSOR_CTRL_TYPE_MENU 3
++#define SENSOR_CTRL_TYPE_BUTTON 4
++#define SENSOR_CTRL_TYPE_INTEGER64 5
++#define SENSOR_CTRL_TYPE_CTRL_CLASS 6
++
++#define SENSOR_CTRL_CLASS_USER 0x00980000
++#define SENSOR_CID_BASE (SENSOR_CTRL_CLASS_USER | 0x900)
++#define SENSOR_CID_USER_BASE SENSOR_CID_BASE
++/* IDs reserved for driver specific controls */
++#define SENSOR_CID_PRIVATE_BASE 0x08000000
++
++#define SENSOR_CID_USER_CLASS (SENSOR_CTRL_CLASS_USER | 1)
++#define SENSOR_CID_BRIGHTNESS (SENSOR_CID_BASE+0)
++#define SENSOR_CID_CONTRAST (SENSOR_CID_BASE+1)
++#define SENSOR_CID_SATURATION (SENSOR_CID_BASE+2)
++#define SENSOR_CID_HUE (SENSOR_CID_BASE+3)
++#define SENSOR_CID_AUDIO_VOLUME (SENSOR_CID_BASE+5)
++#define SENSOR_CID_AUDIO_BALANCE (SENSOR_CID_BASE+6)
++#define SENSOR_CID_AUDIO_BASS (SENSOR_CID_BASE+7)
++#define SENSOR_CID_AUDIO_TREBLE (SENSOR_CID_BASE+8)
++#define SENSOR_CID_AUDIO_MUTE (SENSOR_CID_BASE+9)
++#define SENSOR_CID_AUDIO_LOUDNESS (SENSOR_CID_BASE+10)
++#define SENSOR_CID_BLACK_LEVEL (SENSOR_CID_BASE+11)
++#define SENSOR_CID_AUTO_WHITE_BALANCE (SENSOR_CID_BASE+12)
++#define SENSOR_CID_DO_WHITE_BALANCE (SENSOR_CID_BASE+13)
++#define SENSOR_CID_RED_BALANCE (SENSOR_CID_BASE+14)
++#define SENSOR_CID_BLUE_BALANCE (SENSOR_CID_BASE+15)
++#define SENSOR_CID_GAMMA (SENSOR_CID_BASE+16)
++#define SENSOR_CID_WHITENESS (SENSOR_CID_GAMMA)
++#define SENSOR_CID_EXPOSURE (SENSOR_CID_BASE+17)
++#define SENSOR_CID_AUTOGAIN (SENSOR_CID_BASE+18)
++#define SENSOR_CID_GAIN (SENSOR_CID_BASE+19)
++#define SENSOR_CID_HFLIP (SENSOR_CID_BASE+20)
++#define SENSOR_CID_VFLIP (SENSOR_CID_BASE+21)
++#define SENSOR_CID_HCENTER (SENSOR_CID_BASE+22)
++#define SENSOR_CID_VCENTER (SENSOR_CID_BASE+23)
++#define SENSOR_CID_LASTP1 (SENSOR_CID_BASE+24)
++
++struct ci_sensor_parm{
++ unsigned int index;
++ int value;
++ int max;
++ int min;
++ int info;
++ int type;
++ char name[32];
++ int step;
++ int def_value;
++ int flags;
++};
++
++#define MRV_GRAD_TBL_SIZE 8
++#define MRV_DATA_TBL_SIZE 289
++struct ci_sensor_ls_corr_config{
++ /* correction values of R color part */
++ unsigned short ls_rdata_tbl[MRV_DATA_TBL_SIZE];
++ /* correction values of G color part */
++ unsigned short ls_gdata_tbl[MRV_DATA_TBL_SIZE];
++ /* correction values of B color part */
++ unsigned short ls_bdata_tbl[MRV_DATA_TBL_SIZE];
++ /* multiplication factors of x direction */
++ unsigned short ls_xgrad_tbl[MRV_GRAD_TBL_SIZE];
++ /* multiplication factors of y direction */
++ unsigned short ls_ygrad_tbl[MRV_GRAD_TBL_SIZE];
++ /* sector sizes of x direction */
++ unsigned short ls_xsize_tbl[MRV_GRAD_TBL_SIZE];
++ /* sector sizes of y direction */
++ unsigned short ls_ysize_tbl[MRV_GRAD_TBL_SIZE];
++};
++
++struct ci_sensor_reg{
++ unsigned int addr;
++ unsigned int value;
++};
++
++struct ci_sensor_loc_dist{
++ float pca1_low_temp;
++ float pca1_high_temp;
++ float locus_distance;
++ float a2;
++ float a1;
++ float a0;
++};
++
++static inline int ci_sensor_res2size(unsigned int res, unsigned short *h_size,
++				     unsigned short *v_size)
++{
++	unsigned short hsize;
++	unsigned short vsize;
++	int err = 0;
++
++	switch (res) {
++	case SENSOR_RES_QQCIF:
++		hsize = QQCIF_SIZE_H;
++		vsize = QQCIF_SIZE_V;
++		break;
++	case SENSOR_RES_QQVGA:
++		hsize = QQVGA_SIZE_H;
++		vsize = QQVGA_SIZE_V;
++		break;
++	case SENSOR_RES_QCIF:
++		hsize = QCIF_SIZE_H;
++		vsize = QCIF_SIZE_V;
++		break;
++	case SENSOR_RES_QVGA:
++		hsize = QVGA_SIZE_H;
++		vsize = QVGA_SIZE_V;
++		break;
++	case SENSOR_RES_CIF:
++		hsize = CIF_SIZE_H;
++		vsize = CIF_SIZE_V;
++		break;
++	case SENSOR_RES_VGA:
++		hsize = VGA_SIZE_H;
++		vsize = VGA_SIZE_V;
++		break;
++	case SENSOR_RES_SVGA:
++		hsize = SVGA_SIZE_H;
++		vsize = SVGA_SIZE_V;
++		break;
++	case SENSOR_RES_XGA:
++		hsize = XGA_SIZE_H;
++		vsize = XGA_SIZE_V;
++		break;
++	case SENSOR_RES_XGA_PLUS:
++		hsize = XGA_PLUS_SIZE_H;
++		vsize = XGA_PLUS_SIZE_V;
++		break;
++	case SENSOR_RES_SXGA:
++		hsize = SXGA_SIZE_H;
++		vsize = SXGA_SIZE_V;
++		break;
++	case SENSOR_RES_UXGA:
++		hsize = UXGA_SIZE_H;
++		vsize = UXGA_SIZE_V;
++		break;
++	case SENSOR_RES_QXGA:
++		hsize = QXGA_SIZE_H;
++		vsize = QXGA_SIZE_V;
++		break;
++	case SENSOR_RES_QSXGA:
++		hsize = QSXGA_SIZE_H;
++		vsize = QSXGA_SIZE_V;
++		break;
++	case SENSOR_RES_QSXGA_PLUS:
++		hsize = QSXGA_PLUS_SIZE_H;
++		vsize = QSXGA_PLUS_SIZE_V;
++		break;
++	case SENSOR_RES_QSXGA_PLUS2:
++		hsize = QSXGA_PLUS2_SIZE_H;
++		vsize = QSXGA_PLUS2_SIZE_V;
++		break;
++	case SENSOR_RES_QSXGA_PLUS3:
++		hsize = QSXGA_PLUS3_SIZE_H;
++		vsize = QSXGA_PLUS3_SIZE_V;
++		break;
++	case SENSOR_RES_WQSXGA:
++		hsize = WQSXGA_SIZE_H;
++		vsize = WQSXGA_SIZE_V;
++		break;
++	case SENSOR_RES_QUXGA:
++		hsize = QUXGA_SIZE_H;
++		vsize = QUXGA_SIZE_V;
++		break;
++	case SENSOR_RES_WQUXGA:
++		hsize = WQUXGA_SIZE_H;
++		vsize = WQUXGA_SIZE_V;
++		break;
++	case SENSOR_RES_HXGA:
++		hsize = HXGA_SIZE_H;
++		vsize = HXGA_SIZE_V;
++		break;
++	case SENSOR_RES_RAWMAX:
++		hsize = RAWMAX_SIZE_H;
++		vsize = RAWMAX_SIZE_V;
++		break;
++	case SENSOR_RES_YUV_HMAX:
++		hsize = YUV_HMAX_SIZE_H;
++		vsize = YUV_HMAX_SIZE_V;
++		break;
++	case SENSOR_RES_YUV_VMAX:
++		hsize = YUV_VMAX_SIZE_H;
++		vsize = YUV_VMAX_SIZE_V;
++		break;
++	case SENSOR_RES_BP1:
++		hsize = BP1_SIZE_H;
++		vsize = BP1_SIZE_V;
++		break;
++	case SENSOR_RES_L_AFM:
++		hsize = L_AFM_SIZE_H;
++		vsize = L_AFM_SIZE_V;
++		break;
++	case SENSOR_RES_M_AFM:
++		hsize = M_AFM_SIZE_H;
++		vsize = M_AFM_SIZE_V;
++		break;
++	case SENSOR_RES_S_AFM:
++		hsize = S_AFM_SIZE_H;
++		vsize = S_AFM_SIZE_V;
++		break;
++
++	case SENSOR_RES_QXGA_PLUS:
++		hsize = QXGA_PLUS_SIZE_H;
++		vsize = QXGA_PLUS_SIZE_V;
++		break;
++
++	case SENSOR_RES_1080P:
++		hsize = RES_1080P_SIZE_H;
++		vsize = RES_1080P_SIZE_V;	/* was hard-coded 1080 */
++		break;
++
++	case SENSOR_RES_720P:
++		hsize = RES_720P_SIZE_H;
++		vsize = RES_720P_SIZE_V;
++		break;
++
++	case SENSOR_RES_VGA_PLUS:
++		hsize = VGA_PLUS_SIZE_H;
++		vsize = VGA_PLUS_SIZE_V;
++		break;
++
++	default:
++		hsize = 0;
++		vsize = 0;
++		err = -1;
++		printk(KERN_ERR "ci_sensor_res2size: Resolution 0x%08x "
++			"unknown\n", res);
++		break;
++	}
++
++	if (h_size != NULL)
++		*h_size = hsize;
++	if (v_size != NULL)
++		*v_size = vsize;
++
++	return err;
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/include/ci_va.h
+@@ -0,0 +1,42 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++/* for buffer sharing between CI and VA */
++#ifndef _CI_VA_H
++#define _CI_VA_H
++
++struct ci_frame_info {
++ unsigned long frame_id; /* in */
++ unsigned int width; /* out */
++ unsigned int height; /* out */
++ unsigned int stride; /* out */
++ unsigned int fourcc; /* out */
++ unsigned int offset; /* out */
++};
++
++#define ISP_IOCTL_GET_FRAME_INFO _IOWR('V', 192 + 5, struct ci_frame_info)
++
++#endif
++
+--- /dev/null
++++ b/drivers/staging/mrstci/include/v4l2_jpg_review.h
+@@ -0,0 +1,48 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef __V4L2_JPG_REVIEW_EXT_H
++#define __V4L2_JPG_REVIEW_EXT_H
++
++#include <linux/videodev2.h>
++
++/*
++ * Moorestown JPG image auto review structure and IOCTL.
++ */
++struct v4l2_jpg_review_buffer{
++ __u32 width; /* in: frame width */
++ __u32 height; /* in: frame height */
++ __u32 pix_fmt; /* in: frame fourcc */
++ __u32 jpg_frame; /* in: corresponding jpg frame id */
++ __u32 bytesperline; /* out: 0 if not used */
++ __u32 frame_size; /* out: frame size */
++ __u32 offset; /* out: mmap offset */
++};
++
++#define BASE_VIDIOC_PRIVATE_JPG_REVIEW (BASE_VIDIOC_PRIVATE + 10)
++
++#define VIDIOC_CREATE_JPG_REVIEW_BUF _IOWR('V', \
++ BASE_VIDIOC_PRIVATE_JPG_REVIEW + 1, \
++ struct v4l2_jpg_review_buffer)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstflash/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_FLASH
++ tristate "Moorestown flash"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support moorestown flash.
++
++ To compile this driver as a module, choose M here: the
++	  module will be called mrstflash.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstflash/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_FLASH) += mrstflash.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstflash/mrstflash.c
+@@ -0,0 +1,168 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging camera flash.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++#include <asm/mrst.h>
++
++static int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "Debug level (0-1)");
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for mrst flash");
++MODULE_LICENSE("GPL");
++
++static int flash_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_MRST_FLASH 8248
++ return v4l2_chip_ident_i2c_client(client, chip,
++ V4L2_IDENT_MRST_FLASH, 0);
++}
++
++static const struct v4l2_subdev_core_ops flash_core_ops = {
++ .g_chip_ident = flash_g_chip_ident,
++};
++static const struct v4l2_subdev_ops flash_ops = {
++ .core = &flash_core_ops,
++};
++
++static int flash_detect(struct i2c_client *client)
++{
++	struct i2c_adapter *adapter = client->adapter;
++	u8 pid;
++	u8 piddata[2] = {0x10, 0x18};
++
++	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++		return -ENODEV;
++
++	/* AAVA boards wire the flash to I2C bus 1, all others to bus 0 */
++	if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++		if (adapter->nr != 1)
++			return -ENODEV;
++		piddata[0] = 0x1A;	/* AAVA chip-id register / value */
++		piddata[1] = 0x11;
++	} else if (adapter->nr != 0) {
++		return -ENODEV;
++	}
++
++	pid = i2c_smbus_read_byte_data(client, piddata[0]);
++	if (pid == piddata[1]) {
++		printk(KERN_INFO "camera flash device found\n");
++		v4l_dbg(1, debug, client, "found camera flash device");
++	} else {
++		printk(KERN_ERR "no camera flash device found\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
++static int flash_probe(struct i2c_client *client,
++			const struct i2c_device_id *id)
++{
++	u8 pid, ver;
++	int ret = -1;
++	struct v4l2_subdev *sd;
++
++	v4l_info(client, "chip found @ 0x%x (%s)\n",
++		client->addr << 1, client->adapter->name);
++	/* probe the hardware first so nothing is leaked on failure */
++	ret = flash_detect(client);
++	if (ret)
++		return -ENODEV;
++	sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
++	if (sd == NULL)
++		return -ENOMEM;
++	v4l2_i2c_subdev_init(sd, client, &flash_ops);
++	if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++		/* Init MAX 8834 */
++		i2c_smbus_write_byte_data(client, 0x00, 0x70);
++		i2c_smbus_write_byte_data(client, 0x01, 0x80);
++		i2c_smbus_write_byte_data(client, 0x02, 0x80);
++		i2c_smbus_write_byte_data(client, 0x03, 0x00);
++		i2c_smbus_write_byte_data(client, 0x09, 0x7);
++	} else {
++		ver = i2c_smbus_read_byte_data(client, 0x50);
++		v4l_dbg(1, debug, client, "detect:CST from device is 0x%x", ver);
++		pid = i2c_smbus_read_byte_data(client, 0x20);
++		v4l_dbg(1, debug, client, "detect:MFPC from device is 0x%x", pid);
++		pid = i2c_smbus_read_byte_data(client, 0xA0);
++		v4l_dbg(1, debug, client, "detect:TCC from device is 0x%x", pid);
++		pid = i2c_smbus_read_byte_data(client, 0xB0);
++		v4l_dbg(1, debug, client, "detect:FCC from device is 0x%x", pid);
++		pid = i2c_smbus_read_byte_data(client, 0xC0);
++		v4l_dbg(1, debug, client, "detect:FDC from device is 0x%x", pid);
++		i2c_smbus_write_byte_data(client, 0xc0, 0xff); /*set FST to 1000us*/
++		pid = i2c_smbus_read_byte_data(client, 0xc0);
++		v4l_dbg(1, debug, client, "FDC from device is 0x%x", pid);
++	}
++
++	v4l_dbg(1, debug, client,
++		"successfully load camera flash device driver");
++	return 0;
++}
++
++static int flash_remove(struct i2c_client *client)
++{
++	struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++	v4l2_device_unregister_subdev(sd);
++	kfree(sd);	/* allocated in flash_probe() */
++	return 0;
++}
++
++static const struct i2c_device_id flash_id[] = {
++ {"mrst_camera_flash", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, flash_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "mrst_camera_flash",
++ .probe = flash_probe,
++ .remove = flash_remove,
++ .id_table = flash_id,
++};
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/Kconfig
+@@ -0,0 +1,10 @@
++config VIDEO_MRST_ISP
++	tristate "Moorestown Marvin - ISP Driver"
++ depends on VIDEO_V4L2
++ select VIDEOBUF_DMA_CONTIG
++ default y
++ ---help---
++ Say Y here if you want support for cameras based on the Intel Moorestown platform.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstisp.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/Makefile
+@@ -0,0 +1,7 @@
++mrstisp-objs := mrstisp_main.o mrstisp_hw.o mrstisp_isp.o \
++ mrstisp_dp.o mrstisp_mif.o mrstisp_jpe.o \
++ __mrstisp_private_ioctl.o
++
++obj-$(CONFIG_VIDEO_MRST_ISP) += mrstisp.o
++
++EXTRA_CFLAGS += -I$(src)/../include -I$(src)/include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/__mrstisp_private_ioctl.c
+@@ -0,0 +1,324 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++/*
++static u32 copy_sensor_config_from_user(struct ci_sensor_config *des,
++ struct ci_sensor_config *src)
++{
++ u32 ret = 0;
++ ret = copy_from_user((void *)des, (const void *)src,
++ sizeof(struct ci_sensor_config));
++ if (ret)
++ return -EFAULT;
++ return ret;
++}
++
++static u32 copy_sensor_caps_from_user(struct ci_sensor_caps *des,
++ struct ci_sensor_caps *src)
++{
++ u32 ret = 0;
++ ret = copy_from_user((void *)des, (const void *)src,
++ sizeof(struct ci_sensor_caps));
++ if (ret)
++ return -EFAULT;
++ return ret;
++}
++
++static u32 copy_isp_config_from_user(struct ci_isp_config *des,
++ struct ci_isp_config *src)
++{
++ int ret = 0;
++ ret = copy_from_user((void *)des, (const void *)src,
++ sizeof(struct ci_isp_config));
++ if (ret) {
++ eprintk("returning %d", ret);
++ return ret;
++ }
++ return 0;
++}
++*/
++
++static void print_bls_cfg(struct ci_isp_config *isp_cfg)
++{
++	struct ci_isp_bls_config *bls_cfg = &isp_cfg->bls_cfg;
++
++	dprintk(4, "print_bls_cfg:");
++	dprintk(4, "enable_automatic:%d", (bls_cfg->enable_automatic ? 1 : 0));
++	dprintk(4, "disable_h:%d", (bls_cfg->disable_h ? 1 : 0));
++	dprintk(4, "disable_v:%d", (bls_cfg->disable_v ? 1 : 0));
++	dprintk(4, "enable_window1:%d",
++		(bls_cfg->isp_bls_window1.enable_window ? 1 : 0));
++	dprintk(4, "start_h:%d", (int)bls_cfg->isp_bls_window1.start_h);
++	dprintk(4, "stop_h:%d", (int)bls_cfg->isp_bls_window1.stop_h);
++	dprintk(4, "start_v:%d", (int)bls_cfg->isp_bls_window1.start_v);
++	dprintk(4, "stop_v:%d", (int)bls_cfg->isp_bls_window1.stop_v);
++	dprintk(4, "enable_window2:%d",
++		(bls_cfg->isp_bls_window2.enable_window ? 1 : 0));
++	dprintk(4, "start_h:%d", (int)bls_cfg->isp_bls_window2.start_h);
++	dprintk(4, "stop_h:%d", (int)bls_cfg->isp_bls_window2.stop_h);
++	dprintk(4, "start_v:%d", (int)bls_cfg->isp_bls_window2.start_v);
++	dprintk(4, "stop_v:%d", (int)bls_cfg->isp_bls_window2.stop_v);
++	dprintk(4, "bls_samples:%d", (int)bls_cfg->bls_samples);
++	dprintk(4, "fixed_a:0x%02x", (int)bls_cfg->bls_subtraction.fixed_a);
++	dprintk(4, "fixed_b:0x%02x", (int)bls_cfg->bls_subtraction.fixed_b);
++	dprintk(4, "fixed_c:0x%02x", (int)bls_cfg->bls_subtraction.fixed_c);
++	dprintk(4, "fixed_d:0x%02x", (int)bls_cfg->bls_subtraction.fixed_d);
++	dprintk(4, "\n");
++}
++
++static int mrst_isp_set_cfg(struct file *file, void *priv,
++ struct ci_pl_system_config *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (arg == NULL) {
++ eprintk("NULL pointer of arg");
++ return 0;
++ }
++ mutex_lock(&isp->mutex);
++
++ /*
++ if (arg->isi_config != NULL) {
++ dprintk(2, "sync isi cfg");
++ copy_sensor_config_from_user(isp->sys_conf.isi_config,
++ arg->isi_config);
++ } else {
++ eprintk("NULL arg->isi_config");
++ ret = CI_STATUS_NULL_POINTER;
++ goto exit_unlock;
++ }
++
++ if (arg->isi_caps != NULL) {
++ dprintk(2, "sync isi caps");
++ copy_sensor_caps_from_user(isp->sys_conf.isi_caps,
++ arg->isi_caps);
++ } else {
++ eprintk("NULL arg->isi_caps");
++ ret = CI_STATUS_NULL_POINTER;
++ goto exit_unlock;
++ }
++ */
++
++ memcpy(&isp->sys_conf.isp_cfg, &arg->isp_cfg,
++ sizeof(struct ci_isp_config));
++
++ print_bls_cfg(&isp->sys_conf.isp_cfg);
++
++ dprintk(2, "gammagamma2 = %d", arg->isp_cfg.flags.gamma2);
++ dprintk(2, "gammagamma2 now = %d", isp->sys_conf.isp_cfg.flags.gamma2);
++ mutex_unlock(&isp->mutex);
++
++ isp->sys_conf.isp_hal_enable = 1;
++
++ DBG_leaving;
++ return 0;
++}
++
++/* for buffer sharing between CI and VA */
++static int mrst_isp_get_frame_info(struct file *file, void *priv,
++ struct ci_frame_info *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mutex_lock(&isp->mutex);
++
++ arg->width = isp->bufwidth;
++ arg->height = isp->bufheight;
++ arg->fourcc = isp->pixelformat;
++ arg->stride = isp->bufwidth; /* should be 64 bit alignment*/
++ arg->offset = arg->frame_id * PAGE_ALIGN(isp->frame_size);
++#if 0
++ if (isp->bufwidth == 640 && isp->bufheight == 480)
++ arg->offset = arg->frame_id * 0x71000;
++ else if (isp->bufwidth == 1280 && isp->bufheight == 720)
++ arg->offset = arg->frame_id * 0x152000;
++#endif
++
++
++ dprintk(2, "w=%d, h=%d, 4cc =%x, stride=%d, offset=%d,fsize=%d",
++ arg->width, arg->height, arg->fourcc, arg->stride,
++ arg->offset, isp->frame_size);
++
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_set_jpg_enc_ratio(struct file *file, void *priv, int *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ dprintk(2, "set jpg compression ratio is %d", *arg);
++
++ mutex_lock(&isp->mutex);
++ isp->sys_conf.isp_cfg.jpeg_enc_ratio = *arg;
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return 0;
++}
++
++int mrst_isp_get_isp_mem_info(struct file *file, void *priv,
++ struct ci_isp_mem_info *arg)
++{
++ u32 ret = 0;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mutex_lock(&isp->mutex);
++ arg->isp_bar0_pa = isp->mb0;
++ arg->isp_bar0_size = isp->mb0_size;
++ arg->isp_bar1_pa = isp->mb1;
++ arg->isp_bar1_size = isp->mb1_size;
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return ret;
++}
++
++int mrst_isp_create_jpg_review_frame(struct file *file, void *priv,
++ struct v4l2_jpg_review_buffer *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ u32 width = arg->width;
++ u32 height = arg->height;
++ u32 pix_fmt = arg->pix_fmt;
++ u32 jpg_frame = arg->jpg_frame;
++
++ static struct v4l2_jpg_review_buffer *jpg_review;
++
++ jpg_review = &isp->sys_conf.jpg_review;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (width > 640 || height > 480 || width < 32 || height < 16) {
++ eprintk("unsupported resolution: %d * %d", width, height);
++ return -EINVAL;
++ }
++
++ if (jpg_frame >= isp->num_frames) {
++ eprintk("error jpeg frame id");
++ return -1;
++ }
++
++ jpg_review->width = width;
++ jpg_review->height = height;
++ jpg_review->pix_fmt = pix_fmt;
++ jpg_review->jpg_frame = jpg_frame;
++
++ switch (arg->pix_fmt) {
++ case V4L2_PIX_FMT_YUV422P:
++ jpg_review->bytesperline = width * 2;
++ jpg_review->frame_size = width * height * 2;
++ break;
++ case V4L2_PIX_FMT_YUV420:
++ case V4L2_PIX_FMT_YVU420:
++ case V4L2_PIX_FMT_NV12:
++ jpg_review->bytesperline = width * 3/2;
++ jpg_review->frame_size = width * height * 3/2;
++ break;
++ default:
++ eprintk("unsupported pix_fmt: %d", arg->pix_fmt);
++ return -EINVAL;
++ }
++
++ jpg_review->offset = isp->mb1_size - 640*480*2;
++
++ isp->sys_conf.jpg_review_enable = 1; /* enable jpg review flag */
++
++ /* set user space data */
++ arg->bytesperline = jpg_review->bytesperline;
++ arg->frame_size = jpg_review->frame_size;
++ arg->offset = jpg_review->offset;
++
++ dprintk(1, "create jpg review frame successfully: "
++ "bytesperline = %d, frame_size = %d,"
++ " offset = %d\n", arg->bytesperline,
++ arg->frame_size, arg->offset);
++
++ DBG_leaving;
++ return 0;
++}
++
++/* isp private ioctl for libci */
++long mrst_isp_vidioc_default(struct file *file, void *fh,
++ int cmd, void *arg)
++{
++ void *priv = file->private_data;
++
++ DBG_entering;
++
++ switch (cmd) {
++ case VIDIOC_GET_ISP_MEM_INFO:
++ return mrst_isp_get_isp_mem_info(file, priv,
++ (struct ci_isp_mem_info *)arg);
++
++ case VIDIOC_SET_SYS_CFG:
++ return mrst_isp_set_cfg(file, priv,
++ (struct ci_pl_system_config *)arg);
++
++ case VIDIOC_SET_JPG_ENC_RATIO:
++ return mrst_isp_set_jpg_enc_ratio(file, priv, (int *)arg);
++
++ case ISP_IOCTL_GET_FRAME_INFO:
++ return mrst_isp_get_frame_info(file, priv,
++ (struct ci_frame_info *)arg);
++
++ case VIDIOC_CREATE_JPG_REVIEW_BUF:
++ return mrst_isp_create_jpg_review_frame(file, priv,
++ (struct v4l2_jpg_review_buffer *)arg);
++ default:
++ v4l_print_ioctl("lnw_isp", cmd);
++ dprintk(2, "VIDIOC_SET_SYS_CFG = %x", VIDIOC_SET_SYS_CFG);
++ return -EINVAL;
++ }
++
++ DBG_leaving;
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/def.h
+@@ -0,0 +1,122 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _DEF_H
++#define _DEF_H
++
++#include <linux/stddef.h>
++
++#ifndef ON
++/* all bits to '1' but prevent "shift overflow" warning */
++#define ON -1
++#endif
++#ifndef OFF
++#define OFF 0
++#endif
++
++#ifndef ENABLE
++/* all bits to '1' but prevent "shift overflow" warning */
++#define ENABLE -1
++#endif
++#ifndef DISABLE
++#define DISABLE 0
++#endif
++
++/* this is crop flag, to enable crop, define it to be 1*/
++#define crop_flag 0
++
++/* this has to be 0, if clauses rely on it */
++#define CI_STATUS_SUCCESS 0
++/* general failure */
++#define CI_STATUS_FAILURE 1
++/* feature not supported */
++#define CI_STATUS_NOTSUPP 2
++/* there's already something going on... */
++#define CI_STATUS_BUSY 3
++/* operation canceled */
++#define CI_STATUS_CANCELED 4
++/* out of memory */
++#define CI_STATUS_OUTOFMEM 5
++/* parameter/value out of range */
++#define CI_STATUS_OUTOFRANGE 6
++/* feature/subsystem is in idle state */
++#define CI_STATUS_IDLE 7
++/* handle is wrong */
++#define CI_STATUS_WRONG_HANDLE 8
++/* the/one/all parameter(s) is a(are) NULL pointer(s) */
++#define CI_STATUS_NULL_POINTER 9
++/* profile not available */
++#define CI_STATUS_NOTAVAILABLE 10
++
++#ifndef UNUSED_PARAM
++#define UNUSED_PARAM(x) ((x) = (x))
++#endif
++
++/* to avoid Lint warnings, use it within const context */
++
++#ifndef UNUSED_PARAM1
++#define UNUSED_PARAM1(x)
++#endif
++
++/*
++ * documentation keywords for pointer arguments, to tell the direction of the
++ * passing
++ */
++
++#ifndef OUT
++/* pointer content is expected to be filled by called function */
++#define OUT
++#endif
++#ifndef IN
++/* pointer content contains parameters from the caller */
++#define IN
++#endif
++#ifndef INOUT
++/* content is expected to be read and changed */
++#define INOUT
++#endif
++
++/* some useful macros */
++
++#ifndef MIN
++#define MIN(x, y) ((x) < (y) ? (x) : (y))
++#endif
++
++#ifndef MAX
++#define MAX(x, y) ((x) > (y) ? (x) : (y))
++#endif
++
++#ifndef ABS
++#define ABS(val) ((val) < 0 ? -(val) : (val))
++#endif
++
++/*
++ * converts a term to a string (two macros are required, never use _VAL2STR()
++ * directly)
++ */
++#define _VAL2STR(x) #x
++#define VAL2STR(x) _VAL2STR(x)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp.h
+@@ -0,0 +1,279 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _MRSTISP_H
++#define _MRSTISP_H
++
++#define INTEL_MAJ_VER 0
++#define INTEL_MIN_VER 5
++#define INTEL_PATCH_VER 0
++#define DRIVER_NAME "lnw isp"
++#define VID_HARDWARE_INTEL 100
++
++#define INTEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
++#define MRST_ISP_REG_MEMORY_MAP 0xFF0E0000
++
++/* self path maximum width/height, VGA */
++#define INTEL_MAX_WIDTH 640
++#define INTEL_MAX_HEIGHT 480
++
++#define INTEL_MIN_WIDTH 32
++#define INTEL_MIN_HEIGHT 16
++
++/* main path maximum widh/height, 5M */
++#define INTEL_MAX_WIDTH_MP 2600
++#define INTEL_MAX_HEIGHT_MP 2048
++
++/* image size returned by the driver */
++#define INTEL_IMAGE_WIDTH 640
++#define INTEL_IMAGE_HEIGHT 480
++
++/* Default capture queue buffers. */
++#define INTEL_CAPTURE_BUFFERS 3
++
++/* Default capture buffer size. */
++#define INTEL_CAPTURE_BUFSIZE PAGE_ALIGN(INTEL_MAX_WIDTH * INTEL_MAX_HEIGHT * 2)
++#define INTEL_IMAGE_BUFSIEZE (INTEL_IMAGE_WIDTH * INTEL_IMAGE_HEIGHT * 2)
++
++#define MAX_KMALLOC_MEM (4*1024*1024)
++
++#define MEM_SNAPSHOT_MAX_SIZE (1*1024*1024)
++
++#include <media/v4l2-device.h>
++
++enum frame_state {
++ S_UNUSED = 0, /* unused */
++ S_QUEUED, /* ready to capture */
++ S_GRABBING, /* in the process of being captured */
++ S_DONE, /* finished grabbing, but not been synced yet */
++ S_ERROR, /* something bad happened while capturing */
++};
++
++struct frame_info {
++ enum frame_state state;
++ u32 flags;
++};
++
++struct fifo {
++ int front;
++ int back;
++ int data[INTEL_CAPTURE_BUFFERS + 1];
++ struct frame_info info[INTEL_CAPTURE_BUFFERS + 1];
++};
++
++enum mrst_isp_state {
++ S_NOTREADY, /* Not yet initialized */
++ S_IDLE, /* Just hanging around */
++ S_FLAKED, /* Some sort of problem */
++ S_STREAMING /* Streaming data */
++};
++
++struct mrst_isp_buffer {
++ struct videobuf_buffer vb;
++ int fmt_useless;
++};
++
++struct mrst_isp_device {
++ struct v4l2_device v4l2_dev;
++ /* v4l2 device handler */
++ struct video_device *vdev;
++
++ /* locks this structure */
++ struct mutex mutex;
++
++ /* if the port is open or not */
++ int open;
++
++ /* pci information */
++ struct pci_dev *pci_dev;
++ unsigned long mb0;
++ unsigned long mb0_size;
++ unsigned char *regs;
++ unsigned long mb1;
++ unsigned long mb1_size;
++ unsigned char *mb1_va;
++ unsigned short vendorID;
++ unsigned short deviceID;
++ unsigned char revision;
++
++ /* subdev */
++ struct v4l2_subdev *sensor_soc;
++ int sensor_soc_index;
++ struct v4l2_subdev *sensor_raw;
++ int sensor_raw_index;
++ struct v4l2_subdev *sensor_curr;
++ struct v4l2_subdev *motor;
++ struct v4l2_subdev *flash;
++ struct i2c_adapter *adapter_sensor;
++ struct i2c_adapter *adapter_flash;
++
++ int streaming;
++ int buffer_required;
++
++ /* interrupt */
++ unsigned char int_enable;
++ unsigned long int_flag;
++ unsigned long interrupt_count;
++
++ /* frame management */
++
++ /* allocated memory for km_mmap */
++ char *fbuffer;
++
++ /* virtual address of cap buf */
++ char *capbuf;
++
++ /* physcial address of cap buf */
++ u32 capbuf_pa;
++
++ struct fifo frame_queue;
++
++ /* current capture frame number */
++ int cap_frame;
++ /* total frames */
++ int num_frames;
++
++ u32 field_count;
++ u32 pixelformat;
++ u16 depth;
++ u32 bufwidth;
++ u32 bufheight;
++ u32 frame_size;
++ u32 frame_size_used;
++
++
++ enum mrst_isp_state state;
++
++ /* active mappings*/
++ int vmas;
++
++ /* isp system configuration */
++ struct ci_pl_system_config sys_conf;
++
++ struct completion jpe_complete;
++ struct completion mi_complete;
++ int irq_stat;
++
++ spinlock_t lock;
++ spinlock_t qlock;
++ struct videobuf_buffer *active;
++ struct videobuf_buffer *next;
++ struct list_head capture;
++ u32 streambufs;
++ u32 stopbuf;
++ u32 stopflag;
++};
++
++struct mrst_isp_fh {
++ struct mrst_isp_device *dev;
++ struct videobuf_queue vb_q;
++ u32 qbuf_flag;
++};
++
++/* viewfinder mode mask */
++#define VFFLAG_MODE_MASK 0x0000000F
++/*
++ * play on complete LCD, but do not use upscaler
++ * or small camera resolutions, the picture will be
++ * played in the upper left corner)
++ */
++#define VFFLAG_MODE_FULLLCD_DSONLY 0x00000000
++/* display on complete LCD, use upscaler if necessary */
++#define VFFLAG_MODE_FULLLCD_USDS 0x00000001
++/* display full camera picture with black borders on top and bottom */
++#define VFFLAG_MODE_LETTERBOX 0x00000002
++/* use the values given by the user (x, y, w, h, keep_aspect) */
++#define VFFLAG_MODE_USER 0x00000003
++/* hardware RGB conversion */
++#define VFFLAG_HWRGB 0x00000010
++/* horizontal mirror */
++#define VFFLAG_MIRROR 0x00000020
++/* use the main path for viewfinding too. */
++#define VFFLAG_USE_MAINPATH 0x00000040
++/* vertical flipping (mirror) (MARVIN_FEATURE_MI_V3) */
++#define VFFLAG_V_FLIP 0x00000100
++/* rotation 90 degree counterclockwise (left) (MARVIN_FEATURE_MI_V3) */
++#define VFFLAG_ROT90_CCW 0x00000200
++
++/* abbreviations for local debug control ( level | module ) */
++#define DERR (DBG_ERR | DBG_MRV)
++#define DWARN (DBG_WARN | DBG_MRV)
++#define DINFO (DBG_INFO | DBG_MRV)
++
++struct ci_isp_rect {
++ /* zero based x coordinate of the upper left edge of the
++ * rectangle (in pixels)
++ */
++ int x;
++ /* zero based y coordinate of the upper left edge of the
++ * rectangle (in pixels)
++ */
++ int y;
++ /* width of the rectangle in pixels */
++ int w;
++ /* height of the rectangle in pixels */
++ int h;
++};
++
++/* the address/size of one region */
++struct ci_frame_region {
++ unsigned char *phy_addr;
++ unsigned int size;
++};
++
++struct ci_frame_addr {
++ /*
++ * how many regions of the frame, a region is
++ * pages with contiguous physical address
++ */
++ int num_of_regs;
++ struct ci_frame_region *regs;
++};
++
++/* type in mrst_camer*/
++#define MRST_CAMERA_NONE -1
++#define MRST_CAMERA_SOC 0
++#define MRST_CAMERA_RAW 1
++
++struct mrst_camera {
++ int type;
++ char *name;
++ u8 sensor_addr;
++ char *motor_name;
++ u8 motor_addr;
++};
++
++#define MRST_I2C_BUS_FLASH 0
++#define MRST_I2C_BUS_SENSOR 1
++
++long mrst_isp_vidioc_default(struct file *file, void *fh,
++ int cmd, void *arg);
++
++void mrst_timer_start(void);
++
++void mrst_timer_stop(void);
++
++unsigned long mrst_get_micro_sec(void);
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp_dp.h
+@@ -0,0 +1,317 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++
++#ifndef _MRV_SLS_H
++#define _MRV_SLS_H
++
++/*
++ * simplified datapath and output formatter/resizer adjustment
++ * can be used to setup the main and self datapathes in a convenient way.
++ */
++
++/* data path descriptor */
++struct ci_isp_datapath_desc {
++ /* width of output picture (after scaling) in pixels */
++ u16 out_w;
++ /* height of output picture (after scaling) in pixels */
++ u16 out_h;
++ /* how to configure the datapath. An or'ed combination of the */
++ u32 flags;
++ /* MRV_DPD_xxx defines */
++};
++
++/*
++ * possible Frags for the Datapath descriptor general features
++ */
++
++/* disables the datapath */
++#define CI_ISP_DPD_DISABLE 0x00000000
++/* enables the datapath in general */
++#define CI_ISP_DPD_ENABLE 0x00000001
++/*
++ * the out_w and out_h members will be ignored. and the
++ * resize module of the datapath is switched off. Note that
++ * the resize module is also needed for croma subsampling
++ */
++#define CI_ISP_DPD_NORESIZE 0x00000002
++/*
++ * The input picture from ISP is being cropped to match the
++ * aspect ratio of the desired output. If this flag is not
++ * set, different scaling factors for X and Y axis
++ * may be used.
++ */
++#define CI_ISP_DPD_KEEPRATIO 0x00000004
++/* mirror the output picture (only applicable for self path) data path mode */
++#define CI_ISP_DPD_MIRROR 0x00000008
++/* mode mask (3 bits) */
++#define CI_ISP_DPD_MODE_MASK 0x00000070
++/* 16(12) bit raw data from ISP block (only applicable for main path) */
++#define CI_ISP_DPD_MODE_ISPRAW_16B 0x00000000
++/* separated Y, Cb and Cr data from ISP block */
++#define CI_ISP_DPD_MODE_ISPYC 0x00000010
++/* raw data from ISP block (only applicable for main path) */
++#define CI_ISP_DPD_MODE_ISPRAW 0x00000020
++/* Jpeg encoding with data from ISP block (only applicable for main path) */
++#define CI_ISP_DPD_MODE_ISPJPEG 0x00000030
++/*
++ * YCbCr data from system memory directly routed to the main/self
++ * path (DMA-read, only applicable for self path)
++ */
++#define CI_ISP_DPD_MODE_DMAYC_DIRECT 0x00000040
++/*
++ * YCbCr data from system memory routed through the main processing
++ * chain substituting ISP data (DMA-read)
++ */
++#define CI_ISP_DPD_MODE_DMAYC_ISP 0x00000050
++/*
++ * YCbCr data from system memory directly routed to the jpeg encoder
++ * (DMA-read, R2B-bufferless encoding, only applicable for main path)
++ */
++#define CI_ISP_DPD_MODE_DMAJPEG_DIRECT 0x00000060
++/*
++ * Jpeg encoding with YCbCr data from system memory routed through the
++ * main processing chain substituting ISP data (DMA-read, only applicable
++ * for main path) top blackline support
++ */
++#define CI_ISP_DPD_MODE_DMAJPEG_ISP 0x00000070
++
++/*
++ * If set, blacklines at the top of the sensor are
++ * shown in the output (if there are any). Note that this
++ * will move the window of interest out of the center
++ * to the upper border, so especially at configurations
++ * with digital zoom, the field of sight is not centered
++ * on the optical axis anymore. If the sensor does not deliver
++ * blacklines, setting this bit has no effect.
++ * additional chroma subsampling (CSS) amount and sample position
++ */
++#define CI_ISP_DPD_BLACKLINES_TOP 0x00000080
++/* horizontal subsampling */
++#define CI_ISP_DPD_CSS_H_MASK 0x00000700
++/* no horizontal subsampling */
++#define CI_ISP_DPD_CSS_H_OFF 0x00000000
++/* horizontal subsampling by 2 */
++#define CI_ISP_DPD_CSS_H2 0x00000100
++/* horizontal subsampling by 4 */
++#define CI_ISP_DPD_CSS_H4 0x00000200
++/* 2 times horizontal upsampling */
++#define CI_ISP_DPD_CSS_HUP2 0x00000500
++/* 4 times horizontal upsampling */
++#define CI_ISP_DPD_CSS_HUP4 0x00000600
++/* vertical subsampling */
++#define CI_ISP_DPD_CSS_V_MASK 0x00003800
++/* no vertical subsampling */
++#define CI_ISP_DPD_CSS_V_OFF 0x00000000
++/* vertical subsampling by 2 */
++#define CI_ISP_DPD_CSS_V2 0x00000800
++/* vertical subsampling by 4 */
++#define CI_ISP_DPD_CSS_V4 0x00001000
++/* 2 times vertical upsampling */
++#define CI_ISP_DPD_CSS_VUP2 0x00002800
++/* 4 times vertical upsampling */
++#define CI_ISP_DPD_CSS_VUP4 0x00003000
++/* apply horizontal chroma phase shift by half the sample distance */
++#define CI_ISP_DPD_CSS_HSHIFT 0x00004000
++/* apply vertical chroma phase shift by half the sample distance */
++#define CI_ISP_DPD_CSS_VSHIFT 0x00008000
++
++/*
++ * Hardware RGB conversion (currly, only supported for self path)
++ * output mode mask (3 bits, not all combination used yet)
++ */
++#define CI_ISP_DPD_HWRGB_MASK 0x00070000
++/* no rgb conversion */
++#define CI_ISP_DPD_HWRGB_OFF 0x00000000
++/* conversion to RGB565 */
++#define CI_ISP_DPD_HWRGB_565 0x00010000
++/* conversion to RGB666 */
++#define CI_ISP_DPD_HWRGB_666 0x00020000
++/* conversion to RGB888 */
++#define CI_ISP_DPD_HWRGB_888 0x00030000
++
++#define CI_ISP_DPD_YUV_420 0x00040000
++#define CI_ISP_DPD_YUV_422 0x00050000
++#define CI_ISP_DPD_YUV_NV12 0x00060000
++#define CI_ISP_DPD_YUV_YUYV 0x00070000
++/*
++ * DMA-read feature input format. (depends on chip derivative if
++ * supported for both pathes, self or not at all)
++ */
++
++/* input mode mask (2 bits) */
++#define CI_ISP_DPD_DMA_IN_MASK 0x00180000
++/* input is YCbCr 422 */
++#define CI_ISP_DPD_DMA_IN_422 0x00000000
++/* input is YCbCr 444 */
++#define CI_ISP_DPD_DMA_IN_444 0x00080000
++/* input is YCbCr 420 */
++#define CI_ISP_DPD_DMA_IN_420 0x00100000
++/* input is YCbCr 411 */
++#define CI_ISP_DPD_DMA_IN_411 0x00180000
++
++/*
++ * Upscaling interpolation mode (tells how newly created pixels
++ * will be interpolated from the existing ones)
++ * Upscaling interpolation mode mask (2 bits, not all combinations
++ * used yet)
++ */
++#define CI_ISP_DPD_UPSCALE_MASK 0x00600000
++/* smooth edges, linear interpolation */
++#define CI_ISP_DPD_UPSCALE_SMOOTH_LIN 0x00000000
++/*
++ * sharp edges, no interpolation, just duplicate pixels, creates
++ * the typical 'blocky' effect.
++ */
++#define CI_ISP_DPD_UPSCALE_SHARP 0x00200000
++
++/*
++ * additional luminance phase shift
++ * apply horizontal luminance phase shift by half the sample distance
++ */
++#define CI_ISP_DPD_LUMA_HSHIFT 0x00800000
++/* apply vertical luminance phase shift by half the sample distance */
++#define CI_ISP_DPD_LUMA_VSHIFT 0x01000000
++
++/*
++ * picture flipping and rotation
++ * Note that when combining the flags, the rotation is applied first.
++ * This enables to configure all 8 possible orientations
++ */
++
++/* horizontal flipping - same as mirroring */
++#define CI_ISP_DPD_H_FLIP CI_ISP_DPD_MIRROR
++/* vertical flipping */
++#define CI_ISP_DPD_V_FLIP 0x02000000
++/* rotation 90 degrees counter-clockwise */
++#define CI_ISP_DPD_90DEG_CCW 0x04000000
++
++/*
++ * switch to differentiate between full range of values for YCbCr (0-255)
++ * and restricted range (16-235 for Y) (16-240 for CbCr)'
++ * if set leads to unrestricted range (0-255) for YCbCr
++ * package length of a system interface transfer
++ */
++#define CI_ISP_DPD_YCBCREXT 0x10000000
++/* burst mask (2 bits) */
++#define CI_ISP_DPD_BURST_MASK 0x60000000
++/* AHB 4 beat burst */
++#define CI_ISP_DPD_BURST_4 0x00000000
++/* AHB 8 beat burst */
++#define CI_ISP_DPD_BURST_8 0x20000000
++/* AHB 16 beat burst */
++#define CI_ISP_DPD_BURST_16 0x40000000
++
++/* configures main and self datapathes and scaler for data coming from the
++ * ISP */
++
++
++int ci_datapath_isp(const struct ci_pl_system_config *sys_conf,
++ const struct ci_sensor_config *isi_config,
++ const struct ci_isp_datapath_desc *main,
++ const struct ci_isp_datapath_desc *self, int zoom);
++
++
++/*
++ * Coordinate transformations: The pixel data coming from the sensor passes
++ * through the ISP output formatter where they may be cropped and through
++ * the main path scaler where they may be stretched and/or squeezed. Thus,
++ * the coordinate systems of input and output are different, but somewhat
++ * related. Further, we can do digital zoom, which adds a third coordinate
++ * system: the virtual input (e.g. a cropped sensor frame zoomed in to the
++ * full sensor frame size. Following routines are intended to transform
++ * pixel resp. window positions from one coordinate systen to another.
++ * Folloin coordinate systems exist: Cam : original frame coming from the
++ * camera VCam : virtual camera; a system in which a cropped original
++ * camera frame is up-scaled to the camera frame size. If no digital zoom
++ * is to be done, Cam and VCam are identical. Main : output of main path
++ * Self : output of self path
++ */
++/* coordinate transformation from (real) camera coordinate system to main
++ * path output */
++int ci_transform_cam2_main(
++ const struct ci_isp_window *wnd_in,
++ struct ci_isp_window *wnd_out
++);
++/* coordinate transformation from (real) camera coordinate system to self
++ * path output */
++int ci_transform_cam2_self(
++ const struct ci_isp_window *wnd_in,
++ struct ci_isp_window *wnd_out
++);
++/* coordinate transformation from virtual camera to real camera coordinate
++ * system */
++void ci_transform_vcam2_cam(
++ const struct ci_sensor_config *isi_sensor_config,
++ const struct ci_isp_window *wnd_in,
++ struct ci_isp_window *wnd_out
++);
++
++/*
++ * Still image snapshot support
++ * The routine re-configures the main path for taking the snapshot. On
++ * successful return, the snapshot has been stored in the given memory
++ * location. Note that the settings of MARVIN will not be restored.
++ */
++
++/*
++ * take the desired snapshot. The type of snapshot (YUV, RAW or JPEG) is
++ * determined by the datapath selection bits in ci_isp_datapath_desc::flags.
++ * Note that the MARVIN configuration may be changed but will not be
++ * restored after the snapshot.
++ */
++int ci_do_snapshot(
++ const struct ci_sensor_config *isi_sensor_config,
++ const struct ci_isp_datapath_desc *main,
++ int zoom,
++ u8 jpeg_compression,
++ struct ci_isp_mi_path_conf *isp_mi_path_conf
++);
++
++
++/* Initialization of the Bad Pixel Detection and Correction */
++int ci_bp_init(
++ const struct ci_isp_bp_corr_config *bp_corr_config,
++ const struct ci_isp_bp_det_config *bp_det_config
++);
++/* Bad Pixel Correction */
++int ci_bp_correction(void);
++/* Disable Bad Pixel Correction and dectection */
++int ci_bp_end(const struct ci_isp_bp_corr_config *bp_corr_config);
++
++/* Capture a whole JPEG snapshot */
++u32 ci_jpe_capture(struct mrst_isp_device *intel,
++ enum ci_isp_conf_update_time update_time);
++int ci_jpe_encode(struct mrst_isp_device *intel,
++ enum ci_isp_conf_update_time update_time,
++ enum ci_isp_jpe_enc_mode mrv_jpe_encMode);
++/* Encode motion JPEG */
++int ci_isp_jpe_enc_motion(enum ci_isp_jpe_enc_mode jpe_enc_mode,
++ u16 frames_num, u32 *byte_count);
++
++void ci_isp_set_yc_mode(void);
++
++/* _MRV_SLS_H */
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp_hw.h
+@@ -0,0 +1,245 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++
++#ifndef _MRV_H
++#define _MRV_H
++
++/* move structure definination to ci_isp_common.h */
++#include "ci_isp_common.h"
++
++/*
++ * FUNCTIONS
++ */
++
++/* sensor struct related functions */
++int ci_isp_bp_write_table(
++ const struct ci_sensor_bp_table *bp_table
++);
++
++int ci_isp_bp_read_table(struct ci_sensor_bp_table *bp_table);
++
++enum ci_isp_path ci_isp_select_path(
++ const struct ci_sensor_config *isi_cfg,
++ u8 *words_per_pixel
++);
++
++int ci_isp_set_input_aquisition(
++ const struct ci_sensor_config *isi_cfg
++);
++
++void ci_isp_set_gamma(
++ const struct ci_sensor_gamma_curve *r,
++ const struct ci_sensor_gamma_curve *g,
++ const struct ci_sensor_gamma_curve *b
++);
++
++int ci_isp_get_wb_meas(struct ci_sensor_awb_mean *awb_mean);
++
++int ci_isp_set_bp_correction(
++ const struct ci_isp_bp_corr_config *bp_corr_config
++);
++
++int ci_isp_set_bp_detection(
++ const struct ci_isp_bp_det_config *bp_det_config
++);
++
++
++int ci_isp_clear_bp_int(void);
++
++u32 ci_isp_get_frame_end_irq_mask_dma(void);
++
++u32 ci_isp_get_frame_end_irq_mask_isp(void);
++int ci_isp_wait_for_frame_end(struct mrst_isp_device *intel);
++
++void ci_isp_set_output_formatter(
++ const struct ci_isp_window *window,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_is_set_config(const struct ci_isp_is_config *is_config);
++
++int ci_isp_set_data_path(
++ enum ci_isp_ycs_chn_mode ycs_chn_mode,
++ enum ci_isp_dp_switch dp_switch
++);
++
++void ci_isp_res_set_main_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut
++);
++
++void ci_isp_res_get_main_resize(struct ci_isp_scale *scale);
++
++void ci_isp_res_set_self_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut
++);
++
++void ci_isp_res_get_self_resize(struct ci_isp_scale *scale);
++
++int ci_isp_mif_set_main_buffer(
++ const struct ci_isp_mi_path_conf *mrv_mi_path_conf,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_mif_set_self_buffer(
++ const struct ci_isp_mi_path_conf *mrv_mi_path_conf,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_mif_set_dma_buffer(
++ const struct ci_isp_mi_path_conf *mrv_mi_path_conf
++);
++
++void ci_isp_mif_disable_all_paths(int perform_wait_for_frame_end);
++
++int ci_isp_mif_get_main_buffer(
++ struct ci_isp_mi_path_conf *mrv_mi_path_conf
++);
++
++int ci_isp_mif_get_self_buffer(
++ struct ci_isp_mi_path_conf *mrv_mi_path_conf
++);
++
++int ci_isp_mif_set_path_and_orientation(
++ const struct ci_isp_mi_ctrl *mrv_mi_ctrl
++);
++
++int ci_isp_mif_get_path_and_orientation(
++ struct ci_isp_mi_ctrl *mrv_mi_ctrl
++);
++
++int ci_isp_mif_set_configuration(
++ const struct ci_isp_mi_ctrl *mrv_mi_ctrl,
++ const struct ci_isp_mi_path_conf *mrv_mi_mp_path_conf,
++ const struct ci_isp_mi_path_conf *mrv_mi_sp_path_conf,
++ const struct ci_isp_mi_dma_conf *mrv_mi_dma_conf
++);
++
++int ci_isp_mif_set_dma_config(
++ const struct ci_isp_mi_dma_conf *mrv_mi_dma_conf
++);
++
++int ci_isp_mif_get_pixel_per32_bit_of_line(
++ u8 *pixel_per32_bit,
++ enum ci_isp_mif_col_format mrv_mif_sp_format,
++ enum ci_isp_mif_pic_form mrv_mif_pic_form,
++ int luminance_buffer
++);
++
++void ci_isp_set_ext_ycmode(void);
++
++int ci_isp_set_mipi_smia(u32 mode);
++
++void ci_isp_sml_out_set_path(enum ci_isp_data_path main_path);
++
++void ci_isp_set_dma_read_mode(
++ enum ci_isp_dma_read_mode mode,
++ enum ci_isp_conf_update_time update_time
++);
++
++u32 ci_isp_mif_get_byte_cnt(void);
++
++void ci_isp_start(
++ u16 number_of_frames,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_jpe_init_ex(
++ u16 hsize,
++ u16 vsize,
++ u8 compression_ratio,
++ u8 jpe_scale
++);
++
++void ci_isp_reset_interrupt_status(void);
++
++void ci_isp_get_output_formatter(struct ci_isp_window *window);
++
++int ci_isp_set_auto_focus(const struct ci_isp_af_config *af_config);
++
++void ci_isp_get_auto_focus_meas(struct ci_isp_af_meas *af_meas);
++
++int ci_isp_chk_bp_int_stat(void);
++
++int ci_isp_bls_get_measured_values(
++ struct ci_isp_bls_measured *bls_measured
++);
++
++int ci_isp_get_wb_measConfig(
++ struct ci_isp_wb_meas_config *wb_meas_config
++);
++
++void ci_isp_col_set_color_processing(
++ const struct ci_isp_color_settings *col
++);
++
++int ci_isp_ie_set_config(const struct ci_isp_ie_config *ie_config);
++
++int ci_isp_set_ls_correction(struct ci_sensor_ls_corr_config *ls_corr_config);
++
++int ci_isp_ls_correction_on_off(int ls_corr_on_off);
++
++int ci_isp_activate_filter(int activate_filter);
++
++int ci_isp_set_filter_params(u8 noise_reduc_level, u8 sharp_level);
++
++int ci_isp_bls_set_config(const struct ci_isp_bls_config *bls_config);
++
++int ci_isp_set_wb_mode(enum ci_isp_awb_mode wb_mode);
++
++int ci_isp_set_wb_meas_config(
++ const struct ci_isp_wb_meas_config *wb_meas_config
++);
++
++int ci_isp_set_wb_auto_hw_config(
++ const struct ci_isp_wb_auto_hw_config *wb_auto_hw_config
++);
++
++void ci_isp_init(void);
++void ci_isp_off(void);
++
++void ci_isp_stop(enum ci_isp_conf_update_time update_time);
++
++void ci_isp_mif_reset_offsets(enum ci_isp_conf_update_time update_time);
++
++int ci_isp_get_wb_measConfig(
++ struct ci_isp_wb_meas_config *wb_meas_config
++);
++
++void ci_isp_set_gamma2(const struct ci_isp_gamma_out_curve *gamma);
++
++void ci_isp_set_demosaic(
++ enum ci_isp_demosaic_mode demosaic_mode,
++ u8 demosaic_th
++);
++
++void mrst_isp_disable_interrupt(struct mrst_isp_device *isp);
++
++void mrst_isp_enable_interrupt(struct mrst_isp_device *isp);
++
++/* #ifndef _MRV_H */
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp_isp.h
+@@ -0,0 +1,42 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++#define MRV_MEAN_LUMA_ARR_SIZE_COL 5
++#define MRV_MEAN_LUMA_ARR_SIZE_ROW 5
++#define MRV_MEAN_LUMA_ARR_SIZE \
++ (MRV_MEAN_LUMA_ARR_SIZE_COL*MRV_MEAN_LUMA_ARR_SIZE_ROW)
++int ci_isp_meas_exposure_initialize_module(void);
++
++int ci_isp_meas_exposure_set_config(const struct ci_isp_window *wnd,
++ const struct ci_isp_exp_ctrl *isp_exp_ctrl);
++int ci_isp_meas_exposure_get_config(struct ci_isp_window *wnd,
++ struct ci_isp_exp_ctrl *isp_exp_ctrl);
++
++int ci_isp_meas_exposure_get_mean_luma_values(
++ struct ci_isp_mean_luma *mrv_mean_luma);
++int ci_isp_meas_exposure_get_mean_luma_by_num(
++ u8 BlockNum, u8 *luma);
++int ci_isp_meas_exposure_get_mean_luma_by_pos(
++ u8 XPos, u8 YPos, u8 *luma);
++int mrst_isp_set_color_conversion_ex(void);
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp_jpe.h
+@@ -0,0 +1,426 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp.h"
++
++/* DC luma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_dc_luma_table_annex_k[] = {
++ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b
++};
++
++/* DC chroma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_dc_chroma_table_annex_k[] = {
++ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b
++};
++
++/* AC luma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_ac_luma_table_annex_k[] = {
++ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
++ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
++ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
++ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
++ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
++ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
++ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
++ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
++ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
++ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
++ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
++ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
++ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
++ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
++ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
++ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
++ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
++ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
++ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
++ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
++ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
++ 0xf9, 0xfa
++};
++
++/* AC Chroma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_ac_chroma_table_annex_k[] = {
++ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
++ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
++ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
++ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
++ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
++ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
++ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
++ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
++ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
++ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
++ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
++ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
++ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
++ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
++ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
++ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
++ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
++ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
++ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
++ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
++ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
++ 0xf9, 0xfa
++};
++
++/* luma quantization table 75% quality setting */
++static const u8 ci_isp_yq_table75_per_cent[] = {
++ 0x08, 0x06, 0x06, 0x07, 0x06, 0x05, 0x08, 0x07,
++ 0x07, 0x07, 0x09, 0x09, 0x08, 0x0a, 0x0c, 0x14,
++ 0x0d, 0x0c, 0x0b, 0x0b, 0x0c, 0x19, 0x12, 0x13,
++ 0x0f, 0x14, 0x1d, 0x1a, 0x1f, 0x1e, 0x1d, 0x1a,
++ 0x1c, 0x1c, 0x20, 0x24, 0x2e, 0x27, 0x20, 0x22,
++ 0x2c, 0x23, 0x1c, 0x1c, 0x28, 0x37, 0x29, 0x2c,
++ 0x30, 0x31, 0x34, 0x34, 0x34, 0x1f, 0x27, 0x39,
++ 0x3d, 0x38, 0x32, 0x3c, 0x2e, 0x33, 0x34, 0x32
++};
++
++/* chroma quantization table 75% quality setting */
++static const u8 ci_isp_uv_qtable75_per_cent[] = {
++ 0x09, 0x09, 0x09, 0x0c, 0x0b, 0x0c, 0x18, 0x0d,
++ 0x0d, 0x18, 0x32, 0x21, 0x1c, 0x21, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32
++};
++
++/*
++ * luma quantization table very low compression(about factor 2)
++ */
++static const u8 ci_isp_yq_table_low_comp1[] = {
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/*
++ * chroma quantization table very low compression
++ * (about factor 2)
++ */
++static const u8 ci_isp_uv_qtable_low_comp1[] = {
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/*
++ * The jpg Quantization Tables were parsed by jpeg_parser from
++ * jpg images generated by Jasc PaintShopPro.
++ *
++ */
++
++/* 01% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table01_per_cent[] = {
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x01, 0x01,
++ 0x02, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable01_per_cent[] = {
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/* 20% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table20_per_cent[] = {
++ 0x06, 0x04, 0x05, 0x06, 0x05, 0x04, 0x06, 0x06,
++ 0x05, 0x06, 0x07, 0x07, 0x06, 0x08, 0x0a, 0x10,
++ 0x0a, 0x0a, 0x09, 0x09, 0x0a, 0x14, 0x0e, 0x0f,
++ 0x0c, 0x10, 0x17, 0x14, 0x18, 0x18, 0x17, 0x14,
++ 0x16, 0x16, 0x1a, 0x1d, 0x25, 0x1f, 0x1a, 0x1b,
++ 0x23, 0x1c, 0x16, 0x16, 0x20, 0x2c, 0x20, 0x23,
++ 0x26, 0x27, 0x29, 0x2a, 0x29, 0x19, 0x1f, 0x2d,
++ 0x30, 0x2d, 0x28, 0x30, 0x25, 0x28, 0x29, 0x28
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable20_per_cent[] = {
++ 0x07, 0x07, 0x07, 0x0a, 0x08, 0x0a, 0x13, 0x0a,
++ 0x0a, 0x13, 0x28, 0x1a, 0x16, 0x1a, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28
++};
++
++/* 30% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table30_per_cent[] = {
++ 0x0a, 0x07, 0x07, 0x08, 0x07, 0x06, 0x0a, 0x08,
++ 0x08, 0x08, 0x0b, 0x0a, 0x0a, 0x0b, 0x0e, 0x18,
++ 0x10, 0x0e, 0x0d, 0x0d, 0x0e, 0x1d, 0x15, 0x16,
++ 0x11, 0x18, 0x23, 0x1f, 0x25, 0x24, 0x22, 0x1f,
++ 0x22, 0x21, 0x26, 0x2b, 0x37, 0x2f, 0x26, 0x29,
++ 0x34, 0x29, 0x21, 0x22, 0x30, 0x41, 0x31, 0x34,
++ 0x39, 0x3b, 0x3e, 0x3e, 0x3e, 0x25, 0x2e, 0x44,
++ 0x49, 0x43, 0x3c, 0x48, 0x37, 0x3d, 0x3e, 0x3b
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable30_per_cent[] = {
++ 0x0a, 0x0b, 0x0b, 0x0e, 0x0d, 0x0e, 0x1c, 0x10,
++ 0x10, 0x1c, 0x3b, 0x28, 0x22, 0x28, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b
++};
++
++
++/* 40% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table40_per_cent[] = {
++ 0x0d, 0x09, 0x0a, 0x0b, 0x0a, 0x08, 0x0d, 0x0b,
++ 0x0a, 0x0b, 0x0e, 0x0e, 0x0d, 0x0f, 0x13, 0x20,
++ 0x15, 0x13, 0x12, 0x12, 0x13, 0x27, 0x1c, 0x1e,
++ 0x17, 0x20, 0x2e, 0x29, 0x31, 0x30, 0x2e, 0x29,
++ 0x2d, 0x2c, 0x33, 0x3a, 0x4a, 0x3e, 0x33, 0x36,
++ 0x46, 0x37, 0x2c, 0x2d, 0x40, 0x57, 0x41, 0x46,
++ 0x4c, 0x4e, 0x52, 0x53, 0x52, 0x32, 0x3e, 0x5a,
++ 0x61, 0x5a, 0x50, 0x60, 0x4a, 0x51, 0x52, 0x4f
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable40_per_cent[] = {
++ 0x0e, 0x0e, 0x0e, 0x13, 0x11, 0x13, 0x26, 0x15,
++ 0x15, 0x26, 0x4f, 0x35, 0x2d, 0x35, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f
++};
++
++/* 50% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table50_per_cent[] = {
++ 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
++ 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
++ 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
++ 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
++ 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
++ 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
++ 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
++ 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable50_per_cent[] = {
++ 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
++ 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
++};
++
++/* 60% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table60_per_cent[] = {
++ 0x14, 0x0e, 0x0f, 0x12, 0x0f, 0x0d, 0x14, 0x12,
++ 0x10, 0x12, 0x17, 0x15, 0x14, 0x18, 0x1e, 0x32,
++ 0x21, 0x1e, 0x1c, 0x1c, 0x1e, 0x3d, 0x2c, 0x2e,
++ 0x24, 0x32, 0x49, 0x40, 0x4c, 0x4b, 0x47, 0x40,
++ 0x46, 0x45, 0x50, 0x5a, 0x73, 0x62, 0x50, 0x55,
++ 0x6d, 0x56, 0x45, 0x46, 0x64, 0x88, 0x65, 0x6d,
++ 0x77, 0x7b, 0x81, 0x82, 0x81, 0x4e, 0x60, 0x8d,
++ 0x97, 0x8c, 0x7d, 0x96, 0x73, 0x7e, 0x81, 0x7c
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable60_per_cent[] = {
++ 0x15, 0x17, 0x17, 0x1e, 0x1a, 0x1e, 0x3b, 0x21,
++ 0x21, 0x3b, 0x7c, 0x53, 0x46, 0x53, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c
++};
++
++/* 70% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table70_per_cent[] = {
++ 0x1b, 0x12, 0x14, 0x17, 0x14, 0x11, 0x1b, 0x17,
++ 0x16, 0x17, 0x1e, 0x1c, 0x1b, 0x20, 0x28, 0x42,
++ 0x2b, 0x28, 0x25, 0x25, 0x28, 0x51, 0x3a, 0x3d,
++ 0x30, 0x42, 0x60, 0x55, 0x65, 0x64, 0x5f, 0x55,
++ 0x5d, 0x5b, 0x6a, 0x78, 0x99, 0x81, 0x6a, 0x71,
++ 0x90, 0x73, 0x5b, 0x5d, 0x85, 0xb5, 0x86, 0x90,
++ 0x9e, 0xa3, 0xab, 0xad, 0xab, 0x67, 0x80, 0xbc,
++ 0xc9, 0xba, 0xa6, 0xc7, 0x99, 0xa8, 0xab, 0xa4
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable70_per_cent[] = {
++ 0x1c, 0x1e, 0x1e, 0x28, 0x23, 0x28, 0x4e, 0x2b,
++ 0x2b, 0x4e, 0xa4, 0x6e, 0x5d, 0x6e, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4
++};
++
++/* 80% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table80_per_cent[] = {
++ 0x28, 0x1c, 0x1e, 0x23, 0x1e, 0x19, 0x28, 0x23,
++ 0x21, 0x23, 0x2d, 0x2b, 0x28, 0x30, 0x3c, 0x64,
++ 0x41, 0x3c, 0x37, 0x37, 0x3c, 0x7b, 0x58, 0x5d,
++ 0x49, 0x64, 0x91, 0x80, 0x99, 0x96, 0x8f, 0x80,
++ 0x8c, 0x8a, 0xa0, 0xb4, 0xe6, 0xc3, 0xa0, 0xaa,
++ 0xda, 0xad, 0x8a, 0x8c, 0xc8, 0xff, 0xcb, 0xda,
++ 0xee, 0xf5, 0xff, 0xff, 0xff, 0x9b, 0xc1, 0xff,
++ 0xff, 0xff, 0xfa, 0xff, 0xe6, 0xfd, 0xff, 0xf8
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable80_per_cent[] = {
++ 0x2b, 0x2d, 0x2d, 0x3c, 0x35, 0x3c, 0x76, 0x41,
++ 0x41, 0x76, 0xf8, 0xa5, 0x8c, 0xa5, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8
++};
++
++/* 90% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table90_per_cent[] = {
++ 0x50, 0x37, 0x3c, 0x46, 0x3c, 0x32, 0x50, 0x46,
++ 0x41, 0x46, 0x5a, 0x55, 0x50, 0x5f, 0x78, 0xc8,
++ 0x82, 0x78, 0x6e, 0x6e, 0x78, 0xf5, 0xaf, 0xb9,
++ 0x91, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable90_per_cent[] = {
++ 0x55, 0x5a, 0x5a, 0x78, 0x69, 0x78, 0xeb, 0x82,
++ 0x82, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* 99% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table99_per_cent[] = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable99_per_cent[] = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++int ci_isp_wait_for_vsyncHelper(void);
++void ci_isp_jpe_set_tables(u8 compression_ratio);
++void ci_isp_jpe_select_tables(void);
++void ci_isp_jpe_set_config(u16 hsize, u16 vsize, int jpe_scale);
++int ci_isp_jpe_generate_header(struct mrst_isp_device *intel, u8 header_mode);
++void ci_isp_jpe_prep_enc(enum ci_isp_jpe_enc_mode jpe_enc_mode);
++int ci_isp_jpe_wait_for_header_gen_done(struct mrst_isp_device *intel);
++int ci_isp_jpe_wait_for_encode_done(struct mrst_isp_device *intel);
++
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp_reg.h
+@@ -0,0 +1,4700 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _MRV_PRIV_H
++#define _MRV_PRIV_H
++
++extern u32 mrv_rsz_scale_mask;
++
++#define MRV_ISP_GAMMA_R_Y_ARR_SIZE 17
++#define MRV_ISP_GAMMA_G_Y_ARR_SIZE 17
++#define MRV_ISP_GAMMA_B_Y_ARR_SIZE 17
++#define MRV_ISP_CT_COEFF_ARR_SIZE 9
++#define MRV_ISP_GAMMA_OUT_Y_ARR_SIZE 17
++#define MRV_ISP_BP_NEW_TABLE_ARR_SIZE 8
++#define MRV_ISP_HIST_BIN_ARR_SIZE 16
++
++struct isp_register {
++ u32 vi_ccl;
++ u32 vi_custom_reg1;
++ u32 vi_id;
++ u32 vi_custom_reg2;
++ u32 vi_iccl;
++ u32 vi_ircl;
++ u32 vi_dpcl;
++
++ u32 notused_mrvbase1;
++
++
++ u32 notused_mrvbase2[(0x200 - 0x20) / 4];
++
++ u32 img_eff_ctrl;
++ u32 img_eff_color_sel;
++ u32 img_eff_mat_1;
++ u32 img_eff_mat_2;
++ u32 img_eff_mat_3;
++ u32 img_eff_mat_4;
++ u32 img_eff_mat_5;
++ u32 img_eff_tint;
++ u32 img_eff_ctrl_shd;
++ u32 notused_imgeff[(0x300 - 0x224) / 4];
++
++
++ u32 super_imp_ctrl;
++ u32 super_imp_offset_x;
++ u32 super_imp_offset_y;
++ u32 super_imp_color_y;
++ u32 super_imp_color_cb;
++ u32 super_imp_color_cr;
++ u32 notused_simp[(0x400 - 0x318) / 4];
++
++ u32 isp_ctrl;
++ u32 isp_acq_prop;
++ u32 isp_acq_h_offs;
++ u32 isp_acq_v_offs;
++ u32 isp_acq_h_size;
++ u32 isp_acq_v_size;
++ u32 isp_acq_nr_frames;
++ u32 isp_gamma_dx_lo;
++ u32 isp_gamma_dx_hi;
++ u32 isp_gamma_r_y[MRV_ISP_GAMMA_R_Y_ARR_SIZE];
++ u32 isp_gamma_g_y[MRV_ISP_GAMMA_G_Y_ARR_SIZE];
++ u32 isp_gamma_b_y[MRV_ISP_GAMMA_B_Y_ARR_SIZE];
++
++
++ u32 notused_ispbls1[(0x510 - 0x4F0) / 4];
++
++ u32 isp_awb_prop;
++ u32 isp_awb_h_offs;
++ u32 isp_awb_v_offs;
++ u32 isp_awb_h_size;
++ u32 isp_awb_v_size;
++ u32 isp_awb_frames;
++ u32 isp_awb_ref;
++ u32 isp_awb_thresh;
++
++ u32 notused_ispawb2[(0x538-0x530)/4];
++
++ u32 isp_awb_gain_g;
++ u32 isp_awb_gain_rb;
++
++ u32 isp_awb_white_cnt;
++ u32 isp_awb_mean;
++
++ u32 notused_ispae[(0x570 - 0x548) / 4];
++ u32 isp_cc_coeff_0;
++ u32 isp_cc_coeff_1;
++ u32 isp_cc_coeff_2;
++ u32 isp_cc_coeff_3;
++ u32 isp_cc_coeff_4;
++ u32 isp_cc_coeff_5;
++ u32 isp_cc_coeff_6;
++ u32 isp_cc_coeff_7;
++ u32 isp_cc_coeff_8;
++
++ u32 isp_out_h_offs;
++ u32 isp_out_v_offs;
++ u32 isp_out_h_size;
++ u32 isp_out_v_size;
++
++
++ u32 isp_demosaic;
++ u32 isp_flags_shd;
++
++ u32 isp_out_h_offs_shd;
++ u32 isp_out_v_offs_shd;
++ u32 isp_out_h_size_shd;
++ u32 isp_out_v_size_shd;
++
++
++ u32 isp_imsc;
++ u32 isp_ris;
++ u32 isp_mis;
++ u32 isp_icr;
++ u32 isp_isr;
++
++ u32 isp_ct_coeff[MRV_ISP_CT_COEFF_ARR_SIZE];
++
++ u32 isp_gamma_out_mode;
++ u32 isp_gamma_out_y[MRV_ISP_GAMMA_OUT_Y_ARR_SIZE];
++
++
++ u32 isp_err;
++ u32 isp_err_clr;
++
++
++ u32 isp_frame_count;
++
++ u32 isp_ct_offset_r;
++ u32 isp_ct_offset_g;
++ u32 isp_ct_offset_b;
++ u32 notused_ispctoffs[(0x660 - 0x654) / 4];
++
++
++ u32 isp_flash_cmd;
++ u32 isp_flash_config;
++ u32 isp_flash_prediv;
++ u32 isp_flash_delay;
++ u32 isp_flash_time;
++ u32 isp_flash_maxp;
++ u32 notused_ispflash[(0x680 - 0x678) / 4];
++
++
++ u32 isp_sh_ctrl;
++ u32 isp_sh_prediv;
++ u32 isp_sh_delay;
++ u32 isp_sh_time;
++ u32 notused_ispsh[(0x800 - 0x690) / 4];
++
++ u32 c_proc_ctrl;
++ u32 c_proc_contrast;
++ u32 c_proc_brightness;
++ u32 c_proc_saturation;
++ u32 c_proc_hue;
++ u32 notused_cproc[(0xC00 - 0x814) / 4];
++
++ u32 mrsz_ctrl;
++ u32 mrsz_scale_hy;
++ u32 mrsz_scale_hcb;
++ u32 mrsz_scale_hcr;
++ u32 mrsz_scale_vy;
++ u32 mrsz_scale_vc;
++ u32 mrsz_phase_hy;
++ u32 mrsz_phase_hc;
++ u32 mrsz_phase_vy;
++ u32 mrsz_phase_vc;
++ u32 mrsz_scale_lut_addr;
++ u32 mrsz_scale_lut;
++ u32 mrsz_ctrl_shd;
++ u32 mrsz_scale_hy_shd;
++ u32 mrsz_scale_hcb_shd;
++ u32 mrsz_scale_hcr_shd;
++ u32 mrsz_scale_vy_shd;
++ u32 mrsz_scale_vc_shd;
++ u32 mrsz_phase_hy_shd;
++ u32 mrsz_phase_hc_shd;
++ u32 mrsz_phase_vy_shd;
++ u32 mrsz_phase_vc_shd;
++ u32 notused_mrsz[(0x1000 - 0x0C58) / 4];
++
++ u32 srsz_ctrl;
++ u32 srsz_scale_hy;
++ u32 srsz_scale_hcb;
++ u32 srsz_scale_hcr;
++ u32 srsz_scale_vy;
++ u32 srsz_scale_vc;
++ u32 srsz_phase_hy;
++ u32 srsz_phase_hc;
++ u32 srsz_phase_vy;
++ u32 srsz_phase_vc;
++ u32 srsz_scale_lut_addr;
++ u32 srsz_scale_lut;
++ u32 srsz_ctrl_shd;
++ u32 srsz_scale_hy_shd;
++ u32 srsz_scale_hcb_shd;
++ u32 srsz_scale_hcr_shd;
++ u32 srsz_scale_vy_shd;
++ u32 srsz_scale_vc_shd;
++ u32 srsz_phase_hy_shd;
++ u32 srsz_phase_hc_shd;
++ u32 srsz_phase_vy_shd;
++ u32 srsz_phase_vc_shd;
++ u32 notused_srsz[(0x1400 - 0x1058) / 4];
++
++ u32 mi_ctrl;
++ u32 mi_init;
++ u32 mi_mp_y_base_ad_init;
++ u32 mi_mp_y_size_init;
++ u32 mi_mp_y_offs_cnt_init;
++ u32 mi_mp_y_offs_cnt_start;
++ u32 mi_mp_y_irq_offs_init;
++ u32 mi_mp_cb_base_ad_init;
++ u32 mi_mp_cb_size_init;
++ u32 mi_mp_cb_offs_cnt_init;
++ u32 mi_mp_cb_offs_cnt_start;
++ u32 mi_mp_cr_base_ad_init;
++ u32 mi_mp_cr_size_init;
++ u32 mi_mp_cr_offs_cnt_init;
++ u32 mi_mp_cr_offs_cnt_start;
++ u32 mi_sp_y_base_ad_init;
++ u32 mi_sp_y_size_init;
++ u32 mi_sp_y_offs_cnt_init;
++ u32 mi_sp_y_offs_cnt_start;
++ u32 mi_sp_y_llength;
++ u32 mi_sp_cb_base_ad_init;
++ u32 mi_sp_cb_size_init;
++ u32 mi_sp_cb_offs_cnt_init;
++ u32 mi_sp_cb_offs_cnt_start;
++ u32 mi_sp_cr_base_ad_init;
++ u32 mi_sp_cr_size_init;
++ u32 mi_sp_cr_offs_cnt_init;
++ u32 mi_sp_cr_offs_cnt_start;
++ u32 mi_byte_cnt;
++ u32 mi_ctrl_shd;
++ u32 mi_mp_y_base_ad_shd;
++ u32 mi_mp_y_size_shd;
++ u32 mi_mp_y_offs_cnt_shd;
++ u32 mi_mp_y_irq_offs_shd;
++ u32 mi_mp_cb_base_ad_shd;
++ u32 mi_mp_cb_size_shd;
++ u32 mi_mp_cb_offs_cnt_shd;
++ u32 mi_mp_cr_base_ad_shd;
++ u32 mi_mp_cr_size_shd;
++ u32 mi_mp_cr_offs_cnt_shd;
++ u32 mi_sp_y_base_ad_shd;
++ u32 mi_sp_y_size_shd;
++ u32 mi_sp_y_offs_cnt_shd;
++
++ u32 notused_mi1;
++
++ u32 mi_sp_cb_base_ad_shd;
++ u32 mi_sp_cb_size_shd;
++ u32 mi_sp_cb_offs_cnt_shd;
++ u32 mi_sp_cr_base_ad_shd;
++ u32 mi_sp_cr_size_shd;
++ u32 mi_sp_cr_offs_cnt_shd;
++ u32 mi_dma_y_pic_start_ad;
++ u32 mi_dma_y_pic_width;
++ u32 mi_dma_y_llength;
++ u32 mi_dma_y_pic_size;
++ u32 mi_dma_cb_pic_start_ad;
++ u32 notused_mi2[(0x14E8 - 0x14DC) / 4];
++ u32 mi_dma_cr_pic_start_ad;
++ u32 notused_mi3[(0x14F8 - 0x14EC) / 4];
++ u32 mi_imsc;
++ u32 mi_ris;
++ u32 mi_mis;
++ u32 mi_icr;
++ u32 mi_isr;
++ u32 mi_status;
++ u32 mi_status_clr;
++ u32 mi_sp_y_pic_width;
++ u32 mi_sp_y_pic_height;
++ u32 mi_sp_y_pic_size;
++ u32 mi_dma_ctrl;
++ u32 mi_dma_start;
++ u32 mi_dma_status;
++ u32 notused_mi6[(0x1800 - 0x152C) / 4];
++ u32 jpe_gen_header;
++ u32 jpe_encode;
++
++ u32 jpe_init;
++
++ u32 jpe_y_scale_en;
++ u32 jpe_cbcr_scale_en;
++ u32 jpe_table_flush;
++ u32 jpe_enc_hsize;
++ u32 jpe_enc_vsize;
++ u32 jpe_pic_format;
++ u32 jpe_restart_interval;
++ u32 jpe_tq_y_select;
++ u32 jpe_tq_u_select;
++ u32 jpe_tq_v_select;
++ u32 jpe_dc_table_select;
++ u32 jpe_ac_table_select;
++ u32 jpe_table_data;
++ u32 jpe_table_id;
++ u32 jpe_tac0_len;
++ u32 jpe_tdc0_len;
++ u32 jpe_tac1_len;
++ u32 jpe_tdc1_len;
++ u32 notused_jpe2;
++ u32 jpe_encoder_busy;
++ u32 jpe_header_mode;
++ u32 jpe_encode_mode;
++ u32 jpe_debug;
++ u32 jpe_error_imr;
++ u32 jpe_error_ris;
++ u32 jpe_error_mis;
++ u32 jpe_error_icr;
++ u32 jpe_error_isr;
++ u32 jpe_status_imr;
++ u32 jpe_status_ris;
++ u32 jpe_status_mis;
++ u32 jpe_status_icr;
++ u32 jpe_status_isr;
++ u32 notused_jpe3[(0x1A00 - 0x1890) / 4];
++
++ u32 smia_ctrl;
++ u32 smia_status;
++ u32 smia_imsc;
++ u32 smia_ris;
++ u32 smia_mis;
++ u32 smia_icr;
++ u32 smia_isr;
++ u32 smia_data_format_sel;
++ u32 smia_sof_emb_data_lines;
++
++ u32 smia_emb_hstart;
++ u32 smia_emb_hsize;
++ u32 smia_emb_vstart;
++
++ u32 smia_num_lines;
++ u32 smia_emb_data_fifo;
++
++ u32 smia_fifo_fill_level;
++ u32 notused_smia2[(0x1A40 - 0x1A3C) / 4];
++
++ u32 notused_smia3[(0x1A60 - 0x1A40) / 4];
++ u32 notused_smia4[(0x1C00 - 0x1A60) / 4];
++
++
++ u32 mipi_ctrl;
++ u32 mipi_status;
++ u32 mipi_imsc;
++ u32 mipi_ris;
++ u32 mipi_mis;
++ u32 mipi_icr;
++ u32 mipi_isr;
++ u32 mipi_cur_data_id;
++ u32 mipi_img_data_sel;
++ u32 mipi_add_data_sel_1;
++ u32 mipi_add_data_sel_2;
++ u32 mipi_add_data_sel_3;
++ u32 mipi_add_data_sel_4;
++ u32 mipi_add_data_fifo;
++ u32 mipi_add_data_fill_level;
++ u32 notused_mipi[(0x2000 - 0x1C3C) / 4];
++
++
++ u32 isp_afm_ctrl;
++ u32 isp_afm_lt_a;
++ u32 isp_afm_rb_a;
++ u32 isp_afm_lt_b;
++ u32 isp_afm_rb_b;
++ u32 isp_afm_lt_c;
++ u32 isp_afm_rb_c;
++ u32 isp_afm_thres;
++ u32 isp_afm_var_shift;
++ u32 isp_afm_sum_a;
++ u32 isp_afm_sum_b;
++ u32 isp_afm_sum_c;
++ u32 isp_afm_lum_a;
++ u32 isp_afm_lum_b;
++ u32 isp_afm_lum_c;
++ u32 notused_ispafm[(0x2100 - 0x203C) / 4];
++
++
++ u32 isp_bp_ctrl;
++ u32 isp_bp_cfg1;
++ u32 isp_bp_cfg2;
++ u32 isp_bp_number;
++ u32 isp_bp_table_addr;
++ u32 isp_bp_table_data;
++ u32 isp_bp_new_number;
++ u32 isp_bp_new_table[MRV_ISP_BP_NEW_TABLE_ARR_SIZE];
++
++ u32 notused_ispbp[(0x2200 - 0x213C) / 4];
++
++
++ u32 isp_lsc_ctrl;
++ u32 isp_lsc_r_table_addr;
++ u32 isp_lsc_g_table_addr;
++ u32 isp_lsc_b_table_addr;
++ u32 isp_lsc_r_table_data;
++ u32 isp_lsc_g_table_data;
++ u32 isp_lsc_b_table_data;
++ u32 notused_isplsc1;
++ u32 isp_lsc_xgrad_01;
++ u32 isp_lsc_xgrad_23;
++ u32 isp_lsc_xgrad_45;
++ u32 isp_lsc_xgrad_67;
++ u32 isp_lsc_ygrad_01;
++ u32 isp_lsc_ygrad_23;
++ u32 isp_lsc_ygrad_45;
++ u32 isp_lsc_ygrad_67;
++ u32 isp_lsc_xsize_01;
++ u32 isp_lsc_xsize_23;
++ u32 isp_lsc_xsize_45;
++ u32 isp_lsc_xsize_67;
++ u32 isp_lsc_ysize_01;
++ u32 isp_lsc_ysize_23;
++ u32 isp_lsc_ysize_45;
++ u32 isp_lsc_ysize_67;
++ u32 notused_isplsc2[(0x2300 - 0x2260) / 4];
++
++
++ u32 isp_is_ctrl;
++ u32 isp_is_recenter;
++
++ u32 isp_is_h_offs;
++ u32 isp_is_v_offs;
++ u32 isp_is_h_size;
++ u32 isp_is_v_size;
++
++ u32 isp_is_max_dx;
++ u32 isp_is_max_dy;
++ u32 isp_is_displace;
++
++ u32 isp_is_h_offs_shd;
++ u32 isp_is_v_offs_shd;
++ u32 isp_is_h_size_shd;
++ u32 isp_is_v_size_shd;
++ u32 notused_ispis4[(0x2400 - 0x2334) / 4];
++
++ u32 isp_hist_prop;
++ u32 isp_hist_h_offs;
++ u32 isp_hist_v_offs;
++ u32 isp_hist_h_size;
++ u32 isp_hist_v_size;
++ u32 isp_hist_bin[MRV_ISP_HIST_BIN_ARR_SIZE];
++ u32 notused_isphist[(0x2500-0x2454)/4];
++
++ u32 isp_filt_mode;
++ u32 _notused_28[(0x2528 - 0x2504) / 4];
++ u32 isp_filt_thresh_bl0;
++ u32 isp_filt_thresh_bl1;
++ u32 isp_filt_thresh_sh0;
++ u32 isp_filt_thresh_sh1;
++ u32 isp_filt_lum_weight;
++ u32 isp_filt_fac_sh1;
++ u32 isp_filt_fac_sh0;
++ u32 isp_filt_fac_mid;
++ u32 isp_filt_fac_bl0;
++ u32 isp_filt_fac_bl1;
++ u32 notused_ispfilt[(0x2580 - 0x2550) / 4];
++
++ u32 notused_ispcac[(0x2600 - 0x2580) / 4];
++
++ u32 isp_exp_ctrl;
++ u32 isp_exp_h_offset;
++ u32 isp_exp_v_offset;
++ u32 isp_exp_h_size;
++ u32 isp_exp_v_size;
++ u32 isp_exp_mean_00;
++ u32 isp_exp_mean_10;
++ u32 isp_exp_mean_20;
++ u32 isp_exp_mean_30;
++ u32 isp_exp_mean_40;
++ u32 isp_exp_mean_01;
++ u32 isp_exp_mean_11;
++ u32 isp_exp_mean_21;
++ u32 isp_exp_mean_31;
++ u32 isp_exp_mean_41;
++ u32 isp_exp_mean_02;
++ u32 isp_exp_mean_12;
++ u32 isp_exp_mean_22;
++ u32 isp_exp_mean_32;
++ u32 isp_exp_mean_42;
++ u32 isp_exp_mean_03;
++ u32 isp_exp_mean_13;
++ u32 isp_exp_mean_23;
++ u32 isp_exp_mean_33;
++ u32 isp_exp_mean_43;
++ u32 isp_exp_mean_04;
++ u32 isp_exp_mean_14;
++ u32 isp_exp_mean_24;
++ u32 isp_exp_mean_34;
++ u32 isp_exp_mean_44;
++ u32 notused_ispexp[(0x2700 - 0x2678) / 4];
++
++ u32 isp_bls_ctrl;
++ u32 isp_bls_samples;
++ u32 isp_bls_h1_start;
++ u32 isp_bls_h1_stop;
++ u32 isp_bls_v1_start;
++ u32 isp_bls_v1_stop;
++ u32 isp_bls_h2_start;
++ u32 isp_bls_h2_stop;
++ u32 isp_bls_v2_start;
++ u32 isp_bls_v2_stop;
++ u32 isp_bls_a_fixed;
++ u32 isp_bls_b_fixed;
++ u32 isp_bls_c_fixed;
++ u32 isp_bls_d_fixed;
++ u32 isp_bls_a_measured;
++ u32 isp_bls_b_measured;
++ u32 isp_bls_c_measured;
++ u32 isp_bls_d_measured;
++ u32 notused_ispbls2[(0x2800 - 0x2748) / 4];
++
++
++};
++
++
++
++
++
++
++
++#define MRV_VI_CCLFDIS
++#define MRV_VI_CCLFDIS_MASK 0x00000004
++#define MRV_VI_CCLFDIS_SHIFT 2
++#define MRV_VI_CCLFDIS_ENABLE 0
++#define MRV_VI_CCLFDIS_DISABLE 1
++
++#define MRV_VI_CCLDISS
++#define MRV_VI_CCLDISS_MASK 0x00000002
++#define MRV_VI_CCLDISS_SHIFT 1
++
++#define MRV_REV_ID
++#define MRV_REV_ID_MASK 0xFFFFFFFF
++#define MRV_REV_ID_SHIFT 0
++
++#define MRV_VI_MIPI_CLK_ENABLE
++#define MRV_VI_MIPI_CLK_ENABLE_MASK 0x00000800
++#define MRV_VI_MIPI_CLK_ENABLE_SHIFT 11
++
++
++#define MRV_VI_SMIA_CLK_ENABLE
++#define MRV_VI_SMIA_CLK_ENABLE_MASK 0x00000400
++#define MRV_VI_SMIA_CLK_ENABLE_SHIFT 10
++#define MRV_VI_SIMP_CLK_ENABLE
++#define MRV_VI_SIMP_CLK_ENABLE_MASK 0x00000200
++#define MRV_VI_SIMP_CLK_ENABLE_SHIFT 9
++
++#define MRV_VI_IE_CLK_ENABLE
++#define MRV_VI_IE_CLK_ENABLE_MASK 0x00000100
++#define MRV_VI_IE_CLK_ENABLE_SHIFT 8
++
++#define MRV_VI_EMP_CLK_ENABLE_MASK 0
++#define MRV_VI_MI_CLK_ENABLE
++#define MRV_VI_MI_CLK_ENABLE_MASK 0x00000040
++#define MRV_VI_MI_CLK_ENABLE_SHIFT 6
++
++#define MRV_VI_JPEG_CLK_ENABLE
++#define MRV_VI_JPEG_CLK_ENABLE_MASK 0x00000020
++#define MRV_VI_JPEG_CLK_ENABLE_SHIFT 5
++#define MRV_VI_SRSZ_CLK_ENABLE
++#define MRV_VI_SRSZ_CLK_ENABLE_MASK 0x00000010
++#define MRV_VI_SRSZ_CLK_ENABLE_SHIFT 4
++
++#define MRV_VI_MRSZ_CLK_ENABLE
++#define MRV_VI_MRSZ_CLK_ENABLE_MASK 0x00000008
++#define MRV_VI_MRSZ_CLK_ENABLE_SHIFT 3
++#define MRV_VI_CP_CLK_ENABLE
++#define MRV_VI_CP_CLK_ENABLE_MASK 0x00000002
++#define MRV_VI_CP_CLK_ENABLE_SHIFT 1
++#define MRV_VI_ISP_CLK_ENABLE
++#define MRV_VI_ISP_CLK_ENABLE_MASK 0x00000001
++#define MRV_VI_ISP_CLK_ENABLE_SHIFT 0
++
++
++#define MRV_VI_ALL_CLK_ENABLE
++#define MRV_VI_ALL_CLK_ENABLE_MASK \
++(0 \
++| MRV_VI_MIPI_CLK_ENABLE_MASK \
++| MRV_VI_SMIA_CLK_ENABLE_MASK \
++| MRV_VI_SIMP_CLK_ENABLE_MASK \
++| MRV_VI_IE_CLK_ENABLE_MASK \
++| MRV_VI_EMP_CLK_ENABLE_MASK \
++| MRV_VI_MI_CLK_ENABLE_MASK \
++| MRV_VI_JPEG_CLK_ENABLE_MASK \
++| MRV_VI_SRSZ_CLK_ENABLE_MASK \
++| MRV_VI_MRSZ_CLK_ENABLE_MASK \
++| MRV_VI_CP_CLK_ENABLE_MASK \
++| MRV_VI_ISP_CLK_ENABLE_MASK \
++)
++#define MRV_VI_ALL_CLK_ENABLE_SHIFT 0
++
++#define MRV_VI_MIPI_SOFT_RST
++#define MRV_VI_MIPI_SOFT_RST_MASK 0x00000800
++#define MRV_VI_MIPI_SOFT_RST_SHIFT 11
++
++#define MRV_VI_SMIA_SOFT_RST
++#define MRV_VI_SMIA_SOFT_RST_MASK 0x00000400
++#define MRV_VI_SMIA_SOFT_RST_SHIFT 10
++#define MRV_VI_SIMP_SOFT_RST
++#define MRV_VI_SIMP_SOFT_RST_MASK 0x00000200
++#define MRV_VI_SIMP_SOFT_RST_SHIFT 9
++
++#define MRV_VI_IE_SOFT_RST
++#define MRV_VI_IE_SOFT_RST_MASK 0x00000100
++#define MRV_VI_IE_SOFT_RST_SHIFT 8
++#define MRV_VI_MARVIN_RST
++#define MRV_VI_MARVIN_RST_MASK 0x00000080
++#define MRV_VI_MARVIN_RST_SHIFT 7
++
++#define MRV_VI_EMP_SOFT_RST_MASK 0
++#define MRV_VI_MI_SOFT_RST
++#define MRV_VI_MI_SOFT_RST_MASK 0x00000040
++#define MRV_VI_MI_SOFT_RST_SHIFT 6
++
++#define MRV_VI_JPEG_SOFT_RST
++#define MRV_VI_JPEG_SOFT_RST_MASK 0x00000020
++#define MRV_VI_JPEG_SOFT_RST_SHIFT 5
++#define MRV_VI_SRSZ_SOFT_RST
++#define MRV_VI_SRSZ_SOFT_RST_MASK 0x00000010
++#define MRV_VI_SRSZ_SOFT_RST_SHIFT 4
++
++#define MRV_VI_MRSZ_SOFT_RST
++#define MRV_VI_MRSZ_SOFT_RST_MASK 0x00000008
++#define MRV_VI_MRSZ_SOFT_RST_SHIFT 3
++#define MRV_VI_YCS_SOFT_RST
++#define MRV_VI_YCS_SOFT_RST_MASK 0x00000004
++#define MRV_VI_YCS_SOFT_RST_SHIFT 2
++#define MRV_VI_CP_SOFT_RST
++#define MRV_VI_CP_SOFT_RST_MASK 0x00000002
++#define MRV_VI_CP_SOFT_RST_SHIFT 1
++#define MRV_VI_ISP_SOFT_RST
++#define MRV_VI_ISP_SOFT_RST_MASK 0x00000001
++#define MRV_VI_ISP_SOFT_RST_SHIFT 0
++
++#define MRV_VI_ALL_SOFT_RST
++#define MRV_VI_ALL_SOFT_RST_MASK \
++(0 \
++| MRV_VI_MIPI_SOFT_RST_MASK \
++| MRV_VI_SMIA_SOFT_RST_MASK \
++| MRV_VI_SIMP_SOFT_RST_MASK \
++| MRV_VI_IE_SOFT_RST_MASK \
++| MRV_VI_EMP_SOFT_RST_MASK \
++| MRV_VI_MI_SOFT_RST_MASK \
++| MRV_VI_JPEG_SOFT_RST_MASK \
++| MRV_VI_SRSZ_SOFT_RST_MASK \
++| MRV_VI_MRSZ_SOFT_RST_MASK \
++| MRV_VI_YCS_SOFT_RST_MASK \
++| MRV_VI_CP_SOFT_RST_MASK \
++| MRV_VI_ISP_SOFT_RST_MASK \
++)
++#define MRV_VI_ALL_SOFT_RST_SHIFT 0
++
++
++#define MRV_VI_DMA_SPMUX
++#define MRV_VI_DMA_SPMUX_MASK 0x00000800
++#define MRV_VI_DMA_SPMUX_SHIFT 11
++#define MRV_VI_DMA_SPMUX_CAM 0
++#define MRV_VI_DMA_SPMUX_DMA 1
++#define MRV_VI_DMA_IEMUX
++#define MRV_VI_DMA_IEMUX_MASK 0x00000400
++#define MRV_VI_DMA_IEMUX_SHIFT 10
++#define MRV_VI_DMA_IEMUX_CAM 0
++#define MRV_VI_DMA_IEMUX_DMA 1
++#define MRV_IF_SELECT
++#define MRV_IF_SELECT_MASK 0x00000300
++#define MRV_IF_SELECT_SHIFT 8
++#define MRV_IF_SELECT_PAR 0
++#define MRV_IF_SELECT_SMIA 1
++#define MRV_IF_SELECT_MIPI 2
++#define MRV_VI_DMA_SWITCH
++#define MRV_VI_DMA_SWITCH_MASK 0x00000070
++#define MRV_VI_DMA_SWITCH_SHIFT 4
++#define MRV_VI_DMA_SWITCH_SELF 0
++#define MRV_VI_DMA_SWITCH_SI 1
++#define MRV_VI_DMA_SWITCH_IE 2
++#define MRV_VI_DMA_SWITCH_JPG 3
++#define MRV_VI_CHAN_MODE
++#define MRV_VI_CHAN_MODE_MASK 0x0000000C
++#define MRV_VI_CHAN_MODE_SHIFT 2
++
++#define MRV_VI_CHAN_MODE_OFF 0x00
++#define MRV_VI_CHAN_MODE_Y 0xFF
++#define MRV_VI_CHAN_MODE_MP_RAW 0x01
++#define MRV_VI_CHAN_MODE_MP 0x01
++#define MRV_VI_CHAN_MODE_SP 0x02
++#define MRV_VI_CHAN_MODE_MP_SP 0x03
++
++#define MRV_VI_MP_MUX
++#define MRV_VI_MP_MUX_MASK 0x00000003
++#define MRV_VI_MP_MUX_SHIFT 0
++
++#define MRV_VI_MP_MUX_JPGDIRECT 0x00
++#define MRV_VI_MP_MUX_MP 0x01
++#define MRV_VI_MP_MUX_RAW 0x01
++#define MRV_VI_MP_MUX_JPEG 0x02
++
++
++
++
++#define MRV_IMGEFF_CFG_UPD
++#define MRV_IMGEFF_CFG_UPD_MASK 0x00000010
++#define MRV_IMGEFF_CFG_UPD_SHIFT 4
++#define MRV_IMGEFF_EFFECT_MODE
++#define MRV_IMGEFF_EFFECT_MODE_MASK 0x0000000E
++#define MRV_IMGEFF_EFFECT_MODE_SHIFT 1
++#define MRV_IMGEFF_EFFECT_MODE_GRAY 0
++#define MRV_IMGEFF_EFFECT_MODE_NEGATIVE 1
++#define MRV_IMGEFF_EFFECT_MODE_SEPIA 2
++#define MRV_IMGEFF_EFFECT_MODE_COLOR_SEL 3
++#define MRV_IMGEFF_EFFECT_MODE_EMBOSS 4
++#define MRV_IMGEFF_EFFECT_MODE_SKETCH 5
++#define MRV_IMGEFF_BYPASS_MODE
++#define MRV_IMGEFF_BYPASS_MODE_MASK 0x00000001
++#define MRV_IMGEFF_BYPASS_MODE_SHIFT 0
++#define MRV_IMGEFF_BYPASS_MODE_PROCESS 1
++#define MRV_IMGEFF_BYPASS_MODE_BYPASS 0
++
++#define MRV_IMGEFF_COLOR_THRESHOLD
++#define MRV_IMGEFF_COLOR_THRESHOLD_MASK 0x0000FF00
++#define MRV_IMGEFF_COLOR_THRESHOLD_SHIFT 8
++#define MRV_IMGEFF_COLOR_SELECTION
++#define MRV_IMGEFF_COLOR_SELECTION_MASK 0x00000007
++#define MRV_IMGEFF_COLOR_SELECTION_SHIFT 0
++#define MRV_IMGEFF_COLOR_SELECTION_RGB 0
++#define MRV_IMGEFF_COLOR_SELECTION_B 1
++#define MRV_IMGEFF_COLOR_SELECTION_G 2
++#define MRV_IMGEFF_COLOR_SELECTION_BG 3
++#define MRV_IMGEFF_COLOR_SELECTION_R 4
++#define MRV_IMGEFF_COLOR_SELECTION_BR 5
++#define MRV_IMGEFF_COLOR_SELECTION_GR 6
++#define MRV_IMGEFF_COLOR_SELECTION_BGR 7
++
++#define MRV_IMGEFF_EMB_COEF_21_EN
++#define MRV_IMGEFF_EMB_COEF_21_EN_MASK 0x00008000
++#define MRV_IMGEFF_EMB_COEF_21_EN_SHIFT 15
++#define MRV_IMGEFF_EMB_COEF_21
++#define MRV_IMGEFF_EMB_COEF_21_MASK 0x00007000
++#define MRV_IMGEFF_EMB_COEF_21_SHIFT 12
++
++#define MRV_IMGEFF_EMB_COEF_21_4
++#define MRV_IMGEFF_EMB_COEF_21_4_MASK 0x0000F000
++#define MRV_IMGEFF_EMB_COEF_21_4_SHIFT 12
++#define MRV_IMGEFF_EMB_COEF_13_EN
++#define MRV_IMGEFF_EMB_COEF_13_EN_MASK 0x00000800
++#define MRV_IMGEFF_EMB_COEF_13_EN_SHIFT 11
++#define MRV_IMGEFF_EMB_COEF_13
++#define MRV_IMGEFF_EMB_COEF_13_MASK 0x00000700
++#define MRV_IMGEFF_EMB_COEF_13_SHIFT 8
++
++#define MRV_IMGEFF_EMB_COEF_13_4
++#define MRV_IMGEFF_EMB_COEF_13_4_MASK 0x00000F00
++#define MRV_IMGEFF_EMB_COEF_13_4_SHIFT 8
++#define MRV_IMGEFF_EMB_COEF_12_EN
++#define MRV_IMGEFF_EMB_COEF_12_EN_MASK 0x00000080
++#define MRV_IMGEFF_EMB_COEF_12_EN_SHIFT 7
++#define MRV_IMGEFF_EMB_COEF_12
++#define MRV_IMGEFF_EMB_COEF_12_MASK 0x00000070
++#define MRV_IMGEFF_EMB_COEF_12_SHIFT 4
++
++#define MRV_IMGEFF_EMB_COEF_12_4
++#define MRV_IMGEFF_EMB_COEF_12_4_MASK 0x000000F0
++#define MRV_IMGEFF_EMB_COEF_12_4_SHIFT 4
++#define MRV_IMGEFF_EMB_COEF_11_EN
++#define MRV_IMGEFF_EMB_COEF_11_EN_MASK 0x00000008
++#define MRV_IMGEFF_EMB_COEF_11_EN_SHIFT 3
++#define MRV_IMGEFF_EMB_COEF_11
++#define MRV_IMGEFF_EMB_COEF_11_MASK 0x00000007
++#define MRV_IMGEFF_EMB_COEF_11_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_11_4
++#define MRV_IMGEFF_EMB_COEF_11_4_MASK 0x0000000F
++#define MRV_IMGEFF_EMB_COEF_11_4_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_32_EN
++#define MRV_IMGEFF_EMB_COEF_32_EN_MASK 0x00008000
++#define MRV_IMGEFF_EMB_COEF_32_EN_SHIFT 15
++#define MRV_IMGEFF_EMB_COEF_32
++#define MRV_IMGEFF_EMB_COEF_32_MASK 0x00007000
++#define MRV_IMGEFF_EMB_COEF_32_SHIFT 12
++
++#define MRV_IMGEFF_EMB_COEF_32_4
++#define MRV_IMGEFF_EMB_COEF_32_4_MASK 0x0000F000
++#define MRV_IMGEFF_EMB_COEF_32_4_SHIFT 12
++#define MRV_IMGEFF_EMB_COEF_31_EN
++#define MRV_IMGEFF_EMB_COEF_31_EN_MASK 0x00000800
++#define MRV_IMGEFF_EMB_COEF_31_EN_SHIFT 11
++#define MRV_IMGEFF_EMB_COEF_31
++#define MRV_IMGEFF_EMB_COEF_31_MASK 0x00000700
++#define MRV_IMGEFF_EMB_COEF_31_SHIFT 8
++
++#define MRV_IMGEFF_EMB_COEF_31_4
++#define MRV_IMGEFF_EMB_COEF_31_4_MASK 0x00000F00
++#define MRV_IMGEFF_EMB_COEF_31_4_SHIFT 8
++#define MRV_IMGEFF_EMB_COEF_23_EN
++#define MRV_IMGEFF_EMB_COEF_23_EN_MASK 0x00000080
++#define MRV_IMGEFF_EMB_COEF_23_EN_SHIFT 7
++#define MRV_IMGEFF_EMB_COEF_23
++#define MRV_IMGEFF_EMB_COEF_23_MASK 0x00000070
++#define MRV_IMGEFF_EMB_COEF_23_SHIFT 4
++
++#define MRV_IMGEFF_EMB_COEF_23_4
++#define MRV_IMGEFF_EMB_COEF_23_4_MASK 0x000000F0
++#define MRV_IMGEFF_EMB_COEF_23_4_SHIFT 4
++
++#define MRV_IMGEFF_EMB_COEF_22_EN
++#define MRV_IMGEFF_EMB_COEF_22_EN_MASK 0x00000008
++#define MRV_IMGEFF_EMB_COEF_22_EN_SHIFT 3
++#define MRV_IMGEFF_EMB_COEF_22
++#define MRV_IMGEFF_EMB_COEF_22_MASK 0x00000007
++#define MRV_IMGEFF_EMB_COEF_22_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_22_4
++#define MRV_IMGEFF_EMB_COEF_22_4_MASK 0x0000000F
++#define MRV_IMGEFF_EMB_COEF_22_4_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_13_EN
++#define MRV_IMGEFF_SKET_COEF_13_EN_MASK 0x00008000
++#define MRV_IMGEFF_SKET_COEF_13_EN_SHIFT 15
++#define MRV_IMGEFF_SKET_COEF_13
++#define MRV_IMGEFF_SKET_COEF_13_MASK 0x00007000
++#define MRV_IMGEFF_SKET_COEF_13_SHIFT 12
++
++#define MRV_IMGEFF_SKET_COEF_13_4
++#define MRV_IMGEFF_SKET_COEF_13_4_MASK 0x0000F000
++#define MRV_IMGEFF_SKET_COEF_13_4_SHIFT 12
++#define MRV_IMGEFF_SKET_COEF_12_EN
++#define MRV_IMGEFF_SKET_COEF_12_EN_MASK 0x00000800
++#define MRV_IMGEFF_SKET_COEF_12_EN_SHIFT 11
++#define MRV_IMGEFF_SKET_COEF_12
++#define MRV_IMGEFF_SKET_COEF_12_MASK 0x00000700
++#define MRV_IMGEFF_SKET_COEF_12_SHIFT 8
++
++#define MRV_IMGEFF_SKET_COEF_12_4
++#define MRV_IMGEFF_SKET_COEF_12_4_MASK 0x00000F00
++#define MRV_IMGEFF_SKET_COEF_12_4_SHIFT 8
++#define MRV_IMGEFF_SKET_COEF_11_EN
++#define MRV_IMGEFF_SKET_COEF_11_EN_MASK 0x00000080
++#define MRV_IMGEFF_SKET_COEF_11_EN_SHIFT 7
++#define MRV_IMGEFF_SKET_COEF_11
++#define MRV_IMGEFF_SKET_COEF_11_MASK 0x00000070
++#define MRV_IMGEFF_SKET_COEF_11_SHIFT 4
++
++#define MRV_IMGEFF_SKET_COEF_11_4
++#define MRV_IMGEFF_SKET_COEF_11_4_MASK 0x000000F0
++#define MRV_IMGEFF_SKET_COEF_11_4_SHIFT 4
++#define MRV_IMGEFF_EMB_COEF_33_EN
++#define MRV_IMGEFF_EMB_COEF_33_EN_MASK 0x00000008
++#define MRV_IMGEFF_EMB_COEF_33_EN_SHIFT 3
++#define MRV_IMGEFF_EMB_COEF_33
++#define MRV_IMGEFF_EMB_COEF_33_MASK 0x00000007
++#define MRV_IMGEFF_EMB_COEF_33_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_33_4
++#define MRV_IMGEFF_EMB_COEF_33_4_MASK 0x0000000F
++#define MRV_IMGEFF_EMB_COEF_33_4_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_31_EN
++#define MRV_IMGEFF_SKET_COEF_31_EN_MASK 0x00008000
++#define MRV_IMGEFF_SKET_COEF_31_EN_SHIFT 15
++#define MRV_IMGEFF_SKET_COEF_31
++#define MRV_IMGEFF_SKET_COEF_31_MASK 0x00007000
++#define MRV_IMGEFF_SKET_COEF_31_SHIFT 12
++
++#define MRV_IMGEFF_SKET_COEF_31_4
++#define MRV_IMGEFF_SKET_COEF_31_4_MASK 0x0000F000
++#define MRV_IMGEFF_SKET_COEF_31_4_SHIFT 12
++#define MRV_IMGEFF_SKET_COEF_23_EN
++#define MRV_IMGEFF_SKET_COEF_23_EN_MASK 0x00000800
++#define MRV_IMGEFF_SKET_COEF_23_EN_SHIFT 11
++#define MRV_IMGEFF_SKET_COEF_23
++#define MRV_IMGEFF_SKET_COEF_23_MASK 0x00000700
++#define MRV_IMGEFF_SKET_COEF_23_SHIFT 8
++
++#define MRV_IMGEFF_SKET_COEF_23_4
++#define MRV_IMGEFF_SKET_COEF_23_4_MASK 0x00000F00
++#define MRV_IMGEFF_SKET_COEF_23_4_SHIFT 8
++#define MRV_IMGEFF_SKET_COEF_22_EN
++#define MRV_IMGEFF_SKET_COEF_22_EN_MASK 0x00000080
++#define MRV_IMGEFF_SKET_COEF_22_EN_SHIFT 7
++#define MRV_IMGEFF_SKET_COEF_22
++#define MRV_IMGEFF_SKET_COEF_22_MASK 0x00000070
++#define MRV_IMGEFF_SKET_COEF_22_SHIFT 4
++
++#define MRV_IMGEFF_SKET_COEF_22_4
++#define MRV_IMGEFF_SKET_COEF_22_4_MASK 0x000000F0
++#define MRV_IMGEFF_SKET_COEF_22_4_SHIFT 4
++#define MRV_IMGEFF_SKET_COEF_21_EN
++#define MRV_IMGEFF_SKET_COEF_21_EN_MASK 0x00000008
++#define MRV_IMGEFF_SKET_COEF_21_EN_SHIFT 3
++#define MRV_IMGEFF_SKET_COEF_21
++#define MRV_IMGEFF_SKET_COEF_21_MASK 0x00000007
++#define MRV_IMGEFF_SKET_COEF_21_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_21_4
++#define MRV_IMGEFF_SKET_COEF_21_4_MASK 0x0000000F
++#define MRV_IMGEFF_SKET_COEF_21_4_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_33_EN
++#define MRV_IMGEFF_SKET_COEF_33_EN_MASK 0x00000080
++#define MRV_IMGEFF_SKET_COEF_33_EN_SHIFT 7
++#define MRV_IMGEFF_SKET_COEF_33
++#define MRV_IMGEFF_SKET_COEF_33_MASK 0x00000070
++#define MRV_IMGEFF_SKET_COEF_33_SHIFT 4
++
++#define MRV_IMGEFF_SKET_COEF_33_4
++#define MRV_IMGEFF_SKET_COEF_33_4_MASK 0x000000F0
++#define MRV_IMGEFF_SKET_COEF_33_4_SHIFT 4
++#define MRV_IMGEFF_SKET_COEF_32_EN
++#define MRV_IMGEFF_SKET_COEF_32_EN_MASK 0x00000008
++#define MRV_IMGEFF_SKET_COEF_32_EN_SHIFT 3
++#define MRV_IMGEFF_SKET_COEF_32
++#define MRV_IMGEFF_SKET_COEF_32_MASK 0x00000007
++#define MRV_IMGEFF_SKET_COEF_32_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_32_4
++#define MRV_IMGEFF_SKET_COEF_32_4_MASK 0x0000000F
++#define MRV_IMGEFF_SKET_COEF_32_4_SHIFT 0
++
++#define MRV_IMGEFF_INCR_CR
++#define MRV_IMGEFF_INCR_CR_MASK 0x0000FF00
++#define MRV_IMGEFF_INCR_CR_SHIFT 8
++#define MRV_IMGEFF_INCR_CB
++#define MRV_IMGEFF_INCR_CB_MASK 0x000000FF
++#define MRV_IMGEFF_INCR_CB_SHIFT 0
++
++#define MRV_IMGEFF_EFFECT_MODE_SHD
++#define MRV_IMGEFF_EFFECT_MODE_SHD_MASK 0x0000000E
++#define MRV_IMGEFF_EFFECT_MODE_SHD_SHIFT 1
++
++
++#define MRV_SI_TRANSPARENCY_MODE
++#define MRV_SI_TRANSPARENCY_MODE_MASK 0x00000004
++#define MRV_SI_TRANSPARENCY_MODE_SHIFT 2
++#define MRV_SI_TRANSPARENCY_MODE_DISABLED 1
++#define MRV_SI_TRANSPARENCY_MODE_ENABLED 0
++#define MRV_SI_REF_IMAGE
++#define MRV_SI_REF_IMAGE_MASK 0x00000002
++#define MRV_SI_REF_IMAGE_SHIFT 1
++#define MRV_SI_REF_IMAGE_MEM 1
++#define MRV_SI_REF_IMAGE_IE 0
++#define MRV_SI_BYPASS_MODE
++#define MRV_SI_BYPASS_MODE_MASK 0x00000001
++#define MRV_SI_BYPASS_MODE_SHIFT 0
++#define MRV_SI_BYPASS_MODE_BYPASS 0
++#define MRV_SI_BYPASS_MODE_PROCESS 1
++
++#define MRV_SI_OFFSET_X
++#define MRV_SI_OFFSET_X_MASK 0x00001FFE
++#define MRV_SI_OFFSET_X_SHIFT 0
++#define MRV_SI_OFFSET_X_MAX 0x00001FFE
++
++#define MRV_SI_OFFSET_Y
++#define MRV_SI_OFFSET_Y_MASK 0x00000FFF
++#define MRV_SI_OFFSET_Y_SHIFT 0
++#define MRV_SI_OFFSET_Y_MAX 0x00000FFF
++
++#define MRV_SI_Y_COMP
++#define MRV_SI_Y_COMP_MASK 0x000000FF
++#define MRV_SI_Y_COMP_SHIFT 0
++
++#define MRV_SI_CB_COMP
++#define MRV_SI_CB_COMP_MASK 0x000000FF
++#define MRV_SI_CB_COMP_SHIFT 0
++
++#define MRV_SI_CR_COMP
++#define MRV_SI_CR_COMP_MASK 0x000000FF
++#define MRV_SI_CR_COMP_SHIFT 0
++
++#define MRV_ISP_ISP_CSM_C_RANGE
++#define MRV_ISP_ISP_CSM_C_RANGE_MASK 0x00004000
++#define MRV_ISP_ISP_CSM_C_RANGE_SHIFT 14
++#define MRV_ISP_ISP_CSM_C_RANGE_BT601 0
++#define MRV_ISP_ISP_CSM_C_RANGE_FULL 1
++
++#define MRV_ISP_ISP_CSM_Y_RANGE
++#define MRV_ISP_ISP_CSM_Y_RANGE_MASK 0x00002000
++#define MRV_ISP_ISP_CSM_Y_RANGE_SHIFT 13
++#define MRV_ISP_ISP_CSM_Y_RANGE_BT601 0
++#define MRV_ISP_ISP_CSM_Y_RANGE_FULL 1
++#define MRV_ISP_ISP_FLASH_MODE
++#define MRV_ISP_ISP_FLASH_MODE_MASK 0x00001000
++#define MRV_ISP_ISP_FLASH_MODE_SHIFT 12
++#define MRV_ISP_ISP_FLASH_MODE_INDEP 0
++#define MRV_ISP_ISP_FLASH_MODE_SYNC 1
++#define MRV_ISP_ISP_GAMMA_OUT_ENABLE
++#define MRV_ISP_ISP_GAMMA_OUT_ENABLE_MASK 0x00000800
++#define MRV_ISP_ISP_GAMMA_OUT_ENABLE_SHIFT 11
++
++#define MRV_ISP_ISP_GEN_CFG_UPD
++#define MRV_ISP_ISP_GEN_CFG_UPD_MASK 0x00000400
++#define MRV_ISP_ISP_GEN_CFG_UPD_SHIFT 10
++
++#define MRV_ISP_ISP_CFG_UPD
++#define MRV_ISP_ISP_CFG_UPD_MASK 0x00000200
++#define MRV_ISP_ISP_CFG_UPD_SHIFT 9
++
++
++#define MRV_ISP_ISP_AWB_ENABLE
++#define MRV_ISP_ISP_AWB_ENABLE_MASK 0x00000080
++#define MRV_ISP_ISP_AWB_ENABLE_SHIFT 7
++#define MRV_ISP_ISP_GAMMA_IN_ENABLE
++#define MRV_ISP_ISP_GAMMA_IN_ENABLE_MASK 0x00000040
++#define MRV_ISP_ISP_GAMMA_IN_ENABLE_SHIFT 6
++
++#define MRV_ISP_ISP_INFORM_ENABLE
++#define MRV_ISP_ISP_INFORM_ENABLE_MASK 0x00000010
++#define MRV_ISP_ISP_INFORM_ENABLE_SHIFT 4
++#define MRV_ISP_ISP_MODE
++#define MRV_ISP_ISP_MODE_MASK 0x0000000E
++#define MRV_ISP_ISP_MODE_SHIFT 1
++#define MRV_ISP_ISP_MODE_RAW 0
++#define MRV_ISP_ISP_MODE_656 1
++#define MRV_ISP_ISP_MODE_601 2
++#define MRV_ISP_ISP_MODE_RGB 3
++#define MRV_ISP_ISP_MODE_DATA 4
++#define MRV_ISP_ISP_MODE_RGB656 5
++#define MRV_ISP_ISP_MODE_RAW656 6
++#define MRV_ISP_ISP_ENABLE
++#define MRV_ISP_ISP_ENABLE_MASK 0x00000001
++#define MRV_ISP_ISP_ENABLE_SHIFT 0
++
++#define MRV_ISP_INPUT_SELECTION
++#define MRV_ISP_INPUT_SELECTION_MASK 0x00007000
++#define MRV_ISP_INPUT_SELECTION_SHIFT 12
++#define MRV_ISP_INPUT_SELECTION_12EXT 0
++#define MRV_ISP_INPUT_SELECTION_10ZERO 1
++#define MRV_ISP_INPUT_SELECTION_10MSB 2
++#define MRV_ISP_INPUT_SELECTION_8ZERO 3
++#define MRV_ISP_INPUT_SELECTION_8MSB 4
++#define MRV_ISP_FIELD_SELECTION
++#define MRV_ISP_FIELD_SELECTION_MASK 0x00000600
++#define MRV_ISP_FIELD_SELECTION_SHIFT 9
++#define MRV_ISP_FIELD_SELECTION_BOTH 0
++#define MRV_ISP_FIELD_SELECTION_EVEN 1
++#define MRV_ISP_FIELD_SELECTION_ODD 2
++#define MRV_ISP_CCIR_SEQ
++#define MRV_ISP_CCIR_SEQ_MASK 0x00000180
++#define MRV_ISP_CCIR_SEQ_SHIFT 7
++#define MRV_ISP_CCIR_SEQ_YCBYCR 0
++#define MRV_ISP_CCIR_SEQ_YCRYCB 1
++#define MRV_ISP_CCIR_SEQ_CBYCRY 2
++#define MRV_ISP_CCIR_SEQ_CRYCBY 3
++#define MRV_ISP_CONV_422
++#define MRV_ISP_CONV_422_MASK 0x00000060
++#define MRV_ISP_CONV_422_SHIFT 5
++#define MRV_ISP_CONV_422_CO 0
++#define MRV_ISP_CONV_422_INTER 1
++#define MRV_ISP_CONV_422_NONCO 2
++#define MRV_ISP_BAYER_PAT
++#define MRV_ISP_BAYER_PAT_MASK 0x00000018
++#define MRV_ISP_BAYER_PAT_SHIFT 3
++#define MRV_ISP_BAYER_PAT_RG 0
++#define MRV_ISP_BAYER_PAT_GR 1
++#define MRV_ISP_BAYER_PAT_GB 2
++#define MRV_ISP_BAYER_PAT_BG 3
++#define MRV_ISP_VSYNC_POL
++#define MRV_ISP_VSYNC_POL_MASK 0x00000004
++#define MRV_ISP_VSYNC_POL_SHIFT 2
++#define MRV_ISP_HSYNC_POL
++#define MRV_ISP_HSYNC_POL_MASK 0x00000002
++#define MRV_ISP_HSYNC_POL_SHIFT 1
++#define MRV_ISP_SAMPLE_EDGE
++#define MRV_ISP_SAMPLE_EDGE_MASK 0x00000001
++#define MRV_ISP_SAMPLE_EDGE_SHIFT 0
++
++#define MRV_ISP_ACQ_H_OFFS
++#define MRV_ISP_ACQ_H_OFFS_MASK 0x00003FFF
++#define MRV_ISP_ACQ_H_OFFS_SHIFT 0
++
++#define MRV_ISP_ACQ_V_OFFS
++#define MRV_ISP_ACQ_V_OFFS_MASK 0x00000FFF
++#define MRV_ISP_ACQ_V_OFFS_SHIFT 0
++
++#define MRV_ISP_ACQ_H_SIZE
++#define MRV_ISP_ACQ_H_SIZE_MASK 0x00003FFF
++#define MRV_ISP_ACQ_H_SIZE_SHIFT 0
++
++#define MRV_ISP_ACQ_V_SIZE
++#define MRV_ISP_ACQ_V_SIZE_MASK 0x00000FFF
++#define MRV_ISP_ACQ_V_SIZE_SHIFT 0
++
++
++#define MRV_ISP_ACQ_NR_FRAMES
++#define MRV_ISP_ACQ_NR_FRAMES_MASK 0x000003FF
++#define MRV_ISP_ACQ_NR_FRAMES_SHIFT 0
++#define MRV_ISP_ACQ_NR_FRAMES_MAX \
++ (MRV_ISP_ACQ_NR_FRAMES_MASK >> MRV_ISP_ACQ_NR_FRAMES_SHIFT)
++
++#define MRV_ISP_GAMMA_DX_8
++#define MRV_ISP_GAMMA_DX_8_MASK 0x70000000
++#define MRV_ISP_GAMMA_DX_8_SHIFT 28
++
++#define MRV_ISP_GAMMA_DX_7
++#define MRV_ISP_GAMMA_DX_7_MASK 0x07000000
++#define MRV_ISP_GAMMA_DX_7_SHIFT 24
++
++#define MRV_ISP_GAMMA_DX_6
++#define MRV_ISP_GAMMA_DX_6_MASK 0x00700000
++#define MRV_ISP_GAMMA_DX_6_SHIFT 20
++
++#define MRV_ISP_GAMMA_DX_5
++#define MRV_ISP_GAMMA_DX_5_MASK 0x00070000
++#define MRV_ISP_GAMMA_DX_5_SHIFT 16
++
++#define MRV_ISP_GAMMA_DX_4
++#define MRV_ISP_GAMMA_DX_4_MASK 0x00007000
++#define MRV_ISP_GAMMA_DX_4_SHIFT 12
++
++#define MRV_ISP_GAMMA_DX_3
++#define MRV_ISP_GAMMA_DX_3_MASK 0x00000700
++#define MRV_ISP_GAMMA_DX_3_SHIFT 8
++
++#define MRV_ISP_GAMMA_DX_2
++#define MRV_ISP_GAMMA_DX_2_MASK 0x00000070
++#define MRV_ISP_GAMMA_DX_2_SHIFT 4
++
++#define MRV_ISP_GAMMA_DX_1
++#define MRV_ISP_GAMMA_DX_1_MASK 0x00000007
++#define MRV_ISP_GAMMA_DX_1_SHIFT 0
++
++#define MRV_ISP_GAMMA_DX_16
++#define MRV_ISP_GAMMA_DX_16_MASK 0x70000000
++#define MRV_ISP_GAMMA_DX_16_SHIFT 28
++
++#define MRV_ISP_GAMMA_DX_15
++#define MRV_ISP_GAMMA_DX_15_MASK 0x07000000
++#define MRV_ISP_GAMMA_DX_15_SHIFT 24
++
++#define MRV_ISP_GAMMA_DX_14
++#define MRV_ISP_GAMMA_DX_14_MASK 0x00700000
++#define MRV_ISP_GAMMA_DX_14_SHIFT 20
++
++#define MRV_ISP_GAMMA_DX_13
++#define MRV_ISP_GAMMA_DX_13_MASK 0x00070000
++#define MRV_ISP_GAMMA_DX_13_SHIFT 16
++
++#define MRV_ISP_GAMMA_DX_12
++#define MRV_ISP_GAMMA_DX_12_MASK 0x00007000
++#define MRV_ISP_GAMMA_DX_12_SHIFT 12
++
++#define MRV_ISP_GAMMA_DX_11
++#define MRV_ISP_GAMMA_DX_11_MASK 0x00000700
++#define MRV_ISP_GAMMA_DX_11_SHIFT 8
++
++#define MRV_ISP_GAMMA_DX_10
++#define MRV_ISP_GAMMA_DX_10_MASK 0x00000070
++#define MRV_ISP_GAMMA_DX_10_SHIFT 4
++
++#define MRV_ISP_GAMMA_DX_9
++#define MRV_ISP_GAMMA_DX_9_MASK 0x00000007
++#define MRV_ISP_GAMMA_DX_9_SHIFT 0
++
++#define MRV_ISP_GAMMA_Y
++
++#define MRV_ISP_GAMMA_Y_MASK 0x00000FFF
++
++#define MRV_ISP_GAMMA_Y_SHIFT 0
++#define MRV_ISP_GAMMA_Y_MAX (MRV_ISP_GAMMA_Y_MASK >> MRV_ISP_GAMMA_Y_SHIFT)
++
++#define MRV_ISP_GAMMA_R_Y
++#define MRV_ISP_GAMMA_R_Y_MASK MRV_ISP_GAMMA_Y_MASK
++#define MRV_ISP_GAMMA_R_Y_SHIFT MRV_ISP_GAMMA_Y_SHIFT
++
++#define MRV_ISP_GAMMA_G_Y
++#define MRV_ISP_GAMMA_G_Y_MASK MRV_ISP_GAMMA_Y_MASK
++#define MRV_ISP_GAMMA_G_Y_SHIFT MRV_ISP_GAMMA_Y_SHIFT
++
++#define MRV_ISP_GAMMA_B_Y
++#define MRV_ISP_GAMMA_B_Y_MASK MRV_ISP_GAMMA_Y_MASK
++#define MRV_ISP_GAMMA_B_Y_SHIFT MRV_ISP_GAMMA_Y_SHIFT
++
++ #define MRV_ISP_AWB_MEAS_MODE
++ #define MRV_ISP_AWB_MEAS_MODE_MASK 0x80000000
++ #define MRV_ISP_AWB_MEAS_MODE_SHIFT 31
++#define MRV_ISP_AWB_MAX_EN
++#define MRV_ISP_AWB_MAX_EN_MASK 0x00000004
++#define MRV_ISP_AWB_MAX_EN_SHIFT 2
++#define MRV_ISP_AWB_MODE
++#define MRV_ISP_AWB_MODE_MASK 0x00000003
++#define MRV_ISP_AWB_MODE_SHIFT 0
++#define MRV_ISP_AWB_MODE_MEAS 2
++#define MRV_ISP_AWB_MODE_NOMEAS 0
++
++#define MRV_ISP_AWB_H_OFFS
++#define MRV_ISP_AWB_H_OFFS_MASK 0x00000FFF
++#define MRV_ISP_AWB_H_OFFS_SHIFT 0
++
++#define MRV_ISP_AWB_V_OFFS
++#define MRV_ISP_AWB_V_OFFS_MASK 0x00000FFF
++#define MRV_ISP_AWB_V_OFFS_SHIFT 0
++
++#define MRV_ISP_AWB_H_SIZE
++#define MRV_ISP_AWB_H_SIZE_MASK 0x00001FFF
++#define MRV_ISP_AWB_H_SIZE_SHIFT 0
++
++#define MRV_ISP_AWB_V_SIZE
++#define MRV_ISP_AWB_V_SIZE_MASK 0x00000FFF
++#define MRV_ISP_AWB_V_SIZE_SHIFT 0
++
++
++#define MRV_ISP_AWB_FRAMES
++#define MRV_ISP_AWB_FRAMES_MASK 0x00000007
++#define MRV_ISP_AWB_FRAMES_SHIFT 0
++
++#define MRV_ISP_AWB_REF_CR__MAX_R
++#define MRV_ISP_AWB_REF_CR__MAX_R_MASK 0x0000FF00
++#define MRV_ISP_AWB_REF_CR__MAX_R_SHIFT 8
++#define MRV_ISP_AWB_REF_CB__MAX_B
++#define MRV_ISP_AWB_REF_CB__MAX_B_MASK 0x000000FF
++#define MRV_ISP_AWB_REF_CB__MAX_B_SHIFT 0
++
++#define MRV_ISP_AWB_MAX_Y
++#define MRV_ISP_AWB_MAX_Y_MASK 0xFF000000
++#define MRV_ISP_AWB_MAX_Y_SHIFT 24
++
++#define MRV_ISP_AWB_MIN_Y__MAX_G
++#define MRV_ISP_AWB_MIN_Y__MAX_G_MASK 0x00FF0000
++#define MRV_ISP_AWB_MIN_Y__MAX_G_SHIFT 16
++
++#define MRV_ISP_AWB_MAX_CSUM
++#define MRV_ISP_AWB_MAX_CSUM_MASK 0x0000FF00
++#define MRV_ISP_AWB_MAX_CSUM_SHIFT 8
++#define MRV_ISP_AWB_MIN_C
++#define MRV_ISP_AWB_MIN_C_MASK 0x000000FF
++#define MRV_ISP_AWB_MIN_C_SHIFT 0
++
++#define MRV_ISP_AWB_GAIN_GR
++#define MRV_ISP_AWB_GAIN_GR_MASK 0x03FF0000
++#define MRV_ISP_AWB_GAIN_GR_SHIFT 16
++#define MRV_ISP_AWB_GAIN_GR_MAX (MRV_ISP_AWB_GAIN_GR_MASK >> \
++ MRV_ISP_AWB_GAIN_GR_SHIFT)
++#define MRV_ISP_AWB_GAIN_GB
++#define MRV_ISP_AWB_GAIN_GB_MASK 0x000003FF
++#define MRV_ISP_AWB_GAIN_GB_SHIFT 0
++#define MRV_ISP_AWB_GAIN_GB_MAX (MRV_ISP_AWB_GAIN_GB_MASK >> \
++ MRV_ISP_AWB_GAIN_GB_SHIFT)
++
++#define MRV_ISP_AWB_GAIN_R
++#define MRV_ISP_AWB_GAIN_R_MASK 0x03FF0000
++#define MRV_ISP_AWB_GAIN_R_SHIFT 16
++#define MRV_ISP_AWB_GAIN_R_MAX (MRV_ISP_AWB_GAIN_R_MASK >> \
++ MRV_ISP_AWB_GAIN_R_SHIFT)
++#define MRV_ISP_AWB_GAIN_B
++#define MRV_ISP_AWB_GAIN_B_MASK 0x000003FF
++#define MRV_ISP_AWB_GAIN_B_SHIFT 0
++#define MRV_ISP_AWB_GAIN_B_MAX (MRV_ISP_AWB_GAIN_B_MASK >> \
++ MRV_ISP_AWB_GAIN_B_SHIFT)
++
++#define MRV_ISP_AWB_WHITE_CNT
++#define MRV_ISP_AWB_WHITE_CNT_MASK 0x03FFFFFF
++#define MRV_ISP_AWB_WHITE_CNT_SHIFT 0
++
++#define MRV_ISP_AWB_MEAN_Y__G
++#define MRV_ISP_AWB_MEAN_Y__G_MASK 0x00FF0000
++#define MRV_ISP_AWB_MEAN_Y__G_SHIFT 16
++#define MRV_ISP_AWB_MEAN_CB__B
++#define MRV_ISP_AWB_MEAN_CB__B_MASK 0x0000FF00
++#define MRV_ISP_AWB_MEAN_CB__B_SHIFT 8
++#define MRV_ISP_AWB_MEAN_CR__R
++#define MRV_ISP_AWB_MEAN_CR__R_MASK 0x000000FF
++#define MRV_ISP_AWB_MEAN_CR__R_SHIFT 0
++
++
++
++#define MRV_ISP_CC_COEFF_0
++#define MRV_ISP_CC_COEFF_0_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_0_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_1
++#define MRV_ISP_CC_COEFF_1_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_1_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_2
++#define MRV_ISP_CC_COEFF_2_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_2_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_3
++#define MRV_ISP_CC_COEFF_3_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_3_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_4
++#define MRV_ISP_CC_COEFF_4_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_4_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_5
++#define MRV_ISP_CC_COEFF_5_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_5_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_6
++#define MRV_ISP_CC_COEFF_6_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_6_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_7
++#define MRV_ISP_CC_COEFF_7_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_7_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_8
++#define MRV_ISP_CC_COEFF_8_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_8_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_H_OFFS
++#define MRV_ISP_ISP_OUT_H_OFFS_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_H_OFFS_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_V_OFFS
++#define MRV_ISP_ISP_OUT_V_OFFS_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_OFFS_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_H_SIZE
++#define MRV_ISP_ISP_OUT_H_SIZE_MASK 0x00003FFF
++#define MRV_ISP_ISP_OUT_H_SIZE_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_V_SIZE
++#define MRV_ISP_ISP_OUT_V_SIZE_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_SIZE_SHIFT 0
++
++#define MRV_ISP_DEMOSAIC_BYPASS
++#define MRV_ISP_DEMOSAIC_BYPASS_MASK 0x00000400
++#define MRV_ISP_DEMOSAIC_BYPASS_SHIFT 10
++
++#define MRV_ISP_DEMOSAIC_MODE
++#define MRV_ISP_DEMOSAIC_MODE_MASK 0x00000300
++#define MRV_ISP_DEMOSAIC_MODE_SHIFT 8
++#define MRV_ISP_DEMOSAIC_MODE_STD 0
++#define MRV_ISP_DEMOSAIC_MODE_ENH 1
++#define MRV_ISP_DEMOSAIC_TH
++#define MRV_ISP_DEMOSAIC_TH_MASK 0x000000FF
++#define MRV_ISP_DEMOSAIC_TH_SHIFT 0
++
++#define MRV_ISP_S_HSYNC
++
++#define MRV_ISP_S_HSYNC_MASK 0x80000000
++#define MRV_ISP_S_HSYNC_SHIFT 31
++
++#define MRV_ISP_S_VSYNC
++
++#define MRV_ISP_S_VSYNC_MASK 0x40000000
++#define MRV_ISP_S_VSYNC_SHIFT 30
++
++#define MRV_ISP_S_DATA
++
++#define MRV_ISP_S_DATA_MASK 0x0FFF0000
++
++#define MRV_ISP_S_DATA_SHIFT 16
++#define MRV_ISP_INFORM_FIELD
++#define MRV_ISP_INFORM_FIELD_MASK 0x00000004
++#define MRV_ISP_INFORM_FIELD_SHIFT 2
++#define MRV_ISP_INFORM_FIELD_ODD 0
++#define MRV_ISP_INFORM_FIELD_EVEN 1
++#define MRV_ISP_INFORM_EN_SHD
++#define MRV_ISP_INFORM_EN_SHD_MASK 0x00000002
++#define MRV_ISP_INFORM_EN_SHD_SHIFT 1
++#define MRV_ISP_ISP_ON_SHD
++#define MRV_ISP_ISP_ON_SHD_MASK 0x00000001
++#define MRV_ISP_ISP_ON_SHD_SHIFT 0
++
++
++#define MRV_ISP_ISP_OUT_H_OFFS_SHD
++#define MRV_ISP_ISP_OUT_H_OFFS_SHD_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_H_OFFS_SHD_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_V_OFFS_SHD
++#define MRV_ISP_ISP_OUT_V_OFFS_SHD_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_OFFS_SHD_SHIFT 0
++
++
++#define MRV_ISP_ISP_OUT_H_SIZE_SHD
++#define MRV_ISP_ISP_OUT_H_SIZE_SHD_MASK 0x00003FFF
++#define MRV_ISP_ISP_OUT_H_SIZE_SHD_SHIFT 0
++
++
++#define MRV_ISP_ISP_OUT_V_SIZE_SHD
++#define MRV_ISP_ISP_OUT_V_SIZE_SHD_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_SIZE_SHD_SHIFT 0
++
++#define MRV_ISP_IMSC_EXP_END
++#define MRV_ISP_IMSC_EXP_END_MASK 0x00040000
++#define MRV_ISP_IMSC_EXP_END_SHIFT 18
++
++#define MRV_ISP_IMSC_FLASH_CAP
++#define MRV_ISP_IMSC_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_IMSC_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_IMSC_BP_DET
++#define MRV_ISP_IMSC_BP_DET_MASK 0x00010000
++#define MRV_ISP_IMSC_BP_DET_SHIFT 16
++#define MRV_ISP_IMSC_BP_NEW_TAB_FUL
++#define MRV_ISP_IMSC_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_IMSC_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_IMSC_AFM_FIN
++#define MRV_ISP_IMSC_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_IMSC_AFM_FIN_SHIFT 14
++#define MRV_ISP_IMSC_AFM_LUM_OF
++#define MRV_ISP_IMSC_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_IMSC_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_IMSC_AFM_SUM_OF
++#define MRV_ISP_IMSC_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_IMSC_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_IMSC_SHUTTER_OFF
++#define MRV_ISP_IMSC_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_IMSC_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_IMSC_SHUTTER_ON
++#define MRV_ISP_IMSC_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_IMSC_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_IMSC_FLASH_OFF
++#define MRV_ISP_IMSC_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_IMSC_FLASH_OFF_SHIFT 9
++#define MRV_ISP_IMSC_FLASH_ON
++#define MRV_ISP_IMSC_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_IMSC_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_IMSC_H_START
++#define MRV_ISP_IMSC_H_START_MASK 0x00000080
++#define MRV_ISP_IMSC_H_START_SHIFT 7
++#define MRV_ISP_IMSC_V_START
++#define MRV_ISP_IMSC_V_START_MASK 0x00000040
++#define MRV_ISP_IMSC_V_START_SHIFT 6
++#define MRV_ISP_IMSC_FRAME_IN
++#define MRV_ISP_IMSC_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_IMSC_FRAME_IN_SHIFT 5
++#define MRV_ISP_IMSC_AWB_DONE
++#define MRV_ISP_IMSC_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_IMSC_AWB_DONE_SHIFT 4
++#define MRV_ISP_IMSC_PIC_SIZE_ERR
++#define MRV_ISP_IMSC_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_IMSC_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_IMSC_DATA_LOSS
++#define MRV_ISP_IMSC_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_IMSC_DATA_LOSS_SHIFT 2
++#define MRV_ISP_IMSC_FRAME
++#define MRV_ISP_IMSC_FRAME_MASK 0x00000002
++#define MRV_ISP_IMSC_FRAME_SHIFT 1
++#define MRV_ISP_IMSC_ISP_OFF
++#define MRV_ISP_IMSC_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_IMSC_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_IMSC_ALL
++#define MRV_ISP_IMSC_ALL_MASK \
++(0 \
++| MRV_ISP_IMSC_EXP_END_MASK \
++| MRV_ISP_IMSC_FLASH_CAP_MASK \
++| MRV_ISP_IMSC_BP_DET_MASK \
++| MRV_ISP_IMSC_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_IMSC_AFM_FIN_MASK \
++| MRV_ISP_IMSC_AFM_LUM_OF_MASK \
++| MRV_ISP_IMSC_AFM_SUM_OF_MASK \
++| MRV_ISP_IMSC_SHUTTER_OFF_MASK \
++| MRV_ISP_IMSC_SHUTTER_ON_MASK \
++| MRV_ISP_IMSC_FLASH_OFF_MASK \
++| MRV_ISP_IMSC_FLASH_ON_MASK \
++| MRV_ISP_IMSC_H_START_MASK \
++| MRV_ISP_IMSC_V_START_MASK \
++| MRV_ISP_IMSC_FRAME_IN_MASK \
++| MRV_ISP_IMSC_AWB_DONE_MASK \
++| MRV_ISP_IMSC_PIC_SIZE_ERR_MASK \
++| MRV_ISP_IMSC_DATA_LOSS_MASK \
++| MRV_ISP_IMSC_FRAME_MASK \
++| MRV_ISP_IMSC_ISP_OFF_MASK \
++)
++#define MRV_ISP_IMSC_ALL_SHIFT 0
++
++#define MRV_ISP_RIS_EXP_END
++#define MRV_ISP_RIS_EXP_END_MASK 0x00040000
++#define MRV_ISP_RIS_EXP_END_SHIFT 18
++
++#define MRV_ISP_RIS_FLASH_CAP
++#define MRV_ISP_RIS_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_RIS_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_RIS_BP_DET
++#define MRV_ISP_RIS_BP_DET_MASK 0x00010000
++#define MRV_ISP_RIS_BP_DET_SHIFT 16
++#define MRV_ISP_RIS_BP_NEW_TAB_FUL
++#define MRV_ISP_RIS_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_RIS_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_RIS_AFM_FIN
++#define MRV_ISP_RIS_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_RIS_AFM_FIN_SHIFT 14
++#define MRV_ISP_RIS_AFM_LUM_OF
++#define MRV_ISP_RIS_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_RIS_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_RIS_AFM_SUM_OF
++#define MRV_ISP_RIS_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_RIS_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_RIS_SHUTTER_OFF
++#define MRV_ISP_RIS_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_RIS_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_RIS_SHUTTER_ON
++#define MRV_ISP_RIS_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_RIS_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_RIS_FLASH_OFF
++#define MRV_ISP_RIS_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_RIS_FLASH_OFF_SHIFT 9
++#define MRV_ISP_RIS_FLASH_ON
++#define MRV_ISP_RIS_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_RIS_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_RIS_H_START
++#define MRV_ISP_RIS_H_START_MASK 0x00000080
++#define MRV_ISP_RIS_H_START_SHIFT 7
++#define MRV_ISP_RIS_V_START
++#define MRV_ISP_RIS_V_START_MASK 0x00000040
++#define MRV_ISP_RIS_V_START_SHIFT 6
++#define MRV_ISP_RIS_FRAME_IN
++#define MRV_ISP_RIS_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_RIS_FRAME_IN_SHIFT 5
++#define MRV_ISP_RIS_AWB_DONE
++#define MRV_ISP_RIS_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_RIS_AWB_DONE_SHIFT 4
++#define MRV_ISP_RIS_PIC_SIZE_ERR
++#define MRV_ISP_RIS_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_RIS_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_RIS_DATA_LOSS
++#define MRV_ISP_RIS_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_RIS_DATA_LOSS_SHIFT 2
++#define MRV_ISP_RIS_FRAME
++#define MRV_ISP_RIS_FRAME_MASK 0x00000002
++#define MRV_ISP_RIS_FRAME_SHIFT 1
++#define MRV_ISP_RIS_ISP_OFF
++#define MRV_ISP_RIS_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_RIS_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_RIS_ALL
++#define MRV_ISP_RIS_ALL_MASK \
++(0 \
++| MRV_ISP_RIS_EXP_END_MASK \
++| MRV_ISP_RIS_FLASH_CAP_MASK \
++| MRV_ISP_RIS_BP_DET_MASK \
++| MRV_ISP_RIS_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_RIS_AFM_FIN_MASK \
++| MRV_ISP_RIS_AFM_LUM_OF_MASK \
++| MRV_ISP_RIS_AFM_SUM_OF_MASK \
++| MRV_ISP_RIS_SHUTTER_OFF_MASK \
++| MRV_ISP_RIS_SHUTTER_ON_MASK \
++| MRV_ISP_RIS_FLASH_OFF_MASK \
++| MRV_ISP_RIS_FLASH_ON_MASK \
++| MRV_ISP_RIS_H_START_MASK \
++| MRV_ISP_RIS_V_START_MASK \
++| MRV_ISP_RIS_FRAME_IN_MASK \
++| MRV_ISP_RIS_AWB_DONE_MASK \
++| MRV_ISP_RIS_PIC_SIZE_ERR_MASK \
++| MRV_ISP_RIS_DATA_LOSS_MASK \
++| MRV_ISP_RIS_FRAME_MASK \
++| MRV_ISP_RIS_ISP_OFF_MASK \
++)
++#define MRV_ISP_RIS_ALL_SHIFT 0
++
++#define MRV_ISP_MIS_EXP_END
++#define MRV_ISP_MIS_EXP_END_MASK 0x00040000
++#define MRV_ISP_MIS_EXP_END_SHIFT 18
++
++#define MRV_ISP_MIS_FLASH_CAP
++#define MRV_ISP_MIS_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_MIS_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_MIS_BP_DET
++#define MRV_ISP_MIS_BP_DET_MASK 0x00010000
++#define MRV_ISP_MIS_BP_DET_SHIFT 16
++#define MRV_ISP_MIS_BP_NEW_TAB_FUL
++#define MRV_ISP_MIS_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_MIS_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_MIS_AFM_FIN
++#define MRV_ISP_MIS_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_MIS_AFM_FIN_SHIFT 14
++#define MRV_ISP_MIS_AFM_LUM_OF
++#define MRV_ISP_MIS_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_MIS_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_MIS_AFM_SUM_OF
++#define MRV_ISP_MIS_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_MIS_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_MIS_SHUTTER_OFF
++#define MRV_ISP_MIS_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_MIS_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_MIS_SHUTTER_ON
++#define MRV_ISP_MIS_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_MIS_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_MIS_FLASH_OFF
++#define MRV_ISP_MIS_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_MIS_FLASH_OFF_SHIFT 9
++#define MRV_ISP_MIS_FLASH_ON
++#define MRV_ISP_MIS_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_MIS_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_MIS_H_START
++#define MRV_ISP_MIS_H_START_MASK 0x00000080
++#define MRV_ISP_MIS_H_START_SHIFT 7
++#define MRV_ISP_MIS_V_START
++#define MRV_ISP_MIS_V_START_MASK 0x00000040
++#define MRV_ISP_MIS_V_START_SHIFT 6
++#define MRV_ISP_MIS_FRAME_IN
++#define MRV_ISP_MIS_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_MIS_FRAME_IN_SHIFT 5
++#define MRV_ISP_MIS_AWB_DONE
++#define MRV_ISP_MIS_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_MIS_AWB_DONE_SHIFT 4
++#define MRV_ISP_MIS_PIC_SIZE_ERR
++#define MRV_ISP_MIS_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_MIS_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_MIS_DATA_LOSS
++#define MRV_ISP_MIS_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_MIS_DATA_LOSS_SHIFT 2
++#define MRV_ISP_MIS_FRAME
++#define MRV_ISP_MIS_FRAME_MASK 0x00000002
++#define MRV_ISP_MIS_FRAME_SHIFT 1
++#define MRV_ISP_MIS_ISP_OFF
++#define MRV_ISP_MIS_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_MIS_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_MIS_ALL
++#define MRV_ISP_MIS_ALL_MASK \
++(0 \
++| MRV_ISP_MIS_EXP_END_MASK \
++| MRV_ISP_MIS_FLASH_CAP_MASK \
++| MRV_ISP_MIS_BP_DET_MASK \
++| MRV_ISP_MIS_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_MIS_AFM_FIN_MASK \
++| MRV_ISP_MIS_AFM_LUM_OF_MASK \
++| MRV_ISP_MIS_AFM_SUM_OF_MASK \
++| MRV_ISP_MIS_SHUTTER_OFF_MASK \
++| MRV_ISP_MIS_SHUTTER_ON_MASK \
++| MRV_ISP_MIS_FLASH_OFF_MASK \
++| MRV_ISP_MIS_FLASH_ON_MASK \
++| MRV_ISP_MIS_H_START_MASK \
++| MRV_ISP_MIS_V_START_MASK \
++| MRV_ISP_MIS_FRAME_IN_MASK \
++| MRV_ISP_MIS_AWB_DONE_MASK \
++| MRV_ISP_MIS_PIC_SIZE_ERR_MASK \
++| MRV_ISP_MIS_DATA_LOSS_MASK \
++| MRV_ISP_MIS_FRAME_MASK \
++| MRV_ISP_MIS_ISP_OFF_MASK \
++)
++#define MRV_ISP_MIS_ALL_SHIFT 0
++
++#define MRV_ISP_ICR_EXP_END
++#define MRV_ISP_ICR_EXP_END_MASK 0x00040000
++#define MRV_ISP_ICR_EXP_END_SHIFT 18
++#define MRV_ISP_ICR_FLASH_CAP
++#define MRV_ISP_ICR_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_ICR_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_ICR_BP_DET
++#define MRV_ISP_ICR_BP_DET_MASK 0x00010000
++#define MRV_ISP_ICR_BP_DET_SHIFT 16
++#define MRV_ISP_ICR_BP_NEW_TAB_FUL
++#define MRV_ISP_ICR_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_ICR_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_ICR_AFM_FIN
++#define MRV_ISP_ICR_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_ICR_AFM_FIN_SHIFT 14
++#define MRV_ISP_ICR_AFM_LUM_OF
++#define MRV_ISP_ICR_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_ICR_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_ICR_AFM_SUM_OF
++#define MRV_ISP_ICR_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_ICR_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_ICR_SHUTTER_OFF
++#define MRV_ISP_ICR_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_ICR_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_ICR_SHUTTER_ON
++#define MRV_ISP_ICR_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_ICR_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_ICR_FLASH_OFF
++#define MRV_ISP_ICR_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_ICR_FLASH_OFF_SHIFT 9
++#define MRV_ISP_ICR_FLASH_ON
++#define MRV_ISP_ICR_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_ICR_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_ICR_H_START
++#define MRV_ISP_ICR_H_START_MASK 0x00000080
++#define MRV_ISP_ICR_H_START_SHIFT 7
++#define MRV_ISP_ICR_V_START
++#define MRV_ISP_ICR_V_START_MASK 0x00000040
++#define MRV_ISP_ICR_V_START_SHIFT 6
++#define MRV_ISP_ICR_FRAME_IN
++#define MRV_ISP_ICR_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_ICR_FRAME_IN_SHIFT 5
++#define MRV_ISP_ICR_AWB_DONE
++#define MRV_ISP_ICR_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_ICR_AWB_DONE_SHIFT 4
++#define MRV_ISP_ICR_PIC_SIZE_ERR
++#define MRV_ISP_ICR_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_ICR_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_ICR_DATA_LOSS
++#define MRV_ISP_ICR_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_ICR_DATA_LOSS_SHIFT 2
++#define MRV_ISP_ICR_FRAME
++#define MRV_ISP_ICR_FRAME_MASK 0x00000002
++#define MRV_ISP_ICR_FRAME_SHIFT 1
++#define MRV_ISP_ICR_ISP_OFF
++#define MRV_ISP_ICR_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_ICR_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_ICR_ALL
++#define MRV_ISP_ICR_ALL_MASK \
++(0 \
++| MRV_ISP_ICR_EXP_END_MASK \
++| MRV_ISP_ICR_FLASH_CAP_MASK \
++| MRV_ISP_ICR_BP_DET_MASK \
++| MRV_ISP_ICR_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_ICR_AFM_FIN_MASK \
++| MRV_ISP_ICR_AFM_LUM_OF_MASK \
++| MRV_ISP_ICR_AFM_SUM_OF_MASK \
++| MRV_ISP_ICR_SHUTTER_OFF_MASK \
++| MRV_ISP_ICR_SHUTTER_ON_MASK \
++| MRV_ISP_ICR_FLASH_OFF_MASK \
++| MRV_ISP_ICR_FLASH_ON_MASK \
++| MRV_ISP_ICR_H_START_MASK \
++| MRV_ISP_ICR_V_START_MASK \
++| MRV_ISP_ICR_FRAME_IN_MASK \
++| MRV_ISP_ICR_AWB_DONE_MASK \
++| MRV_ISP_ICR_PIC_SIZE_ERR_MASK \
++| MRV_ISP_ICR_DATA_LOSS_MASK \
++| MRV_ISP_ICR_FRAME_MASK \
++| MRV_ISP_ICR_ISP_OFF_MASK \
++)
++#define MRV_ISP_ICR_ALL_SHIFT 0
++
++#define MRV_ISP_ISR_EXP_END
++#define MRV_ISP_ISR_EXP_END_MASK 0x00040000
++#define MRV_ISP_ISR_EXP_END_SHIFT 18
++#define MRV_ISP_ISR_FLASH_CAP
++#define MRV_ISP_ISR_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_ISR_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_ISR_BP_DET
++#define MRV_ISP_ISR_BP_DET_MASK 0x00010000
++#define MRV_ISP_ISR_BP_DET_SHIFT 16
++#define MRV_ISP_ISR_BP_NEW_TAB_FUL
++#define MRV_ISP_ISR_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_ISR_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_ISR_AFM_FIN
++#define MRV_ISP_ISR_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_ISR_AFM_FIN_SHIFT 14
++#define MRV_ISP_ISR_AFM_LUM_OF
++#define MRV_ISP_ISR_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_ISR_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_ISR_AFM_SUM_OF
++#define MRV_ISP_ISR_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_ISR_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_ISR_SHUTTER_OFF
++#define MRV_ISP_ISR_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_ISR_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_ISR_SHUTTER_ON
++#define MRV_ISP_ISR_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_ISR_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_ISR_FLASH_OFF
++#define MRV_ISP_ISR_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_ISR_FLASH_OFF_SHIFT 9
++#define MRV_ISP_ISR_FLASH_ON
++#define MRV_ISP_ISR_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_ISR_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_ISR_H_START
++#define MRV_ISP_ISR_H_START_MASK 0x00000080
++#define MRV_ISP_ISR_H_START_SHIFT 7
++#define MRV_ISP_ISR_V_START
++#define MRV_ISP_ISR_V_START_MASK 0x00000040
++#define MRV_ISP_ISR_V_START_SHIFT 6
++#define MRV_ISP_ISR_FRAME_IN
++#define MRV_ISP_ISR_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_ISR_FRAME_IN_SHIFT 5
++#define MRV_ISP_ISR_AWB_DONE
++#define MRV_ISP_ISR_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_ISR_AWB_DONE_SHIFT 4
++#define MRV_ISP_ISR_PIC_SIZE_ERR
++#define MRV_ISP_ISR_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_ISR_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_ISR_DATA_LOSS
++#define MRV_ISP_ISR_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_ISR_DATA_LOSS_SHIFT 2
++#define MRV_ISP_ISR_FRAME
++#define MRV_ISP_ISR_FRAME_MASK 0x00000002
++#define MRV_ISP_ISR_FRAME_SHIFT 1
++#define MRV_ISP_ISR_ISP_OFF
++#define MRV_ISP_ISR_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_ISR_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_ISR_ALL
++#define MRV_ISP_ISR_ALL_MASK \
++(0 \
++| MRV_ISP_ISR_EXP_END_MASK \
++| MRV_ISP_ISR_FLASH_CAP_MASK \
++| MRV_ISP_ISR_BP_DET_MASK \
++| MRV_ISP_ISR_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_ISR_AFM_FIN_MASK \
++| MRV_ISP_ISR_AFM_LUM_OF_MASK \
++| MRV_ISP_ISR_AFM_SUM_OF_MASK \
++| MRV_ISP_ISR_SHUTTER_OFF_MASK \
++| MRV_ISP_ISR_SHUTTER_ON_MASK \
++| MRV_ISP_ISR_FLASH_OFF_MASK \
++| MRV_ISP_ISR_FLASH_ON_MASK \
++| MRV_ISP_ISR_H_START_MASK \
++| MRV_ISP_ISR_V_START_MASK \
++| MRV_ISP_ISR_FRAME_IN_MASK \
++| MRV_ISP_ISR_AWB_DONE_MASK \
++| MRV_ISP_ISR_PIC_SIZE_ERR_MASK \
++| MRV_ISP_ISR_DATA_LOSS_MASK \
++| MRV_ISP_ISR_FRAME_MASK \
++| MRV_ISP_ISR_ISP_OFF_MASK \
++)
++#define MRV_ISP_ISR_ALL_SHIFT 0
++
++#define MRV_ISP_CT_COEFF
++#define MRV_ISP_CT_COEFF_MASK 0x000007FF
++#define MRV_ISP_CT_COEFF_SHIFT 0
++#define MRV_ISP_CT_COEFF_MAX (MRV_ISP_CT_COEFF_MASK >> MRV_ISP_CT_COEFF_SHIFT)
++
++#define MRV_ISP_EQU_SEGM
++#define MRV_ISP_EQU_SEGM_MASK 0x00000001
++#define MRV_ISP_EQU_SEGM_SHIFT 0
++#define MRV_ISP_EQU_SEGM_LOG 0
++#define MRV_ISP_EQU_SEGM_EQU 1
++
++#define MRV_ISP_ISP_GAMMA_OUT_Y
++#define MRV_ISP_ISP_GAMMA_OUT_Y_MASK 0x000003FF
++#define MRV_ISP_ISP_GAMMA_OUT_Y_SHIFT 0
++
++#define MRV_ISP_OUTFORM_SIZE_ERR
++#define MRV_ISP_OUTFORM_SIZE_ERR_MASK 0x00000004
++#define MRV_ISP_OUTFORM_SIZE_ERR_SHIFT 2
++#define MRV_ISP_IS_SIZE_ERR
++#define MRV_ISP_IS_SIZE_ERR_MASK 0x00000002
++#define MRV_ISP_IS_SIZE_ERR_SHIFT 1
++#define MRV_ISP_INFORM_SIZE_ERR
++#define MRV_ISP_INFORM_SIZE_ERR_MASK 0x00000001
++#define MRV_ISP_INFORM_SIZE_ERR_SHIFT 0
++
++#define MRV_ISP_ALL_ERR
++#define MRV_ISP_ALL_ERR_MASK \
++(0 \
++| MRV_ISP_OUTFORM_SIZE_ERR_MASK \
++| MRV_ISP_IS_SIZE_ERR_MASK \
++| MRV_ISP_INFORM_SIZE_ERR_MASK \
++)
++#define MRV_ISP_ALL_ERR_SHIFT 0
++
++#define MRV_ISP_OUTFORM_SIZE_ERR_CLR
++#define MRV_ISP_OUTFORM_SIZE_ERR_CLR_MASK 0x00000004
++#define MRV_ISP_OUTFORM_SIZE_ERR_CLR_SHIFT 2
++#define MRV_ISP_IS_SIZE_ERR_CLR
++#define MRV_ISP_IS_SIZE_ERR_CLR_MASK 0x00000002
++#define MRV_ISP_IS_SIZE_ERR_CLR_SHIFT 1
++#define MRV_ISP_INFORM_SIZE_ERR_CLR
++#define MRV_ISP_INFORM_SIZE_ERR_CLR_MASK 0x00000001
++#define MRV_ISP_INFORM_SIZE_ERR_CLR_SHIFT 0
++
++
++#define MRV_ISP_FRAME_COUNTER
++#define MRV_ISP_FRAME_COUNTER_MASK 0x000003FF
++#define MRV_ISP_FRAME_COUNTER_SHIFT 0
++
++#define MRV_ISP_CT_OFFSET_R
++#define MRV_ISP_CT_OFFSET_R_MASK 0x00000FFF
++#define MRV_ISP_CT_OFFSET_R_SHIFT 0
++
++#define MRV_ISP_CT_OFFSET_G
++#define MRV_ISP_CT_OFFSET_G_MASK 0x00000FFF
++#define MRV_ISP_CT_OFFSET_G_SHIFT 0
++
++#define MRV_ISP_CT_OFFSET_B
++#define MRV_ISP_CT_OFFSET_B_MASK 0x00000FFF
++#define MRV_ISP_CT_OFFSET_B_SHIFT 0
++
++
++#define MRV_FLASH_PREFLASH_ON
++#define MRV_FLASH_PREFLASH_ON_MASK 0x00000004
++#define MRV_FLASH_PREFLASH_ON_SHIFT 2
++#define MRV_FLASH_FLASH_ON
++#define MRV_FLASH_FLASH_ON_MASK 0x00000002
++#define MRV_FLASH_FLASH_ON_SHIFT 1
++#define MRV_FLASH_PRELIGHT_ON
++#define MRV_FLASH_PRELIGHT_ON_MASK 0x00000001
++#define MRV_FLASH_PRELIGHT_ON_SHIFT 0
++
++#define MRV_FLASH_FL_CAP_DEL
++#define MRV_FLASH_FL_CAP_DEL_MASK 0x000000F0
++#define MRV_FLASH_FL_CAP_DEL_SHIFT 4
++#define MRV_FLASH_FL_CAP_DEL_MAX \
++ (MRV_FLASH_FL_CAP_DEL_MASK >> MRV_FLASH_FL_CAP_DEL_SHIFT)
++#define MRV_FLASH_FL_TRIG_SRC
++#define MRV_FLASH_FL_TRIG_SRC_MASK 0x00000008
++#define MRV_FLASH_FL_TRIG_SRC_SHIFT 3
++#define MRV_FLASH_FL_TRIG_SRC_VDS 0
++#define MRV_FLASH_FL_TRIG_SRC_FL 1
++#define MRV_FLASH_FL_POL
++#define MRV_FLASH_FL_POL_MASK 0x00000004
++#define MRV_FLASH_FL_POL_SHIFT 2
++#define MRV_FLASH_FL_POL_HIGH 0
++#define MRV_FLASH_FL_POL_LOW 1
++#define MRV_FLASH_VS_IN_EDGE
++#define MRV_FLASH_VS_IN_EDGE_MASK 0x00000002
++#define MRV_FLASH_VS_IN_EDGE_SHIFT 1
++#define MRV_FLASH_VS_IN_EDGE_NEG 0
++#define MRV_FLASH_VS_IN_EDGE_POS 1
++#define MRV_FLASH_PRELIGHT_MODE
++#define MRV_FLASH_PRELIGHT_MODE_MASK 0x00000001
++#define MRV_FLASH_PRELIGHT_MODE_SHIFT 0
++#define MRV_FLASH_PRELIGHT_MODE_OASF 0
++#define MRV_FLASH_PRELIGHT_MODE_OAEF 1
++
++#define MRV_FLASH_FL_PRE_DIV
++#define MRV_FLASH_FL_PRE_DIV_MASK 0x000003FF
++#define MRV_FLASH_FL_PRE_DIV_SHIFT 0
++#define MRV_FLASH_FL_PRE_DIV_MAX \
++ (MRV_FLASH_FL_PRE_DIV_MASK >> MRV_FLASH_FL_PRE_DIV_SHIFT)
++
++#define MRV_FLASH_FL_DELAY
++#define MRV_FLASH_FL_DELAY_MASK 0x0003FFFF
++#define MRV_FLASH_FL_DELAY_SHIFT 0
++#define MRV_FLASH_FL_DELAY_MAX \
++ (MRV_FLASH_FL_DELAY_MASK >> MRV_FLASH_FL_DELAY_SHIFT)
++
++#define MRV_FLASH_FL_TIME
++#define MRV_FLASH_FL_TIME_MASK 0x0003FFFF
++#define MRV_FLASH_FL_TIME_SHIFT 0
++#define MRV_FLASH_FL_TIME_MAX \
++ (MRV_FLASH_FL_TIME_MASK >> MRV_FLASH_FL_TIME_SHIFT)
++
++#define MRV_FLASH_FL_MAXP
++#define MRV_FLASH_FL_MAXP_MASK 0x0000FFFF
++#define MRV_FLASH_FL_MAXP_SHIFT 0
++#define MRV_FLASH_FL_MAXP_MAX \
++ (MRV_FLASH_FL_MAXP_MASK >> MRV_FLASH_FL_MAXP_SHIFT)
++
++#define MRV_SHUT_SH_OPEN_POL
++#define MRV_SHUT_SH_OPEN_POL_MASK 0x00000010
++#define MRV_SHUT_SH_OPEN_POL_SHIFT 4
++#define MRV_SHUT_SH_OPEN_POL_HIGH 0
++#define MRV_SHUT_SH_OPEN_POL_LOW 1
++#define MRV_SHUT_SH_TRIG_EN
++#define MRV_SHUT_SH_TRIG_EN_MASK 0x00000008
++#define MRV_SHUT_SH_TRIG_EN_SHIFT 3
++#define MRV_SHUT_SH_TRIG_EN_NEG 0
++#define MRV_SHUT_SH_TRIG_EN_POS 1
++#define MRV_SHUT_SH_TRIG_SRC
++#define MRV_SHUT_SH_TRIG_SRC_MASK 0x00000004
++#define MRV_SHUT_SH_TRIG_SRC_SHIFT 2
++#define MRV_SHUT_SH_TRIG_SRC_VDS 0
++#define MRV_SHUT_SH_TRIG_SRC_SHUT 1
++#define MRV_SHUT_SH_REP_EN
++#define MRV_SHUT_SH_REP_EN_MASK 0x00000002
++#define MRV_SHUT_SH_REP_EN_SHIFT 1
++#define MRV_SHUT_SH_REP_EN_ONCE 0
++#define MRV_SHUT_SH_REP_EN_REP 1
++#define MRV_SHUT_SH_EN
++#define MRV_SHUT_SH_EN_MASK 0x00000001
++#define MRV_SHUT_SH_EN_SHIFT 0
++
++#define MRV_SHUT_SH_PRE_DIV
++#define MRV_SHUT_SH_PRE_DIV_MASK 0x000003FF
++#define MRV_SHUT_SH_PRE_DIV_SHIFT 0
++#define MRV_SHUT_SH_PRE_DIV_MAX \
++ (MRV_SHUT_SH_PRE_DIV_MASK >> MRV_SHUT_SH_PRE_DIV_SHIFT)
++
++#define MRV_SHUT_SH_DELAY
++#define MRV_SHUT_SH_DELAY_MASK 0x000FFFFF
++#define MRV_SHUT_SH_DELAY_SHIFT 0
++#define MRV_SHUT_SH_DELAY_MAX \
++ (MRV_SHUT_SH_DELAY_MASK >> MRV_SHUT_SH_DELAY_SHIFT)
++
++#define MRV_SHUT_SH_TIME
++#define MRV_SHUT_SH_TIME_MASK 0x000FFFFF
++#define MRV_SHUT_SH_TIME_SHIFT 0
++#define MRV_SHUT_SH_TIME_MAX (MRV_SHUT_SH_TIME_MASK >> MRV_SHUT_SH_TIME_SHIFT)
++
++#define MRV_CPROC_CPROC_C_OUT_RANGE
++#define MRV_CPROC_CPROC_C_OUT_RANGE_MASK 0x00000008
++#define MRV_CPROC_CPROC_C_OUT_RANGE_SHIFT 3
++#define MRV_CPROC_CPROC_C_OUT_RANGE_BT601 0
++#define MRV_CPROC_CPROC_C_OUT_RANGE_FULL 1
++#define MRV_CPROC_CPROC_Y_IN_RANGE
++#define MRV_CPROC_CPROC_Y_IN_RANGE_MASK 0x00000004
++#define MRV_CPROC_CPROC_Y_IN_RANGE_SHIFT 2
++#define MRV_CPROC_CPROC_Y_IN_RANGE_BT601 0
++#define MRV_CPROC_CPROC_Y_IN_RANGE_FULL 1
++#define MRV_CPROC_CPROC_Y_OUT_RANGE
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_MASK 0x00000002
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_SHIFT 1
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_BT601 0
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_FULL 1
++#define MRV_CPROC_CPROC_ENABLE
++#define MRV_CPROC_CPROC_ENABLE_MASK 0x00000001
++#define MRV_CPROC_CPROC_ENABLE_SHIFT 0
++
++#define MRV_CPROC_CPROC_CONTRAST
++#define MRV_CPROC_CPROC_CONTRAST_MASK 0x000000FF
++#define MRV_CPROC_CPROC_CONTRAST_SHIFT 0
++
++#define MRV_CPROC_CPROC_BRIGHTNESS
++#define MRV_CPROC_CPROC_BRIGHTNESS_MASK 0x000000FF
++#define MRV_CPROC_CPROC_BRIGHTNESS_SHIFT 0
++
++#define MRV_CPROC_CPROC_SATURATION
++#define MRV_CPROC_CPROC_SATURATION_MASK 0x000000FF
++#define MRV_CPROC_CPROC_SATURATION_SHIFT 0
++
++#define MRV_CPROC_CPROC_HUE
++#define MRV_CPROC_CPROC_HUE_MASK 0x000000FF
++#define MRV_CPROC_CPROC_HUE_SHIFT 0
++
++#define MRV_RSZ_SCALE
++
++/* resize scale mask depends on IP core resvion ID */
++#define MRV_RSZ_SCALE_MASK (mrv_rsz_scale_mask) /*0x00003FFF*/
++
++#define MRV_RSZ_SCALE_SHIFT 0
++#define MRV_RSZ_SCALE_MAX (MRV_RSZ_SCALE_MASK >> MRV_RSZ_SCALE_SHIFT)
++
++
++
++#define MRV_MRSZ_CFG_UPD
++#define MRV_MRSZ_CFG_UPD_MASK 0x00000100
++#define MRV_MRSZ_CFG_UPD_SHIFT 8
++#define MRV_MRSZ_SCALE_VC_UP
++#define MRV_MRSZ_SCALE_VC_UP_MASK 0x00000080
++#define MRV_MRSZ_SCALE_VC_UP_SHIFT 7
++#define MRV_MRSZ_SCALE_VC_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_VC_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VY_UP
++#define MRV_MRSZ_SCALE_VY_UP_MASK 0x00000040
++#define MRV_MRSZ_SCALE_VY_UP_SHIFT 6
++#define MRV_MRSZ_SCALE_VY_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_VY_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HC_UP
++#define MRV_MRSZ_SCALE_HC_UP_MASK 0x00000020
++#define MRV_MRSZ_SCALE_HC_UP_SHIFT 5
++#define MRV_MRSZ_SCALE_HC_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_HC_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HY_UP
++#define MRV_MRSZ_SCALE_HY_UP_MASK 0x00000010
++#define MRV_MRSZ_SCALE_HY_UP_SHIFT 4
++#define MRV_MRSZ_SCALE_HY_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_HY_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VC_ENABLE
++#define MRV_MRSZ_SCALE_VC_ENABLE_MASK 0x00000008
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHIFT 3
++#define MRV_MRSZ_SCALE_VY_ENABLE
++#define MRV_MRSZ_SCALE_VY_ENABLE_MASK 0x00000004
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHIFT 2
++#define MRV_MRSZ_SCALE_HC_ENABLE
++#define MRV_MRSZ_SCALE_HC_ENABLE_MASK 0x00000002
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHIFT 1
++#define MRV_MRSZ_SCALE_HY_ENABLE
++#define MRV_MRSZ_SCALE_HY_ENABLE_MASK 0x00000001
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHIFT 0
++
++#define MRV_MRSZ_SCALE_HY
++#define MRV_MRSZ_SCALE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCB
++#define MRV_MRSZ_SCALE_HCB_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCB_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCR
++#define MRV_MRSZ_SCALE_HCR_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCR_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_VY
++#define MRV_MRSZ_SCALE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_VC
++#define MRV_MRSZ_SCALE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_HY
++#define MRV_MRSZ_PHASE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_HC
++#define MRV_MRSZ_PHASE_HC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_VY
++#define MRV_MRSZ_PHASE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_VC
++#define MRV_MRSZ_PHASE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_SCALE_LUT_ADDR
++#define MRV_MRSZ_SCALE_LUT_ADDR_MASK 0x0000003F
++#define MRV_MRSZ_SCALE_LUT_ADDR_SHIFT 0
++
++
++#define MRV_MRSZ_SCALE_LUT
++#define MRV_MRSZ_SCALE_LUT_MASK 0x0000003F
++#define MRV_MRSZ_SCALE_LUT_SHIFT 0
++
++
++#define MRV_MRSZ_SCALE_VC_UP_SHD
++#define MRV_MRSZ_SCALE_VC_UP_SHD_MASK 0x00000080
++#define MRV_MRSZ_SCALE_VC_UP_SHD_SHIFT 7
++#define MRV_MRSZ_SCALE_VC_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_VC_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VY_UP_SHD
++#define MRV_MRSZ_SCALE_VY_UP_SHD_MASK 0x00000040
++#define MRV_MRSZ_SCALE_VY_UP_SHD_SHIFT 6
++#define MRV_MRSZ_SCALE_VY_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_VY_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HC_UP_SHD
++#define MRV_MRSZ_SCALE_HC_UP_SHD_MASK 0x00000020
++#define MRV_MRSZ_SCALE_HC_UP_SHD_SHIFT 5
++#define MRV_MRSZ_SCALE_HC_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_HC_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HY_UP_SHD
++#define MRV_MRSZ_SCALE_HY_UP_SHD_MASK 0x00000010
++#define MRV_MRSZ_SCALE_HY_UP_SHD_SHIFT 4
++#define MRV_MRSZ_SCALE_HY_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_HY_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHD
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHD_MASK 0x00000008
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHD_SHIFT 3
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHD
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHD_MASK 0x00000004
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHD_SHIFT 2
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHD
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHD_MASK 0x00000002
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHD_SHIFT 1
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHD
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHD_MASK 0x00000001
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHD_SHIFT 0
++
++#define MRV_MRSZ_SCALE_HY_SHD
++#define MRV_MRSZ_SCALE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCB_SHD
++#define MRV_MRSZ_SCALE_HCB_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCB_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCR_SHD
++#define MRV_MRSZ_SCALE_HCR_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCR_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_SCALE_VY_SHD
++#define MRV_MRSZ_SCALE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_VC_SHD
++#define MRV_MRSZ_SCALE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_HY_SHD
++#define MRV_MRSZ_PHASE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_HC_SHD
++#define MRV_MRSZ_PHASE_HC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_VY_SHD
++#define MRV_MRSZ_PHASE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_VC_SHD
++#define MRV_MRSZ_PHASE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_CFG_UPD
++#define MRV_SRSZ_CFG_UPD_MASK 0x00000100
++#define MRV_SRSZ_CFG_UPD_SHIFT 8
++#define MRV_SRSZ_SCALE_VC_UP
++#define MRV_SRSZ_SCALE_VC_UP_MASK 0x00000080
++#define MRV_SRSZ_SCALE_VC_UP_SHIFT 7
++#define MRV_SRSZ_SCALE_VC_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_VC_UP_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_VY_UP
++#define MRV_SRSZ_SCALE_VY_UP_MASK 0x00000040
++#define MRV_SRSZ_SCALE_VY_UP_SHIFT 6
++#define MRV_SRSZ_SCALE_VY_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_VY_UP_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HC_UP
++#define MRV_SRSZ_SCALE_HC_UP_MASK 0x00000020
++#define MRV_SRSZ_SCALE_HC_UP_SHIFT 5
++#define MRV_SRSZ_SCALE_HC_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_HC_UP_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HY_UP
++#define MRV_SRSZ_SCALE_HY_UP_MASK 0x00000010
++#define MRV_SRSZ_SCALE_HY_UP_SHIFT 4
++#define MRV_SRSZ_SCALE_HY_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_HY_UP_DOWNSCALE 0
++
++#define MRV_SRSZ_SCALE_VC_ENABLE
++#define MRV_SRSZ_SCALE_VC_ENABLE_MASK 0x00000008
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHIFT 3
++#define MRV_SRSZ_SCALE_VY_ENABLE
++#define MRV_SRSZ_SCALE_VY_ENABLE_MASK 0x00000004
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHIFT 2
++#define MRV_SRSZ_SCALE_HC_ENABLE
++#define MRV_SRSZ_SCALE_HC_ENABLE_MASK 0x00000002
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHIFT 1
++#define MRV_SRSZ_SCALE_HY_ENABLE
++#define MRV_SRSZ_SCALE_HY_ENABLE_MASK 0x00000001
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHIFT 0
++
++#define MRV_SRSZ_SCALE_HY
++#define MRV_SRSZ_SCALE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCB
++#define MRV_SRSZ_SCALE_HCB_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCB_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCR
++#define MRV_SRSZ_SCALE_HCR_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCR_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_SRSZ_SCALE_VY
++#define MRV_SRSZ_SCALE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_VC
++#define MRV_SRSZ_SCALE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HY
++#define MRV_SRSZ_PHASE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HC
++#define MRV_SRSZ_PHASE_HC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VY
++#define MRV_SRSZ_PHASE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VC
++#define MRV_SRSZ_PHASE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_LUT_ADDR
++#define MRV_SRSZ_SCALE_LUT_ADDR_MASK 0x0000003F
++#define MRV_SRSZ_SCALE_LUT_ADDR_SHIFT 0
++
++
++#define MRV_SRSZ_SCALE_LUT
++#define MRV_SRSZ_SCALE_LUT_MASK 0x0000003F
++#define MRV_SRSZ_SCALE_LUT_SHIFT 0
++
++
++#define MRV_SRSZ_SCALE_VC_UP_SHD
++#define MRV_SRSZ_SCALE_VC_UP_SHD_MASK 0x00000080
++#define MRV_SRSZ_SCALE_VC_UP_SHD_SHIFT 7
++#define MRV_SRSZ_SCALE_VC_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_VC_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_VY_UP_SHD
++#define MRV_SRSZ_SCALE_VY_UP_SHD_MASK 0x00000040
++#define MRV_SRSZ_SCALE_VY_UP_SHD_SHIFT 6
++#define MRV_SRSZ_SCALE_VY_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_VY_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HC_UP_SHD
++#define MRV_SRSZ_SCALE_HC_UP_SHD_MASK 0x00000020
++#define MRV_SRSZ_SCALE_HC_UP_SHD_SHIFT 5
++#define MRV_SRSZ_SCALE_HC_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_HC_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HY_UP_SHD
++#define MRV_SRSZ_SCALE_HY_UP_SHD_MASK 0x00000010
++#define MRV_SRSZ_SCALE_HY_UP_SHD_SHIFT 4
++#define MRV_SRSZ_SCALE_HY_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_HY_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHD
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHD_MASK 0x00000008
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHD_SHIFT 3
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHD
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHD_MASK 0x00000004
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHD_SHIFT 2
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHD
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHD_MASK 0x00000002
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHD_SHIFT 1
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHD
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHD_MASK 0x00000001
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHD_SHIFT 0
++
++#define MRV_SRSZ_SCALE_HY_SHD
++#define MRV_SRSZ_SCALE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCB_SHD
++#define MRV_SRSZ_SCALE_HCB_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCB_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCR_SHD
++#define MRV_SRSZ_SCALE_HCR_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCR_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_SRSZ_SCALE_VY_SHD
++#define MRV_SRSZ_SCALE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_VC_SHD
++#define MRV_SRSZ_SCALE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HY_SHD
++#define MRV_SRSZ_PHASE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HC_SHD
++#define MRV_SRSZ_PHASE_HC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VY_SHD
++#define MRV_SRSZ_PHASE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VC_SHD
++#define MRV_SRSZ_PHASE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MI_SP_OUTPUT_FORMAT
++#define MRV_MI_SP_OUTPUT_FORMAT_MASK 0x70000000
++#define MRV_MI_SP_OUTPUT_FORMAT_SHIFT 28
++#define MRV_MI_SP_OUTPUT_FORMAT_RGB888 6
++#define MRV_MI_SP_OUTPUT_FORMAT_RGB666 5
++#define MRV_MI_SP_OUTPUT_FORMAT_RGB565 4
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV444 3
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV422 2
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV420 1
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV400 0
++#define MRV_MI_SP_INPUT_FORMAT
++#define MRV_MI_SP_INPUT_FORMAT_MASK 0x0C000000
++#define MRV_MI_SP_INPUT_FORMAT_SHIFT 26
++#define MRV_MI_SP_INPUT_FORMAT_YUV444 3
++#define MRV_MI_SP_INPUT_FORMAT_YUV422 2
++#define MRV_MI_SP_INPUT_FORMAT_YUV420 1
++#define MRV_MI_SP_INPUT_FORMAT_YUV400 0
++#define MRV_MI_SP_WRITE_FORMAT
++#define MRV_MI_SP_WRITE_FORMAT_MASK 0x03000000
++#define MRV_MI_SP_WRITE_FORMAT_SHIFT 24
++#define MRV_MI_SP_WRITE_FORMAT_PLANAR 0
++#define MRV_MI_SP_WRITE_FORMAT_SEMIPLANAR 1
++#define MRV_MI_SP_WRITE_FORMAT_INTERLEAVED 2
++#define MRV_MI_MP_WRITE_FORMAT
++#define MRV_MI_MP_WRITE_FORMAT_MASK 0x00C00000
++#define MRV_MI_MP_WRITE_FORMAT_SHIFT 22
++#define MRV_MI_MP_WRITE_FORMAT_PLANAR 0
++#define MRV_MI_MP_WRITE_FORMAT_SEMIPLANAR 1
++#define MRV_MI_MP_WRITE_FORMAT_INTERLEAVED 2
++#define MRV_MI_MP_WRITE_FORMAT_RAW_8 0
++#define MRV_MI_MP_WRITE_FORMAT_RAW_12 2
++#define MRV_MI_INIT_OFFSET_EN
++#define MRV_MI_INIT_OFFSET_EN_MASK 0x00200000
++#define MRV_MI_INIT_OFFSET_EN_SHIFT 21
++
++#define MRV_MI_INIT_BASE_EN
++#define MRV_MI_INIT_BASE_EN_MASK 0x00100000
++#define MRV_MI_INIT_BASE_EN_SHIFT 20
++#define MRV_MI_BURST_LEN_CHROM
++#define MRV_MI_BURST_LEN_CHROM_MASK 0x000C0000
++#define MRV_MI_BURST_LEN_CHROM_SHIFT 18
++#define MRV_MI_BURST_LEN_CHROM_4 0
++#define MRV_MI_BURST_LEN_CHROM_8 1
++#define MRV_MI_BURST_LEN_CHROM_16 2
++
++#define MRV_MI_BURST_LEN_LUM
++#define MRV_MI_BURST_LEN_LUM_MASK 0x00030000
++#define MRV_MI_BURST_LEN_LUM_SHIFT 16
++#define MRV_MI_BURST_LEN_LUM_4 0
++#define MRV_MI_BURST_LEN_LUM_8 1
++#define MRV_MI_BURST_LEN_LUM_16 2
++
++#define MRV_MI_LAST_PIXEL_SIG_EN
++#define MRV_MI_LAST_PIXEL_SIG_EN_MASK 0x00008000
++#define MRV_MI_LAST_PIXEL_SIG_EN_SHIFT 15
++
++ #define MRV_MI_422NONCOSITED
++ #define MRV_MI_422NONCOSITED_MASK 0x00000400
++ #define MRV_MI_422NONCOSITED_SHIFT 10
++ #define MRV_MI_CBCR_FULL_RANGE
++ #define MRV_MI_CBCR_FULL_RANGE_MASK 0x00000200
++ #define MRV_MI_CBCR_FULL_RANGE_SHIFT 9
++ #define MRV_MI_Y_FULL_RANGE
++ #define MRV_MI_Y_FULL_RANGE_MASK 0x00000100
++ #define MRV_MI_Y_FULL_RANGE_SHIFT 8
++#define MRV_MI_BYTE_SWAP
++#define MRV_MI_BYTE_SWAP_MASK 0x00000080
++#define MRV_MI_BYTE_SWAP_SHIFT 7
++#define MRV_MI_ROT
++#define MRV_MI_ROT_MASK 0x00000040
++#define MRV_MI_ROT_SHIFT 6
++#define MRV_MI_V_FLIP
++#define MRV_MI_V_FLIP_MASK 0x00000020
++#define MRV_MI_V_FLIP_SHIFT 5
++
++#define MRV_MI_H_FLIP
++#define MRV_MI_H_FLIP_MASK 0x00000010
++#define MRV_MI_H_FLIP_SHIFT 4
++#define MRV_MI_RAW_ENABLE
++#define MRV_MI_RAW_ENABLE_MASK 0x00000008
++#define MRV_MI_RAW_ENABLE_SHIFT 3
++#define MRV_MI_JPEG_ENABLE
++#define MRV_MI_JPEG_ENABLE_MASK 0x00000004
++#define MRV_MI_JPEG_ENABLE_SHIFT 2
++#define MRV_MI_SP_ENABLE
++#define MRV_MI_SP_ENABLE_MASK 0x00000002
++#define MRV_MI_SP_ENABLE_SHIFT 1
++#define MRV_MI_MP_ENABLE
++#define MRV_MI_MP_ENABLE_MASK 0x00000001
++#define MRV_MI_MP_ENABLE_SHIFT 0
++
++
++#define MRV_MI_ROT_AND_FLIP
++#define MRV_MI_ROT_AND_FLIP_MASK \
++ (MRV_MI_H_FLIP_MASK | MRV_MI_V_FLIP_MASK | MRV_MI_ROT_MASK)
++#define MRV_MI_ROT_AND_FLIP_SHIFT \
++ (MRV_MI_H_FLIP_SHIFT)
++#define MRV_MI_ROT_AND_FLIP_H_FLIP \
++ (MRV_MI_H_FLIP_MASK >> MRV_MI_ROT_AND_FLIP_SHIFT)
++#define MRV_MI_ROT_AND_FLIP_V_FLIP \
++ (MRV_MI_V_FLIP_MASK >> MRV_MI_ROT_AND_FLIP_SHIFT)
++#define MRV_MI_ROT_AND_FLIP_ROTATE \
++ (MRV_MI_ROT_MASK >> MRV_MI_ROT_AND_FLIP_SHIFT)
++
++#define MRV_MI_MI_CFG_UPD
++#define MRV_MI_MI_CFG_UPD_MASK 0x00000010
++#define MRV_MI_MI_CFG_UPD_SHIFT 4
++#define MRV_MI_MI_SKIP
++#define MRV_MI_MI_SKIP_MASK 0x00000004
++#define MRV_MI_MI_SKIP_SHIFT 2
++
++#define MRV_MI_MP_Y_BASE_AD_INIT
++#define MRV_MI_MP_Y_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_MP_Y_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_MP_Y_BASE_AD_INIT_VALID_MASK (MRV_MI_MP_Y_BASE_AD_INIT_MASK &\
++ ~0x00000003)
++#define MRV_MI_MP_Y_SIZE_INIT
++#define MRV_MI_MP_Y_SIZE_INIT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_SIZE_INIT_SHIFT 0
++#define MRV_MI_MP_Y_SIZE_INIT_VALID_MASK (MRV_MI_MP_Y_SIZE_INIT_MASK &\
++ ~0x00000003)
++#define MRV_MI_MP_Y_OFFS_CNT_INIT
++#define MRV_MI_MP_Y_OFFS_CNT_INIT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_MP_Y_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_MP_Y_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_OFFS_CNT_START
++#define MRV_MI_MP_Y_OFFS_CNT_START_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_MP_Y_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_MP_Y_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT_SHIFT 0
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT_VALID_MASK \
++ (MRV_MI_MP_Y_IRQ_OFFS_INIT_MASK & ~0x00000003)
++#define MRV_MI_MP_CB_BASE_AD_INIT
++#define MRV_MI_MP_CB_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_MP_CB_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_MP_CB_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_MP_CB_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_SIZE_INIT
++#define MRV_MI_MP_CB_SIZE_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CB_SIZE_INIT_SHIFT 0
++#define MRV_MI_MP_CB_SIZE_INIT_VALID_MASK \
++ (MRV_MI_MP_CB_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_OFFS_CNT_INIT
++#define MRV_MI_MP_CB_OFFS_CNT_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CB_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_MP_CB_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_MP_CB_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_OFFS_CNT_START
++#define MRV_MI_MP_CB_OFFS_CNT_START_MASK 0x00FFFFFC
++#define MRV_MI_MP_CB_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_MP_CB_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_MP_CB_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_BASE_AD_INIT
++#define MRV_MI_MP_CR_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_MP_CR_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_MP_CR_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_MP_CR_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_SIZE_INIT
++#define MRV_MI_MP_CR_SIZE_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CR_SIZE_INIT_SHIFT 0
++#define MRV_MI_MP_CR_SIZE_INIT_VALID_MASK \
++ (MRV_MI_MP_CR_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_OFFS_CNT_INIT
++#define MRV_MI_MP_CR_OFFS_CNT_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CR_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_MP_CR_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_MP_CR_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_OFFS_CNT_START
++#define MRV_MI_MP_CR_OFFS_CNT_START_MASK 0x00FFFFFC
++#define MRV_MI_MP_CR_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_MP_CR_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_MP_CR_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_BASE_AD_INIT
++#define MRV_MI_SP_Y_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_SP_Y_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_SP_Y_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_SP_Y_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_SIZE_INIT
++#define MRV_MI_SP_Y_SIZE_INIT_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_SIZE_INIT_SHIFT 0
++#define MRV_MI_SP_Y_SIZE_INIT_VALID_MASK \
++ (MRV_MI_SP_Y_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_OFFS_CNT_INIT
++#define MRV_MI_SP_Y_OFFS_CNT_INIT_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_SP_Y_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_SP_Y_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_OFFS_CNT_START
++#define MRV_MI_SP_Y_OFFS_CNT_START_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_SP_Y_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_SP_Y_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_LLENGTH
++#define MRV_MI_SP_Y_LLENGTH_MASK 0x00001FFF
++#define MRV_MI_SP_Y_LLENGTH_SHIFT 0
++#define MRV_MI_SP_Y_LLENGTH_VALID_MASK \
++ (MRV_MI_SP_Y_LLENGTH_MASK & ~0x00000000)
++
++#define MRV_MI_SP_CB_BASE_AD_INIT
++#define MRV_MI_SP_CB_BASE_AD_INIT_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CB_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_SP_CB_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_SP_CB_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_SIZE_INIT
++#define MRV_MI_SP_CB_SIZE_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_SIZE_INIT_SHIFT 0
++#define MRV_MI_SP_CB_SIZE_INIT_VALID_MASK \
++ (MRV_MI_SP_CB_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_OFFS_CNT_INIT
++#define MRV_MI_SP_CB_OFFS_CNT_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_SP_CB_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_SP_CB_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_OFFS_CNT_START
++#define MRV_MI_SP_CB_OFFS_CNT_START_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_SP_CB_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_SP_CB_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_BASE_AD_INIT
++#define MRV_MI_SP_CR_BASE_AD_INIT_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CR_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_SP_CR_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_SP_CR_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_SIZE_INIT
++#define MRV_MI_SP_CR_SIZE_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_SIZE_INIT_SHIFT 0
++#define MRV_MI_SP_CR_SIZE_INIT_VALID_MASK \
++ (MRV_MI_SP_CR_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_OFFS_CNT_INIT
++#define MRV_MI_SP_CR_OFFS_CNT_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_SP_CR_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_SP_CR_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_OFFS_CNT_START
++#define MRV_MI_SP_CR_OFFS_CNT_START_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_SP_CR_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_SP_CR_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_BYTE_CNT
++#define MRV_MI_BYTE_CNT_MASK 0x01FFFFFF
++#define MRV_MI_BYTE_CNT_SHIFT 0
++
++#define MRV_MI_RAW_ENABLE_OUT
++#define MRV_MI_RAW_ENABLE_OUT_MASK 0x00080000
++#define MRV_MI_RAW_ENABLE_OUT_SHIFT 19
++#define MRV_MI_JPEG_ENABLE_OUT
++#define MRV_MI_JPEG_ENABLE_OUT_MASK 0x00040000
++#define MRV_MI_JPEG_ENABLE_OUT_SHIFT 18
++#define MRV_MI_SP_ENABLE_OUT
++#define MRV_MI_SP_ENABLE_OUT_MASK 0x00020000
++#define MRV_MI_SP_ENABLE_OUT_SHIFT 17
++#define MRV_MI_MP_ENABLE_OUT
++#define MRV_MI_MP_ENABLE_OUT_MASK 0x00010000
++#define MRV_MI_MP_ENABLE_OUT_SHIFT 16
++#define MRV_MI_RAW_ENABLE_IN
++#define MRV_MI_RAW_ENABLE_IN_MASK 0x00000020
++#define MRV_MI_RAW_ENABLE_IN_SHIFT 5
++#define MRV_MI_JPEG_ENABLE_IN
++#define MRV_MI_JPEG_ENABLE_IN_MASK 0x00000010
++#define MRV_MI_JPEG_ENABLE_IN_SHIFT 4
++#define MRV_MI_SP_ENABLE_IN
++#define MRV_MI_SP_ENABLE_IN_MASK 0x00000004
++#define MRV_MI_SP_ENABLE_IN_SHIFT 2
++#define MRV_MI_MP_ENABLE_IN
++#define MRV_MI_MP_ENABLE_IN_MASK 0x00000001
++#define MRV_MI_MP_ENABLE_IN_SHIFT 0
++
++#define MRV_MI_MP_Y_BASE_AD
++#define MRV_MI_MP_Y_BASE_AD_MASK 0xFFFFFFFC
++#define MRV_MI_MP_Y_BASE_AD_SHIFT 0
++#define MRV_MI_MP_Y_BASE_AD_VALID_MASK \
++ (MRV_MI_MP_Y_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_SIZE
++#define MRV_MI_MP_Y_SIZE_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_SIZE_SHIFT 0
++#define MRV_MI_MP_Y_SIZE_VALID_MASK (MRV_MI_MP_Y_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_OFFS_CNT
++#define MRV_MI_MP_Y_OFFS_CNT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_OFFS_CNT_SHIFT 0
++#define MRV_MI_MP_Y_OFFS_CNT_VALID_MASK \
++ (MRV_MI_MP_Y_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_IRQ_OFFS
++#define MRV_MI_MP_Y_IRQ_OFFS_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_IRQ_OFFS_SHIFT 0
++#define MRV_MI_MP_Y_IRQ_OFFS_VALID_MASK \
++ (MRV_MI_MP_Y_IRQ_OFFS_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_BASE_AD
++#define MRV_MI_MP_CB_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_MP_CB_BASE_AD_SHIFT 0
++#define MRV_MI_MP_CB_BASE_AD_VALID_MASK \
++ (MRV_MI_MP_CB_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_SIZE
++#define MRV_MI_MP_CB_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_MP_CB_SIZE_SHIFT 0
++#define MRV_MI_MP_CB_SIZE_VALID_MASK (MRV_MI_MP_CB_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_OFFS_CNT
++#define MRV_MI_MP_CB_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_MP_CB_OFFS_CNT_SHIFT 0
++#define MRV_MI_MP_CB_OFFS_CNT_VALID_MASK \
++ (MRV_MI_MP_CB_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_BASE_AD
++#define MRV_MI_MP_CR_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_MP_CR_BASE_AD_SHIFT 0
++#define MRV_MI_MP_CR_BASE_AD_VALID_MASK \
++ (MRV_MI_MP_CR_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_SIZE
++#define MRV_MI_MP_CR_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_MP_CR_SIZE_SHIFT 0
++#define MRV_MI_MP_CR_SIZE_VALID_MASK (MRV_MI_MP_CR_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_OFFS_CNT
++#define MRV_MI_MP_CR_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_MP_CR_OFFS_CNT_SHIFT 0
++#define MRV_MI_MP_CR_OFFS_CNT_VALID_MASK \
++ (MRV_MI_MP_CR_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_BASE_AD
++#define MRV_MI_SP_Y_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_SP_Y_BASE_AD_SHIFT 0
++#define MRV_MI_SP_Y_BASE_AD_VALID_MASK \
++ (MRV_MI_SP_Y_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_SIZE
++#define MRV_MI_SP_Y_SIZE_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_SIZE_SHIFT 0
++#define MRV_MI_SP_Y_SIZE_VALID_MASK (MRV_MI_SP_Y_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_OFFS_CNT
++#define MRV_MI_SP_Y_OFFS_CNT_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_OFFS_CNT_SHIFT 0
++#define MRV_MI_SP_Y_OFFS_CNT_VALID_MASK \
++ (MRV_MI_SP_Y_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_BASE_AD
++#define MRV_MI_SP_CB_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CB_BASE_AD_SHIFT 0
++#define MRV_MI_SP_CB_BASE_AD_VALID_MASK \
++ (MRV_MI_SP_CB_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_SIZE
++#define MRV_MI_SP_CB_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_SIZE_SHIFT 0
++#define MRV_MI_SP_CB_SIZE_VALID_MASK (MRV_MI_SP_CB_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_OFFS_CNT
++#define MRV_MI_SP_CB_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_OFFS_CNT_SHIFT 0
++#define MRV_MI_SP_CB_OFFS_CNT_VALID_MASK \
++ (MRV_MI_SP_CB_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_BASE_AD
++#define MRV_MI_SP_CR_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CR_BASE_AD_SHIFT 0
++#define MRV_MI_SP_CR_BASE_AD_VALID_MASK \
++ (MRV_MI_SP_CR_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_SIZE
++#define MRV_MI_SP_CR_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_SIZE_SHIFT 0
++#define MRV_MI_SP_CR_SIZE_VALID_MASK (MRV_MI_SP_CR_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_OFFS_CNT
++#define MRV_MI_SP_CR_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_OFFS_CNT_SHIFT 0
++#define MRV_MI_SP_CR_OFFS_CNT_VALID_MASK \
++ (MRV_MI_SP_CR_OFFS_CNT_MASK & ~0x00000003)
++
++
++#define MRV_MI_DMA_Y_PIC_START_AD
++#define MRV_MI_DMA_Y_PIC_START_AD_MASK 0xFFFFFFFF
++#define MRV_MI_DMA_Y_PIC_START_AD_SHIFT 0
++
++#define MRV_MI_DMA_Y_PIC_WIDTH
++#define MRV_MI_DMA_Y_PIC_WIDTH_MASK 0x00001FFF
++#define MRV_MI_DMA_Y_PIC_WIDTH_SHIFT 0
++
++#define MRV_MI_DMA_Y_LLENGTH
++#define MRV_MI_DMA_Y_LLENGTH_MASK 0x00001FFF
++#define MRV_MI_DMA_Y_LLENGTH_SHIFT 0
++
++#define MRV_MI_DMA_Y_PIC_SIZE
++#define MRV_MI_DMA_Y_PIC_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_DMA_Y_PIC_SIZE_SHIFT 0
++
++#define MRV_MI_DMA_CB_PIC_START_AD
++#define MRV_MI_DMA_CB_PIC_START_AD_MASK 0xFFFFFFFF
++#define MRV_MI_DMA_CB_PIC_START_AD_SHIFT 0
++
++
++#define MRV_MI_DMA_CR_PIC_START_AD
++#define MRV_MI_DMA_CR_PIC_START_AD_MASK 0xFFFFFFFF
++#define MRV_MI_DMA_CR_PIC_START_AD_SHIFT 0
++
++
++#define MRV_MI_DMA_READY
++#define MRV_MI_DMA_READY_MASK 0x00000800
++#define MRV_MI_DMA_READY_SHIFT 11
++
++#define MRV_MI_AHB_ERROR
++
++#define MRV_MI_AHB_ERROR_MASK 0x00000400
++#define MRV_MI_AHB_ERROR_SHIFT 10
++#define MRV_MI_WRAP_SP_CR
++
++#define MRV_MI_WRAP_SP_CR_MASK 0x00000200
++#define MRV_MI_WRAP_SP_CR_SHIFT 9
++#define MRV_MI_WRAP_SP_CB
++
++#define MRV_MI_WRAP_SP_CB_MASK 0x00000100
++#define MRV_MI_WRAP_SP_CB_SHIFT 8
++#define MRV_MI_WRAP_SP_Y
++
++#define MRV_MI_WRAP_SP_Y_MASK 0x00000080
++#define MRV_MI_WRAP_SP_Y_SHIFT 7
++#define MRV_MI_WRAP_MP_CR
++
++#define MRV_MI_WRAP_MP_CR_MASK 0x00000040
++#define MRV_MI_WRAP_MP_CR_SHIFT 6
++#define MRV_MI_WRAP_MP_CB
++
++#define MRV_MI_WRAP_MP_CB_MASK 0x00000020
++#define MRV_MI_WRAP_MP_CB_SHIFT 5
++#define MRV_MI_WRAP_MP_Y
++
++#define MRV_MI_WRAP_MP_Y_MASK 0x00000010
++#define MRV_MI_WRAP_MP_Y_SHIFT 4
++#define MRV_MI_FILL_MP_Y
++
++#define MRV_MI_FILL_MP_Y_MASK 0x00000008
++#define MRV_MI_FILL_MP_Y_SHIFT 3
++#define MRV_MI_MBLK_LINE
++
++#define MRV_MI_MBLK_LINE_MASK 0x00000004
++#define MRV_MI_MBLK_LINE_SHIFT 2
++#define MRV_MI_SP_FRAME_END
++#define MRV_MI_SP_FRAME_END_MASK 0x00000002
++#define MRV_MI_SP_FRAME_END_SHIFT 1
++
++#define MRV_MI_MP_FRAME_END
++#define MRV_MI_MP_FRAME_END_MASK 0x00000001
++#define MRV_MI_MP_FRAME_END_SHIFT 0
++
++#ifndef MRV_MI_SP_FRAME_END
++#define MRV_MI_SP_FRAME_END_MASK 0
++#endif
++#ifndef MRV_MI_DMA_FRAME_END
++#define MRV_MI_DMA_FRAME_END_MASK 0
++#endif
++
++
++#define MRV_MI_ALLIRQS
++#define MRV_MI_ALLIRQS_MASK \
++(0 \
++| MRV_MI_DMA_READY_MASK \
++| MRV_MI_AHB_ERROR_MASK \
++| MRV_MI_WRAP_SP_CR_MASK \
++| MRV_MI_WRAP_SP_CB_MASK \
++| MRV_MI_WRAP_SP_Y_MASK \
++| MRV_MI_WRAP_MP_CR_MASK \
++| MRV_MI_WRAP_MP_CB_MASK \
++| MRV_MI_WRAP_MP_Y_MASK \
++| MRV_MI_FILL_MP_Y_MASK \
++| MRV_MI_MBLK_LINE_MASK \
++| MRV_MI_SP_FRAME_END_MASK \
++| MRV_MI_DMA_FRAME_END_MASK \
++| MRV_MI_MP_FRAME_END_MASK \
++)
++#define MRV_MI_ALLIRQS_SHIFT 0
++
++#define MRV_MI_AHB_READ_ERROR
++#define MRV_MI_AHB_READ_ERROR_MASK 0x00000200
++#define MRV_MI_AHB_READ_ERROR_SHIFT 9
++#define MRV_MI_AHB_WRITE_ERROR
++#define MRV_MI_AHB_WRITE_ERROR_MASK 0x00000100
++#define MRV_MI_AHB_WRITE_ERROR_SHIFT 8
++#define MRV_MI_SP_CR_FIFO_FULL
++#define MRV_MI_SP_CR_FIFO_FULL_MASK 0x00000040
++#define MRV_MI_SP_CR_FIFO_FULL_SHIFT 6
++#define MRV_MI_SP_CB_FIFO_FULL
++#define MRV_MI_SP_CB_FIFO_FULL_MASK 0x00000020
++#define MRV_MI_SP_CB_FIFO_FULL_SHIFT 5
++#define MRV_MI_SP_Y_FIFO_FULL
++#define MRV_MI_SP_Y_FIFO_FULL_MASK 0x00000010
++#define MRV_MI_SP_Y_FIFO_FULL_SHIFT 4
++#define MRV_MI_MP_CR_FIFO_FULL
++#define MRV_MI_MP_CR_FIFO_FULL_MASK 0x00000004
++#define MRV_MI_MP_CR_FIFO_FULL_SHIFT 2
++#define MRV_MI_MP_CB_FIFO_FULL
++#define MRV_MI_MP_CB_FIFO_FULL_MASK 0x00000002
++#define MRV_MI_MP_CB_FIFO_FULL_SHIFT 1
++#define MRV_MI_MP_Y_FIFO_FULL
++#define MRV_MI_MP_Y_FIFO_FULL_MASK 0x00000001
++#define MRV_MI_MP_Y_FIFO_FULL_SHIFT 0
++
++
++#define MRV_MI_ALL_STAT
++#define MRV_MI_ALL_STAT_MASK \
++(0 \
++| MRV_MI_AHB_READ_ERROR_MASK \
++| MRV_MI_AHB_WRITE_ERROR_MASK \
++| MRV_MI_SP_CR_FIFO_FULL_MASK \
++| MRV_MI_SP_CB_FIFO_FULL_MASK \
++| MRV_MI_SP_Y_FIFO_FULL_MASK \
++| MRV_MI_MP_CR_FIFO_FULL_MASK \
++| MRV_MI_MP_CB_FIFO_FULL_MASK \
++| MRV_MI_MP_Y_FIFO_FULL_MASK \
++)
++#define MRV_MI_ALL_STAT_SHIFT 0
++
++
++
++#define MRV_MI_SP_Y_PIC_WIDTH
++#define MRV_MI_SP_Y_PIC_WIDTH_MASK 0x00000FFF
++#define MRV_MI_SP_Y_PIC_WIDTH_SHIFT 0
++
++#define MRV_MI_SP_Y_PIC_HEIGHT
++#define MRV_MI_SP_Y_PIC_HEIGHT_MASK 0x00000FFF
++#define MRV_MI_SP_Y_PIC_HEIGHT_SHIFT 0
++
++#define MRV_MI_SP_Y_PIC_SIZE
++#define MRV_MI_SP_Y_PIC_SIZE_MASK 0x01FFFFFF
++#define MRV_MI_SP_Y_PIC_SIZE_SHIFT 0
++
++
++
++
++#define MRV_MI_DMA_FRAME_END_DISABLE
++#define MRV_MI_DMA_FRAME_END_DISABLE_MASK 0x00000400
++#define MRV_MI_DMA_FRAME_END_DISABLE_SHIFT 10
++#define MRV_MI_DMA_CONTINUOUS_EN
++#define MRV_MI_DMA_CONTINUOUS_EN_MASK 0x00000200
++#define MRV_MI_DMA_CONTINUOUS_EN_SHIFT 9
++#define MRV_MI_DMA_BYTE_SWAP
++#define MRV_MI_DMA_BYTE_SWAP_MASK 0x00000100
++#define MRV_MI_DMA_BYTE_SWAP_SHIFT 8
++#define MRV_MI_DMA_INOUT_FORMAT
++#define MRV_MI_DMA_INOUT_FORMAT_MASK 0x000000C0
++#define MRV_MI_DMA_INOUT_FORMAT_SHIFT 6
++#define MRV_MI_DMA_INOUT_FORMAT_YUV444 3
++#define MRV_MI_DMA_INOUT_FORMAT_YUV422 2
++#define MRV_MI_DMA_INOUT_FORMAT_YUV420 1
++#define MRV_MI_DMA_INOUT_FORMAT_YUV400 0
++#define MRV_MI_DMA_READ_FORMAT
++#define MRV_MI_DMA_READ_FORMAT_MASK 0x00000030
++#define MRV_MI_DMA_READ_FORMAT_SHIFT 4
++#define MRV_MI_DMA_READ_FORMAT_PLANAR 0
++#define MRV_MI_DMA_READ_FORMAT_SEMIPLANAR 1
++#define MRV_MI_DMA_READ_FORMAT_INTERLEAVED 2
++#define MRV_MI_DMA_BURST_LEN_CHROM
++#define MRV_MI_DMA_BURST_LEN_CHROM_MASK 0x0000000C
++#define MRV_MI_DMA_BURST_LEN_CHROM_SHIFT 2
++#define MRV_MI_DMA_BURST_LEN_CHROM_4 0
++#define MRV_MI_DMA_BURST_LEN_CHROM_8 1
++#define MRV_MI_DMA_BURST_LEN_CHROM_16 2
++#define MRV_MI_DMA_BURST_LEN_LUM
++#define MRV_MI_DMA_BURST_LEN_LUM_MASK 0x00000003
++#define MRV_MI_DMA_BURST_LEN_LUM_SHIFT 0
++#define MRV_MI_DMA_BURST_LEN_LUM_4 0
++#define MRV_MI_DMA_BURST_LEN_LUM_8 1
++#define MRV_MI_DMA_BURST_LEN_LUM_16 2
++
++
++
++#define MRV_MI_DMA_START
++#define MRV_MI_DMA_START_MASK 0x00000001
++#define MRV_MI_DMA_START_SHIFT 0
++
++
++#define MRV_MI_DMA_ACTIVE
++#define MRV_MI_DMA_ACTIVE_MASK 0x00000001
++#define MRV_MI_DMA_ACTIVE_SHIFT 0
++
++
++
++#define MRV_JPE_GEN_HEADER
++#define MRV_JPE_GEN_HEADER_MASK 0x00000001
++#define MRV_JPE_GEN_HEADER_SHIFT 0
++
++
++#define MRV_JPE_CONT_MODE
++#define MRV_JPE_CONT_MODE_MASK 0x00000030
++#define MRV_JPE_CONT_MODE_SHIFT 4
++#define MRV_JPE_CONT_MODE_STOP 0
++#define MRV_JPE_CONT_MODE_NEXT 1
++#define MRV_JPE_CONT_MODE_HEADER 3
++#define MRV_JPE_ENCODE
++#define MRV_JPE_ENCODE_MASK 0x00000001
++#define MRV_JPE_ENCODE_SHIFT 0
++
++
++#define MRV_JPE_JP_INIT
++#define MRV_JPE_JP_INIT_MASK 0x00000001
++#define MRV_JPE_JP_INIT_SHIFT 0
++
++
++#define MRV_JPE_Y_SCALE_EN
++#define MRV_JPE_Y_SCALE_EN_MASK 0x00000001
++#define MRV_JPE_Y_SCALE_EN_SHIFT 0
++
++
++#define MRV_JPE_CBCR_SCALE_EN
++#define MRV_JPE_CBCR_SCALE_EN_MASK 0x00000001
++#define MRV_JPE_CBCR_SCALE_EN_SHIFT 0
++
++#define MRV_JPE_TABLE_FLUSH
++#define MRV_JPE_TABLE_FLUSH_MASK 0x00000001
++#define MRV_JPE_TABLE_FLUSH_SHIFT 0
++
++
++#define MRV_JPE_ENC_HSIZE
++
++#define MRV_JPE_ENC_HSIZE_MASK 0x00001FFF
++
++#define MRV_JPE_ENC_HSIZE_SHIFT 0
++
++#define MRV_JPE_ENC_VSIZE
++
++#define MRV_JPE_ENC_VSIZE_MASK 0x00000FFF
++
++#define MRV_JPE_ENC_VSIZE_SHIFT 0
++
++
++#define MRV_JPE_ENC_PIC_FORMAT
++#define MRV_JPE_ENC_PIC_FORMAT_MASK 0x00000007
++#define MRV_JPE_ENC_PIC_FORMAT_SHIFT 0
++#define MRV_JPE_ENC_PIC_FORMAT_422 1
++#define MRV_JPE_ENC_PIC_FORMAT_400 4
++
++#define MRV_JPE_RESTART_INTERVAL
++#define MRV_JPE_RESTART_INTERVAL_MASK 0x0000FFFF
++#define MRV_JPE_RESTART_INTERVAL_SHIFT 0
++
++#define MRV_JPE_TQ0_SELECT
++#define MRV_JPE_TQ0_SELECT_MASK 0x00000003
++#define MRV_JPE_TQ0_SELECT_SHIFT 0
++#define MRV_JPE_TQ1_SELECT
++#define MRV_JPE_TQ1_SELECT_MASK 0x00000003
++#define MRV_JPE_TQ1_SELECT_SHIFT 0
++
++
++#define MRV_JPE_TQ2_SELECT
++#define MRV_JPE_TQ2_SELECT_MASK 0x00000003
++#define MRV_JPE_TQ2_SELECT_SHIFT 0
++
++#define MRV_JPE_TQ_SELECT_TAB3 3
++#define MRV_JPE_TQ_SELECT_TAB2 2
++#define MRV_JPE_TQ_SELECT_TAB1 1
++#define MRV_JPE_TQ_SELECT_TAB0 0
++
++
++#define MRV_JPE_DC_TABLE_SELECT_Y
++#define MRV_JPE_DC_TABLE_SELECT_Y_MASK 0x00000001
++#define MRV_JPE_DC_TABLE_SELECT_Y_SHIFT 0
++#define MRV_JPE_DC_TABLE_SELECT_U
++#define MRV_JPE_DC_TABLE_SELECT_U_MASK 0x00000002
++#define MRV_JPE_DC_TABLE_SELECT_U_SHIFT 1
++#define MRV_JPE_DC_TABLE_SELECT_V
++#define MRV_JPE_DC_TABLE_SELECT_V_MASK 0x00000004
++#define MRV_JPE_DC_TABLE_SELECT_V_SHIFT 2
++
++
++#define MRV_JPE_AC_TABLE_SELECT_Y
++#define MRV_JPE_AC_TABLE_SELECT_Y_MASK 0x00000001
++#define MRV_JPE_AC_TABLE_SELECT_Y_SHIFT 0
++#define MRV_JPE_AC_TABLE_SELECT_U
++#define MRV_JPE_AC_TABLE_SELECT_U_MASK 0x00000002
++#define MRV_JPE_AC_TABLE_SELECT_U_SHIFT 1
++#define MRV_JPE_AC_TABLE_SELECT_V
++#define MRV_JPE_AC_TABLE_SELECT_V_MASK 0x00000004
++#define MRV_JPE_AC_TABLE_SELECT_V_SHIFT 2
++
++
++#define MRV_JPE_TABLE_WDATA_H
++#define MRV_JPE_TABLE_WDATA_H_MASK 0x0000FF00
++#define MRV_JPE_TABLE_WDATA_H_SHIFT 8
++#define MRV_JPE_TABLE_WDATA_L
++#define MRV_JPE_TABLE_WDATA_L_MASK 0x000000FF
++#define MRV_JPE_TABLE_WDATA_L_SHIFT 0
++
++
++#define MRV_JPE_TABLE_ID
++#define MRV_JPE_TABLE_ID_MASK 0x0000000F
++#define MRV_JPE_TABLE_ID_SHIFT 0
++#define MRV_JPE_TABLE_ID_QUANT0 0
++#define MRV_JPE_TABLE_ID_QUANT1 1
++#define MRV_JPE_TABLE_ID_QUANT2 2
++#define MRV_JPE_TABLE_ID_QUANT3 3
++#define MRV_JPE_TABLE_ID_VLC_DC0 4
++#define MRV_JPE_TABLE_ID_VLC_AC0 5
++#define MRV_JPE_TABLE_ID_VLC_DC1 6
++#define MRV_JPE_TABLE_ID_VLC_AC1 7
++
++#define MRV_JPE_TAC0_LEN
++#define MRV_JPE_TAC0_LEN_MASK 0x000000FF
++#define MRV_JPE_TAC0_LEN_SHIFT 0
++
++#define MRV_JPE_TDC0_LEN
++#define MRV_JPE_TDC0_LEN_MASK 0x000000FF
++#define MRV_JPE_TDC0_LEN_SHIFT 0
++
++#define MRV_JPE_TAC1_LEN
++#define MRV_JPE_TAC1_LEN_MASK 0x000000FF
++#define MRV_JPE_TAC1_LEN_SHIFT 0
++
++#define MRV_JPE_TDC1_LEN
++#define MRV_JPE_TDC1_LEN_MASK 0x000000FF
++#define MRV_JPE_TDC1_LEN_SHIFT 0
++
++
++#define MRV_JPE_CODEC_BUSY
++#define MRV_JPE_CODEC_BUSY_MASK 0x00000001
++#define MRV_JPE_CODEC_BUSY_SHIFT 0
++
++
++#define MRV_JPE_HEADER_MODE
++#define MRV_JPE_HEADER_MODE_MASK 0x00000003
++#define MRV_JPE_HEADER_MODE_SHIFT 0
++#define MRV_JPE_HEADER_MODE_NO 0
++#define MRV_JPE_HEADER_MODE_JFIF 2
++
++#define MRV_JPE_ENCODE_MODE
++#define MRV_JPE_ENCODE_MODE_MASK 0x00000001
++#define MRV_JPE_ENCODE_MODE_SHIFT 0
++
++#define MRV_JPE_DEB_BAD_TABLE_ACCESS
++#define MRV_JPE_DEB_BAD_TABLE_ACCESS_MASK 0x00000100
++#define MRV_JPE_DEB_BAD_TABLE_ACCESS_SHIFT 8
++#define MRV_JPE_DEB_VLC_TABLE_BUSY
++#define MRV_JPE_DEB_VLC_TABLE_BUSY_MASK 0x00000020
++#define MRV_JPE_DEB_VLC_TABLE_BUSY_SHIFT 5
++#define MRV_JPE_DEB_R2B_MEMORY_FULL
++#define MRV_JPE_DEB_R2B_MEMORY_FULL_MASK 0x00000010
++#define MRV_JPE_DEB_R2B_MEMORY_FULL_SHIFT 4
++#define MRV_JPE_DEB_VLC_ENCODE_BUSY
++#define MRV_JPE_DEB_VLC_ENCODE_BUSY_MASK 0x00000008
++#define MRV_JPE_DEB_VLC_ENCODE_BUSY_SHIFT 3
++#define MRV_JPE_DEB_QIQ_TABLE_ACC
++#define MRV_JPE_DEB_QIQ_TABLE_ACC_MASK 0x00000004
++#define MRV_JPE_DEB_QIQ_TABLE_ACC_SHIFT 2
++
++#define MRV_JPE_VLC_TABLE_ERR
++#define MRV_JPE_VLC_TABLE_ERR_MASK 0x00000400
++#define MRV_JPE_VLC_TABLE_ERR_SHIFT 10
++#define MRV_JPE_R2B_IMG_SIZE_ERR
++#define MRV_JPE_R2B_IMG_SIZE_ERR_MASK 0x00000200
++#define MRV_JPE_R2B_IMG_SIZE_ERR_SHIFT 9
++#define MRV_JPE_DCT_ERR
++#define MRV_JPE_DCT_ERR_MASK 0x00000080
++#define MRV_JPE_DCT_ERR_SHIFT 7
++#define MRV_JPE_VLC_SYMBOL_ERR
++#define MRV_JPE_VLC_SYMBOL_ERR_MASK 0x00000010
++#define MRV_JPE_VLC_SYMBOL_ERR_SHIFT 4
++
++
++#define MRV_JPE_ALL_ERR
++#define MRV_JPE_ALL_ERR_MASK \
++(0 \
++| MRV_JPE_VLC_TABLE_ERR_MASK \
++| MRV_JPE_R2B_IMG_SIZE_ERR_MASK \
++| MRV_JPE_DCT_ERR_MASK \
++| MRV_JPE_VLC_SYMBOL_ERR_MASK \
++)
++#define MRV_JPE_ALL_ERR_SHIFT 0
++
++#define MRV_JPE_GEN_HEADER_DONE
++#define MRV_JPE_GEN_HEADER_DONE_MASK 0x00000020
++#define MRV_JPE_GEN_HEADER_DONE_SHIFT 5
++#define MRV_JPE_ENCODE_DONE
++#define MRV_JPE_ENCODE_DONE_MASK 0x00000010
++#define MRV_JPE_ENCODE_DONE_SHIFT 4
++
++/* FIXME | MRV_JPE_GEN_HEADER_DONE_MASK \ */
++
++#define MRV_JPE_ALL_STAT
++#define MRV_JPE_ALL_STAT_MASK \
++(0 \
++| MRV_JPE_ENCODE_DONE_MASK \
++)
++#define MRV_JPE_ALL_STAT_SHIFT 0
++
++
++#define MRV_SMIA_DMA_CHANNEL_SEL
++#define MRV_SMIA_DMA_CHANNEL_SEL_MASK 0x00000700
++#define MRV_SMIA_DMA_CHANNEL_SEL_SHIFT 8
++#define MRV_SMIA_SHUTDOWN_LANE
++#define MRV_SMIA_SHUTDOWN_LANE_MASK 0x00000008
++#define MRV_SMIA_SHUTDOWN_LANE_SHIFT 3
++
++#define MRV_SMIA_FLUSH_FIFO
++#define MRV_SMIA_FLUSH_FIFO_MASK 0x00000002
++#define MRV_SMIA_FLUSH_FIFO_SHIFT 1
++
++#define MRV_SMIA_OUTPUT_ENA
++#define MRV_SMIA_OUTPUT_ENA_MASK 0x00000001
++#define MRV_SMIA_OUTPUT_ENA_SHIFT 0
++
++#define MRV_SMIA_DMA_CHANNEL
++#define MRV_SMIA_DMA_CHANNEL_MASK 0x00000700
++#define MRV_SMIA_DMA_CHANNEL_SHIFT 8
++#define MRV_SMIA_EMB_DATA_AVAIL
++#define MRV_SMIA_EMB_DATA_AVAIL_MASK 0x00000001
++#define MRV_SMIA_EMB_DATA_AVAIL_SHIFT 0
++
++#define MRV_SMIA_IMSC_FIFO_FILL_LEVEL
++#define MRV_SMIA_IMSC_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_IMSC_FIFO_FILL_LEVEL_SHIFT 5
++
++#define MRV_SMIA_IMSC_SYNC_FIFO_OVFLW
++#define MRV_SMIA_IMSC_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_IMSC_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_IMSC_ERR_CS
++#define MRV_SMIA_IMSC_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_IMSC_ERR_CS_SHIFT 3
++#define MRV_SMIA_IMSC_ERR_PROTOCOL
++#define MRV_SMIA_IMSC_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_IMSC_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_IMSC_EMB_DATA_OVFLW
++#define MRV_SMIA_IMSC_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_IMSC_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_IMSC_FRAME_END
++#define MRV_SMIA_IMSC_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_IMSC_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_IMSC_ALL_IRQS
++#define MRV_SMIA_IMSC_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_IMSC_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_IMSC_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_IMSC_ERR_CS_MASK \
++| MRV_SMIA_IMSC_ERR_PROTOCOL_MASK \
++| MRV_SMIA_IMSC_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_IMSC_FRAME_END_MASK \
++)
++#define MRV_SMIA_IMSC_ALL_IRQS_SHIFT 0
++
++#define MRV_SMIA_RIS_FIFO_FILL_LEVEL
++#define MRV_SMIA_RIS_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_RIS_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_RIS_SYNC_FIFO_OVFLW
++#define MRV_SMIA_RIS_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_RIS_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_RIS_ERR_CS
++#define MRV_SMIA_RIS_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_RIS_ERR_CS_SHIFT 3
++#define MRV_SMIA_RIS_ERR_PROTOCOL
++#define MRV_SMIA_RIS_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_RIS_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_RIS_EMB_DATA_OVFLW
++#define MRV_SMIA_RIS_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_RIS_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_RIS_FRAME_END
++#define MRV_SMIA_RIS_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_RIS_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_RIS_ALL_IRQS
++#define MRV_SMIA_RIS_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_RIS_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_RIS_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_RIS_ERR_CS_MASK \
++| MRV_SMIA_RIS_ERR_PROTOCOL_MASK \
++| MRV_SMIA_RIS_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_RIS_FRAME_END_MASK \
++)
++#define MRV_SMIA_RIS_ALL_IRQS_SHIFT 0
++
++#define MRV_SMIA_MIS_FIFO_FILL_LEVEL
++#define MRV_SMIA_MIS_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_MIS_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_MIS_SYNC_FIFO_OVFLW
++#define MRV_SMIA_MIS_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_MIS_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_MIS_ERR_CS
++#define MRV_SMIA_MIS_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_MIS_ERR_CS_SHIFT 3
++#define MRV_SMIA_MIS_ERR_PROTOCOL
++#define MRV_SMIA_MIS_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_MIS_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_MIS_EMB_DATA_OVFLW
++#define MRV_SMIA_MIS_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_MIS_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_MIS_FRAME_END
++#define MRV_SMIA_MIS_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_MIS_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_MIS_ALL_IRQS
++#define MRV_SMIA_MIS_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_MIS_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_MIS_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_MIS_ERR_CS_MASK \
++| MRV_SMIA_MIS_ERR_PROTOCOL_MASK \
++| MRV_SMIA_MIS_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_MIS_FRAME_END_MASK \
++)
++#define MRV_SMIA_MIS_ALL_IRQS_SHIFT 0
++
++
++#define MRV_SMIA_ICR_FIFO_FILL_LEVEL
++#define MRV_SMIA_ICR_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_ICR_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_ICR_SYNC_FIFO_OVFLW
++#define MRV_SMIA_ICR_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_ICR_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_ICR_ERR_CS
++#define MRV_SMIA_ICR_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_ICR_ERR_CS_SHIFT 3
++#define MRV_SMIA_ICR_ERR_PROTOCOL
++#define MRV_SMIA_ICR_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_ICR_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_ICR_EMB_DATA_OVFLW
++#define MRV_SMIA_ICR_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_ICR_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_ICR_FRAME_END
++#define MRV_SMIA_ICR_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_ICR_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_ICR_ALL_IRQS
++#define MRV_SMIA_ICR_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_ICR_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_ICR_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_ICR_ERR_CS_MASK \
++| MRV_SMIA_ICR_ERR_PROTOCOL_MASK \
++| MRV_SMIA_ICR_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_ICR_FRAME_END_MASK \
++)
++#define MRV_SMIA_ICR_ALL_IRQS_SHIFT 0
++
++
++#define MRV_SMIA_ISR_FIFO_FILL_LEVEL
++#define MRV_SMIA_ISR_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_ISR_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_ISR_SYNC_FIFO_OVFLW
++#define MRV_SMIA_ISR_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_ISR_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_ISR_ERR_CS
++#define MRV_SMIA_ISR_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_ISR_ERR_CS_SHIFT 3
++#define MRV_SMIA_ISR_ERR_PROTOCOL
++#define MRV_SMIA_ISR_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_ISR_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_ISR_EMB_DATA_OVFLW
++#define MRV_SMIA_ISR_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_ISR_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_ISR_FRAME_END
++#define MRV_SMIA_ISR_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_ISR_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_ISR_ALL_IRQS
++#define MRV_SMIA_ISR_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_ISR_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_ISR_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_ISR_ERR_CS_MASK \
++| MRV_SMIA_ISR_ERR_PROTOCOL_MASK \
++| MRV_SMIA_ISR_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_ISR_FRAME_END_MASK \
++)
++#define MRV_SMIA_ISR_ALL_IRQS_SHIFT 0
++
++#define MRV_SMIA_DATA_FORMAT_SEL
++#define MRV_SMIA_DATA_FORMAT_SEL_MASK 0x0000000F
++#define MRV_SMIA_DATA_FORMAT_SEL_SHIFT 0
++#define MRV_SMIA_DATA_FORMAT_SEL_YUV422 0
++#define MRV_SMIA_DATA_FORMAT_SEL_YUV420 1
++#define MRV_SMIA_DATA_FORMAT_SEL_RGB444 4
++#define MRV_SMIA_DATA_FORMAT_SEL_RGB565 5
++#define MRV_SMIA_DATA_FORMAT_SEL_RGB888 6
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW6 8
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW7 9
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW8 10
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW10 11
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW12 12
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW8TO10 13
++#define MRV_SMIA_DATA_FORMAT_SEL_COMPRESSED 15
++
++
++#define MRV_SMIA_SOF_EMB_DATA_LINES
++#define MRV_SMIA_SOF_EMB_DATA_LINES_MASK 0x00000007
++#define MRV_SMIA_SOF_EMB_DATA_LINES_SHIFT 0
++#define MRV_SMIA_SOF_EMB_DATA_LINES_MIN 0
++#define MRV_SMIA_SOF_EMB_DATA_LINES_MAX \
++ (MRV_SMIA_SOF_EMB_DATA_LINES_MASK >> MRV_SMIA_SOF_EMB_DATA_LINES_SHIFT)
++#define MRV_SMIA_EMB_HSTART
++#define MRV_SMIA_EMB_HSTART_MASK 0x00003FFF
++#define MRV_SMIA_EMB_HSTART_SHIFT 0
++#define MRV_SMIA_EMB_HSTART_VALID_MASK (MRV_SMIA_EMB_HSTART_MASK & ~0x00000003)
++
++#define MRV_SMIA_EMB_HSIZE
++#define MRV_SMIA_EMB_HSIZE_MASK 0x00003FFF
++#define MRV_SMIA_EMB_HSIZE_SHIFT 0
++#define MRV_SMIA_EMB_HSIZE_VALID_MASK (MRV_SMIA_EMB_HSIZE_MASK & ~0x00000003)
++
++#define MRV_SMIA_EMB_VSTART
++#define MRV_SMIA_EMB_VSTART_MASK 0x00000FFF
++#define MRV_SMIA_EMB_VSTART_SHIFT 0
++
++#define MRV_SMIA_NUM_LINES
++#define MRV_SMIA_NUM_LINES_MASK 0x00000FFF
++
++#define MRV_SMIA_NUM_LINES_SHIFT 0
++#define MRV_SMIA_NUM_LINES_MIN 1
++#define MRV_SMIA_NUM_LINES_MAX \
++ (MRV_SMIA_NUM_LINES_MASK >> MRV_SMIA_NUM_LINES_SHIFT)
++
++#define MRV_SMIA_EMB_DATA_FIFO
++#define MRV_SMIA_EMB_DATA_FIFO_MASK 0xFFFFFFFF
++#define MRV_SMIA_EMB_DATA_FIFO_SHIFT 0
++
++#define MRV_SMIA_FIFO_FILL_LEVEL
++#define MRV_SMIA_FIFO_FILL_LEVEL_MASK 0x000003FF
++#define MRV_SMIA_FIFO_FILL_LEVEL_SHIFT 0
++#define MRV_SMIA_FIFO_FILL_LEVEL_VALID_MASK \
++ (MRV_SMIA_FIFO_FILL_LEVEL_MASK & ~0x00000003)
++
++#define MRV_MIPI_ERR_SOT_SYNC_HS_SKIP
++#define MRV_MIPI_ERR_SOT_SYNC_HS_SKIP_MASK 0x00020000
++#define MRV_MIPI_ERR_SOT_SYNC_HS_SKIP_SHIFT 17
++#define MRV_MIPI_ERR_SOT_HS_SKIP
++#define MRV_MIPI_ERR_SOT_HS_SKIP_MASK 0x00010000
++#define MRV_MIPI_ERR_SOT_HS_SKIP_SHIFT 16
++
++#define MRV_MIPI_NUM_LANES
++#define MRV_MIPI_NUM_LANES_MASK 0x00003000
++#define MRV_MIPI_NUM_LANES_SHIFT 12
++#define MRV_MIPI_SHUTDOWN_LANE
++#define MRV_MIPI_SHUTDOWN_LANE_MASK 0x00000F00
++#define MRV_MIPI_SHUTDOWN_LANE_SHIFT 8
++#define MRV_MIPI_FLUSH_FIFO
++#define MRV_MIPI_FLUSH_FIFO_MASK 0x00000002
++#define MRV_MIPI_FLUSH_FIFO_SHIFT 1
++#define MRV_MIPI_OUTPUT_ENA
++#define MRV_MIPI_OUTPUT_ENA_MASK 0x00000001
++#define MRV_MIPI_OUTPUT_ENA_SHIFT 0
++
++#define MRV_MIPI_STOPSTATE
++#define MRV_MIPI_STOPSTATE_MASK 0x00000F00
++#define MRV_MIPI_STOPSTATE_SHIFT 8
++#define MRV_MIPI_ADD_DATA_AVAIL
++#define MRV_MIPI_ADD_DATA_AVAIL_MASK 0x00000001
++#define MRV_MIPI_ADD_DATA_AVAIL_SHIFT 0
++
++#define MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_IMSC_ADD_DATA_OVFLW
++#define MRV_MIPI_IMSC_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_IMSC_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_IMSC_FRAME_END
++#define MRV_MIPI_IMSC_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_IMSC_FRAME_END_SHIFT 24
++#define MRV_MIPI_IMSC_ERR_CS
++#define MRV_MIPI_IMSC_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_IMSC_ERR_CS_SHIFT 23
++#define MRV_MIPI_IMSC_ERR_ECC1
++#define MRV_MIPI_IMSC_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_IMSC_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_IMSC_ERR_ECC2
++#define MRV_MIPI_IMSC_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_IMSC_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_IMSC_ERR_PROTOCOL
++#define MRV_MIPI_IMSC_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_IMSC_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_IMSC_ERR_CONTROL
++#define MRV_MIPI_IMSC_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_IMSC_ERR_CONTROL_SHIFT 16
++
++#define MRV_MIPI_IMSC_ERR_EOT_SYNC
++#define MRV_MIPI_IMSC_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_IMSC_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_IMSC_ERR_SOT_SYNC
++#define MRV_MIPI_IMSC_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_IMSC_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_IMSC_ERR_SOT
++#define MRV_MIPI_IMSC_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_IMSC_ERR_SOT_SHIFT 4
++#define MRV_MIPI_IMSC_SYNC_FIFO_OVFLW
++#define MRV_MIPI_IMSC_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_IMSC_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_IMSC_ALL_IRQS
++#define MRV_MIPI_IMSC_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_IMSC_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_IMSC_FRAME_END_MASK \
++| MRV_MIPI_IMSC_ERR_CS_MASK \
++| MRV_MIPI_IMSC_ERR_ECC1_MASK \
++| MRV_MIPI_IMSC_ERR_ECC2_MASK \
++| MRV_MIPI_IMSC_ERR_PROTOCOL_MASK \
++| MRV_MIPI_IMSC_ERR_CONTROL_MASK \
++| MRV_MIPI_IMSC_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_IMSC_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_IMSC_ERR_SOT_MASK \
++| MRV_MIPI_IMSC_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_IMSC_ALL_IRQS_SHIFT 0
++
++#define MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_RIS_ADD_DATA_OVFLW
++#define MRV_MIPI_RIS_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_RIS_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_RIS_FRAME_END
++#define MRV_MIPI_RIS_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_RIS_FRAME_END_SHIFT 24
++#define MRV_MIPI_RIS_ERR_CS
++#define MRV_MIPI_RIS_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_RIS_ERR_CS_SHIFT 23
++#define MRV_MIPI_RIS_ERR_ECC1
++#define MRV_MIPI_RIS_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_RIS_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_RIS_ERR_ECC2
++#define MRV_MIPI_RIS_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_RIS_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_RIS_ERR_PROTOCOL
++#define MRV_MIPI_RIS_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_RIS_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_RIS_ERR_CONTROL
++#define MRV_MIPI_RIS_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_RIS_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_RIS_ERR_EOT_SYNC
++#define MRV_MIPI_RIS_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_RIS_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_RIS_ERR_SOT_SYNC
++#define MRV_MIPI_RIS_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_RIS_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_RIS_ERR_SOT
++#define MRV_MIPI_RIS_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_RIS_ERR_SOT_SHIFT 4
++#define MRV_MIPI_RIS_SYNC_FIFO_OVFLW
++#define MRV_MIPI_RIS_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_RIS_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_RIS_ALL_IRQS
++#define MRV_MIPI_RIS_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_RIS_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_RIS_FRAME_END_MASK \
++| MRV_MIPI_RIS_ERR_CS_MASK \
++| MRV_MIPI_RIS_ERR_ECC1_MASK \
++| MRV_MIPI_RIS_ERR_ECC2_MASK \
++| MRV_MIPI_RIS_ERR_PROTOCOL_MASK \
++| MRV_MIPI_RIS_ERR_CONTROL_MASK \
++| MRV_MIPI_RIS_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_RIS_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_RIS_ERR_SOT_MASK \
++| MRV_MIPI_RIS_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_RIS_ALL_IRQS_SHIFT 0
++
++#define MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_MIS_ADD_DATA_OVFLW
++#define MRV_MIPI_MIS_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_MIS_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_MIS_FRAME_END
++#define MRV_MIPI_MIS_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_MIS_FRAME_END_SHIFT 24
++#define MRV_MIPI_MIS_ERR_CS
++#define MRV_MIPI_MIS_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_MIS_ERR_CS_SHIFT 23
++#define MRV_MIPI_MIS_ERR_ECC1
++#define MRV_MIPI_MIS_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_MIS_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_MIS_ERR_ECC2
++#define MRV_MIPI_MIS_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_MIS_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_MIS_ERR_PROTOCOL
++#define MRV_MIPI_MIS_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_MIS_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_MIS_ERR_CONTROL
++#define MRV_MIPI_MIS_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_MIS_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_MIS_ERR_EOT_SYNC
++#define MRV_MIPI_MIS_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_MIS_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_MIS_ERR_SOT_SYNC
++#define MRV_MIPI_MIS_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_MIS_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_MIS_ERR_SOT
++#define MRV_MIPI_MIS_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_MIS_ERR_SOT_SHIFT 4
++#define MRV_MIPI_MIS_SYNC_FIFO_OVFLW
++#define MRV_MIPI_MIS_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_MIS_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_MIS_ALL_IRQS
++#define MRV_MIPI_MIS_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_MIS_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_MIS_FRAME_END_MASK \
++| MRV_MIPI_MIS_ERR_CS_MASK \
++| MRV_MIPI_MIS_ERR_ECC1_MASK \
++| MRV_MIPI_MIS_ERR_ECC2_MASK \
++| MRV_MIPI_MIS_ERR_PROTOCOL_MASK \
++| MRV_MIPI_MIS_ERR_CONTROL_MASK \
++| MRV_MIPI_MIS_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_MIS_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_MIS_ERR_SOT_MASK \
++| MRV_MIPI_MIS_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_MIS_ALL_IRQS_SHIFT 0
++
++#define MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_ICR_ADD_DATA_OVFLW
++#define MRV_MIPI_ICR_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_ICR_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_ICR_FRAME_END
++#define MRV_MIPI_ICR_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_ICR_FRAME_END_SHIFT 24
++#define MRV_MIPI_ICR_ERR_CS
++#define MRV_MIPI_ICR_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_ICR_ERR_CS_SHIFT 23
++#define MRV_MIPI_ICR_ERR_ECC1
++#define MRV_MIPI_ICR_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_ICR_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_ICR_ERR_ECC2
++#define MRV_MIPI_ICR_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_ICR_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_ICR_ERR_PROTOCOL
++#define MRV_MIPI_ICR_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_ICR_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_ICR_ERR_CONTROL
++#define MRV_MIPI_ICR_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_ICR_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_ICR_ERR_EOT_SYNC
++#define MRV_MIPI_ICR_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_ICR_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_ICR_ERR_SOT_SYNC
++#define MRV_MIPI_ICR_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_ICR_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_ICR_ERR_SOT
++#define MRV_MIPI_ICR_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_ICR_ERR_SOT_SHIFT 4
++#define MRV_MIPI_ICR_SYNC_FIFO_OVFLW
++#define MRV_MIPI_ICR_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_ICR_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_ICR_ALL_IRQS
++#define MRV_MIPI_ICR_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_ICR_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_ICR_FRAME_END_MASK \
++| MRV_MIPI_ICR_ERR_CS_MASK \
++| MRV_MIPI_ICR_ERR_ECC1_MASK \
++| MRV_MIPI_ICR_ERR_ECC2_MASK \
++| MRV_MIPI_ICR_ERR_PROTOCOL_MASK \
++| MRV_MIPI_ICR_ERR_CONTROL_MASK \
++| MRV_MIPI_ICR_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_ICR_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_ICR_ERR_SOT_MASK \
++| MRV_MIPI_ICR_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_ICR_ALL_IRQS_SHIFT 0
++
++
++#define MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_ISR_ADD_DATA_OVFLW
++#define MRV_MIPI_ISR_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_ISR_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_ISR_FRAME_END
++#define MRV_MIPI_ISR_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_ISR_FRAME_END_SHIFT 24
++#define MRV_MIPI_ISR_ERR_CS
++#define MRV_MIPI_ISR_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_ISR_ERR_CS_SHIFT 23
++#define MRV_MIPI_ISR_ERR_ECC1
++#define MRV_MIPI_ISR_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_ISR_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_ISR_ERR_ECC2
++#define MRV_MIPI_ISR_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_ISR_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_ISR_ERR_PROTOCOL
++#define MRV_MIPI_ISR_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_ISR_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_ISR_ERR_CONTROL
++#define MRV_MIPI_ISR_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_ISR_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_ISR_ERR_EOT_SYNC
++#define MRV_MIPI_ISR_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_ISR_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_ISR_ERR_SOT_SYNC
++#define MRV_MIPI_ISR_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_ISR_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_ISR_ERR_SOT
++#define MRV_MIPI_ISR_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_ISR_ERR_SOT_SHIFT 4
++#define MRV_MIPI_ISR_SYNC_FIFO_OVFLW
++#define MRV_MIPI_ISR_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_ISR_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_ISR_ALL_IRQS
++#define MRV_MIPI_ISR_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_ISR_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_ISR_FRAME_END_MASK \
++| MRV_MIPI_ISR_ERR_CS_MASK \
++| MRV_MIPI_ISR_ERR_ECC1_MASK \
++| MRV_MIPI_ISR_ERR_ECC2_MASK \
++| MRV_MIPI_ISR_ERR_PROTOCOL_MASK \
++| MRV_MIPI_ISR_ERR_CONTROL_MASK \
++| MRV_MIPI_ISR_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_ISR_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_ISR_ERR_SOT_MASK \
++| MRV_MIPI_ISR_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_ISR_ALL_IRQS_SHIFT 0
++
++
++#define MRV_MIPI_VIRTUAL_CHANNEL
++#define MRV_MIPI_VIRTUAL_CHANNEL_MASK 0x000000C0
++#define MRV_MIPI_VIRTUAL_CHANNEL_SHIFT 6
++
++#define MRV_MIPI_VIRTUAL_CHANNEL_MAX \
++ (MRV_MIPI_VIRTUAL_CHANNEL_MASK >> MRV_MIPI_VIRTUAL_CHANNEL_SHIFT)
++#define MRV_MIPI_DATA_TYPE
++#define MRV_MIPI_DATA_TYPE_MASK 0x0000003F
++#define MRV_MIPI_DATA_TYPE_SHIFT 0
++
++#define MRV_MIPI_DATA_TYPE_MAX \
++ (MRV_MIPI_DATA_TYPE_MASK >> MRV_MIPI_DATA_TYPE_SHIFT)
++
++
++#define MRV_MIPI_VIRTUAL_CHANNEL_SEL
++#define MRV_MIPI_VIRTUAL_CHANNEL_SEL_MASK 0x000000C0
++#define MRV_MIPI_VIRTUAL_CHANNEL_SEL_SHIFT 6
++#define MRV_MIPI_DATA_TYPE_SEL
++#define MRV_MIPI_DATA_TYPE_SEL_MASK 0x0000003F
++#define MRV_MIPI_DATA_TYPE_SEL_SHIFT 0
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_8BIT 24
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_10BIT 25
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_8BIT_LEGACY 26
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_8BIT_CSPS 28
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_10BIT_CSPS 29
++#define MRV_MIPI_DATA_TYPE_SEL_YUV422_8BIT 30
++#define MRV_MIPI_DATA_TYPE_SEL_YUV422_10BIT 31
++#define MRV_MIPI_DATA_TYPE_SEL_RGB444 32
++#define MRV_MIPI_DATA_TYPE_SEL_RGB555 33
++#define MRV_MIPI_DATA_TYPE_SEL_RGB565 34
++#define MRV_MIPI_DATA_TYPE_SEL_RGB666 35
++#define MRV_MIPI_DATA_TYPE_SEL_RGB888 36
++#define MRV_MIPI_DATA_TYPE_SEL_RAW6 40
++#define MRV_MIPI_DATA_TYPE_SEL_RAW7 41
++#define MRV_MIPI_DATA_TYPE_SEL_RAW8 42
++#define MRV_MIPI_DATA_TYPE_SEL_RAW10 43
++#define MRV_MIPI_DATA_TYPE_SEL_RAW12 44
++#define MRV_MIPI_DATA_TYPE_SEL_USER1 48
++#define MRV_MIPI_DATA_TYPE_SEL_USER2 49
++#define MRV_MIPI_DATA_TYPE_SEL_USER3 50
++#define MRV_MIPI_DATA_TYPE_SEL_USER4 51
++
++
++#define MRV_MIPI_ADD_DATA_VC_1
++#define MRV_MIPI_ADD_DATA_VC_1_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_1_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_1
++#define MRV_MIPI_ADD_DATA_TYPE_1_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_1_SHIFT 0
++
++
++#define MRV_MIPI_ADD_DATA_VC_2
++#define MRV_MIPI_ADD_DATA_VC_2_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_2_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_2
++#define MRV_MIPI_ADD_DATA_TYPE_2_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_2_SHIFT 0
++
++
++#define MRV_MIPI_ADD_DATA_VC_3
++#define MRV_MIPI_ADD_DATA_VC_3_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_3_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_3
++#define MRV_MIPI_ADD_DATA_TYPE_3_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_3_SHIFT 0
++
++
++#define MRV_MIPI_ADD_DATA_VC_4
++#define MRV_MIPI_ADD_DATA_VC_4_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_4_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_4
++#define MRV_MIPI_ADD_DATA_TYPE_4_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_4_SHIFT 0
++
++#define MRV_MIPI_ADD_DATA_FIFO
++#define MRV_MIPI_ADD_DATA_FIFO_MASK 0xFFFFFFFF
++#define MRV_MIPI_ADD_DATA_FIFO_SHIFT 0
++
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL_MASK 0x00001FFC
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL_SHIFT 0
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL_MAX 0x00001FFC
++
++#define MRV_AFM_AFM_EN
++#define MRV_AFM_AFM_EN_MASK 0x00000001
++#define MRV_AFM_AFM_EN_SHIFT 0
++
++#define MRV_AFM_A_H_L
++#define MRV_AFM_A_H_L_MASK 0x0FFF0000
++#define MRV_AFM_A_H_L_SHIFT 16
++#define MRV_AFM_A_H_L_MIN 5
++#define MRV_AFM_A_H_L_MAX (MRV_AFM_A_H_L_MASK >> MRV_AFM_A_H_L_SHIFT)
++#define MRV_AFM_A_V_T
++#define MRV_AFM_A_V_T_MASK 0x00000FFF
++#define MRV_AFM_A_V_T_SHIFT 0
++#define MRV_AFM_A_V_T_MIN 2
++#define MRV_AFM_A_V_T_MAX (MRV_AFM_A_V_T_MASK >> MRV_AFM_A_V_T_SHIFT)
++
++
++#define MRV_AFM_A_H_R
++#define MRV_AFM_A_H_R_MASK 0x0FFF0000
++#define MRV_AFM_A_H_R_SHIFT 16
++#define MRV_AFM_A_H_R_MIN 5
++#define MRV_AFM_A_H_R_MAX (MRV_AFM_A_H_R_MASK >> MRV_AFM_A_H_R_SHIFT)
++#define MRV_AFM_A_V_B
++#define MRV_AFM_A_V_B_MASK 0x00000FFF
++#define MRV_AFM_A_V_B_SHIFT 0
++#define MRV_AFM_A_V_B_MIN 2
++#define MRV_AFM_A_V_B_MAX (MRV_AFM_A_V_B_MASK >> MRV_AFM_A_V_B_SHIFT)
++
++
++#define MRV_AFM_B_H_L
++#define MRV_AFM_B_H_L_MASK 0x0FFF0000
++#define MRV_AFM_B_H_L_SHIFT 16
++#define MRV_AFM_B_H_L_MIN 5
++#define MRV_AFM_B_H_L_MAX (MRV_AFM_B_H_L_MASK >> MRV_AFM_B_H_L_SHIFT)
++#define MRV_AFM_B_V_T
++#define MRV_AFM_B_V_T_MASK 0x00000FFF
++#define MRV_AFM_B_V_T_SHIFT 0
++#define MRV_AFM_B_V_T_MIN 2
++#define MRV_AFM_B_V_T_MAX (MRV_AFM_B_V_T_MASK >> MRV_AFM_B_V_T_SHIFT)
++
++
++#define MRV_AFM_B_H_R
++#define MRV_AFM_B_H_R_MASK 0x0FFF0000
++#define MRV_AFM_B_H_R_SHIFT 16
++#define MRV_AFM_B_H_R_MIN 5
++#define MRV_AFM_B_H_R_MAX (MRV_AFM_B_H_R_MASK >> MRV_AFM_B_H_R_SHIFT)
++#define MRV_AFM_B_V_B
++#define MRV_AFM_B_V_B_MASK 0x00000FFF
++#define MRV_AFM_B_V_B_SHIFT 0
++#define MRV_AFM_B_V_B_MIN 2
++#define MRV_AFM_B_V_B_MAX (MRV_AFM_B_V_B_MASK >> MRV_AFM_B_V_B_SHIFT)
++
++
++#define MRV_AFM_C_H_L
++#define MRV_AFM_C_H_L_MASK 0x0FFF0000
++#define MRV_AFM_C_H_L_SHIFT 16
++#define MRV_AFM_C_H_L_MIN 5
++#define MRV_AFM_C_H_L_MAX (MRV_AFM_C_H_L_MASK >> MRV_AFM_C_H_L_SHIFT)
++#define MRV_AFM_C_V_T
++#define MRV_AFM_C_V_T_MASK 0x00000FFF
++#define MRV_AFM_C_V_T_SHIFT 0
++#define MRV_AFM_C_V_T_MIN 2
++#define MRV_AFM_C_V_T_MAX (MRV_AFM_C_V_T_MASK >> MRV_AFM_C_V_T_SHIFT)
++
++
++#define MRV_AFM_C_H_R
++#define MRV_AFM_C_H_R_MASK 0x0FFF0000
++#define MRV_AFM_C_H_R_SHIFT 16
++#define MRV_AFM_C_H_R_MIN 5
++#define MRV_AFM_C_H_R_MAX (MRV_AFM_C_H_R_MASK >> MRV_AFM_C_H_R_SHIFT)
++#define MRV_AFM_C_V_B
++#define MRV_AFM_C_V_B_MASK 0x00000FFF
++#define MRV_AFM_C_V_B_SHIFT 0
++#define MRV_AFM_C_V_B_MIN 2
++#define MRV_AFM_C_V_B_MAX (MRV_AFM_C_V_B_MASK >> MRV_AFM_C_V_B_SHIFT)
++
++#define MRV_AFM_AFM_THRES
++#define MRV_AFM_AFM_THRES_MASK 0x0000FFFF
++#define MRV_AFM_AFM_THRES_SHIFT 0
++
++#define MRV_AFM_LUM_VAR_SHIFT
++#define MRV_AFM_LUM_VAR_SHIFT_MASK 0x00070000
++#define MRV_AFM_LUM_VAR_SHIFT_SHIFT 16
++#define MRV_AFM_AFM_VAR_SHIFT
++#define MRV_AFM_AFM_VAR_SHIFT_MASK 0x00000007
++#define MRV_AFM_AFM_VAR_SHIFT_SHIFT 0
++
++#define MRV_AFM_AFM_SUM_A
++#define MRV_AFM_AFM_SUM_A_MASK 0xFFFFFFFF
++#define MRV_AFM_AFM_SUM_A_SHIFT 0
++
++#define MRV_AFM_AFM_SUM_B
++#define MRV_AFM_AFM_SUM_B_MASK 0xFFFFFFFF
++#define MRV_AFM_AFM_SUM_B_SHIFT 0
++
++#define MRV_AFM_AFM_SUM_C
++#define MRV_AFM_AFM_SUM_C_MASK 0xFFFFFFFF
++#define MRV_AFM_AFM_SUM_C_SHIFT 0
++
++#define MRV_AFM_AFM_LUM_A
++#define MRV_AFM_AFM_LUM_A_MASK 0x00FFFFFF
++#define MRV_AFM_AFM_LUM_A_SHIFT 0
++
++#define MRV_AFM_AFM_LUM_B
++#define MRV_AFM_AFM_LUM_B_MASK 0x00FFFFFF
++#define MRV_AFM_AFM_LUM_B_SHIFT 0
++
++#define MRV_AFM_AFM_LUM_C
++#define MRV_AFM_AFM_LUM_C_MASK 0x00FFFFFF
++#define MRV_AFM_AFM_LUM_C_SHIFT 0
++
++
++#define MRV_BP_COR_TYPE
++#define MRV_BP_COR_TYPE_MASK 0x00000010
++#define MRV_BP_COR_TYPE_SHIFT 4
++#define MRV_BP_COR_TYPE_TABLE 0
++#define MRV_BP_COR_TYPE_DIRECT 1
++#define MRV_BP_REP_APPR
++#define MRV_BP_REP_APPR_MASK 0x00000008
++#define MRV_BP_REP_APPR_SHIFT 3
++#define MRV_BP_REP_APPR_NEAREST 0
++#define MRV_BP_REP_APPR_INTERPOL 1
++#define MRV_BP_DEAD_COR_EN
++#define MRV_BP_DEAD_COR_EN_MASK 0x00000004
++#define MRV_BP_DEAD_COR_EN_SHIFT 2
++#define MRV_BP_HOT_COR_EN
++#define MRV_BP_HOT_COR_EN_MASK 0x00000002
++#define MRV_BP_HOT_COR_EN_SHIFT 1
++#define MRV_BP_BP_DET_EN
++#define MRV_BP_BP_DET_EN_MASK 0x00000001
++#define MRV_BP_BP_DET_EN_SHIFT 0
++
++
++
++#define MRV_BP_HOT_THRES
++#define MRV_BP_HOT_THRES_MASK 0x0FFF0000
++#define MRV_BP_HOT_THRES_SHIFT 16
++#define MRV_BP_DEAD_THRES
++#define MRV_BP_DEAD_THRES_MASK 0x00000FFF
++#define MRV_BP_DEAD_THRES_SHIFT 0
++
++
++
++#define MRV_BP_DEV_HOT_THRES
++#define MRV_BP_DEV_HOT_THRES_MASK 0x0FFF0000
++#define MRV_BP_DEV_HOT_THRES_SHIFT 16
++#define MRV_BP_DEV_DEAD_THRES
++#define MRV_BP_DEV_DEAD_THRES_MASK 0x00000FFF
++#define MRV_BP_DEV_DEAD_THRES_SHIFT 0
++
++
++#define MRV_BP_BP_NUMBER
++
++#define MRV_BP_BP_NUMBER_MASK 0x00000FFF
++#define MRV_BP_BP_NUMBER_SHIFT 0
++
++#define MRV_BP_BP_TABLE_ADDR
++#define MRV_BP_BP_TABLE_ADDR_MASK 0x000007FF
++
++#define MRV_BP_BP_TABLE_ADDR_SHIFT 0
++#define MRV_BP_BP_TABLE_ADDR_MAX MRV_BP_BP_TABLE_ADDR_MASK
++
++
++#define MRV_BP_PIX_TYPE
++#define MRV_BP_PIX_TYPE_MASK 0x80000000
++#define MRV_BP_PIX_TYPE_SHIFT 31
++#define MRV_BP_PIX_TYPE_DEAD 0u
++#define MRV_BP_PIX_TYPE_HOT 1u
++#define MRV_BP_V_ADDR
++
++#define MRV_BP_V_ADDR_MASK 0x0FFF0000
++
++#define MRV_BP_V_ADDR_SHIFT 16
++#define MRV_BP_H_ADDR
++#define MRV_BP_H_ADDR_MASK 0x00000FFF
++#define MRV_BP_H_ADDR_SHIFT 0
++
++
++#define MRV_BP_BP_NEW_NUMBER
++#define MRV_BP_BP_NEW_NUMBER_MASK 0x0000000F
++#define MRV_BP_BP_NEW_NUMBER_SHIFT 0
++
++
++#define MRV_BP_NEW_VALUE
++
++#define MRV_BP_NEW_VALUE_MASK 0xF8000000
++#define MRV_BP_NEW_VALUE_SHIFT 27
++#define MRV_BP_NEW_V_ADDR
++
++#define MRV_BP_NEW_V_ADDR_MASK 0x07FF0000
++#define MRV_BP_NEW_V_ADDR_SHIFT 16
++#define MRV_BP_NEW_H_ADDR
++#define MRV_BP_NEW_H_ADDR_MASK 0x00000FFF
++#define MRV_BP_NEW_H_ADDR_SHIFT 0
++
++
++
++#define MRV_LSC_LSC_EN
++#define MRV_LSC_LSC_EN_MASK 0x00000001
++#define MRV_LSC_LSC_EN_SHIFT 0
++
++#define MRV_LSC_R_RAM_ADDR
++#define MRV_LSC_R_RAM_ADDR_MASK 0x000000FF
++#define MRV_LSC_R_RAM_ADDR_SHIFT 0
++#define MRV_LSC_R_RAM_ADDR_MIN 0x00000000
++#define MRV_LSC_R_RAM_ADDR_MAX 0x00000098
++
++#define MRV_LSC_G_RAM_ADDR
++#define MRV_LSC_G_RAM_ADDR_MASK 0x000000FF
++#define MRV_LSC_G_RAM_ADDR_SHIFT 0
++#define MRV_LSC_G_RAM_ADDR_MIN 0x00000000
++#define MRV_LSC_G_RAM_ADDR_MAX 0x00000098
++
++#define MRV_LSC_B_RAM_ADDR
++#define MRV_LSC_B_RAM_ADDR_MASK 0x000000FF
++#define MRV_LSC_B_RAM_ADDR_SHIFT 0
++#define MRV_LSC_B_RAM_ADDR_MIN 0x00000000
++#define MRV_LSC_B_RAM_ADDR_MAX 0x00000098
++
++#define MRV_LSC_R_SAMPLE_1
++#define MRV_LSC_R_SAMPLE_1_MASK 0x00FFF000
++#define MRV_LSC_R_SAMPLE_1_SHIFT 12
++#define MRV_LSC_R_SAMPLE_0
++#define MRV_LSC_R_SAMPLE_0_MASK 0x00000FFF
++#define MRV_LSC_R_SAMPLE_0_SHIFT 0
++
++
++#define MRV_LSC_G_SAMPLE_1
++#define MRV_LSC_G_SAMPLE_1_MASK 0x00FFF000
++#define MRV_LSC_G_SAMPLE_1_SHIFT 12
++#define MRV_LSC_G_SAMPLE_0
++#define MRV_LSC_G_SAMPLE_0_MASK 0x00000FFF
++#define MRV_LSC_G_SAMPLE_0_SHIFT 0
++
++
++#define MRV_LSC_B_SAMPLE_1
++#define MRV_LSC_B_SAMPLE_1_MASK 0x00FFF000
++#define MRV_LSC_B_SAMPLE_1_SHIFT 12
++#define MRV_LSC_B_SAMPLE_0
++#define MRV_LSC_B_SAMPLE_0_MASK 0x00000FFF
++#define MRV_LSC_B_SAMPLE_0_SHIFT 0
++
++#define MRV_LSC_XGRAD_1
++#define MRV_LSC_XGRAD_1_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_1_SHIFT 16
++#define MRV_LSC_XGRAD_0
++#define MRV_LSC_XGRAD_0_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_0_SHIFT 0
++
++#define MRV_LSC_XGRAD_3
++#define MRV_LSC_XGRAD_3_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_3_SHIFT 16
++#define MRV_LSC_XGRAD_2
++#define MRV_LSC_XGRAD_2_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_2_SHIFT 0
++
++#define MRV_LSC_XGRAD_5
++#define MRV_LSC_XGRAD_5_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_5_SHIFT 16
++
++#define MRV_LSC_XGRAD_4
++#define MRV_LSC_XGRAD_4_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_4_SHIFT 0
++
++
++#define MRV_LSC_XGRAD_7
++#define MRV_LSC_XGRAD_7_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_7_SHIFT 16
++
++#define MRV_LSC_XGRAD_6
++#define MRV_LSC_XGRAD_6_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_6_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_1
++#define MRV_LSC_YGRAD_1_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_1_SHIFT 16
++#define MRV_LSC_YGRAD_0
++#define MRV_LSC_YGRAD_0_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_0_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_3
++#define MRV_LSC_YGRAD_3_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_3_SHIFT 16
++
++#define MRV_LSC_YGRAD_2
++#define MRV_LSC_YGRAD_2_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_2_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_5
++#define MRV_LSC_YGRAD_5_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_5_SHIFT 16
++
++#define MRV_LSC_YGRAD_4
++#define MRV_LSC_YGRAD_4_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_4_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_7
++#define MRV_LSC_YGRAD_7_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_7_SHIFT 16
++
++#define MRV_LSC_YGRAD_6
++#define MRV_LSC_YGRAD_6_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_6_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_1
++#define MRV_LSC_X_SECT_SIZE_1_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_1_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_0
++#define MRV_LSC_X_SECT_SIZE_0_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_0_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_3
++#define MRV_LSC_X_SECT_SIZE_3_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_3_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_2
++#define MRV_LSC_X_SECT_SIZE_2_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_2_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_5
++#define MRV_LSC_X_SECT_SIZE_5_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_5_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_4
++#define MRV_LSC_X_SECT_SIZE_4_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_4_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_7
++#define MRV_LSC_X_SECT_SIZE_7_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_7_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_6
++#define MRV_LSC_X_SECT_SIZE_6_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_6_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_1
++#define MRV_LSC_Y_SECT_SIZE_1_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_1_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_0
++#define MRV_LSC_Y_SECT_SIZE_0_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_0_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_3
++#define MRV_LSC_Y_SECT_SIZE_3_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_3_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_2
++#define MRV_LSC_Y_SECT_SIZE_2_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_2_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_5
++#define MRV_LSC_Y_SECT_SIZE_5_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_5_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_4
++#define MRV_LSC_Y_SECT_SIZE_4_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_4_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_7
++#define MRV_LSC_Y_SECT_SIZE_7_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_7_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_6
++#define MRV_LSC_Y_SECT_SIZE_6_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_6_SHIFT 0
++
++
++#define MRV_IS_IS_EN
++#define MRV_IS_IS_EN_MASK 0x00000001
++#define MRV_IS_IS_EN_SHIFT 0
++
++
++#define MRV_IS_IS_RECENTER
++#define MRV_IS_IS_RECENTER_MASK 0x00000007
++#define MRV_IS_IS_RECENTER_SHIFT 0
++#define MRV_IS_IS_RECENTER_MAX \
++ (MRV_IS_IS_RECENTER_MASK >> MRV_IS_IS_RECENTER_SHIFT)
++
++
++#define MRV_IS_IS_H_OFFS
++#define MRV_IS_IS_H_OFFS_MASK 0x00001FFF
++#define MRV_IS_IS_H_OFFS_SHIFT 0
++
++
++#define MRV_IS_IS_V_OFFS
++#define MRV_IS_IS_V_OFFS_MASK 0x00000FFF
++#define MRV_IS_IS_V_OFFS_SHIFT 0
++
++#define MRV_IS_IS_H_SIZE
++#define MRV_IS_IS_H_SIZE_MASK 0x00003FFF
++#define MRV_IS_IS_H_SIZE_SHIFT 0
++
++#define MRV_IS_IS_V_SIZE
++#define MRV_IS_IS_V_SIZE_MASK 0x00000FFF
++#define MRV_IS_IS_V_SIZE_SHIFT 0
++
++#define MRV_IS_IS_MAX_DX
++#define MRV_IS_IS_MAX_DX_MASK 0x00000FFF
++#define MRV_IS_IS_MAX_DX_SHIFT 0
++#define MRV_IS_IS_MAX_DX_MAX (MRV_IS_IS_MAX_DX_MASK >> MRV_IS_IS_MAX_DX_SHIFT)
++
++
++
++#define MRV_IS_IS_MAX_DY
++#define MRV_IS_IS_MAX_DY_MASK 0x00000FFF
++#define MRV_IS_IS_MAX_DY_SHIFT 0
++#define MRV_IS_IS_MAX_DY_MAX (MRV_IS_IS_MAX_DY_MASK >> MRV_IS_IS_MAX_DY_SHIFT)
++#define MRV_IS_DY
++#define MRV_IS_DY_MASK 0x0FFF0000
++#define MRV_IS_DY_SHIFT 16
++#define MRV_IS_DY_MAX 0x000007FF
++#define MRV_IS_DY_MIN (~MRV_IS_DY_MAX)
++#define MRV_IS_DX
++#define MRV_IS_DX_MASK 0x00000FFF
++#define MRV_IS_DX_SHIFT 0
++#define MRV_IS_DX_MAX 0x000007FF
++#define MRV_IS_DX_MIN (~MRV_IS_DX_MAX)
++
++
++#define MRV_IS_IS_H_OFFS_SHD
++#define MRV_IS_IS_H_OFFS_SHD_MASK 0x00001FFF
++#define MRV_IS_IS_H_OFFS_SHD_SHIFT 0
++
++
++#define MRV_IS_IS_V_OFFS_SHD
++#define MRV_IS_IS_V_OFFS_SHD_MASK 0x00000FFF
++#define MRV_IS_IS_V_OFFS_SHD_SHIFT 0
++
++
++#define MRV_IS_ISP_H_SIZE_SHD
++#define MRV_IS_ISP_H_SIZE_SHD_MASK 0x00001FFF
++#define MRV_IS_ISP_H_SIZE_SHD_SHIFT 0
++
++
++#define MRV_IS_ISP_V_SIZE_SHD
++#define MRV_IS_ISP_V_SIZE_SHD_MASK 0x00000FFF
++#define MRV_IS_ISP_V_SIZE_SHD_SHIFT 0
++
++
++#define MRV_HIST_HIST_PDIV
++#define MRV_HIST_HIST_PDIV_MASK 0x000007F8
++#define MRV_HIST_HIST_PDIV_SHIFT 3
++#define MRV_HIST_HIST_PDIV_MIN 0x00000003
++#define MRV_HIST_HIST_PDIV_MAX 0x000000FF
++#define MRV_HIST_HIST_MODE
++#define MRV_HIST_HIST_MODE_MASK 0x00000007
++#define MRV_HIST_HIST_MODE_SHIFT 0
++#define MRV_HIST_HIST_MODE_MAX 5
++#define MRV_HIST_HIST_MODE_LUM 5
++#define MRV_HIST_HIST_MODE_B 4
++#define MRV_HIST_HIST_MODE_G 3
++#define MRV_HIST_HIST_MODE_R 2
++#define MRV_HIST_HIST_MODE_RGB 1
++#define MRV_HIST_HIST_MODE_NONE 0
++
++#define MRV_HIST_HIST_H_OFFS
++#define MRV_HIST_HIST_H_OFFS_MASK 0x00000FFF
++#define MRV_HIST_HIST_H_OFFS_SHIFT 0
++#define MRV_HIST_HIST_H_OFFS_MAX \
++ (MRV_HIST_HIST_H_OFFS_MASK >> MRV_HIST_HIST_H_OFFS_SHIFT)
++
++#define MRV_HIST_HIST_V_OFFS
++#define MRV_HIST_HIST_V_OFFS_MASK 0x00000FFF
++#define MRV_HIST_HIST_V_OFFS_SHIFT 0
++#define MRV_HIST_HIST_V_OFFS_MAX \
++ (MRV_HIST_HIST_V_OFFS_MASK >> MRV_HIST_HIST_V_OFFS_SHIFT)
++
++#define MRV_HIST_HIST_H_SIZE
++#define MRV_HIST_HIST_H_SIZE_MASK 0x00000FFF
++#define MRV_HIST_HIST_H_SIZE_SHIFT 0
++#define MRV_HIST_HIST_H_SIZE_MAX \
++ (MRV_HIST_HIST_H_SIZE_MASK >> MRV_HIST_HIST_H_SIZE_SHIFT)
++
++#define MRV_HIST_HIST_V_SIZE
++#define MRV_HIST_HIST_V_SIZE_MASK 0x00000FFF
++#define MRV_HIST_HIST_V_SIZE_SHIFT 0
++#define MRV_HIST_HIST_V_SIZE_MAX \
++ (MRV_HIST_HIST_V_SIZE_MASK >> MRV_HIST_HIST_V_SIZE_SHIFT)
++
++
++#define MRV_HIST_HIST_BIN_N
++#define MRV_HIST_HIST_BIN_N_MASK 0x000000FF
++#define MRV_HIST_HIST_BIN_N_SHIFT 0
++#define MRV_HIST_HIST_BIN_N_MAX \
++ (MRV_HIST_HIST_BIN_N_MASK >> MRV_HIST_HIST_BIN_N_SHIFT)
++
++
++
++#define MRV_FILT_STAGE1_SELECT
++#define MRV_FILT_STAGE1_SELECT_MASK 0x00000F00
++#define MRV_FILT_STAGE1_SELECT_SHIFT 8
++#define MRV_FILT_STAGE1_SELECT_MAX_BLUR 0
++#define MRV_FILT_STAGE1_SELECT_DEFAULT 4
++#define MRV_FILT_STAGE1_SELECT_MIN_BLUR 7
++#define MRV_FILT_STAGE1_SELECT_BYPASS 8
++#define MRV_FILT_FILT_CHR_H_MODE
++#define MRV_FILT_FILT_CHR_H_MODE_MASK 0x000000C0
++#define MRV_FILT_FILT_CHR_H_MODE_SHIFT 6
++#define MRV_FILT_FILT_CHR_H_MODE_BYPASS 0
++#define MRV_FILT_FILT_CHR_H_MODE_STATIC 1
++#define MRV_FILT_FILT_CHR_H_MODE_DYN_1 2
++#define MRV_FILT_FILT_CHR_H_MODE_DYN_2 3
++#define MRV_FILT_FILT_CHR_V_MODE
++#define MRV_FILT_FILT_CHR_V_MODE_MASK 0x00000030
++#define MRV_FILT_FILT_CHR_V_MODE_SHIFT 4
++#define MRV_FILT_FILT_CHR_V_MODE_BYPASS 0
++#define MRV_FILT_FILT_CHR_V_MODE_STATIC8 1
++#define MRV_FILT_FILT_CHR_V_MODE_STATIC10 2
++#define MRV_FILT_FILT_CHR_V_MODE_STATIC12 3
++
++#define MRV_FILT_FILT_MODE
++#define MRV_FILT_FILT_MODE_MASK 0x00000002
++#define MRV_FILT_FILT_MODE_SHIFT 1
++#define MRV_FILT_FILT_MODE_STATIC 0
++#define MRV_FILT_FILT_MODE_DYNAMIC 1
++
++#define MRV_FILT_FILT_ENABLE
++#define MRV_FILT_FILT_ENABLE_MASK 0x00000001
++#define MRV_FILT_FILT_ENABLE_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_BL0
++#define MRV_FILT_FILT_THRESH_BL0_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_BL0_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_BL1
++#define MRV_FILT_FILT_THRESH_BL1_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_BL1_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_SH0
++#define MRV_FILT_FILT_THRESH_SH0_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_SH0_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_SH1
++#define MRV_FILT_FILT_THRESH_SH1_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_SH1_SHIFT 0
++
++
++#define MRV_FILT_LUM_WEIGHT_GAIN
++#define MRV_FILT_LUM_WEIGHT_GAIN_MASK 0x00070000
++#define MRV_FILT_LUM_WEIGHT_GAIN_SHIFT 16
++#define MRV_FILT_LUM_WEIGHT_KINK
++#define MRV_FILT_LUM_WEIGHT_KINK_MASK 0x0000FF00
++#define MRV_FILT_LUM_WEIGHT_KINK_SHIFT 8
++#define MRV_FILT_LUM_WEIGHT_MIN
++#define MRV_FILT_LUM_WEIGHT_MIN_MASK 0x000000FF
++#define MRV_FILT_LUM_WEIGHT_MIN_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_SH1
++#define MRV_FILT_FILT_FAC_SH1_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_SH1_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_SH0
++#define MRV_FILT_FILT_FAC_SH0_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_SH0_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_MID
++#define MRV_FILT_FILT_FAC_MID_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_MID_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_BL0
++#define MRV_FILT_FILT_FAC_BL0_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_BL0_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_BL1
++#define MRV_FILT_FILT_FAC_BL1_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_BL1_SHIFT 0
++
++
++
++
++ #define MRV_AE_EXP_MEAS_MODE
++ #define MRV_AE_EXP_MEAS_MODE_MASK 0x80000000
++ #define MRV_AE_EXP_MEAS_MODE_SHIFT 31
++
++#define MRV_AE_AUTOSTOP
++#define MRV_AE_AUTOSTOP_MASK 0x00000002
++#define MRV_AE_AUTOSTOP_SHIFT 1
++
++#define MRV_AE_EXP_START
++#define MRV_AE_EXP_START_MASK 0x00000001
++#define MRV_AE_EXP_START_SHIFT 0
++
++
++
++
++
++#define MRV_AE_ISP_EXP_H_OFFSET
++#define MRV_AE_ISP_EXP_H_OFFSET_MASK 0x00000FFF
++#define MRV_AE_ISP_EXP_H_OFFSET_SHIFT 0
++#define MRV_AE_ISP_EXP_H_OFFSET_MIN 0x00000000
++#define MRV_AE_ISP_EXP_H_OFFSET_MAX 0x00000F50
++
++
++
++#define MRV_AE_ISP_EXP_V_OFFSET
++#define MRV_AE_ISP_EXP_V_OFFSET_MASK 0x00000FFF
++#define MRV_AE_ISP_EXP_V_OFFSET_SHIFT 0
++#define MRV_AE_ISP_EXP_V_OFFSET_MIN 0x00000000
++#define MRV_AE_ISP_EXP_V_OFFSET_MAX 0x00000B74
++
++
++#define MRV_AE_ISP_EXP_H_SIZE
++#define MRV_AE_ISP_EXP_H_SIZE_MASK 0x000003FF
++#define MRV_AE_ISP_EXP_H_SIZE_SHIFT 0
++#define MRV_AE_ISP_EXP_H_SIZE_MIN 0x00000023
++#define MRV_AE_ISP_EXP_H_SIZE_MAX 0x00000333
++
++
++#define MRV_AE_ISP_EXP_V_SIZE
++#define MRV_AE_ISP_EXP_V_SIZE_MASK 0x000003FE
++#define MRV_AE_ISP_EXP_V_SIZE_SHIFT 0
++#define MRV_AE_ISP_EXP_V_SIZE_VALID_MASK \
++ (MRV_AE_ISP_EXP_V_SIZE_MASK & ~0x00000001)
++#define MRV_AE_ISP_EXP_V_SIZE_MIN 0x0000001C
++#define MRV_AE_ISP_EXP_V_SIZE_MAX 0x00000266
++
++#define MRV_AE_ISP_EXP_MEAN_ARR_SIZE1 5
++#define MRV_AE_ISP_EXP_MEAN_ARR_SIZE2 5
++#define MRV_AE_ISP_EXP_MEAN_ARR_OFS1 1
++#define MRV_AE_ISP_EXP_MEAN_ARR_OFS2 MRV_AE_ISP_EXP_MEAN_ARR_SIZE1
++#define MRV_AE_ISP_EXP_MEAN
++#define MRV_AE_ISP_EXP_MEAN_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_00
++#define MRV_AE_ISP_EXP_MEAN_00_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_00_SHIFT 0
++
++#define MRV_AE_ISP_EXP_MEAN_10
++#define MRV_AE_ISP_EXP_MEAN_10_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_10_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_20
++#define MRV_AE_ISP_EXP_MEAN_20_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_20_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_30
++#define MRV_AE_ISP_EXP_MEAN_30_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_30_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_40
++#define MRV_AE_ISP_EXP_MEAN_40_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_40_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_01
++#define MRV_AE_ISP_EXP_MEAN_01_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_01_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_11
++#define MRV_AE_ISP_EXP_MEAN_11_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_11_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_21
++#define MRV_AE_ISP_EXP_MEAN_21_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_21_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_31
++#define MRV_AE_ISP_EXP_MEAN_31_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_31_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_41
++#define MRV_AE_ISP_EXP_MEAN_41_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_41_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_02
++#define MRV_AE_ISP_EXP_MEAN_02_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_02_SHIFT 0
++
++
++
++#define MRV_AE_ISP_EXP_MEAN_12
++#define MRV_AE_ISP_EXP_MEAN_12_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_12_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_22
++#define MRV_AE_ISP_EXP_MEAN_22_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_22_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_32
++#define MRV_AE_ISP_EXP_MEAN_32_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_32_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_42
++#define MRV_AE_ISP_EXP_MEAN_42_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_42_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_03
++#define MRV_AE_ISP_EXP_MEAN_03_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_03_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_13
++#define MRV_AE_ISP_EXP_MEAN_13_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_13_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_23
++#define MRV_AE_ISP_EXP_MEAN_23_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_23_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_33
++#define MRV_AE_ISP_EXP_MEAN_33_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_33_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_43
++#define MRV_AE_ISP_EXP_MEAN_43_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_43_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_04
++#define MRV_AE_ISP_EXP_MEAN_04_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_04_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_14
++#define MRV_AE_ISP_EXP_MEAN_14_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_14_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_24
++#define MRV_AE_ISP_EXP_MEAN_24_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_24_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_34
++#define MRV_AE_ISP_EXP_MEAN_34_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_34_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_44
++#define MRV_AE_ISP_EXP_MEAN_44_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_44_SHIFT 0
++
++
++
++#define MRV_BLS_WINDOW_ENABLE
++#define MRV_BLS_WINDOW_ENABLE_MASK 0x0000000C
++#define MRV_BLS_WINDOW_ENABLE_SHIFT 2
++#define MRV_BLS_WINDOW_ENABLE_NONE 0
++#define MRV_BLS_WINDOW_ENABLE_WND1 1
++#define MRV_BLS_WINDOW_ENABLE_WND2 2
++#define MRV_BLS_WINDOW_ENABLE_BOTH 3
++
++#define MRV_BLS_BLS_MODE
++#define MRV_BLS_BLS_MODE_MASK 0x00000002
++#define MRV_BLS_BLS_MODE_SHIFT 1
++#define MRV_BLS_BLS_MODE_MEAS 1
++#define MRV_BLS_BLS_MODE_FIX 0
++
++#define MRV_BLS_BLS_ENABLE
++#define MRV_BLS_BLS_ENABLE_MASK 0x00000001
++#define MRV_BLS_BLS_ENABLE_SHIFT 0
++
++
++#define MRV_BLS_BLS_SAMPLES
++#define MRV_BLS_BLS_SAMPLES_MASK 0x0000001F
++#define MRV_BLS_BLS_SAMPLES_SHIFT 0
++
++#define MRV_BLS_BLS_SAMPLES_MAX (0x00000014)
++
++
++#define MRV_BLS_BLS_H1_START
++#define MRV_BLS_BLS_H1_START_MASK 0x00000FFF
++#define MRV_BLS_BLS_H1_START_SHIFT 0
++#define MRV_BLS_BLS_H1_START_MAX \
++ (MRV_BLS_BLS_H1_START_MASK >> MRV_BLS_BLS_H1_START_SHIFT)
++
++
++#define MRV_BLS_BLS_H1_STOP
++#define MRV_BLS_BLS_H1_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_H1_STOP_SHIFT 0
++#define MRV_BLS_BLS_H1_STOP_MAX \
++ (MRV_BLS_BLS_H1_STOP_MASK >> MRV_BLS_BLS_H1_STOP_SHIFT)
++
++
++#define MRV_BLS_BLS_V1_START
++#define MRV_BLS_BLS_V1_START_MASK 0x00001FFF
++#define MRV_BLS_BLS_V1_START_SHIFT 0
++#define MRV_BLS_BLS_V1_START_MAX \
++ (MRV_BLS_BLS_V1_START_MASK >> MRV_BLS_BLS_V1_START_SHIFT)
++
++#define MRV_BLS_BLS_V1_STOP
++#define MRV_BLS_BLS_V1_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_V1_STOP_SHIFT 0
++#define MRV_BLS_BLS_V1_STOP_MAX \
++ (MRV_BLS_BLS_V1_STOP_MASK >> MRV_BLS_BLS_V1_STOP_SHIFT)
++
++#define MRV_BLS_BLS_H2_START
++#define MRV_BLS_BLS_H2_START_MASK 0x00001FFF
++#define MRV_BLS_BLS_H2_START_SHIFT 0
++#define MRV_BLS_BLS_H2_START_MAX \
++ (MRV_BLS_BLS_H2_START_MASK >> MRV_BLS_BLS_H2_START_SHIFT)
++
++
++#define MRV_BLS_BLS_H2_STOP
++#define MRV_BLS_BLS_H2_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_H2_STOP_SHIFT 0
++#define MRV_BLS_BLS_H2_STOP_MAX \
++ (MRV_BLS_BLS_H2_STOP_MASK >> MRV_BLS_BLS_H2_STOP_SHIFT)
++
++
++#define MRV_BLS_BLS_V2_START
++#define MRV_BLS_BLS_V2_START_MASK 0x00001FFF
++#define MRV_BLS_BLS_V2_START_SHIFT 0
++#define MRV_BLS_BLS_V2_START_MAX \
++ (MRV_BLS_BLS_V2_START_MASK >> MRV_BLS_BLS_V2_START_SHIFT)
++
++
++#define MRV_BLS_BLS_V2_STOP
++#define MRV_BLS_BLS_V2_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_V2_STOP_SHIFT 0
++#define MRV_BLS_BLS_V2_STOP_MAX \
++ (MRV_BLS_BLS_V2_STOP_MASK >> MRV_BLS_BLS_V2_STOP_SHIFT)
++
++#define MRV_ISP_BLS_FIX_SUB_MIN (0xFFFFF001)
++#define MRV_ISP_BLS_FIX_SUB_MAX (0x00000FFF)
++#define MRV_ISP_BLS_FIX_MASK (0x00001FFF)
++#define MRV_ISP_BLS_FIX_SHIFT_A (0)
++#define MRV_ISP_BLS_FIX_SHIFT_B (0)
++#define MRV_ISP_BLS_FIX_SHIFT_C (0)
++#define MRV_ISP_BLS_FIX_SHIFT_D (0)
++#define MRV_ISP_BLS_MEAN_MASK (0x00000FFF)
++#define MRV_ISP_BLS_MEAN_SHIFT_A (0)
++#define MRV_ISP_BLS_MEAN_SHIFT_B (0)
++#define MRV_ISP_BLS_MEAN_SHIFT_C (0)
++#define MRV_ISP_BLS_MEAN_SHIFT_D (0)
++
++#define MRV_BLS_BLS_A_FIXED
++#define MRV_BLS_BLS_A_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_A)
++#define MRV_BLS_BLS_A_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_A
++
++#define MRV_BLS_BLS_B_FIXED
++#define MRV_BLS_BLS_B_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_B)
++#define MRV_BLS_BLS_B_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_B
++
++#define MRV_BLS_BLS_C_FIXED
++#define MRV_BLS_BLS_C_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_C)
++#define MRV_BLS_BLS_C_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_C
++
++#define MRV_BLS_BLS_D_FIXED
++#define MRV_BLS_BLS_D_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_D)
++#define MRV_BLS_BLS_D_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_D
++
++
++#define MRV_BLS_BLS_A_MEASURED
++#define MRV_BLS_BLS_A_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_A)
++#define MRV_BLS_BLS_A_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_A
++
++
++#define MRV_BLS_BLS_B_MEASURED
++#define MRV_BLS_BLS_B_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_B)
++#define MRV_BLS_BLS_B_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_B
++
++
++#define MRV_BLS_BLS_C_MEASURED
++#define MRV_BLS_BLS_C_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_C)
++#define MRV_BLS_BLS_C_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_C
++
++
++#define MRV_BLS_BLS_D_MEASURED
++#define MRV_BLS_BLS_D_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_D)
++#define MRV_BLS_BLS_D_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_D
++
++#define CI_ISP_DELAY_AFTER_RESET 15
++
++#define IRQ_ISP_ERROR -1
++#define IRQ_JPE_ERROR 0
++#define IRQ_JPE_SUCCESS 1
++#define IRQ_MI_SUCCESS 2
++#define IRQ_MI_SP_SUCCESS 3
++#define IRQ 1
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/mrstisp_stdinc.h
+@@ -0,0 +1,118 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _MRSTISP_STDINC_H
++#define _MRSTISP_STDINC_H
++
++#ifdef __KERNEL__
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/pagemap.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++
++#include <linux/uaccess.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-common.h>
++#include <media/v4l2-ioctl.h>
++
++#include <linux/mutex.h>
++#include <linux/list.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/moduleparam.h>
++#include <linux/smp_lock.h>
++#include <asm/kmap_types.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/timer.h>
++#include <asm/system.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <linux/time.h>
++#include <linux/syscalls.h>
++
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/dma-mapping.h>
++#include <media/videobuf-core.h>
++#include <media/videobuf-dma-contig.h>
++
++#ifdef CONFIG_KMOD
++#include <linux/kmod.h>
++#endif
++
++#include "project_settings_mrv.h"
++
++#include "ci_sensor_common.h"
++#include "ci_isp_common.h"
++#include "ci_va.h"
++#include "v4l2_jpg_review.h"
++
++#include "def.h"
++#include "mrstisp_reg.h"
++#include "mrstisp.h"
++#include "mrstisp_isp.h"
++#include "mrstisp_hw.h"
++#include "mrstisp_jpe.h"
++#include "mrstisp_dp.h"
++/* #include "mrstisp_mif.h" */
++
++extern unsigned char *mrst_isp_regs;
++#define MEM_CSC_REG_BASE (0x08500000)
++#define MEM_MRV_REG_BASE (mrst_isp_regs)
++#define ALIGN_TO_4(f) (((f) + 3) & ~3)
++
++/* for debug */
++extern int mrstisp_debug;
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstisp_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s" fmt "\n", \
++ __func__, ## arg);
++
++#define DBG_entering dprintk(1, "entering");
++#define DBG_leaving dprintk(1, "leaving");
++#define DBG_line dprintk(1, " line: %d", __LINE__);
++
++#include "reg_access.h"
++
++#endif
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/project_settings_mrv.h
+@@ -0,0 +1,622 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++
++#ifndef _PROJECT_SETTTINGS_MRV_H
++#define _PROJECT_SETTTINGS_MRV_H
++
++/* !< information mask */
++#define DBG_INFO 0x00000001
++/* !< warning mask */
++#define DBG_WARN 0x00000002
++/* !< error mask */
++#define DBG_ERR 0x00000004
++/* !< assert mask */
++#define DBG_ASSERT 0x00000008
++/* !< mask to get all categories */
++#define DBG_CAT_ALL 0x000000FF
++
++/* !< currently used category mask */
++#define DBG_CAT_FILTER (DBG_CAT_ALL)
++
++/* !< application mask */
++#define DBG_APP 0x00002000
++/* !< MARVIN driver mask */
++#define DBG_MRV 0x00001000
++/* !< mipi driver mask */
++#define DBG_MIPI 0x00040000
++
++#define CAMERA_VB6850 11
++#define CAMERA_HW CAMERA_VB6850
++/*
++ * \name MARVIN_HW
++ * select which MARVIN hardware is used.
++ */
++
++/* MARVIN 3, basically MARVIN 1 with more resolution */
++#define MARVIN_3 2
++/* Codename: "ISP upgrade" */
++#define MARVIN_3_V2 3
++/* MARVIN_3_V2 upgrade */
++#define MARVIN_3_V22 11
++/* MARVIN_3_V2 upgrade + MI patch from 12/2006 */
++#define MARVIN_3_V22X 13
++/* MARVIN_3_V2 upgrade + MI patch from 12/2006 (package tag 15.01.07) */
++#define MARVIN_3_V22X2 15
++/* just a quick-made test version for AF, other features see below */
++#define MARVIN_3_V2B 5
++/* Codename: "M3plus" */
++#define MARVIN_3_V3 4
++/* Codename: "Autofocus/bad pixel" */
++#define MARVIN_3_V4 6
++/* MARVIN_3_V4 upgrade */
++#define MARVIN_3_V42 12
++/* MARVIN_3_V4 upgrade + MI patch from 12/2006 */
++#define MARVIN_3_V42X 14
++/* MARVIN_3_V4 upgrade + MI patch from 12/2006 + (package tag 15.01.07) */
++#define MARVIN_3_V42X2 16
++/* Codename: "EMP" */
++#define MARVIN_3_V5 7
++/* successor of MARVIN_3_V5 */
++#define MARVIN_5_V5 18
++
++/*
++ * FPGA Bitstream ID 12 (Marvin5 tag 27.02.06), used for USA roadshow in
++ * 03/2006
++ */
++#define MARVIN_5_BS12 10
++/* MARVIN 5 (product) */
++#define MARVIN_5_V1 9
++/* MARVIN 5 (product with new isp filter) */
++#define MARVIN_5_V2 20
++/* MARVIN 5 (product with Chromatic Aberration) */
++#define MARVIN_5_V3 21
++/* MARVIN 5 (with 16 beat burst) */
++#define MARVIN_5_V4 22
++/* MARVIN XF */
++#define MARVIN_5_XF 17
++/* MARVIN XF */
++#define MARVIN_5_XF_TMP 19
++
++/* MARVIN 12MP */
++#define MARVIN_12_V1 23
++
++/* MARVIN 5 (with 16 beat burst) */
++#define MARVIN_5_V4_R20 30
++
++
++/* Currently used MARVIN version */
++#define MARVIN_HW MARVIN_5_V4_R20
++
++/*
++ * MRV_SUPPORT_xxx
++ * Some compile-time-configurable features of the MARVIN driver.
++ * Set the certain defines to a non-zero value to enable the feature
++ */
++
++/*
++ * routines to convert state and configuration enums into human readable
++ * text. (useful in e.g. debug outputs)
++ */
++#define MRV_SUPPORT_STATE2STRING 1
++
++/*
++ * routines to read, write and dump the register set of the MARVIN module
++ */
++#define MRV_SUPPORT_REGDUMP 1
++
++/*
++ * second level support routines for e.g. exposure control, auto focus
++ * and white balance. Second level support routines are
++ * those using the plain routines of mrv.h to implement a kind of
++ * "helper" to program/access/use the MARVIN with a bit more
++ * abstraction.
++ */
++#define MRV_SUPPORT_SL 1
++
++/*
++ * \mainpage MARVIN SOFTWARE
++ * \b File: project_settings_mrv.h
++ *
++ * <!-- \section Global Descriptions and Informations
++ * (settings, definitions, software changes) -->
++ *
++ * For global descriptions and informations see under "Modules"
++ *
++ */
++
++/*
++ * \addtogroup MARVIN_DEFINES_00 MARVIN Features
++ * \b File: project_settings_mrv.h
++ *
++ * \par MARVIN Features
++ * Depends on the used MARVIN_HW. Direct usage of MARVIN_HW should be
++ * avoided wherever possible.
++ * This makes it VERY MUCH easier to adapt the driver to new MARVIN
++ * versions with a feature set suited to a certain customer.
++ *
++ * \par MARVIN_FEATURE_CHIP_ID (integer)
++ * ID value contained in the chip. This is to be able to identify
++ * the chip derivative during runtime of the software
++ *
++ * \par MARVIN_FEATURE_CAMBUSWIDTH: (integer)
++ * How many bits can be captured from the image sensor?
++ * MARVIN_FEATURE_8BITS = sensor bus width is 8 bits
++ * MARVIN_FEATURE_10BITS = sensor bus width is 10 bits
++ * MARVIN_FEATURE_12BITS = sensor bus width is 12 bits
++ *
++ * \par MARVIN_FEATURE_XTALK: (integer)
++ * separate crosstalk matrix. without this feature, the crosstalk
++ * coefficients have to be combined with the color conversion matrix
++ * MARVIN_FEATURE_XTALK_9BITS = coefficient range -2.0 ... +1.992 ( 9 Bit)
++ * MARVIN_FEATURE_XTALK_10BITS = coefficient range -4.0 ... +3.992 (10 Bit)
++ * MARVIN_FEATURE_XTALK_11BITS = coefficient range -8.0 ... +7.992 (11 Bit)
++ * MARVIN_FEATURE_EXIST_NOT = no separate xtalk matrix
++ *
++ * \par MARVIN_FEATURE_XTALK_OFFSET: (boolean)
++ * add a offset to the crosstalk matrix output
++ *
++ * \par MARVIN_FEATURE_GAMMAOUT: (boolean)
++ * gamma correction for luminance channel at the output of the
++ * ISP block.
++ *
++ * \par MARVIN_FEATURE_FRAMESIZE: (integer)
++ * Maximum frame size (at sensor input) MARVIN can handle.
++ * MARVIN_FEATURE_1M9 = 1.9 megapixels
++ * MARVIN_FEATURE_3M1 = 3.1 megapixels
++ * MARVIN_FEATURE_5M3 = 5.3 megapixels
++ *
++ * \par MARVIN_FEATURE_SSCALE: (boolean)
++ * Selfpath feature, and therefore selfpath scaler feature also. If set to
++ * MARVIN_FEATURE_EXIST_NOT, the whole self data path is not present.
++ *
++ * \par MARVIN_FEATURE_SSCALE_UP: (boolean)
++ * Upscale capability of the self path scaler. This feature enables
++ * the scaler to do upscaling up to the factor of 5
++ *
++ * \par MARVIN_FEATURE_SSCALE_FACTORCALC: (integer)
++ * Specifies the strategy to calculate the scale factors for the self scaler.
++ * Note that this define is the successor of the MARVIN_FEATURE_SSCALE_LANES,
++ * which does not longer reflect the 'true' implementations of the scaler
++ * hardware and therefore has been removed.
++ * MARVIN_FEATURE_SCALEFACTOR_COMBINED_UV = 'traditional' behaviour: The
++ * scaler handles the U and V chroma components as if they were two joined
++ * pixels. Thus, when YUV422 subsampled input data is to be processed and
++ * no further subsampling is required, the scale factors of luma and chroma
++ * pathes must be identical.
++ * MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV = 'new style' behaviour: The
++ * scaler handles the U and V chroma components as if they belong to
++ * totally different picture planes. Thus, when YUV422 subsampled input
++ * data is to be processed and no further subsampling is required, the
++ * scale factors of the chroma path must be calculated like those of the
++ * luma path, but with only half of the image width.
++ *
++ * \par MARVIN_FEATURE_MSCALE_UP: (boolean)
++ * Upscale capability of the main path scaler. This feature enables
++ * the scaler to do upscaling up to the factor of 5
++ *
++ * \par MARVIN_FEATURE_MSCALE_FACTORCALC: (integer)
++ * Specifies the strategy to calculate the scale factors for the main scaler.
++ * MARVIN_FEATURE_SCALEFACTOR_COMBINED_UV = 'traditional' behaviour: The
++ * scaler handles the U and V chroma components as if they were two joined
++ * pixels. Thus, when YUV422 subsampled input data is to be processed and
++ * no further subsampling is required, the scale factors of luma and chroma
++ * pathes must be identical.
++ * MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV = 'new style' behaviour: The
++ * scaler handles the U and V chroma components as if they belong to
++ * totally different picture planes. Thus, when YUV422 subsampled input
++ * data is to be processed and no further subsampling is required, the
++ * scale factors of the chroma path must be calculated like those of the
++ * luma path, but with only half of the image width.
++ *
++ * \par MARVIN_FEATURE_SCALE_FACTORWIDTH: (integer)
++ * Width of the scalefactors for both main and self scaler
++ * MARVIN_FEATURE_12BITS = 12 bits precision
++ * MARVIN_FEATURE_14BITS = 14 bits precision
++ * MARVIN_FEATURE_16BITS = 16 bits precision
++ *
++ * \par MARVIN_FEATURE_AF_MEASURE: (boolean)
++ * Autofocus measurement block (attached to the demosaicing block)
++ *
++ * \par MARVIN_FEATURE_BAD_PIXEL: (boolean)
++ * Bad pixel detection/correction block
++ *
++ * \par MARVIN_FEATURE_BAD_PIXEL_WIDTH: (integer)
++ * Bad pixel detection/correction block register size
++ * MARVIN_FEATURE_10BITS = 10 bits precision
++ * MARVIN_FEATURE_12BITS = 12 bits precision
++ * MARVIN_FEATURE_EXIST_NOT = no bad pixel detection/correction block
++ *
++ * \par MARVIN_FEATURE_BAD_PIXEL_RAM: (integer)
++ * Bad pixel table ram address size
++ * MARVIN_FEATURE_7BITS = 128 entries
++ * MARVIN_FEATURE_11BITS = 2048 entries
++ * MARVIN_FEATURE_EXIST_NOT = no bad pixel ram block
++ *
++ * \par MARVIN_FEATURE_SUPERIMPOSE: (boolean)
++ * Superimpose block, used to combine camera picture with a static
++ * one coming from the system memory (chroma keying)
++ *
++ * \par MARVIN_FEATURE_CHROM_ABERRATION: (boolean)
++ * Chromatic aberration block corrects color fringing
++ *
++ * \par MARVIN_FEATURE_IMAGE_EFFECTS: (boolean)
++ * Image effects block, various modes like grayscale, sepia, emboss
++ * sketch, etc.
++ *
++ * \par MARVIN_FEATURE_LENS_SHADING: (boolean)
++ * Lens shading compensation block
++ *
++ * \par MARVIN_FEATURE_ISP_ERRORFLAGS: (boolean)
++ * Some registers containing more detailed error flags of the ISP.
++ * These may help during system integration.
++ *
++ * \par MARVIN_FEATURE_FRAMECOUNTER: (boolean)
++ * Frame counter register
++ *
++ * \par MARVIN_FEATURE_FLASH_LIGHT: (boolean)
++ * Support for frame-synchronized triggering of a LED or xenon based
++ * flashlight
++ *
++ * \par MARVIN_FEATURE_SHUTTER: (boolean)
++ * Support for driving an external mechanical shutter
++ *
++ * \par MARVIN_FEATURE_IMG_STABILIZATION: (integer)
++ * Support for digital image stabilization (=compensation against
++ * small movements)
++ * MARVIN_FEATURE_IMG_STABILIZATION_V1 = represents second output formatter
++ * at ISP output, no image
++ * stabilization functionality, located
++ * in the ISP bayer path only.
++ * MARVIN_FEATURE_IMG_STABILIZATION_V2 = represents image stabilization
++ * including output formatter, located
++ * in both bayer and YCbCr paths, but
++ * not in the raw data path.
++ * MARVIN_FEATURE_EXIST_NOT = there is no output image stabilization
++ *
++ * \par MARVIN_FEATURE_ISP_HISTOGRAM: (boolean)
++ * Histogram measurement block
++ *
++ * \par MARVIN_FEATURE_ISP_FILTER: (boolean)
++ * Additional blurring/sharpness filter
++ *
++ * \par MARVIN_FEATURE_SMIA: (integer)
++ * SMIA camera protocol version switch
++ * MARVIN_FEATURE_SMIA_MIPI_EMP = EMP version that contains just the
++ * SMIA and MIPI application protocol
++ * with two embedded data areas
++ * MARVIN_FEATURE_SMIA_EM = EMP version that contains just the SMIA
++ * application protocol
++ * MARVIN_FEATURE_SMIA_COMPLETE= SMIA module that contains the complete SMIA
++ * functionality.
++ * MARVIN_FEATURE_EXIST_NOT = there is no SMIA module
++ *
++ * \par MARVIN_FEATURE_AUTO_EXPOSURE: (integer)
++ * measurement unit for automatic exposure control
++ * MARVIN_FEATURE_AUTO_EXPOSURE_V1 = First implemented auto-exposure
++ * algorithm version
++ * MARVIN_FEATURE_AUTO_EXPOSURE_V2 = Second implemented auto-exposure
++ * algorithm version
++ *
++ * \par MARVIN_FEATURE_MI_STATUSFLAGS: (boolean)
++ * MI status flags needed for debugging purposes
++ *
++ * \par MARVIN_FEATURE_MIPI: (boolean)
++ * MIPI camera protocol block
++ *
++ * \par MARVIN_FEATURE_SMALL_OUTUNIT: (boolean)
++ * A small output unit instead of MI module
++ *
++ * \par MARVIN_FEATURE_CLOCK_DOMAINS: (integer)
++ * MARVIN_CLOCK_DOMAINS_1 = One clock domain for the complete MARVIN.
++ * MARVIN_CLOCK_DOMAINS_2 = Two clock domains (Camera data clock and AHB
++ * clock)
++ *
++ * \par MARVIN_FEATURE_WB: (integer)
++ * measurement and correction unit for white balance
++ * MARVIN_FEATURE_WB_V1 = basic white balance block
++ * MARVIN_FEATURE_WB_V2 = like version 1, but Y_max added
++ * MARVIN_FEATURE_WB_V3 = like version 2, but green_diff_gain added
++ * MARVIN_FEATURE_WB_V4 = version 4 of the white balance block. Extended gain
++ * range 0..4, resolution 10 bit, separated green
++ * gains for red and blue rows of bayer pattern.
++ *
++ * \par MARVIN_FEATURE_OUTPUT_FORMATTER: (integer)
++ * position of the output formatter
++ * MARVIN_FEATURE_OUTPUT_FORMATTER_V1 = exists at ISP output (old style)
++ * MARVIN_FEATURE_OUTPUT_FORMATTER_V2 = exists at ISP input
++ * MARVIN_FEATURE_EXIST_NOT = there is no output formatter, as
++ * the image stabilization contains
++ * already this functionality
++ *
++ * \par MARVIN_FEATURE_MI: (integer)
++ * MARVIN_FEATURE_MI_V1 = basic version
++ * MARVIN_FEATURE_MI_V2 = introducing self-path DMA read
++ * MARVIN_FEATURE_MI_V3 = self path DMA read, rotation, line stripe, 8
++ * beat burst
++ *
++ * \par MARVIN_FEATURE_DMA_READ: (integer)
++ * MARVIN_FEATURE_DMA_READ_V1 = version 1
++ * MARVIN_FEATURE_DMA_READ_V2 = version 2
++ * MARVIN_FEATURE_DMA_READ_V3 = version 3
++ * MARVIN_FEATURE_DMA_READ_V4 = version 4
++ * MARVIN_FEATURE_EXIST_NOT = there is no DMA read feature
++ *
++ * \par MARVIN_FEATURE_JPE_SIZE: (integer)
++ * MARVIN_FEATURE_JPE_SIZE_11BITS =11 Bits for JPE_HSIZE and JPE_VSIZE, only
++ * Marvin1.
++ * MARVIN_FEATURE_JPE_SIZE_12BITS =12 Bits for JPE_HSIZE and JPE_VSIZE, all
++ * MARVIN3.
++ * MARVIN_FEATURE_JPE_SIZE_13BITS = 13 Bits for JPE_HSIZE and JPE_VSIZE, all
++ * MARVIN5.
++ *
++ * \par MARVIN_FEATURE_BLACK_LEVEL: (integer)
++ * MARVIN_FEATURE_EXIST_NOT = there is no BLS module
++ * MARVIN_FEATURE_BLACK_LEVEL_V1 = version 1; basic version with 8 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V2 = version 2; extended version with 10 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V3 = version 3; extended version with 12 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V4 = version 4; advanced version with 2
++ * independent measurement windows
++ * and signed values; 10 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V5 = version 5; like version 4
++ * with 12 Bit registers
++ *
++ * \par MARVIN_FEATURE_DPMUX_YCSPLIT: (integer)
++ * MARVIN_FEATURE_YCS_V1 = traditional datapath setup; separate datapath for
++ * raw data, y/c splitter does not support self path
++ * only mode
++ * MARVIN_FEATURE_YCS_V2 = version 2, raw data routed through main path,
++ * y/c splitter supports self path only mode.
++ *
++ * \par MARVIN_FEATURE_DPMUX_MAINPATH: (integer)
++ * MARVIN_FEATURE_DPMUX_MAIN_V1 = Traditional mainpath muxer. No direct path
++ * from DMA-read to JPEG encoder, explicit RAW
++ * datapath to MI
++ * MARVIN_FEATURE_DPMUX_MAIN_V2 = new DPCL register settings,
++ * possibility to feed
++ * JPEG encoder directly via DMA-Read
++ *
++ * \par MARVIN_FEATURE_INPUT_AQUISITION: (integer)
++ * MARVIN_FEATURE_IAQU_V1 = Traditional version, supports following modes:
++ * raw data mode,
++ * raw picture according to ITU-R BT.601,
++ * RGB Bayer according to ITU-R BT.601,
++ * ITU-R BT601 (YCbCr data),
++ * ITU-R BT656 (YCbCr data)
++ * MARVIN_FEATURE_IAQU_V2 = Additional modes:
++ * RGB Bayer according to ITU-R BT.656, raw
++ * picture according to ITU-R BT.656
++ *
++ * \par MARVIN_FEATURE_JPE: (integer)
++ * MARVIN_FEATURE_JPE_V1 = Basic version
++ * MARVIN_FEATURE_JPE_V2 = Enable bit frame synchronization
++ * MARVIN_FEATURE_JPE_V3 = flags for Motion JPEG
++ *
++ * \par MARVIN_FEATURE_EXT_YCBCR_RANGE: (boolean)
++ * ???
++ *
++ * \par MARVIN_FEATURE_SP_DMA: (boolean)
++ * ???
++ * \par MARVIN_FEATURE_MI_BURST_16: (boolean)
++ * MARVIN_FEATURE_EXIST = AHB 16 beat burst
++ * MARVIN_FEATURE_EXIST_NOT = AHB burst to 8 or 4 is possible
++ * \par MARVIN_FEATURE_MI_LAST_PIXEL: (boolean)
++ * last pixel signalization
++ */
++
++/* \name Values for all boolean features */
++#define MARVIN_FEATURE_EXIST_NOT (0)
++#define MARVIN_FEATURE_EXIST (1)
++
++/*
++ * \name Values for MARVIN_FEATURE_FRAMESIZE and
++ * MARVIN_FEATURE_MI_FRAMESIZE
++ */
++#define MARVIN_FEATURE_1M9 1
++#define MARVIN_FEATURE_3M1 2
++#define MARVIN_FEATURE_5M3 3
++#define MARVIN_FEATURE_12M6 4
++
++/* \name Values for MARVIN_FEATURE_CAMBUSWIDTH and
++ * MARVIN_FEATURE_SCALE_FACTORWIDTH
++ */
++#define MARVIN_FEATURE_7BITS 7
++#define MARVIN_FEATURE_8BITS 8
++#define MARVIN_FEATURE_9BITS 9
++#define MARVIN_FEATURE_10BITS 10
++#define MARVIN_FEATURE_11BITS 11
++#define MARVIN_FEATURE_12BITS 12
++#define MARVIN_FEATURE_14BITS 14
++#define MARVIN_FEATURE_16BITS 16
++
++/* \name Values for MARVIN_FEATURE_SMIA */
++#define MARVIN_FEATURE_SMIA_COMPLETE 1
++#define MARVIN_FEATURE_SMIA_EMP 2
++#define MARVIN_FEATURE_SMIA_MIPI_EMP 3
++
++/* \name Values for MARVIN_FEATURE_AUTO_EXPOSURE */
++#define MARVIN_FEATURE_AUTO_EXPOSURE_V1 1
++#define MARVIN_FEATURE_AUTO_EXPOSURE_V2 2
++#define MARVIN_FEATURE_AUTO_EXPOSURE_V3 3
++
++/* \name Values for MARVIN_FEATURE_CLOCK_DOMAINS */
++#define MARVIN_CLOCK_DOMAINS_1 1
++#define MARVIN_CLOCK_DOMAINS_2 2
++
++/* \name Values for MARVIN_FEATURE_WB: (integer) */
++#define MARVIN_FEATURE_WB_V4 4
++#define MARVIN_FEATURE_WB_V5 5
++
++/* \name Values for MARVIN_FEATURE_XTALK: (integer) */
++/* coefficient range -2.0 ... +1.992 ( 9 Bit) */
++#define MARVIN_FEATURE_XTALK_9BITS 2
++/* coefficient range -4.0 ... +3.992 (10 Bit) */
++#define MARVIN_FEATURE_XTALK_10BITS 3
++/* coefficient range -8.0 ... +7.992 (11 Bit) */
++#define MARVIN_FEATURE_XTALK_11BITS 4
++
++#define MARVIN_FEATURE_GAMMAIN_10BITS 1
++#define MARVIN_FEATURE_GAMMAIN_12BITS 2
++/* \name Values for MARVIN_FEATURE_OUTPUT_FORMATTER: (integer) */
++#define MARVIN_FEATURE_OUTPUT_FORMATTER_V1 (2)
++#define MARVIN_FEATURE_OUTPUT_FORMATTER_V2 (3)
++
++/* \name Values for MARVIN_FEATURE_IMG_STABILIZATION: (integer) */
++#define MARVIN_FEATURE_IMG_STABILIZATION_V1 (2)
++#define MARVIN_FEATURE_IMG_STABILIZATION_V2 (3)
++
++/*
++ * \name Values for MARVIN_FEATURE_SSCALE_FACTORCALC and
++ * MARVIN_FEATURE_MSCALE_FACTORCALC: (integer)
++ */
++#define MARVIN_FEATURE_SCALEFACTOR_COMBINED_UV (2)
++#define MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV (3)
++
++/* \name Values for MARVIN_FEATURE_MI: (integer) */
++#define MARVIN_FEATURE_MI_V1 (2)
++#define MARVIN_FEATURE_MI_V2 (3)
++#define MARVIN_FEATURE_MI_V3 (4)
++#define MARVIN_FEATURE_MI_V4 (5)
++
++/* \name Values for MARVIN_FEATURE_DMA_READ: (integer) */
++#define MARVIN_FEATURE_DMA_READ_V1 (2)
++#define MARVIN_FEATURE_DMA_READ_V2 (3)
++#define MARVIN_FEATURE_DMA_READ_V3 (4)
++#define MARVIN_FEATURE_DMA_READ_V4 (5)
++
++/* \name Values for MARVIN_FEATURE_JPE_SIZE: (integer) */
++#define MARVIN_FEATURE_JPE_SIZE_11BITS 1
++#define MARVIN_FEATURE_JPE_SIZE_12BITS 2
++#define MARVIN_FEATURE_JPE_SIZE_13BITS 3
++
++/* \name Values for MARVIN_FEATURE_BLACK_LEVEL: (integer) */
++#define MARVIN_FEATURE_BLACK_LEVEL_V1 (2)
++#define MARVIN_FEATURE_BLACK_LEVEL_V2 (3)
++#define MARVIN_FEATURE_BLACK_LEVEL_V3 (4)
++#define MARVIN_FEATURE_BLACK_LEVEL_V4 (5)
++#define MARVIN_FEATURE_BLACK_LEVEL_V5 (6)
++
++/* \name Values for MARVIN_FEATURE_DPMUX_YCSPLIT: (integer) */
++#define MARVIN_FEATURE_YCS_V1 1
++#define MARVIN_FEATURE_YCS_V2 2
++
++/* \name Values for MARVIN_FEATURE_DPMUX_MAINPATH: (integer) */
++#define MARVIN_FEATURE_DPMUX_MAIN_V1 1
++#define MARVIN_FEATURE_DPMUX_MAIN_V2 2
++
++/* \name Values for MARVIN_FEATURE_INPUT_AQUISITION: (integer) */
++#define MARVIN_FEATURE_IAQU_V1 1
++#define MARVIN_FEATURE_IAQU_V2 2
++
++/* \name Values for MARVIN_FEATURE_JPE: (integer) */
++#define MARVIN_FEATURE_JPE_V1 (2)
++#define MARVIN_FEATURE_JPE_V2 (3)
++#define MARVIN_FEATURE_JPE_V3 (4)
++
++/* \name Values for MARVIN_FEATURE_JPE_CFG: (integer) */
++#define MARVIN_FEATURE_JPE_CFG_V1 (2)
++#define MARVIN_FEATURE_JPE_CFG_V2 (3)
++
++
++/* \name Values for MARVIN_FEATURE_ISP_FILTER: (integer) */
++#define MARVIN_FEATURE_ISP_FILTER_V1 1
++#define MARVIN_FEATURE_ISP_FILTER_V2 2
++
++/* \name Values for MARVIN_FEATURE_LENS_SHADING: (integer) */
++#define MARVIN_FEATURE_LSC_V1 1
++#define MARVIN_FEATURE_LSC_V2 2
++
++/* \name Values for MARVIN_FEATURE_HISTOGRAM: (integer) */
++#define MARVIN_FEATURE_HIST_V1 1
++#define MARVIN_FEATURE_HIST_V2 2
++#define MARVIN_FEATURE_HIST_V3 3
++
++#define MARVIN_FEATURE_IE_V1 (2)
++#define MARVIN_FEATURE_IE_V2 (3)
++#define MARVIN_FEATURE_IE_V3 (4)
++
++#if (MARVIN_HW == MARVIN_5_V4_R20)
++#define MARVIN_FEATURE_CHIP_ID 0x20453010
++#define MARVIN_FEATURE_CAMBUSWIDTH MARVIN_FEATURE_12BITS
++#define MARVIN_FEATURE_XTALK MARVIN_FEATURE_XTALK_11BITS
++#define MARVIN_FEATURE_GAMMAIN MARVIN_FEATURE_GAMMAIN_12BITS
++#define MARVIN_FEATURE_GAMMAOUT MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_FRAMESIZE MARVIN_FEATURE_5M3
++#define MARVIN_FEATURE_SP_DMA MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SSCALE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SSCALE_UP MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SSCALE_FACTORCALC \
++ MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV
++#define MARVIN_FEATURE_MSCALE_UP MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MSCALE_FACTORCALC \
++ MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV
++#define MARVIN_FEATURE_SCALE_FACTORWIDTH MARVIN_FEATURE_14BITS
++#define MARVIN_FEATURE_AF_MEASURE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_BAD_PIXEL MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_BAD_PIXEL_WIDTH MARVIN_FEATURE_12BITS
++#define MARVIN_FEATURE_BAD_PIXEL_RAM MARVIN_FEATURE_11BITS
++#define MARVIN_FEATURE_SUPERIMPOSE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_IMAGE_EFFECTS MARVIN_FEATURE_IE_V1
++#define MARVIN_FEATURE_LENS_SHADING MARVIN_FEATURE_LSC_V2
++#define MARVIN_FEATURE_ISP_ERRORFLAGS MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_FRAMECOUNTER MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_FLASH_LIGHT MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_EXT_YCBCR_RANGE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SHUTTER MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_IMG_STABILIZATION MARVIN_FEATURE_IMG_STABILIZATION_V2
++#define MARVIN_FEATURE_ISP_HISTOGRAM MARVIN_FEATURE_HIST_V2
++#define MARVIN_FEATURE_ISP_CSM MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_ISP_FILTER MARVIN_FEATURE_ISP_FILTER_V2
++#define MARVIN_FEATURE_SMIA MARVIN_FEATURE_SMIA_COMPLETE
++#define MARVIN_FEATURE_AUTO_EXPOSURE MARVIN_FEATURE_AUTO_EXPOSURE_V3
++#define MARVIN_FEATURE_MI_STATUSFLAGS MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MIPI MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SMALL_OUTUNIT MARVIN_FEATURE_EXIST_NOT
++#define MARVIN_FEATURE_CLOCK_DOMAINS MARVIN_CLOCK_DOMAINS_1
++#define MARVIN_FEATURE_WB MARVIN_FEATURE_WB_V5
++#define MARVIN_FEATURE_OUTPUT_FORMATTER MARVIN_FEATURE_OUTPUT_FORMATTER_V2
++#define MARVIN_FEATURE_MI MARVIN_FEATURE_MI_V4
++#define MARVIN_FEATURE_DMA_READ MARVIN_FEATURE_DMA_READ_V3
++#define MARVIN_FEATURE_JPE_SIZE MARVIN_FEATURE_JPE_SIZE_13BITS
++#define MARVIN_FEATURE_BLACK_LEVEL MARVIN_FEATURE_BLACK_LEVEL_V5
++#define MARVIN_FEATURE_DPMUX_YCSPLIT MARVIN_FEATURE_YCS_V2
++#define MARVIN_FEATURE_DPMUX_MAINPATH MARVIN_FEATURE_DPMUX_MAIN_V2
++#define MARVIN_FEATURE_INPUT_AQUISITION MARVIN_FEATURE_IAQU_V2
++#define MARVIN_FEATURE_JPE MARVIN_FEATURE_JPE_V3
++#define MARVIN_FEATURE_JPE_CFG MARVIN_FEATURE_JPE_CFG_V1
++#define MARVIN_FEATURE_XTALK_OFFSET MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_CHROM_ABERRATION MARVIN_FEATURE_EXIST_NOT
++#define MARVIN_FEATURE_MI_BURST_16 MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MI_LAST_PIXEL MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MI_FRAMESIZE MARVIN_FEATURE_12M6
++#define MARVIN_FEATURE_BAYER_RGB MARVIN_FEATURE_EXIST
++
++#endif /* MARVIN_HW */
++
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/include/reg_access.h
+@@ -0,0 +1,233 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _REG_ACCESS_H
++#define _REG_ACCESS_H
++
++/*
++ * Notes:
++ *
++ * registers:
++ * - use these macros to allow a central way e.g. to print out debug
++ * information on register access
++ *
++ * slices:
++ * - "parameter" \a reg could be a hardware register or a (32bit) variable,
++ * but not a pointer!
++ * - each slice (specified as "parameter" \a name) requires two \#defines:
++ * \b \<name\>_MASK : defines the mask to use on register side
++ * \b \<name\>_SHIFT : defines the shift value to use (left on write, right
++ * on read)
++ *
++ * arrays:
++ * - "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element
++ * - each one-dimensional array (specified as "parameter" \a name) requires
++ * one \#define
++ * - <tt> \<name\>_ARR_SIZE </tt>: number of elements
++ * - each two-dimensional array (specified as "parameter" <name>) requires
++ * four \#defines:
++ * - <tt> \<name\>_ARR_SIZE1 </tt>: number of elements in first dimension
++ * - <tt> \<name\>_ARR_SIZE2 </tt>: number of elements in second dimension
++ * - <tt> \<name\>_ARR_OFS1 </tt>: offset between two consecutive elements
++ * in first dimension
++ * - <tt> \<name\>_ARR_OFS2 </tt>: offset between two consecutive elements
++ * in second dimension
++ */
++
++/*
++ * reads and returns the complete value of register \a reg
++ * \note Use these macro to allow a central way e.g. to print out debug
++ * information on register access.
++ */
++
++/* helper function to let REG_READ return the value */
++
++#define DBG_DD(x) \
++ do { \
++ if (mrstisp_debug >= 4) { \
++ printk(KERN_INFO "mrstisp@%s ", __func__); \
++ printk x; \
++ } \
++ } while (0)
++
++static inline u32 _reg_read(u32 reg, const char *text)
++{
++ u32 variable = reg;
++ DBG_DD((text, variable));
++ return variable;
++}
++
++#define REG_READ(reg) \
++_reg_read((reg), "REG_READ(" VAL2STR(reg) "): 0x%08X\n")
++
++static inline u32 _reg_read_ex(u32 reg, const char *text)
++{
++ u32 variable = reg;
++ DBG_DD((text, variable));
++ return variable;
++}
++
++#define REG_READ_EX(reg) \
++_reg_read_ex((reg), "REG_READ_EX(" VAL2STR(reg) "): 0x%08X\n")
++/*
++ * writes the complete value \a value into register \a reg
++ * \note Use these macro to allow a central way e.g. to print out debug
++ * information on register access.
++ */
++#define REG_WRITE(reg, value) \
++{ \
++ dprintk(4, \
++ "REG_WRITE(" VAL2STR(reg) ", " VAL2STR(value) "): 0x%08X", (value)); \
++ (reg) = (value); \
++}
++
++#define REG_WRITE_EX(reg, value) \
++{ \
++ (reg) = (value); \
++}
++
++
++/*
++ * returns the value of slice \a name from register or variable \a reg
++ * \note "parameter" \a reg could be a hardware register or a (32bit)
++ * variable, but not a pointer! \n
++ * each slice (specified as "parameter" \a name) requires two \#defines: \n
++ * - <tt>\<name\>_MASK </tt>: defines the mask to use on register side
++ * - <tt>\<name\>_SHIFT </tt>: defines the shift value to use (left on write,
++ * right on read)
++ */
++
++static inline u32 _reg_get_slice(const char *text, u32 val)
++{
++ u32 variable = val;
++ DBG_DD((text, variable));
++ return val;
++}
++
++#define REG_GET_SLICE_EX(reg, name) \
++ (((reg) & (name##_MASK)) >> (name##_SHIFT))
++
++#define REG_GET_SLICE(reg, name) \
++ _reg_get_slice("REG_GET_SLICE(" VAL2STR(reg) ", " VAL2STR(name) \
++ "): 0x%08X\n" , \
++ (((reg) & (name##_MASK)) >> (name##_SHIFT)))
++
++/*
++ * writes the value \a value into slice \a name of register or variable \a reg
++ * \note "parameter" \a reg could be a hardware register or a (32bit) variable,
++ * but not a pointer! \n
++ * each slice (specified as "parameter" \a name) requires two \#defines: \n
++ * - <tt>\<name\>_MASK </tt>: defines the mask to use on register side
++ * - <tt>\<name\>_SHIFT </tt>: defines the shift value to use (left on write,
++ * right on read)
++ */
++#define REG_SET_SLICE(reg, name, value) \
++{ \
++ dprintk(4, \
++ "REG_SET_SLICE(" VAL2STR(reg) ", " VAL2STR(name) \
++ ", " VAL2STR(value) "): 0x%08X", \
++ (value)); \
++ ((reg) = (((reg) & ~(name##_MASK)) | \
++ (((value) << (name##_SHIFT)) & (name##_MASK)))); \
++}
++
++#define REG_SET_SLICE_EX(reg, name, value) \
++{ \
++ ((reg) = (((reg) & ~(name##_MASK)) | \
++ (((value) << (name##_SHIFT)) & (name##_MASK)))); \
++}
++
++/*
++ * returns the value of element \a idx from register array or array variable \a
++ * areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the first
++ * array element \n
++ * each one-dimensional array (specified as "parameter" \a name) requires one
++ * \#define: \n
++ * - <tt>\<name\>_ARR_SIZE </tt>: number of elements
++ */
++#define REG_GET_ARRAY_ELEM1(areg, name, idx) \
++((idx < name##_ARR_SIZE) \
++? areg[idx] \
++: 0)
++
++
++/*
++ * writes the value \a value into element \a idx of register array or array
++ * variable \a areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element \n
++ * each one-dimensional array (specified as "parameter" \a name) requires
++ * one \#define: \n
++ * - <tt>\<name\>_ARR_SIZE </tt>: number of elements
++ */
++#define REG_SET_ARRAY_ELEM1(areg, name, idx, value) \
++((idx < name##_ARR_SIZE) \
++? areg[idx] = value \
++: 0)
++
++
++/*
++ * returns the value of element \a idx1, \a idx2 from two-dimensional register
++ * array or array variable \a areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element \n
++ * each two-dimensional array (specified as "parameter" \a name) requires
++ * four \#defines:
++ * - <tt>\<name\>_ARR_SIZE1 </tt>: number of elements in first dimension
++ * - <tt>\<name\>_ARR_SIZE2 </tt>: number of elements in second dimension
++ * - <tt>\<name\>_ARR_OFS1 </tt>: offset between two consecutive
++ * elements in first dimension
++ * - <tt>\<name\>_ARR_OFS2 </tt>: offset between two consecutive
++ * elements in second dimension
++ */
++#define REG_GET_ARRAY_ELEM2(areg, name, idx1, idx2) \
++(((idx1 < name##_ARR_SIZE1) && (idx2 < name##_ARR_SIZE2)) \
++? areg[(idx1 * name##_ARR_OFS1) + (idx2 * name##_ARR_OFS2)] \
++: 0)
++
++
++/*
++ * writes the value \a value into element \a idx1, \a idx2 of two-dimensional
++ * register array or array variable \a areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element \n
++ * each two-dimensional array (specified as "parameter" \a name) requires
++ * four \#defines:
++ * - <tt>\<name\>_ARR_SIZE1 </tt>: number of elements in first dimension
++ * - <tt>\<name\>_ARR_SIZE2 </tt>: number of elements in second dimension
++ * - <tt>\<name\>_ARR_OFS1 </tt>: offset between two consecutive
++ * elements in first dimension
++ * - <tt>\<name\>_ARR_OFS2 </tt>: offset between two consecutive
++ * elements in second dimension
++ */
++#define REG_SET_ARRAY_ELEM2(areg, name, idx1, idx2, value) \
++(((idx1 < name##_ARR_SIZE1) && (idx2 < name##_ARR_SIZE2)) \
++? areg[(idx1 * name##_ARR_OFS1) + (idx2 * name##_ARR_OFS2)] = value \
++: 0)
++
++/* _REG_ACCESS_H */
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/mrstisp_dp.c
+@@ -0,0 +1,1303 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++extern u16 scaler_coffs_noncosited;
++
++/* mask for all chroma subsampling settings */
++#define CI_ISP_DPD_CSS_MASK (CI_ISP_DPD_CSS_H_MASK | CI_ISP_DPD_CSS_V_MASK)
++
++#define SCALER_COFFS_NONCOSITED (scaler_coffs_noncosited)
++#define FIXEDPOINT_ONE 0x1000
++
++/* limitations of main and self scaler */
++#define MAIN_SCALER_WIDTH_MAX 2600
++
++#define SELF_SCALER_WIDTH_MAX 640
++#define SCALER_MIN 16
++
++#define SELF_UPSCALE_FACTOR_MAX 5
++
++#define MAIN_UPSCALE_FACTOR_MAX 5
++
++/*
++ * upscale lookup table for smooth edges
++ * (linear interpolation between pixels)
++ */
++
++/* smooth edges */
++static const struct ci_isp_rsz_lut isp_rsz_lut_smooth_lin = {
++ {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F
++ }
++};
++
++/*
++ * upscale lookup table for sharp edges
++ * (no interpolation, just duplicate pixels)
++ */
++
++/* sharp edges */
++static const struct ci_isp_rsz_lut isp_rsz_lut_sharp = {
++ {
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F
++ }
++};
++
++/* structure combining virtual ISP windows settings */
++struct ci_isp_virtual_isp_wnds {
++ struct ci_isp_window wnd_blacklines;
++ struct ci_isp_window wnd_zoom_crop;
++};
++
++/* static storage to remember last applied virtual ISP window settings */
++static struct ci_isp_virtual_isp_wnds last_isp_wnds;
++
++/*
++ * Calculates the value to program into the struct ci_isp_scale or
++ * tsMrvSScale structures to scale from in pixels to out pixels.
++ *
++ * The formulas are taken from the MARVIN / MARVIN3PLUS user
++ * manuals (fixed-point calculation using 32 bit during
++ * processing, will overflow at an output size of 1048575 pixels).
++ */
++static u32 ci_get_scale_reg(u16 in, u16 out)
++{
++ if (in > out) {
++ /* downscaling */
++ return (u32) (((((u32) out - 1) * RSZ_SCALER_BYPASS) /
++ (u32) (in - 1)) + 1);
++ } else if (in < out) {
++ /* upscaling */
++ return (u32) (((((u32) in - 1) * RSZ_SCALER_BYPASS) /
++ (u32) (out - 1)) | (u32) RSZ_UPSCALE_ENABLE);
++ }
++
++ /* no scaling */
++ return RSZ_SCALER_BYPASS;
++}
++
++/*
++ * Calculates the values of the ci_isp_scale structure for the
++ * given input size and data path descriptor.
++ */
++static u32 ci_calc_scale_factors(const struct ci_isp_datapath_desc *source,
++ const struct ci_isp_datapath_desc *path,
++ struct ci_isp_scale *scale, int implementation)
++{
++ u32 scaler_output_format;
++ u32 cssflags;
++ u32 scaler_input_format;
++
++ u16 chroma_in_w;
++ u16 chroma_in_h;
++ u16 chroma_out_wcr;
++ u16 chroma_out_wcb;
++ u16 chroma_out_h;
++
++ memset(scale, 0, sizeof(struct ci_isp_scale));
++ dprintk(1, "srcw %d, srch %d;", source->out_w, source->out_h);
++ dprintk(1, "dstw %d, dsth %d", path->out_w, path->out_h);
++
++ /* calculate Y scale factors */
++ scale->scale_hy = ci_get_scale_reg(source->out_w, path->out_w);
++ scale->scale_vy = ci_get_scale_reg(source->out_h, path->out_h);
++
++ /* figure out the color input format of the scaler */
++ switch (path->flags & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_DMAYC_DIRECT:
++ case CI_ISP_DPD_MODE_DMAYC_ISP:
++ case CI_ISP_DPD_MODE_DMAJPEG_DIRECT:
++ case CI_ISP_DPD_MODE_DMAJPEG_ISP:
++ /* DMA-read originated data */
++ scaler_input_format = path->flags & CI_ISP_DPD_DMA_IN_MASK;
++ break;
++ default:
++ /* ISP originated data */
++ scaler_input_format = CI_ISP_DPD_DMA_IN_422;
++ break;
++ }
++
++ dprintk(1, "scaler_input_format is 0x%x", scaler_input_format);
++
++ switch (scaler_input_format) {
++ case CI_ISP_DPD_DMA_IN_422:
++ chroma_in_w = source->out_w / 2;
++ chroma_in_h = source->out_h;
++ chroma_out_wcr = path->out_w / 2;
++ chroma_out_wcb = (path->out_w + 1) / 2;
++ chroma_out_h = path->out_h;
++ break;
++ case CI_ISP_DPD_DMA_IN_420:
++ chroma_in_w = source->out_w / 2;
++ chroma_in_h = source->out_h / 2;
++ chroma_out_wcr = path->out_w / 2;
++ chroma_out_wcb = (path->out_w + 1) / 2;
++ chroma_out_h = path->out_h / 2;
++ break;
++ case CI_ISP_DPD_DMA_IN_411:
++ chroma_in_w = source->out_w / 4;
++ chroma_in_h = source->out_h;
++ chroma_out_wcr = path->out_w / 4;
++ chroma_out_wcb = (path->out_w + 2) / 4;
++ chroma_out_h = path->out_h;
++ break;
++ case CI_ISP_DPD_DMA_IN_444:
++ default:
++ chroma_in_w = source->out_w;
++ chroma_in_h = source->out_h;
++ chroma_out_wcb = chroma_out_wcr = path->out_w;
++ chroma_out_h = path->out_h;
++ break;
++ }
++
++ /* calculate chrominance scale factors */
++ switch (path->flags & CI_ISP_DPD_CSS_H_MASK) {
++ case CI_ISP_DPD_CSS_H2:
++ chroma_out_wcb /= 2;
++ chroma_out_wcr /= 2;
++ break;
++ case CI_ISP_DPD_CSS_H4:
++ chroma_out_wcb /= 4;
++ chroma_out_wcr /= 4;
++ break;
++ case CI_ISP_DPD_CSS_HUP2:
++ chroma_out_wcb *= 2;
++ chroma_out_wcr *= 2;
++ break;
++ case CI_ISP_DPD_CSS_HUP4:
++ chroma_out_wcb *= 4;
++ chroma_out_wcr *= 4;
++ break;
++ default:
++ /*leave chroma_out_w untouched*/
++ break;
++ }
++
++ scale->scale_hcr = ci_get_scale_reg(chroma_in_w, chroma_out_wcr);
++ scale->scale_hcb = ci_get_scale_reg(chroma_in_w, chroma_out_wcb);
++ scale->scale_hcb = scale->scale_hcr;
++
++ switch (path->flags & CI_ISP_DPD_CSS_V_MASK) {
++ case CI_ISP_DPD_CSS_V2:
++ chroma_out_h /= 2;
++ break;
++ case CI_ISP_DPD_CSS_V4:
++ chroma_out_h /= 4;
++ break;
++ case CI_ISP_DPD_CSS_VUP2:
++ chroma_out_h *= 2;
++ break;
++ case CI_ISP_DPD_CSS_VUP4:
++ chroma_out_h *= 4;
++ break;
++ default:
++ /* leave chroma_out_h untouched */
++ break;
++ }
++
++ scale->scale_vc = ci_get_scale_reg(chroma_in_h, chroma_out_h);
++
++ /* additional chrominance phase shifts */
++ if (path->flags & CI_ISP_DPD_CSS_HSHIFT)
++ scale->phase_hc = SCALER_COFFS_NONCOSITED;
++ if (path->flags & CI_ISP_DPD_CSS_VSHIFT)
++ scale->phase_vc = SCALER_COFFS_NONCOSITED;
++
++ /* additional luminance phase shifts */
++ if (path->flags & CI_ISP_DPD_LUMA_HSHIFT)
++ scale->phase_hy = SCALER_COFFS_NONCOSITED;
++ if (path->flags & CI_ISP_DPD_LUMA_VSHIFT)
++ scale->phase_vy = SCALER_COFFS_NONCOSITED;
++
++ /* try to figure out the outcoming YCbCr format */
++ cssflags = path->flags & CI_ISP_DPD_CSS_MASK;
++ if (cssflags == (CI_ISP_DPD_CSS_H_OFF | CI_ISP_DPD_CSS_V_OFF)) {
++ /* trivial case: the output format is not changed */
++ scaler_output_format = scaler_input_format;
++ } else {
++ /* output format gets changed by the scaler setting */
++ /* assume invalid format by default */
++ scaler_output_format = (u32) (-1);
++ switch (scaler_input_format) {
++ case CI_ISP_DPD_DMA_IN_444:
++ if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 444 -> 422 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_422;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H4
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 444 -> 411 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_411;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_V2)) {
++ /* conversion 444 -> 420 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_420;
++ }
++ break;
++
++ case CI_ISP_DPD_DMA_IN_422:
++ if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 422 -> 444 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_444;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 422 -> 411 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_411;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H_OFF
++ | CI_ISP_DPD_CSS_V2)) {
++ /* conversion 422 -> 420 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_420;
++ }
++ break;
++
++ case CI_ISP_DPD_DMA_IN_420:
++ if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_VUP2)) {
++ /* conversion 420 -> 444 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_444;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_VUP2)) {
++ /* conversion 420 -> 411 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_411;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H_OFF
++ | CI_ISP_DPD_CSS_VUP2)) {
++ /* conversion 420 -> 422 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_422;
++ }
++ break;
++
++ case CI_ISP_DPD_DMA_IN_411:
++ if (cssflags == (CI_ISP_DPD_CSS_HUP4
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 411 -> 444 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_444;
++ } else if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 411 -> 422 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_422;
++ } else if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_V2)) {
++ /* conversion 411 -> 420 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_420;
++ }
++ break;
++
++ default:
++ /* DMA input format not supported */
++ break;
++ }
++ }
++
++ return scaler_output_format;
++}
++
++/*
++ * Returns the address of up-scaling lookup table to use for
++ * the given data path flags.
++ */
++static const struct ci_isp_rsz_lut *ci_get_rsz_lut(u32 flags)
++{
++ const struct ci_isp_rsz_lut *ret_val;
++ switch (flags & CI_ISP_DPD_UPSCALE_MASK) {
++ case CI_ISP_DPD_UPSCALE_SHARP:
++ ret_val = &isp_rsz_lut_sharp;
++ break;
++ default:
++ ret_val = &isp_rsz_lut_smooth_lin;
++ break;
++ }
++ return ret_val;
++}
++
++/*
++ * Fills in scale factors and MI configuration for the main path.
++ * Note that only self path related settings will be written into
++ * the MI configuration struct, so this routine can be used for
++ * both ISP and DMA originated data path setups.
++ *
++ * Following fields are being filled in:
++ * scale_main: [all fields]
++ * mrv_mi_ctrl: mrv_mif_mp_pic_form main_path
++ */
++static int ci_calc_main_path_settings(const struct ci_isp_datapath_desc *source,
++ const struct ci_isp_datapath_desc *main,
++ struct ci_isp_scale *scale_main,
++ struct ci_isp_mi_ctrl *mrv_mi_ctrl)
++{
++ u32 main_flag;
++
++ WARN_ON(!(source != NULL));
++ WARN_ON(!(scale_main != NULL));
++ WARN_ON(!(mrv_mi_ctrl != NULL));
++
++ /* assume datapath deactivation if no selfpath pointer is given) */
++ if (main)
++ main_flag = main->flags;
++ else
++ main_flag = 0;
++
++ /* initialize the given parameters */
++ memset(scale_main, 0, sizeof(struct ci_isp_scale));
++ scale_main->scale_hy = RSZ_SCALER_BYPASS;
++ scale_main->scale_hcb = RSZ_SCALER_BYPASS;
++ scale_main->scale_hcr = RSZ_SCALER_BYPASS;
++ scale_main->scale_vy = RSZ_SCALER_BYPASS;
++ scale_main->scale_vc = RSZ_SCALER_BYPASS;
++
++ if (main_flag & CI_ISP_DPD_ENABLE) {
++ switch (main_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ case CI_ISP_DPD_MODE_DMAYC_ISP:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_ON;
++ break;
++ case CI_ISP_DPD_MODE_ISPJPEG:
++ case CI_ISP_DPD_MODE_DMAJPEG_DIRECT:
++ case CI_ISP_DPD_MODE_DMAJPEG_ISP:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_JPE;
++ break;
++ case CI_ISP_DPD_MODE_ISPRAW:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_RAW8;
++ break;
++ case CI_ISP_DPD_MODE_ISPRAW_16B:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_RAW816;
++ break;
++ default:
++ eprintk("unsupported mode for main path");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (main_flag & (CI_ISP_DPD_H_FLIP | CI_ISP_DPD_V_FLIP |
++ CI_ISP_DPD_90DEG_CCW)) {
++ eprintk("not supported for main path");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (main_flag & CI_ISP_DPD_NORESIZE) {
++ if (main_flag & CI_ISP_DPD_CSS_MASK) {
++ eprintk("main path needs resizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (main_flag &
++ (CI_ISP_DPD_LUMA_HSHIFT | CI_ISP_DPD_LUMA_VSHIFT)) {
++ eprintk("main path needs resizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ if ((mrv_mi_ctrl->main_path == CI_ISP_PATH_RAW8)
++ || (mrv_mi_ctrl->main_path == CI_ISP_PATH_RAW816)) {
++ eprintk("scaler not in RAW mode");
++ return CI_STATUS_NOTSUPP;
++ }
++ /* changed to avoid LINT warnings (Warning 613) */
++ if (main != NULL) {
++ if ((((u32) (source->out_w) *
++ MAIN_UPSCALE_FACTOR_MAX) < main->out_w)
++ ||
++ (((u32) (source->out_h) *
++ MAIN_UPSCALE_FACTOR_MAX) <
++ main->out_h)) {
++ eprintk("main upscaling exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ if ((main->out_w >
++ MAIN_SCALER_WIDTH_MAX)
++ || (main->out_w < SCALER_MIN)
++ || (main->out_h < SCALER_MIN)) {
++ eprintk("main scaler range exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ WARN_ON(main == NULL);
++ }
++
++ if (source->out_w & 0x01) {
++ eprintk("input width must be even!");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* calculate scale factors. */
++ (void)ci_calc_scale_factors(source, main, scale_main,
++ MARVIN_FEATURE_MSCALE_FACTORCALC);
++ }
++ } else {
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_OFF;
++ }
++
++ /* hardcoded MI settings */
++ dprintk(1, "main_flag is 0x%x", main_flag);
++ if (main_flag & CI_ISP_DPD_HWRGB_MASK) {
++ switch (main_flag & CI_ISP_DPD_HWRGB_MASK) {
++ case CI_ISP_DPD_YUV_420:
++ case CI_ISP_DPD_YUV_422:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_YUV_NV12:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_SEMI_PLANAR;
++ break;
++ case CI_ISP_DPD_YUV_YUYV:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_INTERLEAVED;
++ break;
++ default:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ }
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Fills in scale factors and MI configuration for the self
++ * path. Note that only self path related settings will be written into
++ * the MI config struct, so this routine can be used for both ISP and DMA
++ * originated datapath setups.
++ *
++ * Following fields are being filled in:
++ * scale_flag :
++ * [all fields]
++ * mrv_mi_ctrl :
++ * mrv_mif_sp_out_form
++ * mrv_mif_sp_in_form
++ * mrv_mif_sp_pic_form
++ * mrv_mif_sp_mode
++ * self_path
++ */
++static int ci_calc_self_path_settings(const struct ci_isp_datapath_desc *source,
++ const struct ci_isp_datapath_desc *self,
++ struct ci_isp_scale *scale_flag,
++ struct ci_isp_mi_ctrl *mrv_mi_ctrl)
++{
++ u32 scaler_out_col_format;
++ u32 self_flag;
++
++ WARN_ON(!(source != NULL));
++ WARN_ON(!(scale_flag != NULL));
++ WARN_ON(!(mrv_mi_ctrl != NULL));
++
++ /* assume datapath deactivation if no selfpath pointer is given) */
++ if (self)
++ self_flag = self->flags;
++ else
++ self_flag = 0;
++
++ /* initialize the given parameters */
++ memset(scale_flag, 0, sizeof(struct ci_isp_scale));
++ scale_flag->scale_hy = RSZ_SCALER_BYPASS;
++ scale_flag->scale_hcb = RSZ_SCALER_BYPASS;
++ scale_flag->scale_hcr = RSZ_SCALER_BYPASS;
++ scale_flag->scale_vy = RSZ_SCALER_BYPASS;
++ scale_flag->scale_vc = RSZ_SCALER_BYPASS;
++
++ if (self_flag & CI_ISP_DPD_ENABLE) {
++
++ switch (self_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ mrv_mi_ctrl->self_path = CI_ISP_PATH_ON;
++ scaler_out_col_format = CI_ISP_DPD_DMA_IN_422;
++ break;
++ case CI_ISP_DPD_MODE_DMAYC_ISP:
++ case CI_ISP_DPD_MODE_DMAYC_DIRECT:
++ mrv_mi_ctrl->self_path = CI_ISP_PATH_ON;
++ scaler_out_col_format =
++ self_flag & CI_ISP_DPD_DMA_IN_MASK;
++ break;
++ default:
++ eprintk("unsupported mode for self path");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (self_flag & CI_ISP_DPD_NORESIZE) {
++ if (self_flag & CI_ISP_DPD_CSS_MASK) {
++ eprintk("in self path needs rezizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (self_flag &
++ (CI_ISP_DPD_LUMA_HSHIFT | CI_ISP_DPD_LUMA_VSHIFT)) {
++ eprintk("n self path needs rezizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ /* changed to avoid LINT warnings (Warning 613) */
++ if (self != NULL) {
++ if ((source->out_w != self->out_w) ||
++ (source->out_h != self->out_h)) {
++ eprintk("sizes needs resizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ WARN_ON(self == NULL);
++ }
++ } else {
++ /* changed to avoid LINT warnings (Warning 613) */
++ if (self != NULL) {
++ /* upscaling only to factor
++ * SELF_UPSCALE_FACTOR_MAX possible
++ */
++ if ((((u32) (source->out_w) *
++ SELF_UPSCALE_FACTOR_MAX) <
++ self->out_w)
++ ||
++ (((u32) (source->out_h) *
++ SELF_UPSCALE_FACTOR_MAX) <
++ self->out_h)) {
++ eprintk("apability exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ if ((self->out_w >
++ SELF_SCALER_WIDTH_MAX)
++ || (self->out_w < SCALER_MIN)
++ || (self->out_h < SCALER_MIN)) {
++ eprintk("out range exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ WARN_ON(self == NULL);
++ }
++ /* Remember that the input picture width should be
++ * even if the scaler is used */
++
++ /* (otherwise the scaler may show unexpected
++ * behaviour in some rare cases) */
++ if (source->out_w & 0x01) {
++ eprintk("width must be even!");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* calculate scale factors. */
++ scaler_out_col_format =
++ ci_calc_scale_factors(source, self, scale_flag,
++ MARVIN_FEATURE_SSCALE_FACTORCALC);
++ }
++
++ dprintk(2, "step1");
++ /* figure out the input format setting */
++ switch (scaler_out_col_format) {
++ case CI_ISP_DPD_DMA_IN_444:
++ mrv_mi_ctrl->mrv_mif_sp_in_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_444;
++ break;
++ case CI_ISP_DPD_DMA_IN_422:
++ mrv_mi_ctrl->mrv_mif_sp_in_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422;
++ break;
++ case CI_ISP_DPD_DMA_IN_420:
++ mrv_mi_ctrl->mrv_mif_sp_in_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420;
++ break;
++ /* no break, does not seem to be supported by HW */
++ case CI_ISP_DPD_DMA_IN_411:
++ default:
++ eprintk("input color format not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* figure out the output format setting */
++ dprintk(2, "step2, self_flag is 0x%x", self_flag);
++
++ switch (self_flag & CI_ISP_DPD_HWRGB_MASK) {
++ case CI_ISP_DPD_HWRGB_565:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_RGB_565;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_HWRGB_666:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_RGB_666;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_HWRGB_888:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_RGB_888;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_YUV_420:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420;
++ break;
++ case CI_ISP_DPD_YUV_422:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422;
++ break;
++ case CI_ISP_DPD_YUV_NV12:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_SEMI_PLANAR;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420;
++ break;
++ case CI_ISP_DPD_YUV_YUYV:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_INTERLEAVED;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422;
++ break;
++
++ case CI_ISP_DPD_HWRGB_OFF:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ mrv_mi_ctrl->mrv_mif_sp_in_form;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ default:
++ eprintk("output color format not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* picture flipping / rotation */
++ dprintk(2, "step3");
++
++ switch (self_flag &
++ (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_V_FLIP |
++ CI_ISP_DPD_H_FLIP)) {
++ case (CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_HORIZONTAL_FLIP;
++ break;
++ case (CI_ISP_DPD_V_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_VERTICAL_FLIP;
++ break;
++ case (CI_ISP_DPD_V_FLIP | CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROTATION_180_DEG;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROTATION_090_DEG;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROT_270_V_FLIP;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_V_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROT_090_V_FLIP;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_V_FLIP |
++ CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROTATION_270_DEG;
++ break;
++ default:
++ mrv_mi_ctrl->mrv_mif_sp_mode = CI_ISP_MIF_SP_ORIGINAL;
++ break;
++ }
++
++ } else {
++ mrv_mi_ctrl->self_path = CI_ISP_PATH_OFF;
++ }
++
++ dprintk(2, "step4");
++ /*mrv_mi_ctrl->mrv_mif_sp_pic_form = CI_ISP_MIF_PIC_FORM_PLANAR;*/
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Translates the given memory interface configuration struct
++ * into appropriate values to program the data path multiplexers.
++ */
++static int ci_calc_dp_mux_settings(const struct ci_isp_mi_ctrl *mi_ctrl,
++ enum ci_isp_ycs_chn_mode *peYcsChnMode,
++ enum ci_isp_dp_switch *peDpSwitch)
++{
++ switch (mi_ctrl->main_path) {
++ case CI_ISP_PATH_RAW8:
++ case CI_ISP_PATH_RAW816:
++ *peDpSwitch = CI_ISP_DP_RAW;
++ *peYcsChnMode = CI_ISP_YCS_MVRaw;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF) {
++ eprintk("ombined with RAW mode of main path");
++ return CI_STATUS_NOTSUPP;
++ }
++ break;
++
++ case CI_ISP_PATH_JPE:
++ *peDpSwitch = CI_ISP_DP_JPEG;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF)
++ *peYcsChnMode = CI_ISP_YCS_MV_SP;
++ else
++ *peYcsChnMode = CI_ISP_YCS_MV;
++ break;
++
++ case CI_ISP_PATH_ON:
++ *peDpSwitch = CI_ISP_DP_MV;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF)
++ *peYcsChnMode = CI_ISP_YCS_MV_SP;
++ else
++ *peYcsChnMode = CI_ISP_YCS_MV;
++ break;
++
++ case CI_ISP_PATH_OFF:
++ *peDpSwitch = CI_ISP_DP_MV;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF)
++ *peYcsChnMode = CI_ISP_YCS_SP;
++ else
++ *peYcsChnMode = CI_ISP_YCS_OFF;
++ break;
++
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/* the windows to cut away black pixels and to zoom/crop the */
++#define ISPWND_COMBINE_WNDS 0x00000001
++/* image must be combined before they are applyed to the marvin registers */
++/* call of the ci_isp_set_output_formatter() routine necessary */
++#define ISPWND_APPLY_OUTFORM 0x00000002
++/* call of the ci_isp_is_set_config() routine necessary */
++#define ISPWND_APPLY_ISCONF 0x00000004
++/* no cropping supported at all */
++#define ISPWND_NO_CROPPING 0x00000008
++
++/*
++ * Returns information about how to combine black pixel and
++ * zoom/crop windows for programming the ISP output formatter and the image
++ * stabilization unit for the given marvin derivative and ISP path.
++ */
++static u32 ci_get_isp_wnd_style(enum ci_isp_path isp_path)
++{
++ u32 res = 0;
++
++ /* output formatter exists at ISP input */
++ /* image stabilization in both bayer and YCbCr paths */
++ if ((isp_path == CI_ISP_PATH_BAYER) ||
++ (isp_path == CI_ISP_PATH_YCBCR))
++ /*we need to program the output formatter with the blackline
++ * window and */
++ res = ISPWND_APPLY_OUTFORM | ISPWND_APPLY_ISCONF;
++ else
++ res = ISPWND_COMBINE_WNDS | ISPWND_APPLY_OUTFORM;
++
++ return res;
++}
++
++/*
++ * the given windows for cutting away blacklines coming from
++ * the image sensor and further cropping of the image for other
++ * purposes like e.g. digital zoom to the output formatter and/or
++ * image stabilisation modules of Marvins ISP.
++ */
++static int ci_set_isp_windows(const struct ci_sensor_config *isi_sensor_config,
++ const struct ci_isp_window *wnd_blackline,
++ const struct ci_isp_window *wnd_zoom_crop)
++{
++ struct ci_isp_window wnd_out_form;
++ struct ci_isp_is_config is_conf;
++ enum ci_isp_path isp_path;
++ u32 wnd_style;
++
++ memset(&wnd_out_form, 0, sizeof(wnd_out_form));
++ memset(&is_conf, 0, sizeof(is_conf));
++
++ /*
++ * figure out the path through the ISP to process the data from the
++ * image sensor
++ */
++ isp_path = ci_isp_select_path(isi_sensor_config, NULL);
++ if (isp_path == CI_ISP_PATH_UNKNOWN) {
++ eprintk("detect marvin ISP path to use");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /*
++ * get the recommended way to configure output formatter and/or
++ * image stabilization
++ */
++ wnd_style = ci_get_isp_wnd_style(isp_path);
++ if (wnd_style & ISPWND_NO_CROPPING) {
++ /*
++ * cropping not possible -> make sure that it is *not*
++ * supposed to be used
++ */
++ u16 isiX;
++ u16 isiY;
++ /* changed to avoid LINT warnings (Warning 534) */
++ (void)ci_sensor_res2size(isi_sensor_config->res, &isiX, &isiY);
++ if ((wnd_zoom_crop->hsize != isiX)
++ || (wnd_zoom_crop->vsize != isiY)
++ || (wnd_zoom_crop->hoffs != 0)
++ || (wnd_zoom_crop->voffs != 0)) {
++ eprintk("in selected ISP data path");
++ return CI_STATUS_NOTSUPP;
++ }
++ if ((wnd_blackline->hsize != isiX) ||
++ (wnd_blackline->vsize != isiY) ||
++ (wnd_blackline->hoffs != 0) ||
++ (wnd_blackline->voffs != 0)) {
++ eprintk("supported in selected ISP data path");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ /*
++ * The image stabilization is allowed to move the window in both
++ * directions by the same amount of pixels we have calculated for
++ * the offsets. The initial image stabilization window is equal to
++ * the zoom/crop window
++ */
++ is_conf.max_dx = wnd_zoom_crop->hoffs;
++ is_conf.max_dy = wnd_zoom_crop->voffs;
++ is_conf.mrv_is_window = *wnd_zoom_crop;
++
++ /* combine both blackline and zoom/crop window */
++ if (wnd_style & ISPWND_COMBINE_WNDS) {
++ /* combine both blackline and zoom/crop window */
++ wnd_out_form = *wnd_zoom_crop;
++ wnd_out_form.voffs += wnd_blackline->voffs;
++ wnd_out_form.hoffs += wnd_blackline->hoffs;
++ is_conf.mrv_is_window = wnd_out_form;
++ if (wnd_style & ISPWND_APPLY_OUTFORM) {
++ /*
++ * if the output formatter is to be used, offsets
++ * are cut away there, so
++ * we don't need additional ones in the imags
++ * stabilization unit
++ */
++ is_conf.mrv_is_window.hoffs = 0;
++ is_conf.mrv_is_window.voffs = 0;
++ }
++ } else {
++ /*
++ * do not combine windows --> blacklines done with output
++ * formatter, zoom/cropping done with image stabilization
++ */
++ wnd_out_form = *wnd_blackline;
++ is_conf.mrv_is_window = *wnd_zoom_crop;
++ }
++
++ /* finally, apply the settings to marvin */
++ if (wnd_style & ISPWND_APPLY_OUTFORM) {
++ ci_isp_set_output_formatter(&wnd_out_form,
++ CI_ISP_CFG_UPDATE_IMMEDIATE);
++ }
++ if (wnd_style & ISPWND_APPLY_ISCONF) {
++ int res = ci_isp_is_set_config(&is_conf);
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("set image stabilization config");
++ return res;
++ }
++ }
++
++ /* success - remember our virtual settings */
++ last_isp_wnds.wnd_blacklines = *wnd_blackline;
++ last_isp_wnds.wnd_zoom_crop = *wnd_zoom_crop;
++
++ return CI_STATUS_SUCCESS;
++}
++
++/* sets extended YCbCr mode */
++static int ci_ext_ycb_cr_mode(const struct ci_isp_datapath_desc *path)
++{
++ u32 main_flag;
++
++ WARN_ON(!(path != NULL));
++
++ /* assume datapath deactivation if no selfpath pointer is given) */
++ if (path)
++ main_flag = path->flags;
++ else
++ main_flag = 0;
++
++ /* if flag CI_ISP_DPD_YCBCREXT is set set extended YCbCr mode */
++ if (main_flag & CI_ISP_DPD_ENABLE) {
++ if (main_flag & CI_ISP_DPD_YCBCREXT)
++ ci_isp_set_ext_ycmode();
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Configures main and self data pathes and scaler for data coming from the ISP.
++ *
++ * Following MARVIN subsystems are programmed:
++ * - ISP output formatter
++ * - Image stabilization module
++ * - YC-Splitter
++ * - Self path DMA-read multiplexer
++ * - Main path multiplexer
++ * - Main & Self path resizer
++ * - Small output unit
++ * - Memory Interface (MI) input source, en/disable and data format
++ *
++ * Following MARVIN subsystems are *NOT* programmed:
++ * - All ISP functionality but the output formatter & image stabilization module
++ * - color Processing block
++ * - JPEG encode subsystem (quantisation tables etc.)
++ * - Memory Interface (MI) output buffer addresses and sizes
++ */
++int ci_datapath_isp(const struct ci_pl_system_config *sys_conf,
++ const struct ci_sensor_config *isi_config,
++ const struct ci_isp_datapath_desc *main,
++ const struct ci_isp_datapath_desc *self, int zoom)
++{
++ int res;
++ /*
++ * copy of flags for main and self path to simplify access (no
++ * pointer de-reference)
++ */
++ u32 main_flag;
++ u32 self_flag;
++ /* resolution from sensor configuration */
++ u16 isiX;
++ u16 isiY;
++ /* things to apply to MARVIN */
++ struct ci_isp_scale scale_main;
++ struct ci_isp_scale scale_flag;
++ enum ci_isp_ycs_chn_mode chn_mode = 0;
++ enum ci_isp_dp_switch dp_switch = 0;
++ struct ci_isp_mi_ctrl mrv_mi_ctrl;
++ struct ci_isp_datapath_desc source;
++ /* ISP windowing because of cutting away blacklines from the sensor */
++ struct ci_isp_window wnd_blackline;
++ /* ISP windowing because of aspect ratio change and/or zoom */
++ struct ci_isp_window wnd_zoom_crop;
++
++ const struct ci_isp_datapath_desc *target = NULL;
++
++ /* assume dapapath deactivation for not provided descriptors */
++ main_flag = 0;
++ self_flag = 0;
++ if (main)
++ main_flag = main->flags; /* 0x012 */
++
++ if (self)
++ self_flag = self->flags; /* 0x10015 */
++
++ /* initialize variables on the stack */
++ res = CI_STATUS_SUCCESS;
++ /* changed to avoid LINT warnings (Warning 534) */
++ (void)ci_sensor_res2size(isi_config->res, &isiX, &isiY);
++ memset(&mrv_mi_ctrl, 0, sizeof(struct ci_isp_mi_ctrl));
++ memset(&wnd_blackline, 0, sizeof(wnd_blackline));
++ memset(&wnd_zoom_crop, 0, sizeof(wnd_zoom_crop));
++
++ /*
++ * ISP Windowing - fill in wnd_out_form, apply_out_form, is_conf and
++ * apply_is_conf
++ */
++
++ /*
++ * by default, size of both blackline and zoom/crop window
++ * is what the camera delivers.
++ */
++
++ /* (no cropping, no offset) */
++ wnd_blackline.hsize = isiX;
++ wnd_blackline.vsize = isiY;
++ wnd_zoom_crop = wnd_blackline;
++
++ /*
++ * check if we have to crop because of aspect ratio
++ * preservement of an
++ */
++
++ /* output channel */
++ if ((main_flag & CI_ISP_DPD_ENABLE) &&
++ (main_flag & CI_ISP_DPD_KEEPRATIO)) {
++ target = main;
++ }
++ if ((self_flag & CI_ISP_DPD_ENABLE) &&
++ (self_flag & CI_ISP_DPD_KEEPRATIO)) {
++ if (target) {
++ eprintk("only allowed for one path");
++ return CI_STATUS_NOTSUPP;
++ }
++ target = self;
++ }
++
++ /* if so, calculate the cropping */
++ if (target) {
++ u32 aspect_cam = (0x1000 * ((u32) isiX)) / isiY;
++ u32 aspect_target = (0x1000 * ((u32) (target->out_w))) /
++ target->out_h;
++ if (aspect_cam < aspect_target) {
++ /*
++ * camera aspect is more 'portrait-like' as
++ * target aspect. We have to crop the
++ * camera picture by cutting off a bit of
++ * the top & bottom changed to avoid LINT
++ * warnings (Info 734)
++ */
++ wnd_zoom_crop.vsize = (u16) (((u32) isiX *
++ (u32) (target->out_h)) / target->out_w);
++ } else {
++ /* camera aspect is more 'landscape-like'
++ * as target aspect. We have to crop the
++ * camera picture by cutting off a bit of
++ * the left and right changed to avoid LINT
++ * warnings (Info 734) */
++ wnd_zoom_crop.hsize = (u16) (((u32) isiY *
++ (u32) (target->out_w)) / target->out_h);
++ }
++ }
++
++ /*
++ * now, we may also want to do digital zoom. If so, we need
++ * to shrink the ISP window by the desired zoom factor.
++ */
++ if (zoom > 0) {
++ /* changed to avoid LINT warnings (Warning 573) */
++ wnd_zoom_crop.vsize = (u16) (((u32) (wnd_zoom_crop.vsize) *
++ 1024) / (1024 + (u32) zoom));
++ /* changed to avoid LINT warnings (Warning 573) */
++ wnd_zoom_crop.hsize = (u16) (((u32) (wnd_zoom_crop.hsize) *
++ 1024) / (1024 + (u32) zoom));
++ }
++ /*
++ * Remember that the output formatter h_size should be
++ * even if the scaler is used
++ * (otherwise the scaler may show unexpected behaviour in
++ * some rare cases)
++ */
++ wnd_zoom_crop.hsize &= ~0x01;
++ /*
++ * At last, we care about the offset of the ISP window. We
++ * want it centered on the image data delivered by the
++ * sensor (not counting possible black lines)
++ */
++ wnd_zoom_crop.hoffs = (isiX - wnd_zoom_crop.hsize) / 2;
++ wnd_zoom_crop.voffs = (isiY - wnd_zoom_crop.vsize) / 2;
++ /*
++ * If the image sensor delivers blacklines, we cut them
++ * away with moving wnd_blackline window by the given
++ * amount of lines
++ */
++ switch (isi_config->bls) {
++ /* no black lines */
++ case SENSOR_BLS_OFF:
++ break;
++ /* two black lines at frame start */
++ case SENSOR_BLS_TWO_LINES:
++ wnd_blackline.voffs += 2;
++ break;
++ /* two black lines at frame start and two at the end */
++ case SENSOR_BLS_FOUR_LINES:
++ wnd_blackline.voffs += 2;
++ break;
++ default:
++ eprintk("config");
++ return CI_STATUS_NOTSUPP;
++ }
++ /*
++ * if we are instructed to show the blacklines and the
++ * sensor generates them,
++ * we have to move the ISP windows to the upper border of
++ * the whole sensor, and deny the image stabilization to
++ * move around the window in vertical direction.
++ */
++ if (isi_config->bls != SENSOR_BLS_OFF) {
++ if (((main_flag & CI_ISP_DPD_ENABLE)
++ && (main_flag & CI_ISP_DPD_BLACKLINES_TOP))
++ || ((self_flag & CI_ISP_DPD_ENABLE)
++ && (self_flag & CI_ISP_DPD_BLACKLINES_TOP))) {
++ if ((main_flag & CI_ISP_DPD_ENABLE)
++ && (self_flag & CI_ISP_DPD_ENABLE)
++ && ((main_flag & CI_ISP_DPD_BLACKLINES_TOP)
++ != (self_flag & CI_ISP_DPD_BLACKLINES_TOP))) {
++ eprintk("and self path");
++ return CI_STATUS_NOTSUPP;
++ }
++ wnd_blackline.voffs = 0;
++ wnd_zoom_crop.voffs = 0;
++ }
++ }
++
++ source.out_w = wnd_zoom_crop.hsize;
++ source.out_h = wnd_zoom_crop.vsize;
++ source.flags = CI_ISP_DPD_DMA_IN_422;
++
++ /*to use crop set crop_flag first*/
++ if (crop_flag) {
++ wnd_zoom_crop.hsize = main->out_w;
++ wnd_zoom_crop.vsize = main->out_h;
++ }
++
++ dprintk(1, "source.out_w %d, source.out_h %d",
++ source.out_w, source.out_h);
++ if (main)
++ dprintk(1, "main.out_w %d, main.out_h %d",
++ main->out_w, main->out_h);
++ if (self)
++ dprintk(1, "self.out_w %d, self.out_h %d",
++ self->out_w, self->out_h);
++
++ /*
++ * At this point, wnd_zoom_crop and wnd_blackline contain
++ * the window sizes that reflect the users request. We have
++ * to configure the ISP output formatter and the image
++ * stabilization formatter in order to achieve this, but
++ * how they interact is highly dependant of the curr
++ * marvin derivative and which datapath of the ISP is
++ * activated. Therefore, translating wnd_zoom_crop and
++ * wnd_blackline into marvin register settings is a bit
++ * complicated and will be done by the
++ * ci_set_isp_windows() routine.
++ */
++
++ /* ISP Window */
++ /* MAIN path - fill in main_path, scale_main and main_rsz_lut */
++ /* basic selfpath settings */
++ res = ci_calc_main_path_settings(&source, main, &scale_main,
++ &mrv_mi_ctrl);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ /* additional settings specific for main path fed from ISP */
++ if (main_flag & CI_ISP_DPD_ENABLE) {
++ switch (main_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ case CI_ISP_DPD_MODE_ISPRAW:
++ case CI_ISP_DPD_MODE_ISPRAW_16B:
++ case CI_ISP_DPD_MODE_ISPJPEG:
++ /* allowed cases, just proceed */
++ break;
++ default:
++ eprintk("data coming from the ISP");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ /* SELF path - fill in self_path & scale_flag */
++ /* basic selfpath settings */
++ res = ci_calc_self_path_settings(&source, self, &scale_flag,
++ &mrv_mi_ctrl);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ if (sys_conf->isp_cfg.flags.ycbcr_non_cosited)
++ mrv_mi_ctrl.mrv_mif_sp_in_phase = mrv_mif_col_phase_non_cosited;
++ else
++ mrv_mi_ctrl.mrv_mif_sp_in_phase = mrv_mif_col_phase_cosited;
++ if (sys_conf->isp_cfg.flags.ycbcr_full_range)
++ mrv_mi_ctrl.mrv_mif_sp_in_range = mrv_mif_col_range_full;
++ else
++ mrv_mi_ctrl.mrv_mif_sp_in_range = mrv_mif_col_range_std;
++ if (self_flag & CI_ISP_DPD_ENABLE) {
++ switch (self_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ /* only allowed case, just proceed */
++ break;
++ default:
++ eprintk("data coming from the ISP");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ /* Datapath multiplexers */
++ res = ci_calc_dp_mux_settings(&mrv_mi_ctrl, &chn_mode, &dp_switch);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ /* hardcoded global settings of the memory interface */
++ mrv_mi_ctrl.byte_swap_enable = false;
++
++ mrv_mi_ctrl.init_vals = CI_ISP_MIF_INIT_OFFSAndBase;
++
++ /*
++ * If we reach this point, we have collected all values to program
++ * the MARVIN for the requested datapath setup. Now all we've left
++ * to do is apply these to MARVINs register set. For this, we
++ * mostly use the low level MARVIN driver routines.
++ */
++ /*to use crop set crop_flag first*/
++ if (crop_flag) {
++ wnd_blackline.hsize = main->out_w;
++ wnd_blackline.vsize = main->out_h;
++ }
++
++ res = ci_set_isp_windows(isi_config, &wnd_blackline,
++ &wnd_zoom_crop);
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("failed to set ISP window configuration");
++ return res;
++ }
++ res = ci_isp_set_data_path(chn_mode, dp_switch);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ res = ci_isp_set_mipi_smia(isi_config->mode);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ if (mrv_mi_ctrl.self_path != CI_ISP_PATH_OFF)
++ ci_isp_res_set_self_resize(&scale_flag,
++ CI_ISP_CFG_UPDATE_IMMEDIATE,
++ ci_get_rsz_lut(self_flag));
++
++ if (mrv_mi_ctrl.main_path != CI_ISP_PATH_OFF)
++ ci_isp_res_set_main_resize(&scale_main,
++ CI_ISP_CFG_UPDATE_IMMEDIATE,
++ ci_get_rsz_lut(main_flag));
++
++ ci_isp_set_dma_read_mode(CI_ISP_DMA_RD_OFF,
++ CI_ISP_CFG_UPDATE_IMMEDIATE);
++
++ res = ci_isp_mif_set_path_and_orientation(&mrv_mi_ctrl);
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("failed to set MI path and orientation");
++ return res;
++ }
++
++ /* here the extended YCbCr mode is configured */
++ if (sys_conf->isp_cfg.flags.ycbcr_full_range)
++ res = ci_ext_ycb_cr_mode(main);
++ else
++ (void)ci_isp_set_yc_mode();
++
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("failed to set ISP YCbCr extended mode");
++ return res;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/mrstisp_hw.c
+@@ -0,0 +1,1640 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++/* different Marvin IP has a different resize scaler factor width */
++u16 scaler_coffs_noncosited = 0x1000;
++u32 rsz_scaler_bypass = 0x4000;
++u32 rsz_upscaler_enable = 0x8000;
++u32 mrv_rsz_scale_mask = 0x00003fff;
++
++static unsigned long jiffies_start;
++
++void mrst_timer_start(void)
++{
++ jiffies_start = jiffies;
++}
++
++void mrst_timer_stop(void)
++{
++ jiffies_start = 0;
++}
++
++unsigned long mrst_get_micro_sec(void)
++{
++ unsigned long time_diff = 0;
++
++ time_diff = jiffies - jiffies_start;
++
++ return jiffies_to_msecs(time_diff);
++}
++
++/*
++ * Returns the ISP hardware ID.
++ */
++static u32 ci_isp_get_ci_isp_id(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 result = 0;
++
++ result = REG_GET_SLICE(mrv_reg->vi_id, MRV_REV_ID);
++
++ return result;
++}
++
++/*
++ * Gets the hardware ID and compares it with the expected one.
++ */
++static int ci_isp_verify_chip_id(void)
++{
++ u32 mrv_id = ci_isp_get_ci_isp_id();
++ dprintk(1, "Marvin HW-Id: 0x%08X", mrv_id);
++
++ if (mrv_id == CHIP_ID_MARVIN_12_V1_R21) {
++ dprintk(0, "LNW B0 or C0");
++ scaler_coffs_noncosited = 0x4000;
++ rsz_scaler_bypass = 0x10000;
++ rsz_upscaler_enable = 0x20000;
++ mrv_rsz_scale_mask = 0x0000ffff;
++ } else if (mrv_id == CHIP_ID_MARVIN_5_V4_R11 ||
++ mrv_id == CHIP_ID_MARVIN_5_V4_R20) {
++ dprintk(0, "LNW A3");
++ } else
++ eprintk("unknown Marvin revision ID");
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Triggers an entire reset of MARVIN (equaling an asynchronous
++ * hardware reset).
++ * Checks the hardware ID. A debug warning is issued if the
++ * module ID does not match the expected ID.
++ * Enables all clocks of all sub-modules.
++ * MARVIN is in idle state afterwards.
++ */
++void ci_isp_init(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* verify ID, but no consequences if it doesn't match */
++ (void)ci_isp_verify_chip_id();
++
++ /* enable main clock */
++ REG_SET_SLICE(mrv_reg->vi_ccl, MRV_VI_CCLFDIS, MRV_VI_CCLFDIS_ENABLE);
++
++ /*
++ * enable all clocks to make sure that all submodules will be able to
++ * perform the reset correctly
++ */
++ REG_SET_SLICE(mrv_reg->vi_iccl, MRV_VI_ALL_CLK_ENABLE, ENABLE);
++
++ /*
++ * Reset of the entire MARVIN triggered by software. The minimum time
++ * permitted by mdelay ensures enough delay.
++ */
++
++ /* The reset bit will be cleared by the reset itself. */
++
++ /*
++ * The default value of the clock registers is all clocks on. So we
++ * don't have to enable the clocks again afterwards.
++ */
++
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_MARVIN_RST, ON);
++ /*mdelay(CI_ISP_DELAY_AFTER_RESET);*/
++ msleep(CI_ISP_DELAY_AFTER_RESET);
++}
++
++void ci_isp_off(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* enable main clock */
++ REG_SET_SLICE(mrv_reg->vi_ccl, MRV_VI_CCLFDIS,
++ MRV_VI_CCLFDIS_DISABLE);
++
++ /*
++ * enable all clocks to make sure that all submodules will be able to
++ * perform the reset correctly
++ */
++ REG_SET_SLICE(mrv_reg->vi_iccl, MRV_VI_ALL_CLK_ENABLE, DISABLE);
++}
++
++/*
++ * Returns the mask for the frame end interrupts, which are
++ * used for Isp.
++ */
++u32 ci_isp_get_frame_end_irq_mask_isp(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ switch (REG_GET_SLICE(mrv_reg->vi_dpcl, MRV_VI_DMA_SWITCH)) {
++ /*
++ * 2: path to image effects block (i.e. replacement for data coming
++ * from the ISP)
++ */
++ case MRV_VI_DMA_SWITCH_IE:
++ /* datapath is used by DMA */
++ return 0;
++ /*
++ * 0: direct path to self path mux
++ */
++ case MRV_VI_DMA_SWITCH_SELF:
++ /*
++ * 1: path to superimpose block
++ */
++ case MRV_VI_DMA_SWITCH_SI:
++ /*
++ * 3: direct path to JPEG encoder (R2B-buffer-less encodein mode)
++ */
++ case MRV_VI_DMA_SWITCH_JPG:
++ default:
++ /* main and/or self path depends on the YC-splitter setting */
++ {
++ switch (REG_GET_SLICE
++ (mrv_reg->vi_dpcl, MRV_VI_CHAN_MODE)) {
++ case MRV_VI_CHAN_MODE_MP:
++ return MRV_MI_MP_FRAME_END_MASK;
++ case MRV_VI_CHAN_MODE_SP:
++ return MRV_MI_SP_FRAME_END_MASK;
++ case MRV_VI_CHAN_MODE_MP_SP:
++ return MRV_MI_MP_FRAME_END_MASK |
++ MRV_MI_SP_FRAME_END_MASK;
++ default:
++ return 0;
++ }
++ }
++ }
++
++}
++
++/*
++ * Programs the number of frames to capture. Clears frame end
++ * interrupt to allow waiting in ci_isp_wait_for_frame_end().
++ * Enables the ISP input acquisition and output formatter.
++ * If immediate=false, the hardware assures that enabling is
++ * done frame synchronously.
++ */
++void ci_isp_start(u16 number_of_frames,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++ u32 eof_irq_mask = ci_isp_get_frame_end_irq_mask_isp();
++
++ /* max. 10 bits allowed */
++ WARN_ON(!(number_of_frames <= MRV_ISP_ACQ_NR_FRAMES_MAX));
++
++ REG_SET_SLICE(mrv_reg->isp_acq_nr_frames, MRV_ISP_ACQ_NR_FRAMES,
++ number_of_frames);
++
++ /* clear frame end interrupt */
++ REG_WRITE(mrv_reg->mi_icr, eof_irq_mask);
++
++ /* Enable ISP input Acquisition and output formatter. */
++
++ /*
++ * Input Acquisition is always enabled synchronous to the image sensor
++ * (no configuration update required). As soon as the input
++ * acquisition is started bit in_enable_shd in the register
++ * isp_flags_shd is set by hardware. In the following a frame end
++ * recognized by the input acquisition unit leads to
++ * ris_in_frame_end=1 in isp_ris. However a recognized frame end and
++ * no signaled errors are no guarantee for a valid configuration.
++ */
++
++ /*
++ * The output formatter is enabled frame synchronously according to
++ * the internal sync signals. Bit MRV_GEN_CFG_UPD has to be set. Bit
++ * isp_on_shd in isp_flags_shd is set when the output formatter is
++ * started. A recognized frame end is signaled with ris_out_frame_end
++ * in isp_ris.
++ */
++
++ /*
++ * The configuration of the input acquisition and the output
++ * formatter has to be correct to generate proper internal sync
++ * signals and thus a proper frame-synchronous update signal.
++ */
++
++ /* If the output formatter does not start check the following:
++ * sync polarities
++ * sample edge
++ * mode in register isp_ctrl
++ * sampling window of input acquisition <= picture size of image
++ * sensor
++ * output formatter window <= sampling window of input
++ * acquisition
++ */
++
++ /*
++ * If problems with the window sizes are suspected preferably add some
++ * offsets and reduce the window sizes, so that the above relations
++ * are true by all means.
++ */
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /*
++ * MRV_ISP_ISP_CFG_UPD is used instead of
++ * MRV_ISP_ISP_GEN_CFG_UPD. This updates the configuration
++ * right away and MARVIN is ready to aquire the next incoming
++ * frame.
++ */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ /* no update from within this function
++ * but enable ISP and Input */
++ break;
++ default:
++ break;
++ }
++
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_INFORM_ENABLE, ENABLE);
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_ENABLE, ENABLE);
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++
++ dprintk(3, "ISP_CTRL = 0x%08X", mrv_reg->isp_ctrl);
++}
++
++/*
++ * Clear frame end interrupt to allow waiting in
++ * ci_isp_wait_for_frame_end(). Disable output formatter (frame
++ * synchronously).
++ */
++void ci_isp_stop(enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++ u32 eof_irq_mask = ci_isp_get_frame_end_irq_mask_isp();
++
++ /* clear frame end interrupt */
++ REG_WRITE(mrv_reg->mi_icr, eof_irq_mask);
++ /* disable output formatter */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_ENABLE, DISABLE);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ break;
++ default:
++ break;
++ }
++
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++}
++
++/*
++ * Changes the data path settings.
++ */
++int ci_isp_set_data_path(enum ci_isp_ycs_chn_mode ycs_chn_mode,
++ enum ci_isp_dp_switch dp_switch)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 vi_dpcl = REG_READ(mrv_reg->vi_dpcl);
++ u32 vi_chan_mode;
++ u32 vi_mp_mux;
++
++ /* get desired setting for ycs_chan_mode (or vi_chan_mode) bits */
++ switch (ycs_chn_mode) {
++ case CI_ISP_YCS_OFF:
++ vi_chan_mode = MRV_VI_CHAN_MODE_OFF;
++ break;
++ case CI_ISP_YCS_Y:
++ vi_chan_mode = MRV_VI_CHAN_MODE_Y;
++ break;
++ case CI_ISP_YCS_MVRaw:
++ vi_chan_mode = MRV_VI_CHAN_MODE_MP_RAW;
++ break;
++ case CI_ISP_YCS_MV:
++ vi_chan_mode = MRV_VI_CHAN_MODE_MP;
++ break;
++ case CI_ISP_YCS_SP:
++ vi_chan_mode = MRV_VI_CHAN_MODE_SP;
++ break;
++ case CI_ISP_YCS_MV_SP:
++ vi_chan_mode = MRV_VI_CHAN_MODE_MP_SP;
++ break;
++ default:
++ eprintk("unknown value for ycs_chn_mode");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (vi_chan_mode & ~(MRV_VI_CHAN_MODE_MASK >> MRV_VI_CHAN_MODE_SHIFT)) {
++ eprintk("enum ci_isp_ycs_chn_mode not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* get desired setting for vi_dp_switch (or vi_dp_mux) bits */
++ switch (dp_switch) {
++ case CI_ISP_DP_RAW:
++ vi_mp_mux = MRV_VI_MP_MUX_RAW;
++ break;
++ case CI_ISP_DP_JPEG:
++ vi_mp_mux = MRV_VI_MP_MUX_JPEG;
++ break;
++ case CI_ISP_DP_MV:
++ vi_mp_mux = MRV_VI_MP_MUX_MP;
++ break;
++ default:
++ eprintk("unknown value for dp_switch");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (vi_mp_mux & ~MRV_VI_MP_MUX_MASK) {
++ eprintk("dp_switch value not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* program settings into MARVIN vi_dpcl register */
++ REG_SET_SLICE(vi_dpcl, MRV_VI_CHAN_MODE, vi_chan_mode);
++ REG_SET_SLICE(vi_dpcl, MRV_VI_MP_MUX, vi_mp_mux);
++ REG_WRITE(mrv_reg->vi_dpcl, vi_dpcl);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Changes the data path settings to SMIA or MIPI.
++ */
++int ci_isp_set_mipi_smia(u32 mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 if_select;
++
++ /* get desired setting for if_select bits */
++ switch (mode) {
++ case SENSOR_MODE_SMIA:
++ if_select = MRV_IF_SELECT_SMIA;
++ break;
++ case SENSOR_MODE_MIPI:
++ if_select = MRV_IF_SELECT_MIPI;
++ break;
++ case SENSOR_MODE_BAYER:
++ case SENSOR_MODE_BT601:
++ case SENSOR_MODE_BT656:
++ case SENSOR_MODE_PICT:
++ case SENSOR_MODE_DATA:
++ case SENSOR_MODE_BAY_BT656:
++ case SENSOR_MODE_RAW_BT656:
++ if_select = MRV_IF_SELECT_PAR;
++ break;
++ default:
++ eprintk("unknown value for mode");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* program settings into MARVIN vi_dpcl register */
++ REG_SET_SLICE(mrv_reg->vi_dpcl, MRV_IF_SELECT, if_select);
++
++ if (if_select == MRV_IF_SELECT_MIPI) {
++ REG_WRITE(mrv_reg->mipi_ctrl, 0x1001); /*XXX FLUSH_FIFO? */
++ /* REG_WRITE(mrv_reg->mipi_ctrl, 0x0001); FLUSH_FIFO? */
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Waits until the specified bits becomes signaled in the mi_ris
++ * register.
++ */
++static int ci_isp_wait_for_mi(struct mrst_isp_device *intel, u32 bit_mask)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++#if 0
++ int ret = 0;
++ INIT_COMPLETION(intel->mi_complete);
++ ret = wait_for_completion_interruptible_timeout(&intel->mi_complete,
++ 10*HZ);
++ if (ret == 0) {
++ eprintk("time out in wait for mi");
++ /*
++ * Try to recover. Softreset of submodules (but not
++ * entire marvin) resets processing and status
++ * information, but not configuration register
++ * content. Bits are sticky. So we have to clear them.
++ * Reset affects the MARVIN 1..2 clock cycles after
++ * the bits are set to high. So we don't have to wait
++ * in software before clearing them.
++ */
++
++ /*
++ * Note that only modules with clock enabled will be
++ * affected.
++ */
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_ALL_SOFT_RST, ON);
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_ALL_SOFT_RST, OFF);
++ mdelay(CI_ISP_DELAY_AFTER_RESET);
++ /*
++ * isp config update, neccessary to update v/h_size
++ * into shadow registers
++ */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_CFG_UPD, ON);
++ return CI_STATUS_FAILURE;
++ }
++ return CI_STATUS_SUCCESS;
++#endif
++ u32 irq;
++ static int err_frame_cnt;
++ mrst_timer_start();
++ /*
++ * Wait for the curr BitMask. If the BitMask is zero, then it's no
++ * waiting.
++ */
++ while ((mrv_reg->mi_ris & bit_mask) != bit_mask) {
++
++ irq = REG_READ(mrv_reg->isp_ris);
++ if (irq & (MRV_ISP_RIS_DATA_LOSS_MASK
++ | MRV_ISP_RIS_PIC_SIZE_ERR_MASK)){
++ err_frame_cnt++;
++ dprintk(1, "irq = 0x%x, err rumber = %d", irq,
++ err_frame_cnt);
++ }
++ if (mrst_get_micro_sec() > 1000) {
++ /*
++ * Note: Don't use REG_READ because content of
++ * registers would be already printed here.
++ */
++ dprintk(1, "time out");
++ mrst_timer_stop();
++ /*
++ * Try to recover. Softreset of submodules (but not
++ * entire marvin) resets processing and status
++ * information, but not configuration register
++ * content. Bits are sticky. So we have to clear them.
++ * Reset affects the MARVIN 1..2 clock cycles after
++ * the bits are set to high. So we don't have to wait
++ * in software before clearing them.
++ */
++
++ /*
++ * Note that only modules with clock enabled will be
++ * affected.
++ */
++ REG_SET_SLICE(mrv_reg->vi_ircl,
++ MRV_VI_ALL_SOFT_RST, ON);
++ REG_SET_SLICE(mrv_reg->vi_ircl,
++ MRV_VI_ALL_SOFT_RST, OFF);
++ /*mdelay(CI_ISP_DELAY_AFTER_RESET);*/
++ msleep(CI_ISP_DELAY_AFTER_RESET);
++ /*
++ * isp config update, neccessary to update v/h_size
++ * into shadow registers
++ */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_CFG_UPD,
++ ON);
++ return CI_STATUS_FAILURE;
++ }
++ }
++
++ mrst_timer_stop();
++ if (REG_GET_SLICE(mrv_reg->isp_ris, MRV_ISP_RIS_DATA_LOSS))
++ dprintk(1, "no failure, but MRV_ISPINT_DATA_LOSS");
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Waits until a frame is written to memory (frame end
++ * interrupt occurs).
++ * Waits for the frame end interrupt of the memory
++ * interface.
++ */
++int ci_isp_wait_for_frame_end(struct mrst_isp_device *intel)
++{
++ return ci_isp_wait_for_mi(intel, ci_isp_get_frame_end_irq_mask_isp());
++}
++
++/*
++ * Writes '0xFFFFFFFF' into all *_icr registers to clear all
++ * interrupts.
++ */
++void ci_isp_reset_interrupt_status(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* ISP interrupt clear register */
++ REG_SET_SLICE(mrv_reg->isp_icr, MRV_ISP_ICR_ALL, ON);
++ REG_SET_SLICE(mrv_reg->isp_err_clr, MRV_ISP_ALL_ERR, ON);
++ REG_SET_SLICE(mrv_reg->mi_icr, MRV_MI_ALLIRQS, ON);
++ /* JPEG error interrupt clear register */
++ REG_SET_SLICE(mrv_reg->jpe_error_icr, MRV_JPE_ALL_ERR, ON);
++ /* JPEG status interrupt clear register */
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_ALL_STAT, ON);
++
++ REG_WRITE(mrv_reg->mipi_icr, 0xffffffff); /*XXX replace by a macro */
++}
++
++void mrst_isp_disable_interrupt(struct mrst_isp_device *isp)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_ALL, OFF);
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_ALLIRQS, OFF);
++ REG_SET_SLICE(mrv_reg->jpe_error_imr, MRV_JPE_ALL_ERR, OFF);
++ REG_SET_SLICE(mrv_reg->jpe_status_imr, MRV_JPE_ALL_STAT, OFF);
++ REG_WRITE(mrv_reg->mipi_imsc, 0x00000000);
++}
++
++void mrst_isp_enable_interrupt(struct mrst_isp_device *isp)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ extern int isp_extra_sync_for_mt9d113;
++
++ /* For some reason ISP needs some "extra syncs" with sensor mt9d113 */
++ if (isp_extra_sync_for_mt9d113)
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_V_START, ON);
++
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_DATA_LOSS, ON);
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_PIC_SIZE_ERR, ON);
++
++ REG_WRITE(mrv_reg->mi_imsc, MRV_MI_MP_FRAME_END_MASK);
++
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, ON);
++
++ REG_SET_SLICE(mrv_reg->jpe_error_imr, MRV_JPE_ALL_ERR, ON);
++ REG_SET_SLICE(mrv_reg->jpe_status_imr, MRV_JPE_ALL_STAT, ON);
++
++ REG_WRITE(mrv_reg->mipi_imsc, 0x00f00000);
++
++ ci_isp_reset_interrupt_status();
++}
++
++/*
++ * Selects DMA read mode (i.e. sink of the data read from system
++ * memory by the DMA-read block).
++ * update_time is only used on Marvin3plus,
++ * on all other Marvin derivates immediate update is made
++ */
++void ci_isp_set_dma_read_mode(enum ci_isp_dma_read_mode mode,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ /* added to avoid LINT warnings (Info 530) */
++ u32 vi_dma_switch = 0;
++ /* added to avoid LINT warnings (Info 530) */
++ u32 vi_dma_spmux = 0;
++ /* added to avoid LINT warnings (Info 530) */
++ u32 vi_dma_iemux = 0;
++ /* added to avoid LINT warnings (Info 530) */
++ int dma_jpeg_select = false;
++
++ u32 vi_dpcl = REG_READ(mrv_reg->vi_dpcl);
++
++ /*
++ * DMA-read feature connected through a dedicated DMA-read
++ * multiplexer.
++ */
++
++ /* Programming is done via vi_dpcl register only */
++#define DMA_READ_MODE_PROGRAMMING_VI_SPMCL 0
++#define DMA_READ_MODE_PROGRAMMING_VI_DPCL 1
++ WARN_ON(!((mode == CI_ISP_DMA_RD_OFF) ||
++ (mode == CI_ISP_DMA_RD_SELF_PATH) ||
++ (mode == CI_ISP_DMA_RD_IE_PATH) ||
++ (mode == CI_ISP_DMA_RD_SUPERIMPOSE)));
++
++ switch (mode) {
++ case CI_ISP_DMA_RD_OFF:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_SELF;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = false;
++ break;
++ case CI_ISP_DMA_RD_SELF_PATH:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_SELF;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_DMA;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = false;
++ break;
++ case CI_ISP_DMA_RD_IE_PATH:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_IE;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_DMA;
++ dma_jpeg_select = false;
++ break;
++ case CI_ISP_DMA_RD_JPG_ENC:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_JPG;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = true;
++ break;
++ case CI_ISP_DMA_RD_SUPERIMPOSE:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_SI;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = false;
++ break;
++ default:
++ /* unknown DMA-read mode */
++ WARN_ON(1);
++ }
++
++ REG_SET_SLICE(vi_dpcl, MRV_VI_DMA_SWITCH, vi_dma_switch);
++ REG_SET_SLICE(vi_dpcl, MRV_VI_DMA_SPMUX, vi_dma_spmux);
++ REG_SET_SLICE(vi_dpcl, MRV_VI_DMA_IEMUX, vi_dma_iemux);
++#if ((MRV_VI_MP_MUX_JPGDIRECT & \
++~(MRV_VI_MP_MUX_MASK >> MRV_VI_MP_MUX_SHIFT)) == 0)
++ if (dma_jpeg_select) {
++ REG_SET_SLICE(vi_dpcl, MRV_VI_MP_MUX,
++ MRV_VI_MP_MUX_JPGDIRECT);
++ }
++#else
++ /* direct DMA to JPEG not supported */
++ UNUSED_PARAM(dma_jpeg_select);
++#endif
++ REG_WRITE(mrv_reg->vi_dpcl, vi_dpcl);
++}
++
++/*
++ * Set extended mode with unrestricted values for YCbCr
++ * Y (0-255) CbCr (0-255)
++ */
++void ci_isp_set_ext_ycmode(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++
++ /* modify isp_ctrl register */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_C_RANGE,
++ MRV_ISP_ISP_CSM_C_RANGE_FULL);
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_Y_RANGE,
++ MRV_ISP_ISP_CSM_Y_RANGE_FULL);
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++
++ /* program RGB to YUV color conversion with extended range */
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_0, MRV_ISP_CC_COEFF_0, 0x0026);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_1, MRV_ISP_CC_COEFF_1, 0x004B);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_2, MRV_ISP_CC_COEFF_2, 0x000F);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_3, MRV_ISP_CC_COEFF_3, 0x01EA);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_4, MRV_ISP_CC_COEFF_4, 0x01D6);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_5, MRV_ISP_CC_COEFF_5, 0x0040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_6, MRV_ISP_CC_COEFF_6, 0x0040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_7, MRV_ISP_CC_COEFF_7, 0x01CA);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_8, MRV_ISP_CC_COEFF_8, 0x01F6);
++}
++
++void ci_isp_set_yc_mode(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++
++ /* modify isp_ctrl register */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_C_RANGE,
++ MRV_ISP_ISP_CSM_Y_RANGE_BT601);
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_Y_RANGE,
++ MRV_ISP_ISP_CSM_Y_RANGE_BT601);
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++
++ /* program RGB to YUV color conversion with extended range */
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_0, MRV_ISP_CC_COEFF_0, 0x0021);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_1, MRV_ISP_CC_COEFF_1, 0x0040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_2, MRV_ISP_CC_COEFF_2, 0x000D);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_3, MRV_ISP_CC_COEFF_3, 0x01ED);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_4, MRV_ISP_CC_COEFF_4, 0x01DB);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_5, MRV_ISP_CC_COEFF_5, 0x0038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_6, MRV_ISP_CC_COEFF_6, 0x0038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_7, MRV_ISP_CC_COEFF_7, 0x01D1);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_8, MRV_ISP_CC_COEFF_8, 0x01F7);
++}
++
++/*
++ * writes the color values for contrast, brightness,
++ * saturation and hue into the appropriate Marvin
++ * registers
++ */
++void ci_isp_col_set_color_processing(
++ const struct ci_isp_color_settings *col)
++{
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (col == NULL) {
++ /* disable color processing (bypass) */
++ mrv_reg->c_proc_ctrl = 0;
++ } else {
++ mrv_reg->c_proc_contrast = col->contrast;
++ mrv_reg->c_proc_brightness = col->brightness;
++ mrv_reg->c_proc_saturation = col->saturation;
++ mrv_reg->c_proc_hue = col->hue;
++
++ /* modify color processing registers */
++
++ if (col->flags & CI_ISP_CPROC_C_OUT_RANGE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_C_OUT_RANGE;
++ }
++
++ if (col->flags & CI_ISP_CPROC_Y_IN_RANGE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_Y_IN_RANGE;
++ }
++
++ if (col->flags & CI_ISP_CPROC_Y_OUT_RANGE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_Y_OUT_RANGE;
++ }
++
++ if (col->flags & CI_ISP_CPROC_ENABLE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_ENABLE;
++ }
++ }
++}
++
++/*
++ * Translates a chrominance component value from usual
++ * representation (range 16..240, 128=neutral grey)
++ * to the one used by the ie_tint register
++ * The value is returned as 32 bit unsigned to support shift
++ * operation without explicit cast.
++ * The translation formular implemented here is taken from
++ * the image effects functional specification document,
++ * Doc-ID 30-001-481.130, revision 1.1 from november, 21st. 2005
++ */
++static u32 ci_isp_ie_tint_cx2_reg_val(u8 cx)
++{
++ s32 temp;
++ u32 reg_val;
++
++ /*
++ * apply scaling as specified in the image effects functional
++ * specification
++ */
++ temp = 128 - (s32) cx;
++ temp = ((temp * 64) / 110);
++
++ /* convert from two's complement to sign/value */
++ if (temp < 0) {
++ reg_val = 0x80;
++ temp *= (-1);
++ } else
++ reg_val = 0;
++
++ /* saturate at 7 bits */
++ if (temp > 0x7F)
++ temp = 0x7F;
++
++ /* combine sign and value to build the regiter value */
++ reg_val |= (u32) temp;
++
++ return reg_val;
++}
++
++/*
++ * Translates usual (decimal) matrix coefficient into the
++ * 4 bit register representation (used in the ie_mat_X registers).
++ * for unsupported decimal numbers, a supported replacement is
++ * selected automatically.
++ * The value is returned as 32 bit unsigned to support shift
++ * operation without explicit cast.
++ * The translation formular implemented here is taken from
++ * the image effects functional specification document,
++ * Doc-ID 30-001-481.130, revision 1.1 from november, 21st. 2005
++ */
++static u32 ci_isp_ie_mx_dec2_reg_val(s8 dec)
++{
++ if (dec <= (-6)) {
++ /* equivlent to -8 */
++ return 0x0f;
++ } else if (dec <= (-3)) {
++ /* equivlent to -4 */
++ return 0x0e;
++ } else if (dec == (-2)) {
++ /* equivlent to -2 */
++ return 0x0d;
++ } else if (dec == (-1)) {
++ /* equivlent to -1 */
++ return 0x0c;
++ } else if (dec == 0) {
++ /* equivlent to 0 (entry not used) */
++ return 0x00;
++ } else if (dec == 1) {
++ /* equivlent to 1 */
++ return 0x08;
++ } else if (dec == 2) {
++ /* equivlent to 2 */
++ return 0x09;
++ } else if (dec < 6) {
++ /* equivlent to 4 */
++ return 0x0a;
++ } else {
++ /* equivlent to 8 */
++ return 0x0b;
++ }
++}
++
++/*
++ * translates the values of the given configuration
++ * structure into register settings for the image effects
++ * submodule and loads the registers.
++ */
++int ci_isp_ie_set_config(const struct ci_isp_ie_config *ie_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!ie_config) {
++ /* just disable the module, i.e. put it in bypass mode */
++ REG_SET_SLICE(mrv_reg->img_eff_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_BYPASS);
++ } else {
++ /* apply the given settings */
++ u32 ul_ie_ctrl = REG_READ(mrv_reg->img_eff_ctrl);
++ u32 ul_ie_csel = REG_READ(mrv_reg->img_eff_color_sel);
++ u32 ul_ie_tint = REG_READ(mrv_reg->img_eff_tint);
++ u32 ul_ie_mat1 = REG_READ(mrv_reg->img_eff_mat_1);
++ u32 ul_ie_mat2 = REG_READ(mrv_reg->img_eff_mat_2);
++ u32 ul_ie_mat3 = REG_READ(mrv_reg->img_eff_mat_3);
++ u32 ul_ie_mat4 = REG_READ(mrv_reg->img_eff_mat_4);
++ u32 ul_ie_mat5 = REG_READ(mrv_reg->img_eff_mat_5);
++
++ /* overall operation mode */
++ switch (ie_config->mode) {
++ case CI_ISP_IE_MODE_OFF:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_BYPASS);
++ break;
++ case CI_ISP_IE_MODE_GRAYSCALE:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_GRAY);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_NEGATIVE:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_NEGATIVE);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_SEPIA:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_SEPIA);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_COLOR_SEL:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_COLOR_SEL);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_EMBOSS:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_EMBOSS);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_SKETCH:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_SKETCH);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ default:
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ /* use next frame sync update */
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_CFG_UPD, ON);
++
++ /* color selection settings */
++ REG_SET_SLICE(ul_ie_csel, MRV_IMGEFF_COLOR_THRESHOLD,
++ (u32) (ie_config->color_thres));
++ REG_SET_SLICE(ul_ie_csel, MRV_IMGEFF_COLOR_SELECTION,
++ (u32) (ie_config->color_sel));
++
++ /* tint color settings */
++ REG_SET_SLICE(ul_ie_tint, MRV_IMGEFF_INCR_CB,
++ ci_isp_ie_tint_cx2_reg_val(ie_config->tint_cb));
++ REG_SET_SLICE(ul_ie_tint, MRV_IMGEFF_INCR_CR,
++ ci_isp_ie_tint_cx2_reg_val(ie_config->tint_cr));
++
++ /* matrix coefficients */
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_11_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_11));
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_12_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_12));
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_13_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_13));
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_21_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_21));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_22_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_22));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_23_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_23));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_31_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_31));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_32_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_32));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_EMB_COEF_33_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_33));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_SKET_COEF_11_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_11));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_SKET_COEF_12_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_12));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_SKET_COEF_13_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_13));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_21_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_21));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_22_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_22));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_23_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_23));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_31_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_31));
++ REG_SET_SLICE(ul_ie_mat5, MRV_IMGEFF_SKET_COEF_32_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_32));
++ REG_SET_SLICE(ul_ie_mat5, MRV_IMGEFF_SKET_COEF_33_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_33));
++
++ /* write changed values back to registers */
++ REG_WRITE(mrv_reg->img_eff_ctrl, ul_ie_ctrl);
++ REG_WRITE(mrv_reg->img_eff_color_sel, ul_ie_csel);
++ REG_WRITE(mrv_reg->img_eff_tint, ul_ie_tint);
++ REG_WRITE(mrv_reg->img_eff_mat_1, ul_ie_mat1);
++ REG_WRITE(mrv_reg->img_eff_mat_2, ul_ie_mat2);
++ REG_WRITE(mrv_reg->img_eff_mat_3, ul_ie_mat3);
++ REG_WRITE(mrv_reg->img_eff_mat_4, ul_ie_mat4);
++ REG_WRITE(mrv_reg->img_eff_mat_5, ul_ie_mat5);
++
++ /* frame synchronous update of shadow registers */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Applies the new image stabilisation settings to the module.
++ */
++int ci_isp_is_set_config(const struct ci_isp_is_config *is_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!is_config) {
++ eprintk("is_config NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ /* set maximal margin distance for X */
++ if (is_config->max_dx > MRV_IS_IS_MAX_DX_MAX) {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dx, MRV_IS_IS_MAX_DX,
++ (u32) (MRV_IS_IS_MAX_DX_MAX));
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dx, MRV_IS_IS_MAX_DX,
++ (u32) (is_config->max_dx));
++ }
++
++ /* set maximal margin distance for Y */
++ if (is_config->max_dy > MRV_IS_IS_MAX_DY_MAX) {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dy, MRV_IS_IS_MAX_DY,
++ (u32) (MRV_IS_IS_MAX_DY_MAX));
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dy, MRV_IS_IS_MAX_DY,
++ (u32) (is_config->max_dy));
++ }
++
++ /* set H offset */
++ REG_SET_SLICE(mrv_reg->isp_is_h_offs, MRV_IS_IS_H_OFFS,
++ (u32) (is_config->mrv_is_window.hoffs));
++ /* set V offset */
++ REG_SET_SLICE(mrv_reg->isp_is_v_offs, MRV_IS_IS_V_OFFS,
++ (u32) (is_config->mrv_is_window.voffs));
++ /* set H size */
++ REG_SET_SLICE(mrv_reg->isp_is_h_size, MRV_IS_IS_H_SIZE,
++ (u32) (is_config->mrv_is_window.hsize));
++ /* set V size */
++ REG_SET_SLICE(mrv_reg->isp_is_v_size, MRV_IS_IS_V_SIZE,
++ (u32) (is_config->mrv_is_window.vsize));
++
++ return CI_STATUS_SUCCESS;
++}
++
++static int ci_isp_bls_set_fixed_values(const struct ci_isp_bls_subtraction
++ *bls_subtraction)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!bls_subtraction)
++ return CI_STATUS_NULL_POINTER;
++
++ if ((bls_subtraction->fixed_a > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_b > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_c > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_d > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_a < (s16) MRV_ISP_BLS_FIX_SUB_MIN) ||
++ (bls_subtraction->fixed_b < (s16) MRV_ISP_BLS_FIX_SUB_MIN) ||
++ (bls_subtraction->fixed_c < (s16) MRV_ISP_BLS_FIX_SUB_MIN) ||
++ (bls_subtraction->fixed_d < (s16) MRV_ISP_BLS_FIX_SUB_MIN)) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ /* we are in this path */
++ REG_SET_SLICE(mrv_reg->isp_bls_a_fixed, MRV_BLS_BLS_A_FIXED,
++ bls_subtraction->fixed_a);
++ REG_SET_SLICE(mrv_reg->isp_bls_b_fixed, MRV_BLS_BLS_B_FIXED, \
++ bls_subtraction->fixed_b);
++ REG_SET_SLICE(mrv_reg->isp_bls_c_fixed, MRV_BLS_BLS_C_FIXED,
++ bls_subtraction->fixed_c);
++ REG_SET_SLICE(mrv_reg->isp_bls_d_fixed, MRV_BLS_BLS_D_FIXED,
++ bls_subtraction->fixed_d);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Sets the desired configuration values to the BLS registers,
++ * if possible. In the case the parameter (bls_config == NULL)
++ * the BLS module will be deactivated.
++ */
++int ci_isp_bls_set_config(const struct ci_isp_bls_config *bls_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_bls_ctrl = 0;
++
++ int error = CI_STATUS_FAILURE;
++
++ if (!bls_config) {
++ /* disable the BLS module */
++ REG_SET_SLICE(mrv_reg->isp_bls_ctrl,
++ MRV_BLS_BLS_ENABLE, DISABLE);
++ return CI_STATUS_SUCCESS;
++ }
++
++ /* measurement window 2, enable_window =0 */
++ if (bls_config->isp_bls_window2.enable_window) {
++ if ((bls_config->isp_bls_window2.start_h >
++ MRV_BLS_BLS_H2_START_MAX)
++ || (bls_config->isp_bls_window2.stop_h >
++ MRV_BLS_BLS_H2_STOP_MAX)
++ || (bls_config->isp_bls_window2.start_v >
++ MRV_BLS_BLS_V2_START_MAX)
++ || (bls_config->isp_bls_window2.stop_v >
++ MRV_BLS_BLS_V2_STOP_MAX)) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_bls_h2_start,
++ MRV_BLS_BLS_H2_START,
++ bls_config->isp_bls_window2.start_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_h2_stop,
++ MRV_BLS_BLS_H2_STOP,
++ bls_config->isp_bls_window2.stop_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_v2_start,
++ MRV_BLS_BLS_V2_START,
++ bls_config->isp_bls_window2.start_v);
++ REG_SET_SLICE(mrv_reg->isp_bls_v2_stop,
++ MRV_BLS_BLS_V2_STOP,
++ bls_config->isp_bls_window2.stop_v);
++ }
++ }
++
++ /* measurement window 1, enable_window=0 */
++ if (bls_config->isp_bls_window1.enable_window) {
++ if ((bls_config->isp_bls_window1.start_h >
++ MRV_BLS_BLS_H1_START_MAX)
++ || (bls_config->isp_bls_window1.stop_h >
++ MRV_BLS_BLS_H1_STOP_MAX)
++ || (bls_config->isp_bls_window1.start_v >
++ MRV_BLS_BLS_V1_START_MAX)
++ || (bls_config->isp_bls_window1.stop_v >
++ MRV_BLS_BLS_V1_STOP_MAX)) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_bls_h1_start,
++ MRV_BLS_BLS_H1_START,
++ bls_config->isp_bls_window1.start_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_h1_stop,
++ MRV_BLS_BLS_H1_STOP,
++ bls_config->isp_bls_window1.stop_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_v1_start,
++ MRV_BLS_BLS_V1_START,
++ bls_config->isp_bls_window1.start_v);
++ REG_SET_SLICE(mrv_reg->isp_bls_v1_stop,
++ MRV_BLS_BLS_V1_STOP,
++ bls_config->isp_bls_window1.stop_v);
++ }
++ }
++
++ if (bls_config->bls_samples > MRV_BLS_BLS_SAMPLES_MAX) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_bls_samples, MRV_BLS_BLS_SAMPLES,
++ bls_config->bls_samples);
++ }
++
++ /* fixed subtraction values, enable_automatic=0 */
++ if (!bls_config->enable_automatic) {
++ error = ci_isp_bls_set_fixed_values(
++ &(bls_config->bls_subtraction));
++ if (error != CI_STATUS_SUCCESS)
++ return error;
++ }
++
++ if ((bls_config->disable_h) || (bls_config->disable_v))
++ return CI_STATUS_OUTOFRANGE;
++
++ isp_bls_ctrl = REG_READ(mrv_reg->isp_bls_ctrl);
++
++ /* enable measurement window(s) */
++ REG_SET_SLICE(isp_bls_ctrl, MRV_BLS_WINDOW_ENABLE,
++ ((bls_config->isp_bls_window1.enable_window)
++ ? MRV_BLS_WINDOW_ENABLE_WND1 : 0) |
++ ((bls_config->isp_bls_window2.enable_window)
++ ? MRV_BLS_WINDOW_ENABLE_WND2 : 0));
++
++ /* set Mode */
++ REG_SET_SLICE(isp_bls_ctrl, MRV_BLS_BLS_MODE,
++ (bls_config->enable_automatic) ? MRV_BLS_BLS_MODE_MEAS :
++ MRV_BLS_BLS_MODE_FIX);
++
++ /* enable module */
++ REG_SET_SLICE(isp_bls_ctrl, MRV_BLS_BLS_ENABLE, ENABLE);
++
++ /* write into register */
++ REG_WRITE(mrv_reg->isp_bls_ctrl, isp_bls_ctrl);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/* #define RSZ_FLAGS_MASK (rsz_upscaler_enable | rsz_scaler_bypass) */
++
++/*
++ * writes the scaler values to the appropriate Marvin registers.
++ */
++void ci_isp_res_set_main_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 mrsz_ctrl = REG_READ(mrv_reg->mrsz_ctrl);
++ u32 i;
++ int upscaling = false;
++
++ /* flags must be "outside" scaler value */
++ WARN_ON(!((RSZ_FLAGS_MASK & MRV_RSZ_SCALE_MASK) == 0));
++ WARN_ON(!((scale->scale_hy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcb & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcr & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vc & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++
++ /* horizontal luminance scale factor */
++ dprintk(1, "scale_hy = %d( %x )", scale->scale_hy, scale->scale_hy);
++
++ if (scale->scale_hy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_ENABLE, DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_hy, MRV_MRSZ_SCALE_HY,
++ (u32) scale->scale_hy);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_hy, MRV_MRSZ_PHASE_HY,
++ (u32) scale->phase_hy);
++
++ if (scale->scale_hy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ dprintk(1, "enable up scale");
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_UP,
++ MRV_MRSZ_SCALE_HY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_UP,
++ MRV_MRSZ_SCALE_HY_UP_DOWNSCALE);
++ }
++
++ /* horizontal chrominance scale factors */
++ WARN_ON(!((scale->scale_hcb & RSZ_FLAGS_MASK) == (scale->scale_hcr &
++ RSZ_FLAGS_MASK)));
++ dprintk(1, "scale_hcb = %d( %x )", scale->scale_hcb, scale->scale_hcb);
++
++ if (scale->scale_hcb & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_ENABLE, DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_hcb, MRV_MRSZ_SCALE_HCB,
++ (u32) scale->scale_hcb);
++ REG_SET_SLICE(mrv_reg->mrsz_scale_hcr, MRV_MRSZ_SCALE_HCB,
++ (u32) scale->scale_hcr);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_hc, MRV_MRSZ_PHASE_HC,
++ (u32) scale->phase_hc);
++
++ if (scale->scale_hcb & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_UP,
++ MRV_MRSZ_SCALE_HC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_UP,
++ MRV_MRSZ_SCALE_HC_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical luminance scale factor */
++ dprintk(1, "scale_vy = %d ( %x )", scale->scale_vy, scale->scale_vy);
++
++ if (scale->scale_vy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_vy, MRV_MRSZ_SCALE_VY,
++ (u32) scale->scale_vy);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_vy, MRV_MRSZ_PHASE_VY,
++ (u32) scale->phase_vy);
++
++ if (scale->scale_vy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_UP,
++ MRV_MRSZ_SCALE_VY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_UP,
++ MRV_MRSZ_SCALE_VY_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical chrominance scale factor */
++ dprintk(1, "scale_vc = %d( %x )", scale->scale_vc, scale->scale_vc);
++
++ if (scale->scale_vc & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_vc, MRV_MRSZ_SCALE_VC,
++ (u32) scale->scale_vc);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_vc, MRV_MRSZ_PHASE_VC,
++ (u32) scale->phase_vc);
++
++ if (scale->scale_vc & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_UP,
++ MRV_MRSZ_SCALE_VC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_UP,
++ MRV_MRSZ_SCALE_VC_UP_DOWNSCALE);
++ }
++ }
++
++ /* apply upscaling lookup table */
++ if (rsz_lut) {
++ for (i = 0; i <= MRV_MRSZ_SCALE_LUT_ADDR_MASK; i++) {
++ REG_SET_SLICE(mrv_reg->mrsz_scale_lut_addr,
++ MRV_MRSZ_SCALE_LUT_ADDR, i);
++ REG_SET_SLICE(mrv_reg->mrsz_scale_lut,
++ MRV_MRSZ_SCALE_LUT,
++ rsz_lut->rsz_lut[i]);
++ }
++ } else if (upscaling) {
++ eprintk("Upscaling requires lookup table!");
++ WARN_ON(1);
++ }
++
++ /* handle immediate update flag and write mrsz_ctrl */
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /* frame synchronous update of shadow registers */
++ REG_WRITE(mrv_reg->mrsz_ctrl, mrsz_ctrl);
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /* immediate update of shadow registers */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_CFG_UPD, ON);
++ REG_WRITE(mrv_reg->mrsz_ctrl, mrsz_ctrl);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ default:
++ /* no update from within this function */
++ REG_WRITE(mrv_reg->mrsz_ctrl, mrsz_ctrl);
++ break;
++ }
++}
++
++/*
++ * writes the scaler values to the appropriate Marvin registers.
++ */
++void ci_isp_res_set_self_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 srsz_ctrl = REG_READ(mrv_reg->srsz_ctrl);
++ u32 i;
++ int upscaling = false;
++
++ /* flags must be "outside" scaler value */
++ WARN_ON(!((RSZ_FLAGS_MASK & MRV_RSZ_SCALE_MASK) == 0));
++ WARN_ON(!((scale->scale_hy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcb & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcr & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vc & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++
++ /* horizontal luminance scale factor */
++ dprintk(1, "scale_hy = %d,%x", scale->scale_hy, scale->scale_hy);
++
++ if (scale->scale_hy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_hy, MRV_SRSZ_SCALE_HY,
++ (u32) scale->scale_hy);
++ REG_SET_SLICE(mrv_reg->srsz_phase_hy, MRV_SRSZ_PHASE_HY,
++ (u32) scale->phase_hy);
++
++ if (scale->scale_hy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP,
++ MRV_SRSZ_SCALE_HY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP,
++ MRV_SRSZ_SCALE_HY_UP_DOWNSCALE);
++ }
++ }
++
++ /* horizontal chrominance scale factors */
++ WARN_ON(!((scale->scale_hcb & RSZ_FLAGS_MASK) == (scale->scale_hcr &
++ RSZ_FLAGS_MASK)));
++
++ dprintk(1, "scale_hcb = %d,%x", scale->scale_hcb, scale->scale_hcb);
++
++ if (scale->scale_hcb & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_hcb, MRV_SRSZ_SCALE_HCB,
++ (u32) scale->scale_hcb);
++ REG_SET_SLICE(mrv_reg->srsz_scale_hcr, MRV_SRSZ_SCALE_HCB,
++ (u32) scale->scale_hcr);
++
++ REG_SET_SLICE(mrv_reg->srsz_phase_hc, MRV_SRSZ_PHASE_HC,
++ (u32) scale->phase_hc);
++
++ if (scale->scale_hcb & RSZ_UPSCALE_ENABLE) {
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP,
++ MRV_SRSZ_SCALE_HC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP,
++ MRV_SRSZ_SCALE_HC_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical luminance scale factor */
++ dprintk(1, "scale_vy = %d,%x", scale->scale_vy, scale->scale_vy);
++
++ if (scale->scale_vy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_vy, MRV_SRSZ_SCALE_VY,
++ (u32) scale->scale_vy);
++ REG_SET_SLICE(mrv_reg->srsz_phase_vy, MRV_SRSZ_PHASE_VY,
++ (u32) scale->phase_vy);
++
++ if (scale->scale_vy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP,
++ MRV_SRSZ_SCALE_VY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP,
++ MRV_SRSZ_SCALE_VY_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical chrominance scale factor */
++ dprintk(1, "scale_vc = %d,%x", scale->scale_vc, scale->scale_vc);
++
++ if (scale->scale_vc & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_vc, MRV_SRSZ_SCALE_VC,
++ (u32) scale->scale_vc);
++ REG_SET_SLICE(mrv_reg->srsz_phase_vc, MRV_SRSZ_PHASE_VC,
++ (u32) scale->phase_vc);
++
++ if (scale->scale_vc & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP,
++ MRV_SRSZ_SCALE_VC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP,
++ MRV_SRSZ_SCALE_VC_UP_DOWNSCALE);
++ }
++ }
++
++ /* apply upscaling lookup table */
++ if (rsz_lut) {
++ for (i = 0; i <= MRV_SRSZ_SCALE_LUT_ADDR_MASK; i++) {
++ REG_SET_SLICE(mrv_reg->srsz_scale_lut_addr,
++ MRV_SRSZ_SCALE_LUT_ADDR, i);
++ REG_SET_SLICE(mrv_reg->srsz_scale_lut,
++ MRV_SRSZ_SCALE_LUT,
++ rsz_lut->rsz_lut[i]);
++ }
++ } else if (upscaling) {
++ eprintk("Upscaling requires lookup table!");
++ WARN_ON(1);
++ }
++
++ /* handle immediate update flag and write mrsz_ctrl */
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /* frame synchronous update of shadow registers */
++ REG_WRITE(mrv_reg->srsz_ctrl, srsz_ctrl);
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD,
++ ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /* immediate update of shadow registers */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_CFG_UPD, ON);
++ REG_WRITE(mrv_reg->srsz_ctrl, srsz_ctrl);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ default:
++ /* no update from within this function */
++ REG_WRITE(mrv_reg->srsz_ctrl, srsz_ctrl);
++ break;
++ }
++}
++
++#if MRV_SUPPORT_SL
++
++/* bad pixel table */
++static struct ci_sensor_bp_table bp_table = { 0 };
++
++/*
++ * Initialization of the Bad Pixel Detection and Correction.
++ */
++int ci_bp_init(const struct ci_isp_bp_corr_config *bp_corr_config,
++ const struct ci_isp_bp_det_config *bp_det_config)
++{
++ int error = CI_STATUS_SUCCESS;
++
++ /* number of table elements */
++ /* number of table elements */
++#define MRVSLS_BPINIT_MAX_TABLE 2048
++
++ /* check the parameters */
++ if (!bp_corr_config || !bp_det_config)
++ return CI_STATUS_NULL_POINTER;
++
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_TABLE) {
++ /* set badpixel correction */
++ error |= ci_isp_set_bp_correction(bp_corr_config);
++ /* set badpixel detection */
++ error |= ci_isp_set_bp_detection(bp_det_config);
++ /* zero element inside */
++ bp_table.bp_number = 0;
++ if (!bp_table.bp_table_elem) {
++ /* allocate mem space for the table */
++ bp_table.bp_table_elem =
++ (struct ci_sensor_bp_table_elem *)
++ kmalloc((sizeof(struct ci_sensor_bp_table_elem)*
++ MRVSLS_BPINIT_MAX_TABLE), GFP_KERNEL);
++ if (!bp_table.bp_table_elem)
++ error |= CI_STATUS_FAILURE;
++ }
++ /* max count of elements */
++ bp_table.bp_table_elem_num = MRVSLS_BPINIT_MAX_TABLE;
++ /* Clear Interrupt Status */
++ error |= ci_isp_clear_bp_int();
++ } else {
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_DIRECT) {
++ /* set badpixel correction */
++ error |= ci_isp_set_bp_correction(bp_corr_config);
++ /* set badpixel detection */
++ error |= ci_isp_set_bp_detection(NULL);
++ } else {
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++ return error;
++}
++
++/*
++ * Disable the Bad Pixel Detection and Correction.
++ */
++int ci_bp_end(const struct ci_isp_bp_corr_config *bp_corr_config)
++{
++ int uiResult = CI_STATUS_SUCCESS;
++
++ /* check the parameter */
++ if (!bp_corr_config)
++ return CI_STATUS_NULL_POINTER;
++
++ /* disable badpixel correction */
++ uiResult |= ci_isp_set_bp_correction(NULL);
++
++ /* disable badpixel detection */
++ uiResult |= ci_isp_set_bp_detection(NULL);
++
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_TABLE) {
++ /* Clear Interrupt Status */
++ uiResult |= ci_isp_clear_bp_int();
++
++ /* deallocate BP Table */
++ kfree(bp_table.bp_table_elem);
++ bp_table.bp_table_elem = NULL;
++ }
++ return uiResult;
++}
++#endif
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/mrstisp_isp.c
+@@ -0,0 +1,1993 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++int mrst_isp_set_color_conversion_ex(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_0, MRV_ISP_CC_COEFF_0, 0x00001021);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_1, MRV_ISP_CC_COEFF_1, 0x00001040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_2, MRV_ISP_CC_COEFF_2, 0x0000100D);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_3, MRV_ISP_CC_COEFF_3, 0x00000FED);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_4, MRV_ISP_CC_COEFF_4, 0x00000FDB);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_5, MRV_ISP_CC_COEFF_5, 0x00001038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_6, MRV_ISP_CC_COEFF_6, 0x00001038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_7, MRV_ISP_CC_COEFF_7, 0x00000FD1);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_8, MRV_ISP_CC_COEFF_8, 0x00000FF7);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Selects the ISP path that will become active while processing
++ * data coming from an image sensor configured by the given ISI
++ * configuration struct.
++ */
++enum ci_isp_path ci_isp_select_path(const struct ci_sensor_config *isi_cfg,
++ u8 *words_per_pixel)
++{
++ u8 words;
++ enum ci_isp_path ret_val;
++
++ switch (isi_cfg->mode) {
++ case SENSOR_MODE_DATA:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ case SENSOR_MODE_PICT:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ case SENSOR_MODE_RGB565:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 2;
++ break;
++ case SENSOR_MODE_BT601:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_MODE_BT656:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_MODE_BAYER:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++
++ case SENSOR_MODE_SMIA:
++ switch (isi_cfg->smia_mode) {
++ case SENSOR_SMIA_MODE_RAW_12:
++ case SENSOR_SMIA_MODE_RAW_10:
++ case SENSOR_SMIA_MODE_RAW_8:
++ case SENSOR_SMIA_MODE_RAW_8_TO_10_DECOMP:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++ case SENSOR_SMIA_MODE_YUV_422:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_SMIA_MODE_YUV_420:
++ case SENSOR_SMIA_MODE_RGB_444:
++ case SENSOR_SMIA_MODE_RGB_565:
++ case SENSOR_SMIA_MODE_RGB_888:
++ case SENSOR_SMIA_MODE_COMPRESSED:
++ case SENSOR_SMIA_MODE_RAW_7:
++ case SENSOR_SMIA_MODE_RAW_6:
++ default:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ }
++ break;
++
++ case SENSOR_MODE_MIPI:
++ switch (isi_cfg->mipi_mode) {
++ case SENSOR_MIPI_MODE_RAW_12:
++ case SENSOR_MIPI_MODE_RAW_10:
++ case SENSOR_MIPI_MODE_RAW_8:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++ case SENSOR_MIPI_MODE_YUV422_8:
++ case SENSOR_MIPI_MODE_YUV422_10:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_MIPI_MODE_YUV420_8:
++ case SENSOR_MIPI_MODE_YUV420_10:
++ case SENSOR_MIPI_MODE_LEGACY_YUV420_8:
++ case SENSOR_MIPI_MODE_YUV420_CSPS_8:
++ case SENSOR_MIPI_MODE_YUV420_CSPS_10:
++ case SENSOR_MIPI_MODE_RGB444:
++ case SENSOR_MIPI_MODE_RGB555:
++ case SENSOR_MIPI_MODE_RGB565:
++ case SENSOR_MIPI_MODE_RGB666:
++ case SENSOR_MIPI_MODE_RGB888:
++ case SENSOR_MIPI_MODE_RAW_7:
++ case SENSOR_MIPI_MODE_RAW_6:
++ default:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ }
++ break;
++ case SENSOR_MODE_BAY_BT656:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++ case SENSOR_MODE_RAW_BT656:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ default:
++ ret_val = CI_ISP_PATH_UNKNOWN;
++ words = 1;
++ }
++
++ if (words_per_pixel)
++ *words_per_pixel = words ;
++ return ret_val;
++}
++
++/*
++ * configures the input acquisition according to the
++ * given config structure
++ */
++int ci_isp_set_input_aquisition(const struct ci_sensor_config *isi_cfg)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++ u32 isp_acq_prop = REG_READ(mrv_reg->isp_acq_prop);
++ /* factor between pixel count and amount of bytes to sample */
++ u8 sample_factor;
++ /* number of additional black lines at frame start */
++ u8 black_lines;
++
++ if (ci_isp_select_path(isi_cfg, &sample_factor)
++ == CI_ISP_PATH_UNKNOWN) {
++ eprintk("failed to select path");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->mode) {
++ case SENSOR_MODE_DATA:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_DATA);
++ break;
++ case SENSOR_MODE_PICT:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RAW);
++ break;
++ case SENSOR_MODE_RGB565:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RAW);
++ break;
++ case SENSOR_MODE_BT601:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_601);
++ break;
++ case SENSOR_MODE_BT656:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_656);
++ break;
++ case SENSOR_MODE_BAYER:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RGB);
++ break;
++ case SENSOR_MODE_BAY_BT656:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RGB656);
++ break;
++ case SENSOR_MODE_RAW_BT656:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RAW656);
++ break;
++
++ case SENSOR_MODE_SMIA:
++ switch (isi_cfg->smia_mode) {
++ case SENSOR_SMIA_MODE_RAW_12:
++ case SENSOR_SMIA_MODE_RAW_10:
++ case SENSOR_SMIA_MODE_RAW_8:
++ case SENSOR_SMIA_MODE_RAW_8_TO_10_DECOMP:
++ case SENSOR_SMIA_MODE_RAW_7:
++ case SENSOR_SMIA_MODE_RAW_6:
++ case SENSOR_SMIA_MODE_YUV_422:
++ case SENSOR_SMIA_MODE_YUV_420:
++ case SENSOR_SMIA_MODE_RGB_888:
++ case SENSOR_SMIA_MODE_RGB_565:
++ case SENSOR_SMIA_MODE_RGB_444:
++ case SENSOR_SMIA_MODE_COMPRESSED:
++ return CI_STATUS_SUCCESS;
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++ break;
++
++ case SENSOR_MODE_MIPI:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RGB);
++ REG_WRITE(mrv_reg->mipi_img_data_sel, 0x02b);
++ break;
++
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->bus_width) {
++ case SENSOR_BUSWIDTH_12BIT:
++ /* 000- 12Bit external Interface */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_12EXT);
++ break;
++ case SENSOR_BUSWIDTH_10BIT_ZZ:
++ /* 001- 10Bit Interface, append 2 zeroes as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_10ZERO);
++ break;
++ case SENSOR_BUSWIDTH_10BIT_EX:
++ /* 010- 10Bit Interface, append 2 MSBs as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_10MSB);
++ break;
++ case SENSOR_BUSWIDTH_8BIT_ZZ:
++ /* 011- 8Bit Interface, append 4 zeroes as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_8ZERO);
++ break;
++ case SENSOR_BUSWIDTH_8BIT_EX:
++ /* 100- 8Bit Interface, append 4 MSBs as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_8MSB);
++ break;
++ /* 101...111 reserved */
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->field_sel) {
++ case SENSOR_FIELDSEL_ODD:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_FIELD_SELECTION,
++ MRV_ISP_FIELD_SELECTION_ODD);
++ break;
++ case SENSOR_FIELDSEL_EVEN:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_FIELD_SELECTION,
++ MRV_ISP_FIELD_SELECTION_EVEN);
++ break;
++ case SENSOR_FIELDSEL_BOTH:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_FIELD_SELECTION,
++ MRV_ISP_FIELD_SELECTION_BOTH);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->ycseq) {
++ case SENSOR_YCSEQ_CRYCBY:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_CRYCBY);
++ break;
++ case SENSOR_YCSEQ_CBYCRY:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_CBYCRY);
++ break;
++ case SENSOR_YCSEQ_YCRYCB:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_YCRYCB);
++ break;
++ case SENSOR_YCSEQ_YCBYCR:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_YCBYCR);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->conv422) {
++ case SENSOR_CONV422_INTER:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CONV_422,
++ MRV_ISP_CONV_422_INTER);
++ break;
++
++ case SENSOR_CONV422_NOCOSITED:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CONV_422,
++ MRV_ISP_CONV_422_NONCO);
++ break;
++ case SENSOR_CONV422_COSITED:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CONV_422,
++ MRV_ISP_CONV_422_CO);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->bpat) {
++ case SENSOR_BPAT_BGBGGRGR:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_BG);
++ break;
++ case SENSOR_BPAT_GBGBRGRG:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_GB);
++ break;
++ case SENSOR_BPAT_GRGRBGBG:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_GR);
++ break;
++ case SENSOR_BPAT_RGRGGBGB:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_RG);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->vpol) {
++ case SENSOR_VPOL_POS:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_VSYNC_POL, 1);
++ break;
++ case SENSOR_VPOL_NEG:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_VSYNC_POL, 0);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->hpol) {
++ /* The trigger edge differs for vsync_pol and hsync_pol. */
++ /* vsync_pol = 1 triggers on positive edge whereas */
++ /* hsync_pol = 1 triggers on negative edge and vice versa */
++ case SENSOR_HPOL_SYNCPOS:
++ /* trigger on negative edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 1);
++ break;
++ case SENSOR_HPOL_SYNCNEG:
++ /* trigger on positive edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 0);
++ break;
++ case SENSOR_HPOL_REFPOS:
++ /* trigger on positive edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 0);
++ break;
++ case SENSOR_HPOL_REFNEG:
++ /* trigger on negative edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 1);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->edge) {
++ case SENSOR_EDGE_RISING:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_SAMPLE_EDGE, 1);
++ break;
++ case SENSOR_EDGE_FALLING:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_SAMPLE_EDGE, 0);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++ dprintk(2, "isp_acq_prop = 0x%x", isp_acq_prop);
++
++ /* now write values to registers */
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++ REG_WRITE(mrv_reg->isp_acq_prop, isp_acq_prop);
++
++ /* number of additional black lines at frame start */
++ switch (isi_cfg->bls) {
++ case SENSOR_BLS_OFF:
++ black_lines = 0;
++ break;
++ case SENSOR_BLS_TWO_LINES:
++ black_lines = 2;
++ break;
++ case SENSOR_BLS_FOUR_LINES:
++ black_lines = 4;
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ REG_SET_SLICE(mrv_reg->isp_acq_h_offs, MRV_ISP_ACQ_H_OFFS,
++ 0 * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_offs, MRV_ISP_ACQ_V_OFFS, 0);
++
++ dprintk(2, "res = %x", isi_cfg->res);
++ switch (isi_cfg->res) {
++ /* 88x72 */
++ case SENSOR_RES_QQCIF:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QQCIF_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QQCIF_SIZE_V + black_lines);
++ break;
++ /* 160x120 */
++ case SENSOR_RES_QQVGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QQVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QQVGA_SIZE_V + black_lines);
++ break;
++ /* 176x144 */
++ case SENSOR_RES_QCIF:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QCIF_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QCIF_SIZE_V + black_lines);
++ break;
++ /* 320x240 */
++ case SENSOR_RES_QVGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QVGA_SIZE_V + black_lines);
++ break;
++ /* 352x288 */
++ case SENSOR_RES_CIF:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ CIF_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ CIF_SIZE_V + black_lines);
++ break;
++ /* 640x480 */
++ case SENSOR_RES_VGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ VGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ VGA_SIZE_V + black_lines);
++ break;
++ /* 800x600 */
++ case SENSOR_RES_SVGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ SVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ SVGA_SIZE_V + black_lines);
++ break;
++ /* 1024x768 */
++ case SENSOR_RES_XGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ XGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ XGA_SIZE_V + black_lines);
++ break;
++ case SENSOR_RES_720P:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ RES_720P_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ RES_720P_SIZE_V + black_lines);
++ break;
++ /* 1280x960 */
++ case SENSOR_RES_XGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ XGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ XGA_PLUS_SIZE_V + black_lines);
++ break;
++ /* 1280x1024 */
++ case SENSOR_RES_SXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ SXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ SXGA_SIZE_V + black_lines);
++ break;
++ /* 1600x1200 */
++ case SENSOR_RES_UXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSVGA_SIZE_V + black_lines);
++ break;
++ /* 1920x1280 */
++ case SENSOR_RES_1080P:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ 1920 * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ 1080 + black_lines);
++ break;
++ /* 2048x1536 */
++ case SENSOR_RES_QXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QXGA_SIZE_V + black_lines);
++ break;
++ /* 2586x2048 */
++ case SENSOR_RES_QSXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_SIZE_V + black_lines);
++ break;
++ /* 2600x2048 */
++ case SENSOR_RES_QSXGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_PLUS_SIZE_V + black_lines);
++ break;
++ /* 2600x1950 */
++ case SENSOR_RES_QSXGA_PLUS2:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_PLUS2_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_PLUS2_SIZE_V + black_lines);
++ break;
++ /* 2686x2048, 5.30M */
++ case SENSOR_RES_QSXGA_PLUS3:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_PLUS3_SIZE_V * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_PLUS3_SIZE_V + black_lines);
++ break;
++ /* 2592*1944 5M */
++ case SENSOR_RES_QXGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QXGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QXGA_PLUS_SIZE_V + black_lines);
++ break;
++ /* 3200x2048, 6.56M */
++ case SENSOR_RES_WQSXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ WQSXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ WQSXGA_SIZE_V + black_lines);
++ break;
++ /* 3200x2400, 7.68M */
++ case SENSOR_RES_QUXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QUXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QUXGA_SIZE_V + black_lines);
++ break;
++ /* 3840x2400, 9.22M */
++ case SENSOR_RES_WQUXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ WQUXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ WQUXGA_SIZE_V + black_lines);
++ break;
++ /* 4096x3072, 12.59M */
++ case SENSOR_RES_HXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ HXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ HXGA_SIZE_V + black_lines);
++ break;
++ /* 4080x1024 */
++ case SENSOR_RES_YUV_HMAX:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ YUV_HMAX_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ YUV_HMAX_SIZE_V);
++ break;
++ /* 1024x4080 */
++ case SENSOR_RES_YUV_VMAX:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ YUV_VMAX_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ YUV_VMAX_SIZE_V);
++ break;
++ /* 4096x2048 */
++ case SENSOR_RES_RAWMAX:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ RAWMAX_SIZE_H);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ RAWMAX_SIZE_V);
++ break;
++ /* 352x240 */
++ case SENSOR_RES_BP1:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ BP1_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ BP1_SIZE_V);
++ break;
++ /* 720x480 */
++ case SENSOR_RES_L_AFM:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ L_AFM_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ L_AFM_SIZE_V);
++ break;
++ /* 128x96 */
++ case SENSOR_RES_M_AFM:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ M_AFM_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ M_AFM_SIZE_V);
++ break;
++ /* 64x32 */
++ case SENSOR_RES_S_AFM:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ S_AFM_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ S_AFM_SIZE_V);
++ break;
++ /* 1304x980 */
++ case SENSOR_RES_VGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ VGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ VGA_PLUS_SIZE_V);
++ break;
++
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * sets output window
++ */
++void ci_isp_set_output_formatter(const struct ci_isp_window *window,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (window) {
++ /* set output window */
++ REG_SET_SLICE(mrv_reg->isp_out_h_offs, MRV_IS_IS_H_OFFS,
++ window->hoffs);
++ REG_SET_SLICE(mrv_reg->isp_out_v_offs, MRV_IS_IS_V_OFFS,
++ window->voffs);
++ REG_SET_SLICE(mrv_reg->isp_out_h_size, MRV_IS_IS_H_SIZE,
++ window->hsize);
++ REG_SET_SLICE(mrv_reg->isp_out_v_size, MRV_IS_IS_V_SIZE,
++ window->vsize);
++
++ REG_SET_SLICE(mrv_reg->isp_is_h_offs, MRV_IS_IS_H_OFFS, 0);
++ REG_SET_SLICE(mrv_reg->isp_is_v_offs, MRV_IS_IS_V_OFFS, 0);
++ REG_SET_SLICE(mrv_reg->isp_is_h_size, MRV_IS_IS_H_SIZE,
++ window->hsize);
++ REG_SET_SLICE(mrv_reg->isp_is_v_size, MRV_IS_IS_V_SIZE,
++ window->vsize);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /* frame synchronous update of shadow registers */
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /* immediate update of shadow registers */
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ /* no update from within this function */
++ break;
++ default:
++ break;
++ }
++ }
++}
++
++/*
++ * programs the given Bayer pattern demosaic parameters
++ */
++void ci_isp_set_demosaic(enum ci_isp_demosaic_mode demosaic_mode,
++ u8 demosaic_th)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_demosaic = REG_READ(mrv_reg->isp_demosaic);
++
++ /* set demosaic mode */
++ switch (demosaic_mode) {
++ case CI_ISP_DEMOSAIC_STANDARD:
++ REG_SET_SLICE(isp_demosaic, MRV_ISP_DEMOSAIC_MODE,
++ MRV_ISP_DEMOSAIC_MODE_STD);
++ break;
++ case CI_ISP_DEMOSAIC_ENHANCED:
++ REG_SET_SLICE(isp_demosaic, MRV_ISP_DEMOSAIC_MODE,
++ MRV_ISP_DEMOSAIC_MODE_ENH);
++ break;
++ default:
++ WARN_ON(!(false));
++ }
++
++ /* set demosaic threshold */
++ REG_SET_SLICE(isp_demosaic, MRV_ISP_DEMOSAIC_TH, demosaic_th);
++ REG_WRITE(mrv_reg->isp_demosaic, isp_demosaic);
++}
++
++/*
++ * Sets the dedicated AWB block mode.
++ */
++int ci_isp_set_wb_mode(enum ci_isp_awb_mode wb_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ switch (wb_mode) {
++ case CI_ISP_AWB_COMPLETELY_OFF:
++ /* manual WB, no measurements*/
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_NOMEAS);
++ /* switch AWB block off */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ DISABLE);
++ break;
++ case CI_ISP_AWB_MAN_MEAS:
++ case CI_ISP_AWB_AUTO:
++ case CI_ISP_AWB_MAN_PUSH_AUTO:
++ case CI_ISP_AWB_ONLY_MEAS:
++ /* manual white balance, measure YCbCr means */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_MEAS);
++ /* switch AWB block on */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ ENABLE);
++ break;
++ case CI_ISP_AWB_MAN_NOMEAS:
++ /* manual white balance, no measurements */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_NOMEAS);
++ /* switch AWB block on */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ ENABLE);
++ break;
++ default:
++ /* to be sure that a regular value is set: */
++ /* manual white balance, no measurements */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_NOMEAS);
++ /* switch AWB block off */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ DISABLE);
++ return CI_STATUS_FAILURE;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_get_wb_mode(enum ci_isp_awb_mode *wb_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!wb_mode)
++ return CI_STATUS_NULL_POINTER;
++
++ if (REG_GET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE) ==
++ DISABLE) {
++ *wb_mode = CI_ISP_AWB_COMPLETELY_OFF;
++ } else {
++
++ switch (REG_GET_SLICE(mrv_reg->isp_awb_prop,
++ MRV_ISP_AWB_MODE)) {
++ case MRV_ISP_AWB_MODE_MEAS:
++ *wb_mode = CI_ISP_AWB_MAN_MEAS;
++ break;
++ case MRV_ISP_AWB_MODE_NOMEAS:
++ *wb_mode = CI_ISP_AWB_MAN_NOMEAS;
++ break;
++ default:
++ *wb_mode = CI_ISP_AWB_COMPLETELY_OFF;
++ return CI_STATUS_FAILURE;
++ }
++ }
++ return CI_STATUS_SUCCESS;
++}
++int ci_isp_set_wb_meas_config(const struct ci_isp_wb_meas_config
++ *wb_meas_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_awb_thresh = REG_READ(mrv_reg->isp_awb_thresh);
++
++ if (!wb_meas_config)
++ return CI_STATUS_NULL_POINTER;
++
++ /* measurement window */
++ REG_SET_SLICE(mrv_reg->isp_awb_h_size, MRV_ISP_AWB_H_SIZE,
++ (u32) wb_meas_config->awb_window.hsize);
++ REG_SET_SLICE(mrv_reg->isp_awb_v_size, MRV_ISP_AWB_V_SIZE,
++ (u32) wb_meas_config->awb_window.vsize);
++ REG_SET_SLICE(mrv_reg->isp_awb_h_offs, MRV_ISP_AWB_H_OFFS,
++ (u32) wb_meas_config->awb_window.hoffs);
++ REG_SET_SLICE(mrv_reg->isp_awb_v_offs, MRV_ISP_AWB_V_OFFS,
++ (u32) wb_meas_config->awb_window.voffs);
++
++ /* adjust awb properties (Y_MAX compare) */
++ if (wb_meas_config->max_y == 0) {
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MAX_EN,
++ DISABLE);
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MAX_EN,
++ ENABLE);
++ }
++
++ /* measurement thresholds */
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MAX_Y,
++ (u32) wb_meas_config->max_y);
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MIN_Y__MAX_G,
++ (u32) wb_meas_config->minY_MaxG);
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MAX_CSUM,
++ (u32) wb_meas_config->max_csum);
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MIN_C,
++ (u32) wb_meas_config->min_c);
++ REG_WRITE(mrv_reg->isp_awb_thresh, isp_awb_thresh);
++ REG_SET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CR__MAX_R,
++ (u32)(wb_meas_config->ref_cr_MaxR));
++ REG_SET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CB__MAX_B,
++ (u32)(wb_meas_config->ref_cb_MaxB));
++
++ /* amount of measurement frames */
++ REG_SET_SLICE(mrv_reg->isp_awb_frames, MRV_ISP_AWB_FRAMES,
++ (u32) wb_meas_config->frames);
++
++ /* set measurement mode */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MEAS_MODE,
++ (u32)(wb_meas_config->meas_mode));
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_get_wb_meas_config(struct ci_isp_wb_meas_config *wb_meas_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!wb_meas_config)
++ return CI_STATUS_NULL_POINTER;
++
++ /* measurement window */
++ wb_meas_config->awb_window.hsize =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_h_size, MRV_ISP_AWB_H_SIZE);
++ wb_meas_config->awb_window.vsize =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_v_size, MRV_ISP_AWB_V_SIZE);
++ wb_meas_config->awb_window.hoffs =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_h_offs, MRV_ISP_AWB_H_OFFS);
++ wb_meas_config->awb_window.voffs =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_v_offs, MRV_ISP_AWB_V_OFFS);
++
++ /* measurement thresholds */
++ wb_meas_config->min_c =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh, MRV_ISP_AWB_MIN_C);
++ wb_meas_config->max_csum =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh, MRV_ISP_AWB_MAX_CSUM);
++ wb_meas_config->minY_MaxG =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh,
++ MRV_ISP_AWB_MIN_Y__MAX_G);
++ wb_meas_config->max_y =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh, MRV_ISP_AWB_MAX_Y);
++ wb_meas_config->ref_cb_MaxB =
++ (u8)REG_GET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CB__MAX_B);
++ wb_meas_config->ref_cr_MaxR =
++ (u8)REG_GET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CR__MAX_R);
++
++ /* amount of measurement frames */
++ wb_meas_config->frames =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_frames, MRV_ISP_AWB_FRAMES);
++
++ /* overwrite max_y if the feature is disabled */
++ if (REG_GET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MAX_EN) ==
++ DISABLE) {
++ wb_meas_config->max_y = 0;
++ }
++
++ /* get measurement mode */
++ wb_meas_config->meas_mode = REG_GET_SLICE(mrv_reg->isp_awb_prop,
++ MRV_ISP_AWB_MEAS_MODE);
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_get_wb_meas(struct ci_sensor_awb_mean *awb_mean)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (awb_mean == NULL)
++ return CI_STATUS_NULL_POINTER;
++
++ awb_mean->white = REG_GET_SLICE(mrv_reg->isp_awb_white_cnt,
++ MRV_ISP_AWB_WHITE_CNT);
++ awb_mean->mean_Y__G = (u8) REG_GET_SLICE(mrv_reg->isp_awb_mean,
++ MRV_ISP_AWB_MEAN_Y__G);
++ awb_mean->mean_cb__B = (u8) REG_GET_SLICE(mrv_reg->isp_awb_mean,
++ MRV_ISP_AWB_MEAN_CB__B);
++ awb_mean->mean_cr__R = (u8) REG_GET_SLICE(mrv_reg->isp_awb_mean,
++ MRV_ISP_AWB_MEAN_CR__R);
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * calculates left-top and right-bottom register values
++ * for a given AF measurement window
++ */
++static int ci_isp_afm_wnd2_regs(const struct ci_isp_window *wnd, u32 *lt,
++ u32 *rb)
++{
++ WARN_ON(!((wnd != NULL) && (lt != NULL) && (rb != NULL)));
++
++ if (wnd->hsize && wnd->vsize) {
++ u32 left = wnd->hoffs;
++ u32 top = wnd->voffs;
++ u32 right = left + wnd->hsize - 1;
++ u32 bottom = top + wnd->vsize - 1;
++
++ if ((left < MRV_AFM_A_H_L_MIN)
++ || (left > MRV_AFM_A_H_L_MAX)
++ || (top < MRV_AFM_A_V_T_MIN)
++ || (top > MRV_AFM_A_V_T_MAX)
++ || (right < MRV_AFM_A_H_R_MIN)
++ || (right > MRV_AFM_A_H_R_MAX)
++ || (bottom < MRV_AFM_A_V_B_MIN)
++ || (bottom > MRV_AFM_A_V_B_MAX)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ /* combine the values and return */
++ REG_SET_SLICE(*lt, MRV_AFM_A_H_L, left);
++ REG_SET_SLICE(*lt, MRV_AFM_A_V_T, top);
++ REG_SET_SLICE(*rb, MRV_AFM_A_H_R, right);
++ REG_SET_SLICE(*rb, MRV_AFM_A_V_B, bottom);
++ } else {
++ *lt = 0;
++ *rb = 0;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_set_auto_focus(const struct ci_isp_af_config *af_config)
++{
++
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 result = CI_STATUS_SUCCESS;
++
++ /* disable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_afm_ctrl, MRV_AFM_AFM_EN, DISABLE);
++
++ if (af_config) {
++ u32 lt;
++ u32 rb;
++ result = ci_isp_afm_wnd2_regs(&(af_config->wnd_pos_a),
++ &lt, &rb);
++ /* set measurement window boundaries */
++ if (result != CI_STATUS_SUCCESS)
++ return result;
++
++ REG_WRITE(mrv_reg->isp_afm_lt_a, lt);
++ REG_WRITE(mrv_reg->isp_afm_rb_a, rb);
++
++ result = ci_isp_afm_wnd2_regs(&(af_config->wnd_pos_b),
++ &lt, &rb);
++
++ if (result != CI_STATUS_SUCCESS)
++ return result;
++
++ REG_WRITE(mrv_reg->isp_afm_lt_b, lt);
++ REG_WRITE(mrv_reg->isp_afm_rb_b, rb);
++
++ result = ci_isp_afm_wnd2_regs(&(af_config->wnd_pos_c),
++ &lt, &rb);
++
++ if (result != CI_STATUS_SUCCESS)
++ return result;
++
++ REG_WRITE(mrv_reg->isp_afm_lt_c, lt);
++ REG_WRITE(mrv_reg->isp_afm_rb_c, rb);
++
++ /* set other af measurement parameters */
++ REG_SET_SLICE(mrv_reg->isp_afm_thres, MRV_AFM_AFM_THRES,
++ af_config->threshold);
++ REG_SET_SLICE(mrv_reg->isp_afm_var_shift, MRV_AFM_LUM_VAR_SHIFT,
++ (af_config->var_shift >> 16));
++ REG_SET_SLICE(mrv_reg->isp_afm_var_shift, MRV_AFM_AFM_VAR_SHIFT,
++ (af_config->var_shift >> 0));
++
++ /* enable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_afm_ctrl, MRV_AFM_AFM_EN, ENABLE);
++ }
++
++ return result;
++}
++
++
++void ci_isp_get_auto_focus_meas(struct ci_isp_af_meas *af_meas)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ WARN_ON(!(af_meas != NULL));
++
++ af_meas->afm_sum_a =
++ REG_GET_SLICE(mrv_reg->isp_afm_sum_a, MRV_AFM_AFM_SUM_A);
++ af_meas->afm_sum_b =
++ REG_GET_SLICE(mrv_reg->isp_afm_sum_b, MRV_AFM_AFM_SUM_B);
++ af_meas->afm_sum_c =
++ REG_GET_SLICE(mrv_reg->isp_afm_sum_c, MRV_AFM_AFM_SUM_C);
++ af_meas->afm_lum_a =
++ REG_GET_SLICE(mrv_reg->isp_afm_lum_a, MRV_AFM_AFM_LUM_A);
++ af_meas->afm_lum_b =
++ REG_GET_SLICE(mrv_reg->isp_afm_lum_b, MRV_AFM_AFM_LUM_B);
++ af_meas->afm_lum_c =
++ REG_GET_SLICE(mrv_reg->isp_afm_lum_c, MRV_AFM_AFM_LUM_C);
++}
++
++int ci_isp_set_ls_correction(struct ci_sensor_ls_corr_config *ls_corr_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 i, n;
++ u32 data = 0;
++ int enabled = false;
++
++ if (!ls_corr_config) {
++ /* disable lens shading module */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN, DISABLE);
++ } else {
++ /* test if lens shading correction is enabled */
++ if (REG_GET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN)) {
++ /* switch off lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl,
++ MRV_LSC_LSC_EN, DISABLE);
++ /* wait to make sure that the LSC
++ * has time enough to switch off */
++ /* NOTE(review): msleep(1000) sleeps ~1 s, not 1 ms */
++ /*mdelay(1000);*/
++ msleep(1000);
++ enabled = true;
++ }
++
++ /* clear address counters */
++ REG_WRITE(mrv_reg->isp_lsc_r_table_addr, 0);
++ REG_WRITE(mrv_reg->isp_lsc_g_table_addr, 0);
++ REG_WRITE(mrv_reg->isp_lsc_b_table_addr, 0);
++
++ /* program data tables (table size is 9 * 17 = 153;
++ * see also MRV_LSC_?_RAM_ADDR_MAX) */
++ WARN_ON(!(((CI_ISP_MAX_LSC_SECTORS + 1) *
++ ((CI_ISP_MAX_LSC_SECTORS + 2) / 2)) ==
++ (MRV_LSC_R_RAM_ADDR_MAX + 1)));
++
++ /* 17 steps */
++ for (n = 0;
++ n < ((CI_ISP_MAX_LSC_SECTORS + 1) *
++ (CI_ISP_MAX_LSC_SECTORS + 1));
++ n += CI_ISP_MAX_LSC_SECTORS + 1) {
++ dprintk(2, "set ls correct step n = %d", n);
++ /* 17 sectors with 2 values in one DWORD = 9
++ * DWORDs (8 steps + 1 outside loop) */
++ for (i = 0; i < (CI_ISP_MAX_LSC_SECTORS); i += 2) {
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_0,
++ ls_corr_config->ls_rdata_tbl[n + i]);
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_1,
++ ls_corr_config->ls_rdata_tbl
++ [n + i + 1]);
++ REG_WRITE(mrv_reg->isp_lsc_r_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_0,
++ ls_corr_config->ls_gdata_tbl
++ [n + i]);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_1,
++ ls_corr_config->ls_gdata_tbl
++ [n + i + 1]);
++ REG_WRITE(mrv_reg->isp_lsc_g_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_0,
++ ls_corr_config->ls_bdata_tbl[n + i]);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_1,
++ ls_corr_config->ls_bdata_tbl
++ [n + i + 1]);
++ REG_WRITE(mrv_reg->isp_lsc_b_table_data, data);
++ }
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_0,
++ ls_corr_config->ls_rdata_tbl
++ [n + CI_ISP_MAX_LSC_SECTORS]);
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_1, 0);
++ REG_WRITE(mrv_reg->isp_lsc_r_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_0,
++ ls_corr_config->ls_gdata_tbl
++ [n + CI_ISP_MAX_LSC_SECTORS]);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_1, 0);
++ REG_WRITE(mrv_reg->isp_lsc_g_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_0,
++ ls_corr_config->ls_bdata_tbl
++ [n + CI_ISP_MAX_LSC_SECTORS]);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_1, 0);
++ REG_WRITE(mrv_reg->isp_lsc_b_table_data, data);
++ }
++
++ /* program x size tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_01, MRV_LSC_X_SECT_SIZE_0,
++ ls_corr_config->ls_xsize_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_01, MRV_LSC_X_SECT_SIZE_1,
++ ls_corr_config->ls_xsize_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_23, MRV_LSC_X_SECT_SIZE_2,
++ ls_corr_config->ls_xsize_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_23, MRV_LSC_X_SECT_SIZE_3,
++ ls_corr_config->ls_xsize_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_45, MRV_LSC_X_SECT_SIZE_4,
++ ls_corr_config->ls_xsize_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_45, MRV_LSC_X_SECT_SIZE_5,
++ ls_corr_config->ls_xsize_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_67, MRV_LSC_X_SECT_SIZE_6,
++ ls_corr_config->ls_xsize_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_67, MRV_LSC_X_SECT_SIZE_7,
++ ls_corr_config->ls_xsize_tbl[7]);
++
++ /* program y size tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_01, MRV_LSC_Y_SECT_SIZE_0,
++ ls_corr_config->ls_ysize_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_01, MRV_LSC_Y_SECT_SIZE_1,
++ ls_corr_config->ls_ysize_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_23, MRV_LSC_Y_SECT_SIZE_2,
++ ls_corr_config->ls_ysize_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_23, MRV_LSC_Y_SECT_SIZE_3,
++ ls_corr_config->ls_ysize_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_45, MRV_LSC_Y_SECT_SIZE_4,
++ ls_corr_config->ls_ysize_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_45, MRV_LSC_Y_SECT_SIZE_5,
++ ls_corr_config->ls_ysize_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_67, MRV_LSC_Y_SECT_SIZE_6,
++ ls_corr_config->ls_ysize_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_67, MRV_LSC_Y_SECT_SIZE_7,
++ ls_corr_config->ls_ysize_tbl[7]);
++
++ /* program x grad tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_01, MRV_LSC_XGRAD_0,
++ ls_corr_config->ls_xgrad_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_01, MRV_LSC_XGRAD_1,
++ ls_corr_config->ls_xgrad_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_23, MRV_LSC_XGRAD_2,
++ ls_corr_config->ls_xgrad_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_23, MRV_LSC_XGRAD_3,
++ ls_corr_config->ls_xgrad_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_45, MRV_LSC_XGRAD_4,
++ ls_corr_config->ls_xgrad_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_45, MRV_LSC_XGRAD_5,
++ ls_corr_config->ls_xgrad_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_67, MRV_LSC_XGRAD_6,
++ ls_corr_config->ls_xgrad_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_67, MRV_LSC_XGRAD_7,
++ ls_corr_config->ls_xgrad_tbl[7]);
++
++ /* program y grad tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_01, MRV_LSC_YGRAD_0,
++ ls_corr_config->ls_ygrad_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_01, MRV_LSC_YGRAD_1,
++ ls_corr_config->ls_ygrad_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_23, MRV_LSC_YGRAD_2,
++ ls_corr_config->ls_ygrad_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_23, MRV_LSC_YGRAD_3,
++ ls_corr_config->ls_ygrad_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_45, MRV_LSC_YGRAD_4,
++ ls_corr_config->ls_ygrad_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_45, MRV_LSC_YGRAD_5,
++ ls_corr_config->ls_ygrad_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_67, MRV_LSC_YGRAD_6,
++ ls_corr_config->ls_ygrad_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_67, MRV_LSC_YGRAD_7,
++ ls_corr_config->ls_ygrad_tbl[7]);
++
++ if (enabled) {
++ /* switch on lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl,
++ MRV_LSC_LSC_EN, ENABLE);
++ }
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_ls_correction_on_off(int ls_corr_on_off)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (ls_corr_on_off) {
++ /* switch on lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN, ENABLE);
++ } else {
++ /* switch off lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN, DISABLE);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Sets the Bad Pixel Correction configuration
++ */
++int ci_isp_set_bp_correction(const struct ci_isp_bp_corr_config
++ *bp_corr_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_bp_ctrl = REG_READ(mrv_reg->isp_bp_ctrl);
++
++ if (!bp_corr_config) {
++ /* disable correction module */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, DISABLE);
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, DISABLE);
++ } else {
++ /* set bad pixel configuration */
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_DIRECT) {
++ /* direct detection */
++ u32 isp_bp_cfg1 = REG_READ(mrv_reg->isp_bp_cfg1);
++ u32 isp_bp_cfg2 = REG_READ(mrv_reg->isp_bp_cfg2);
++
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_COR_TYPE,
++ MRV_BP_COR_TYPE_DIRECT);
++
++ WARN_ON(!(!REG_GET_SLICE(mrv_reg->isp_bp_ctrl,
++ MRV_BP_BP_DET_EN)));
++
++ /* threshold register only used for direct mode */
++ REG_SET_SLICE(isp_bp_cfg1, MRV_BP_HOT_THRES,
++ bp_corr_config->bp_abs_hot_thres);
++ REG_SET_SLICE(isp_bp_cfg1, MRV_BP_DEAD_THRES,
++ bp_corr_config->bp_abs_dead_thres);
++ REG_WRITE(mrv_reg->isp_bp_cfg1, isp_bp_cfg1);
++ REG_SET_SLICE(isp_bp_cfg2, MRV_BP_DEV_HOT_THRES,
++ bp_corr_config->bp_dev_hot_thres);
++ REG_SET_SLICE(isp_bp_cfg2, MRV_BP_DEV_DEAD_THRES,
++ bp_corr_config->bp_dev_dead_thres);
++ REG_WRITE(mrv_reg->isp_bp_cfg2, isp_bp_cfg2);
++ } else {
++ /* use bad pixel table */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_COR_TYPE,
++ MRV_BP_COR_TYPE_TABLE);
++ }
++
++ if (bp_corr_config->bp_corr_rep == CI_ISP_BP_CORR_REP_LIN) {
++ /* use linear approach */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_REP_APPR,
++ MRV_BP_REP_APPR_INTERPOL);
++ } else {
++ /* use best neighbour */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_REP_APPR,
++ MRV_BP_REP_APPR_NEAREST);
++ }
++
++ switch (bp_corr_config->bp_corr_mode) {
++ case CI_ISP_BP_CORR_HOT_EN:
++ /* enable Hot */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, ENABLE);
++ /* disable Dead */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, DISABLE);
++ break;
++ case CI_ISP_BP_CORR_DEAD_EN:
++ /* disable Hot */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, DISABLE);
++ /* enable Dead */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, ENABLE);
++ break;
++ case CI_ISP_BP_CORR_HOT_DEAD_EN:
++ default:
++ /* enable Hot */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, ENABLE);
++ /* enable Dead */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, ENABLE);
++ break;
++ }
++ }
++
++ REG_WRITE(mrv_reg->isp_bp_ctrl, isp_bp_ctrl);
++
++ return CI_STATUS_SUCCESS;
++
++}
++
++/*
++ * Sets the Bad Pixel configuration for detection
++ */
++int ci_isp_set_bp_detection(const struct ci_isp_bp_det_config *bp_det_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!bp_det_config) {
++ /* disable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_bp_ctrl, MRV_BP_BP_DET_EN, DISABLE);
++ } else {
++ WARN_ON(!(REG_GET_SLICE(mrv_reg->isp_bp_ctrl, MRV_BP_COR_TYPE)
++ == MRV_BP_COR_TYPE_TABLE));
++
++ /* set dead threshold for bad pixel detection */
++ REG_SET_SLICE(mrv_reg->isp_bp_cfg1, MRV_BP_DEAD_THRES,
++ bp_det_config->bp_dead_thres);
++
++ /* enable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_bp_ctrl, MRV_BP_BP_DET_EN, ENABLE);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_clear_bp_int(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* clear bp_det irq (only if it is signalled to prevent loss of irqs) */
++ if (REG_GET_SLICE(mrv_reg->isp_ris, MRV_ISP_RIS_BP_DET))
++ REG_SET_SLICE(mrv_reg->isp_icr, MRV_ISP_ICR_BP_DET, 1);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Initializes Isp filter registers with default reset values.
++ */
++static int ci_isp_initialize_filter_registers(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ mrv_reg->isp_filt_mode = 0x00000000;
++ mrv_reg->isp_filt_fac_sh1 = 0x00000010;
++ mrv_reg->isp_filt_fac_sh0 = 0x0000000C;
++ mrv_reg->isp_filt_fac_mid = 0x0000000A;
++ mrv_reg->isp_filt_fac_bl0 = 0x00000006;
++ mrv_reg->isp_filt_fac_bl1 = 0x00000002;
++ mrv_reg->isp_filt_thresh_bl0 = 0x0000000D;
++ mrv_reg->isp_filt_thresh_bl1 = 0x00000005;
++ mrv_reg->isp_filt_thresh_sh0 = 0x0000001A;
++ mrv_reg->isp_filt_thresh_sh1 = 0x0000002C;
++ mrv_reg->isp_filt_lum_weight = 0x00032040;
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_activate_filter(int activate_filter)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int retval = CI_STATUS_SUCCESS;
++
++ /* Initialize ISP filter control registers first */
++ retval = ci_isp_initialize_filter_registers();
++ if (retval != CI_STATUS_SUCCESS)
++ return retval;
++
++ /* Activate or deactivate filter algorithm */
++ REG_SET_SLICE(mrv_reg->isp_filt_mode, MRV_FILT_FILT_ENABLE,
++ (activate_filter) ? ENABLE : DISABLE);
++
++ return retval;
++}
++
++/*
++ * Write coefficient and threshold values into Isp filter
++ * registers for noise, sharpness and blurring filtering.
++ */
++int ci_isp_set_filter_params(u8 noise_reduc_level, u8 sharp_level)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_filt_mode = 0;
++
++ if (!REG_GET_SLICE(mrv_reg->isp_filt_mode, MRV_FILT_FILT_ENABLE))
++ return CI_STATUS_CANCELED;
++
++ REG_WRITE(mrv_reg->isp_filt_mode, isp_filt_mode);
++
++ if (((noise_reduc_level <= 10) || (noise_reduc_level == 99))
++ && (sharp_level <= 10)) {
++ switch (noise_reduc_level) {
++ /* Test Mode */
++ case 99:
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 0x000003FF);
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x000003FF);
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 0x000003FF);
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 0x000003FF);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 0
++ /* MRV_FILT_STAGE1_SELECT_MAX_BLUR */);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_BYPASS);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_BYPASS);
++ break;
++
++ case 0:
++ /* NoiseReductionLevel = 0 */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 0x000000);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x000000);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 0x000000);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 0x000000);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 6);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC8);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_BYPASS);
++ break;
++
++ case 1:
++ /* NoiseReductionLevel = 1; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 33);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 18);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 8);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 6);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 2:
++ /* NoiseReductionLevel = 2; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 44);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 26);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 13);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 5);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 4
++ /* MRV_FILT_STAGE1_SELECT_DEFAULT */);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 3:
++ /* NoiseReductionLevel = 3; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 51);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 36);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 23);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 10);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 4
++ /* MRV_FILT_STAGE1_SELECT_DEFAULT */);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 4:
++ /* NoiseReductionLevel = 4; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 67);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 41);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 26);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 15);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 3);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 5:
++ /* NoiseReductionLevel = 5; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 100);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 75);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 50);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 20);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 3);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 6:
++ /* NoiseReductionLevel = 6; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 120);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 90);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 60);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 26);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 7:
++ /* NoiseReductionLevel = 7; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 150);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 120);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 80);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 51);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 8:
++ /* NoiseReductionLevel = 8; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 200);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 170);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 140);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 100);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 9:
++ /* NoiseReductionLevel = 9; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 300);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 250);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 180);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 150);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT,
++ (sharp_level > 3) ? 2 : 1);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 10:
++		/* NoiseReductionLevel = 10; extreme noise */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 1023);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 1023);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 1023);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 1023);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT,
++ (sharp_level > 5) ? 2 :
++ ((sharp_level > 3) ? 1 : 0));
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ default:
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ switch (sharp_level) {
++ /* SharpLevel = 0; no sharp enhancement */
++ case 0:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000002);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000000);
++ break;
++
++ /* SharpLevel = 1; */
++ case 1:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000008);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000007);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000006);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000002);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000000);
++ break;
++
++ /* SharpLevel = 2; */
++ case 2:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000000A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000008);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000000);
++ break;
++
++ /* SharpLevel = 3; */
++ case 3:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x0000000A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000006);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000002);
++ break;
++
++ /* SharpLevel = 4; */
++ case 4:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000016);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000008);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000004);
++ break;
++
++ /* SharpLevel = 5; */
++ case 5:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000001B);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000014);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x0000000A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000004);
++ break;
++
++ /* SharpLevel = 6; */
++ case 6:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000020);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000001A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000013);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000006);
++ break;
++
++ /* SharpLevel = 7; */
++ case 7:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000026);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000001E);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000017);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000008);
++ break;
++
++ /* SharpLevel = 8; */
++ case 8:
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x00000013);
++ if (REG_GET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1) > 0x0000008A) {
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1,
++ 0x0000008A);
++ }
++ /* 43 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000002C);
++ /* 36 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000024);
++ /* 29 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x0000001D);
++ /* 21 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000015);
++ /* 14 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x0000000D);
++ break;
++
++ /* SharpLevel = 9; */
++ case 9:
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x00000013);
++ if (REG_GET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1) > 0x0000008A) {
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1,
++ 0x0000008A);
++ }
++ /* 48 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000030);
++ /* 42 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000002A);
++ /* 34 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000022);
++ /* 26 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x0000001A);
++ /* 20 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000014);
++ break;
++
++ /* SharpLevel = 10; */
++ case 10:
++ /* REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ * MRV_FILT_FILT_THRESH_SH0, 0x00000013); */
++ /* if (REG_GET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ * MRV_FILT_FILT_THRESH_SH1) > 0x0000008A) */
++ /* { */
++ /* REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ * MRV_FILT_FILT_THRESH_SH1, 0x0000008A); */
++ /* } */
++
++ /* 63 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000003F);
++ /* 48 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000030);
++ /* 40 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000028);
++ /* 36 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000024);
++ /* 32 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000020);
++ break;
++
++ default:
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ if (noise_reduc_level > 7) {
++ if (sharp_level > 7) {
++ u32 filt_fac_bl0 = REG_GET_SLICE
++ (mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0);
++ u32 filt_fac_bl1 =
++ REG_GET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1);
++ /* * 0.50 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0,
++ (filt_fac_bl0) >> 1);
++ /* * 0.25 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1,
++ (filt_fac_bl1) >> 2);
++ } else if (sharp_level > 4) {
++ u32 filt_fac_bl0 =
++ REG_GET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0);
++ u32 filt_fac_bl1 =
++ REG_GET_SLICE(mrv_reg->
++ isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1);
++ /* * 0.75 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0,
++ (filt_fac_bl0 * 3) >> 2);
++ /* * 0.50 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1,
++ (filt_fac_bl1) >> 1);
++ }
++ }
++
++ /* Set ISP filter mode register values */
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_MODE,
++ MRV_FILT_FILT_MODE_DYNAMIC);
++
++ /* enable filter */
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_ENABLE, ENABLE);
++ REG_WRITE(mrv_reg->isp_filt_mode, isp_filt_mode);
++
++ return CI_STATUS_SUCCESS;
++ } else {
++ /* At least one function parameter is out of range */
++ return CI_STATUS_OUTOFRANGE;
++ }
++}
++
++int ci_isp_meas_exposure_initialize_module(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->isp_exp_h_size, MRV_AE_ISP_EXP_H_SIZE, 0);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_size, MRV_AE_ISP_EXP_V_SIZE, 0);
++ REG_SET_SLICE(mrv_reg->isp_exp_h_offset, MRV_AE_ISP_EXP_H_OFFSET, 0);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_offset, MRV_AE_ISP_EXP_V_OFFSET, 0);
++
++ return CI_STATUS_SUCCESS;
++
++}
++
++/*
++ * Configures the exposure measurement module.
++ */
++int ci_isp_meas_exposure_set_config(const struct ci_isp_window *wnd,
++ const struct ci_isp_exp_ctrl *isp_exp_ctrl)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!wnd) {
++ /* stop loop if running */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_AUTOSTOP, ON);
++ /* required? */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_EXP_START, OFF);
++ return CI_STATUS_SUCCESS;
++ }
++
++ /* range check */
++ if ((wnd->hoffs > MRV_AE_ISP_EXP_H_OFFSET_MAX)
++ || (wnd->hsize > MRV_AE_ISP_EXP_H_SIZE_MAX)
++ || (wnd->voffs > MRV_AE_ISP_EXP_V_OFFSET_MAX)
++ || (wnd->vsize > MRV_AE_ISP_EXP_V_SIZE_MAX)
++ || (wnd->vsize & ~MRV_AE_ISP_EXP_V_SIZE_VALID_MASK))
++ return CI_STATUS_OUTOFRANGE;
++
++ /* configure measurement windows */
++ REG_SET_SLICE(mrv_reg->isp_exp_h_size, MRV_AE_ISP_EXP_H_SIZE,
++ wnd->hsize);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_size, MRV_AE_ISP_EXP_V_SIZE,
++ wnd->vsize);
++ REG_SET_SLICE(mrv_reg->isp_exp_h_offset, MRV_AE_ISP_EXP_H_OFFSET,
++ wnd->hoffs);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_offset, MRV_AE_ISP_EXP_V_OFFSET,
++ wnd->voffs);
++
++ /* set exposure measurement mode */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_EXP_MEAS_MODE,
++ (isp_exp_ctrl->exp_meas_mode) ? ON : OFF);
++
++ /* set or clear AE autostop bit */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_AUTOSTOP,
++ (isp_exp_ctrl->auto_stop) ? ON : OFF);
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_EXP_START,
++ (isp_exp_ctrl->exp_start) ? ON : OFF);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Programs the given gamma curve for the input gamma
++ * block. Enables or disables gamma processing for the
++ * input gamma block.
++ */
++void ci_isp_set_gamma(const struct ci_sensor_gamma_curve *r,
++ const struct ci_sensor_gamma_curve *g,
++ const struct ci_sensor_gamma_curve *b)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ /* values stored as 16bit - use MSBs if cambuswidth is smaller */
++ const u8 shift_val = 16 - MARVIN_FEATURE_CAMBUSWIDTH;
++ /* used to round up values */
++ const u16 round_ofs = 0 << (shift_val - 1);
++ s32 i;
++
++ if (r) {
++
++ /*
++ * Note: gamma curve increments are already register conform,
++ * so REG_WRITE is used instead of REG_SET_SLICE
++ */
++
++		/*
++		 * better would be split into 16 separate values to be
++		 * register independent
++		 */
++
++ /* gamma curve dx1..dx16 increments (each nibble of */
++ REG_WRITE(mrv_reg->isp_gamma_dx_lo, r->gamma_dx0);
++ /* the 32bit-values hold 3 valid bits, see register) */
++ REG_WRITE(mrv_reg->isp_gamma_dx_hi, r->gamma_dx1);
++
++ for (i = 0; i < MRV_ISP_GAMMA_R_Y_ARR_SIZE; i++) {
++ REG_SET_SLICE(mrv_reg->isp_gamma_r_y[i],
++ MRV_ISP_GAMMA_R_Y,
++ (r->isp_gamma_y[i] + round_ofs) >> shift_val);
++ REG_SET_SLICE(mrv_reg->isp_gamma_g_y[i],
++ MRV_ISP_GAMMA_G_Y,
++ (g->isp_gamma_y[i] + round_ofs) >> shift_val);
++ REG_SET_SLICE(mrv_reg->isp_gamma_b_y[i],
++ MRV_ISP_GAMMA_B_Y,
++ (b->isp_gamma_y[i] + round_ofs) >> shift_val);
++ }
++
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GAMMA_IN_ENABLE, ENABLE);
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GAMMA_IN_ENABLE, DISABLE);
++ }
++}
++
++/*
++ * Programs the given gamma curve for the output gamma
++ * block. Enables or disables gamma processing for the
++ * output gamma block.
++ */
++void ci_isp_set_gamma2(const struct ci_isp_gamma_out_curve *gamma)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ s32 i;
++
++ if (gamma) {
++ WARN_ON(!(MRV_ISP_GAMMA_OUT_Y_ARR_SIZE ==
++ CI_ISP_GAMMA_OUT_CURVE_ARR_SIZE));
++
++ for (i = 0; i < MRV_ISP_GAMMA_OUT_Y_ARR_SIZE; i++) {
++ REG_SET_SLICE(mrv_reg->isp_gamma_out_y[i],
++ MRV_ISP_ISP_GAMMA_OUT_Y,
++ gamma->isp_gamma_y[i]);
++ }
++
++ /* gamma curve linear or log */
++ REG_SET_SLICE(mrv_reg->isp_gamma_out_mode, MRV_ISP_EQU_SEGM,
++ gamma->gamma_segmentation);
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GAMMA_OUT_ENABLE,
++ ENABLE);
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GAMMA_OUT_ENABLE, DISABLE);
++ }
++
++}
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/mrstisp_jpe.c
+@@ -0,0 +1,577 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++int ci_isp_jpe_init_ex(u16 hsize, u16 vsize, u8 compression_ratio, u8 jpe_scale)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++	/*
++	 * Reset JPEG-Encoder. In contrast to other software resets
++	 * this triggers the module's asynchronous reset, resulting
++	 * in loss of all data.
++	 */
++
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_JPEG_SOFT_RST, ON);
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_JPEG_SOFT_RST, OFF);
++
++ /* set configuration for the Jpeg capturing */
++ ci_isp_jpe_set_config(hsize, vsize, jpe_scale);
++
++ /*
++ * Sleep a while before setting up tables because of the 400
++ * clock cycles required to initialize the table RAM after a
++ * reset was issued. On FPGA we are running with only 30MHz,
++ * so at least 13us are required.
++ */
++
++
++ /*
++ * Note: this func is called when holding spin lock,
++ * so can not change to msleep.
++ */
++ mdelay(15);
++
++ /* program tables */
++ ci_isp_jpe_set_tables(compression_ratio);
++
++ /* choose tables */
++ ci_isp_jpe_select_tables();
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * initialization of JPEG encoder
++ */
++int ci_isp_jpe_init(u32 resolution, u8 compression_ratio, int jpe_scale)
++{
++ u16 hsize = 0;
++ u16 vsize = 0;
++
++ switch (resolution) {
++ case SENSOR_RES_BP1:
++ /* 352; */
++ hsize = BP1_SIZE_H;
++ /* 240; */
++ vsize = BP1_SIZE_V;
++ break;
++ case SENSOR_RES_S_AFM:
++ /* 64; */
++ hsize = S_AFM_SIZE_H;
++ /* 32; */
++ vsize = S_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_M_AFM:
++ /* 128; */
++ hsize = M_AFM_SIZE_H;
++ /* 96; */
++ vsize = M_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_L_AFM:
++ /* 720; */
++ hsize = L_AFM_SIZE_H;
++ /* 480; */
++ vsize = L_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_QQCIF:
++ /* 88; */
++ hsize = QQCIF_SIZE_H;
++ /* 72; */
++ vsize = QQCIF_SIZE_V;
++ break;
++ case SENSOR_RES_QQVGA:
++ /* 160; */
++ hsize = QQVGA_SIZE_H;
++ /* 120; */
++ vsize = QQVGA_SIZE_V;
++ break;
++ case SENSOR_RES_QCIF:
++ /* 176; */
++ hsize = QCIF_SIZE_H;
++ /* 144; */
++ vsize = QCIF_SIZE_V;
++ break;
++ case SENSOR_RES_QVGA:
++ /* 320; */
++ hsize = QVGA_SIZE_H;
++ /* 240; */
++ vsize = QVGA_SIZE_V;
++ break;
++ case SENSOR_RES_CIF:
++ /* 352; */
++ hsize = CIF_SIZE_H;
++ /* 288; */
++ vsize = CIF_SIZE_V;
++ break;
++ case SENSOR_RES_VGA:
++ /* 640; */
++ hsize = VGA_SIZE_H;
++ /* 480; */
++ vsize = VGA_SIZE_V;
++ break;
++ case SENSOR_RES_SVGA:
++ /* 800; */
++ hsize = SVGA_SIZE_H;
++ /* 600; */
++ vsize = SVGA_SIZE_V;
++ break;
++ case SENSOR_RES_XGA:
++ /* 1024; */
++ hsize = XGA_SIZE_H;
++ /* 768; */
++ vsize = XGA_SIZE_V;
++ break;
++ case SENSOR_RES_XGA_PLUS:
++ /* 1280; */
++ hsize = XGA_PLUS_SIZE_H;
++ /* 960; */
++ vsize = XGA_PLUS_SIZE_V;
++ break;
++ case SENSOR_RES_SXGA:
++ /* 1280; */
++ hsize = SXGA_SIZE_H;
++ /* 1024; */
++ vsize = SXGA_SIZE_V;
++ break;
++ case SENSOR_RES_UXGA:
++ /* 1600; */
++ hsize = UXGA_SIZE_H;
++ /* 1200; */
++ vsize = UXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QXGA:
++ /* 2048; */
++ hsize = QXGA_SIZE_H;
++ /* 1536; */
++ vsize = QXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA:
++ /* 2586; */
++ hsize = QSXGA_SIZE_H;
++ /* 2048; */
++ vsize = QSXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS:
++ /* 2600; */
++ hsize = QSXGA_PLUS_SIZE_H;
++ /* 2048; */
++ vsize = QSXGA_PLUS_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS2:
++ /* 2600; */
++ hsize = QSXGA_PLUS2_SIZE_H;
++ /* 1950; */
++ vsize = QSXGA_PLUS2_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS3:
++ /* 2686; */
++ hsize = QSXGA_PLUS3_SIZE_H;
++ /* 2048; */
++ vsize = QSXGA_PLUS3_SIZE_V;
++ break;
++ case SENSOR_RES_WQSXGA:
++ /* 3200 */
++ hsize = WQSXGA_SIZE_H;
++ /* 2048 */
++ vsize = WQSXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QUXGA:
++ /* 3200 */
++ hsize = QUXGA_SIZE_H;
++ /* 2400 */
++ vsize = QUXGA_SIZE_V;
++ break;
++ case SENSOR_RES_WQUXGA:
++ /* 3840 */
++ hsize = WQUXGA_SIZE_H;
++ /* 2400 */
++ vsize = WQUXGA_SIZE_V;
++ break;
++ case SENSOR_RES_HXGA:
++ /* 4096 */
++ hsize = HXGA_SIZE_H;
++ /* 3072 */
++ vsize = HXGA_SIZE_V;
++ break;
++ default:
++ eprintk("resolution not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ return ci_isp_jpe_init_ex(hsize, vsize, compression_ratio, jpe_scale);
++}
++
++void ci_isp_jpe_set_tables(u8 compression_ratio)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ /* required because auto-increment register */
++ u32 jpe_table_data = 0;
++
++ u8 idx, size;
++ const u8 *yqtable = NULL;
++ const u8 *uvqtable = NULL;
++
++ switch (compression_ratio) {
++ case CI_ISP_JPEG_LOW_COMPRESSION:
++ yqtable = ci_isp_yq_table_low_comp1;
++ uvqtable = ci_isp_uv_qtable_low_comp1;
++ break;
++ case CI_ISP_JPEG_01_PERCENT:
++ yqtable = ci_isp_yq_table01_per_cent;
++ uvqtable = ci_isp_uv_qtable01_per_cent;
++ break;
++ case CI_ISP_JPEG_20_PERCENT:
++ yqtable = ci_isp_yq_table20_per_cent;
++ uvqtable = ci_isp_uv_qtable20_per_cent;
++ break;
++ case CI_ISP_JPEG_30_PERCENT:
++ yqtable = ci_isp_yq_table30_per_cent;
++ uvqtable = ci_isp_uv_qtable30_per_cent;
++ break;
++ case CI_ISP_JPEG_40_PERCENT:
++ yqtable = ci_isp_yq_table40_per_cent;
++ uvqtable = ci_isp_uv_qtable40_per_cent;
++ break;
++ case CI_ISP_JPEG_50_PERCENT:
++ yqtable = ci_isp_yq_table50_per_cent;
++ uvqtable = ci_isp_uv_qtable50_per_cent;
++ break;
++ case CI_ISP_JPEG_60_PERCENT:
++ yqtable = ci_isp_yq_table60_per_cent;
++ uvqtable = ci_isp_uv_qtable60_per_cent;
++ break;
++ case CI_ISP_JPEG_70_PERCENT:
++ yqtable = ci_isp_yq_table70_per_cent;
++ uvqtable = ci_isp_uv_qtable70_per_cent;
++ break;
++ case CI_ISP_JPEG_80_PERCENT:
++ yqtable = ci_isp_yq_table80_per_cent;
++ uvqtable = ci_isp_uv_qtable80_per_cent;
++ break;
++ case CI_ISP_JPEG_90_PERCENT:
++ yqtable = ci_isp_yq_table90_per_cent;
++ uvqtable = ci_isp_uv_qtable90_per_cent;
++ break;
++ case CI_ISP_JPEG_99_PERCENT:
++ yqtable = ci_isp_yq_table99_per_cent;
++ uvqtable = ci_isp_uv_qtable99_per_cent;
++ break;
++ case CI_ISP_JPEG_HIGH_COMPRESSION:
++ default:
++ /*
++ * in the case an unknown value is set,
++ * use CI_JPEG_HIGH_COMPRESSION
++ */
++ yqtable = ci_isp_yq_table75_per_cent;
++ uvqtable = ci_isp_uv_qtable75_per_cent;
++ break;
++ }
++
++ /* Y q-table 0 programming */
++
++ /* all possible assigned tables have same size */
++ size = sizeof(ci_isp_yq_table75_per_cent)/
++ sizeof(ci_isp_yq_table75_per_cent[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_QUANT0);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ yqtable[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ yqtable[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* U/V q-table 0 programming */
++
++ /* all possible assigned tables have same size */
++ size = sizeof(ci_isp_uv_qtable75_per_cent) /
++ sizeof(ci_isp_uv_qtable75_per_cent[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_QUANT1);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ uvqtable[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ uvqtable[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* Y AC-table 0 programming */
++
++ size = sizeof(ci_isp_ac_luma_table_annex_k) /
++ sizeof(ci_isp_ac_luma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_AC0);
++ REG_SET_SLICE(mrv_reg->jpe_tac0_len, MRV_JPE_TAC0_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_ac_luma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_ac_luma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* U/V AC-table 1 programming */
++
++ size = sizeof(ci_isp_ac_chroma_table_annex_k) /
++ sizeof(ci_isp_ac_chroma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_AC1);
++ REG_SET_SLICE(mrv_reg->jpe_tac1_len, MRV_JPE_TAC1_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_ac_chroma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_ac_chroma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* Y DC-table 0 programming */
++
++ size = sizeof(ci_isp_dc_luma_table_annex_k) /
++ sizeof(ci_isp_dc_luma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_DC0);
++ REG_SET_SLICE(mrv_reg->jpe_tdc0_len, MRV_JPE_TDC0_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_dc_luma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_dc_luma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* U/V DC-table 1 programming */
++
++ size = sizeof(ci_isp_dc_chroma_table_annex_k) /
++ sizeof(ci_isp_dc_chroma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_DC1);
++ REG_SET_SLICE(mrv_reg->jpe_tdc1_len, MRV_JPE_TDC1_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_dc_chroma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_dc_chroma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++}
++
++/*
++ * selects tables to be used by encoder
++ */
++void ci_isp_jpe_select_tables(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* selects quantization table for Y */
++ REG_SET_SLICE(mrv_reg->jpe_tq_y_select, MRV_JPE_TQ0_SELECT,
++ MRV_JPE_TQ_SELECT_TAB0);
++ /* selects quantization table for U */
++ REG_SET_SLICE(mrv_reg->jpe_tq_u_select, MRV_JPE_TQ1_SELECT,
++ MRV_JPE_TQ_SELECT_TAB1);
++ /* selects quantization table for V */
++ REG_SET_SLICE(mrv_reg->jpe_tq_v_select, MRV_JPE_TQ2_SELECT,
++ MRV_JPE_TQ_SELECT_TAB1);
++ /* selects Huffman DC table */
++ REG_SET_SLICE(mrv_reg->jpe_dc_table_select,
++ MRV_JPE_DC_TABLE_SELECT_Y, 0);
++ REG_SET_SLICE(mrv_reg->jpe_dc_table_select,
++ MRV_JPE_DC_TABLE_SELECT_U, 1);
++ REG_SET_SLICE(mrv_reg->jpe_dc_table_select,
++ MRV_JPE_DC_TABLE_SELECT_V, 1);
++ /* selects Huffman AC table */
++ REG_SET_SLICE(mrv_reg->jpe_ac_table_select,
++ MRV_JPE_AC_TABLE_SELECT_Y, 0);
++ REG_SET_SLICE(mrv_reg->jpe_ac_table_select,
++ MRV_JPE_AC_TABLE_SELECT_U, 1);
++ REG_SET_SLICE(mrv_reg->jpe_ac_table_select,
++ MRV_JPE_AC_TABLE_SELECT_V, 1);
++}
++
++/*
++ * configure JPEG encoder
++ */
++void ci_isp_jpe_set_config(u16 hsize, u16 vsize, int jpe_scale)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* JPEG image size */
++
++ REG_SET_SLICE(mrv_reg->jpe_enc_hsize, MRV_JPE_ENC_HSIZE, hsize);
++ REG_SET_SLICE(mrv_reg->jpe_enc_vsize, MRV_JPE_ENC_VSIZE, vsize);
++
++ if (jpe_scale) {
++ /* upscaling of BT601 color space to full range 0..255 */
++ REG_SET_SLICE(mrv_reg->jpe_y_scale_en, MRV_JPE_Y_SCALE_EN,
++ ENABLE);
++ REG_SET_SLICE(mrv_reg->jpe_cbcr_scale_en,
++ MRV_JPE_CBCR_SCALE_EN, ENABLE);
++ } else {
++ /* bypass scaler */
++ REG_SET_SLICE(mrv_reg->jpe_y_scale_en,
++ MRV_JPE_Y_SCALE_EN, DISABLE);
++ REG_SET_SLICE(mrv_reg->jpe_cbcr_scale_en,
++ MRV_JPE_CBCR_SCALE_EN, DISABLE);
++ }
++
++ /* picture format YUV 422 */
++ REG_SET_SLICE(mrv_reg->jpe_pic_format, MRV_JPE_ENC_PIC_FORMAT,
++ MRV_JPE_ENC_PIC_FORMAT_422);
++ REG_SET_SLICE(mrv_reg->jpe_table_flush, MRV_JPE_TABLE_FLUSH, 0);
++}
++
++int ci_isp_jpe_generate_header(struct mrst_isp_device *intel, u8 header_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ WARN_ON(!((header_mode == MRV_JPE_HEADER_MODE_JFIF)
++ || (header_mode == MRV_JPE_HEADER_MODE_NO)));
++
++ /* clear jpeg gen_header_done interrupt */
++ /* since we poll them later to detect command completion */
++
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_GEN_HEADER_DONE, 1);
++ REG_SET_SLICE(mrv_reg->jpe_header_mode, MRV_JPE_HEADER_MODE,
++ header_mode);
++
++ /* start header generation */
++ REG_SET_SLICE(mrv_reg->jpe_gen_header, MRV_JPE_GEN_HEADER, ON);
++
++ return ci_isp_jpe_wait_for_header_gen_done(intel);
++}
++
++void ci_isp_jpe_prep_enc(enum ci_isp_jpe_enc_mode jpe_enc_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 jpe_encode = REG_READ(mrv_reg->jpe_encode);
++
++ /* clear jpeg encode_done interrupt */
++ /* since we poll them later to detect command completion */
++
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_ENCODE_DONE, 1);
++ REG_SET_SLICE(jpe_encode, MRV_JPE_ENCODE, ON);
++
++ switch (jpe_enc_mode) {
++ case CI_ISP_JPE_LARGE_CONT_MODE:
++ /* motion JPEG with header generation */
++ REG_SET_SLICE(jpe_encode, MRV_JPE_CONT_MODE,
++ MRV_JPE_CONT_MODE_HEADER);
++ break;
++ case CI_ISP_JPE_SHORT_CONT_MODE:
++ /* motion JPEG only first frame with header */
++ REG_SET_SLICE(jpe_encode, MRV_JPE_CONT_MODE,
++ MRV_JPE_CONT_MODE_NEXT);
++ break;
++ default:
++ /* single shot JPEG */
++ REG_SET_SLICE(jpe_encode, MRV_JPE_CONT_MODE,
++ MRV_JPE_CONT_MODE_STOP);
++ break;
++ }
++
++ REG_WRITE(mrv_reg->jpe_encode, jpe_encode);
++ REG_SET_SLICE(mrv_reg->jpe_init, MRV_JPE_JP_INIT, 1);
++}
++
++/*
++ * wait until JPG Header is generated (MRV_JPGINT_GEN_HEADER_DONE
++ * interrupt occurs)
++ * waiting for JPG Header to be generated
++ */
++int ci_isp_jpe_wait_for_header_gen_done(struct mrst_isp_device *intel)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ struct timeval sts, ets;
++ unsigned long ms = 0;
++
++ mrst_timer_start();
++
++ do_gettimeofday(&sts);
++ while (!REG_GET_SLICE(mrv_reg->jpe_status_ris,
++ MRV_JPE_GEN_HEADER_DONE)) {
++ do_gettimeofday(&ets);
++ if (ets.tv_sec - sts.tv_sec >= 5) {
++ mrst_timer_stop();
++ eprintk("timeout");
++ return CI_STATUS_FAILURE;
++ }
++ }
++
++ mrst_timer_stop();
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * wait until JPG Encoder is done (MRV_JPGINT_ENCODE_DONE
++ * interrupt occurs) waiting for the JPG Encoder to be done
++ */
++int ci_isp_jpe_wait_for_encode_done(struct mrst_isp_device *intel)
++{
++#if 0
++ int ret = 0;
++ INIT_COMPLETION(intel->jpe_complete);
++ ret = wait_for_completion_interruptible_timeout(&intel->jpe_complete,
++ 100 * HZ);
++ if ((ret == 0) | (intel->irq_stat == IRQ_JPE_ERROR)) {
++ eprintk("timeout");
++ return CI_STATUS_FAILURE;
++ }
++
++ return CI_STATUS_SUCCESS;
++#endif
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ struct timeval sts, ets;
++ unsigned long ms = 0;
++
++ do_gettimeofday(&sts);
++ mrst_timer_start();
++ while (!REG_GET_SLICE(mrv_reg->jpe_status_ris,
++ MRV_JPE_ENCODE_DONE)) {
++ do_gettimeofday(&ets);
++ if (ets.tv_sec - sts.tv_sec >= 5) {
++ mrst_timer_stop();
++ eprintk("timeout");
++ return CI_STATUS_FAILURE;
++ }
++ }
++
++ mrst_timer_stop();
++
++ /* clear jpeg encode_done interrupt */
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_ENCODE_DONE, 1);
++
++ return CI_STATUS_SUCCESS;
++}
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/mrstisp_main.c
+@@ -0,0 +1,3138 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++#include "ci_isp_fmts_common.h"
++#include <asm/mrst.h>
++
++
++#define GPIO_SCLK_25 44
++#define GPIO_STDBY1_PIN 48
++#define GPIO_STDBY2_PIN 49
++#define GPIO_RESET_PIN 50
++
++int mrstisp_debug = 2;
++module_param(mrstisp_debug, int, 0644);
++/* For some reason ISP needs some "extra syncs" with sensor mt9d113 */
++int isp_extra_sync_for_mt9d113;
++
++/*XXX*/
++static int frame_cnt;
++static long mipi_error_num;
++static u32 mipi_error_flag;
++static long isp_error_num;
++static u32 isp_error_flag;
++static unsigned long jiffies_start;
++static int mipi_flag;
++
++#define ISP_CLOCK_GATING
++#ifdef ISP_CLOCK_GATING
++
++volatile static int prevent_sleep_flag;
++volatile static int btimerexist;
++static struct prevent_isp_sleep_struct {
++ u32 kthid;
++ u32 base;
++ u32 mipi;
++ int timeout;
++} prevent_isp_sleep;
++static void prevent_sleep_func(unsigned long data);
++static struct timer_list prevent_sleep_timer = TIMER_INITIALIZER(prevent_sleep_func, 0, 0);
++#endif
++
++
++void intel_timer_start(void)
++{
++ jiffies_start = jiffies;
++}
++void intel_timer_stop(void)
++{
++ jiffies_start = 0;
++}
++unsigned long intel_get_micro_sec(void)
++{
++ unsigned long time_diff = 0;
++
++ time_diff = jiffies - jiffies_start;
++
++ return jiffies_to_msecs(time_diff);
++}
++
++
++static inline struct mrst_isp_device *to_isp(struct v4l2_device *dev)
++{
++ return container_of(dev, struct mrst_isp_device, v4l2_dev);
++}
++
++static struct mrst_camera mrst_camera_table[] = {
++ {
++ .type = MRST_CAMERA_SOC,
++ .name = "ov2650",
++ .sensor_addr = 0x30,
++ },
++ {
++ .type = MRST_CAMERA_SOC,
++ .name = "mt9d113",
++ .sensor_addr = (0x78 >> 1),
++ },
++ {
++ .type = MRST_CAMERA_SOC,
++ .name = "ov9665",
++ .sensor_addr = 0x30,
++ },
++ {
++ .type = MRST_CAMERA_RAW,
++ .name = "ov5630",
++ .sensor_addr = 0x36,
++ .motor_name = "ov5630_motor",
++ .motor_addr = (0x18 >> 1),
++ },
++ {
++ .type = MRST_CAMERA_RAW,
++ .name = "s5k4e1",
++ .sensor_addr = 0x36,
++ .motor_name = "s5k4e1_motor",
++ .motor_addr = (0x18 >> 1),
++ },
++};
++
++#define N_CAMERA (ARRAY_SIZE(mrst_camera_table))
++
++struct videobuf_dma_contig_memory {
++ u32 magic;
++ void *vaddr;
++ dma_addr_t dma_handle;
++ unsigned long size;
++ int is_userptr;
++};
++#define MAGIC_DC_MEM 0x0733ac61
++#define MAGIC_CHECK(is, should) \
++ if (unlikely((is) != (should))) { \
++ pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
++ BUG(); \
++ }
++/* flag to determine whether to do the handler of mblk_line irq */
++int mrst_isp_to_do_mblk_line;
++unsigned char *mrst_isp_regs;
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++/* g45-th20-b5 gamma out curve with enhanced black level */
++static struct ci_isp_gamma_out_curve g45_th20_b5 = {
++ {
++ 0x0000, 0x0014, 0x003C, 0x0064,
++ 0x00A0, 0x0118, 0x0171, 0x01A7,
++ 0x01D8, 0x0230, 0x027A, 0x02BB,
++ 0x0323, 0x0371, 0x03AD, 0x03DB,
++ 0x03FF}
++ ,
++ 0
++};
++
++static void print_snr_cfg(struct ci_sensor_config *cfg)
++{
++ dprintk(2, "bus width = %x", cfg->bus_width);
++ dprintk(2, "mode = %x", cfg->mode);
++ dprintk(2, "field_inv = %x", cfg->field_inv);
++ dprintk(2, "field_sel = %x", cfg->field_sel);
++ dprintk(2, "ycseq = %x", cfg->ycseq);
++ dprintk(2, "conv422 = %x", cfg->conv422);
++ dprintk(2, "bpat = %x", cfg->bpat);
++ dprintk(2, "hpol = %x", cfg->hpol);
++ dprintk(2, "vpol = %x", cfg->vpol);
++ dprintk(2, "edge = %x", cfg->edge);
++ dprintk(2, "bls = %x", cfg->bls);
++ dprintk(2, "gamma = %x", cfg->gamma);
++ dprintk(2, "cconv = %x", cfg->cconv);
++ dprintk(2, "res = %x", cfg->res);
++ dprintk(2, "blc = %x", cfg->blc);
++ dprintk(2, "agc = %x", cfg->agc);
++ dprintk(2, "awb = %x", cfg->awb);
++ dprintk(2, "aec = %x", cfg->aec);
++ dprintk(2, "cie_profile = %x", cfg->cie_profile);
++ dprintk(2, "flicker_freq = %x", cfg->flicker_freq);
++ dprintk(2, "smia_mode = %x", cfg->smia_mode);
++ dprintk(2, "mipi_mode = %x", cfg->mipi_mode);
++ dprintk(2, "type = %x", cfg->type);
++ dprintk(2, "name = %s", cfg->name);
++}
++
++static int mrst_isp_defcfg_all_load(struct ci_isp_config *isp_config)
++{
++
++ DBG_entering;
++
++ /* demosaic mode */
++ isp_config->demosaic_mode = CI_ISP_DEMOSAIC_ENHANCED;
++
++ /* bpc */
++ isp_config->bpc_cfg.bp_corr_type = CI_ISP_BP_CORR_DIRECT;
++ isp_config->bpc_cfg.bp_corr_rep = CI_ISP_BP_CORR_REP_NB;
++ isp_config->bpc_cfg.bp_corr_mode = CI_ISP_BP_CORR_HOT_DEAD_EN;
++ isp_config->bpc_cfg.bp_abs_hot_thres = 496;
++ isp_config->bpc_cfg.bp_abs_dead_thres = 20;
++ isp_config->bpc_cfg.bp_dev_hot_thres = 328;
++ isp_config->bpc_cfg.bp_dev_dead_thres = 328;
++ isp_config->bpd_cfg.bp_dead_thres = 1;
++
++ /* WB */
++ isp_config->wb_config.mrv_wb_mode = CI_ISP_AWB_AUTO;
++ isp_config->wb_config.mrv_wb_sub_mode = CI_ISP_AWB_AUTO_ON;
++ isp_config->wb_config.awb_pca_damping = 16;
++ isp_config->wb_config.awb_prior_exp_damping = 12;
++ isp_config->wb_config.awb_pca_push_damping = 16;
++ isp_config->wb_config.awb_prior_exp_push_damping = 12;
++ isp_config->wb_config.awb_auto_max_y = 254;
++ isp_config->wb_config.awb_push_max_y = 250;
++ isp_config->wb_config.awb_measure_max_y = 200;
++ isp_config->wb_config.awb_underexp_det = 10;
++ isp_config->wb_config.awb_push_underexp_det = 170;
++
++ /* CAC */
++ isp_config->cac_config.hsize = 2048;
++ isp_config->cac_config.vsize = 1536;
++ isp_config->cac_config.hcenter_offset = 0;
++ isp_config->cac_config.vcenter_offset = 0;
++ isp_config->cac_config.hclip_mode = 1;
++ isp_config->cac_config.vclip_mode = 2;
++ isp_config->cac_config.ablue = 24;
++ isp_config->cac_config.ared = 489;
++ isp_config->cac_config.bblue = 450;
++ isp_config->cac_config.bred = 53;
++ isp_config->cac_config.cblue = 40;
++ isp_config->cac_config.cred = 479;
++ isp_config->cac_config.aspect_ratio = 0.000000;
++
++ /* BLS */
++ isp_config->bls_cfg.enable_automatic = 0;
++ isp_config->bls_cfg.disable_h = 0;
++ isp_config->bls_cfg.disable_v = 0;
++ isp_config->bls_cfg.isp_bls_window1.enable_window = 0;
++ isp_config->bls_cfg.isp_bls_window1.start_h = 0;
++ isp_config->bls_cfg.isp_bls_window1.stop_h = 0;
++ isp_config->bls_cfg.isp_bls_window1.start_v = 0;
++ isp_config->bls_cfg.isp_bls_window1.stop_v = 0;
++ isp_config->bls_cfg.isp_bls_window2.enable_window = 0;
++ isp_config->bls_cfg.isp_bls_window2.start_h = 0;
++ isp_config->bls_cfg.isp_bls_window2.stop_h = 0;
++ isp_config->bls_cfg.isp_bls_window2.start_v = 0;
++ isp_config->bls_cfg.isp_bls_window2.stop_v = 0;
++ isp_config->bls_cfg.bls_samples = 5;
++ isp_config->bls_cfg.bls_subtraction.fixed_a = 0x100;
++ isp_config->bls_cfg.bls_subtraction.fixed_b = 0x100;
++ isp_config->bls_cfg.bls_subtraction.fixed_c = 0x100;
++ isp_config->bls_cfg.bls_subtraction.fixed_d = 0x100;
++
++ /* AF */
++ isp_config->af_cfg.wnd_pos_a.hoffs = 874;
++ isp_config->af_cfg.wnd_pos_a.voffs = 618;
++ isp_config->af_cfg.wnd_pos_a.hsize = 300;
++ isp_config->af_cfg.wnd_pos_a.vsize = 300;
++ isp_config->af_cfg.wnd_pos_b.hoffs = 0;
++ isp_config->af_cfg.wnd_pos_b.voffs = 0;
++ isp_config->af_cfg.wnd_pos_b.hsize = 0;
++ isp_config->af_cfg.wnd_pos_b.vsize = 0;
++ isp_config->af_cfg.wnd_pos_c.hoffs = 0;
++ isp_config->af_cfg.wnd_pos_c.voffs = 0;
++ isp_config->af_cfg.wnd_pos_c.hsize = 0;
++ isp_config->af_cfg.wnd_pos_c.vsize = 0;
++ isp_config->af_cfg.threshold = 0x00000000;
++
++ /* color */
++ isp_config->color.contrast = 128;
++ isp_config->color.brightness = 0;
++ isp_config->color.saturation = 128;
++ isp_config->color.hue = 0;
++
++ /* Img Effect */
++ isp_config->img_eff_cfg.mode = CI_ISP_IE_MODE_OFF;
++ isp_config->img_eff_cfg.color_sel = 4;
++ isp_config->img_eff_cfg.color_thres = 128;
++ isp_config->img_eff_cfg.tint_cb = 108;
++ isp_config->img_eff_cfg.tint_cr = 141;
++ isp_config->img_eff_cfg.mat_emboss.coeff_11 = 2;
++ isp_config->img_eff_cfg.mat_emboss.coeff_12 = 1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_13 = 0;
++ isp_config->img_eff_cfg.mat_emboss.coeff_21 = 1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_22 = 0;
++ isp_config->img_eff_cfg.mat_emboss.coeff_23 = -1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_31 = 0;
++ isp_config->img_eff_cfg.mat_emboss.coeff_32 = -1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_33 = -2;
++ isp_config->img_eff_cfg.mat_sketch.coeff_11 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_12 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_13 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_21 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_22 = 8;
++ isp_config->img_eff_cfg.mat_sketch.coeff_23 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_31 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_32 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_33 = -1;
++
++ /* Framefun */
++ isp_config->flags.bls = 0;
++ isp_config->flags.lsc = 0;
++ isp_config->flags.bpc = 0;
++ isp_config->flags.awb = 0;
++ isp_config->flags.aec = 0;
++ isp_config->flags.af = 0;
++ isp_config->flags.cp = 0;
++ isp_config->flags.gamma = 0;
++ isp_config->flags.cconv = 0;
++ isp_config->flags.demosaic = 0;
++ isp_config->flags.gamma2 = 0;
++ isp_config->flags.isp_filters = 0;
++ isp_config->flags.cac = 0;
++ isp_config->flags.cconv_basic = 0;
++ isp_config->demosaic_th = 4;
++
++ isp_config->view_finder.flags = VFFLAG_HWRGB;
++
++ isp_config->afm_mode = 1;
++ isp_config->filter_level_noise_reduc = 4;
++ isp_config->filter_level_sharp = 4;
++
++ isp_config->jpeg_enc_ratio = 1;
++
++ DBG_leaving;
++ return 0;
++}
++
++static void mrst_isp_update_marvinvfaddr(struct mrst_isp_device *isp,
++ u32 buffer_base,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct ci_isp_mi_path_conf isp_mi_path_conf;
++ struct ci_isp_mi_path_conf isp_sf_mi_path_conf;
++ static struct v4l2_jpg_review_buffer *jpg_review;
++ u32 bufsize = 0;
++ u32 w;
++ u32 h;
++
++ jpg_review = &isp->sys_conf.jpg_review;
++ memset(&isp_mi_path_conf, 0, sizeof(struct ci_isp_mi_path_conf));
++ memset(&isp_sf_mi_path_conf, 0, sizeof(struct ci_isp_mi_path_conf));
++
++ w = isp_mi_path_conf.llength = isp->bufwidth;
++ h = isp_mi_path_conf.ypic_height = isp->bufheight;
++ isp_mi_path_conf.ypic_width = isp->bufwidth;
++
++ /*XXX Zheng: disable jpg review for MIPI sensor */
++ /*if ((isp->sys_conf.isi_config)->mipi_mode == SENSOR_MIPI_MODE_RAW_10)
++ isp->sys_conf.jpg_review_enable = 0;
++ */
++
++ if (isp->sys_conf.jpg_review_enable) {
++
++ /* for self path, JPEG review */
++ isp_sf_mi_path_conf.ypic_width = jpg_review->width;
++ isp_sf_mi_path_conf.llength = jpg_review->width;
++ isp_sf_mi_path_conf.ypic_height = jpg_review->height;
++
++ bufsize = jpg_review->width * jpg_review->height;
++
++ /* buffer size in bytes */
++ if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV420
++ || jpg_review->pix_fmt == V4L2_PIX_FMT_YVU420) {
++
++ dprintk(3, "VF yuv420 fmt");
++ isp_sf_mi_path_conf.ybuffer.size = bufsize;
++ isp_sf_mi_path_conf.cb_buffer.size = bufsize/4;
++ isp_sf_mi_path_conf.cr_buffer.size = bufsize/4;
++
++ } else if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV422P) {
++
++ dprintk(3, "VF yuv422 fmt");
++ isp_sf_mi_path_conf.ybuffer.size = bufsize;
++ isp_sf_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_sf_mi_path_conf.cr_buffer.size = bufsize/2;
++
++ } else if (jpg_review->pix_fmt == V4L2_PIX_FMT_NV12) {
++
++ dprintk(3, "VF nv12 fmt");
++ isp_sf_mi_path_conf.ybuffer.size = bufsize;
++ isp_sf_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_sf_mi_path_conf.cr_buffer.size = 0;
++
++ } else {
++ printk(KERN_ERR "mrstisp: no support jpg review fmt\n");
++ }
++
++ /* buffer address */
++ if (isp_sf_mi_path_conf.ybuffer.size != 0) {
++ isp_sf_mi_path_conf.ybuffer.pucbuffer =
++ (u8 *)(unsigned long)
++ isp->mb1 + isp->mb1_size - 640*480*2;
++ }
++
++ if (isp_sf_mi_path_conf.cb_buffer.size != 0) {
++ isp_sf_mi_path_conf.cb_buffer.pucbuffer =
++ isp_sf_mi_path_conf.ybuffer.pucbuffer +
++ isp_sf_mi_path_conf.ybuffer.size;
++ }
++
++ if (isp_sf_mi_path_conf.cr_buffer.size != 0) {
++ isp_sf_mi_path_conf.cr_buffer.pucbuffer =
++ isp_sf_mi_path_conf.cb_buffer.pucbuffer +
++ isp_sf_mi_path_conf.cb_buffer.size;
++ }
++
++ if (jpg_review->pix_fmt == V4L2_PIX_FMT_YVU420) {
++ isp_sf_mi_path_conf.cr_buffer.pucbuffer =
++ isp_sf_mi_path_conf.ybuffer.pucbuffer +
++ isp_sf_mi_path_conf.ybuffer.size;
++ isp_sf_mi_path_conf.cb_buffer.pucbuffer =
++ isp_sf_mi_path_conf.cr_buffer.pucbuffer +
++ isp_sf_mi_path_conf.cr_buffer.size;
++ }
++
++ }
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YVU420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YUV422P ||
++ isp->pixelformat == V4L2_PIX_FMT_NV12) {
++ bufsize = w*h;
++ } else
++ bufsize = isp->frame_size;
++
++ /* buffer size in bytes */
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420
++ || isp->pixelformat == V4L2_PIX_FMT_YVU420) {
++
++ dprintk(3, "yuv420 fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = bufsize/4;
++ isp_mi_path_conf.cr_buffer.size = bufsize/4;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_YUV422P) {
++
++ dprintk(3, "yuv422 fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_mi_path_conf.cr_buffer.size = bufsize/2;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_NV12) {
++
++ dprintk(3, "nv12 fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_mi_path_conf.cr_buffer.size = 0;
++ } else {
++
++ dprintk(3, "jpeg and rgb fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = 0;
++ isp_mi_path_conf.cr_buffer.size = 0;
++ }
++
++ /* buffer address */
++ if (isp_mi_path_conf.ybuffer.size != 0) {
++ isp_mi_path_conf.ybuffer.pucbuffer =
++ (u8 *)(unsigned long) buffer_base;
++ }
++
++ if (isp_mi_path_conf.cb_buffer.size != 0) {
++ isp_mi_path_conf.cb_buffer.pucbuffer =
++ isp_mi_path_conf.ybuffer.pucbuffer +
++ isp_mi_path_conf.ybuffer.size;
++ }
++
++ if (isp_mi_path_conf.cr_buffer.size != 0) {
++ isp_mi_path_conf.cr_buffer.pucbuffer =
++ isp_mi_path_conf.cb_buffer.pucbuffer +
++ isp_mi_path_conf.cb_buffer.size;
++ }
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YVU420) {
++ isp_mi_path_conf.cr_buffer.pucbuffer =
++ isp_mi_path_conf.ybuffer.pucbuffer +
++ isp_mi_path_conf.ybuffer.size;
++ isp_mi_path_conf.cb_buffer.pucbuffer =
++ isp_mi_path_conf.cr_buffer.pucbuffer +
++ isp_mi_path_conf.cr_buffer.size;
++ }
++
++ if (isp->sys_conf.isp_cfg.view_finder.flags & VFFLAG_USE_MAINPATH) {
++ ci_isp_mif_set_main_buffer(&isp_mi_path_conf, update_time);
++ if (isp->pixelformat == V4L2_PIX_FMT_JPEG)
++ if (isp->sys_conf.jpg_review_enable)
++ ci_isp_mif_set_self_buffer(
++ &isp_sf_mi_path_conf, update_time);
++ } else {
++ ci_isp_mif_set_self_buffer(&isp_mi_path_conf, update_time);
++ }
++}
++
++static int mrst_isp_setup_viewfinder_path(struct mrst_isp_device *isp,
++ struct ci_sensor_config *isi_config,
++ int zoom)
++{
++ int error = CI_STATUS_SUCCESS;
++ struct ci_isp_datapath_desc dp_main;
++ struct ci_isp_datapath_desc dp_self;
++ struct ci_isp_rect self_rect;
++ u16 isi_hsize;
++ u16 isi_vsize;
++ int jpe_scale;
++ struct ci_pl_system_config *sys_conf = &isp->sys_conf;
++ struct ci_isp_config *config = &sys_conf->isp_cfg;
++ struct v4l2_jpg_review_buffer *jpg_review = &sys_conf->jpg_review;
++ u32 dp_mode;
++
++ DBG_entering;
++
++ if (sys_conf->isp_cfg.flags.ycbcr_full_range)
++ jpe_scale = false;
++ else
++ jpe_scale = true;
++
++ memset(&dp_main, 0, sizeof(struct ci_isp_datapath_desc));
++ memset(&dp_self, 0, sizeof(struct ci_isp_datapath_desc));
++
++ self_rect.x = 0;
++ self_rect.y = 0;
++ self_rect.w = isp->bufwidth; /* 640 */
++ self_rect.h = isp->bufheight; /* 480 */
++
++ if (isp->pixelformat == V4L2_PIX_FMT_JPEG) {
++
++ dprintk(1, "jpeg fmt");
++
++ dp_main.flags = CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPJPEG;
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ dp_main.out_w = (u16) isp->bufwidth;
++ dp_main.out_h = (u16) isp->bufheight;
++
++ if (isp->sys_conf.jpg_review_enable) {
++
++ dprintk(1, "jpg_review enabled in VF");
++
++ self_rect.w = jpg_review->width;
++ self_rect.h = jpg_review->height;
++
++ dp_self.flags = (CI_ISP_DPD_ENABLE
++ | CI_ISP_DPD_MODE_ISPYC);
++ if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV420 ||
++ jpg_review->pix_fmt == V4L2_PIX_FMT_YVU420)
++ dp_self.flags |= CI_ISP_DPD_YUV_420
++ | CI_ISP_DPD_CSS_V2;
++ else if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV422P)
++ dp_self.flags |= CI_ISP_DPD_YUV_422;
++ else if (jpg_review->pix_fmt == V4L2_PIX_FMT_NV12)
++ dp_self.flags |= CI_ISP_DPD_YUV_NV12
++ | CI_ISP_DPD_CSS_V2;
++ else if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUYV)
++ dp_self.flags |= CI_ISP_DPD_YUV_YUYV;
++
++ dprintk(1, "dp_self.flags is 0x%x", dp_self.flags);
++ }
++
++ } else if (isp->pixelformat == INTEL_PIX_FMT_RAW08) {
++
++ dp_main.flags = CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPRAW;
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ /*just take the output of the sensor without any resizing*/
++ dp_main.flags |= CI_ISP_DPD_NORESIZE;
++ (void)ci_sensor_res2size(isi_config->res,
++ &(dp_main.out_w), &(dp_main.out_h));
++
++ dprintk(1, "RAW08 dp_main.flags is 0x%x", dp_main.flags);
++
++ } else if (isp->pixelformat == INTEL_PIX_FMT_RAW10
++ || isp->pixelformat == INTEL_PIX_FMT_RAW12) {
++
++ dp_main.flags = (CI_ISP_DPD_ENABLE
++ | CI_ISP_DPD_MODE_ISPRAW_16B);
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ /*just take the output of the sensor without any resizing*/
++ dp_main.flags |= CI_ISP_DPD_NORESIZE;
++ (void)ci_sensor_res2size(isi_config->res,
++ &(dp_main.out_w), &(dp_main.out_h));
++
++ dprintk(1, "RAW10 dp_main.flags is 0x%x", dp_main.flags);
++
++ } /*else if (isp->bufwidth >= 640 && isp->bufheight >= 480) {*/
++ else if (isp->bufwidth >= 32 && isp->bufheight >= 16) {
++
++ dp_main.flags = (CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPYC);
++ dp_main.out_w = (u16) isp->bufwidth;
++ dp_main.out_h = (u16) isp->bufheight;
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YVU420)
++ dp_main.flags |= CI_ISP_DPD_YUV_420 | CI_ISP_DPD_CSS_V2;
++ else if (isp->pixelformat == V4L2_PIX_FMT_YUV422P)
++ dp_main.flags |= CI_ISP_DPD_YUV_422;
++ else if (isp->pixelformat == V4L2_PIX_FMT_NV12) {
++ /* to use crop set crop_flag first */
++ dp_main.flags |= CI_ISP_DPD_YUV_NV12;
++ if (!crop_flag)
++ dp_main.flags |= CI_ISP_DPD_CSS_V2;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_YUYV)
++ dp_main.flags |= CI_ISP_DPD_YUV_YUYV;
++
++ dprintk(1, "YUV dp_main.flags is 0x%x", dp_main.flags);
++
++ } /* else if (isp->bufwidth <= 640 && isp->bufheight <= 480) {
++
++ dp_self.flags = (CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPYC);
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YVU420)
++ dp_self.flags |= CI_ISP_DPD_YUV_420 | CI_ISP_DPD_CSS_V2;
++ else if (isp->pixelformat == V4L2_PIX_FMT_YUV422P)
++ dp_self.flags |= CI_ISP_DPD_YUV_422;
++ else if (isp->pixelformat == V4L2_PIX_FMT_NV12)
++ dp_self.flags |= CI_ISP_DPD_YUV_NV12
++ | CI_ISP_DPD_CSS_V2;
++ else if (isp->pixelformat == V4L2_PIX_FMT_YUYV)
++ dp_self.flags |= CI_ISP_DPD_YUV_YUYV;
++ else if (isp->pixelformat == V4L2_PIX_FMT_RGB565)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_565;
++ else if (isp->pixelformat == V4L2_PIX_FMT_BGR32)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_888;
++
++ dprintk(1, "YUV dp_self.flags is 0x%x", dp_self.flags);
++ }
++ */
++
++ dprintk(1, "sensor_res = %x", isi_config->res);
++
++ (void)ci_sensor_res2size(isi_config->res, &isi_hsize, &isi_vsize);
++ dprintk(1, "self path: w:%d, h:%d; sensor: w:%d, h:%d",
++ self_rect.w, self_rect.h, isi_hsize, isi_vsize);
++ dprintk(1, "main path: out_w:%d, out_h:%d ",
++ dp_main.out_w, dp_main.out_h);
++
++ /* no stretching/squeezing */
++ if (dp_self.flags && CI_ISP_DPD_ENABLE)
++ dp_self.flags |= CI_ISP_DPD_KEEPRATIO;
++ else
++ dp_main.flags |= CI_ISP_DPD_KEEPRATIO;
++
++ /* prepare datapath, 640x480, can changed to the bufsize */
++ dp_self.out_w = (u16) self_rect.w;
++ dp_self.out_h = (u16) self_rect.h;
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_HWRGB) {
++ /* YCbCr to RGB conversion in hardware */
++ if (isp->pixelformat == V4L2_PIX_FMT_RGB565)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_565;
++ if (isp->pixelformat == V4L2_PIX_FMT_BGR32)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_888;
++ }
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_MIRROR)
++ dp_self.flags |= CI_ISP_DPD_H_FLIP;
++
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_V_FLIP)
++ dp_self.flags |= CI_ISP_DPD_V_FLIP;
++
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_ROT90_CCW)
++ dp_self.flags |= CI_ISP_DPD_90DEG_CCW;
++
++ /* setup self & main path with zoom */
++ if (zoom < 0)
++ zoom = sys_conf->isp_cfg.view_finder.zoom;
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_USE_MAINPATH) {
++ /* For RAW snapshots, we have to bypass the ISP too */
++ dp_mode = dp_main.flags & CI_ISP_DPD_MODE_MASK;
++ if ((dp_mode == CI_ISP_DPD_MODE_ISPRAW) ||
++ (dp_mode == CI_ISP_DPD_MODE_ISPRAW_16B)) {
++ struct ci_sensor_config isi_conf;
++ /* isi_conf = *sys_conf->isi_config; */
++ isi_conf = *isi_config;
++ isi_conf.mode = SENSOR_MODE_PICT;
++ error = ci_isp_set_input_aquisition(&isi_conf);
++ if (error != CI_STATUS_SUCCESS)
++ eprintk("33");
++ }
++ }
++ /* to use crop mode, set crop_flag */
++ if (crop_flag)
++ dp_main.flags |= CI_ISP_DPD_NORESIZE;
++
++ error = ci_datapath_isp(sys_conf, isi_config, &dp_main, &dp_self, zoom);
++ if (error != CI_STATUS_SUCCESS) {
++ printk(KERN_ERR "mrstisp: failed to setup marvins datapath\n");
++ return error;
++ }
++
++ DBG_leaving;
++ return error;
++}
++
++static int mrst_isp_init_mrv_image_effects(struct ci_pl_system_config *sys_conf,
++ int enable)
++{
++ int res;
++
++ DBG_entering;
++
++ if (enable && sys_conf->isp_cfg.img_eff_cfg.mode
++ != CI_ISP_IE_MODE_OFF) {
++ res = ci_isp_ie_set_config(&(sys_conf->isp_cfg.img_eff_cfg));
++ if (res != CI_STATUS_SUCCESS)
++ printk(KERN_ERR "mrstisp: error setting ie config\n");
++ } else {
++ (void)ci_isp_ie_set_config(NULL);
++ res = CI_STATUS_SUCCESS;
++ }
++
++ DBG_leaving;
++ return res;
++}
++
++static int mrst_isp_init_mrvisp_lensshade(struct ci_pl_system_config *sys_conf,
++ int enable)
++{
++ if (enable) {
++ ci_isp_set_ls_correction(&sys_conf->isp_cfg.lsc_cfg);
++ ci_isp_ls_correction_on_off(1);
++ } else {
++ ci_isp_ls_correction_on_off(0);
++ }
++ return CI_STATUS_SUCCESS;
++}
++
++static int mrst_isp_init_mrvisp_badpixel(const struct ci_pl_system_config
++ *sys_conf, int enable)
++{
++ if ((enable) && (sys_conf->isp_cfg.flags.bpc)) {
++ (void)ci_bp_init(&sys_conf->isp_cfg.bpc_cfg,
++ &sys_conf->isp_cfg.bpd_cfg);
++ } else {
++ (void)ci_bp_end(&sys_conf->isp_cfg.bpc_cfg);
++ (void)ci_isp_set_bp_correction(NULL);
++ (void)ci_isp_set_bp_detection(NULL);
++ }
++ return CI_STATUS_SUCCESS;
++}
++
++static int mrst_isp_init_mrv_ispfilter(const struct ci_pl_system_config
++ *sys_conf, int enable)
++{
++ int res;
++
++ DBG_entering;
++
++ if ((enable) && (sys_conf->isp_cfg.flags.isp_filters)) {
++ ci_isp_activate_filter(true);
++ res = ci_isp_set_filter_params(sys_conf->isp_cfg.
++ filter_level_noise_reduc,
++ sys_conf->isp_cfg.
++ filter_level_sharp);
++ if (res != CI_STATUS_SUCCESS)
++ printk(KERN_ERR "mrstisp: error set filter param\n");
++ } else {
++ ci_isp_activate_filter(false);
++ res = CI_STATUS_SUCCESS;
++ }
++
++ DBG_leaving;
++ return res;
++}
++
++static int mrst_isp_init_mrvisp_cac(const struct ci_pl_system_config *sys_conf,
++ int enable)
++{
++ return 0;
++}
++
++static int mrst_isp_initbls(const struct ci_pl_system_config *sys_conf)
++{
++ struct ci_isp_bls_config *bls_config =
++ (struct ci_isp_bls_config *)&sys_conf->isp_cfg.bls_cfg;
++ return ci_isp_bls_set_config(bls_config);
++}
++
++static int mrst_isp_dp_init(struct ci_pl_system_config *sys_conf,
++ struct ci_sensor_config *isi_config)
++{
++ int error;
++ u8 words_per_pixel;
++
++ DBG_entering;
++
++ /* base initialisation of Marvin */
++ ci_isp_init();
++
++ /* setup input acquisition according to image sensor settings */
++ print_snr_cfg(isi_config);
++ error = ci_isp_set_input_aquisition(isi_config);
++ if (error) {
++ printk(KERN_ERR "mrstisp: error setting input acquisition\n");
++ return error;
++ }
++
++ /* setup functional blocks for Bayer pattern processing */
++ if (ci_isp_select_path(isi_config, &words_per_pixel)
++ == CI_ISP_PATH_BAYER) {
++
++ /* black level */
++ if (sys_conf->isp_cfg.flags.bls) {
++ error = mrst_isp_initbls(sys_conf);
++ if (error != CI_STATUS_SUCCESS) {
++ printk(KERN_ERR "mrstisp: error set bls\n");
++ return error;
++ }
++ } else {
++ ci_isp_bls_set_config(NULL);
++ }
++
++ /* gamma */
++ if (sys_conf->isp_cfg.flags.gamma2) {
++ dprintk(1, "setting gamma 2 ");
++ ci_isp_set_gamma2(&g45_th20_b5);
++ } else {
++ dprintk(1, "no setting gamma 2 ");
++ ci_isp_set_gamma2(NULL);
++ }
++
++ /* demosaic */
++ ci_isp_set_demosaic(sys_conf->isp_cfg.demosaic_mode,
++ sys_conf->isp_cfg.demosaic_th);
++
++ /* color convertion */
++ if (sys_conf->isp_cfg.flags.cconv) {
++ if (!sys_conf->isp_cfg.flags.cconv_basic) {
++ mrst_isp_set_color_conversion_ex();
++ /* set color converstion skipped by xiaolin,
++ * to be done in libci */
++ if (error != CI_STATUS_SUCCESS) {
++ printk(KERN_ERR "mrstisp: error set"
++ " color conversion\n");
++ return error;
++ }
++ }
++ }
++
++ /* af setting */
++ if (sys_conf->isp_cfg.flags.af)
++ ci_isp_set_auto_focus(&sys_conf->isp_cfg.af_cfg);
++ else
++ ci_isp_set_auto_focus(NULL);
++
++ /* filter */
++ mrst_isp_init_mrv_ispfilter(sys_conf, true);
++
++ /* cac */
++ mrst_isp_init_mrvisp_cac(sys_conf, true);
++ }
++
++ /*
++ * disable color processing for now (will be set under user control
++ * in the main loop)
++ */
++ ci_isp_col_set_color_processing(NULL);
++
++ /* configure image effects */
++ mrst_isp_init_mrv_image_effects(sys_conf, true);
++
++ /* configure lens shading correction */
++ if (strcmp(isi_config->name, "s5k4e1") == 0
++ && (isi_config->res == SENSOR_RES_720P
++ || isi_config->res == SENSOR_RES_QXGA_PLUS)) {
++ dprintk(1, "enabling lsc for kmot 720p and qsxga\n");
++ mrst_isp_init_mrvisp_lensshade(sys_conf, true);
++ } else
++ mrst_isp_init_mrvisp_lensshade(sys_conf,
++ sys_conf->isp_cfg.flags.lsc);
++
++ /* configure bad pixel detection/correction */
++ mrst_isp_init_mrvisp_badpixel(sys_conf, true);
++
++ DBG_leaving;
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_jpe_encode(struct mrst_isp_device *intel,
++ enum ci_isp_conf_update_time update_time,
++ enum ci_isp_jpe_enc_mode mrv_jpe_encMode)
++{
++ u32 mipi_data_id = 1;
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++
++ ci_isp_jpe_prep_enc(mrv_jpe_encMode);
++
++ if (to_sensor_config(intel->sensor_curr)->mipi_mode) {
++ ci_isp_start(1, update_time);
++ v4l2_subdev_call(intel->sensor_curr, video, s_stream, 1);
++ if (mipi_flag)
++ while (mipi_data_id)
++ mipi_data_id =
++ REG_READ_EX(mrv_reg->mipi_cur_data_id);
++ mipi_flag = 0;
++
++ } else
++ ci_isp_start(1, update_time);
++
++ return ci_isp_jpe_wait_for_encode_done(intel);
++}
++
++/* capture one frame */
++u32 ci_jpe_capture(struct mrst_isp_device *isp,
++ enum ci_isp_conf_update_time update_time)
++{
++ int retval = CI_STATUS_SUCCESS;
++
++ /* generate header */
++ retval = ci_isp_jpe_generate_header(isp, MRV_JPE_HEADER_MODE_JFIF);
++ if (retval != CI_STATUS_SUCCESS)
++ return 0;
++
++ /* now encode JPEG */
++ retval = ci_jpe_encode(isp, update_time, CI_ISP_JPE_SINGLE_SHOT);
++ if (retval != CI_STATUS_SUCCESS)
++ return 0;
++
++ /* return ci_isp_mif_get_byte_cnt(); */
++ return 0;
++}
++#ifdef ISP_CLOCK_GATING
++static void prevent_sleep_func(unsigned long data)
++{
++ struct prevent_isp_sleep_struct * s = (struct prevent_isp_sleep_struct *)data;
++ u32 reg1, reg2, reg3;
++ bool btimerdel = false;
++ struct isp_register *mrv_reg = (struct isp_register *) s->base;
++ s->timeout--;
++ if (s->timeout <= 0) {
++ s->timeout = 0x3FFFFFFF; /*didnt' use the timeout method right now */
++ }
++
++ if (s->mipi) {
++ reg1 = REG_READ_EX(mrv_reg->mipi_cur_data_id);/*MIPI*/
++ reg2 = REG_READ_EX(mrv_reg->isp_ris); /*ISP*/
++ reg3 = REG_READ_EX(mrv_reg->mi_ris); /*MI*/
++ REG_READ_EX(mrv_reg->c_proc_brightness); /*CPROC*/
++ REG_READ_EX(mrv_reg->img_eff_ctrl); /*IE*/
++ REG_READ_EX(mrv_reg->mrsz_ctrl); /*MRSZ*/
++ REG_READ_EX(mrv_reg->srsz_ctrl); /*SRSZ*/
++ REG_READ_EX(mrv_reg->super_imp_ctrl); /*SI*/
++ REG_READ_EX(mrv_reg->vi_ccl); /*CCL*/
++ REG_READ_EX(mrv_reg->vi_iccl); /*ICCL*/
++ REG_READ_EX(mrv_reg->jpe_encode); /*JPEG*/
++ if ((0 >= s->timeout) || (prevent_sleep_flag == 0)) {
++ btimerdel = true;
++ pr_err("JW:MIPI reg1:%x,reg2:%x,reg3:%x, timeout is 0x%x, prevent_sleep=%d\n",
++ reg1, reg2, reg3, s->timeout, prevent_sleep_flag);
++ }
++ } else {
++ /*try to keep ISP alive through polling registers, page 141, figure 55*/
++ reg1 = REG_READ_EX(mrv_reg->mipi_cur_data_id); /*MIPI*/
++ reg2 = REG_READ_EX(mrv_reg->isp_ris); /*ISP*/
++ reg3 = REG_READ_EX(mrv_reg->mi_ris); /*MI*/
++ REG_READ_EX(mrv_reg->c_proc_brightness); /*CPROC*/
++ REG_READ_EX(mrv_reg->img_eff_ctrl); /*IE*/
++ REG_READ_EX(mrv_reg->mrsz_ctrl); /*MRSZ*/
++ REG_READ_EX(mrv_reg->srsz_ctrl); /*SRSZ*/
++ REG_READ_EX(mrv_reg->super_imp_ctrl); /*SI*/
++ REG_READ_EX(mrv_reg->vi_ccl); /*CCL*/
++ REG_READ_EX(mrv_reg->vi_iccl); /*ICCL*/
++ REG_READ_EX(mrv_reg->jpe_encode); /*JPEG*/
++ if ((0 >= s->timeout) || (prevent_sleep_flag == 0)) {
++ btimerdel = true;
++ pr_err("JW: Parallel reg1:%x,reg2:%x,reg3:%x, timeout is 0x%x, prevent_sleep=%d\n",
++ reg1, reg2, reg3, s->timeout, prevent_sleep_flag);
++ }
++ }
++ if (btimerdel) {
++ del_timer(&prevent_sleep_timer);
++ btimerexist = false;
++ pr_err("JW:del timer in timer function,timeout=0x%x,prevent_sleep=0x%x\n",
++ s->timeout, prevent_sleep_flag);
++ } else {
++ add_timer(&prevent_sleep_timer);
++ btimerexist = true;
++ }
++}
++
++static void setup_prevent_sleep_timer(struct mrst_isp_device *isp, struct isp_register *mrv_reg)
++{
++ /*for VGA camera, its FPS, at worst case, is ~5, so, the ISP may have a 200ms clock gating.
++ * assert the prevent flag in ISR will make the ISP clock gating within low FPS
++ */
++ if ((prevent_sleep_flag) && (btimerexist == false)) {
++ prevent_isp_sleep.base = (u32) mrv_reg;
++ pr_info("JW: add timer to polling\n");
++ prevent_isp_sleep.mipi = to_sensor_config(isp->sensor_curr)->mipi_mode;
++ prevent_isp_sleep.timeout = 0x3FFFFFFF; /*infinate loop */
++
++ prevent_sleep_timer.expires = jiffies + msecs_to_jiffies(20);
++ prevent_sleep_timer.data = (unsigned long) &prevent_isp_sleep;
++ mod_timer(&prevent_sleep_timer, prevent_sleep_timer.expires);
++ btimerexist = true;
++ }
++ return;
++}
++#endif
++
++static int mrst_ci_capture(struct mrst_isp_device *isp)
++{
++ u32 bufbase;
++ u32 mipi_data_id = 1;
++ struct videobuf_buffer *vb;
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++
++ bufbase = videobuf_to_dma_contig(isp->active);
++ mrst_isp_update_marvinvfaddr(isp, bufbase, CI_ISP_CFG_UPDATE_IMMEDIATE);
++ ci_isp_mif_reset_offsets(CI_ISP_CFG_UPDATE_IMMEDIATE);
++
++ ci_isp_reset_interrupt_status();
++ mrst_isp_enable_interrupt(isp);
++
++ if (isp->pixelformat == V4L2_PIX_FMT_JPEG) {
++ mrst_isp_disable_interrupt(isp);
++ ci_isp_jpe_init_ex(isp->bufwidth, isp->bufheight,
++ isp->sys_conf.isp_cfg.jpeg_enc_ratio,
++ true);
++ ci_jpe_capture(isp, CI_ISP_CFG_UPDATE_FRAME_SYNC);
++
++ vb = isp->active;
++ vb->size = ci_isp_mif_get_byte_cnt();
++ vb->state = VIDEOBUF_DONE;
++ do_gettimeofday(&vb->ts);
++ vb->field_count++;
++ wake_up(&vb->done);
++ isp->active = NULL;
++
++ dprintk(2, "countcount = %lx", vb->size);
++ } else if (isp->pixelformat == INTEL_PIX_FMT_RAW08
++ || isp->pixelformat == INTEL_PIX_FMT_RAW10
++ || isp->pixelformat == INTEL_PIX_FMT_RAW12) {
++ mrst_isp_disable_interrupt(isp);
++ ci_isp_start(1, CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ ci_isp_wait_for_frame_end(isp);
++
++ /* update captured frame status */
++ vb = isp->active;
++ /* vb->size = ci_isp_mif_get_byte_cnt(); */
++ vb->state = VIDEOBUF_DONE;
++ do_gettimeofday(&vb->ts);
++ vb->field_count++;
++ wake_up(&vb->done);
++ isp->active = NULL;
++ /* ci_isp_reg_dump_all(); */
++ dprintk(3, "captured index = %d", vb->i);
++ } else if (to_sensor_config(isp->sensor_curr)->mipi_mode) {
++ ci_isp_start(0, CI_ISP_CFG_UPDATE_IMMEDIATE);
++#ifdef ISP_CLOCK_GATING
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ setup_prevent_sleep_timer(isp, mrv_reg);
++#endif
++ if (mipi_flag) {
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 1);
++ if (mrst_platform_id() != MRST_PLATFORM_AAVA_SC) {
++ while (mipi_data_id) {
++ mipi_data_id =
++ REG_READ_EX(mrv_reg->mipi_cur_data_id);
++ dprintk(5, "mipi_cur_data_id = %x",
++ mipi_data_id);
++ }
++ }
++ mipi_flag = 0;
++ }
++ } else {
++ ci_isp_start(0, CI_ISP_CFG_UPDATE_FRAME_SYNC);
++#ifdef ISP_CLOCK_GATING
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ setup_prevent_sleep_timer(isp, mrv_reg);
++#endif
++ }
++
++ return 0;
++}
++
++static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
++ unsigned int *size)
++{
++ struct mrst_isp_fh *fh = vq->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++
++ u32 w = isp->bufwidth;
++ u32 h = isp->bufheight;
++ u32 depth = isp->depth;
++ u32 fourcc = isp->pixelformat;
++
++ if (fourcc == V4L2_PIX_FMT_JPEG) {
++ *size = PAGE_ALIGN((isp->mb1_size
++ - 640*480*2)/(*count)) - PAGE_SIZE;
++ /* *size = PAGE_ALIGN(2 * 1024 * 1024); */
++ } else if (fourcc == INTEL_PIX_FMT_RAW08
++ || fourcc == INTEL_PIX_FMT_RAW10
++ || fourcc == INTEL_PIX_FMT_RAW12) {
++ *size = (w * h * depth)/8;
++ } else {
++ *size = (w * h * depth)/8;
++ }
++
++ isp->frame_size = *size;
++ isp->num_frames = *count;
++
++ if (0 == *count)
++ *count = 3;
++
++ while (*size * *count > isp->mb1_size)
++ (*count)--;
++
++ dprintk(1, "count=%d, size=%d", *count, *size);
++ return 0;
++}
++
++static void free_buffer(struct videobuf_queue *vq, struct mrst_isp_buffer *buf)
++{
++ struct videobuf_buffer *vb = &buf->vb;
++
++ dprintk(1, "(vb=0x%p) baddr = 0x%08lx bsize = %d", vb,
++ vb->baddr, vb->bsize);
++
++ videobuf_dma_contig_free(vq, vb);
++
++ buf->vb.state = VIDEOBUF_NEEDS_INIT;
++ dprintk(1, "free_buffer: freed");
++}
++
++static int buffer_prepare(struct videobuf_queue *vq,
++ struct videobuf_buffer *vb, enum v4l2_field field)
++{
++ struct mrst_isp_fh *fh = vq->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++ struct mrst_isp_buffer *buf = container_of(vb, struct mrst_isp_buffer,
++ vb);
++ int ret;
++
++ if (vb->width != isp->bufwidth || vb->height != isp->bufheight
++ || vb->field != field) {
++ /* buf->fmt = isp->pixelformat; */
++ vb->width = isp->bufwidth;
++ vb->height = isp->bufheight;
++ vb->field = field;
++ vb->state = VIDEOBUF_NEEDS_INIT;
++ }
++
++ vb->size = isp->frame_size;
++
++ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
++ ret = videobuf_iolock(vq, vb, NULL);
++ if (ret)
++ goto fail;
++ vb->state = VIDEOBUF_PREPARED;
++ }
++
++ return 0;
++
++fail:
++ printk(KERN_ERR "mrstisp: error calling videobuf_iolock");
++ free_buffer(vq, buf);
++ return ret;
++}
++
++static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
++{
++ struct mrst_isp_fh *fh = vq->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++ u32 bufbase;
++
++ vb->state = VIDEOBUF_QUEUED;
++ dprintk(1, "buffer %d in buffer querue", vb->i);
++ if (isp->stopflag) {
++ list_add_tail(&vb->queue, &isp->capture);
++ if (isp->active) {
++ /* dprintk(1, "AAAAAAAAAA in flag condition"); */
++ /* isp->active->state = VIDEOBUF_ACTIVE; */
++ /* mrst_isp_to_do_mblk_line = 1; */
++ bufbase = videobuf_to_dma_contig(vb);
++ mrst_isp_update_marvinvfaddr(isp, bufbase, 0);
++ /* mrst_isp_enable_interrupt(isp); */
++ } else {
++ isp->active = vb;
++ mrst_isp_enable_interrupt(isp);
++ /*
++ dprintk(1, "xxxxxxxxx in flag condition");
++ isp->active->state = VIDEOBUF_ACTIVE;
++ mrst_isp_to_do_mblk_line = 1;
++ bufbase = videobuf_to_dma_contig(isp->active);
++ mrst_isp_update_marvinvfaddr(isp, bufbase,
++ CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ */
++ }
++ isp->stopflag = 0;
++ } else if (!isp->active) {
++ dprintk(1, "no active queue");
++ isp->active = vb;
++ isp->active->state = VIDEOBUF_ACTIVE;
++ mrst_isp_to_do_mblk_line = 1;
++ mrst_ci_capture(isp);
++ } else {
++ dprintk(1, "capture to active queue");
++ list_add_tail(&vb->queue, &isp->capture);
++ }
++
++ return;
++}
++
++static void buffer_release(struct videobuf_queue *vq,
++ struct videobuf_buffer *vb)
++{
++ struct mrst_isp_buffer *buf = container_of(vb,
++ struct mrst_isp_buffer, vb);
++ DBG_entering;
++ free_buffer(vq, buf);
++ DBG_leaving;
++}
++
++static struct videobuf_queue_ops mrst_isp_videobuf_qops = {
++ .buf_setup = buffer_setup,
++ .buf_prepare = buffer_prepare,
++ .buf_queue = buffer_queue,
++ .buf_release = buffer_release,
++};
++
++static int mrst_isp_open(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++ struct mrst_isp_fh *fh = NULL;
++ struct v4l2_format sensor_format;
++ int ret;
++
++ DBG_entering;
++
++ if (!isp) {
++ printk(KERN_ERR "null in mrst_isp_open\n");
++ return -ENODEV;
++ }
++
++ dprintk(2, "open = %d", isp->open);
++ mutex_lock(&isp->mutex);
++ if (isp->open == 0) {
++ if (isp->sensor_soc) {
++ dprintk(0, "cur senfor soc");
++ isp->sensor_curr = isp->sensor_soc;
++ } else {
++ dprintk(0, "cur sensor raw");
++ isp->sensor_curr = isp->sensor_raw;
++ }
++ }
++ ++isp->open;
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, g_fmt,
++ &sensor_format);
++ if (ret) {
++ printk(KERN_ERR "can't get current pix from sensor!\n");
++ ret = -EINVAL;
++ goto exit_unlock;
++ }
++
++ dprintk(1, "current sensor format: %d x %d",
++ sensor_format.fmt.pix.width,
++ sensor_format.fmt.pix.height);
++
++ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
++ if (NULL == fh) {
++ printk(KERN_ERR "no mem for fh \n");
++ ret = -ENOMEM;
++ goto exit_unlock;
++ }
++
++ file->private_data = fh;
++ fh->dev = isp;
++
++ videobuf_queue_dma_contig_init(&fh->vb_q, &mrst_isp_videobuf_qops,
++ vdev->parent, &isp->lock,
++ V4L2_BUF_TYPE_VIDEO_CAPTURE,
++ V4L2_FIELD_NONE,
++ sizeof(struct mrst_isp_buffer), fh);
++#ifdef ISP_CLOCK_GATING
++ if ((isp->open == 1) && (mrst_platform_id() == MRST_PLATFORM_AAVA_SC))
++ init_timer(&prevent_sleep_timer);
++#endif
++
++exit_unlock:
++ mutex_unlock(&isp->mutex);
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_close(struct file *file)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ struct mrst_isp_fh *fh = file->private_data;
++ unsigned long flags;
++
++ DBG_entering;
++ mutex_lock(&isp->mutex);
++ --isp->open;
++ dprintk(2, "close = %d", isp->open);
++ if (isp->open == 0) {
++#ifdef ISP_CLOCK_GATING
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ prevent_sleep_flag = 0;
++ del_timer_sync(&prevent_sleep_timer);
++ btimerexist = false;
++ }
++#endif
++ if (isp->streaming == 1) {
++ videobuf_streamoff(&fh->vb_q);
++ isp->streaming = 0;
++ isp->buffer_required = 0;
++ isp->stopflag = 0;
++
++ spin_lock_irqsave(&isp->lock, flags);
++ INIT_LIST_HEAD(&isp->capture);
++ isp->active = NULL;
++ isp->next = NULL;
++ isp->sys_conf.isp_hal_enable = 0;
++ isp->sys_conf.jpg_review_enable = 0;
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ ci_isp_stop(CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 0);
++ isp->sensor_curr = NULL;
++ }
++ if (isp->sensor_soc)
++ v4l2_subdev_call(isp->sensor_soc, core, s_gpio, 1);
++ if (isp->sensor_raw)
++ v4l2_subdev_call(isp->sensor_raw, core, s_gpio, 1);
++ }
++
++ kfree(file->private_data);
++
++ mutex_unlock(&isp->mutex);
++
++ /*XXX zheng*/
++ if (isp->open == 0)
++ frame_cnt = 0;
++
++ DBG_leaving;
++ return 0;
++}
++
++static ssize_t mrst_isp_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ return 0;
++}
++
++static void mrst_isp_videobuf_vm_open(struct vm_area_struct *vma)
++{
++ struct videobuf_mapping *map = vma->vm_private_data;
++
++ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
++ map, map->count, vma->vm_start, vma->vm_end);
++
++ map->count++;
++}
++
++static void mrst_isp_videobuf_vm_close(struct vm_area_struct *vma)
++{
++ struct videobuf_mapping *map = vma->vm_private_data;
++ struct videobuf_queue *q = map->q;
++ int i;
++
++ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
++ map, map->count, vma->vm_start, vma->vm_end);
++
++ map->count--;
++ if (0 == map->count) {
++ struct videobuf_dma_contig_memory *mem;
++
++ dprintk(2, "munmap %p q=%p\n", map, q);
++ mutex_lock(&q->vb_lock);
++
++ /* We need first to cancel streams, before unmapping */
++ if (q->streaming)
++ videobuf_queue_cancel(q);
++
++ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
++ if (NULL == q->bufs[i])
++ continue;
++
++ if (q->bufs[i]->map != map)
++ continue;
++
++ mem = q->bufs[i]->priv;
++ if (mem) {
++ /* This callback is called only if kernel has
++ allocated memory and this memory is mmapped.
++ In this case, memory should be freed,
++ in order to do memory unmap.
++ */
++
++ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
++
++ /* vfree is not atomic - can't be
++ called with IRQ's disabled
++ */
++ dprintk(2, "buf[%d] freeing %p\n",
++ i, mem->vaddr);
++
++ /*
++ dma_free_coherent(q->dev, mem->size,
++ mem->vaddr, mem->dma_handle);
++ */
++ mem->vaddr = NULL;
++ }
++
++ q->bufs[i]->map = NULL;
++ q->bufs[i]->baddr = 0;
++ }
++
++ kfree(map);
++
++ mutex_unlock(&q->vb_lock);
++ }
++}
++
++static struct vm_operations_struct mrst_isp_videobuf_vm_ops = {
++ .open = mrst_isp_videobuf_vm_open,
++ .close = mrst_isp_videobuf_vm_close,
++};
++
++static int mrst_isp_mmap_mapper(struct videobuf_queue *q,
++ struct vm_area_struct *vma)
++{
++ struct videobuf_dma_contig_memory *mem;
++ struct videobuf_mapping *map;
++ unsigned int first;
++ int retval;
++ unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
++
++ struct mrst_isp_fh *fh = q->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++
++ DBG_entering;
++
++ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++
++ /* look for first buffer to map */
++ for (first = 0; first < VIDEO_MAX_FRAME; first++) {
++ if (!q->bufs[first])
++ continue;
++
++ if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
++ continue;
++ if (q->bufs[first]->boff == offset) {
++ dprintk(1, "buff id %d is mapped", first);
++ break;
++ }
++ }
++ if (VIDEO_MAX_FRAME == first) {
++ eprintk("invalid user space offset [offset=0x%lx]", offset);
++ return -EINVAL;
++ }
++
++ /* create mapping + update buffer list */
++ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ q->bufs[first]->map = map;
++ map->start = vma->vm_start;
++ map->end = vma->vm_end;
++ map->q = q;
++
++ q->bufs[first]->baddr = vma->vm_start;
++
++ mem = q->bufs[first]->priv;
++ BUG_ON(!mem);
++ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
++
++ mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
++ mem->dma_handle = isp->mb1 + (mem->size * first);
++ mem->vaddr = (void *)0x1;
++ /*
++ mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
++ &mem->dma_handle, GFP_KERNEL);
++ */
++ if (mem->size > isp->mb1_size) {
++ eprintk("to big size, can not be mmapped");
++ return -EINVAL;
++ }
++
++ /* Try to remap memory */
++
++ size = vma->vm_end - vma->vm_start;
++ size = (size < mem->size) ? size : mem->size;
++
++ dprintk(1, "vm_end - vm_start = %ld, mem-size = %ld", size, mem->size);
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ retval = remap_pfn_range(vma, vma->vm_start,
++ mem->dma_handle >> PAGE_SHIFT,
++ size, vma->vm_page_prot);
++ if (retval) {
++ eprintk("mmap: remap failed with error %d. ", retval);
++ goto error;
++ }
++
++ vma->vm_ops = &mrst_isp_videobuf_vm_ops;
++ vma->vm_flags |= VM_DONTEXPAND;
++ vma->vm_private_data = map;
++
++ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
++ map, q, vma->vm_start, vma->vm_end,
++ (long int) q->bufs[first]->bsize,
++ vma->vm_pgoff, first);
++
++ mrst_isp_videobuf_vm_open(vma);
++
++ return 0;
++
++error:
++ kfree(map);
++ return -ENOMEM;
++}
++int mrst_isp_videobuf_mmap_mapper(struct videobuf_queue *q,
++ struct vm_area_struct *vma)
++{
++ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
++
++ mutex_lock(&q->vb_lock);
++ mrst_isp_mmap_mapper(q, vma);
++ mutex_unlock(&q->vb_lock);
++
++ return 0;
++}
++static int mrst_isp_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int ret;
++ int map_by_myself;
++ struct mrst_isp_fh *fh;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ unsigned long size = vma->vm_end-vma->vm_start;
++ unsigned long page;
++
++ DBG_entering;
++
++ /* temporarily put here */
++ if (isp->open > 1) {
++ printk(KERN_ERR "ISP already opened...");
++ return -EINVAL;
++ }
++
++ fh = file->private_data;
++
++ if (!(vma->vm_flags & (VM_WRITE | VM_READ))
++ || !(vma->vm_flags & VM_SHARED)) {
++ printk(KERN_ERR "mrstisp: wrong vma flag");
++ return -EINVAL;
++ }
++
++ /* to check whether if it is ISP bar 0 map */
++ if (offset == isp->mb0_size + isp->mb1_size) {
++ dprintk(1, "---- map bar0 ----");
++ page = isp->mb0;
++ map_by_myself = 1;
++ } else if (offset == 0 && size == isp->mb1_size) {
++ dprintk(1, "---- map bar1 ----");
++ page = isp->mb1;
++ map_by_myself = 1;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_JPEG
++ && isp->sys_conf.jpg_review_enable == 1
++ && offset == isp->sys_conf.jpg_review.offset) {
++ dprintk(1, "---- map jpeg review buffer----");
++ page = isp->mb1 + isp->sys_conf.jpg_review.offset;
++ map_by_myself = 1;
++ } else {
++ dprintk(1, "----map one certain buffer----");
++ map_by_myself = 0;
++ }
++
++ if (map_by_myself) {
++ vma->vm_flags |= VM_IO;
++ vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
++
++ page = page >> PAGE_SHIFT;
++
++ if (remap_pfn_range(vma, vma->vm_start, page, size,
++ PAGE_SHARED)) {
++ printk(KERN_ERR "fail to put MMAP buffer to user space\n");
++ return -EAGAIN;
++ }
++
++ return 0;
++ }
++
++ if (size > isp->num_frames * PAGE_ALIGN(isp->frame_size)) {
++ eprintk("length is larger than num * size");
++ return -EINVAL;
++ }
++
++ ret = mrst_isp_videobuf_mmap_mapper(&fh->vb_q, vma);
++
++ dprintk(1, "vma start=0x%08lx, size=%ld, offset=%ld ret=%d",
++ (unsigned long)vma->vm_start,
++ (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
++ (unsigned long)offset, ret);
++
++ return ret;
++}
++
++static int mrst_isp_g_fmt_cap(struct file *file, void *priv,
++ struct v4l2_format *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++#ifdef ISP_CLOCK_GATING
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ prevent_sleep_flag = 1;
++#endif
++ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ f->fmt.pix.width = isp->bufwidth;
++ f->fmt.pix.height = isp->bufheight;
++ f->fmt.pix.pixelformat = isp->pixelformat;
++ f->fmt.pix.bytesperline = (f->fmt.pix.width * isp->depth) >> 3;
++ f->fmt.pix.sizeimage = f->fmt.pix.height
++ * f->fmt.pix.bytesperline;
++ ret = 0;
++ } else {
++ ret = -EINVAL;
++ }
++
++ dprintk(1, "get fmt %d x %d ", f->fmt.pix.width, f->fmt.pix.height);
++ DBG_leaving;
++ return ret;
++}
++
++static struct intel_fmt *fmt_by_fourcc(unsigned int fourcc)
++{
++ unsigned int i;
++
++ for (i = 0; i < NUM_FORMATS; i++)
++ if (fmts[i].fourcc == fourcc)
++ return fmts+i;
++ return NULL;
++}
++
++static int mrst_isp_try_fmt_cap(struct file *file, void *priv,
++ struct v4l2_format *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ struct intel_fmt *fmt;
++ int w, h;
++ int ret;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mutex_lock(&isp->mutex);
++
++ fmt = fmt_by_fourcc(f->fmt.pix.pixelformat);
++ if (NULL == fmt && f->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG) {
++ printk(KERN_ERR "mrstisp: fmt not found\n");
++ ret = -EINVAL;
++ goto exit_unlock;
++ }
++
++ w = f->fmt.pix.width;
++ h = f->fmt.pix.height;
++
++ dprintk(1, "sensor name %s: before w = %d, h = %d",
++ isp->sensor_curr->name, w, h);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, try_fmt, f);
++ if (ret)
++ goto exit_unlock;
++
++
++ w = f->fmt.pix.width;
++ h = f->fmt.pix.height;
++
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 ||
++ f->fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH)
++ w = INTEL_MAX_WIDTH;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT)
++ h = INTEL_MAX_HEIGHT;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
++ } else {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH_MP)
++ w = INTEL_MAX_WIDTH_MP;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT_MP)
++ h = INTEL_MAX_HEIGHT_MP;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ }
++
++ f->fmt.pix.width = w;
++ f->fmt.pix.height = h;
++
++ f->fmt.pix.field = V4L2_FIELD_NONE;
++ f->fmt.pix.bytesperline = (w * h)/8;
++ if (fmt)
++ f->fmt.pix.sizeimage = (w * h * fmt->depth)/8;
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
++ f->fmt.pix.priv = 0;
++
++ dprintk(3, "after w = %d, h = %d", w, h);
++ ret = 0;
++
++exit_unlock:
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_s_fmt_cap(struct file *file, void *priv,
++ struct v4l2_format *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ struct intel_fmt *fmt;
++ int ret;
++ unsigned int width_o, height_o;
++ unsigned short width_sensor, height_sensor;
++ unsigned int w, h;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mipi_flag = 1;
++
++ w = f->fmt.pix.width;
++ h = f->fmt.pix.height;
++
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 ||
++ f->fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH)
++ w = INTEL_MAX_WIDTH;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT)
++ h = INTEL_MAX_HEIGHT;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
++ } else {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH_MP)
++ w = INTEL_MAX_WIDTH_MP;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT_MP)
++ h = INTEL_MAX_HEIGHT_MP;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ }
++
++ f->fmt.pix.width = w;
++ f->fmt.pix.height = h;
++
++ width_o = f->fmt.pix.width;
++ height_o = f->fmt.pix.height;
++
++ (void)ci_sensor_res2size(to_sensor_config(isp->sensor_curr)->res,
++ &width_sensor, &height_sensor);
++
++ ret = mrst_isp_try_fmt_cap(file, priv, f);
++ if (0 != ret) {
++ printk(KERN_ERR "mrstisp: set format failed\n");
++ return ret;
++ }
++
++ /* set fmt for only sensor */
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_MPEG) {
++ ret = v4l2_subdev_call(isp->sensor_curr, video, s_fmt, f);
++ dprintk(1, "------------set fmt only for sensor (%d x %d)",
++ f->fmt.pix.width, f->fmt.pix.height);
++ return ret;
++ }
++
++ if (isp->sys_conf.isp_hal_enable) {
++ /* set fmt for isp */
++ mutex_lock(&isp->mutex);
++ fmt = fmt_by_fourcc(f->fmt.pix.pixelformat);
++
++ isp->pixelformat = fmt->fourcc;
++ isp->depth = fmt->depth;
++
++ dprintk(1, "sensor (%d x %d)", width_sensor, height_sensor);
++ if (width_o < f->fmt.pix.width &&
++ height_o < f->fmt.pix.height) {
++ isp->bufwidth = width_o;
++ isp->bufheight = height_o;
++ } else if (width_sensor < f->fmt.pix.width &&
++ height_sensor < f->fmt.pix.height) {
++ isp->bufwidth = width_sensor;
++ isp->bufheight = height_sensor;
++ f->fmt.pix.width = width_sensor;
++ f->fmt.pix.height = height_sensor;
++ } else {
++ isp->bufwidth = f->fmt.pix.width;
++ isp->bufheight = f->fmt.pix.height;
++ }
++
++ /* FIXME
++ * check if buf res is larger than
++ * sensor real res(1304x980)
++ * if yes, down buf res to VGA
++ */
++ if (to_sensor_config(isp->sensor_curr)->res ==
++ SENSOR_RES_VGA_PLUS)
++ if (isp->bufwidth >= VGA_SIZE_H &&
++ isp->bufheight >= VGA_SIZE_V) {
++ isp->bufwidth = VGA_SIZE_H;
++ isp->bufheight = VGA_SIZE_V;
++ }
++
++ mutex_unlock(&isp->mutex);
++
++ dprintk(1, "----------set fmt only to isp: w %d, h%d, "
++ "fourcc: %lx", isp->bufwidth,
++ isp->bufheight, fmt->fourcc);
++ } else {
++
++ /* set fmt for both isp and sensor */
++ mutex_lock(&isp->mutex);
++ fmt = fmt_by_fourcc(f->fmt.pix.pixelformat);
++
++ isp->pixelformat = fmt->fourcc;
++ isp->depth = fmt->depth;
++ isp->bufwidth = width_o;
++ isp->bufheight = height_o;
++
++ mutex_unlock(&isp->mutex);
++
++ dprintk(1, "--------set fmt for isp : w%d, h%d, fourcc: %lx",
++ isp->bufwidth, isp->bufheight, fmt->fourcc);
++ dprintk(1, "--------set fmt for sesnro : w%d, h%d, fourcc: %lx",
++ f->fmt.pix.width, f->fmt.pix.height, fmt->fourcc);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, s_fmt, f);
++ }
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_enum_framesizes(struct file *file, void *priv,
++ struct v4l2_frmsizeenum *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, enum_framesizes, arg);
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_enum_frameintervals(struct file *file, void *priv,
++ struct v4l2_frmivalenum *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, enum_frameintervals,
++ arg);
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_queryctrl(struct file *file, void *priv,
++ struct v4l2_queryctrl *c)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (!v4l2_subdev_call(isp->sensor_curr, core, queryctrl, c))
++ return 0;
++ else if (!v4l2_subdev_call(isp->motor, core, queryctrl, c))
++ return 0;
++
++ /* No controls supported */
++ return -EINVAL;
++}
++
++static int mrst_isp_g_ctrl(struct file *file, void *priv,
++ struct v4l2_control *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ if (c->id == V4L2_CID_FOCUS_ABSOLUTE) {
++ ret = v4l2_subdev_call(isp->motor, core, g_ctrl, c);
++ dprintk(2, "get focus from motor : %d", c->value);
++ return ret;
++ } else {
++ ret = v4l2_subdev_call(isp->sensor_curr, core, g_ctrl, c);
++ dprintk(2, "get other cotrol from senrsor : %d", c->value);
++ return ret;
++ }
++}
++
++static int mrst_isp_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ DBG_entering;
++
++ if (c->id == V4L2_CID_FOCUS_ABSOLUTE) {
++ dprintk(2, "setting focus %d to motor", c->value);
++ return v4l2_subdev_call(isp->motor, core, s_ctrl, c);
++ } else {
++ dprintk(2, "setting other ctrls, value = %d", c->value);
++ return v4l2_subdev_call(isp->sensor_curr, core, s_ctrl, c);
++ }
++}
++
++static int mrst_isp_index_to_camera(struct mrst_isp_device *isp, u32 index)
++{
++ int camera = MRST_CAMERA_NONE;
++
++ if (isp->sensor_soc && isp->sensor_raw) {
++ switch (index) {
++ case 0:
++ camera = isp->sensor_soc_index;
++ break;
++ case 1:
++ camera = isp->sensor_raw_index;
++ break;
++ }
++ } else if (isp->sensor_soc) {
++ switch (index) {
++ case 0:
++ camera = isp->sensor_soc_index;
++ break;
++ }
++ } else if (isp->sensor_raw) {
++ switch (index) {
++ case 0:
++ camera = isp->sensor_raw_index;
++ break;
++ }
++ }
++
++ return camera;
++}
++
++static int mrst_isp_enum_input(struct file *file, void *priv,
++ struct v4l2_input *i)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++ int camera;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ camera = mrst_isp_index_to_camera(isp, i->index);
++ if (MRST_CAMERA_NONE == camera)
++ return -EINVAL;
++
++ i->type = V4L2_INPUT_TYPE_CAMERA;
++ i->std = V4L2_STD_UNKNOWN;
++ strcpy(i->name, mrst_camera_table[camera].name);
++
++ DBG_leaving;
++ return 0;
++}
++static int mrst_isp_g_input(struct file *file, void *priv, unsigned int *i)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ if (isp->sensor_soc && isp->sensor_raw)
++ if (isp->sensor_curr == isp->sensor_soc)
++ *i = 0;
++ else
++ *i = 1;
++ else
++ *i = 0;
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_s_input(struct file *file, void *priv, unsigned int i)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++
++ int camera;
++
++ DBG_entering;
++
++ if (isp->streaming) {
++ printk(KERN_WARNING "VIDIOC_S_INPUT error: ISP is streaming\n");
++ return -EBUSY;
++ }
++
++ camera = mrst_isp_index_to_camera(isp, i);
++ if (MRST_CAMERA_NONE == camera)
++ return -EINVAL;
++
++ if (mrst_camera_table[camera].type == MRST_CAMERA_SOC)
++ isp->sensor_curr = isp->sensor_soc;
++ else
++ isp->sensor_curr = isp->sensor_raw;
++
++ dprintk(1, "set sensor %s as input", isp->sensor_curr->name);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_g_ext_ctrls(struct file *file,
++ void *fh,
++ struct v4l2_ext_controls *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ int ret = -EINVAL;
++
++ DBG_entering;
++
++ if (c->ctrl_class != V4L2_CTRL_CLASS_CAMERA) {
++ printk(KERN_ERR "Invalid control class\n");
++ return ret;
++ }
++
++ c->error_idx = 0;
++ if (isp->motor) {
++ ret = v4l2_subdev_call(isp->motor, core, g_ext_ctrls, c);
++ if (c->error_idx) {
++ printk(KERN_ERR "mrst: error call g_ext_ctrls\n");
++ return ret;
++ }
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_s_ext_ctrls(struct file *file, void *fh,
++ struct v4l2_ext_controls *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ int ret = -EINVAL;
++
++ DBG_entering;
++
++ if (c->ctrl_class != V4L2_CTRL_CLASS_CAMERA) {
++ printk(KERN_INFO "Invalid control class\n");
++ return ret;
++ }
++
++ c->error_idx = 0;
++ if (isp->motor) {
++ ret = v4l2_subdev_call(isp->motor, core, s_ext_ctrls, c);
++ if (c->error_idx) {
++ printk(KERN_ERR "mrst: error call s_ext_ctrls\n");
++ return ret;
++ }
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_s_std(struct file *filp, void *priv, v4l2_std_id *a)
++{
++ DBG_entering;
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_querycap(struct file *file, void *priv,
++ struct v4l2_capability *cap)
++{
++ struct video_device *dev = video_devdata(file);
++
++ DBG_entering;
++
++ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
++ strlcpy(cap->card, dev->name, sizeof(cap->card));
++
++ cap->version = INTEL_VERSION(0, 5, 0);
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int mrst_isp_cropcap(struct file *file, void *priv,
++ struct v4l2_cropcap *cap)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ cap->bounds.left = 0;
++ cap->bounds.top = 0;
++ cap->bounds.width = isp->bufwidth;
++ cap->bounds.height = isp->bufheight;
++ cap->defrect = cap->bounds;
++ cap->pixelaspect.numerator = 1;
++ cap->pixelaspect.denominator = 1;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int mrst_isp_enum_fmt_cap(struct file *file, void *priv,
++ struct v4l2_fmtdesc *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ unsigned int index;
++
++ DBG_entering;
++
++ index = f->index;
++
++ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++ else {
++ if (isp->sensor_curr == isp->sensor_soc)
++ if (index >= 8)
++ return -EINVAL;
++ if (index >= sizeof(fmts) / sizeof(*fmts))
++ return -EINVAL;
++
++ f->index = index;
++ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ strlcpy(f->description, fmts[index].name,
++ sizeof(f->description));
++ f->pixelformat = fmts[index].fourcc;
++ if (fmts[index].fourcc == V4L2_PIX_FMT_JPEG)
++ f->flags = V4L2_FMT_FLAG_COMPRESSED;
++ }
++
++ DBG_leaving;
++
++ return 0;
++
++}
++
++#define ALIGN4(x) ((((long)(x)) & 0x3) == 0)
++
++static int mrst_isp_reqbufs(struct file *file, void *priv,
++ struct v4l2_requestbuffers *req)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ if (req->count == 0)
++ return 0;
++
++ /*
++ * if (req->count > 3)
++ req->count = 3;
++ */
++
++ if (req->memory != V4L2_MEMORY_MMAP) {
++ eprintk("wrong memory type");
++ return -EINVAL;
++ }
++ ret = videobuf_reqbufs(&fh->vb_q, req);
++ if (ret)
++ eprintk("err calling videobuf_reqbufs ret = %d", ret);
++
++ if (!ret)
++ isp->buffer_required = 1;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_querybuf(struct file *file, void *priv,
++ struct v4l2_buffer *buf)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ ret = videobuf_querybuf(&fh->vb_q, buf);
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ ret = videobuf_qbuf(&fh->vb_q, buf);
++ /* identify which video buffer was q-ed */
++ if (ret == 0)
++ fh->qbuf_flag |= (1<<buf->index);
++ dprintk(1, "q-ed index = %d", buf->index);
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int mrst_isp_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++
++ WARN_ON(priv != file->private_data);
++
++ /*XXX zheng*/
++ /*
++ if (frame_cnt == 0) {
++ printk(KERN_WARNING "timer start\n");
++ intel_timer_start();
++ }
++ */
++
++ DBG_entering;
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++ if (b->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++ if (fh->qbuf_flag == 0) {
++ dprintk(1, "no buffer can be dq-ed\n");
++ return -EINVAL;
++ }
++
++ /*dprintk(3, "entering");*/
++ /* ret = videobuf_dqbuf(&fh->vb_q, b, file->f_flags & O_NONBLOCK); */
++ ret = videobuf_dqbuf(&fh->vb_q, b, 0);
++ /* identify which video buffer was dq-ed */
++ if (ret == 0)
++ fh->qbuf_flag &= ~(1<<b->index);
++
++ /*XXX zheng*/
++ ++frame_cnt;
++ /*
++ if (frame_cnt % 10 == 0)
++ printk(KERN_WARNING "%d frames takes %dms to go, fps = %d\n",
++ frame_cnt, intel_get_micro_sec(),
++ frame_cnt * 1000 / intel_get_micro_sec());
++ */
++
++ dprintk(1, "dq-ed index = %d", b->index);
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_streamon(struct file *file, void *priv,
++ enum v4l2_buf_type type)
++{
++ struct mrst_isp_fh *fh = file->private_data;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ DBG_entering;
++
++ if (!isp->buffer_required) {
++ eprintk("buffer is not required, can not stream on ");
++ return -EINVAL;
++ }
++
++ dprintk(2, "gamma2 = %d", isp->sys_conf.isp_cfg.flags.gamma2);
++ WARN_ON(priv != file->private_data);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ mutex_lock(&isp->mutex);
++
++ if (!to_sensor_config(isp->sensor_curr)->mipi_mode)
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 1);
++
++ mrst_isp_dp_init(&isp->sys_conf, to_sensor_config(isp->sensor_curr));
++ mrst_isp_setup_viewfinder_path(isp,
++ to_sensor_config(isp->sensor_curr), -1);
++
++ ret = videobuf_streamon(&fh->vb_q);
++ isp->streaming = 1;
++
++#ifdef ISP_CLOCK_GATING
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ prevent_sleep_flag = 1;
++#endif
++
++ mutex_unlock(&isp->mutex);
++
++ dprintk(1, "isp->active = %p", isp->active);
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_streamoff(struct file *file, void *priv,
++ enum v4l2_buf_type type)
++{
++ struct mrst_isp_fh *fh = file->private_data;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ unsigned long flags;
++ int ret;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ mutex_lock(&isp->mutex);
++
++ ret = videobuf_streamoff(&fh->vb_q);
++ dprintk(1, "ret of videobuf_streamoff = %d", ret);
++ isp->streaming = 0;
++
++ spin_lock_irqsave(&isp->lock, flags);
++ INIT_LIST_HEAD(&isp->capture);
++ isp->active = NULL;
++ isp->next = NULL;
++ isp->stopflag = 0;
++ isp->sys_conf.isp_hal_enable = 0;
++ isp->sys_conf.jpg_review_enable = 0;
++ isp->sys_conf.isp_cfg.img_eff_cfg.mode = CI_ISP_IE_MODE_OFF;
++ isp->sys_conf.isp_cfg.jpeg_enc_ratio = 1;
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 0);
++ ci_isp_stop(CI_ISP_CFG_UPDATE_FRAME_SYNC);
++
++#ifdef ISP_CLOCK_GATING
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ prevent_sleep_flag = 0;
++#endif
++
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return ret;
++}
++
++static const struct v4l2_file_operations mrst_isp_fops = {
++ .owner = THIS_MODULE,
++ .open = mrst_isp_open,
++ .release = mrst_isp_close,
++ .read = mrst_isp_read,
++ .mmap = mrst_isp_mmap,
++ .ioctl = video_ioctl2,
++};
++
++static const struct v4l2_ioctl_ops mrst_isp_ioctl_ops = {
++ .vidioc_querycap = mrst_isp_querycap,
++ .vidioc_enum_fmt_vid_cap = mrst_isp_enum_fmt_cap,
++ .vidioc_g_fmt_vid_cap = mrst_isp_g_fmt_cap,
++ /* .vidioc_g_fmt_vid_out =
++ * mrst_isp_g_fmt_cap_for_sensor_hal, */
++ .vidioc_try_fmt_vid_cap = mrst_isp_try_fmt_cap,
++ .vidioc_s_fmt_vid_cap = mrst_isp_s_fmt_cap,
++ .vidioc_cropcap = mrst_isp_cropcap,
++ .vidioc_reqbufs = mrst_isp_reqbufs,
++ .vidioc_querybuf = mrst_isp_querybuf,
++ .vidioc_qbuf = mrst_isp_qbuf,
++ .vidioc_dqbuf = mrst_isp_dqbuf,
++ .vidioc_enum_input = mrst_isp_enum_input,
++ .vidioc_g_input = mrst_isp_g_input,
++ .vidioc_s_input = mrst_isp_s_input,
++ .vidioc_s_std = mrst_isp_s_std,
++ .vidioc_queryctrl = mrst_isp_queryctrl,
++ .vidioc_streamon = mrst_isp_streamon,
++ .vidioc_streamoff = mrst_isp_streamoff,
++ .vidioc_g_ctrl = mrst_isp_g_ctrl,
++ .vidioc_s_ctrl = mrst_isp_s_ctrl,
++ .vidioc_enum_framesizes = mrst_isp_enum_framesizes,
++ .vidioc_enum_frameintervals = mrst_isp_enum_frameintervals,
++ .vidioc_g_ext_ctrls = mrst_isp_g_ext_ctrls,
++ .vidioc_s_ext_ctrls = mrst_isp_s_ext_ctrls,
++ /* FIXME private ioctls */
++ .vidioc_default = mrst_isp_vidioc_default,
++};
++
++static struct video_device mrst_isp_vdev = {
++ .name = "mrst_isp",
++ .minor = -1,
++ .fops = &mrst_isp_fops,
++ .ioctl_ops = &mrst_isp_ioctl_ops,
++ .release = video_device_release_empty,
++};
++
++static int mrst_ci_sensor_probe(struct mrst_isp_device *isp)
++{
++ struct v4l2_subdev *sensor = NULL, *motor = NULL;
++ int i;
++ char *name;
++ u8 addr;
++
++ isp->adapter_sensor = i2c_get_adapter(MRST_I2C_BUS_SENSOR);
++ if (NULL == isp->adapter_sensor) {
++ printk(KERN_ERR "mrstisp: no sensor i2c adapter\n");
++ return -ENODEV;
++ }
++
++ dprintk(1, "got sensor i2c adapter: %s", isp->adapter_sensor->name);
++
++ gpio_request(GPIO_STDBY1_PIN, "Sensor Standby1");
++ gpio_request(GPIO_STDBY2_PIN, "Sensor Standby2");
++ gpio_request(GPIO_RESET_PIN, "Sensor Reset");
++ gpio_request(GPIO_SCLK_25, "Sensor clock");
++ gpio_request(95, "Camera Motor");
++
++ /* Enable sensor related GPIO in system */
++ gpio_direction_output(GPIO_STDBY1_PIN, 0);
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ gpio_direction_output(GPIO_STDBY2_PIN, 1);
++ else
++ gpio_direction_output(GPIO_STDBY2_PIN, 0);
++ gpio_direction_output(GPIO_RESET_PIN, 1);
++ gpio_direction_output(GPIO_SCLK_25, 0);
++ /* gpio_direction_output(GPIO_AF_PD, 1); */
++
++ /*
++ gpio_alt_func(GPIO_STDBY1_PIN, 0);
++ gpio_alt_func(GPIO_STDBY2_PIN, 0);
++ gpio_alt_func(GPIO_RESET_PIN, 0);
++ gpio_alt_func(GPIO_SCLK_25, 1);
++ */
++
++ for (i = 0; i < N_CAMERA; i++) {
++ name = mrst_camera_table[i].name;
++ addr = mrst_camera_table[i].sensor_addr;
++ if (mrst_camera_table[i].type == MRST_CAMERA_SOC) {
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr);
++#else
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr, NULL);
++#endif
++
++ if (sensor == NULL) {
++ dprintk(2, "sensor %s not found", name);
++ continue;
++ }
++ isp->sensor_soc = sensor;
++ isp->sensor_soc_index = i;
++ dprintk(0, "soc camera sensor %s-%s successfully found",
++ name, sensor->name);
++ }
++
++ if (mrst_camera_table[i].type == MRST_CAMERA_RAW) {
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr);
++#else
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr, NULL);
++#endif
++
++ if (sensor == NULL) {
++ dprintk(2, "sensor %s not found", name);
++ continue;
++ }
++ isp->sensor_raw = sensor;
++ isp->sensor_raw_index = i;
++ dprintk(0, "raw camera sensor %s successfully found",
++ name);
++ name = mrst_camera_table[i].motor_name;
++ addr = mrst_camera_table[i].motor_addr;
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ motor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr);
++#else
++ motor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr, NULL);
++#endif
++
++ if (motor == NULL)
++ dprintk(2, "motor %s not found", name);
++ else {
++ isp->motor = motor;
++ dprintk(0, "motor %s successfully found", name);
++ }
++ }
++ }
++
++ if (!isp->sensor_soc && !isp->sensor_raw) {
++ dprintk(0, "no camera sensor device attached");
++ return -ENODEV;
++ } else {
++ if (isp->sensor_soc)
++ isp->sensor_curr = isp->sensor_soc;
++ else
++ isp->sensor_curr = isp->sensor_raw;
++
++ /* For some reason ISP needs some "extra syncs" with sensor mt9d113 */
++ if (!strcmp(to_sensor_config(isp->sensor_curr)->name, "mt9d113"))
++ isp_extra_sync_for_mt9d113 = 0;
++ else
++ isp_extra_sync_for_mt9d113 = 0;
++
++ return 0;
++ }
++}
++
++static int mrst_ci_flash_probe(struct mrst_isp_device *isp)
++{
++ struct v4l2_subdev *flash = NULL;
++ char *name = "mrst_camera_flash";
++ u8 addr = 0x53;
++
++ gpio_request(45, "Camera Flash");
++ gpio_direction_output(45, 0);
++
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ isp->adapter_flash = i2c_get_adapter(1);
++ else
++ isp->adapter_flash = i2c_get_adapter(MRST_I2C_BUS_FLASH);
++ if (NULL == isp->adapter_flash) {
++ dprintk(0, "no flash i2c adapter\n");
++ return -ENODEV;
++ }
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ addr = 0x4A;
++ dprintk(1, "got flash i2c adapter: %s", isp->adapter_flash->name);
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ flash = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_flash,
++ name, name, addr);
++#else
++ flash = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_flash,
++ name, name, addr, NULL);
++#endif
++
++ if (flash == NULL) {
++ dprintk(0, "no flash IC found\n");
++ return -ENODEV;
++ }
++
++ dprintk(0, "flash IC found");
++ return 0;
++}
++
++#if IRQ
++static irqreturn_t mrst_isp_irq_handler(int this_irq, void *dev_id)
++{
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++ struct mrst_isp_device *isp = dev_id;
++ struct videobuf_buffer *vb;
++ unsigned long flags;
++
++ u32 mi_mask = ci_isp_get_frame_end_irq_mask_isp();
++ u32 isp_mask = MRV_ISP_RIS_DATA_LOSS_MASK
++ | MRV_ISP_RIS_PIC_SIZE_ERR_MASK;
++ u32 jpe_status_mask = MRV_JPE_ALL_STAT_MASK;
++ u32 jpe_error_mask = MRV_JPE_ALL_ERR_MASK;
++ u32 mblk_line_mask = MRV_MI_MBLK_LINE_MASK;
++
++ u32 isp_irq;
++ u32 mi_irq;
++ u32 jpe_status_irq;
++ u32 jpe_error_irq;
++ u32 mipi_irq;
++ u32 mblk_line;
++ u32 bufbase;
++ /*For some reason ISP needs some "extra syncs" with sensor mt9d113 */
++ if (isp_extra_sync_for_mt9d113) {
++ isp_irq = REG_READ_EX(mrv_reg->isp_mis);
++ if (isp_irq & MRV_ISP_MIS_V_START_MASK) {
++ /*Clear frame start int and return; */
++ REG_SET_SLICE_EX(mrv_reg->isp_icr, MRV_ISP_ICR_V_START, ON);
++ return IRQ_HANDLED;
++ }
++ }
++ isp_irq = REG_READ_EX(mrv_reg->isp_ris) & isp_mask;
++ mi_irq = REG_READ_EX(mrv_reg->mi_ris) & mi_mask;
++
++ mblk_line = REG_READ_EX(mrv_reg->mi_ris) & mblk_line_mask;
++
++ jpe_status_irq = REG_READ_EX(mrv_reg->jpe_status_ris) & jpe_status_mask;
++ jpe_error_irq = REG_READ_EX(mrv_reg->jpe_error_ris) & jpe_error_mask;
++
++ mipi_irq = REG_READ_EX(mrv_reg->mipi_ris) & 0x00f00000;
++
++ dprintk(3, "IRQ: mblk_line = %x, mi_irq = %x, jpe_status_irq = %x,"
++ " jpe_error_irq = %x, isp_irq = %x", mblk_line, mi_irq,
++ jpe_status_irq, jpe_error_irq, isp_irq);
++
++ if (!(isp_irq | mi_irq | jpe_status_irq | jpe_error_irq | mblk_line
++ | mipi_irq)) {
++ dprintk(2, "unknown interrupt");
++ return IRQ_HANDLED;
++ }
++
++ REG_SET_SLICE_EX(mrv_reg->isp_icr, MRV_ISP_ICR_ALL, ON);
++ REG_SET_SLICE_EX(mrv_reg->mi_icr, MRV_MI_ALLIRQS, ON);
++ REG_SET_SLICE_EX(mrv_reg->jpe_error_icr, MRV_JPE_ALL_ERR, ON);
++ REG_SET_SLICE_EX(mrv_reg->jpe_status_icr, MRV_JPE_ALL_STAT, ON);
++ REG_WRITE_EX(mrv_reg->mipi_icr, 0xffffffff);
++ REG_SET_SLICE(mrv_reg->isp_err_clr, MRV_ISP_ALL_ERR, ON);
++
++ if (isp_irq) {
++ /* Currently we don't reset hardware even error detect */
++ dprintk(3, "ISP error IRQ received %x", isp_irq);
++ isp_error_num++;
++ isp_error_flag |= isp_irq;
++ return IRQ_HANDLED;
++ }
++
++ if (mipi_irq) {
++ dprintk(3, "error in mipi_irq %x", mipi_irq);
++ mipi_error_num++;
++ mipi_error_flag |= mipi_irq;
++ return IRQ_HANDLED;
++ }
++
++ if (mblk_line && mrst_isp_to_do_mblk_line) {
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, OFF);
++ dprintk(3, "enter mblk_line irq");
++
++ if (!(isp->active && !isp->next)) {
++ dprintk(3, "wrong isq status");
++ if (isp->active)
++ dprintk(2, "actie->i = %d", isp->active->i);
++ else
++ dprintk(2, "actie = NULL");
++ if (isp->next)
++ dprintk(2, "next->i = %d", isp->next->i);
++ else
++ dprintk(2, "next = NULL");
++ return IRQ_HANDLED;
++ }
++
++ spin_lock_irqsave(&isp->lock, flags);
++
++ if (!list_empty(&isp->capture)) {
++ isp->next = list_entry(isp->capture.next,
++ struct videobuf_buffer, queue);
++ isp->next->state = VIDEOBUF_ACTIVE;
++ bufbase = videobuf_to_dma_contig(isp->next);
++ mrst_isp_update_marvinvfaddr(isp, bufbase,
++ CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ dprintk(1, "updating new addr, next = %d",
++ isp->next->i);
++ } else {
++ isp->stopflag = 1;
++ dprintk(0, "stop isp");
++ }
++
++ mrst_isp_to_do_mblk_line = 0;
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ /* return IRQ_HANDLED; */
++ }
++
++ if (mi_irq && isp->pixelformat != V4L2_PIX_FMT_JPEG &&
++ !jpe_status_irq) {
++ dprintk(1, "view finding case");
++
++ if (!isp->active) {
++ dprintk(0, "no active queue, You should not go here");
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, ON);
++ return IRQ_HANDLED;
++ }
++
++ spin_lock_irqsave(&isp->lock, flags);
++
++ /* update captured frame status */
++ vb = isp->active;
++ /* vb->size = ci_isp_mif_get_byte_cnt(); */
++ /* if this buffer has been dq-ed, set nothing to state*/
++ if (vb->state != VIDEOBUF_IDLE)
++ vb->state = VIDEOBUF_DONE;
++ vb->field_count++;
++
++ isp->active = NULL;
++ dprintk(1, "buf %d size = %lx", vb->i, vb->size);
++ do_gettimeofday(&vb->ts);
++ wake_up(&vb->done);
++
++ if (!isp->next) {
++ if (!list_empty(&isp->capture)) {
++ isp->active = list_entry(isp->capture.next,
++ struct videobuf_buffer, queue);
++ list_del_init(&isp->active->queue);
++ isp->active->state = VIDEOBUF_ACTIVE;
++ dprintk(3, "start next frame %d",
++ isp->active->i);
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc,
++ MRV_MI_MBLK_LINE, ON);
++ } else {
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc,
++ MRV_MI_MBLK_LINE, ON);
++ mrst_isp_disable_interrupt(isp);
++ dprintk(3, "no frame right now");
++ }
++ } else {
++ isp->active = isp->next;
++ list_del_init(&isp->next->queue);
++ isp->next = NULL;
++ dprintk(1, "active = next = %d, next = NULL",
++ isp->active->i);
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, ON);
++ }
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++ return IRQ_HANDLED;
++ }
++
++ if (jpe_status_irq) {
++ dprintk(2, "jpeg capture case");
++
++ if (!isp->active)
++ return IRQ_HANDLED;
++
++ spin_lock_irqsave(&isp->lock, flags);
++
++ vb = isp->active;
++ vb->size = ci_isp_mif_get_byte_cnt();
++ vb->state = VIDEOBUF_DONE;
++ do_gettimeofday(&vb->ts);
++ vb->field_count++;
++ wake_up(&vb->done);
++ isp->active = NULL;
++
++ dprintk(2, "index =%d, bufsize = %lx", vb->i, vb->size);
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ return IRQ_HANDLED;
++ }
++
++ if (jpe_error_irq)
++ dprintk(2, "entered jpe_error_irq");
++
++ return IRQ_HANDLED;
++}
++#endif
++
++static void __devexit mrst_isp_pci_remove(struct pci_dev *pdev)
++{
++ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
++ struct mrst_isp_device *isp = to_isp(v4l2_dev);
++
++ DBG_entering;
++
++ ci_isp_stop(CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ mrst_isp_disable_interrupt(isp);
++
++#if IRQ
++ free_irq(pdev->irq, isp);
++#endif
++
++ if (isp->vdev) {
++ dprintk(2, "isp->vdev = %p", isp->vdev);
++ video_unregister_device(isp->vdev);
++ }
++
++ dma_release_declared_memory(&pdev->dev);
++
++ iounmap(isp->regs);
++
++ pci_release_regions(pdev);
++
++ pci_disable_device(pdev);
++
++ v4l2_device_unregister(&isp->v4l2_dev);
++
++ kfree(isp);
++
++ DBG_leaving;
++}
++
++static int __devinit mrst_isp_pci_probe(struct pci_dev *pdev,
++ const struct pci_device_id *pci_id)
++{
++ struct mrst_isp_device *isp;
++ unsigned int start = 0;
++ unsigned int len = 0;
++ int ret = 0;
++
++ DBG_entering;
++ printk ("Xiaolin-test entry\n");
++ /* alloc device struct */
++ isp = kzalloc(sizeof(struct mrst_isp_device), GFP_KERNEL);
++ if (NULL == isp) {
++ printk(KERN_ERR "mrstisp: fail to kzalloc mrst_isp_device\n");
++ ret = -ENOMEM;
++ goto exit;
++ }
++
++ /* register v4l2 device */
++ ret = v4l2_device_register(&pdev->dev, &isp->v4l2_dev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: fail to register v4l2 device\n");
++ goto exit_free_isp;
++ }
++
++ /* PCI operations */
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: can't enable isp\n");
++ goto exit_unregister_v4l2;
++ }
++
++ pci_set_master(pdev);
++
++ ret = pci_request_regions(pdev, "mrst isp");
++ if (ret) {
++ printk(KERN_ERR "mrstisp: can't request regions\n");
++ goto exit_disable_isp;
++ }
++
++ /* mem bar 0 */
++ start = isp->mb0 = pci_resource_start(pdev, 0);
++ len = isp->mb0_size = pci_resource_len(pdev, 0);
++
++ isp->regs = ioremap_nocache(start, len);
++ mrst_isp_regs = isp->regs;
++ if (isp->regs == NULL) {
++ printk(KERN_ERR "mrstisp: fail to ioremap isp registers\n");
++ goto exit_release_regions;
++ }
++
++ dprintk(1, "isp mb0 = %lx, mb0_size = %lx, regs = %p",
++ isp->mb0, isp->mb0_size, isp->regs);
++
++ /* mem bar 1 */
++ start = isp->mb1 = pci_resource_start(pdev, 1);
++ len = isp->mb1_size = pci_resource_len(pdev, 1);
++
++ dprintk(1, "isp mb1 = %lx, mb1_size = %lx", isp->mb1, isp->mb1_size);
++
++ ret = dma_declare_coherent_memory(&pdev->dev, start,
++ /* start, len - 640 * 480 * 2, */
++ start, len,
++ DMA_MEMORY_MAP);
++ /*
++ DMA_MEMORY_MAP
++ | DMA_MEMORY_EXCLUSIVE);
++ */
++ if (!ret) {
++ dprintk(0, "failed to declare dma memory");
++ ret = -ENXIO;
++ goto exit_iounmap;
++ }
++
++ /* init device struct */
++ INIT_LIST_HEAD(&isp->capture);
++ spin_lock_init(&isp->lock);
++ mutex_init(&isp->mutex);
++
++ pci_read_config_word(pdev, PCI_VENDOR_ID, &isp->vendorID);
++ pci_read_config_word(pdev, PCI_DEVICE_ID, &isp->deviceID);
++
++ mrst_isp_defcfg_all_load(&isp->sys_conf.isp_cfg);
++
++ isp->bufwidth = 640;
++ isp->bufheight = 480;
++ isp->depth = 12;
++ isp->pixelformat = V4L2_PIX_FMT_YVU420;
++ isp->streaming = 0;
++ isp->buffer_required = 0;
++
++
++ /* probe sensor */
++ ret = mrst_ci_sensor_probe(isp);
++ if (ret) {
++ dprintk(0, "failed to sensor probe\n");
++ goto exit_dma_release;
++ }
++
++ /* regiter video device */
++ isp->vdev = &mrst_isp_vdev;
++ isp->vdev->parent = &pdev->dev;
++ video_set_drvdata(isp->vdev, isp);
++
++ ret = video_register_device(isp->vdev, VFL_TYPE_GRABBER, -1);
++ if (ret) {
++ dprintk(0, "fail to register video deivice");
++ goto exit_dma_release;
++ }
++
++ dprintk(0, "registered dev/video%d", isp->vdev->num);
++ dprintk(0, "isp->vdev = %p", isp->vdev);
++
++#if IRQ
++ /* request irq */
++ ret = request_irq(pdev->irq, mrst_isp_irq_handler, IRQF_SHARED,
++ /* pci_name(pdev), isp); */
++ "mrst_camera_imaging", isp);
++ if (ret) {
++ dprintk(0, "fail to request irq");
++ goto exit_unregister_video;
++ }
++
++ mrst_isp_disable_interrupt(isp);
++#endif
++
++ /* probe flash */
++ mrst_ci_flash_probe(isp);
++
++ mrst_isp_to_do_mblk_line = 0;
++
++ dprintk(0, "mrstisp driver module successfully loaded");
++ return 0;
++
++exit_unregister_video:
++ video_unregister_device(isp->vdev);
++exit_dma_release:
++ dma_release_declared_memory(&pdev->dev);
++exit_iounmap:
++ iounmap(isp->regs);
++exit_release_regions:
++ pci_release_regions(pdev);
++exit_disable_isp:
++ pci_disable_device(pdev);
++exit_unregister_v4l2:
++ v4l2_device_unregister(&isp->v4l2_dev);
++exit_free_isp:
++ kfree(isp);
++exit:
++ return ret;
++}
++
++#ifdef CONFIG_PM
++static int mrst_isp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
++ struct mrst_isp_device *isp = to_isp(v4l2_dev);
++ int ret;
++
++ DBG_entering;
++
++ ci_isp_off();
++
++ ret = pci_save_state(pdev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: pci_save_state failed %d\n", ret);
++ return ret;
++ }
++
++ ret = pci_set_power_state(pdev, PCI_D3cold);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: fail to set power state\n");
++ return ret;
++ }
++
++/*
++ ret = ci_sensor_suspend();
++ if (ret) {
++ printk(KERN_ERR "mrstisp: Fail to suspend sensor\n");
++ return ret;
++ }
++*/
++ if (isp->sensor_soc)
++ v4l2_subdev_call(isp->sensor_soc, core, s_gpio, 1);
++ if (isp->sensor_raw)
++ v4l2_subdev_call(isp->sensor_raw, core, s_gpio, 1);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_pci_resume(struct pci_dev *pdev)
++{
++ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
++ struct mrst_isp_device *isp = to_isp(v4l2_dev);
++ int ret;
++
++ DBG_entering;
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: fail to enable device in resume\n");
++ return ret;
++ }
++
++/*
++ ret = ci_sensor_resume();
++ if (ret) {
++ printk(KERN_ERR "mrstisp: Fail to resume sensor\n");
++ return ret;
++ }
++*/
++ if (isp->sensor_soc)
++ v4l2_subdev_call(isp->sensor_soc, core, s_gpio, 0);
++ if (isp->sensor_raw)
++ v4l2_subdev_call(isp->sensor_raw, core, s_gpio, 0);
++
++ ci_isp_init();
++
++ DBG_leaving;
++ return 0;
++}
++#endif
++
++static struct pci_device_id mrst_isp_pci_tbl[] __devinitdata = {
++ { PCI_DEVICE(0x8086, 0x080B) },
++ {0,}
++};
++
++MODULE_DEVICE_TABLE(pci, mrst_isp_pci_tbl);
++
++static struct pci_driver mrst_isp_pci_driver = {
++ .name = "mrstisp",
++ .id_table = mrst_isp_pci_tbl,
++ .probe = mrst_isp_pci_probe,
++ .remove = mrst_isp_pci_remove,
++ #ifdef CONFIG_PM
++ .suspend = mrst_isp_pci_suspend,
++ .resume = mrst_isp_pci_resume,
++ #endif
++};
++
++static int __init mrst_isp_pci_init(void)
++{
++ int ret;
++
++ /* DBG_entering; */
++
++ pr_err("xiaoin@test init\n");
++
++ ret = pci_register_driver(&mrst_isp_pci_driver);
++ if (ret) {
++ pr_err("mrstisp: Unable to register driver\n");
++ return ret;
++ }
++
++ if (ret)
++ dprintk(1, "Unable to register flash driver");
++
++ DBG_leaving;
++ return 0;
++}
++
++static void __exit mrst_isp_pci_exit(void)
++{
++ DBG_entering;
++
++ pci_unregister_driver(&mrst_isp_pci_driver);
++
++ DBG_leaving;
++}
++
++module_init(mrst_isp_pci_init);
++/* late_initcall(mrst_isp_pci_init); */
++module_exit(mrst_isp_pci_exit);
++
++MODULE_DESCRIPTION("Intel Moorestown ISP driver");
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("video");
++
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstisp/mrstisp_mif.c
+@@ -0,0 +1,763 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++/*
++ * sets all main picture and self picture buffer offsets back to 0
++ */
++void ci_isp_mif_reset_offsets(enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->mi_mp_y_offs_cnt_init,
++ MRV_MI_MP_Y_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_offs_cnt_init,
++ MRV_MI_MP_CB_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_offs_cnt_init,
++ MRV_MI_MP_CR_OFFS_CNT_INIT, 0);
++
++ REG_SET_SLICE(mrv_reg->mi_sp_y_offs_cnt_init,
++ MRV_MI_SP_Y_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_offs_cnt_init,
++ MRV_MI_SP_CB_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_offs_cnt_init,
++ MRV_MI_SP_CR_OFFS_CNT_INIT, 0);
++
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_OFFSET_EN, ON);
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_BASE_EN, ON);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ break;
++ default:
++ break;
++ }
++}
++
++/*
++ * This function get the byte count from the last JPEG or raw data transfer
++ */
++u32 ci_isp_mif_get_byte_cnt(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ return (u32) REG_GET_SLICE(mrv_reg->mi_byte_cnt, MRV_MI_BYTE_CNT);
++}
++
++/*
++ * Sets the desired self picture orientation, if possible.
++ */
++static int ci_isp_mif_set_self_pic_orientation(enum ci_isp_mif_sp_mode
++ mrv_mif_sp_mode,
++ int activate_self_path)
++{
++
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 mi_ctrl = REG_READ(mrv_reg->mi_ctrl);
++
++ u32 output_format = REG_GET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT);
++
++ /* apply the desired self picture orientation, if possible */
++ switch (mrv_mif_sp_mode) {
++ case CI_ISP_MIF_SP_ORIGINAL:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP, 0);
++ break;
++
++ case CI_ISP_MIF_SP_HORIZONTAL_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP);
++ break;
++
++ case CI_ISP_MIF_SP_VERTICAL_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_V_FLIP);
++ break;
++
++ case CI_ISP_MIF_SP_ROTATION_090_DEG:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ case CI_ISP_MIF_SP_ROTATION_180_DEG:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP |
++ MRV_MI_ROT_AND_FLIP_V_FLIP);
++ break;
++
++ case CI_ISP_MIF_SP_ROTATION_270_DEG:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP |
++ MRV_MI_ROT_AND_FLIP_V_FLIP |
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ case CI_ISP_MIF_SP_ROT_090_V_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_V_FLIP |
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ case CI_ISP_MIF_SP_ROT_270_V_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP |
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ default:
++ eprintk("unknown value for mrv_mif_sp_mode");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (REG_GET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP) &
++ MRV_MI_ROT_AND_FLIP_ROTATE) {
++ switch (output_format) {
++ case MRV_MI_SP_OUTPUT_FORMAT_RGB888:
++ case MRV_MI_SP_OUTPUT_FORMAT_RGB666:
++ case MRV_MI_SP_OUTPUT_FORMAT_RGB565:
++ /* rotation supported on this output modes */
++ break;
++ default:
++ eprintk("rotation is only allowed for RGB modes.");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_ENABLE,
++ (activate_self_path) ? ENABLE : DISABLE);
++ REG_WRITE(mrv_reg->mi_ctrl, mi_ctrl);
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Checks the main or self picture path buffer structure.
++ */
++static int ci_isp_mif_check_mi_path_conf(const struct ci_isp_mi_path_conf
++ *isp_mi_path_conf, int main_buffer)
++{
++ if (!isp_mi_path_conf) {
++ eprintk("isp_mi_path_conf is NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ if (!isp_mi_path_conf->ybuffer.pucbuffer) {
++ eprintk("isp_mi_path_conf->ybuffer.pucbuffer is NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ if (main_buffer) {
++ if ((((unsigned long)(isp_mi_path_conf->ybuffer.pucbuffer)
++ & ~(MRV_MI_MP_Y_BASE_AD_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size
++ & ~(MRV_MI_MP_Y_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size
++ & (MRV_MI_MP_Y_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.offs
++ & ~(MRV_MI_MP_Y_OFFS_CNT_INIT_VALID_MASK)) != 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ } else {
++ if ((((unsigned long) isp_mi_path_conf->ybuffer.pucbuffer
++ & ~(MRV_MI_SP_Y_BASE_AD_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size &
++ ~(MRV_MI_SP_Y_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size &
++ (MRV_MI_SP_Y_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.offs &
++ ~(MRV_MI_SP_Y_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->llength &
++ ~(MRV_MI_SP_Y_LLENGTH_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->
++ llength & (MRV_MI_SP_Y_LLENGTH_VALID_MASK)) == 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ }
++
++ if (isp_mi_path_conf->cb_buffer.pucbuffer != 0) {
++ if (main_buffer) {
++ if ((((unsigned long)
++ isp_mi_path_conf->cb_buffer.pucbuffer
++ & ~(MRV_MI_MP_CB_BASE_AD_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ ~(MRV_MI_MP_CB_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ (MRV_MI_MP_CB_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.offs &
++ ~(MRV_MI_MP_CB_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ } else {
++ if ((((unsigned long)
++ isp_mi_path_conf->cb_buffer.pucbuffer
++ & ~(MRV_MI_SP_CB_BASE_AD_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ ~(MRV_MI_SP_CB_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ (MRV_MI_SP_CB_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.offs &
++ ~(MRV_MI_SP_CB_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ }
++ }
++
++ if (isp_mi_path_conf->cr_buffer.pucbuffer != 0) {
++ if (main_buffer) {
++ if ((((unsigned long)
++ isp_mi_path_conf->cr_buffer.pucbuffer
++ & ~(MRV_MI_MP_CR_BASE_AD_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ ~(MRV_MI_MP_CR_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ (MRV_MI_MP_CR_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.offs &
++ ~(MRV_MI_MP_CR_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)){
++ return CI_STATUS_OUTOFRANGE;
++ }
++ } else {
++ if ((((unsigned long)
++ isp_mi_path_conf->cr_buffer.pucbuffer
++ & ~(MRV_MI_SP_CR_BASE_AD_INIT_VALID_MASK))
++ != 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ ~(MRV_MI_SP_CR_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ (MRV_MI_SP_CR_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.offs &
++ ~(MRV_MI_SP_CR_OFFS_CNT_INIT_VALID_MASK)) != 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ }
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Configures the main picture path buffers of the MI.
++ */
++int ci_isp_mif_set_main_buffer(const struct ci_isp_mi_path_conf
++ *isp_mi_path_conf,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int error = CI_STATUS_FAILURE;
++
++ error = ci_isp_mif_check_mi_path_conf(isp_mi_path_conf, true);
++ if (error != CI_STATUS_SUCCESS)
++ return error;
++
++ /* set register values */
++ REG_SET_SLICE(mrv_reg->mi_mp_y_base_ad_init,
++ MRV_MI_MP_Y_BASE_AD_INIT,
++ (u32)(unsigned long)isp_mi_path_conf->ybuffer.pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_mp_y_size_init, MRV_MI_MP_Y_SIZE_INIT,
++ isp_mi_path_conf->ybuffer.size);
++ REG_SET_SLICE(mrv_reg->mi_mp_y_offs_cnt_init,
++ MRV_MI_MP_Y_OFFS_CNT_INIT,
++ isp_mi_path_conf->ybuffer.offs);
++
++ if (isp_mi_path_conf->cb_buffer.pucbuffer != 0) {
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_base_ad_init,
++ MRV_MI_MP_CB_BASE_AD_INIT,
++ (u32)(unsigned long) isp_mi_path_conf->cb_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_size_init,
++ MRV_MI_MP_CB_SIZE_INIT,
++ isp_mi_path_conf->cb_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_offs_cnt_init,
++ MRV_MI_MP_CB_OFFS_CNT_INIT,
++ isp_mi_path_conf->cb_buffer.offs);
++ }
++
++ if (isp_mi_path_conf->cr_buffer.pucbuffer != 0) {
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_base_ad_init,
++ MRV_MI_MP_CR_BASE_AD_INIT,
++ (u32)(unsigned long) isp_mi_path_conf->cr_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_size_init,
++ MRV_MI_MP_CR_SIZE_INIT,
++ isp_mi_path_conf->cr_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_offs_cnt_init,
++ MRV_MI_MP_CR_OFFS_CNT_INIT,
++ isp_mi_path_conf->cr_buffer.offs);
++ }
++
++ /*
++ * update base and offset registers during next immediate or
++ * automatic update request
++ */
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /*
++ * frame synchronous update of shadow registers,
++ * update is done after the curr frame
++ */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /*
++ * immediate update of shadow registers
++ * (will disturb an ongoing frame processing)
++ */
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ /* no update from within this function */
++ break;
++ default:
++ break;
++ }
++
++ return error;
++}
++
++/*
++ * Configures the self picture path buffers of the MI.
++ *
++ */
++int ci_isp_mif_set_self_buffer(const struct ci_isp_mi_path_conf
++ *isp_mi_path_conf,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int error = CI_STATUS_FAILURE;
++
++ error = ci_isp_mif_check_mi_path_conf(isp_mi_path_conf, false);
++ if (error != CI_STATUS_SUCCESS)
++ return error;
++
++ /* set register values */
++ REG_SET_SLICE(mrv_reg->mi_sp_y_base_ad_init,
++ MRV_MI_SP_Y_BASE_AD_INIT,
++ (u32)(unsigned long)isp_mi_path_conf->ybuffer.pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_size_init, MRV_MI_SP_Y_SIZE_INIT,
++ isp_mi_path_conf->ybuffer.size);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_offs_cnt_init,
++ MRV_MI_SP_Y_OFFS_CNT_INIT,
++ isp_mi_path_conf->ybuffer.offs);
++
++ /*
++ * llength is counted in pixels and this value could be stored
++ * directly into the register
++ */
++ REG_SET_SLICE(mrv_reg->mi_sp_y_llength, MRV_MI_SP_Y_LLENGTH,
++ isp_mi_path_conf->llength);
++
++ if (isp_mi_path_conf->cb_buffer.pucbuffer) {
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_base_ad_init,
++ MRV_MI_SP_CB_BASE_AD_INIT,
++ (u32) (unsigned long)isp_mi_path_conf->cb_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_size_init,
++ MRV_MI_SP_CB_SIZE_INIT,
++ isp_mi_path_conf->cb_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_offs_cnt_init,
++ MRV_MI_SP_CB_OFFS_CNT_INIT,
++ isp_mi_path_conf->cb_buffer.offs);
++ }
++
++ if (isp_mi_path_conf->cr_buffer.pucbuffer) {
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_base_ad_init,
++ MRV_MI_SP_CR_BASE_AD_INIT,
++ (u32) (unsigned long)isp_mi_path_conf->cr_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_size_init,
++ MRV_MI_SP_CR_SIZE_INIT,
++ isp_mi_path_conf->cr_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_offs_cnt_init,
++ MRV_MI_SP_CR_OFFS_CNT_INIT,
++ isp_mi_path_conf->cr_buffer.offs);
++ }
++
++ if ((!isp_mi_path_conf->ypic_width)
++ || (!isp_mi_path_conf->ypic_height)) {
++ return CI_STATUS_FAILURE;
++ }
++
++ REG_SET_SLICE(mrv_reg->mi_sp_y_pic_width, MRV_MI_SP_Y_PIC_WIDTH,
++ isp_mi_path_conf->ypic_width);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_pic_height, MRV_MI_SP_Y_PIC_HEIGHT,
++ isp_mi_path_conf->ypic_height);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_pic_size, MRV_MI_SP_Y_PIC_SIZE,
++ isp_mi_path_conf->ypic_height *
++ isp_mi_path_conf->llength);
++
++ /*
++ * update base and offset registers during next immediate or
++ * automatic update request
++ */
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD,
++ ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ break;
++ default:
++ break;
++ }
++
++ return error;
++}
++
++/*
++ * Configures the DMA path of the MI.
++ *
++ */
++int ci_isp_mif_set_path_and_orientation(const struct ci_isp_mi_ctrl
++ *mrv_mi_ctrl)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int error = CI_STATUS_OUTOFRANGE;
++ u32 mi_ctrl = 0;
++
++ if (!mrv_mi_ctrl) {
++ eprintk("mrv_mi_ctrl is NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ if ((mrv_mi_ctrl->irq_offs_init &
++ ~(MRV_MI_MP_Y_IRQ_OFFS_INIT_VALID_MASK)) != 0) {
++ eprintk("bad mrv_mi_ctrl->irq_offs_init value");
++ return error;
++ }
++
++ REG_SET_SLICE(mrv_reg->mi_mp_y_irq_offs_init,
++ MRV_MI_MP_Y_IRQ_OFFS_INIT, mrv_mi_ctrl->irq_offs_init);
++
++ /* main picture path */
++ switch (mrv_mi_ctrl->main_path) {
++ case CI_ISP_PATH_OFF:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_ENABLE, OFF);
++ break;
++ case CI_ISP_PATH_ON:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_ENABLE, ON);
++ break;
++ case CI_ISP_PATH_JPE:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_JPEG_ENABLE, ON);
++ break;
++ case CI_ISP_PATH_RAW8:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_RAW_ENABLE, ON);
++ break;
++ case CI_ISP_PATH_RAW816:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_RAW_ENABLE, ON);
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_INTERLEAVED);
++ break;
++ default:
++ eprintk("bad mrv_mi_ctrl->main_path value");
++ return error;
++ }
++
++ /* self picture path output format */
++ switch (mrv_mi_ctrl->mrv_mif_sp_out_form) {
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_422:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV422);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_444:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV444);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_420:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV420);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_400:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV400);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_565:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_RGB565);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_888:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_RGB888);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_666:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_RGB666);
++ break;
++
++ default:
++ eprintk("bad mrv_mi_ctrl->mrv_mif_sp_out_form value");
++ return error;
++ }
++
++ /* self picture path input format */
++ switch (mrv_mi_ctrl->mrv_mif_sp_in_form) {
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_422:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV422);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_444:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV444);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_420:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV420);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_400:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV400);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_565:
++ case CI_ISP_MIF_COL_FORMAT_RGB_666:
++ case CI_ISP_MIF_COL_FORMAT_RGB_888:
++ default:
++ eprintk("bad mrv_mi_ctrl->mrv_mif_sp_in_form value");
++ return error;
++ }
++
++ error = CI_STATUS_SUCCESS;
++
++ /* self picture path write format */
++ switch (mrv_mi_ctrl->mrv_mif_sp_pic_form) {
++ case CI_ISP_MIF_PIC_FORM_PLANAR:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_WRITE_FORMAT,
++ MRV_MI_SP_WRITE_FORMAT_PLANAR);
++ break;
++ case CI_ISP_MIF_PIC_FORM_SEMI_PLANAR:
++ if ((mrv_mi_ctrl->mrv_mif_sp_out_form ==
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422)
++ || (mrv_mi_ctrl->mrv_mif_sp_out_form ==
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420)) {
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_WRITE_FORMAT,
++ MRV_MI_SP_WRITE_FORMAT_SEMIPLANAR);
++ } else {
++ error = CI_STATUS_NOTSUPP;
++ }
++ break;
++ case CI_ISP_MIF_PIC_FORM_INTERLEAVED:
++ if (mrv_mi_ctrl->mrv_mif_sp_out_form ==
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422) {
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_WRITE_FORMAT,
++ MRV_MI_SP_WRITE_FORMAT_INTERLEAVED);
++ } else {
++ error = CI_STATUS_NOTSUPP;
++ }
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++
++ }
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->mrv_mif_sp_pic_form value");
++ return error;
++ }
++
++ if (mrv_mi_ctrl->main_path == CI_ISP_PATH_ON) {
++ /* for YCbCr mode only, permitted for raw mode */
++ /* main picture path write format */
++ switch (mrv_mi_ctrl->mrv_mif_mp_pic_form) {
++ case CI_ISP_MIF_PIC_FORM_PLANAR:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_PLANAR);
++ break;
++ case CI_ISP_MIF_PIC_FORM_SEMI_PLANAR:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_SEMIPLANAR);
++ break;
++ case CI_ISP_MIF_PIC_FORM_INTERLEAVED:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_INTERLEAVED);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++ }
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->mrv_mif_mp_pic_form value");
++ return error;
++ }
++
++ /* burst length for chrominance for write port */
++ /* setting burst mode to 16 bits
++ switch (mrv_mi_ctrl->burst_length_chrom) {
++ case CI_ISP_MIF_BURST_LENGTH_4:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_4);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_8:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_8);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_16:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_16);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++ */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_16);
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->burst_length_chrom value");
++ return error;
++ }
++
++ /* burst length for luminance for write port */
++ /* setting burst mode to 16 bits
++ switch (mrv_mi_ctrl->burst_length_lum) {
++ case CI_ISP_MIF_BURST_LENGTH_4:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_4);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_8:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_8);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_16:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_16);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++ */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_16);
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->burst_length_lum value");
++ return error;
++ }
++
++ /* enable updating of the shadow registers for main and self picture
++ * to their init values
++ */
++ switch (mrv_mi_ctrl->init_vals) {
++ case CI_ISP_MIF_NO_INIT_VALS:
++ break;
++ case CI_ISP_MIF_INIT_OFFS:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ break;
++ case CI_ISP_MIF_INIT_BASE:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++ break;
++ case CI_ISP_MIF_INIT_OFFSAndBase:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->init_vals value");
++ return error;
++ }
++
++ /* enable change of byte order for write port */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BYTE_SWAP,
++ (mrv_mi_ctrl->byte_swap_enable) ? ON : OFF);
++
++ /* enable or disable the last pixel signalization */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_LAST_PIXEL_SIG_EN,
++ (mrv_mi_ctrl->last_pixel_enable) ? ON : OFF);
++
++ /* now write settings into register */
++ REG_WRITE(mrv_reg->mi_ctrl, mi_ctrl);
++
++ dprintk(2, "mi_ctrl = 0x%x", mi_ctrl);
++
++ /* self picture path operating mode */
++ if ((mrv_mi_ctrl->self_path == CI_ISP_PATH_ON) ||
++ (mrv_mi_ctrl->self_path == CI_ISP_PATH_OFF)) {
++
++ /* do not call if not supported */
++
++ /* support has been restricted to >= MI_V2 && <= MI_V3 in
++ * ci_isp_mif_set_self_pic_orientation, so we do the same here
++ */
++
++ error = ci_isp_mif_set_self_pic_orientation(
++ mrv_mi_ctrl->mrv_mif_sp_mode,
++ (int) (mrv_mi_ctrl->self_path
++ == CI_ISP_PATH_ON));
++ } else {
++ eprintk("bad mrv_mi_ctrl->self_path value");
++ error = CI_STATUS_OUTOFRANGE;
++ }
++
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++
++ return error;
++}
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstmt9d113/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_MT9D113
++ tristate "Moorestown MT9D113 (Aptina SOC2030) SoC Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform supports the MT9D113 SoC Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstmt9d113.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstmt9d113/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_MT9D113) += mrstmt9d113.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstmt9d113/mrstmt9d113.c
+@@ -0,0 +1,1188 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Low level driver for Aptina SOC2030 sensor
++ * Based on ../mrstov2650/mrstov2650
++
++ * Copyright (c) 2010 Aava Mobile Oy.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * still needed
++ * - register settings, when available
++ * - if hw standby used...number of changes needed, also to ISP driver
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "mrstmt9d113.h"
++
++
++static int mrstmt9d113_debug;
++module_param(mrstmt9d113_debug, int, 0644);
++MODULE_PARM_DESC(mrstmt9d113_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstmt9d113_debug >= level) \
++ printk(KERN_DEBUG "MT9D113:%s: " fmt "\n", __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "!!!MT9D113: %s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static int streaming_is_active;
++static int active_context;
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct mt9d113_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} mt9d113_formats[] = {
++ {
++ .desc = "YUYV 4:2:2",
++ .pixelformat = SENSOR_MODE_BT601,
++ .regs = NULL,
++ },
++};
++#define N_MT9D113_FMTS ARRAY_SIZE(mt9d113_formats)
++
++static struct mt9d113_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} mt9d113_res[] = {
++ {
++ .desc = "UXGA",
++ .res = SENSOR_RES_UXGA,
++ .width = 1600,
++ .height = 1200,
++ .fps = 15,
++ .used = 0,
++ .regs = mt9d113_res_uxga,
++ },
++ {
++ .desc = "SXGA",
++ .res = SENSOR_RES_SXGA,
++ .width = 1280,
++ .height = 1024,
++ .fps = 15,
++ .used = 0,
++ .regs = mt9d113_res_sxga,
++ },
++ {
++ .desc = "SVGA",
++ .res = SENSOR_RES_SVGA,
++ .width = 800,
++ .height = 600,
++ .fps = 15,
++ .used = 0,
++ .regs = mt9d113_res_svga,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 15,
++ .used = 0,
++ .regs = mt9d113_res_vga_vario,
++ },
++ {
++ .desc = "QVGA",
++ .res = SENSOR_RES_QVGA,
++ .width = 320,
++ .height = 240,
++ .fps = 15,
++ .used = 0,
++ .regs = mt9d113_res_qvga,
++ },
++};
++#define N_RES (ARRAY_SIZE(mt9d113_res))
++
++static int mt9d113_i2c_read(struct i2c_client *client, u16 reg, u16 *value)
++{
++ struct i2c_msg msg[2];
++ int ret;
++ u8 msgbuf[2];
++ u8 ret_val[2];
++
++ memset(&msg, 0, sizeof(msg));
++
++ msgbuf[0] = (u8)(reg >> 8);
++ msgbuf[1] = (u8)(reg & 0xff);
++ msg[0].addr = client->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = 2;
++
++ msg[1].addr = client->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = ret_val;
++ msg[1].len = 2;
++
++ ret = i2c_transfer(client->adapter, &msg[0], 2);
++ ret = (ret == 2) ? 0 : -1;
++ *value = (ret_val[0] << 8 | ret_val[1]);
++
++
++ return ret;
++}
++
++static int mt9d113_i2c_write(struct i2c_client *client, u16 reg, u16 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[4];
++
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = (u8)(reg >> 8);
++ msgbuf[i++] = (u8)(reg & 0xff);
++ msgbuf[i++] = (u8)(value >> 8);
++ msgbuf[i++] = (u8)(value & 0xff);
++
++ msg.addr = client->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(client->adapter, &msg, 1);
++ ret = (ret == 1) ? 0 : -1;
++
++
++ return ret;
++}
++
++static int mt9d113_read(int dest, struct i2c_client *client, u16 reg_or_var, u16 *value)
++{
++ int ret;
++
++ if (dest == DEST_REG) {
++ ret = mt9d113_i2c_read(client, reg_or_var, value);
++ } else if (dest == DEST_VAR) {
++ ret = mt9d113_i2c_write(client, MT9D113_MCU_VARIABLE_ADDRESS_REG, reg_or_var);
++ ret |= mt9d113_i2c_read(client, MT9D113_MCU_VARIABLE_DATA_REG, value);
++ } else {
++ printk(KERN_ERR "mt9d113_read: Error, unknown destination\n");
++ ret = -1;
++ }
++
++ return ret;
++
++}
++static int mt9d113_write(int dest, struct i2c_client *client, u16 reg_or_var, u16 value)
++{
++ int ret;
++
++ if (dest == DEST_REG) {
++ ret = mt9d113_i2c_write(client, reg_or_var, value);
++ } else if (dest == DEST_VAR) {
++ ret = mt9d113_i2c_write(client, MT9D113_MCU_VARIABLE_ADDRESS_REG, reg_or_var);
++ ret |= mt9d113_i2c_write(client, MT9D113_MCU_VARIABLE_DATA_REG, value);
++ } else {
++ printk(KERN_ERR "mt9d113_write: Error, unknown destination\n");
++ ret = -1;
++ }
++
++ return ret;
++}
++
++static int mt9d113_write_array(struct i2c_client *client, struct regval_list *vals)
++{
++ struct regval_list *p = vals;
++ int ret;
++
++ while (p->dest != DEST_NON) {
++ ret = mt9d113_write(p->dest, client, p->reg_or_var, p->value);
++ p++;
++ if (ret != 0)
++ return ret;
++ }
++
++ return ret;
++}
++
++static int mt9d113_enable_parallel_if(struct i2c_client *client)
++{
++ int ret;
++ u16 reg_value;
++
++ ret = mt9d113_read(DEST_REG, client, 0x1a, &reg_value);
++ reg_value |= BIT(9);
++ ret |= mt9d113_write(DEST_REG, client, 0x1a, reg_value);
++
++ return ret;
++}
++
++static int mt9d113_disable_parallel_if(struct i2c_client *client)
++{
++ int ret;
++ u16 reg_value;
++
++ ret = mt9d113_read(DEST_REG, client, 0x1a, &reg_value);
++ reg_value &= ~BIT(9);
++ ret |= mt9d113_write(DEST_REG, client, 0x1a, reg_value);
++
++ return ret;
++}
++
++static int mt9d113_hw_standby(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ return 0;
++}
++
++static int mt9d113_hw_wakeup(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ return 0;
++}
++
++static int mt9d113_soft_standby_ctrl(struct i2c_client *client, int sleep_or_wakeup)
++{
++ u16 reg_val;
++ int ret, timeout = 0;
++
++ ret = mt9d113_read(DEST_REG, client, 0x18, &reg_val);
++ dprintk(1, "JW: REG[DEST_REG]=0x%04x\n", reg_val);
++ if (sleep_or_wakeup == SENSOR_SLEEP) {
++
++ reg_val |= BIT(0);
++ ret |= mt9d113_write(DEST_REG, client, 0x18, reg_val);
++
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_REG, client, 0x18, &reg_val);
++ } while ((!(reg_val & BIT(14))) && (++timeout < TIMEOUT_LOOP_VALUE));
++ } else {
++
++ reg_val &= ~BIT(0);
++ ret |= mt9d113_write(DEST_REG, client, 0x18, reg_val);
++
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_REG, client, 0x18, &reg_val);
++ } while ((reg_val & BIT(14)) && (++timeout < TIMEOUT_LOOP_VALUE));
++ }
++
++ if (timeout >= TIMEOUT_LOOP_VALUE) {
++ dprintk(1, "JW: sensor soft reset fail,REG[DEST_REG]=0x%04x\n", reg_val);
++ ret = -1;
++ }
++
++ return ret;
++}
++
++static int mt9d113_reset(struct i2c_client *client)
++{
++ u16 reg_val;
++ int ret;
++
++ ret = mt9d113_write(DEST_REG, client, 0x1A, 0x3);
++ msleep(10);
++ ret |= mt9d113_read(DEST_REG, client, 0x1A, &reg_val);
++ reg_val &= ~BIT(0);
++ ret |= mt9d113_write(DEST_REG, client, 0x1A, reg_val);
++ msleep(10);
++
++ return ret;
++}
++
++static int mt9d113_write_refresh_cmd(struct i2c_client *client)
++{
++ u16 reg_val;
++ int timeout, ret;
++
++ ret = mt9d113_write(DEST_VAR, client, SET_ID_VAR8(1, 3), 0x5);
++
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 3), &reg_val);
++ } while ((reg_val != 0) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ if (timeout >= TIMEOUT_LOOP_VALUE)
++ ret = -1;
++
++ return ret;
++}
++
++static int mt9d113_write_refresh_mode_cmd(struct i2c_client *client)
++{
++ u16 reg_val;
++ int timeout, ret;
++
++ ret = mt9d113_write(DEST_VAR, client, SET_ID_VAR8(1, 3), 0x6);
++
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 3), &reg_val);
++ } while ((reg_val != 0) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ if (timeout >= TIMEOUT_LOOP_VALUE)
++ ret = -1;
++
++ return ret;
++}
++
++static int mt9d113_write_refresh(struct i2c_client *client)
++{
++ int ret;
++
++ ret = mt9d113_write_refresh_mode_cmd(client);
++ ret |= mt9d113_write_refresh_cmd(client);
++
++ return ret;
++}
++
++static int mt9d113_set_active_context(struct i2c_client *client, int context)
++{
++ u16 reg_val;
++ int timeout, ret;
++
++ /*Context A*/
++ if (context == CONTEXT_A) {
++ ret = mt9d113_write(DEST_VAR, client, SET_ID_VAR8(1, 3), 0x1);
++
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 3), &reg_val);
++ } while ((reg_val != 0) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 4), &reg_val);
++ } while ((reg_val != 3) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ } else { /*Context B*/
++ ret = mt9d113_write(DEST_VAR, client, SET_ID_VAR8(1, 3), 0x2);
++
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 3), &reg_val);
++ } while ((reg_val != 0) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 4), &reg_val);
++ } while ((reg_val != 7) && (++timeout < TIMEOUT_LOOP_VALUE));
++ }
++
++ return ret;
++}
++
++static int mt9d113_load_errata(struct i2c_client *c)
++{
++ int ret;
++
++ ret = mt9d113_write_array(c, errata_issue02);
++
++ return ret;
++}
++
++static int mt9d113_init(struct i2c_client *client)
++{
++ int ret = 0;
++ u16 reg_value, timeout;
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = mt9d113_formats[0].pixelformat;
++ info->res = mt9d113_res[0].res;
++ info->type = SENSOR_TYPE_SOC;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_ON;
++ info->cconv = SENSOR_CCONV_ON;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++
++ info->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_RGRGGBGB;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_NEG;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = 0;
++ memcpy(info->name, "mt9d113", 8);
++
++ /* Set all regs to default values */
++ mt9d113_reset(client);
++
++ ret |= mt9d113_write(DEST_REG, client, 0x16, 0x200); /* EXTCLK enable*/
++ ret |= mt9d113_write(DEST_REG, client, 0x1e, 0x404); /* PAD_SLEW*/
++ ret |= mt9d113_write(DEST_REG, client, 0x22, 0x1f4); /* Internal standby delay for 25Mhz*/
++
++ ret = mt9d113_write(DEST_REG, client, 0x14, 0x21f9); /* PLL control: BYPASS PLL*/
++/*
++ * ov2650 uses belows
++ * 10 -> a72
++ * 12 -> 1ffd
++ *
++ ret |= mt9d113_write(DEST_REG, client, 0x10, (10<<8) | (114)); // PLL n, m Dividers
++ ret |= mt9d113_write(DEST_REG, client, 0x12, (0<<12) | (15<<8) | (13)); // PLL wcd, p3, p1 Dividers
++*/
++ ret |= mt9d113_write(DEST_REG, client, 0x10, 0xa72); /* PLL n, m Dividers*/
++ ret |= mt9d113_write(DEST_REG, client, 0x12, 0x1ffd); /* PLL wcd, p3, p1 Dividers*/
++/* ret |= mt9d113_write(DEST_REG, client, 0x10, 0x444);*/ /* PLL n, m Dividers*/
++/* ret |= mt9d113_write(DEST_REG, client, 0x12, 0x1ff7);*/ /* PLL wcd, p3, p1 Dividers*/
++
++
++ ret |= mt9d113_write(DEST_REG, client, 0x14, 0x21fb); /* PLL control: PLL_ENABLE on*/
++ ret |= mt9d113_write(DEST_REG, client, 0x14, 0x20fb); /* PLL control: SEL_LOCK_DET on*/
++
++ timeout = 0;
++ do {
++ ret |= mt9d113_read(DEST_REG, client, 0x14, &reg_value);
++ msleep(SLEEP_TIME_IN_LOOP);
++ } while ((!(reg_value & 0x8000)) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ if (timeout >= TIMEOUT_LOOP_VALUE)
++ return -1;
++
++ /*Set MCU Powerup Stop*/
++ ret |= mt9d113_read(DEST_REG, client, 0x18, &reg_value);
++ reg_value |= BIT(2);
++ ret |= mt9d113_write(DEST_REG, client, 0x18, reg_value);
++
++ ret |= mt9d113_write(DEST_REG, client, 0x28, 0x0); /* The registers and state variables are retained when STANDBY is asserted*/
++ ret |= mt9d113_write(DEST_REG, client, 0x14, 0x20fa); /* PLL control: PLL_BYPASS off*/
++
++ /*Start MCU sequencer*/
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++
++ /*Disable i/f*/
++ mt9d113_disable_parallel_if(client);
++
++ /*set default output size*/
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x3), 640);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x5), 480);
++ active_context = CONTEXT_A;
++
++ /*swap clock and chrominance luma*/
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x55), 0x202);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x57), 0x202);
++
++ /*Set vert flip*/
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x17), &reg_value);
++ reg_value |= BIT(1);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x17), reg_value);
++
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x2d), &reg_value);
++ reg_value |= BIT(1);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x2d), reg_value);
++
++ /* set video mode for context B*/
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR8(1, 0x15), 0x2);
++
++ mt9d113_load_errata(client);
++
++ /*release MCU Powerup Stop*/
++ ret |= mt9d113_read(DEST_REG, client, 0x18, &reg_value);
++ reg_value &= ~BIT(2);
++ ret |= mt9d113_write(DEST_REG, client, 0x18, reg_value);
++
++ /* wait until the sensor enters the preview state */
++ timeout = 0;
++ do {
++ msleep(SLEEP_TIME_IN_LOOP);
++ ret |= mt9d113_read(DEST_VAR, client, SET_ID_VAR8(1, 0x4), &reg_value);
++ } while (((reg_value & 0x3) != 3) && (++timeout < TIMEOUT_LOOP_VALUE));
++
++ /* synchronize the FW with the sensor */
++ ret |= mt9d113_write_refresh(client);
++
++ /* Make sure that parallel i/f is disabled and set sensor to standby*/
++ mt9d113_disable_parallel_if(client);
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++
++ return ret;
++}
++
++static int distance(struct mt9d113_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++
++static int mt9d113_try_res(u32 *w, u32 *h)
++{
++ struct mt9d113_res_struct *res_index, *p = NULL;
++ int dis, last_dis = mt9d113_res->width + mt9d113_res->height;
++
++ dprintk(1, "&&&&& before %dx%d", *w, *h);
++ for (res_index = mt9d113_res;
++ res_index < mt9d113_res + N_RES;
++ res_index++) {
++ if ((res_index->width <= *w) && (res_index->height <= *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++
++ /*
++ if (p == NULL) {
++ p = mt9d113_res;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++ */
++ if (res_index == mt9d113_res + N_RES)
++ res_index = mt9d113_res + N_RES - 1;
++
++ *w = res_index->width;
++ *h = res_index->height;
++
++ dprintk(1, "&&&&& after %dx%d", *w, *h);
++ return 0;
++}
++
++static struct mt9d113_res_struct *mt9d113_to_res(u32 w, u32 h)
++{
++ struct mt9d113_res_struct *res_index;
++
++ for (res_index = mt9d113_res;
++ res_index < mt9d113_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= mt9d113_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int mt9d113_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ dprintk(1, "&&&&& before %dx%d\n", fmt->fmt.pix.width,
++ fmt->fmt.pix.height);
++
++ return mt9d113_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++
++ dprintk(1, "&&&&& after %dx%d", fmt->fmt.pix.width,
++ fmt->fmt.pix.height);
++ DBG_leaving;
++
++ return 0;
++}
++
++static int mt9d113_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == mt9d113_res[index].width) &&
++ (height == mt9d113_res[index].height)) {
++ mt9d113_res[index].used = 1;
++ continue;
++ }
++ mt9d113_res[index].used = 0;
++ }
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++
++ return 0;
++}
++
++static int mt9d113_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct mt9d113_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ ret = mt9d113_try_res(&width, &height);
++ res_index = mt9d113_to_res(width, height);
++
++ if ((info->res != res_index->res) && (res_index->regs)) {
++
++ dprintk(2, "changing res from to %dx%d", width, height);
++
++ if (!streaming_is_active)
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++
++ if (width <= 800 && height <= 600) {
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 3), width);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 5), height);
++ active_context = CONTEXT_A;
++ } else {
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 7), width);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 9), height);
++ active_context = CONTEXT_B;
++ }
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == mt9d113_res[index].width) &&
++ (height == mt9d113_res[index].height)) {
++ mt9d113_res[index].used = 1;
++ continue;
++ }
++ mt9d113_res[index].used = 0;
++ }
++ if (!streaming_is_active)
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++ else
++ ret |= mt9d113_write_refresh(client);
++ }
++
++ info->res = res_index->res;
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int mt9d113_q_hflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u16 val;
++ int ret;
++
++ if (!streaming_is_active)
++ ret = mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++
++ ret = mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x17), &val);
++ *value = (val & BIT(0)) ? 1 : 0;
++
++ if (!streaming_is_active)
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++
++ return ret;
++}
++
++static int mt9d113_t_hflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u16 val;
++ int ret;
++
++ if (!streaming_is_active)
++ ret = mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++
++ ret = mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x17), &val);
++ if (value)
++ val |= BIT(0);
++ else
++ val &= ~BIT(0);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x17), val);
++
++ ret = mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x2d), &val);
++ if (value)
++ val |= BIT(0);
++ else
++ val &= ~BIT(0);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x2d), val);
++
++ ret |= mt9d113_write_refresh(client);
++
++ if (!streaming_is_active)
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++
++ return ret;
++}
++
++static int mt9d113_q_vflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u16 val;
++ int ret;
++
++ if (!streaming_is_active)
++ ret = mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++
++ ret = mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x17), &val);
++ *value = (val & BIT(1)) ? 1 : 0;
++
++ if (!streaming_is_active)
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++
++ return ret;
++}
++
++static int mt9d113_t_vflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u16 val;
++ int ret;
++
++ if (!streaming_is_active)
++ ret = mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++
++ ret = mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x17), &val);
++ if (value)
++ val |= BIT(1);
++ else
++ val &= ~BIT(1);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x17), val);
++
++ ret = mt9d113_read(DEST_VAR, client, SET_ID_VAR(7, 0x2d), &val);
++ if (value)
++ val |= BIT(1);
++ else
++ val &= ~BIT(1);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR(7, 0x2d), val);
++
++ ret |= mt9d113_write_refresh(client);
++
++ if (!streaming_is_active)
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++
++ return ret;
++}
++
++static struct mt9d113_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} mt9d113_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = mt9d113_t_vflip,
++ .query = mt9d113_q_vflip,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal mirror",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = mt9d113_t_hflip,
++ .query = mt9d113_q_hflip,
++ },
++#if 0
++ {
++ .parm = {
++ .index = V4L2_CID_AUTO_WHITE_BALANCE,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Auto White Balance",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = mt9d113_t_awb,
++ .query = mt9d113_q_awb,
++ },
++ {
++ .parm = {
++ .index = V4L2_CID_AUTOGAIN,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Auto Gain Control",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = mt9d113_t_agc,
++ .query = mt9d113_q_agc,
++
++ },
++ {
++ .parm = {
++ .index = V4L2_CID_BLACK_LEVEL,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Black Level Control",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = mt9d113_t_blc,
++ .query = mt9d113_q_blc,
++
++ },
++#endif
++};
++#define N_CONTROLS (ARRAY_SIZE(mt9d113_controls))
++
++static struct mt9d113_control *mt9d113_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (mt9d113_controls[i].qc.id == id)
++ return mt9d113_controls + i;
++
++ return NULL;
++}
++
++static int mt9d113_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct mt9d113_control *octrl;
++
++ octrl = mt9d113_find_control(qc->id);
++ if (NULL == octrl)
++ return -EINVAL;
++ *qc = octrl->qc;
++
++ return 0;
++}
++
++static int mt9d113_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct mt9d113_control *octrl = mt9d113_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++
++ return ret;
++}
++
++static int mt9d113_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct mt9d113_control *octrl = mt9d113_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++
++ return ret;
++}
++
++static int mt9d113_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret = 0;
++
++ DBG_entering;
++ dprintk(1, "enable=%d\n", enable);
++ if (enable) {
++
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_WAKEUP);
++ msleep(100);
++ ret |= mt9d113_set_active_context(client, active_context);
++
++ msleep(100);
++ ret |= mt9d113_write(DEST_VAR, client, SET_ID_VAR8(1, 0x2), 0);
++
++ msleep(100);
++ ret |= mt9d113_enable_parallel_if(client);
++ msleep(300);
++
++ streaming_is_active = 1;
++
++ } else {
++
++ ret |= mt9d113_disable_parallel_if(client);
++ ret |= mt9d113_soft_standby_ctrl(client, SENSOR_SLEEP);
++
++ streaming_is_active = 0;
++ }
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int mt9d113_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_MT9D113 8251
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9D113, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int mt9d113_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u16 val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ ret = mt9d113_read(DEST_REG, client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 2;
++
++ return ret;
++}
++
++static int mt9d113_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ ret = mt9d113_write(DEST_REG, client, reg->reg & 0xffff, reg->val & 0xffff);
++
++ return ret;
++}
++#endif
++static int mt9d113_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++ if (index >= N_RES)
++ return -EINVAL;
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = mt9d113_res[index].width;
++ fsize->discrete.height = mt9d113_res[index].height;
++ fsize->reserved[0] = mt9d113_res[index].used;
++ DBG_leaving;
++ return 0;
++}
++static int mt9d113_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = 15;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static const struct v4l2_subdev_video_ops mt9d113_video_ops = {
++ .try_fmt = mt9d113_try_fmt,
++ .s_fmt = mt9d113_set_fmt,
++ .g_fmt = mt9d113_get_fmt,
++ .s_stream = mt9d113_s_stream,
++ .enum_framesizes = mt9d113_enum_framesizes,
++ .enum_frameintervals = mt9d113_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops mt9d113_core_ops = {
++ .g_chip_ident = mt9d113_g_chip_ident,
++ .queryctrl = mt9d113_queryctrl,
++ .g_ctrl = mt9d113_g_ctrl,
++ .s_ctrl = mt9d113_s_ctrl,
++ /*.g_ext_ctrls = mt9d113_g_ext_ctrls,*/
++ /*.s_ext_ctrls = mt9d113_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = mt9d113_g_register,
++ .s_register = mt9d113_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops mt9d113_ops = {
++ .core = &mt9d113_core_ops,
++ .video = &mt9d113_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++#if 0
++static unsigned short normal_i2c[] = {I2C_MT9D113 >> 1, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver mt9d113_driver;
++#endif
++static int mt9d113_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u16 val;
++ int ret;
++
++ dprintk(1, "Now start mt9d113 detect\n");
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
++ return -ENODEV;
++
++ if (adap_id != 1)
++ return -ENODEV;
++
++ ret = mt9d113_hw_wakeup();
++ /*
++ * Read and check MT9D113 chip ID (SYSCTL 0x0000)
++ */
++ ret |= mt9d113_read(DEST_REG, client, 0x00, &val);
++
++ if (ret != 0 || val != 0x2580)
++ return -ENODEV;
++
++ return 0;
++}
++static int mt9d113_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++
++ dprintk(2, "Init mt9d113 sensor \n");
++
++
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ ret = mt9d113_detect(client);
++ if (ret) {
++ kfree(info);
++ return -ENODEV;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &mt9d113_ops);
++
++ ret |= mt9d113_init(client);
++
++ if (ret < 0)
++ dprintk(2, "mt9d113 sensor init fails\n");
++ else
++ dprintk(2, "Init mt9d113 sensor success.\n");
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int mt9d113_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++ return 0;
++}
++
++static const struct i2c_device_id mt9d113_id[] = {
++ {"mt9d113", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, mt9d113_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "mt9d113",
++ .probe = mt9d113_probe,
++ .remove = mt9d113_remove,
++ /* .suspend = mt9d113_suspend,
++ * .resume = mt9d113_resume, */
++ .id_table = mt9d113_id,
++};
++
++MODULE_AUTHOR("<aavamobile.com>");
++MODULE_DESCRIPTION("A low-level driver for mt9d113 (Aptina SOC2030) sensors");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstmt9d113/mrstmt9d113.h
+@@ -0,0 +1,107 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Low level driver for Aptina SOC2030 sensor
++ * Based on ../mrstov2650/mrstov2650
++ *
++ * Copyright (c) 2010 Aava Mobile Oy.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ */
++
++#define GPIO_STDBY_PIN 48
++
++#define MT9D113_MCU_VARIABLE_ADDRESS_REG 0x98c
++#define MT9D113_MCU_VARIABLE_DATA_REG 0x990
++
++/* General definition for mt9d113 */
++
++#define SLEEP_TIME_IN_LOOP 10
++#define TIMEOUT_LOOP_VALUE 100
++
++#define SENSOR_SLEEP 0
++#define SENSOR_WAKEUP 1
++
++#define CONTEXT_A 0
++#define CONTEXT_B 1
++
++#define DEST_NON 0
++#define DEST_REG 1
++#define DEST_VAR 2
++
++#define SET_ID_VAR(id, var) ((1<<13) | (id<<8) | (var))
++#define SET_ID_VAR8(id, var) ((1<<15) | (1<<13) | (id<<8) | (var))
++
++struct regval_list {
++ u8 dest;
++ u16 reg_or_var;
++ u16 value;
++};
++
++static struct regval_list errata_issue02[] = {
++
++ {DEST_REG, 0x3084, 0x240C},
++ {DEST_REG, 0x3092, 0x0A4C},
++ {DEST_REG, 0x3094, 0x4C4C},
++ {DEST_REG, 0x3096, 0x4C54},
++ {DEST_NON, 0xffff, 0xffff}
++};
++
++/*
++ * Default register value
++ * 1600x1200 YUV
++ */
++static struct regval_list mt9d113_def_reg[] = {
++
++ {DEST_NON, 0xffff, 0xffff}
++};
++
++/* 800x600 */
++static struct regval_list mt9d113_res_svga[] = {
++
++ {DEST_NON, 0xffff, 0xffff}
++};
++
++/* 640x480 */
++static struct regval_list mt9d113_res_vga_vario[] = {
++
++/* {DEST_VAR, SET_ID_VAR(1,0), 0x00}, */
++ {DEST_NON, 0xffff, 0xffff}
++};
++
++/* 640x480 reverse */
++/*
++static struct regval_list mt9d113_res_vga_reverse[] = {
++
++ {DEST_NON, 0xffff, 0xffff}
++};
++*/
++
++/* 320x240 */
++static struct regval_list mt9d113_res_qvga[] = {
++
++ {DEST_NON, 0xffff, 0xffff}
++};
++
++static struct regval_list mt9d113_res_uxga[] = {
++
++ {DEST_NON, 0xffff, 0xffff}
++};
++
++static struct regval_list mt9d113_res_sxga[] = {
++
++ {DEST_NON, 0xffff, 0xffff}
++};
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov2650/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV2650
++ tristate "Moorestown OV2650 SoC Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support OV2650 SoC Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov2650/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_OV2650) += mrstov2650.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+\ No newline at end of file
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov2650/mrstov2650.c
+@@ -0,0 +1,1190 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "ov2650.h"
++
++static int mrstov2650_debug;
++module_param(mrstov2650_debug, int, 0644);
++MODULE_PARM_DESC(mrstov2650_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov2650_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct ov2650_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} ov2650_formats[] = {
++ {
++ .desc = "YUYV 4:2:2",
++ .pixelformat = SENSOR_MODE_BT601,
++ .regs = NULL,
++ },
++};
++#define N_OV2650_FMTS ARRAY_SIZE(ov2650_formats)
++
++static struct ov2650_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} ov2650_res[] = {
++ {
++ .desc = "UXGA",
++ .res = SENSOR_RES_UXGA,
++ .width = 1600,
++ .height = 1200,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_uxga,
++ },
++ {
++ .desc = "SXGA",
++ .res = SENSOR_RES_SXGA,
++ .width = 1280,
++ .height = 1024,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_sxga,
++ },
++ {
++ .desc = "SVGA",
++ .res = SENSOR_RES_SVGA,
++ .width = 800,
++ .height = 600,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_svga,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_vga_vario,
++ },
++ {
++ .desc = "QVGA",
++ .res = SENSOR_RES_QVGA,
++ .width = 320,
++ .height = 240,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_qvga,
++ },
++};
++
++#define N_RES (ARRAY_SIZE(ov2650_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int ov2650_read(struct i2c_client *c, u16 reg, u8 *value)
++{
++ int ret;
++ int i;
++ struct i2c_msg msg[2];
++ u8 msgbuf[2];
++ u8 ret_val = 0;
++ *value = 0;
++ /* Read needs two message to go */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ i = 0;
++ msgbuf[i++] = reg >> 8;
++ msgbuf[i++] = reg;
++ msg[0].addr = c->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = i;
++
++ msg[1].addr = c->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = &ret_val;
++ msg[1].len = 1;
++
++ ret = i2c_transfer(c->adapter, &msg[0], 2);
++ *value = ret_val;
++
++ ret = (ret == 2) ? 0 : -1;
++ return ret;
++}
++
++static int ov2650_write(struct i2c_client *c, u16 reg, u8 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = reg >> 8;
++ msgbuf[i++] = reg;
++ msgbuf[i++] = value;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ /* If this is a reset register, wait for 1ms */
++ if (reg == OV2650_SYS && (value & 0x80))
++ msleep(3);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int ov2650_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u8 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++ p = vals;
++ while (p->reg_num != 0xffff) {
++ ov2650_write(c, p->reg_num, p->value);
++ ov2650_read(c, p->reg_num, &read_val);
++ if (read_val != p->value)
++ err_num++;
++ p++;
++ i++;
++ }
++ return 0;
++}
++
++static int ov2650_set_data_pin_in(struct i2c_client *client)
++{
++ int ret = 0;
++ u8 reg;
++
++ ret += ov2650_write(client, 0x30b0, 0x00);
++
++ ret += ov2650_read(client, 0x30b1, &reg);
++ reg &= 0xfc;
++ ret += ov2650_write(client, 0x30b1, reg);
++
++ return ret;
++}
++
++static int ov2650_set_data_pin_out(struct i2c_client *client)
++{
++ int ret = 0;
++ u8 reg;
++
++ ret += ov2650_write(client, 0x30b0, 0xff);
++
++ ret += ov2650_read(client, 0x30b1, &reg);
++ reg &= 0xfc;
++ reg |= 0x03;
++ ret += ov2650_write(client, 0x30b1, reg);
++
++ return ret;
++}
++/*
++ * Sensor specific helper function
++ */
++static int ov2650_standby(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int ov2650_wakeup(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ dprintk(1, "PM: wakeup called\n");
++ return 0;
++}
++
++static int ov2650_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ ov2650_standby();
++ if (val == 0)
++ ov2650_wakeup();
++ return 0;
++}
++
++static int ov2650_init(struct i2c_client *c)
++{
++ int ret;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = ov2650_formats[0].pixelformat;
++ info->res = ov2650_res[0].res;
++ info->type = SENSOR_TYPE_SOC;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_ON;
++ info->cconv = SENSOR_CCONV_ON;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++ info->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_BGBGGRGR;/* GRGRBGBG; */
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_POS;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = 0;
++ memcpy(info->name, "ov2650", 7);
++
++ ret = ov2650_write(c, OV2650_SYS, 0x80);
++ /* Set registers into default config value */
++ ret += ov2650_write_array(c, ov2650_def_reg);
++
++ /* added by wen to stop sensor from streaming */
++ ov2650_write(c, 0x3086, 0x0f);
++ ov2650_set_data_pin_in(c);
++ ssleep(1);
++
++ return ret;
++}
++
++static int distance(struct ov2650_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++
++static int ov2650_try_res(u32 *w, u32 *h)
++{
++ struct ov2650_res_struct *res_index, *p = NULL;
++ int dis, last_dis = ov2650_res->width + ov2650_res->height;
++
++ dprintk(1, "&&&&& before %dx%d", *w, *h);
++ for (res_index = ov2650_res;
++ res_index < ov2650_res + N_RES;
++ res_index++) {
++ if ((res_index->width <= *w) && (res_index->height <= *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++ if ((res_index->width < *w) || (res_index->height < *h)) {
++ if (res_index != ov2650_res)
++ res_index--;
++ }
++
++ /*
++ if (p == NULL) {
++ p = ov2650_res;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++ */
++ if (res_index == ov2650_res + N_RES)
++ res_index = ov2650_res + N_RES - 1;
++
++ *w = res_index->width;
++ *h = res_index->height;
++
++ dprintk(1, "&&&&& after %dx%d", *w, *h);
++ return 0;
++}
++
++static struct ov2650_res_struct *ov2650_to_res(u32 w, u32 h)
++{
++ struct ov2650_res_struct *res_index;
++
++ for (res_index = ov2650_res;
++ res_index < ov2650_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= ov2650_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int ov2650_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ dprintk(1, "&&&&& before %dx%d", fmt->fmt.pix.width,
++ fmt->fmt.pix.height);
++ return ov2650_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ dprintk(1, "&&&&& after %dx%d", fmt->fmt.pix.width,
++ fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int ov2650_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov2650_res[index].width) &&
++ (height == ov2650_res[index].height)) {
++ ov2650_res[index].used = 1;
++ continue;
++ }
++ ov2650_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++}
++
++static int ov2650_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct ov2650_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ ret = ov2650_try_res(&width, &height);
++ res_index = ov2650_to_res(width, height);
++
++ ov2650_wakeup();
++
++ /* if ((info->res != res_index->res) && (res_index->regs)) { */
++ if (res_index->regs) {
++
++ dprintk(2, "changing res from to %dx%d", width, height);
++ ret = ov2650_write(client, OV2650_SYS, 0x80);
++ ret += ov2650_write_array(client, ov2650_def_reg);
++ ret += ov2650_write_array(client, res_index->regs);
++
++ /* add to debug
++ if(res_index->res == SENSOR_RES_VGA) {
++ ret += ov2650_write_array(c, ov2650_def_reg);
++ ret += ov2650_write_array(c, res_index->regs);
++ } else {
++ ret += ov2650_write_array(c, ov2650_res_vga_reverse);
++ ret += ov2650_write_array(c, res_index->regs);
++ }
++ */
++
++ /* Add delay here to get better image */
++ /*
++ if (res_index->res == SENSOR_RES_SXGA ||
++ res_index->res == SENSOR_RES_UXGA)
++ msleep(2000);
++ else
++ msleep(900);
++ */
++
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov2650_res[index].width) &&
++ (height == ov2650_res[index].height)) {
++ ov2650_res[index].used = 1;
++ continue;
++ }
++ ov2650_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ ov2650_res[index].used);
++
++ }
++
++ info->res = res_index->res;
++
++ /*
++ int i;
++ unsigned char value;
++ printk(KERN_WARNING "2650 reg dumping start:\n");
++ for(i = 0x3000; i <= 0x360B; i ++) {
++ ov2650_read(c, i, &value);
++ printk(KERN_WARNING "reg at offset %4x = %x\n", i, value);
++ }
++ printk(KERN_WARNING "2650 reg dumping finished:\n");
++ */
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int ov2650_q_hflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err;
++ unsigned char v;
++
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ *value = (v & 0x02) == 0x02;
++ return err;
++}
++
++static int ov2650_t_hflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned char v, v1 = 0;
++ int err;
++
++ value = value >= 1 ? 1 : 0;
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ if (value) {
++ v |= 0x02;
++ v1 |= 0x08;
++ info->bpat = SENSOR_BPAT_GRGRBGBG;/*BGBGGRGR;*/
++ } else {
++ v &= ~0x02;
++ v1 &= ~0x08;
++ info->bpat = SENSOR_BPAT_BGBGGRGR;
++ }
++ err += ov2650_write(client, OV2650_TMC_6, v);
++ err += ov2650_write(client, 0x3090, v1);
++ msleep(10); /* FIXME */
++
++ return err;
++}
++
++static int ov2650_q_vflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err;
++ unsigned char v;
++
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ *value = (v & 0x01) == 0x01;
++ return err;
++}
++
++
++static int ov2650_t_vflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err = 0;
++ unsigned char v;
++
++ value = value >= 1 ? 1 : 0;
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ if (value)
++ v |= 0x01;
++ else
++ v &= ~0x01;
++ err += ov2650_write(client, OV2650_TMC_6, v);
++ msleep(10); /* FIXME */
++
++ return err;
++}
++
++#if 0
++static int ov2650_t_awb(struct i2c_client *c, int value)
++{
++ unsigned char v;
++ int ret;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ if (value & 0x01) {
++ v |= 0x30;
++ info->awb = SENSOR_AWB_AUTO;
++ } else {
++ v &= ~0x30;
++ info->awb = SENSOR_AWB_OFF;
++ }
++ ret += ov2650_write(c, OV2650_ISP_CTL_0, v);
++ msleep(10); /* FIXME */
++
++ return ret;
++}
++
++static int ov2650_q_awb(struct i2c_client *c, int *value)
++{
++ int ret;
++ unsigned char v;
++
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ *value = (v & 0x30) == 0x30;
++ return ret;
++}
++
++static int ov2650_t_agc(struct i2c_client *c, int value)
++{
++ unsigned char v;
++ int ret;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ if (value & 0x01) {
++ v |= 0x10;
++ info->agc = SENSOR_AGC_AUTO;
++ } else {
++ v &= ~0x10;
++ info->agc = SENSOR_AGC_OFF;
++ }
++ ret += ov2650_write(c, OV2650_ISP_CTL_0, v);
++ msleep(10); /* FIXME */
++
++ return ret;
++}
++
++static int ov2650_q_agc(struct i2c_client *c, int *value)
++{
++ int ret;
++ unsigned char v;
++
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ *value = (v & 0x10) == 0x10;
++ return ret;
++}
++
++static int ov2650_t_blc(struct i2c_client *c, int value)
++{
++ unsigned char v;
++ int ret;
++
++ value = value >= 1 ? 1 : 0;
++
++ ret = ov2650_read(c, OV2650_BLCC, &v);
++ if (value & 0x01)
++ v |= 0x10;
++ else
++ v &= ~0x10;
++ ret += ov2650_write(c, OV2650_BLCC, v);
++ msleep(10); /* FIXME */
++
++ return ret;
++}
++
++static int ov2650_q_blc(struct i2c_client *c, int *value)
++{
++ int ret;
++ unsigned char v;
++
++ ret = ov2650_read(c, OV2650_BLCC, &v);
++ *value = (v & 0x10) == 0x10;
++ return ret;
++}
++#endif
++
++static struct ov2650_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} ov2650_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov2650_t_vflip,
++ .query = ov2650_q_vflip,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal mirror",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov2650_t_hflip,
++ .query = ov2650_q_hflip,
++ },
++#if 0
++ {
++ .parm = {
++ .index = V4L2_CID_AUTO_WHITE_BALANCE,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Auto White Balance",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = ov2650_t_awb,
++ .query = ov2650_q_awb,
++ },
++ {
++ .parm = {
++ .index = V4L2_CID_AUTOGAIN,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Auto Gain Control",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = ov2650_t_agc,
++ .query = ov2650_q_agc,
++
++ },
++ {
++ .parm = {
++ .index = V4L2_CID_BLACK_LEVEL,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Black Level Control",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = ov2650_t_blc,
++ .query = ov2650_q_blc,
++
++ },
++#endif
++};
++#define N_CONTROLS (ARRAY_SIZE(ov2650_controls))
++
++static struct ov2650_control *ov2650_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (ov2650_controls[i].qc.id == id)
++ return ov2650_controls + i;
++ return NULL;
++}
++
++static int ov2650_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov2650_control *octrl;
++ octrl = ov2650_find_control(qc->id);
++ if (NULL == octrl)
++ return -EINVAL;
++ *qc = octrl->qc;
++ return 0;
++}
++
++static int ov2650_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov2650_control *octrl = ov2650_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++static int ov2650_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov2650_control *octrl = ov2650_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++#if 0
++static int ov2650_get_caps(struct i2c_client *c, struct ci_sensor_caps *caps)
++{
++ if (caps == NULL)
++ return -EIO;
++
++ caps->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ caps->mode = SENSOR_MODE_BT601;
++ caps->field_inv = SENSOR_FIELDINV_NOSWAP;
++ caps->field_sel = SENSOR_FIELDSEL_BOTH;
++ caps->ycseq = SENSOR_YCSEQ_YCBYCR;
++ caps->conv422 = SENSOR_CONV422_COSITED;
++ caps->bpat = SENSOR_BPAT_BGBGGRGR;
++ caps->hpol = SENSOR_HPOL_REFPOS;
++ caps->vpol = SENSOR_VPOL_POS;
++ caps->edge = SENSOR_EDGE_RISING;
++ caps->bls = SENSOR_BLS_OFF;
++ caps->gamma = SENSOR_GAMMA_ON;
++ caps->cconv = SENSOR_CCONV_ON;
++ caps->res = SENSOR_RES_UXGA | SENSOR_RES_SXGA | SENSOR_RES_SVGA
++ | SENSOR_RES_VGA | SENSOR_RES_QVGA;
++ caps->blc = SENSOR_BLC_AUTO;
++ caps->agc = SENSOR_AGC_AUTO;
++ caps->awb = SENSOR_AWB_AUTO;
++ caps->aec = SENSOR_AEC_AUTO;
++ caps->cie_profile = 0;
++ caps->flicker_freq = SENSOR_FLICKER_100 | SENSOR_FLICKER_120;
++ caps->type = SENSOR_TYPE_SOC;
++ /* caps->name = "ov2650"; */
++ strcpy(caps->name, "ov2650");
++
++ return 0;
++}
++
++static int ov2650_get_config(struct i2c_client *c,
++ struct ci_sensor_config *config)
++{
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ if (config == NULL) {
++ printk(KERN_WARNING "sensor_get_config: NULL pointer\n");
++ return -EIO;
++ }
++
++ memcpy(config, info, sizeof(struct ci_sensor_config));
++
++ return 0;
++}
++
++static int ov2650_setup(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int ret;
++ struct ov2650_res_struct *res_index;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++ u16 width, high;
++
++ /* Soft reset camera first*/
++ ret = ov2650_write(c, OV2650_SYS, 0x80);
++
++ /* Set registers into default config value */
++ ret += ov2650_write_array(c, ov2650_def_reg);
++
++ /* set image resolution */
++ ci_sensor_res2size(config->res, &width, &high);
++ ret += ov2650_try_res(c, &width, &high);
++ res_index = ov2650_find_res(width, high);
++ if (res_index->regs)
++ ret += ov2650_write_array(c, res_index->regs);
++ if (!ret)
++ info->res = res_index->res;
++
++
++ if (config->blc != info->blc) {
++ ret += ov2650_t_blc(c, config->blc);
++ info->blc = config->blc;
++ }
++
++ if (config->agc != info->agc) {
++ ret += ov2650_t_agc(c, config->agc);
++ info->agc = config->agc;
++ }
++
++ if (config->awb != info->awb) {
++ ret += ov2650_t_awb(c, config->awb);
++ info->awb = config->awb;
++ }
++ /* Add some delay here to get a better image*/
++ if (res_index->res == SENSOR_RES_SXGA ||
++ res_index->res == SENSOR_RES_UXGA)
++ msleep(2000);
++ else
++ msleep(900);
++
++ return ret;
++}
++
++/*
++ * File operation functions
++ */
++
++
++
++static int ov2650_open(struct i2c_setting *c, void *priv)
++{
++ struct i2c_client *client = c->sensor_client;
++ /* Just wake up sensor */
++ if (ov2650_wakeup())
++ return -EIO;
++ ov2650_init(client);
++ /*Sleep sensor now*/
++ ov2650_write(client, 0x3086, 0x0f);
++
++ /* set data pin to input */
++ if (ov2650_set_data_pin_in(client))
++ return -EIO;
++
++ return 0;
++}
++
++static int ov2650_release(struct i2c_setting *c, void *priv)
++{
++ /* Just suspend the sensor */
++ ov2650_standby();
++ return 0;
++}
++
++static int ov2650_on(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software wake up sensor */
++ ret = ov2650_write(c->sensor_client, 0x3086, 0x00);
++
++ /* set data pin to output */
++ return ret + ov2650_set_data_pin_out(c->sensor_client);
++}
++
++static int ov2650_off(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software standby sensor */
++ ret = ov2650_write(c->sensor_client, 0x3086, 0x0f);
++
++ /* set data pin to input */
++ return ret + ov2650_set_data_pin_in(c->sensor_client);
++}
++
++static struct sensor_device ov2650 = {
++ .name = "OV2650",
++ .type = SENSOR_TYPE_SOC,
++ .minor = -1,
++ .open = ov2650_open,
++ .release = ov2650_release,
++ .on = ov2650_on,
++ .off = ov2650_off,
++ .querycap = ov2650_get_caps,
++ .get_config = ov2650_get_config,
++ .set_config = ov2650_setup,
++ .enum_parm = ov2650_queryctrl,
++ .get_parm = ov2650_g_ctrl,
++ .set_parm = ov2650_s_ctrl,
++ .try_res = ov2650_try_res,
++ .set_res = ov2650_set_res,
++ .suspend = ov2650_standby,
++ .resume = ov2650_wakeup,
++ .get_ls_corr_config = NULL,
++ .set_awb = NULL,
++ .set_aec = NULL,
++ .set_blc = NULL,
++ /* TBC */
++};
++#endif
++
++static int ov2650_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++
++ if (enable) {
++ ov2650_write(client, 0x3086, 0x00);
++ ov2650_set_data_pin_out(client);
++ msleep(2000);
++ } else {
++ ov2650_write(client, 0x3086, 0x0f);
++ ov2650_set_data_pin_in(client);
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov2650_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = ov2650_res[index].width;
++ fsize->discrete.height = ov2650_res[index].height;
++ fsize->reserved[0] = ov2650_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov2650_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = ov2650_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++static int ov2650_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_OV2650 8244
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV2650, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int ov2650_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = ov2650_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int ov2650_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ov2650_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops ov2650_video_ops = {
++ .try_fmt = ov2650_try_fmt,
++ .s_fmt = ov2650_set_fmt,
++ .g_fmt = ov2650_get_fmt,
++ .s_stream = ov2650_s_stream,
++ .enum_framesizes = ov2650_enum_framesizes,
++ .enum_frameintervals = ov2650_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops ov2650_core_ops = {
++ .g_chip_ident = ov2650_g_chip_ident,
++ .queryctrl = ov2650_queryctrl,
++ .g_ctrl = ov2650_g_ctrl,
++ .s_ctrl = ov2650_s_ctrl,
++ .s_gpio = ov2650_s_power,
++ /*.g_ext_ctrls = ov2650_g_ext_ctrls,*/
++ /*.s_ext_ctrls = ov2650_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = ov2650_g_register,
++ .s_register = ov2650_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops ov2650_ops = {
++ .core = &ov2650_core_ops,
++ .video = &ov2650_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++#if 0
++static unsigned short normal_i2c[] = {I2C_OV2650 >> 1, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver ov2650_driver;
++#endif
++static int ov2650_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u8 value;
++
++ printk(KERN_WARNING "Now start ov2650 detect\n");
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
++ return -ENODEV;
++
++ if (adap_id != 1)
++ return -ENODEV;
++
++ /* if (ov2650_wakeup()) */
++ /* return -ENODEV; */
++ ov2650_wakeup();
++
++ ov2650_read(client, OV2650_PID_L, &value);
++ if (value != 0x52)
++ return -ENODEV;
++
++ return 0;
++}
++
++static int ov2650_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++
++ printk(KERN_INFO "Init ov2650 sensor \n");
++
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ ret = ov2650_detect(client);
++ if (ret) {
++ kfree(info);
++ return -ENODEV;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov2650_ops);
++
++ /*
++ * TODO: Need to check if this can be here.
++ * Turn into standby mode
++ */
++ /* ov2650_standby(); */
++ ret += ov2650_init(client);
++ ov2650_standby();
++
++ printk(KERN_INFO "Init ov2650 sensor success, ret = %d\n", ret);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov2650_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++ return 0;
++}
++
++static const struct i2c_device_id ov2650_id[] = {
++ {"ov2650", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, ov2650_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov2650",
++ .probe = ov2650_probe,
++ .remove = ov2650_remove,
++ /* .suspend = ov2650_suspend,
++ * .resume = ov2650_resume, */
++ .id_table = ov2650_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 2650 sensors");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov2650/ov2650.h
+@@ -0,0 +1,766 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_OV2650 0x60
++/* Should add to kernel source */
++#define I2C_DRIVERID_OV2650 1047
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 48
++#define GPIO_RESET_PIN 50
++
++/* System control register */
++#define OV2650_AGC 0x3000
++#define OV2650_AGCS 0x3001
++#define OV2650_AEC_H 0x3002
++#define OV2650_AEC_L 0x3003
++#define OV2650_AECL 0x3004
++#define OV2650_AECS_H 0x3008
++#define OV2650_AECS_L 0x3009
++#define OV2650_PID_H 0x300A
++#define OV2650_PID_L 0x300B
++#define OV2650_SCCB 0x300C
++#define OV2650_PCLK 0x300D
++#define OV2650_PLL_1 0x300E
++#define OV2650_PLL_2 0x300F
++#define OV2650_PLL_3 0x3010
++#define OV2650_CLK 0x3011
++#define OV2650_SYS 0x3012
++#define OV2650_AUTO_1 0x3013
++#define OV2650_AUTO_2 0x3014
++#define OV2650_AUTO_3 0x3015
++#define OV2650_AUTO_4 0x3016
++#define OV2650_AUTO_5 0x3017
++#define OV2650_WPT 0x3018
++#define OV2650_BPT 0x3019
++#define OV2650_VPT 0x301A
++#define OV2650_YAVG 0x301B
++#define OV2650_AECG_50 0x301C
++#define OV2650_AECG_60 0x301D
++#define OV2650_RZM_H 0x301E
++#define OV2650_RZM_L 0x301F
++#define OV2650_HS_H 0x3020
++#define OV2650_HS_L 0x3021
++#define OV2650_VS_H 0x3022
++#define OV2650_VS_L 0x3023
++#define OV2650_HW_H 0x3024
++#define OV2650_HW_L 0x3025
++#define OV2650_VH_H 0x3026
++#define OV2650_VH_L 0x3027
++#define OV2650_HTS_H 0x3028
++#define OV2650_HTS_L 0x3029
++#define OV2650_VTS_H 0x302A
++#define OV2650_VTS_L 0x302B
++#define OV2650_EXHTS 0x302C
++#define OV2650_EXVTS_H 0x302D
++#define OV2650_EXVTS_L 0x302E
++#define OV2650_WET_0 0x3030
++#define OV2650_WET_1 0x3031
++#define OV2650_WET_2 0x3032
++#define OV2650_WET_3 0x3033
++#define OV2650_AHS_H 0x3038
++#define OV2650_AHS_L 0x3039
++#define OV2650_AVS_H 0x303A
++#define OV2650_AVS_L 0x303B
++#define OV2650_AHW_H 0x303C
++#define OV2650_AHW_L 0x303D
++#define OV2650_AVH_H 0x303E
++#define OV2650_AVH_L 0x303F
++#define OV2650_HISTO_0 0x3040
++#define OV2650_HISTO_1 0x3041
++#define OV2650_HISTO_2 0x3042
++#define OV2650_HISTO_3 0x3043
++#define OV2650_HISTO_4 0x3044
++#define OV2650_BLC9A 0x3069
++#define OV2650_BLCC 0x306C
++#define OV2650_BLCD 0x306D
++#define OV2650_BLCF 0x306F
++#define OV2650_BD50_L 0x3070
++#define OV2650_BD50_H 0x3071
++#define OV2650_BD60_L 0x3072
++#define OV2650_BD60_H 0x3073
++#define OV2650_TMC_0 0x3076
++#define OV2650_TMC_1 0x3077
++#define OV2650_TMC_2 0x3078
++#define OV2650_TMC_4 0x307A
++#define OV2650_TMC_6 0x307C
++#define OV2650_TMC_8 0x307E
++#define OV2650_TMC_I2C 0x3084
++#define OV2650_TMC_10 0x3086
++#define OV2650_TMC_11 0x3087
++#define OV2650_ISP_XO_H 0x3088
++#define OV2650_ISP_XO_L 0x3089
++#define OV2650_ISP_YO_H 0x308A
++#define OV2650_ISP_YO_L 0x308B
++#define OV2650_TMC_12 0x308C
++#define OV2650_TMC_13 0x308D
++#define OV2650_EFUSE 0x308F
++#define OV2650_IO_CTL_0 0x30B0
++#define OV2650_IO_CRL_1 0x30B1
++#define OV2650_IO_CTL_2 0x30B2
++#define OV2650_LAEC 0x30F0
++#define OV2650_GRP_EOP 0x30FF
++
++/* SC registers */
++#define OV2650_SC_CTL_0 0x3100
++#define OV2650_SC_SYN_CTL_0 0x3104
++#define OV2650_SC_SYN_CTL_1 0x3105
++#define OV2650_SC_SYN_CTL_3 0x3107
++#define OV2650_SC_SYN_CTL_4 0x3108
++
++/* DSP control register */
++#define OV2650_ISP_CTL_0 0x3300
++#define OV2650_ISP_CTL_1 0x3301
++#define OV2650_ISP_CTL_2 0x3302
++#define OV2650_ISP_CTL_3 0x3303
++#define OV2650_ISP_CTL_4 0x3304
++#define OV2650_ISP_CTL_5 0x3305
++#define OV2650_ISP_CTL_6 0x3306
++#define OV2650_ISP_CTL_7 0x3307
++#define OV2650_ISP_CTL_8 0x3308
++#define OV2650_ISP_CTL_9 0x3309
++#define OV2650_ISP_CTL_A 0x330A
++#define OV2650_ISP_CTL_B 0x330B
++#define OV2650_ISP_CTL_10 0x3310
++#define OV2650_ISP_CTL_11 0x3311
++#define OV2650_ISP_CTL_12 0x3312
++#define OV2650_ISP_CTL_13 0x3313
++#define OV2650_ISP_CTL_14 0x3314
++#define OV2650_ISP_CTL_15 0x3315
++#define OV2650_ISP_CTL_16 0x3316
++#define OV2650_ISP_CTL_17 0x3317
++#define OV2650_ISP_CTL_18 0x3318
++#define OV2650_ISP_CTL_19 0x3319
++#define OV2650_ISP_CTL_1A 0x331A
++#define OV2650_ISP_CTL_1B 0x331B
++#define OV2650_ISP_CTL_1C 0x331C
++#define OV2650_ISP_CTL_1D 0x331D
++#define OV2650_ISP_CTL_1E 0x331E
++#define OV2650_ISP_CTL_20 0x3320
++#define OV2650_ISP_CTL_21 0x3321
++#define OV2650_ISP_CTL_22 0x3322
++#define OV2650_ISP_CTL_23 0x3323
++#define OV2650_ISP_CTL_24 0x3324
++#define OV2650_ISP_CTL_27 0x3327
++#define OV2650_ISP_CTL_28 0x3328
++#define OV2650_ISP_CTL_29 0x3329
++#define OV2650_ISP_CTL_2A 0x332A
++#define OV2650_ISP_CTL_2B 0x332B
++#define OV2650_ISP_CTL_2C 0x332C
++#define OV2650_ISP_CTL_2D 0x332D
++#define OV2650_ISP_CTL_2E 0x332E
++#define OV2650_ISP_CTL_2F 0x332F
++#define OV2650_ISP_CTL_30 0x3330
++#define OV2650_ISP_CTL_31 0x3331
++#define OV2650_ISP_CTL_32 0x3332
++#define OV2650_ISP_CTL_33 0x3333
++#define OV2650_ISP_CTL_34 0x3334
++#define OV2650_ISP_CTL_35 0x3335
++#define OV2650_ISP_CTL_36 0x3336
++#define OV2650_ISP_CTL_40 0x3340
++#define OV2650_ISP_CTL_41 0x3341
++#define OV2650_ISP_CTL_42 0x3342
++#define OV2650_ISP_CTL_43 0x3343
++#define OV2650_ISP_CTL_44 0x3344
++#define OV2650_ISP_CTL_45 0x3345
++#define OV2650_ISP_CTL_46 0x3346
++#define OV2650_ISP_CTL_47 0x3347
++#define OV2650_ISP_CTL_48 0x3348
++#define OV2650_ISP_CTL_49 0x3349
++#define OV2650_ISP_CTL_4A 0x334A
++#define OV2650_ISP_CTL_4B 0x334B
++#define OV2650_ISP_CTL_4C 0x334C
++#define OV2650_ISP_CTL_4D 0x334D
++#define OV2650_ISP_CTL_4E 0x334E
++#define OV2650_ISP_CTL_4F 0x334F
++#define OV2650_ISP_CTL_50 0x3350
++#define OV2650_ISP_CTL_51 0x3351
++#define OV2650_ISP_CTL_52 0x3352
++#define OV2650_ISP_CTL_53 0x3353
++#define OV2650_ISP_CTL_54 0x3354
++#define OV2650_ISP_CTL_55 0x3355
++#define OV2650_ISP_CTL_56 0x3356
++#define OV2650_ISP_CTL_57 0x3357
++#define OV2650_ISP_CTL_58 0x3358
++#define OV2650_ISP_CTL_59 0x3359
++#define OV2650_ISP_CTL_5A 0x335A
++#define OV2650_ISP_CTL_5B 0x335B
++#define OV2650_ISP_CTL_5C 0x335C
++#define OV2650_ISP_CTL_5D 0x335D
++#define OV2650_ISP_CTL_5E 0x335E
++#define OV2650_ISP_CTL_5F 0x335F
++#define OV2650_ISP_CTL_60 0x3360
++#define OV2650_ISP_CTL_61 0x3361
++#define OV2650_ISP_CTL_62 0x3362
++#define OV2650_ISP_CTL_63 0x3363
++#define OV2650_ISP_CTL_64 0x3364
++#define OV2650_ISP_CTL_65 0x3365
++#define OV2650_ISP_CTL_6A 0x336A
++#define OV2650_ISP_CTL_6B 0x336B
++#define OV2650_ISP_CTL_6C 0x336C
++#define OV2650_ISP_CTL_6E 0x336E
++#define OV2650_ISP_CTL_71 0x3371
++#define OV2650_ISP_CTL_72 0x3372
++#define OV2650_ISP_CTL_73 0x3373
++#define OV2650_ISP_CTL_74 0x3374
++#define OV2650_ISP_CTL_75 0x3375
++#define OV2650_ISP_CTL_76 0x3376
++#define OV2650_ISP_CTL_77 0x3377
++#define OV2650_ISP_CTL_78 0x3378
++#define OV2650_ISP_CTL_79 0x3379
++#define OV2650_ISP_CTL_7A 0x337A
++#define OV2650_ISP_CTL_7B 0x337B
++#define OV2650_ISP_CTL_7C 0x337C
++#define OV2650_ISP_CTL_80 0x3380
++#define OV2650_ISP_CTL_81 0x3381
++#define OV2650_ISP_CTL_82 0x3382
++#define OV2650_ISP_CTL_83 0x3383
++#define OV2650_ISP_CTL_84 0x3384
++#define OV2650_ISP_CTL_85 0x3385
++#define OV2650_ISP_CTL_86 0x3386
++#define OV2650_ISP_CTL_87 0x3387
++#define OV2650_ISP_CTL_88 0x3388
++#define OV2650_ISP_CTL_89 0x3389
++#define OV2650_ISP_CTL_8A 0x338A
++#define OV2650_ISP_CTL_8B 0x338B
++#define OV2650_ISP_CTL_8C 0x338C
++#define OV2650_ISP_CTL_8D 0x338D
++#define OV2650_ISP_CTL_8E 0x338E
++#define OV2650_ISP_CTL_90 0x3390
++#define OV2650_ISP_CTL_91 0x3391
++#define OV2650_ISP_CTL_92 0x3392
++#define OV2650_ISP_CTL_93 0x3393
++#define OV2650_ISP_CTL_94 0x3394
++#define OV2650_ISP_CTL_95 0x3395
++#define OV2650_ISP_CTL_96 0x3396
++#define OV2650_ISP_CTL_97 0x3397
++#define OV2650_ISP_CTL_98 0x3398
++#define OV2650_ISP_CTL_99 0x3399
++#define OV2650_ISP_CTL_9A 0x339A
++#define OV2650_ISP_CTL_A0 0x33A0
++#define OV2650_ISP_CTL_A1 0x33A1
++#define OV2650_ISP_CTL_A2 0x33A2
++#define OV2650_ISP_CTL_A3 0x33A3
++#define OV2650_ISP_CTL_A4 0x33A4
++#define OV2650_ISP_CTL_A5 0x33A5
++#define OV2650_ISP_CTL_A6 0x33A6
++#define OV2650_ISP_CTL_A7 0x33A7
++#define OV2650_ISP_CTL_A8 0x33A8
++#define OV2650_ISP_CTL_AA 0x33AA
++#define OV2650_ISP_CTL_AB 0x33AB
++#define OV2650_ISP_CTL_AC 0x33AC
++#define OV2650_ISP_CTL_AD 0x33AD
++#define OV2650_ISP_CTL_AE 0x33AE
++#define OV2650_ISP_CTL_AF 0x33AF
++#define OV2650_ISP_CTL_B0 0x33B0
++#define OV2650_ISP_CTL_B1 0x33B1
++#define OV2650_ISP_CTL_B2 0x33B2
++#define OV2650_ISP_CTL_B3 0x33B3
++#define OV2650_ISP_CTL_B4 0x33B4
++#define OV2650_ISP_CTL_B5 0x33B5
++#define OV2650_ISP_CTL_B6 0x33B6
++#define OV2650_ISP_CTL_B7 0x33B7
++#define OV2650_ISP_CTL_B8 0x33B8
++#define OV2650_ISP_CTL_B9 0x33B9
++
++/* Format register */
++#define OV2650_FMT_CTL_0 0x3400
++#define OV2650_FMT_CTL_1 0x3401
++#define OV2650_FMT_CTL_2 0x3402
++#define OV2650_FMT_CTL_3 0x3403
++#define OV2650_FMT_CTL_4 0x3404
++#define OV2650_FMT_CTL_5 0x3405
++#define OV2650_FMT_CTL_6 0x3406
++#define OV2650_FMT_CTL_7 0x3407
++#define OV2650_FMT_CTL_8 0x3408
++#define OV2650_DITHER_CTL 0x3409
++#define OV2650_DVP_CTL_0 0x3600
++#define OV2650_DVP_CTL_1 0x3601
++#define OV2650_DVP_CTL_6 0x3606
++#define OV2650_DVP_CTL_7 0x3607
++#define OV2650_DVP_CTL_9 0x3609
++#define OV2650_DVP_CTL_B 0x360B
++
++/* General definition for ov2650 */
++#define OV2650_OUTWND_MAX_H UXGA_SIZE_H
++#define OV2650_OUTWND_MAX_V UXGA_SIZE_V
++
++struct regval_list {
++ u16 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 1600x1200 YUV
++ */
++static struct regval_list ov2650_def_reg[] = {
++ {0x3012, 0x80},
++ {0x308c, 0x80},
++ {0x308d, 0x0e},
++ {0x360b, 0x00},
++ {0x30b0, 0xff},
++ {0x30b1, 0xff},
++ {0x30b2, 0x27},
++
++ {0x300e, 0x34},
++ {0x300f, 0xa6},
++ {0x3010, 0x81},
++ {0x3082, 0x01},
++ {0x30f4, 0x01},
++ {0x3090, 0x33},
++ {0x3091, 0xc0},
++ {0x30ac, 0x42},
++
++ {0x30d1, 0x08},
++ {0x30a8, 0x56},
++ {0x3015, 0x03},
++ {0x3093, 0x00},
++ {0x307e, 0xe5},
++ {0x3079, 0x00},
++ {0x30aa, 0x42},
++ {0x3017, 0x40},
++ {0x30f3, 0x82},
++ {0x306a, 0x0c},
++ {0x306d, 0x00},
++ {0x336a, 0x3c},
++ {0x3076, 0x6a},
++ {0x30d9, 0x8c},
++ {0x3016, 0x82},
++ {0x3601, 0x30},
++ {0x304e, 0x88},
++ {0x30f1, 0x82},
++ {0x3011, 0x02},
++
++ {0x3013, 0xf7},
++ {0x301c, 0x13},
++ {0x301d, 0x17},
++ {0x3070, 0x3e},
++ {0x3072, 0x34},
++
++ {0x30af, 0x00},
++ {0x3048, 0x1f},
++ {0x3049, 0x4e},
++ {0x304a, 0x20},
++ {0x304f, 0x20},
++ {0x304b, 0x02},
++ {0x304c, 0x00},
++ {0x304d, 0x02},
++ {0x304f, 0x20},
++ {0x30a3, 0x10},
++ {0x3013, 0xf7},
++ {0x3014, 0x44},
++ {0x3071, 0x00},
++ {0x3070, 0x3e},
++ {0x3073, 0x00},
++ {0x3072, 0x34},
++ {0x301c, 0x12},
++ {0x301d, 0x16},
++ {0x304d, 0x42},
++ {0x304a, 0x40},
++ {0x304f, 0x40},
++ {0x3095, 0x07},
++ {0x3096, 0x16},
++ {0x3097, 0x1d},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x0a},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x04},
++ {0x3027, 0xbc},
++ {0x3088, 0x06},
++ {0x3089, 0x40},
++ {0x308a, 0x04},
++ {0x308b, 0xb0},
++ {0x3316, 0x64},
++ {0x3317, 0x4b},
++ {0x3318, 0x00},
++ {0x331a, 0x64},
++ {0x331b, 0x4b},
++ {0x331c, 0x00},
++ {0x3100, 0x00},
++
++ {0x3320, 0xfa},
++ {0x3321, 0x11},
++ {0x3322, 0x92},
++ {0x3323, 0x01},
++ {0x3324, 0x97},
++ {0x3325, 0x02},
++ {0x3326, 0xff},
++ {0x3327, 0x0c},
++ {0x3328, 0x10},
++ {0x3329, 0x10},
++ {0x332a, 0x58},
++ {0x332b, 0x50},
++ {0x332c, 0xbe},
++ {0x332d, 0xe1},
++ {0x332e, 0x43},
++ {0x332f, 0x36},
++ {0x3330, 0x4d},
++ {0x3331, 0x44},
++ {0x3332, 0xf8},
++ {0x3333, 0x0a},
++ {0x3334, 0xf0},
++ {0x3335, 0xf0},
++ {0x3336, 0xf0},
++ {0x3337, 0x40},
++ {0x3338, 0x40},
++ {0x3339, 0x40},
++ {0x333a, 0x00},
++ {0x333b, 0x00},
++
++ {0x3380, 0x28},
++ {0x3381, 0x48},
++ {0x3382, 0x10},
++ {0x3383, 0x23},
++ {0x3384, 0xc0},
++ {0x3385, 0xe5},
++ {0x3386, 0xc2},
++ {0x3387, 0xb3},
++ {0x3388, 0x0e},
++ {0x3389, 0x98},
++ {0x338a, 0x01},
++
++ {0x3340, 0x0e},
++ {0x3341, 0x1a},
++ {0x3342, 0x31},
++ {0x3343, 0x45},
++ {0x3344, 0x5a},
++ {0x3345, 0x69},
++ {0x3346, 0x75},
++ {0x3347, 0x7e},
++ {0x3348, 0x88},
++ {0x3349, 0x96},
++ {0x334a, 0xa3},
++ {0x334b, 0xaf},
++ {0x334c, 0xc4},
++ {0x334d, 0xd7},
++ {0x334e, 0xe8},
++ {0x334f, 0x20},
++
++ {0x3350, 0x32},
++ {0x3351, 0x25},
++ {0x3352, 0x80},
++ {0x3353, 0x1e},
++ {0x3354, 0x00},
++ {0x3355, 0x85},
++ {0x3356, 0x32},
++ {0x3357, 0x25},
++ {0x3358, 0x80},
++ {0x3359, 0x1b},
++ {0x335a, 0x00},
++ {0x335b, 0x85},
++ {0x335c, 0x32},
++ {0x335d, 0x25},
++ {0x335e, 0x80},
++ {0x335f, 0x1b},
++ {0x3360, 0x00},
++ {0x3361, 0x85},
++ {0x3363, 0x70},
++ {0x3364, 0x7f},
++ {0x3365, 0x00},
++ {0x3366, 0x00},
++
++ {0x3301, 0xff},
++ {0x338B, 0x11},
++ {0x338c, 0x10},
++ {0x338d, 0x40},
++
++ {0x3370, 0xd0},
++ {0x3371, 0x00},
++ {0x3372, 0x00},
++ {0x3373, 0x40},
++ {0x3374, 0x10},
++ {0x3375, 0x10},
++ {0x3376, 0x04},
++ {0x3377, 0x00},
++ {0x3378, 0x04},
++ {0x3379, 0x80},
++
++ {0x3069, 0x84},
++ {0x307c, 0x10},
++ {0x3087, 0x02},
++
++ {0x3300, 0xfc},
++ {0x3302, 0x01},
++ {0x3400, 0x00},
++ {0x3606, 0x20},
++ {0x3601, 0x30},
++ {0x30f3, 0x83},
++ {0x304e, 0x88},
++
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++
++ {0xffff, 0xff},
++};
++
++/* 800x600 */
++static struct regval_list ov2650_res_svga[] = {
++
++ {0x306f, 0x14},
++ {0x302a, 0x02},
++ {0x302b, 0x84},
++ {0x3012, 0x10},
++ {0x3011, 0x01},
++
++ {0x3070, 0x5d},
++ {0x3072, 0x4d},
++
++ {0x3014, 0x84},
++ {0x301c, 0x07},
++ {0x301d, 0x09},
++ {0x3070, 0x50},
++ {0x3071, 0x00},
++ {0x3072, 0x42},
++ {0x3073, 0x00},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x06},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x02},
++ {0x3027, 0x5e},
++ {0x3088, 0x03},
++ {0x3089, 0x20},
++ {0x308a, 0x02},
++ {0x308b, 0x58},
++ {0x3316, 0x64},
++ {0x3317, 0x25},
++ {0x3318, 0x80},
++ {0x3319, 0x08},
++ {0x331a, 0x64},
++ {0x331b, 0x4b},
++ {0x331c, 0x00},
++ {0x331d, 0x38},
++ {0x3100, 0x00},
++
++ {0x3302, 0x11},
++
++ {0x3011, 0x01},
++ {0x300f, 0xa6},
++ {0x300e, 0x36},
++ {0x3010, 0x81},
++ {0x302e, 0x00},
++ {0x302d, 0x00},
++ {0x302c, 0x00},
++ {0x302b, 0x84},
++ {0x3014, 0x84},
++ {0x301c, 0x07},
++ {0x301d, 0x09},
++ {0x3070, 0x50},
++ {0x3071, 0x00},
++ {0x3072, 0x42},
++ {0x3073, 0x00},
++
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++/* 640x480 */
++static struct regval_list ov2650_res_vga_vario[] = {
++ {0x306f, 0x14},
++ {0x302a, 0x02},
++ {0x302b, 0x6a},
++ {0x3012, 0x10},
++ {0x3011, 0x01},
++
++ {0x3070, 0x5d},
++ {0x3072, 0x4d},
++
++ {0x301c, 0x05},
++ {0x301d, 0x06},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x06},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x02},
++ {0x3027, 0x61},
++ {0x3088, 0x02},
++ {0x3089, 0x80},
++ {0x308a, 0x01},
++ {0x308b, 0xe0},
++ {0x3316, 0x64},
++ {0x3317, 0x25},
++ {0x3318, 0x80},
++ {0x3319, 0x08},
++ {0x331a, 0x28},
++ {0x331b, 0x1e},
++ {0x331c, 0x00},
++ {0x331d, 0x38},
++ {0x3100, 0x00},
++
++ {0x3302, 0x11},
++ {0x3011, 0x00},
++
++ {0x3014, 0x84}, /* note this */
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++/* 640x480 reverse */
++/*
++static struct regval_list ov2650_res_vga_reverse[] = {
++ {0x306f, 0x10},
++ {0x302a, 0x04},
++ {0x302b, 0xd4},
++ {0x3012, 0x00},
++ {0x3011, 0x02},
++
++ {0x3070, 0x3e},
++ {0x3072, 0x34},
++
++ {0x301c, 0x12},
++ {0x301d, 0x16},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x0a},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x04},
++ {0x3027, 0xbc},
++ {0x3088, 0x06},
++ {0x3089, 0x40},
++ {0x308a, 0x04},
++ {0x308b, 0xb0},
++ {0x3316, 0x64},
++ {0x3317, 0xb4},
++ {0x3318, 0x00},
++ {0x3319, 0x6c},
++ {0x331a, 0x64},
++ {0x331b, 0x4b},
++ {0x331c, 0x00},
++ {0x331d, 0x6c},
++ {0x3100, 0x00},
++
++ {0x3302, 0x01},
++ {0x3011, 0x02},
++
++ {0x3014, 0x44},
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++*/
++/* 320x240 */
++static struct regval_list ov2650_res_qvga[] = {
++ {0x306f, 0x14},
++ {0x302a, 0x02},
++ {0x302b, 0x6a},
++
++ {0x3012, 0x10},
++ {0x3011, 0x01},
++
++ {0x3070, 0x5d},
++ {0x3072, 0x4d},
++ {0x301c, 0x05},
++ {0x301d, 0x06},
++
++ {0x3023, 0x06},
++ {0x3026, 0x02},
++ {0x3027, 0x61},
++ {0x3088, 0x01},
++ {0x3089, 0x40},
++ {0x308a, 0x00},
++ {0x308b, 0xf0},
++ {0x3316, 0x64},
++ {0x3317, 0x25},
++ {0x3318, 0x80},
++ {0x3319, 0x08},
++ {0x331a, 0x14},
++ {0x331b, 0x0f},
++ {0x331c, 0x00},
++ {0x331d, 0x38},
++ {0x3100, 0x00},
++
++ {0x3015, 0x02}, /* note this */
++ {0x3014, 0x84},
++ {0x3302, 0x11},
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++static struct regval_list ov2650_res_uxga[] = {
++ /* Note this added by debug */
++ {0x3014, 0x84},
++ {0x301c, 0x13},
++ {0x301d, 0x17},
++ {0x3070, 0x40},
++ {0x3071, 0x00},
++ {0x3072, 0x36},
++ {0x3073, 0x00},
++
++ {0xffff, 0xff},
++};
++
++static struct regval_list ov2650_res_sxga[] = {
++ {0x3011, 0x02},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x0a},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x04},
++ {0x3027, 0xbc},
++ {0x3088, 0x05},
++ {0x3089, 0x00},
++ {0x308a, 0x04},
++ {0x308b, 0x00},
++ {0x3316, 0x64},
++ {0x3317, 0x4b},
++ {0x3318, 0x00},
++ {0x331a, 0x50},
++ {0x331b, 0x40},
++ {0x331c, 0x00},
++
++ {0x3302, 0x11},
++
++ {0x3014, 0x84},
++ {0x301c, 0x13},
++ {0x301d, 0x17},
++ {0x3070, 0x40},
++ {0x3071, 0x00},
++ {0x3072, 0x36},
++ {0x3073, 0x00},
++
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV5630
++ tristate "Moorestown OV5630 RAW Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support OV5630 RAW Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630/Makefile
+@@ -0,0 +1,4 @@
++mrstov5630-objs = ov5630.o
++obj-$(CONFIG_VIDEO_MRST_OV5630) += mrstov5630.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630/ov5630.c
+@@ -0,0 +1,1153 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "ov5630.h"
++
++static int mrstov5630_debug;
++module_param(mrstov5630_debug, int, 0644);
++MODULE_PARM_DESC(mrstov5630_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov5630_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++/* static int ov5630_set_res(struct i2c_client *c, const int w, const int h);
++ */
++static struct ov5630_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} ov5630_formats[] = {
++ {
++ .desc = "Raw RGB Bayer",
++ .pixelformat = SENSOR_MODE_BAYER,
++ .regs = NULL,
++ },
++};
++#define N_OV5630_FMTS ARRAY_SIZE(ov5630_formats)
++
++static struct ov5630_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} ov5630_res[] = {
++ {
++ .desc = "QSXGA_PLUS4",
++ .res = SENSOR_RES_QXGA_PLUS,
++ .width = 2592,
++ .height = 1944,
++ .fps = 15,
++ .used = 0,
++ .regs = ov5630_res_qsxga_plus4,
++ },
++ {
++ .desc = "1080P",
++ .res = SENSOR_RES_1080P,
++ .width = 1920,
++ .height = 1080,
++ .fps = 25,
++ .used = 0,
++ .regs = ov5630_res_1080p,
++ },
++ {
++ .desc = "XGA_PLUS",
++ .res = SENSOR_RES_XGA_PLUS,
++ .width = 1280,
++ .height = 960,
++ .fps = 30,
++ .used = 0,
++ .regs = ov5630_res_xga_plus,
++ },
++ {
++ .desc = "720p",
++ .res = SENSOR_RES_720P,
++ .width = 1280,
++ .height = 720,
++ .fps = 34,
++ .used = 0,
++ .regs = ov5630_res_720p,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 39,
++ .used = 0,
++ .regs = ov5630_res_vga_ac04_bill,
++ },
++};
++
++#define N_RES (ARRAY_SIZE(ov5630_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int ov5630_read(struct i2c_client *c, u32 reg, u32 *value)
++{
++ int ret;
++ int i;
++ struct i2c_msg msg[2];
++ u8 msgbuf[2];
++ u8 ret_val = 0;
++ *value = 0;
++ /* Read needs two message to go */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ i = 0;
++
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = ((u16)reg) & 0xff;
++ msg[0].addr = c->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = i;
++
++ msg[1].addr = c->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = &ret_val;
++ msg[1].len = 1;
++
++ ret = i2c_transfer(c->adapter, &msg[0], 2);
++ *value = ret_val;
++
++ ret = (ret == 2) ? 0 : -1;
++ return ret;
++}
++
++static int ov5630_write(struct i2c_client *c, u32 reg, u32 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = (u16)reg & 0xff;
++ msgbuf[i++] = (u8)value;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ /* If this is a reset register, wait for 1ms */
++ if (reg == OV5630_SYS && (value & 0x80))
++ msleep(3);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int ov5630_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u32 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++ p = vals;
++ while (p->reg_num != 0xffff) {
++ ov5630_write(c, (u32)p->reg_num, (u32)p->value);
++ ov5630_read(c, (u32)p->reg_num, &read_val);
++ if (read_val != p->value)
++ err_num++;
++ p++;
++ i++;
++ }
++ return 0;
++}
++
++/*
++ * Sensor specific helper function
++ */
++static int ov5630_standby(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ /* ov5630_motor_standby(); */
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int ov5630_wakeup(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ /* ov5630_motor_wakeup(); */
++ dprintk(1, "PM: wakeup called\n");
++ return 0;
++}
++
++static int ov5630_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ ov5630_standby();
++ if (val == 0)
++ ov5630_wakeup();
++ return 0;
++}
++
++static int ov5630_set_img_ctrl(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int err = 0;
++ u32 reg_val = 0;
++ /* struct ci_sensor_config *info = i2c_get_clientdata(c); */
++
++ switch (config->blc) {
++ case SENSOR_BLC_OFF:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val & 0xFE);
++ break;
++ case SENSOR_BLC_AUTO:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val | 0x01);
++ break;
++ }
++
++ switch (config->agc) {
++ case SENSOR_AGC_AUTO:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val | 0x04);
++ break;
++ case SENSOR_AGC_OFF:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val & ~0x04);
++ break;
++ }
++
++ switch (config->awb) {
++ case SENSOR_AWB_AUTO:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val | 0x30);
++ break;
++ case SENSOR_AWB_OFF:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val & ~0x30);
++ break;
++ }
++
++ switch (config->aec) {
++ case SENSOR_AEC_AUTO:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val | 0xFB);
++ break;
++ case SENSOR_AEC_OFF:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val & 0xF6);
++ break;
++ }
++
++ return err;
++}
++
++static int ov5630_init(struct i2c_client *c)
++{
++ int ret;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ char *name = "";
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = ov5630_formats[0].pixelformat;
++ info->res = ov5630_res[0].res;
++ info->type = SENSOR_TYPE_RAW;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_OFF;
++ info->cconv = SENSOR_CCONV_OFF;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++ /* info->bus_width = SENSOR_BUSWIDTH_10BIT; */
++ info->bus_width = SENSOR_BUSWIDTH_10BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ /* info->conv422 = SENSOR_CONV422_NOCOSITED; */
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_BGBGGRGR;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_NEG;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = SENSOR_CIEPROF_F11;
++ name = "ov5630";
++ memcpy(info->name, name, 7);
++
++ /* Reset sensor hardware, and implement the setting*/
++ ret = ov5630_write(c, (u32)OV5630_SYS, (u32)0x80);
++ ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* Set registers into default config value */
++ ret += ov5630_write_array(c, ov5630_def_reg);
++
++ /* Set MIPI interface */
++#ifdef OV5630_MIPI
++ ret += ov5630_write_array(c, ov5630_mipi);
++#endif
++
++ /* turn off AE AEB AGC */
++ ret += ov5630_set_img_ctrl(c, info);
++
++ /* streaming */
++ /* ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01); */
++ /* ret += ov5630_write(c, (u32)0x3096, (u32)0x50); */
++ /* /ssleep(1); */
++
++ /* Added by wen to stop sensor from streaming */
++ ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++ ov5630_write(c, 0x30b0, 0x00);
++ ov5630_write(c, 0x30b1, 0x00);
++ return ret;
++}
++
++static int distance(struct ov5630_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++static int ov5630_try_res(u32 *w, u32 *h)
++{
++ struct ov5630_res_struct *res_index, *p = NULL;
++ int dis, last_dis = ov5630_res->width + ov5630_res->height;
++
++ DBG_entering;
++
++ for (res_index = ov5630_res;
++ res_index < ov5630_res + N_RES;
++ res_index++) {
++ if ((res_index->width < *w) || (res_index->height < *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++
++ if (p == NULL)
++ p = ov5630_res;
++ else if ((p->width < *w) || (p->height < *h)) {
++ if (p != ov5630_res)
++ p--;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct ov5630_res_struct *ov5630_to_res(u32 w, u32 h)
++{
++ struct ov5630_res_struct *res_index;
++
++ for (res_index = ov5630_res;
++ res_index < ov5630_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= ov5630_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int ov5630_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ return ov5630_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int ov5630_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov5630_res[index].width) &&
++ (height == ov5630_res[index].height)) {
++ ov5630_res[index].used = 1;
++ continue;
++ }
++ ov5630_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++}
++
++static int ov5630_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct ov5630_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ dprintk(1, "was told to set fmt (%d x %d) ", width, height);
++
++ ret = ov5630_try_res(&width, &height);
++
++ dprintk(1, "setting fmt (%d x %d) ", width, height);
++
++ res_index = ov5630_to_res(width, height);
++
++ ov5630_wakeup();
++
++ if (res_index->regs) {
++ /* Soft reset camera first*/
++ ret = ov5630_write(c, (u32)OV5630_SYS, (u32)0x80);
++
++ /* software sleep/standby */
++ ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* Set registers into default config value */
++ ret += ov5630_write_array(c, ov5630_def_reg);
++
++ /* set image resolution */
++ ret += ov5630_write_array(c, res_index->regs);
++
++ /* turn off AE AEB AGC */
++ ret += ov5630_set_img_ctrl(c, info);
++
++ /* Set MIPI interface */
++#ifdef OV5630_MIPI
++ ret += ov5630_write_array(c, ov5630_mipi);
++#endif
++
++ if (res_index->res == SENSOR_RES_VGA)
++ ret += ov5630_write(c, (u32)0x3015, (u32)0x03);
++
++ /* streaming */
++ ret = ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++ ret = ov5630_write(c, (u32)0x3096, (u32)0x50);
++
++ info->res = res_index->res;
++
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov5630_res[index].width) &&
++ (height == ov5630_res[index].height)) {
++ ov5630_res[index].used = 1;
++ continue;
++ }
++ ov5630_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ ov5630_res[index].used);
++ } else {
++ eprintk("no res for (%d x %d)", width, height);
++ }
++
++ DBG_leaving;
++ return ret;
++}
++
++static int ov5630_t_gain(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u32 v;
++
++ DBG_entering;
++
++ dprintk(2, "writing gain %x to 0x3000", value);
++
++ ov5630_read(client, 0x3000, &v);
++ v = (v & 0x80) + value;
++ ov5630_write(client, 0x3000, v);
++
++ dprintk(2, "gain %x was writen to 0x3000", v);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov5630_t_exposure(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u32 v;
++ u32 reg_val;
++
++ DBG_entering;
++
++ ov5630_read(client, 0x3013, &v);
++ dprintk(2, "0x3013 = %x", v);
++ if (v & 0x05) {
++ /* turn off agc/aec */
++ v = v & 0xfa;
++ ov5630_write(client, 0x3013, v);
++ /* turn off awb */
++ ov5630_read(client, OV5630_ISP_CTL00, &reg_val);
++ ov5630_write(client, OV5630_ISP_CTL00, reg_val & ~0x30);
++ }
++ ov5630_read(client, 0x3014, &v);
++ dprintk(2, "0x3014 = %x", v);
++ ov5630_read(client, 0x3002, &v);
++ dprintk(2, "0x3002 = %x", v);
++ ov5630_read(client, 0x3003, &v);
++ dprintk(2, "0x3003 = %x", v);
++
++ dprintk(2, "writing exposure %x to 0x3002/3", value);
++
++ v = value >> 8;
++ ov5630_write(client, 0x3002, v);
++ dprintk(2, "exposure %x was writen to 0x3002", v);
++
++ v = value & 0xff;
++ ov5630_write(client, 0x3003, v);
++ dprintk(2, "exposure %x was writen to 0x3003", v);
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct ov5630_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} ov5630_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_GAIN,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "global gain",
++ .minimum = 0x0,
++ .maximum = 0xFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = ov5630_t_gain,
++/* .query = ov5630_q_gain, */
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_EXPOSURE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "exposure",
++ .minimum = 0x0,
++ .maximum = 0xFFFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = ov5630_t_exposure,
++/* .query = ov5630_q_exposure; */
++ },
++};
++#define N_CONTROLS (ARRAY_SIZE(ov5630_controls))
++
++/*
++static int ov5630_g_gain(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char v;
++
++ DBG_entering;
++
++ ov5630_write(client, 0x3000, &v);
++ dprintk(2, "writing gain %x to 0x3000", value);
++
++ value
++ DBG_leaving;
++ return 0
++}
++*/
++
++static struct ov5630_control *ov5630_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (ov5630_controls[i].qc.id == id)
++ return ov5630_controls + i;
++ return NULL;
++}
++
++static int ov5630_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov5630_control *ctrl = ov5630_find_control(qc->id);
++
++ if (ctrl == NULL)
++ return -EINVAL;
++ *qc = ctrl->qc;
++ return 0;
++}
++
++static int ov5630_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ /*
++ struct ov5630_control *octrl = ov5630_find_control(ctrl->id);
++
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++ */
++ return 0;
++}
++
++static int ov5630_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov5630_control *octrl = ov5630_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++#if 0
++static int ov5630_get_caps(struct i2c_client *c, struct ci_sensor_caps *caps)
++{
++ if (caps == NULL)
++ return -EIO;
++
++ caps->bus_width = SENSOR_BUSWIDTH_10BIT;
++ caps->mode = SENSOR_MODE_BAYER;
++ caps->field_inv = SENSOR_FIELDINV_NOSWAP;
++ caps->field_sel = SENSOR_FIELDSEL_BOTH;
++ caps->ycseq = SENSOR_YCSEQ_YCBYCR;
++ caps->conv422 = SENSOR_CONV422_NOCOSITED;
++ caps->bpat = SENSOR_BPAT_BGBGGRGR;
++ caps->hpol = SENSOR_HPOL_REFPOS;
++ caps->vpol = SENSOR_VPOL_NEG;
++ caps->edge = SENSOR_EDGE_RISING;
++ caps->bls = SENSOR_BLS_OFF;
++ caps->gamma = SENSOR_GAMMA_OFF;
++ caps->cconv = SENSOR_CCONV_OFF;
++ caps->res = SENSOR_RES_QXGA_PLUS | SENSOR_RES_1080P |
++ SENSOR_RES_XGA_PLUS | SENSOR_RES_720P | SENSOR_RES_VGA;
++ caps->blc = SENSOR_BLC_OFF;
++ caps->agc = SENSOR_AGC_OFF;
++ caps->awb = SENSOR_AWB_OFF;
++ caps->aec = SENSOR_AEC_OFF;
++ caps->cie_profile = SENSOR_CIEPROF_D65 | SENSOR_CIEPROF_D75 |
++ SENSOR_CIEPROF_F11 | SENSOR_CIEPROF_F12 | SENSOR_CIEPROF_A |
++ SENSOR_CIEPROF_F2;
++ caps->flicker_freq = SENSOR_FLICKER_100 | SENSOR_FLICKER_120;
++ caps->type = SENSOR_TYPE_RAW;
++ /* caps->name = "ov5630"; */
++ strcpy(caps->name, "ov5630");
++
++ return 0;
++}
++
++static int ov5630_get_config(struct i2c_client *c,
++ struct ci_sensor_config *config)
++{
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ if (config == NULL) {
++ printk(KERN_WARNING "sensor_get_config: NULL pointer\n");
++ return -EIO;
++ }
++
++ memcpy(config, info, sizeof(struct ci_sensor_config));
++
++ return 0;
++}
++
++static int ov5630_setup(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int ret;
++ u16 width, high;
++ struct ov5630_res_struct *res_index;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ /* Soft reset camera first*/
++ ret = ov5630_write(c, (u32)OV5630_SYS, (u32)0x80);
++
++ /* software sleep/standby */
++ ret = ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* Set registers into default config value */
++ ret = ov5630_write_array(c, ov5630_def_reg);
++
++ /* set image resolution */
++ ci_sensor_res2size(config->res, &width, &high);
++ ret += ov5630_try_res(&width, &high);
++ res_index = ov5630_find_res(width, high);
++ if (res_index->regs)
++ ret += ov5630_write_array(c, res_index->regs);
++ if (!ret)
++ info->res = res_index->res;
++
++ ret += ov5630_set_img_ctrl(c, config);
++
++ /* Set MIPI interface */
++#ifdef OV5630_MIPI
++ ret += ov5630_write_array(c, ov5630_mipi);
++#endif
++
++ /* streaming */
++ ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++ ret += ov5630_write(c, (u32)0x3096, (u32)0x50);
++
++ /*Note here for the time delay */
++ /* ssleep(1); */
++ msleep(500);
++ return ret;
++}
++
++/*
++ * File operation functions
++ */
++static int ov5630_dvp_enable(struct i2c_client *client)
++{
++ int ret;
++
++ u8 reg;
++
++ ret = ov5630_read(client, 0x3506, &reg);
++ reg &= 0xdf;
++ reg |= 0x20;
++ ret += ov5630_write(client, 0x3506, reg);
++
++ return ret;
++}
++
++static int ov5630_dvp_disable(struct i2c_client *client)
++{
++ int ret;
++
++ u8 reg;
++
++ ret = ov5630_read(client, 0x3506, &reg);
++ reg &= 0xdf;
++ ret += ov5630_write(client, 0x3506, reg);
++
++ return ret;
++}
++
++static int ov5630_open(struct i2c_setting *c, void *priv)
++{
++ /* Just wake up sensor */
++ if (ov5630_wakeup())
++ return -EIO;
++ ov5630_init(c->sensor_client);
++ /* ov5630_motor_init(c->motor_client); */
++ ov5630_write(c->sensor_client, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* disable dvp_en */
++ ov5630_dvp_disable(c->sensor_client);
++
++ return 0;
++}
++
++static int ov5630_release(struct i2c_setting *c, void *priv)
++{
++ /* Just suspend the sensor */
++ if (ov5630_standby())
++ return -EIO;
++ return 0;
++}
++
++static int ov5630_on(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software wake up sensor */
++ ret = ov5630_write(c->sensor_client,
++ (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++
++ /* enable dvp_en */
++ return ret + ov5630_dvp_enable(c->sensor_client);
++}
++
++static int ov5630_off(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software standby sensor */
++ ret = ov5630_write(c->sensor_client,
++ (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++ /* disable dvp_en */
++ return ret + ov5630_dvp_disable(c->sensor_client);
++}
++
++static struct sensor_device ov5630 = {
++ .name = "ov5630",
++ .type = SENSOR_TYPE_RAW,
++ .minor = -1,
++ .open = ov5630_open,
++ .release = ov5630_release,
++ .on = ov5630_on,
++ .off = ov5630_off,
++ .querycap = ov5630_get_caps,
++ .get_config = ov5630_get_config,
++ .set_config = ov5630_setup,
++ .enum_parm = ov5630_queryctrl,
++ .get_parm = ov5630_g_ctrl,
++ .set_parm = ov5630_s_ctrl,
++ .try_res = ov5630_try_res,
++ .set_res = ov5630_set_res,
++ .get_ls_corr_config = NULL,
++ .mdi_get_focus = ov5630_motor_get_focus,
++ .mdi_set_focus = ov5630_motor_set_focus,
++ .mdi_max_step = ov5630_motor_max_step,
++ .mdi_calibrate = NULL,
++ .read = ov5630_read,
++ .write = ov5630_write,
++ .suspend = ov5630_standby,
++ .resume = ov5630_wakeup,
++ /* TBC */
++};
++#endif
++
++static int ov5630_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ DBG_entering;
++
++ if (enable) {
++ ov5630_write(client, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++ ov5630_write(client, 0x30b0, 0xff);
++ ov5630_write(client, 0x30b1, 0xff);
++ msleep(500);
++ } else {
++ ov5630_write(client, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++ ov5630_write(client, 0x30b0, 0x00);
++ ov5630_write(client, 0x30b1, 0x00);
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov5630_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = ov5630_res[index].width;
++ fsize->discrete.height = ov5630_res[index].height;
++ fsize->reserved[0] = ov5630_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov5630_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = ov5630_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov5630_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_OV5630 8245
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV5630, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int ov5630_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = ov5630_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int ov5630_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ov5630_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops ov5630_video_ops = {
++ .try_fmt = ov5630_try_fmt,
++ .s_fmt = ov5630_set_fmt,
++ .g_fmt = ov5630_get_fmt,
++ .s_stream = ov5630_s_stream,
++ .enum_framesizes = ov5630_enum_framesizes,
++ .enum_frameintervals = ov5630_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops ov5630_core_ops = {
++ .g_chip_ident = ov5630_g_chip_ident,
++ .queryctrl = ov5630_queryctrl,
++ .g_ctrl = ov5630_g_ctrl,
++ .s_ctrl = ov5630_s_ctrl,
++ .s_gpio = ov5630_s_power,
++ /*.g_ext_ctrls = ov5630_g_ext_ctrls,*/
++ /*.s_ext_ctrls = ov5630_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = ov5630_g_register,
++ .s_register = ov5630_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops ov5630_ops = {
++ .core = &ov5630_core_ops,
++ .video = &ov5630_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++/*
++static unsigned short normal_i2c[] = {I2C_OV5630 >> 1,
++ I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver ov5630_driver;
++*/
++static int ov5630_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u32 value;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ /* if (ov5630_wakeup()) */
++ /* return -ENODEV; */
++ ov5630_wakeup();
++
++ ov5630_read(client, (u32)OV5630_PID_H, &value);
++ if ((u8)value != 0x56) {
++ dprintk(1, "PID != 0x56, but %x", value);
++ dprintk(2, "client->addr = %x", client->addr);
++ return -ENODEV;
++ }
++
++ printk(KERN_INFO "Init ov5630 sensor success\n");
++ return 0;
++}
++
++static int ov5630_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++/* struct i2c_client *motor; */
++
++ DBG_entering;
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info) {
++ eprintk("fail to malloc for ci_sensor_config");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = ov5630_detect(client);
++ if (ret) {
++ dprintk(1, "error ov5630_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov5630_ops);
++
++ /*
++ * Initialization OV5630
++ * then turn into standby mode
++ */
++ /* ret = ov5630_standby(); */
++ ret = ov5630_init(client);
++ if (ret) {
++ eprintk("error calling ov5630_init");
++ goto out_free;
++ }
++ ov5630_standby();
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int ov5630_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++static const struct i2c_device_id ov5630_id[] = {
++ {"ov5630", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5630_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov5630",
++ .probe = ov5630_probe,
++ .remove = ov5630_remove,
++ /* .suspend = ov5630_suspend,
++ * .resume = ov5630_resume, */
++ .id_table = ov5630_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 5630 sensors");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630/ov5630.h
+@@ -0,0 +1,672 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_OV5630 0x6C
++/* Should add to kernel source */
++#define I2C_DRIVERID_OV5630 1046
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 49
++#define GPIO_RESET_PIN 50
++
++/* System control register */
++#define OV5630_AGC 0x3000
++#define OV5630_AGCS 0x3001
++#define OV5630_AEC_H 0x3002
++#define OV5630_AEC_L 0x3003
++#define OV5630_LAEC_H 0x3004
++#define OV5630_LAEC_L 0x3005
++#define OV5630_AECS_H 0x3008
++#define OV5630_AECS_L 0x3009
++#define OV5630_PID_H 0x300A
++#define OV5630_PID_L 0x300B
++#define OV5630_SCCB_ID 0x300C
++#define OV5630_PLL_1 0x300E
++#define OV5630_PLL_2 0x300F
++#define OV5630_PLL_3 0x3010
++#define OV5630_PLL_4 0x3011
++#define OV5630_SYS 0x3012
++#define OV5630_AUTO_1 0x3013
++#define OV5630_AUTO_2 0x3014
++#define OV5630_AUTO_3 0x3015
++#define OV5630_AUTO_4 0x3016
++#define OV5630_AUTO_5 0x3017
++#define OV5630_WPT 0x3018
++#define OV5630_BPT 0x3019
++#define OV5630_VPT 0x301A
++#define OV5630_YAVG 0x301B
++#define OV5630_AECG_50 0x301C
++#define OV5630_AECG_60 0x301D
++#define OV5630_ADDVS_H 0x301E
++#define OV5630_ADDVS_L 0x301F
++#define OV5630_FRAME_LENGTH_LINES_H 0x3020
++#define OV5630_FRAME_LENGTH_LINES_L 0x3021
++#define OV5630_LINE_LENGTH_PCK_H 0x3022
++#define OV5630_LINE_LENGTH_PCK_L 0x3023
++#define OV5630_X_ADDR_START_H 0x3024
++#define OV5630_X_ADDR_START_L 0x3025
++#define OV5630_Y_ADDR_START_H 0x3026
++#define OV5630_Y_ADDR_START_L 0x3027
++#define OV5630_X_ADDR_END_H 0x3028
++#define OV5630_X_ADDR_END_L 0x3029
++#define OV5630_Y_ADDR_END_H 0x302A
++#define OV5630_Y_ADDR_END_L 0x302B
++#define OV5630_X_OUTPUT_SIZE_H 0x302C
++#define OV5630_X_OUTPUT_SIZE_L 0x302D
++#define OV5630_Y_OUTPUT_SIZE_H 0x302E
++#define OV5630_Y_OUTPUT_SIZE_L 0x302F
++#define OV5630_FRAME_CNT 0x3030
++#define OV5630_DATR_LMO_0 0x3038
++#define OV5630_DATR_LMO_1 0x3039
++#define OV5630_DATR_LMO_2 0x303A
++#define OV5630_DATR_D56 0x303D
++#define OV5630_DATR_EF 0x303E
++#define OV5630_R_SIGMA_0 0x3048
++#define OV5630_R_SIGMA_1 0x3049
++#define OV5630_R_SIGMA_2 0x304A
++#define OV5630_R_SIGMA_3 0x304B
++#define OV5630_R_SIGMA_4 0x304C
++#define OV5630_R_SIGMA_5 0x304D
++#define OV5630_D56COM 0x304E
++#define OV5630_5060TH 0x3050
++#define OV5630_LMO_TH1 0x3058
++#define OV5630_LMO_TH2 0x3059
++#define OV5630_LMO_K 0x305A
++#define OV5630_BD50ST_H 0x305C
++#define OV5630_BD50ST_L 0x305D
++#define OV5630_BD60ST_H 0x305E
++#define OV5630_BD60ST_L 0x305F
++#define OV5630_HSYNST 0x306D
++#define OV5630_HSYNED 0x306E
++#define OV5630_HSYNED_HSYNST 0x306F
++#define OV5630_TMC_RWIN0 0x3070
++#define OV5630_IO_CTRL0 0x30B0
++#define OV5630_IO_CTRL1 0x30B1
++#define OV5630_IO_CTRL2 0x30B2
++#define OV5630_DSIO_0 0x30B3
++#define OV5630_DSIO_1 0x30B4
++#define OV5630_DSIO_2 0x30B5
++#define OV5630_TMC_10 0x30B6
++#define OV5630_TMC_12 0x30B7
++#define OV5630_TMC_14 0x30B9
++#define OV5630_TMC_COM4 0x30BA
++#define OV5630_TMC_REG6C 0x30BB
++#define OV5630_TMC_REG6E 0x30BC
++#define OV5630_R_CLK_S 0x30BD
++#define OV5630_R_CLK_A 0x30BE
++#define OV5630_R_CLK_A1 0x30BF
++#define OV5630_FRS_0 0x30E0
++#define OV5630_FRS_1 0x30E1
++#define OV5630_FRS_2 0x30E2
++#define OV5630_FRS_3 0x30E3
++#define OV5630_FRS_FECNT 0x30E4
++#define OV5630_FRS_FECNT_0 0x30E5
++#define OV5630_FRS_FECNT_1 0x30E6
++#define OV5630_FRS_RFRM 0x30E7
++#define OV5630_FRS_RSTRB 0x30E8
++#define OV5630_SA1TMC 0x30E9
++#define OV5630_TMC_MISC0 0x30EA
++#define OV5630_TMC_MISC1 0x30EB
++#define OV5630_FLEX_TXP 0x30F0
++#define OV5630_FLEX_FLT 0x30F1
++#define OV5630_FLEX_TXT 0x30F2
++#define OV5630_FLEX_HBK 0x30F3
++#define OV5630_FLEX_HSG 0x30F4
++#define OV5630_FLEX_SA1SFT 0x30F5
++#define OV5630_RVSOPT 0x30F6
++#define OV5630_AUTO 0x30F7
++#define OV5630_IMAGE_TRANSFORM 0x30F8
++#define OV5630_IMAGE_LUM 0x30F9
++#define OV5630_IMAGE_SYSTEM 0x30FA
++#define OV5630_GROUP_WR 0x30FF
++
++/* CIF control register */
++#define OV5630_CIF_CTRL2 0x3202
++
++/* ISP control register */
++#define OV5630_ISP_CTL00 0x3300
++#define OV5630_ISP_CTL01 0x3301
++#define OV5630_ISP_CTL02 0x3302
++#define OV5630_ISP_03 0x3303
++#define OV5630_ISP_DIG_GAIN_MAN 0x3304
++#define OV5630_ISP_BIAS_MAN 0x3305
++#define OV5630_ISP_06 0x3306
++#define OV5630_ISP_STABLE_RANGE 0x3307
++#define OV5630_ISP_R_GAIN_MAN_1 0x3308
++#define OV5630_ISP_R_GAIN_MAN_0 0x3309
++#define OV5630_ISP_G_GAIN_MAN_1 0x330A
++#define OV5630_ISP_G_GAIN_MAN_0 0x330B
++#define OV5630_ISP_B_GAIN_MAN_1 0x330C
++#define OV5630_ISP_B_GAIN_MAN_0 0x330D
++#define OV5630_ISP_STABLE_RANGEW 0x330E
++#define OV5630_ISP_AWB_FRAME_CNT 0x330F
++#define OV5630_ISP_11 0x3311
++#define OV5630_ISP_12 0x3312
++#define OV5630_ISP_13 0x3313
++#define OV5630_ISP_HSIZE_IN_1 0x3314
++#define OV5630_ISP_HSIZE_IN_0 0x3315
++#define OV5630_ISP_VSIZE_IN_1 0x3316
++#define OV5630_ISP_VSIZE_IN_0 0x3317
++#define OV5630_ISP_18 0x3318
++#define OV5630_ISP_19 0x3319
++#define OV5630_ISP_EVEN_MAN0 0x331A
++#define OV5630_ISP_EVEN_MAN1 0x331B
++#define OV5630_ISP_EVEN_MAN2 0x331C
++#define OV5630_ISP_EVEN_MAN3 0x331D
++#define OV5630_ISP_1E 0x331E
++#define OV5630_ISP_1F 0x331F
++#define OV5630_ISP_BLC_LMT_OPTION 0x3320
++#define OV5630_ISP_BLC_THRE 0x3321
++#define OV5630_ISP_22 0x3322
++#define OV5630_ISP_23 0x3323
++#define OV5630_ISP_BLC_MAN0_1 0x3324
++#define OV5630_ISP_BLC_MAN0_0 0x3325
++#define OV5630_ISP_BLC_MAN1_1 0x3326
++#define OV5630_ISP_BLC_MAN1_0 0x3327
++#define OV5630_ISP_BLC_MAN2_1 0x3328
++#define OV5630_ISP_BLC_MAN2_0 0x3329
++#define OV5630_ISP_BLC_MAN3_1 0x332A
++#define OV5630_ISP_BLC_MAN3_0 0x332B
++#define OV5630_ISP_BLC_MAN4_1 0x332C
++#define OV5630_ISP_BLC_MAN4_0 0x332D
++#define OV5630_ISP_BLC_MAN5_1 0x332E
++#define OV5630_ISP_BLC_MAN5_0 0x332F
++#define OV5630_ISP_BLC_MAN6_1 0x3330
++#define OV5630_ISP_BLC_MAN6_0 0x3331
++#define OV5630_ISP_BLC_MAN7_1 0x3332
++#define OV5630_ISP_BLC_MAN7_0 0x3333
++#define OV5630_ISP_CD 0x33CD
++#define OV5630_ISP_FF 0x33FF
++
++/* clipping control register */
++#define OV5630_CLIP_CTRL0 0x3400
++#define OV5630_CLIP_CTRL1 0x3401
++#define OV5630_CLIP_CTRL2 0x3402
++#define OV5630_CLIP_CTRL3 0x3403
++#define OV5630_CLIP_CTRL4 0x3404
++#define OV5630_CLIP_CTRL5 0x3405
++#define OV5630_CLIP_CTRL6 0x3406
++#define OV5630_CLIP_CTRL7 0x3407
++
++/* DVP control register */
++#define OV5630_DVP_CTRL00 0x3500
++#define OV5630_DVP_CTRL01 0x3501
++#define OV5630_DVP_CTRL02 0x3502
++#define OV5630_DVP_CTRL03 0x3503
++#define OV5630_DVP_CTRL04 0x3504
++#define OV5630_DVP_CTRL05 0x3505
++#define OV5630_DVP_CTRL06 0x3506
++#define OV5630_DVP_CTRL07 0x3507
++#define OV5630_DVP_CTRL08 0x3508
++#define OV5630_DVP_CTRL09 0x3509
++#define OV5630_DVP_CTRL0A 0x350A
++#define OV5630_DVP_CTRL0B 0x350B
++#define OV5630_DVP_CTRL0C 0x350C
++#define OV5630_DVP_CTRL0D 0x350D
++#define OV5630_DVP_CTRL0E 0x350E
++#define OV5630_DVP_CTRL0F 0x350F
++#define OV5630_DVP_CTRL10 0x3510
++#define OV5630_DVP_CTRL11 0x3511
++#define OV5630_DVP_CTRL12 0x3512
++#define OV5630_DVP_CTRL13 0x3513
++#define OV5630_DVP_CTRL14 0x3514
++#define OV5630_DVP_CTRL15 0x3515
++#define OV5630_DVP_CTRL16 0x3516
++#define OV5630_DVP_CTRL17 0x3517
++#define OV5630_DVP_CTRL18 0x3518
++#define OV5630_DVP_CTRL19 0x3519
++#define OV5630_DVP_CTRL1A 0x351A
++#define OV5630_DVP_CTRL1B 0x351B
++#define OV5630_DVP_CTRL1C 0x351C
++#define OV5630_DVP_CTRL1D 0x351D
++#define OV5630_DVP_CTRL1E 0x351E
++#define OV5630_DVP_CTRL1F 0x351F
++
++/* MIPI control register */
++#define OV5630_MIPI_CTRL00 0x3600
++#define OV5630_MIPI_CTRL01 0x3601
++#define OV5630_MIPI_CTRL02 0x3602
++#define OV5630_MIPI_CTRL03 0x3603
++#define OV5630_MIPI_CTRL04 0x3604
++#define OV5630_MIPI_CTRL05 0x3605
++#define OV5630_MIPI_CTRL06 0x3606
++#define OV5630_MIPI_CTRL07 0x3607
++#define OV5630_MIPI_CTRL08 0x3608
++#define OV5630_MIPI_CTRL09 0x3609
++#define OV5630_MIPI_CTRL0A 0x360A
++#define OV5630_MIPI_CTRL0B 0x360B
++#define OV5630_MIPI_CTRL0C 0x360C
++#define OV5630_MIPI_CTRL0D 0x360D
++#define OV5630_MIPI_CTRL0E 0x360E
++#define OV5630_MIPI_CTRL0F 0x360F
++#define OV5630_MIPI_CTRL10 0x3610
++#define OV5630_MIPI_CTRL11 0x3611
++#define OV5630_MIPI_CTRL12 0x3612
++#define OV5630_MIPI_CTRL13 0x3613
++#define OV5630_MIPI_CTRL14 0x3614
++#define OV5630_MIPI_CTRL15 0x3615
++#define OV5630_MIPI_CTRL16 0x3616
++#define OV5630_MIPI_CTRL17 0x3617
++#define OV5630_MIPI_CTRL18 0x3618
++#define OV5630_MIPI_CTRL19 0x3619
++#define OV5630_MIPI_CTRL1A 0x361A
++#define OV5630_MIPI_CTRL1B 0x361B
++#define OV5630_MIPI_CTRL1C 0x361C
++#define OV5630_MIPI_CTRL1D 0x361D
++#define OV5630_MIPI_CTRL1E 0x361E
++#define OV5630_MIPI_CTRL1F 0x361F
++#define OV5630_MIPI_CTRL20 0x3620
++#define OV5630_MIPI_CTRL21 0x3621
++#define OV5630_MIPI_CTRL22 0x3622
++#define OV5630_MIPI_CTRL23 0x3623
++#define OV5630_MIPI_CTRL24 0x3624
++#define OV5630_MIPI_CTRL25 0x3625
++#define OV5630_MIPI_CTRL26 0x3626
++#define OV5630_MIPI_CTRL27 0x3627
++#define OV5630_MIPI_CTRL28 0x3628
++#define OV5630_MIPI_CTRL29 0x3629
++#define OV5630_MIPI_CTRL2A 0x362A
++#define OV5630_MIPI_CTRL2B 0x362B
++#define OV5630_MIPI_CTRL2C 0x362C
++#define OV5630_MIPI_CTRL2D 0x362D
++#define OV5630_MIPI_CTRL2E 0x362E
++#define OV5630_MIPI_CTRL2F 0x362F
++#define OV5630_MIPI_CTRL30 0x3630
++#define OV5630_MIPI_CTRL31 0x3631
++#define OV5630_MIPI_CTRL32 0x3632
++#define OV5630_MIPI_CTRL33 0x3633
++#define OV5630_MIPI_CTRL34 0x3634
++#define OV5630_MIPI_CTRL35 0x3635
++#define OV5630_MIPI_CTRL36 0x3636
++#define OV5630_MIPI_CTRL37 0x3637
++#define OV5630_MIPI_CTRL38 0x3638
++#define OV5630_MIPI_CTRL39 0x3639
++#define OV5630_MIPI_CTRL3A 0x363A
++#define OV5630_MIPI_CTRL3B 0x363B
++#define OV5630_MIPI_CTRL3C 0x363C
++#define OV5630_MIPI_CTRL3D 0x363D
++#define OV5630_MIPI_CTRL3E 0x363E
++#define OV5630_MIPI_CTRL3F 0x363F
++#define OV5630_MIPI_RO61 0x3661
++#define OV5630_MIPI_RO62 0x3662
++#define OV5630_MIPI_RO63 0x3663
++#define OV5630_MIPI_RO64 0x3664
++#define OV5630_MIPI_RO65 0x3665
++#define OV5630_MIPI_RO66 0x3666
++
++/* General definition for ov5630 */
++#define OV5630_OUTWND_MAX_H QSXXGA_PLUS4_SIZE_H
++#define OV5630_OUTWND_MAX_V QSXGA_PLUS4_SIZE_V
++
++struct regval_list {
++ u16 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 5Mega Pixel, 2592x1944
++ */
++static struct regval_list ov5630_def_reg[] = {
++ {0x300f, 0x00}, /*00*/
++ {0x30b2, 0x32},
++ {0x3084, 0x44},
++ {0x3016, 0x01},
++ {0x308a, 0x25},
++
++ {0x3013, 0xff},
++ {0x3015, 0x03},
++ {0x30bf, 0x02},
++
++ {0x3065, 0x50},
++ {0x3068, 0x08},
++ {0x30ac, 0x05},
++ {0x309e, 0x24},
++ {0x3091, 0x04},
++
++ {0x3075, 0x22},
++ {0x3076, 0x23},
++ {0x3077, 0x24},
++ {0x3078, 0x25},
++
++ {0x30b5, 0x0c},
++ {0x3090, 0x67},
++
++ {0x30f9, 0x11},
++ {0x3311, 0x80},
++ {0x3312, 0x1f},
++
++ {0x3103, 0x10},
++ {0x305c, 0x01},
++ {0x305d, 0x29},
++ {0x305e, 0x00},
++ {0x305f, 0xf7},
++ {0x308d, 0x0b},
++ {0x30ad, 0x20},
++ {0x3072, 0x0d},
++ {0x308b, 0x82},
++ {0x3317, 0x9c},
++ {0x3318, 0x22},
++ {0x3025, 0x20},
++ {0x3027, 0x08},
++ {0x3029, 0x3f},
++ {0x302b, 0xa3},
++ {0x3319, 0x22},
++ {0x30a1, 0xc4},
++ {0x306a, 0x05},
++ {0x3315, 0x22},
++ {0x30ae, 0x25},
++ {0x3304, 0x40},
++ {0x3099, 0x49},
++
++ {0x300e, 0xb1/*b0*/}, /* Note this PLL setting*/
++ {0x300f, 0x10}, /*00*/
++ {0x3010, 0x07}, /*change from 0f according to SV */
++ {0x3011, 0x40},
++ {0x30af, 0x10},
++ {0x304a, 0x00},
++ {0x304d, 0x00},
++
++ {0x304e, 0x22},
++ {0x304d, 0xa0},
++ {0x3058, 0x00},
++ {0x3059, 0xff},
++ {0x305a, 0x00},
++
++ {0x30e9, 0x04},
++ {0x3084, 0x44},
++ {0x3090, 0x67},
++ {0x30e9, 0x04},
++
++ {0x30b5, 0x1c},
++ {0x331f, 0x22},
++ {0x30ae, 0x15},
++ {0x3304, 0x4c},
++
++ {0x3300, 0xfb},
++ {0x3071, 0x34},
++ {0x30e7, 0x01},
++ {0x3302, 0x60},
++ {0x331e, 0x05},
++ {0x3321, 0x04},
++
++ /* Mark end */
++ {0xffff, 0xff},
++
++};
++
++/* MIPI register are removed by Wen */
++
++/* 2592x1944 */
++static struct regval_list ov5630_res_qsxga_plus4[] = {
++ {0x3020, 0x07},
++ {0x3021, 0xbc},
++ {0x3022, 0x0c/*0a*/},
++ {0x3023, 0xa0/*00*/},
++ {0x305c, 0x01},
++ {0x305d, 0x29},
++ {0x305e, 0x00},
++ {0x305f, 0xf7},
++
++ /* 30fps , 96 MHZ*/
++ /* {0x300f, 0x10}, */
++ {0x300f, 0x10},
++ {0x300e, 0xb1},
++ /* mipi */
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++	/* parallel */
++ {0x30fa, 0x01},
++#endif
++ /* end post*/
++ {0xffff, 0xff},
++};
++
++/* 1920x1080 */
++static struct regval_list ov5630_res_1080p[] = {
++ /*res start*/
++ {0x3020, 0x04},
++ {0x3021, 0x5c},
++ {0x3022, 0x0b/*0a*/},
++ {0x3023, 0x32/*00*/},
++ {0x305c, 0x01},
++ {0x305d, 0x2c},
++ {0x3024, 0x01},
++ {0x3025, 0x6e/*70*/},
++ {0x3026, 0x01},
++ {0x3027, 0xb8},
++ {0x3028, 0x08},
++ {0x3029, 0xef},
++ {0x302a, 0x05},
++ {0x302b, 0xf3},
++ {0x302c, 0x07},
++ {0x302d, 0x80},
++ {0x302e, 0x04},
++ {0x302f, 0x38},
++ {0x3314, 0x07},
++ {0x3315, 0x82/*80*/},
++ {0x3316, 0x04},
++ {0x3317, 0x3c},
++
++ /* 30fps , 96 MHZ*/
++ {0x300f, 0x10}, /* 00 */
++ {0x300e, 0xb1},
++
++ /* mipi */
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++	/* parallel */
++ {0x30fa, 0x01},
++#endif
++ /* end post*/
++ {0xffff, 0xff},
++};
++
++/* 1280x960 V1F2_H1F2 */
++static struct regval_list ov5630_res_xga_plus[] = {
++ {0x3020, 0x03},
++ {0x3021, 0xe4},
++ {0x3022, 0x0c/*07*/},
++ {0x3023, 0x8c/*76*/},
++ {0x305c, 0x00},
++ {0x305d, 0xb1},
++ {0x3024, 0x00},
++ {0x3025, 0x30},
++ {0x3026, 0x00},
++ {0x3027, 0x10/*14*/},
++ {0x3028, 0x0a},
++ {0x3029, 0x2f},
++ {0x302a, 0x07},
++ {0x302b, 0xa7/*a7*/},
++ {0x302c, 0x05},
++ {0x302d, 0x00},
++ {0x302e, 0x03},
++ {0x302f, 0xc0},
++
++ {0x30f8, 0x05},
++ {0x30f9, 0x13},
++ {0x3314, 0x05},
++ {0x3315, 0x02/*00*/},
++ {0x3316, 0x03},
++ {0x3317, 0xc4},
++
++ {0x300f, 0x10}, /* 00 */
++ {0x300e, 0xb1},
++
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++	/* parallel */
++ {0x30fa, 0x01},
++#endif
++
++ {0xffff, 0xff},
++};
++
++/* 1280x720, V1F2 & H1F2 */
++static struct regval_list ov5630_res_720p[] = {
++ {0x3020, 0x02},
++ {0x3021, 0xf4},
++ {0x3022, 0x07},
++ {0x3023, 0x80},
++ {0x305c, 0x00},
++ {0x305d, 0xff},
++ {0x305e, 0x00},
++ {0x305f, 0xd4},
++
++ /* Crop then downscale */
++ {0x3024, 0x00},
++ {0x3025, 0x2c},
++ {0x3026, 0x00},
++ {0x3027, 0xf0},
++ {0x3028, 0x0a},
++ {0x3029, 0x2f},
++ {0x302a, 0x08},
++ {0x302b, 0x97},
++
++ {0x30f8, 0x05},
++
++ {0x302c, 0x05},
++ {0x302d, 0x00},
++ {0x302e, 0x02},
++ {0x302f, 0xd0},
++
++ {0x30f9, 0x13},
++ {0x3314, 0x05},
++ {0x3315, 0x04},
++ {0x3316, 0x02},
++ {0x3317, 0xd4},
++
++ /* Add this to test setting from OVT */
++ {0x300f, 0x10}, /*00*/
++ {0x300e, 0xb0},
++
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++	/* parallel */
++ {0x30fa, 0x01},
++#endif
++
++ {0xffff, 0xff},
++};
++
++/*VGA 40fps now*/
++static struct regval_list ov5630_res_vga_ac04_bill[] = {
++ /* res setting*/
++ {0x3020, 0x02},
++ {0x3021, 0x04},
++ {0x3022, 0x08},
++ {0x3023, 0x48},
++ {0x305c, 0x00},
++ {0x305d, 0x5e},
++ {0x3024, 0x00},
++ {0x3025, 0x2c},/*2c*/
++ {0x3026, 0x00},
++ {0x3027, 0x14},
++ {0x3028, 0x0a},
++ {0x3029, 0x2f},
++ {0x302a, 0x07},
++ {0x302b, 0xa3},
++ {0x302c, 0x02},
++ {0x302d, 0x80},
++ {0x302e, 0x01},
++ {0x302f, 0xe0},
++
++ {0x30b3, 0x09},
++ {0x3301, 0xc1},
++ {0x3313, 0xf1},
++ {0x3314, 0x05},
++ {0x3315, 0x04},/*04*/
++ {0x3316, 0x01},
++ {0x3317, 0xe4},
++ {0x3318, 0x20},
++
++ {0x300f, 0x10/*00*/},
++ {0x30f8, 0x09},
++
++ {0x300f, 0x11},
++ {0x300e, 0xb2},
++
++ {0x3015, 0x02},
++ /* mipi */
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++
++	/* parallel */
++ {0x30fa, 0x01},
++ {0x30f8, 0x09},
++ {0x3096, 0x50},
++#endif
++
++ {0xffff, 0xff},
++};
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630_motor/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV5630_MOTOR
++ tristate "Moorestown OV5630 motor"
++ depends on I2C && VIDEO_MRST_ISP && VIDEO_MRST_OV5630
++
++ ---help---
++	  Say Y here if your platform supports the OV5630 motor
++
++ To compile this driver as a module, choose M here: the
++	  module will be called mrstov5630_motor.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630_motor/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_OV5630_MOTOR) += mrstov5630_motor.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630_motor/mrstov5630_motor.c
+@@ -0,0 +1,414 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ov5630_motor.h"
++
++static int mrstov5630_motor_debug;
++module_param(mrstov5630_motor_debug, int, 0644);
++MODULE_PARM_DESC(mrstov5630_motor_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov5630_motor_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ov5630_motor *to_motor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ov5630_motor, sd);
++}
++
++static int motor_read(struct i2c_client *c, u16 *reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[2];
++
++ /* Read needs two message to go */
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.addr = c->addr;
++ msg.buf = msgbuf;
++ msg.len = 2;
++ msg.flags = I2C_M_RD;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ *reg = (msgbuf[0] << 8 | msgbuf[1]);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int motor_write(struct i2c_client *c, u16 reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[2];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = reg >> 8;
++ msgbuf[1] = reg;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = 2;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int ov5630_motor_goto_position(struct i2c_client *c,
++ unsigned short code,
++ struct ov5630_motor *config)
++{
++ int max_code, min_code;
++ u8 cmdh, cmdl;
++ u16 cmd, val = 0;
++
++ max_code = config->macro_code;
++ min_code = config->infin_code;
++
++ if (code > max_code)
++ code = max_code;
++ if (code < min_code)
++ code = min_code;
++
++ cmdh = (MOTOR_DAC_CODE_H(code));
++ cmdl = (MOTOR_DAC_CODE_L(code) | MOTOR_DAC_CTRL_MODE_2(SUB_MODE_4));
++ cmd = cmdh << 8 | cmdl;
++
++ motor_write(c, cmd);
++ /*Delay more than full-scale transition time 8.8ms*/
++ msleep(8);
++ motor_read(c, &val);
++
++ return (cmd == val ? 0 : -1);
++}
++
++int ov5630_motor_wakeup(void)
++{
++ return gpio_direction_output(GPIO_AF_PD, 1);
++}
++
++int ov5630_motor_standby(void)
++{
++ return gpio_direction_output(GPIO_AF_PD, 0);
++}
++
++int ov5630_motor_init(struct i2c_client *client, struct ov5630_motor *config)
++{
++ int ret;
++ int infin_cur, macro_cur;
++#ifdef OSPM
++ /* Power on motor */
++ struct ipc_pmic_reg_data ipcbuf;
++
++ PMIC_WRITE1(ipcbuf, 0x50, 0x27);
++ pr_warning("Power on Vcc33 for motor\n");
++#endif
++
++ infin_cur = MAX(MOTOR_INFIN_CUR, MOTOR_DAC_MIN_CUR);
++ macro_cur = MIN(MOTOR_MACRO_CUR, MOTOR_DAC_MAX_CUR);
++
++ config->infin_cur = infin_cur;
++ config->macro_cur = macro_cur;
++
++ config->infin_code = (int)((infin_cur * MOTOR_DAC_MAX_CODE)
++ / MOTOR_DAC_MAX_CUR);
++ config->macro_code = (int)((macro_cur * MOTOR_DAC_MAX_CODE)
++ / MOTOR_DAC_MAX_CUR);
++
++ config->max_step = ((config->macro_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT) + 1;
++ /* Note here, maybe macro_code */
++ ret = ov5630_motor_goto_position(client, config->infin_code, config);
++ if (!ret)
++ config->cur_code = config->infin_code;
++ else
++ pr_err("Error while initializing motor\n");
++
++ return ret;
++}
++
++int ov5630_motor_set_focus(struct i2c_client *c, int step,
++ struct ov5630_motor *config)
++{
++ int s_code, ret;
++ int max_step = config->max_step;
++ unsigned int val = step;
++
++ DBG_entering;
++	dprintk(1, "setting step %d", step);
++ if (val > max_step)
++ val = max_step;
++
++ s_code = (val << MOTOR_STEP_SHIFT);
++ s_code += config->infin_code;
++
++ ret = ov5630_motor_goto_position(c, s_code, config);
++ if (!ret)
++ config->cur_code = s_code;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int ov5630_motor_s_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ov5630_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ ret = ov5630_motor_set_focus(c, ctrl->value, config);
++ if (ret) {
++		eprintk("error call ov5630_motor_set_focus");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++int ov5630_motor_get_focus(struct i2c_client *c, unsigned int *step,
++ struct ov5630_motor *config)
++{
++ int ret_step;
++
++ ret_step = ((config->cur_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT);
++
++ if (ret_step <= config->max_step)
++ *step = ret_step;
++ else
++ *step = config->max_step;
++
++ return 0;
++}
++
++static int ov5630_motor_g_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ov5630_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ dprintk(2, "c = %p, config = %p, ctrl = %p", c, config, ctrl);
++ ret = ov5630_motor_get_focus(c, &ctrl->value, config);
++ if (ret) {
++		eprintk("error call ov5630_motor_get_focus");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++int ov5630_motor_max_step(struct i2c_client *c, unsigned int *max_code,
++ struct ov5630_motor *config)
++{
++ if (config->max_step != 0)
++ *max_code = config->max_step;
++ return 0;
++}
++
++static int ov5630_motor_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov5630_motor *config = to_motor_config(sd);
++
++ DBG_entering;
++
++ if (qc->id != V4L2_CID_FOCUS_ABSOLUTE)
++ return -EINVAL;
++
++ dprintk(1, "got focus range of %d", config->max_step);
++ if (config->max_step != 0)
++ qc->maximum = config->max_step;
++ DBG_leaving;
++ return 0;
++}
++static const struct v4l2_subdev_core_ops ov5630_motor_core_ops = {
++ /*
++ .queryctrl = ov5630_queryctrl,
++ .g_ctrl = ov5630_g_ctrl,
++ */
++ .g_ctrl = ov5630_motor_g_ctrl,
++ .s_ctrl = ov5630_motor_s_ctrl,
++ .queryctrl = ov5630_motor_queryctrl,
++};
++
++static const struct v4l2_subdev_ops ov5630_motor_ops = {
++ .core = &ov5630_motor_core_ops,
++};
++
++static int ov5630_motor_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ /* if (ov5630_motor_wakeup()) */
++ /* return -ENODEV; */
++ ov5630_motor_wakeup();
++ ssleep(1);
++
++ /*
++ ov5630_motor_read(client, (u32)OV5630_PID_H, &value);
++ if ((u8)value != 0x56) {
++ eprintk("PID != 0x56, but %x", value);
++ dprintk(2, "client->addr = %x", client->addr);
++ return -ENODEV;
++ }
++ */
++
++ return 0;
++}
++
++static int ov5630_motor_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ov5630_motor *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++/* struct i2c_client *motor; */
++
++ DBG_entering;
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ov5630_motor), GFP_KERNEL);
++ if (!info) {
++ eprintk("fail to malloc for ci_motor");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = ov5630_motor_detect(client);
++ if (ret) {
++ eprintk("error ov5630_motor_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov5630_motor_ops);
++
++ /*
++ * Initialization OV5630
++ * then turn into standby mode
++ */
++ /* ret = ov5630_motor_standby(); */
++ ret = ov5630_motor_init(client, info);
++ if (ret) {
++ eprintk("error calling ov5630_motor_init");
++ goto out_free;
++ }
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int ov5630_motor_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_motor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++static const struct i2c_device_id ov5630_motor_id[] = {
++ {"ov5630_motor", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, ov5630_motor_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov5630_motor",
++ .probe = ov5630_motor_probe,
++ .remove = ov5630_motor_remove,
++ /* .suspend = ov5630_suspend,
++ * .resume = ov5630_resume, */
++ .id_table = ov5630_motor_id,
++};
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 5630 sensors");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov5630_motor/ov5630_motor.h
+@@ -0,0 +1,86 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <media/v4l2-subdev.h>
++
++/* VCM start current (mA) */
++#define MOTOR_INFIN_CUR 15
++/* VCM max current for Macro (mA) */
++#define MOTOR_MACRO_CUR 90
++/* DAC output max current (mA) */
++#define MOTOR_DAC_MAX_CUR 100
++/* DAC output min current (mA) */
++#define MOTOR_DAC_MIN_CUR 3
++
++#define MOTOR_DAC_BIT_RES 10
++#define MOTOR_DAC_MAX_CODE ((1 << MOTOR_DAC_BIT_RES) - 1)
++
++#define MOTOR_STEP_SHIFT 4
++
++#define MAX(x, y) ((x) > (y) ? (x) : (y))
++#define MIN(x, y) ((x) < (y) ? (x) : (y))
++
++/* DAC register related define */
++#define MOTOR_POWER_DOWN (1 << 7)
++#define PD_ENABLE (1 << 7)
++#define PD_DISABLE (0)
++
++#define MOTOR_DAC_CODE_H(x) ((x >> 4) & 0x3f)
++#define MOTOR_DAC_CODE_L(x) ((x << 4) & 0xf0)
++
++#define MOTOR_DAC_CTRL_MODE_0 0x00
++#define MOTOR_DAC_CTRL_MODE_1(x) (x & 0x07)
++#define MOTOR_DAC_CTRL_MODE_2(x) ((x & 0x07) | 0x08)
++
++#define SUB_MODE_1 0x01
++#define SUB_MODE_2 0x02
++#define SUB_MODE_3 0x03
++#define SUB_MODE_4 0x04
++#define SUB_MODE_5 0x05
++#define SUB_MODE_6 0x06
++#define SUB_MODE_7 0x07
++
++#define OV5630_MOTOR_ADDR (0x18 >> 1)
++#define POWER_EN_PIN 7
++#define GPIO_AF_PD 95
++
++struct ov5630_motor{
++ unsigned int infin_cur;
++ unsigned int infin_code;
++ unsigned int macro_cur;
++ unsigned int macro_code;
++ unsigned int max_step;
++ unsigned int cur_code;
++ struct v4l2_subdev sd;
++};
++
++extern int ov5630_motor_init(struct i2c_client *client, struct ov5630_motor
++ *config);
++extern int ov5630_motor_standby(void);
++extern int ov5630_motor_wakeup(void);
++extern int ov5630_motor_set_focus(struct i2c_client *c, int step,
++ struct ov5630_motor *config);
++extern int ov5630_motor_get_focus(struct i2c_client *c, unsigned int *step,
++ struct ov5630_motor *config);
++extern int ov5630_motor_max_step(struct i2c_client *c, unsigned int *max_code,
++ struct ov5630_motor *config);
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov9665/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV9665
++ tristate "Moorestown OV9665 SoC Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++	  Say Y here if your platform supports the OV9665 SoC Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov9665.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov9665/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_OV9665) += mrstov9665.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov9665/mrstov9665.c
+@@ -0,0 +1,972 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "ov9665.h"
++
++static int mrstov9665_debug;
++module_param(mrstov9665_debug, int, 0644);
++MODULE_PARM_DESC(mrstov9665_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov9665_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct ov9665_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} ov9665_formats[] = {
++ {
++ .desc = "YUYV 4:2:2",
++ .pixelformat = SENSOR_MODE_BT601,
++ .regs = NULL,
++ },
++};
++#define N_OV9665_FMTS ARRAY_SIZE(ov9665_formats)
++
++static struct ov9665_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} ov9665_res[] = {
++ {
++ .desc = "SXGA",
++ .res = SENSOR_RES_SXGA,
++ .width = 1280,
++ .height = 1024,
++ .fps = 15,
++ .used = 0,
++ .regs = ov9665_res_sxga,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 15,
++ .used = 0,
++ .regs = ov9665_res_vga,
++ },
++};
++#define N_RES (ARRAY_SIZE(ov9665_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int ov9665_read(struct i2c_client *c, unsigned char reg,
++ unsigned char *value)
++{
++ int ret;
++
++ ret = i2c_smbus_read_byte_data(c, reg);
++ if (ret >= 0) {
++ *value = (unsigned char) ret;
++ ret = 0;
++ }
++ return ret;
++}
++
++static int ov9665_write(struct i2c_client *c, unsigned char reg,
++ unsigned char value)
++{
++ int ret = i2c_smbus_write_byte_data(c, reg, value);
++ if (reg == 0x12 && (value & 0x80))
++ msleep(2); /* Wait for reset to run */
++ return ret;
++}
++
++/*
++ * Write a list of register settings; ff/ff stops the process.
++ */
++static int ov9665_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u8 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++ p = vals;
++ while (p->reg_num != 0xff) {
++ ov9665_write(c, p->reg_num, p->value);
++ ov9665_read(c, p->reg_num, &read_val);
++ if (read_val != p->value)
++ err_num++;
++ p++;
++ i++;
++ }
++
++ return 0;
++}
++
++static int ov9665_set_data_pin_in(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0x00);
++ ret += ov9665_write(client, 0xd6, 0x00);
++
++ return ret;
++}
++
++static int ov9665_set_data_pin_out(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0xff);
++ ret += ov9665_write(client, 0xd6, 0xff);
++
++ return ret;
++}
++/*
++ * Sensor specific helper function
++ */
++static int ov9665_standby(void)
++{
++ /* Pull the pin to high to hardware standby */
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int ov9665_wakeup(void)
++{
++ /* Pull the pin to low*/
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ dprintk(1, "PM: wakeup called\n");
++ msleep(10);
++ return 0;
++}
++
++static int ov9665_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ ov9665_standby();
++ if (val == 0)
++ ov9665_wakeup();
++ return 0;
++}
++
++static int ov9665_init(struct i2c_client *c)
++{
++ int ret;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ u8 reg = 0;
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = ov9665_formats[0].pixelformat;
++ info->res = ov9665_res[0].res;
++ info->type = SENSOR_TYPE_SOC;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_ON;
++ info->cconv = SENSOR_CCONV_ON;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++ info->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_GRGRBGBG;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_POS;
++ info->edge = SENSOR_EDGE_FALLING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = 0;
++ memcpy(info->name, "ov9665", 7);
++
++ ret = ov9665_write(c, 0x12, 0x80);
++ /* Set registers into default config value */
++ ret += ov9665_write_array(c, ov9665_def_reg);
++
++ ov9665_read(c, 0x09, &reg);
++ reg = reg | 0x10;
++ ov9665_write(c, 0x09, reg);
++ ov9665_set_data_pin_in(c);
++ ssleep(1);
++
++ return ret;
++}
++
++static int distance(struct ov9665_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++static int ov9665_try_res(u32 *w, u32 *h)
++{
++ struct ov9665_res_struct *res_index, *p = NULL;
++ int dis, last_dis = ov9665_res->width + ov9665_res->height;
++
++ dprintk(1, "&&&&& before %dx%d", *w, *h);
++ for (res_index = ov9665_res;
++ res_index < ov9665_res + N_RES;
++ res_index++) {
++ if ((res_index->width <= *w) && (res_index->height <= *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++ if ((res_index->width < *w) || (res_index->height < *h)) {
++ if (res_index != ov9665_res)
++ res_index--;
++ }
++
++ /*
++ if (p == NULL) {
++ p = ov2650_res;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++ */
++ if (res_index == ov9665_res + N_RES)
++ res_index = ov9665_res + N_RES - 1;
++
++ *w = res_index->width;
++ *h = res_index->height;
++
++ dprintk(1, "&&&&& after %dx%d", *w, *h);
++ return 0;
++}
++
++static struct ov9665_res_struct *ov9665_to_res(u32 w, u32 h)
++{
++ struct ov9665_res_struct *res_index;
++
++ for (res_index = ov9665_res;
++ res_index < ov9665_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= ov9665_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int ov9665_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ return ov9665_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int ov9665_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov9665_res[index].width) &&
++ (height == ov9665_res[index].height)) {
++ ov9665_res[index].used = 1;
++ continue;
++ }
++ ov9665_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++}
++
++static int ov9665_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct ov9665_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ ret = ov9665_try_res(&width, &height);
++ res_index = ov9665_to_res(width, height);
++
++ ov9665_wakeup();
++ /* if ((info->res != res_index->res) && (res_index->regs)) { */
++ if ( res_index->regs) {
++ ret = ov9665_write(c, 0x12, 0x80);
++ ret += ov9665_write_array(c, ov9665_def_reg);
++ ret += ov9665_write_array(c, res_index->regs);
++ /* Add delay here to get better image */
++
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov9665_res[index].width) &&
++ (height == ov9665_res[index].height)) {
++ ov9665_res[index].used = 1;
++ continue;
++ }
++ ov9665_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ ov9665_res[index].used);
++
++ }
++ info->res = res_index->res;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int ov9665_q_hflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret;
++ unsigned char v = 0;
++
++ ret = ov9665_read(client, 0x04, &v);
++ *value = ((v & 0x80) == 0x80);
++ return ret;
++}
++
++static int ov9665_t_hflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char v = 0;
++ int ret;
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov9665_read(client, 0x33, &v);
++ if (value)
++ v |= 0x08;
++ else
++ v &= ~0x08;
++ ret += ov9665_write(client, 0x33, v);
++
++ ret += ov9665_read(client, 0x04, &v);
++ if (value)
++ v |= 0x80;
++ else
++ v &= ~0x80;
++ ret += ov9665_write(client, 0x04, v);
++ msleep(10); /* FIXME */
++ return ret;
++}
++
++static int ov9665_q_vflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret;
++ unsigned char v = 0;
++
++ ret = ov9665_read(client, 0x04, &v);
++ *value = ((v & 0x40) == 0x40);
++ return ret;
++}
++
++static int ov9665_t_vflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char v = 0;
++ int ret;
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov9665_read(client, 0x04, &v);
++ if (value)
++ v |= 0x40;
++ else
++ v &= ~0x40;
++ ret += ov9665_write(client, 0x04, v);
++ msleep(10); /* FIXME */
++ return ret;
++}
++
++static struct ov9665_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} ov9665_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov9665_t_vflip,
++ .query = ov9665_q_vflip,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal mirror",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov9665_t_hflip,
++ .query = ov9665_q_hflip,
++ },
++};
++#define N_CONTROLS (ARRAY_SIZE(ov9665_controls))
++
++static struct ov9665_control *ov9665_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (ov9665_controls[i].qc.id == id)
++ return ov9665_controls + i;
++ return NULL;
++}
++
++static int ov9665_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov9665_control *ctrl = ov9665_find_control(qc->id);
++
++ if (ctrl == NULL)
++ return -EINVAL;
++ *qc = ctrl->qc;
++ return 0;
++}
++
++static int ov9665_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov9665_control *octrl = ov9665_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++static int ov9665_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov9665_control *octrl = ov9665_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++#if 0
++static int ov9665_get_caps(struct i2c_client *c, struct ci_sensor_caps *caps)
++{
++ if (caps == NULL)
++ return -EIO;
++
++ caps->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ caps->mode = SENSOR_MODE_BT601;
++ caps->field_inv = SENSOR_FIELDINV_NOSWAP;
++ caps->field_sel = SENSOR_FIELDSEL_BOTH;
++ caps->ycseq = SENSOR_YCSEQ_YCBYCR;
++ caps->conv422 = SENSOR_CONV422_COSITED;
++ caps->bpat = SENSOR_BPAT_GRGRBGBG;
++ caps->hpol = SENSOR_HPOL_REFPOS;
++ caps->vpol = SENSOR_VPOL_POS;
++ caps->edge = SENSOR_EDGE_FALLING;
++ caps->bls = SENSOR_BLS_OFF;
++ caps->gamma = SENSOR_GAMMA_ON;
++ caps->cconv = SENSOR_CCONV_ON;
++ caps->res = SENSOR_RES_SXGA | SENSOR_RES_VGA;
++ caps->blc = SENSOR_BLC_AUTO;
++ caps->agc = SENSOR_AGC_AUTO;
++ caps->awb = SENSOR_AWB_AUTO;
++ caps->aec = SENSOR_AEC_AUTO;
++ caps->cie_profile = 0;
++ caps->flicker_freq = SENSOR_FLICKER_100 | SENSOR_FLICKER_120;
++ caps->type = SENSOR_TYPE_SOC;
++ /* caps->name = "ov9665"; */
++ strcpy(caps->name, "ov9665");
++
++ return 0;
++}
++
++static int ov9665_get_config(struct i2c_client *c,
++ struct ci_sensor_config *config)
++{
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ if (config == NULL) {
++ printk(KERN_WARNING "sensor_get_config: NULL pointer\n");
++ return -EIO;
++ }
++
++ memset(config, 0, sizeof(struct ci_sensor_config *));
++ memcpy(config, info, sizeof(struct ci_sensor_config));
++
++ return 0;
++}
++
++static int ov9665_setup(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int ret;
++ struct ov9665_res_struct *res_index;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++ u16 width, high;
++
++ /* Soft reset camera first*/
++ ret = ov9665_write(c, 0x12, 0x80);
++
++ /* Set registers into default config value */
++ ret += ov9665_write_array(c, ov9665_def_reg);
++
++ /* set image resolution */
++ ci_sensor_res2size(config->res, &width, &high);
++ ret += ov9665_try_res(c, &width, &high);
++ res_index = ov9665_find_res(width, high);
++ if (res_index->regs)
++ ret += ov9665_write_array(c, res_index->regs);
++ if (!ret)
++ info->res = res_index->res;
++
++ /* Add some delay here to get a better image*/
++ ssleep(1);
++
++ return ret;
++}
++
++static int ov9665_set_data_pin_in(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0x00);
++ ret += ov9665_write(client, 0xd6, 0x00);
++
++ return ret;
++}
++
++static int ov9665_set_data_pin_out(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0xff);
++ ret += ov9665_write(client, 0xd6, 0xff);
++
++ return ret;
++}
++/*
++ * File operation functions
++ */
++static int ov9665_open(struct i2c_setting *c, void *priv)
++{
++ struct i2c_client *client = c->sensor_client;
++ int ret = 0;
++ u8 reg = 0;
++ /* Just wake up sensor */
++ if (ov9665_wakeup())
++ return -EIO;
++
++ ov9665_init(client);
++ ret = ov9665_read(client, 0x09, &reg);
++ reg = reg | 0x10;
++ ret += ov9665_write(client, 0x09, reg);
++
++ if (ov9665_set_data_pin_in(client))
++ return EIO;
++/*
++ if (ov9665_standby())
++ return EIO;
++*/
++ return ret;
++}
++
++static int ov9665_release(struct i2c_setting *c, void *priv)
++{
++ /* Just suspend the sensor */
++ if (ov9665_standby())
++ return EIO;
++ return 0;
++}
++
++static int ov9665_on(struct i2c_setting *c)
++{
++ struct i2c_client *client = c->sensor_client;
++ int ret = 0;
++ u8 reg = 0;
++
++ ret = ov9665_read(client, 0x09, &reg);
++ reg = reg & ~0x10;
++ ret = ov9665_write(client, 0x09, reg);
++
++ if (ov9665_set_data_pin_out(client))
++ return EIO;
++
++ return ret;
++}
++
++static int ov9665_off(struct i2c_setting *c)
++{
++ struct i2c_client *client = c->sensor_client;
++ int ret = 0;
++ u8 reg = 0;
++/*
++ ret = ov9665_read(client, 0x09, &reg);
++ reg = reg | 0x10;
++ ret += ov9665_write(client, 0x09, reg);
++*/
++ if (ov9665_set_data_pin_in(client))
++ return EIO;
++
++ return ret;
++}
++
++static struct sensor_device ov9665 = {
++ .name = "OV9665",
++ .type = SENSOR_TYPE_SOC,
++ .minor = -1,
++ .open = ov9665_open,
++ .release = ov9665_release,
++ .on = ov9665_on,
++ .off = ov9665_off,
++ .querycap = ov9665_get_caps,
++ .get_config = ov9665_get_config,
++ .set_config = ov9665_setup,
++ .enum_parm = ov9665_queryctrl,
++ .get_parm = ov9665_g_ctrl,
++ .set_parm = ov9665_s_ctrl,
++ .try_res = ov9665_try_res,
++ .set_res = ov9665_set_res,
++ .suspend = ov9665_standby,
++ .resume = ov9665_wakeup,
++ .get_ls_corr_config = NULL,
++ .set_awb = NULL,
++ .set_aec = NULL,
++ .set_blc = NULL,
++ /* TBC */
++};
++#endif
++
++static int ov9665_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 reg = 0;
++
++ DBG_entering;
++ if (enable) {
++ ov9665_read(client, 0x09, &reg);
++ reg = reg & ~0x10;
++ ov9665_write(client, 0x09, reg);
++ ov9665_set_data_pin_out(client);
++ ssleep(1);
++
++ } else {
++ ov9665_read(client, 0x09, &reg);
++ reg = reg | 0x10;
++ ov9665_write(client, 0x09, reg);
++ ov9665_set_data_pin_in(client);
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov9665_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = ov9665_res[index].width;
++ fsize->discrete.height = ov9665_res[index].height;
++ fsize->reserved[0] = ov9665_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov9665_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = ov9665_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov9665_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_OV9665 8246
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV9665, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int ov9665_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = ov9665_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int ov9665_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ov9665_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops ov9665_video_ops = {
++ .try_fmt = ov9665_try_fmt,
++ .s_fmt = ov9665_set_fmt,
++ .g_fmt = ov9665_get_fmt,
++ .s_stream = ov9665_s_stream,
++ .enum_framesizes = ov9665_enum_framesizes,
++ .enum_frameintervals = ov9665_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops ov9665_core_ops = {
++ .g_chip_ident = ov9665_g_chip_ident,
++ .queryctrl = ov9665_queryctrl,
++ .g_ctrl = ov9665_g_ctrl,
++ .s_ctrl = ov9665_s_ctrl,
++ .s_gpio = ov9665_s_power,
++ /*.g_ext_ctrls = ov9665_g_ext_ctrls,*/
++ /*.s_ext_ctrls = ov9665_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = ov9665_g_register,
++ .s_register = ov9665_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops ov9665_ops = {
++ .core = &ov9665_core_ops,
++ .video = &ov9665_video_ops,
++};
++/*
++ * Basic i2c stuff
++ */
++/*
++static unsigned short normal_i2c[] = {0x30, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver ov9665_driver;
++*/
++static int ov9665_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u8 config = 0;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -ENODEV;
++
++ if (adap_id != 1)
++ return -ENODEV;
++
++ ov9665_wakeup();
++
++ ov9665_read(client, 0x0a, &config);
++ if (config != 0x96)
++ return -ENODEV;
++
++ ov9665_read(client, 0x0b, &config);
++ if (config != 0x63)
++ return -ENODEV;
++
++ return 0;
++}
++
++static int ov9665_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ ret = ov9665_detect(client);
++ if (ret) {
++ kfree(info);
++ return -ENODEV;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov9665_ops);
++
++ /*
++ * Initialization OV9665
++ * then turn into standby mode
++ */
++ /* ret = ov9665_standby(); */
++ ret = ov9665_init(client);
++ if (ret) {
++ eprintk("error init ov9665");
++ goto err_1;
++ }
++
++ ov9665_standby();
++ printk(KERN_INFO "Init ov9665 sensor success\n");
++ DBG_leaving;
++ return 0;
++
++err_1:
++ kfree(info);
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int ov9665_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++
++ return 0;
++}
++
++static const struct i2c_device_id ov9665_id[] = {
++ {"ov9665", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, ov9665_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov9665",
++ .probe = ov9665_probe,
++ .remove = ov9665_remove,
++ .id_table = ov9665_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 9665 sensors");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrstov9665/ov9665.h
+@@ -0,0 +1,263 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_OV9665 0x60
++/* Should add to kernel source */
++#define I2C_DRIVERID_OV9665 1047
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 48
++#define GPIO_RESET_PIN 50
++
++struct regval_list {
++ u8 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 1280x1024 YUV
++ */
++static struct regval_list ov9665_def_reg[] = {
++ {0x3E, 0x80},
++ {0x12, 0x80},
++
++ {0xd5, 0xff},
++ {0xd6, 0x3f},
++
++ {0x3d, 0x3c},
++ {0x11, 0x81},
++ {0x2a, 0x00},
++ {0x2b, 0x00},
++
++ {0x3a, 0xf1},
++ {0x3b, 0x00},
++ {0x3c, 0x58},
++ {0x3e, 0x50},
++ {0x71, 0x00},
++
++ {0x15, 0x00},
++ {0x6a, 0x24},
++ {0x85, 0xe7},
++
++ {0x63, 0x01},
++
++ {0x17, 0x0c},
++ {0x18, 0x5c},
++ {0x19, 0x01},
++ {0x1a, 0x82},
++ {0x03, 0x03},
++ {0x2b, 0x00},
++
++ {0x36, 0xb4},
++ {0x65, 0x10},
++ {0x70, 0x02},
++ {0x71, 0x9f},
++ {0x64, 0x24},
++
++ {0x43, 0x00},
++ {0x5D, 0x55},
++ {0x5E, 0x57},
++ {0x5F, 0x21},
++
++ {0x24, 0x3e},
++ {0x25, 0x38},
++ {0x26, 0x72},
++
++ {0x14, 0x68},
++ {0x0C, 0x3a}, /* Auto detect for 50/60 */
++ {0x4F, 0x9E},
++ {0x50, 0x84},
++ {0x5A, 0x67},
++
++ {0x7d, 0x30},
++ {0x7e, 0x00},
++ {0x82, 0x03},
++ {0x7f, 0x00},
++ {0x83, 0x07},
++ {0x80, 0x03},
++ {0x81, 0x04},
++
++ {0x96, 0xf0},
++ {0x97, 0x00},
++ {0x92, 0x33},
++ {0x94, 0x5a},
++ {0x93, 0x3a},
++ {0x95, 0x48},
++ {0x91, 0xfc},
++ {0x90, 0xff},
++ {0x8e, 0x4e},
++ {0x8f, 0x4e},
++ {0x8d, 0x13},
++ {0x8c, 0x0c},
++ {0x8b, 0x0c},
++ {0x86, 0x9e},
++ {0x87, 0x11},
++ {0x88, 0x22},
++ {0x89, 0x05},
++ {0x8a, 0x03},
++
++ {0x9b, 0x0e},
++ {0x9c, 0x1c},
++ {0x9d, 0x34},
++ {0x9e, 0x5a},
++ {0x9f, 0x68},
++ {0xa0, 0x76},
++ {0xa1, 0x82},
++ {0xa2, 0x8e},
++ {0xa3, 0x98},
++ {0xa4, 0xa0},
++ {0xa5, 0xb0},
++ {0xa6, 0xbe},
++ {0xa7, 0xd2},
++ {0xa8, 0xe2},
++ {0xa9, 0xee},
++ {0xaa, 0x18},
++
++ {0xAB, 0xe7},
++ {0xb0, 0x43},
++ {0xac, 0x04},
++ {0x84, 0x40},
++
++ {0xad, 0x84},
++ {0xd9, 0x24},
++ {0xda, 0x00},
++ {0xae, 0x10},
++
++ {0xab, 0xe7},
++ {0xb9, 0xa0},
++ {0xba, 0x80},
++ {0xbb, 0xa0},
++ {0xbc, 0x80},
++
++ {0xbd, 0x08},
++ {0xbe, 0x19},
++ {0xbf, 0x02},
++ {0xc0, 0x08},
++ {0xc1, 0x2a},
++ {0xc2, 0x34},
++ {0xc3, 0x2d},
++ {0xc4, 0x2d},
++ {0xc5, 0x00},
++ {0xc6, 0x98},
++ {0xc7, 0x18},
++ {0x69, 0x48},
++
++ {0x74, 0xc0},
++
++ {0x7c, 0x18},
++ {0x65, 0x11},
++ {0x66, 0x00},
++ {0x41, 0xa0},
++ {0x5b, 0x28},
++ {0x60, 0x84},
++ {0x05, 0x07},
++ {0x03, 0x03},
++ {0xd2, 0x8c},
++
++ {0xc7, 0x90},
++ {0xc8, 0x06},
++ {0xcb, 0x40},
++ {0xcc, 0x40},
++ {0xcf, 0x00},
++ {0xd0, 0x20},
++ {0xd1, 0x00},
++ {0xc7, 0x18},
++
++ {0x0d, 0x82},
++ {0x0d, 0x80},
++
++ {0x09, 0x01},
++
++ {0xff, 0xff},
++};
++
++/* 1280x1024 */
++static struct regval_list ov9665_res_sxga[] = {
++ {0x0c, 0xbc}, /* note this */
++ {0xff, 0xff},
++};
++
++/* 640x480 */
++static struct regval_list ov9665_res_vga[] = {
++ /* Fclk/4 */
++ {0x11, 0x80},
++ {0x63, 0x00},
++
++ {0x12, 0x40}, /*VGA format*/
++ {0x14, 0x30}, /*4x*/
++ {0x0c, 0xbc},
++ {0x4d, 0x09},
++ {0x5c, 0x80}, /* Full average AEC */
++
++ /* Windows setting */
++ {0x17, 0x0c},
++ {0x18, 0x5c},
++ {0x19, 0x02},
++ {0x1a, 0x3f},
++ {0x03, 0x03},
++ {0x32, 0xad},
++
++ /* 50/60Hz AEC */
++ {0x5a, 0x23},
++ {0x2b, 0x00},
++
++ {0x64, 0xa4},
++ /*
++ {0x4F, 0x4f},
++ {0x50, 0x42},
++ */
++ {0x4F, 0x9e},
++ {0x50, 0x84},
++ {0x97, 0x0a},
++ {0xad, 0x82},
++ {0xd9, 0x11},
++
++ /* Scale window */
++ {0xb9, 0x50},
++ {0xba, 0x3c},
++ {0xbb, 0x50},
++ {0xbc, 0x3c},
++
++ {0xad, 0x80},
++ {0xd9, 0x00},
++ {0xac, 0x0f},
++ {0x84, 0x86},
++
++ /*This is for Color Matrix*/
++ {0xbd, 0x05},
++ {0xbe, 0x16},
++ {0xbf, 0x05},
++ {0xc0, 0x07},
++ {0xc1, 0x18},
++ {0xc2, 0x1f},
++ {0xc3, 0x2b},
++ {0xc4, 0x2b},
++ {0xc5, 0x00},
++
++ {0x0d, 0x92},
++ {0x0d, 0x90},
++
++ {0xff, 0xff},
++};
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_S5K4E1
++ tristate "Moorestown s5k4e1 RAW Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++	  Say Y here if your platform supports the s5k4e1 RAW Sensor.
++
++ To compile this driver as a module, choose M here: the
++	  module will be called mrsts5k4e1.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_S5K4E1) += mrsts5k4e1.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1/mrsts5k4e1.c
+@@ -0,0 +1,1034 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include <asm/mrst.h>
++
++#include "ci_sensor_common.h"
++#include "mrsts5k4e1.h"
++/* #include "priv.h" */
++/* extern const struct DumpRegs regs_d[]; */
++
++static int s5k4e1_debug;
++module_param(s5k4e1_debug, int, 0644);
++MODULE_PARM_DESC(s5k4e1_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) \
++ do { \
++ if (s5k4e1_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg);\
++ } while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s:" fmt "\n", \
++ __func__, ## arg);
++
++#define DBG_entering dprintk(1, "entering");
++#define DBG_leaving dprintk(1, "leaving");
++#define DBG_line dprintk(1, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct s5k4e1_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} s5k4e1_formats[] = {
++ {
++ .desc = "Raw RGB Bayer",
++ .pixelformat = SENSOR_MODE_MIPI,
++ .regs = NULL,
++ },
++};
++#define N_S5K4E1_FMTS ARRAY_SIZE(s5k4e1_formats)
++
++static struct s5k4e1_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} s5k4e1_res[] = {
++ {
++ .desc = "QSXGA_PLUS4",
++ .res = SENSOR_RES_QXGA_PLUS,
++ .width = 2592,
++ .height = 1944,
++ .fps = 15,
++ .used = 0,
++ .regs = s5k4e1_res_qsxga_plus4,
++ },
++ {
++ .desc = "1080P",
++ .res = SENSOR_RES_1080P,
++ .width = 1920,
++ .height = 1080,
++ .fps = 25,
++ .used = 0,
++ .regs = s5k4e1_res_1080p,
++ },
++ {
++ .desc = "VGA_PLUS",
++ .res = SENSOR_RES_VGA_PLUS,
++ .width = 1304,
++ .height = 980,
++ .fps = 30,
++ .used = 0,
++ .regs = s5k4e1_res_vga_ac04_bill,
++ },
++ {
++ .desc = "720p",
++ .res = SENSOR_RES_720P,
++ .width = 1280,
++ .height = 720,
++ .fps = 30,
++ .used = 0,
++ .regs = s5k4e1_res_720p,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .used = 0,
++ .fps = 40,
++ .regs = s5k4e1_res_vga_ac04_bill,
++ },
++};
++
++#define N_RES (ARRAY_SIZE(s5k4e1_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int s5k4e1_read(struct i2c_client *c, u32 reg, u32 *value)
++{
++ int ret;
++ int i;
++ struct i2c_msg msg[2];
++ u8 msgbuf[2];
++ u8 ret_val = 0;
++ *value = 0;
++ /* Read needs two message to go */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ i = 0;
++
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = ((u16)reg) & 0xff;
++ msg[0].addr = c->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = i;
++
++ msg[1].addr = c->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = &ret_val;
++ msg[1].len = 1;
++
++ ret = i2c_transfer(c->adapter, &msg[0], 2);
++ *value = ret_val;
++
++ ret = (ret == 2) ? 0 : -1;
++ dprintk(2, "reg:0x%8x, value:0x%8x - %s", reg, *value,
++		(ret ? "failed" : "successfully"));
++ return ret;
++}
++
++static int s5k4e1_write(struct i2c_client *c, u32 reg, u32 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = (u16)reg & 0xff;
++ msgbuf[i++] = (u8)value;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ /* If this is a reset register, wait for 1ms */
++ if (reg == 0x0103 && (value & 0x01))
++ /*Note here, check if this is needed */
++ msleep(4);
++
++ ret = (ret == 1) ? 0 : -1;
++ dprintk(2, "reg:0x%8x, value:0x%8x - %s", reg, value,
++ (ret ? "failed" : "successfully"));
++ return ret;
++}
++
++static int s5k4e1_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u32 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++
++ DBG_entering;
++
++ p = vals;
++ while (p->reg_num != 0xffff) {
++ s5k4e1_write(c, (u32)p->reg_num, (u32)p->value);
++ s5k4e1_read(c, (u32)p->reg_num, &read_val);
++ /* msleep(100);*/
++ if (read_val != p->value) {
++ eprintk("0x%x write error:should be 0x%x, but 0x%x",
++ p->reg_num, p->value, read_val);
++ err_num++;
++ }
++ p++;
++ i++;
++ }
++	dprintk(1, "successfully wrote %d registers, err is %d", i,
++ err_num);
++ return 0;
++}
++
++/*
++ * Sensor specific helper function
++ */
++static int s5k4e1_standby(void)
++{
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ else
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int s5k4e1_wakeup(void)
++{
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC)
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ else
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ dprintk(1, "PM: wakeup called\n");
++ return 0;
++}
++
++static int s5k4e1_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ s5k4e1_standby();
++ if (val == 0)
++ s5k4e1_wakeup();
++ return 0;
++}
++
++static int s5k4e1_set_img_ctrl(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int err = 0;
++
++ DBG_entering;
++
++ switch (config->blc) {
++ /* only SENSOR_BLC_AUTO supported */
++ case SENSOR_BLC_AUTO:
++ break;
++ default:
++ dprintk(1, "BLC not supported,\
++ set to BLC_AUTO by default.");
++ }
++
++ switch (config->bls) {
++ /* only SENSOR_BLS_OFF supported */
++ case SENSOR_BLS_OFF:
++ break;
++ default:
++ dprintk(1, "Black level not supported,\
++ set to BLS_OFF by default.");
++ }
++
++ switch (config->agc) {
++ /* only SENSOR_AGC_OFF supported */
++ case SENSOR_AGC_OFF:
++ break;
++ default:
++ dprintk(1, "AGC not supported,\
++ set to AGC_OFF by default.");
++ }
++
++ switch (config->awb) {
++ /* only SENSOR_AWB_OFF supported */
++ case SENSOR_AWB_OFF:
++ break;
++ default:
++ dprintk(1, "AWB not supported,\
++ set to AWB_OFF by default.");
++ }
++
++ switch (config->aec) {
++ /* only SENSOR_AEC_OFF supported */
++ case SENSOR_AEC_OFF:
++ break;
++ default:
++ dprintk(1, "AEC not supported,\
++ set to AEC_OFF by default.");
++ }
++
++ DBG_leaving;
++
++ return err;
++}
++static int s5k4e1_init(struct i2c_client *c)
++{
++ int ret = 0;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ char *name = "";
++
++ DBG_entering;
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = s5k4e1_formats[0].pixelformat;
++ info->res = s5k4e1_res[0].res;
++ info->type = SENSOR_TYPE_RAW;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_OFF;
++ info->cconv = SENSOR_CCONV_OFF;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_OFF;
++ info->awb = SENSOR_AWB_OFF;
++ info->aec = SENSOR_AEC_OFF;
++ /*info->bus_width = SENSOR_BUSWIDTH_10BIT_ZZ;*/
++ info->bus_width = SENSOR_BUSWIDTH_12BIT;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ /*info->conv422 = SENSOR_CONV422_NOCOSITED;*/
++ info->bpat = SENSOR_BPAT_GRGRBGBG;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_NEG;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = SENSOR_CIEPROF_F11;
++ info->mipi_mode = SENSOR_MIPI_MODE_RAW_10;
++ name = "s5k4e1";
++ memcpy(info->name, name, 7);
++
++ /* Reset sensor hardware, and implement the setting*/
++ ret += s5k4e1_write(c, 0x0100, (u32)0x00);
++ /*TODO: See if we can ignore this*/
++ ret = s5k4e1_write(c, 0x0103, (u32)0x01);
++
++ /* sw reset -- delay 3.1ms */
++ msleep(4);
++
++ /* Set registers into default config value */
++ /* ret += s5k4e1_write_array(c, s5k4e1_def_reg); */
++
++ /* Set MIPI interface */
++#ifdef S5K4E1_MIPI
++ ret += s5k4e1_write_array(c, s5k4e1_mipi);
++#endif
++
++ ret += s5k4e1_set_img_ctrl(c, info); /*FIXME*/
++
++ /* streaming */
++ /* ret += s5k4e1_write(c, 0x0100, (u32)0x01); */
++ ret += s5k4e1_write(c, 0x0100, (u32)0x00);
++
++ msleep(1);
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int distance(struct s5k4e1_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++
++ DBG_entering;
++
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int s5k4e1_try_res(u32 *w, u32 *h)
++{
++ struct s5k4e1_res_struct *res_index, *p = NULL;
++ int dis, last_dis = s5k4e1_res->width + s5k4e1_res->height;
++
++ DBG_entering;
++
++ for (res_index = s5k4e1_res;
++ res_index < s5k4e1_res + N_RES;
++ res_index++) {
++ if ((res_index->width < *w) || (res_index->height < *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++
++ if (p == NULL)
++ p = s5k4e1_res;
++ else if ((p->width < *w) || (p->height < *h)) {
++ if (p != s5k4e1_res)
++ p--;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct s5k4e1_res_struct *s5k4e1_to_res(u32 w, u32 h)
++{
++ struct s5k4e1_res_struct *res_index;
++
++ DBG_entering;
++
++ for (res_index = s5k4e1_res;
++ res_index < s5k4e1_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= s5k4e1_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ DBG_leaving;
++
++ return res_index;
++}
++
++static int s5k4e1_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ return s5k4e1_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int s5k4e1_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == s5k4e1_res[index].width) &&
++ (height == s5k4e1_res[index].height)) {
++ s5k4e1_res[index].used = 1;
++ continue;
++ }
++ s5k4e1_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++
++}
++
++#if 0
++/* chuanxiao add, to dump regs */
++static int s5k4e1_dump_regs(struct i2c_client *c)
++{
++ /*struct i2c_client *c = v4l2_get_subdevdata(sd);*/
++ const struct DumpRegs *p = regs_d;
++ u32 value;
++ u32 value1, value2, value3, value4;
++ while (p->ulFlags != eTableEnd) {
++ if (p->ulFlags & eFourBytes) {
++ s5k4e1_read(c, (u32)p->ulAddr, &value1);
++ s5k4e1_read(c, (u32)p->ulAddr+1, &value2);
++ s5k4e1_read(c, (u32)p->ulAddr+2, &value3);
++ s5k4e1_read(c, (u32)p->ulAddr+3, &value4);
++ value = value1<<24 | value2<<16 | value3<<8 | value4;
++ } else if (p->ulFlags & eTwoBytes) {
++ s5k4e1_read(c, (u32)p->ulAddr, &value1);
++ s5k4e1_read(c, (u32)p->ulAddr+1, &value2);
++ value = value1<<8 | value2;
++ } else
++ s5k4e1_read(c, (u32)p->ulAddr, &value);
++ /*
++ if (value == p->ulDefaultValue)
++ dprintk(0, "%s\t @ 0x%x = 0x%lx (= default value)\n",
++ p->pszName, p->ulAddr, value);
++ else
++ dprintk(0, "%s\t @ 0x%x = 0x%lx (default was 0x%lx)\n",
++ p->pszName, p->ulAddr, value, p->ulDefaultValue);
++ */
++ dprintk(0, "%-30s @ 0x%04X = 0x%08X", p->pszName,
++ p->ulAddr, value);
++ p++;
++ }
++ return 0;
++}
++#endif
++
++static int s5k4e1_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct s5k4e1_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ dprintk(1, "was told to set fmt (%d x %d) ", width, height);
++ ret = s5k4e1_try_res(&width, &height);
++
++ res_index = s5k4e1_to_res(width, height);
++
++ s5k4e1_wakeup();
++ DBG_line;
++ if (res_index->regs) {
++ /* software sleep/standby */
++ ret += s5k4e1_write(c, 0x0100, (u32)0x00);
++
++ /* Soft reset camera first*/
++ /*TODO: See if we can ignore this*/
++ ret = s5k4e1_write(c, 0x0103, (u32)0xff);
++
++ /* Set registers into default config value */
++ /* ret += s5k4e1_write_array(c, s5k4e1_def_reg);*/
++
++ /* set image resolution */
++ ret += s5k4e1_write_array(c, res_index->regs);
++
++ ret += s5k4e1_set_img_ctrl(c, info);
++
++ /* XXX setup with unknow meaning ... */
++ /* ret += s5k4e1_write(c, 0x30b0, 0xfe); */
++
++ /* Set MIPI interface */
++#ifdef S5K4E1_MIPI
++ ret += s5k4e1_write_array(c, s5k4e1_mipi);
++#endif
++
++ /* streaming */
++ ret = s5k4e1_write(c, 0x0100, (u32)0x01);
++ msleep(1);
++
++ info->res = res_index->res;
++
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == s5k4e1_res[index].width) &&
++ (height == s5k4e1_res[index].height)) {
++ s5k4e1_res[index].used = 1;
++ continue;
++ }
++ s5k4e1_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ s5k4e1_res[index].used);
++
++ DBG_line;
++ } else {
++ eprintk("no res for (%d x %d)", width, height);
++ }
++
++ DBG_leaving;
++ return ret;
++}
++
++static int s5k4e1_t_gain(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++ s5k4e1_write(client, 0x0104, 1); /*hold*/
++
++ /* analog gain */
++ s5k4e1_write(client, 0x0204, value >> 8);
++
++ s5k4e1_write(client, 0x0205, value & 0xff);
++
++ s5k4e1_write(client, 0x0104, 0); /*unhold*/
++
++ dprintk(1, "gain %x was writen to 0x0204/5", value);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_t_exposure(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++ s5k4e1_write(client, 0x0104, 1); /*hold*/
++
++ /* fine integration time */
++ s5k4e1_write(client, 0x0200, value >> 24);
++
++ s5k4e1_write(client, 0x0201, (value >> 16) & 0xff);
++
++ /* coarse integration time */
++ s5k4e1_write(client, 0x0202, (value & 0xff00) >> 8);
++
++ s5k4e1_write(client, 0x0203, value & 0xff);
++
++ s5k4e1_write(client, 0x0104, 0); /*unhold*/
++
++ dprintk(1, "exposure %x was writen to 0x0200/1/2/3", value);
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct s5k4e1_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} s5k4e1_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_GAIN,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "global gain",
++ .minimum = 0x0,
++ .maximum = 0xFFFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = s5k4e1_t_gain,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_EXPOSURE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "exposure",
++ .minimum = 0x0,
++ .maximum = 0xFFFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = s5k4e1_t_exposure,
++ },
++};
++#define N_CONTROLS (ARRAY_SIZE(s5k4e1_controls))
++
++static struct s5k4e1_control *s5k4e1_find_control(__u32 id)
++{
++ int i;
++
++ DBG_entering;
++ for (i = 0; i < N_CONTROLS; i++)
++ if (s5k4e1_controls[i].qc.id == id)
++ return s5k4e1_controls + i;
++ DBG_leaving;
++ return NULL;
++}
++
++static int s5k4e1_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct s5k4e1_control *ctrl = s5k4e1_find_control(qc->id);
++
++ DBG_entering;
++ if (ctrl == NULL)
++ return -EINVAL;
++ *qc = ctrl->qc;
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++/*
++ struct s5k4e1_control *octrl = s5k4e1_find_control(parm->index);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(client, &parm->value);
++ if (ret >= 0)
++ return 0;
++*/
++ return 0;
++}
++
++static int s5k4e1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct s5k4e1_control *octrl = s5k4e1_find_control(ctrl->id);
++ int ret;
++
++ DBG_entering;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int s5k4e1_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ DBG_entering;
++
++ if (enable) {
++ s5k4e1_write(client, (u32)0x0100, 0x01);
++ /*chuanxiao add, dump s5k4e1 regs*/
++ /* s5k4e1_dump_regs(client); */
++ } else
++ s5k4e1_write(client, (u32)0x0100, 0x00);
++
++ /*msleep(1);*/
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = s5k4e1_res[index].width;
++ fsize->discrete.height = s5k4e1_res[index].height;
++ fsize->reserved[0] = s5k4e1_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int s5k4e1_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = s5k4e1_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int s5k4e1_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++#define V4L2_IDENT_S5K4E1 8250
++ DBG_leaving;
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_S5K4E1, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int s5k4e1_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = s5k4e1_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int s5k4e1_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ s5k4e1_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops s5k4e1_video_ops = {
++ .try_fmt = s5k4e1_try_fmt,
++ .s_fmt = s5k4e1_set_fmt,
++ .g_fmt = s5k4e1_get_fmt,
++ .s_stream = s5k4e1_s_stream,
++ .enum_framesizes = s5k4e1_enum_framesizes,
++ .enum_frameintervals = s5k4e1_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops s5k4e1_core_ops = {
++ .g_chip_ident = s5k4e1_g_chip_ident,
++ .queryctrl = s5k4e1_queryctrl,
++ .g_ctrl = s5k4e1_g_ctrl,
++ .s_ctrl = s5k4e1_s_ctrl,
++ .s_gpio = s5k4e1_s_power,
++ /*.g_ext_ctrls = s5k4e1_g_ext_ctrls,*/
++ /*.s_ext_ctrls = s5k4e1_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = s5k4e1_g_register,
++ .s_register = s5k4e1_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops s5k4e1_ops = {
++ .core = &s5k4e1_core_ops,
++ .video = &s5k4e1_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++/*
++static unsigned short normal_i2c[] = {0x36, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver i2c_driver_s5k4e1_sensor;
++*/
++static int s5k4e1_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u32 value;
++
++ DBG_entering;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ if (s5k4e1_wakeup()) {
++ eprintk("sensor wakeup failed");
++ return -EIO;
++ }
++
++ s5k4e1_read(client, 0x0003, &value);
++ dprintk(1, "Read from 0x0003: %x", value);
++ if ((value != 0x09))
++ return -ENODEV;
++
++ s5k4e1_read(client, 0x0000, &value);
++ dprintk(1, "Read from 0x0000: %x", value);
++ if ((value != 0x4e) && (value != 0x10))
++ return -ENODEV;
++
++ s5k4e1_read(client, 0x0001, &value);
++ dprintk(1, "Read from 0x0001: %x", value);
++ if ((value != 0x4e) && (value != 0x10))
++ return -ENODEV;
++
++ /*TODO EVT3 detect*/
++ if (mrst_platform_id() != MRST_PLATFORM_AAVA_SC) {
++ s5k4e1_read(client, 0x0002, &value);
++ dprintk(1, "Read from 0x0002: %x", value);
++ if (value == 0x0010) {
++ dprintk(1, "EVT3 module not supported!");
++ return -ENODEV;
++ }
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info) {
++ dprintk(0, "fail to malloc for ci_sensor_config");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = s5k4e1_detect(client);
++ if (ret) {
++ dprintk(0, "error s5k4e1_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &s5k4e1_ops);
++
++ /*
++ * Initialization S5K4E1
++ * then turn into standby mode
++ */
++ ret = s5k4e1_init(client);
++ if (ret) {
++ dprintk(0, "error calling s5k4e1_init");
++ goto out_free;
++ }
++
++ s5k4e1_standby();
++ dprintk(0, "Init s5k4e1 sensor successfully");
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++
++ DBG_leaving;
++ return ret;
++}
++
++
++static int s5k4e1_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++/**
++ * i2c_driver for s5k4e1_sensor
++ */
++static const struct i2c_device_id s5k4e1_id[] = {
++ {"s5k4e1", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, s5k4e1_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "s5k4e1",
++ .probe = s5k4e1_probe,
++ .remove = s5k4e1_remove,
++ /* .suspend = s5k4e1_suspend,
++ * .resume = s5k4e1_resume, */
++ .id_table = s5k4e1_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for Samsung S5K4E1 sensors");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1/mrsts5k4e1.h
+@@ -0,0 +1,676 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_S5K4E1 0x6C
++/* Should add to kernel source */
++#define I2C_DRIVERID_S5K4E1 1046
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 49
++#define GPIO_RESET_PIN 50
++
++struct regval_list {
++ u16 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 5Mega Pixel, 2592x1944
++ */
++/* MIPI register are removed by Wen */
++
++/* 2592x1944 */
++static struct regval_list s5k4e1_res_qsxga_plus4[] = {
++ /* Reset for operation */
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++/*
++ * Analog Setting
++ * This register is for FACTORY ONLY.
++ * If you change it without prior notification,
++ * You are RESPONSIBLE for the FAILURE that will happen in the future.
++ */
++
++/* CDS timing setting ... */
++ {0x3000, 0x04}, /* ct_ld_start (default = 07h) */
++ {0x3001, 0x02}, /* ct_sl_start (default = 05h) */
++ {0x3002, 0x0C}, /* ct_rx_start (default = 21h) */
++ {0x3003, 0x0E}, /* ct_cds_start (default = 23h) */
++ {0x3004, 0x2C}, /* ct_smp_width (default = 60h) */
++ {0x3005, 0x0D}, /* ct_az_width (default = 28h) */
++ {0x3006, 0x39}, /* ct_s1r_width (default = 88h) */
++ {0x3007, 0x02}, /* ct_tx_start (default = 06h) */
++ {0x3008, 0x3C}, /* ct_tx_width 1.5us (default = 7Ch) */
++ {0x3009, 0x3C}, /* ct_stx_width 1.5us (default = 7Ch) */
++ {0x300A, 0x28}, /* ct_dtx_width 1us (default = 3Eh) */
++ {0x300B, 0x15}, /* ct_rmp_rst_start (default = 44h) */
++ {0x300C, 0x15}, /* ct_rmp_sig_start (default = 48h) */
++ {0x300D, 0x02}, /* ct_rmp_lat (default = 02h) */
++ {0x300E, 0xA9}, /* D-Shut en[7], CLP On[5], LD high[4] */
++
++/* CDS option setting ... */
++ {0x3010, 0x00}, /* smp_en[2]=0(00) 1(04) row_id[1:0] = 00 */
++ {0x3011, 0x7A}, /* RST_MX (288), SIG_MX (1024+352) */
++ {0x3012, 0x30}, /* SIG offset1 48 code */
++ {0x3013, 0xA0}, /* RST offset1 160 code */
++ {0x3014, 0x00}, /* SIG offset2 */
++ {0x3015, 0x00}, /* RST offset2 */
++ {0x3016, 0x02}, /* ADC_SAT (510mV) */
++ {0x3017, 0x94}, /* RMP_INIT[3:0](RMP_REG) 1.8V MS[6:4]=1 */
++ {0x3018, 0x78}, /* rmp option - ramp connect[MSB] +RMP INIT DAC MIN */
++ {0x301D, 0xD4}, /* CLP level (default = 0Fh) */
++
++ {0x3021, 0x02}, /* inrush ctrl[1] off */
++ {0x3022, 0x44}, /* pump ring oscillator set [7:4]=CP, [3:0]=NCP */
++ {0x3024, 0x40}, /* pix voltage 2.8V (default = 88h) */
++ {0x3027, 0x08}, /* ntg voltage (default = 04h) */
++
++/* Pixel option setting ... */
++ {0x301C, 0x05}, /* Pixel Bias [3:0] (default = 03h) */
++ {0x30D8, 0x3F}, /* All tx off 2f, on 3f */
++
++/* ADLC setting ... */
++ {0x3070, 0x5F}, /* [6]L-ADLC BPR, [4]ch sel, [3]L-ADLC, [2]F-ADLC */
++ {0x3071, 0x00}, /* F&L-adlc max 127 (default = 11h, max 255) */
++ {0x3080, 0x04}, /* F-ADLC filter A (default = 10h) */
++ {0x3081, 0x38}, /* F-ADLC filter B (default = 20h) */
++
++/* Integration setting ... */
++ {0x0202, 0x03}, /* coarse integration time */
++ {0x0203, 0xCF},
++ {0x0204, 0x00}, /* analog gain[msb] 0100 x8 0080 x4 */
++ {0x0205, 0x80}, /* analog gain[lsb] 0040 x2 0020 x1 */
++
++/* Frame Length */
++ {0x0340, 0x07}, /* Capture 07B4(1960[# of row]+12[V-blank]) */
++ {0x0341, 0xA4}, /* Preview 03E0(980[# of row]+12[V-blank]) */
++
++/* Line Length */
++ {0x0342, 0x0A}, /* 2738 */
++ {0x0343, 0xB2}, /* (Same as sensor default) */
++
++/* embedded 2-line OFF setting ... */
++/* 2608 x 1960 */
++ {0x3084, 0x15}, /* SYNC Mode */
++
++/* (3) MIPI 2-lane Serial(TST = 0000b or TST = 0010b), 30 fps */
++
++ {0x30A9, 0x01},
++ {0x0387, 0x01},
++
++ {0x30BD, 0x00}, /* SEL_CCP[0] */
++ {0x30B2, 0x08}, /* PLL P = 8 */
++ {0x30B3, 0x00}, /* PLL M[8] = 0 */
++ {0x30B5, 0x01}, /* PLL S = 0 */
++ {0x30BE, 0x1A}, /* M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0] */
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x00}, /* video_offset[7:4] 3240%12 */
++ {0x30C1, 0x01}, /* pack video enable [0] */
++ {0x30C8, 0x0C}, /* video_data_length 3260 = 2608 * 1.25 */
++ {0x30C9, 0xA8},
++ {0x30E2, 0x02}, /* num lanes[1:0] = 2 */
++ {0x30EE, 0x02}, /* DPHY enable [1] */
++ {0x30F1, 0x70}, /* DPHY BANDCTRL 800MHz=80.6MHz */
++ {0x3111, 0x86}, /* Embedded data off [5] */
++
++ {0x034C, 0x0A},
++ {0x034D, 0x20},
++ {0x044E, 0x07},
++ {0x034F, 0x98},
++
++ {0x0344, 0x00},
++ {0x0345, 0x08},
++ {0x0346, 0x00},
++ {0x0347, 0x08},
++ {0x0348, 0x0A},
++ {0x0349, 0x27},
++ {0x034A, 0x07},
++ {0x034B, 0x9F},
++
++ /* This is to set FRAME_NUM > 0 */
++ {0x30d9, 0x00},
++
++ /* Add this setting according to Bill's test */
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ {0x0307, 0x3c},
++ {0x30b5, 0x02},
++
++ {0x020E, 0x01}, /* Gr Digital Gain */
++ {0x020F, 0x00},
++ {0x0210, 0x01}, /* Red Digital Gain */
++ {0x0211, 0x00},
++ {0x0212, 0x01}, /* Blue Digital Gain */
++ {0x0213, 0x00},
++ {0x0214, 0x01}, /* Gb Digital Gain */
++ {0x0215, 0x00},
++ {0x0204, 0x00},
++ {0x0205, 0x80},
++
++#if 1
++ /*Apply Bill's setting*/
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ /*Don't apply Bill's PLL setting
++ {0x0307, 0x50},
++ {0x30B5, 0x01},
++ */
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x1A}, /* DIV_M_PCLK = 5 */
++
++ {0x0100, 0x01}, /* stream on */
++ {0xffff, 0xff},
++#endif
++};
++
++/* 1920x1080 */
++static struct regval_list s5k4e1_res_1080p[] = {
++/* Reset for operation ... */
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++/*
++ * Analog Setting
++ * This register is for FACTORY ONLY.
++ * If you change it without prior notification,
++ * You are RESPONSIBLE for the FAILURE that will happen in the future.
++ */
++
++/* CDS timing setting ... */
++ {0x3000, 0x04}, /* ct_ld_start (default = 07h) */
++ {0x3001, 0x02}, /* ct_sl_start (default = 05h) */
++ {0x3002, 0x0C}, /* ct_rx_start (default = 21h) */
++ {0x3003, 0x0E}, /* ct_cds_start (default = 23h) */
++ {0x3004, 0x2C}, /* ct_smp_width (default = 60h) */
++ {0x3005, 0x0D}, /* ct_az_width (default = 28h) */
++ {0x3006, 0x39}, /* ct_s1r_width (default = 88h) */
++ {0x3007, 0x02}, /* ct_tx_start (default = 06h) */
++ {0x3008, 0x3C}, /* ct_tx_width 1.5us (default = 7Ch) */
++ {0x300A, 0x28}, /* ct_dtx_width 1us (default = 3Eh) */
++ {0x300B, 0x15}, /* ct_rmp_rst_start (default = 44h) */
++ {0x300C, 0x15}, /* ct_rmp_sig_start (default = 48h) */
++ {0x300D, 0x02}, /* ct_rmp_lat (default = 02h) */
++ {0x300E, 0xA9}, /* D-Shut en[7], CLP On[5], LD high[4] */
++
++/* CDS option setting ... */
++ {0x3010, 0x00}, /* smp_en[2]=0(00) 1(04) row_id[1:0] = 00 */
++ {0x3011, 0x7A}, /* RST_MX (288), SIG_MX (1024+352) */
++ {0x3012, 0x30}, /* SIG offset1 48 code */
++ {0x3013, 0xA0}, /* RST offset1 160 code */
++ {0x3014, 0x00}, /* SIG offset2 */
++ {0x3015, 0x00}, /* RST offset2 */
++ {0x3016, 0x0A}, /* ADC_SAT (510mV) */
++ {0x3017, 0x94}, /* RMP_INIT[3:0](RMP_REG) 1.8V MS[6:4]=1 */
++ {0x3018, 0x78}, /* rmp option - ramp connect[MSB] +RMP INIT DAC MIN */
++
++ {0x301D, 0xD4}, /* CLP level (default = 0Fh) */
++
++ {0x3021, 0x02}, /* inrush ctrl[1] off */
++ {0x3022, 0x41}, /* pump ring oscillator set [7:4]=CP, [3:0]=NCP */
++ {0x3024, 0x08}, /* pix voltage 2.8V (default = 88h) */
++ {0x3027, 0x08}, /* ntg voltage (default = 04h) */
++
++/* Pixel option setting ... */
++ {0x301C, 0x05}, /* Pixel Bias [3:0] (default = 03h) */
++ {0x30D8, 0x3F}, /* All tx off 2f, on 3f */
++
++/* ADLC setting ... */
++ {0x3070, 0x5F}, /* [6]L-ADLC BPR, [4]ch sel, [3]L-ADLC, [2]F-ADLC */
++ {0x3071, 0x00}, /* F&L-adlc max 127 (default = 11h, max 255) */
++ {0x3080, 0x04}, /* F-ADLC filter A (default = 10h) */
++ {0x3081, 0x38}, /* F-ADLC filter B (default = 20h) */
++
++/* Integration setting ... */
++ {0x0202, 0x03}, /* coarse integration time */
++ {0x0203, 0xCD},
++ {0x0204, 0x00}, /* analog gain[msb] 0100 x8 0080 x4 */
++ {0x0205, 0x80}, /* analog gain[lsb] 0040 x2 0020 x1 */
++
++/* Frame Length */
++ {0x0340, 0x04}, /*Capture 07B4(1960[# of row]+12[V-blank]) */
++ {0x0341, 0x44}, /*Preview 03E0(980[# of row]+12[V-blank]) */
++
++/* Line Length */
++ {0x0342, 0x0A}, /* 2738 */
++ {0x0343, 0xB2}, /*(Same as sensor default) */
++
++/* embedded 2-line OFF setting ... */
++/* 1920 x 1080 */
++ {0x3084, 0x15}, /* SYNC Mode */
++
++/* PLL & MIPI setting ... */
++/* input clock 25MHz */
++
++/* (3) MIPI 2-lane Serial(TST = 0000b or TST = 0010b), 30 fps */
++ {0x30BD, 0x00}, /* SEL_CCP[0] */
++ {0x30B2, 0x08}, /* PLL P = 8 */
++ {0x30B3, 0x00}, /* PLL M[8] = 0 */
++ {0x30B4, 0x78}, /* PLL M = 129 */
++ {0x30B5, 0x00}, /* PLL S = 0 */
++ {0x30BE, 0x1A}, /* M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0] */
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x00}, /* video_offset[7:4] 2400%12 */
++ {0x30C1, 0x01}, /* pack video enable [0] */
++ {0x30C8, 0x09}, /* video_data_length 2400 = 1920 * 1.25 */
++ {0x30C9, 0x60},
++ {0x30E2, 0x02}, /* num lanes[1:0] = 2 */
++ {0x30EE, 0x02}, /* DPHY enable [1] */
++ {0x30F1, 0x70}, /* DPHY BANDCTRL 800MHz=80.6MHz */
++ {0x3111, 0x86}, /* Embedded data off [5] */
++
++ {0x30b4, 0x20},
++ {0x30b5, 0x01},
++
++ {0x30A9, 0x01},
++ {0x0387, 0x01},
++ {0x0344, 0x01}, /*x_addr_start 344 */
++ {0x0345, 0x58},
++ {0x0348, 0x08}, /*x_addr_end 2263 */
++ {0x0349, 0xD7},
++ {0x0346, 0x01}, /*y_addr_start 440 */
++ {0x0347, 0xB8},
++ {0x034A, 0x05}, /*y_addr_end 1519 */
++ {0x034B, 0xEF},
++
++ {0x034C, 0x07}, /*x_output_size 1920 */
++ {0x034D, 0x80},
++ {0x034E, 0x04}, /*y_output_size 1080 */
++ {0x034F, 0x38},
++
++ {0x30d9, 0x00},
++
++ {0x020E, 0x01}, /*Gr Digital Gain */
++ {0x020F, 0x00},
++ {0x0210, 0x01}, /*Red Digital Gain */
++ {0x0211, 0x00},
++ {0x0212, 0x01}, /*Blue Digital Gain */
++ {0x0213, 0x00},
++ {0x0214, 0x01}, /*Gb Digital Gain */
++ {0x0215, 0x00},
++ {0x0204, 0x00},
++ {0x0205, 0x80},
++
++
++ /*Apply Bill's setting*/
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ /*Don't apply Bill's PLL setting
++ {0x0307, 0x50},
++ {0x30B5, 0x01},
++ */
++ {0x0307, 0x3c},
++ {0x30b5, 0x02},
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x1A}, /*DIV_M_PCLK = 5 */
++
++ {0x0383, 0x01},
++
++ {0x0100, 0x01}, /* stream on */
++ {0xffff, 0xff},
++
++};
++
++/* 1280x720, V1F2 & H1F2 */
++static struct regval_list s5k4e1_res_720p[] = {
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++/* CDS timing setting ... */
++ {0x3000, 0x04},
++ {0x3001, 0x02},
++ {0x3002, 0x0C},
++ {0x3003, 0x0E},
++ {0x3004, 0x2C},
++ {0x3005, 0x0D},
++ {0x3006, 0x39},
++ {0x3007, 0x02},
++ {0x3008, 0x3C},
++ {0x3009, 0x3C},
++ {0x300A, 0x28},
++ {0x300B, 0x15},
++ {0x300C, 0x15},
++ {0x300D, 0x02},
++ {0x300E, 0xAB},
++
++/* CDS option setting ... */
++ {0x3010, 0x00},
++ {0x3011, 0x7A},
++ {0x3012, 0x30},
++ {0x3013, 0x90},
++ {0x3014, 0x00},
++ {0x3015, 0x00},
++ {0x3016, 0x0A},
++ {0x3017, 0x84},
++ {0x3018, 0x78},
++ {0x301D, 0xD4},
++
++ {0x3021, 0x02},
++ {0x3022, 0x41},
++ {0x3024, 0x08},
++ {0x3027, 0x08},
++
++/* Pixel option setting ... */
++ {0x301C, 0x05}, /* Pixel Bias [3:0] (default = 03h) */
++ {0x30D8, 0x3F}, /* All tx off 2f, on 3f */
++
++/* ADLC setting ... */
++ {0x3070, 0x5F},
++ {0x3071, 0x00},
++ {0x3080, 0x04},
++ {0x3081, 0x38},
++
++/* Integration setting ... */
++ {0x0202, 0x03},
++ {0x0203, 0xD8},
++ {0x0204, 0x00},
++ {0x0205, 0x80},
++
++/*Frame Length*/
++ {0x0340, 0x02},
++ {0x0341, 0xDC},
++
++/* Line Length */
++ {0x0342, 0x0A}, /*2738 */
++ {0x0343, 0xB2},
++
++/* Average Sub-sampling */
++ {0x0387, 0x03},
++ {0x30a9, 0x02},
++
++/* embedded 2-line OFF setting ... */
++/* 1280 x 720 */
++ {0x3084, 0x15},
++
++/* PLL & MIPI setting ... */
++
++/* (3) MIPI 2-lane Serial(TST = 0000b or TST = 0010b), 60 fps */
++ {0x30BD, 0x00},
++ {0x30B2, 0x08},
++ {0x30B3, 0x00},
++ {0x30B4, 0x78},
++ {0x30B5, 0x00},
++ {0x30BE, 0x1A},
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x40},
++ {0x30C1, 0x01},
++ {0x30C8, 0x06},
++ {0x30C9, 0x40},
++
++ {0x30E2, 0x02},
++
++ {0x30b4, 0x20},
++ {0x30b5, 0x01},
++
++ {0x30EE, 0x02},
++ {0x30F1, 0x70},
++ {0x3111, 0x86},
++
++/* MIPI Size Setting ... */
++/* 1304 x 980 */
++ {0x0344, 0x00},
++ {0x0345, 0x18},
++ {0x0348, 0x0A},
++ {0x0349, 0x17},
++ {0x0346, 0x01},
++ {0x0347, 0x04},
++ {0x034A, 0x06},
++ {0x034B, 0xA3},
++
++ {0x0380, 0x00},
++ {0x0381, 0x01},
++ {0x0382, 0x00},
++ {0x0383, 0x01},
++ {0x0384, 0x00},
++ {0x0385, 0x01},
++ {0x0386, 0x00},
++ {0x0387, 0x03},
++
++ {0x034C, 0x05}, /* x_output_size = 1280 */
++ {0x034D, 0x00},
++ {0x034E, 0x02}, /* y_output_size = 720 */
++ {0x034F, 0xD0},
++
++ {0x30d9, 0x00},
++
++ {0x020E, 0x01},
++ {0x020F, 0x00},
++ {0x0210, 0x01},
++ {0x0211, 0x00},
++ {0x0212, 0x01},
++ {0x0213, 0x00},
++ {0x0214, 0x01},
++ {0x0215, 0x00},
++ {0x0204, 0x01},
++ {0x0205, 0x00},
++
++ /*Apply Bill's setting*/
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ /*Don't apply Bill's PLL setting
++ {0x0307, 0x50},
++ {0x30B5, 0x01},
++ */
++ {0x0307, 0x3c},
++ {0x30b5, 0x02},
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x15}, /*DIV_M_PCLK = 5 */
++
++ {0x0100, 0x01}, /* stream on */
++ {0xffff, 0xff},
++};
++
++/*VGA*/
++static struct regval_list s5k4e1_res_vga_ac04_bill[] = {
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++ {0x3000, 0x04},
++ {0x3001, 0x02},
++ {0x3002, 0x0C},
++ {0x3003, 0x0E},
++ {0x3004, 0x2C},
++ {0x3005, 0x0D},
++ {0x3006, 0x39},
++ {0x3007, 0x02},
++ {0x3008, 0x3C},
++ {0x3009, 0x3C},
++ {0x300A, 0x28},
++ {0x300B, 0x15},
++ {0x300C, 0x15},
++ {0x300D, 0x02},
++ {0x300E, 0xA8},
++
++ {0x3010, 0x00},
++ {0x3011, 0x7A},
++ {0x3012, 0x30},
++ {0x3013, 0xA0},
++ {0x3014, 0x00},
++ {0x3015, 0x00},
++ {0x3016, 0x0A},
++ {0x3017, 0x94},
++ {0x3018, 0x78},
++
++ {0x301D, 0xD4},
++
++ {0x3021, 0x02},
++ {0x3022, 0x41},
++ {0x3024, 0x08},
++ {0x3027, 0x08},
++
++ {0x301C, 0x05},
++ {0x30D8, 0x3F},
++
++ {0x3070, 0x5F},
++ {0x3071, 0x00},
++ {0x3080, 0x04},
++ {0x3081, 0x38},
++
++ {0x0202, 0x03},
++ {0x0203, 0xD4},
++ {0x0204, 0x00},
++ {0x0205, 0x20},
++
++ {0x0340, 0x03},
++ {0x0341, 0xE0},
++
++ {0x0342, 0x0A},
++ {0x0343, 0xB2},
++
++ {0x0344, 0x00},
++ {0x0345, 0x18},
++ {0x0348, 0x0A},
++ {0x0349, 0x17},
++ {0x0346, 0x00},
++ {0x0347, 0x14},
++ {0x034A, 0x07},
++ {0x034B, 0x93},
++
++ {0x034C, 0x02},
++ {0x034D, 0x80},
++ {0x034E, 0x01},
++ {0x034F, 0xE0},
++
++ {0x0380, 0x00},
++ {0x0381, 0x01},
++ {0x0382, 0x00},
++ {0x0383, 0x07},
++ {0x0384, 0x00},
++ {0x0385, 0x01},
++ {0x0386, 0x00},
++ {0x0387, 0x07},
++
++ {0x3084, 0x15},
++
++ {0x30BD, 0x00},
++
++
++ {0x30b3, 0x00},
++ {0x30b4, 0x57},
++ {0x30b5, 0x01},
++ {0x30f1, 0x70},
++
++ {0x30BE, 0x1A},
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x80},
++ {0x30C1, 0x01},
++ {0x30C8, 0x03},
++ {0x30C9, 0x20},
++
++ {0x30b2, 0x06},
++ {0x30E2, 0x02},
++
++ {0x30EE, 0x02},
++
++ {0x3111, 0x86},
++
++ {0x30d9, 0x00},
++
++ {0x020E, 0x01},
++ {0x020F, 0x00},
++ {0x0210, 0x01},
++ {0x0211, 0x00},
++ {0x0212, 0x01},
++ {0x0213, 0x00},
++ {0x0214, 0x01},
++ {0x0215, 0x00},
++ {0x0204, 0x01},
++ {0x0205, 0x00},
++
++#if 1
++ /* Apply Bill's setting */
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ /*Don't apply Bill's PLL setting
++ {0x0307, 0x50},
++ {0x30B5, 0x01},
++ */
++ {0x0307, 0x3c},
++ {0x30b5, 0x02},
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x15},
++
++ /* {0x0100, 0x01}, */
++ /* {0xffff, 0xff}, */
++#endif
++
++#if 1
++ /* 1304x980 */
++ {0x3013, 0x90},
++ {0x3017, 0x84},
++ {0x30A9, 0x02},
++ {0x300E, 0xAB},
++
++ {0x0387, 0x03},
++ {0x0344, 0x00}, /* x_addr_start = 0 */
++ {0x0345, 0x00},
++ {0x0348, 0x0A}, /* x_addr_end = 2607 */
++ {0x0349, 0x2F},
++ {0x0346, 0x00}, /* y_addr_start = 0 */
++ {0x0347, 0x00},
++ {0x034A, 0x07}, /* y_addr_end = 1959 */
++ {0x034B, 0xA7},
++ {0x0380, 0x00},
++ {0x0381, 0x01},
++ {0x0382, 0x00},
++ {0x0383, 0x01},
++ {0x0384, 0x00},
++ {0x0385, 0x01},
++ {0x0386, 0x00},
++ {0x0387, 0x03},
++ {0x034c, 0x05}, /* x_output_size = 1304 */
++ {0x034d, 0x18},
++ {0x034e, 0x03}, /* y_output_size = 980 */
++ {0x034f, 0xd4},
++ {0x30BF, 0xAB},
++ {0x30c0, 0xa0},
++ {0x30C8, 0x06}, /* x_output_size * 1.25 */
++ {0x30c9, 0x5e},
++
++ {0x0100, 0x01},
++ {0xffff, 0xff},
++
++#endif
++};
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1_motor/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_S5K4E1_MOTOR
++ tristate "Moorestown s5k4e1 motor"
++ depends on I2C && VIDEO_MRST_ISP && VIDEO_MRST_S5K4E1
++
++ ---help---
++ Say Y here if your platform support s5k4e1 motor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1_motor/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_S5K4E1_MOTOR) += mrsts5k4e1_motor.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c
+@@ -0,0 +1,430 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "mrsts5k4e1_motor.h"
++
++static int s5k4e1_motor_debug;
++module_param(s5k4e1_motor_debug, int, 0644);
++MODULE_PARM_DESC(s5k4e1_motor_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) \
++ do { \
++ if (s5k4e1_motor_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", __func__, ## arg); \
++ } while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(1, "entering");
++#define DBG_leaving dprintk(1, "leaving");
++#define DBG_line dprintk(1, " line: %d", __LINE__);
++
++static inline struct s5k4e1_motor *to_motor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct s5k4e1_motor, sd);
++}
++
++/*static struct s5k4e1_motor *config; */
++static int motor_read(struct i2c_client *c, u32 *reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ msgbuf[2] = 0;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.addr = c->addr;
++ msg.buf = msgbuf;
++ msg.len = 3;
++ msg.flags = I2C_M_RD;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ *reg = (msgbuf[0] << 16 | msgbuf[1] << 8 | msgbuf[2]);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int motor_write(struct i2c_client *c, u32 reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = (reg & 0x00FFFFFFFF) >> 16;
++ msgbuf[1] = (reg & 0x0000FFFF) >> 8 ;
++ msgbuf[2] = reg;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = 3;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int s5k4e1_motor_goto_position(struct i2c_client *c,
++ unsigned short code,
++ struct s5k4e1_motor *config,
++ unsigned int step)
++{
++ int max_code, min_code;
++ int timeout = 25; /*TODO: check the timeout time */
++ u8 cmdh, cmdl, finished;
++ u32 cmd = 0, val = 0;
++
++ max_code = config->macro_code;
++ min_code = config->infin_code;
++
++ if (code > max_code)
++ code = max_code;
++ if (code < min_code)
++ code = min_code;
++
++ cmdh = MOTOR_DAC_CTRL_MODE_1 | (code >> 8); /* PS EN x x M W TD9 TD8*/
++ cmdl = code; /* TD7 ~ TD0 */
++ cmd |= (cmdh << 16) | (cmdl << 8);
++
++ dprintk(1, "cmdh: %x, cmdl: %x, cmd: %x", cmdh, cmdl, cmd);
++ dprintk(1, "DAC code: %x", code);
++
++ motor_write(c, cmd);
++ finished = 0;
++ while ((!finished) && timeout--) {
++ msleep(1);
++ motor_read(c, &val);
++ cmdh = val >> 16;
++ cmdl = val >> 8;
++
++ dprintk(1, "cmdh & MOTOR_F = %x", cmdh & MOTOR_F);
++ finished = cmdh & MOTOR_F;
++ finished = (finished) ? 0 : 1;
++ };
++
++ if (finished) {
++ dprintk(1, "Moving from code %x to code %x takes %d ms.",
++ config->cur_code, code, 25-timeout);
++ return 0;
++ } else {
++ eprintk("Unable to move motor to step %d, TIMEOUT!!", step);
++ return -1;
++ }
++
++}
++
++int s5k4e1_motor_wakeup(struct i2c_client *client)
++{
++ /* hardware wakeup: set PS = 1 */
++ return motor_write(client, 0xC00000);
++}
++
++int s5k4e1_motor_standby(struct i2c_client *client)
++{
++ /* hardware standby: set PS = 0 */
++ return motor_write(client, 0x400000);
++}
++
++int s5k4e1_motor_init(struct i2c_client *client, struct s5k4e1_motor *config)
++{
++
++ int ret;
++ int infin_cur, macro_cur;
++ int step_res, step_time;
++ int val;
++
++ DBG_entering;
++ infin_cur = MAX(MOTOR_INFIN_CUR, MOTOR_DAC_MIN_CUR);
++ macro_cur = MIN(MOTOR_MACRO_CUR, MOTOR_DAC_MAX_CUR);
++ step_res = 1 << MOTOR_STEP_SHIFT;
++ step_time = MOTOR_STEP_TIME;
++
++ /*config->motor = client;*/
++ config->infin_cur = infin_cur;
++ config->macro_cur = macro_cur;
++
++ config->infin_code = MOTOR_INFIN_CODE;
++ config->macro_code = MOTOR_MACRO_CODE;
++
++ config->max_step = ((config->macro_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT) + 1;
++ config->step_res = step_res;
++ config->step_time = step_time;
++
++ dprintk(1, "max_step: %d, step_res: %d, step_time: %d",
++ config->max_step, step_res, step_time);
++
++ /* Set motor step time and resolution */
++ val = (MOTOR_DAC_CTRL_MODE_0 << 16) | (step_res << 8) | step_time;
++ ret = motor_write(client, val);
++
++ /* Note here, maybe macro_code */
++ ret |= s5k4e1_motor_goto_position(client, config->infin_code,
++ config, 0);
++ if (!ret) {
++ config->cur_code = config->infin_code;
++ dprintk(1, "Motor initialization success!");
++ } else
++ eprintk("Error while initializing motor!!!");
++
++ return ret;
++}
++
++int s5k4e1_motor_set_focus(struct i2c_client *c,
++ unsigned int step,
++ struct s5k4e1_motor *config)
++{
++ int s_code, ret;
++ int max_step = config->max_step;
++ unsigned int val = step;
++
++ if (val > max_step)
++ val = max_step;
++
++ s_code = (val << MOTOR_STEP_SHIFT);
++ s_code += config->infin_code;
++
++ ret = s5k4e1_motor_goto_position(c, s_code, config, step);
++ if (!ret)
++ config->cur_code = s_code;
++
++ return ret;
++}
++
++static int s5k4e1_motor_g_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct s5k4e1_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ ret = s5k4e1_motor_get_focus(c, &ctrl->value, config);
++ if (ret) {
++ eprintk("error call s5k4e1_motor_get_focue");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_motor_s_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct s5k4e1_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ ret = s5k4e1_motor_set_focus(c, ctrl->value, config);
++ if (ret) {
++ eprintk("error call s5k4e1_motor_set_focue");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++
++int s5k4e1_motor_get_focus(struct i2c_client *c,
++ unsigned int *step,
++ struct s5k4e1_motor *config)
++{
++ int ret_step;
++
++ ret_step = ((config->cur_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT);
++
++ if (ret_step <= config->max_step)
++ *step = ret_step;
++ else
++ *step = config->max_step;
++
++ return 0;
++}
++
++int s5k4e1_motor_max_step(struct i2c_client *c,
++ unsigned int *max_code,
++ struct s5k4e1_motor *config)
++{
++ if (config->max_step != 0)
++ *max_code = config->max_step;
++ return 0;
++
++}
++
++static int s5k4e1_motor_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct s5k4e1_motor *config = to_motor_config(sd);
++
++ DBG_entering;
++ dprintk(1, "got focus range of %d", config->max_step);
++ if (config->max_step != 0)
++ qc->maximum = config->max_step;
++ DBG_leaving;
++ return 0;
++}
++
++static const struct v4l2_subdev_core_ops s5k4e1_motor_core_ops = {
++ .g_ctrl = s5k4e1_motor_g_ctrl,
++ .s_ctrl = s5k4e1_motor_s_ctrl,
++ .queryctrl = s5k4e1_motor_queryctrl,
++};
++
++static const struct v4l2_subdev_ops s5k4e1_motor_ops = {
++ .core = &s5k4e1_motor_core_ops,
++};
++
++static int s5k4e1_motor_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ if (s5k4e1_motor_wakeup(client))
++ eprintk("unable to wakeup s5k4e1 motor.");
++
++ return 0;
++}
++
++static int s5k4e1_motor_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct s5k4e1_motor *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++/* struct i2c_client *motor; */
++
++ DBG_entering;
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct s5k4e1_motor), GFP_KERNEL);
++ if (!info) {
++ eprintk("fail to malloc for ci_motor");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = s5k4e1_motor_detect(client);
++ if (ret) {
++ eprintk("error s5k4e1_motor_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &s5k4e1_motor_ops);
++
++ /*
++ * Initialization S5K4E1
++ * then turn into standby mode
++ */
++ ret = s5k4e1_motor_init(client, info);
++ if (ret) {
++ eprintk("error calling s5k4e1_motor_init");
++ goto out_free;
++ }
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int s5k4e1_motor_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_motor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++static const struct i2c_device_id s5k4e1_motor_id[] = {
++ {"s5k4e1_motor", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, s5k4e1_motor_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "s5k4e1_motor",
++ .probe = s5k4e1_motor_probe,
++ .remove = s5k4e1_motor_remove,
++ /* .suspend = ov5630_suspend,
++ * .resume = ov5630_resume, */
++ .id_table = s5k4e1_motor_id,
++};
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for Samsung S5K4E1 sensor motor");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/staging/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h
+@@ -0,0 +1,102 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <media/v4l2-subdev.h>
++
++/* DAC output max current (mA) */
++#define MOTOR_DAC_MAX_CUR 125
++/* DAC output min current (mA) */
++#define MOTOR_DAC_MIN_CUR 1
++/* DAC max code (Hex) */
++#define MOTOR_DAC_CODE_MAX 0x3FF
++/* DAC min code (Hex) */
++#define MOTOR_DAC_CODE_MIN 0x0
++
++/* VCM start code (Hex) */
++#define MOTOR_INFIN_CODE 0x120
++/* VCM stop code (Hex) */
++#define MOTOR_MACRO_CODE 0x205
++
++#define MOTOR_STEP_SHIFT 4 /* Step res = 2^4 = 10H */
++#define MOTOR_STEP_TIME 20 /* Step time = 50us x 20d = 1ms */
++
++/* VCM start current (mA) */
++#define MOTOR_INFIN_CUR ((MOTOR_DAC_MAX_CUR / MOTOR_DAC_CODE_MAX) \
++ * MOTOR_INFIN_CODE + 1)
++/* VCM max current for Macro (mA) */
++#define MOTOR_MACRO_CUR ((MOTOR_DAC_MAX_CUR / MOTOR_DAC_CODE_MAX) \
++ * MOTOR_MACRO_CODE + 1)
++
++
++#define MOTOR_DAC_BIT_RES 10
++#define MOTOR_DAC_MAX_CODE ((1 << MOTOR_DAC_BIT_RES) - 1)
++
++#define MOTOR_STEP_SHIFT 4
++
++#define MAX(x, y) ((x) > (y) ? (x) : (y))
++#define MIN(x, y) ((x) < (y) ? (x) : (y))
++
++/* DAC register related define */
++#define MOTOR_PS (1 << 7) /* power save */
++#define MOTOR_EN (1 << 6) /* out pin status*/
++#define MOTOR_M (1 << 3) /* mode select */
++#define MOTOR_W (1 << 2) /* register address */
++#define MOTOR_F (1 << 4) /* finish flag */
++
++#define MOTOR_DAC_CODE_L(x) (x & 0xff)
++#define MOTOR_DAC_CODE_H(x) ((x >> 8) & 0xf3)
++
++/* Step mode setting */
++#define MOTOR_DAC_CTRL_MODE_0 0xCC
++/* DAC code setting */
++#define MOTOR_DAC_CTRL_MODE_1 0xC8
++
++#define S5K4E1_MOTOR_ADDR (0x18 >> 1)
++/*#define POWER_EN_PIN 7*/
++#define GPIO_AF_PD 95
++
++#define DEBUG 0
++
++struct s5k4e1_motor{
++ /*struct i2c_client *motor;*/
++ unsigned int infin_cur;
++ unsigned int infin_code;
++ unsigned int macro_cur;
++ unsigned int macro_code;
++ unsigned int max_step;
++ unsigned int cur_code;
++ unsigned int step_res;
++ unsigned int step_time;
++ struct v4l2_subdev sd;
++};
++
++extern int s5k4e1_motor_init(struct i2c_client *client,
++ struct s5k4e1_motor *config);
++extern int s5k4e1_motor_standby(struct i2c_client *client);
++extern int s5k4e1_motor_wakeup(struct i2c_client *client);
++extern int s5k4e1_motor_set_focus(struct i2c_client *c, unsigned int step,
++ struct s5k4e1_motor *config);
++extern int s5k4e1_motor_get_focus(struct i2c_client *c, unsigned int *step,
++ struct s5k4e1_motor *config);
++extern int s5k4e1_motor_max_step(struct i2c_client *c, unsigned int *max_code,
++ struct s5k4e1_motor *config);
+--- a/drivers/staging/rar_register/rar_register.c
++++ b/drivers/staging/rar_register/rar_register.c
+@@ -148,7 +148,7 @@
+ */
+ static struct rar_device *_rar_to_device(int rar, int *off)
+ {
+- if (rar >= 0 && rar <= 3) {
++ if (rar >= 0 && rar < ARRAY_SIZE(my_rar_device.client)) {
+ *off = rar;
+ return &my_rar_device;
+ }
+--- /dev/null
++++ b/drivers/staging/spectra/Kconfig
+@@ -0,0 +1,40 @@
++
++menuconfig SPECTRA
++ tristate "Denali Spectra Flash Translation Layer"
++ depends on BLOCK
++ default n
++ ---help---
++ Enable the FTL pseudo-filesystem used with the NAND Flash
++ controller on Intel Moorestown Platform to pretend to be a disk
++
++choice
++ prompt "Compile for"
++ depends on SPECTRA
++ default SPECTRA_MRST_HW
++
++config SPECTRA_MRST_HW
++ bool "Moorestown hardware mode"
++ help
++ Driver communicates with the Moorestown hardware's register interface.
++ in DMA mode.
++
++config SPECTRA_MTD
++ bool "Linux MTD mode"
++ depends on MTD
++ help
++ Driver communicates with the kernel MTD subsystem instead of its own
++ built-in hardware driver.
++
++config SPECTRA_EMU
++ bool "RAM emulator testing"
++ help
++ Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.
++
++endchoice
++
++config SPECTRA_MRST_HW_DMA
++ bool
++ default n
++ depends on SPECTRA_MRST_HW
++ help
++ Use DMA for native hardware interface.
+--- /dev/null
++++ b/drivers/staging/spectra/Makefile
+@@ -0,0 +1,11 @@
++#
++# Makefile of Intel Moorestown NAND controller driver
++#
++
++obj-$(CONFIG_SPECTRA) += spectra.o
++spectra-y := ffsport.o flash.o lld.o
++spectra-$(CONFIG_SPECTRA_MRST_HW) += lld_nand.o
++spectra-$(CONFIG_SPECTRA_MRST_HW_DMA) += lld_cdma.o
++spectra-$(CONFIG_SPECTRA_EMU) += lld_emu.o
++spectra-$(CONFIG_SPECTRA_MTD) += lld_mtd.o
++
+--- /dev/null
++++ b/drivers/staging/spectra/README
+@@ -0,0 +1,29 @@
++This is a driver for NAND controller of Intel Moorestown platform.
++
++This driver is a standalone linux block device driver, it acts as if it's a normal hard disk.
++It includes three layer:
++ block layer interface - file ffsport.c
++ Flash Translation Layer (FTL) - file flash.c (implement the NAND flash Translation Layer, includs address mapping, garbage collection, wear-leveling and so on)
++ Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
++
++This driver can be build as modules or build-in.
++
++Dependency:
++This driver has dependency on IA Firmware of Intel Moorestown platform.
++It need the IA Firmware to create the block table for the first time.
++And to validate this driver code without IA Firmware, you can change the
++macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. Thus the
++driver will erase the whole nand flash and create a new block table.
++
++TODO:
++ - Enable Command DMA feature support
++ - lower the memory footprint
++ - Remove most of the unnecessary global variables
++ - Change all the upcase variable / functions name to lowercase
++ - Some other misc bugs
++
++Please send patches to:
++ Greg Kroah-Hartman <gregkh@suse.de>
++
++And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
++
+--- /dev/null
++++ b/drivers/staging/spectra/ffsdefs.h
+@@ -0,0 +1,58 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _FFSDEFS_
++#define _FFSDEFS_
++
++#define CLEAR 0 /*use this to clear a field instead of "fail"*/
++#define SET 1 /*use this to set a field instead of "pass"*/
++#define FAIL 1 /*failed flag*/
++#define PASS 0 /*success flag*/
++#define ERR -1 /*error flag*/
++
++#define ERASE_CMD 10
++#define WRITE_MAIN_CMD 11
++#define READ_MAIN_CMD 12
++#define WRITE_SPARE_CMD 13
++#define READ_SPARE_CMD 14
++#define WRITE_MAIN_SPARE_CMD 15
++#define READ_MAIN_SPARE_CMD 16
++#define MEMCOPY_CMD 17
++#define DUMMY_CMD 99
++
++#define EVENT_PASS 0x00
++#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
++#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
++#define EVENT_TIME_OUT 0x03
++#define EVENT_PROGRAM_FAILURE 0x04
++#define EVENT_ERASE_FAILURE 0x05
++#define EVENT_MEMCOPY_FAILURE 0x06
++#define EVENT_FAIL 0x07
++
++#define EVENT_NONE 0x22
++#define EVENT_DMA_CMD_COMP 0x77
++#define EVENT_ECC_TRANSACTION_DONE 0x88
++#define EVENT_DMA_CMD_FAIL 0x99
++
++#define CMD_PASS 0
++#define CMD_FAIL 1
++#define CMD_ABORT 2
++#define CMD_NOT_DONE 3
++
++#endif /* _FFSDEFS_ */
+--- /dev/null
++++ b/drivers/staging/spectra/ffsport.c
+@@ -0,0 +1,830 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "ffsport.h"
++#include "flash.h"
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/blkdev.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/kthread.h>
++#include <linux/log2.h>
++#include <linux/init.h>
++
++/**** Helper functions used for Div, Remainder operation on u64 ****/
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_Calc_Used_Bits
++* Inputs: Power of 2 number
++* Outputs: Number of Used Bits
++* 0, if the argument is 0
++* Description: Calculate the number of bits used by a given power of 2 number
++* Number can be upto 32 bit
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_Calc_Used_Bits(u32 n)
++{
++ int tot_bits = 0;
++
++ if (n >= 1 << 16) {
++ n >>= 16;
++ tot_bits += 16;
++ }
++
++ if (n >= 1 << 8) {
++ n >>= 8;
++ tot_bits += 8;
++ }
++
++ if (n >= 1 << 4) {
++ n >>= 4;
++ tot_bits += 4;
++ }
++
++ if (n >= 1 << 2) {
++ n >>= 2;
++ tot_bits += 2;
++ }
++
++ if (n >= 1 << 1)
++ tot_bits += 1;
++
++ return ((n == 0) ? (0) : tot_bits);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_u64_Div
++* Inputs: Number of u64
++* A power of 2 number as Division
++* Outputs: Quotient of the Divisor operation
++* Description: It divides the address by divisor by using bit shift operation
++* (essentially without explicitely using "/").
++* Divisor is a power of 2 number and Divided is of u64
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u64 GLOB_u64_Div(u64 addr, u32 divisor)
++{
++ return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_u64_Remainder
++* Inputs: Number of u64
++* Divisor Type (1 -PageAddress, 2- BlockAddress)
++* Outputs: Remainder of the Division operation
++* Description: It calculates the remainder of a number (of u64) by
++* divisor(power of 2 number ) by using bit shifting and multiply
++* operation(essentially without explicitely using "/").
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
++{
++ u64 result = 0;
++
++ if (divisor_type == 1) { /* Remainder -- Page */
++ result = (addr >> DeviceInfo.nBitsInPageDataSize);
++ result = result * DeviceInfo.wPageDataSize;
++ } else if (divisor_type == 2) { /* Remainder -- Block */
++ result = (addr >> DeviceInfo.nBitsInBlockDataSize);
++ result = result * DeviceInfo.wBlockDataSize;
++ }
++
++ result = addr - result;
++
++ return result;
++}
++
++#define NUM_DEVICES 1
++#define PARTITIONS 8
++
++#define GLOB_SBD_NAME "nd"
++#define GLOB_SBD_IRQ_NUM (29)
++
++#define GLOB_SBD_IOCTL_GC (0x7701)
++#define GLOB_SBD_IOCTL_WL (0x7702)
++#define GLOB_SBD_IOCTL_FORMAT (0x7703)
++#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
++#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
++#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
++#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
++#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
++#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
++#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
++
++static int reserved_mb = 0;
++module_param(reserved_mb, int, 0);
++MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
++
++int nand_debug_level;
++module_param(nand_debug_level, int, 0644);
++MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
++
++MODULE_LICENSE("GPL");
++
++struct spectra_nand_dev {
++ struct pci_dev *dev;
++ u64 size;
++ u16 users;
++ spinlock_t qlock;
++ void __iomem *ioaddr; /* Mapped address */
++ struct request_queue *queue;
++ struct task_struct *thread;
++ struct gendisk *gd;
++ u8 *tmp_buf;
++};
++
++
++static int GLOB_SBD_majornum;
++
++static char *GLOB_version = GLOB_VERSION;
++
++static struct spectra_nand_dev nand_device[NUM_DEVICES];
++
++static struct mutex spectra_lock;
++
++static int res_blks_os = 1;
++
++struct spectra_indentfy_dev_tag IdentifyDeviceData;
++
++static int force_flush_cache(void)
++{
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (ERR == GLOB_FTL_Flush_Cache()) {
++ printk(KERN_ERR "Fail to Flush FTL Cache!\n");
++ return -EFAULT;
++ }
++#if CMD_DMA
++ if (glob_ftl_execute_cmds())
++ return -EIO;
++ else
++ return 0;
++#endif
++ return 0;
++}
++
++struct ioctl_rw_page_info {
++ u8 *data;
++ unsigned int page;
++};
++
++static int ioctl_read_page_data(unsigned long arg)
++{
++ u8 *buf;
++ struct ioctl_rw_page_info info;
++ int result = PASS;
++
++ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
++ return -EFAULT;
++
++ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
++ if (!buf) {
++ printk(KERN_ERR "ioctl_read_page_data: "
++ "failed to allocate memory\n");
++ return -ENOMEM;
++ }
++
++ mutex_lock(&spectra_lock);
++ result = GLOB_FTL_Page_Read(buf,
++ (u64)info.page * IdentifyDeviceData.PageDataSize);
++ mutex_unlock(&spectra_lock);
++
++ if (copy_to_user((void __user *)info.data, buf,
++ IdentifyDeviceData.PageDataSize)) {
++ printk(KERN_ERR "ioctl_read_page_data: "
++ "failed to copy user data\n");
++ kfree(buf);
++ return -EFAULT;
++ }
++
++ kfree(buf);
++ return result;
++}
++
++static int ioctl_write_page_data(unsigned long arg)
++{
++ u8 *buf;
++ struct ioctl_rw_page_info info;
++ int result = PASS;
++
++ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
++ return -EFAULT;
++
++ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
++ if (!buf) {
++ printk(KERN_ERR "ioctl_write_page_data: "
++ "failed to allocate memory\n");
++ return -ENOMEM;
++ }
++
++ if (copy_from_user(buf, (void __user *)info.data,
++ IdentifyDeviceData.PageDataSize)) {
++ printk(KERN_ERR "ioctl_write_page_data: "
++ "failed to copy user data\n");
++ kfree(buf);
++ return -EFAULT;
++ }
++
++ mutex_lock(&spectra_lock);
++ result = GLOB_FTL_Page_Write(buf,
++ (u64)info.page * IdentifyDeviceData.PageDataSize);
++ mutex_unlock(&spectra_lock);
++
++ kfree(buf);
++ return result;
++}
++
++/* Return how many blocks should be reserved for bad block replacement */
++static int get_res_blk_num_bad_blk(void)
++{
++ return IdentifyDeviceData.wDataBlockNum / 10;
++}
++
++/* Return how many blocks should be reserved for OS image */
++static int get_res_blk_num_os(void)
++{
++ u32 res_blks, blk_size;
++
++ blk_size = IdentifyDeviceData.PageDataSize *
++ IdentifyDeviceData.PagesPerBlock;
++
++ res_blks = (reserved_mb * 1024 * 1024) / blk_size;
++
++ if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
++ res_blks = 1; /* Reserved 1 block for block table */
++
++ return res_blks;
++}
++
++static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
++{
++ rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
++ /* rq->timeout = 5 * HZ; */
++ rq->cmd[0] = REQ_LB_OP_FLUSH;
++}
++
++/* Transfer a full request. */
++static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
++{
++ u64 start_addr, addr;
++ u32 logical_start_sect, hd_start_sect;
++ u32 nsect, hd_sects;
++ u32 rsect, tsect = 0;
++ char *buf;
++ u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
++
++ start_addr = (u64)(blk_rq_pos(req)) << 9;
++ /* Add a big enough offset to prevent the OS Image from
++ * being accessed or damaged by file system */
++ start_addr += IdentifyDeviceData.PageDataSize *
++ IdentifyDeviceData.PagesPerBlock *
++ res_blks_os;
++
++ if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
++ req->cmd[0] == REQ_LB_OP_FLUSH) {
++ if (force_flush_cache()) /* Fail to flush cache */
++ return -EIO;
++ else
++ return 0;
++ }
++
++ if (!blk_fs_request(req))
++ return -EIO;
++
++ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
++ printk(KERN_ERR "Spectra error: request over the NAND "
++ "capacity!sector %d, current_nr_sectors %d, "
++ "while capacity is %d\n",
++ (int)blk_rq_pos(req),
++ blk_rq_cur_sectors(req),
++ (int)get_capacity(tr->gd));
++ return -EIO;
++ }
++
++ logical_start_sect = start_addr >> 9;
++ hd_start_sect = logical_start_sect / ratio;
++ rsect = logical_start_sect - hd_start_sect * ratio;
++
++ addr = (u64)hd_start_sect * ratio * 512;
++ buf = req->buffer;
++ nsect = blk_rq_cur_sectors(req);
++
++ if (rsect)
++ tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
++
++ switch (rq_data_dir(req)) {
++ case READ:
++ /* Read the first NAND page */
++ if (rsect) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += tsect << 9;
++ nsect -= tsect;
++ }
++
++ /* Read the other NAND pages */
++ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
++ if (GLOB_FTL_Page_Read(buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += IdentifyDeviceData.PageDataSize;
++ }
++
++ /* Read the last NAND pages */
++ if (nsect % ratio) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
++ }
++#if CMD_DMA
++ if (glob_ftl_execute_cmds())
++ return -EIO;
++ else
++ return 0;
++#endif
++ return 0;
++
++ case WRITE:
++ /* Write the first NAND page */
++ if (rsect) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
++ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += tsect << 9;
++ nsect -= tsect;
++ }
++
++ /* Write the other NAND pages */
++ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
++ if (GLOB_FTL_Page_Write(buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += IdentifyDeviceData.PageDataSize;
++ }
++
++ /* Write the last NAND pages */
++ if (nsect % ratio) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
++ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ }
++#if CMD_DMA
++ if (glob_ftl_execute_cmds())
++ return -EIO;
++ else
++ return 0;
++#endif
++ return 0;
++
++ default:
++ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
++ return -EIO;
++ }
++}
++
++/* This function is copied from drivers/mtd/mtd_blkdevs.c */
++static int spectra_trans_thread(void *arg)
++{
++ struct spectra_nand_dev *tr = arg;
++ struct request_queue *rq = tr->queue;
++ struct request *req = NULL;
++
++ /* we might get involved when memory gets low, so use PF_MEMALLOC */
++ current->flags |= PF_MEMALLOC;
++
++ spin_lock_irq(rq->queue_lock);
++ while (!kthread_should_stop()) {
++ int res;
++
++ if (!req) {
++ req = blk_fetch_request(rq);
++ if (!req) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(rq->queue_lock);
++ schedule();
++ spin_lock_irq(rq->queue_lock);
++ continue;
++ }
++ }
++
++ spin_unlock_irq(rq->queue_lock);
++
++ mutex_lock(&spectra_lock);
++ res = do_transfer(tr, req);
++ mutex_unlock(&spectra_lock);
++
++ spin_lock_irq(rq->queue_lock);
++
++ if (!__blk_end_request_cur(req, res))
++ req = NULL;
++ }
++
++ if (req)
++ __blk_end_request_all(req, -EIO);
++
++ spin_unlock_irq(rq->queue_lock);
++
++ return 0;
++}
++
++
++/* Request function that "handles clustering". */
++static void GLOB_SBD_request(struct request_queue *rq)
++{
++ struct spectra_nand_dev *pdev = rq->queuedata;
++ wake_up_process(pdev->thread);
++}
++
++static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
++
++{
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ return 0;
++}
++
++static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
++{
++ int ret;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ mutex_lock(&spectra_lock);
++ ret = force_flush_cache();
++ mutex_unlock(&spectra_lock);
++
++ return 0;
++}
++
++static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
++{
++ geo->heads = 4;
++ geo->sectors = 16;
++ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "heads: %d, sectors: %d, cylinders: %d\n",
++ geo->heads, geo->sectors, geo->cylinders);
++
++ return 0;
++}
++
++int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ switch (cmd) {
++ case GLOB_SBD_IOCTL_GC:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra IOCTL: Garbage Collection "
++ "being performed\n");
++ if (PASS != GLOB_FTL_Garbage_Collection())
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_WL:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra IOCTL: Static Wear Leveling "
++ "being performed\n");
++ if (PASS != GLOB_FTL_Wear_Leveling())
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_FORMAT:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
++ "being performed\n");
++ if (PASS != GLOB_FTL_Flash_Format())
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_FLUSH_CACHE:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
++ "being performed\n");
++ mutex_lock(&spectra_lock);
++ ret = force_flush_cache();
++ mutex_unlock(&spectra_lock);
++ return ret;
++
++ case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Copy block table\n");
++ if (copy_to_user((void __user *)arg,
++ get_blk_table_start_addr(),
++ get_blk_table_len()))
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Copy wear leveling table\n");
++ if (copy_to_user((void __user *)arg,
++ get_wear_leveling_table_start_addr(),
++ get_wear_leveling_table_len()))
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_GET_NAND_INFO:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Get NAND info\n");
++ if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
++ sizeof(IdentifyDeviceData)))
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_WRITE_DATA:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Write one page data\n");
++ return ioctl_write_page_data(arg);
++
++ case GLOB_SBD_IOCTL_READ_DATA:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Read one page data\n");
++ return ioctl_read_page_data(arg);
++ }
++
++ return -ENOTTY;
++}
++
++static struct block_device_operations GLOB_SBD_ops = {
++ .owner = THIS_MODULE,
++ .open = GLOB_SBD_open,
++ .release = GLOB_SBD_release,
++ .locked_ioctl = GLOB_SBD_ioctl,
++ .getgeo = GLOB_SBD_getgeo,
++};
++
++static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
++{
++ int res_blks;
++ u32 sects;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ memset(dev, 0, sizeof(struct spectra_nand_dev));
++
++ nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
++ "for OS image, %d blocks for bad block replacement.\n",
++ get_res_blk_num_os(),
++ get_res_blk_num_bad_blk());
++
++ res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
++
++ dev->size = (u64)IdentifyDeviceData.PageDataSize *
++ IdentifyDeviceData.PagesPerBlock *
++ (IdentifyDeviceData.wDataBlockNum - res_blks);
++
++ res_blks_os = get_res_blk_num_os();
++
++ spin_lock_init(&dev->qlock);
++
++ dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
++ if (!dev->tmp_buf) {
++ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
++ __FILE__, __LINE__);
++ goto out_vfree;
++ }
++
++ dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
++ if (dev->queue == NULL) {
++ printk(KERN_ERR
++ "Spectra: Request queue could not be initialized."
++ " Aborting\n ");
++ goto out_vfree;
++ }
++ dev->queue->queuedata = dev;
++
++ /* As Linux block layer doens't support >4KB hardware sector, */
++ /* Here we force report 512 byte hardware sector size to Kernel */
++ blk_queue_logical_block_size(dev->queue, 512);
++
++ blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
++ SBD_prepare_flush);
++
++ dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
++ if (IS_ERR(dev->thread)) {
++ blk_cleanup_queue(dev->queue);
++ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
++ return PTR_ERR(dev->thread);
++ }
++
++ dev->gd = alloc_disk(PARTITIONS);
++ if (!dev->gd) {
++ printk(KERN_ERR
++ "Spectra: Could not allocate disk. Aborting \n ");
++ goto out_vfree;
++ }
++ dev->gd->major = GLOB_SBD_majornum;
++ dev->gd->first_minor = which * PARTITIONS;
++ dev->gd->fops = &GLOB_SBD_ops;
++ dev->gd->queue = dev->queue;
++ dev->gd->private_data = dev;
++ snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
++
++ sects = dev->size >> 9;
++ nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
++ set_capacity(dev->gd, sects);
++
++ add_disk(dev->gd);
++
++ return 0;
++out_vfree:
++ return -ENOMEM;
++}
++
++/*
++static ssize_t show_nand_block_num(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ (int)IdentifyDeviceData.wDataBlockNum);
++}
++
++static ssize_t show_nand_pages_per_block(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ (int)IdentifyDeviceData.PagesPerBlock);
++}
++
++static ssize_t show_nand_page_size(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ (int)IdentifyDeviceData.PageDataSize);
++}
++
++static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
++static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
++static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
++
++static void create_sysfs_entry(struct device *dev)
++{
++ if (device_create_file(dev, &dev_attr_nand_block_num))
++ printk(KERN_ERR "Spectra: "
++ "failed to create sysfs entry nand_block_num.\n");
++ if (device_create_file(dev, &dev_attr_nand_pages_per_block))
++ printk(KERN_ERR "Spectra: "
++ "failed to create sysfs entry nand_pages_per_block.\n");
++ if (device_create_file(dev, &dev_attr_nand_page_size))
++ printk(KERN_ERR "Spectra: "
++ "failed to create sysfs entry nand_page_size.\n");
++}
++*/
++
++int register_spectra_ftl()
++{
++ int i;
++
++ /* create_sysfs_entry(&dev->dev); */
++
++ if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
++ printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
++ "Aborting\n");
++ return -ENOMEM;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
++ "Num blocks=%d, pagesperblock=%d, "
++ "pagedatasize=%d, ECCBytesPerSector=%d\n",
++ (int)IdentifyDeviceData.NumBlocks,
++ (int)IdentifyDeviceData.PagesPerBlock,
++ (int)IdentifyDeviceData.PageDataSize,
++ (int)IdentifyDeviceData.wECCBytesPerSector);
++ }
++
++ printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
++ if (GLOB_FTL_Init() != PASS) {
++ printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
++ "Aborting\n");
++ goto out_ftl_flash_register;
++ }
++ printk(KERN_ALERT "Spectra: block table has been found.\n");
++
++ GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
++ if (GLOB_SBD_majornum <= 0) {
++ printk(KERN_ERR "Unable to get the major %d for Spectra",
++ GLOB_SBD_majornum);
++ goto out_ftl_flash_register;
++ }
++
++ for (i = 0; i < NUM_DEVICES; i++)
++ if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
++ goto out_blk_register;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra: module loaded with major number %d\n",
++ GLOB_SBD_majornum);
++
++ return PASS;
++
++out_blk_register:
++ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
++out_ftl_flash_register:
++ GLOB_FTL_Cache_Release();
++ printk(KERN_ERR "Spectra: Module load failed.\n");
++
++ return FAIL;
++}
++EXPORT_SYMBOL_GPL(register_spectra_ftl);
++
++static int GLOB_SBD_init(void)
++{
++ /* Set debug output level (0~3) here. 3 is most verbose */
++ printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
++
++ mutex_init(&spectra_lock);
++
++ if (PASS != GLOB_FTL_Flash_Init()) {
++ printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
++ "Aborting\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++static void __exit GLOB_SBD_exit(void)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < NUM_DEVICES; i++) {
++ struct spectra_nand_dev *dev = &nand_device[i];
++ if (dev->gd) {
++ del_gendisk(dev->gd);
++ put_disk(dev->gd);
++ }
++ if (dev->queue)
++ blk_cleanup_queue(dev->queue);
++ kfree(dev->tmp_buf);
++ }
++
++ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
++
++ mutex_lock(&spectra_lock);
++ force_flush_cache();
++ mutex_unlock(&spectra_lock);
++
++ GLOB_FTL_Cache_Release();
++
++ GLOB_FTL_Flash_Release();
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra FTL module (major number %d) unloaded.\n",
++ GLOB_SBD_majornum);
++}
++
++module_init(GLOB_SBD_init);
++module_exit(GLOB_SBD_exit);
+--- /dev/null
++++ b/drivers/staging/spectra/ffsport.h
+@@ -0,0 +1,85 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _FFSPORT_
++#define _FFSPORT_
++
++#include "ffsdefs.h"
++
++#if defined __GNUC__
++#define PACKED
++#define PACKED_GNU __attribute__ ((packed))
++#define UNALIGNED
++#endif
++
++#include <linux/semaphore.h>
++#include <linux/string.h> /* for strcpy(), stricmp(), etc */
++#include <linux/mm.h> /* for kmalloc(), kfree() */
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++
++#include <linux/kernel.h> /* printk() */
++#include <linux/fs.h> /* everything... */
++#include <linux/errno.h> /* error codes */
++#include <linux/types.h> /* size_t */
++#include <linux/genhd.h>
++#include <linux/blkdev.h>
++#include <linux/hdreg.h>
++#include <linux/pci.h>
++#include "flash.h"
++
++#define VERBOSE 1
++
++#define NAND_DBG_WARN 1
++#define NAND_DBG_DEBUG 2
++#define NAND_DBG_TRACE 3
++
++extern int nand_debug_level;
++
++#ifdef VERBOSE
++#define nand_dbg_print(level, args...) \
++ do { \
++ if (level <= nand_debug_level) \
++ printk(KERN_ALERT args); \
++ } while (0)
++#else
++#define nand_dbg_print(level, args...)
++#endif
++
++#ifdef SUPPORT_BIG_ENDIAN
++#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
++ (u16)((u16)(w) >> 8))
++
++#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
++ (((u32)(dw) << 8) & 0x00ff0000) | \
++ (((u32)(dw) >> 8) & 0x0000ff00) | \
++ ((u32)(dw) >> 24))
++#else
++#define INVERTUINT16(w) w
++#define INVERTUINT32(dw) dw
++#endif
++
++extern int GLOB_Calc_Used_Bits(u32 n);
++extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
++extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
++extern int register_spectra_ftl(void);
++
++#endif /* _FFSPORT_ */
+--- /dev/null
++++ b/drivers/staging/spectra/flash.c
+@@ -0,0 +1,4729 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++
++#include "flash.h"
++#include "ffsdefs.h"
++#include "lld.h"
++#include "lld_nand.h"
++#if CMD_DMA
++#include "lld_cdma.h"
++#endif
++
++#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
++#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
++ DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
++
++#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
++ BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
++
++#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
++
++#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
++ BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
++
++#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
++
++#if DEBUG_BNDRY
++void debug_boundary_lineno_error(int chnl, int limit, int no,
++ int lineno, char *filename)
++{
++ if (chnl >= limit)
++ printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
++ "at %s:%d. Other info:%d. Aborting...\n",
++ chnl, limit, filename, lineno, no);
++}
++/* static int globalmemsize; */
++#endif
++
++static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
++static int FTL_Cache_Read(u64 dwPageAddr);
++static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
++ u16 cache_blk);
++static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
++ u8 cache_blk, u16 flag);
++static int FTL_Cache_Write(void);
++static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
++static void FTL_Calculate_LRU(void);
++static u32 FTL_Get_Block_Index(u32 wBlockNum);
++
++static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
++ u8 BT_Tag, u16 *Page);
++static int FTL_Read_Block_Table(void);
++static int FTL_Write_Block_Table(int wForce);
++static int FTL_Write_Block_Table_Data(void);
++static int FTL_Check_Block_Table(int wOldTable);
++static int FTL_Static_Wear_Leveling(void);
++static u32 FTL_Replace_Block_Table(void);
++static int FTL_Write_IN_Progress_Block_Table_Page(void);
++
++static u32 FTL_Get_Page_Num(u64 length);
++static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
++
++static u32 FTL_Replace_OneBlock(u32 wBlockNum,
++ u32 wReplaceNum);
++static u32 FTL_Replace_LWBlock(u32 wBlockNum,
++ int *pGarbageCollect);
++static u32 FTL_Replace_MWBlock(void);
++static int FTL_Replace_Block(u64 blk_addr);
++static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
++
++static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
++
++struct device_info_tag DeviceInfo;
++struct flash_cache_tag Cache;
++static struct spectra_l2_cache_info cache_l2;
++
++static u8 *cache_l2_page_buf;
++static u8 *cache_l2_blk_buf;
++
++u8 *g_pBlockTable;
++u8 *g_pWearCounter;
++u16 *g_pReadCounter;
++u32 *g_pBTBlocks;
++static u16 g_wBlockTableOffset;
++static u32 g_wBlockTableIndex;
++static u8 g_cBlockTableStatus;
++
++static u8 *g_pTempBuf;
++static u8 *flag_check_blk_table;
++static u8 *tmp_buf_search_bt_in_block;
++static u8 *spare_buf_search_bt_in_block;
++static u8 *spare_buf_bt_search_bt_in_block;
++static u8 *tmp_buf1_read_blk_table;
++static u8 *tmp_buf2_read_blk_table;
++static u8 *flags_static_wear_leveling;
++static u8 *tmp_buf_write_blk_table_data;
++static u8 *tmp_buf_read_disturbance;
++
++u8 *buf_read_page_main_spare;
++u8 *buf_write_page_main_spare;
++u8 *buf_read_page_spare;
++u8 *buf_get_bad_block;
++
++#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
++struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
++struct flash_cache_tag cache_start_copy;
++#endif
++
++int g_wNumFreeBlocks;
++u8 g_SBDCmdIndex;
++
++static u8 *g_pIPF;
++static u8 bt_flag = FIRST_BT_ID;
++static u8 bt_block_changed;
++
++static u16 cache_block_to_write;
++static u8 last_erased = FIRST_BT_ID;
++
++static u8 GC_Called;
++static u8 BT_GC_Called;
++
++#if CMD_DMA
++#define COPY_BACK_BUF_NUM 10
++
++static u8 ftl_cmd_cnt; /* Init value is 0 */
++u8 *g_pBTDelta;
++u8 *g_pBTDelta_Free;
++u8 *g_pBTStartingCopy;
++u8 *g_pWearCounterCopy;
++u16 *g_pReadCounterCopy;
++u8 *g_pBlockTableCopies;
++u8 *g_pNextBlockTable;
++static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
++static int cp_back_buf_idx;
++
++static u8 *g_temp_buf;
++
++#pragma pack(push, 1)
++#pragma pack(1)
++struct BTableChangesDelta {
++ u8 ftl_cmd_cnt;
++ u8 ValidFields;
++ u16 g_wBlockTableOffset;
++ u32 g_wBlockTableIndex;
++ u32 BT_Index;
++ u32 BT_Entry_Value;
++ u32 WC_Index;
++ u8 WC_Entry_Value;
++ u32 RC_Index;
++ u16 RC_Entry_Value;
++};
++
++#pragma pack(pop)
++
++struct BTableChangesDelta *p_BTableChangesDelta;
++#endif
++
++
++#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
++#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
++
++#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u32))
++#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u8))
++#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u16))
++#if SUPPORT_LARGE_BLOCKNUM
++#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u8) * 3)
++#else
++#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u16))
++#endif
++#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
++ FTL_Get_WearCounter_Table_Mem_Size_Bytes
++#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
++ FTL_Get_ReadCounter_Table_Mem_Size_Bytes
++
++static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
++{
++ u32 byte_num;
++
++ if (DeviceInfo.MLCDevice) {
++ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
++ DeviceInfo.wDataBlockNum * sizeof(u8) +
++ DeviceInfo.wDataBlockNum * sizeof(u16);
++ } else {
++ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
++ DeviceInfo.wDataBlockNum * sizeof(u8);
++ }
++
++ byte_num += 4 * sizeof(u8);
++
++ return byte_num;
++}
++
++static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
++{
++ return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
++}
++
++static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
++ u32 sizeTxed)
++{
++ u32 wBytesCopied, blk_tbl_size, wBytes;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
++ for (wBytes = 0;
++ (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
++ wBytes++) {
++#if SUPPORT_LARGE_BLOCKNUM
++ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
++ >> (((wBytes + sizeTxed) % 3) ?
++ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
++#else
++ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
++ >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
++#endif
++ }
++
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
++ wBytesCopied = wBytes;
++ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
++ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
++ memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
++
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++
++ if (DeviceInfo.MLCDevice) {
++ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
++ wBytesCopied += wBytes;
++ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
++ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
++ flashBuf[wBytes + wBytesCopied] =
++ (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
++ (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
++ }
++
++ return wBytesCopied + wBytes;
++}
++
++static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
++ u32 sizeToTx, u32 sizeTxed)
++{
++ u32 wBytesCopied, blk_tbl_size, wBytes;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
++ for (wBytes = 0; (wBytes < sizeToTx) &&
++ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
++#if SUPPORT_LARGE_BLOCKNUM
++ if (!((wBytes + sizeTxed) % 3))
++ pbt[(wBytes + sizeTxed) / 3] = 0;
++ pbt[(wBytes + sizeTxed) / 3] |=
++ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
++ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
++#else
++ if (!((wBytes + sizeTxed) % 2))
++ pbt[(wBytes + sizeTxed) / 2] = 0;
++ pbt[(wBytes + sizeTxed) / 2] |=
++ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
++ 0 : 8));
++#endif
++ }
++
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
++ wBytesCopied = wBytes;
++ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
++ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
++ memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++
++ if (DeviceInfo.MLCDevice) {
++ wBytesCopied += wBytes;
++ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
++ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
++ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
++ if (((wBytes + sizeTxed) % 2))
++ g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
++ g_pReadCounter[(wBytes + sizeTxed) / 2] |=
++ (flashBuf[wBytes] <<
++ (((wBytes + sizeTxed) % 2) ? 0 : 8));
++ }
++ }
++
++ return wBytesCopied+wBytes;
++}
++
++static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
++{
++ int i;
++
++ for (i = 0; i < BTSIG_BYTES; i++)
++ buf[BTSIG_OFFSET + i] =
++ ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
++ (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
++
++ return PASS;
++}
++
++static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
++{
++ static u8 tag[BTSIG_BYTES >> 1];
++ int i, j, k, tagi, tagtemp, status;
++
++ *tagarray = (u8 *)tag;
++ tagi = 0;
++
++ for (i = 0; i < (BTSIG_BYTES - 1); i++) {
++ for (j = i + 1; (j < BTSIG_BYTES) &&
++ (tagi < (BTSIG_BYTES >> 1)); j++) {
++ tagtemp = buf[BTSIG_OFFSET + j] -
++ buf[BTSIG_OFFSET + i];
++ if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
++ tagtemp = (buf[BTSIG_OFFSET + i] +
++ (1 + LAST_BT_ID - FIRST_BT_ID) -
++ (i * BTSIG_DELTA)) %
++ (1 + LAST_BT_ID - FIRST_BT_ID);
++ status = FAIL;
++ for (k = 0; k < tagi; k++) {
++ if (tagtemp == tag[k])
++ status = PASS;
++ }
++
++ if (status == FAIL) {
++ tag[tagi++] = tagtemp;
++ i = (j == (i + 1)) ? i + 1 : i;
++ j = (j == (i + 1)) ? i + 1 : i;
++ }
++ }
++ }
++ }
++
++ return tagi;
++}
++
++
++static int FTL_Execute_SPL_Recovery(void)
++{
++ u32 j, block, blks;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
++ for (j = 0; j <= blks; j++) {
++ block = (pbt[j]);
++ if (((block & BAD_BLOCK) != BAD_BLOCK) &&
++ ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
++ ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
++ if (FAIL == ret) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d "
++ "generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)(block & ~BAD_BLOCK));
++ MARK_BLOCK_AS_BAD(pbt[j]);
++ }
++ }
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_IdentifyDevice
++* Inputs: pointer to identify data structure
++* Outputs: PASS / FAIL
++* Description: the identify data structure is filled in with
++* information for the block driver.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
++ dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
++ dev_data->PageDataSize = DeviceInfo.wPageDataSize;
++ dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
++ dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
++
++ return PASS;
++}
++
++/* ..... */
++static int allocate_memory(void)
++{
++ u32 block_table_size, page_size, block_size, mem_size;
++ u32 total_bytes = 0;
++ int i;
++#if CMD_DMA
++ int j;
++#endif
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ page_size = DeviceInfo.wPageSize;
++ block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
++
++ block_table_size = DeviceInfo.wDataBlockNum *
++ (sizeof(u32) + sizeof(u8) + sizeof(u16));
++ block_table_size += (DeviceInfo.wPageDataSize -
++ (block_table_size % DeviceInfo.wPageDataSize)) %
++ DeviceInfo.wPageDataSize;
++
++ /* Malloc memory for block tables */
++ g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
++ if (!g_pBlockTable)
++ goto block_table_fail;
++ memset(g_pBlockTable, 0, block_table_size);
++ total_bytes += block_table_size;
++
++ g_pWearCounter = (u8 *)(g_pBlockTable +
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++
++ if (DeviceInfo.MLCDevice)
++ g_pReadCounter = (u16 *)(g_pBlockTable +
++ DeviceInfo.wDataBlockNum *
++ (sizeof(u32) + sizeof(u8)));
++
++ /* Malloc memory and init for cache items */
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
++ Cache.array[i].use_cnt = 0;
++ Cache.array[i].changed = CLEAR;
++ Cache.array[i].buf = kmalloc(Cache.cache_item_size,
++ GFP_ATOMIC);
++ if (!Cache.array[i].buf)
++ goto cache_item_fail;
++ memset(Cache.array[i].buf, 0, Cache.cache_item_size);
++ total_bytes += Cache.cache_item_size;
++ }
++
++ /* Malloc memory for IPF */
++ g_pIPF = kmalloc(page_size, GFP_ATOMIC);
++ if (!g_pIPF)
++ goto ipf_fail;
++ memset(g_pIPF, 0, page_size);
++ total_bytes += page_size;
++
++ /* Malloc memory for data merging during Level2 Cache flush */
++ cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
++ if (!cache_l2_page_buf)
++ goto cache_l2_page_buf_fail;
++ memset(cache_l2_page_buf, 0xff, page_size);
++ total_bytes += page_size;
++
++ cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
++ if (!cache_l2_blk_buf)
++ goto cache_l2_blk_buf_fail;
++ memset(cache_l2_blk_buf, 0xff, block_size);
++ total_bytes += block_size;
++
++ /* Malloc memory for temp buffer */
++ g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
++ if (!g_pTempBuf)
++ goto Temp_buf_fail;
++ memset(g_pTempBuf, 0, Cache.cache_item_size);
++ total_bytes += Cache.cache_item_size;
++
++ /* Malloc memory for block table blocks */
++ mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
++ g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
++ if (!g_pBTBlocks)
++ goto bt_blocks_fail;
++ memset(g_pBTBlocks, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ /* Malloc memory for function FTL_Check_Block_Table */
++ flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
++ if (!flag_check_blk_table)
++ goto flag_check_blk_table_fail;
++ total_bytes += DeviceInfo.wDataBlockNum;
++
++ /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
++ tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
++ if (!tmp_buf_search_bt_in_block)
++ goto tmp_buf_search_bt_in_block_fail;
++ memset(tmp_buf_search_bt_in_block, 0xff, page_size);
++ total_bytes += page_size;
++
++ mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
++ spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
++ if (!spare_buf_search_bt_in_block)
++ goto spare_buf_search_bt_in_block_fail;
++ memset(spare_buf_search_bt_in_block, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
++ if (!spare_buf_bt_search_bt_in_block)
++ goto spare_buf_bt_search_bt_in_block_fail;
++ memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ /* Malloc memory for function FTL_Read_Block_Table */
++ tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
++ if (!tmp_buf1_read_blk_table)
++ goto tmp_buf1_read_blk_table_fail;
++ memset(tmp_buf1_read_blk_table, 0xff, page_size);
++ total_bytes += page_size;
++
++ tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
++ if (!tmp_buf2_read_blk_table)
++ goto tmp_buf2_read_blk_table_fail;
++ memset(tmp_buf2_read_blk_table, 0xff, page_size);
++ total_bytes += page_size;
++
++ /* Malloc memory for function FTL_Static_Wear_Leveling */
++ flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
++ GFP_ATOMIC);
++ if (!flags_static_wear_leveling)
++ goto flags_static_wear_leveling_fail;
++ total_bytes += DeviceInfo.wDataBlockNum;
++
++ /* Malloc memory for function FTL_Write_Block_Table_Data */
++ if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
++ mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
++ 2 * DeviceInfo.wPageSize;
++ else
++ mem_size = DeviceInfo.wPageSize;
++ tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
++ if (!tmp_buf_write_blk_table_data)
++ goto tmp_buf_write_blk_table_data_fail;
++ memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ /* Malloc memory for function FTL_Read_Disturbance */
++ tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
++ if (!tmp_buf_read_disturbance)
++ goto tmp_buf_read_disturbance_fail;
++ memset(tmp_buf_read_disturbance, 0xff, block_size);
++ total_bytes += block_size;
++
++ /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
++ buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
++ if (!buf_read_page_main_spare)
++ goto buf_read_page_main_spare_fail;
++ total_bytes += DeviceInfo.wPageSize;
++
++ /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
++ buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
++ if (!buf_write_page_main_spare)
++ goto buf_write_page_main_spare_fail;
++ total_bytes += DeviceInfo.wPageSize;
++
++ /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
++ buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
++ if (!buf_read_page_spare)
++ goto buf_read_page_spare_fail;
++ memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
++ total_bytes += DeviceInfo.wPageSpareSize;
++
++ /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
++ buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
++ if (!buf_get_bad_block)
++ goto buf_get_bad_block_fail;
++ memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
++ total_bytes += DeviceInfo.wPageSpareSize;
++
++#if CMD_DMA
++ g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
++ if (!g_temp_buf)
++ goto temp_buf_fail;
++ memset(g_temp_buf, 0xff, block_size);
++ total_bytes += block_size;
++
++ /* Malloc memory for copy of block table used in CDMA mode */
++ g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
++ if (!g_pBTStartingCopy)
++ goto bt_starting_copy;
++ memset(g_pBTStartingCopy, 0, block_table_size);
++ total_bytes += block_table_size;
++
++ g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++
++ if (DeviceInfo.MLCDevice)
++ g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
++ DeviceInfo.wDataBlockNum *
++ (sizeof(u32) + sizeof(u8)));
++
++ /* Malloc memory for block table copies */
++ mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
++ 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
++ if (DeviceInfo.MLCDevice)
++ mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
++ g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
++ if (!g_pBlockTableCopies)
++ goto blk_table_copies_fail;
++ memset(g_pBlockTableCopies, 0, mem_size);
++ total_bytes += mem_size;
++ g_pNextBlockTable = g_pBlockTableCopies;
++
++ /* Malloc memory for Block Table Delta */
++ mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
++ g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
++ if (!g_pBTDelta)
++ goto bt_delta_fail;
++ memset(g_pBTDelta, 0, mem_size);
++ total_bytes += mem_size;
++ g_pBTDelta_Free = g_pBTDelta;
++
++ /* Malloc memory for Copy Back Buffers */
++ for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
++ cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
++ if (!cp_back_buf_copies[j])
++ goto cp_back_buf_copies_fail;
++ memset(cp_back_buf_copies[j], 0, block_size);
++ total_bytes += block_size;
++ }
++ cp_back_buf_idx = 0;
++
++ /* Malloc memory for pending commands list */
++ mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
++ info.pcmds = kzalloc(mem_size, GFP_KERNEL);
++ if (!info.pcmds)
++ goto pending_cmds_buf_fail;
++ total_bytes += mem_size;
++
++ /* Malloc memory for CDMA descripter table */
++ mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
++ info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
++ if (!info.cdma_desc_buf)
++ goto cdma_desc_buf_fail;
++ total_bytes += mem_size;
++
++ /* Malloc memory for Memcpy descripter table */
++ mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
++ info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
++ if (!info.memcp_desc_buf)
++ goto memcp_desc_buf_fail;
++ total_bytes += mem_size;
++#endif
++
++ nand_dbg_print(NAND_DBG_WARN,
++ "Total memory allocated in FTL layer: %d\n", total_bytes);
++
++ return PASS;
++
++#if CMD_DMA
++memcp_desc_buf_fail:
++ kfree(info.cdma_desc_buf);
++cdma_desc_buf_fail:
++ kfree(info.pcmds);
++pending_cmds_buf_fail:
++cp_back_buf_copies_fail:
++ j--;
++ for (; j >= 0; j--)
++ kfree(cp_back_buf_copies[j]);
++ kfree(g_pBTDelta);
++bt_delta_fail:
++ kfree(g_pBlockTableCopies);
++blk_table_copies_fail:
++ kfree(g_pBTStartingCopy);
++bt_starting_copy:
++ kfree(g_temp_buf);
++temp_buf_fail:
++ kfree(buf_get_bad_block);
++#endif
++
++buf_get_bad_block_fail:
++ kfree(buf_read_page_spare);
++buf_read_page_spare_fail:
++ kfree(buf_write_page_main_spare);
++buf_write_page_main_spare_fail:
++ kfree(buf_read_page_main_spare);
++buf_read_page_main_spare_fail:
++ kfree(tmp_buf_read_disturbance);
++tmp_buf_read_disturbance_fail:
++ kfree(tmp_buf_write_blk_table_data);
++tmp_buf_write_blk_table_data_fail:
++ kfree(flags_static_wear_leveling);
++flags_static_wear_leveling_fail:
++ kfree(tmp_buf2_read_blk_table);
++tmp_buf2_read_blk_table_fail:
++ kfree(tmp_buf1_read_blk_table);
++tmp_buf1_read_blk_table_fail:
++ kfree(spare_buf_bt_search_bt_in_block);
++spare_buf_bt_search_bt_in_block_fail:
++ kfree(spare_buf_search_bt_in_block);
++spare_buf_search_bt_in_block_fail:
++ kfree(tmp_buf_search_bt_in_block);
++tmp_buf_search_bt_in_block_fail:
++ kfree(flag_check_blk_table);
++flag_check_blk_table_fail:
++ kfree(g_pBTBlocks);
++bt_blocks_fail:
++ kfree(g_pTempBuf);
++Temp_buf_fail:
++ kfree(cache_l2_blk_buf);
++cache_l2_blk_buf_fail:
++ kfree(cache_l2_page_buf);
++cache_l2_page_buf_fail:
++ kfree(g_pIPF);
++ipf_fail:
++cache_item_fail:
++ i--;
++ for (; i >= 0; i--)
++ kfree(Cache.array[i].buf);
++ kfree(g_pBlockTable);
++block_table_fail:
++ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
++ __FILE__, __LINE__);
++
++ return -ENOMEM;
++}
++
++/* .... */
++static int free_memory(void)
++{
++ int i;
++
++#if CMD_DMA
++ kfree(info.memcp_desc_buf);
++ kfree(info.cdma_desc_buf);
++ kfree(info.pcmds);
++ for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
++ kfree(cp_back_buf_copies[i]);
++ kfree(g_pBTDelta);
++ kfree(g_pBlockTableCopies);
++ kfree(g_pBTStartingCopy);
++ kfree(g_temp_buf);
++ kfree(buf_get_bad_block);
++#endif
++ kfree(buf_read_page_spare);
++ kfree(buf_write_page_main_spare);
++ kfree(buf_read_page_main_spare);
++ kfree(tmp_buf_read_disturbance);
++ kfree(tmp_buf_write_blk_table_data);
++ kfree(flags_static_wear_leveling);
++ kfree(tmp_buf2_read_blk_table);
++ kfree(tmp_buf1_read_blk_table);
++ kfree(spare_buf_bt_search_bt_in_block);
++ kfree(spare_buf_search_bt_in_block);
++ kfree(tmp_buf_search_bt_in_block);
++ kfree(flag_check_blk_table);
++ kfree(g_pBTBlocks);
++ kfree(g_pTempBuf);
++ kfree(g_pIPF);
++ for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
++ kfree(Cache.array[i].buf);
++ kfree(g_pBlockTable);
++
++ return 0;
++}
++
++static void dump_cache_l2_table(void)
++{
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd;
++ int n, i;
++
++ n = 0;
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
++/*
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
++ if (pnd->pages_array[i] != MAX_U32_VALUE)
++ nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
++ }
++*/
++ n++;
++ }
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Init
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: allocates the memory for cache array,
++* important data structures
++* clears the cache array
++* reads the block table from flash into array
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Init(void)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ Cache.pages_per_item = 1;
++ Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
++
++ if (allocate_memory() != PASS)
++ return FAIL;
++
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ memcpy((void *)&cache_start_copy, (void *)&Cache,
++ sizeof(struct flash_cache_tag));
++ memset((void *)&int_cache, -1,
++ sizeof(struct flash_cache_delta_list_tag) *
++ (MAX_CHANS + MAX_DESCS));
++#endif
++ ftl_cmd_cnt = 0;
++#endif
++
++ if (FTL_Read_Block_Table() != PASS)
++ return FAIL;
++
++ /* Init the Level2 Cache data structure */
++ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
++ cache_l2.blk_array[i] = MAX_U32_VALUE;
++ cache_l2.cur_blk_idx = 0;
++ cache_l2.cur_page_num = 0;
++ INIT_LIST_HEAD(&cache_l2.table.list);
++ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
++
++ dump_cache_l2_table();
++
++ return 0;
++}
++
++
++#if CMD_DMA
++#if 0
++static void save_blk_table_changes(u16 idx)
++{
++ u8 ftl_cmd;
++ u32 *pbt = (u32 *)g_pBTStartingCopy;
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ u16 id;
++ u8 cache_blks;
++
++ id = idx - MAX_CHANS;
++ if (int_cache[id].item != -1) {
++ cache_blks = int_cache[id].item;
++ cache_start_copy.array[cache_blks].address =
++ int_cache[id].cache.address;
++ cache_start_copy.array[cache_blks].changed =
++ int_cache[id].cache.changed;
++ }
++#endif
++
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++
++ while (ftl_cmd <= PendingCMD[idx].Tag) {
++ if (p_BTableChangesDelta->ValidFields == 0x01) {
++ g_wBlockTableOffset =
++ p_BTableChangesDelta->g_wBlockTableOffset;
++ } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
++ pbt[p_BTableChangesDelta->BT_Index] =
++ p_BTableChangesDelta->BT_Entry_Value;
++ debug_boundary_error(((
++ p_BTableChangesDelta->BT_Index)),
++ DeviceInfo.wDataBlockNum, 0);
++ } else if (p_BTableChangesDelta->ValidFields == 0x03) {
++ g_wBlockTableOffset =
++ p_BTableChangesDelta->g_wBlockTableOffset;
++ g_wBlockTableIndex =
++ p_BTableChangesDelta->g_wBlockTableIndex;
++ } else if (p_BTableChangesDelta->ValidFields == 0x30) {
++ g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
++ p_BTableChangesDelta->WC_Entry_Value;
++ } else if ((DeviceInfo.MLCDevice) &&
++ (p_BTableChangesDelta->ValidFields == 0xC0)) {
++ g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
++ p_BTableChangesDelta->RC_Entry_Value;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "In event status setting read counter "
++ "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
++ ftl_cmd,
++ p_BTableChangesDelta->RC_Entry_Value,
++ (unsigned int)p_BTableChangesDelta->RC_Index);
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "This should never occur \n");
++ }
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++}
++
++static void discard_cmds(u16 n)
++{
++ u32 *pbt = (u32 *)g_pBTStartingCopy;
++ u8 ftl_cmd;
++ unsigned long k;
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ u8 cache_blks;
++ u16 id;
++#endif
++
++ if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
++ (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
++ for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
++ if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
++ MARK_BLK_AS_DISCARD(pbt[k]);
++ }
++ }
++
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ while (ftl_cmd <= PendingCMD[n].Tag) {
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ id = n - MAX_CHANS;
++
++ if (int_cache[id].item != -1) {
++ cache_blks = int_cache[id].item;
++ if (PendingCMD[n].CMD == MEMCOPY_CMD) {
++ if ((cache_start_copy.array[cache_blks].buf <=
++ PendingCMD[n].DataDestAddr) &&
++ ((cache_start_copy.array[cache_blks].buf +
++ Cache.cache_item_size) >
++ PendingCMD[n].DataDestAddr)) {
++ cache_start_copy.array[cache_blks].address =
++ NAND_CACHE_INIT_ADDR;
++ cache_start_copy.array[cache_blks].use_cnt =
++ 0;
++ cache_start_copy.array[cache_blks].changed =
++ CLEAR;
++ }
++ } else {
++ cache_start_copy.array[cache_blks].address =
++ int_cache[id].cache.address;
++ cache_start_copy.array[cache_blks].changed =
++ int_cache[id].cache.changed;
++ }
++ }
++#endif
++}
++
++static void process_cmd_pass(int *first_failed_cmd, u16 idx)
++{
++ if (0 == *first_failed_cmd)
++ save_blk_table_changes(idx);
++ else
++ discard_cmds(idx);
++}
++
++static void process_cmd_fail_abort(int *first_failed_cmd,
++ u16 idx, int event)
++{
++ u32 *pbt = (u32 *)g_pBTStartingCopy;
++ u8 ftl_cmd;
++ unsigned long i;
++ int erase_fail, program_fail;
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ u8 cache_blks;
++ u16 id;
++#endif
++
++ if (0 == *first_failed_cmd)
++ *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
++ "while executing %u Command %u accesing Block %u\n",
++ (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
++ PendingCMD[idx].CMD,
++ (unsigned int)PendingCMD[idx].Block);
++
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ while (ftl_cmd <= PendingCMD[idx].Tag) {
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ id = idx - MAX_CHANS;
++
++ if (int_cache[id].item != -1) {
++ cache_blks = int_cache[id].item;
++ if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
++ cache_start_copy.array[cache_blks].address =
++ int_cache[id].cache.address;
++ cache_start_copy.array[cache_blks].changed = SET;
++ } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
++ cache_start_copy.array[cache_blks].address =
++ NAND_CACHE_INIT_ADDR;
++ cache_start_copy.array[cache_blks].use_cnt = 0;
++ cache_start_copy.array[cache_blks].changed =
++ CLEAR;
++ } else if (PendingCMD[idx].CMD == ERASE_CMD) {
++ /* ? */
++ } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
++ /* ? */
++ }
++ }
++#endif
++
++ erase_fail = (event == EVENT_ERASE_FAILURE) &&
++ (PendingCMD[idx].CMD == ERASE_CMD);
++
++ program_fail = (event == EVENT_PROGRAM_FAILURE) &&
++ ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
++ (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
++
++ if (erase_fail || program_fail) {
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (PendingCMD[idx].Block ==
++ (pbt[i] & (~BAD_BLOCK)))
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ }
++ }
++}
++
++static void process_cmd(int *first_failed_cmd, u16 idx, int event)
++{
++ u8 ftl_cmd;
++ int cmd_match = 0;
++
++ if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
++ cmd_match = 1;
++
++ if (PendingCMD[idx].Status == CMD_PASS) {
++ process_cmd_pass(first_failed_cmd, idx);
++ } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
++ (PendingCMD[idx].Status == CMD_ABORT)) {
++ process_cmd_fail_abort(first_failed_cmd, idx, event);
++ } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
++ PendingCMD[idx].Tag) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ " Command no. %hu is not executed\n",
++ (unsigned int)PendingCMD[idx].Tag);
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ while (ftl_cmd <= PendingCMD[idx].Tag) {
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++ }
++}
++#endif
++
++static void process_cmd(int *first_failed_cmd, u16 idx, int event)
++{
++ printk(KERN_ERR "temporary workaround function. "
++ "Should not be called! \n");
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Event_Status
++* Inputs: none
++* Outputs: Event Code
++* Description: It is called by SBD after hardware interrupt signalling
++* completion of commands chain
++* It does following things
++* get event status from LLD
++* analyze command chain status
++* determine last command executed
++* analyze results
++* rebuild the block table in case of uncorrectable error
++* return event code
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Event_Status(int *first_failed_cmd)
++{
++ int event_code = PASS;
++ u16 i_P;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ *first_failed_cmd = 0;
++
++ event_code = GLOB_LLD_Event_Status();
++
++ switch (event_code) {
++ case EVENT_PASS:
++ nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
++ break;
++ case EVENT_UNCORRECTABLE_DATA_ERROR:
++ nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
++ break;
++ case EVENT_PROGRAM_FAILURE:
++ case EVENT_ERASE_FAILURE:
++ nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
++ "Event code: 0x%x\n", event_code);
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta;
++ for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
++ i_P++)
++ process_cmd(first_failed_cmd, i_P, event_code);
++ memcpy(g_pBlockTable, g_pBTStartingCopy,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memcpy(g_pWearCounter, g_pWearCounterCopy,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memcpy(g_pReadCounter, g_pReadCounterCopy,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ memcpy((void *)&Cache, (void *)&cache_start_copy,
++ sizeof(struct flash_cache_tag));
++ memset((void *)&int_cache, -1,
++ sizeof(struct flash_cache_delta_list_tag) *
++ (MAX_DESCS + MAX_CHANS));
++#endif
++ break;
++ default:
++ nand_dbg_print(NAND_DBG_WARN,
++ "Handling unexpected event code - 0x%x\n",
++ event_code);
++ event_code = ERR;
++ break;
++ }
++
++ memcpy(g_pBTStartingCopy, g_pBlockTable,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memcpy(g_pWearCounterCopy, g_pWearCounter,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memcpy(g_pReadCounterCopy, g_pReadCounter,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++
++ g_pBTDelta_Free = g_pBTDelta;
++ ftl_cmd_cnt = 0;
++ g_pNextBlockTable = g_pBlockTableCopies;
++ cp_back_buf_idx = 0;
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ memcpy((void *)&cache_start_copy, (void *)&Cache,
++ sizeof(struct flash_cache_tag));
++ memset((void *)&int_cache, -1,
++ sizeof(struct flash_cache_delta_list_tag) *
++ (MAX_DESCS + MAX_CHANS));
++#endif
++
++ return event_code;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: glob_ftl_execute_cmds
++* Inputs: none
++* Outputs: none
++* Description: pass thru to LLD
++***************************************************************/
++u16 glob_ftl_execute_cmds(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE,
++ "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
++ (unsigned int)ftl_cmd_cnt);
++ g_SBDCmdIndex = 0;
++ return glob_lld_execute_cmds();
++}
++
++#endif
++
++#if !CMD_DMA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Read Immediate
++* Inputs: pointer to data
++* address of data
++* Outputs: PASS / FAIL
++* Description: Reads one page of data into RAM directly from flash without
++* using or disturbing cache.It is assumed this function is called
++* with CMD-DMA disabled.
++*****************************************************************/
++int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
++{
++ int wResult = FAIL;
++ u32 Block;
++ u16 Page;
++ u32 phy_blk;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ Block = BLK_FROM_ADDR(addr);
++ Page = PAGE_FROM_ADDR(addr, Block);
++
++ if (!IS_SPARE_BLOCK(Block))
++ return FAIL;
++
++ phy_blk = pbt[Block];
++ wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
++
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
++ if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
++ >= MAX_READ_COUNTER)
++ FTL_Read_Disturbance(phy_blk);
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ }
++
++ return wResult;
++}
++#endif
++
++#ifdef SUPPORT_BIG_ENDIAN
++/*********************************************************************
++* Function: FTL_Invert_Block_Table
++* Inputs: none
++* Outputs: none
++* Description: Re-format the block table in ram based on BIG_ENDIAN and
++* LARGE_BLOCKNUM if necessary
++**********************************************************************/
++static void FTL_Invert_Block_Table(void)
++{
++ u32 i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++#ifdef SUPPORT_LARGE_BLOCKNUM
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ pbt[i] = INVERTUINT32(pbt[i]);
++ g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
++ }
++#else
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ pbt[i] = INVERTUINT16(pbt[i]);
++ g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
++ }
++#endif
++}
++#endif
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Flash_Init
++* Inputs: none
++* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
++* Description: The flash controller is initialized
++* The flash device is reset
++* Perform a flash READ ID command to confirm that a
++* valid device is attached and active.
++* The DeviceInfo structure gets filled in
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Flash_Init(void)
++{
++ int status = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ g_SBDCmdIndex = 0;
++
++ status = GLOB_LLD_Flash_Init();
++
++ return status;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Inputs: none
++* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
++* Description: The flash controller is released
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Flash_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return GLOB_LLD_Flash_Release();
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Cache_Release
++* Inputs: none
++* Outputs: none
++* Description: release all allocated memory in GLOB_FTL_Init
++* (allocated in GLOB_FTL_Init)
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void GLOB_FTL_Cache_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ free_memory();
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_If_Hit
++* Inputs: Page Address
++* Outputs: Block number/UNHIT BLOCK
++* Description: Determines if the addressed page is in cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u16 FTL_Cache_If_Hit(u64 page_addr)
++{
++ u16 item;
++ u64 addr;
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ item = UNHIT_CACHE_ITEM;
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ addr = Cache.array[i].address;
++ if ((page_addr >= addr) &&
++ (page_addr < (addr + Cache.cache_item_size))) {
++ item = i;
++ break;
++ }
++ }
++
++ return item;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Calculate_LRU
++* Inputs: None
++* Outputs: None
++* Description: Calculate the least recently block in a cache and record its
++* index in LRU field.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static void FTL_Calculate_LRU(void)
++{
++ u16 i, bCurrentLRU, bTempCount;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ bCurrentLRU = 0;
++ bTempCount = MAX_WORD_VALUE;
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ if (Cache.array[i].use_cnt < bTempCount) {
++ bCurrentLRU = i;
++ bTempCount = Cache.array[i].use_cnt;
++ }
++ }
++
++ Cache.LRU = bCurrentLRU;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Read_Page
++* Inputs: pointer to read buffer, logical address and cache item number
++* Outputs: None
++* Description: Read the page from the cached block addressed by blocknumber
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
++{
++ u8 *start_addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ start_addr = Cache.array[cache_item].buf;
++ start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
++ DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
++
++#if CMD_DMA
++ GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
++ DeviceInfo.wPageDataSize, 0);
++ ftl_cmd_cnt++;
++#else
++ memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
++#endif
++
++ if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
++ Cache.array[cache_item].use_cnt++;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Read_All
++* Inputs: pointer to read buffer,block address
++* Outputs: PASS=0 / FAIL =1
++* Description: It reads pages in cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
++{
++ int wResult = PASS;
++ u32 Block;
++ u32 lba;
++ u16 Page;
++ u16 PageCount;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 i;
++
++ Block = BLK_FROM_ADDR(phy_addr);
++ Page = PAGE_FROM_ADDR(phy_addr, Block);
++ PageCount = Cache.pages_per_item;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "%s, Line %d, Function: %s, Block: 0x%x\n",
++ __FILE__, __LINE__, __func__, Block);
++
++ lba = 0xffffffff;
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if ((pbt[i] & (~BAD_BLOCK)) == Block) {
++ lba = i;
++ if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
++ IS_DISCARDED_BLOCK(i)) {
++ /* Add by yunpeng -2008.12.3 */
++#if CMD_DMA
++ GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
++ PageCount * DeviceInfo.wPageDataSize, 0);
++ ftl_cmd_cnt++;
++#else
++ memset(pData, 0xFF,
++ PageCount * DeviceInfo.wPageDataSize);
++#endif
++ return wResult;
++ } else {
++ continue; /* break ?? */
++ }
++ }
++ }
++
++ if (0xffffffff == lba)
++ printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
++
++#if CMD_DMA
++ wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
++ PageCount, LLD_CMD_FLAG_MODE_CDMA);
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Read Counter modified in ftl_cmd_cnt %u"
++ " Block %u Counter%u\n",
++ ftl_cmd_cnt, (unsigned int)Block,
++ g_pReadCounter[Block -
++ DeviceInfo.wSpectraStartBlock]);
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->RC_Index =
++ Block - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->RC_Entry_Value =
++ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0xC0;
++
++ ftl_cmd_cnt++;
++
++ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
++ MAX_READ_COUNTER)
++ FTL_Read_Disturbance(Block);
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ } else {
++ ftl_cmd_cnt++;
++ }
++#else
++ wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
++ if (wResult == FAIL)
++ return wResult;
++
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
++ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
++ MAX_READ_COUNTER)
++ FTL_Read_Disturbance(Block);
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ }
++#endif
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write_All
++* Inputs: pointer to cache in sys memory
++* address of free block in flash
++* Outputs: PASS=0 / FAIL=1
++* Description: writes all the pages of the block in cache to flash
++*
++* NOTE:need to make sure this works ok when cache is limited
++* to a partial block. This is where copy-back would be
++* activated. This would require knowing which pages in the
++* cached block are clean/dirty.Right now we only know if
++* the whole block is clean/dirty.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
++{
++ u16 wResult = PASS;
++ u32 Block;
++ u16 Page;
++ u16 PageCount;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
++ "on %d\n", cache_block_to_write,
++ (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
++
++ Block = BLK_FROM_ADDR(blk_addr);
++ Page = PAGE_FROM_ADDR(blk_addr, Block);
++ PageCount = Cache.pages_per_item;
++
++#if CMD_DMA
++ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
++ Block, Page, PageCount)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated! "
++ "Need Bad Block replacing.\n",
++ __FILE__, __LINE__, __func__, Block);
++ wResult = FAIL;
++ }
++ ftl_cmd_cnt++;
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
++ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
++ " Line %d, Function %s, new Bad Block %d generated!"
++ "Need Bad Block replacing.\n",
++ __FILE__, __LINE__, __func__, Block);
++ wResult = FAIL;
++ }
++#endif
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Update_Block
++* Inputs: pointer to buffer,page address,block address
++* Outputs: PASS=0 / FAIL=1
++* Description: It updates the cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Update_Block(u8 *pData,
++ u64 old_page_addr, u64 blk_addr)
++{
++ int i, j;
++ u8 *buf = pData;
++ int wResult = PASS;
++ int wFoundInCache;
++ u64 page_addr;
++ u64 addr;
++ u64 old_blk_addr;
++ u16 page_offset;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ old_blk_addr = (u64)(old_page_addr >>
++ DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
++ page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
++ DeviceInfo.nBitsInPageDataSize);
++
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
++ page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
++ if (i != page_offset) {
++ wFoundInCache = FAIL;
++ for (j = 0; j < CACHE_ITEM_NUM; j++) {
++ addr = Cache.array[j].address;
++ addr = FTL_Get_Physical_Block_Addr(addr) +
++ GLOB_u64_Remainder(addr, 2);
++ if ((addr >= page_addr) && addr <
++ (page_addr + Cache.cache_item_size)) {
++ wFoundInCache = PASS;
++ buf = Cache.array[j].buf;
++ Cache.array[j].changed = SET;
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = j;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[j].address;
++ int_cache[ftl_cmd_cnt].cache.changed =
++ Cache.array[j].changed;
++#endif
++#endif
++ break;
++ }
++ }
++ if (FAIL == wFoundInCache) {
++ if (ERR == FTL_Cache_Read_All(g_pTempBuf,
++ page_addr)) {
++ wResult = FAIL;
++ break;
++ }
++ buf = g_pTempBuf;
++ }
++ } else {
++ buf = pData;
++ }
++
++ if (FAIL == FTL_Cache_Write_All(buf,
++ blk_addr + (page_addr - old_blk_addr))) {
++ wResult = FAIL;
++ break;
++ }
++ }
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Copy_Block
++* Inputs: source block address
++* Destination block address
++* Outputs: PASS=0 / FAIL=1
++* Description: used only for static wear leveling to move the block
++* containing static data to new blocks(more worn)
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
++{
++ int i, r1, r2, wResult = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
++ r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
++ i * DeviceInfo.wPageDataSize);
++ r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
++ i * DeviceInfo.wPageDataSize);
++ if ((ERR == r1) || (FAIL == r2)) {
++ wResult = FAIL;
++ break;
++ }
++ }
++
++ return wResult;
++}
++
++/* Search the block table to find out the least wear block and then return it */
++static u32 find_least_worn_blk_for_l2_cache(void)
++{
++ int i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 least_wear_cnt = MAX_BYTE_VALUE;
++ u32 least_wear_blk_idx = MAX_U32_VALUE;
++ u32 phy_idx;
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_SPARE_BLOCK(i)) {
++ phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
++ if (phy_idx > DeviceInfo.wSpectraEndBlock)
++ printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
++ "Too big phy block num (%d)\n", phy_idx);
++ if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
++ least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
++ least_wear_blk_idx = i;
++ }
++ }
++ }
++
++ nand_dbg_print(NAND_DBG_WARN,
++ "find_least_worn_blk_for_l2_cache: "
++ "find block %d with least worn counter (%d)\n",
++ least_wear_blk_idx, least_wear_cnt);
++
++ return least_wear_blk_idx;
++}
++
++
++
++/* Get blocks for Level2 Cache */
++static int get_l2_cache_blks(void)
++{
++ int n;
++ u32 blk;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
++ blk = find_least_worn_blk_for_l2_cache();
++ if (blk > DeviceInfo.wDataBlockNum) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "find_least_worn_blk_for_l2_cache: "
++ "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
++ return FAIL;
++ }
++ /* Tag the free block as discard in block table */
++ pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
++ /* Add the free block to the L2 Cache block array */
++ cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
++ }
++
++ return PASS;
++}
++
++static int erase_l2_cache_blocks(void)
++{
++ int i, ret = PASS;
++ u32 pblk, lblk;
++ u64 addr;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
++ pblk = cache_l2.blk_array[i];
++
++ /* If the L2 cache block is invalid, then just skip it */
++ if (MAX_U32_VALUE == pblk)
++ continue;
++
++ BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
++
++ addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ /* Get logical block number of the erased block */
++ lblk = FTL_Get_Block_Index(pblk);
++ BUG_ON(BAD_BLOCK == lblk);
++ /* Tag it as free in the block table */
++ pbt[lblk] &= (u32)(~DISCARD_BLOCK);
++ pbt[lblk] |= (u32)(SPARE_BLOCK);
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[lblk]);
++ ret = ERR;
++ }
++ }
++
++ return ret;
++}
++
++/*
++ * Merge the valid data page in the L2 cache blocks into NAND.
++*/
++static int flush_l2_cache(void)
++{
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd, *tmp_pnd;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 phy_blk, l2_blk;
++ u64 addr;
++ u16 l2_page;
++ int i, ret = PASS;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (list_empty(&cache_l2.table.list)) /* No data to flush */
++ return ret;
++
++ //dump_cache_l2_table();
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
++ IS_BAD_BLOCK(pnd->logical_blk_num) ||
++ IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
++ memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
++ } else {
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
++ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
++ ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
++ phy_blk, 0, DeviceInfo.wPagesPerBlock);
++ if (ret == FAIL) {
++ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
++ }
++ }
++
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
++ if (pnd->pages_array[i] != MAX_U32_VALUE) {
++ l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
++ l2_page = pnd->pages_array[i] & 0xffff;
++ ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
++ if (ret == FAIL) {
++ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
++ }
++ memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
++ }
++ }
++
++ /* Find a free block and tag the original block as discarded */
++ addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
++ ret = FTL_Replace_Block(addr);
++ if (ret == FAIL) {
++ printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
++ }
++
++ /* Write back the updated data into NAND */
++ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
++ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Program NAND block %d fail in %s, Line %d\n",
++ phy_blk, __FILE__, __LINE__);
++ /* This may not be really a bad block. So just tag it as discarded. */
++ /* Then it has a chance to be erased when garbage collection. */
++ /* If it is really bad, then the erase will fail and it will be marked */
++ /* as bad then. Otherwise it will be marked as free and can be used again */
++ MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
++ /* Find another free block and write it again */
++ FTL_Replace_Block(addr);
++ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
++ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
++ printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
++ "Some data will be lost!\n", phy_blk);
++ MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
++ }
++ } else {
++ /* tag the new free block as used block */
++ pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
++ }
++ }
++
++ /* Destroy the L2 Cache table and free the memory of all nodes */
++ list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
++ list_del(&pnd->list);
++ kfree(pnd);
++ }
++
++ /* Erase discard L2 cache blocks */
++ if (erase_l2_cache_blocks() != PASS)
++ nand_dbg_print(NAND_DBG_WARN,
++ " Erase L2 cache blocks error in %s, Line %d\n",
++ __FILE__, __LINE__);
++
++ /* Init the Level2 Cache data structure */
++ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
++ cache_l2.blk_array[i] = MAX_U32_VALUE;
++ cache_l2.cur_blk_idx = 0;
++ cache_l2.cur_page_num = 0;
++ INIT_LIST_HEAD(&cache_l2.table.list);
++ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
++
++ return ret;
++}
++
++/*
++ * Write back a changed victim cache item to the Level2 Cache
++ * and update the L2 Cache table to map the change.
++ * If the L2 Cache is full, then start to do the L2 Cache flush.
++*/
++static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
++{
++ u32 logical_blk_num;
++ u16 logical_page_num;
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd, *pnd_new;
++ u32 node_size;
++ int i, found;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ /*
++ * If Level2 Cache table is empty, then it means either:
++ * 1. This is the first time that the function called after FTL_init
++ * or
++ * 2. The Level2 Cache has just been flushed
++ *
++ * So, 'steal' some free blocks from NAND for L2 Cache using
++ * by just mask them as discard in the block table
++ */
++ if (list_empty(&cache_l2.table.list)) {
++ BUG_ON(cache_l2.cur_blk_idx != 0);
++ BUG_ON(cache_l2.cur_page_num!= 0);
++ BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
++ if (FAIL == get_l2_cache_blks()) {
++ GLOB_FTL_Garbage_Collection();
++ if (FAIL == get_l2_cache_blks()) {
++ printk(KERN_ALERT "Fail to get L2 cache blks!\n");
++ return FAIL;
++ }
++ }
++ }
++
++ logical_blk_num = BLK_FROM_ADDR(logical_addr);
++ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
++ BUG_ON(logical_blk_num == MAX_U32_VALUE);
++
++ /* Write the cache item data into the current position of L2 Cache */
++#if CMD_DMA
++ /*
++ * TODO
++ */
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main(buf,
++ cache_l2.blk_array[cache_l2.cur_blk_idx],
++ cache_l2.cur_page_num, 1)) {
++ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
++ "%s, Line %d, new Bad Block %d generated!\n",
++ __FILE__, __LINE__,
++ cache_l2.blk_array[cache_l2.cur_blk_idx]);
++
++ /* TODO: tag the current block as bad and try again */
++
++ return FAIL;
++ }
++#endif
++
++ /*
++ * Update the L2 Cache table.
++ *
++ * First seaching in the table to see whether the logical block
++ * has been mapped. If not, then kmalloc a new node for the
++ * logical block, fill data, and then insert it to the list.
++ * Otherwise, just update the mapped node directly.
++ */
++ found = 0;
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ if (pnd->logical_blk_num == logical_blk_num) {
++ pnd->pages_array[logical_page_num] =
++ (cache_l2.cur_blk_idx << 16) |
++ cache_l2.cur_page_num;
++ found = 1;
++ break;
++ }
++ }
++ if (!found) { /* Create new node for the logical block here */
++
++ /* The logical pages to physical pages map array is
++ * located at the end of struct spectra_l2_cache_list.
++ */
++ node_size = sizeof(struct spectra_l2_cache_list) +
++ sizeof(u32) * DeviceInfo.wPagesPerBlock;
++ pnd_new = kmalloc(node_size, GFP_ATOMIC);
++ if (!pnd_new) {
++ printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
++ __FILE__, __LINE__);
++ /*
++ * TODO: Need to flush all the L2 cache into NAND ASAP
++ * since no memory available here
++ */
++ }
++ pnd_new->logical_blk_num = logical_blk_num;
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
++ pnd_new->pages_array[i] = MAX_U32_VALUE;
++ pnd_new->pages_array[logical_page_num] =
++ (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
++ list_add(&pnd_new->list, &cache_l2.table.list);
++ }
++
++ /* Increasing the current position pointer of the L2 Cache */
++ cache_l2.cur_page_num++;
++ if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
++ cache_l2.cur_blk_idx++;
++ if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
++ /* The L2 Cache is full. Need to flush it now */
++ nand_dbg_print(NAND_DBG_WARN,
++ "L2 Cache is full, will start to flush it\n");
++ flush_l2_cache();
++ } else {
++ cache_l2.cur_page_num = 0;
++ }
++ }
++
++ return PASS;
++}
++
++/*
++ * Seach in the Level2 Cache table to find the cache item.
++ * If find, read the data from the NAND page of L2 Cache,
++ * Otherwise, return FAIL.
++ */
++static int search_l2_cache(u8 *buf, u64 logical_addr)
++{
++ u32 logical_blk_num;
++ u16 logical_page_num;
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd;
++ u32 tmp = MAX_U32_VALUE;
++ u32 phy_blk;
++ u16 phy_page;
++ int ret = FAIL;
++
++ logical_blk_num = BLK_FROM_ADDR(logical_addr);
++ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
++
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ if (pnd->logical_blk_num == logical_blk_num) {
++ tmp = pnd->pages_array[logical_page_num];
++ break;
++ }
++ }
++
++ if (tmp != MAX_U32_VALUE) { /* Found valid map */
++ phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
++ phy_page = tmp & 0xFFFF;
++#if CMD_DMA
++ /* TODO */
++#else
++ ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
++#endif
++ }
++
++ return ret;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write_Back
++* Inputs: pointer to data cached in sys memory
++* address of free block in flash
++* Outputs: PASS=0 / FAIL=1
++* Description: writes all the pages of Cache Block to flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
++{
++ int i, j, iErase;
++ u64 old_page_addr, addr, phy_addr;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 lba;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
++ GLOB_u64_Remainder(blk_addr, 2);
++
++ iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
++
++ pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
++
++#if CMD_DMA
++ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
++ DeviceInfo.nBitsInBlockDataSize);
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ for (i = 0; i < RETRY_TIMES; i++) {
++ if (PASS == iErase) {
++ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
++ if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
++ lba = BLK_FROM_ADDR(blk_addr);
++ MARK_BLOCK_AS_BAD(pbt[lba]);
++ i = RETRY_TIMES;
++ break;
++ }
++ }
++
++ for (j = 0; j < CACHE_ITEM_NUM; j++) {
++ addr = Cache.array[j].address;
++ if ((addr <= blk_addr) &&
++ ((addr + Cache.cache_item_size) > blk_addr))
++ cache_block_to_write = j;
++ }
++
++ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
++ if (PASS == FTL_Cache_Update_Block(pData,
++ old_page_addr, phy_addr)) {
++ cache_block_to_write = UNHIT_CACHE_ITEM;
++ break;
++ } else {
++ iErase = PASS;
++ }
++ }
++
++ if (i >= RETRY_TIMES) {
++ if (ERR == FTL_Flash_Error_Handle(pData,
++ old_page_addr, blk_addr))
++ return ERR;
++ else
++ return FAIL;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write_Page
++* Inputs: Pointer to buffer, page address, cache block number
++* Outputs: PASS=0 / FAIL=1
++* Description: It writes the data in Cache Block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
++ u8 cache_blk, u16 flag)
++{
++ u8 *pDest;
++ u64 addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ addr = Cache.array[cache_blk].address;
++ pDest = Cache.array[cache_blk].buf;
++
++ pDest += (unsigned long)(page_addr - addr);
++ Cache.array[cache_blk].changed = SET;
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = cache_blk;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[cache_blk].address;
++ int_cache[ftl_cmd_cnt].cache.changed =
++ Cache.array[cache_blk].changed;
++#endif
++ GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
++ ftl_cmd_cnt++;
++#else
++ memcpy(pDest, pData, DeviceInfo.wPageDataSize);
++#endif
++ if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
++ Cache.array[cache_blk].use_cnt++;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: It writes least frequently used Cache block to flash if it
++* has been changed
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Write(void)
++{
++ int i, bResult = PASS;
++ u16 bNO, least_count = 0xFFFF;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ FTL_Calculate_LRU();
++
++ bNO = Cache.LRU;
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
++ "Least used cache block is %d\n", bNO);
++
++ if (Cache.array[bNO].changed != SET)
++ return bResult;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
++ " Block %d containing logical block %d is dirty\n",
++ bNO,
++ (u32)(Cache.array[bNO].address >>
++ DeviceInfo.nBitsInBlockDataSize));
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = bNO;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[bNO].address;
++ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
++#endif
++#endif
++ bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
++ Cache.array[bNO].address);
++ if (bResult != ERR)
++ Cache.array[bNO].changed = CLEAR;
++
++ least_count = Cache.array[bNO].use_cnt;
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ if (i == bNO)
++ continue;
++ if (Cache.array[i].use_cnt > 0)
++ Cache.array[i].use_cnt -= least_count;
++ }
++
++ return bResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Read
++* Inputs: Page address
++* Outputs: PASS=0 / FAIL=1
++* Description: It reads the block from device in Cache Block
++* Set the LRU count to 1
++* Mark the Cache Block as clean
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Read(u64 logical_addr)
++{
++ u64 item_addr, phy_addr;
++ u16 num;
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ num = Cache.LRU; /* The LRU cache item will be overwritten */
++
++ item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
++ Cache.cache_item_size;
++ Cache.array[num].address = item_addr;
++ Cache.array[num].use_cnt = 1;
++ Cache.array[num].changed = CLEAR;
++
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = num;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[num].address;
++ int_cache[ftl_cmd_cnt].cache.changed =
++ Cache.array[num].changed;
++#endif
++#endif
++ /*
++ * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
++ * Otherwise, read it from NAND
++ */
++ ret = search_l2_cache(Cache.array[num].buf, logical_addr);
++ if (PASS == ret) /* Hit in L2 Cache */
++ return ret;
++
++ /* Compute the physical start address of NAND device according to */
++ /* the logical start address of the cache item (LRU cache item) */
++ phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
++ GLOB_u64_Remainder(item_addr, 2);
++
++ return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Check_Block_Table
++* Inputs: ?
++* Outputs: PASS=0 / FAIL=1
++* Description: It checks the correctness of each block table entry
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Check_Block_Table(int wOldTable)
++{
++ u32 i;
++ int wResult = PASS;
++ u32 blk_idx;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 *pFlag = flag_check_blk_table;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (NULL != pFlag) {
++ memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
++
++ /*
++ * 20081006/KBV - Changed to pFlag[i] reference
++ * to avoid buffer overflow
++ */
++
++ /*
++ * 2008-10-20 Yunpeng Note: This change avoid
++ * buffer overflow, but changed function of
++ * the code, so it should be re-write later
++ */
++ if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
++ PASS == pFlag[i]) {
++ wResult = FAIL;
++ break;
++ } else {
++ pFlag[i] = PASS;
++ }
++ }
++ }
++
++ return wResult;
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Write_Block_Table
++* Inputs: flasg
++* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
++* happen. -1 Error
++* Description: It writes the block table
++* Block table always mapped to LBA 0 which inturn mapped
++* to any physical block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Write_Block_Table(int wForce)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wSuccess = PASS;
++ u32 wTempBlockTableIndex;
++ u16 bt_pages, new_bt_offset;
++ u8 blockchangeoccured = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
++ return 0;
++
++ if (PASS == wForce) {
++ g_wBlockTableOffset =
++ (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->g_wBlockTableOffset =
++ g_wBlockTableOffset;
++ p_BTableChangesDelta->ValidFields = 0x01;
++#endif
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Inside FTL_Write_Block_Table: block %d Page:%d\n",
++ g_wBlockTableIndex, g_wBlockTableOffset);
++
++ do {
++ new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
++ if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
++ (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
++ (FAIL == wSuccess)) {
++ wTempBlockTableIndex = FTL_Replace_Block_Table();
++ if (BAD_BLOCK == wTempBlockTableIndex)
++ return ERR;
++ if (!blockchangeoccured) {
++ bt_block_changed = 1;
++ blockchangeoccured = 1;
++ }
++
++ g_wBlockTableIndex = wTempBlockTableIndex;
++ g_wBlockTableOffset = 0;
++ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->g_wBlockTableOffset =
++ g_wBlockTableOffset;
++ p_BTableChangesDelta->g_wBlockTableIndex =
++ g_wBlockTableIndex;
++ p_BTableChangesDelta->ValidFields = 0x03;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index =
++ BLOCK_TABLE_INDEX;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[BLOCK_TABLE_INDEX];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ }
++
++ wSuccess = FTL_Write_Block_Table_Data();
++ if (FAIL == wSuccess)
++ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
++ } while (FAIL == wSuccess);
++
++ g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
++
++ return 1;
++}
++
++/******************************************************************
++* Function: GLOB_FTL_Flash_Format
++* Inputs: none
++* Outputs: PASS
++* Description: The block table stores bad block info, including MDF+
++* blocks gone bad over the ages. Therefore, if we have a
++* block table in place, then use it to scan for bad blocks
++* If not, then scan for MDF.
++* Now, a block table will only be found if spectra was already
++* being used. For a fresh flash, we'll go thru scanning for
++* MDF. If spectra was being used, then there is a chance that
++* the MDF has been corrupted. Spectra avoids writing to the
++* first 2 bytes of the spare area to all pages in a block. This
++* covers all known flash devices. However, since flash
++* manufacturers have no standard of where the MDF is stored,
++* this cannot guarantee that the MDF is protected for future
++* devices too. The initial scanning for the block table assures
++* this. It is ok even if the block table is outdated, as all
++* we're looking for are bad block markers.
++* Use this when mounting a file system or starting a
++* new flash.
++*
++*********************************************************************/
++static int FTL_Format_Flash(u8 valid_block_table)
++{
++ u32 i, j;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 tempNode;
++ int ret;
++
++#if CMD_DMA
++ u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
++ if (ftl_cmd_cnt)
++ return FAIL;
++#endif
++
++ if (FAIL == FTL_Check_Block_Table(FAIL))
++ valid_block_table = 0;
++
++ if (valid_block_table) {
++ u8 switched = 1;
++ u32 block, k;
++
++ k = DeviceInfo.wSpectraStartBlock;
++ while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
++ switched = 0;
++ k++;
++ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
++ j <= DeviceInfo.wSpectraEndBlock;
++ j++, i++) {
++ block = (pbt[i] & ~BAD_BLOCK) -
++ DeviceInfo.wSpectraStartBlock;
++ if (block != i) {
++ switched = 1;
++ tempNode = pbt[i];
++ pbt[i] = pbt[block];
++ pbt[block] = tempNode;
++ }
++ }
++ }
++ if ((k == DeviceInfo.wSpectraEndBlock) && switched)
++ valid_block_table = 0;
++ }
++
++ if (!valid_block_table) {
++ memset(g_pBlockTable, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memset(g_pWearCounter, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memset(g_pReadCounter, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++#if CMD_DMA
++ memset(g_pBTStartingCopy, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memset(g_pWearCounterCopy, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memset(g_pReadCounterCopy, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++#endif
++ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
++ j <= DeviceInfo.wSpectraEndBlock;
++ j++, i++) {
++ if (GLOB_LLD_Get_Bad_Block((u32)j))
++ pbt[i] = (u32)(BAD_BLOCK | j);
++ }
++ }
++
++ nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
++
++ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
++ j <= DeviceInfo.wSpectraEndBlock;
++ j++, i++) {
++ if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
++ ret = GLOB_LLD_Erase_Block(j);
++ if (FAIL == ret) {
++ pbt[i] = (u32)(j);
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, (int)j);
++ } else {
++ pbt[i] = (u32)(SPARE_BLOCK | j);
++ }
++ }
++#if CMD_DMA
++ pbtStartingCopy[i] = pbt[i];
++#endif
++ }
++
++ g_wBlockTableOffset = 0;
++ for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock))
++ && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
++ ;
++ if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
++ printk(KERN_ERR "All blocks bad!\n");
++ return FAIL;
++ } else {
++ g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
++ if (i != BLOCK_TABLE_INDEX) {
++ tempNode = pbt[i];
++ pbt[i] = pbt[BLOCK_TABLE_INDEX];
++ pbt[BLOCK_TABLE_INDEX] = tempNode;
++ }
++ }
++ pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
++
++#if CMD_DMA
++ pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
++#endif
++
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ memset(g_pBTBlocks, 0xFF,
++ (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
++ g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
++ FTL_Write_Block_Table(FAIL);
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
++ Cache.array[i].use_cnt = 0;
++ Cache.array[i].changed = CLEAR;
++ }
++
++#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
++ memcpy((void *)&cache_start_copy, (void *)&Cache,
++ sizeof(struct flash_cache_tag));
++#endif
++ return PASS;
++}
++
++static int force_format_nand(void)
++{
++ u32 i;
++
++ /* Force erase the whole unprotected physical partiton of NAND */
++ printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
++ printk(KERN_ALERT "From phyical block %d to %d\n",
++ DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
++ for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
++ if (GLOB_LLD_Erase_Block(i))
++ printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
++ }
++ printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
++ while(1);
++
++ return PASS;
++}
++
++int GLOB_FTL_Flash_Format(void)
++{
++ //return FTL_Format_Flash(1);
++ return force_format_nand();
++
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Search_Block_Table_IN_Block
++* Inputs: Block Number
++* Pointer to page
++* Outputs: PASS / FAIL
++* Page contatining the block table
++* Description: It searches the block table in the block
++* passed as an argument.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
++ u8 BT_Tag, u16 *Page)
++{
++ u16 i, j, k;
++ u16 Result = PASS;
++ u16 Last_IPF = 0;
++ u8 BT_Found = 0;
++ u8 *tagarray;
++ u8 *tempbuf = tmp_buf_search_bt_in_block;
++ u8 *pSpareBuf = spare_buf_search_bt_in_block;
++ u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
++ u8 bt_flag_last_page = 0xFF;
++ u8 search_in_previous_pages = 0;
++ u16 bt_pages;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Searching block table in %u block\n",
++ (unsigned int)BT_Block);
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
++ i += (bt_pages + 1)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Searching last IPF: %d\n", i);
++ Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
++ BT_Block, i, 1);
++
++ if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
++ if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
++ continue;
++ } else {
++ search_in_previous_pages = 1;
++ Last_IPF = i;
++ }
++ }
++
++ if (!search_in_previous_pages) {
++ if (i != bt_pages) {
++ i -= (bt_pages + 1);
++ Last_IPF = i;
++ }
++ }
++
++ if (0 == Last_IPF)
++ break;
++
++ if (!search_in_previous_pages) {
++ i = i + 1;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i);
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
++ BT_Block, i, 1);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i + bt_pages - 1);
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
++ BT_Block, i + bt_pages - 1, 1);
++
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(
++ pSpareBufBTLastPage, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag_last_page = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ if (bt_flag == bt_flag_last_page) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is found"
++ " in page after IPF "
++ "at block %d "
++ "page %d\n",
++ (int)BT_Block, i);
++ BT_Found = 1;
++ *Page = i;
++ g_cBlockTableStatus =
++ CURRENT_BLOCK_TABLE;
++ break;
++ } else {
++ Result = FAIL;
++ }
++ }
++ }
++ }
++
++ if (search_in_previous_pages)
++ i = i - bt_pages;
++ else
++ i = i - (bt_pages + 1);
++
++ Result = PASS;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %d Page %d",
++ (int)BT_Block, i);
++
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i + bt_pages - 1);
++
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
++ BT_Block, i + bt_pages - 1, 1);
++
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
++ &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j) {
++ bt_flag_last_page = tagarray[k];
++ } else {
++ Result = FAIL;
++ break;
++ }
++
++ if (Result == PASS) {
++ if (bt_flag == bt_flag_last_page) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is found "
++ "in page prior to IPF "
++ "at block %u page %d\n",
++ (unsigned int)BT_Block, i);
++ BT_Found = 1;
++ *Page = i;
++ g_cBlockTableStatus =
++ IN_PROGRESS_BLOCK_TABLE;
++ break;
++ } else {
++ Result = FAIL;
++ break;
++ }
++ }
++ }
++ }
++
++ if (Result == FAIL) {
++ if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
++ BT_Found = 1;
++ *Page = i - (bt_pages + 1);
++ }
++ if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
++ goto func_return;
++ }
++
++ if (Last_IPF == 0) {
++ i = 0;
++ Result = PASS;
++ nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
++ "Block %u Page %u", (unsigned int)BT_Block, i);
++
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i + bt_pages - 1);
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
++ BT_Block, i + bt_pages - 1, 1);
++
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
++ &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag_last_page = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ if (bt_flag == bt_flag_last_page) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is found "
++ "in page after IPF at "
++ "block %u page %u\n",
++ (unsigned int)BT_Block,
++ (unsigned int)i);
++ BT_Found = 1;
++ *Page = i;
++ g_cBlockTableStatus =
++ CURRENT_BLOCK_TABLE;
++ goto func_return;
++ } else {
++ Result = FAIL;
++ }
++ }
++ }
++
++ if (Result == FAIL)
++ goto func_return;
++ }
++func_return:
++ return Result;
++}
++
++u8 *get_blk_table_start_addr(void)
++{
++ return g_pBlockTable;
++}
++
++unsigned long get_blk_table_len(void)
++{
++ return DeviceInfo.wDataBlockNum * sizeof(u32);
++}
++
++u8 *get_wear_leveling_table_start_addr(void)
++{
++ return g_pWearCounter;
++}
++
++unsigned long get_wear_leveling_table_len(void)
++{
++ return DeviceInfo.wDataBlockNum * sizeof(u8);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Read_Block_Table
++* Inputs: none
++* Outputs: PASS / FAIL
++* Description: read the flash spare area and find a block containing the
++* most recent block table(having largest block_table_counter).
++* Find the last written Block table in this block.
++* Check the correctness of Block Table
++* If CDMA is enabled, this function is called in
++* polling mode.
++* We don't need to store changes in Block table in this
++* function as it is called only at initialization
++*
++* Note: Currently this function is called at initialization
++* before any read/erase/write command issued to flash so,
++* there is no need to wait for CDMA list to complete as of now
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Read_Block_Table(void)
++{
++ u16 i = 0;
++ int k, j;
++ u8 *tempBuf, *tagarray;
++ int wResult = FAIL;
++ int status = FAIL;
++ u8 block_table_found = 0;
++ int search_result;
++ u32 Block;
++ u16 Page = 0;
++ u16 PageCount;
++ u16 bt_pages;
++ int wBytesCopied = 0, tempvar;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ tempBuf = tmp_buf1_read_blk_table;
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ for (j = DeviceInfo.wSpectraStartBlock;
++ j <= (int)DeviceInfo.wSpectraEndBlock;
++ j++) {
++ status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
++ k = 0;
++ i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
++ if (i) {
++ status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
++ j, 0, 1);
++ for (; k < i; k++) {
++ if (tagarray[k] == tempBuf[3])
++ break;
++ }
++ }
++
++ if (k < i)
++ k = tagarray[k];
++ else
++ continue;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is contained in Block %d %d\n",
++ (unsigned int)j, (unsigned int)k);
++
++ if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
++ g_pBTBlocks[k-FIRST_BT_ID] = j;
++ block_table_found = 1;
++ } else {
++ printk(KERN_ERR "FTL_Read_Block_Table -"
++ "This should never happens. "
++ "Two block table have same counter %u!\n", k);
++ }
++ }
++
++ if (block_table_found) {
++ if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
++ g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
++ j = LAST_BT_ID;
++ while ((j > FIRST_BT_ID) &&
++ (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
++ j--;
++ if (j == FIRST_BT_ID) {
++ j = LAST_BT_ID;
++ last_erased = LAST_BT_ID;
++ } else {
++ last_erased = (u8)j + 1;
++ while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
++ g_pBTBlocks[j - FIRST_BT_ID]))
++ j--;
++ }
++ } else {
++ j = FIRST_BT_ID;
++ while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
++ j++;
++ last_erased = (u8)j;
++ while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
++ g_pBTBlocks[j - FIRST_BT_ID]))
++ j++;
++ if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
++ j--;
++ }
++
++ if (last_erased > j)
++ j += (1 + LAST_BT_ID - FIRST_BT_ID);
++
++ for (; (j >= last_erased) && (FAIL == wResult); j--) {
++ i = (j - FIRST_BT_ID) %
++ (1 + LAST_BT_ID - FIRST_BT_ID);
++ search_result =
++ FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
++ i + FIRST_BT_ID, &Page);
++ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
++ block_table_found = 0;
++
++ while ((search_result == PASS) && (FAIL == wResult)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FTL_Read_Block_Table:"
++ "Block: %u Page: %u "
++ "contains block table\n",
++ (unsigned int)g_pBTBlocks[i],
++ (unsigned int)Page);
++
++ tempBuf = tmp_buf2_read_blk_table;
++
++ for (k = 0; k < bt_pages; k++) {
++ Block = g_pBTBlocks[i];
++ PageCount = 1;
++
++ status =
++ GLOB_LLD_Read_Page_Main_Polling(
++ tempBuf, Block, Page, PageCount);
++
++ tempvar = k ? 0 : 4;
++
++ wBytesCopied +=
++ FTL_Copy_Block_Table_From_Flash(
++ tempBuf + tempvar,
++ DeviceInfo.wPageDataSize - tempvar,
++ wBytesCopied);
++
++ Page++;
++ }
++
++ wResult = FTL_Check_Block_Table(FAIL);
++ if (FAIL == wResult) {
++ block_table_found = 0;
++ if (Page > bt_pages)
++ Page -= ((bt_pages<<1) + 1);
++ else
++ search_result = FAIL;
++ }
++ }
++ }
++ }
++
++ if (PASS == wResult) {
++ if (!block_table_found)
++ FTL_Execute_SPL_Recovery();
++
++ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
++ g_wBlockTableOffset = (u16)Page + 1;
++ else
++ g_wBlockTableOffset = (u16)Page - bt_pages;
++
++ g_wBlockTableIndex = (u32)g_pBTBlocks[i];
++
++#if CMD_DMA
++ if (DeviceInfo.MLCDevice)
++ memcpy(g_pBTStartingCopy, g_pBlockTable,
++ DeviceInfo.wDataBlockNum * sizeof(u32)
++ + DeviceInfo.wDataBlockNum * sizeof(u8)
++ + DeviceInfo.wDataBlockNum * sizeof(u16));
++ else
++ memcpy(g_pBTStartingCopy, g_pBlockTable,
++ DeviceInfo.wDataBlockNum * sizeof(u32)
++ + DeviceInfo.wDataBlockNum * sizeof(u8));
++#endif
++ }
++
++ if (FAIL == wResult)
++ printk(KERN_ERR "Yunpeng - "
++ "Can not find valid spectra block table!\n");
++
++#if AUTO_FORMAT_FLASH
++ if (FAIL == wResult) {
++ nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
++ wResult = FTL_Format_Flash(0);
++ }
++#endif
++
++ return wResult;
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Flash_Error_Handle
++* Inputs: Pointer to data
++* Page address
++* Block address
++* Outputs: PASS=0 / FAIL=1
++* Description: It handles any error occured during Spectra operation
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
++ u64 blk_addr)
++{
++ u32 i;
++ int j;
++ u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
++ u64 phy_addr;
++ int wErase = FAIL;
++ int wResult = FAIL;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (ERR == GLOB_FTL_Garbage_Collection())
++ return ERR;
++
++ do {
++ for (i = DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock;
++ i > 0; i--) {
++ if (IS_SPARE_BLOCK(i)) {
++ tmp_node = (u32)(BAD_BLOCK |
++ pbt[blk_node]);
++ pbt[blk_node] = (u32)(pbt[i] &
++ (~SPARE_BLOCK));
++ pbt[i] = tmp_node;
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index =
++ blk_node;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[blk_node];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = i;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ wResult = PASS;
++ break;
++ }
++ }
++
++ if (FAIL == wResult) {
++ if (FAIL == GLOB_FTL_Garbage_Collection())
++ break;
++ else
++ continue;
++ }
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
++
++ for (j = 0; j < RETRY_TIMES; j++) {
++ if (PASS == wErase) {
++ if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
++ MARK_BLOCK_AS_BAD(pbt[blk_node]);
++ break;
++ }
++ }
++ if (PASS == FTL_Cache_Update_Block(pData,
++ old_page_addr,
++ phy_addr)) {
++ wResult = PASS;
++ break;
++ } else {
++ wResult = FAIL;
++ wErase = PASS;
++ }
++ }
++ } while (FAIL == wResult);
++
++ FTL_Write_Block_Table(FAIL);
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Get_Page_Num
++* Inputs: Size in bytes
++* Outputs: Size in pages
++* Description: It calculates the pages required for the length passed
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Get_Page_Num(u64 length)
++{
++ return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
++ (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Get_Physical_Block_Addr
++* Inputs: Block Address (byte format)
++* Outputs: Physical address of the block.
++* Description: It translates LBA to PBA by returning address stored
++* at the LBA location in the block table
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
++{
++ u32 *pbt;
++ u64 physical_addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ pbt = (u32 *)g_pBlockTable;
++ physical_addr = (u64) DeviceInfo.wBlockDataSize *
++ (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
++
++ return physical_addr;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Get_Block_Index
++* Inputs: Physical Block no.
++* Outputs: Logical block no. /BAD_BLOCK
++* Description: It returns the logical block no. for the PBA passed
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Get_Block_Index(u32 wBlockNum)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
++ if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
++ return i;
++
++ return BAD_BLOCK;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Wear_Leveling
++* Inputs: none
++* Outputs: PASS=0
++* Description: This is static wear leveling (done by explicit call)
++* do complete static wear leveling
++* do complete garbage collection
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Wear_Leveling(void)
++{
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ FTL_Static_Wear_Leveling();
++ GLOB_FTL_Garbage_Collection();
++
++ return PASS;
++}
++
++static void find_least_most_worn(u8 *chg,
++ u32 *least_idx, u8 *least_cnt,
++ u32 *most_idx, u8 *most_cnt)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 idx;
++ u8 cnt;
++ int i;
++
++ for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_BAD_BLOCK(i) || PASS == chg[i])
++ continue;
++
++ idx = (u32) ((~BAD_BLOCK) & pbt[i]);
++ cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
++
++ if (IS_SPARE_BLOCK(i)) {
++ if (cnt > *most_cnt) {
++ *most_cnt = cnt;
++ *most_idx = idx;
++ }
++ }
++
++ if (IS_DATA_BLOCK(i)) {
++ if (cnt < *least_cnt) {
++ *least_cnt = cnt;
++ *least_idx = idx;
++ }
++ }
++
++ if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
++ debug_boundary_error(*most_idx,
++ DeviceInfo.wDataBlockNum, 0);
++ debug_boundary_error(*least_idx,
++ DeviceInfo.wDataBlockNum, 0);
++ continue;
++ }
++ }
++}
++
++static int move_blks_for_wear_leveling(u8 *chg,
++ u32 *least_idx, u32 *rep_blk_num, int *result)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 rep_blk;
++ int j, ret_cp_blk, ret_erase;
++ int ret = PASS;
++
++ chg[*least_idx] = PASS;
++ debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
++
++ rep_blk = FTL_Replace_MWBlock();
++ if (rep_blk != BAD_BLOCK) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "More than two spare blocks exist so do it\n");
++ nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
++ rep_blk);
++
++ chg[rep_blk] = PASS;
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ for (j = 0; j < RETRY_TIMES; j++) {
++ ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
++ DeviceInfo.wBlockDataSize,
++ (u64)rep_blk * DeviceInfo.wBlockDataSize);
++ if (FAIL == ret_cp_blk) {
++ ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
++ * DeviceInfo.wBlockDataSize);
++ if (FAIL == ret_erase)
++ MARK_BLOCK_AS_BAD(pbt[rep_blk]);
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FTL_Copy_Block == OK\n");
++ break;
++ }
++ }
++
++ if (j < RETRY_TIMES) {
++ u32 tmp;
++ u32 old_idx = FTL_Get_Block_Index(*least_idx);
++ u32 rep_idx = FTL_Get_Block_Index(rep_blk);
++ tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
++ pbt[old_idx] = (u32)((~SPARE_BLOCK) &
++ pbt[rep_idx]);
++ pbt[rep_idx] = tmp;
++#if CMD_DMA
++ p_BTableChangesDelta = (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = old_idx;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta = (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = rep_idx;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ } else {
++ pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
++#if CMD_DMA
++ p_BTableChangesDelta = (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index =
++ FTL_Get_Block_Index(rep_blk);
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[FTL_Get_Block_Index(rep_blk)];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ *result = FAIL;
++ ret = FAIL;
++ }
++
++ if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
++ ret = FAIL;
++ } else {
++ printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
++ ret = FAIL;
++ }
++
++ return ret;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Static_Wear_Leveling
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: This is static wear leveling (done by explicit call)
++* search for most&least used
++* if difference < GATE:
++* update the block table with exhange
++* mark block table in flash as IN_PROGRESS
++* copy flash block
++* the caller should handle GC clean up after calling this function
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int FTL_Static_Wear_Leveling(void)
++{
++ u8 most_worn_cnt;
++ u8 least_worn_cnt;
++ u32 most_worn_idx;
++ u32 least_worn_idx;
++ int result = PASS;
++ int go_on = PASS;
++ u32 replaced_blks = 0;
++ u8 *chang_flag = flags_static_wear_leveling;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (!chang_flag)
++ return FAIL;
++
++ memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
++ while (go_on == PASS) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "starting static wear leveling\n");
++ most_worn_cnt = 0;
++ least_worn_cnt = 0xFF;
++ least_worn_idx = BLOCK_TABLE_INDEX;
++ most_worn_idx = BLOCK_TABLE_INDEX;
++
++ find_least_most_worn(chang_flag, &least_worn_idx,
++ &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Used and least worn is block %u, whos count is %u\n",
++ (unsigned int)least_worn_idx,
++ (unsigned int)least_worn_cnt);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Free and most worn is block %u, whos count is %u\n",
++ (unsigned int)most_worn_idx,
++ (unsigned int)most_worn_cnt);
++
++ if ((most_worn_cnt > least_worn_cnt) &&
++ (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
++ go_on = move_blks_for_wear_leveling(chang_flag,
++ &least_worn_idx, &replaced_blks, &result);
++ else
++ go_on = FAIL;
++ }
++
++ return result;
++}
++
++#if CMD_DMA
++static int do_garbage_collection(u32 discard_cnt)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 pba;
++ u8 bt_block_erased = 0;
++ int i, cnt, ret = FAIL;
++ u64 addr;
++
++ i = 0;
++ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
++ ((ftl_cmd_cnt + 28) < 256)) {
++ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[i] & DISCARD_BLOCK)) {
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++ pba = BLK_FROM_ADDR(addr);
++
++ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
++ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GC will erase BT block %u\n",
++ (unsigned int)pba);
++ discard_cnt--;
++ i++;
++ bt_block_erased = 1;
++ break;
++ }
++ }
++
++ if (bt_block_erased) {
++ bt_block_erased = 0;
++ continue;
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[i] &= (u32)(~DISCARD_BLOCK);
++ pbt[i] |= (u32)(SPARE_BLOCK);
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt - 1;
++ p_BTableChangesDelta->BT_Index = i;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++ discard_cnt--;
++ ret = PASS;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ }
++ }
++
++ i++;
++ }
++
++ return ret;
++}
++
++#else
++static int do_garbage_collection(u32 discard_cnt)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 pba;
++ u8 bt_block_erased = 0;
++ int i, cnt, ret = FAIL;
++ u64 addr;
++
++ i = 0;
++ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
++ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[i] & DISCARD_BLOCK)) {
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++ pba = BLK_FROM_ADDR(addr);
++
++ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
++ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GC will erase BT block %d\n",
++ pba);
++ discard_cnt--;
++ i++;
++ bt_block_erased = 1;
++ break;
++ }
++ }
++
++ if (bt_block_erased) {
++ bt_block_erased = 0;
++ continue;
++ }
++
++ /* If the discard block is L2 cache block, then just skip it */
++ for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
++ if (cache_l2.blk_array[cnt] == pba) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GC will erase L2 cache blk %d\n",
++ pba);
++ break;
++ }
++ }
++ if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
++ discard_cnt--;
++ i++;
++ continue;
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[i] &= (u32)(~DISCARD_BLOCK);
++ pbt[i] |= (u32)(SPARE_BLOCK);
++ discard_cnt--;
++ ret = PASS;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ }
++ }
++
++ i++;
++ }
++
++ return ret;
++}
++#endif
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Garbage_Collection
++* Inputs: none
++* Outputs: PASS / FAIL (returns the number of un-erased blocks
++* Description: search the block table for all discarded blocks to erase
++* for each discarded block:
++* set the flash block to IN_PROGRESS
++* erase the block
++* update the block table
++* write the block table to flash
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Garbage_Collection(void)
++{
++ u32 i;
++ u32 wDiscard = 0;
++ int wResult = FAIL;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (GC_Called) {
++ printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
++ "has been re-entered! Exit.\n");
++ return PASS;
++ }
++
++ GC_Called = 1;
++
++ GLOB_FTL_BT_Garbage_Collection();
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_DISCARDED_BLOCK(i))
++ wDiscard++;
++ }
++
++ if (wDiscard <= 0) {
++ GC_Called = 0;
++ return wResult;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Found %d discarded blocks\n", wDiscard);
++
++ FTL_Write_Block_Table(FAIL);
++
++ wResult = do_garbage_collection(wDiscard);
++
++ FTL_Write_Block_Table(FAIL);
++
++ GC_Called = 0;
++
++ return wResult;
++}
++
++
++#if CMD_DMA
++static int do_bt_garbage_collection(void)
++{
++ u32 pba, lba;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
++ u64 addr;
++ int i, ret = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (BT_GC_Called)
++ return PASS;
++
++ BT_GC_Called = 1;
++
++ for (i = last_erased; (i <= LAST_BT_ID) &&
++ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
++ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
++ ((ftl_cmd_cnt + 28)) < 256; i++) {
++ pba = pBTBlocksNode[i - FIRST_BT_ID];
++ lba = FTL_Get_Block_Index(pba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection: pba %d, lba %d\n",
++ pba, lba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block Table Entry: %d", pbt[lba]);
++
++ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[lba] & DISCARD_BLOCK)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection_cdma: "
++ "Erasing Block tables present in block %d\n",
++ pba);
++ addr = FTL_Get_Physical_Block_Addr((u64)lba *
++ DeviceInfo.wBlockDataSize);
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[lba] &= (u32)(~DISCARD_BLOCK);
++ pbt[lba] |= (u32)(SPARE_BLOCK);
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt - 1;
++ p_BTableChangesDelta->BT_Index = lba;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[lba];
++
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ ret = PASS;
++ pBTBlocksNode[last_erased - FIRST_BT_ID] =
++ BTBLOCK_INVAL;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "resetting bt entry at index %d "
++ "value %d\n", i,
++ pBTBlocksNode[i - FIRST_BT_ID]);
++ if (last_erased == LAST_BT_ID)
++ last_erased = FIRST_BT_ID;
++ else
++ last_erased++;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[lba]);
++ }
++ }
++ }
++
++ BT_GC_Called = 0;
++
++ return ret;
++}
++
++#else
++static int do_bt_garbage_collection(void)
++{
++ u32 pba, lba;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
++ u64 addr;
++ int i, ret = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (BT_GC_Called)
++ return PASS;
++
++ BT_GC_Called = 1;
++
++ for (i = last_erased; (i <= LAST_BT_ID) &&
++ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
++ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
++ pba = pBTBlocksNode[i - FIRST_BT_ID];
++ lba = FTL_Get_Block_Index(pba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
++ pba, lba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block Table Entry: %d", pbt[lba]);
++
++ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[lba] & DISCARD_BLOCK)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection: "
++ "Erasing Block tables present in block %d\n",
++ pba);
++ addr = FTL_Get_Physical_Block_Addr((u64)lba *
++ DeviceInfo.wBlockDataSize);
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[lba] &= (u32)(~DISCARD_BLOCK);
++ pbt[lba] |= (u32)(SPARE_BLOCK);
++ ret = PASS;
++ pBTBlocksNode[last_erased - FIRST_BT_ID] =
++ BTBLOCK_INVAL;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "resetting bt entry at index %d "
++ "value %d\n", i,
++ pBTBlocksNode[i - FIRST_BT_ID]);
++ if (last_erased == LAST_BT_ID)
++ last_erased = FIRST_BT_ID;
++ else
++ last_erased++;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[lba]);
++ }
++ }
++ }
++
++ BT_GC_Called = 0;
++
++ return ret;
++}
++
++#endif
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_BT_Garbage_Collection
++* Inputs: none
++* Outputs: PASS / FAIL (returns the number of un-erased blocks
++* Description: Erases discarded blocks containing Block table
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_BT_Garbage_Collection(void)
++{
++ return do_bt_garbage_collection();
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_OneBlock
++* Inputs: Block number 1
++* Block number 2
++* Outputs: Replaced Block Number
++* Description: Interchange block table entries at wBlockNum and wReplaceNum
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
++{
++ u32 tmp_blk;
++ u32 replace_node = BAD_BLOCK;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (rep_blk != BAD_BLOCK) {
++ if (IS_BAD_BLOCK(blk))
++ tmp_blk = pbt[blk];
++ else
++ tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
++
++ replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
++ pbt[blk] = replace_node;
++ pbt[rep_blk] = tmp_blk;
++
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = blk;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
++
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = rep_blk;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ }
++
++ return replace_node;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Write_Block_Table_Data
++* Inputs: Block table size in pages
++* Outputs: PASS=0 / FAIL=1
++* Description: Write block table data in flash
++* If first page and last page
++* Write data+BT flag
++* else
++* Write data
++* BT flag is a counter. Its value is incremented for block table
++* write in a new Block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Write_Block_Table_Data(void)
++{
++ u64 dwBlockTableAddr, pTempAddr;
++ u32 Block;
++ u16 Page, PageCount;
++ u8 *tempBuf = tmp_buf_write_blk_table_data;
++ int wBytesCopied;
++ u16 bt_pages;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dwBlockTableAddr =
++ (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
++ (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
++ pTempAddr = dwBlockTableAddr;
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
++ "page= %d BlockTableIndex= %d "
++ "BlockTableOffset=%d\n", bt_pages,
++ g_wBlockTableIndex, g_wBlockTableOffset);
++
++ Block = BLK_FROM_ADDR(pTempAddr);
++ Page = PAGE_FROM_ADDR(pTempAddr, Block);
++ PageCount = 1;
++
++ if (bt_block_changed) {
++ if (bt_flag == LAST_BT_ID) {
++ bt_flag = FIRST_BT_ID;
++ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
++ } else if (bt_flag < LAST_BT_ID) {
++ bt_flag++;
++ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
++ }
++
++ if ((bt_flag > (LAST_BT_ID-4)) &&
++ g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
++ BTBLOCK_INVAL) {
++ bt_block_changed = 0;
++ GLOB_FTL_BT_Garbage_Collection();
++ }
++
++ bt_block_changed = 0;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block Table Counter is %u Block %u\n",
++ bt_flag, (unsigned int)Block);
++ }
++
++ memset(tempBuf, 0, 3);
++ tempBuf[3] = bt_flag;
++ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
++ DeviceInfo.wPageDataSize - 4, 0);
++ memset(&tempBuf[wBytesCopied + 4], 0xff,
++ DeviceInfo.wPageSize - (wBytesCopied + 4));
++ FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
++ bt_flag);
++
++#if CMD_DMA
++ memcpy(g_pNextBlockTable, tempBuf,
++ DeviceInfo.wPageSize * sizeof(u8));
++ nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
++ "Block %u Page %u\n", (unsigned int)Block, Page);
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
++ Block, Page, 1,
++ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
++ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
++ "%s, Line %d, Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++
++ ftl_cmd_cnt++;
++ g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++#endif
++
++ if (bt_pages > 1) {
++ PageCount = bt_pages - 1;
++ if (PageCount > 1) {
++ wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
++ DeviceInfo.wPageDataSize * (PageCount - 1),
++ wBytesCopied);
++
++#if CMD_DMA
++ memcpy(g_pNextBlockTable, tempBuf,
++ (PageCount - 1) * DeviceInfo.wPageDataSize);
++ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
++ g_pNextBlockTable, Block, Page + 1,
++ PageCount - 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)Block);
++ goto func_return;
++ }
++
++ ftl_cmd_cnt++;
++ g_pNextBlockTable += (PageCount - 1) *
++ DeviceInfo.wPageDataSize * sizeof(u8);
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
++ Block, Page + 1, PageCount - 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)Block);
++ goto func_return;
++ }
++#endif
++ }
++
++ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
++ DeviceInfo.wPageDataSize, wBytesCopied);
++ memset(&tempBuf[wBytesCopied], 0xff,
++ DeviceInfo.wPageSize-wBytesCopied);
++ FTL_Insert_Block_Table_Signature(
++ &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
++#if CMD_DMA
++ memcpy(g_pNextBlockTable, tempBuf,
++ DeviceInfo.wPageSize * sizeof(u8));
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Writing the last Page of Block Table "
++ "Block %u Page %u\n",
++ (unsigned int)Block, Page + bt_pages - 1);
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
++ g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
++ LLD_CMD_FLAG_MODE_CDMA |
++ LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++ ftl_cmd_cnt++;
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
++ Block, Page+bt_pages - 1, 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++#endif
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
++
++func_return:
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_Block_Table
++* Inputs: None
++* Outputs: PASS=0 / FAIL=1
++* Description: Get a new block to write block table
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_Block_Table(void)
++{
++ u32 blk;
++ int gc;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
++
++ if ((BAD_BLOCK == blk) && (PASS == gc)) {
++ GLOB_FTL_Garbage_Collection();
++ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
++ }
++ if (BAD_BLOCK == blk)
++ printk(KERN_ERR "%s, %s: There is no spare block. "
++ "It should never happen\n",
++ __FILE__, __func__);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
++
++ return blk;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_LWBlock
++* Inputs: Block number
++* Pointer to Garbage Collect flag
++* Outputs:
++* Description: Determine the least weared block by traversing
++* block table
++* Set Garbage collection to be called if number of spare
++* block is less than Free Block Gate count
++* Change Block table entry to map least worn block for current
++* operation
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
++{
++ u32 i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 wLeastWornCounter = 0xFF;
++ u32 wLeastWornIndex = BAD_BLOCK;
++ u32 wSpareBlockNum = 0;
++ u32 wDiscardBlockNum = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (IS_SPARE_BLOCK(wBlockNum)) {
++ *pGarbageCollect = FAIL;
++ pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
++ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ return pbt[wBlockNum];
++ }
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_DISCARDED_BLOCK(i))
++ wDiscardBlockNum++;
++
++ if (IS_SPARE_BLOCK(i)) {
++ u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
++ if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
++ printk(KERN_ERR "FTL_Replace_LWBlock: "
++ "This should never occur!\n");
++ if (g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock] <
++ wLeastWornCounter) {
++ wLeastWornCounter =
++ g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock];
++ wLeastWornIndex = i;
++ }
++ wSpareBlockNum++;
++ }
++ }
++
++ nand_dbg_print(NAND_DBG_WARN,
++ "FTL_Replace_LWBlock: Least Worn Counter %d\n",
++ (int)wLeastWornCounter);
++
++ if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
++ (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
++ *pGarbageCollect = PASS;
++ else
++ *pGarbageCollect = FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
++ " Blocks %u\n",
++ (unsigned int)wDiscardBlockNum,
++ (unsigned int)wSpareBlockNum);
++
++ return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_MWBlock
++* Inputs: None
++* Outputs: most worn spare block no./BAD_BLOCK
++* Description: It finds most worn spare block.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_MWBlock(void)
++{
++ u32 i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 wMostWornCounter = 0;
++ u32 wMostWornIndex = BAD_BLOCK;
++ u32 wSpareBlockNum = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_SPARE_BLOCK(i)) {
++ u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
++ if (g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock] >
++ wMostWornCounter) {
++ wMostWornCounter =
++ g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock];
++ wMostWornIndex = wPhysicalIndex;
++ }
++ wSpareBlockNum++;
++ }
++ }
++
++ if (wSpareBlockNum <= 2)
++ return BAD_BLOCK;
++
++ return wMostWornIndex;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_Block
++* Inputs: Block Address
++* Outputs: PASS=0 / FAIL=1
++* Description: If block specified by blk_addr parameter is not free,
++* replace it with the least worn block.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Replace_Block(u64 blk_addr)
++{
++ u32 current_blk = BLK_FROM_ADDR(blk_addr);
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wResult = PASS;
++ int GarbageCollect = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (IS_SPARE_BLOCK(current_blk)) {
++ pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = current_blk;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
++ p_BTableChangesDelta->ValidFields = 0x0C ;
++#endif
++ return wResult;
++ }
++
++ FTL_Replace_LWBlock(current_blk, &GarbageCollect);
++
++ if (PASS == GarbageCollect)
++ wResult = GLOB_FTL_Garbage_Collection();
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Is_BadBlock
++* Inputs: block number to test
++* Outputs: PASS (block is BAD) / FAIL (block is not bad)
++* Description: test if this block number is flagged as bad
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (wBlockNum >= DeviceInfo.wSpectraStartBlock
++ && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
++ return PASS;
++ else
++ return FAIL;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Flush_Cache
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: flush all the cache blocks to flash
++* if a cache block is not dirty, don't do anything with it
++* else, write the block and update the block table
++* Note: This function should be called at shutdown/power down.
++* to write important data into device
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Flush_Cache(void)
++{
++ int i, ret;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ if (SET == Cache.array[i].changed) {
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = i;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[i].address;
++ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
++#endif
++#endif
++ ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
++ if (PASS == ret) {
++ Cache.array[i].changed = CLEAR;
++ } else {
++ printk(KERN_ALERT "Failed when write back to L2 cache!\n");
++ /* TODO - How to handle this? */
++ }
++ }
++ }
++
++ flush_l2_cache();
++
++ return FTL_Write_Block_Table(FAIL);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Page_Read
++* Inputs: pointer to data
++* logical address of data (u64 is LBA * Bytes/Page)
++* Outputs: PASS=0 / FAIL=1
++* Description: reads a page of data into RAM from the cache
++* if the data is not already in cache, read from flash to cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
++{
++ u16 cache_item;
++ int res = PASS;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
++ "page_addr: %llu\n", logical_addr);
++
++ cache_item = FTL_Cache_If_Hit(logical_addr);
++
++ if (UNHIT_CACHE_ITEM == cache_item) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GLOB_FTL_Page_Read: Cache not hit\n");
++ res = FTL_Cache_Write();
++ if (ERR == FTL_Cache_Read(logical_addr))
++ res = ERR;
++ cache_item = Cache.LRU;
++ }
++
++ FTL_Cache_Read_Page(data, logical_addr, cache_item);
++
++ return res;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Page_Write
++* Inputs: pointer to data
++* address of data (ADDRESSTYPE is LBA * Bytes/Page)
++* Outputs: PASS=0 / FAIL=1
++* Description: writes a page of data from RAM to the cache
++* if the data is not already in cache, write back the
++* least recently used block and read the addressed block
++* from flash to cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
++{
++ u16 cache_blk;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wResult = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
++ "dwPageAddr: %llu\n", dwPageAddr);
++
++ cache_blk = FTL_Cache_If_Hit(dwPageAddr);
++
++ if (UNHIT_CACHE_ITEM == cache_blk) {
++ wResult = FTL_Cache_Write();
++ if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
++ wResult = FTL_Replace_Block(dwPageAddr);
++ pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
++ if (wResult == FAIL)
++ return FAIL;
++ }
++ if (ERR == FTL_Cache_Read(dwPageAddr))
++ wResult = ERR;
++ cache_blk = Cache.LRU;
++ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
++ } else {
++#if CMD_DMA
++ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
++ LLD_CMD_FLAG_ORDER_BEFORE_REST);
++#else
++ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
++#endif
++ }
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Block_Erase
++* Inputs: address of block to erase (now in byte format, should change to
++* block format)
++* Outputs: PASS=0 / FAIL=1
++* Description: erases the specified block
++* increments the erase count
++* If erase count reaches its upper limit,call function to
++* do the ajustment as per the relative erase count values
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Block_Erase(u64 blk_addr)
++{
++ int status;
++ u32 BlkIdx;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
++
++ if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
++ printk(KERN_ERR "GLOB_FTL_Block_Erase: "
++ "This should never occur\n");
++ return FAIL;
++ }
++
++#if CMD_DMA
++ status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
++ if (status == FAIL)
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, BlkIdx);
++#else
++ status = GLOB_LLD_Erase_Block(BlkIdx);
++ if (status == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, BlkIdx);
++ return status;
++ }
++#endif
++
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ }
++
++ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
++
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->WC_Index =
++ BlkIdx - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->WC_Entry_Value =
++ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0x30;
++
++ if (DeviceInfo.MLCDevice) {
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->RC_Index =
++ BlkIdx - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->RC_Entry_Value =
++ g_pReadCounter[BlkIdx -
++ DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0xC0;
++ }
++
++ ftl_cmd_cnt++;
++#endif
++
++ if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
++ FTL_Adjust_Relative_Erase_Count(BlkIdx);
++
++ return status;
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Adjust_Relative_Erase_Count
++* Inputs: index to block that was just incremented and is at the max
++* Outputs: PASS=0 / FAIL=1
++* Description: If any erase counts at MAX, adjusts erase count of every
++* block by substracting least worn
++* counter from counter value of every entry in wear table
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
++{
++ u8 wLeastWornCounter = MAX_BYTE_VALUE;
++ u8 wWearCounter;
++ u32 i, wWearIndex;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wResult = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_BAD_BLOCK(i))
++ continue;
++ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
++
++ if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
++ printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
++ "This should never occur\n");
++ wWearCounter = g_pWearCounter[wWearIndex -
++ DeviceInfo.wSpectraStartBlock];
++ if (wWearCounter < wLeastWornCounter)
++ wLeastWornCounter = wWearCounter;
++ }
++
++ if (wLeastWornCounter == 0) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Adjusting Wear Levelling Counters: Special Case\n");
++ g_pWearCounter[Index_of_MAX -
++ DeviceInfo.wSpectraStartBlock]--;
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->WC_Index =
++ Index_of_MAX - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->WC_Entry_Value =
++ g_pWearCounter[Index_of_MAX -
++ DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0x30;
++#endif
++ FTL_Static_Wear_Leveling();
++ } else {
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
++ if (!IS_BAD_BLOCK(i)) {
++ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
++ g_pWearCounter[wWearIndex -
++ DeviceInfo.wSpectraStartBlock] =
++ (u8)(g_pWearCounter
++ [wWearIndex -
++ DeviceInfo.wSpectraStartBlock] -
++ wLeastWornCounter);
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->WC_Index = wWearIndex -
++ DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->WC_Entry_Value =
++ g_pWearCounter[wWearIndex -
++ DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0x30;
++#endif
++ }
++ }
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Write_IN_Progress_Block_Table_Page
++* Inputs: None
++* Outputs: None
++* Description: It writes in-progress flag page to the page next to
++* block table
++***********************************************************************/
++static int FTL_Write_IN_Progress_Block_Table_Page(void)
++{
++ int wResult = PASS;
++ u16 bt_pages;
++ u16 dwIPFPageAddr;
++#if CMD_DMA
++#else
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 wTempBlockTableIndex;
++#endif
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
++ "Block %d Page %d\n",
++ g_wBlockTableIndex, dwIPFPageAddr);
++
++#if CMD_DMA
++ wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
++ g_wBlockTableIndex, dwIPFPageAddr, 1,
++ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
++ if (wResult == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ g_wBlockTableIndex);
++ }
++ g_wBlockTableOffset = dwIPFPageAddr + 1;
++ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
++ p_BTableChangesDelta->ValidFields = 0x01;
++ ftl_cmd_cnt++;
++#else
++ wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
++ g_wBlockTableIndex, dwIPFPageAddr, 1);
++ if (wResult == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)g_wBlockTableIndex);
++ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
++ wTempBlockTableIndex = FTL_Replace_Block_Table();
++ bt_block_changed = 1;
++ if (BAD_BLOCK == wTempBlockTableIndex)
++ return ERR;
++ g_wBlockTableIndex = wTempBlockTableIndex;
++ g_wBlockTableOffset = 0;
++ /* Block table tag is '00'. Means it's used one */
++ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
++ return FAIL;
++ }
++ g_wBlockTableOffset = dwIPFPageAddr + 1;
++#endif
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Read_Disturbance
++* Inputs: block address
++* Outputs: PASS=0 / FAIL=1
++* Description: used to handle read disturbance. Data in block that
++* reaches its read limit is moved to new block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int FTL_Read_Disturbance(u32 blk_addr)
++{
++ int wResult = FAIL;
++ u32 *pbt = (u32 *) g_pBlockTable;
++ u32 dwOldBlockAddr = blk_addr;
++ u32 wBlockNum;
++ u32 i;
++ u32 wLeastReadCounter = 0xFFFF;
++ u32 wLeastReadIndex = BAD_BLOCK;
++ u32 wSpareBlockNum = 0;
++ u32 wTempNode;
++ u32 wReplacedNode;
++ u8 *g_pTempBuf;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++#if CMD_DMA
++ g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
++ cp_back_buf_idx++;
++ if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
++ printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
++ "Maybe too many pending commands in your CDMA chain.\n");
++ return FAIL;
++ }
++#else
++ g_pTempBuf = tmp_buf_read_disturbance;
++#endif
++
++ wBlockNum = FTL_Get_Block_Index(blk_addr);
++
++ do {
++ /* This is a bug.Here 'i' should be logical block number
++ * and start from 1 (0 is reserved for block table).
++ * Have fixed it. - Yunpeng 2008. 12. 19
++ */
++ for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_SPARE_BLOCK(i)) {
++ u32 wPhysicalIndex =
++ (u32)((~SPARE_BLOCK) & pbt[i]);
++ if (g_pReadCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock] <
++ wLeastReadCounter) {
++ wLeastReadCounter =
++ g_pReadCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock];
++ wLeastReadIndex = i;
++ }
++ wSpareBlockNum++;
++ }
++ }
++
++ if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
++ wResult = GLOB_FTL_Garbage_Collection();
++ if (PASS == wResult)
++ continue;
++ else
++ break;
++ } else {
++ wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
++ wReplacedNode = (u32)((~SPARE_BLOCK) &
++ pbt[wLeastReadIndex]);
++#if CMD_DMA
++ pbt[wBlockNum] = wReplacedNode;
++ pbt[wLeastReadIndex] = wTempNode;
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = wBlockNum;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = wLeastReadIndex;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[wLeastReadIndex];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
++ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
++ LLD_CMD_FLAG_MODE_CDMA);
++ if (wResult == FAIL)
++ return wResult;
++
++ ftl_cmd_cnt++;
++
++ if (wResult != FAIL) {
++ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
++ g_pTempBuf, pbt[wBlockNum], 0,
++ DeviceInfo.wPagesPerBlock)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in "
++ "%s, Line %d, Function: %s, "
++ "new Bad Block %d "
++ "generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)pbt[wBlockNum]);
++ wResult = FAIL;
++ MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
++ }
++ ftl_cmd_cnt++;
++ }
++#else
++ wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
++ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
++ if (wResult == FAIL)
++ return wResult;
++
++ if (wResult != FAIL) {
++ /* This is a bug. At this time, pbt[wBlockNum]
++ is still the physical address of
++ discard block, and should not be write.
++ Have fixed it as below.
++ -- Yunpeng 2008.12.19
++ */
++ wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
++ wReplacedNode, 0,
++ DeviceInfo.wPagesPerBlock);
++ if (wResult == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in "
++ "%s, Line %d, Function: %s, "
++ "new Bad Block %d "
++ "generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)wReplacedNode);
++ MARK_BLOCK_AS_BAD(wReplacedNode);
++ } else {
++ pbt[wBlockNum] = wReplacedNode;
++ pbt[wLeastReadIndex] = wTempNode;
++ }
++ }
++
++ if ((wResult == PASS) && (g_cBlockTableStatus !=
++ IN_PROGRESS_BLOCK_TABLE)) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++#endif
++ }
++ } while (wResult != PASS)
++ ;
++
++#if CMD_DMA
++ /* ... */
++#endif
++
++ return wResult;
++}
++
+--- /dev/null
++++ b/drivers/staging/spectra/flash.h
+@@ -0,0 +1,198 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _FLASH_INTERFACE_
++#define _FLASH_INTERFACE_
++
++#include "ffsport.h"
++#include "spectraswconfig.h"
++
++#define MAX_BYTE_VALUE 0xFF
++#define MAX_WORD_VALUE 0xFFFF
++#define MAX_U32_VALUE 0xFFFFFFFF
++
++#define MAX_BLOCKNODE_VALUE 0xFFFFFF
++#define DISCARD_BLOCK 0x800000
++#define SPARE_BLOCK 0x400000
++#define BAD_BLOCK 0xC00000
++
++#define UNHIT_CACHE_ITEM 0xFFFF
++
++#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
++
++#define IN_PROGRESS_BLOCK_TABLE 0x00
++#define CURRENT_BLOCK_TABLE 0x01
++
++#define BTSIG_OFFSET (0)
++#define BTSIG_BYTES (5)
++#define BTSIG_DELTA (3)
++
++#define MAX_READ_COUNTER 0x2710
++
++#define FIRST_BT_ID (1)
++#define LAST_BT_ID (254)
++#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
++
++struct device_info_tag {
++ u16 wDeviceMaker;
++ u16 wDeviceID;
++ u32 wDeviceType;
++ u32 wSpectraStartBlock;
++ u32 wSpectraEndBlock;
++ u32 wTotalBlocks;
++ u16 wPagesPerBlock;
++ u16 wPageSize;
++ u16 wPageDataSize;
++ u16 wPageSpareSize;
++ u16 wNumPageSpareFlag;
++ u16 wECCBytesPerSector;
++ u32 wBlockSize;
++ u32 wBlockDataSize;
++ u32 wDataBlockNum;
++ u8 bPlaneNum;
++ u16 wDeviceMainAreaSize;
++ u16 wDeviceSpareAreaSize;
++ u16 wDevicesConnected;
++ u16 wDeviceWidth;
++ u16 wHWRevision;
++ u16 wHWFeatures;
++
++ u16 wONFIDevFeatures;
++ u16 wONFIOptCommands;
++ u16 wONFITimingMode;
++ u16 wONFIPgmCacheTimingMode;
++
++ u16 MLCDevice;
++ u16 wSpareSkipBytes;
++
++ u8 nBitsInPageNumber;
++ u8 nBitsInPageDataSize;
++ u8 nBitsInBlockDataSize;
++};
++
++extern struct device_info_tag DeviceInfo;
++
++/* Cache item format */
++struct flash_cache_item_tag {
++ u64 address;
++ u16 use_cnt;
++ u16 changed;
++ u8 *buf;
++};
++
++struct flash_cache_tag {
++ u32 cache_item_size; /* Size in bytes of each cache item */
++ u16 pages_per_item; /* How many NAND pages in each cache item */
++ u16 LRU; /* No. of the least recently used cache item */
++ struct flash_cache_item_tag array[CACHE_ITEM_NUM];
++};
++
++/*
++ *Data structure for each list node of the managment table
++ * used for the Level 2 Cache. Each node maps one logical NAND block.
++ */
++struct spectra_l2_cache_list {
++ struct list_head list;
++ u32 logical_blk_num; /* Logical block number */
++ u32 pages_array[]; /* Page map array of this logical block.
++ * Array index is the logical block number,
++ * and for every item of this arry:
++ * high 16 bit is index of the L2 cache block num,
++ * low 16 bit is the phy page num
++ * of the above L2 cache block.
++ * This array will be kmalloc during run time.
++ */
++};
++
++struct spectra_l2_cache_info {
++ u32 blk_array[BLK_NUM_FOR_L2_CACHE];
++ u16 cur_blk_idx; /* idx to the phy block number of current using */
++ u16 cur_page_num; /* pages number of current using */
++ struct spectra_l2_cache_list table; /* First node of the table */
++};
++
++#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++struct flash_cache_mod_item_tag {
++ u64 address;
++ u8 changed;
++};
++
++struct flash_cache_delta_list_tag {
++ u8 item; /* used cache item */
++ struct flash_cache_mod_item_tag cache;
++};
++#endif
++
++extern struct flash_cache_tag Cache;
++
++extern u8 *buf_read_page_main_spare;
++extern u8 *buf_write_page_main_spare;
++extern u8 *buf_read_page_spare;
++extern u8 *buf_get_bad_block;
++extern u8 *cdma_desc_buf;
++extern u8 *memcp_desc_buf;
++
++/* struture used for IndentfyDevice function */
++struct spectra_indentfy_dev_tag {
++ u32 NumBlocks;
++ u16 PagesPerBlock;
++ u16 PageDataSize;
++ u16 wECCBytesPerSector;
++ u32 wDataBlockNum;
++};
++
++int GLOB_FTL_Flash_Init(void);
++int GLOB_FTL_Flash_Release(void);
++/*void GLOB_FTL_Erase_Flash(void);*/
++int GLOB_FTL_Block_Erase(u64 block_addr);
++int GLOB_FTL_Is_BadBlock(u32 block_num);
++int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
++int GLOB_FTL_Event_Status(int *);
++u16 glob_ftl_execute_cmds(void);
++
++/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
++int FTL_Read_Disturbance(u32 dwBlockAddr);
++
++/*Flash r/w based on cache*/
++int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
++int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
++int GLOB_FTL_Wear_Leveling(void);
++int GLOB_FTL_Flash_Format(void);
++int GLOB_FTL_Init(void);
++int GLOB_FTL_Flush_Cache(void);
++int GLOB_FTL_Garbage_Collection(void);
++int GLOB_FTL_BT_Garbage_Collection(void);
++void GLOB_FTL_Cache_Release(void);
++u8 *get_blk_table_start_addr(void);
++u8 *get_wear_leveling_table_start_addr(void);
++unsigned long get_blk_table_len(void);
++unsigned long get_wear_leveling_table_len(void);
++
++#if DEBUG_BNDRY
++void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
++ char *filename);
++#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
++ limit, no, __LINE__, __FILE__)
++#else
++#define debug_boundary_error(chnl, limit, no) ;
++#endif
++
++#endif /*_FLASH_INTERFACE_*/
+--- /dev/null
++++ b/drivers/staging/spectra/lld.c
+@@ -0,0 +1,339 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "spectraswconfig.h"
++#include "ffsport.h"
++#include "ffsdefs.h"
++#include "lld.h"
++#include "lld_nand.h"
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
++#include "lld_emu.h"
++#include "lld_cdma.h"
++
++/* common functions: */
++u16 GLOB_LLD_Flash_Reset(void)
++{
++ return emu_Flash_Reset();
++}
++
++u16 GLOB_LLD_Read_Device_ID(void)
++{
++ return emu_Read_Device_ID();
++}
++
++int GLOB_LLD_Flash_Release(void)
++{
++ return emu_Flash_Release();
++}
++
++u16 GLOB_LLD_Flash_Init(void)
++{
++ return emu_Flash_Init();
++}
++
++u16 GLOB_LLD_Erase_Block(u32 block_add)
++{
++ return emu_Erase_Block(block_add);
++}
++
++u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Write_Page_Main(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Read_Page_Main(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ return emu_Read_Page_Main(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Write_Page_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Read_Page_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Get_Bad_Block(u32 block)
++{
++ return emu_Get_Bad_Block(block);
++}
++
++#endif /* FLASH_EMU */
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++#if FLASH_MTD /* vector all the LLD calls to the LLD_MTD code */
++#include "lld_mtd.h"
++#include "lld_cdma.h"
++
++/* common functions: */
++u16 GLOB_LLD_Flash_Reset(void)
++{
++ return mtd_Flash_Reset();
++}
++
++u16 GLOB_LLD_Read_Device_ID(void)
++{
++ return mtd_Read_Device_ID();
++}
++
++int GLOB_LLD_Flash_Release(void)
++{
++ return mtd_Flash_Release();
++}
++
++u16 GLOB_LLD_Flash_Init(void)
++{
++ return mtd_Flash_Init();
++}
++
++u16 GLOB_LLD_Erase_Block(u32 block_add)
++{
++ return mtd_Erase_Block(block_add);
++}
++
++u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return mtd_Write_Page_Main(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return mtd_Read_Page_Main(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ return mtd_Read_Page_Main(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Get_Bad_Block(u32 block)
++{
++ return mtd_Get_Bad_Block(block);
++}
++
++#endif /* FLASH_MTD */
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
++#include "lld_nand.h"
++#include "lld_cdma.h"
++#include "flash.h"
++
++/* common functions for LLD_NAND */
++void GLOB_LLD_ECC_Control(int enable)
++{
++ NAND_ECC_Ctrl(enable);
++}
++
++/* common functions for LLD_NAND */
++u16 GLOB_LLD_Flash_Reset(void)
++{
++ return NAND_Flash_Reset();
++}
++
++u16 GLOB_LLD_Read_Device_ID(void)
++{
++ return NAND_Read_Device_ID();
++}
++
++u16 GLOB_LLD_UnlockArrayAll(void)
++{
++ return NAND_UnlockArrayAll();
++}
++
++u16 GLOB_LLD_Flash_Init(void)
++{
++ return NAND_Flash_Init();
++}
++
++int GLOB_LLD_Flash_Release(void)
++{
++ return nand_release_spectra();
++}
++
++u16 GLOB_LLD_Erase_Block(u32 block_add)
++{
++ return NAND_Erase_Block(block_add);
++}
++
++
++u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return NAND_Write_Page_Main(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ if (page_count == 1) /* Using polling to improve read speed */
++ return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
++ else
++ return NAND_Read_Page_Main(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ return NAND_Read_Page_Main_Polling(read_data,
++ block, page, page_count);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
++ u16 page, u16 page_count)
++{
++ return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Get_Bad_Block(u32 block)
++{
++ return NAND_Get_Bad_Block(block);
++}
++
++#if CMD_DMA
++u16 GLOB_LLD_Event_Status(void)
++{
++ return CDMA_Event_Status();
++}
++
++u16 glob_lld_execute_cmds(void)
++{
++ return CDMA_Execute_CMDs();
++}
++
++u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
++ u32 ByteCount, u16 flag)
++{
++ /* Replace the hardware memcopy with software memcpy function */
++ if (CDMA_Execute_CMDs())
++ return FAIL;
++ memcpy(dest, src, ByteCount);
++ return PASS;
++
++ /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
++}
++
++u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
++{
++ return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
++}
++
++u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
++{
++ return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
++}
++
++u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
++ u16 count, u16 flags)
++{
++ return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
++ u16 count, u16 flags)
++{
++ return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
++ data, block, page, count, flags);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
++ u32 block, u16 page, u16 count)
++{
++ return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
++ LLD_CMD_FLAG_MODE_CDMA);
++}
++
++#endif /* CMD_DMA */
++#endif /* FLASH_NAND */
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++
++/* end of LLD.c */
+--- /dev/null
++++ b/drivers/staging/spectra/lld.h
+@@ -0,0 +1,111 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++
++
++#ifndef _LLD_
++#define _LLD_
++
++#include "ffsport.h"
++#include "spectraswconfig.h"
++#include "flash.h"
++
++#define GOOD_BLOCK 0
++#define DEFECTIVE_BLOCK 1
++#define READ_ERROR 2
++
++#define CLK_X 5
++#define CLK_MULTI 4
++
++/* Typedefs */
++
++/* prototypes: API for LLD */
++/* Currently, Write_Page_Main
++ * MemCopy
++ * Read_Page_Main_Spare
++ * do not have flag because they were not implemented prior to this
++ * They are not being added to keep changes to a minimum for now.
++ * Currently, they are not required (only reqd for Wr_P_M_S.)
++ * Later on, these NEED to be changed.
++ */
++
++extern void GLOB_LLD_ECC_Control(int enable);
++
++extern u16 GLOB_LLD_Flash_Reset(void);
++
++extern u16 GLOB_LLD_Read_Device_ID(void);
++
++extern u16 GLOB_LLD_UnlockArrayAll(void);
++
++extern u16 GLOB_LLD_Flash_Init(void);
++
++extern int GLOB_LLD_Flash_Release(void);
++
++extern u16 GLOB_LLD_Erase_Block(u32 block_add);
++
++extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++
++extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++
++extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++
++extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
++
++extern u16 GLOB_LLD_Event_Status(void);
++
++extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
++
++extern u16 glob_lld_execute_cmds(void);
++
++extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
++
++extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
++ u32 block, u16 page, u16 count);
++
++extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
++ u32 block, u16 page, u16 count, u16 flags);
++
++extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
++ u32 block, u16 page, u16 count, u16 flags);
++
++extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
++ u32 block, u16 page, u16 count);
++
++#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
++#define LLD_CMD_FLAG_MODE_CDMA (0x8)
++
++
++#endif /*_LLD_ */
++
++
+--- /dev/null
++++ b/drivers/staging/spectra/lld_cdma.c
+@@ -0,0 +1,910 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++
++#include "spectraswconfig.h"
++#include "lld.h"
++#include "lld_nand.h"
++#include "lld_cdma.h"
++#include "lld_emu.h"
++#include "flash.h"
++#include "nand_regs.h"
++
++#define MAX_PENDING_CMDS 4
++#define MODE_02 (0x2 << 26)
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Data_Cmd
++* Inputs: cmd code (aligned for hw)
++* data: pointer to source or destination
++* block: block address
++* page: page address
++* num: num pages to transfer
++* Outputs: PASS
++* Description: This function takes the parameters and puts them
++* into the "pending commands" array.
++* It does not parse or validate the parameters.
++* The array index is same as the tag.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
++{
++ u8 bank;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (0 == cmd)
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
++
++ /* If a command of another bank comes, then first execute */
++ /* pending commands of the current bank, then set the new */
++ /* bank as current bank */
++ bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++ if (bank != info.flash_bank) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Will access new bank. old bank: %d, new bank: %d\n",
++ info.flash_bank, bank);
++ if (CDMA_Execute_CMDs()) {
++ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
++ return FAIL;
++ }
++ info.flash_bank = bank;
++ }
++
++ info.pcmds[info.pcmds_num].CMD = cmd;
++ info.pcmds[info.pcmds_num].DataAddr = data;
++ info.pcmds[info.pcmds_num].Block = block;
++ info.pcmds[info.pcmds_num].Page = page;
++ info.pcmds[info.pcmds_num].PageCount = num;
++ info.pcmds[info.pcmds_num].DataDestAddr = 0;
++ info.pcmds[info.pcmds_num].DataSrcAddr = 0;
++ info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
++ info.pcmds[info.pcmds_num].Flags = flags;
++ info.pcmds[info.pcmds_num].Status = 0xB0B;
++
++ switch (cmd) {
++ case WRITE_MAIN_SPARE_CMD:
++ Conv_Main_Spare_Data_Log2Phy_Format(data, num);
++ break;
++ case WRITE_SPARE_CMD:
++ Conv_Spare_Data_Log2Phy_Format(data);
++ break;
++ default:
++ break;
++ }
++
++ info.pcmds_num++;
++
++ if (info.pcmds_num >= MAX_PENDING_CMDS) {
++ if (CDMA_Execute_CMDs()) {
++ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
++ return FAIL;
++ }
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_MemCopy_CMD
++* Inputs: dest: pointer to destination
++* src: pointer to source
++* count: num bytes to transfer
++* Outputs: PASS
++* Description: This function takes the parameters and puts them
++* into the "pending commands" array.
++* It does not parse or validate the parameters.
++* The array index is same as the tag.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
++{
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
++ info.pcmds[info.pcmds_num].DataAddr = 0;
++ info.pcmds[info.pcmds_num].Block = 0;
++ info.pcmds[info.pcmds_num].Page = 0;
++ info.pcmds[info.pcmds_num].PageCount = 0;
++ info.pcmds[info.pcmds_num].DataDestAddr = dest;
++ info.pcmds[info.pcmds_num].DataSrcAddr = src;
++ info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
++ info.pcmds[info.pcmds_num].Flags = flags;
++ info.pcmds[info.pcmds_num].Status = 0xB0B;
++
++ info.pcmds_num++;
++
++ if (info.pcmds_num >= MAX_PENDING_CMDS) {
++ if (CDMA_Execute_CMDs()) {
++ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
++ return FAIL;
++ }
++ }
++
++ return PASS;
++}
++
++#if 0
++/* Prints the PendingCMDs array */
++void print_pending_cmds(void)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < info.pcmds_num; i++) {
++ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
++ switch (info.pcmds[i].CMD) {
++ case ERASE_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Erase Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case WRITE_MAIN_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Write Main Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case WRITE_MAIN_SPARE_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Write Main Spare Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case READ_MAIN_SPARE_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Read Main Spare Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case READ_MAIN_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Read Main Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case MEMCOPY_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Memcopy Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case DUMMY_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Dummy Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ default:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Illegal Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
++ (u32)info.pcmds[i].DataAddr);
++ nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
++ info.pcmds[i].Block);
++ nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
++ info.pcmds[i].Page);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
++ info.pcmds[i].PageCount);
++ nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
++ (u32)info.pcmds[i].DataDestAddr);
++ nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
++ (u32)info.pcmds[i].DataSrcAddr);
++ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
++ info.pcmds[i].MemCopyByteCnt);
++ nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
++ info.pcmds[i].Flags);
++ nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
++ info.pcmds[i].Status);
++ }
++}
++
++/* Print the CDMA descriptors */
++void print_cdma_descriptors(void)
++{
++ struct cdma_descriptor *pc;
++ int i;
++
++ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
++
++ for (i = 0; i < info.cdma_num; i++) {
++ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
++ pc[i].NxtPointerHi, pc[i].NxtPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
++ pc[i].FlashPointerHi, pc[i].FlashPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
++ pc[i].CommandType);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
++ pc[i].MemAddrHi, pc[i].MemAddrLo);
++ nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
++ pc[i].CommandFlags);
++ nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
++ pc[i].Channel, pc[i].Status);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
++ pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reserved12: 0x%x, Reserved13: 0x%x, "
++ "Reserved14: 0x%x, pcmd: %d\n",
++ pc[i].Reserved12, pc[i].Reserved13,
++ pc[i].Reserved14, pc[i].pcmd);
++ }
++}
++
++/* Print the Memory copy descriptors */
++static void print_memcp_descriptors(void)
++{
++ struct memcpy_descriptor *pm;
++ int i;
++
++ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
++
++ for (i = 0; i < info.cdma_num; i++) {
++ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
++ pm[i].NxtPointerHi, pm[i].NxtPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
++ pm[i].SrcAddrHi, pm[i].SrcAddrLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
++ pm[i].DestAddrHi, pm[i].DestAddrLo);
++ nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
++ pm[i].XferSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
++ pm[i].MemCopyFlags);
++ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
++ pm[i].MemCopyStatus);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
++ pm[i].reserved9);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
++ pm[i].reserved10);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
++ pm[i].reserved11);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
++ pm[i].reserved12);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
++ pm[i].reserved13);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
++ pm[i].reserved14);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
++ pm[i].reserved15);
++ }
++}
++#endif
++
++/* Reset cdma_descriptor chain to 0 */
++static void reset_cdma_desc(int i)
++{
++ struct cdma_descriptor *ptr;
++
++ BUG_ON(i >= MAX_DESCS);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ ptr[i].NxtPointerHi = 0;
++ ptr[i].NxtPointerLo = 0;
++ ptr[i].FlashPointerHi = 0;
++ ptr[i].FlashPointerLo = 0;
++ ptr[i].CommandType = 0;
++ ptr[i].MemAddrHi = 0;
++ ptr[i].MemAddrLo = 0;
++ ptr[i].CommandFlags = 0;
++ ptr[i].Channel = 0;
++ ptr[i].Status = 0;
++ ptr[i].MemCopyPointerHi = 0;
++ ptr[i].MemCopyPointerLo = 0;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_UpdateEventStatus
++* Inputs: none
++* Outputs: none
++* Description: This function update the event status of all the channels
++* when an error condition is reported.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void CDMA_UpdateEventStatus(void)
++{
++ int i, j, active_chan;
++ struct cdma_descriptor *ptr;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ for (j = 0; j < info.cdma_num; j++) {
++ /* Check for the descriptor with failure */
++ if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
++ break;
++
++ }
++
++ /* All the previous cmd's status for this channel must be good */
++ for (i = 0; i < j; i++) {
++ if (ptr[i].pcmd != 0xff)
++ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
++ }
++
++ /* Abort the channel with type 0 reset command. It resets the */
++ /* selected channel after the descriptor completes the flash */
++ /* operation and status has been updated for the descriptor. */
++ /* Memory Copy and Sync associated with this descriptor will */
++ /* not be executed */
++ active_chan = ioread32(FlashReg + CHNL_ACTIVE);
++ if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
++ iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
++ iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
++ } else { /* Should not reached here */
++ printk(KERN_ERR "Error! Used bank is not set in"
++ " reg CHNL_ACTIVE\n");
++ }
++}
++
++static void cdma_trans(u16 chan)
++{
++ u32 addr;
++
++ addr = info.cdma_desc;
++
++ iowrite32(MODE_10 | (chan << 24), FlashMem);
++ iowrite32((1 << 7) | chan, FlashMem + 0x10);
++
++ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
++ FlashMem);
++ iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
++
++ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
++ iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
++
++ iowrite32(MODE_10 | (chan << 24), FlashMem);
++ iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
++* Inputs: tag_count: the number of pending cmds to do
++* Outputs: PASS/FAIL
++* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
++* for each pending command, start the CDMA engine, and return.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_Execute_CMDs(void)
++{
++ int i, ret;
++ u64 flash_add;
++ u32 ptr;
++ dma_addr_t map_addr, next_ptr;
++ u16 status = PASS;
++ u16 tmp_c;
++ struct cdma_descriptor *pc;
++ struct memcpy_descriptor *pm;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ /* No pending cmds to execute, just exit */
++ if (0 == info.pcmds_num) {
++ nand_dbg_print(NAND_DBG_TRACE,
++ "No pending cmds to execute. Just exit.\n");
++ return PASS;
++ }
++
++ for (i = 0; i < MAX_DESCS; i++)
++ reset_cdma_desc(i);
++
++ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
++ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
++
++ info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
++ info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
++ next_ptr = info.cdma_desc;
++ info.cdma_num = 0;
++
++ for (i = 0; i < info.pcmds_num; i++) {
++ if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
++ info.pcmds[i].Status = CMD_NOT_DONE;
++ continue;
++ }
++
++ next_ptr += sizeof(struct cdma_descriptor);
++ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
++ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
++
++ /* Use the Block offset within a bank */
++ tmp_c = info.pcmds[i].Block /
++ (DeviceInfo.wTotalBlocks / totalUsedBanks);
++ flash_add = (u64)(info.pcmds[i].Block - tmp_c *
++ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
++ DeviceInfo.wBlockDataSize +
++ (u64)(info.pcmds[i].Page) *
++ DeviceInfo.wPageDataSize;
++
++ ptr = MODE_10 | (info.flash_bank << 24) |
++ (u32)GLOB_u64_Div(flash_add,
++ DeviceInfo.wPageDataSize);
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++
++ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
++ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
++ /* Descriptor to set Main+Spare Access Mode */
++ pc[info.cdma_num].CommandType = 0x43;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++ pc[info.cdma_num].Channel = 0;
++ pc[info.cdma_num].Status = 0;
++ pc[info.cdma_num].pcmd = i;
++
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++
++ reset_cdma_desc(info.cdma_num);
++ next_ptr += sizeof(struct cdma_descriptor);
++ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
++ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++ }
++
++ switch (info.pcmds[i].CMD) {
++ case ERASE_CMD:
++ pc[info.cdma_num].CommandType = 1;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++ break;
++
++ case WRITE_MAIN_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2100 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case READ_MAIN_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2000 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case WRITE_MAIN_SPARE_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2100 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case READ_MAIN_SPARE_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2000 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case MEMCOPY_CMD:
++ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
++ /* Set bit 11 to let the CDMA engine continue to */
++ /* execute only after it has finished processing */
++ /* the memcopy descriptor. */
++ /* Also set bit 10 and bit 9 to 1 */
++ pc[info.cdma_num].CommandFlags = 0x0E40;
++ map_addr = info.memcp_desc + info.cdma_num *
++ sizeof(struct memcpy_descriptor);
++ pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
++ pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
++
++ pm[info.cdma_num].NxtPointerHi = 0;
++ pm[info.cdma_num].NxtPointerLo = 0;
++
++ map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
++ pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
++ pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
++ map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
++ pm[info.cdma_num].DestAddrHi = map_addr >> 16;
++ pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
++
++ pm[info.cdma_num].XferSize =
++ info.pcmds[i].MemCopyByteCnt;
++ pm[info.cdma_num].MemCopyFlags =
++ (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
++ pm[info.cdma_num].MemCopyStatus = 0;
++ break;
++
++ case DUMMY_CMD:
++ default:
++ pc[info.cdma_num].CommandType = 0XFFFF;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++ break;
++ }
++
++ pc[info.cdma_num].Channel = 0;
++ pc[info.cdma_num].Status = 0;
++ pc[info.cdma_num].pcmd = i;
++
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++
++ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
++ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
++ /* Descriptor to set back Main Area Access Mode */
++ reset_cdma_desc(info.cdma_num);
++ next_ptr += sizeof(struct cdma_descriptor);
++ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
++ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
++
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++
++ pc[info.cdma_num].CommandType = 0x42;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++
++ pc[info.cdma_num].Channel = 0;
++ pc[info.cdma_num].Status = 0;
++ pc[info.cdma_num].pcmd = i;
++
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++ }
++ }
++
++ /* Add a dummy descriptor at end of the CDMA chain */
++ reset_cdma_desc(info.cdma_num);
++ ptr = MODE_10 | (info.flash_bank << 24);
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
++ /* Set Command Flags for the last CDMA descriptor: */
++ /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
++ pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ /* Wait for DMA to be enabled before issuing the next command */
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++ cdma_trans(info.flash_bank);
++
++ ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
++ if (!ret)
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = info.ret;
++
++ info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
++
++ return status;
++}
++
++int is_cdma_interrupt(void)
++{
++ u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
++ u32 int_en_mask;
++ u32 cdma_int_en_mask;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ /* Set the global Enable masks for only those interrupts
++ * that are supported */
++ cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
++ DMA_INTR__DESC_COMP_CHANNEL1 |
++ DMA_INTR__DESC_COMP_CHANNEL2 |
++ DMA_INTR__DESC_COMP_CHANNEL3 |
++ DMA_INTR__MEMCOPY_DESC_COMP);
++
++ int_en_mask = (INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL);
++
++ ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
++ ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
++ ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
++ ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
++ ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
++
++ nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
++ "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
++ ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
++
++ if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
++ return 1;
++ } else {
++ iowrite32(ints_b0, FlashReg + INTR_STATUS0);
++ iowrite32(ints_b1, FlashReg + INTR_STATUS1);
++ iowrite32(ints_b2, FlashReg + INTR_STATUS2);
++ iowrite32(ints_b3, FlashReg + INTR_STATUS3);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Not a NAND controller interrupt! Ignore it.\n");
++ return 0;
++ }
++}
++
++static void update_event_status(void)
++{
++ int i;
++ struct cdma_descriptor *ptr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ for (i = 0; i < info.cdma_num; i++) {
++ if (ptr[i].pcmd != 0xff)
++ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
++ if ((ptr[i].CommandType == 0x41) ||
++ (ptr[i].CommandType == 0x42) ||
++ (ptr[i].CommandType == 0x43))
++ continue;
++
++ switch (info.pcmds[ptr[i].pcmd].CMD) {
++ case READ_MAIN_SPARE_CMD:
++ Conv_Main_Spare_Data_Phy2Log_Format(
++ info.pcmds[ptr[i].pcmd].DataAddr,
++ info.pcmds[ptr[i].pcmd].PageCount);
++ break;
++ case READ_SPARE_CMD:
++ Conv_Spare_Data_Phy2Log_Format(
++ info.pcmds[ptr[i].pcmd].DataAddr);
++ break;
++ }
++ }
++}
++
++static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
++{
++ u16 event = EVENT_NONE;
++ u16 err_byte;
++ u16 err_page = 0;
++ u8 err_sector;
++ u8 err_device;
++ u16 ecc_correction_info;
++ u16 err_address;
++ u32 eccSectorSize;
++ u8 *err_pos;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ do {
++ if (0 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
++ else if (1 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
++ else if (2 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
++ else if (3 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
++
++ err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
++ err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
++ err_sector = ((err_address &
++ ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
++
++ ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
++ err_device = ((ecc_correction_info &
++ ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
++
++ if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
++ event = EVENT_UNCORRECTABLE_DATA_ERROR;
++ } else {
++ event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
++ if (err_byte < ECC_SECTOR_SIZE) {
++ err_pos = buf +
++ (err_page - page) *
++ DeviceInfo.wPageDataSize +
++ err_sector * eccSectorSize +
++ err_byte *
++ DeviceInfo.wDevicesConnected +
++ err_device;
++ *err_pos ^= ecc_correction_info &
++ ERR_CORRECTION_INFO__BYTEMASK;
++ }
++ }
++ } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
++
++ return event;
++}
++
++static u16 process_ecc_int(u32 c, u16 *p_desc_num)
++{
++ struct cdma_descriptor *ptr;
++ u16 j;
++ int event = EVENT_PASS;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (c != info.flash_bank)
++ printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
++ info.flash_bank, c);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ for (j = 0; j < info.cdma_num; j++)
++ if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
++ break;
++
++ *p_desc_num = j; /* Pass the descripter number found here */
++
++ if (j >= info.cdma_num) {
++ printk(KERN_ERR "Can not find the correct descriptor number "
++ "when ecc interrupt triggered!"
++ "info.cdma_num: %d, j: %d\n", info.cdma_num, j);
++ return EVENT_UNCORRECTABLE_DATA_ERROR;
++ }
++
++ event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
++ info.pcmds[ptr[j].pcmd].Page);
++
++ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
++ printk(KERN_ERR "Uncorrectable ECC error!"
++ "info.cdma_num: %d, j: %d, "
++ "pending cmd CMD: 0x%x, "
++ "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
++ info.cdma_num, j,
++ info.pcmds[ptr[j].pcmd].CMD,
++ info.pcmds[ptr[j].pcmd].Block,
++ info.pcmds[ptr[j].pcmd].Page,
++ info.pcmds[ptr[j].pcmd].PageCount);
++
++ if (ptr[j].pcmd != 0xff)
++ info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
++ CDMA_UpdateEventStatus();
++ }
++
++ return event;
++}
++
++static void process_prog_erase_fail_int(u16 desc_num)
++{
++ struct cdma_descriptor *ptr;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ if (ptr[desc_num].pcmd != 0xFF)
++ info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
++
++ CDMA_UpdateEventStatus();
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Event_Status (for use with CMD_DMA)
++* Inputs: none
++* Outputs: Event_Status code
++* Description: This function is called after an interrupt has happened
++* It reads the HW status register and ...tbd
++* It returns the appropriate event status
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_Event_Status(void)
++{
++ u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++ u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
++ DMA_INTR__DESC_COMP_CHANNEL1,
++ DMA_INTR__DESC_COMP_CHANNEL2,
++ DMA_INTR__DESC_COMP_CHANNEL3};
++ u32 cdma_int_status, int_status;
++ u32 ecc_enable = 0;
++ u16 event = EVENT_PASS;
++ u16 cur_desc = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ecc_enable = ioread32(FlashReg + ECC_ENABLE);
++
++ while (1) {
++ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
++ if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
++ event = process_ecc_int(info.flash_bank, &cur_desc);
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + ints_addr[info.flash_bank]);
++ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "ints_bank0 to ints_bank3: "
++ "0x%x, 0x%x, 0x%x, 0x%x, "
++ "ints_cdma: 0x%x\n",
++ ioread32(FlashReg + INTR_STATUS0),
++ ioread32(FlashReg + INTR_STATUS1),
++ ioread32(FlashReg + INTR_STATUS2),
++ ioread32(FlashReg + INTR_STATUS3),
++ ioread32(FlashReg + DMA_INTR));
++ break;
++ }
++ } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
++ printk(KERN_ERR "NAND program fail interrupt!\n");
++ process_prog_erase_fail_int(cur_desc);
++ event = EVENT_PROGRAM_FAILURE;
++ break;
++ } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
++ printk(KERN_ERR "NAND erase fail interrupt!\n");
++ process_prog_erase_fail_int(cur_desc);
++ event = EVENT_ERASE_FAILURE;
++ break;
++ } else {
++ cdma_int_status = ioread32(FlashReg + DMA_INTR);
++ if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
++ iowrite32(dma_intr_bit[info.flash_bank],
++ FlashReg + DMA_INTR);
++ update_event_status();
++ event = EVENT_PASS;
++ break;
++ }
++ }
++ }
++
++ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
++ iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
++ cdma_int_status = ioread32(FlashReg + DMA_INTR);
++ iowrite32(cdma_int_status, FlashReg + DMA_INTR);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return event;
++}
++
++
++
+--- /dev/null
++++ b/drivers/staging/spectra/lld_cdma.h
+@@ -0,0 +1,123 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++/* header for LLD_CDMA.c module */
++
++#ifndef _LLD_CDMA_
++#define _LLD_CDMA_
++
++#include "flash.h"
++
++#define DEBUG_SYNC 1
++
++/*/////////// CDMA specific MACRO definition */
++#define MAX_DESCS (255)
++#define MAX_CHANS (4)
++#define MAX_SYNC_POINTS (16)
++#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
++
++#define CHANNEL_SYNC_MASK (0x000F)
++#define CHANNEL_DMA_MASK (0x00F0)
++#define CHANNEL_ID_MASK (0x0300)
++#define CHANNEL_CONT_MASK (0x4000)
++#define CHANNEL_INTR_MASK (0x8000)
++
++#define CHANNEL_SYNC_OFFSET (0)
++#define CHANNEL_DMA_OFFSET (4)
++#define CHANNEL_ID_OFFSET (8)
++#define CHANNEL_CONT_OFFSET (14)
++#define CHANNEL_INTR_OFFSET (15)
++
++u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
++u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
++u16 CDMA_Execute_CMDs(void);
++void print_pending_cmds(void);
++void print_cdma_descriptors(void);
++
++extern u8 g_SBDCmdIndex;
++extern struct mrst_nand_info info;
++
++
++/*/////////// prototypes: APIs for LLD_CDMA */
++int is_cdma_interrupt(void);
++u16 CDMA_Event_Status(void);
++
++/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
++struct cdma_descriptor {
++ u32 NxtPointerHi;
++ u32 NxtPointerLo;
++ u32 FlashPointerHi;
++ u32 FlashPointerLo;
++ u32 CommandType;
++ u32 MemAddrHi;
++ u32 MemAddrLo;
++ u32 CommandFlags;
++ u32 Channel;
++ u32 Status;
++ u32 MemCopyPointerHi;
++ u32 MemCopyPointerLo;
++ u32 Reserved12;
++ u32 Reserved13;
++ u32 Reserved14;
++ u32 pcmd; /* pending cmd num related to this descriptor */
++};
++
++/* This struct holds one MemCopy descriptor as defined by the HW */
++struct memcpy_descriptor {
++ u32 NxtPointerHi;
++ u32 NxtPointerLo;
++ u32 SrcAddrHi;
++ u32 SrcAddrLo;
++ u32 DestAddrHi;
++ u32 DestAddrLo;
++ u32 XferSize;
++ u32 MemCopyFlags;
++ u32 MemCopyStatus;
++ u32 reserved9;
++ u32 reserved10;
++ u32 reserved11;
++ u32 reserved12;
++ u32 reserved13;
++ u32 reserved14;
++ u32 reserved15;
++};
++
++/* Pending CMD table entries (includes MemCopy parameters */
++struct pending_cmd {
++ u8 CMD;
++ u8 *DataAddr;
++ u32 Block;
++ u16 Page;
++ u16 PageCount;
++ u8 *DataDestAddr;
++ u8 *DataSrcAddr;
++ u32 MemCopyByteCnt;
++ u16 Flags;
++ u16 Status;
++};
++
++#if DEBUG_SYNC
++extern u32 debug_sync_cnt;
++#endif
++
++/* Definitions for CMD DMA descriptor chain fields */
++#define CMD_DMA_DESC_COMP 0x8000
++#define CMD_DMA_DESC_FAIL 0x4000
++
++#endif /*_LLD_CDMA_*/
+--- /dev/null
++++ b/drivers/staging/spectra/lld_emu.c
+@@ -0,0 +1,780 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include "flash.h"
++#include "ffsdefs.h"
++#include "lld_emu.h"
++#include "lld.h"
++#if CMD_DMA
++#include "lld_cdma.h"
++#endif
++
++#define GLOB_LLD_PAGES 64
++#define GLOB_LLD_PAGE_SIZE (512+16)
++#define GLOB_LLD_PAGE_DATA_SIZE 512
++#define GLOB_LLD_BLOCKS 2048
++
++#if (CMD_DMA && FLASH_EMU)
++#include "lld_cdma.h"
++u32 totalUsedBanks;
++u32 valid_banks[MAX_CHANS];
++#endif
++
++#if FLASH_EMU /* This is for entire module */
++
++static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
++
++/* Read nand emu file and then fill it's content to flash_memory */
++int emu_load_file_to_mem(void)
++{
++ mm_segment_t fs;
++ struct file *nef_filp = NULL;
++ struct inode *inode = NULL;
++ loff_t nef_size = 0;
++ loff_t tmp_file_offset, file_offset;
++ ssize_t nread;
++ int i, rc = -EINVAL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ fs = get_fs();
++ set_fs(get_ds());
++
++ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
++ if (IS_ERR(nef_filp)) {
++ printk(KERN_ERR "filp_open error: "
++ "Unable to open nand emu file!\n");
++ return PTR_ERR(nef_filp);
++ }
++
++ if (nef_filp->f_path.dentry) {
++ inode = nef_filp->f_path.dentry->d_inode;
++ } else {
++ printk(KERN_ERR "Can not get valid inode!\n");
++ goto out;
++ }
++
++ nef_size = i_size_read(inode->i_mapping->host);
++ if (nef_size <= 0) {
++ printk(KERN_ERR "Invalid nand emu file size: "
++ "0x%llx\n", nef_size);
++ goto out;
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
++ nef_size);
++ }
++
++ file_offset = 0;
++ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
++ tmp_file_offset = file_offset;
++ nread = vfs_read(nef_filp,
++ (char __user *)flash_memory[i],
++ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
++ if (nread < GLOB_LLD_PAGE_SIZE) {
++ printk(KERN_ERR "%s, Line %d - "
++ "nand emu file partial read: "
++ "%d bytes\n", __FILE__, __LINE__, (int)nread);
++ goto out;
++ }
++ file_offset += GLOB_LLD_PAGE_SIZE;
++ }
++ rc = 0;
++
++out:
++ filp_close(nef_filp, current->files);
++ set_fs(fs);
++ return rc;
++}
++
++/* Write contents of flash_memory to nand emu file */
++int emu_write_mem_to_file(void)
++{
++ mm_segment_t fs;
++ struct file *nef_filp = NULL;
++ struct inode *inode = NULL;
++ loff_t nef_size = 0;
++ loff_t tmp_file_offset, file_offset;
++ ssize_t nwritten;
++ int i, rc = -EINVAL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ fs = get_fs();
++ set_fs(get_ds());
++
++ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
++ if (IS_ERR(nef_filp)) {
++ printk(KERN_ERR "filp_open error: "
++ "Unable to open nand emu file!\n");
++ return PTR_ERR(nef_filp);
++ }
++
++ if (nef_filp->f_path.dentry) {
++ inode = nef_filp->f_path.dentry->d_inode;
++ } else {
++ printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
++ goto out;
++ }
++
++ nef_size = i_size_read(inode->i_mapping->host);
++ if (nef_size <= 0) {
++ printk(KERN_ERR "Invalid "
++ "nand emu file size: 0x%llx\n", nef_size);
++ goto out;
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
++ "%lld\n", nef_size);
++ }
++
++ file_offset = 0;
++ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
++ tmp_file_offset = file_offset;
++ nwritten = vfs_write(nef_filp,
++ (char __user *)flash_memory[i],
++ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
++ if (nwritten < GLOB_LLD_PAGE_SIZE) {
++ printk(KERN_ERR "%s, Line %d - "
++ "nand emu file partial write: "
++ "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
++ goto out;
++ }
++ file_offset += GLOB_LLD_PAGE_SIZE;
++ }
++ rc = 0;
++
++out:
++ filp_close(nef_filp, current->files);
++ set_fs(fs);
++ return rc;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Flash_Init
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Creates & initializes the flash RAM array.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Flash_Init(void)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
++ GLOB_LLD_BLOCKS *
++ GLOB_LLD_PAGES *
++ sizeof(u8));
++ if (!flash_memory[0]) {
++ printk(KERN_ERR "Fail to allocate memory "
++ "for nand emulator!\n");
++ return ERR;
++ }
++
++ memset((char *)(flash_memory[0]), 0xFF,
++ GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
++ sizeof(u8));
++
++ for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
++ flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
++
++ emu_load_file_to_mem(); /* Load nand emu file to mem */
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Flash_Release
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Releases the flash.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int emu_Flash_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ emu_write_mem_to_file(); /* Write back mem to nand emu file */
++
++ vfree(flash_memory[0]);
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Device_ID
++* Inputs: none
++* Outputs: PASS=1 FAIL=0
++* Description: Reads the info from the controller registers.
++* Sets up DeviceInfo structure with device parameters
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++
++u16 emu_Read_Device_ID(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ DeviceInfo.wDeviceMaker = 0;
++ DeviceInfo.wDeviceType = 8;
++ DeviceInfo.wSpectraStartBlock = 36;
++ DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
++ DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
++ DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
++ DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
++ DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
++ DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
++ GLOB_LLD_PAGE_DATA_SIZE;
++ DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
++ DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
++ DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock
++ + 1);
++ DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
++ DeviceInfo.nBitsInPageNumber =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
++ DeviceInfo.nBitsInPageDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
++ DeviceInfo.nBitsInBlockDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
++
++#if CMD_DMA
++ totalUsedBanks = 4;
++ valid_banks[0] = 1;
++ valid_banks[1] = 1;
++ valid_banks[2] = 1;
++ valid_banks[3] = 1;
++#endif
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Flash_Reset
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Reset the flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Flash_Reset(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Erase_Block
++* Inputs: Address
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Erase a block
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Erase_Block(u32 block_add)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (block_add >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "emu_Erase_Block error! "
++ "Too big block address: %d\n", block_add);
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
++ (int)block_add);
++
++ for (i = block_add * GLOB_LLD_PAGES;
++ i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
++ if (flash_memory[i]) {
++ memset((u8 *)(flash_memory[i]), 0xFF,
++ DeviceInfo.wPageSize * sizeof(u8));
++ }
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Write_Page_Main
++* Inputs: Write buffer address pointer
++* Block number
++* Page number
++* Number of pages to process
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the data in the buffer to main area of flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks)
++ return FAIL;
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
++ return FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
++ "lba %u Page %u PageCount %u\n",
++ (unsigned int)Block,
++ (unsigned int)Page, (unsigned int)PageCount);
++
++ for (i = 0; i < PageCount; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ printk(KERN_ERR "Run out of memory\n");
++ return FAIL;
++ }
++ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
++ write_data, DeviceInfo.wPageDataSize);
++ write_data += DeviceInfo.wPageDataSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Page_Main
++* Inputs: Read buffer address pointer
++* Block number
++* Page number
++* Number of pages to process
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read the data from the flash main area to the buffer
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks)
++ return FAIL;
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
++ return FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
++ "lba %u Page %u PageCount %u\n",
++ (unsigned int)Block,
++ (unsigned int)Page, (unsigned int)PageCount);
++
++ for (i = 0; i < PageCount; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
++ } else {
++ memcpy(read_data,
++ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
++ + Page]),
++ DeviceInfo.wPageDataSize);
++ }
++ read_data += DeviceInfo.wPageDataSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++#ifndef ELDORA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Page_Main_Spare
++* Inputs: Write Buffer
++* Address
++* Buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read from flash main+spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Main+Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Main+Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
++ "No. of pages %u block %u start page %u\n",
++ (unsigned int)PageCount,
++ (unsigned int)Block, (unsigned int)Page);
++
++ for (i = 0; i < PageCount; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ memset(read_data, 0xFF, DeviceInfo.wPageSize);
++ } else {
++ memcpy(read_data, (u8 *) (flash_memory[Block *
++ GLOB_LLD_PAGES
++ + Page]),
++ DeviceInfo.wPageSize);
++ }
++
++ read_data += DeviceInfo.wPageSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Write_Page_Main_Spare
++* Inputs: Write buffer
++* address
++* buffer length
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the buffer to main+spare area of flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 page_count)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Write Page Main + Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + page_count > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Write Page Main + Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
++ "No. of pages %u block %u start page %u\n",
++ (unsigned int)page_count,
++ (unsigned int)Block, (unsigned int)Page);
++
++ for (i = 0; i < page_count; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ printk(KERN_ERR "Run out of memory!\n");
++ return FAIL;
++ }
++ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
++ write_data, DeviceInfo.wPageSize);
++ write_data += DeviceInfo.wPageSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Write_Page_Spare
++* Inputs: Write buffer
++* Address
++* buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the buffer in the spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Spare Error: "
++ "Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Spare Error: "
++ "Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
++ "block %u page %u\n",
++ (unsigned int)Block, (unsigned int)Page);
++
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ printk(KERN_ERR "Run out of memory!\n");
++ return FAIL;
++ }
++
++ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
++ DeviceInfo.wPageDataSize), write_data,
++ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Page_Spare
++* Inputs: Write Buffer
++* Address
++* Buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read data from the spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
++ "block %u page %u\n",
++ (unsigned int)Block, (unsigned int)Page);
++
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ memset(write_data, 0xFF,
++ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
++ } else {
++ memcpy(write_data,
++ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
++ + DeviceInfo.wPageDataSize),
++ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Enable_Disable_Interrupts
++* Inputs: enable or disable
++* Outputs: none
++* Description: NOP
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++}
++
++u16 emu_Get_Bad_Block(u32 block)
++{
++ return 0;
++}
++
++#if CMD_DMA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Support for CDMA functions
++************************************
++* emu_CDMA_Flash_Init
++* CDMA_process_data command (use LLD_CDMA)
++* CDMA_MemCopy_CMD (use LLD_CDMA)
++* emu_CDMA_execute all commands
++* emu_CDMA_Event_Status
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_CDMA_Flash_Init(void)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
++ PendingCMD[i].CMD = 0;
++ PendingCMD[i].Tag = 0;
++ PendingCMD[i].DataAddr = 0;
++ PendingCMD[i].Block = 0;
++ PendingCMD[i].Page = 0;
++ PendingCMD[i].PageCount = 0;
++ PendingCMD[i].DataDestAddr = 0;
++ PendingCMD[i].DataSrcAddr = 0;
++ PendingCMD[i].MemCopyByteCnt = 0;
++ PendingCMD[i].ChanSync[0] = 0;
++ PendingCMD[i].ChanSync[1] = 0;
++ PendingCMD[i].ChanSync[2] = 0;
++ PendingCMD[i].ChanSync[3] = 0;
++ PendingCMD[i].ChanSync[4] = 0;
++ PendingCMD[i].Status = 3;
++ }
++
++ return PASS;
++}
++
++static void emu_isr(int irq, void *dev_id)
++{
++ /* TODO: ... */
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Execute_CMDs
++* Inputs: tag_count: the number of pending cmds to do
++* Outputs: PASS/FAIL
++* Description: execute each command in the pending CMD array
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_CDMA_Execute_CMDs(u16 tag_count)
++{
++ u16 i, j;
++ u8 CMD; /* cmd parameter */
++ u8 *data;
++ u32 block;
++ u16 page;
++ u16 count;
++ u16 status = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
++ "Tag Count %u\n", tag_count);
++
++ for (i = 0; i < totalUsedBanks; i++) {
++ PendingCMD[i].CMD = DUMMY_CMD;
++ PendingCMD[i].Tag = 0xFF;
++ PendingCMD[i].Block =
++ (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
++
++ for (j = 0; j <= MAX_CHANS; j++)
++ PendingCMD[i].ChanSync[j] = 0;
++ }
++
++ CDMA_Execute_CMDs(tag_count);
++
++ print_pending_cmds(tag_count);
++
++#if DEBUG_SYNC
++ }
++ debug_sync_cnt++;
++#endif
++
++ for (i = MAX_CHANS;
++ i < tag_count + MAX_CHANS; i++) {
++ CMD = PendingCMD[i].CMD;
++ data = PendingCMD[i].DataAddr;
++ block = PendingCMD[i].Block;
++ page = PendingCMD[i].Page;
++ count = PendingCMD[i].PageCount;
++
++ switch (CMD) {
++ case ERASE_CMD:
++ emu_Erase_Block(block);
++ PendingCMD[i].Status = PASS;
++ break;
++ case WRITE_MAIN_CMD:
++ emu_Write_Page_Main(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case WRITE_MAIN_SPARE_CMD:
++ emu_Write_Page_Main_Spare(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case READ_MAIN_CMD:
++ emu_Read_Page_Main(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case MEMCOPY_CMD:
++ memcpy(PendingCMD[i].DataDestAddr,
++ PendingCMD[i].DataSrcAddr,
++ PendingCMD[i].MemCopyByteCnt);
++ case DUMMY_CMD:
++ PendingCMD[i].Status = PASS;
++ break;
++ default:
++ PendingCMD[i].Status = FAIL;
++ break;
++ }
++ }
++
++ /*
++ * Temperory adding code to reset PendingCMD array for basic testing.
++ * It should be done at the end of event status function.
++ */
++ for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
++ PendingCMD[i].CMD = 0;
++ PendingCMD[i].Tag = 0;
++ PendingCMD[i].DataAddr = 0;
++ PendingCMD[i].Block = 0;
++ PendingCMD[i].Page = 0;
++ PendingCMD[i].PageCount = 0;
++ PendingCMD[i].DataDestAddr = 0;
++ PendingCMD[i].DataSrcAddr = 0;
++ PendingCMD[i].MemCopyByteCnt = 0;
++ PendingCMD[i].ChanSync[0] = 0;
++ PendingCMD[i].ChanSync[1] = 0;
++ PendingCMD[i].ChanSync[2] = 0;
++ PendingCMD[i].ChanSync[3] = 0;
++ PendingCMD[i].ChanSync[4] = 0;
++ PendingCMD[i].Status = CMD_NOT_DONE;
++ }
++
++ nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
++
++ emu_isr(0, 0); /* This is a null isr now. Need fill it in future */
++
++ return status;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Event_Status
++* Inputs: none
++* Outputs: Event_Status code
++* Description: This function can also be used to force errors
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_CDMA_Event_Status(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return EVENT_PASS;
++}
++
++#endif /* CMD_DMA */
++#endif /* !ELDORA */
++#endif /* FLASH_EMU */
+--- /dev/null
++++ b/drivers/staging/spectra/lld_emu.h
+@@ -0,0 +1,51 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _LLD_EMU_
++#define _LLD_EMU_
++
++#include "ffsport.h"
++#include "ffsdefs.h"
++
++/* prototypes: emulator API functions */
++extern u16 emu_Flash_Reset(void);
++extern u16 emu_Flash_Init(void);
++extern int emu_Flash_Release(void);
++extern u16 emu_Read_Device_ID(void);
++extern u16 emu_Erase_Block(u32 block_addr);
++extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
++ u16 PageCount);
++extern u16 emu_Event_Status(void);
++extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
++extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
++ u16 PageCount);
++extern u16 emu_Get_Bad_Block(u32 block);
++
++u16 emu_CDMA_Flash_Init(void);
++u16 emu_CDMA_Execute_CMDs(u16 tag_count);
++u16 emu_CDMA_Event_Status(void);
++#endif /*_LLD_EMU_*/
+--- /dev/null
++++ b/drivers/staging/spectra/lld_mtd.c
+@@ -0,0 +1,687 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/mtd/mtd.h>
++#include "flash.h"
++#include "ffsdefs.h"
++#include "lld_emu.h"
++#include "lld.h"
++#if CMD_DMA
++#include "lld_cdma.h"
++#endif
++
++#define GLOB_LLD_PAGES 64
++#define GLOB_LLD_PAGE_SIZE (512+16)
++#define GLOB_LLD_PAGE_DATA_SIZE 512
++#define GLOB_LLD_BLOCKS 2048
++
++#if CMD_DMA
++#include "lld_cdma.h"
++u32 totalUsedBanks;
++u32 valid_banks[MAX_CHANS];
++#endif
++
++static struct mtd_info *spectra_mtd;
++static int mtddev = -1;
++module_param(mtddev, int, 0);
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Flash_Init
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Creates & initializes the flash RAM array.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Flash_Init(void)
++{
++ if (mtddev == -1) {
++ printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
++ return FAIL;
++ }
++
++ spectra_mtd = get_mtd_device(NULL, mtddev);
++ if (!spectra_mtd) {
++ printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Flash_Release
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Releases the flash.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int mtd_Flash_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ if (!spectra_mtd)
++ return PASS;
++
++ put_mtd_device(spectra_mtd);
++ spectra_mtd = NULL;
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Read_Device_ID
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Reads the info from the controller registers.
++* Sets up DeviceInfo structure with device parameters
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++
++u16 mtd_Read_Device_ID(void)
++{
++ uint64_t tmp;
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (!spectra_mtd)
++ return FAIL;
++
++ DeviceInfo.wDeviceMaker = 0;
++ DeviceInfo.wDeviceType = 8;
++ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
++ tmp = spectra_mtd->size;
++ do_div(tmp, spectra_mtd->erasesize);
++ DeviceInfo.wTotalBlocks = tmp;
++ DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
++ DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
++ DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
++ DeviceInfo.wPageDataSize = spectra_mtd->writesize;
++ DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
++ DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
++ DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * DeviceInfo.wPagesPerBlock;
++ DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock
++ + 1);
++ DeviceInfo.MLCDevice = 0;//spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK;
++ DeviceInfo.nBitsInPageNumber =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
++ DeviceInfo.nBitsInPageDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
++ DeviceInfo.nBitsInBlockDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
++
++#if CMD_DMA
++ totalUsedBanks = 4;
++ valid_banks[0] = 1;
++ valid_banks[1] = 1;
++ valid_banks[2] = 1;
++ valid_banks[3] = 1;
++#endif
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Flash_Reset
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Reset the flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Flash_Reset(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return PASS;
++}
++
++void erase_callback(struct erase_info *e)
++{
++ complete((void *)e->priv);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Erase_Block
++* Inputs: Address
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Erase a block
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Erase_Block(u32 block_add)
++{
++ struct erase_info erase;
++ DECLARE_COMPLETION_ONSTACK(comp);
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (block_add >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "mtd_Erase_Block error! "
++ "Too big block address: %d\n", block_add);
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
++ (int)block_add);
++
++ erase.mtd = spectra_mtd;
++ erase.callback = erase_callback;
++ erase.addr = block_add * spectra_mtd->erasesize;
++ erase.len = spectra_mtd->erasesize;
++ erase.priv = (unsigned long)&comp;
++
++ ret = spectra_mtd->erase(spectra_mtd, &erase);
++ if (!ret) {
++ wait_for_completion(&comp);
++ if (erase.state != MTD_ERASE_DONE)
++ ret = -EIO;
++ }
++ if (ret) {
++ printk(KERN_WARNING "mtd_Erase_Block error! "
++ "erase of region [0x%llx, 0x%llx] failed\n",
++ erase.addr, erase.len);
++ return FAIL;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Write_Page_Main
++* Inputs: Write buffer address pointer
++* Block number
++* Page number
++* Number of pages to process
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the data in the buffer to main area of flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ size_t retlen;
++ int ret = 0;
++
++ if (Block >= DeviceInfo.wTotalBlocks)
++ return FAIL;
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
++ return FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
++ "lba %u Page %u PageCount %u\n",
++ (unsigned int)Block,
++ (unsigned int)Page, (unsigned int)PageCount);
++
++
++ while (PageCount) {
++ ret = spectra_mtd->write(spectra_mtd,
++ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
++ DeviceInfo.wPageDataSize, &retlen, write_data);
++ if (ret) {
++ printk(KERN_ERR "%s failed %d\n", __func__, ret);
++ return FAIL;
++ }
++ write_data += DeviceInfo.wPageDataSize;
++ Page++;
++ PageCount--;
++ }
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Read_Page_Main
++* Inputs: Read buffer address pointer
++* Block number
++* Page number
++* Number of pages to process
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read the data from the flash main area to the buffer
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Read_Page_Main(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ size_t retlen;
++ int ret = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks)
++ return FAIL;
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
++ return FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
++ "lba %u Page %u PageCount %u\n",
++ (unsigned int)Block,
++ (unsigned int)Page, (unsigned int)PageCount);
++
++
++ while (PageCount) {
++ ret = spectra_mtd->read(spectra_mtd,
++ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
++ DeviceInfo.wPageDataSize, &retlen, read_data);
++ if (ret) {
++ printk(KERN_ERR "%s failed %d\n", __func__, ret);
++ return FAIL;
++ }
++ read_data += DeviceInfo.wPageDataSize;
++ Page++;
++ PageCount--;
++ }
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return PASS;
++}
++
++#ifndef ELDORA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Read_Page_Main_Spare
++* Inputs: Read buffer
++* Address
++* Buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read from flash main+spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Main+Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Main+Spare "
++ "Error: Page number %d+%d too big in block %d\n",
++ Page, PageCount, Block);
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
++ "No. of pages %u block %u start page %u\n",
++ (unsigned int)PageCount,
++ (unsigned int)Block, (unsigned int)Page);
++
++
++ while (PageCount) {
++ struct mtd_oob_ops ops;
++ int ret;
++
++ ops.mode = MTD_OOB_AUTO;
++ ops.datbuf = read_data;
++ ops.len = DeviceInfo.wPageDataSize;
++ ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
++ ops.ooblen = BTSIG_BYTES;
++ ops.ooboffs = 0;
++
++ ret = spectra_mtd->read_oob(spectra_mtd,
++ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
++ &ops);
++ if (ret) {
++ printk(KERN_ERR "%s failed %d\n", __func__, ret);
++ return FAIL;
++ }
++ read_data += DeviceInfo.wPageSize;
++ Page++;
++ PageCount--;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Write_Page_Main_Spare
++* Inputs: Write buffer
++* address
++* buffer length
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the buffer to main+spare area of flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 page_count)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Write Page Main + Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + page_count > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Write Page Main + Spare "
++ "Error: Page number %d+%d too big in block %d\n",
++ Page, page_count, Block);
++ WARN_ON(1);
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
++ "No. of pages %u block %u start page %u\n",
++ (unsigned int)page_count,
++ (unsigned int)Block, (unsigned int)Page);
++
++ while (page_count) {
++ struct mtd_oob_ops ops;
++ int ret;
++
++ ops.mode = MTD_OOB_AUTO;
++ ops.datbuf = write_data;
++ ops.len = DeviceInfo.wPageDataSize;
++ ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
++ ops.ooblen = BTSIG_BYTES;
++ ops.ooboffs = 0;
++
++ ret = spectra_mtd->write_oob(spectra_mtd,
++ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
++ &ops);
++ if (ret) {
++ printk(KERN_ERR "%s failed %d\n", __func__, ret);
++ return FAIL;
++ }
++ write_data += DeviceInfo.wPageSize;
++ Page++;
++ page_count--;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Write_Page_Spare
++* Inputs: Write buffer
++* Address
++* buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the buffer in the spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ WARN_ON(1);
++ return FAIL;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Read_Page_Spare
++* Inputs: Read buffer
++* Address
++* Buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read data from the spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
++ "block %u page %u (%u pages)\n",
++ (unsigned int)Block, (unsigned int)Page, PageCount);
++
++ while (PageCount) {
++ struct mtd_oob_ops ops;
++ int ret;
++
++ ops.mode = MTD_OOB_AUTO;
++ ops.datbuf = NULL;
++ ops.len = 0;
++ ops.oobbuf = read_data;
++ ops.ooblen = BTSIG_BYTES;
++ ops.ooboffs = 0;
++
++ ret = spectra_mtd->read_oob(spectra_mtd,
++ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
++ &ops);
++ if (ret) {
++ printk(KERN_ERR "%s failed %d\n", __func__, ret);
++ return FAIL;
++ }
++
++ read_data += DeviceInfo.wPageSize;
++ Page++;
++ PageCount--;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_Enable_Disable_Interrupts
++* Inputs: enable or disable
++* Outputs: none
++* Description: NOP
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++}
++
++u16 mtd_Get_Bad_Block(u32 block)
++{
++ return 0;
++}
++
++#if CMD_DMA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Support for CDMA functions
++************************************
++* mtd_CDMA_Flash_Init
++* CDMA_process_data command (use LLD_CDMA)
++* CDMA_MemCopy_CMD (use LLD_CDMA)
++* mtd_CDMA_execute all commands
++* mtd_CDMA_Event_Status
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_CDMA_Flash_Init(void)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
++ PendingCMD[i].CMD = 0;
++ PendingCMD[i].Tag = 0;
++ PendingCMD[i].DataAddr = 0;
++ PendingCMD[i].Block = 0;
++ PendingCMD[i].Page = 0;
++ PendingCMD[i].PageCount = 0;
++ PendingCMD[i].DataDestAddr = 0;
++ PendingCMD[i].DataSrcAddr = 0;
++ PendingCMD[i].MemCopyByteCnt = 0;
++ PendingCMD[i].ChanSync[0] = 0;
++ PendingCMD[i].ChanSync[1] = 0;
++ PendingCMD[i].ChanSync[2] = 0;
++ PendingCMD[i].ChanSync[3] = 0;
++ PendingCMD[i].ChanSync[4] = 0;
++ PendingCMD[i].Status = 3;
++ }
++
++ return PASS;
++}
++
++static void mtd_isr(int irq, void *dev_id)
++{
++ /* TODO: ... */
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_CDMA_Execute_CMDs
++* Inputs: tag_count: the number of pending cmds to do
++* Outputs: PASS/FAIL
++* Description: execute each command in the pending CMD array
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
++{
++ u16 i, j;
++ u8 CMD; /* cmd parameter */
++ u8 *data;
++ u32 block;
++ u16 page;
++ u16 count;
++ u16 status = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
++ "Tag Count %u\n", tag_count);
++
++ for (i = 0; i < totalUsedBanks; i++) {
++ PendingCMD[i].CMD = DUMMY_CMD;
++ PendingCMD[i].Tag = 0xFF;
++ PendingCMD[i].Block =
++ (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
++
++ for (j = 0; j <= MAX_CHANS; j++)
++ PendingCMD[i].ChanSync[j] = 0;
++ }
++
++ CDMA_Execute_CMDs(tag_count);
++
++#ifdef VERBOSE
++ print_pending_cmds(tag_count);
++#endif
++#if DEBUG_SYNC
++ }
++ debug_sync_cnt++;
++#endif
++
++ for (i = MAX_CHANS;
++ i < tag_count + MAX_CHANS; i++) {
++ CMD = PendingCMD[i].CMD;
++ data = PendingCMD[i].DataAddr;
++ block = PendingCMD[i].Block;
++ page = PendingCMD[i].Page;
++ count = PendingCMD[i].PageCount;
++
++ switch (CMD) {
++ case ERASE_CMD:
++ mtd_Erase_Block(block);
++ PendingCMD[i].Status = PASS;
++ break;
++ case WRITE_MAIN_CMD:
++ mtd_Write_Page_Main(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case WRITE_MAIN_SPARE_CMD:
++ mtd_Write_Page_Main_Spare(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case READ_MAIN_CMD:
++ mtd_Read_Page_Main(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case MEMCOPY_CMD:
++ memcpy(PendingCMD[i].DataDestAddr,
++ PendingCMD[i].DataSrcAddr,
++ PendingCMD[i].MemCopyByteCnt);
++ case DUMMY_CMD:
++ PendingCMD[i].Status = PASS;
++ break;
++ default:
++ PendingCMD[i].Status = FAIL;
++ break;
++ }
++ }
++
++ /*
++ * Temporarily adding code to reset PendingCMD array for basic testing.
++ * It should be done at the end of event status function.
++ */
++ for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
++ PendingCMD[i].CMD = 0;
++ PendingCMD[i].Tag = 0;
++ PendingCMD[i].DataAddr = 0;
++ PendingCMD[i].Block = 0;
++ PendingCMD[i].Page = 0;
++ PendingCMD[i].PageCount = 0;
++ PendingCMD[i].DataDestAddr = 0;
++ PendingCMD[i].DataSrcAddr = 0;
++ PendingCMD[i].MemCopyByteCnt = 0;
++ PendingCMD[i].ChanSync[0] = 0;
++ PendingCMD[i].ChanSync[1] = 0;
++ PendingCMD[i].ChanSync[2] = 0;
++ PendingCMD[i].ChanSync[3] = 0;
++ PendingCMD[i].ChanSync[4] = 0;
++ PendingCMD[i].Status = CMD_NOT_DONE;
++ }
++
++ nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
++
++ mtd_isr(0, 0); /* This is a null isr now. Need fill it in future */
++
++ return status;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: mtd_CDMA_Event_Status
++* Inputs: none
++* Outputs: Event_Status code
++* Description: This function can also be used to force errors
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 mtd_CDMA_Event_Status(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return EVENT_PASS;
++}
++
++#endif /* CMD_DMA */
++#endif /* !ELDORA */
+--- /dev/null
++++ b/drivers/staging/spectra/lld_mtd.h
+@@ -0,0 +1,51 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _LLD_MTD_
++#define _LLD_MTD_
++
++#include "ffsport.h"
++#include "ffsdefs.h"
++
++/* prototypes: MTD API functions */
++extern u16 mtd_Flash_Reset(void);
++extern u16 mtd_Flash_Init(void);
++extern int mtd_Flash_Release(void);
++extern u16 mtd_Read_Device_ID(void);
++extern u16 mtd_Erase_Block(u32 block_addr);
++extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
++ u16 PageCount);
++extern u16 mtd_Event_Status(void);
++extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
++extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
++ u16 PageCount);
++extern u16 mtd_Get_Bad_Block(u32 block);
++
++u16 mtd_CDMA_Flash_Init(void);
++u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
++u16 mtd_CDMA_Event_Status(void);
++#endif /*_LLD_MTD_*/
+--- /dev/null
++++ b/drivers/staging/spectra/lld_nand.c
+@@ -0,0 +1,2616 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "lld.h"
++#include "lld_nand.h"
++#include "lld_cdma.h"
++
++#include "spectraswconfig.h"
++#include "flash.h"
++#include "ffsdefs.h"
++
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++
++#include "nand_regs.h"
++
++#define SPECTRA_NAND_NAME "nd"
++
++#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
++#define MAX_PAGES_PER_RW 128
++
++#define INT_IDLE_STATE 0
++#define INT_READ_PAGE_MAIN 0x01
++#define INT_WRITE_PAGE_MAIN 0x02
++#define INT_PIPELINE_READ_AHEAD 0x04
++#define INT_PIPELINE_WRITE_AHEAD 0x08
++#define INT_MULTI_PLANE_READ 0x10
++#define INT_MULTI_PLANE_WRITE 0x11
++
++static u32 enable_ecc;
++
++struct mrst_nand_info info;
++
++int totalUsedBanks;
++u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
++
++void __iomem *FlashReg;
++void __iomem *FlashMem;
++
++u16 conf_parameters[] = {
++ 0x0000,
++ 0x0000,
++ 0x01F4,
++ 0x01F4,
++ 0x01F4,
++ 0x01F4,
++ 0x0000,
++ 0x0000,
++ 0x0001,
++ 0x0000,
++ 0x0000,
++ 0x0000,
++ 0x0000,
++ 0x0040,
++ 0x0001,
++ 0x000A,
++ 0x000A,
++ 0x000A,
++ 0x0000,
++ 0x0000,
++ 0x0005,
++ 0x0012,
++ 0x000C
++};
++
++u16 NAND_Get_Bad_Block(u32 block)
++{
++ u32 status = PASS;
++ u32 flag_bytes = 0;
++ u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
++ u32 page, i;
++ u8 *pReadSpareBuf = buf_get_bad_block;
++
++ if (enable_ecc)
++ flag_bytes = DeviceInfo.wNumPageSpareFlag;
++
++ for (page = 0; page < 2; page++) {
++ status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
++ if (status != PASS)
++ return READ_ERROR;
++ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
++ if (pReadSpareBuf[i] != 0xff)
++ return DEFECTIVE_BLOCK;
++ }
++
++ for (page = 1; page < 3; page++) {
++ status = NAND_Read_Page_Spare(pReadSpareBuf, block,
++ DeviceInfo.wPagesPerBlock - page , 1);
++ if (status != PASS)
++ return READ_ERROR;
++ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
++ if (pReadSpareBuf[i] != 0xff)
++ return DEFECTIVE_BLOCK;
++ }
++
++ return GOOD_BLOCK;
++}
++
++
++u16 NAND_Flash_Reset(void)
++{
++ u32 i;
++ u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
++ INTR_STATUS1__RST_COMP,
++ INTR_STATUS2__RST_COMP,
++ INTR_STATUS3__RST_COMP};
++ u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
++ INTR_STATUS1__TIME_OUT,
++ INTR_STATUS2__TIME_OUT,
++ INTR_STATUS3__TIME_OUT};
++ u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++ u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
++ DEVICE_RESET__BANK1,
++ DEVICE_RESET__BANK2,
++ DEVICE_RESET__BANK3};
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
++ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
++ FlashReg + intr_status[i]);
++
++ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
++ iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
++ while (!(ioread32(FlashReg + intr_status[i]) &
++ (intr_status_rst_comp[i] | intr_status_time_out[i])))
++ ;
++ if (ioread32(FlashReg + intr_status[i]) &
++ intr_status_time_out[i])
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Reset operation timed out on bank %d\n", i);
++ }
++
++ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
++ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
++ FlashReg + intr_status[i]);
++
++ return PASS;
++}
++
++static void NAND_ONFi_Timing_Mode(u16 mode)
++{
++ u16 Trea[6] = {40, 30, 25, 20, 20, 16};
++ u16 Trp[6] = {50, 25, 17, 15, 12, 10};
++ u16 Treh[6] = {30, 15, 15, 10, 10, 7};
++ u16 Trc[6] = {100, 50, 35, 30, 25, 20};
++ u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
++ u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
++ u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
++ u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
++ u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
++ u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
++ u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
++ u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
++
++ u16 TclsRising = 1;
++ u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
++ u16 dv_window = 0;
++ u16 en_lo, en_hi;
++ u16 acc_clks;
++ u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ en_lo = CEIL_DIV(Trp[mode], CLK_X);
++ en_hi = CEIL_DIV(Treh[mode], CLK_X);
++
++#if ONFI_BLOOM_TIME
++ if ((en_hi * CLK_X) < (Treh[mode] + 2))
++ en_hi++;
++#endif
++
++ if ((en_lo + en_hi) * CLK_X < Trc[mode])
++ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
++
++ if ((en_lo + en_hi) < CLK_MULTI)
++ en_lo += CLK_MULTI - en_lo - en_hi;
++
++ while (dv_window < 8) {
++ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
++
++ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
++
++ data_invalid =
++ data_invalid_rhoh <
++ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
++
++ dv_window = data_invalid - Trea[mode];
++
++ if (dv_window < 8)
++ en_lo++;
++ }
++
++ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
++
++ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
++ acc_clks++;
++
++ if ((data_invalid - acc_clks * CLK_X) < 2)
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
++ __FILE__, __LINE__);
++
++ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
++ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
++ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
++ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
++ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
++ if (!TclsRising)
++ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
++ if (cs_cnt == 0)
++ cs_cnt = 1;
++
++ if (Tcea[mode]) {
++ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
++ cs_cnt++;
++ }
++
++#if MODE5_WORKAROUND
++ if (mode == 5)
++ acc_clks = 5;
++#endif
++
++ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
++ if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
++ (ioread32(FlashReg + DEVICE_ID) == 0x88))
++ acc_clks = 6;
++
++ iowrite32(acc_clks, FlashReg + ACC_CLKS);
++ iowrite32(re_2_we, FlashReg + RE_2_WE);
++ iowrite32(re_2_re, FlashReg + RE_2_RE);
++ iowrite32(we_2_re, FlashReg + WE_2_RE);
++ iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
++ iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
++ iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
++ iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
++}
++
++static void index_addr(u32 address, u32 data)
++{
++ iowrite32(address, FlashMem);
++ iowrite32(data, FlashMem + 0x10);
++}
++
++static void index_addr_read_data(u32 address, u32 *pdata)
++{
++ iowrite32(address, FlashMem);
++ *pdata = ioread32(FlashMem + 0x10);
++}
++
++static void set_ecc_config(void)
++{
++#if SUPPORT_8BITECC
++ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
++ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
++ iowrite32(8, FlashReg + ECC_CORRECTION);
++#endif
++
++ if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
++ == 1) {
++ DeviceInfo.wECCBytesPerSector = 4;
++ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
++ DeviceInfo.wNumPageSpareFlag =
++ DeviceInfo.wPageSpareSize -
++ DeviceInfo.wPageDataSize /
++ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
++ DeviceInfo.wECCBytesPerSector
++ - DeviceInfo.wSpareSkipBytes;
++ } else {
++ DeviceInfo.wECCBytesPerSector =
++ (ioread32(FlashReg + ECC_CORRECTION) &
++ ECC_CORRECTION__VALUE) * 13 / 8;
++ if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
++ DeviceInfo.wECCBytesPerSector += 2;
++ else
++ DeviceInfo.wECCBytesPerSector += 1;
++
++ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
++ DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
++ DeviceInfo.wPageDataSize /
++ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
++ DeviceInfo.wECCBytesPerSector
++ - DeviceInfo.wSpareSkipBytes;
++ }
++}
++
++static u16 get_onfi_nand_para(void)
++{
++ int i;
++ u16 blks_lun_l, blks_lun_h, n_of_luns;
++ u32 blockperlun, id;
++
++ iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
++
++ while (!((ioread32(FlashReg + INTR_STATUS0) &
++ INTR_STATUS0__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS0) &
++ INTR_STATUS0__TIME_OUT)))
++ ;
++
++ if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
++ iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
++ while (!((ioread32(FlashReg + INTR_STATUS1) &
++ INTR_STATUS1__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS1) &
++ INTR_STATUS1__TIME_OUT)))
++ ;
++
++ if (ioread32(FlashReg + INTR_STATUS1) &
++ INTR_STATUS1__RST_COMP) {
++ iowrite32(DEVICE_RESET__BANK2,
++ FlashReg + DEVICE_RESET);
++ while (!((ioread32(FlashReg + INTR_STATUS2) &
++ INTR_STATUS2__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS2) &
++ INTR_STATUS2__TIME_OUT)))
++ ;
++
++ if (ioread32(FlashReg + INTR_STATUS2) &
++ INTR_STATUS2__RST_COMP) {
++ iowrite32(DEVICE_RESET__BANK3,
++ FlashReg + DEVICE_RESET);
++ while (!((ioread32(FlashReg + INTR_STATUS3) &
++ INTR_STATUS3__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS3) &
++ INTR_STATUS3__TIME_OUT)))
++ ;
++ } else {
++ printk(KERN_ERR "Getting a time out for bank 2!\n");
++ }
++ } else {
++ printk(KERN_ERR "Getting a time out for bank 1!\n");
++ }
++ }
++
++ iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
++ iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
++ iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
++ iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
++
++ DeviceInfo.wONFIDevFeatures =
++ ioread32(FlashReg + ONFI_DEVICE_FEATURES);
++ DeviceInfo.wONFIOptCommands =
++ ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
++ DeviceInfo.wONFITimingMode =
++ ioread32(FlashReg + ONFI_TIMING_MODE);
++ DeviceInfo.wONFIPgmCacheTimingMode =
++ ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
++
++ n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
++ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
++ blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
++ blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
++
++ blockperlun = (blks_lun_h << 16) | blks_lun_l;
++
++ DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
++
++ if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
++ ONFI_TIMING_MODE__VALUE))
++ return FAIL;
++
++ for (i = 5; i > 0; i--) {
++ if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
++ break;
++ }
++
++ NAND_ONFi_Timing_Mode(i);
++
++ index_addr(MODE_11 | 0, 0x90);
++ index_addr(MODE_11 | 1, 0);
++
++ for (i = 0; i < 3; i++)
++ index_addr_read_data(MODE_11 | 2, &id);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
++
++ DeviceInfo.MLCDevice = id & 0x0C;
++
++ /* By now, all the ONFI devices we know support the page cache */
++ /* rw feature. So here we enable the pipeline_rw_ahead feature */
++ /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
++ /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
++
++ return PASS;
++}
++
++static void get_samsung_nand_para(void)
++{
++ u8 no_of_planes;
++ u32 blk_size;
++ u64 plane_size, capacity;
++ u32 id_bytes[5];
++ int i;
++
++ index_addr((u32)(MODE_11 | 0), 0x90);
++ index_addr((u32)(MODE_11 | 1), 0);
++ for (i = 0; i < 5; i++)
++ index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
++ id_bytes[0], id_bytes[1], id_bytes[2],
++ id_bytes[3], id_bytes[4]);
++
++ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
++ /* Set timing register values according to datasheet */
++ iowrite32(5, FlashReg + ACC_CLKS);
++ iowrite32(20, FlashReg + RE_2_WE);
++ iowrite32(12, FlashReg + WE_2_RE);
++ iowrite32(14, FlashReg + ADDR_2_DATA);
++ iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
++ iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
++ iowrite32(2, FlashReg + CS_SETUP_CNT);
++ }
++
++ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
++ plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
++ blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
++ capacity = (u64)128 * plane_size * no_of_planes;
++
++ DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
++}
++
++static void get_toshiba_nand_para(void)
++{
++ void __iomem *scratch_reg;
++ u32 tmp;
++
++ /* Workaround to fix a controller bug which reports a wrong */
++ /* spare area size for some kind of Toshiba NAND device */
++ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
++ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
++ iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
++ tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
++ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
++ iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
++#if SUPPORT_15BITECC
++ iowrite32(15, FlashReg + ECC_CORRECTION);
++#elif SUPPORT_8BITECC
++ iowrite32(8, FlashReg + ECC_CORRECTION);
++#endif
++ }
++
++	/* As Toshiba NAND cannot provide its block number, */
++ /* so here we need user to provide the correct block */
++ /* number in a scratch register before the Linux NAND */
++ /* driver is loaded. If no valid value found in the scratch */
++ /* register, then we use default block number value */
++ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
++ if (!scratch_reg) {
++ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
++ __FILE__, __LINE__);
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
++ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
++ if (DeviceInfo.wTotalBlocks < 512)
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ iounmap(scratch_reg);
++ }
++}
++
++static void get_hynix_nand_para(void)
++{
++ void __iomem *scratch_reg;
++ u32 main_size, spare_size;
++
++ switch (DeviceInfo.wDeviceID) {
++ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
++ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
++ iowrite32(128, FlashReg + PAGES_PER_BLOCK);
++ iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
++ iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
++ main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
++ spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
++ iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
++ iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
++ iowrite32(0, FlashReg + DEVICE_WIDTH);
++#if SUPPORT_15BITECC
++ iowrite32(15, FlashReg + ECC_CORRECTION);
++#elif SUPPORT_8BITECC
++ iowrite32(8, FlashReg + ECC_CORRECTION);
++#endif
++ DeviceInfo.MLCDevice = 1;
++ break;
++ default:
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
++ "Will use default parameter values instead.\n",
++ DeviceInfo.wDeviceID);
++ }
++
++ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
++ if (!scratch_reg) {
++ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
++ __FILE__, __LINE__);
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
++ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
++ if (DeviceInfo.wTotalBlocks < 512)
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ iounmap(scratch_reg);
++ }
++}
++
++static void find_valid_banks(void)
++{
++ u32 id[LLD_MAX_FLASH_BANKS];
++ int i;
++
++ totalUsedBanks = 0;
++ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
++ index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
++ index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
++ index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
++
++ if (i == 0) {
++ if (id[i] & 0x0ff)
++ GLOB_valid_banks[i] = 1;
++ } else {
++ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
++ GLOB_valid_banks[i] = 1;
++ }
++
++ totalUsedBanks += GLOB_valid_banks[i];
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "totalUsedBanks: %d\n", totalUsedBanks);
++}
++
++static void detect_partition_feature(void)
++{
++ if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
++ if ((ioread32(FlashReg + PERM_SRC_ID_1) &
++ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
++ DeviceInfo.wSpectraStartBlock =
++ ((ioread32(FlashReg + MIN_MAX_BANK_1) &
++ MIN_MAX_BANK_1__MIN_VALUE) *
++ DeviceInfo.wTotalBlocks)
++ +
++ (ioread32(FlashReg + MIN_BLK_ADDR_1) &
++ MIN_BLK_ADDR_1__VALUE);
++
++ DeviceInfo.wSpectraEndBlock =
++ (((ioread32(FlashReg + MIN_MAX_BANK_1) &
++ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
++ DeviceInfo.wTotalBlocks)
++ +
++ (ioread32(FlashReg + MAX_BLK_ADDR_1) &
++ MAX_BLK_ADDR_1__VALUE);
++
++ DeviceInfo.wTotalBlocks *= totalUsedBanks;
++
++ if (DeviceInfo.wSpectraEndBlock >=
++ DeviceInfo.wTotalBlocks) {
++ DeviceInfo.wSpectraEndBlock =
++ DeviceInfo.wTotalBlocks - 1;
++ }
++
++ DeviceInfo.wDataBlockNum =
++ DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock + 1;
++ } else {
++ DeviceInfo.wTotalBlocks *= totalUsedBanks;
++ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
++ DeviceInfo.wSpectraEndBlock =
++ DeviceInfo.wTotalBlocks - 1;
++ DeviceInfo.wDataBlockNum =
++ DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock + 1;
++ }
++ } else {
++ DeviceInfo.wTotalBlocks *= totalUsedBanks;
++ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
++ DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
++ DeviceInfo.wDataBlockNum =
++ DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock + 1;
++ }
++}
++
++static void dump_device_info(void)
++{
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
++ DeviceInfo.wDeviceMaker);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
++ DeviceInfo.wDeviceID);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
++ DeviceInfo.wDeviceType);
++ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
++ DeviceInfo.wSpectraStartBlock);
++ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
++ DeviceInfo.wSpectraEndBlock);
++ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
++ DeviceInfo.wTotalBlocks);
++ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
++ DeviceInfo.wPagesPerBlock);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
++ DeviceInfo.wPageSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
++ DeviceInfo.wPageDataSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
++ DeviceInfo.wPageSpareSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
++ DeviceInfo.wNumPageSpareFlag);
++ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
++ DeviceInfo.wECCBytesPerSector);
++ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
++ DeviceInfo.wBlockSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
++ DeviceInfo.wBlockDataSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
++ DeviceInfo.wDataBlockNum);
++ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
++ DeviceInfo.bPlaneNum);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
++ DeviceInfo.wDeviceMainAreaSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
++ DeviceInfo.wDeviceSpareAreaSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
++ DeviceInfo.wDevicesConnected);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
++ DeviceInfo.wDeviceWidth);
++ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
++ DeviceInfo.wHWRevision);
++ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
++ DeviceInfo.wHWFeatures);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
++ DeviceInfo.wONFIDevFeatures);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
++ DeviceInfo.wONFIOptCommands);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
++ DeviceInfo.wONFITimingMode);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
++ DeviceInfo.wONFIPgmCacheTimingMode);
++ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
++ DeviceInfo.MLCDevice ? "Yes" : "No");
++ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
++ DeviceInfo.wSpareSkipBytes);
++ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
++ DeviceInfo.nBitsInPageNumber);
++ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
++ DeviceInfo.nBitsInPageDataSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
++ DeviceInfo.nBitsInBlockDataSize);
++}
++
++u16 NAND_Read_Device_ID(void)
++{
++ u16 status = PASS;
++ u8 no_of_planes;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
++ iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
++ DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
++ DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
++ DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
++
++ if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
++ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
++ if (FAIL == get_onfi_nand_para())
++ return FAIL;
++ } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
++ get_samsung_nand_para();
++ } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
++ get_toshiba_nand_para();
++ } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
++ get_hynix_nand_para();
++ } else {
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
++ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
++ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
++ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
++ ioread32(FlashReg + ACC_CLKS),
++ ioread32(FlashReg + RE_2_WE),
++ ioread32(FlashReg + WE_2_RE),
++ ioread32(FlashReg + ADDR_2_DATA),
++ ioread32(FlashReg + RDWR_EN_LO_CNT),
++ ioread32(FlashReg + RDWR_EN_HI_CNT),
++ ioread32(FlashReg + CS_SETUP_CNT));
++
++ DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
++ DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
++
++ DeviceInfo.wDeviceMainAreaSize =
++ ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
++ DeviceInfo.wDeviceSpareAreaSize =
++ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
++
++ DeviceInfo.wPageDataSize =
++ ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
++
++	/* Note: When using the Micron 4K NAND device, the controller will report
++ * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
++ * And if force set it to 218 bytes, the controller can not work
++ * correctly. So just let it be. But keep in mind that this bug may
++ * cause
++ * other problems in future. - Yunpeng 2008-10-10
++ */
++ DeviceInfo.wPageSpareSize =
++ ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
++
++ DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
++
++ DeviceInfo.wPageSize =
++ DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
++ DeviceInfo.wBlockSize =
++ DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
++ DeviceInfo.wBlockDataSize =
++ DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
++
++ DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
++ DeviceInfo.wDeviceType =
++ ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
++
++ DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
++
++ DeviceInfo.wSpareSkipBytes =
++ ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
++ DeviceInfo.wDevicesConnected;
++
++ DeviceInfo.nBitsInPageNumber =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
++ DeviceInfo.nBitsInPageDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
++ DeviceInfo.nBitsInBlockDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
++
++ set_ecc_config();
++
++ no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
++ NUMBER_OF_PLANES__VALUE;
++
++ switch (no_of_planes) {
++ case 0:
++ case 1:
++ case 3:
++ case 7:
++ DeviceInfo.bPlaneNum = no_of_planes + 1;
++ break;
++ default:
++ status = FAIL;
++ break;
++ }
++
++ find_valid_banks();
++
++ detect_partition_feature();
++
++ dump_device_info();
++
++ return status;
++}
++
++u16 NAND_UnlockArrayAll(void)
++{
++ u64 start_addr, end_addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ start_addr = 0;
++ end_addr = ((u64)DeviceInfo.wBlockSize *
++ (DeviceInfo.wTotalBlocks - 1)) >>
++ DeviceInfo.nBitsInPageDataSize;
++
++ index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
++ index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
++
++ return PASS;
++}
++
++void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (INT_ENABLE)
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
++ else
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++}
++
++u16 NAND_Erase_Block(u32 block)
++{
++ u16 status = PASS;
++ u64 flash_add;
++ u16 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (block >= DeviceInfo.wTotalBlocks)
++ status = FAIL;
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
++ FlashReg + intr_status);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
++
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ERASE_FAIL)
++ status = FAIL;
++
++ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
++ FlashReg + intr_status);
++ }
++
++ return status;
++}
++
++static u32 Boundary_Check_Block_Page(u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++
++ if (block >= DeviceInfo.wTotalBlocks)
++ status = FAIL;
++
++ if (page + page_count > DeviceInfo.wPagesPerBlock)
++ status = FAIL;
++
++ return status;
++}
++
++u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u32 i;
++ u64 flash_add;
++ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *page_spare = buf_read_page_spare;
++
++ if (block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "block too big: %d\n", (int)block);
++ status = FAIL;
++ }
++
++ if (page >= DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "page too big: %d\n", page);
++ status = FAIL;
++ }
++
++ if (page_count > 1) {
++ printk(KERN_ERR "page count too big: %d\n", page_count);
++ status = FAIL;
++ }
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x41);
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x2000 | page_count);
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__LOAD_COMP))
++ ;
++
++ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ FlashMem);
++
++ for (i = 0; i < (PageSpareSize / 4); i++)
++ *((u32 *)page_spare + i) =
++ ioread32(FlashMem + 0x10);
++
++ if (enable_ecc) {
++ for (i = 0; i < spareFlagBytes; i++)
++ read_data[i] =
++ page_spare[PageSpareSize -
++ spareFlagBytes + i];
++ for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
++ read_data[spareFlagBytes + i] =
++ page_spare[i];
++ } else {
++ for (i = 0; i < PageSpareSize; i++)
++ read_data[i] = page_spare[i];
++ }
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++ }
++
++ return status;
++}
++
++/* No use function. Should be removed later */
++u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
++ u16 page_count)
++{
++ printk(KERN_ERR
++ "Error! This function (NAND_Write_Page_Spare) should never"
++ " be called!\n");
++ return ERR;
++}
++
++/* op value: 0 - DDMA read; 1 - DDMA write */
++static void ddma_trans(u8 *data, u64 flash_add,
++ u32 flash_bank, int op, u32 numPages)
++{
++ u32 data_addr;
++
++ /* Map virtual address to bus address for DDMA */
++ data_addr = virt_to_bus(data);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ (u16)(2 << 12) | (op << 8) | numPages);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
++ (u16)(2 << 12) | (2 << 8) | 0);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ ((u16)(0x0FFFF & data_addr) << 8)),
++ (u16)(2 << 12) | (3 << 8) | 0);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (1 << 16) | (0x40 << 8)),
++ (u16)(2 << 12) | (4 << 8) | 0);
++}
++
++/* If data in buf are all 0xff, then return 1; otherwise return 0 */
++static int check_all_1(u8 *buf)
++{
++ int i, j, cnt;
++
++ for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
++ if (buf[i] != 0xff) {
++ cnt = 0;
++ nand_dbg_print(NAND_DBG_WARN,
++ "the first non-0xff data byte is: %d\n", i);
++ for (j = i; j < DeviceInfo.wPageDataSize; j++) {
++ nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
++ cnt++;
++ if (cnt > 8)
++ break;
++ }
++ nand_dbg_print(NAND_DBG_WARN, "\n");
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++static int do_ecc_new(unsigned long bank, u8 *buf,
++ u32 block, u16 page)
++{
++ int status = PASS;
++ u16 err_page = 0;
++ u16 err_byte;
++ u8 err_sect;
++ u8 err_dev;
++ u16 err_fix_info;
++ u16 err_addr;
++ u32 ecc_sect_size;
++ u8 *err_pos;
++ u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
++ ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
++
++ ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ do {
++ err_page = ioread32(FlashReg + err_page_addr[bank]);
++ err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
++ err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
++ err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
++ err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
++ err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
++ >> 8);
++ if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "%s, Line %d Uncorrectable ECC error "
++ "when read block %d page %d."
++ "PTN_INTR register: 0x%x "
++ "err_page: %d, err_sect: %d, err_byte: %d, "
++ "err_dev: %d, ecc_sect_size: %d, "
++ "err_fix_info: 0x%x\n",
++ __FILE__, __LINE__, block, page,
++ ioread32(FlashReg + PTN_INTR),
++ err_page, err_sect, err_byte, err_dev,
++ ecc_sect_size, (u32)err_fix_info);
++
++ if (check_all_1(buf))
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
++ "All 0xff!\n",
++ __FILE__, __LINE__);
++ else
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
++ "Not all 0xff!\n",
++ __FILE__, __LINE__);
++ status = FAIL;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "%s, Line %d Found ECC error "
++ "when read block %d page %d."
++ "err_page: %d, err_sect: %d, err_byte: %d, "
++ "err_dev: %d, ecc_sect_size: %d, "
++ "err_fix_info: 0x%x\n",
++ __FILE__, __LINE__, block, page,
++ err_page, err_sect, err_byte, err_dev,
++ ecc_sect_size, (u32)err_fix_info);
++ if (err_byte < ECC_SECTOR_SIZE) {
++ err_pos = buf +
++ (err_page - page) *
++ DeviceInfo.wPageDataSize +
++ err_sect * ecc_sect_size +
++ err_byte *
++ DeviceInfo.wDevicesConnected +
++ err_dev;
++
++ *err_pos ^= err_fix_info &
++ ERR_CORRECTION_INFO__BYTEMASK;
++ }
++ }
++ } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
++
++ return status;
++}
++
++u16 NAND_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u64 flash_add;
++ u32 intr_status = 0;
++ u32 flash_bank;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *read_data_l;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ if (page_count > 1) {
++ read_data_l = read_data;
++ while (page_count > MAX_PAGES_PER_RW) {
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, MAX_PAGES_PER_RW);
++ else
++ status = NAND_Pipeline_Read_Ahead_Polling(
++ read_data_l, block, page,
++ MAX_PAGES_PER_RW);
++
++ if (status == FAIL)
++ return status;
++
++ read_data_l += DeviceInfo.wPageDataSize *
++ MAX_PAGES_PER_RW;
++ page_count -= MAX_PAGES_PER_RW;
++ page += MAX_PAGES_PER_RW;
++ }
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, page_count);
++ else
++ status = NAND_Pipeline_Read_Ahead_Polling(
++ read_data_l, block, page, page_count);
++
++ return status;
++ }
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
++
++ if (enable_ecc) {
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank, read_data,
++ block, page);
++ }
++
++		if ((ioread32(FlashReg + intr_status) &
++		    INTR_STATUS0__ECC_TRANSACTION_DONE) &&
++		    (ioread32(FlashReg + intr_status) & INTR_STATUS0__ECC_ERR))
++ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE)
++ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR)
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ } else {
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP))
++ ;
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u32 ecc_done_OR_dma_comp;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ if (page_count < 2)
++ status = FAIL;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ *DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
++
++ ecc_done_OR_dma_comp = 0;
++ while (1) {
++ if (enable_ecc) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank,
++ read_data, block, page);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ }
++ } else {
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP))
++ ;
++
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ break;
++ }
++
++ iowrite32((~INTR_STATUS0__ECC_ERR) &
++ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++ }
++ return status;
++}
++
++u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u64 flash_add;
++ u32 intr_status = 0;
++ u32 flash_bank;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++ u8 *read_data_l;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ if (page_count > 1) {
++ read_data_l = read_data;
++ while (page_count > MAX_PAGES_PER_RW) {
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, MAX_PAGES_PER_RW);
++ else
++ status = NAND_Pipeline_Read_Ahead(
++ read_data_l, block, page,
++ MAX_PAGES_PER_RW);
++
++ if (status == FAIL)
++ return status;
++
++ read_data_l += DeviceInfo.wPageDataSize *
++ MAX_PAGES_PER_RW;
++ page_count -= MAX_PAGES_PER_RW;
++ page += MAX_PAGES_PER_RW;
++ }
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, page_count);
++ else
++ status = NAND_Pipeline_Read_Ahead(
++ read_data_l, block, page, page_count);
++
++ return status;
++ }
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_READ_PAGE_MAIN;
++ info.read_data = read_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++void Conv_Spare_Data_Log2Phy_Format(u8 *data)
++{
++ int i;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++
++ if (enable_ecc) {
++		for (i = spareFlagBytes - 1; i >= 0; i--)
++ data[PageSpareSize - spareFlagBytes + i] = data[i];
++ }
++}
++
++void Conv_Spare_Data_Phy2Log_Format(u8 *data)
++{
++ int i;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++
++ if (enable_ecc) {
++ for (i = 0; i < spareFlagBytes; i++)
++ data[i] = data[PageSpareSize - spareFlagBytes + i];
++ }
++}
++
++
++void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
++{
++ const u32 PageSize = DeviceInfo.wPageSize;
++ const u32 PageDataSize = DeviceInfo.wPageDataSize;
++ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 eccSectorSize;
++ u32 page_offset;
++ int i, j;
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++ if (enable_ecc) {
++ while (page_count > 0) {
++ page_offset = (page_count - 1) * PageSize;
++ j = (DeviceInfo.wPageDataSize / eccSectorSize);
++ for (i = spareFlagBytes - 1; i >= 0; i--)
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j + i] =
++ data[page_offset + PageDataSize + i];
++ for (j--; j >= 1; j--) {
++ for (i = eccSectorSize - 1; i >= 0; i--)
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j + i] =
++ data[page_offset +
++ eccSectorSize * j + i];
++ }
++ for (i = (PageSize - spareSkipBytes) - 1;
++ i >= PageDataSize; i--)
++ data[page_offset + i + spareSkipBytes] =
++ data[page_offset + i];
++ page_count--;
++ }
++ }
++}
++
++void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
++{
++ const u32 PageSize = DeviceInfo.wPageSize;
++ const u32 PageDataSize = DeviceInfo.wPageDataSize;
++ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 eccSectorSize;
++ u32 page_offset;
++ int i, j;
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++ if (enable_ecc) {
++ while (page_count > 0) {
++ page_offset = (page_count - 1) * PageSize;
++ for (i = PageDataSize;
++ i < PageSize - spareSkipBytes;
++ i++)
++ data[page_offset + i] =
++ data[page_offset + i +
++ spareSkipBytes];
++ for (j = 1;
++ j < DeviceInfo.wPageDataSize / eccSectorSize;
++ j++) {
++ for (i = 0; i < eccSectorSize; i++)
++ data[page_offset +
++ eccSectorSize * j + i] =
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j
++ + i];
++ }
++ for (i = 0; i < spareFlagBytes; i++)
++ data[page_offset + PageDataSize + i] =
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j + i];
++ page_count--;
++ }
++ }
++}
++
++/* Un-tested function */
++u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u32 ecc_done_OR_dma_comp;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
++
++ ecc_done_OR_dma_comp = 0;
++ while (1) {
++ if (enable_ecc) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank,
++ read_data, block, page);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ }
++ } else {
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP))
++ ;
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ break;
++ }
++
++ iowrite32((~INTR_STATUS0__ECC_ERR) &
++ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
++ }
++
++ return status;
++}
++
++u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
++ u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ if (page_count < 2)
++ status = FAIL;
++
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ *DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_PIPELINE_READ_AHEAD;
++ info.read_data = read_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++
++u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u64 flash_add;
++ u32 intr_status = 0;
++ u32 flash_bank;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++ u8 *write_data_l;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ iowrite32(INTR_STATUS0__PROGRAM_COMP |
++ INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
++
++ if (page_count > 1) {
++ write_data_l = write_data;
++ while (page_count > MAX_PAGES_PER_RW) {
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Write(write_data_l,
++ block, page, MAX_PAGES_PER_RW);
++ else
++ status = NAND_Pipeline_Write_Ahead(
++ write_data_l, block, page,
++ MAX_PAGES_PER_RW);
++ if (status == FAIL)
++ return status;
++
++ write_data_l += DeviceInfo.wPageDataSize *
++ MAX_PAGES_PER_RW;
++ page_count -= MAX_PAGES_PER_RW;
++ page += MAX_PAGES_PER_RW;
++ }
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Write(write_data_l,
++ block, page, page_count);
++ else
++ status = NAND_Pipeline_Write_Ahead(write_data_l,
++ block, page, page_count);
++
++ return status;
++ }
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_WRITE_PAGE_MAIN;
++ info.write_data = write_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ ddma_trans(write_data, flash_add, flash_bank, 1, 1);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
++ ;
++
++ return status;
++}
++
++void NAND_ECC_Ctrl(int enable)
++{
++ if (enable) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Will enable ECC in %s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ iowrite32(1, FlashReg + ECC_ENABLE);
++ enable_ecc = 1;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Will disable ECC in %s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ iowrite32(0, FlashReg + ECC_ENABLE);
++ enable_ecc = 0;
++ }
++}
++
++u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u32 i, j, page_num = 0;
++ u32 PageSize = DeviceInfo.wPageSize;
++ u32 PageDataSize = DeviceInfo.wPageDataSize;
++ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ u64 flash_add;
++ u32 eccSectorSize;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *page_main_spare = buf_write_page_main_spare;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
++
++ while ((status != FAIL) && (page_count > 0)) {
++ flash_add = (u64)(block %
++ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
++ DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
++ (flash_add >>
++ DeviceInfo.nBitsInPageDataSize)),
++ FlashMem);
++
++ if (enable_ecc) {
++ for (j = 0;
++ j <
++ DeviceInfo.wPageDataSize / eccSectorSize;
++ j++) {
++ for (i = 0; i < eccSectorSize; i++)
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j +
++ i] =
++ write_data[eccSectorSize *
++ j + i];
++
++ for (i = 0; i < eccBytes; i++)
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j +
++ eccSectorSize +
++ i] =
++ write_data[PageDataSize +
++ spareFlagBytes +
++ eccBytes * j +
++ i];
++ }
++
++ for (i = 0; i < spareFlagBytes; i++)
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j + i] =
++ write_data[PageDataSize + i];
++
++ for (i = PageSize - 1; i >= PageDataSize +
++ spareSkipBytes; i--)
++ page_main_spare[i] = page_main_spare[i -
++ spareSkipBytes];
++
++ for (i = PageDataSize; i < PageDataSize +
++ spareSkipBytes; i++)
++ page_main_spare[i] = 0xff;
++
++ for (i = 0; i < PageSize / 4; i++)
++ iowrite32(
++ *((u32 *)page_main_spare + i),
++ FlashMem + 0x10);
++ } else {
++
++ for (i = 0; i < PageSize / 4; i++)
++ iowrite32(*((u32 *)write_data + i),
++ FlashMem + 0x10);
++ }
++
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__PROGRAM_COMP |
++ INTR_STATUS0__PROGRAM_FAIL)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL)
++ status = FAIL;
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ page_num++;
++ page_count--;
++ write_data += PageSize;
++ }
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ }
++
++ return status;
++}
++
++u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u32 i, j;
++ u64 flash_add = 0;
++ u32 PageSize = DeviceInfo.wPageSize;
++ u32 PageDataSize = DeviceInfo.wPageDataSize;
++ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ u32 eccSectorSize;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u8 *read_data_l = read_data;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *page_main_spare = buf_read_page_main_spare;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ while ((status != FAIL) && (page_count > 0)) {
++ flash_add = (u64)(block %
++ (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x43);
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x2000 | page_count);
++
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__LOAD_COMP))
++ ;
++
++ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
++ (flash_add >>
++ DeviceInfo.nBitsInPageDataSize)),
++ FlashMem);
++
++ for (i = 0; i < PageSize / 4; i++)
++ *(((u32 *)page_main_spare) + i) =
++ ioread32(FlashMem + 0x10);
++
++ if (enable_ecc) {
++ for (i = PageDataSize; i < PageSize -
++ spareSkipBytes; i++)
++ page_main_spare[i] = page_main_spare[i +
++ spareSkipBytes];
++
++ for (j = 0;
++ j < DeviceInfo.wPageDataSize / eccSectorSize;
++ j++) {
++
++ for (i = 0; i < eccSectorSize; i++)
++ read_data_l[eccSectorSize * j +
++ i] =
++ page_main_spare[
++ (eccSectorSize +
++ eccBytes) * j + i];
++
++ for (i = 0; i < eccBytes; i++)
++ read_data_l[PageDataSize +
++ spareFlagBytes +
++ eccBytes * j + i] =
++ page_main_spare[
++ (eccSectorSize +
++ eccBytes) * j +
++ eccSectorSize + i];
++ }
++
++ for (i = 0; i < spareFlagBytes; i++)
++ read_data_l[PageDataSize + i] =
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j + i];
++ } else {
++ for (i = 0; i < (PageDataSize + PageSpareSize);
++ i++)
++ read_data_l[i] = page_main_spare[i];
++
++ }
++
++ if (enable_ecc) {
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank,
++ read_data, block, page);
++ }
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ }
++ }
++
++ page++;
++ page_count--;
++ read_data_l += PageSize;
++ }
++ }
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ return status;
++}
++
++u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
++ u16 page, u16 page_count)
++{
++ u16 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ if (page_count < 2)
++ status = FAIL;
++
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_PIPELINE_WRITE_AHEAD;
++ info.write_data = write_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++/* Un-tested function */
++u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u16 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u16 status2 = PASS;
++ u32 t;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
++
++ while (1) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ status = PASS;
++ if (status2 == FAIL)
++ status = FAIL;
++ break;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL) {
++ status2 = FAIL;
++ status = FAIL;
++ t = ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL;
++ iowrite32(t, FlashReg + intr_status);
++ } else {
++ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++ }
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
++
++ return status;
++}
++
++
++#if CMD_DMA
++static irqreturn_t cdma_isr(int irq, void *dev_id)
++{
++ struct mrst_nand_info *dev = dev_id;
++ int first_failed_cmd;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (!is_cdma_interrupt())
++ return IRQ_NONE;
++
++ /* Disable controller interrupts */
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++ GLOB_FTL_Event_Status(&first_failed_cmd);
++ complete(&dev->complete);
++
++ return IRQ_HANDLED;
++}
++#else
++static void handle_nand_int_read(struct mrst_nand_info *dev)
++{
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u32 intr_status;
++ u32 ecc_done_OR_dma_comp = 0;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dev->ret = PASS;
++ intr_status = intr_status_addresses[dev->flash_bank];
++
++ while (1) {
++ if (enable_ecc) {
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ dev->ret = do_ecc_new(dev->flash_bank,
++ dev->read_data,
++ dev->block, dev->page);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++ ecc_done_OR_dma_comp = 1;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++ ecc_done_OR_dma_comp = 1;
++ }
++ } else {
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ break;
++ } else {
++ printk(KERN_ERR "Illegal INTS "
++ "(offset addr 0x%x) value: 0x%x\n",
++ intr_status,
++ ioread32(FlashReg + intr_status));
++ }
++ }
++
++ iowrite32((~INTR_STATUS0__ECC_ERR) &
++ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++ }
++}
++
++static void handle_nand_int_write(struct mrst_nand_info *dev)
++{
++ u32 intr_status;
++ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++ int status = PASS;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dev->ret = PASS;
++ intr_status = intr[dev->flash_bank];
++
++ while (1) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ if (FAIL == status)
++ dev->ret = FAIL;
++ break;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL) {
++ status = FAIL;
++ iowrite32(INTR_STATUS0__PROGRAM_FAIL,
++ FlashReg + intr_status);
++ } else {
++ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++ }
++ }
++}
++
++static irqreturn_t ddma_isr(int irq, void *dev_id)
++{
++ struct mrst_nand_info *dev = dev_id;
++ u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
++ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++
++ int_mask = INTR_STATUS0__DMA_CMD_COMP |
++ INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL;
++
++ ints0 = ioread32(FlashReg + INTR_STATUS0);
++ ints1 = ioread32(FlashReg + INTR_STATUS1);
++ ints2 = ioread32(FlashReg + INTR_STATUS2);
++ ints3 = ioread32(FlashReg + INTR_STATUS3);
++
++ ints_offset = intr[dev->flash_bank];
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
++ "DMA_INTR: 0x%x, "
++ "dev->state: 0x%x, dev->flash_bank: %d\n",
++ ints0, ints1, ints2, ints3,
++ ioread32(FlashReg + DMA_INTR),
++ dev->state, dev->flash_bank);
++
++ if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
++ iowrite32(ints0, FlashReg + INTR_STATUS0);
++ iowrite32(ints1, FlashReg + INTR_STATUS1);
++ iowrite32(ints2, FlashReg + INTR_STATUS2);
++ iowrite32(ints3, FlashReg + INTR_STATUS3);
++ nand_dbg_print(NAND_DBG_WARN,
++ "ddma_isr: Invalid interrupt for NAND controller. "
++ "Ignore it\n");
++ return IRQ_NONE;
++ }
++
++ switch (dev->state) {
++ case INT_READ_PAGE_MAIN:
++ case INT_PIPELINE_READ_AHEAD:
++ /* Disable controller interrupts */
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++ handle_nand_int_read(dev);
++ break;
++ case INT_WRITE_PAGE_MAIN:
++ case INT_PIPELINE_WRITE_AHEAD:
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++ handle_nand_int_write(dev);
++ break;
++ default:
++ printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
++ dev->state);
++ return IRQ_NONE;
++ }
++
++ dev->state = INT_IDLE_STATE;
++ complete(&dev->complete);
++ return IRQ_HANDLED;
++}
++#endif
++
++static const struct pci_device_id nand_pci_ids[] = {
++ {
++ .vendor = 0x8086,
++ .device = 0x0809,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ { /* end: all zeroes */ }
++};
++
++static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
++{
++ int ret = -ENODEV;
++ unsigned long csr_base;
++ unsigned long csr_len;
++ struct mrst_nand_info *pndev = &info;
++ u32 int_mask;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
++ GLOB_HWCTL_REG_SIZE);
++ if (!FlashReg) {
++ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
++ return -ENOMEM;
++ }
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: Remapped reg base address: "
++ "0x%p, len: %d\n",
++ FlashReg, GLOB_HWCTL_REG_SIZE);
++
++ FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
++ GLOB_HWCTL_MEM_SIZE);
++ if (!FlashMem) {
++ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
++ iounmap(FlashReg);
++ return -ENOMEM;
++ }
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: Remapped flash base address: "
++ "0x%p, len: %d\n",
++ (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
++ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
++ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
++ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
++ ioread32(FlashReg + ACC_CLKS),
++ ioread32(FlashReg + RE_2_WE),
++ ioread32(FlashReg + WE_2_RE),
++ ioread32(FlashReg + ADDR_2_DATA),
++ ioread32(FlashReg + RDWR_EN_LO_CNT),
++ ioread32(FlashReg + RDWR_EN_HI_CNT),
++ ioread32(FlashReg + CS_SETUP_CNT));
++
++ NAND_Flash_Reset();
++
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++
++#if CMD_DMA
++ info.pcmds_num = 0;
++ info.flash_bank = 0;
++ info.cdma_num = 0;
++ int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
++ DMA_INTR__DESC_COMP_CHANNEL1 |
++ DMA_INTR__DESC_COMP_CHANNEL2 |
++ DMA_INTR__DESC_COMP_CHANNEL3 |
++ DMA_INTR__MEMCOPY_DESC_COMP);
++ iowrite32(int_mask, FlashReg + DMA_INTR_EN);
++ iowrite32(0xFFFF, FlashReg + DMA_INTR);
++
++ int_mask = (INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL);
++#else
++ int_mask = INTR_STATUS0__DMA_CMD_COMP |
++ INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL;
++#endif
++ iowrite32(int_mask, FlashReg + INTR_EN0);
++ iowrite32(int_mask, FlashReg + INTR_EN1);
++ iowrite32(int_mask, FlashReg + INTR_EN2);
++ iowrite32(int_mask, FlashReg + INTR_EN3);
++
++ /* Clear all status bits */
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
++
++ iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
++ iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
++
++ /* Should set value for these registers when init */
++ iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
++ iowrite32(1, FlashReg + ECC_ENABLE);
++ enable_ecc = 1;
++ ret = pci_enable_device(dev);
++ if (ret) {
++ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
++ goto failed_req_csr;
++ }
++
++ pci_set_master(dev);
++ pndev->dev = dev;
++
++ csr_base = pci_resource_start(dev, 0);
++ if (!csr_base) {
++ printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
++ ret = -ENODEV;
++ goto failed_req_csr;
++ }
++
++ csr_len = pci_resource_len(dev, 0);
++ if (!csr_len) {
++ printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
++ ret = -ENODEV;
++ goto failed_req_csr;
++ }
++
++ ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
++ if (ret) {
++ printk(KERN_ERR "Spectra: Unable to request "
++ "memory region\n");
++ goto failed_req_csr;
++ }
++
++ pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
++ if (!pndev->ioaddr) {
++ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
++ ret = -ENOMEM;
++ goto failed_remap_csr;
++ }
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
++ csr_base, pndev->ioaddr, csr_len);
++
++ init_completion(&pndev->complete);
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
++
++#if CMD_DMA
++ if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
++ SPECTRA_NAND_NAME, &info)) {
++ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
++ ret = -ENODEV;
++ iounmap(pndev->ioaddr);
++ goto failed_remap_csr;
++ }
++#else
++ if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
++ SPECTRA_NAND_NAME, &info)) {
++ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
++ ret = -ENODEV;
++ iounmap(pndev->ioaddr);
++ goto failed_remap_csr;
++ }
++#endif
++
++ pci_set_drvdata(dev, pndev);
++
++ ret = GLOB_LLD_Read_Device_ID();
++ if (ret) {
++ iounmap(pndev->ioaddr);
++ goto failed_remap_csr;
++ }
++
++ ret = register_spectra_ftl();
++ if (ret) {
++ iounmap(pndev->ioaddr);
++ goto failed_remap_csr;
++ }
++
++ return 0;
++
++failed_remap_csr:
++ pci_release_regions(dev);
++failed_req_csr:
++ iounmap(FlashMem);
++ iounmap(FlashReg);
++
++ return ret;
++}
++
++static void nand_pci_remove(struct pci_dev *dev)
++{
++ struct mrst_nand_info *pndev = pci_get_drvdata(dev);
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++#if CMD_DMA
++ free_irq(dev->irq, pndev);
++#endif
++ iounmap(pndev->ioaddr);
++ pci_release_regions(dev);
++ pci_disable_device(dev);
++}
++
++MODULE_DEVICE_TABLE(pci, nand_pci_ids);
++
++static struct pci_driver nand_pci_driver = {
++ .name = SPECTRA_NAND_NAME,
++ .id_table = nand_pci_ids,
++ .probe = nand_pci_probe,
++ .remove = nand_pci_remove,
++};
++
++int NAND_Flash_Init(void)
++{
++ int retval;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ retval = pci_register_driver(&nand_pci_driver);
++ if (retval)
++ return -ENOMEM;
++
++ return PASS;
++}
++
++/* Free memory */
++int nand_release_spectra(void)
++{
++ pci_unregister_driver(&nand_pci_driver);
++ iounmap(FlashMem);
++ iounmap(FlashReg);
++
++ return 0;
++}
++
++
++
+--- /dev/null
++++ b/drivers/staging/spectra/lld_nand.h
+@@ -0,0 +1,131 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _LLD_NAND_
++#define _LLD_NAND_
++
++#ifdef ELDORA
++#include "defs.h"
++#else
++#include "flash.h"
++#include "ffsport.h"
++#endif
++
++#define MODE_00 0x00000000
++#define MODE_01 0x04000000
++#define MODE_10 0x08000000
++#define MODE_11 0x0C000000
++
++
++#define DATA_TRANSFER_MODE 0
++#define PROTECTION_PER_BLOCK 1
++#define LOAD_WAIT_COUNT 2
++#define PROGRAM_WAIT_COUNT 3
++#define ERASE_WAIT_COUNT 4
++#define INT_MONITOR_CYCLE_COUNT 5
++#define READ_BUSY_PIN_ENABLED 6
++#define MULTIPLANE_OPERATION_SUPPORT 7
++#define PRE_FETCH_MODE 8
++#define CE_DONT_CARE_SUPPORT 9
++#define COPYBACK_SUPPORT 10
++#define CACHE_WRITE_SUPPORT 11
++#define CACHE_READ_SUPPORT 12
++#define NUM_PAGES_IN_BLOCK 13
++#define ECC_ENABLE_SELECT 14
++#define WRITE_ENABLE_2_READ_ENABLE 15
++#define ADDRESS_2_DATA 16
++#define READ_ENABLE_2_WRITE_ENABLE 17
++#define TWO_ROW_ADDRESS_CYCLES 18
++#define MULTIPLANE_ADDRESS_RESTRICT 19
++#define ACC_CLOCKS 20
++#define READ_WRITE_ENABLE_LOW_COUNT 21
++#define READ_WRITE_ENABLE_HIGH_COUNT 22
++
++#define ECC_SECTOR_SIZE 512
++#define LLD_MAX_FLASH_BANKS 4
++
++struct mrst_nand_info {
++ struct pci_dev *dev;
++ u32 state;
++ u32 flash_bank;
++ u8 *read_data;
++ u8 *write_data;
++ u32 block;
++ u16 page;
++ u32 use_dma;
++ void __iomem *ioaddr; /* Mapped io reg base address */
++ int ret;
++ u32 pcmds_num;
++ struct pending_cmd *pcmds;
++ int cdma_num; /* CDMA descriptor number in this chan */
++ u8 *cdma_desc_buf; /* CDMA descriptor table */
++ u8 *memcp_desc_buf; /* Memory copy descriptor table */
++ dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
++ dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
++ struct completion complete;
++};
++
++int NAND_Flash_Init(void);
++int nand_release_spectra(void);
++u16 NAND_Flash_Reset(void);
++u16 NAND_Read_Device_ID(void);
++u16 NAND_Erase_Block(u32 flash_add);
++u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_UnlockArrayAll(void);
++u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 page, u16 page_count);
++u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
++u16 NAND_Get_Bad_Block(u32 block);
++u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
++ u16 page, u16 page_count);
++u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
++ u16 page_count);
++void NAND_ECC_Ctrl(int enable);
++u16 NAND_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++void Conv_Spare_Data_Log2Phy_Format(u8 *data);
++void Conv_Spare_Data_Phy2Log_Format(u8 *data);
++void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
++void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
++
++extern void __iomem *FlashReg;
++extern void __iomem *FlashMem;
++
++extern int totalUsedBanks;
++extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
++
++#endif /*_LLD_NAND_*/
++
++
++
+--- /dev/null
++++ b/drivers/staging/spectra/nand_regs.h
+@@ -0,0 +1,619 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#define DEVICE_RESET 0x0
++#define DEVICE_RESET__BANK0 0x0001
++#define DEVICE_RESET__BANK1 0x0002
++#define DEVICE_RESET__BANK2 0x0004
++#define DEVICE_RESET__BANK3 0x0008
++
++#define TRANSFER_SPARE_REG 0x10
++#define TRANSFER_SPARE_REG__FLAG 0x0001
++
++#define LOAD_WAIT_CNT 0x20
++#define LOAD_WAIT_CNT__VALUE 0xffff
++
++#define PROGRAM_WAIT_CNT 0x30
++#define PROGRAM_WAIT_CNT__VALUE 0xffff
++
++#define ERASE_WAIT_CNT 0x40
++#define ERASE_WAIT_CNT__VALUE 0xffff
++
++#define INT_MON_CYCCNT 0x50
++#define INT_MON_CYCCNT__VALUE 0xffff
++
++#define RB_PIN_ENABLED 0x60
++#define RB_PIN_ENABLED__BANK0 0x0001
++#define RB_PIN_ENABLED__BANK1 0x0002
++#define RB_PIN_ENABLED__BANK2 0x0004
++#define RB_PIN_ENABLED__BANK3 0x0008
++
++#define MULTIPLANE_OPERATION 0x70
++#define MULTIPLANE_OPERATION__FLAG 0x0001
++
++#define MULTIPLANE_READ_ENABLE 0x80
++#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
++
++#define COPYBACK_DISABLE 0x90
++#define COPYBACK_DISABLE__FLAG 0x0001
++
++#define CACHE_WRITE_ENABLE 0xa0
++#define CACHE_WRITE_ENABLE__FLAG 0x0001
++
++#define CACHE_READ_ENABLE 0xb0
++#define CACHE_READ_ENABLE__FLAG 0x0001
++
++#define PREFETCH_MODE 0xc0
++#define PREFETCH_MODE__PREFETCH_EN 0x0001
++#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
++
++#define CHIP_ENABLE_DONT_CARE 0xd0
++#define CHIP_EN_DONT_CARE__FLAG 0x01
++
++#define ECC_ENABLE 0xe0
++#define ECC_ENABLE__FLAG 0x0001
++
++#define GLOBAL_INT_ENABLE 0xf0
++#define GLOBAL_INT_EN_FLAG 0x01
++
++#define WE_2_RE 0x100
++#define WE_2_RE__VALUE 0x003f
++
++#define ADDR_2_DATA 0x110
++#define ADDR_2_DATA__VALUE 0x003f
++
++#define RE_2_WE 0x120
++#define RE_2_WE__VALUE 0x003f
++
++#define ACC_CLKS 0x130
++#define ACC_CLKS__VALUE 0x000f
++
++#define NUMBER_OF_PLANES 0x140
++#define NUMBER_OF_PLANES__VALUE 0x0007
++
++#define PAGES_PER_BLOCK 0x150
++#define PAGES_PER_BLOCK__VALUE 0xffff
++
++#define DEVICE_WIDTH 0x160
++#define DEVICE_WIDTH__VALUE 0x0003
++
++#define DEVICE_MAIN_AREA_SIZE 0x170
++#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
++
++#define DEVICE_SPARE_AREA_SIZE 0x180
++#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
++
++#define TWO_ROW_ADDR_CYCLES 0x190
++#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
++
++#define MULTIPLANE_ADDR_RESTRICT 0x1a0
++#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
++
++#define ECC_CORRECTION 0x1b0
++#define ECC_CORRECTION__VALUE 0x001f
++
++#define READ_MODE 0x1c0
++#define READ_MODE__VALUE 0x000f
++
++#define WRITE_MODE 0x1d0
++#define WRITE_MODE__VALUE 0x000f
++
++#define COPYBACK_MODE 0x1e0
++#define COPYBACK_MODE__VALUE 0x000f
++
++#define RDWR_EN_LO_CNT 0x1f0
++#define RDWR_EN_LO_CNT__VALUE 0x001f
++
++#define RDWR_EN_HI_CNT 0x200
++#define RDWR_EN_HI_CNT__VALUE 0x001f
++
++#define MAX_RD_DELAY 0x210
++#define MAX_RD_DELAY__VALUE 0x000f
++
++#define CS_SETUP_CNT 0x220
++#define CS_SETUP_CNT__VALUE 0x001f
++
++#define SPARE_AREA_SKIP_BYTES 0x230
++#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
++
++#define SPARE_AREA_MARKER 0x240
++#define SPARE_AREA_MARKER__VALUE 0xffff
++
++#define DEVICES_CONNECTED 0x250
++#define DEVICES_CONNECTED__VALUE 0x0007
++
++#define DIE_MASK 0x260
++#define DIE_MASK__VALUE 0x00ff
++
++#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
++#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
++
++#define WRITE_PROTECT 0x280
++#define WRITE_PROTECT__FLAG 0x0001
++
++#define RE_2_RE 0x290
++#define RE_2_RE__VALUE 0x003f
++
++#define MANUFACTURER_ID 0x300
++#define MANUFACTURER_ID__VALUE 0x00ff
++
++#define DEVICE_ID 0x310
++#define DEVICE_ID__VALUE 0x00ff
++
++#define DEVICE_PARAM_0 0x320
++#define DEVICE_PARAM_0__VALUE 0x00ff
++
++#define DEVICE_PARAM_1 0x330
++#define DEVICE_PARAM_1__VALUE 0x00ff
++
++#define DEVICE_PARAM_2 0x340
++#define DEVICE_PARAM_2__VALUE 0x00ff
++
++#define LOGICAL_PAGE_DATA_SIZE 0x350
++#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
++
++#define LOGICAL_PAGE_SPARE_SIZE 0x360
++#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
++
++#define REVISION 0x370
++#define REVISION__VALUE 0xffff
++
++#define ONFI_DEVICE_FEATURES 0x380
++#define ONFI_DEVICE_FEATURES__VALUE 0x003f
++
++#define ONFI_OPTIONAL_COMMANDS 0x390
++#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
++
++#define ONFI_TIMING_MODE 0x3a0
++#define ONFI_TIMING_MODE__VALUE 0x003f
++
++#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
++#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
++
++#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
++#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
++#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
++
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
++
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
++
++#define FEATURES 0x3f0
++#define FEATURES__N_BANKS 0x0003
++#define FEATURES__ECC_MAX_ERR 0x003c
++#define FEATURES__DMA 0x0040
++#define FEATURES__CMD_DMA 0x0080
++#define FEATURES__PARTITION 0x0100
++#define FEATURES__XDMA_SIDEBAND 0x0200
++#define FEATURES__GPREG 0x0400
++#define FEATURES__INDEX_ADDR 0x0800
++
++#define TRANSFER_MODE 0x400
++#define TRANSFER_MODE__VALUE 0x0003
++
++#define INTR_STATUS0 0x410
++#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS0__ECC_ERR 0x0002
++#define INTR_STATUS0__DMA_CMD_COMP 0x0004
++#define INTR_STATUS0__TIME_OUT 0x0008
++#define INTR_STATUS0__PROGRAM_FAIL 0x0010
++#define INTR_STATUS0__ERASE_FAIL 0x0020
++#define INTR_STATUS0__LOAD_COMP 0x0040
++#define INTR_STATUS0__PROGRAM_COMP 0x0080
++#define INTR_STATUS0__ERASE_COMP 0x0100
++#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS0__LOCKED_BLK 0x0400
++#define INTR_STATUS0__UNSUP_CMD 0x0800
++#define INTR_STATUS0__INT_ACT 0x1000
++#define INTR_STATUS0__RST_COMP 0x2000
++#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS0__PAGE_XFER_INC 0x8000
++
++#define INTR_EN0 0x420
++#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN0__ECC_ERR 0x0002
++#define INTR_EN0__DMA_CMD_COMP 0x0004
++#define INTR_EN0__TIME_OUT 0x0008
++#define INTR_EN0__PROGRAM_FAIL 0x0010
++#define INTR_EN0__ERASE_FAIL 0x0020
++#define INTR_EN0__LOAD_COMP 0x0040
++#define INTR_EN0__PROGRAM_COMP 0x0080
++#define INTR_EN0__ERASE_COMP 0x0100
++#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN0__LOCKED_BLK 0x0400
++#define INTR_EN0__UNSUP_CMD 0x0800
++#define INTR_EN0__INT_ACT 0x1000
++#define INTR_EN0__RST_COMP 0x2000
++#define INTR_EN0__PIPE_CMD_ERR 0x4000
++#define INTR_EN0__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT0 0x430
++#define PAGE_CNT0__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR0 0x440
++#define ERR_PAGE_ADDR0__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR0 0x450
++#define ERR_BLOCK_ADDR0__VALUE 0xffff
++
++#define INTR_STATUS1 0x460
++#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS1__ECC_ERR 0x0002
++#define INTR_STATUS1__DMA_CMD_COMP 0x0004
++#define INTR_STATUS1__TIME_OUT 0x0008
++#define INTR_STATUS1__PROGRAM_FAIL 0x0010
++#define INTR_STATUS1__ERASE_FAIL 0x0020
++#define INTR_STATUS1__LOAD_COMP 0x0040
++#define INTR_STATUS1__PROGRAM_COMP 0x0080
++#define INTR_STATUS1__ERASE_COMP 0x0100
++#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS1__LOCKED_BLK 0x0400
++#define INTR_STATUS1__UNSUP_CMD 0x0800
++#define INTR_STATUS1__INT_ACT 0x1000
++#define INTR_STATUS1__RST_COMP 0x2000
++#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS1__PAGE_XFER_INC 0x8000
++
++#define INTR_EN1 0x470
++#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN1__ECC_ERR 0x0002
++#define INTR_EN1__DMA_CMD_COMP 0x0004
++#define INTR_EN1__TIME_OUT 0x0008
++#define INTR_EN1__PROGRAM_FAIL 0x0010
++#define INTR_EN1__ERASE_FAIL 0x0020
++#define INTR_EN1__LOAD_COMP 0x0040
++#define INTR_EN1__PROGRAM_COMP 0x0080
++#define INTR_EN1__ERASE_COMP 0x0100
++#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN1__LOCKED_BLK 0x0400
++#define INTR_EN1__UNSUP_CMD 0x0800
++#define INTR_EN1__INT_ACT 0x1000
++#define INTR_EN1__RST_COMP 0x2000
++#define INTR_EN1__PIPE_CMD_ERR 0x4000
++#define INTR_EN1__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT1 0x480
++#define PAGE_CNT1__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR1 0x490
++#define ERR_PAGE_ADDR1__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR1 0x4a0
++#define ERR_BLOCK_ADDR1__VALUE 0xffff
++
++#define INTR_STATUS2 0x4b0
++#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS2__ECC_ERR 0x0002
++#define INTR_STATUS2__DMA_CMD_COMP 0x0004
++#define INTR_STATUS2__TIME_OUT 0x0008
++#define INTR_STATUS2__PROGRAM_FAIL 0x0010
++#define INTR_STATUS2__ERASE_FAIL 0x0020
++#define INTR_STATUS2__LOAD_COMP 0x0040
++#define INTR_STATUS2__PROGRAM_COMP 0x0080
++#define INTR_STATUS2__ERASE_COMP 0x0100
++#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS2__LOCKED_BLK 0x0400
++#define INTR_STATUS2__UNSUP_CMD 0x0800
++#define INTR_STATUS2__INT_ACT 0x1000
++#define INTR_STATUS2__RST_COMP 0x2000
++#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS2__PAGE_XFER_INC 0x8000
++
++#define INTR_EN2 0x4c0
++#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN2__ECC_ERR 0x0002
++#define INTR_EN2__DMA_CMD_COMP 0x0004
++#define INTR_EN2__TIME_OUT 0x0008
++#define INTR_EN2__PROGRAM_FAIL 0x0010
++#define INTR_EN2__ERASE_FAIL 0x0020
++#define INTR_EN2__LOAD_COMP 0x0040
++#define INTR_EN2__PROGRAM_COMP 0x0080
++#define INTR_EN2__ERASE_COMP 0x0100
++#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN2__LOCKED_BLK 0x0400
++#define INTR_EN2__UNSUP_CMD 0x0800
++#define INTR_EN2__INT_ACT 0x1000
++#define INTR_EN2__RST_COMP 0x2000
++#define INTR_EN2__PIPE_CMD_ERR 0x4000
++#define INTR_EN2__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT2 0x4d0
++#define PAGE_CNT2__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR2 0x4e0
++#define ERR_PAGE_ADDR2__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR2 0x4f0
++#define ERR_BLOCK_ADDR2__VALUE 0xffff
++
++#define INTR_STATUS3 0x500
++#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS3__ECC_ERR 0x0002
++#define INTR_STATUS3__DMA_CMD_COMP 0x0004
++#define INTR_STATUS3__TIME_OUT 0x0008
++#define INTR_STATUS3__PROGRAM_FAIL 0x0010
++#define INTR_STATUS3__ERASE_FAIL 0x0020
++#define INTR_STATUS3__LOAD_COMP 0x0040
++#define INTR_STATUS3__PROGRAM_COMP 0x0080
++#define INTR_STATUS3__ERASE_COMP 0x0100
++#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS3__LOCKED_BLK 0x0400
++#define INTR_STATUS3__UNSUP_CMD 0x0800
++#define INTR_STATUS3__INT_ACT 0x1000
++#define INTR_STATUS3__RST_COMP 0x2000
++#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS3__PAGE_XFER_INC 0x8000
++
++#define INTR_EN3 0x510
++#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN3__ECC_ERR 0x0002
++#define INTR_EN3__DMA_CMD_COMP 0x0004
++#define INTR_EN3__TIME_OUT 0x0008
++#define INTR_EN3__PROGRAM_FAIL 0x0010
++#define INTR_EN3__ERASE_FAIL 0x0020
++#define INTR_EN3__LOAD_COMP 0x0040
++#define INTR_EN3__PROGRAM_COMP 0x0080
++#define INTR_EN3__ERASE_COMP 0x0100
++#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN3__LOCKED_BLK 0x0400
++#define INTR_EN3__UNSUP_CMD 0x0800
++#define INTR_EN3__INT_ACT 0x1000
++#define INTR_EN3__RST_COMP 0x2000
++#define INTR_EN3__PIPE_CMD_ERR 0x4000
++#define INTR_EN3__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT3 0x520
++#define PAGE_CNT3__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR3 0x530
++#define ERR_PAGE_ADDR3__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR3 0x540
++#define ERR_BLOCK_ADDR3__VALUE 0xffff
++
++#define DATA_INTR 0x550
++#define DATA_INTR__WRITE_SPACE_AV 0x0001
++#define DATA_INTR__READ_DATA_AV 0x0002
++
++#define DATA_INTR_EN 0x560
++#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
++#define DATA_INTR_EN__READ_DATA_AV 0x0002
++
++#define GPREG_0 0x570
++#define GPREG_0__VALUE 0xffff
++
++#define GPREG_1 0x580
++#define GPREG_1__VALUE 0xffff
++
++#define GPREG_2 0x590
++#define GPREG_2__VALUE 0xffff
++
++#define GPREG_3 0x5a0
++#define GPREG_3__VALUE 0xffff
++
++#define ECC_THRESHOLD 0x600
++#define ECC_THRESHOLD__VALUE 0x03ff
++
++#define ECC_ERROR_BLOCK_ADDRESS 0x610
++#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
++
++#define ECC_ERROR_PAGE_ADDRESS 0x620
++#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
++#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
++
++#define ECC_ERROR_ADDRESS 0x630
++#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
++#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
++
++#define ERR_CORRECTION_INFO 0x640
++#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
++#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
++#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
++#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
++
++#define DMA_ENABLE 0x700
++#define DMA_ENABLE__FLAG 0x0001
++
++#define IGNORE_ECC_DONE 0x710
++#define IGNORE_ECC_DONE__FLAG 0x0001
++
++#define DMA_INTR 0x720
++#define DMA_INTR__TARGET_ERROR 0x0001
++#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
++#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
++#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
++#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
++#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
++
++#define DMA_INTR_EN 0x730
++#define DMA_INTR_EN__TARGET_ERROR 0x0001
++#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
++#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
++#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
++#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
++#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
++
++#define TARGET_ERR_ADDR_LO 0x740
++#define TARGET_ERR_ADDR_LO__VALUE 0xffff
++
++#define TARGET_ERR_ADDR_HI 0x750
++#define TARGET_ERR_ADDR_HI__VALUE 0xffff
++
++#define CHNL_ACTIVE 0x760
++#define CHNL_ACTIVE__CHANNEL0 0x0001
++#define CHNL_ACTIVE__CHANNEL1 0x0002
++#define CHNL_ACTIVE__CHANNEL2 0x0004
++#define CHNL_ACTIVE__CHANNEL3 0x0008
++
++#define ACTIVE_SRC_ID 0x800
++#define ACTIVE_SRC_ID__VALUE 0x00ff
++
++#define PTN_INTR 0x810
++#define PTN_INTR__CONFIG_ERROR 0x0001
++#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
++#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
++#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
++#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
++#define PTN_INTR__REG_ACCESS_ERROR 0x0020
++
++#define PTN_INTR_EN 0x820
++#define PTN_INTR_EN__CONFIG_ERROR 0x0001
++#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
++#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
++#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
++#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
++#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
++
++#define PERM_SRC_ID_0 0x830
++#define PERM_SRC_ID_0__SRCID 0x00ff
++#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_0 0x840
++#define MIN_BLK_ADDR_0__VALUE 0xffff
++
++#define MAX_BLK_ADDR_0 0x850
++#define MAX_BLK_ADDR_0__VALUE 0xffff
++
++#define MIN_MAX_BANK_0 0x860
++#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_1 0x870
++#define PERM_SRC_ID_1__SRCID 0x00ff
++#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_1 0x880
++#define MIN_BLK_ADDR_1__VALUE 0xffff
++
++#define MAX_BLK_ADDR_1 0x890
++#define MAX_BLK_ADDR_1__VALUE 0xffff
++
++#define MIN_MAX_BANK_1 0x8a0
++#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_2 0x8b0
++#define PERM_SRC_ID_2__SRCID 0x00ff
++#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_2 0x8c0
++#define MIN_BLK_ADDR_2__VALUE 0xffff
++
++#define MAX_BLK_ADDR_2 0x8d0
++#define MAX_BLK_ADDR_2__VALUE 0xffff
++
++#define MIN_MAX_BANK_2 0x8e0
++#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_3 0x8f0
++#define PERM_SRC_ID_3__SRCID 0x00ff
++#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_3 0x900
++#define MIN_BLK_ADDR_3__VALUE 0xffff
++
++#define MAX_BLK_ADDR_3 0x910
++#define MAX_BLK_ADDR_3__VALUE 0xffff
++
++#define MIN_MAX_BANK_3 0x920
++#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_4 0x930
++#define PERM_SRC_ID_4__SRCID 0x00ff
++#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_4 0x940
++#define MIN_BLK_ADDR_4__VALUE 0xffff
++
++#define MAX_BLK_ADDR_4 0x950
++#define MAX_BLK_ADDR_4__VALUE 0xffff
++
++#define MIN_MAX_BANK_4 0x960
++#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_5 0x970
++#define PERM_SRC_ID_5__SRCID 0x00ff
++#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_5 0x980
++#define MIN_BLK_ADDR_5__VALUE 0xffff
++
++#define MAX_BLK_ADDR_5 0x990
++#define MAX_BLK_ADDR_5__VALUE 0xffff
++
++#define MIN_MAX_BANK_5 0x9a0
++#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_6 0x9b0
++#define PERM_SRC_ID_6__SRCID 0x00ff
++#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_6 0x9c0
++#define MIN_BLK_ADDR_6__VALUE 0xffff
++
++#define MAX_BLK_ADDR_6 0x9d0
++#define MAX_BLK_ADDR_6__VALUE 0xffff
++
++#define MIN_MAX_BANK_6 0x9e0
++#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_7 0x9f0
++#define PERM_SRC_ID_7__SRCID 0x00ff
++#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_7 0xa00
++#define MIN_BLK_ADDR_7__VALUE 0xffff
++
++#define MAX_BLK_ADDR_7 0xa10
++#define MAX_BLK_ADDR_7__VALUE 0xffff
++
++#define MIN_MAX_BANK_7 0xa20
++#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
+--- /dev/null
++++ b/drivers/staging/spectra/spectraswconfig.h
+@@ -0,0 +1,82 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _SPECTRASWCONFIG_
++#define _SPECTRASWCONFIG_
++
++/* NAND driver version */
++#define GLOB_VERSION "driver version 20100311"
++
++
++/***** Common Parameters *****/
++#define RETRY_TIMES 3
++
++#define READ_BADBLOCK_INFO 1
++#define READBACK_VERIFY 0
++#define AUTO_FORMAT_FLASH 0
++
++/***** Cache Parameters *****/
++#define CACHE_ITEM_NUM 128
++#define BLK_NUM_FOR_L2_CACHE 16
++
++/***** Block Table Parameters *****/
++#define BLOCK_TABLE_INDEX 0
++
++/***** Wear Leveling Parameters *****/
++#define WEAR_LEVELING_GATE 0x10
++#define WEAR_LEVELING_BLOCK_NUM 10
++
++#define DEBUG_BNDRY 0
++
++/***** Product Feature Support *****/
++#define FLASH_EMU defined(CONFIG_SPECTRA_EMU)
++#define FLASH_NAND defined(CONFIG_SPECTRA_MRST_HW)
++#define FLASH_MTD defined(CONFIG_SPECTRA_MTD)
++#define CMD_DMA defined(CONFIG_SPECTRA_MRST_HW_DMA)
++
++#define SPECTRA_PARTITION_ID 0
++
++/* Enable this macro if the number of flash blocks is larger than 16K. */
++#define SUPPORT_LARGE_BLOCKNUM 1
++
++/**** Block Table and Reserved Block Parameters *****/
++#define SPECTRA_START_BLOCK 3
++//#define NUM_FREE_BLOCKS_GATE 30
++#define NUM_FREE_BLOCKS_GATE 60
++
++/**** Hardware Parameters ****/
++#define GLOB_HWCTL_REG_BASE 0xFFA40000
++#define GLOB_HWCTL_REG_SIZE 4096
++
++#define GLOB_HWCTL_MEM_BASE 0xFFA48000
++#define GLOB_HWCTL_MEM_SIZE 4096
++
++/* KBV - Updated to LNW scratch register address */
++#define SCRATCH_REG_ADDR 0xFF108018
++#define SCRATCH_REG_SIZE 64
++
++#define GLOB_HWCTL_DEFAULT_BLKS 2048
++
++#define SUPPORT_15BITECC 1
++#define SUPPORT_8BITECC 1
++
++#define ONFI_BLOOM_TIME 0
++#define MODE5_WORKAROUND 1
++
++#endif /*_SPECTRASWCONFIG_*/
+--- a/drivers/usb/core/buffer.c
++++ b/drivers/usb/core/buffer.c
+@@ -115,6 +115,11 @@
+ return kmalloc(size, mem_flags);
+ }
+
++ /* we won't use internal SRAM as data payload, we can't get
++ any benefits from it */
++ if (hcd->has_sram && hcd->sram_no_payload)
++ return dma_alloc_coherent(NULL, size, dma, mem_flags);
++
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i])
+ return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
+@@ -141,6 +146,11 @@
+ return;
+ }
+
++ if (hcd->has_sram && hcd->sram_no_payload) {
++ dma_free_coherent(NULL, size, addr, dma);
++ return;
++ }
++
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i]) {
+ dma_pool_free(hcd->pool [i], addr, dma);
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1545,6 +1545,24 @@
+ hcd->driver->free_dev(hcd, udev);
+ }
+
++#ifdef CONFIG_USB_OTG
++
++static void otg_notify(struct usb_device *udev, unsigned action)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ if (hcd->otg_notify)
++ hcd->otg_notify(udev, action);
++}
++
++#else
++
++static inline void otg_notify(struct usb_device *udev, unsigned action)
++{
++}
++
++#endif
++
+ /**
+ * usb_disconnect - disconnect a device (usbcore-internal)
+ * @pdev: pointer to device being disconnected
+@@ -1602,7 +1620,7 @@
+ * notifier chain (used by usbfs and possibly others).
+ */
+ device_del(&udev->dev);
+-
++ otg_notify(udev, USB_DEVICE_REMOVE);
+ /* Free the device number and delete the parent's children[]
+ * (or root_hub) pointer.
+ */
+@@ -1821,6 +1839,7 @@
+ * notifier chain (used by usbfs and possibly others).
+ */
+ err = device_add(&udev->dev);
++ otg_notify(udev, USB_DEVICE_ADD);
+ if (err) {
+ dev_err(&udev->dev, "can't device_add, error %d\n", err);
+ goto fail;
+@@ -2880,7 +2899,9 @@
+ }
+
+ retval = 0;
+-
++ /* notify HCD that we have a device connected and addressed */
++ if (hcd->driver->update_device)
++ hcd->driver->update_device(hcd, udev);
+ fail:
+ if (retval) {
+ hub_port_disable(hub, port1, 0);
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -147,4 +147,3 @@
+ extern void usb_notify_remove_device(struct usb_device *udev);
+ extern void usb_notify_add_bus(struct usb_bus *ubus);
+ extern void usb_notify_remove_bus(struct usb_bus *ubus);
+-
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -485,11 +485,11 @@
+ select USB_GADGET_SELECTED
+
+ config USB_GADGET_LANGWELL
+- boolean "Intel Langwell USB Device Controller"
++ boolean "Intel Langwell/Penwell USB Device Controller"
+ depends on PCI
+ select USB_GADGET_DUALSPEED
+ help
+- Intel Langwell USB Device Controller is a High-Speed USB
++ Intel Langwell/Penwell USB Device Controller is a High-Speed USB
+ On-The-Go device controller.
+
+ The number of programmable endpoints is different through
+--- a/drivers/usb/gadget/f_acm.c
++++ b/drivers/usb/gadget/f_acm.c
+@@ -578,6 +578,8 @@
+ struct f_acm *acm = func_to_acm(f);
+ int status;
+ struct usb_ep *ep;
++ struct usb_descriptor_header **fs_function;
++ struct usb_descriptor_header **hs_function;
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+@@ -629,16 +631,23 @@
+ acm->notify_req->complete = acm_cdc_notify_complete;
+ acm->notify_req->context = acm;
+
++ if (c->bConfigurationValue == 4) {
++ /* Descriptors with association descriptor */
++ fs_function = acm_fs_function;
++ } else {
++ /* Descriptors without association descriptor */
++ fs_function = &acm_fs_function[1];
++ }
+ /* copy descriptors, and track endpoint copies */
+- f->descriptors = usb_copy_descriptors(acm_fs_function);
++ f->descriptors = usb_copy_descriptors(fs_function);
+ if (!f->descriptors)
+ goto fail;
+
+- acm->fs.in = usb_find_endpoint(acm_fs_function,
++ acm->fs.in = usb_find_endpoint(fs_function,
+ f->descriptors, &acm_fs_in_desc);
+- acm->fs.out = usb_find_endpoint(acm_fs_function,
++ acm->fs.out = usb_find_endpoint(fs_function,
+ f->descriptors, &acm_fs_out_desc);
+- acm->fs.notify = usb_find_endpoint(acm_fs_function,
++ acm->fs.notify = usb_find_endpoint(fs_function,
+ f->descriptors, &acm_fs_notify_desc);
+
+ /* support all relevant hardware speeds... we expect that when
+@@ -654,13 +663,20 @@
+ acm_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+- f->hs_descriptors = usb_copy_descriptors(acm_hs_function);
++ if (c->bConfigurationValue == 4) {
++ /* Descriptors with association descriptor */
++ hs_function = acm_hs_function;
++ } else {
++ /* Descriptors without association descriptor */
++ hs_function = &acm_hs_function[1];
++ }
++ f->hs_descriptors = usb_copy_descriptors(hs_function);
+
+- acm->hs.in = usb_find_endpoint(acm_hs_function,
++ acm->hs.in = usb_find_endpoint(hs_function,
+ f->hs_descriptors, &acm_hs_in_desc);
+- acm->hs.out = usb_find_endpoint(acm_hs_function,
++ acm->hs.out = usb_find_endpoint(hs_function,
+ f->hs_descriptors, &acm_hs_out_desc);
+- acm->hs.notify = usb_find_endpoint(acm_hs_function,
++ acm->hs.notify = usb_find_endpoint(hs_function,
+ f->hs_descriptors, &acm_hs_notify_desc);
+ }
+
+--- a/drivers/usb/gadget/langwell_udc.c
++++ b/drivers/usb/gadget/langwell_udc.c
+@@ -1,6 +1,6 @@
+ /*
+- * Intel Langwell USB Device Controller driver
+- * Copyright (C) 2008-2009, Intel Corporation.
++ * Intel Langwell/Penwell USB Device Controller driver
++ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+@@ -19,9 +19,9 @@
+
+
+ /* #undef DEBUG */
+-/* #undef VERBOSE */
++/* #undef VERBOSE_DEBUG */
+
+-#if defined(CONFIG_USB_LANGWELL_OTG)
++#if defined(CONFIG_USB_LANGWELL_OTG) || defined(CONFIG_USB_PENWELL_OTG)
+ #define OTG_TRANSCEIVER
+ #endif
+
+@@ -53,8 +53,8 @@
+ #include "langwell_udc.h"
+
+
+-#define DRIVER_DESC "Intel Langwell USB Device Controller driver"
+-#define DRIVER_VERSION "16 May 2009"
++#define DRIVER_DESC "Intel Langwell/Penwell USB Device Controller driver"
++#define DRIVER_VERSION "June 3, 2010"
+
+ static const char driver_name[] = "langwell_udc";
+ static const char driver_desc[] = DRIVER_DESC;
+@@ -77,141 +77,110 @@
+ /*-------------------------------------------------------------------------*/
+ /* debugging */
+
+-#ifdef DEBUG
+-#define DBG(dev, fmt, args...) \
+- pr_debug("%s %s: " fmt , driver_name, \
+- pci_name(dev->pdev), ## args)
+-#else
+-#define DBG(dev, fmt, args...) \
+- do { } while (0)
+-#endif /* DEBUG */
+-
+-
+-#ifdef VERBOSE
+-#define VDBG DBG
+-#else
+-#define VDBG(dev, fmt, args...) \
+- do { } while (0)
+-#endif /* VERBOSE */
+-
+-
+-#define ERROR(dev, fmt, args...) \
+- pr_err("%s %s: " fmt , driver_name, \
+- pci_name(dev->pdev), ## args)
+-
+-#define WARNING(dev, fmt, args...) \
+- pr_warning("%s %s: " fmt , driver_name, \
+- pci_name(dev->pdev), ## args)
+-
+-#define INFO(dev, fmt, args...) \
+- pr_info("%s %s: " fmt , driver_name, \
+- pci_name(dev->pdev), ## args)
+-
+-
+-#ifdef VERBOSE
++#ifdef VERBOSE_DEBUG
+ static inline void print_all_registers(struct langwell_udc *dev)
+ {
+ int i;
+
+ /* Capability Registers */
+- printk(KERN_DEBUG "Capability Registers (offset: "
+- "0x%04x, length: 0x%08x)\n",
+- CAP_REG_OFFSET,
+- (u32)sizeof(struct langwell_cap_regs));
+- printk(KERN_DEBUG "caplength=0x%02x\n",
++ dev_dbg(&dev->pdev->dev,
++ "Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
++ CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
++ dev_dbg(&dev->pdev->dev, "caplength=0x%02x\n",
+ readb(&dev->cap_regs->caplength));
+- printk(KERN_DEBUG "hciversion=0x%04x\n",
++ dev_dbg(&dev->pdev->dev, "hciversion=0x%04x\n",
+ readw(&dev->cap_regs->hciversion));
+- printk(KERN_DEBUG "hcsparams=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "hcsparams=0x%08x\n",
+ readl(&dev->cap_regs->hcsparams));
+- printk(KERN_DEBUG "hccparams=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "hccparams=0x%08x\n",
+ readl(&dev->cap_regs->hccparams));
+- printk(KERN_DEBUG "dciversion=0x%04x\n",
++ dev_dbg(&dev->pdev->dev, "dciversion=0x%04x\n",
+ readw(&dev->cap_regs->dciversion));
+- printk(KERN_DEBUG "dccparams=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "dccparams=0x%08x\n",
+ readl(&dev->cap_regs->dccparams));
+
+ /* Operational Registers */
+- printk(KERN_DEBUG "Operational Registers (offset: "
+- "0x%04x, length: 0x%08x)\n",
+- OP_REG_OFFSET,
+- (u32)sizeof(struct langwell_op_regs));
+- printk(KERN_DEBUG "extsts=0x%08x\n",
++ dev_dbg(&dev->pdev->dev,
++ "Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
++ OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
++ dev_dbg(&dev->pdev->dev, "extsts=0x%08x\n",
+ readl(&dev->op_regs->extsts));
+- printk(KERN_DEBUG "extintr=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "extintr=0x%08x\n",
+ readl(&dev->op_regs->extintr));
+- printk(KERN_DEBUG "usbcmd=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "usbcmd=0x%08x\n",
+ readl(&dev->op_regs->usbcmd));
+- printk(KERN_DEBUG "usbsts=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "usbsts=0x%08x\n",
+ readl(&dev->op_regs->usbsts));
+- printk(KERN_DEBUG "usbintr=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "usbintr=0x%08x\n",
+ readl(&dev->op_regs->usbintr));
+- printk(KERN_DEBUG "frindex=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "frindex=0x%08x\n",
+ readl(&dev->op_regs->frindex));
+- printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "ctrldssegment=0x%08x\n",
+ readl(&dev->op_regs->ctrldssegment));
+- printk(KERN_DEBUG "deviceaddr=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "deviceaddr=0x%08x\n",
+ readl(&dev->op_regs->deviceaddr));
+- printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endpointlistaddr=0x%08x\n",
+ readl(&dev->op_regs->endpointlistaddr));
+- printk(KERN_DEBUG "ttctrl=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "ttctrl=0x%08x\n",
+ readl(&dev->op_regs->ttctrl));
+- printk(KERN_DEBUG "burstsize=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "burstsize=0x%08x\n",
+ readl(&dev->op_regs->burstsize));
+- printk(KERN_DEBUG "txfilltuning=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "txfilltuning=0x%08x\n",
+ readl(&dev->op_regs->txfilltuning));
+- printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "txttfilltuning=0x%08x\n",
+ readl(&dev->op_regs->txttfilltuning));
+- printk(KERN_DEBUG "ic_usb=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "ic_usb=0x%08x\n",
+ readl(&dev->op_regs->ic_usb));
+- printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "ulpi_viewport=0x%08x\n",
+ readl(&dev->op_regs->ulpi_viewport));
+- printk(KERN_DEBUG "configflag=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "configflag=0x%08x\n",
+ readl(&dev->op_regs->configflag));
+- printk(KERN_DEBUG "portsc1=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "portsc1=0x%08x\n",
+ readl(&dev->op_regs->portsc1));
+- printk(KERN_DEBUG "devlc=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "devlc=0x%08x\n",
+ readl(&dev->op_regs->devlc));
+- printk(KERN_DEBUG "otgsc=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "otgsc=0x%08x\n",
+ readl(&dev->op_regs->otgsc));
+- printk(KERN_DEBUG "usbmode=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "usbmode=0x%08x\n",
+ readl(&dev->op_regs->usbmode));
+- printk(KERN_DEBUG "endptnak=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptnak=0x%08x\n",
+ readl(&dev->op_regs->endptnak));
+- printk(KERN_DEBUG "endptnaken=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptnaken=0x%08x\n",
+ readl(&dev->op_regs->endptnaken));
+- printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptsetupstat=0x%08x\n",
+ readl(&dev->op_regs->endptsetupstat));
+- printk(KERN_DEBUG "endptprime=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptprime=0x%08x\n",
+ readl(&dev->op_regs->endptprime));
+- printk(KERN_DEBUG "endptflush=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptflush=0x%08x\n",
+ readl(&dev->op_regs->endptflush));
+- printk(KERN_DEBUG "endptstat=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptstat=0x%08x\n",
+ readl(&dev->op_regs->endptstat));
+- printk(KERN_DEBUG "endptcomplete=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptcomplete=0x%08x\n",
+ readl(&dev->op_regs->endptcomplete));
+
+ for (i = 0; i < dev->ep_max / 2; i++) {
+- printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
++ dev_dbg(&dev->pdev->dev, "endptctrl[%d]=0x%08x\n",
+ i, readl(&dev->op_regs->endptctrl[i]));
+ }
+ }
+-#endif /* VERBOSE */
++#else
++
++#define print_all_registers(dev) do { } while (0)
++
++#endif /* VERBOSE_DEBUG */
+
+
+ /*-------------------------------------------------------------------------*/
+
+-#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
++#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
++ USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
+
+-#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
+- USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
+- & USB_DIR_IN) == USB_DIR_IN)
++#define DIR_STRING(ep) (is_in(ep) ? "in" : "out")
+
+
+-#ifdef DEBUG
+-static char *type_string(u8 bmAttributes)
++static char *type_string(const struct usb_endpoint_descriptor *desc)
+ {
+- switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
++ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ return "bulk";
+ case USB_ENDPOINT_XFER_ISOC:
+@@ -222,7 +191,6 @@
+
+ return "control";
+ }
+-#endif
+
+
+ /* configure endpoint control registers */
+@@ -233,7 +201,7 @@
+ u32 endptctrl;
+
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+ if (is_in) { /* TX */
+@@ -250,7 +218,7 @@
+
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -260,7 +228,7 @@
+ struct langwell_ep *ep;
+ int i;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* ep0 in and out */
+ for (i = 0; i < 2; i++) {
+@@ -274,16 +242,18 @@
+ ep->dqh->dqh_ios = 1;
+ ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+- /* FIXME: enable ep0-in HW zero length termination select */
++ /* enable ep0-in HW zero length termination select */
+ if (is_in(ep))
+ ep->dqh->dqh_zlt = 0;
+ ep->dqh->dqh_mult = 0;
+
++ ep->dqh->dtd_next = DTD_TERM;
++
+ /* configure ep0 control registers */
+ ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+ }
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+@@ -300,12 +270,12 @@
+ struct langwell_ep *ep;
+ u16 max = 0;
+ unsigned long flags;
+- int retval = 0;
++ int i, retval = 0;
+ unsigned char zlt, ios = 0, mult = 0;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !desc || ep->desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+@@ -326,7 +296,7 @@
+ * sanity check type, direction, address, and then
+ * initialize the endpoint capabilities fields in dQH
+ */
+- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
++ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ios = 1;
+ break;
+@@ -386,33 +356,36 @@
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+- /* configure endpoint capabilities in dQH */
+- ep->dqh->dqh_ios = ios;
+- ep->dqh->dqh_mpl = cpu_to_le16(max);
+- ep->dqh->dqh_zlt = zlt;
+- ep->dqh->dqh_mult = mult;
+-
+ ep->ep.maxpacket = max;
+ ep->desc = desc;
+ ep->stopped = 0;
+- ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
++ ep->ep_num = usb_endpoint_num(desc);
+
+ /* ep_type */
+- ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
++ ep->ep_type = usb_endpoint_type(desc);
+
+ /* configure endpoint control registers */
+ ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
+- DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
++ /* configure endpoint capabilities in dQH */
++ i = ep->ep_num * 2 + is_in(ep);
++ ep->dqh = &dev->ep_dqh[i];
++ ep->dqh->dqh_ios = ios;
++ ep->dqh->dqh_mpl = cpu_to_le16(max);
++ ep->dqh->dqh_zlt = zlt;
++ ep->dqh->dqh_mult = mult;
++ ep->dqh->dtd_next = DTD_TERM;
++
++ dev_dbg(&dev->pdev->dev, "enabled %s (ep%d%s-%s), max %04x\n",
+ _ep->name,
+ ep->ep_num,
+- DIR_STRING(desc->bEndpointAddress),
+- type_string(desc->bmAttributes),
++ DIR_STRING(ep),
++ type_string(desc),
+ max);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ done:
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return retval;
+ }
+
+@@ -428,7 +401,7 @@
+ struct langwell_dtd *curr_dtd, *next_dtd;
+ int i;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* remove the req from ep->queue */
+ list_del_init(&req->queue);
+@@ -448,7 +421,8 @@
+ }
+
+ if (req->mapped) {
+- dma_unmap_single(&dev->pdev->dev, req->req.dma, req->req.length,
++ dma_unmap_single(&dev->pdev->dev,
++ req->req.dma, req->req.length,
+ is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+@@ -458,9 +432,10 @@
+ is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (status != -ESHUTDOWN)
+- DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n",
+- ep->ep.name, &req->req, status,
+- req->req.actual, req->req.length);
++ dev_dbg(&dev->pdev->dev,
++ "complete %s, req %p, stat %d, len %u/%u\n",
++ ep->ep.name, &req->req, status,
++ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+@@ -473,7 +448,7 @@
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -511,7 +486,7 @@
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc)
+ return -EINVAL;
+@@ -535,8 +510,8 @@
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- DBG(dev, "disabled %s\n", _ep->name);
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "disabled %s\n", _ep->name);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+ return 0;
+ }
+@@ -555,7 +530,7 @@
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+@@ -564,8 +539,8 @@
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+
+- VDBG(dev, "alloc request for %s\n", _ep->name);
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "alloc request for %s\n", _ep->name);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return &req->req;
+ }
+
+@@ -580,7 +555,7 @@
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !_req)
+ return;
+@@ -591,8 +566,8 @@
+ if (_req)
+ kfree(req);
+
+- VDBG(dev, "free request for %s\n", _ep->name);
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "free request for %s\n", _ep->name);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -608,23 +583,24 @@
+ struct langwell_udc *dev;
+
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ i = ep->ep_num * 2 + is_in(ep);
+ dqh = &dev->ep_dqh[i];
+
+ if (ep->ep_num)
+- VDBG(dev, "%s\n", ep->name);
++ dev_vdbg(&dev->pdev->dev, "%s\n", ep->name);
+ else
+ /* ep0 */
+- VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");
++ dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
+
+- VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));
++ dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%08x\n",
++ i, (u32)&(dev->ep_dqh[i]));
+
+ bit_mask = is_in(ep) ?
+ (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
+
+- VDBG(dev, "bit_mask = 0x%08x\n", bit_mask);
++ dev_vdbg(&dev->pdev->dev, "bit_mask = 0x%08x\n", bit_mask);
+
+ /* check if the pipe is empty */
+ if (!(list_empty(&ep->queue))) {
+@@ -665,14 +641,17 @@
+ /* clear active and halt bit */
+ dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
+ dqh->dtd_status &= dtd_status;
+- VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
++ dev_vdbg(&dev->pdev->dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
++
++ /* ensure that updates to the dQH will occure before priming */
++ wmb();
+
+ /* write 1 to endptprime register to PRIME endpoint */
+ bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+- VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
++ dev_vdbg(&dev->pdev->dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+ writel(bit_mask, &dev->op_regs->endptprime);
+ out:
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -687,7 +666,7 @@
+ int i;
+
+ dev = req->ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* the maximum transfer length, up to 16k bytes */
+ *length = min(req->req.length - req->req.actual,
+@@ -708,7 +687,7 @@
+
+ /* fill in total bytes with transfer size */
+ dtd->dtd_total = cpu_to_le16(*length);
+- VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
++ dev_vdbg(&dev->pdev->dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
+
+ /* set is_last flag if req->req.zero is set or not */
+ if (req->req.zero) {
+@@ -722,7 +701,7 @@
+ *is_last = 0;
+
+ if (*is_last == 0)
+- VDBG(dev, "multi-dtd request!\n");
++ dev_vdbg(&dev->pdev->dev, "multi-dtd request!\n");
+
+ /* set interrupt on complete bit for the last dTD */
+ if (*is_last && !req->req.no_interrupt)
+@@ -733,10 +712,12 @@
+
+ /* set the active bit of status field to 1 */
+ dtd->dtd_status = DTD_STS_ACTIVE;
+- VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status);
++ dev_vdbg(&dev->pdev->dev, "dtd->dtd_status = 0x%02x\n",
++ dtd->dtd_status);
+
+- VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma);
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "length = %d, dma addr= 0x%08x\n",
++ *length, (int)*dma);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return dtd;
+ }
+
+@@ -751,7 +732,7 @@
+ dma_addr_t dma;
+
+ dev = req->ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+ do {
+ dtd = build_dtd(req, &count, &dma, &is_last);
+ if (dtd == NULL)
+@@ -773,7 +754,7 @@
+
+ req->tail = dtd;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -803,9 +784,9 @@
+
+ dev = ep->dev;
+ req->ep = ep;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
++ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ if (req->req.length > ep->ep.maxpacket)
+ return -EMSGSIZE;
+ is_iso = 1;
+@@ -818,7 +799,7 @@
+ if (_req->dma == DMA_ADDR_INVALID) {
+ /* WORKAROUND: WARN_ON(size == 0) */
+ if (_req->length == 0) {
+- VDBG(dev, "req->length: 0->1\n");
++ dev_vdbg(&dev->pdev->dev, "req->length: 0->1\n");
+ zlflag = 1;
+ _req->length++;
+ }
+@@ -827,24 +808,25 @@
+ _req->buf, _req->length,
+ is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (zlflag && (_req->length == 1)) {
+- VDBG(dev, "req->length: 1->0\n");
++ dev_vdbg(&dev->pdev->dev, "req->length: 1->0\n");
+ zlflag = 0;
+ _req->length = 0;
+ }
+
+ req->mapped = 1;
+- VDBG(dev, "req->mapped = 1\n");
++ dev_vdbg(&dev->pdev->dev, "req->mapped = 1\n");
+ } else {
+ dma_sync_single_for_device(&dev->pdev->dev,
+ _req->dma, _req->length,
+ is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 0;
+- VDBG(dev, "req->mapped = 0\n");
++ dev_vdbg(&dev->pdev->dev, "req->mapped = 0\n");
+ }
+
+- DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
++ dev_dbg(&dev->pdev->dev,
++ "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+ _ep->name,
+- _req, _req->length, _req->buf, _req->dma);
++ _req, _req->length, _req->buf, (int)_req->dma);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+@@ -866,12 +848,12 @@
+
+ if (likely(req != NULL)) {
+ list_add_tail(&req->queue, &ep->queue);
+- VDBG(dev, "list_add_tail() \n");
++ dev_vdbg(&dev->pdev->dev, "list_add_tail()\n");
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -888,7 +870,7 @@
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc || !_req)
+ return -EINVAL;
+@@ -924,7 +906,7 @@
+
+ /* queue head may be partially complete. */
+ if (ep->queue.next == &req->queue) {
+- DBG(dev, "unlink (%s) dma\n", _ep->name);
++ dev_dbg(&dev->pdev->dev, "unlink (%s) dma\n", _ep->name);
+ _req->status = -ECONNRESET;
+ langwell_ep_fifo_flush(&ep->ep);
+
+@@ -963,7 +945,7 @@
+ ep->stopped = stopped;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return retval;
+ }
+
+@@ -976,7 +958,7 @@
+ u32 endptctrl = 0;
+ int ep_num;
+ struct langwell_udc *dev = ep->dev;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ ep_num = ep->ep_num;
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+@@ -1001,7 +983,7 @@
+
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -1016,7 +998,7 @@
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc)
+ return -EINVAL;
+@@ -1024,8 +1006,7 @@
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+- if (ep->desc && (ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+- == USB_ENDPOINT_XFER_ISOC)
++ if (usb_endpoint_xfer_isoc(ep->desc))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&dev->lock, flags);
+@@ -1036,7 +1017,7 @@
+ */
+ if (!list_empty(&ep->queue) && is_in(ep) && value) {
+ /* IN endpoint FIFO holds bytes */
+- DBG(dev, "%s FIFO holds bytes\n", _ep->name);
++ dev_dbg(&dev->pdev->dev, "%s FIFO holds bytes\n", _ep->name);
+ retval = -EAGAIN;
+ goto done;
+ }
+@@ -1050,8 +1031,9 @@
+ }
+ done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+- DBG(dev, "%s %s halt\n", _ep->name, value ? "set" : "clear");
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "%s %s halt\n",
++ _ep->name, value ? "set" : "clear");
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return retval;
+ }
+
+@@ -1065,12 +1047,12 @@
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc)
+ return -EINVAL;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return usb_ep_set_halt(_ep);
+ }
+
+@@ -1086,15 +1068,16 @@
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc) {
+- VDBG(dev, "ep or ep->desc is NULL\n");
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "ep or ep->desc is NULL\n");
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+- VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out");
++ dev_vdbg(&dev->pdev->dev, "%s-%s fifo flush\n",
++ _ep->name, DIR_STRING(ep));
+
+ /* flush endpoint buffer */
+ if (ep->ep_num == 0)
+@@ -1110,14 +1093,14 @@
+ writel(flush_bit, &dev->op_regs->endptflush);
+ while (readl(&dev->op_regs->endptflush)) {
+ if (time_after(jiffies, timeout)) {
+- ERROR(dev, "ep flush timeout\n");
++ dev_err(&dev->pdev->dev, "ep flush timeout\n");
+ goto done;
+ }
+ cpu_relax();
+ }
+ } while (readl(&dev->op_regs->endptstat) & flush_bit);
+ done:
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -1167,31 +1150,59 @@
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return retval;
+ }
+
+
++/* enter or exit PHY low power state */
++static void langwell_phy_low_power(struct langwell_udc *dev, bool flag)
++{
++ u32 devlc;
++ u8 devlc_byte2;
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
++
++ devlc = readl(&dev->op_regs->devlc);
++ dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
++
++ if (flag)
++ devlc |= LPM_PHCD;
++ else
++ devlc &= ~LPM_PHCD;
++
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++
++ devlc = readl(&dev->op_regs->devlc);
++ dev_vdbg(&dev->pdev->dev,
++ "%s PHY low power suspend, devlc = 0x%08x\n",
++ flag ? "enter" : "exit", devlc);
++}
++
++
+ /* tries to wake up the host connected to this gadget */
+ static int langwell_wakeup(struct usb_gadget *_gadget)
+ {
+ struct langwell_udc *dev;
+- u32 portsc1, devlc;
+- unsigned long flags;
++ u32 portsc1;
++ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+- /* Remote Wakeup feature not enabled by host */
+- if (!dev->remote_wakeup)
++ /* remote wakeup feature not enabled by host */
++ if (!dev->remote_wakeup) {
++ dev_info(&dev->pdev->dev, "remote wakeup is disabled\n");
+ return -ENOTSUPP;
++ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+@@ -1201,27 +1212,23 @@
+ return 0;
+ }
+
+- /* LPM L1 to L0, remote wakeup */
+- if (dev->lpm && dev->lpm_state == LPM_L1) {
+- portsc1 |= PORTS_SLP;
+- writel(portsc1, &dev->op_regs->portsc1);
+- }
+-
+- /* force port resume */
+- if (dev->usb_state == USB_STATE_SUSPENDED) {
+- portsc1 |= PORTS_FPR;
+- writel(portsc1, &dev->op_regs->portsc1);
+- }
++ /* LPM L1 to L0 or legacy remote wakeup */
++ if (dev->lpm && dev->lpm_state == LPM_L1)
++ dev_info(&dev->pdev->dev, "LPM L1 to L0 remote wakeup\n");
++ else
++ dev_info(&dev->pdev->dev, "device remote wakeup\n");
+
+ /* exit PHY low power suspend */
+- devlc = readl(&dev->op_regs->devlc);
+- VDBG(dev, "devlc = 0x%08x\n", devlc);
+- devlc &= ~LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ if (dev->pdev->device != 0x0829)
++ langwell_phy_low_power(dev, 0);
++
++ /* force port resume */
++ portsc1 |= PORTS_FPR;
++ writel(portsc1, &dev->op_regs->portsc1);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -1231,16 +1238,17 @@
+ {
+ struct langwell_udc *dev;
+ unsigned long flags;
+- u32 usbcmd;
++ u32 usbcmd;
+
+ if (!_gadget)
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ spin_lock_irqsave(&dev->lock, flags);
+- VDBG(dev, "VBUS status: %s\n", is_active ? "on" : "off");
++ dev_vdbg(&dev->pdev->dev, "VBUS status: %s\n",
++ is_active ? "on" : "off");
+
+ dev->vbus_active = (is_active != 0);
+ if (dev->driver && dev->softconnected && dev->vbus_active) {
+@@ -1255,7 +1263,7 @@
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -1269,15 +1277,15 @@
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (dev->transceiver) {
+- VDBG(dev, "otg_set_power\n");
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "otg_set_power\n");
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return otg_set_power(dev->transceiver, mA);
+ }
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return -ENOTSUPP;
+ }
+
+@@ -1286,15 +1294,15 @@
+ static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
+ {
+ struct langwell_udc *dev;
+- u32 usbcmd;
+- unsigned long flags;
++ u32 usbcmd;
++ unsigned long flags;
+
+ if (!_gadget)
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->softconnected = (is_on != 0);
+@@ -1310,7 +1318,7 @@
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -1346,12 +1354,13 @@
+ static int langwell_udc_reset(struct langwell_udc *dev)
+ {
+ u32 usbcmd, usbmode, devlc, endpointlistaddr;
++ u8 devlc_byte0, devlc_byte2;
+ unsigned long timeout;
+
+ if (!dev)
+ return -EINVAL;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* set controller to stop state */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+@@ -1367,7 +1376,7 @@
+ timeout = jiffies + RESET_TIMEOUT;
+ while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
+ if (time_after(jiffies, timeout)) {
+- ERROR(dev, "device reset timeout\n");
++ dev_err(&dev->pdev->dev, "device reset timeout\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+@@ -1382,7 +1391,7 @@
+
+ writel(usbmode, &dev->op_regs->usbmode);
+ usbmode = readl(&dev->op_regs->usbmode);
+- VDBG(dev, "usbmode=0x%08x\n", usbmode);
++ dev_vdbg(&dev->pdev->dev, "usbmode=0x%08x\n", usbmode);
+
+ /* Write-Clear setup status */
+ writel(0, &dev->op_regs->usbsts);
+@@ -1390,9 +1399,17 @@
+ /* if support USB LPM, ACK all LPM token */
+ if (dev->lpm) {
+ devlc = readl(&dev->op_regs->devlc);
++ dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
+ devlc &= ~LPM_STL; /* don't STALL LPM token */
+ devlc &= ~LPM_NYT_ACK; /* ACK LPM token */
+- writel(devlc, &dev->op_regs->devlc);
++ devlc_byte0 = devlc & 0xff;
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ dev_vdbg(&dev->pdev->dev,
++ "ACK LPM token, devlc = 0x%08x\n", devlc);
+ }
+
+ /* fill endpointlistaddr register */
+@@ -1400,10 +1417,11 @@
+ endpointlistaddr &= ENDPOINTLISTADDR_MASK;
+ writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
+
+- VDBG(dev, "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
+- dev->ep_dqh, endpointlistaddr,
+- readl(&dev->op_regs->endpointlistaddr));
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev,
++ "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
++ dev->ep_dqh, endpointlistaddr,
++ readl(&dev->op_regs->endpointlistaddr));
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -1415,7 +1433,7 @@
+ char name[14];
+ int i;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* initialize ep0 */
+ ep = &dev->ep[0];
+@@ -1449,11 +1467,9 @@
+
+ INIT_LIST_HEAD(&ep->queue);
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+-
+- ep->dqh = &dev->ep_dqh[i];
+ }
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -1462,7 +1478,7 @@
+ static void langwell_udc_start(struct langwell_udc *dev)
+ {
+ u32 usbintr, usbcmd;
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* enable interrupts */
+ usbintr = INTR_ULPIE /* ULPI */
+@@ -1485,7 +1501,7 @@
+ usbcmd |= CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+@@ -1495,7 +1511,7 @@
+ {
+ u32 usbcmd;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* disable all interrupts */
+ writel(0, &dev->op_regs->usbintr);
+@@ -1508,7 +1524,7 @@
+ usbcmd &= ~CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+@@ -1518,7 +1534,7 @@
+ struct usb_gadget_driver *driver)
+ {
+ struct langwell_ep *ep;
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ nuke(&dev->ep[0], -ESHUTDOWN);
+
+@@ -1533,27 +1549,12 @@
+ spin_lock(&dev->lock);
+ }
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+ /*-------------------------------------------------------------------------*/
+
+-/* device "function" sysfs attribute file */
+-static ssize_t show_function(struct device *_dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct langwell_udc *dev = the_controller;
+-
+- if (!dev->driver || !dev->driver->function
+- || strlen(dev->driver->function) > PAGE_SIZE)
+- return 0;
+-
+- return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+-}
+-static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+-
+-
+ /* device "langwell_udc" sysfs attribute file */
+ static ssize_t show_langwell_udc(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+@@ -1659,13 +1660,15 @@
+ "Over-current Change: %s\n"
+ "Port Enable/Disable Change: %s\n"
+ "Port Enabled/Disabled: %s\n"
+- "Current Connect Status: %s\n\n",
++ "Current Connect Status: %s\n"
++ "LPM Suspend Status: %s\n\n",
+ (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
+ (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
+ (tmp_reg & PORTS_OCC) ? "Detected" : "No",
+ (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
+ (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
+- (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached");
++ (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached",
++ (tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
+ size -= t;
+ next += t;
+
+@@ -1676,7 +1679,7 @@
+ "Serial Transceiver : %d\n"
+ "Port Speed: %s\n"
+ "Port Force Full Speed Connenct: %s\n"
+- "PHY Low Power Suspend Clock Disable: %s\n"
++ "PHY Low Power Suspend Clock: %s\n"
+ "BmAttributes: %d\n\n",
+ LPM_PTS(tmp_reg),
+ (tmp_reg & LPM_STS) ? 1 : 0,
+@@ -1797,6 +1800,40 @@
+ static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
+
+
++/* device "remote_wakeup" sysfs attribute file */
++static ssize_t store_remote_wakeup(struct device *_dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct langwell_udc *dev = the_controller;
++#if defined(CONFIG_USB_DEBUG)
++ unsigned long flags;
++#endif
++ ssize_t rc = count;
++
++ if (count > 2)
++ return -EINVAL;
++
++ if (count > 0 && buf[count-1] == '\n')
++ ((char *) buf)[count-1] = 0;
++
++ if (buf[0] != '1')
++ return -EINVAL;
++
++#if defined(CONFIG_USB_DEBUG)
++ /* force remote wakeup enabled in case gadget driver doesn't support */
++ spin_lock_irqsave(&dev->lock, flags);
++ dev->remote_wakeup = 1;
++ dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
++ spin_unlock_irqrestore(&dev->lock, flags);
++#endif
++
++ langwell_wakeup(&dev->gadget);
++
++ return rc;
++}
++static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
++
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -1816,7 +1853,7 @@
+ if (!dev)
+ return -ENODEV;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (dev->driver)
+ return -EBUSY;
+@@ -1832,41 +1869,33 @@
+
+ retval = driver->bind(&dev->gadget);
+ if (retval) {
+- DBG(dev, "bind to driver %s --> %d\n",
++ dev_dbg(&dev->pdev->dev, "bind to driver %s --> %d\n",
+ driver->driver.name, retval);
+ dev->driver = NULL;
+ dev->gadget.dev.driver = NULL;
+ return retval;
+ }
+
+- retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+- if (retval)
+- goto err_unbind;
+-
+ dev->usb_state = USB_STATE_ATTACHED;
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
++ /* bind OTG transceiver */
++ if (dev->transceiver)
++ (void)otg_set_peripheral(dev->transceiver, &dev->gadget);
++
+ /* enable interrupt and set controller to run state */
+ if (dev->got_irq)
+ langwell_udc_start(dev);
+
+- VDBG(dev, "After langwell_udc_start(), print all registers:\n");
+-#ifdef VERBOSE
++ dev_vdbg(&dev->pdev->dev,
++ "After langwell_udc_start(), print all registers:\n");
+ print_all_registers(dev);
+-#endif
+
+- INFO(dev, "register driver: %s\n", driver->driver.name);
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_info(&dev->pdev->dev, "register driver: %s\n",
++ driver->driver.name);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+-
+-err_unbind:
+- driver->unbind(&dev->gadget);
+- dev->gadget.dev.driver = NULL;
+- dev->driver = NULL;
+-
+- DBG(dev, "<--- %s()\n", __func__);
+- return retval;
+ }
+ EXPORT_SYMBOL(usb_gadget_register_driver);
+
+@@ -1880,11 +1909,15 @@
+ if (!dev)
+ return -ENODEV;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+- if (unlikely(!driver || !driver->bind || !driver->unbind))
++ if (unlikely(!driver || !driver->unbind || !driver->disconnect))
+ return -EINVAL;
+
++ /* exit PHY low power suspend */
++ if (dev->pdev->device != 0x0829)
++ langwell_phy_low_power(dev, 0);
++
+ /* unbind OTG transceiver */
+ if (dev->transceiver)
+ (void)otg_set_peripheral(dev->transceiver, 0);
+@@ -1908,15 +1941,63 @@
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
+
+- device_remove_file(&dev->pdev->dev, &dev_attr_function);
+-
+- INFO(dev, "unregistered driver '%s'\n", driver->driver.name);
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
++ driver->driver.name);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+ EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
++/* gets the maximum power consumption */
++int langwell_udc_maxpower(int *mA)
++{
++ struct langwell_udc *dev = the_controller;
++ u32 usbmode, portsc1, usbcmd;
++
++ /* fatal error */
++ if (!dev) {
++ *mA = 0;
++ return -EINVAL;
++ }
++
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
++
++	/* controller is not in device mode */
++ usbmode = readl(&dev->op_regs->usbmode);
++ if (MODE_CM(usbmode) != MODE_DEVICE) {
++ *mA = 0;
++ return -EINVAL;
++ }
++
++ /* can't get maximum power */
++ usbcmd = readl(&dev->op_regs->usbcmd);
++ if (!(usbcmd & CMD_RUNSTOP)) {
++ *mA = 0;
++ return -EINVAL;
++ }
++
++ /* disconnect to USB host */
++ portsc1 = readl(&dev->op_regs->portsc1);
++ if (!(portsc1 & PORTS_CCS)) {
++ *mA = 0;
++ return -EINVAL;
++ }
++
++ /* set max power capability */
++ *mA = CONFIG_USB_GADGET_VBUS_DRAW;
++
++ if ((*mA < 8) || (*mA > 500)) {
++ *mA = 0;
++ return -EINVAL;
++ }
++
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(langwell_udc_maxpower);
++
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -1930,7 +2011,7 @@
+ unsigned long timeout;
+ struct langwell_dqh *dqh;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* ep0 OUT dQH */
+ dqh = &dev->ep_dqh[EP_DIR_OUT];
+@@ -1943,7 +2024,7 @@
+ timeout = jiffies + SETUPSTAT_TIMEOUT;
+ while (readl(&dev->op_regs->endptsetupstat)) {
+ if (time_after(jiffies, timeout)) {
+- ERROR(dev, "setup_tripwire timeout\n");
++ dev_err(&dev->pdev->dev, "setup_tripwire timeout\n");
+ break;
+ }
+ cpu_relax();
+@@ -1963,7 +2044,7 @@
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -1972,7 +2053,7 @@
+ {
+ u32 endptctrl;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* set TX and RX to stall */
+ endptctrl = readl(&dev->op_regs->endptctrl[0]);
+@@ -1983,7 +2064,7 @@
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -1994,7 +2075,7 @@
+ struct langwell_ep *ep;
+ int status = 0;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (dir == EP_DIR_IN)
+ dev->ep0_dir = USB_DIR_IN;
+@@ -2019,11 +2100,11 @@
+ return -ENOMEM;
+
+ if (status)
+- ERROR(dev, "can't queue ep0 status request\n");
++ dev_err(&dev->pdev->dev, "can't queue ep0 status request\n");
+
+ list_add_tail(&req->queue, &ep->queue);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return status;
+ }
+
+@@ -2032,11 +2113,11 @@
+ static void set_address(struct langwell_udc *dev, u16 value,
+ u16 index, u16 length)
+ {
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* save the new address to device struct */
+ dev->dev_addr = (u8) value;
+- VDBG(dev, "dev->dev_addr = %d\n", dev->dev_addr);
++ dev_vdbg(&dev->pdev->dev, "dev->dev_addr = %d\n", dev->dev_addr);
+
+ /* update usb state */
+ dev->usb_state = USB_STATE_ADDRESS;
+@@ -2045,7 +2126,7 @@
+ if (prime_status_phase(dev, EP_DIR_IN))
+ ep0_stall(dev);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -2054,7 +2135,7 @@
+ u16 wIndex)
+ {
+ struct langwell_ep *ep;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &dev->ep[0];
+@@ -2073,7 +2154,7 @@
+ return ep;
+ }
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return NULL;
+ }
+
+@@ -2085,7 +2166,7 @@
+ u32 endptctrl;
+ int retval;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
+ if (is_in(ep))
+@@ -2093,7 +2174,7 @@
+ else
+ retval = endptctrl & EPCTRL_RXS ? 1 : 0;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return retval;
+ }
+
+@@ -2107,14 +2188,13 @@
+ u16 status_data = 0; /* 16 bits cpu view status data */
+ int status = 0;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ ep = &dev->ep[0];
+
+ if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ /* get device status */
+- status_data = 1 << USB_DEVICE_SELF_POWERED;
+- status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
++ status_data = dev->dev_status;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+ /* get interface status */
+ status_data = 0;
+@@ -2129,6 +2209,8 @@
+ status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
+ }
+
++ dev_dbg(&dev->pdev->dev, "get status data: 0x%04x\n", status_data);
++
+ dev->ep0_dir = USB_DIR_IN;
+
+ /* borrow the per device status_req */
+@@ -2150,18 +2232,19 @@
+ goto stall;
+
+ if (status) {
+- ERROR(dev, "response error on GET_STATUS request\n");
++ dev_err(&dev->pdev->dev,
++ "response error on GET_STATUS request\n");
+ goto stall;
+ }
+
+ list_add_tail(&req->queue, &ep->queue);
+ dev->ep0_state = DATA_STATE_XMIT;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ stall:
+ ep0_stall(dev);
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -2173,12 +2256,12 @@
+ u16 wIndex = le16_to_cpu(setup->wIndex);
+ u16 wLength = le16_to_cpu(setup->wLength);
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* ep0 fifo flush */
+ nuke(&dev->ep[0], -ESHUTDOWN);
+
+- DBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
++ dev_dbg(&dev->pdev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ setup->bRequestType, setup->bRequest,
+ wValue, wIndex, wLength);
+
+@@ -2197,7 +2280,7 @@
+ /* We process some stardard setup requests here */
+ switch (setup->bRequest) {
+ case USB_REQ_GET_STATUS:
+- DBG(dev, "SETUP: USB_REQ_GET_STATUS\n");
++ dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_GET_STATUS\n");
+ /* get status, DATA and STATUS phase */
+ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+ != (USB_DIR_IN | USB_TYPE_STANDARD))
+@@ -2206,7 +2289,7 @@
+ goto end;
+
+ case USB_REQ_SET_ADDRESS:
+- DBG(dev, "SETUP: USB_REQ_SET_ADDRESS\n");
++ dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_SET_ADDRESS\n");
+ /* STATUS phase */
+ if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+ | USB_RECIP_DEVICE))
+@@ -2220,9 +2303,11 @@
+ {
+ int rc = -EOPNOTSUPP;
+ if (setup->bRequest == USB_REQ_SET_FEATURE)
+- DBG(dev, "SETUP: USB_REQ_SET_FEATURE\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_SET_FEATURE\n");
+ else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
+- DBG(dev, "SETUP: USB_REQ_CLEAR_FEATURE\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_CLEAR_FEATURE\n");
+
+ if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+ == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+@@ -2240,29 +2325,40 @@
+
+ spin_unlock(&dev->lock);
+ rc = langwell_ep_set_halt(&epn->ep,
+- (setup->bRequest == USB_REQ_SET_FEATURE)
+- ? 1 : 0);
++ (setup->bRequest == USB_REQ_SET_FEATURE)
++ ? 1 : 0);
+ spin_lock(&dev->lock);
+
+ } else if ((setup->bRequestType & (USB_RECIP_MASK
+ | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+ | USB_TYPE_STANDARD)) {
+- if (!gadget_is_otg(&dev->gadget))
++ rc = 0;
++ switch (wValue) {
++ case USB_DEVICE_REMOTE_WAKEUP:
++ if (setup->bRequest == USB_REQ_SET_FEATURE) {
++ dev->remote_wakeup = 1;
++ dev->dev_status |= (1 << wValue);
++ } else {
++ dev->remote_wakeup = 0;
++ dev->dev_status &= ~(1 << wValue);
++ }
+ break;
+- else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
++ case USB_DEVICE_B_HNP_ENABLE:
+ dev->gadget.b_hnp_enable = 1;
+-#ifdef OTG_TRANSCEIVER
+- if (!dev->lotg->otg.default_a)
+- dev->lotg->hsm.b_hnp_enable = 1;
+-#endif
+- } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
++ dev->dev_status |= (1 << wValue);
++ break;
++ case USB_DEVICE_A_HNP_SUPPORT:
+ dev->gadget.a_hnp_support = 1;
+- else if (setup->bRequest ==
+- USB_DEVICE_A_ALT_HNP_SUPPORT)
++ dev->dev_status |= (1 << wValue);
++ break;
++ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ dev->gadget.a_alt_hnp_support = 1;
+- else
++ dev->dev_status |= (1 << wValue);
+ break;
+- rc = 0;
++ default:
++ rc = -EOPNOTSUPP;
++ break;
++ }
+ } else
+ break;
+
+@@ -2274,31 +2370,38 @@
+ }
+
+ case USB_REQ_GET_DESCRIPTOR:
+- DBG(dev, "SETUP: USB_REQ_GET_DESCRIPTOR\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_GET_DESCRIPTOR\n");
+ goto delegate;
+
+ case USB_REQ_SET_DESCRIPTOR:
+- DBG(dev, "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
+ goto delegate;
+
+ case USB_REQ_GET_CONFIGURATION:
+- DBG(dev, "SETUP: USB_REQ_GET_CONFIGURATION\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_GET_CONFIGURATION\n");
+ goto delegate;
+
+ case USB_REQ_SET_CONFIGURATION:
+- DBG(dev, "SETUP: USB_REQ_SET_CONFIGURATION\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_SET_CONFIGURATION\n");
+ goto delegate;
+
+ case USB_REQ_GET_INTERFACE:
+- DBG(dev, "SETUP: USB_REQ_GET_INTERFACE\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_GET_INTERFACE\n");
+ goto delegate;
+
+ case USB_REQ_SET_INTERFACE:
+- DBG(dev, "SETUP: USB_REQ_SET_INTERFACE\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_SET_INTERFACE\n");
+ goto delegate;
+
+ case USB_REQ_SYNCH_FRAME:
+- DBG(dev, "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
++ dev_dbg(&dev->pdev->dev,
++ "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
+ goto delegate;
+
+ default:
+@@ -2310,7 +2413,8 @@
+ /* DATA phase from gadget, STATUS phase from udc */
+ dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+ ? USB_DIR_IN : USB_DIR_OUT;
+- VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
++ dev_vdbg(&dev->pdev->dev,
++ "dev->ep0_dir = 0x%x, wLength = %d\n",
+ dev->ep0_dir, wLength);
+ spin_unlock(&dev->lock);
+ if (dev->driver->setup(&dev->gadget,
+@@ -2322,7 +2426,8 @@
+ } else {
+ /* no DATA phase, IN STATUS phase from gadget */
+ dev->ep0_dir = USB_DIR_IN;
+- VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
++ dev_vdbg(&dev->pdev->dev,
++ "dev->ep0_dir = 0x%x, wLength = %d\n",
+ dev->ep0_dir, wLength);
+ spin_unlock(&dev->lock);
+ if (dev->driver->setup(&dev->gadget,
+@@ -2334,7 +2439,7 @@
+ break;
+ }
+ end:
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+@@ -2359,7 +2464,7 @@
+ td_complete = 0;
+ actual = curr_req->req.length;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ for (i = 0; i < curr_req->dtd_count; i++) {
+ remaining_length = le16_to_cpu(curr_dtd->dtd_total);
+@@ -2372,10 +2477,12 @@
+ /* transfers completed successfully */
+ if (!remaining_length) {
+ td_complete++;
+- VDBG(dev, "dTD transmitted successfully\n");
++ dev_vdbg(&dev->pdev->dev,
++ "dTD transmitted successfully\n");
+ } else {
+ if (dir) {
+- VDBG(dev, "TX dTD remains data\n");
++ dev_vdbg(&dev->pdev->dev,
++ "TX dTD remains data\n");
+ retval = -EPROTO;
+ break;
+
+@@ -2387,27 +2494,32 @@
+ } else {
+ /* transfers completed with errors */
+ if (dtd_status & DTD_STS_ACTIVE) {
+- DBG(dev, "request not completed\n");
++ dev_dbg(&dev->pdev->dev,
++ "dTD status ACTIVE dQH[%d]\n", index);
+ retval = 1;
+ return retval;
+ } else if (dtd_status & DTD_STS_HALTED) {
+- ERROR(dev, "dTD error %08x dQH[%d]\n",
+- dtd_status, index);
++ dev_err(&dev->pdev->dev,
++ "dTD error %08x dQH[%d]\n",
++ dtd_status, index);
+ /* clear the errors and halt condition */
+ curr_dqh->dtd_status = 0;
+ retval = -EPIPE;
+ break;
+ } else if (dtd_status & DTD_STS_DBE) {
+- DBG(dev, "data buffer (overflow) error\n");
++ dev_dbg(&dev->pdev->dev,
++ "data buffer (overflow) error\n");
+ retval = -EPROTO;
+ break;
+ } else if (dtd_status & DTD_STS_TRE) {
+- DBG(dev, "transaction(ISO) error\n");
++ dev_dbg(&dev->pdev->dev,
++ "transaction(ISO) error\n");
+ retval = -EILSEQ;
+ break;
+ } else
+- ERROR(dev, "unknown error (0x%x)!\n",
+- dtd_status);
++ dev_err(&dev->pdev->dev,
++ "unknown error (0x%x)!\n",
++ dtd_status);
+ }
+
+ if (i != curr_req->dtd_count - 1)
+@@ -2420,7 +2532,7 @@
+
+ curr_req->req.actual = actual;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -2430,7 +2542,7 @@
+ struct langwell_ep *ep0, struct langwell_request *req)
+ {
+ u32 new_addr;
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (dev->usb_state == USB_STATE_ADDRESS) {
+ /* set the new address */
+@@ -2438,7 +2550,7 @@
+ writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
+
+ new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
+- VDBG(dev, "new_addr = %d\n", new_addr);
++ dev_vdbg(&dev->pdev->dev, "new_addr = %d\n", new_addr);
+ }
+
+ done(ep0, req, 0);
+@@ -2458,14 +2570,14 @@
+ dev->ep0_state = WAIT_FOR_SETUP;
+ break;
+ case WAIT_FOR_SETUP:
+- ERROR(dev, "unexpect ep0 packets\n");
++ dev_err(&dev->pdev->dev, "unexpect ep0 packets\n");
+ break;
+ default:
+ ep0_stall(dev);
+ break;
+ }
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -2477,16 +2589,17 @@
+ struct langwell_ep *epn;
+ struct langwell_request *curr_req, *temp_req;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ complete_bits = readl(&dev->op_regs->endptcomplete);
+- VDBG(dev, "endptcomplete register: 0x%08x\n", complete_bits);
++ dev_vdbg(&dev->pdev->dev, "endptcomplete register: 0x%08x\n",
++ complete_bits);
+
+ /* Write-Clear the bits in endptcomplete register */
+ writel(complete_bits, &dev->op_regs->endptcomplete);
+
+ if (!complete_bits) {
+- DBG(dev, "complete_bits = 0\n");
++ dev_dbg(&dev->pdev->dev, "complete_bits = 0\n");
+ goto done;
+ }
+
+@@ -2506,23 +2619,25 @@
+ epn = &dev->ep[i];
+
+ if (epn->name == NULL) {
+- WARNING(dev, "invalid endpoint\n");
++ dev_warn(&dev->pdev->dev, "invalid endpoint\n");
+ continue;
+ }
+
+ if (i < 2)
+ /* ep0 in and out */
+- DBG(dev, "%s-%s transfer completed\n",
++ dev_dbg(&dev->pdev->dev, "%s-%s transfer completed\n",
+ epn->name,
+ is_in(epn) ? "in" : "out");
+ else
+- DBG(dev, "%s transfer completed\n", epn->name);
++ dev_dbg(&dev->pdev->dev, "%s transfer completed\n",
++ epn->name);
+
+ /* process the req queue until an uncomplete request */
+ list_for_each_entry_safe(curr_req, temp_req,
+ &epn->queue, queue) {
+ status = process_ep_req(dev, i, curr_req);
+- VDBG(dev, "%s req status: %d\n", epn->name, status);
++ dev_vdbg(&dev->pdev->dev, "%s req status: %d\n",
++ epn->name, status);
+
+ if (status)
+ break;
+@@ -2540,7 +2655,7 @@
+ }
+ }
+ done:
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+@@ -2551,14 +2666,14 @@
+ u32 portsc1, devlc;
+ u32 speed;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (dev->bus_reset)
+ dev->bus_reset = 0;
+
+ portsc1 = readl(&dev->op_regs->portsc1);
+ devlc = readl(&dev->op_regs->devlc);
+- VDBG(dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
++ dev_vdbg(&dev->pdev->dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
+ portsc1, devlc);
+
+ /* bus reset is finished */
+@@ -2579,25 +2694,22 @@
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+- VDBG(dev, "speed = %d, dev->gadget.speed = %d\n",
++ dev_vdbg(&dev->pdev->dev,
++ "speed = %d, dev->gadget.speed = %d\n",
+ speed, dev->gadget.speed);
+ }
+
+ /* LPM L0 to L1 */
+ if (dev->lpm && dev->lpm_state == LPM_L0)
+ if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
+- INFO(dev, "LPM L0 to L1\n");
+- dev->lpm_state = LPM_L1;
++ dev_info(&dev->pdev->dev, "LPM L0 to L1\n");
++ dev->lpm_state = LPM_L1;
+ }
+
+ /* LPM L1 to L0, force resume or remote wakeup finished */
+ if (dev->lpm && dev->lpm_state == LPM_L1)
+ if (!(portsc1 & PORTS_SUSP)) {
+- if (portsc1 & PORTS_SLP)
+- INFO(dev, "LPM L1 to L0, force resume\n");
+- else
+- INFO(dev, "LPM L1 to L0, remote wakeup\n");
+-
++ dev_info(&dev->pdev->dev, "LPM L1 to L0\n");
+ dev->lpm_state = LPM_L0;
+ }
+
+@@ -2605,7 +2717,7 @@
+ if (!dev->resume_state)
+ dev->usb_state = USB_STATE_DEFAULT;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -2617,7 +2729,7 @@
+ endptcomplete;
+ unsigned long timeout;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* Write-Clear the device address */
+ deviceaddr = readl(&dev->op_regs->deviceaddr);
+@@ -2634,7 +2746,10 @@
+
+ dev->ep0_dir = USB_DIR_OUT;
+ dev->ep0_state = WAIT_FOR_SETUP;
+- dev->remote_wakeup = 0; /* default to 0 on reset */
++
++ /* remote wakeup reset to 0 when the device is reset */
++ dev->remote_wakeup = 0;
++ dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
+ dev->gadget.b_hnp_enable = 0;
+ dev->gadget.a_hnp_support = 0;
+ dev->gadget.a_alt_hnp_support = 0;
+@@ -2651,7 +2766,7 @@
+ timeout = jiffies + PRIME_TIMEOUT;
+ while (readl(&dev->op_regs->endptprime)) {
+ if (time_after(jiffies, timeout)) {
+- ERROR(dev, "USB reset timeout\n");
++ dev_err(&dev->pdev->dev, "USB reset timeout\n");
+ break;
+ }
+ cpu_relax();
+@@ -2661,7 +2776,7 @@
+ writel((u32) ~0, &dev->op_regs->endptflush);
+
+ if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
+- VDBG(dev, "USB bus reset\n");
++ dev_vdbg(&dev->pdev->dev, "USB bus reset\n");
+ /* bus is reseting */
+ dev->bus_reset = 1;
+
+@@ -2669,7 +2784,7 @@
+ stop_activity(dev, dev->driver);
+ dev->usb_state = USB_STATE_DEFAULT;
+ } else {
+- VDBG(dev, "device controller reset\n");
++ dev_vdbg(&dev->pdev->dev, "device controller reset\n");
+ /* controller reset */
+ langwell_udc_reset(dev);
+
+@@ -2685,46 +2800,21 @@
+ dev->usb_state = USB_STATE_ATTACHED;
+ }
+
+-#ifdef OTG_TRANSCEIVER
+- /* refer to USB OTG 6.6.2.3 b_hnp_en is cleared */
+- if (!dev->lotg->otg.default_a)
+- dev->lotg->hsm.b_hnp_enable = 0;
+-#endif
+-
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+ /* USB bus suspend/resume interrupt */
+ static void handle_bus_suspend(struct langwell_udc *dev)
+ {
+- u32 devlc;
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ dev->resume_state = dev->usb_state;
+ dev->usb_state = USB_STATE_SUSPENDED;
+
+ #ifdef OTG_TRANSCEIVER
+- if (dev->lotg->otg.default_a) {
+- if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
+- dev->lotg->hsm.b_bus_suspend = 1;
+- /* notify transceiver the state changes */
+- if (spin_trylock(&dev->lotg->wq_lock)) {
+- langwell_update_transceiver();
+- spin_unlock(&dev->lotg->wq_lock);
+- }
+- }
+- dev->lotg->hsm.b_bus_suspend_vld++;
+- } else {
+- if (!dev->lotg->hsm.a_bus_suspend) {
+- dev->lotg->hsm.a_bus_suspend = 1;
+- /* notify transceiver the state changes */
+- if (spin_trylock(&dev->lotg->wq_lock)) {
+- langwell_update_transceiver();
+- spin_unlock(&dev->lotg->wq_lock);
+- }
+- }
+- }
++ atomic_notifier_call_chain(&dev->iotg->iotg_notifier,
++ MID_OTG_NOTIFY_CSUSPEND, dev->iotg);
+ #endif
+
+ /* report suspend to the driver */
+@@ -2733,37 +2823,33 @@
+ spin_unlock(&dev->lock);
+ dev->driver->suspend(&dev->gadget);
+ spin_lock(&dev->lock);
+- DBG(dev, "suspend %s\n", dev->driver->driver.name);
++ dev_dbg(&dev->pdev->dev, "suspend %s\n",
++ dev->driver->driver.name);
+ }
+ }
+
+ /* enter PHY low power suspend */
+- devlc = readl(&dev->op_regs->devlc);
+- VDBG(dev, "devlc = 0x%08x\n", devlc);
+- devlc |= LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ if (dev->pdev->device != 0x0829)
++ langwell_phy_low_power(dev, 1);
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+ static void handle_bus_resume(struct langwell_udc *dev)
+ {
+- u32 devlc;
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ dev->usb_state = dev->resume_state;
+ dev->resume_state = 0;
+
+ /* exit PHY low power suspend */
+- devlc = readl(&dev->op_regs->devlc);
+- VDBG(dev, "devlc = 0x%08x\n", devlc);
+- devlc &= ~LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ if (dev->pdev->device != 0x0829)
++ langwell_phy_low_power(dev, 0);
+
+ #ifdef OTG_TRANSCEIVER
+- if (dev->lotg->otg.default_a == 0)
+- dev->lotg->hsm.a_bus_suspend = 0;
++ atomic_notifier_call_chain(&dev->iotg->iotg_notifier,
++ MID_OTG_NOTIFY_CRESUME, dev->iotg);
+ #endif
+
+ /* report resume to the driver */
+@@ -2772,11 +2858,12 @@
+ spin_unlock(&dev->lock);
+ dev->driver->resume(&dev->gadget);
+ spin_lock(&dev->lock);
+- DBG(dev, "resume %s\n", dev->driver->driver.name);
++ dev_dbg(&dev->pdev->dev, "resume %s\n",
++ dev->driver->driver.name);
+ }
+ }
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+
+@@ -2789,11 +2876,11 @@
+ irq_sts,
+ portsc1;
+
+- VDBG(dev, "---> %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ if (dev->stopped) {
+- VDBG(dev, "handle IRQ_NONE\n");
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return IRQ_NONE;
+ }
+
+@@ -2806,12 +2893,13 @@
+ usbintr = readl(&dev->op_regs->usbintr);
+
+ irq_sts = usbsts & usbintr;
+- VDBG(dev, "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
++ dev_vdbg(&dev->pdev->dev,
++ "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
+ usbsts, usbintr, irq_sts);
+
+ if (!irq_sts) {
+- VDBG(dev, "handle IRQ_NONE\n");
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ spin_unlock(&dev->lock);
+ return IRQ_NONE;
+ }
+@@ -2827,12 +2915,13 @@
+
+ /* USB interrupt */
+ if (irq_sts & STS_UI) {
+- VDBG(dev, "USB interrupt\n");
++ dev_vdbg(&dev->pdev->dev, "USB interrupt\n");
+
+ /* setup packet received from ep0 */
+ if (readl(&dev->op_regs->endptsetupstat)
+ & EP0SETUPSTAT_MASK) {
+- VDBG(dev, "USB SETUP packet received interrupt\n");
++ dev_vdbg(&dev->pdev->dev,
++ "USB SETUP packet received interrupt\n");
+ /* setup tripwire semaphone */
+ setup_tripwire(dev);
+ handle_setup_packet(dev, &dev->local_setup_buff);
+@@ -2840,7 +2929,8 @@
+
+ /* USB transfer completion */
+ if (readl(&dev->op_regs->endptcomplete)) {
+- VDBG(dev, "USB transfer completion interrupt\n");
++ dev_vdbg(&dev->pdev->dev,
++ "USB transfer completion interrupt\n");
+ handle_trans_complete(dev);
+ }
+ }
+@@ -2848,36 +2938,36 @@
+ /* SOF received interrupt (for ISO transfer) */
+ if (irq_sts & STS_SRI) {
+ /* FIXME */
+- /* VDBG(dev, "SOF received interrupt\n"); */
++ /* dev_vdbg(&dev->pdev->dev, "SOF received interrupt\n"); */
+ }
+
+ /* port change detect interrupt */
+ if (irq_sts & STS_PCI) {
+- VDBG(dev, "port change detect interrupt\n");
++ dev_vdbg(&dev->pdev->dev, "port change detect interrupt\n");
+ handle_port_change(dev);
+ }
+
+ /* suspend interrrupt */
+ if (irq_sts & STS_SLI) {
+- VDBG(dev, "suspend interrupt\n");
++ dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
+ handle_bus_suspend(dev);
+ }
+
+ /* USB reset interrupt */
+ if (irq_sts & STS_URI) {
+- VDBG(dev, "USB reset interrupt\n");
++ dev_vdbg(&dev->pdev->dev, "USB reset interrupt\n");
+ handle_usb_reset(dev);
+ }
+
+ /* USB error or system error interrupt */
+ if (irq_sts & (STS_UEI | STS_SEI)) {
+ /* FIXME */
+- WARNING(dev, "error IRQ, irq_sts: %x\n", irq_sts);
++ dev_warn(&dev->pdev->dev, "error IRQ, irq_sts: %x\n", irq_sts);
+ }
+
+ spin_unlock(&dev->lock);
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+@@ -2889,15 +2979,59 @@
+ {
+ struct langwell_udc *dev = the_controller;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ complete(dev->done);
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ kfree(dev);
+ }
+
+
++/* enable SRAM caching if SRAM detected */
++static void sram_init(struct langwell_udc *dev)
++{
++ struct pci_dev *pdev = dev->pdev;
++
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
++
++ dev->sram_addr = pci_resource_start(pdev, 1);
++ dev->sram_size = pci_resource_len(pdev, 1);
++ dev_info(&dev->pdev->dev, "Found private SRAM at %x size:%x\n",
++ dev->sram_addr, dev->sram_size);
++ dev->got_sram = 1;
++
++ if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
++ dev_warn(&dev->pdev->dev, "SRAM request failed\n");
++ dev->got_sram = 0;
++ } else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
++ dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
++ dev_warn(&dev->pdev->dev, "SRAM DMA declare failed\n");
++ pci_release_region(pdev, 1);
++ dev->got_sram = 0;
++ }
++
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
++}
++
++
++/* release SRAM caching */
++static void sram_deinit(struct langwell_udc *dev)
++{
++ struct pci_dev *pdev = dev->pdev;
++
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
++
++ dma_release_declared_memory(&pdev->dev);
++ pci_release_region(pdev, 1);
++
++ dev->got_sram = 0;
++
++ dev_info(&dev->pdev->dev, "release SRAM caching\n");
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
++}
++
++
+ /* tear down the binding between this driver and the pci device */
+ static void langwell_udc_remove(struct pci_dev *pdev)
+ {
+@@ -2906,23 +3040,29 @@
+ DECLARE_COMPLETION(done);
+
+ BUG_ON(dev->driver);
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ dev->done = &done;
+
+- /* free memory allocated in probe */
++#ifndef OTG_TRANSCEIVER
++ /* free dTD dma_pool and dQH */
+ if (dev->dtd_pool)
+ dma_pool_destroy(dev->dtd_pool);
+
++ if (dev->ep_dqh)
++ dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
++ dev->ep_dqh, dev->ep_dqh_dma);
++
++ /* release SRAM caching */
++ if (dev->has_sram && dev->got_sram)
++ sram_deinit(dev);
++#endif
++
+ if (dev->status_req) {
+ kfree(dev->status_req->req.buf);
+ kfree(dev->status_req);
+ }
+
+- if (dev->ep_dqh)
+- dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+- dev->ep_dqh, dev->ep_dqh_dma);
+-
+ kfree(dev->ep);
+
+ /* diable IRQ handler */
+@@ -2943,17 +3083,18 @@
+ if (dev->transceiver) {
+ otg_put_transceiver(dev->transceiver);
+ dev->transceiver = NULL;
+- dev->lotg = NULL;
++ dev->iotg = NULL;
+ }
+ #endif
+
+ dev->cap_regs = NULL;
+
+- INFO(dev, "unbind\n");
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_info(&dev->pdev->dev, "unbind\n");
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+
+ device_unregister(&dev->gadget.dev);
+ device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
++ device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
+
+ #ifndef OTG_TRANSCEIVER
+ pci_set_drvdata(pdev, NULL);
+@@ -2976,9 +3117,9 @@
+ struct langwell_udc *dev;
+ #ifndef OTG_TRANSCEIVER
+ unsigned long resource, len;
++ size_t size;
+ #endif
+ void __iomem *base = NULL;
+- size_t size;
+ int retval;
+
+ if (the_controller) {
+@@ -2997,7 +3138,7 @@
+ spin_lock_init(&dev->lock);
+
+ dev->pdev = pdev;
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ #ifdef OTG_TRANSCEIVER
+ /* PCI device is already enabled by otg_transceiver driver */
+@@ -3006,8 +3147,10 @@
+ /* mem region and register base */
+ dev->region = 1;
+ dev->transceiver = otg_get_transceiver();
+- dev->lotg = otg_to_langwell(dev->transceiver);
+- base = dev->lotg->regs;
++
++ dev->iotg = otg_to_mid_xceiv(dev->transceiver);
++
++ base = dev->iotg->base;
+ #else
+ pci_set_drvdata(pdev, dev);
+
+@@ -3022,7 +3165,7 @@
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!request_mem_region(resource, len, driver_name)) {
+- ERROR(dev, "controller already in use\n");
++ dev_err(&dev->pdev->dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto error;
+ }
+@@ -3031,33 +3174,43 @@
+ base = ioremap_nocache(resource, len);
+ #endif
+ if (base == NULL) {
+- ERROR(dev, "can't map memory\n");
++ dev_err(&dev->pdev->dev, "can't map memory\n");
+ retval = -EFAULT;
+ goto error;
+ }
+
+ dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
+- VDBG(dev, "dev->cap_regs: %p\n", dev->cap_regs);
++ dev_vdbg(&dev->pdev->dev, "dev->cap_regs: %p\n", dev->cap_regs);
+ dev->op_regs = (struct langwell_op_regs __iomem *)
+ (base + OP_REG_OFFSET);
+- VDBG(dev, "dev->op_regs: %p\n", dev->op_regs);
++ dev_vdbg(&dev->pdev->dev, "dev->op_regs: %p\n", dev->op_regs);
+
+ /* irq setup after old hardware is cleaned up */
+ if (!pdev->irq) {
+- ERROR(dev, "No IRQ. Check PCI setup!\n");
++ dev_err(&dev->pdev->dev, "No IRQ. Check PCI setup!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
++ dev->has_sram = 1;
++ dev->got_sram = 0;
++ dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
++
+ #ifndef OTG_TRANSCEIVER
+- INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
++ /* enable SRAM caching if detected */
++ if (dev->has_sram && !dev->got_sram)
++ sram_init(dev);
++
++ dev_info(&dev->pdev->dev,
++ "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
+ pdev->irq, resource, len, base);
+ /* enables bus-mastering for device dev */
+ pci_set_master(pdev);
+
+ if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
+ driver_name, dev) != 0) {
+- ERROR(dev, "request interrupt %d failed\n", pdev->irq);
++ dev_err(&dev->pdev->dev,
++ "request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+ goto error;
+ }
+@@ -3071,32 +3224,35 @@
+ dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
+ dev->dciversion = readw(&dev->cap_regs->dciversion);
+ dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
+- VDBG(dev, "dev->lpm: %d\n", dev->lpm);
+- VDBG(dev, "dev->dciversion: 0x%04x\n", dev->dciversion);
+- VDBG(dev, "dccparams: 0x%08x\n", readl(&dev->cap_regs->dccparams));
+- VDBG(dev, "dev->devcap: %d\n", dev->devcap);
++ dev_vdbg(&dev->pdev->dev, "dev->lpm: %d\n", dev->lpm);
++ dev_vdbg(&dev->pdev->dev, "dev->dciversion: 0x%04x\n",
++ dev->dciversion);
++ dev_vdbg(&dev->pdev->dev, "dccparams: 0x%08x\n",
++ readl(&dev->cap_regs->dccparams));
++ dev_vdbg(&dev->pdev->dev, "dev->devcap: %d\n", dev->devcap);
+ if (!dev->devcap) {
+- ERROR(dev, "can't support device mode\n");
++ dev_err(&dev->pdev->dev, "can't support device mode\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ /* a pair of endpoints (out/in) for each address */
+ dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
+- VDBG(dev, "dev->ep_max: %d\n", dev->ep_max);
++ dev_vdbg(&dev->pdev->dev, "dev->ep_max: %d\n", dev->ep_max);
+
+ /* allocate endpoints memory */
+ dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
+ GFP_KERNEL);
+ if (!dev->ep) {
+- ERROR(dev, "allocate endpoints memory failed\n");
++ dev_err(&dev->pdev->dev, "allocate endpoints memory failed\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+
++#ifndef OTG_TRANSCEIVER
+ /* allocate device dQH memory */
+ size = dev->ep_max * sizeof(struct langwell_dqh);
+- VDBG(dev, "orig size = %d\n", size);
++ dev_vdbg(&dev->pdev->dev, "orig size = %d\n", size);
+ if (size < DQH_ALIGNMENT)
+ size = DQH_ALIGNMENT;
+ else if ((size % DQH_ALIGNMENT) != 0) {
+@@ -3106,17 +3262,19 @@
+ dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+ &dev->ep_dqh_dma, GFP_KERNEL);
+ if (!dev->ep_dqh) {
+- ERROR(dev, "allocate dQH memory failed\n");
++ dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ dev->ep_dqh_size = size;
+- VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
++ dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
++#endif
+
+ /* initialize ep0 status request structure */
+ dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
+ if (!dev->status_req) {
+- ERROR(dev, "allocate status_req memory failed\n");
++ dev_err(&dev->pdev->dev,
++ "allocate status_req memory failed\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+@@ -3129,7 +3287,10 @@
+ dev->resume_state = USB_STATE_NOTATTACHED;
+ dev->usb_state = USB_STATE_POWERED;
+ dev->ep0_dir = USB_DIR_OUT;
+- dev->remote_wakeup = 0; /* default to 0 on reset */
++
++ /* remote wakeup reset to 0 when the device is reset */
++ dev->remote_wakeup = 0;
++ dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
+
+ #ifndef OTG_TRANSCEIVER
+ /* reset device controller */
+@@ -3159,7 +3320,6 @@
+ #ifndef OTG_TRANSCEIVER
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(dev);
+-#endif
+
+ /* create dTD dma_pool resource */
+ dev->dtd_pool = dma_pool_create("langwell_dtd",
+@@ -3172,20 +3332,23 @@
+ retval = -ENOMEM;
+ goto error;
+ }
++#endif
+
+ /* done */
+- INFO(dev, "%s\n", driver_desc);
+- INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+- INFO(dev, "Driver version: " DRIVER_VERSION "\n");
+- INFO(dev, "Support (max) %d endpoints\n", dev->ep_max);
+- INFO(dev, "Device interface version: 0x%04x\n", dev->dciversion);
+- INFO(dev, "Controller mode: %s\n", dev->devcap ? "Device" : "Host");
+- INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No");
++ dev_info(&dev->pdev->dev, "%s\n", driver_desc);
++ dev_info(&dev->pdev->dev, "irq %d, pci mem %p\n", pdev->irq, base);
++ dev_info(&dev->pdev->dev, "Driver version: " DRIVER_VERSION "\n");
++ dev_info(&dev->pdev->dev, "Support (max) %d endpoints\n", dev->ep_max);
++ dev_info(&dev->pdev->dev, "Device interface version: 0x%04x\n",
++ dev->dciversion);
++ dev_info(&dev->pdev->dev, "Controller mode: %s\n",
++ dev->devcap ? "Device" : "Host");
++ dev_info(&dev->pdev->dev, "Support USB LPM: %s\n",
++ dev->lpm ? "Yes" : "No");
+
+- VDBG(dev, "After langwell_udc_probe(), print all registers:\n");
+-#ifdef VERBOSE
++ dev_vdbg(&dev->pdev->dev,
++ "After langwell_udc_probe(), print all registers:\n");
+ print_all_registers(dev);
+-#endif
+
+ the_controller = dev;
+
+@@ -3197,12 +3360,18 @@
+ if (retval)
+ goto error;
+
+- VDBG(dev, "<--- %s()\n", __func__);
++ retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
++ if (retval)
++ goto error_attr1;
++
++ dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+
++error_attr1:
++ device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+ error:
+ if (dev) {
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ langwell_udc_remove(pdev);
+ }
+
+@@ -3214,9 +3383,8 @@
+ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+ {
+ struct langwell_udc *dev = the_controller;
+- u32 devlc;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* disable interrupt and set controller to stop state */
+ langwell_udc_stop(dev);
+@@ -3226,20 +3394,29 @@
+ free_irq(pdev->irq, dev);
+ dev->got_irq = 0;
+
+-
+ /* save PCI state */
+ pci_save_state(pdev);
+
++ /* free dTD dma_pool and dQH */
++ if (dev->dtd_pool)
++ dma_pool_destroy(dev->dtd_pool);
++
++ if (dev->ep_dqh)
++ dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
++ dev->ep_dqh, dev->ep_dqh_dma);
++
++ /* release SRAM caching */
++ if (dev->has_sram && dev->got_sram)
++ sram_deinit(dev);
++
+ /* set device power state */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ /* enter PHY low power suspend */
+- devlc = readl(&dev->op_regs->devlc);
+- VDBG(dev, "devlc = 0x%08x\n", devlc);
+- devlc |= LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ if (dev->pdev->device != 0x0829)
++ langwell_phy_low_power(dev, 1);
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -3248,27 +3425,58 @@
+ static int langwell_udc_resume(struct pci_dev *pdev)
+ {
+ struct langwell_udc *dev = the_controller;
+- u32 devlc;
++ size_t size;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* exit PHY low power suspend */
+- devlc = readl(&dev->op_regs->devlc);
+- VDBG(dev, "devlc = 0x%08x\n", devlc);
+- devlc &= ~LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ if (dev->pdev->device != 0x0829)
++ langwell_phy_low_power(dev, 0);
+
+ /* set device D0 power state */
+ pci_set_power_state(pdev, PCI_D0);
+
++ /* enable SRAM caching if detected */
++ if (dev->has_sram && !dev->got_sram)
++ sram_init(dev);
++
++ /* allocate device dQH memory */
++ size = dev->ep_max * sizeof(struct langwell_dqh);
++ dev_vdbg(&dev->pdev->dev, "orig size = %d\n", size);
++ if (size < DQH_ALIGNMENT)
++ size = DQH_ALIGNMENT;
++ else if ((size % DQH_ALIGNMENT) != 0) {
++ size += DQH_ALIGNMENT + 1;
++ size &= ~(DQH_ALIGNMENT - 1);
++ }
++ dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
++ &dev->ep_dqh_dma, GFP_KERNEL);
++ if (!dev->ep_dqh) {
++ dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
++ return -ENOMEM;
++ }
++ dev->ep_dqh_size = size;
++ dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
++
++ /* create dTD dma_pool resource */
++ dev->dtd_pool = dma_pool_create("langwell_dtd",
++ &dev->pdev->dev,
++ sizeof(struct langwell_dtd),
++ DTD_ALIGNMENT,
++ DMA_BOUNDARY);
++
++ if (!dev->dtd_pool)
++ return -ENOMEM;
++
+ /* restore PCI state */
+ pci_restore_state(pdev);
+
+ /* enable IRQ handler */
+- if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev)
+- != 0) {
+- ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+- return -1;
++ if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
++ driver_name, dev) != 0) {
++ dev_err(&dev->pdev->dev, "request interrupt %d failed\n",
++ pdev->irq);
++ return -EBUSY;
+ }
+ dev->got_irq = 1;
+
+@@ -3290,7 +3498,7 @@
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ return 0;
+ }
+
+@@ -3301,15 +3509,15 @@
+ struct langwell_udc *dev = the_controller;
+ u32 usbmode;
+
+- DBG(dev, "---> %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+
+ /* reset controller mode to IDLE */
+ usbmode = readl(&dev->op_regs->usbmode);
+- DBG(dev, "usbmode = 0x%08x\n", usbmode);
++ dev_dbg(&dev->pdev->dev, "usbmode = 0x%08x\n", usbmode);
+ usbmode &= (~3 | MODE_IDLE);
+ writel(usbmode, &dev->op_regs->usbmode);
+
+- DBG(dev, "<--- %s()\n", __func__);
++ dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
+ }
+
+ /*-------------------------------------------------------------------------*/
+@@ -3321,10 +3529,16 @@
+ .device = 0x0811,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
++}, {
++ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
++ .class_mask = ~0,
++ .vendor = 0x8086,
++ .device = 0x0829,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ }, { /* end: all zeroes */ }
+ };
+
+-
+ MODULE_DEVICE_TABLE(pci, pci_ids);
+
+
+@@ -3342,17 +3556,110 @@
+ .shutdown = langwell_udc_shutdown,
+ };
+
++#ifdef OTG_TRANSCEIVER
++static int intel_mid_start_peripheral(struct intel_mid_otg_xceiv *iotg)
++{
++ struct pci_dev *pdev;
++ int retval;
++
++ if (iotg == NULL)
++ return -EINVAL;
+
+-MODULE_DESCRIPTION(DRIVER_DESC);
+-MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
+-MODULE_VERSION(DRIVER_VERSION);
+-MODULE_LICENSE("GPL");
++ pdev = to_pci_dev(iotg->otg.dev);
++
++ retval = langwell_udc_resume(pdev);
++ if (retval)
++ dev_dbg(&pdev->dev, "Failed to start peripheral driver\n");
++
++ return retval;
++}
++
++static int intel_mid_stop_peripheral(struct intel_mid_otg_xceiv *iotg)
++{
++ struct pci_dev *pdev;
++ int retval;
++
++ if (iotg == NULL)
++ return -EINVAL;
++
++ pdev = to_pci_dev(iotg->otg.dev);
++
++ retval = langwell_udc_suspend(pdev, PMSG_FREEZE);
++ if (retval)
++ dev_dbg(&pdev->dev, "Failed to stop peripheral driver\n");
++
++ return retval;
++}
++
++static int intel_mid_register_peripheral(struct pci_driver *peripheral_driver)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++ struct pci_dev *pdev;
++ int retval;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL)
++ return -ENODEV;
++
++ if (peripheral_driver == NULL || peripheral_driver->probe == NULL)
++ return -EINVAL;
++
++ pdev = to_pci_dev(otg->dev);
++ retval = peripheral_driver->probe(pdev, peripheral_driver->id_table);
++ if (retval) {
++ dev_dbg(&pdev->dev, "client probe function failed\n");
++ return retval;
++ }
++
++ iotg = otg_to_mid_xceiv(otg);
++
++ iotg->start_peripheral = intel_mid_start_peripheral;
++ iotg->stop_peripheral = intel_mid_stop_peripheral;
++
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_CLIENTADD, iotg);
++
++ otg_put_transceiver(otg);
++
++ return 0;
++}
++
++
++static void intel_mid_unregister_peripheral(struct pci_driver
++ *peripheral_driver)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++ struct pci_dev *pdev;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL)
++ return ;
++
++ if (peripheral_driver == NULL || peripheral_driver->remove == NULL)
++ return ;
++
++ pdev = to_pci_dev(otg->dev);
++ peripheral_driver->remove(pdev);
++
++ iotg = otg_to_mid_xceiv(otg);
++
++ iotg->start_peripheral = NULL;
++ iotg->stop_peripheral = NULL;
++
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_CLIENTREMOVE, iotg);
++
++ otg_put_transceiver(otg);
++}
++#endif
+
+
+ static int __init init(void)
+ {
+ #ifdef OTG_TRANSCEIVER
+- return langwell_register_peripheral(&langwell_pci_driver);
++ return intel_mid_register_peripheral(&langwell_pci_driver);
+ #else
+ return pci_register_driver(&langwell_pci_driver);
+ #endif
+@@ -3363,10 +3670,16 @@
+ static void __exit cleanup(void)
+ {
+ #ifdef OTG_TRANSCEIVER
+- return langwell_unregister_peripheral(&langwell_pci_driver);
++ intel_mid_unregister_peripheral(&langwell_pci_driver);
+ #else
+ pci_unregister_driver(&langwell_pci_driver);
+ #endif
+ }
+ module_exit(cleanup);
+
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
+--- a/drivers/usb/gadget/langwell_udc.h
++++ b/drivers/usb/gadget/langwell_udc.h
+@@ -1,5 +1,5 @@
+ /*
+- * Intel Langwell USB Device Controller driver
++ * Intel Langwell/Penwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+@@ -18,6 +18,7 @@
+ */
+
+ #include <linux/usb/langwell_udc.h>
++#include <linux/usb/intel_mid_otg.h>
+
+ #if defined(CONFIG_USB_LANGWELL_OTG)
+ #include <linux/usb/langwell_otg.h>
+@@ -199,13 +200,15 @@
+ vbus_active:1,
+ suspended:1,
+ stopped:1,
+- lpm:1; /* LPM capability */
++ lpm:1, /* LPM capability */
++ has_sram:1, /* SRAM caching */
++ got_sram:1;
+
+ /* pci state used to access those endpoints */
+ struct pci_dev *pdev;
+
+- /* Langwell otg transceiver */
+- struct langwell_otg *lotg;
++ /* Intel mid otg transceiver */
++ struct intel_mid_otg_xceiv *iotg;
+
+ /* control registers */
+ struct langwell_cap_regs __iomem *cap_regs;
+@@ -224,5 +227,12 @@
+
+ /* make sure release() is done */
+ struct completion *done;
++
++ /* for private SRAM caching */
++ unsigned int sram_addr;
++ unsigned int sram_size;
++
++ /* device status data for get_status request */
++ u16 dev_status;
+ };
+
+--- a/drivers/usb/gadget/serial.c
++++ b/drivers/usb/gadget/serial.c
+@@ -58,6 +58,8 @@
+ #define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */
+ #define GS_CDC_PRODUCT_ID 0xa4a7 /* ... as CDC-ACM */
+ #define GS_CDC_OBEX_PRODUCT_ID 0xa4a9 /* ... as CDC-OBEX */
++#define GS_CDC_ASSOC_DESCR_PRODUCT_ID 0xa4ab /* ... as CDC-ACM \
++ with association descriptor */
+
+ /* string IDs are assigned dynamically */
+
+@@ -89,8 +91,8 @@
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = cpu_to_le16(0x0200),
+ /* .bDeviceClass = f(use_acm) */
+- .bDeviceSubClass = 0,
+- .bDeviceProtocol = 0,
++ /*.bDeviceSubClass = f(use_assoc_descr) ,*/
++ /*.bDeviceProtocol = f(use_assoc_descr) ,*/
+ /* .bMaxPacketSize0 = f(hardware) */
+ .idVendor = cpu_to_le16(GS_VENDOR_ID),
+ /* .idProduct = f(use_acm) */
+@@ -135,6 +137,10 @@
+ module_param(n_ports, uint, 0);
+ MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
+
++static bool use_assoc_descr = false;
++module_param(use_assoc_descr, bool, 0);
++MODULE_PARM_DESC(use_assoc_descr, "Use association descriptor with CDC ACM, default=no");
++
+ /*-------------------------------------------------------------------------*/
+
+ static int __init serial_bind_config(struct usb_configuration *c)
+@@ -171,6 +177,14 @@
+ if (status < 0)
+ return status;
+
++ if (use_assoc_descr) {
++ device_desc.bDeviceSubClass = 0x02;
++ device_desc.bDeviceProtocol = 0x01;
++ } else {
++ device_desc.bDeviceSubClass = 0;
++ device_desc.bDeviceProtocol = 0;
++ }
++
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+@@ -252,10 +266,17 @@
+ */
+ if (use_acm) {
+ serial_config_driver.label = "CDC ACM config";
+- serial_config_driver.bConfigurationValue = 2;
+- device_desc.bDeviceClass = USB_CLASS_COMM;
+- device_desc.idProduct =
++ if (use_assoc_descr) {
++ serial_config_driver.bConfigurationValue = 4;
++ device_desc.bDeviceClass = 0xef;
++ device_desc.idProduct =
++ cpu_to_le16(GS_CDC_ASSOC_DESCR_PRODUCT_ID);
++ } else {
++ serial_config_driver.bConfigurationValue = 2;
++ device_desc.bDeviceClass = USB_CLASS_COMM;
++ device_desc.idProduct =
+ cpu_to_le16(GS_CDC_PRODUCT_ID);
++ }
+ } else if (use_obex) {
+ serial_config_driver.label = "CDC OBEX config";
+ serial_config_driver.bConfigurationValue = 3;
+--- a/drivers/usb/host/ehci-dbg.c
++++ b/drivers/usb/host/ehci-dbg.c
+@@ -98,13 +98,18 @@
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+ } else {
+ ehci_dbg (ehci,
+- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
++ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
+ label,
+ params,
+ HCC_ISOC_THRES(params),
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "",
+- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
++ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
++ HCC_LPM(params) ? " LPM" : "",
++ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
++ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
++ HCC_32FRAME_PERIODIC_LIST(params) ?
++ " 32 peridic list" : "");
+ }
+ }
+ #else
+@@ -191,8 +196,9 @@
+ dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+ {
+ return scnprintf (buf, len,
+- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
++ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", status,
++ (status & STS_PPCE_MASK) ? " PPCE" : "",
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+@@ -210,8 +216,9 @@
+ dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+ {
+ return scnprintf (buf, len,
+- "%s%sintrenable %02x%s%s%s%s%s%s",
++ "%s%sintrenable %02x%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", enable,
++ (enable & STS_PPCE_MASK) ? " PPCE" : "",
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+@@ -228,9 +235,15 @@
+ dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+ {
+ return scnprintf (buf, len,
+- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
++ "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
++ "period=%s%s %s",
+ label, label [0] ? " " : "", command,
+- (command & CMD_PARK) ? "park" : "(park)",
++ (command & CMD_HIRD) ? " HIRD" : "",
++ (command & CMD_PPCEE) ? " PPCEE" : "",
++ (command & CMD_FSP) ? " FSP" : "",
++ (command & CMD_ASPE) ? " ASPE" : "",
++ (command & CMD_PSPE) ? " PSPE" : "",
++ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT (command),
+ (command >> 16) & 0x3f,
+ (command & CMD_LRESET) ? " LReset" : "",
+@@ -257,11 +270,22 @@
+ }
+
+ return scnprintf (buf, len,
+- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
++ "%s%sport:%d status %06x %d %s%s%s%s%s%s "
++ "sig=%s%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", port, status,
++ status>>25,/*device address */
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
++ " ACK" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
++ " NYET" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
++ " STALL" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
++ " ERR" : "",
+ (status & PORT_POWER) ? " POWER" : "",
+ (status & PORT_OWNER) ? " OWNER" : "",
+ sig,
++ (status & PORT_LPM) ? " LPM" : "",
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+@@ -330,6 +354,13 @@
+ static int debug_periodic_open(struct inode *, struct file *);
+ static int debug_registers_open(struct inode *, struct file *);
+ static int debug_async_open(struct inode *, struct file *);
++static int debug_lpm_open(struct inode *, struct file *);
++static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos);
++static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos);
++static int debug_lpm_close(struct inode *inode, struct file *file);
++
+ static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+ static int debug_close(struct inode *, struct file *);
+
+@@ -351,6 +382,13 @@
+ .read = debug_output,
+ .release = debug_close,
+ };
++static const struct file_operations debug_lpm_fops = {
++ .owner = THIS_MODULE,
++ .open = debug_lpm_open,
++ .read = debug_lpm_read,
++ .write = debug_lpm_write,
++ .release = debug_lpm_close,
++};
+
+ static struct dentry *ehci_debug_root;
+
+@@ -917,6 +955,94 @@
+ return file->private_data ? 0 : -ENOMEM;
+ }
+
++static int debug_lpm_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++
++static int debug_lpm_close(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ /* TODO: show lpm stats */
++ return 0;
++}
++
++static ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct usb_hcd *hcd;
++ struct ehci_hcd *ehci;
++ char buf[50];
++ size_t len;
++ u32 temp;
++ unsigned long port;
++ u32 __iomem *portsc ;
++ u32 params;
++
++ hcd = bus_to_hcd(file->private_data);
++ ehci = hcd_to_ehci(hcd);
++
++ len = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++ if (len > 0 && buf[len - 1] == '\n')
++ buf[len - 1] = '\0';
++
++ if (strncmp(buf, "enable", 5) == 0) {
++ if (strict_strtoul(buf + 7, 10, &port))
++ return -EINVAL;
++ params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (port > HCS_N_PORTS(params)) {
++ ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
++ return -ENODEV;
++ }
++ portsc = &ehci->regs->port_status[port-1];
++ temp = ehci_readl(ehci, portsc);
++ if (!(temp & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "LPM: no device attached\n");
++ return -ENODEV;
++ }
++ temp |= PORT_LPM;
++ ehci_writel(ehci, temp, portsc);
++ printk(KERN_INFO "force enable LPM for port %lu\n", port);
++ } else if (strncmp(buf, "hird=", 5) == 0) {
++ unsigned long hird;
++ if (strict_strtoul(buf + 5, 16, &hird))
++ return -EINVAL;
++ printk(KERN_INFO "setting hird %s %lu\n", buf + 6, hird);
++ temp = ehci_readl(ehci, &ehci->regs->command);
++ temp &= ~CMD_HIRD;
++ temp |= hird << 24;
++ ehci_writel(ehci, temp, &ehci->regs->command);
++ } else if (strncmp(buf, "disable", 7) == 0) {
++ if (strict_strtoul(buf + 8, 10, &port))
++ return -EINVAL;
++ params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (port > HCS_N_PORTS(params)) {
++ ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
++ return -ENODEV;
++ }
++ portsc = &ehci->regs->port_status[port-1];
++ temp = ehci_readl(ehci, portsc);
++ if (!(temp & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "ERR: no device attached\n");
++ return -ENODEV;
++ }
++ temp &= ~PORT_LPM;
++ ehci_writel(ehci, temp, portsc);
++ printk(KERN_INFO "disabled LPM for port %lu\n", port);
++ } else
++ return -EOPNOTSUPP;
++ return count;
++}
++
+ static inline void create_debug_files (struct ehci_hcd *ehci)
+ {
+ struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
+@@ -940,6 +1066,10 @@
+ ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
+ ehci->debug_dir, bus,
+ &debug_registers_fops);
++
++ ehci->debug_registers = debugfs_create_file("lpm", S_IRUGO|S_IWUGO,
++ ehci->debug_dir, bus,
++ &debug_lpm_fops);
+ if (!ehci->debug_registers)
+ goto registers_error;
+ return;
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -100,6 +100,11 @@
+ module_param (ignore_oc, bool, S_IRUGO);
+ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
++/* for link power management(LPM) feature */
++static unsigned int hird;
++module_param(hird, int, S_IRUGO);
++MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
++
+ #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+ /*-------------------------------------------------------------------------*/
+@@ -304,6 +309,7 @@
+ static void ehci_work(struct ehci_hcd *ehci);
+
+ #include "ehci-hub.c"
++#include "ehci-lpm.c"
+ #include "ehci-mem.c"
+ #include "ehci-q.c"
+ #include "ehci-sched.c"
+@@ -500,7 +506,8 @@
+ ehci_work (ehci);
+ spin_unlock_irq (&ehci->lock);
+ ehci_mem_cleanup (ehci);
+-
++ if (hcd->has_sram)
++ sram_deinit(hcd);
+ #ifdef EHCI_STATS
+ ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+@@ -577,6 +584,11 @@
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
++ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
++ ehci->has_ppcd = 1;
++ ehci_dbg(ehci, "enable per-port change event\n");
++ temp |= CMD_PPCEE;
++ }
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+@@ -603,6 +615,17 @@
+ default: BUG();
+ }
+ }
++ if (HCC_LPM(hcc_params)) {
++ /* support link power management EHCI 1.1 addendum */
++ ehci_dbg(ehci, "support lpm\n");
++ ehci->has_lpm = 1;
++ if (hird > 0xf) {
++ ehci_dbg(ehci, "hird %d invalid, use default 0",
++ hird);
++ hird = 0;
++ }
++ temp |= hird << 24;
++ }
+ ehci->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+@@ -764,6 +787,7 @@
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ unsigned i = HCS_N_PORTS (ehci->hcs_params);
++ u32 ppcd = 0;
+
+ /* kick root hub later */
+ pcd_status = status;
+@@ -772,9 +796,18 @@
+ if (!(cmd & CMD_RUN))
+ usb_hcd_resume_root_hub(hcd);
+
++ /* get per-port change detect bits */
++ if (ehci->has_ppcd)
++ ppcd = status >> 16;
++
+ while (i--) {
+- int pstatus = ehci_readl(ehci,
+- &ehci->regs->port_status [i]);
++ int pstatus;
++
++ /* leverage per-port change bits feature */
++ if (ehci->has_ppcd && !(ppcd & (1 << i)))
++ continue;
++ pstatus = ehci_readl(ehci,
++ &ehci->regs->port_status[i]);
+
+ if (pstatus & PORT_OWNER)
+ continue;
+@@ -1101,6 +1134,10 @@
+ #ifdef CONFIG_PCI
+ #include "ehci-pci.c"
+ #define PCI_DRIVER ehci_pci_driver
++#if defined(CONFIG_USB_LANGWELL_OTG) || defined(CONFIG_USB_PENWELL_OTG)
++#include "ehci-langwell-pci.c"
++#define INTEL_MID_OTG_HOST_DRIVER ehci_otg_driver
++#endif
+ #endif
+
+ #ifdef CONFIG_USB_EHCI_FSL
+@@ -1220,12 +1257,23 @@
+ if (retval < 0)
+ goto clean4;
+ #endif
++
++#ifdef INTEL_MID_OTG_HOST_DRIVER
++ retval = intel_mid_ehci_driver_register(&INTEL_MID_OTG_HOST_DRIVER);
++ if (retval < 0)
++ goto clean4;
++#endif
++
+ return retval;
+
+ #ifdef XILINX_OF_PLATFORM_DRIVER
+ /* of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); */
+ clean4:
+ #endif
++#ifdef INTEL_MID_OTG_HOST_DRIVER
++clean4:
++ intel_mid_ehci_driver_unregister(&INTEL_MID_OTG_HOST_DRIVER);
++#endif
+ #ifdef OF_PLATFORM_DRIVER
+ of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ clean3:
+@@ -1269,6 +1317,9 @@
+ #ifdef PS3_SYSTEM_BUS_DRIVER
+ ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ #endif
++#ifdef INTEL_MID_OTG_HOST_DRIVER
++ intel_mid_ehci_driver_unregister(&INTEL_MID_OTG_HOST_DRIVER);
++#endif
+ #ifdef DEBUG
+ debugfs_remove(ehci_debug_root);
+ #endif
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -175,6 +175,8 @@
+ int port;
+ int mask;
+ int changed;
++ u32 __iomem *hostpc_reg = NULL;
++ int rc = 0;
+
+ ehci_dbg(ehci, "suspend root hub\n");
+
+@@ -294,13 +296,18 @@
+ ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ ehci->next_statechange = jiffies + msecs_to_jiffies(10);
++
++#ifdef CONFIG_USB_OTG
++ if (ehci->has_otg && ehci->otg_suspend)
++ rc = ehci->otg_suspend(hcd);
++#endif
+ spin_unlock_irq (&ehci->lock);
+
+ /* ehci_work() may have re-enabled the watchdog timer, which we do not
+ * want, and so we must delete any pending watchdog timer events.
+ */
+ del_timer_sync(&ehci->watchdog);
+- return 0;
++ return rc;
+ }
+
+
+@@ -312,6 +319,7 @@
+ u32 power_okay;
+ int i;
+ u8 resume_needed = 0;
++ int rc = 0;
+
+ if (time_before (jiffies, ehci->next_statechange))
+ msleep(5);
+@@ -425,9 +433,13 @@
+ /* Now we can safely re-enable irqs */
+ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+
++#ifdef CONFIG_USB_OTG
++ if (ehci->has_otg && ehci->otg_resume)
++ rc = ehci->otg_resume(hcd);
++#endif
+ spin_unlock_irq (&ehci->lock);
+ ehci_handover_companion_ports(ehci);
+- return 0;
++ return rc;
+ }
+
+ #else
+@@ -603,6 +615,7 @@
+ u32 mask;
+ int ports, i, retval = 1;
+ unsigned long flags;
++ u32 ppcd = 0;
+
+ /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ if (!HC_IS_RUNNING(hcd->state))
+@@ -632,7 +645,15 @@
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave (&ehci->lock, flags);
++
++ /* get per-port change detect bits */
++ if (ehci->has_ppcd)
++ ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
++
+ for (i = 0; i < ports; i++) {
++ /* leverage per-port change bits feature */
++ if (ehci->has_ppcd && !(ppcd & (1 << i)))
++ continue;
+ temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+
+ /*
+@@ -790,6 +811,11 @@
+ status_reg);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
++ if (ehci->has_lpm) {
++ /* clear PORTSC bits on disconnect */
++ temp &= ~PORT_LPM;
++ temp &= ~PORT_DEV_ADDR;
++ }
+ ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
+ status_reg);
+ break;
+--- /dev/null
++++ b/drivers/usb/host/ehci-langwell-pci.c
+@@ -0,0 +1,269 @@
++/*
++ * Intel MID Platform Langwell/Penwell OTG EHCI Controller PCI Bus Glue.
++ *
++ * Copyright (c) 2008 - 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License 2 as published by the
++ * Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/usb/otg.h>
++#include <linux/usb/intel_mid_otg.h>
++
++static int usb_otg_suspend(struct usb_hcd *hcd)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return -EINVAL;
++ }
++ iotg = otg_to_mid_xceiv(otg);
++ printk(KERN_INFO "%s OTG HNP update suspend\n", __func__);
++
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_HSUSPEND, iotg);
++ otg_put_transceiver(otg);
++ return 0;
++}
++
++static int usb_otg_resume(struct usb_hcd *hcd)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return -EINVAL;
++ }
++ iotg = otg_to_mid_xceiv(otg);
++ printk(KERN_INFO "%s OTG HNP update resume\n", __func__);
++
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_HRESUME, iotg);
++ otg_put_transceiver(otg);
++ return 0;
++}
++
++/* the root hub will call this callback when device added/removed */
++static void otg_notify(struct usb_device *udev, unsigned action)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++
++ /* Ignore root hub add/remove event */
++ if (!udev->parent) {
++ printk(KERN_INFO "%s Ignore root hub otg_notify\n", __func__);
++ return;
++ }
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return;
++ }
++ iotg = otg_to_mid_xceiv(otg);
++
++ switch (action) {
++ case USB_DEVICE_ADD:
++ pr_debug("Notify OTG HNP add device\n");
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_CONNECT, iotg);
++ break;
++ case USB_DEVICE_REMOVE:
++ pr_debug("Notify OTG HNP delete device\n");
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_DISCONN, iotg);
++ break;
++ default:
++ otg_put_transceiver(otg);
++ return ;
++ }
++ otg_put_transceiver(otg);
++ return;
++}
++
++static int ehci_mid_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ struct hc_driver *driver;
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++ struct usb_hcd *hcd;
++ struct ehci_hcd *ehci;
++ int irq;
++ int retval;
++
++ pr_debug("initializing Intel MID USB OTG Host Controller\n");
++
++ /* we need not call pci_enable_dev since otg transceiver already take
++ * the control of this device and this probe actually gets called by
++ * otg transceiver driver with HNP protocol.
++ */
++ irq = pdev->irq;
++
++ if (!id)
++ return -EINVAL;
++ driver = (struct hc_driver *)id->driver_data;
++ if (!driver)
++ return -EINVAL;
++
++ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
++ if (!hcd) {
++ retval = -ENOMEM;
++ goto err1;
++ }
++
++ hcd->self.otg_port = 1;
++ ehci = hcd_to_ehci(hcd);
++ /* this will be called in ehci_bus_suspend and ehci_bus_resume */
++ ehci->otg_suspend = usb_otg_suspend;
++ ehci->otg_resume = usb_otg_resume;
++ /* this will be called by root hub code */
++ hcd->otg_notify = otg_notify;
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ retval = -EINVAL;
++ goto err1;
++ }
++
++ iotg = otg_to_mid_xceiv(otg);
++ hcd->regs = iotg->base;
++
++ hcd->rsrc_start = pci_resource_start(pdev, 0);
++ hcd->rsrc_len = pci_resource_len(pdev, 0);
++
++ if (hcd->regs == NULL) {
++ dev_dbg(&pdev->dev, "error mapping memory\n");
++ retval = -EFAULT;
++ goto err2;
++ }
++ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
++ if (retval != 0)
++ goto err2;
++ retval = otg_set_host(otg, &hcd->self);
++ if (!otg->default_a)
++ hcd->self.is_b_host = 1;
++ otg_put_transceiver(otg);
++ return retval;
++
++err2:
++ usb_put_hcd(hcd);
++err1:
++ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
++ return retval;
++}
++
++void ehci_mid_remove(struct pci_dev *dev)
++{
++ struct usb_hcd *hcd = pci_get_drvdata(dev);
++
++ if (!hcd)
++ return;
++ usb_remove_hcd(hcd);
++ usb_put_hcd(hcd);
++}
++
++/* Intel MID OTG EHCI driver */
++static struct pci_driver ehci_otg_driver = {
++ .name = "ehci-intel-mid",
++ .id_table = pci_ids,
++
++ .probe = ehci_mid_probe,
++ .remove = ehci_mid_remove,
++
++#ifdef CONFIG_PM_SLEEP
++ .driver = {
++ .pm = &usb_hcd_pci_pm_ops
++ },
++#endif
++ .shutdown = usb_hcd_pci_shutdown,
++};
++
++static int ehci_mid_start_host(struct intel_mid_otg_xceiv *iotg)
++{
++ struct pci_dev *pdev;
++ int retval;
++
++ if (iotg == NULL)
++ return -EINVAL;
++
++ pdev = to_pci_dev(iotg->otg.dev);
++
++ retval = ehci_mid_probe(pdev, ehci_otg_driver.id_table);
++ if (retval)
++ dev_dbg(iotg->otg.dev, "Failed to start host\n");
++
++ return retval;
++}
++
++static int ehci_mid_stop_host(struct intel_mid_otg_xceiv *iotg)
++{
++ struct pci_dev *pdev;
++
++ if (iotg == NULL)
++ return -EINVAL;
++
++ pdev = to_pci_dev(iotg->otg.dev);
++
++ ehci_mid_remove(pdev);
++
++ return 0;
++}
++
++static int intel_mid_ehci_driver_register(struct pci_driver *host_driver)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL)
++ return -EINVAL;
++
++ iotg = otg_to_mid_xceiv(otg);
++ iotg->start_host = ehci_mid_start_host;
++ iotg->stop_host = ehci_mid_stop_host;
++
++ /* notify host driver is registered */
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_HOSTADD, iotg);
++
++ otg_put_transceiver(otg);
++
++ return 0;
++}
++
++static void intel_mid_ehci_driver_unregister(struct pci_driver *host_driver)
++{
++ struct otg_transceiver *otg;
++ struct intel_mid_otg_xceiv *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL)
++ return ;
++
++ iotg = otg_to_mid_xceiv(otg);
++ iotg->start_host = NULL;
++ iotg->stop_host = NULL;
++
++ /* notify host driver is unregistered */
++ atomic_notifier_call_chain(&iotg->iotg_notifier,
++ MID_OTG_NOTIFY_HOSTREMOVE, iotg);
++
++ otg_put_transceiver(otg);
++}
++
+--- /dev/null
++++ b/drivers/usb/host/ehci-lpm.c
+@@ -0,0 +1,83 @@
++/* ehci-lpm.c EHCI HCD LPM support code
++ * Copyright (c) 2008 - 2010, Intel Corporation.
++ * Author: Jacob Pan <jacob.jun.pan@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*/
++
++/* this file is part of ehci-hcd.c */
++static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
++{
++ u32 __iomem portsc;
++
++ ehci_dbg(ehci, "set dev address %d for port %d\n", dev_addr, port_num);
++ if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
++ ehci_dbg(ehci, "invalid port number %d\n", port_num);
++ return -ENODEV;
++ }
++ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
++ portsc &= ~PORT_DEV_ADDR;
++ portsc |= dev_addr<<25;
++ ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
++ return 0;
++}
++
++/*
++ * this function is used to check if the device support LPM
++ * if yes, mark the PORTSC register with PORT_LPM bit
++ */
++static int ehci_lpm_check(struct ehci_hcd *ehci, int port)
++{
++ u32 __iomem *portsc ;
++ u32 val32;
++ int retval;
++
++ portsc = &ehci->regs->port_status[port-1];
++ val32 = ehci_readl(ehci, portsc);
++ if (!(val32 & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "LPM: no device attached\n");
++ return -ENODEV;
++ }
++ val32 |= PORT_LPM;
++ ehci_writel(ehci, val32, portsc);
++ msleep(5);
++ val32 |= PORT_SUSPEND;
++ ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
++ ehci_writel(ehci, val32, portsc);
++ /* wait for ACK */
++ msleep(10);
++ retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
++ PORTSC_SUSPEND_STS_ACK, 125);
++ dbg_port(ehci, "LPM", port, val32);
++ if (retval != -ETIMEDOUT) {
++ ehci_dbg(ehci, "LPM: device ACK for LPM\n");
++ val32 |= PORT_LPM;
++ /*
++ * now device should be in L1 sleep, let's wake up the device
++ * so that we can complete enumeration.
++ */
++ ehci_writel(ehci, val32, portsc);
++ msleep(10);
++ val32 |= PORT_RESUME;
++ ehci_writel(ehci, val32, portsc);
++ } else {
++ ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
++ retval);
++ val32 &= ~PORT_LPM;
++ retval = -ETIMEDOUT;
++ ehci_writel(ehci, val32, portsc);
++ }
++
++ return retval;
++}
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -41,6 +41,39 @@
+ return 0;
+ }
+
++/* enable SRAM if sram detected */
++static void sram_init(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++ if (!hcd->has_sram)
++ return;
++ ehci->sram_addr = pci_resource_start(pdev, 1);
++ ehci->sram_size = pci_resource_len(pdev, 1);
++ ehci_info(ehci, "Found HCD SRAM at %x size:%x\n",
++ ehci->sram_addr, ehci->sram_size);
++ if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
++ ehci_warn(ehci, "SRAM request failed\n");
++ hcd->has_sram = 0;
++ } else if (!dma_declare_coherent_memory(&pdev->dev, ehci->sram_addr,
++ ehci->sram_addr, ehci->sram_size, DMA_MEMORY_MAP)) {
++ ehci_warn(ehci, "SRAM DMA declare failed\n");
++ pci_release_region(pdev, 1);
++ hcd->has_sram = 0;
++ }
++}
++
++static void sram_deinit(struct usb_hcd *hcd)
++{
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++ if (!hcd->has_sram)
++ return;
++ dma_release_declared_memory(&pdev->dev);
++ pci_release_region(pdev, 1);
++}
++
+ /* called during probe() after chip reset completes */
+ static int ehci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -50,6 +83,7 @@
+ u8 rev;
+ u32 temp;
+ int retval;
++ int force_otg_hc_mode = 0;
+
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_TOSHIBA_2:
+@@ -63,6 +97,30 @@
+ #endif
+ }
+ break;
++ case PCI_VENDOR_ID_INTEL:
++ if (pdev->device == 0x0811 || pdev->device == 0x0829) {
++ ehci_info(ehci, "Detected Intel MID OTG HC\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++#ifdef CONFIG_USB_OTG
++ ehci->has_otg = 1;
++#endif
++ force_otg_hc_mode = 1;
++ hcd->has_sram = 1;
++ hcd->sram_no_payload = 1;
++ sram_init(hcd);
++ } else if (pdev->device == 0x0806) {
++ ehci_info(ehci, "Detected Langwell MPH\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++ hcd->has_sram = 1;
++ hcd->sram_no_payload = 1;
++ sram_init(hcd);
++ } else if (pdev->device == 0x0829) {
++ ehci_info(ehci, "Detected Penwell OTG HC\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++ }
+ }
+
+ ehci->caps = hcd->regs;
+@@ -98,6 +156,8 @@
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (force_otg_hc_mode)
++ ehci_reset(ehci);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+@@ -119,6 +179,11 @@
+ ehci->broken_periodic = 1;
+ ehci_info(ehci, "using broken periodic workaround\n");
+ }
++ if (pdev->device == 0x0806 || pdev->device == 0x0811
++ || pdev->device == 0x0829) {
++ ehci_info(ehci, "disable lpm for langwell/penwell\n");
++ ehci->has_lpm = 0;
++ }
+ break;
+ case PCI_VENDOR_ID_TDI:
+ if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
+@@ -362,6 +427,22 @@
+ }
+ #endif
+
++static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ int rc = 0;
++
++ if (!udev->parent) /* udev is root hub itself, impossible */
++ rc = -1;
++ /* we only support lpm device connected to root hub yet */
++ if (ehci->has_lpm && !udev->parent->parent) {
++ rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum);
++ if (!rc)
++ rc = ehci_lpm_check(ehci, udev->portnum);
++ }
++ return rc;
++}
++
+ static const struct hc_driver ehci_pci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "EHCI Host Controller",
+@@ -408,6 +489,11 @@
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
++ /*
++ * call back when device connected and addressed
++ */
++ .update_device = ehci_update_device,
++
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -141,8 +141,18 @@
+ #define OHCI_HCCTRL_LEN 0x4
+ __hc32 *ohci_hcctrl_reg;
+ unsigned has_hostpc:1;
++ unsigned has_lpm:1; /* support link power management */
++ unsigned has_ppcd:1; /* support per-port change bits */
+
++#ifdef CONFIG_USB_OTG
++ unsigned has_otg:1; /* if it is otg host*/
++ /* otg host has additional bus_suspend and bus_resume */
++ int (*otg_suspend)(struct usb_hcd *hcd);
++ int (*otg_resume)(struct usb_hcd *hcd);
++#endif
+ u8 sbrn; /* packed release number */
++ unsigned int sram_addr;
++ unsigned int sram_size;
+
+ /* irq statistics */
+ #ifdef EHCI_STATS
+@@ -158,6 +168,7 @@
+ struct dentry *debug_async;
+ struct dentry *debug_periodic;
+ struct dentry *debug_registers;
++ struct dentry *debug_lpm;
+ #endif
+ };
+
+@@ -730,5 +741,9 @@
+ #endif /* DEBUG */
+
+ /*-------------------------------------------------------------------------*/
+-
++#ifdef CONFIG_PCI
++static void sram_deinit(struct usb_hcd *hcd);
++#else
++static void sram_deinit(struct usb_hcd *hcd) { return; };
++#endif
+ #endif /* __LINUX_EHCI_HCD_H */
+--- a/drivers/usb/otg/Kconfig
++++ b/drivers/usb/otg/Kconfig
+@@ -69,4 +69,32 @@
+ built-in with usb ip or which are autonomous and doesn't require any
+ phy programming such as ISP1x04 etc.
+
++config USB_LANGWELL_OTG
++ tristate "Intel Langwell USB OTG dual-role support"
++ depends on USB && PCI && INTEL_SCU_IPC
++ select USB_OTG
++ select USB_OTG_UTILS
++ help
++ Say Y here if you want to build Intel Langwell USB OTG
++	  transceiver driver in kernel. This driver implements role
++ switch between EHCI host driver and Langwell USB OTG
++ client driver.
++
++ To compile this driver as a module, choose M here: the
++ module will be called langwell_otg.
++
++config USB_PENWELL_OTG
++ tristate "Intel Penwell USB OTG dual-role support"
++ depends on USB && PCI && INTEL_SCU_IPC
++ select USB_OTG
++ select USB_OTG_UTILS
++ help
++ Say Y here if you want to build Intel Penwell USB OTG
++	  transceiver driver in kernel. This driver implements role
++ switch between EHCI host driver and Penwell USB OTG
++ client driver.
++
++ To compile this driver as a module, choose M here: the
++ module will be called penwell_otg.
++
+ endif # USB || OTG
+--- a/drivers/usb/otg/Makefile
++++ b/drivers/usb/otg/Makefile
+@@ -9,6 +9,8 @@
+ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
+ obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
+ obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
++obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
++obj-$(CONFIG_USB_PENWELL_OTG) += penwell_otg.o
+ obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
+ obj-$(CONFIG_USB_ULPI) += ulpi.o
+
+--- /dev/null
++++ b/drivers/usb/otg/langwell_otg.c
+@@ -0,0 +1,2382 @@
++/*
++ * Intel Langwell USB OTG transceiver driver
++ * Copyright (C) 2008 - 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++/* This driver helps to switch Langwell OTG controller function between host
++ * and peripheral. It works with EHCI driver and Langwell client controller
++ * driver together.
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/moduleparam.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++#include <linux/usb.h>
++#include <linux/usb/otg.h>
++#include <linux/usb/hcd.h>
++#include <linux/notifier.h>
++#include <linux/delay.h>
++#include <asm/intel_scu_ipc.h>
++
++#include <linux/usb/langwell_otg.h>
++
++#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
++#define DRIVER_VERSION "July 10, 2010"
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
++static const char driver_name[] = "langwell_otg";
++
++static int langwell_otg_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++static void langwell_otg_remove(struct pci_dev *pdev);
++static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
++static int langwell_otg_resume(struct pci_dev *pdev);
++
++static int langwell_otg_set_host(struct otg_transceiver *otg,
++ struct usb_bus *host);
++static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
++ struct usb_gadget *gadget);
++static int langwell_otg_start_srp(struct otg_transceiver *otg);
++
++static const struct pci_device_id pci_ids[] = {{
++ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
++ .class_mask = ~0,
++ .vendor = 0x8086,
++ .device = 0x0811,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++}, { /* end: all zeroes */ }
++};
++
++static struct pci_driver otg_pci_driver = {
++ .name = (char *) driver_name,
++ .id_table = pci_ids,
++
++ .probe = langwell_otg_probe,
++ .remove = langwell_otg_remove,
++
++ .suspend = langwell_otg_suspend,
++ .resume = langwell_otg_resume,
++};
++
++static const char *state_string(enum usb_otg_state state)
++{
++ switch (state) {
++ case OTG_STATE_A_IDLE:
++ return "a_idle";
++ case OTG_STATE_A_WAIT_VRISE:
++ return "a_wait_vrise";
++ case OTG_STATE_A_WAIT_BCON:
++ return "a_wait_bcon";
++ case OTG_STATE_A_HOST:
++ return "a_host";
++ case OTG_STATE_A_SUSPEND:
++ return "a_suspend";
++ case OTG_STATE_A_PERIPHERAL:
++ return "a_peripheral";
++ case OTG_STATE_A_WAIT_VFALL:
++ return "a_wait_vfall";
++ case OTG_STATE_A_VBUS_ERR:
++ return "a_vbus_err";
++ case OTG_STATE_B_IDLE:
++ return "b_idle";
++ case OTG_STATE_B_SRP_INIT:
++ return "b_srp_init";
++ case OTG_STATE_B_PERIPHERAL:
++ return "b_peripheral";
++ case OTG_STATE_B_WAIT_ACON:
++ return "b_wait_acon";
++ case OTG_STATE_B_HOST:
++ return "b_host";
++ default:
++ return "UNDEFINED";
++ }
++}
++
++/* HSM timers */
++static inline struct langwell_otg_timer *otg_timer_initializer
++(void (*function)(unsigned long), unsigned long expires, unsigned long data)
++{
++ struct langwell_otg_timer *timer;
++ timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
++ if (timer == NULL)
++ return timer;
++
++ timer->function = function;
++ timer->expires = expires;
++ timer->data = data;
++ return timer;
++}
++
++static struct langwell_otg_timer *a_wait_vrise_tmr, *a_aidl_bdis_tmr,
++ *b_se0_srp_tmr, *b_srp_init_tmr;
++
++static struct list_head active_timers;
++
++static struct langwell_otg *the_transceiver;
++
++/* host/client notify transceiver when event affects HNP state */
++void langwell_update_transceiver(void)
++{
++ struct langwell_otg *lnw = the_transceiver;
++
++ dev_dbg(lnw->dev, "transceiver is updated\n");
++
++ if (!lnw->qwork)
++ return ;
++
++ queue_work(lnw->qwork, &lnw->work);
++}
++EXPORT_SYMBOL(langwell_update_transceiver);
++
++static int langwell_otg_set_host(struct otg_transceiver *otg,
++ struct usb_bus *host)
++{
++ otg->host = host;
++
++ return 0;
++}
++
++static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
++ struct usb_gadget *gadget)
++{
++ otg->gadget = gadget;
++
++ return 0;
++}
++
++static int langwell_otg_set_power(struct otg_transceiver *otg,
++ unsigned mA)
++{
++ return 0;
++}
++
++/* A-device drives vbus, controlled through IPC commands */
++static int langwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ u8 sub_id;
++
++ dev_dbg(lnw->dev, "%s <--- %s\n", __func__, enabled ? "on" : "off");
++
++ if (enabled)
++ sub_id = 0x8; /* Turn on the VBus */
++ else
++ sub_id = 0x9; /* Turn off the VBus */
++
++ if (intel_scu_ipc_simple_command(0xef, sub_id)) {
++ dev_dbg(lnw->dev, "Failed to set Vbus via IPC commands\n");
++ return -EBUSY;
++ }
++
++ dev_dbg(lnw->dev, "%s --->\n", __func__);
++
++ return 0;
++}
++
++/* charge vbus or discharge vbus through a resistor to ground */
++static void langwell_otg_chrg_vbus(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ u32 val;
++
++ val = readl(lnw->iotg.base + CI_OTGSC);
++
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
++ lnw->iotg.base + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
++ lnw->iotg.base + CI_OTGSC);
++}
++
++/* Start SRP */
++static int langwell_otg_start_srp(struct otg_transceiver *otg)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ u32 val;
++
++ dev_dbg(lnw->dev, "%s --->\n", __func__);
++
++ val = readl(iotg->base + CI_OTGSC);
++
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
++ iotg->base + CI_OTGSC);
++
++ /* Check if the data plus is finished or not */
++ msleep(8);
++ val = readl(iotg->base + CI_OTGSC);
++ if (val & (OTGSC_HADP | OTGSC_DP))
++ dev_dbg(lnw->dev, "DataLine SRP Error\n");
++
++ /* Disable interrupt - b_sess_vld */
++ val = readl(iotg->base + CI_OTGSC);
++ val &= (~(OTGSC_BSVIE | OTGSC_BSEIE));
++ writel(val, iotg->base + CI_OTGSC);
++
++ /* Start VBus SRP, drive vbus to generate VBus pulse */
++ iotg->otg.set_vbus(&iotg->otg, true);
++ msleep(15);
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ /* Enable interrupt - b_sess_vld*/
++ val = readl(iotg->base + CI_OTGSC);
++ dev_dbg(lnw->dev, "after VBUS pulse otgsc = %x\n", val);
++
++ val |= (OTGSC_BSVIE | OTGSC_BSEIE);
++ writel(val, iotg->base + CI_OTGSC);
++
++ /* If Vbus is valid, then update the hsm */
++ if (val & OTGSC_BSV) {
++ dev_dbg(lnw->dev, "no b_sess_vld interrupt\n");
++
++ lnw->iotg.hsm.b_sess_vld = 1;
++ langwell_update_transceiver();
++ }
++
++ dev_dbg(lnw->dev, "%s <---\n", __func__);
++ return 0;
++}
++
++/* stop SOF via bus_suspend */
++static void langwell_otg_loc_sof(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct usb_hcd *hcd;
++ int err;
++
++ dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "suspend" : "resume");
++
++ hcd = bus_to_hcd(lnw->iotg.otg.host);
++ if (on)
++ err = hcd->driver->bus_resume(hcd);
++ else
++ err = hcd->driver->bus_suspend(hcd);
++
++ if (err)
++ dev_dbg(lnw->dev, "Fail to resume/suspend USB bus - %d\n", err);
++
++ dev_dbg(lnw->dev, "%s <---\n", __func__);
++}
++
++static int langwell_otg_check_otgsc(void)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ u32 otgsc, usbcfg;
++
++ dev_dbg(lnw->dev, "check sync OTGSC and USBCFG registers\n");
++
++ otgsc = readl(lnw->iotg.base + CI_OTGSC);
++ usbcfg = readl(lnw->usbcfg);
++
++ dev_dbg(lnw->dev, "OTGSC = %08x, USBCFG = %08x\n",
++ otgsc, usbcfg);
++ dev_dbg(lnw->dev, "OTGSC_AVV = %d\n", !!(otgsc & OTGSC_AVV));
++ dev_dbg(lnw->dev, "USBCFG.VBUSVAL = %d\n",
++ !!(usbcfg & USBCFG_VBUSVAL));
++ dev_dbg(lnw->dev, "OTGSC_ASV = %d\n", !!(otgsc & OTGSC_ASV));
++ dev_dbg(lnw->dev, "USBCFG.AVALID = %d\n",
++ !!(usbcfg & USBCFG_AVALID));
++ dev_dbg(lnw->dev, "OTGSC_BSV = %d\n", !!(otgsc & OTGSC_BSV));
++ dev_dbg(lnw->dev, "USBCFG.BVALID = %d\n",
++ !!(usbcfg & USBCFG_BVALID));
++ dev_dbg(lnw->dev, "OTGSC_BSE = %d\n", !!(otgsc & OTGSC_BSE));
++ dev_dbg(lnw->dev, "USBCFG.SESEND = %d\n",
++ !!(usbcfg & USBCFG_SESEND));
++
++ /* Check USBCFG VBusValid/AValid/BValid/SessEnd */
++ if (!!(otgsc & OTGSC_AVV) ^ !!(usbcfg & USBCFG_VBUSVAL)) {
++ dev_dbg(lnw->dev, "OTGSC.AVV != USBCFG.VBUSVAL\n");
++ goto err;
++ }
++ if (!!(otgsc & OTGSC_ASV) ^ !!(usbcfg & USBCFG_AVALID)) {
++ dev_dbg(lnw->dev, "OTGSC.ASV != USBCFG.AVALID\n");
++ goto err;
++ }
++ if (!!(otgsc & OTGSC_BSV) ^ !!(usbcfg & USBCFG_BVALID)) {
++ dev_dbg(lnw->dev, "OTGSC.BSV != USBCFG.BVALID\n");
++ goto err;
++ }
++ if (!!(otgsc & OTGSC_BSE) ^ !!(usbcfg & USBCFG_SESEND)) {
++ dev_dbg(lnw->dev, "OTGSC.BSE != USBCFG.SESSEN\n");
++ goto err;
++ }
++
++ dev_dbg(lnw->dev, "OTGSC and USBCFG are synced\n");
++
++ return 0;
++
++err:
++ dev_warn(lnw->dev, "OTGSC isn't equal to USBCFG\n");
++ return -EPIPE;
++}
++
++
++static void langwell_otg_phy_low_power(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ u8 val, phcd;
++ int retval;
++
++ dev_dbg(lnw->dev, "%s ---> %s mode\n",
++ __func__, on ? "Low power" : "Normal");
++
++ phcd = 0x40;
++
++ val = readb(iotg->base + CI_HOSTPC1 + 2);
++
++ if (on) {
++		/* Due to hardware issue, after set PHCD, sync will fail
++ * between USBCFG and OTGSC, so before set PHCD, check if
++ * sync is in process now. If the answer is "yes", then do
++ * not touch PHCD bit */
++ retval = langwell_otg_check_otgsc();
++ if (retval) {
++ dev_dbg(lnw->dev, "Skip PHCD programming..\n");
++ return ;
++ }
++
++ writeb(val | phcd, iotg->base + CI_HOSTPC1 + 2);
++ } else
++ writeb(val & ~phcd, iotg->base + CI_HOSTPC1 + 2);
++
++ dev_dbg(lnw->dev, "%s <--- done\n", __func__);
++}
++
++/* After drv vbus, add 5 ms delay to set PHCD */
++static void langwell_otg_phy_low_power_wait(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++
++ dev_dbg(lnw->dev, "add 5ms delay before programing PHCD\n");
++
++ mdelay(5);
++ langwell_otg_phy_low_power(on);
++}
++
++/* Enable/Disable OTG interrupt */
++static void langwell_otg_intr(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ u32 val;
++
++ dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ val = readl(iotg->base + CI_OTGSC);
++
++	/* OTGSC_INT_MASK doesn't contain 1msInt */
++ if (on) {
++ val = val | (OTGSC_INT_MASK);
++ writel(val, iotg->base + CI_OTGSC);
++ } else {
++ val = val & ~(OTGSC_INT_MASK);
++ writel(val, iotg->base + CI_OTGSC);
++ }
++
++ dev_dbg(lnw->dev, "%s <---\n", __func__);
++}
++
++/* set HAAR: Hardware Assist Auto-Reset */
++static void langwell_otg_HAAR(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ u32 val;
++
++ dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ val = readl(iotg->base + CI_OTGSC);
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
++ iotg->base + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
++ iotg->base + CI_OTGSC);
++
++ dev_dbg(lnw->dev, "%s <---\n", __func__);
++}
++
++/* set HABA: Hardware Assist B-Disconnect to A-Connect */
++static void langwell_otg_HABA(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ u32 val;
++
++ dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ val = readl(iotg->base + CI_OTGSC);
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
++ iotg->base + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
++ iotg->base + CI_OTGSC);
++
++ dev_dbg(lnw->dev, "%s <---\n", __func__);
++}
++
++static int langwell_otg_check_se0_srp(int on)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ int delay_time = TB_SE0_SRP * 10;
++ u32 val;
++
++ dev_dbg(lnw->dev, "%s --->\n", __func__);
++
++ do {
++ udelay(100);
++ if (!delay_time--)
++ break;
++ val = readl(lnw->iotg.base + CI_PORTSC1);
++ val &= PORTSC_LS;
++ } while (!val);
++
++ dev_dbg(lnw->dev, "%s <---\n", __func__);
++ return val;
++}
++
++/* The timeout callback function to set time out bit */
++static void set_tmout(unsigned long indicator)
++{
++ *(int *)indicator = 1;
++}
++
++void langwell_otg_nsf_msg(unsigned long indicator)
++{
++ struct langwell_otg *lnw = the_transceiver;
++
++ switch (indicator) {
++ case 2:
++ case 4:
++ case 6:
++ case 7:
++ dev_warn(lnw->dev,
++ "OTG:NSF-%lu - deivce not responding\n", indicator);
++ break;
++ case 3:
++ dev_warn(lnw->dev,
++ "OTG:NSF-%lu - deivce not supported\n", indicator);
++ break;
++ default:
++ dev_warn(lnw->dev, "Do not have this kind of NSF\n");
++ break;
++ }
++}
++
++/* Initialize timers */
++static int langwell_otg_init_timers(struct otg_hsm *hsm)
++{
++ /* HSM used timers */
++ a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
++ (unsigned long)&hsm->a_wait_vrise_tmout);
++ if (a_wait_vrise_tmr == NULL)
++ return -ENOMEM;
++ a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
++ (unsigned long)&hsm->a_aidl_bdis_tmout);
++ if (a_aidl_bdis_tmr == NULL)
++ return -ENOMEM;
++ b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
++ (unsigned long)&hsm->b_se0_srp);
++ if (b_se0_srp_tmr == NULL)
++ return -ENOMEM;
++ b_srp_init_tmr = otg_timer_initializer(&set_tmout, TB_SRP_INIT,
++ (unsigned long)&hsm->b_srp_init_tmout);
++ if (b_srp_init_tmr == NULL)
++ return -ENOMEM;
++
++ return 0;
++}
++
++/* Free timers */
++static void langwell_otg_free_timers(void)
++{
++ kfree(a_wait_vrise_tmr);
++ kfree(a_aidl_bdis_tmr);
++ kfree(b_se0_srp_tmr);
++ kfree(b_srp_init_tmr);
++}
++
++/* The timeout callback function to set time out bit */
++static void langwell_otg_timer_fn(unsigned long indicator)
++{
++ struct langwell_otg *lnw = the_transceiver;
++
++ *(int *)indicator = 1;
++
++ dev_dbg(lnw->dev, "kernel timer - timeout\n");
++
++ langwell_update_transceiver();
++}
++
++/* kernel timer used instead of HW based interrupt */
++static void langwell_otg_add_ktimer(enum langwell_otg_timer_type timers)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ unsigned long j = jiffies;
++ unsigned long data, time;
++
++ switch (timers) {
++ case TA_WAIT_VRISE_TMR:
++ iotg->hsm.a_wait_vrise_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
++ time = TA_WAIT_VRISE;
++ break;
++ case TA_WAIT_BCON_TMR:
++ iotg->hsm.a_wait_bcon_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
++ time = TA_WAIT_BCON;
++ break;
++ case TA_AIDL_BDIS_TMR:
++ iotg->hsm.a_aidl_bdis_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
++ time = TA_AIDL_BDIS;
++ break;
++ case TB_ASE0_BRST_TMR:
++ iotg->hsm.b_ase0_brst_tmout = 0;
++ data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
++ time = TB_ASE0_BRST;
++ break;
++ case TB_SRP_INIT_TMR:
++ iotg->hsm.b_srp_init_tmout = 0;
++ data = (unsigned long)&iotg->hsm.b_srp_init_tmout;
++ time = TB_SRP_INIT;
++ break;
++ case TB_SRP_FAIL_TMR:
++ iotg->hsm.b_srp_fail_tmout = 0;
++ data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
++ time = TB_SRP_FAIL;
++ break;
++ case TB_BUS_SUSPEND_TMR:
++ iotg->hsm.b_bus_suspend_tmout = 0;
++ data = (unsigned long)&iotg->hsm.b_bus_suspend_tmout;
++ time = TB_BUS_SUSPEND;
++ break;
++ default:
++ dev_dbg(lnw->dev, "unkown timer, cannot enable it\n");
++ return;
++ }
++
++ lnw->hsm_timer.data = data;
++ lnw->hsm_timer.function = langwell_otg_timer_fn;
++ lnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
++
++ add_timer(&lnw->hsm_timer);
++
++ dev_dbg(lnw->dev, "add timer successfully\n");
++}
++
++/* Add timer to timer list */
++static void langwell_otg_add_timer(void *gtimer)
++{
++ struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
++ struct langwell_otg_timer *tmp_timer;
++ struct intel_mid_otg_xceiv *iotg = &the_transceiver->iotg;
++ u32 val32;
++
++ /* Check if the timer is already in the active list,
++ * if so update timer count
++ */
++ list_for_each_entry(tmp_timer, &active_timers, list)
++ if (tmp_timer == timer) {
++ timer->count = timer->expires;
++ return;
++ }
++ timer->count = timer->expires;
++
++ if (list_empty(&active_timers)) {
++ val32 = readl(iotg->base + CI_OTGSC);
++ writel(val32 | OTGSC_1MSE, iotg->base + CI_OTGSC);
++ }
++
++ list_add_tail(&timer->list, &active_timers);
++}
++
++/* Remove timer from the timer list; clear timeout status */
++static void langwell_otg_del_timer(void *gtimer)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
++ struct langwell_otg_timer *tmp_timer, *del_tmp;
++ u32 val32;
++
++ list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
++ if (tmp_timer == timer)
++ list_del(&timer->list);
++
++ if (list_empty(&active_timers)) {
++ val32 = readl(lnw->iotg.base + CI_OTGSC);
++ writel(val32 & ~OTGSC_1MSE, lnw->iotg.base + CI_OTGSC);
++ }
++}
++
++/* Reduce timer count by 1, and find timeout conditions.*/
++static int langwell_otg_tick_timer(u32 *int_sts)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct langwell_otg_timer *tmp_timer, *del_tmp;
++ int expired = 0;
++
++ list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
++ tmp_timer->count--;
++ /* check if timer expires */
++ if (!tmp_timer->count) {
++ list_del(&tmp_timer->list);
++ tmp_timer->function(tmp_timer->data);
++ expired = 1;
++ }
++ }
++
++ if (list_empty(&active_timers)) {
++ dev_dbg(lnw->dev, "tick timer: disable 1ms int\n");
++ *int_sts = *int_sts & ~OTGSC_1MSE;
++ }
++ return expired;
++}
++
++/* Soft-reset the OTG controller; bounded poll on USBCMD_RST (~100ms max). */
++static void reset_otg(void)
++{
++	struct langwell_otg *lnw = the_transceiver;
++	int delay_time = 1000;
++	u32 val;
++
++	dev_dbg(lnw->dev, "resetting OTG controller ...\n");
++	val = readl(lnw->iotg.base + CI_USBCMD);
++	writel(val | USBCMD_RST, lnw->iotg.base + CI_USBCMD);
++	do {
++		udelay(100);
++		val = readl(lnw->iotg.base + CI_USBCMD);
++		val &= USBCMD_RST;
++	} while (val && --delay_time);	/* give up instead of spinning forever */
++	if (!delay_time)
++		dev_dbg(lnw->dev, "reset timeout\n");
++	dev_dbg(lnw->dev, "reset done.\n");
++}
++
++static void set_host_mode(void)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ u32 val;
++
++ reset_otg();
++ val = readl(lnw->iotg.base + CI_USBMODE);
++ val = (val & (~USBMODE_CM)) | USBMODE_HOST;
++ writel(val, lnw->iotg.base + CI_USBMODE);
++}
++
++static void set_client_mode(void)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ u32 val;
++
++ reset_otg();
++ val = readl(lnw->iotg.base + CI_USBMODE);
++ val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
++ writel(val, lnw->iotg.base + CI_USBMODE);
++}
++
++static void init_hsm(void)
++{
++	struct langwell_otg *lnw = the_transceiver;
++	struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++	u32 val32;
++
++	/* read OTGSC after reset */
++	val32 = readl(lnw->iotg.base + CI_OTGSC);
++	dev_dbg(lnw->dev, "%s: OTGSC init value = 0x%x\n", __func__, val32);
++
++	/* set initial state from the ID pin: ID set means B-device */
++	if (val32 & OTGSC_ID) {
++		iotg->hsm.id = 1;
++		iotg->otg.default_a = 0;
++		set_client_mode();
++		iotg->otg.state = OTG_STATE_B_IDLE;
++	} else {
++		iotg->hsm.id = 0;
++		iotg->otg.default_a = 1;
++		set_host_mode();
++		iotg->otg.state = OTG_STATE_A_IDLE;
++	}
++
++	/* latch the current session/VBus indicators into the state machine */
++	if (val32 & OTGSC_BSE)
++		iotg->hsm.b_sess_end = 1;
++	if (val32 & OTGSC_BSV)
++		iotg->hsm.b_sess_vld = 1;
++	if (val32 & OTGSC_ASV)
++		iotg->hsm.a_sess_vld = 1;
++	if (val32 & OTGSC_AVV)
++		iotg->hsm.a_vbus_vld = 1;
++
++	/* power the bus by default */
++	iotg->hsm.a_bus_req = 1;
++	iotg->hsm.a_bus_drop = 0;
++	/* by default, do not request the bus as a B-device */
++	iotg->hsm.b_bus_req = 0;
++	/* no system error */
++	iotg->hsm.a_clr_err = 0;
++
++	langwell_otg_phy_low_power_wait(1);
++}
++
++static void update_hsm(void)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ u32 val32;
++
++ /* read OTGSC */
++ val32 = readl(lnw->iotg.base + CI_OTGSC);
++ dev_dbg(lnw->dev, "%s: OTGSC value = 0x%x\n", __func__, val32);
++
++ iotg->hsm.id = !!(val32 & OTGSC_ID);
++ iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
++ iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
++ iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
++ iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
++}
++
++static irqreturn_t otg_dummy_irq(int irq, void *_dev)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ void __iomem *reg_base = _dev;
++ u32 val;
++ u32 int_mask = 0;
++
++ val = readl(reg_base + CI_USBMODE);
++ if ((val & USBMODE_CM) != USBMODE_DEVICE)
++ return IRQ_NONE;
++
++ val = readl(reg_base + CI_USBSTS);
++ int_mask = val & INTR_DUMMY_MASK;
++
++ if (int_mask == 0)
++ return IRQ_NONE;
++
++ /* clear hsm.b_conn here since host driver can't detect it
++ * otg_dummy_irq called means B-disconnect happened.
++ */
++ if (lnw->iotg.hsm.b_conn) {
++ lnw->iotg.hsm.b_conn = 0;
++ if (spin_trylock(&lnw->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&lnw->wq_lock);
++ }
++ }
++
++ /* Clear interrupts */
++ writel(int_mask, reg_base + CI_USBSTS);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t otg_irq(int irq, void *_dev)
++{
++	struct langwell_otg *lnw = _dev;
++	struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++	u32 int_sts, int_en;
++	u32 int_mask = 0;
++	int flag = 0;
++
++	int_sts = readl(lnw->iotg.base + CI_OTGSC);
++	int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
++	int_mask = int_sts & int_en;
++	if (int_mask == 0)
++		return IRQ_NONE;
++
++	if (int_mask & OTGSC_IDIS) {
++		dev_dbg(lnw->dev, "%s: id change int\n", __func__);
++		iotg->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
++		dev_dbg(lnw->dev, "id = %d\n", iotg->hsm.id);
++		flag = 1;
++	}
++	if (int_mask & OTGSC_DPIS) {
++		dev_dbg(lnw->dev, "%s: data pulse int\n", __func__);
++		iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
++		dev_dbg(lnw->dev, "data pulse = %d\n", iotg->hsm.a_srp_det);
++		flag = 1;
++	}
++	if (int_mask & OTGSC_BSEIS) {
++		dev_dbg(lnw->dev, "%s: b session end int\n", __func__);
++		iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
++		dev_dbg(lnw->dev, "b_sess_end = %d\n", iotg->hsm.b_sess_end);
++		flag = 1;
++	}
++	if (int_mask & OTGSC_BSVIS) {
++		dev_dbg(lnw->dev, "%s: b session valid int\n", __func__);
++		iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
++		dev_dbg(lnw->dev, "b_sess_vld = %d\n", iotg->hsm.b_sess_vld);
++		flag = 1;
++	}
++	if (int_mask & OTGSC_ASVIS) {
++		dev_dbg(lnw->dev, "%s: a session valid int\n", __func__);
++		iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
++		dev_dbg(lnw->dev, "a_sess_vld = %d\n", iotg->hsm.a_sess_vld);
++		flag = 1;
++	}
++	if (int_mask & OTGSC_AVVIS) {
++		dev_dbg(lnw->dev, "%s: a vbus valid int\n", __func__);
++		iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
++		dev_dbg(lnw->dev, "a_vbus_vld = %d\n", iotg->hsm.a_vbus_vld);
++		flag = 1;
++	}
++
++	if (int_mask & OTGSC_1MSS) {
++		/* need to schedule otg_work if any timer is expired */
++		if (langwell_otg_tick_timer(&int_sts))
++			flag = 1;
++	}
++
++	writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
++					lnw->iotg.base + CI_OTGSC);
++	if (flag)
++		langwell_update_transceiver();
++
++	return IRQ_HANDLED;
++}
++
++static int langwell_otg_iotg_notify(struct notifier_block *nb,
++				unsigned long action, void *data)
++{
++	struct langwell_otg *lnw = the_transceiver;
++	struct intel_mid_otg_xceiv *iotg = data;
++	int flag = 0;
++
++	if (iotg == NULL)
++		return NOTIFY_BAD;
++
++	if (lnw == NULL)
++		return NOTIFY_BAD;
++
++	switch (action) {
++	case MID_OTG_NOTIFY_CONNECT:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Connect Event\n");
++		if (iotg->otg.default_a == 1)
++			iotg->hsm.b_conn = 1;
++		else
++			iotg->hsm.a_conn = 1;
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_DISCONN:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Disconnect Event\n");
++		if (iotg->otg.default_a == 1)
++			iotg->hsm.b_conn = 0;
++		else
++			iotg->hsm.a_conn = 0;
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_HSUSPEND:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus suspend Event\n");
++		if (iotg->otg.default_a == 1)
++			iotg->hsm.a_suspend_req = 1;
++		else
++			iotg->hsm.b_bus_req = 0;
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_HRESUME:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus resume Event\n");
++		if (iotg->otg.default_a == 1)
++			iotg->hsm.b_bus_resume = 1;
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_CSUSPEND:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus suspend Event\n");
++		if (iotg->otg.default_a == 1) {
++			if (iotg->hsm.b_bus_suspend_vld == 2) {
++				iotg->hsm.b_bus_suspend = 1;
++				iotg->hsm.b_bus_suspend_vld = 0;
++				flag = 1;
++			} else {
++				iotg->hsm.b_bus_suspend_vld++;
++				flag = 0;
++			}
++		} else {
++			if (iotg->hsm.a_bus_suspend == 0) {
++				iotg->hsm.a_bus_suspend = 1;
++				flag = 1;
++			}
++		}
++		break;
++	case MID_OTG_NOTIFY_CRESUME:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus resume Event\n");
++		if (iotg->otg.default_a == 0)
++			iotg->hsm.a_bus_suspend = 0;
++		flag = 0;
++		break;
++	case MID_OTG_NOTIFY_HOSTADD:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Host Driver Add\n");
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_HOSTREMOVE:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Host Driver remove\n");
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_CLIENTADD:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Client Driver Add\n");
++		flag = 1;
++		break;
++	case MID_OTG_NOTIFY_CLIENTREMOVE:
++		dev_dbg(lnw->dev, "Lnw OTG Notify Client Driver remove\n");
++		flag = 1;
++		break;
++	default:
++		dev_dbg(lnw->dev, "Lnw OTG Notify unknown notify message\n");
++		return NOTIFY_DONE;
++	}
++
++	if (flag)
++		langwell_update_transceiver();
++
++	return NOTIFY_OK;
++}
++
++static void langwell_otg_work(struct work_struct *work)
++{
++ struct langwell_otg *lnw;
++ struct intel_mid_otg_xceiv *iotg;
++ int retval;
++ struct pci_dev *pdev;
++
++ lnw = container_of(work, struct langwell_otg, work);
++ iotg = &lnw->iotg;
++ pdev = to_pci_dev(lnw->dev);
++
++ dev_dbg(lnw->dev, "%s: old state = %s\n", __func__,
++ state_string(iotg->otg.state));
++
++ switch (iotg->otg.state) {
++ case OTG_STATE_UNDEFINED:
++ case OTG_STATE_B_IDLE:
++ if (!iotg->hsm.id) {
++ langwell_otg_del_timer(b_srp_init_tmr);
++ del_timer_sync(&lnw->hsm_timer);
++
++ iotg->otg.default_a = 1;
++ iotg->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.b_sess_vld) {
++ langwell_otg_del_timer(b_srp_init_tmr);
++ del_timer_sync(&lnw->hsm_timer);
++ iotg->hsm.b_sess_end = 0;
++ iotg->hsm.a_bus_suspend = 0;
++ langwell_otg_chrg_vbus(0);
++
++ if (lnw->iotg.start_peripheral) {
++ lnw->iotg.start_peripheral(&lnw->iotg);
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else
++ dev_dbg(lnw->dev, "client driver not loaded\n");
++
++ } else if (iotg->hsm.b_srp_init_tmout) {
++ iotg->hsm.b_srp_init_tmout = 0;
++ dev_warn(lnw->dev, "SRP init timeout\n");
++ } else if (iotg->hsm.b_srp_fail_tmout) {
++ iotg->hsm.b_srp_fail_tmout = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ /* No silence failure */
++ langwell_otg_nsf_msg(6);
++ } else if (iotg->hsm.b_bus_req && iotg->hsm.b_sess_end) {
++ del_timer_sync(&lnw->hsm_timer);
++ /* workaround for b_se0_srp detection */
++ retval = langwell_otg_check_se0_srp(0);
++ if (retval) {
++ iotg->hsm.b_bus_req = 0;
++ dev_dbg(lnw->dev, "LS isn't SE0, try later\n");
++ } else {
++ /* clear the PHCD before start srp */
++ langwell_otg_phy_low_power(0);
++
++ /* Start SRP */
++ langwell_otg_add_timer(b_srp_init_tmr);
++ iotg->otg.start_srp(&iotg->otg);
++ langwell_otg_del_timer(b_srp_init_tmr);
++ langwell_otg_add_ktimer(TB_SRP_FAIL_TMR);
++
++ /* reset PHY low power mode here */
++ langwell_otg_phy_low_power_wait(1);
++ }
++ }
++ break;
++ case OTG_STATE_B_SRP_INIT:
++ if (!iotg->hsm.id) {
++ iotg->otg.default_a = 1;
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ langwell_otg_chrg_vbus(0);
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.b_sess_vld) {
++ langwell_otg_chrg_vbus(0);
++ if (lnw->iotg.start_peripheral) {
++ lnw->iotg.start_peripheral(&lnw->iotg);
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else
++ dev_dbg(lnw->dev, "client driver not loaded\n");
++ }
++ break;
++ case OTG_STATE_B_PERIPHERAL:
++ if (!iotg->hsm.id) {
++ iotg->otg.default_a = 1;
++ iotg->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.b_sess_vld) {
++ iotg->hsm.b_hnp_enable = 0;
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if (iotg->hsm.b_bus_req && iotg->otg.gadget &&
++ iotg->otg.gadget->b_hnp_enable &&
++ iotg->hsm.a_bus_suspend) {
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ langwell_otg_HAAR(1);
++ iotg->hsm.a_conn = 0;
++
++ if (lnw->iotg.start_host) {
++ lnw->iotg.start_host(&lnw->iotg);
++ iotg->otg.state = OTG_STATE_B_WAIT_ACON;
++ } else
++ dev_dbg(lnw->dev,
++ "host driver not loaded.\n");
++
++ iotg->hsm.a_bus_resume = 0;
++ langwell_otg_add_ktimer(TB_ASE0_BRST_TMR);
++ }
++ break;
++
++ case OTG_STATE_B_WAIT_ACON:
++ if (!iotg->hsm.id) {
++ /* delete hsm timer for b_ase0_brst_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ iotg->otg.default_a = 1;
++ iotg->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++
++ langwell_otg_HAAR(0);
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.b_sess_vld) {
++ /* delete hsm timer for b_ase0_brst_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ iotg->hsm.b_hnp_enable = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ langwell_otg_chrg_vbus(0);
++ langwell_otg_HAAR(0);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if (iotg->hsm.a_conn) {
++ /* delete hsm timer for b_ase0_brst_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ langwell_otg_HAAR(0);
++ iotg->otg.state = OTG_STATE_B_HOST;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.a_bus_resume ||
++ iotg->hsm.b_ase0_brst_tmout) {
++ /* delete hsm timer for b_ase0_brst_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ langwell_otg_HAAR(0);
++ langwell_otg_nsf_msg(7);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ iotg->hsm.a_bus_suspend = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ if (lnw->iotg.start_peripheral)
++ lnw->iotg.start_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver not loaded.\n");
++
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ }
++ break;
++
++ case OTG_STATE_B_HOST:
++ if (!iotg->hsm.id) {
++ iotg->otg.default_a = 1;
++ iotg->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.b_sess_vld) {
++ iotg->hsm.b_hnp_enable = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ langwell_otg_chrg_vbus(0);
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if ((!iotg->hsm.b_bus_req) ||
++ (!iotg->hsm.a_conn)) {
++ iotg->hsm.b_bus_req = 0;
++ langwell_otg_loc_sof(0);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ iotg->hsm.a_bus_suspend = 0;
++
++ if (lnw->iotg.start_peripheral)
++ lnw->iotg.start_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver not loaded.\n");
++
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ }
++ break;
++
++ case OTG_STATE_A_IDLE:
++ iotg->otg.default_a = 1;
++ if (iotg->hsm.id) {
++ iotg->otg.default_a = 0;
++ iotg->hsm.b_bus_req = 0;
++ iotg->hsm.vbus_srp_up = 0;
++
++ langwell_otg_chrg_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.a_bus_drop &&
++ (iotg->hsm.a_srp_det || iotg->hsm.a_bus_req)) {
++ langwell_otg_phy_low_power(0);
++
++ /* Turn on VBus */
++ iotg->otg.set_vbus(&iotg->otg, true);
++
++ iotg->hsm.vbus_srp_up = 0;
++ iotg->hsm.a_wait_vrise_tmout = 0;
++ langwell_otg_add_timer(a_wait_vrise_tmr);
++ iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.a_bus_drop && iotg->hsm.a_sess_vld) {
++ iotg->hsm.vbus_srp_up = 1;
++ } else if (!iotg->hsm.a_sess_vld && iotg->hsm.vbus_srp_up) {
++ msleep(10);
++ langwell_otg_phy_low_power(0);
++
++ /* Turn on VBus */
++ iotg->otg.set_vbus(&iotg->otg, true);
++ iotg->hsm.a_srp_det = 1;
++ iotg->hsm.vbus_srp_up = 0;
++ iotg->hsm.a_wait_vrise_tmout = 0;
++ langwell_otg_add_timer(a_wait_vrise_tmr);
++ iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.a_sess_vld &&
++ !iotg->hsm.vbus_srp_up) {
++ langwell_otg_phy_low_power(1);
++ }
++ break;
++ case OTG_STATE_A_WAIT_VRISE:
++ if (iotg->hsm.id) {
++ langwell_otg_del_timer(a_wait_vrise_tmr);
++ iotg->hsm.b_bus_req = 0;
++ iotg->otg.default_a = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if (iotg->hsm.a_vbus_vld) {
++ langwell_otg_del_timer(a_wait_vrise_tmr);
++ iotg->hsm.b_conn = 0;
++ if (lnw->iotg.start_host)
++ lnw->iotg.start_host(&lnw->iotg);
++ else {
++ dev_dbg(lnw->dev, "host driver not loaded.\n");
++ break;
++ }
++
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (iotg->hsm.a_wait_vrise_tmout) {
++ iotg->hsm.b_conn = 0;
++ if (iotg->hsm.a_vbus_vld) {
++ if (lnw->iotg.start_host)
++ lnw->iotg.start_host(&lnw->iotg);
++ else {
++ dev_dbg(lnw->dev,
++ "host driver not loaded.\n");
++ break;
++ }
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else {
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ }
++ }
++ break;
++ case OTG_STATE_A_WAIT_BCON:
++ if (iotg->hsm.id) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ iotg->otg.default_a = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.a_vbus_vld) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (iotg->hsm.a_bus_drop ||
++ (iotg->hsm.a_wait_bcon_tmout &&
++ !iotg->hsm.a_bus_req)) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (iotg->hsm.b_conn) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ iotg->hsm.a_suspend_req = 0;
++ iotg->otg.state = OTG_STATE_A_HOST;
++ if (iotg->hsm.a_srp_det && iotg->otg.host &&
++ !iotg->otg.host->b_hnp_enable) {
++ /* SRP capable peripheral-only device */
++ iotg->hsm.a_bus_req = 1;
++ iotg->hsm.a_srp_det = 0;
++ } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
++ iotg->otg.host->b_hnp_enable) {
++ /* It is not safe enough to do a fast
++ * transistion from A_WAIT_BCON to
++ * A_SUSPEND */
++ msleep(10000);
++ if (iotg->hsm.a_bus_req)
++ break;
++
++ if (request_irq(pdev->irq,
++ otg_dummy_irq, IRQF_SHARED,
++ driver_name, iotg->base) != 0) {
++ dev_dbg(lnw->dev,
++ "request interrupt %d fail\n",
++ pdev->irq);
++ }
++
++ langwell_otg_HABA(1);
++ iotg->hsm.b_bus_resume = 0;
++ iotg->hsm.a_aidl_bdis_tmout = 0;
++
++ langwell_otg_loc_sof(0);
++ /* clear PHCD to enable HW timer */
++ langwell_otg_phy_low_power(0);
++ langwell_otg_add_timer(a_aidl_bdis_tmr);
++ iotg->otg.state = OTG_STATE_A_SUSPEND;
++ } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
++ !iotg->otg.host->b_hnp_enable) {
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ }
++ }
++ break;
++ case OTG_STATE_A_HOST:
++ if (iotg->hsm.id) {
++ iotg->otg.default_a = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.a_bus_drop ||
++ (iotg->otg.host &&
++ !iotg->otg.host->b_hnp_enable &&
++ !iotg->hsm.a_bus_req)) {
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!iotg->hsm.a_vbus_vld) {
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (iotg->otg.host &&
++ iotg->otg.host->b_hnp_enable &&
++ !iotg->hsm.a_bus_req) {
++ /* Set HABA to enable hardware assistance to signal
++ * A-connect after receiver B-disconnect. Hardware
++ * will then set client mode and enable URE, SLE and
++ * PCE after the assistance. otg_dummy_irq is used to
++ * clean these ints when client driver is not resumed.
++ */
++ if (request_irq(pdev->irq, otg_dummy_irq, IRQF_SHARED,
++ driver_name, iotg->base) != 0) {
++ dev_dbg(lnw->dev,
++ "request interrupt %d failed\n",
++ pdev->irq);
++ }
++
++ /* set HABA */
++ langwell_otg_HABA(1);
++ iotg->hsm.b_bus_resume = 0;
++ iotg->hsm.a_aidl_bdis_tmout = 0;
++ langwell_otg_loc_sof(0);
++ /* clear PHCD to enable HW timer */
++ langwell_otg_phy_low_power(0);
++ langwell_otg_add_timer(a_aidl_bdis_tmr);
++ iotg->otg.state = OTG_STATE_A_SUSPEND;
++ } else if (!iotg->hsm.b_conn || !iotg->hsm.a_bus_req) {
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ }
++ break;
++ case OTG_STATE_A_SUSPEND:
++ if (iotg->hsm.id) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(pdev->irq, iotg->base);
++ iotg->otg.default_a = 0;
++ iotg->hsm.b_bus_req = 0;
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.a_bus_req ||
++ iotg->hsm.b_bus_resume) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(pdev->irq, iotg->base);
++ iotg->hsm.a_suspend_req = 0;
++ langwell_otg_loc_sof(1);
++ iotg->otg.state = OTG_STATE_A_HOST;
++ } else if (iotg->hsm.a_aidl_bdis_tmout ||
++ iotg->hsm.a_bus_drop) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(pdev->irq, iotg->base);
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!iotg->hsm.b_conn && iotg->otg.host &&
++ iotg->otg.host->b_hnp_enable) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(pdev->irq, iotg->base);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ iotg->hsm.b_bus_suspend = 0;
++ iotg->hsm.b_bus_suspend_vld = 0;
++
++ /* msleep(200); */
++ if (lnw->iotg.start_peripheral)
++ lnw->iotg.start_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver not loaded.\n");
++
++ langwell_otg_add_ktimer(TB_BUS_SUSPEND_TMR);
++ iotg->otg.state = OTG_STATE_A_PERIPHERAL;
++ break;
++ } else if (!iotg->hsm.a_vbus_vld) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(pdev->irq, iotg->base);
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ }
++ break;
++ case OTG_STATE_A_PERIPHERAL:
++ if (iotg->hsm.id) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++ iotg->otg.default_a = 0;
++ iotg->hsm.b_bus_req = 0;
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (!iotg->hsm.a_vbus_vld) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ langwell_otg_phy_low_power_wait(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (iotg->hsm.a_bus_drop) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (iotg->hsm.b_bus_suspend) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ if (lnw->iotg.start_host)
++ lnw->iotg.start_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver not loaded.\n");
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (iotg->hsm.b_bus_suspend_tmout) {
++ u32 val;
++ val = readl(lnw->iotg.base + CI_PORTSC1);
++ if (!(val & PORTSC_SUSP))
++ break;
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "client driver has been removed.\n");
++
++ if (lnw->iotg.start_host)
++ lnw->iotg.start_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev,
++ "host driver not loaded.\n");
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ }
++ break;
++ case OTG_STATE_A_VBUS_ERR:
++ if (iotg->hsm.id) {
++ iotg->otg.default_a = 0;
++ iotg->hsm.a_clr_err = 0;
++ iotg->hsm.a_srp_det = 0;
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.a_clr_err) {
++ iotg->hsm.a_clr_err = 0;
++ iotg->hsm.a_srp_det = 0;
++ reset_otg();
++ init_hsm();
++ if (iotg->otg.state == OTG_STATE_A_IDLE)
++ langwell_update_transceiver();
++ } else {
++ /* FW will clear PHCD bit when any VBus
++ * event detected. Reset PHCD to 1 again */
++ langwell_otg_phy_low_power(1);
++ }
++ break;
++ case OTG_STATE_A_WAIT_VFALL:
++ if (iotg->hsm.id) {
++ iotg->otg.default_a = 0;
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ langwell_update_transceiver();
++ } else if (iotg->hsm.a_bus_req) {
++
++ /* Turn on VBus */
++ iotg->otg.set_vbus(&iotg->otg, true);
++ iotg->hsm.a_wait_vrise_tmout = 0;
++ langwell_otg_add_timer(a_wait_vrise_tmr);
++ iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
++ } else if (!iotg->hsm.a_sess_vld) {
++ iotg->hsm.a_srp_det = 0;
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ }
++ break;
++ default:
++ ;
++ }
++
++ dev_dbg(lnw->dev, "%s: new state = %s\n", __func__,
++ state_string(iotg->otg.state));
++}
++
++static ssize_t
++show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size,
++ "\n"
++ "USBCMD = 0x%08x\n"
++ "USBSTS = 0x%08x\n"
++ "USBINTR = 0x%08x\n"
++ "ASYNCLISTADDR = 0x%08x\n"
++ "PORTSC1 = 0x%08x\n"
++ "HOSTPC1 = 0x%08x\n"
++ "OTGSC = 0x%08x\n"
++ "USBMODE = 0x%08x\n",
++ readl(lnw->iotg.base + 0x30),
++ readl(lnw->iotg.base + 0x34),
++ readl(lnw->iotg.base + 0x38),
++ readl(lnw->iotg.base + 0x48),
++ readl(lnw->iotg.base + 0x74),
++ readl(lnw->iotg.base + 0xb4),
++ readl(lnw->iotg.base + 0xf4),
++ readl(lnw->iotg.base + 0xf8)
++ );
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
++
++static ssize_t
++show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ if (iotg->otg.host)
++ iotg->hsm.a_set_b_hnp_en = iotg->otg.host->b_hnp_enable;
++
++ if (iotg->otg.gadget)
++ iotg->hsm.b_hnp_enable = iotg->otg.gadget->b_hnp_enable;
++
++ t = scnprintf(next, size,
++ "\n"
++ "current state = %s\n"
++ "a_bus_resume = \t%d\n"
++ "a_bus_suspend = \t%d\n"
++ "a_conn = \t%d\n"
++ "a_sess_vld = \t%d\n"
++ "a_srp_det = \t%d\n"
++ "a_vbus_vld = \t%d\n"
++ "b_bus_resume = \t%d\n"
++ "b_bus_suspend = \t%d\n"
++ "b_conn = \t%d\n"
++ "b_se0_srp = \t%d\n"
++ "b_sess_end = \t%d\n"
++ "b_sess_vld = \t%d\n"
++ "id = \t%d\n"
++ "a_set_b_hnp_en = \t%d\n"
++ "b_srp_done = \t%d\n"
++ "b_hnp_enable = \t%d\n"
++ "a_wait_vrise_tmout = \t%d\n"
++ "a_wait_bcon_tmout = \t%d\n"
++ "a_aidl_bdis_tmout = \t%d\n"
++ "b_ase0_brst_tmout = \t%d\n"
++ "a_bus_drop = \t%d\n"
++ "a_bus_req = \t%d\n"
++ "a_clr_err = \t%d\n"
++ "a_suspend_req = \t%d\n"
++ "b_bus_req = \t%d\n"
++ "b_bus_suspend_tmout = \t%d\n"
++ "b_bus_suspend_vld = \t%d\n",
++ state_string(iotg->otg.state),
++ iotg->hsm.a_bus_resume,
++ iotg->hsm.a_bus_suspend,
++ iotg->hsm.a_conn,
++ iotg->hsm.a_sess_vld,
++ iotg->hsm.a_srp_det,
++ iotg->hsm.a_vbus_vld,
++ iotg->hsm.b_bus_resume,
++ iotg->hsm.b_bus_suspend,
++ iotg->hsm.b_conn,
++ iotg->hsm.b_se0_srp,
++ iotg->hsm.b_sess_end,
++ iotg->hsm.b_sess_vld,
++ iotg->hsm.id,
++ iotg->hsm.a_set_b_hnp_en,
++ iotg->hsm.b_srp_done,
++ iotg->hsm.b_hnp_enable,
++ iotg->hsm.a_wait_vrise_tmout,
++ iotg->hsm.a_wait_bcon_tmout,
++ iotg->hsm.a_aidl_bdis_tmout,
++ iotg->hsm.b_ase0_brst_tmout,
++ iotg->hsm.a_bus_drop,
++ iotg->hsm.a_bus_req,
++ iotg->hsm.a_clr_err,
++ iotg->hsm.a_suspend_req,
++ iotg->hsm.b_bus_req,
++ iotg->hsm.b_bus_suspend_tmout,
++ iotg->hsm.b_bus_suspend_vld
++ );
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
++
++static ssize_t
++get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_req);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_a_bus_req(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++
++ if (!iotg->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ iotg->hsm.a_bus_req = 0;
++ dev_dbg(lnw->dev, "User request: a_bus_req = 0\n");
++ } else if (buf[0] == '1') {
++ /* If a_bus_drop is TRUE, a_bus_req can't be set */
++ if (iotg->hsm.a_bus_drop)
++ return -1;
++ iotg->hsm.a_bus_req = 1;
++ dev_dbg(lnw->dev, "User request: a_bus_req = 1\n");
++ }
++ if (spin_trylock(&lnw->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&lnw->wq_lock);
++ }
++ return count;
++}
++static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
++
++static ssize_t
++get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_drop);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_a_bus_drop(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++
++ if (!iotg->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ iotg->hsm.a_bus_drop = 0;
++ dev_dbg(lnw->dev, "User request: a_bus_drop = 0\n");
++ } else if (buf[0] == '1') {
++ iotg->hsm.a_bus_drop = 1;
++ iotg->hsm.a_bus_req = 0;
++ dev_dbg(lnw->dev, "User request: a_bus_drop = 1\n");
++ dev_dbg(lnw->dev, "User request: and a_bus_req = 0\n");
++ }
++ if (spin_trylock(&lnw->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&lnw->wq_lock);
++ }
++ return count;
++}
++static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
++ get_a_bus_drop, set_a_bus_drop);
++
++static ssize_t
++get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", lnw->iotg.hsm.b_bus_req);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_b_bus_req(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++
++ if (iotg->otg.default_a)
++ return -1;
++
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ iotg->hsm.b_bus_req = 0;
++ dev_dbg(lnw->dev, "User request: b_bus_req = 0\n");
++ } else if (buf[0] == '1') {
++ iotg->hsm.b_bus_req = 1;
++ dev_dbg(lnw->dev, "User request: b_bus_req = 1\n");
++ }
++ if (spin_trylock(&lnw->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&lnw->wq_lock);
++ }
++ return count;
++}
++static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
++
++static ssize_t
++set_a_clr_err(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++
++ if (!iotg->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '1') {
++ iotg->hsm.a_clr_err = 1;
++ dev_dbg(lnw->dev, "User request: a_clr_err = 1\n");
++ }
++ if (spin_trylock(&lnw->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&lnw->wq_lock);
++ }
++ return count;
++}
++static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
++
++static struct attribute *inputs_attrs[] = {
++ &dev_attr_a_bus_req.attr,
++ &dev_attr_a_bus_drop.attr,
++ &dev_attr_b_bus_req.attr,
++ &dev_attr_a_clr_err.attr,
++ NULL,
++};
++
++static struct attribute_group debug_dev_attr_group = {
++ .name = "inputs",
++ .attrs = inputs_attrs,
++};
++
++static int langwell_otg_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ unsigned long resource, len;
++ void __iomem *base = NULL;
++ int retval;
++ u32 val32;
++ struct langwell_otg *lnw;
++ char qname[] = "langwell_otg_queue";
++
++ retval = 0;
++ dev_dbg(&pdev->dev, "\notg controller is detected.\n");
++ if (pci_enable_device(pdev) < 0) {
++ retval = -ENODEV;
++ goto done;
++ }
++
++ lnw = kzalloc(sizeof *lnw, GFP_KERNEL);
++ if (lnw == NULL) {
++ retval = -ENOMEM;
++ goto done;
++ }
++ the_transceiver = lnw;
++
++ /* control register: BAR 0 */
++ resource = pci_resource_start(pdev, 0);
++ len = pci_resource_len(pdev, 0);
++ if (!request_mem_region(resource, len, driver_name)) {
++ retval = -EBUSY;
++ goto err;
++ }
++ lnw->region = 1;
++
++ base = ioremap_nocache(resource, len);
++ if (base == NULL) {
++ retval = -EFAULT;
++ goto err;
++ }
++ lnw->iotg.base = base;
++
++ if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
++ retval = -EBUSY;
++ goto err;
++ }
++ lnw->cfg_region = 1;
++
++ /* For the SCCB.USBCFG register */
++ base = ioremap_nocache(USBCFG_ADDR, USBCFG_LEN);
++ if (base == NULL) {
++ retval = -EFAULT;
++ goto err;
++ }
++ lnw->usbcfg = base;
++
++ if (!pdev->irq) {
++ dev_dbg(&pdev->dev, "No IRQ.\n");
++ retval = -ENODEV;
++ goto err;
++ }
++
++ lnw->qwork = create_singlethread_workqueue(qname);
++ if (!lnw->qwork) {
++ dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
++ retval = -ENOMEM;
++ goto err;
++ }
++ INIT_WORK(&lnw->work, langwell_otg_work);
++
++ /* OTG common part */
++ lnw->dev = &pdev->dev;
++ lnw->iotg.otg.dev = lnw->dev;
++ lnw->iotg.otg.label = driver_name;
++ lnw->iotg.otg.set_host = langwell_otg_set_host;
++ lnw->iotg.otg.set_peripheral = langwell_otg_set_peripheral;
++ lnw->iotg.otg.set_power = langwell_otg_set_power;
++ lnw->iotg.otg.set_vbus = langwell_otg_set_vbus;
++ lnw->iotg.otg.start_srp = langwell_otg_start_srp;
++ lnw->iotg.otg.state = OTG_STATE_UNDEFINED;
++
++ if (otg_set_transceiver(&lnw->iotg.otg)) {
++ dev_dbg(lnw->dev, "can't set transceiver\n");
++ retval = -EBUSY;
++ goto err;
++ }
++
++ reset_otg();
++ init_hsm();
++
++ spin_lock_init(&lnw->lock);
++ spin_lock_init(&lnw->wq_lock);
++ INIT_LIST_HEAD(&active_timers);
++ retval = langwell_otg_init_timers(&lnw->iotg.hsm);
++ if (retval) {
++ dev_dbg(&pdev->dev, "Failed to init timers\n");
++ goto err;
++ }
++
++ init_timer(&lnw->hsm_timer);
++ ATOMIC_INIT_NOTIFIER_HEAD(&lnw->iotg.iotg_notifier);
++
++ lnw->iotg_notifier.notifier_call = langwell_otg_iotg_notify;
++
++ retval = intel_mid_otg_register_notifier(&lnw->iotg,
++ &lnw->iotg_notifier);
++ if (retval) {
++ dev_dbg(lnw->dev, "Failed to register notifier\n");
++ goto err;
++ }
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, lnw) != 0) {
++ dev_dbg(lnw->dev, "request interrupt %d failed\n", pdev->irq);
++ retval = -EBUSY;
++ goto err;
++ }
++
++ /* enable OTGSC int */
++ val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
++ OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
++ writel(val32, lnw->iotg.base + CI_OTGSC);
++
++ retval = device_create_file(&pdev->dev, &dev_attr_registers);
++ if (retval < 0) {
++ dev_dbg(lnw->dev,
++ "Can't register sysfs attribute: %d\n", retval);
++ goto err;
++ }
++
++ retval = device_create_file(&pdev->dev, &dev_attr_hsm);
++ if (retval < 0) {
++ dev_dbg(lnw->dev, "Can't hsm sysfs attribute: %d\n", retval);
++ goto err;
++ }
++
++ retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
++ if (retval < 0) {
++ dev_dbg(lnw->dev,
++ "Can't register sysfs attr group: %d\n", retval);
++ goto err;
++ }
++
++ if (lnw->iotg.otg.state == OTG_STATE_A_IDLE)
++ langwell_update_transceiver();
++
++ return 0;
++
++err:
++ if (the_transceiver)
++ langwell_otg_remove(pdev);
++done:
++ return retval;
++}
++
++static void langwell_otg_remove(struct pci_dev *pdev)
++{
++ struct langwell_otg *lnw = the_transceiver;
++
++ if (lnw->qwork) {
++ flush_workqueue(lnw->qwork);
++ destroy_workqueue(lnw->qwork);
++ }
++ intel_mid_otg_unregister_notifier(&lnw->iotg, &lnw->iotg_notifier);
++ langwell_otg_free_timers();
++
++ /* disable OTGSC interrupt as OTGSC doesn't change in reset */
++ writel(0, lnw->iotg.base + CI_OTGSC);
++
++ if (pdev->irq)
++ free_irq(pdev->irq, lnw);
++ if (lnw->usbcfg)
++ iounmap(lnw->usbcfg);
++ if (lnw->cfg_region)
++ release_mem_region(USBCFG_ADDR, USBCFG_LEN);
++ if (lnw->iotg.base)
++ iounmap(lnw->iotg.base);
++ if (lnw->region)
++ release_mem_region(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++
++ otg_set_transceiver(NULL);
++ pci_disable_device(pdev);
++ sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
++ device_remove_file(&pdev->dev, &dev_attr_hsm);
++ device_remove_file(&pdev->dev, &dev_attr_registers);
++ kfree(lnw);
++ lnw = NULL;
++}
++
++static void transceiver_suspend(struct pci_dev *pdev)
++{
++ pci_save_state(pdev);
++ pci_set_power_state(pdev, PCI_D3hot);
++ langwell_otg_phy_low_power(1);
++}
++
++static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
++ int ret = 0;
++
++ /* Disbale OTG interrupts */
++ langwell_otg_intr(0);
++
++ if (pdev->irq)
++ free_irq(pdev->irq, lnw);
++
++ /* Prevent more otg_work */
++ flush_workqueue(lnw->qwork);
++ destroy_workqueue(lnw->qwork);
++ lnw->qwork = NULL;
++
++ /* start actions */
++ switch (iotg->otg.state) {
++ case OTG_STATE_A_WAIT_VFALL:
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ case OTG_STATE_A_IDLE:
++ case OTG_STATE_B_IDLE:
++ case OTG_STATE_A_VBUS_ERR:
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_WAIT_VRISE:
++ langwell_otg_del_timer(a_wait_vrise_tmr);
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_WAIT_BCON:
++ del_timer_sync(&lnw->hsm_timer);
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(&pdev->dev, "host driver has been removed.\n");
++
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_HOST:
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(&pdev->dev, "host driver has been removed.\n");
++
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_SUSPEND:
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(lnw->dev, "host driver has been removed.\n");
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_PERIPHERAL:
++ del_timer_sync(&lnw->hsm_timer);
++
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(&pdev->dev,
++ "client driver has been removed.\n");
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_HOST:
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(&pdev->dev, "host driver has been removed.\n");
++ iotg->hsm.b_bus_req = 0;
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_PERIPHERAL:
++ if (lnw->iotg.stop_peripheral)
++ lnw->iotg.stop_peripheral(&lnw->iotg);
++ else
++ dev_dbg(&pdev->dev,
++ "client driver has been removed.\n");
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_WAIT_ACON:
++ /* delete hsm timer for b_ase0_brst_tmr */
++ del_timer_sync(&lnw->hsm_timer);
++
++ langwell_otg_HAAR(0);
++
++ if (lnw->iotg.stop_host)
++ lnw->iotg.stop_host(&lnw->iotg);
++ else
++ dev_dbg(&pdev->dev, "host driver has been removed.\n");
++ iotg->hsm.b_bus_req = 0;
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ default:
++ dev_dbg(lnw->dev, "error state before suspend\n");
++ break;
++ }
++
++ return ret;
++}
++
++static void transceiver_resume(struct pci_dev *pdev)
++{
++ pci_restore_state(pdev);
++ pci_set_power_state(pdev, PCI_D0);
++}
++
++static int langwell_otg_resume(struct pci_dev *pdev)
++{
++ struct langwell_otg *lnw = the_transceiver;
++ int ret = 0;
++
++ transceiver_resume(pdev);
++
++ lnw->qwork = create_singlethread_workqueue("langwell_otg_queue");
++ if (!lnw->qwork) {
++ dev_dbg(&pdev->dev, "cannot create langwell otg workqueuen");
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, lnw) != 0) {
++ dev_dbg(&pdev->dev, "request interrupt %d failed\n", pdev->irq);
++ ret = -EBUSY;
++ goto error;
++ }
++
++ /* enable OTG interrupts */
++ langwell_otg_intr(1);
++
++ update_hsm();
++
++ langwell_update_transceiver();
++
++ return ret;
++error:
++ langwell_otg_intr(0);
++ transceiver_suspend(pdev);
++ return ret;
++}
++
++static int __init langwell_otg_init(void)
++{
++ return pci_register_driver(&otg_pci_driver);
++}
++module_init(langwell_otg_init);
++
++static void __exit langwell_otg_cleanup(void)
++{
++ pci_unregister_driver(&otg_pci_driver);
++}
++module_exit(langwell_otg_cleanup);
+--- /dev/null
++++ b/drivers/usb/otg/penwell_otg.c
+@@ -0,0 +1,2321 @@
++/*
++ * Intel Penwell USB OTG transceiver driver
++ * Copyright (C) 2009 - 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++/* This driver helps to switch Penwell OTG controller function between host
++ * and peripheral. It works with EHCI driver and Penwell client controller
++ * driver together.
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/moduleparam.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++#include <linux/usb.h>
++#include <linux/usb/hcd.h>
++#include <linux/usb/otg.h>
++#include <linux/notifier.h>
++#include <linux/delay.h>
++#include <asm/intel_scu_ipc.h>
++#include "../core/usb.h"
++
++#include <linux/usb/penwell_otg.h>
++
++#define DRIVER_DESC "Intel Penwell USB OTG transceiver driver"
++#define DRIVER_VERSION "July 4, 2010"
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
++static const char driver_name[] = "penwell_otg";
++
++static int penwell_otg_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++static void penwell_otg_remove(struct pci_dev *pdev);
++static int penwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
++static int penwell_otg_resume(struct pci_dev *pdev);
++
++static int penwell_otg_set_host(struct otg_transceiver *otg,
++ struct usb_bus *host);
++static int penwell_otg_set_peripheral(struct otg_transceiver *otg,
++ struct usb_gadget *gadget);
++static int penwell_otg_start_srp(struct otg_transceiver *otg);
++
++static const struct pci_device_id pci_ids[] = {{
++ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0x20),
++ .class_mask = ~0,
++ .vendor = 0x8086,
++ .device = 0x0829,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++}, { /* end: all zeroes */ }
++};
++
++static struct pci_driver otg_pci_driver = {
++ .name = (char *) driver_name,
++ .id_table = pci_ids,
++
++ .probe = penwell_otg_probe,
++ .remove = penwell_otg_remove,
++
++ .suspend = penwell_otg_suspend,
++ .resume = penwell_otg_resume,
++};
++
++static const char *state_string(enum usb_otg_state state)
++{
++ switch (state) {
++ case OTG_STATE_A_IDLE:
++ return "a_idle";
++ case OTG_STATE_A_WAIT_VRISE:
++ return "a_wait_vrise";
++ case OTG_STATE_A_WAIT_BCON:
++ return "a_wait_bcon";
++ case OTG_STATE_A_HOST:
++ return "a_host";
++ case OTG_STATE_A_SUSPEND:
++ return "a_suspend";
++ case OTG_STATE_A_PERIPHERAL:
++ return "a_peripheral";
++ case OTG_STATE_A_WAIT_VFALL:
++ return "a_wait_vfall";
++ case OTG_STATE_A_VBUS_ERR:
++ return "a_vbus_err";
++ case OTG_STATE_B_IDLE:
++ return "b_idle";
++ case OTG_STATE_B_PERIPHERAL:
++ return "b_peripheral";
++ case OTG_STATE_B_WAIT_ACON:
++ return "b_wait_acon";
++ case OTG_STATE_B_HOST:
++ return "b_host";
++ default:
++ return "UNDEFINED";
++ }
++}
++
++static struct penwell_otg *the_transceiver;
++
++void penwell_update_transceiver(void)
++{
++ struct penwell_otg *pnw = the_transceiver;
++
++ dev_dbg(pnw->dev, "transceiver is updated\n");
++
++ if (!pnw->qwork)
++ return ;
++
++ queue_work(pnw->qwork, &pnw->work);
++}
++
++static int penwell_otg_set_host(struct otg_transceiver *otg,
++ struct usb_bus *host)
++{
++ otg->host = host;
++
++ return 0;
++}
++
++static int penwell_otg_set_peripheral(struct otg_transceiver *otg,
++ struct usb_gadget *gadget)
++{
++ otg->gadget = gadget;
++
++ return 0;
++}
++
++static int penwell_otg_set_power(struct otg_transceiver *otg,
++ unsigned mA)
++{
++ return 0;
++}
++
++/* After probe, it should enable the power of USB PHY */
++static void penwell_otg_phy_enable(int on)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u16 addr;
++ u8 data;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ addr = MSIC_VUSB330CNT;
++ data = on ? 0x37 : 0x24;
++
++ if (intel_scu_ipc_iowrite8(addr, data)) {
++ dev_err(pnw->dev, "Fail to access register for"
++ " OTG PHY power - write reg 0x%x failed.\n", addr);
++ return;
++ }
++ dev_dbg(pnw->dev, "%s <---\n", __func__);
++}
++
++/* A-device drives vbus, controlled through MSIC register */
++static int penwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u16 addr;
++ u8 data, mask;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, enabled ? "on" : "off");
++
++ addr = MSIC_VOTGCNT;
++ data = enabled ? VOTGEN : 0;
++ mask = VOTGEN;
++
++ if (intel_scu_ipc_update_register(addr, data, mask)) {
++ dev_err(pnw->dev, "Fail to drive power on OTG Port - "
++ "update register 0x%x failed.\n", addr);
++ return -EBUSY;
++ }
++
++ dev_dbg(pnw->dev, "VOTGCNT val = 0x%x", data);
++ dev_dbg(pnw->dev, "%s <---\n", __func__);
++
++ return 0;
++}
++
++static int penwell_otg_ulpi_run(void)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val;
++
++ val = readl(pnw->iotg.base + CI_ULPIVP);
++
++ if (val & ULPI_RUN) {
++ dev_dbg(pnw->dev, "%s: ULPI command wip\n", __func__);
++ return 1;
++ }
++
++ dev_dbg(pnw->dev, "%s: ULPI command done\n", __func__);
++ return 0;
++}
++
++/* io_ops to access ulpi registers */
++static int
++penwell_otg_ulpi_read(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val32 = 0;
++ int count;
++
++ dev_dbg(pnw->dev, "%s - addr 0x%x\n", __func__, reg);
++
++ /* Port = 0 */
++ val32 = ULPI_RUN | reg << 16;
++ writel(val32, pnw->iotg.base + CI_ULPIVP);
++
++ /* Polling for write operation to complete*/
++ count = 10;
++
++ while (count) {
++ val32 = readl(pnw->iotg.base + CI_ULPIVP);
++ if (val32 & ULPI_RUN) {
++ count--;
++ udelay(20);
++ } else {
++ *val = (u8)((val32 & ULPI_DATRD) >> 8);
++ dev_dbg(pnw->dev,
++ "%s - done data 0x%x\n", __func__, *val);
++ return 0;
++ }
++ }
++
++ dev_dbg(pnw->dev, "%s - timeout\n", __func__);
++
++ return -ETIMEDOUT;
++
++}
++
++static int
++penwell_otg_ulpi_write(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val32 = 0;
++ int count;
++
++ dev_dbg(pnw->dev,
++ "%s - addr 0x%x - data 0x%x\n", __func__, reg, val);
++
++ /* Port = 0 */
++ val32 = ULPI_RUN | ULPI_RW | reg << 16 | val;
++ writel(val32, pnw->iotg.base + CI_ULPIVP);
++
++ /* Polling for write operation to complete*/
++ count = 10;
++
++ while (count && penwell_otg_ulpi_run()) {
++ count--;
++ udelay(20);
++ }
++
++ dev_dbg(pnw->dev,
++ "%s - %s\n", __func__, count ? "complete" : "timeout");
++
++ return count ? 0 : -ETIMEDOUT;
++}
++
++static enum msic_vendor penwell_otg_check_msic(void)
++{
++ /* Return MSIC_VD_TI directly */
++ return MSIC_VD_TI;
++}
++
++/* Start SRP function */
++static int penwell_otg_start_srp(struct otg_transceiver *otg)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val;
++
++ dev_dbg(pnw->dev, "%s --->\n", __func__);
++
++ val = readl(pnw->iotg.base + CI_OTGSC);
++
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
++ pnw->iotg.base + CI_OTGSC);
++
++ /* Check if the data plus is finished or not */
++ msleep(8);
++ val = readl(pnw->iotg.base + CI_OTGSC);
++ if (val & (OTGSC_HADP | OTGSC_DP))
++ dev_dbg(pnw->dev, "DataLine SRP Error\n");
++
++ dev_dbg(pnw->dev, "%s <---\n", __func__);
++ return 0;
++}
++
++/* The timeout callback function to poll the host request flag */
++static void penwell_otg_hnp_poll_fn(unsigned long indicator)
++{
++ struct penwell_otg *pnw = the_transceiver;
++
++ queue_work(pnw->qwork, &pnw->hnp_poll_work);
++}
++
++/* stop SOF via bus_suspend */
++static void penwell_otg_loc_sof(int on)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct usb_hcd *hcd;
++ int err;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "suspend" : "resume");
++
++ hcd = bus_to_hcd(pnw->iotg.otg.host);
++ if (on)
++ err = hcd->driver->bus_resume(hcd);
++ else
++ err = hcd->driver->bus_suspend(hcd);
++
++ if (err)
++ dev_dbg(pnw->dev, "Fail to resume/suspend USB bus - %d\n", err);
++
++ dev_dbg(pnw->dev, "%s <---\n", __func__);
++}
++
++static void penwell_otg_phy_low_power(int on)
++{
++#if 0 /* Disable PHY low power */
++ struct penwell_otg *pnw = the_transceiver;
++ u8 val, phcd;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ phcd = 0x40;
++
++ val = readb(pnw->iotg.base + CI_HOSTPC1 + 2);
++
++ if (on)
++ writeb(val | phcd, pnw->iotg.base + CI_HOSTPC1 + 2);
++ else
++ writeb(val & ~phcd, pnw->iotg.base + CI_HOSTPC1 + 2);
++
++ val = readb(pnw->iotg.base + CI_HOSTPC1 + 2);
++
++ dev_dbg(pnw->dev, "CI_HOSTPC1 byte2 = %x\n", val);
++ dev_dbg(pnw->dev, "%s <---\n", __func__);
++#endif
++}
++
++/* Enable/Disable OTG interrupt */
++static void penwell_otg_intr(int on)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ val = readl(pnw->iotg.base + CI_OTGSC);
++ if (on) {
++ val = val | (OTGSC_INTEN_MASK);
++ writel(val, pnw->iotg.base + CI_OTGSC);
++ } else {
++ val = val & ~(OTGSC_INTEN_MASK);
++ writel(val, pnw->iotg.base + CI_OTGSC);
++ }
++}
++
++/* set HAAR: Hardware Assist Auto-Reset */
++static void penwell_otg_HAAR(int on)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ val = readl(pnw->iotg.base + CI_OTGSC);
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
++ pnw->iotg.base + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
++ pnw->iotg.base + CI_OTGSC);
++}
++
++/* set HABA: Hardware Assist B-Disconnect to A-Connect */
++static void penwell_otg_HABA(int on)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val;
++
++ dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
++
++ val = readl(pnw->iotg.base + CI_OTGSC);
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
++ pnw->iotg.base + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
++ pnw->iotg.base + CI_OTGSC);
++}
++
++void penwell_otg_nsf_msg(unsigned long indicator)
++{
++ switch (indicator) {
++ case 2:
++ case 4:
++ case 6:
++ case 7:
++ dev_warn(the_transceiver->dev,
++ "NSF-%lu - deivce not responding\n", indicator);
++ break;
++ case 3:
++ dev_warn(the_transceiver->dev,
++ "NSF-%lu - deivce not supported\n", indicator);
++ break;
++ default:
++ dev_warn(the_transceiver->dev,
++ "Do not have this kind of NSF\n");
++ break;
++ }
++}
++
++/* The timeout callback function to set time out bit */
++static void penwell_otg_timer_fn(unsigned long indicator)
++{
++ struct penwell_otg *pnw = the_transceiver;
++
++ *(int *)indicator = 1;
++
++ dev_dbg(pnw->dev, "kernel timer - timeout\n");
++
++ queue_work(pnw->qwork, &pnw->work);
++}
++
++/* kernel timer used for OTG timing */
++static void penwell_otg_add_timer(enum penwell_otg_timer_type timers)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ unsigned long j = jiffies;
++ unsigned long data, time;
++
++ switch (timers) {
++ case TA_WAIT_VRISE_TMR:
++ iotg->hsm.a_wait_vrise_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
++ time = TA_WAIT_VRISE;
++ dev_dbg(pnw->dev,
++ "Add timer TA_WAIT_VRISE = %d\n", TA_WAIT_VRISE);
++ break;
++ case TA_WAIT_BCON_TMR:
++ iotg->hsm.a_wait_bcon_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
++ time = TA_WAIT_BCON;
++ dev_dbg(pnw->dev,
++ "Add timer TA_WAIT_BCON = %d\n", TA_WAIT_BCON);
++ break;
++ case TA_AIDL_BDIS_TMR:
++ iotg->hsm.a_aidl_bdis_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
++ time = TA_AIDL_BDIS;
++ dev_dbg(pnw->dev,
++ "Add timer TA_AIDL_BDIS = %d\n", TA_AIDL_BDIS);
++ break;
++ case TA_BIDL_ADIS_TMR:
++ iotg->hsm.a_bidl_adis_tmout = 0;
++ iotg->hsm.a_bidl_adis_tmr = 1;
++ data = (unsigned long)&iotg->hsm.a_bidl_adis_tmout;
++ time = TA_BIDL_ADIS;
++ dev_dbg(pnw->dev,
++ "Add timer TA_BIDL_ADIS = %d\n", TA_BIDL_ADIS);
++ break;
++ case TA_WAIT_VFALL_TMR:
++ iotg->hsm.a_wait_vfall_tmout = 0;
++ data = (unsigned long)&iotg->hsm.a_wait_vfall_tmout;
++ time = TA_WAIT_VFALL;
++ dev_dbg(pnw->dev,
++ "Add timer TA_WAIT_VFALL = %d\n", TA_WAIT_VFALL);
++ break;
++ case TB_ASE0_BRST_TMR:
++ iotg->hsm.b_ase0_brst_tmout = 0;
++ data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
++ time = TB_ASE0_BRST;
++ dev_dbg(pnw->dev,
++ "Add timer TB_ASE0_BRST = %d\n", TB_ASE0_BRST);
++ break;
++ case TB_SRP_FAIL_TMR:
++ iotg->hsm.b_srp_fail_tmout = 0;
++ iotg->hsm.b_srp_fail_tmr = 1;
++ data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
++ time = TB_SRP_FAIL;
++ dev_dbg(pnw->dev,
++ "Add timer TB_SRP_FAIL = %d\n", TB_SRP_FAIL);
++ break;
++ default:
++ dev_dbg(pnw->dev,
++ "unkown timer, can not enable such timer\n");
++ return;
++ }
++
++ init_timer(&pnw->hsm_timer);
++
++ pnw->hsm_timer.data = data;
++ pnw->hsm_timer.function = penwell_otg_timer_fn;
++ pnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
++
++ add_timer(&pnw->hsm_timer);
++}
++
++static inline void penwell_otg_del_timer(enum penwell_otg_timer_type timers)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++
++ switch (timers) {
++ case TA_BIDL_ADIS_TMR:
++ iotg->hsm.a_bidl_adis_tmr = 0;
++ break;
++ case TB_SRP_FAIL_TMR:
++ iotg->hsm.b_srp_fail_tmr = 0;
++ break;
++ default:
++ break;
++ }
++
++ dev_dbg(pnw->dev, "state machine timer deleted\n");
++ del_timer_sync(&pnw->hsm_timer);
++}
++
++static void reset_otg(void)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ u32 val;
++ int delay_time = 1000;
++
++ dev_dbg(pnw->dev, "reseting OTG controller ...\n");
++ val = readl(pnw->iotg.base + CI_USBCMD);
++ writel(val | USBCMD_RST, pnw->iotg.base + CI_USBCMD);
++ do {
++ udelay(100);
++ if (!delay_time--)
++ dev_dbg(pnw->dev, "reset timeout\n");
++ val = readl(pnw->iotg.base + CI_USBCMD);
++ val &= USBCMD_RST;
++ } while (val != 0);
++ dev_dbg(pnw->dev, "reset done.\n");
++}
++
++static void set_host_mode(void)
++{
++ u32 val;
++
++ reset_otg();
++ val = readl(the_transceiver->iotg.base + CI_USBMODE);
++ val = (val & (~USBMODE_CM)) | USBMODE_HOST;
++ writel(val, the_transceiver->iotg.base + CI_USBMODE);
++}
++
++static void set_client_mode(void)
++{
++ u32 val;
++
++ reset_otg();
++ val = readl(the_transceiver->iotg.base + CI_USBMODE);
++ val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
++ writel(val, the_transceiver->iotg.base + CI_USBMODE);
++}
++
++static void init_hsm(void)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ u32 val32;
++
++ /* read OTGSC after reset */
++ val32 = readl(iotg->base + CI_OTGSC);
++ dev_dbg(pnw->dev,
++ "%s: OTGSC init value = 0x%x\n", __func__, val32);
++
++ /* set init state */
++ if (val32 & OTGSC_ID) {
++ iotg->hsm.id = ID_B;
++ iotg->otg.default_a = 0;
++ set_client_mode();
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else {
++ iotg->hsm.id = ID_A;
++ iotg->otg.default_a = 1;
++ set_host_mode();
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ }
++
++ /* set session indicator */
++ if (val32 & OTGSC_BSE)
++ iotg->hsm.b_sess_end = 1;
++ if (val32 & OTGSC_BSV)
++ iotg->hsm.b_sess_vld = 1;
++ if (val32 & OTGSC_ASV)
++ iotg->hsm.a_sess_vld = 1;
++ if (val32 & OTGSC_AVV)
++ iotg->hsm.a_vbus_vld = 1;
++
++ /* default user is not request the bus */
++ iotg->hsm.a_bus_req = 1;
++ iotg->hsm.a_bus_drop = 0;
++ /* init hsm means power_up case */
++ iotg->hsm.power_up = 0;
++ /* defautly don't request bus as B device */
++ iotg->hsm.b_bus_req = 0;
++ /* no system error */
++ iotg->hsm.a_clr_err = 0;
++
++ penwell_otg_phy_low_power(1);
++
++}
++
++static void update_hsm(void)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ u32 val32;
++
++ /* read OTGSC */
++ val32 = readl(iotg->base + CI_OTGSC);
++ dev_dbg(pnw->dev,
++ "%s OTGSC current value = 0x%x\n", __func__, val32);
++
++ iotg->hsm.id = !!(val32 & OTGSC_ID) ? ID_B : ID_A;
++ iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
++ iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
++ iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
++ iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
++}
++
++static irqreturn_t otg_irq(int irq, void *_dev)
++{
++ struct penwell_otg *pnw = _dev;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ int flag = 0;
++ u32 int_sts, int_en, int_mask = 0;
++
++ /* Check VBUS/SRP interrup */
++ int_sts = readl(pnw->iotg.base + CI_OTGSC);
++ int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
++ int_mask = int_sts & int_en;
++
++ if (int_mask) {
++ dev_dbg(pnw->dev,
++ "OTGSC = 0x%x, mask =0x%x\n", int_sts, int_mask);
++
++ /* FIXME: if ACA/ID interrupt is enabled, */
++ if (int_mask & OTGSC_IDIS) {
++ iotg->hsm.id = (int_sts & OTGSC_ID) ? ID_B : ID_A;
++ flag = 1;
++ dev_dbg(pnw->dev, "%s: id change int = %d\n",
++ __func__, iotg->hsm.id);
++ }
++ if (int_mask & OTGSC_DPIS) {
++ iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
++ flag = 1;
++ dev_dbg(pnw->dev, "%s: data pulse int = %d\n",
++ __func__, iotg->hsm.a_srp_det);
++ }
++ if (int_mask & OTGSC_BSEIS) {
++ iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
++ flag = 1;
++ dev_dbg(pnw->dev, "%s: b sess end int = %d\n",
++ __func__, iotg->hsm.b_sess_end);
++ }
++ if (int_mask & OTGSC_BSVIS) {
++ iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
++ flag = 1;
++ dev_dbg(pnw->dev, "%s: b sess valid int = %d\n",
++ __func__, iotg->hsm.b_sess_vld);
++ }
++ if (int_mask & OTGSC_ASVIS) {
++ iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
++ flag = 1;
++ dev_dbg(pnw->dev, "%s: a sess valid int = %d\n",
++ __func__, iotg->hsm.a_sess_vld);
++ }
++ if (int_mask & OTGSC_AVVIS) {
++ iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
++ flag = 1;
++ dev_dbg(pnw->dev, "%s: a vbus valid int = %d\n",
++ __func__, iotg->hsm.a_vbus_vld);
++ }
++
++ writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
++ pnw->iotg.base + CI_OTGSC);
++ }
++
++ if (flag)
++ penwell_update_transceiver();
++
++ return IRQ_HANDLED;
++}
++
++static int penwell_otg_iotg_notify(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = data;
++ int flag = 0;
++
++ if (iotg == NULL)
++ return NOTIFY_BAD;
++
++ if (pnw == NULL)
++ return NOTIFY_BAD;
++
++ switch (action) {
++ case MID_OTG_NOTIFY_CONNECT:
++ dev_dbg(pnw->dev, "PNW OTG Notify Connect Event\n");
++ if (iotg->otg.default_a == 1)
++ iotg->hsm.b_conn = 1;
++ else
++ iotg->hsm.a_conn = 1;
++ flag = 1;
++ break;
++ case MID_OTG_NOTIFY_DISCONN:
++ dev_dbg(pnw->dev, "PNW OTG Notify Disconnect Event\n");
++ if (iotg->otg.default_a == 1)
++ iotg->hsm.b_conn = 0;
++ else
++ iotg->hsm.a_conn = 0;
++ flag = 1;
++ break;
++ case MID_OTG_NOTIFY_HSUSPEND:
++ dev_dbg(pnw->dev, "PNW OTG Notify Host Bus suspend Event\n");
++ flag = 0;
++ break;
++ case MID_OTG_NOTIFY_HRESUME:
++ dev_dbg(pnw->dev, "PNW OTG Notify Host Bus resume Event\n");
++ flag = 0;
++ break;
++ case MID_OTG_NOTIFY_CSUSPEND:
++ dev_dbg(pnw->dev, "PNW OTG Notify Client Bus suspend Event\n");
++ flag = 0;
++ break;
++ case MID_OTG_NOTIFY_CRESUME:
++ dev_dbg(pnw->dev, "PNW OTG Notify Client Bus resume Event\n");
++ flag = 0;
++ break;
++ case MID_OTG_NOTIFY_HOSTADD:
++ dev_dbg(pnw->dev, "PNW OTG Nofity Host Driver Add\n");
++ flag = 1;
++ break;
++ case MID_OTG_NOTIFY_HOSTREMOVE:
++ dev_dbg(pnw->dev, "PNW OTG Nofity Host Driver remove\n");
++ flag = 1;
++ break;
++ case MID_OTG_NOTIFY_CLIENTADD:
++ dev_dbg(pnw->dev, "PNW OTG Nofity Client Driver Add\n");
++ flag = 1;
++ break;
++ case MID_OTG_NOTIFY_CLIENTREMOVE:
++ dev_dbg(pnw->dev, "PNW OTG Nofity Client Driver remove\n");
++ flag = 1;
++ break;
++ default:
++ dev_dbg(pnw->dev, "PNW OTG Nofity unknown notify message\n");
++ return NOTIFY_DONE;
++ }
++
++ if (flag)
++ penwell_update_transceiver();
++
++ return NOTIFY_OK;
++}
++
++
++static void penwell_otg_hnp_poll_work(struct work_struct *work)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ struct usb_device *udev;
++ unsigned long j = jiffies;
++ int err = 0;
++ u8 data;
++
++ if (iotg->otg.host && iotg->otg.host->root_hub) {
++ udev = iotg->otg.host->root_hub->children[0];
++ } else {
++ dev_dbg(pnw->dev, "no host or root_hub registered\n");
++ return;
++ }
++
++ if (iotg->otg.state != OTG_STATE_A_HOST
++ && iotg->otg.state != OTG_STATE_B_HOST)
++ return;
++
++ if (!udev) {
++ dev_dbg(pnw->dev,
++ "no usb dev connected, stop HNP polling\n");
++ return;
++ }
++
++ /* get host request flag from connected USB device */
++ err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
++ USB_REQ_GET_STATUS, USB_DIR_IN, 0, 0xF000, &data, 1, 5000);
++
++ if (err < 0) {
++ dev_warn(pnw->dev,
++ "ERR in HNP polling = %d, stop HNP polling\n", err);
++ return ;
++ }
++
++ if (data & HOST_REQUEST_FLAG) {
++ /* set a_bus_req = 0 */
++ if (iotg->hsm.id == ID_B) {
++ dev_dbg(pnw->dev,
++ "Device B host - start HNP - b_bus_req = 0\n");
++ iotg->hsm.b_bus_req = 0;
++ } else if (iotg->hsm.id == ID_A) {
++ dev_dbg(pnw->dev,
++ "Device A host - start HNP - a_bus_req = 0\n");
++ iotg->hsm.a_bus_req = 0;
++ }
++ penwell_update_transceiver();
++ } else {
++ pnw->hnp_poll_timer.data = 1;
++ pnw->hnp_poll_timer.function = penwell_otg_hnp_poll_fn;
++ pnw->hnp_poll_timer.expires = j + THOS_REQ_POL * HZ / 1000;
++ add_timer(&pnw->hnp_poll_timer);
++
++ dev_dbg(pnw->dev, "HNP Polling - continue\n");
++ }
++}
++
++static void penwell_otg_work(struct work_struct *work)
++{
++ struct penwell_otg *pnw = container_of(work,
++ struct penwell_otg, work);
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ struct otg_hsm *hsm = &iotg->hsm;
++
++ dev_dbg(pnw->dev,
++ "old state = %s\n", state_string(iotg->otg.state));
++
++ switch (iotg->otg.state) {
++ case OTG_STATE_UNDEFINED:
++ case OTG_STATE_B_IDLE:
++ if (hsm->id == ID_A || hsm->id == ID_ACA_A) {
++ /* Move to A_IDLE state, ID changes */
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TB_SRP_FAIL_TMR);
++
++ iotg->otg.default_a = 1;
++ hsm->a_srp_det = 0;
++ set_host_mode();
++ penwell_otg_phy_low_power(1);
++
++ /* Always set a_bus_req to 1, in case no ADP */
++ hsm->a_bus_req = 1;
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ penwell_update_transceiver();
++ } else if (hsm->b_adp_sense_tmout) {
++ hsm->b_adp_sense_tmout = 0;
++ } else if (hsm->b_srp_fail_tmout) {
++ hsm->b_srp_fail_tmr = 0;
++ hsm->b_srp_fail_tmout = 0;
++ hsm->b_bus_req = 0;
++ penwell_otg_nsf_msg(6);
++
++ penwell_update_transceiver();
++ } else if (hsm->b_sess_vld) {
++ /* Check it is caused by ACA attachment */
++ if (hsm->id == ID_ACA_B) {
++ /* in this case, update current limit*/
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ /* make sure PHY low power state */
++ penwell_otg_phy_low_power(1);
++ break;
++ } else if (hsm->id == ID_ACA_C) {
++ /* in this case, update current limit*/
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ }
++
++ /* Clear power_up */
++ if (hsm->power_up)
++ hsm->power_up = 0;
++
++ /* Move to B_PERIPHERAL state, Session Valid */
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TB_SRP_FAIL_TMR);
++
++ hsm->b_sess_end = 0;
++ hsm->a_bus_suspend = 0;
++
++ if (iotg->start_peripheral) {
++ iotg->start_peripheral(iotg);
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else {
++ dev_dbg(pnw->dev, "client driver not loaded\n");
++ break;
++ }
++
++ } else if ((hsm->b_bus_req || hsm->power_up ||
++ hsm->adp_change) && !hsm->b_srp_fail_tmr) {
++ if ((hsm->b_ssend_srp && hsm->b_se0_srp) ||
++ hsm->adp_change || hsm->power_up) {
++
++ if (hsm->power_up)
++ hsm->power_up = 0;
++
++ if (hsm->adp_change)
++ hsm->adp_change = 0;
++
++ /* clear the PHCD before start srp */
++ penwell_otg_phy_low_power(0);
++
++ /* Start SRP */
++ if (pnw->iotg.otg.start_srp)
++ pnw->iotg.otg.start_srp(&pnw->iotg.otg);
++ penwell_otg_add_timer(TB_SRP_FAIL_TMR);
++
++ /* reset PHY low power mode here */
++ penwell_otg_phy_low_power(1);
++ } else {
++ hsm->b_bus_req = 0;
++ dev_info(pnw->dev,
++ "BUS is active, try SRP later\n");
++ }
++ } else if (!hsm->b_sess_vld && hsm->id == ID_B) {
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++ penwell_otg_phy_low_power(1);
++ }
++ break;
++
++ case OTG_STATE_B_PERIPHERAL:
++ /* FIXME: Check if ID_ACA_A event will happened in this state */
++ if (hsm->id == ID_A) {
++ iotg->otg.default_a = 1;
++ hsm->a_srp_det = 0;
++
++ if (iotg->stop_peripheral)
++ iotg->stop_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver has been removed.\n");
++
++ set_host_mode();
++ penwell_otg_phy_low_power(1);
++
++ /* Always set a_bus_req to 1, in case no ADP */
++ hsm->a_bus_req = 1;
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ penwell_update_transceiver();
++ } else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
++ if (hsm->id == ID_ACA_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ else if (hsm->id == ID_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ hsm->b_hnp_enable = 0;
++
++ if (iotg->stop_peripheral)
++ iotg->stop_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver has been removed.\n");
++
++ hsm->b_ssend_srp = 1;
++ hsm->b_se0_srp = 1;
++ penwell_otg_phy_low_power(1);
++
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if (hsm->b_bus_req && hsm->b_hnp_enable
++ && hsm->a_bus_suspend) {
++
++ if (iotg->stop_peripheral)
++ iotg->stop_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver has been removed.\n");
++
++ penwell_otg_HAAR(1);
++ hsm->a_conn = 0;
++ hsm->a_bus_resume = 0;
++
++ if (iotg->start_host) {
++ iotg->start_host(iotg);
++ hsm->test_device = 0;
++ iotg->otg.state = OTG_STATE_B_WAIT_ACON;
++ } else
++ dev_dbg(pnw->dev, "host driver not loaded.\n");
++
++ penwell_otg_add_timer(TB_ASE0_BRST_TMR);
++ } else if (hsm->id == ID_ACA_C) {
++ /* Make sure current limit updated */
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ } else if (hsm->id == ID_B) {
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 100);
++ }
++ break;
++
++ case OTG_STATE_B_WAIT_ACON:
++ if (hsm->id == ID_A) {
++ /* Move to A_IDLE state, ID changes */
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
++
++ iotg->otg.default_a = 1;
++ hsm->a_srp_det = 0;
++
++ penwell_otg_HAAR(0);
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ set_host_mode();
++ penwell_otg_phy_low_power(1);
++
++ /* Always set a_bus_req to 1, in case no ADP */
++ iotg->hsm.a_bus_req = 1;
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ penwell_update_transceiver();
++ } else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
++ /* Move to B_IDLE state, VBUS off */
++
++ if (hsm->id == ID_ACA_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ else if (hsm->id == ID_B)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
++
++ hsm->b_hnp_enable = 0;
++ hsm->b_bus_req = 0;
++ penwell_otg_HAAR(0);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ set_client_mode();
++ penwell_otg_phy_low_power(1);
++
++ hsm->b_ssend_srp = 1;
++ hsm->b_se0_srp = 1;
++
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if (hsm->a_conn) {
++ /* Move to B_HOST state, A connected */
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
++
++ penwell_otg_HAAR(0);
++ iotg->otg.state = OTG_STATE_B_HOST;
++ penwell_update_transceiver();
++ } else if (hsm->a_bus_resume || hsm->b_ase0_brst_tmout) {
++ /* Move to B_HOST state, A connected */
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
++
++ penwell_otg_HAAR(0);
++ penwell_otg_nsf_msg(7);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ hsm->a_bus_suspend = 0;
++ hsm->b_bus_req = 0;
++
++ if (iotg->start_peripheral)
++ iotg->start_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev, "client driver not loaded\n");
++
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else if (hsm->id == ID_ACA_C) {
++ /* Make sure current limit updated */
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ } else if (hsm->id == ID_B) {
++ /* only set 2mA due to client function stopped */
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 2);
++ }
++ break;
++
++ case OTG_STATE_B_HOST:
++ if (hsm->id == ID_A) {
++ iotg->otg.default_a = 1;
++ hsm->a_srp_det = 0;
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ set_host_mode();
++ penwell_otg_phy_low_power(1);
++
++ /* Always set a_bus_req to 1, in case no ADP */
++ hsm->a_bus_req = 1;
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ penwell_update_transceiver();
++ } else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
++ if (hsm->id == ID_ACA_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ else if (hsm->id == ID_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ hsm->b_hnp_enable = 0;
++ hsm->b_bus_req = 0;
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ set_client_mode();
++ penwell_otg_phy_low_power(1);
++
++ hsm->b_ssend_srp = 1;
++ hsm->b_se0_srp = 1;
++
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ } else if (!hsm->b_bus_req || !hsm->a_conn
++ || hsm->test_device) {
++ hsm->b_bus_req = 0;
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ hsm->a_bus_suspend = 0;
++
++ if (iotg->start_peripheral)
++ iotg->start_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver not loaded.\n");
++
++ iotg->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else if (hsm->id == ID_ACA_C) {
++ /* Make sure current limit updated */
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ } else if (hsm->id == ID_B) {
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 100);
++ }
++ break;
++
++ case OTG_STATE_A_IDLE:
++ if (hsm->id == ID_B || hsm->id == ID_ACA_B) {
++ pnw->iotg.otg.default_a = 0;
++ hsm->b_bus_req = 0;
++
++ if (hsm->id == ID_ACA_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ set_client_mode();
++ penwell_otg_phy_low_power(1);
++
++ hsm->b_ssend_srp = 1;
++ hsm->b_se0_srp = 1;
++
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ penwell_update_transceiver();
++ } else if (hsm->id == ID_ACA_A) {
++
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ if (hsm->power_up)
++ hsm->power_up = 0;
++
++ if (hsm->adp_change)
++ hsm->adp_change = 0;
++
++ if (hsm->a_srp_det)
++ hsm->a_srp_det = 0;
++
++ hsm->b_conn = 0;
++ hsm->hnp_poll_enable = 0;
++
++ if (iotg->start_host)
++ iotg->start_host(iotg);
++ else {
++ dev_dbg(pnw->dev, "host driver not loaded.\n");
++ break;
++ }
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (!hsm->a_bus_drop && (hsm->power_up || hsm->a_bus_req
++ || hsm->power_up || hsm->adp_change)) {
++ /* power up / adp changes / srp detection should be
++ * cleared at once after handled. */
++ if (hsm->power_up)
++ hsm->power_up = 0;
++
++ if (hsm->adp_change)
++ hsm->adp_change = 0;
++
++ if (hsm->a_srp_det)
++ hsm->a_srp_det = 0;
++
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, true);
++
++ penwell_otg_add_timer(TA_WAIT_VRISE_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
++
++ penwell_update_transceiver();
++ } else if (hsm->b_sess_end || hsm->a_sess_vld ||
++ !hsm->b_sess_vld) {
++ dev_dbg(pnw->dev,
++ "reconfig...PHCD bit for PHY low power mode\n");
++ penwell_otg_phy_low_power(1);
++ }
++ break;
++
++ case OTG_STATE_A_WAIT_VRISE:
++ if (hsm->a_bus_drop ||
++ hsm->id == ID_B || hsm->id == ID_ACA_B) {
++ /* Move to A_WAIT_VFALL, over current/user request */
++
++ /* Delete current timer */
++ penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (hsm->a_wait_vrise_tmout || hsm->id == ID_ACA_A) {
++ /* Move to A_WAIT_BCON state, a vbus vld */
++ /* Delete current timer and clear flags */
++ penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
++
++ if (!hsm->a_vbus_vld) {
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ break;
++ }
++
++ if (hsm->id == ID_ACA_A) {
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++ }
++
++ hsm->b_conn = 0;
++ hsm->hnp_poll_enable = 0;
++
++ if (iotg->start_host) {
++ dev_dbg(pnw->dev, "host_ops registered!\n");
++ iotg->start_host(iotg);
++ } else {
++ dev_dbg(pnw->dev, "host driver not loaded.\n");
++ break;
++ }
++
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ }
++ break;
++ case OTG_STATE_A_WAIT_BCON:
++ if (hsm->id == ID_B || hsm->id == ID_ACA_B || hsm->a_bus_drop) {
++ /* Move to A_WAIT_VFALL state, user request */
++
++ /* Delete current timer and clear flags for B-Device */
++ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
++
++ hsm->b_bus_req = 0;
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!hsm->a_vbus_vld) {
++ /* Move to A_VBUS_ERR state, over-current detected */
++
++ /* Delete current timer and disable host function */
++ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
++
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBUS and enter PHY low power mode */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ penwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (hsm->b_conn) {
++ /* Move to A_HOST state, device connected */
++
++ /* Delete current timer and disable host function */
++ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
++
++ iotg->otg.state = OTG_STATE_A_HOST;
++
++ if (!hsm->a_bus_req)
++ hsm->a_bus_req = 1;
++ } else if (hsm->id == ID_ACA_A) {
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ }
++ break;
++
++ case OTG_STATE_A_HOST:
++ if (hsm->id == ID_B || hsm->id == ID_ACA_B || hsm->a_bus_drop) {
++ /* Move to A_WAIT_VFALL state, timeout/user request */
++ if (hsm->id == ID_ACA_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!hsm->a_vbus_vld) {
++ /* Move to A_VBUS_ERR state */
++
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ penwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (!hsm->a_bus_req) {
++ /* Move to A_SUSPEND state */
++
++ penwell_otg_loc_sof(0);
++
++ if (iotg->otg.host->b_hnp_enable) {
++ /* According to Spec 7.1.5 */
++ penwell_otg_add_timer(TA_AIDL_BDIS_TMR);
++ }
++
++ iotg->otg.state = OTG_STATE_A_SUSPEND;
++ } else if (!hsm->b_conn) {
++ hsm->hnp_poll_enable = 0;
++ /* add kernel timer */
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (hsm->id == ID_ACA_A) {
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ }
++ break;
++
++ case OTG_STATE_A_SUSPEND:
++ if (hsm->id == ID_B || hsm->id == ID_ACA_B ||
++ hsm->a_bus_drop || hsm->a_aidl_bdis_tmout) {
++ /* Move to A_WAIT_VFALL state, timeout/user request */
++
++ /* Delete current timer and clear HW assist */
++ if (hsm->a_aidl_bdis_tmout)
++ hsm->a_aidl_bdis_tmout = 0;
++ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
++
++ if (hsm->id == ID_ACA_B && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!hsm->a_vbus_vld) {
++ /* Move to A_VBUS_ERR state, Over-current */
++
++ /* Delete current timer and clear flags */
++ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
++
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ penwell_otg_phy_low_power(1);
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (!hsm->b_conn && !pnw->iotg.otg.host->b_hnp_enable) {
++ /* Move to A_WAIT_BCON */
++
++ /* delete current timer */
++ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
++
++ /* add kernel timer */
++ penwell_otg_add_timer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (!hsm->b_conn && pnw->iotg.otg.host->b_hnp_enable) {
++ /* Move to A_PERIPHERAL state, HNP */
++
++ /* Delete current timer and clear flags */
++ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
++
++ if (iotg->stop_host)
++ iotg->stop_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver has been removed.\n");
++
++ hsm->b_bus_suspend = 0;
++
++ if (iotg->start_peripheral)
++ iotg->start_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver not loaded.\n");
++
++ penwell_otg_add_timer(TA_BIDL_ADIS_TMR);
++ iotg->otg.state = OTG_STATE_A_PERIPHERAL;
++ } else if (hsm->a_bus_req) {
++ /* Move to A_HOST state, user request */
++
++ /* Delete current timer and clear flags */
++ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
++
++ penwell_otg_loc_sof(1);
++ iotg->otg.state = OTG_STATE_A_HOST;
++ } else if (hsm->id == ID_ACA_A) {
++ if (hsm->id == ID_ACA_A && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ }
++
++ break;
++ case OTG_STATE_A_PERIPHERAL:
++ if (hsm->id == ID_B || hsm->a_bus_drop) {
++ /* Move to A_WAIT_VFALL state */
++
++ /* Delete current timer and clear flags */
++ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
++
++ if (iotg->stop_peripheral)
++ iotg->stop_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver has been removed.\n");
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ set_host_mode();
++
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!hsm->a_vbus_vld) {
++ /* Move to A_VBUS_ERR state, over-current detected */
++
++ /* Delete current timer and disable client function */
++ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
++
++ if (iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 0);
++
++ if (iotg->stop_peripheral)
++ iotg->stop_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver has been removed.\n");
++
++ /* Turn off the VBUS and enter PHY low power mode */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ penwell_otg_phy_low_power(1);
++
++ iotg->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (hsm->a_bidl_adis_tmout) {
++ /* Move to A_WAIT_BCON state */
++ hsm->a_bidl_adis_tmr = 0;
++
++ /* Disable client function and switch to host mode */
++ if (iotg->stop_peripheral)
++ iotg->stop_peripheral(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "client driver has been removed.\n");
++
++ hsm->hnp_poll_enable = 0;
++ hsm->b_conn = 0;
++
++ if (iotg->start_host)
++ iotg->start_host(iotg);
++ else
++ dev_dbg(pnw->dev,
++ "host driver not loaded.\n");
++
++ penwell_otg_add_timer(TA_WAIT_BCON_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (!hsm->b_bus_suspend && hsm->a_bidl_adis_tmr) {
++ /* Client report suspend state end, delete timer */
++ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
++ } else if (hsm->b_bus_suspend && !hsm->a_bidl_adis_tmr) {
++ /* Client report suspend state start, start timer */
++ if (!timer_pending(&pnw->hsm_timer))
++ penwell_otg_add_timer(TA_BIDL_ADIS_TMR);
++ } else if (hsm->id == ID_ACA_A) {
++ if (hsm->id == ID_ACA_A && iotg->otg.set_power)
++ iotg->otg.set_power(&iotg->otg, 1500);
++
++ /* Turn off VBUS */
++ if (iotg->otg.set_vbus)
++ iotg->otg.set_vbus(&iotg->otg, false);
++ }
++ break;
++ case OTG_STATE_A_VBUS_ERR:
++ if (hsm->id == ID_B || hsm->id == ID_ACA_B ||
++ hsm->id == ID_ACA_A || hsm->a_bus_drop ||
++ hsm->a_clr_err) {
++ if (hsm->a_clr_err)
++ hsm->a_clr_err = 0;
++
++ penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
++ iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
++ }
++ break;
++ case OTG_STATE_A_WAIT_VFALL:
++ if (hsm->a_wait_vfall_tmout) {
++ /* Move to A_IDLE state, vbus falls */
++ penwell_otg_phy_low_power(1);
++
++ /* Always set a_bus_req to 1, in case no ADP */
++ hsm->a_bus_req = 1;
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ penwell_update_transceiver();
++ }
++ break;
++ default:
++ ;
++ }
++
++ dev_dbg(pnw->dev,
++ "new state = %s\n", state_string(iotg->otg.state));
++}
++
++static ssize_t
++show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size,
++ "\n"
++ "USBCMD = 0x%08x\n"
++ "USBSTS = 0x%08x\n"
++ "USBINTR = 0x%08x\n"
++ "ASYNCLISTADDR = 0x%08x\n"
++ "PORTSC1 = 0x%08x\n"
++ "HOSTPC1 = 0x%08x\n"
++ "OTGSC = 0x%08x\n"
++ "USBMODE = 0x%08x\n",
++ readl(pnw->iotg.base + 0x30),
++ readl(pnw->iotg.base + 0x34),
++ readl(pnw->iotg.base + 0x38),
++ readl(pnw->iotg.base + 0x48),
++ readl(pnw->iotg.base + 0x74),
++ readl(pnw->iotg.base + 0xb4),
++ readl(pnw->iotg.base + 0xf4),
++ readl(pnw->iotg.base + 0xf8)
++ );
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
++
++static ssize_t
++show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ if (iotg->otg.host)
++ iotg->hsm.a_set_b_hnp_en = iotg->otg.host->b_hnp_enable;
++
++ if (iotg->otg.gadget)
++ iotg->hsm.b_hnp_enable = iotg->otg.gadget->b_hnp_enable;
++
++ t = scnprintf(next, size,
++ "\n"
++ "current state = %s\n"
++ "a_bus_resume = \t%d\n"
++ "a_bus_suspend = \t%d\n"
++ "a_conn = \t%d\n"
++ "a_sess_vld = \t%d\n"
++ "a_srp_det = \t%d\n"
++ "a_vbus_vld = \t%d\n"
++ "b_bus_suspend = \t%d\n"
++ "b_conn = \t%d\n"
++ "b_se0_srp = \t%d\n"
++ "b_ssend_srp = \t%d\n"
++ "b_sess_end = \t%d\n"
++ "b_sess_vld = \t%d\n"
++ "id = \t%d\n"
++ "power_up = \t%d\n"
++ "adp_change = \t%d\n"
++ "test_device = \t%d\n"
++ "a_set_b_hnp_en = \t%d\n"
++ "b_srp_done = \t%d\n"
++ "b_hnp_enable = \t%d\n"
++ "hnp_poll_enable = \t%d\n"
++ "a_wait_vrise_tmout = \t%d\n"
++ "a_wait_bcon_tmout = \t%d\n"
++ "a_aidl_bdis_tmout = \t%d\n"
++ "a_bidl_adis_tmout = \t%d\n"
++ "a_bidl_adis_tmr = \t%d\n"
++ "a_wait_vfall_tmout = \t%d\n"
++ "b_ase0_brst_tmout = \t%d\n"
++ "b_srp_fail_tmout = \t%d\n"
++ "b_srp_fail_tmr = \t%d\n"
++ "b_adp_sense_tmout = \t%d\n"
++ "a_bus_drop = \t%d\n"
++ "a_bus_req = \t%d\n"
++ "a_clr_err = \t%d\n"
++ "b_bus_req = \t%d\n",
++ state_string(iotg->otg.state),
++ iotg->hsm.a_bus_resume,
++ iotg->hsm.a_bus_suspend,
++ iotg->hsm.a_conn,
++ iotg->hsm.a_sess_vld,
++ iotg->hsm.a_srp_det,
++ iotg->hsm.a_vbus_vld,
++ iotg->hsm.b_bus_suspend,
++ iotg->hsm.b_conn,
++ iotg->hsm.b_se0_srp,
++ iotg->hsm.b_ssend_srp,
++ iotg->hsm.b_sess_end,
++ iotg->hsm.b_sess_vld,
++ iotg->hsm.id,
++ iotg->hsm.power_up,
++ iotg->hsm.adp_change,
++ iotg->hsm.test_device,
++ iotg->hsm.a_set_b_hnp_en,
++ iotg->hsm.b_srp_done,
++ iotg->hsm.b_hnp_enable,
++ iotg->hsm.hnp_poll_enable,
++ iotg->hsm.a_wait_vrise_tmout,
++ iotg->hsm.a_wait_bcon_tmout,
++ iotg->hsm.a_aidl_bdis_tmout,
++ iotg->hsm.a_bidl_adis_tmout,
++ iotg->hsm.a_bidl_adis_tmr,
++ iotg->hsm.a_wait_vfall_tmout,
++ iotg->hsm.b_ase0_brst_tmout,
++ iotg->hsm.b_srp_fail_tmout,
++ iotg->hsm.b_srp_fail_tmr,
++ iotg->hsm.b_adp_sense_tmout,
++ iotg->hsm.a_bus_drop,
++ iotg->hsm.a_bus_req,
++ iotg->hsm.a_clr_err,
++ iotg->hsm.b_bus_req
++ );
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
++
++static ssize_t
++get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ char *next;
++ unsigned size, t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", pnw->iotg.hsm.a_bus_req);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_a_bus_req(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++
++ if (!iotg->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ iotg->hsm.a_bus_req = 0;
++ dev_dbg(pnw->dev, "a_bus_req = 0\n");
++ } else if (buf[0] == '1') {
++ /* If a_bus_drop is TRUE, a_bus_req can't be set */
++ if (iotg->hsm.a_bus_drop)
++ return -1;
++ iotg->hsm.a_bus_req = 1;
++ dev_dbg(pnw->dev, "a_bus_req = 1\n");
++ if (iotg->otg.state == OTG_STATE_A_PERIPHERAL) {
++ dev_warn(pnw->dev, "Role switch will be "
++ "performed soon, if connected OTG device "
++ "supports role switch request.\n");
++ dev_warn(pnw->dev, "It may cause data"
++ "corruption during data transfer\n");
++ }
++ }
++
++ penwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
++
++static ssize_t
++get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", pnw->iotg.hsm.a_bus_drop);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_a_bus_drop(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++
++ if (!iotg->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ iotg->hsm.a_bus_drop = 0;
++ dev_dbg(pnw->dev, "a_bus_drop = 0\n");
++ } else if (buf[0] == '1') {
++ iotg->hsm.a_bus_drop = 1;
++ iotg->hsm.a_bus_req = 0;
++ dev_dbg(pnw->dev, "a_bus_drop = 1, so a_bus_req = 0\n");
++ }
++
++ penwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
++ get_a_bus_drop, set_a_bus_drop);
++
++static ssize_t
++get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", pnw->iotg.hsm.b_bus_req);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_b_bus_req(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++
++ if (iotg->otg.default_a)
++ return -1;
++
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ iotg->hsm.b_bus_req = 0;
++ dev_dbg(pnw->dev, "b_bus_req = 0\n");
++ } else if (buf[0] == '1') {
++ iotg->hsm.b_bus_req = 1;
++ dev_dbg(pnw->dev, "b_bus_req = 1\n");
++ if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
++ dev_warn(pnw->dev, "Role switch will be "
++ "performed soon, if connected OTG device "
++ "supports role switch request.\n");
++ dev_warn(pnw->dev, "It may cause data "
++ "corruption during data transfer\n");
++ }
++ }
++
++ penwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
++
++static ssize_t
++set_a_clr_err(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++
++ if (!iotg->otg.default_a)
++ return -1;
++ if (iotg->otg.state != OTG_STATE_A_VBUS_ERR)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '1') {
++ iotg->hsm.a_clr_err = 1;
++ dev_dbg(pnw->dev, "a_clr_err = 1\n");
++ }
++
++ penwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
++
++static struct attribute *inputs_attrs[] = {
++ &dev_attr_a_bus_req.attr,
++ &dev_attr_a_bus_drop.attr,
++ &dev_attr_b_bus_req.attr,
++ &dev_attr_a_clr_err.attr,
++ NULL,
++};
++
++static struct attribute_group debug_dev_attr_group = {
++ .name = "inputs",
++ .attrs = inputs_attrs,
++};
++
++static int penwell_otg_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ unsigned long resource, len;
++ void __iomem *base = NULL;
++ int retval;
++ u32 val32;
++ struct penwell_otg *pnw;
++ char qname[] = "penwell_otg_queue";
++
++ retval = 0;
++
++ dev_dbg(&pdev->dev, "\notg controller is detected.\n");
++
++ if (pci_enable_device(pdev) < 0) {
++ retval = -ENODEV;
++ goto done;
++ }
++
++ pnw = kzalloc(sizeof *pnw, GFP_KERNEL);
++ if (pnw == NULL) {
++ retval = -ENOMEM;
++ goto done;
++ }
++ the_transceiver = pnw;
++
++ /* control register: BAR 0 */
++ resource = pci_resource_start(pdev, 0);
++ len = pci_resource_len(pdev, 0);
++ if (!request_mem_region(resource, len, driver_name)) {
++ retval = -EBUSY;
++ goto err;
++ }
++ pnw->region = 1;
++
++ base = ioremap_nocache(resource, len);
++ if (base == NULL) {
++ retval = -EFAULT;
++ goto err;
++ }
++ pnw->iotg.base = base;
++
++ if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
++ retval = -EBUSY;
++ goto err;
++ }
++ pnw->cfg_region = 1;
++
++ if (!pdev->irq) {
++ dev_dbg(&pdev->dev, "No IRQ.\n");
++ retval = -ENODEV;
++ goto err;
++ }
++
++ pnw->qwork = create_singlethread_workqueue(qname);
++ if (!pnw->qwork) {
++ dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
++ retval = -ENOMEM;
++ goto err;
++ }
++ INIT_WORK(&pnw->work, penwell_otg_work);
++ INIT_WORK(&pnw->hnp_poll_work, penwell_otg_hnp_poll_work);
++
++ /* OTG common part */
++ pnw->dev = &pdev->dev;
++ pnw->iotg.otg.dev = &pdev->dev;
++ pnw->iotg.otg.label = driver_name;
++ pnw->iotg.otg.set_host = penwell_otg_set_host;
++ pnw->iotg.otg.set_peripheral = penwell_otg_set_peripheral;
++ pnw->iotg.otg.set_power = penwell_otg_set_power;
++ pnw->iotg.otg.set_vbus = penwell_otg_set_vbus;
++ pnw->iotg.otg.start_srp = penwell_otg_start_srp;
++ pnw->iotg.set_adp_probe = NULL;
++ pnw->iotg.set_adp_sense = NULL;
++ pnw->iotg.otg.state = OTG_STATE_UNDEFINED;
++ if (otg_set_transceiver(&pnw->iotg.otg)) {
++ dev_dbg(pnw->dev, "can't set transceiver\n");
++ retval = -EBUSY;
++ goto err;
++ }
++
++ pnw->iotg.ulpi_ops.read = penwell_otg_ulpi_read;
++ pnw->iotg.ulpi_ops.write = penwell_otg_ulpi_write;
++
++ init_timer(&pnw->hsm_timer);
++ init_timer(&pnw->hnp_poll_timer);
++ init_completion(&pnw->adp.adp_comp);
++
++ ATOMIC_INIT_NOTIFIER_HEAD(&pnw->iotg.iotg_notifier);
++
++ pnw->iotg_notifier.notifier_call = penwell_otg_iotg_notify;
++ if (intel_mid_otg_register_notifier(&pnw->iotg, &pnw->iotg_notifier)) {
++ dev_dbg(pnw->dev, "Failed to register notifier\n");
++ retval = -EBUSY;
++ goto err;
++ }
++
++ pnw->msic = penwell_otg_check_msic();
++
++ penwell_otg_phy_enable(1);
++
++ reset_otg();
++ init_hsm();
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, pnw) != 0) {
++ dev_dbg(pnw->dev,
++ "request interrupt %d failed\n", pdev->irq);
++ retval = -EBUSY;
++ goto err;
++ }
++
++ /* enable OTGSC int */
++ val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
++ OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
++ writel(val32, pnw->iotg.base + CI_OTGSC);
++
++ retval = device_create_file(&pdev->dev, &dev_attr_registers);
++ if (retval < 0) {
++ dev_dbg(pnw->dev,
++ "Can't register sysfs attribute: %d\n", retval);
++ goto err;
++ }
++
++ retval = device_create_file(&pdev->dev, &dev_attr_hsm);
++ if (retval < 0) {
++ dev_dbg(pnw->dev,
++ "Can't hsm sysfs attribute: %d\n", retval);
++ goto err;
++ }
++
++ retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
++ if (retval < 0) {
++ dev_dbg(pnw->dev,
++ "Can't register sysfs attr group: %d\n", retval);
++ goto err;
++ }
++
++ if (pnw->iotg.otg.state == OTG_STATE_A_IDLE)
++ queue_work(pnw->qwork, &pnw->work);
++
++ return 0;
++
++err:
++ if (the_transceiver)
++ penwell_otg_remove(pdev);
++done:
++ return retval;
++}
++
++static void penwell_otg_remove(struct pci_dev *pdev)
++{
++ struct penwell_otg *pnw = the_transceiver;
++
++ if (pnw->qwork) {
++ flush_workqueue(pnw->qwork);
++ destroy_workqueue(pnw->qwork);
++ }
++
++ /* disable OTGSC interrupt as OTGSC doesn't change in reset */
++ writel(0, pnw->iotg.base + CI_OTGSC);
++
++ if (pdev->irq)
++ free_irq(pdev->irq, pnw);
++ if (pnw->cfg_region)
++ release_mem_region(USBCFG_ADDR, USBCFG_LEN);
++ if (pnw->iotg.base)
++ iounmap(pnw->iotg.base);
++ if (pnw->region)
++ release_mem_region(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++
++ otg_set_transceiver(NULL);
++ pci_disable_device(pdev);
++ sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
++ device_remove_file(&pdev->dev, &dev_attr_hsm);
++ device_remove_file(&pdev->dev, &dev_attr_registers);
++ kfree(pnw);
++ pnw = NULL;
++}
++
++static void transceiver_suspend(struct pci_dev *pdev)
++{
++ pci_save_state(pdev);
++ pci_set_power_state(pdev, PCI_D3hot);
++ penwell_otg_phy_low_power(1);
++}
++
++static int penwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ struct intel_mid_otg_xceiv *iotg = &pnw->iotg;
++ int ret = 0;
++
++ /* Disbale OTG interrupts */
++ penwell_otg_intr(0);
++
++ if (pdev->irq)
++ free_irq(pdev->irq, pnw);
++
++ /* Prevent more otg_work */
++ flush_workqueue(pnw->qwork);
++ destroy_workqueue(pnw->qwork);
++ pnw->qwork = NULL;
++
++ /* start actions */
++ switch (iotg->otg.state) {
++ case OTG_STATE_A_WAIT_VFALL:
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ case OTG_STATE_A_IDLE:
++ case OTG_STATE_B_IDLE:
++ case OTG_STATE_A_VBUS_ERR:
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_WAIT_VRISE:
++ penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_WAIT_BCON:
++ penwell_otg_del_timer(TA_WAIT_BCON_TMR);
++ if (pnw->iotg.stop_host)
++ pnw->iotg.stop_host(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "host driver has been stopped.\n");
++
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_HOST:
++ if (pnw->iotg.stop_host)
++ pnw->iotg.stop_host(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "host driver has been stopped.\n");
++
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_SUSPEND:
++ penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
++ penwell_otg_HABA(0);
++ if (pnw->iotg.stop_host)
++ pnw->iotg.stop_host(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "host driver has been removed.\n");
++ iotg->hsm.a_srp_det = 0;
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_PERIPHERAL:
++ penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
++
++ if (pnw->iotg.stop_peripheral)
++ pnw->iotg.stop_peripheral(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "client driver has been stopped.\n");
++
++ /* Turn off VBus */
++ iotg->otg.set_vbus(&iotg->otg, false);
++ iotg->hsm.a_srp_det = 0;
++ iotg->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_HOST:
++ if (pnw->iotg.stop_host)
++ pnw->iotg.stop_host(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "host driver has been stopped.\n");
++ iotg->hsm.b_bus_req = 0;
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_PERIPHERAL:
++ if (pnw->iotg.stop_peripheral)
++ pnw->iotg.stop_peripheral(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "client driver has been stopped.\n");
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_WAIT_ACON:
++ penwell_otg_del_timer(TB_ASE0_BRST_TMR);
++
++ penwell_otg_HAAR(0);
++
++ if (pnw->iotg.stop_host)
++ pnw->iotg.stop_host(&pnw->iotg);
++ else
++ dev_dbg(pnw->dev, "host driver has been stopped.\n");
++ iotg->hsm.b_bus_req = 0;
++ iotg->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ default:
++ dev_dbg(pnw->dev, "error state before suspend\n");
++ break;
++ }
++
++ return ret;
++}
++
++static void transceiver_resume(struct pci_dev *pdev)
++{
++ pci_restore_state(pdev);
++ pci_set_power_state(pdev, PCI_D0);
++}
++
++static int penwell_otg_resume(struct pci_dev *pdev)
++{
++ struct penwell_otg *pnw = the_transceiver;
++ int ret = 0;
++
++ transceiver_resume(pdev);
++
++ pnw->qwork = create_singlethread_workqueue("penwell_otg_queue");
++ if (!pnw->qwork) {
++ dev_dbg(pnw->dev, "cannot create penwell otg workqueue\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, pnw) != 0) {
++ dev_dbg(pnw->dev, "request irq %d failed\n", pdev->irq);
++ ret = -EBUSY;
++ goto error;
++ }
++
++ /* enable OTG interrupts */
++ penwell_otg_intr(1);
++
++ update_hsm();
++
++ penwell_update_transceiver();
++
++ return ret;
++error:
++ penwell_otg_intr(0);
++ transceiver_suspend(pdev);
++ return ret;
++}
++
++static int __init penwell_otg_init(void)
++{
++ return pci_register_driver(&otg_pci_driver);
++}
++module_init(penwell_otg_init);
++
++static void __exit penwell_otg_cleanup(void)
++{
++ pci_unregister_driver(&otg_pci_driver);
++}
++module_exit(penwell_otg_cleanup);
+--- a/drivers/watchdog/Kconfig
++++ b/drivers/watchdog/Kconfig
+@@ -504,6 +504,16 @@
+ To compile this driver as a module, choose M here: the
+ module will be called i6300esb.
+
++config INTEL_SCU_WATCHDOG
++ tristate "Intel SCU Watchdog for Mobile Platforms"
++ depends on WATCHDOG
++ depends on INTEL_SCU_IPC
++ ---help---
++ Hardware driver for the watchdog time built into the Intel SCU
++ for Intel Mobile Platforms.
++
++ To compile this driver as a module, choose M here.
++
+ config ITCO_WDT
+ tristate "Intel TCO Timer/Watchdog"
+ depends on (X86 || IA64) && PCI
+--- a/drivers/watchdog/Makefile
++++ b/drivers/watchdog/Makefile
+@@ -98,6 +98,7 @@
+ obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
+ obj-$(CONFIG_MACHZ_WDT) += machzwd.o
+ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
++obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
+
+ # M32R Architecture
+
+--- /dev/null
++++ b/drivers/watchdog/intel_scu_watchdog.c
+@@ -0,0 +1,633 @@
++/*
++ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
++ * for Intel part #(s):
++ * - AF82MP20 PCH
++ *
++ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ *
++ */
++
++#define DEBUG 1
++
++#include <linux/compiler.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/moduleparam.h>
++#include <linux/types.h>
++#include <linux/miscdevice.h>
++#include <linux/watchdog.h>
++#include <linux/fs.h>
++#include <linux/notifier.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/jiffies.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <linux/sfi.h>
++#include <linux/types.h>
++#include <asm/irq.h>
++#include <asm/atomic.h>
++
++/* See arch/x86/kernel/ipc_mrst.c */
++#include <asm/intel_scu_ipc.h>
++#include <asm/apb_timer.h>
++
++#include "intel_scu_watchdog.h"
++
++/* Bounds number of times we will retry loading time count */
++/* This retry is a work around for a silicon bug. */
++#define MAX_RETRY 16
++
++#define IPC_SET_WATCHDOG_TIMER 0xF8
++
++static DECLARE_WAIT_QUEUE_HEAD(read_wq);
++
++/* The read function (intel_scu_read) waits for the warning_flag to */
++/* be set by the watchdog interrupt handler. */
++/* When warning_flag is set intel_scu_read wakes up the user level */
++/* process, which is responsible for refreshing the watchdog timer */
++static int warning_flag;
++
++static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
++module_param(timer_margin, int, 0);
++MODULE_PARM_DESC(timer_margin,
++ "Watchdog timer margin"
++ "Time between interrupt and resetting the system"
++ "The range is from 1 to 160"
++ "This is the time for all keep alives to arrive");
++
++static int timer_set = DEFAULT_TIME;
++module_param(timer_set, int, 0);
++MODULE_PARM_DESC(timer_set,
++ "Default Watchdog timer setting"
++ "Complete cycle time"
++ "The range is from 1 to 170"
++ "This is the time for all keep alives to arrive");
++
++/* After watchdog device is closed, check force_boot. If:
++ * force_boot == 0, then force boot on next watchdog interrupt after close,
++ * force_boot == 1, then force boot immediately when device is closed.
++ */
++static int force_boot;
++module_param(force_boot, int, 0);
++MODULE_PARM_DESC(force_boot,
++ "A value of 1 means that the driver will reboot"
++ "the system immediately if the /dev/watchdog device is closed"
++ "A value of 0 means that when /dev/watchdog device is closed"
++ "the watchdog timer will be refreshed for one more interval"
++ "of length: timer_set. At the end of this interval, the"
++ "watchdog timer will reset the system."
++ );
++
++/* there is only one device in the system now; this can be made into
++ * an array in the future if we have more than one device */
++
++static struct intel_scu_watchdog_dev watchdog_device;
++
++/* Forces restart, if force_reboot is set */
++static void watchdog_fire(void)
++{
++ if (force_boot) {
++ printk(KERN_CRIT PFX "Initiating system reboot.\n");
++ emergency_restart();
++ printk(KERN_CRIT PFX "Reboot didn't ?????\n");
++ }
++
++ else {
++ printk(KERN_CRIT PFX "Immediate Reboot Disabled\n");
++ printk(KERN_CRIT PFX
++ "System will reset when watchdog timer times out!\n");
++ }
++}
++
++static int check_timer_margin(int new_margin)
++{
++ if ((new_margin < MIN_TIME_CYCLE) ||
++ (new_margin > MAX_TIME - timer_set)) {
++ pr_debug("Watchdog timer: Value of new_margin %d is "
++ "out of the range %d to %d\n",
++ new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++/*
++ * IPC operations
++ */
++static int watchdog_set_ipc(int soft_threshold, int threshold)
++{
++ u32 *ipc_wbuf;
++ u8 cbuf[16] = { '\0' };
++ int ipc_ret = 0;
++
++ ipc_wbuf = (u32 *)&cbuf;
++ ipc_wbuf[0] = soft_threshold;
++ ipc_wbuf[1] = threshold;
++
++ ipc_ret = intel_scu_ipc_command(
++ IPC_SET_WATCHDOG_TIMER,
++ 0,
++ ipc_wbuf,
++ 2,
++ NULL,
++ 0);
++
++ if (ipc_ret != 0)
++ printk(KERN_CRIT PFX "Error Setting SCU Watchdog Timer: %x\n",
++ ipc_ret);
++
++ return ipc_ret;
++};
++
++/*
++ * Intel_SCU operations
++ */
++
++/* timer interrupt handler */
++static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
++{
++ int int_status;
++ int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
++
++ pr_debug("Watchdog timer: irq, int_status: %x\n", int_status);
++
++ if (int_status != 0)
++ return IRQ_NONE;
++
++ /* has the timer been started? If not, then this is spurious */
++ if (watchdog_device.timer_started == 0) {
++ pr_debug("Watchdog timer: spurious interrupt received\n");
++ return IRQ_HANDLED;
++ }
++
++ /* wake up read to send data to user (reminder for keep alive */
++ warning_flag = 1;
++
++ /* temporarily disable the timer */
++ iowrite32(0x00000002, watchdog_device.timer_control_addr);
++
++ /* set the timer to the threshold */
++ iowrite32(watchdog_device.threshold,
++ watchdog_device.timer_load_count_addr);
++
++ /* allow the timer to run */
++ iowrite32(0x00000003, watchdog_device.timer_control_addr);
++
++ wake_up_interruptible(&read_wq);
++
++ return IRQ_HANDLED;
++}
++
++static int intel_scu_keepalive(void)
++{
++
++ /* read eoi register - clears interrupt */
++ ioread32(watchdog_device.timer_clear_interrupt_addr);
++
++ /* temporarily disable the timer */
++ iowrite32(0x00000002, watchdog_device.timer_control_addr);
++
++ /* set the timer to the soft_threshold */
++ iowrite32(watchdog_device.soft_threshold,
++ watchdog_device.timer_load_count_addr);
++
++ /* allow the timer to run */
++ iowrite32(0x00000003, watchdog_device.timer_control_addr);
++
++ return 0;
++}
++
++static int intel_scu_stop(void)
++{
++ iowrite32(0, watchdog_device.timer_control_addr);
++ return 0;
++}
++
++static int intel_scu_set_heartbeat(u32 t)
++{
++ int ipc_ret;
++ int retry_count;
++ u32 soft_value;
++ u32 hw_pre_value;
++ u32 hw_value;
++
++ watchdog_device.timer_set = t;
++ watchdog_device.threshold =
++ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
++ watchdog_device.soft_threshold =
++ (watchdog_device.timer_set - timer_margin)
++ * watchdog_device.timer_tbl_ptr->freq_hz;
++
++ pr_debug("Watchdog timer: set_heartbeat: timer freq is %d\n",
++ watchdog_device.timer_tbl_ptr->freq_hz);
++ pr_debug("Watchdog timer: set_heartbeat: timer_set is %x (hex)\n",
++ watchdog_device.timer_set);
++ pr_debug("Watchdog timer: set_hearbeat: timer_margin is %x (hex)\n",
++ timer_margin);
++ pr_debug("Watchdog timer: set_heartbeat: threshold is %x (hex)\n",
++ watchdog_device.threshold);
++ pr_debug("Watchdog timer: set_heartbeat: soft_threshold is %x (hex)\n",
++ watchdog_device.soft_threshold);
++
++ /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
++ /* watchdog timing come out right. */
++ watchdog_device.threshold =
++ watchdog_device.threshold / FREQ_ADJUSTMENT;
++ watchdog_device.soft_threshold =
++ watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
++
++ /* temporarily disable the timer */
++ iowrite32(0x00000002, watchdog_device.timer_control_addr);
++
++ /* send the threshold and soft_threshold via IPC to the processor */
++ ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
++ watchdog_device.threshold);
++
++ if (ipc_ret != 0) {
++ /* Make sure the watchdog timer is stopped */
++ intel_scu_stop();
++ return ipc_ret;
++ }
++
++ /* Soft Threshold set loop. Early versions of silicon did */
++ /* not always set this count correctly. This loop checks */
++ /* the value and retries if it was not set correctly. */
++
++ retry_count = 0;
++ soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
++ do {
++
++ /* Make sure timer is stopped */
++ intel_scu_stop();
++
++ if (MAX_RETRY < retry_count++) {
++ /* Unable to set timer value */
++ pr_err("Watchdog timer: Unable to set timer\n");
++ return -ENODEV;
++ }
++
++ /* set the timer to the soft threshold */
++ iowrite32(watchdog_device.soft_threshold,
++ watchdog_device.timer_load_count_addr);
++
++ /* read count value before starting timer */
++ hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
++ hw_pre_value = hw_pre_value & 0xFFFF0000;
++
++ /* Start the timer */
++ iowrite32(0x00000003, watchdog_device.timer_control_addr);
++
++ /* read the value the time loaded into its count reg */
++ hw_value = ioread32(watchdog_device.timer_load_count_addr);
++ hw_value = hw_value & 0xFFFF0000;
++
++
++ } while (soft_value != hw_value);
++
++ watchdog_device.timer_started = 1;
++
++ return 0;
++}
++
++/*
++ * /dev/watchdog handling
++ */
++
++static int intel_scu_open(struct inode *inode, struct file *file)
++{
++
++ /* Set flag to indicate that watchdog device is open */
++ if (test_and_set_bit(0, &watchdog_device.driver_open))
++ return -EBUSY;
++
++ /* Check for reopen of driver. Reopens are not allowed */
++ if (watchdog_device.driver_closed)
++ return -EPERM;
++
++ return nonseekable_open(inode, file);
++}
++
++static int intel_scu_release(struct inode *inode, struct file *file)
++{
++ /*
++ * This watchdog should not be closed, after the timer
++ * is started with the WDIPC_SETTIMEOUT ioctl
++ * If force_boot is set watchdog_fire() will cause an
++ * immediate reset. If force_boot is not set, the watchdog
++ * timer is refreshed for one more interval. At the end
++ * of that interval, the watchdog timer will reset the system.
++ */
++
++ if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
++ pr_debug("Watchdog timer: intel_scu_release, without open\n");
++ return -ENOTTY;
++ }
++
++ if (!watchdog_device.timer_started) {
++ /* Just close, since timer has not been started */
++ pr_debug("Watchdog timer: Closed, without starting timer\n");
++ return 0;
++ }
++
++ printk(KERN_CRIT PFX
++ "Unexpected close of /dev/watchdog!\n");
++
++ /* Since the timer was started, prevent future reopens */
++ watchdog_device.driver_closed = 1;
++
++ /* Refresh the timer for one more interval */
++ intel_scu_keepalive();
++
++ /* Reboot system (if force_boot is set) */
++ watchdog_fire();
++
++ /* We should only reach this point if force_boot is not set */
++ return 0;
++}
++
++static ssize_t intel_scu_write(struct file *file,
++ char const *data,
++ size_t len,
++ loff_t *ppos)
++{
++
++ if (watchdog_device.timer_started)
++ /* Watchdog already started, keep it alive */
++ intel_scu_keepalive();
++ else
++ /* Start watchdog with timer value set by init */
++ intel_scu_set_heartbeat(watchdog_device.timer_set);
++
++ return len;
++}
++
++static ssize_t intel_scu_read(struct file *file,
++ char __user *user_data,
++ size_t len,
++ loff_t *user_ppos)
++{
++ int result;
++ u8 buf = 0;
++
++ /* we wait for the next interrupt; if more than one */
++ /* interrupt has occurred since the last read, we */
++ /* dont care. The data is not critical. We will do */
++ /* a copy to user each time we get and interrupt */
++ /* It is up to the Watchdog daemon to be ready to */
++ /* do the read (which signifies that the driver is */
++ /* awaiting a keep alive and that a limited time */
++ /* is available for the keep alive before the system */
++ /* is rebooted by the timer */
++ if (wait_event_interruptible(read_wq, warning_flag != 0))
++ return -ERESTARTSYS;
++
++ warning_flag = 0;
++
++ /* Please note that the content of the data is irrelevent */
++ /* All that matters is that the read is available to the user */
++ result = copy_to_user(user_data, (void *)&buf, 1);
++
++ if (result != 0)
++ return -EFAULT;
++ else
++ return 1;
++
++}
++
++static long intel_scu_ioctl(struct file *file,
++ unsigned int cmd,
++ unsigned long arg)
++{
++ void __user *argp = (void __user *)arg;
++ u32 __user *p = argp;
++ u32 new_margin;
++
++
++ static const struct watchdog_info ident = {
++ .options = WDIOF_SETTIMEOUT
++ | WDIOF_KEEPALIVEPING,
++ .firmware_version = 0, /* @todo Get from SCU via
++ ipc_get_scu_fw_version()? */
++ .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
++ };
++
++ switch (cmd) {
++ case WDIOC_GETSUPPORT:
++ return copy_to_user(argp,
++ &ident,
++ sizeof(ident)) ? -EFAULT : 0;
++ case WDIOC_GETSTATUS:
++ case WDIOC_GETBOOTSTATUS:
++ return put_user(0, p);
++ case WDIOC_KEEPALIVE:
++ intel_scu_keepalive();
++
++ return 0;
++ case WDIOC_SETTIMEOUT:
++ if (get_user(new_margin, p))
++ return -EFAULT;
++
++ if (check_timer_margin(new_margin))
++ return -EINVAL;
++
++ if (intel_scu_set_heartbeat(new_margin))
++ return -EINVAL;
++ return 0;
++ case WDIOC_GETTIMEOUT:
++ return put_user(watchdog_device.soft_threshold, p);
++
++ default:
++ return -ENOTTY;
++ }
++}
++
++/*
++ * Notifier for system down
++ */
++static int intel_scu_notify_sys(struct notifier_block *this,
++ unsigned long code,
++ void *another_unused)
++{
++ if (code == SYS_DOWN || code == SYS_HALT)
++ /* Turn off the watchdog timer. */
++ intel_scu_stop();
++ return NOTIFY_DONE;
++}
++
++/*
++ * Kernel Interfaces
++ */
++static const struct file_operations intel_scu_fops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .write = intel_scu_write,
++ .read = intel_scu_read,
++ .unlocked_ioctl = intel_scu_ioctl,
++ .open = intel_scu_open,
++ .release = intel_scu_release,
++};
++
++static int __init intel_scu_watchdog_init(void)
++{
++ int ret;
++ u32 __iomem *tmp_addr;
++
++
++ /* Check boot parameters to verify that their initial values */
++ /* are in range. */
++ /* Check value of timer_set boot parameter */
++ if ((timer_set < MIN_TIME_CYCLE) ||
++ (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
++ pr_err("Watchdog timer: Value of timer_set %x (hex) "
++ "is out of range from %x to %x (hex)\n",
++ timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
++ return -EINVAL;
++ }
++
++ /* Check value of timer_margin boot parameter */
++ if (check_timer_margin(timer_margin))
++ return -EINVAL;
++
++ watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
++
++ if (watchdog_device.timer_tbl_ptr == NULL) {
++ pr_debug("Watchdog timer - Intel SCU watchdog: Timer is"
++ " not available\n");
++ return -ENODEV;
++ }
++ /* make sure the timer exists */
++ if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
++ pr_debug("Watchdog timer - Intel SCU watchdog - timer %d does"
++ " not have valid physical memory\n", sfi_mtimer_num);
++ return -ENODEV;
++ }
++
++ if (watchdog_device.timer_tbl_ptr->irq == 0) {
++ pr_debug("Watchdog timer: timer %d invalid irq\n",
++ sfi_mtimer_num);
++ return -ENODEV;
++ }
++
++ tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
++ 20);
++
++ if (tmp_addr == NULL) {
++ pr_debug("Watchdog timer: timer unable to ioremap\n");
++ return -ENOMEM;
++ }
++
++ watchdog_device.timer_load_count_addr = tmp_addr++;
++ watchdog_device.timer_current_value_addr = tmp_addr++;
++ watchdog_device.timer_control_addr = tmp_addr++;
++ watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
++ watchdog_device.timer_interrupt_status_addr = tmp_addr++;
++
++ /* Set the default time values in device structure */
++
++ watchdog_device.timer_set = timer_set;
++ watchdog_device.threshold =
++ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
++ watchdog_device.soft_threshold =
++ (watchdog_device.timer_set - timer_margin)
++ * watchdog_device.timer_tbl_ptr->freq_hz;
++
++
++ watchdog_device.intel_scu_notifier.notifier_call =
++ intel_scu_notify_sys;
++
++ ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
++ if (ret) {
++ printk(KERN_ERR PFX
++ "Watchdog timer: cannot register notifier %d)\n", ret);
++ goto register_reboot_error;
++ }
++
++ watchdog_device.miscdev.minor = WATCHDOG_MINOR;
++ watchdog_device.miscdev.name = "watchdog";
++ watchdog_device.miscdev.fops = &intel_scu_fops;
++
++ ret = misc_register(&watchdog_device.miscdev);
++ if (ret) {
++ printk(KERN_ERR PFX
++ "Watchdog timer: cannot register miscdev %d err =%d\n",
++ WATCHDOG_MINOR,
++ ret);
++ goto misc_register_error;
++ }
++
++ ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
++ watchdog_timer_interrupt,
++ IRQF_SHARED, "watchdog",
++ &watchdog_device.timer_load_count_addr);
++ if (ret) {
++ printk(KERN_ERR "Watchdog timer: error requesting irq\n");
++ printk(KERN_ERR "Watchdog timer: error value returned is %d\n",
++ ret);
++ goto request_irq_error;
++ }
++
++ /* Make sure timer is disabled before returning */
++ intel_scu_stop();
++
++ return 0;
++
++/* error cleanup */
++
++request_irq_error:
++
++ misc_deregister(&watchdog_device.miscdev);
++
++misc_register_error:
++
++ pr_debug("Watchdog timer: misc_register_error\n");
++ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
++
++register_reboot_error:
++
++ intel_scu_stop();
++
++ iounmap(watchdog_device.timer_load_count_addr);
++
++ return ret;
++}
++
++static void __exit intel_scu_watchdog_exit(void)
++{
++
++ misc_deregister(&watchdog_device.miscdev);
++ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
++ /* disable the timer */
++ iowrite32(0x00000002, watchdog_device.timer_control_addr);
++ iounmap(watchdog_device.timer_load_count_addr);
++}
++
++late_initcall(intel_scu_watchdog_init);
++module_exit(intel_scu_watchdog_exit);
++
++MODULE_AUTHOR("Intel Corporation");
++MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
++MODULE_VERSION(WDT_VER);
++
+--- /dev/null
++++ b/drivers/watchdog/intel_scu_watchdog.h
+@@ -0,0 +1,66 @@
++/*
++ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
++ * for Intel part #(s):
++ * - AF82MP20 PCH
++ *
++ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ *
++ */
++
++#ifndef __INTEL_SCU_WATCHDOG_H
++#define __INTEL_SCU_WATCHDOG_H
++
++#define PFX "Intel_SCU: "
++#define WDT_VER "0.3"
++
++/* minimum time between interrupts */
++#define MIN_TIME_CYCLE 1
++
++/* Time from warning to reboot is 2 seconds */
++#define DEFAULT_SOFT_TO_HARD_MARGIN 2
++
++#define MAX_TIME 170
++
++#define DEFAULT_TIME 5
++
++#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
++
++/* Ajustment to clock tick frequency to make timing come out right */
++#define FREQ_ADJUSTMENT 8
++
++struct intel_scu_watchdog_dev {
++ ulong driver_open;
++ ulong driver_closed;
++ u32 timer_started;
++ u32 timer_set;
++ u32 threshold;
++ u32 soft_threshold;
++ u32 __iomem *timer_load_count_addr;
++ u32 __iomem *timer_current_value_addr;
++ u32 __iomem *timer_control_addr;
++ u32 __iomem *timer_clear_interrupt_addr;
++ u32 __iomem *timer_interrupt_status_addr;
++ struct sfi_timer_table_entry *timer_tbl_ptr;
++ struct notifier_block intel_scu_notifier;
++ struct miscdevice miscdev;
++};
++
++extern int sfi_mtimer_num;
++
++/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
++#endif /* __INTEL_SCU_WATCHDOG_H */
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -124,6 +124,7 @@
+ #define DRM_MODE_ENCODER_TMDS 2
+ #define DRM_MODE_ENCODER_LVDS 3
+ #define DRM_MODE_ENCODER_TVDAC 4
++#define DRM_MODE_ENCODER_MIPI 5
+
+ struct drm_mode_get_encoder {
+ __u32 encoder_id;
+@@ -161,6 +162,7 @@
+ #define DRM_MODE_CONNECTOR_HDMIB 12
+ #define DRM_MODE_CONNECTOR_TV 13
+ #define DRM_MODE_CONNECTOR_eDP 14
++#define DRM_MODE_CONNECTOR_MIPI 15
+
+ struct drm_mode_get_connector {
+
+--- /dev/null
++++ b/include/linux/bh1770glc.h
+@@ -0,0 +1,39 @@
++#ifndef __BH1770GLC_H__
++#define __BH1770GLC_H__
++
++struct bh1770glc_platform_data {
++/* IR-Led configuration for proximity sensing */
++#define BH1770GLC_LED1 0x00
++#define BH1770GLC_LED12 0x01
++#define BH1770GLC_LED13 0x02
++#define BH1770GLC_LED123 0x03
++
++ __u8 leds;
++/* led_max_curr is a safetylimit for IR leds */
++#define BH1770GLC_LED_5mA 0
++#define BH1770GLC_LED_10mA 1
++#define BH1770GLC_LED_20mA 2
++#define BH1770GLC_LED_50mA 3
++#define BH1770GLC_LED_100mA 4
++#define BH1770GLC_LED_150mA 5
++#define BH1770GLC_LED_200mA 6
++ __u8 led_max_curr;
++ __u8 led_def_curr[3];
++
++ int (*setup_resources)(void);
++ int (*release_resources)(void);
++};
++
++/* Device name: /dev/bh1770glc_ps */
++struct bh1770glc_ps {
++ __u8 led1;
++ __u8 led2;
++ __u8 led3;
++} __attribute__((packed));
++
++/* Device name: /dev/bh1770glc_als */
++struct bh1770glc_als {
++ __u16 lux;
++} __attribute__((packed));
++
++#endif
+--- /dev/null
++++ b/include/linux/cy8ctmg110_pdata.h
+@@ -0,0 +1,10 @@
++#ifndef _LINUX_CY8CTMG110_PDATA_H
++#define _LINUX_CY8CTMG110_PDATA_H
++
++struct cy8ctmg110_pdata
++{
++ int reset_pin; /* Reset pin is wired to this GPIO (optional) */
++ int irq_pin; /* IRQ pin is wired to this GPIO */
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/i2c/cp_tm1217.h
+@@ -0,0 +1,8 @@
++#ifndef __LINUX_I2C_CP_TM1217_H
++#define __LINUX_I2C_CP_TM1217_H
++
++struct cp_tm1217_platform_data {
++ int gpio; /* If not set uses the IRQ resource 0 */
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/i2c/tc35894xbg.h
+@@ -0,0 +1,72 @@
++/*
++ * tc35894xbg.h - Configuration for TC35894XBG keypad driver.
++ *
++ * (C) Copyright 2010 Intel Corporation
++ * Author: Charlie Paul (z8cpaul@windriver.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++
++#ifndef __LINUX_TC35894XBG_H
++#define __LINUX_TC35894XBG_H
++
++#include <linux/types.h>
++
++/*
++ * Largest keycode that the chip can send, plus one,
++ * so keys can be mapped directly at the index of the
++ * TC35894XBG keycode instead of subtracting one.
++ */
++#define TC35894XBG_KEYMAP_SIZE (0x7f + 1)
++
++#define SHIFT_NEEDED (0x1000)
++
++#define KEY_EXCLAM (KEY_1 + SHIFT_NEEDED) /* '!' -> shift+1 */
++#define KEY_AT (KEY_2 + SHIFT_NEEDED) /* '@' -> shift+2 */
++#define KEY_NUMBER_SIGN (KEY_3 + SHIFT_NEEDED) /* '#' -> shift+3 */
++#define KEY_DOLLAR_SIGN (KEY_4 + SHIFT_NEEDED) /* '$' -> shift+4 */
++#define KEY_NOR (KEY_6 + SHIFT_NEEDED) /* '^' -> shift+6 */
++#define KEY_PERCENT (KEY_5 + SHIFT_NEEDED) /* '%' -> shift+5 */
++#define KEY_AMPERSAND (KEY_7 + SHIFT_NEEDED) /* '&' -> shift+7 */
++#define KEY_PLUS (KEY_EQUAL + SHIFT_NEEDED) /* '+' -> shift+= */
++
++#define KEY_BAR (KEY_BACKSLASH + SHIFT_NEEDED) /* '|' -> shift+\ */
++#define KEY_COLON (KEY_SEMICOLON + SHIFT_NEEDED) /* ':' -> shift+; */
++#define KEY_UNDERSCORE (KEY_MINUS + SHIFT_NEEDED) /* '_' -> shift+- */
++#define KEY_QUOTE_DBL (KEY_APOSTROPHE + SHIFT_NEEDED) /* '"' -> shift+' */
++
++
++#define TC_MAX_KEYMAPS (2)
++#define TC_DEFAULT_KEYMAP (0)
++#define TC_ALT_KEYMAP (1)
++#define TC35894XBG_MAX_FIFO (8)
++
++
++struct tc35894xbg_platform_data {
++
++ unsigned char debounce_time; /* Time to watch for bouncing, in ms. */
++ unsigned char settle_time; /* Idle time until sleep, in ms. */
++ unsigned char col_setting; /* Sets up ball settings in reg 0x04 */
++ unsigned char rowcol_setting; /* Sets up ball settings in reg 0x05 */
++
++ int gpio_reset; /* reset output GPIO index (-1 if not implemented) */
++ int gpio_irq; /* interrupt GPIO */
++ int keymap_size;
++ int size_x;
++ int size_y;
++ int function_key;
++ int right_shift_key;
++
++ void (*reset_ctrl)(struct i2c_client *client, int value);
++
++ int n_keymaps;
++ unsigned short keymap[TC_MAX_KEYMAPS][TC35894XBG_KEYMAP_SIZE];
++
++ /* Device name. */
++ const char *name;
++};
++
++#endif /* __LINUX_TC35894XBG_H */
+--- /dev/null
++++ b/include/linux/intel_mid_dma.h
+@@ -0,0 +1,86 @@
++/*
++ * intel_mid_dma.h - Intel MID DMA Drivers
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#ifndef __INTEL_MID_DMA_H__
++#define __INTEL_MID_DMA_H__
++
++#include <linux/dmaengine.h>
++
++/*DMA transaction width, src and dstn width would be same
++The DMA length must be width aligned,
++for 32 bit width the length must be 32 bit (4bytes) aligned only*/
++enum intel_mid_dma_width {
++ LNW_DMA_WIDTH_8BIT = 0x0,
++ LNW_DMA_WIDTH_16BIT = 0x1,
++ LNW_DMA_WIDTH_32BIT = 0x2,
++};
++
++/*DMA mode configurations*/
++enum intel_mid_dma_mode {
++ LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
++ LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/
++ LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/
++};
++
++/*DMA handshaking*/
++enum intel_mid_dma_hs_mode {
++ LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
++ LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/
++};
++
++/*Burst size configuration*/
++enum intel_mid_dma_msize {
++ LNW_DMA_MSIZE_1 = 0x0,
++ LNW_DMA_MSIZE_4 = 0x1,
++ LNW_DMA_MSIZE_8 = 0x2,
++ LNW_DMA_MSIZE_16 = 0x3,
++ LNW_DMA_MSIZE_32 = 0x4,
++ LNW_DMA_MSIZE_64 = 0x5,
++};
++
++/**
++ * struct intel_mid_dma_slave - DMA slave structure
++ *
++ * @dirn: DMA trf direction
++ * @src_width: tx register width
++ * @dst_width: rx register width
++ * @hs_mode: HW/SW handshaking mode
++ * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
++ * @src_msize: Source DMA burst size
++ * @dst_msize: Dst DMA burst size
++ * @device_instance: DMA peripheral device instance, we can have multiple
++ * peripheral device connected to single DMAC
++ */
++struct intel_mid_dma_slave {
++ enum dma_data_direction dirn;
++ enum intel_mid_dma_width src_width; /*width of DMA src txn*/
++ enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/
++ enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
++ enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
++ enum intel_mid_dma_msize src_msize; /*size if src burst*/
++ enum intel_mid_dma_msize dst_msize; /*size of dst burst*/
++ unsigned int device_instance; /*0, 1 for periphral instance*/
++};
++
++#endif /*__INTEL_MID_DMA_H__*/
+--- /dev/null
++++ b/include/linux/koski_hwid.h
+@@ -0,0 +1,25 @@
++#ifndef _KOSKI_HWID_H_
++#define _KOSKI_HWID_H_
++
++typedef struct {
++ char gpio_value;
++ char *name;
++ char id;
++} koski_id;
++
++enum {
++ KOSKI_EV2 = 0,
++ KOSKI_DV1,
++ KOSKI_DV2,
++};
++
++enum {
++ PRODUCT_KOSKI,
++ PRODUCT_SC,
++ PRODUCT_KOSKI_E
++};
++
++extern int get_koski_product_id(void);
++extern int get_koski_build_id(void);
++
++#endif
+--- /dev/null
++++ b/include/linux/leds-lp5523.h
+@@ -0,0 +1,22 @@
++#ifndef __LINUX_LP5523_H
++#define __LINUX_LP5523_H
++
++struct lp5523_led_config {
++ u8 chan_nr;
++ u8 led_current; /* mA x10, 0 if led is not connected */
++};
++
++#define LP5523_CLOCK_AUTO 0
++#define LP5523_CLOCK_INT 1
++#define LP5523_CLOCK_EXT 2
++
++struct lp5523_platform_data {
++ struct lp5523_led_config *led_config;
++ u8 num_channels;
++ u8 clock_mode;
++ int (*setup_resources)(void);
++ void (*release_resources)(void);
++ void (*enable)(bool state);
++};
++
++#endif /* __LINUX_LP5523_H */
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -111,6 +111,10 @@
+
+ /* optional callback for HC quirks */
+ void (*init_card)(struct mmc_host *host, struct mmc_card *card);
++
++ /* optional callback for HC mutex (Dekker algorithm) */
++ int (*acquire_ownership)(struct mmc_host *host);
++ void (*release_ownership)(struct mmc_host *host);
+ };
+
+ struct mmc_card;
+@@ -211,6 +215,8 @@
+
+ struct dentry *debugfs_root;
+
++ struct mutex *port_mutex;
++
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -270,6 +270,8 @@
+ unsigned int d1_support:1; /* Low power state D1 is supported */
+ unsigned int d2_support:1; /* Low power state D2 is supported */
+ unsigned int no_d1d2:1; /* Only allow D0 and D3 */
++ unsigned int mmio_always_on:1; /* disallow turning off io/mem
++ decoding during bar sizing */
+ unsigned int wakeup_prepared:1;
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
+
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2399,6 +2399,13 @@
+ #define PCI_DEVICE_ID_INTEL_82375 0x0482
+ #define PCI_DEVICE_ID_INTEL_82424 0x0483
+ #define PCI_DEVICE_ID_INTEL_82378 0x0484
++#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
++#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
++#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
++#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
++#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
++#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
++#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
+ #define PCI_DEVICE_ID_INTEL_I960 0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+ #define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
+--- /dev/null
++++ b/include/linux/pti.h
+@@ -0,0 +1,38 @@
++/*
++ * Copyright (C) Intel 2010
++ * Ken Mills <ken.k.mills@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ */
++
++#ifndef PTI_H_
++#define PTI_H_
++
++
++struct masterchannel {
++ u8 master;
++ u8 channel;
++};
++
++
++void mipi_pti_writedata(struct masterchannel *mc, u8 *cp, int count);
++struct masterchannel *mipi_request_masterchannel(u8 kerneluser);
++void mipi_release_masterchannel(struct masterchannel *mc);
++
++#define APERTURE_14 0x3800000
++#define APERTURE_LEN 0x400000
++
++#endif /*PTI_H_*/
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -186,6 +186,13 @@
+ #define PORT_ALTERA_JTAGUART 91
+ #define PORT_ALTERA_UART 92
+
++/* High Speed UART for Medfield */
++#define PORT_MFD 93
++
++/* MAX3107 */
++#define PORT_MAX3107 94
++
++
+ #ifdef __KERNEL__
+
+ #include <linux/compiler.h>
+--- /dev/null
++++ b/include/linux/serial_mfd.h
+@@ -0,0 +1,47 @@
++#ifndef _SERIAL_MFD_H_
++#define _SERIAL_MFD_H_
++
++/* HW register offset definition */
++#define UART_FOR 0x08
++#define UART_PS 0x0C
++#define UART_MUL 0x0D
++#define UART_DIV 0x0E
++
++#define HSU_GBL_IEN 0x0
++#define HSU_GBL_IST 0x4
++
++#define HSU_GBL_INT_BIT_PORT0 0x0
++#define HSU_GBL_INT_BIT_PORT1 0x1
++#define HSU_GBL_INT_BIT_PORT2 0x2
++#define HSU_GBL_INT_BIT_IRI 0x3
++#define HSU_GBL_INT_BIT_HDLC 0x4
++#define HSU_GBL_INT_BIT_DMA 0x5
++
++#define HSU_GBL_ISR 0x8
++#define HSU_GBL_DMASR 0x400
++#define HSU_GBL_DMAISR 0x404
++
++#define HSU_PORT_REG_OFFSET 0x80
++#define HSU_PORT0_REG_OFFSET 0x80
++#define HSU_PORT1_REG_OFFSET 0x100
++#define HSU_PORT2_REG_OFFSET 0x180
++#define HSU_PORT_REG_LENGTH 0x80
++
++#define HSU_DMA_CHANS_REG_OFFSET 0x500
++#define HSU_DMA_CHANS_REG_LENGTH 0x40
++
++#define HSU_CH_SR 0x0 /* channel status reg */
++#define HSU_CH_CR 0x4 /* control reg */
++#define HSU_CH_DCR 0x8 /* descriptor control reg */
++#define HSU_CH_BSR 0x10 /* max fifo buffer size reg */
++#define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */
++#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
++#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
++#define HSU_CH_D1SAR 0x28
++#define HSU_CH_D1TSR 0x2C
++#define HSU_CH_D2SAR 0x30
++#define HSU_CH_D2TSR 0x34
++#define HSU_CH_D3SAR 0x38
++#define HSU_CH_D3TSR 0x3C
++
++#endif
+--- a/include/linux/serial_reg.h
++++ b/include/linux/serial_reg.h
+@@ -221,8 +221,24 @@
+ #define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */
+ #define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */
+
++/*
++ * Intel MID on-chip HSU (High Speed UART) defined bits
++ */
++#define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */
++#define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */
++#define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */
++#define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */
++
++#define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */
++#define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */
++#define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */
++#define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */
+
++#define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */
++#define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */
+
++#define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */
++#define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */
+
+ /*
+ * These register definitions are for the 16C950
+--- /dev/null
++++ b/include/linux/sfi_processor.h
+@@ -0,0 +1,74 @@
++/*
++ * sfi_processor.h
++ * Copyright (c) 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __SFI_PROCESSOR_H__
++#define __SFI_PROCESSOR_H__
++
++#include <linux/sfi.h>
++#include <linux/cpuidle.h>
++
++struct sfi_processor_power {
++ struct cpuidle_device dev;
++ u32 default_state;
++ int count;
++ struct cpuidle_state *states;
++ struct sfi_cstate_table_entry *sfi_cstates;
++};
++
++struct sfi_processor_flags {
++ u8 valid;
++ u8 power;
++};
++
++struct sfi_processor {
++ u32 id;
++ struct sfi_processor_flags flags;
++ struct sfi_processor_power power;
++ struct sfi_processor_performance *performance;
++};
++
++/* Performance management */
++struct sfi_processor_px {
++ u32 core_frequency; /* megahertz */
++ u32 transition_latency; /* microseconds */
++ u32 control; /* control value */
++ u32 status; /* success indicator */
++};
++
++struct sfi_processor_performance {
++ unsigned int state;
++ unsigned int state_count;
++ struct sfi_processor_px *states;
++ cpumask_var_t shared_cpu_map;
++ unsigned int shared_type;
++};
++
++/* for communication between multiple parts of the processor kernel module */
++DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
++
++int sfi_processor_power_init(struct sfi_processor *pr);
++int sfi_processor_power_exit(struct sfi_processor *pr);
++extern int sfi_processor_register_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu);
++extern void sfi_processor_unregister_performance(struct
++ sfi_processor_performance
++ *performance,
++ unsigned int cpu);
++
++#endif /*__SFI_PROCESSOR_H__*/
+--- a/include/linux/spi/dw_spi.h
++++ b/include/linux/spi/dw_spi.h
+@@ -1,5 +1,6 @@
+ #ifndef DW_SPI_HEADER_H
+ #define DW_SPI_HEADER_H
++
+ #include <linux/io.h>
+
+ /* Bit fields in CTRLR0 */
+@@ -80,6 +81,13 @@
+ though only low 16 bits matters */
+ } __packed;
+
++struct dw_spi;
++struct dw_spi_dma_ops {
++ int (*dma_init)(struct dw_spi *dws);
++ void (*dma_exit)(struct dw_spi *dws);
++ int (*dma_transfer)(struct dw_spi *dws, int cs_change);
++};
++
+ struct dw_spi {
+ struct spi_master *master;
+ struct spi_device *cur_dev;
+@@ -141,6 +149,9 @@
+ u64 rx_param;
+ struct device *dma_dev;
+ dma_addr_t dma_addr;
++ struct dw_spi_dma_ops *dma_ops;
++ void *dma_priv; /* platform relate info */
++ struct pci_dev *dmac;
+
+ /* Bus interface info */
+ void *priv;
+@@ -214,4 +225,8 @@
+ extern void dw_spi_remove_host(struct dw_spi *dws);
+ extern int dw_spi_suspend_host(struct dw_spi *dws);
+ extern int dw_spi_resume_host(struct dw_spi *dws);
++extern void dw_spi_xfer_done(struct dw_spi *dws);
++
++/* platform related setup */
++extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
+ #endif /* DW_SPI_HEADER_H */
+--- /dev/null
++++ b/include/linux/spi/emp_modem.h
+@@ -0,0 +1,13 @@
++#ifndef LINUX_EMP_MODEM_H
++#define LINUX_EMP_MODEM_H
++
++struct emp_modem_platform_data {
++ unsigned short intr; /* interrupt pin number */
++ unsigned short cwr;
++ unsigned short awr;
++ unsigned short serven;
++ unsigned short resout;
++ unsigned short rst;
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/spi/ifx_gps.h
+@@ -0,0 +1,9 @@
++#ifndef LINUX_IFX_GPS_H
++#define LINUX_IFX_GPS_H
++
++struct ifx_gps_platform_data {
++ unsigned short pd; /* power disable pin number ? */
++ unsigned short rst; /* reset pin number */
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/spi/ifx_modem.h
+@@ -0,0 +1,13 @@
++#ifndef LINUX_IFX_MODEM_H
++#define LINUX_IFX_MODEM_H
++
++struct ifx_modem_platform_data {
++ unsigned short rst_out; /* modem reset out */
++ unsigned short pwr_on; /* power on */
++ unsigned short rst_pmu; /* reset modem */
++ unsigned short tx_pwr; /* modem power threshold */
++ unsigned short srdy; /* SRDY */
++ unsigned short mrdy; /* MRDY */
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/spi/intel_mid_ssp_spi.h
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (C) Intel 2009
++ * Ken Mills <ken.k.mills@intel.com>
++ * Sylvain Centelles <sylvain.centelles@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++#ifndef INTEL_MID_SSP_SPI_H_
++#define INTEL_MID_SSP_SPI_H_
++
++/* spi_board_info.controller_data for SPI slave devices,
++ * copied to spi_device.platform_data ... mostly for dma tuning
++ */
++struct intel_mid_ssp_spi_chip {
++ u8 tx_threshold;
++ u8 rx_threshold;
++ u32 timeout;
++ u8 enable_loopback;
++ u8 dma_enabled;
++};
++
++
++#define SPI_DIB_NAME_LEN 16
++#define SPI_DIB_SPEC_INFO_LEN 10
++
++struct spi_dib_header {
++ u32 signature;
++ u32 length;
++ u8 rev;
++ u8 checksum;
++ u8 dib[0];
++} __attribute__((packed));
++
++#endif /*INTEL_MID_SSP_SPI_H_*/
+--- /dev/null
++++ b/include/linux/spi/intel_pmic_gpio.h
+@@ -0,0 +1,15 @@
++#ifndef LINUX_SPI_INTEL_PMIC_H
++#define LINUX_SPI_INTEL_PMIC_H
++
++struct intel_pmic_gpio_platform_data {
++ /* the first IRQ of the chip */
++ unsigned irq_base;
++ /* number assigned to the first GPIO */
++ unsigned gpio_base;
++ /* sram address for gpiointr register, the langwell chip will map
++ * the PMIC spi GPIO expander's GPIOINTR register in sram.
++ */
++ unsigned gpiointr;
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/spi/opt_modem.h
+@@ -0,0 +1,11 @@
++#ifndef LINUX_OPT_MODEM_H
++#define LINUX_OPT_MODEM_H
++
++struct opt_modem_platform_data {
++ unsigned short intr; /* interrupt pin number */
++ unsigned short wake; /* wakeup pin number */
++ unsigned short dis; /* power on/off pin number ? */
++ unsigned short rst; /* reset pin number */
++};
++
++#endif
+--- /dev/null
++++ b/include/linux/spi/pw_spi3.h
+@@ -0,0 +1,135 @@
++/*
++ * Copyright (C) Intel 2010
++ * Ken Mills <ken.k.mills@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ */
++#ifndef PW_SPI3_H_
++#define PW_SPI3_H_
++
++
++/*
++ * Penwell SSP register definitions
++ */
++
++#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
++#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
++#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
++#define SSCR0_Motorola (0x0 << 4) /* Motorola's SPI mode */
++#define SSCR0_ECS (1 << 6) /* External clock select */
++#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
++
++
++#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
++#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
++#define SSCR0_EDSS (1 << 20) /* Extended data size select */
++#define SSCR0_NCS (1 << 21) /* Network clock select */
++#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun int mask */
++#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun int mask */
++#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
++#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
++#define SSCR0_ADC (1 << 30) /* Audio clock select */
++#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
++
++
++#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
++#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
++#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
++#define SSCR1_SPO (1 << 3) /* SSPSCLK polarity setting */
++#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
++#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
++#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
++#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
++#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
++#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
++
++#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
++#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
++#define SSSR_BSY (1 << 4) /* SSP Busy */
++#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */
++#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
++#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
++#define SSSR_TFL (0x0f00) /* Transmit FIFO Level (mask) */
++#define SSSR_RFL (0xf000) /* Receive FIFO Level (mask) */
++
++#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Int Mask */
++#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run int Mask */
++#define SSCR0_NCS (1 << 21) /* Network Clock Select */
++#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */
++
++#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
++#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
++#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
++#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
++#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
++#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
++#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
++#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
++#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
++#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
++#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
++#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
++#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
++#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
++#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
++#define SSCR1_PINTE (1 << 18) /* Trailing Byte Interupt Enable */
++#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
++#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
++#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
++
++#define SSSR_BCE (1 << 23) /* Bit Count Error */
++#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
++#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
++#define SSSR_EOC (1 << 20) /* End Of Chain */
++#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
++#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
++
++#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
++#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
++#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
++#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
++#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
++#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
++#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
++#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
++#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
++
++#define SSCR0 0x00
++#define SSCR1 0x04
++#define SSSR 0x08
++#define SSITR 0x0c
++#define SSDR 0x10
++#define SSTO 0x28
++#define SSPSP 0x2c
++#define SYSCFG 0x20bc0
++
++
++/* spi_board_info.controller_data for SPI slave devices,
++ * copied to spi_device.platform_data ... mostly for dma tuning
++ */
++struct pnwl_spi3_chip {
++ u8 tx_threshold;
++ u8 rx_threshold;
++ u8 dma_burst_size;
++ u32 timeout;
++ u8 enable_dma;
++ u8 poll_mode;
++ u8 enable_loopback;
++ u16 extra_data[5];
++};
++
++
++#endif /* PW_SPI3_H_ */
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -204,6 +204,7 @@
+ /**
+ * struct spi_master - interface to SPI master controller
+ * @dev: device interface to this driver
++ * @list: link with the global spi_master list
+ * @bus_num: board-specific (and often SOC-specific) identifier for a
+ * given SPI controller.
+ * @num_chipselect: chipselects are used to distinguish individual
+@@ -235,6 +236,8 @@
+ struct spi_master {
+ struct device dev;
+
++ struct list_head list;
++
+ /* other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. usually that simplifies to being SOC-specific.
+ * example: one SOC has three SPI controllers, numbered 0..2,
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -49,6 +49,9 @@
+ #define N_V253 19 /* Codec control over voice modem */
+ #define N_CAIF 20 /* CAIF protocol for talking to modems */
+ #define N_GSM0710 21 /* GSM 0710 Mux */
++#define N_PTIR 22 /* PTI cJTAG data routing for MIPI P1149.7 */
++
++#define N_IFX_SPI 29 /* Mux mode for Infineon modems */
+
+ /*
+ * This character is the same as _POSIX_VDISABLE: it cannot be used as
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1577,6 +1577,7 @@
+ #define USB_DEVICE_REMOVE 0x0002
+ #define USB_BUS_ADD 0x0003
+ #define USB_BUS_REMOVE 0x0004
++
+ extern void usb_register_notify(struct notifier_block *nb);
+ extern void usb_unregister_notify(struct notifier_block *nb);
+
+--- a/include/linux/usb/ehci_def.h
++++ b/include/linux/usb/ehci_def.h
+@@ -39,6 +39,12 @@
+ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
++/* EHCI 1.1 addendum */
++#define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19))
++#define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18))
++#define HCC_LPM(p) ((p)&(1 << 17))
++#define HCC_HW_PREFETCH(p) ((p)&(1 << 16))
++
+ #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
+ #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
+ #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
+@@ -54,6 +60,13 @@
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
++
++/* EHCI 1.1 addendum */
++#define CMD_HIRD (0xf<<24) /* host initiated resume duration */
++#define CMD_PPCEE (1<<15) /* per port change event enable */
++#define CMD_FSP (1<<14) /* fully synchronized prefetch */
++#define CMD_ASPE (1<<13) /* async schedule prefetch enable */
++#define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */
+ /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+ #define CMD_PARK (1<<11) /* enable "park" on async qh */
+ #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+@@ -67,6 +80,7 @@
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
++#define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */
+ #define STS_ASS (1<<15) /* Async Schedule Status */
+ #define STS_PSS (1<<14) /* Periodic Schedule Status */
+ #define STS_RECL (1<<13) /* Reclamation */
+@@ -100,6 +114,14 @@
+
+ /* PORTSC: offset 0x44 */
+ u32 port_status[0]; /* up to N_PORTS */
++/* EHCI 1.1 addendum */
++#define PORTSC_SUSPEND_STS_ACK 0
++#define PORTSC_SUSPEND_STS_NYET 1
++#define PORTSC_SUSPEND_STS_STALL 2
++#define PORTSC_SUSPEND_STS_ERR 3
++
++#define PORT_DEV_ADDR (0x7f<<25) /* device address */
++#define PORT_SSTS (0x3<<23) /* suspend status */
+ /* 31:23 reserved */
+ #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
+ #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
+@@ -115,6 +137,7 @@
+ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+ /* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+ /* 9 reserved */
++#define PORT_LPM (1<<9) /* LPM transaction */
+ #define PORT_RESET (1<<8) /* reset port */
+ #define PORT_SUSPEND (1<<7) /* suspend port */
+ #define PORT_RESUME (1<<6) /* resume it */
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -104,6 +104,8 @@
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
++ unsigned has_sram:1; /* Local SRAM for caching */
++ unsigned sram_no_payload:1; /* sram not for payload */
+
+ int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+@@ -148,6 +150,13 @@
+ * (ohci 32, uhci 1024, ehci 256/512/1024).
+ */
+
++#ifdef CONFIG_USB_OTG
++ /* some otg HCDs need this to get USB_DEVICE_ADD and USB_DEVICE_REMOVE
++ * from root hub, we do not want to use USB notification chain, since
++ * it would be a over kill to use high level notification.
++ */
++ void (*otg_notify) (struct usb_device *udev, unsigned action);
++#endif
+ /* The HC driver's private data is stored at the end of
+ * this structure.
+ */
+@@ -299,6 +308,10 @@
+ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
+ int (*reset_device)(struct usb_hcd *, struct usb_device *);
++ /* Notifies the HCD after a device is connected and its
++ * address is set
++ */
++ int (*update_device)(struct usb_hcd *, struct usb_device *);
+ };
+
+ extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
+--- /dev/null
++++ b/include/linux/usb/intel_mid_otg.h
+@@ -0,0 +1,180 @@
++/*
++ * Intel MID (Langwell/Penwell) USB OTG Transceiver driver
++ * Copyright (C) 2008 - 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __INTEL_MID_OTG_H
++#define __INTEL_MID_OTG_H
++
++#include <linux/pm.h>
++#include <linux/usb/otg.h>
++#include <linux/notifier.h>
++
++struct intel_mid_otg_xceiv;
++
++/* This is a common data structure for Intel MID platform to
++ * save values of the OTG state machine */
++struct otg_hsm {
++ /* Input */
++ int a_bus_resume;
++ int a_bus_suspend;
++ int a_conn;
++ int a_sess_vld;
++ int a_srp_det;
++ int a_vbus_vld;
++ int b_bus_resume;
++ int b_bus_suspend;
++ int b_conn;
++ int b_se0_srp;
++ int b_ssend_srp;
++ int b_sess_end;
++ int b_sess_vld;
++ int id;
++/* id values */
++#define ID_B 0x05
++#define ID_A 0x04
++#define ID_ACA_C 0x03
++#define ID_ACA_B 0x02
++#define ID_ACA_A 0x01
++ int power_up;
++ int adp_change;
++ int test_device;
++
++ /* Internal variables */
++ int a_set_b_hnp_en;
++ int b_srp_done;
++ int b_hnp_enable;
++ int hnp_poll_enable;
++
++ /* Timeout indicator for timers */
++ int a_wait_vrise_tmout;
++ int a_wait_bcon_tmout;
++ int a_aidl_bdis_tmout;
++ int a_bidl_adis_tmout;
++ int a_bidl_adis_tmr;
++ int a_wait_vfall_tmout;
++ int b_ase0_brst_tmout;
++ int b_bus_suspend_tmout;
++ int b_srp_init_tmout;
++ int b_srp_fail_tmout;
++ int b_srp_fail_tmr;
++ int b_adp_sense_tmout;
++
++ /* Informative variables */
++ int a_bus_drop;
++ int a_bus_req;
++ int a_clr_err;
++ int b_bus_req;
++ int a_suspend_req;
++ int b_bus_suspend_vld;
++
++ /* Output */
++ int drv_vbus;
++ int loc_conn;
++ int loc_sof;
++
++ /* Others */
++ int vbus_srp_up;
++};
++
++/* must provide ULPI access function to read/write registers implemented in
++ * ULPI address space */
++struct iotg_ulpi_access_ops {
++ int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val);
++ int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val);
++};
++
++#define OTG_A_DEVICE 0x0
++#define OTG_B_DEVICE 0x1
++
++/*
++ * the Intel MID (Langwell/Penwell) otg transceiver driver needs to interact
++ * with device and host drivers to implement the USB OTG related feature. More
++ * function members are added based on otg_transceiver data structure for this
++ * purpose.
++ */
++struct intel_mid_otg_xceiv {
++ struct otg_transceiver otg;
++ struct otg_hsm hsm;
++
++ /* base address */
++ void __iomem *base;
++
++ /* ops to access ulpi */
++ struct iotg_ulpi_access_ops ulpi_ops;
++
++ /* atomic notifier for interrupt context */
++ struct atomic_notifier_head iotg_notifier;
++
++ /* start/stop USB Host function */
++ int (*start_host)(struct intel_mid_otg_xceiv *iotg);
++ int (*stop_host)(struct intel_mid_otg_xceiv *iotg);
++
++ /* start/stop USB Peripheral function */
++ int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg);
++ int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg);
++
++ /* start/stop ADP sense/probe function */
++ int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg,
++ bool enabled, int dev);
++ int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg,
++ bool enabled);
++
++#ifdef CONFIG_PM
++ /* suspend/resume USB host function */
++ int (*suspend_host)(struct intel_mid_otg_xceiv *iotg,
++ pm_message_t message);
++ int (*resume_host)(struct intel_mid_otg_xceiv *iotg);
++
++ int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg,
++ pm_message_t message);
++ int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg);
++#endif
++
++};
++static inline
++struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct otg_transceiver *otg)
++{
++ return container_of(otg, struct intel_mid_otg_xceiv, otg);
++}
++
++#define MID_OTG_NOTIFY_CONNECT 0x0001
++#define MID_OTG_NOTIFY_DISCONN 0x0002
++#define MID_OTG_NOTIFY_HSUSPEND 0x0003
++#define MID_OTG_NOTIFY_HRESUME 0x0004
++#define MID_OTG_NOTIFY_CSUSPEND 0x0005
++#define MID_OTG_NOTIFY_CRESUME 0x0006
++#define MID_OTG_NOTIFY_HOSTADD 0x0007
++#define MID_OTG_NOTIFY_HOSTREMOVE 0x0008
++#define MID_OTG_NOTIFY_CLIENTADD 0x0009
++#define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a
++
++static inline int
++intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg,
++ struct notifier_block *nb)
++{
++ return atomic_notifier_chain_register(&iotg->iotg_notifier, nb);
++}
++
++static inline void
++intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg,
++ struct notifier_block *nb)
++{
++ atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb);
++}
++
++#endif /* __INTEL_MID_OTG_H */
+--- /dev/null
++++ b/include/linux/usb/langwell_otg.h
+@@ -0,0 +1,139 @@
++/*
++ * Intel Langwell USB OTG transceiver driver
++ * Copyright (C) 2008 - 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __LANGWELL_OTG_H
++#define __LANGWELL_OTG_H
++
++#include <linux/usb/intel_mid_otg.h>
++
++#define CI_USBCMD 0x30
++# define USBCMD_RST BIT(1)
++# define USBCMD_RS BIT(0)
++#define CI_USBSTS 0x34
++# define USBSTS_SLI BIT(8)
++# define USBSTS_URI BIT(6)
++# define USBSTS_PCI BIT(2)
++#define CI_PORTSC1 0x74
++# define PORTSC_PP BIT(12)
++# define PORTSC_LS (BIT(11) | BIT(10))
++# define PORTSC_SUSP BIT(7)
++# define PORTSC_CCS BIT(0)
++#define CI_HOSTPC1 0xb4
++# define HOSTPC1_PHCD BIT(22)
++#define CI_OTGSC 0xf4
++# define OTGSC_DPIE BIT(30)
++# define OTGSC_1MSE BIT(29)
++# define OTGSC_BSEIE BIT(28)
++# define OTGSC_BSVIE BIT(27)
++# define OTGSC_ASVIE BIT(26)
++# define OTGSC_AVVIE BIT(25)
++# define OTGSC_IDIE BIT(24)
++# define OTGSC_DPIS BIT(22)
++# define OTGSC_1MSS BIT(21)
++# define OTGSC_BSEIS BIT(20)
++# define OTGSC_BSVIS BIT(19)
++# define OTGSC_ASVIS BIT(18)
++# define OTGSC_AVVIS BIT(17)
++# define OTGSC_IDIS BIT(16)
++# define OTGSC_DPS BIT(14)
++# define OTGSC_1MST BIT(13)
++# define OTGSC_BSE BIT(12)
++# define OTGSC_BSV BIT(11)
++# define OTGSC_ASV BIT(10)
++# define OTGSC_AVV BIT(9)
++# define OTGSC_ID BIT(8)
++# define OTGSC_HABA BIT(7)
++# define OTGSC_HADP BIT(6)
++# define OTGSC_IDPU BIT(5)
++# define OTGSC_DP BIT(4)
++# define OTGSC_OT BIT(3)
++# define OTGSC_HAAR BIT(2)
++# define OTGSC_VC BIT(1)
++# define OTGSC_VD BIT(0)
++# define OTGSC_INTEN_MASK (0x7f << 24)
++# define OTGSC_INT_MASK (0x5f << 24)
++# define OTGSC_INTSTS_MASK (0x7f << 16)
++#define CI_USBMODE 0xf8
++# define USBMODE_CM (BIT(1) | BIT(0))
++# define USBMODE_IDLE 0
++# define USBMODE_DEVICE 0x2
++# define USBMODE_HOST 0x3
++#define USBCFG_ADDR 0xff10801c
++#define USBCFG_LEN 4
++# define USBCFG_VBUSVAL BIT(14)
++# define USBCFG_AVALID BIT(13)
++# define USBCFG_BVALID BIT(12)
++# define USBCFG_SESEND BIT(11)
++
++#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
++
++enum langwell_otg_timer_type {
++ TA_WAIT_VRISE_TMR,
++ TA_WAIT_BCON_TMR,
++ TA_AIDL_BDIS_TMR,
++ TB_ASE0_BRST_TMR,
++ TB_SE0_SRP_TMR,
++ TB_SRP_INIT_TMR,
++ TB_SRP_FAIL_TMR,
++ TB_BUS_SUSPEND_TMR
++};
++
++#define TA_WAIT_VRISE 100
++#define TA_WAIT_BCON 30000
++#define TA_AIDL_BDIS 15000
++#define TB_ASE0_BRST 5000
++#define TB_SE0_SRP 2
++#define TB_SRP_INIT 100
++#define TB_SRP_FAIL 5500
++#define TB_BUS_SUSPEND 500
++
++struct langwell_otg_timer {
++ unsigned long expires; /* Number of count increase to timeout */
++ unsigned long count; /* Tick counter */
++ void (*function)(unsigned long); /* Timeout function */
++ unsigned long data; /* Data passed to function */
++ struct list_head list;
++};
++
++struct langwell_otg {
++ struct intel_mid_otg_xceiv iotg;
++ struct device *dev;
++
++ void __iomem *usbcfg; /* SCCBUSB config Reg */
++
++ unsigned region;
++ unsigned cfg_region;
++
++ struct work_struct work;
++ struct workqueue_struct *qwork;
++ struct timer_list hsm_timer;
++
++ spinlock_t lock;
++ spinlock_t wq_lock;
++
++ struct notifier_block iotg_notifier;
++};
++
++static inline
++struct langwell_otg *mid_xceiv_to_lnw(struct intel_mid_otg_xceiv *iotg)
++{
++ return container_of(iotg, struct langwell_otg, iotg);
++}
++
++#endif /* __LANGWELL_OTG_H__ */
+--- a/include/linux/usb/langwell_udc.h
++++ b/include/linux/usb/langwell_udc.h
+@@ -1,5 +1,5 @@
+ /*
+- * Intel Langwell USB Device Controller driver
++ * Intel Langwell/Penwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+@@ -306,5 +306,11 @@
+ #define EPCTRL_RXS BIT(0) /* RX endpoint STALL */
+ } __attribute__ ((packed));
+
++
++/* export function declaration */
++
++/* gets the maximum power consumption */
++extern int langwell_udc_maxpower(int *mA);
++
+ #endif /* __LANGWELL_UDC_H */
+
+--- /dev/null
++++ b/include/linux/usb/penwell_otg.h
+@@ -0,0 +1,277 @@
++/*
++ * Intel Penwell USB OTG transceiver driver
++ * Copyright (C) 2009 - 2010, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __PENWELL_OTG_H__
++#define __PENWELL_OTG_H__
++
++#include <linux/usb/intel_mid_otg.h>
++
++#define CI_USBCMD 0x30
++# define USBCMD_RST BIT(1)
++# define USBCMD_RS BIT(0)
++#define CI_USBSTS 0x34
++# define USBSTS_SLI BIT(8)
++# define USBSTS_URI BIT(6)
++# define USBSTS_PCI BIT(2)
++#define CI_ULPIVP 0x60
++# define ULPI_WU BIT(31)
++# define ULPI_RUN BIT(30)
++# define ULPI_RW BIT(29)
++# define ULPI_SS BIT(27)
++# define ULPI_PORT (BIT(26) | BIT(25) | BIT(24))
++# define ULPI_ADDR (0xff << 16)
++# define ULPI_DATRD (0xff << 8)
++# define ULPI_DATWR (0xff << 0)
++#define CI_PORTSC1 0x74
++# define PORTSC_PP BIT(12)
++# define PORTSC_LS (BIT(11) | BIT(10))
++# define PORTSC_SUSP BIT(7)
++# define PORTSC_CCS BIT(0)
++#define CI_HOSTPC1 0xb4
++# define HOSTPC1_PHCD BIT(22)
++#define CI_OTGSC 0xf4
++# define OTGSC_DPIE BIT(30)
++# define OTGSC_1MSE BIT(29)
++# define OTGSC_BSEIE BIT(28)
++# define OTGSC_BSVIE BIT(27)
++# define OTGSC_ASVIE BIT(26)
++# define OTGSC_AVVIE BIT(25)
++# define OTGSC_IDIE BIT(24)
++# define OTGSC_DPIS BIT(22)
++# define OTGSC_1MSS BIT(21)
++# define OTGSC_BSEIS BIT(20)
++# define OTGSC_BSVIS BIT(19)
++# define OTGSC_ASVIS BIT(18)
++# define OTGSC_AVVIS BIT(17)
++# define OTGSC_IDIS BIT(16)
++# define OTGSC_DPS BIT(14)
++# define OTGSC_1MST BIT(13)
++# define OTGSC_BSE BIT(12)
++# define OTGSC_BSV BIT(11)
++# define OTGSC_ASV BIT(10)
++# define OTGSC_AVV BIT(9)
++# define OTGSC_ID BIT(8)
++# define OTGSC_HABA BIT(7)
++# define OTGSC_HADP BIT(6)
++# define OTGSC_IDPU BIT(5)
++# define OTGSC_DP BIT(4)
++# define OTGSC_OT BIT(3)
++# define OTGSC_HAAR BIT(2)
++# define OTGSC_VC BIT(1)
++# define OTGSC_VD BIT(0)
++#define CI_USBMODE 0xf8
++# define USBMODE_CM (BIT(1) | BIT(0))
++# define USBMODE_IDLE 0
++# define USBMODE_DEVICE 0x2
++# define USBMODE_HOST 0x3
++#define USBCFG_ADDR 0xff10801c
++#define USBCFG_LEN 4
++# define USBCFG_VBUSVAL BIT(14)
++# define USBCFG_AVALID BIT(13)
++# define USBCFG_BVALID BIT(12)
++# define USBCFG_SESEND BIT(11)
++
++#define OTGSC_INTEN_MASK \
++ (OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE \
++ | OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE)
++
++#define OTGSC_INTSTS_MASK \
++ (OTGSC_DPIS | OTGSC_BSEIS | OTGSC_BSVIS \
++ | OTGSC_ASVIS | OTGSC_AVVIS | OTGSC_IDIS)
++
++#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
++
++#define HOST_REQUEST_FLAG BIT(0)
++
++/* MSIC register for vbus power control */
++#define MSIC_ID 0x00
++# define ID0_VENDID0 (BIT(7) | BIT(6))
++#define MSIC_ID1 0x01
++# define ID1_VENDID1 (BIT(7) | BIT(6))
++#define MSIC_VUSB330CNT 0xd4
++#define MSIC_VOTGCNT 0xdf
++# define VOTGEN BIT(7)
++# define VOTGRAMP BIT(4)
++#define MSIC_SPWRSRINT1 0x193
++# define SUSBCHPDET BIT(6)
++# define SUSBDCDET BIT(2)
++# define MSIC_SPWRSRINT1_MASK (BIT(6) | BIT(2))
++# define SPWRSRINT1_CHRG_PORT BIT(6)
++# define SPWRSRINT1_HOST_PORT 0
++# define SPWRSRINT1_DEDT_CHRG (BIT(6) | BIT(2))
++#define MSIC_IS4SET 0x2c8 /* Intel Specific */
++# define IS4_CHGDSERXDPINV BIT(5)
++#define MSIC_OTGCTRLSET 0x340
++#define MSIC_OTGCTRLCLR 0x341
++# define DMPULLDOWNCLR BIT(2)
++# define DPPULLDOWNCLR BIT(1)
++#define MSIC_PWRCTRLSET 0x342
++# define DPWKPUENSET BIT(4)
++# define SWCNTRLSET BIT(0)
++#define MSIC_PWRCTRLCLR 0x343
++# define DPVSRCENCLR BIT(6)
++# define SWCNTRLCLR BIT(0)
++#define MSIC_FUNCTRLSET 0x344
++# define OPMODESET0 BIT(3)
++#define MSIC_FUNCTRLCLR 0x345
++# define OPMODECLR1 BIT(4)
++#define MSIC_VS3SET 0x346 /* Vendor Specific */
++# define SWUSBDETSET BIT(4)
++# define DATACONENSET BIT(3)
++#define MSIC_VS3CLR 0x347
++# define SWUSBDETCLR BIT(4)
++# define DATACONENCLR BIT(3)
++#define MSIC_ULPIACCESSMODE 0x348
++# define SPIMODE BIT(0)
++
++/* MSIC TI implementation for ADP/ACA */
++#define ULPI_TI_VS2 0x83
++# define TI_ID_FLOAT_STS BIT(4)
++# define TI_ID_RARBRC_STS(d) (((d)>>2)&3)
++# define TI_ID_RARBRC_STS_MASK (BIT(3) | BIT(2))
++# define TI_ID_RARBRC_NONE 0
++# define TI_ID_RARBRC_A 1
++# define TI_ID_RARBRC_B 2
++# define TI_ID_RARBRC_C 3
++# define TI_ADP_INT_STS BIT(1)
++#define ULPI_TI_VS4 0x88
++# define TI_ACA_DET_EN BIT(6)
++#define ULPI_TI_VS5 0x8b
++# define TI_ADP_INT_EN BIT(7)
++# define TI_ID_FLOAT_EN BIT(5)
++# define TI_ID_RES_EN BIT(4)
++#define ULPI_TI_VS6 0x8e
++# define TI_HS_TXPREN BIT(4)
++# define TI_ADP_MODE(d) (((d)>>2)&3)
++# define TI_ADP_MODE_MASK (BIT(3) | BIT(2))
++# define TI_ADP_MODE_DISABLE 0
++# define TI_ADP_MODE_SENSE 1
++# define TI_ADP_MODE_PRB_A 2
++# define TI_ADP_MODE_PRB_B 3
++# define TI_VBUS_IADP_SRC BIT(1)
++# define TI_VBUS_IADP_SINK BIT(0)
++#define ULPI_TI_VS7 0x91
++# define TI_T_ADP_HIGH (0xff)
++#define ULPI_TI_VS8 0x94
++# define TI_T_ADP_LOW (0xff)
++#define ULPI_TI_VS9 0x97
++# define TI_T_ADP_RISE (0xff)
++
++#define TI_PRB_DELTA 0x08
++
++/* MSIC FreeScale Implementation for ADP */
++#define ULPI_FS_ADPCL 0x28
++# define ADPCL_PRBDSCHG (BIT(5) | BIT(6))
++# define ADPCL_PRBDSCHG_4 0
++# define ADPCL_PRBDSCHG_8 1
++# define ADPCL_PRBDSCHG_16 2
++# define ADPCL_PRBDSCHG_32 3
++# define ADPCL_PRBPRD (BIT(3) | BIT(4))
++# define ADPCL_PRBPRD_A_HALF 0
++# define ADPCL_PRBPRD_B_HALF 1
++# define ADPCL_PRBPRD_A 2
++# define ADPCL_PRBPRD_B 3
++# define ADPCL_SNSEN BIT(2)
++# define ADPCL_PRBEN BIT(1)
++# define ADPCL_ADPEN BIT(0)
++#define ULPI_FS_ADPCH 0x29
++# define ADPCH_PRBDELTA (0x1f << 0)
++#define ULPI_FS_ADPIE 0x2a
++# define ADPIE_ADPRAMPIE BIT(2)
++# define ADPIE_SNSMISSIE BIT(1)
++# define ADPIE_PRBTRGIE BIT(0)
++#define ULPI_FS_ADPIS 0x2b
++# define ADPIS_ADPRAMPS BIT(5)
++# define ADPIS_SNSMISSS BIT(4)
++# define ADPIS_PRBTRGS BIT(3)
++# define ADPIS_ADPRAMPI BIT(2)
++# define ADPIS_SNSMISSI BIT(1)
++# define ADPIS_PRBTRGI BIT(0)
++#define ULPI_FS_ADPRL 0x2c
++# define ADPRL_ADPRAMP (0xff << 0)
++#define ULPI_FS_ADPRH 0x2d
++# define ADPRH_ADPRAMP (0x7 << 0)
++
++#define FS_ADPI_MASK (ADPIS_ADPRAMPI | ADPIS_SNSMISSI | ADPIS_PRBTRGI)
++
++enum penwell_otg_timer_type {
++ TA_WAIT_VRISE_TMR,
++ TA_WAIT_BCON_TMR,
++ TA_AIDL_BDIS_TMR,
++ TA_BIDL_ADIS_TMR,
++ TA_WAIT_VFALL_TMR,
++ TB_ASE0_BRST_TMR,
++ TB_SE0_SRP_TMR,
++ TB_SRP_FAIL_TMR, /* wait for response of SRP */
++ TB_BUS_SUSPEND_TMR
++};
++
++#define TA_WAIT_VRISE 100
++#define TA_WAIT_BCON 30000
++#define TA_AIDL_BDIS 1500
++#define TA_BIDL_ADIS 300
++#define TA_WAIT_VFALL 100
++#define TB_ASE0_BRST 300
++#define TB_SE0_SRP 1800
++#define TB_SSEND_SRP 1800
++# define SRP_MON_INVAL 200
++#define TB_SRP_FAIL 5500
++#define TB_BUS_SUSPEND 500
++#define THOS_REQ_POL 1500
++
++/* MSIC vendor information */
++enum msic_vendor {
++ MSIC_VD_FS,
++ MSIC_VD_TI,
++ MSIC_VD_UNKNOWN
++};
++
++struct adp_status {
++ struct completion adp_comp;
++ u8 t_adp_rise;
++};
++
++struct penwell_otg {
++ struct intel_mid_otg_xceiv iotg;
++ struct device *dev;
++
++ unsigned region;
++ unsigned cfg_region;
++
++ struct work_struct work;
++ struct work_struct hnp_poll_work;
++ struct workqueue_struct *qwork;
++
++ struct timer_list hsm_timer;
++ struct timer_list hnp_poll_timer;
++
++ enum msic_vendor msic;
++
++ struct notifier_block iotg_notifier;
++
++ struct adp_status adp;
++};
++
++static inline
++struct penwell_otg *iotg_to_penwell(struct intel_mid_otg_xceiv *iotg)
++{
++ return container_of(iotg, struct penwell_otg, iotg);
++}
++
++#endif /* __PENWELL_OTG_H__ */
+--- /dev/null
++++ b/include/net/caif/caif_spi.h
+@@ -0,0 +1,153 @@
++/*
++ * Copyright (C) ST-Ericsson AB 2010
++ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
++ * License terms: GNU General Public License (GPL) version 2
++ */
++
++#ifndef CAIF_SPI_H_
++#define CAIF_SPI_H_
++
++#include <net/caif/caif_device.h>
++
++#define SPI_CMD_WR 0x00
++#define SPI_CMD_RD 0x01
++#define SPI_CMD_EOT 0x02
++#define SPI_CMD_IND 0x04
++
++#define SPI_DMA_BUF_LEN 8192
++
++#define WL_SZ 2 /* 16 bits. */
++#define SPI_CMD_SZ 4 /* 32 bits. */
++#define SPI_IND_SZ 4 /* 32 bits. */
++
++#define SPI_XFER 0
++#define SPI_SS_ON 1
++#define SPI_SS_OFF 2
++#define SPI_TERMINATE 3
++
++/* Minimum time between different levels is 50 microseconds. */
++#define MIN_TRANSITION_TIME_USEC 50
++
++/* Defines for calculating duration of SPI transfers for a particular
++ * number of bytes.
++ */
++#define SPI_MASTER_CLK_MHZ 13
++#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
++
++/* Normally this should be aligned on the modem in order to benefit from full
++ * duplex transfers. However a size of 8188 provokes errors when running with
++ * the modem. These errors occur when packet sizes approaches 4 kB of data.
++ */
++#define CAIF_MAX_SPI_FRAME 4092
++
++/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
++ * This number should correspond with the modem setting. The application side
++ * CAIF accepts any number of embedded downlink CAIF frames.
++ */
++#define CAIF_MAX_SPI_PKTS 9
++
++/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
++ * debugging. Both TX and RX buffers will be filled before the transfer.
++ */
++#define CFSPI_DBG_PREFILL 0
++
++/* Structure describing a SPI transfer. */
++struct cfspi_xfer {
++ u16 tx_dma_len;
++ u16 rx_dma_len;
++ void *va_tx;
++ dma_addr_t pa_tx;
++ void *va_rx;
++ dma_addr_t pa_rx;
++};
++
++/* Structure implemented by the SPI interface. */
++struct cfspi_ifc {
++ void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
++ void (*xfer_done_cb) (struct cfspi_ifc *ifc);
++ void *priv;
++};
++
++/* Structure implemented by SPI clients. */
++struct cfspi_dev {
++ int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
++ void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
++ struct cfspi_ifc *ifc;
++ char *name;
++ u32 clk_mhz;
++ void *priv;
++};
++
++/* Enumeration describing the CAIF SPI state. */
++enum cfspi_state {
++ CFSPI_STATE_WAITING = 0,
++ CFSPI_STATE_AWAKE,
++ CFSPI_STATE_FETCH_PKT,
++ CFSPI_STATE_GET_NEXT,
++ CFSPI_STATE_INIT_XFER,
++ CFSPI_STATE_WAIT_ACTIVE,
++ CFSPI_STATE_SIG_ACTIVE,
++ CFSPI_STATE_WAIT_XFER_DONE,
++ CFSPI_STATE_XFER_DONE,
++ CFSPI_STATE_WAIT_INACTIVE,
++ CFSPI_STATE_SIG_INACTIVE,
++ CFSPI_STATE_DELIVER_PKT,
++ CFSPI_STATE_MAX,
++};
++
++/* Structure implemented by SPI physical interfaces. */
++struct cfspi {
++ struct caif_dev_common cfdev;
++ struct net_device *ndev;
++ struct platform_device *pdev;
++ struct sk_buff_head qhead;
++ struct sk_buff_head chead;
++ u16 cmd;
++ u16 tx_cpck_len;
++ u16 tx_npck_len;
++ u16 rx_cpck_len;
++ u16 rx_npck_len;
++ struct cfspi_ifc ifc;
++ struct cfspi_xfer xfer;
++ struct cfspi_dev *dev;
++ unsigned long state;
++ struct work_struct work;
++ struct workqueue_struct *wq;
++ struct list_head list;
++ int flow_off_sent;
++ u32 qd_low_mark;
++ u32 qd_high_mark;
++ struct completion comp;
++ wait_queue_head_t wait;
++ spinlock_t lock;
++ bool flow_stop;
++#ifdef CONFIG_DEBUG_FS
++ enum cfspi_state dbg_state;
++ u16 pcmd;
++ u16 tx_ppck_len;
++ u16 rx_ppck_len;
++ struct dentry *dbgfs_dir;
++ struct dentry *dbgfs_state;
++ struct dentry *dbgfs_frame;
++#endif /* CONFIG_DEBUG_FS */
++};
++
++extern int spi_frm_align;
++extern int spi_up_head_align;
++extern int spi_up_tail_align;
++extern int spi_down_head_align;
++extern int spi_down_tail_align;
++extern struct platform_driver cfspi_spi_driver;
++
++void cfspi_dbg_state(struct cfspi *cfspi, int state);
++int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
++int cfspi_xmitlen(struct cfspi *cfspi);
++int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
++int cfspi_spi_remove(struct platform_device *pdev);
++int cfspi_spi_probe(struct platform_device *pdev);
++int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
++int cfspi_xmitlen(struct cfspi *cfspi);
++int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
++void cfspi_xfer(struct work_struct *work);
++
++#endif /* CAIF_SPI_H_ */
+--- /dev/null
++++ b/include/sound/intel_sst.h
+@@ -0,0 +1,131 @@
++#ifndef __INTEL_SST_H__
++#define __INTEL_SST_H__
++/*
++ * intel_sst.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ * This file is shared between the SST and MAD drivers
++ */
++
++#define SST_CARD_NAMES "intel_mid_card"
++
++/* control list Pmic & Lpe */
++/* Input controls */
++enum port_status {
++ ACTIVATE = 1,
++ DEACTIVATE,
++};
++
++/* Card states */
++enum sst_card_states {
++ SND_CARD_UN_INIT = 0,
++ SND_CARD_INIT_DONE,
++};
++
++enum sst_controls {
++ SST_SND_ALLOC = 0x1000,
++ SST_SND_PAUSE = 0x1001,
++ SST_SND_RESUME = 0x1002,
++ SST_SND_DROP = 0x1003,
++ SST_SND_FREE = 0x1004,
++ SST_SND_BUFFER_POINTER = 0x1005,
++ SST_SND_STREAM_INIT = 0x1006,
++ SST_SND_START = 0x1007,
++ SST_SND_STREAM_PROCESS = 0x1008,
++ SST_MAX_CONTROLS = 0x1008,
++ SST_CONTROL_BASE = 0x1000,
++ SST_ENABLE_RX_TIME_SLOT = 0x1009,
++};
++
++enum SND_CARDS {
++ SND_FS = 0,
++ SND_MX,
++ SND_NC,
++ SND_MSIC
++};
++
++struct pcm_stream_info {
++ int str_id;
++ void *mad_substream;
++ void (*period_elapsed) (void *mad_substream);
++ unsigned long long buffer_ptr;
++ int sfreq;
++};
++
++struct snd_pmic_ops {
++ int card_status;
++ int master_mute;
++ int num_channel;
++ int input_dev_id;
++ int mute_status;
++ int pb_on;
++ int cap_on;
++ int output_dev_id;
++ int (*set_input_dev) (u8 value);
++ int (*set_output_dev) (u8 value);
++
++ int (*set_mute) (int dev_id, u8 value);
++ int (*get_mute) (int dev_id, u8 *value);
++
++ int (*set_vol) (int dev_id, int value);
++ int (*get_vol) (int dev_id, int *value);
++
++ int (*init_card) (void);
++ int (*set_pcm_audio_params)
++ (int sfreq, int word_size , int num_channel);
++ int (*set_pcm_voice_params) (void);
++ int (*set_voice_port) (int status);
++ int (*set_audio_port) (int status);
++
++ int (*power_up_pmic_pb) (unsigned int port);
++ int (*power_up_pmic_cp) (unsigned int port);
++ int (*power_down_pmic_pb) (void);
++ int (*power_down_pmic_cp) (void);
++ int (*power_down_pmic) (void);
++};
++
++struct intel_sst_card_ops {
++ char *module_name;
++ unsigned int vendor_id;
++ int (*control_set) (int control_element, void *value);
++ struct snd_pmic_ops *scard_ops;
++};
++
++/* modified for generic access */
++struct sc_reg_access {
++ u16 reg_addr;
++ u8 value;
++ u8 mask;
++};
++enum sc_reg_access_type {
++ PMIC_READ = 0,
++ PMIC_WRITE,
++ PMIC_READ_MODIFY,
++};
++
++int register_sst_card(struct intel_sst_card_ops *card);
++void unregister_sst_card(struct intel_sst_card_ops *card);
++#endif /* __INTEL_SST_H__ */
+--- /dev/null
++++ b/include/sound/intel_sst_ioctl.h
+@@ -0,0 +1,435 @@
++#ifndef __INTEL_SST_IOCTL_H__
++#define __INTEL_SST_IOCTL_H__
++/*
++ * intel_sst_ioctl.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file defines all sst ioctls
++ */
++
++/* codec and post/pre processing related info */
++
++#include <linux/types.h>
++
++enum sst_codec_types {
++/* AUDIO/MUSIC CODEC Type Definitions */
++ SST_CODEC_TYPE_UNKNOWN = 0,
++ SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
++ SST_CODEC_TYPE_MP3,
++ SST_CODEC_TYPE_MP24,
++ SST_CODEC_TYPE_AAC,
++ SST_CODEC_TYPE_AACP,
++ SST_CODEC_TYPE_eAACP,
++ SST_CODEC_TYPE_WMA9,
++ SST_CODEC_TYPE_WMA10,
++ SST_CODEC_TYPE_WMA10P,
++ SST_CODEC_TYPE_RA,
++ SST_CODEC_TYPE_DDAC3,
++ SST_CODEC_TYPE_STEREO_TRUE_HD,
++ SST_CODEC_TYPE_STEREO_HD_PLUS,
++
++ /* VOICE CODEC Type Definitions */
++ SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
++};
++
++enum sst_algo_types {
++ SST_CODEC_SRC = 0x64,
++ SST_CODEC_MIXER = 0x65,
++ SST_CODEC_DOWN_MIXER = 0x66,
++ SST_CODEC_VOLUME_CONTROL = 0x67,
++ SST_CODEC_OEM1 = 0xC8,
++ SST_CODEC_OEM2 = 0xC9,
++};
++
++enum snd_sst_stream_ops {
++ STREAM_OPS_PLAYBACK = 0, /* Decode */
++ STREAM_OPS_CAPTURE, /* Encode */
++ STREAM_OPS_PLAYBACK_DRM, /* Play Audio/Voice */
++ STREAM_OPS_PLAYBACK_ALERT, /* Play Audio/Voice */
++ STREAM_OPS_CAPTURE_VOICE_CALL, /* CSV Voice recording */
++};
++
++enum stream_mode {
++ SST_STREAM_MODE_NONE = 0,
++ SST_STREAM_MODE_DNR = 1,
++ SST_STREAM_MODE_FNF = 2,
++ SST_STREAM_MODE_CAPTURE = 3
++};
++
++enum stream_type {
++ SST_STREAM_TYPE_NONE = 0,
++ SST_STREAM_TYPE_MUSIC = 1,
++ SST_STREAM_TYPE_NORMAL = 2,
++ SST_STREAM_TYPE_LONG_PB = 3,
++ SST_STREAM_TYPE_LOW_LATENCY = 4,
++};
++
++enum snd_sst_audio_device_type {
++ SND_SST_DEVICE_HEADSET = 1,
++ SND_SST_DEVICE_IHF,
++ SND_SST_DEVICE_VIBRA,
++ SND_SST_DEVICE_HAPTIC,
++ SND_SST_DEVICE_CAPTURE,
++};
++
++/* Firmware Version info */
++struct snd_sst_fw_version {
++ __u8 build; /* build number*/
++ __u8 minor; /* minor number*/
++ __u8 major; /* major number*/
++ __u8 type; /* build type */
++};
++
++/* Port info structure */
++struct snd_sst_port_info {
++ __u16 port_type;
++ __u16 reserved;
++};
++
++/* Mixer info structure */
++struct snd_sst_mix_info {
++ __u16 max_streams;
++ __u16 reserved;
++};
++
++/* PCM Parameters */
++struct snd_pcm_params {
++ __u16 codec; /* codec type */
++ __u8 num_chan; /* 1=Mono, 2=Stereo */
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 reserved; /* Bitrate in bits per second */
++ __u32 sfreq; /* Sampling rate in Hz */
++ __u32 ring_buffer_size;
++ __u32 period_count; /* period elapsed in samples*/
++ __u32 ring_buffer_addr;
++};
++
++/* MP3 Music Parameters Message */
++struct snd_mp3_params {
++ __u16 codec;
++ __u8 num_chan; /* 1=Mono, 2=Stereo */
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate; /* Use the hard coded value. */
++ __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
++ __u8 crc_check; /* crc_check - disable (0) or enable (1) */
++ __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB*/
++ __u16 reserved; /* Unused */
++};
++
++#define AAC_BIT_STREAM_ADTS 0
++#define AAC_BIT_STREAM_ADIF 1
++#define AAC_BIT_STREAM_RAW 2
++
++/* AAC Music Parameters Message */
++struct snd_aac_params {
++ __u16 codec;
++ __u8 num_chan; /* 1=Mono, 2=Stereo*/
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate;
++ __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
++ __u32 aac_srate; /* Plain AAC decoder operating sample rate */
++ __u8 mpg_id; /* 0=MPEG-2, 1=MPEG-4 */
++ __u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
++ __u8 aac_profile; /* 0=Main Profile, 1=LC profile, 3=SSR profile */
++ __u8 ext_chl; /* No.of external channels */
++ __u8 aot; /* Audio object type. 1=Main , 2=LC , 3=SSR, 4=SBR*/
++ __u8 op_align; /* output alignment 0=16 bit , 1=MSB, 2= LSB align */
++ __u8 brate_type; /* 0=CBR, 1=VBR */
++ __u8 crc_check; /* crc check 0= disable, 1=enable */
++ __s8 bit_stream_format[8]; /* input bit stream format adts/adif/raw */
++ __u8 jstereo; /* Joint stereo Flag */
++ __u8 sbr_present; /* 1 = SBR Present, 0 = SBR absent, for RAW */
++ __u8 downsample; /* 1 = Downsampling ON, 0 = Downsampling OFF */
++ __u8 num_syntc_elems; /* 1- Mono/stereo, 0 - Dual Mono, 0 - for raw */
++ __s8 syntc_id[2]; /* 0 for ID_SCE(Dula Mono), -1 for raw */
++ __s8 syntc_tag[2]; /* raw - -1 and 0 -16 for rest of the streams */
++ __u8 pce_present; /* Flag. 1- present 0 - not present, for RAW */
++ __u8 sbr_type; /* sbr_type: 0-plain aac, 1-aac-v1, 2-aac-v2 */
++ __u8 outchmode; /*0- mono, 1-stereo, 2-dual mono 3-Parametric stereo */
++ __u8 ps_present;
++};
++
++/* WMA Music Parameters Message */
++struct snd_wma_params {
++ __u16 codec;
++ __u8 num_chan; /* 1=Mono, 2=Stereo */
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate; /* Use the hard coded value. */
++ __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
++ __u32 channel_mask; /* Channel Mask */
++ __u16 format_tag; /* Format Tag */
++ __u16 block_align; /* packet size */
++ __u16 wma_encode_opt;/* Encoder option */
++ __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
++ __u8 pcm_src; /* input pcm bit width */
++};
++
++/* Pre processing param structure */
++struct snd_prp_params {
++ __u32 reserved; /* No pre-processing defined yet */
++};
++
++struct snd_params_block {
++ __u32 type; /*Type of the parameter*/
++ __u32 size; /*size of the parameters in the block*/
++ __u8 params[0]; /*Parameters of the algorithm*/
++};
++
++/* Pre and post processing params structure */
++struct snd_ppp_params {
++ enum sst_algo_types algo_id;/* Post/Pre processing algorithm ID */
++ __u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
++ __u8 enable; /* 0= disable, 1= enable*/
++ __u8 reserved;
++ __u32 size; /*Size of parameters for all blocks*/
++ struct snd_params_block params[0];
++};
++
++struct snd_sst_postproc_info {
++ __u32 src_min; /* Supported SRC Min sampling freq */
++ __u32 src_max; /* Supported SRC Max sampling freq */
++ __u8 src; /* 0=Not supported, 1=Supported */
++ __u8 bass_boost; /* 0=Not Supported, 1=Supported */
++ __u8 stereo_widening; /* 0=Not Supported, 1=Supported */
++ __u8 volume_control; /* 0=Not Supported, 1=Supported */
++ __s16 min_vol; /* Minimum value of Volume in dB */
++ __s16 max_vol; /* Maximum value of Volume in dB */
++ __u8 mute_control; /* 0=No Mute, 1=Mute */
++ __u8 reserved1;
++ __u16 reserved2;
++};
++
++/* pre processing Capability info structure */
++struct snd_sst_prp_info {
++ __s16 min_vol; /* Minimum value of Volume in dB */
++ __s16 max_vol; /* Maximum value of Volume in dB */
++ __u8 volume_control; /* 0=Not Supported, 1=Supported */
++ __u8 reserved1; /* for 32 bit alignment */
++ __u16 reserved2; /* for 32 bit alignment */
++} __attribute__ ((packed));
++
++/*Pre / Post processing algorithms support*/
++struct snd_sst_ppp_info {
++ __u32 src:1; /* 0=Not supported, 1=Supported */
++ __u32 mixer:1; /* 0=Not supported, 1=Supported */
++ __u32 volume_control:1; /* 0=Not Supported, 1=Supported */
++ __u32 mute_control:1; /* 0=Not Supported, 1=Supported */
++ __u32 anc:1; /* 0=Not Supported, 1=Supported */
++ __u32 side_tone:1; /* 0=Not Supported, 1=Supported */
++ __u32 dc_removal:1; /* 0=Not Supported, 1=Supported */
++ __u32 equalizer:1; /* 0=Not Supported, 1=Supported */
++ __u32 spkr_prot:1; /* 0=Not Supported, 1=Supported */
++ __u32 bass_boost:1; /* 0=Not Supported, 1=Supported */
++ __u32 stereo_widening:1;/* 0=Not Supported, 1=Supported */
++ __u32 rsvd1:21;
++ __u32 rsvd2;
++};
++
++/* Firmware capabilities info */
++struct snd_sst_fw_info {
++ struct snd_sst_fw_version fw_version; /* Firmware version */
++ __u8 audio_codecs_supported[8]; /* Codecs supported by FW */
++ __u32 recommend_min_duration; /* Min duration for Lowpower Playback */
++ __u8 max_pcm_streams_supported; /* Max num of PCM streams supported */
++ __u8 max_enc_streams_supported; /* Max number of Encoded streams */
++ __u16 reserved; /* 32 bit alignment*/
++ struct snd_sst_ppp_info ppp_info; /* pre_processing mod cap info */
++ struct snd_sst_postproc_info pop_info; /* Post processing cap info*/
++ struct snd_sst_port_info port_info[3]; /* Port info */
++ struct snd_sst_mix_info mix_info;/* Mixer info */
++ __u32 min_input_buf; /* minmum i/p buffer for decode */
++};
++
++/* Codec params struture */
++union snd_sst_codec_params {
++ struct snd_pcm_params pcm_params;
++ struct snd_mp3_params mp3_params;
++ struct snd_aac_params aac_params;
++ struct snd_wma_params wma_params;
++};
++
++
++struct snd_sst_stream_params {
++ union snd_sst_codec_params uc;
++} __attribute__ ((packed));
++
++struct snd_sst_params {
++ __u32 result;
++ __u32 stream_id;
++ __u8 codec;
++ __u8 ops;
++ __u8 stream_type;
++ __u8 device_type;
++ struct snd_sst_stream_params sparams;
++};
++
++struct snd_sst_vol {
++ __u32 stream_id;
++ __s32 volume;
++ __u32 ramp_duration;
++ __u32 ramp_type; /* Ramp type, default=0 */
++};
++
++struct snd_sst_mute {
++ __u32 stream_id;
++ __u32 mute;
++};
++
++/* ioctl related stuff here */
++struct snd_sst_pmic_config {
++ __u32 sfreq; /* Sampling rate in Hz */
++ __u16 num_chan; /* Mono =1 or Stereo =2 */
++ __u16 pcm_wd_sz; /* Number of bits per sample */
++} __attribute__ ((packed));
++
++struct snd_sst_get_stream_params {
++ struct snd_sst_params codec_params;
++ struct snd_sst_pmic_config pcm_params;
++};
++
++enum snd_sst_target_type {
++ SND_SST_TARGET_PMIC = 1,
++ SND_SST_TARGET_LPE,
++ SND_SST_TARGET_MODEM,
++ SND_SST_TARGET_BT,
++ SND_SST_TARGET_FM,
++ SND_SST_TARGET_NONE,
++};
++
++enum snd_sst_device_type {
++ SND_SST_DEVICE_SSP = 1,
++ SND_SST_DEVICE_PCM,
++ SND_SST_DEVICE_OTHER,
++};
++
++enum snd_sst_device_mode {
++
++ SND_SST_DEV_MODE_PCM_MODE1 = 1, /*(16-bit word, bit-length frame sync)*/
++ SND_SST_DEV_MODE_PCM_MODE2,
++ SND_SST_DEV_MODE_PCM_MODE3,
++ SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED,
++ SND_SST_DEV_MODE_PCM_MODE4_LEFT_JUSTIFIED,
++ SND_SST_DEV_MODE_PCM_MODE4_I2S, /*(I2S mode, 16-bit words)*/
++ SND_SST_DEV_MODE_PCM_MODE5,
++ SND_SST_DEV_MODE_PCM_MODE6,
++};
++
++enum snd_sst_port_action {
++ SND_SST_PORT_PREPARE = 1,
++ SND_SST_PORT_ACTIVATE,
++};
++
++/* Target selection per device structure */
++struct snd_sst_slot_info {
++ __u8 mix_enable; /* Mixer enable or disable */
++ __u8 device_type;
++ __u8 device_instance; /* 0, 1, 2 */
++ __u8 target_device;
++ __u16 target_sink;
++ __u8 slot[2];
++ __u8 master;
++ __u8 action;
++ __u8 device_mode;
++ __u8 reserved;
++ struct snd_sst_pmic_config pcm_params;
++} __attribute__ ((packed));
++
++#define SST_MAX_TARGET_DEVICES 3
++/* Target device list structure */
++struct snd_sst_target_device {
++ __u32 device_route;
++ struct snd_sst_slot_info devices[SST_MAX_TARGET_DEVICES];
++} __attribute__ ((packed));
++
++struct snd_sst_driver_info {
++ __u32 version; /* Version of the driver */
++ __u32 active_pcm_streams;
++ __u32 active_enc_streams;
++ __u32 max_pcm_streams;
++ __u32 max_enc_streams;
++ __u32 buf_per_stream;
++};
++
++enum snd_sst_buff_type {
++ SST_BUF_USER = 1,
++ SST_BUF_MMAP,
++ SST_BUF_RAR,
++};
++
++struct snd_sst_mmap_buff_entry {
++ unsigned int offset;
++ unsigned int size;
++};
++
++struct snd_sst_mmap_buffs {
++ unsigned int entries;
++ enum snd_sst_buff_type type;
++ struct snd_sst_mmap_buff_entry *buff;
++};
++
++struct snd_sst_buff_entry {
++ void *buffer;
++ unsigned int size;
++};
++
++struct snd_sst_buffs {
++ unsigned int entries;
++ __u8 type;
++ struct snd_sst_buff_entry *buff_entry;
++};
++
++struct snd_sst_dbufs {
++ unsigned long long input_bytes_consumed;
++ unsigned long long output_bytes_produced;
++ struct snd_sst_buffs *ibufs;
++ struct snd_sst_buffs *obufs;
++};
++
++/*IOCTL defined here */
++/*SST MMF IOCTLS only */
++#define SNDRV_SST_STREAM_SET_PARAMS _IOR('L', 0x00, \
++ struct snd_sst_stream_params *)
++#define SNDRV_SST_STREAM_GET_PARAMS _IOWR('L', 0x01, \
++ struct snd_sst_get_stream_params *)
++#define SNDRV_SST_STREAM_GET_TSTAMP _IOWR('L', 0x02, __u64 *)
++#define SNDRV_SST_STREAM_DECODE _IOWR('L', 0x03, struct snd_sst_dbufs *)
++#define SNDRV_SST_STREAM_BYTES_DECODED _IOWR('L', 0x04, __u64 *)
++#define SNDRV_SST_STREAM_START _IO('A', 0x42)
++#define SNDRV_SST_STREAM_DROP _IO('A', 0x43)
++#define SNDRV_SST_STREAM_DRAIN _IO('A', 0x44)
++#define SNDRV_SST_STREAM_PAUSE _IOW('A', 0x45, int)
++#define SNDRV_SST_STREAM_RESUME _IO('A', 0x47)
++#define SNDRV_SST_MMAP_PLAY _IOW('L', 0x05, struct snd_sst_mmap_buffs *)
++#define SNDRV_SST_MMAP_CAPTURE _IOW('L', 0x06, struct snd_sst_mmap_buffs *)
++/*SST common ioctls */
++#define SNDRV_SST_DRIVER_INFO _IOR('L', 0x10, struct snd_sst_driver_info *)
++#define SNDRV_SST_SET_VOL _IOW('L', 0x11, struct snd_sst_vol *)
++#define SNDRV_SST_GET_VOL _IOW('L', 0x12, struct snd_sst_vol *)
++#define SNDRV_SST_MUTE _IOW('L', 0x13, struct snd_sst_mute *)
++/*AM Ioctly only */
++#define SNDRV_SST_FW_INFO _IOR('L', 0x20, struct snd_sst_fw_info *)
++#define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
++ struct snd_sst_target_device *)
++
++#endif /* __INTEL_SST_IOCTL_H__ */
+--- a/include/sound/jack.h
++++ b/include/sound/jack.h
+@@ -42,6 +42,8 @@
+ SND_JACK_MECHANICAL = 0x0008, /* If detected separately */
+ SND_JACK_VIDEOOUT = 0x0010,
+ SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
++ SND_JACK_HS_SHORT_PRESS = SND_JACK_HEADSET | 0x0020,
++ SND_JACK_HS_LONG_PRESS = SND_JACK_HEADSET | 0x0040,
+
+ /* Kept separate from switches to facilitate implementation */
+ SND_JACK_BTN_0 = 0x4000,
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -9,6 +9,25 @@
+
+ if SND_PCI
+
++config SND_INTEL_SST
++ tristate "Intel SST (LPE) Driver"
++ depends on X86 && INTEL_SCU_IPC
++ default n
++ help
++ Say Y here to include support for the Intel(R) MID SST DSP driver
++ On other PC platforms if you are unsure answer 'N'
++
++config SND_INTELMID
++ tristate "Intel MID sound card driver"
++ select SND_PCM
++ select SND_SEQUENCER
++ select SND_JACK
++ depends on SND_INTEL_SST
++ default n
++ help
++ Say Y here to include support for the Intel(R) MID sound card driver
++ On other PC platforms if you are unsure answer 'N'
++
+ config SND_AD1889
+ tristate "Analog Devices AD1889"
+ select SND_AC97_CODEC
+--- a/sound/pci/Makefile
++++ b/sound/pci/Makefile
+@@ -78,4 +78,5 @@
+ rme9652/ \
+ trident/ \
+ ymfpci/ \
+- vx222/
++ vx222/ \
++ sst/
+--- /dev/null
++++ b/sound/pci/sst/Makefile
+@@ -0,0 +1,8 @@
++#
++# Makefile for Intel MID Audio drivers
++#
++EXTRA_CFLAGS=-g -DDEBUG
++snd-intel-sst-objs := intel_sst.o intel_sst_ipc.o intel_sst_stream.o intel_sst_drv_interface.o intel_sst_dsp.o intel_sst_pvt.o intel_sst_stream_encoded.o intel_sst_app_interface.o
++snd-intelmid-objs := intelmid.o intelmid_msic_control.o intelmid_ctrl.o intelmid_pvt.o intelmid_v0_control.o intelmid_v1_control.o intelmid_v2_control.o
++obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
++obj-$(CONFIG_SND_INTELMID) += snd-intelmid.o
+--- /dev/null
++++ b/sound/pci/sst/intel_sst.c
+@@ -0,0 +1,512 @@
++/*
++ * intel_sst.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ *
++ * This file contains all init functions
++ */
++
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/interrupt.h>
++#include <linux/firmware.h>
++#include <linux/miscdevice.h>
++#include <asm/mrst.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
++MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
++MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
++MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(SST_DRIVER_VERSION);
++
++struct intel_sst_drv *sst_drv_ctx;
++static struct mutex drv_ctx_lock;
++struct class *sst_class;
++
++/* fops Routines */
++static const struct file_operations intel_sst_fops = {
++ .owner = THIS_MODULE,
++ .open = intel_sst_open,
++ .release = intel_sst_release,
++ .read = intel_sst_read,
++ .write = intel_sst_write,
++ .ioctl = intel_sst_ioctl,
++ .mmap = intel_sst_mmap,
++ .aio_read = intel_sst_aio_read,
++ .aio_write = intel_sst_aio_write,
++};
++static const struct file_operations intel_sst_fops_cntrl = {
++ .owner = THIS_MODULE,
++ .open = intel_sst_open_cntrl,
++ .release = intel_sst_release_cntrl,
++ .ioctl = intel_sst_ioctl,
++};
++
++static struct miscdevice lpe_dev = {
++ .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
++ .name = "intel_sst",/* /dev/intel_sst */
++ .fops = &intel_sst_fops
++};
++
++
++static struct miscdevice lpe_ctrl = {
++ .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
++ .name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
++ .fops = &intel_sst_fops_cntrl
++};
++
++/**
++* intel_sst_interrupt - Interrupt service routine for SST
++*
++* @irq: irq number of interrupt
++* @context: pointer to device structre
++*
++* This function is called by OS when SST device raises
++* an interrupt. This will be result of write in IPC register
++* Source can be busy or done interrupt
++*/
++static irqreturn_t intel_sst_interrupt(int irq, void *context)
++{
++ union interrupt_reg isr;
++ union ipc_header header;
++ union interrupt_reg imr;
++ struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
++ unsigned int size = 0, str_id;
++ struct stream_info *stream ;
++
++ /* Interrupt arrived, check src */
++ isr.full = sst_shim_read(drv->shim, SST_ISRX);
++
++ if (isr.part.busy_interrupt) {
++ header.full = sst_shim_read(drv->shim, SST_IPCD);
++ if (header.part.msg_id == IPC_SST_PERIOD_ELAPSED) {
++ sst_clear_interrupt();
++ str_id = header.part.str_id;
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->period_elapsed)
++ stream->period_elapsed(stream->pcm_substream);
++ return IRQ_HANDLED;
++ }
++ if (header.part.large)
++ size = header.part.data;
++ if (header.part.msg_id & REPLY_MSG) {
++ sst_drv_ctx->ipc_process_msg.header = header;
++ memcpy_fromio(sst_drv_ctx->ipc_process_msg.mailbox,
++ drv->mailbox + SST_MAILBOX_RCV, size);
++ queue_work(sst_drv_ctx->process_msg_wq,
++ &sst_drv_ctx->ipc_process_msg.wq);
++ } else {
++ sst_drv_ctx->ipc_process_reply.header = header;
++ memcpy_fromio(sst_drv_ctx->ipc_process_reply.mailbox,
++ drv->mailbox + SST_MAILBOX_RCV, size);
++ queue_work(sst_drv_ctx->process_reply_wq,
++ &sst_drv_ctx->ipc_process_reply.wq);
++ }
++ /* mask busy inetrrupt */
++ imr.full = sst_shim_read(drv->shim, SST_IMRX);
++ imr.part.busy_interrupt = 1;
++ sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
++ return IRQ_HANDLED;
++ } else if (isr.part.done_interrupt) {
++ /* Clear done bit */
++ header.full = sst_shim_read(drv->shim, SST_IPCX);
++ header.part.done = 0;
++ sst_shim_write(sst_drv_ctx->shim, SST_IPCX, header.full);
++ /* write 1 to clear status register */;
++ isr.part.done_interrupt = 1;
++ /* dummy register for shim workaround */
++ sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
++ queue_work(sst_drv_ctx->post_msg_wq,
++ &sst_drv_ctx->ipc_post_msg.wq);
++ return IRQ_HANDLED;
++ } else
++ return IRQ_NONE;
++
++}
++
++
++/*
++* intel_sst_probe - PCI probe function
++*
++* @pci: PCI device structure
++* @pci_id: PCI device ID structure
++*
++* This function is called by OS when a device is found
++* This enables the device, interrupt etc
++*/
++static int __devinit intel_sst_probe(struct pci_dev *pci,
++ const struct pci_device_id *pci_id)
++{
++ int i, ret = 0;
++
++ pr_debug("sst: Probe for DID %x\n", pci->device);
++ mutex_lock(&drv_ctx_lock);
++ if (sst_drv_ctx) {
++ pr_err("sst: Only one sst handle is supported\n");
++ mutex_unlock(&drv_ctx_lock);
++ return -EBUSY;
++ }
++
++ sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
++ if (!sst_drv_ctx) {
++ pr_err("sst: intel_sst malloc fail\n");
++ mutex_unlock(&drv_ctx_lock);
++ return -ENOMEM;
++ }
++ mutex_unlock(&drv_ctx_lock);
++
++ sst_drv_ctx->pci_id = pci->device;
++
++ mutex_init(&sst_drv_ctx->stream_lock);
++ mutex_init(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
++
++ sst_drv_ctx->stream_cnt = 0;
++ sst_drv_ctx->encoded_cnt = 0;
++ sst_drv_ctx->am_cnt = 0;
++ sst_drv_ctx->pb_streams = 0;
++ sst_drv_ctx->cp_streams = 0;
++ sst_drv_ctx->unique_id = 0;
++ sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
++
++ INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
++ INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
++ INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
++ INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
++ INIT_WORK(&sst_drv_ctx->mad_ops.wq, sst_process_mad_ops);
++ init_waitqueue_head(&sst_drv_ctx->wait_queue);
++
++ sst_drv_ctx->mad_wq = create_workqueue("sst_mad_wq");
++ if (!sst_drv_ctx->mad_wq)
++ goto do_free_drv_ctx;
++ sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
++ if (!sst_drv_ctx->post_msg_wq)
++ goto free_mad_wq;
++ sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
++ if (!sst_drv_ctx->process_msg_wq)
++ goto free_post_msg_wq;
++ sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
++ if (!sst_drv_ctx->process_reply_wq)
++ goto free_process_msg_wq;
++
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++ }
++ spin_lock_init(&sst_drv_ctx->list_spin_lock);
++
++ sst_drv_ctx->max_streams = pci_id->driver_data;
++ pr_debug("sst: Got drv data max stream %d\n",
++ sst_drv_ctx->max_streams);
++ for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
++ struct stream_info *stream = &sst_drv_ctx->streams[i];
++ INIT_LIST_HEAD(&stream->bufs);
++ mutex_init(&stream->lock);
++ spin_lock_init(&stream->pcm_lock);
++ }
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
++ sst_drv_ctx->mmap_mem = NULL;
++ sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
++ while (sst_drv_ctx->mmap_len > 0) {
++ sst_drv_ctx->mmap_mem =
++ kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
++ if (sst_drv_ctx->mmap_mem) {
++ pr_debug("sst: Got memory %p size 0x%x\n",
++ sst_drv_ctx->mmap_mem,
++ sst_drv_ctx->mmap_len);
++ break;
++ }
++ if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
++ pr_err("sst: mem alloc fail...abort!!\n");
++ ret = -ENOMEM;
++ goto free_process_reply_wq;
++ }
++ sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
++ pr_debug("sst:mem alloc failed...trying %d\n",
++ sst_drv_ctx->mmap_len);
++ }
++ }
++
++ /* Init the device */
++ ret = pci_enable_device(pci);
++ if (ret) {
++ pr_err("sst: device cant be enabled\n");
++ goto do_free_mem;
++ }
++ sst_drv_ctx->pci = pci_dev_get(pci);
++ ret = pci_request_regions(pci, SST_DRV_NAME);
++ if (ret)
++ goto do_disable_device;
++ /* map registers */
++ /* SST Shim */
++ sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
++ sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
++ if (!sst_drv_ctx->shim)
++ goto do_release_regions;
++ pr_debug("sst: SST Shim Ptr %p\n", sst_drv_ctx->shim);
++
++ /* Shared SRAM */
++ sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
++ if (!sst_drv_ctx->mailbox)
++ goto do_unmap_shim;
++ pr_debug("sst: SRAM Ptr %p\n", sst_drv_ctx->mailbox);
++
++ /* IRAM */
++ sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
++ if (!sst_drv_ctx->iram)
++ goto do_unmap_sram;
++ pr_debug("sst:IRAM Ptr %p\n", sst_drv_ctx->iram);
++
++ /* DRAM */
++ sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
++ if (!sst_drv_ctx->dram)
++ goto do_unmap_iram;
++ pr_debug("sst: DRAM Ptr %p\n", sst_drv_ctx->dram);
++
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ /* Register the ISR */
++ ret = request_irq(pci->irq, intel_sst_interrupt,
++ IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
++ if (ret)
++ goto do_unmap_dram;
++ pr_debug("sst: Registered IRQ 0x%x\n", pci->irq);
++
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
++ ret = misc_register(&lpe_dev);
++ if (ret) {
++ pr_err("sst: couldn't register LPE device\n");
++ goto do_free_irq;
++ }
++
++ /*Register LPE Control as misc driver*/
++ ret = misc_register(&lpe_ctrl);
++ if (ret) {
++ pr_err("sst: couldn't register misc driver\n");
++ goto do_free_irq;
++ }
++ }
++ sst_drv_ctx->lpe_stalled = 0;
++ pr_debug("sst: ...successfully done!!!\n");
++ return ret;
++
++do_free_irq:
++ free_irq(pci->irq, sst_drv_ctx);
++do_unmap_dram:
++ iounmap(sst_drv_ctx->dram);
++do_unmap_iram:
++ iounmap(sst_drv_ctx->iram);
++do_unmap_sram:
++ iounmap(sst_drv_ctx->mailbox);
++do_unmap_shim:
++ iounmap(sst_drv_ctx->shim);
++do_release_regions:
++ pci_release_regions(pci);
++do_disable_device:
++ pci_disable_device(pci);
++do_free_mem:
++ kfree(sst_drv_ctx->mmap_mem);
++free_process_reply_wq:
++ destroy_workqueue(sst_drv_ctx->process_reply_wq);
++free_process_msg_wq:
++ destroy_workqueue(sst_drv_ctx->process_msg_wq);
++free_post_msg_wq:
++ destroy_workqueue(sst_drv_ctx->post_msg_wq);
++free_mad_wq:
++ destroy_workqueue(sst_drv_ctx->mad_wq);
++do_free_drv_ctx:
++ kfree(sst_drv_ctx);
++ pr_err("sst: Probe failed with 0x%x\n", ret);
++ return ret;
++}
++
++/**
++* intel_sst_remove - PCI remove function
++*
++* @pci: PCI device structure
++*
++* This function is called by OS when a device is unloaded
++* This frees the interrupt etc
++*/
++static void __devexit intel_sst_remove(struct pci_dev *pci)
++{
++ pci_dev_put(sst_drv_ctx->pci);
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
++ misc_deregister(&lpe_dev);
++ misc_deregister(&lpe_ctrl);
++ }
++ free_irq(pci->irq, sst_drv_ctx);
++ iounmap(sst_drv_ctx->dram);
++ iounmap(sst_drv_ctx->iram);
++ iounmap(sst_drv_ctx->mailbox);
++ iounmap(sst_drv_ctx->shim);
++ sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ kfree(sst_drv_ctx->mmap_mem);
++ flush_scheduled_work();
++ destroy_workqueue(sst_drv_ctx->process_reply_wq);
++ destroy_workqueue(sst_drv_ctx->process_msg_wq);
++ destroy_workqueue(sst_drv_ctx->post_msg_wq);
++ destroy_workqueue(sst_drv_ctx->mad_wq);
++ kfree(sst_drv_ctx);
++ pci_release_region(pci, 1);
++ pci_release_region(pci, 2);
++ pci_release_region(pci, 3);
++ pci_release_region(pci, 4);
++ pci_release_region(pci, 5);
++ pci_set_drvdata(pci, NULL);
++}
++
++/* Power Management */
++/*
++* intel_sst_suspend - PCI suspend function
++*
++* @pci: PCI device structure
++* @state: PM message
++*
++* This function is called by OS when a power event occurs
++*/
++int intel_sst_suspend(struct pci_dev *pci, pm_message_t state)
++{
++ union config_status_reg csr;
++
++ pr_debug("sst: intel_sst_suspend called\n");
++
++ if (sst_drv_ctx->pb_streams != 0 || sst_drv_ctx->cp_streams != 0)
++ return -EPERM;
++ /*Assert RESET on LPE Processor*/
++ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
++ csr.full = csr.full | 0x2;
++ /* Move the SST state to Suspended */
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_SUSPENDED;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ pci_set_drvdata(pci, sst_drv_ctx);
++ pci_save_state(pci);
++ pci_disable_device(pci);
++ pci_set_power_state(pci, PCI_D3hot);
++ return 0;
++}
++
++/**
++* intel_sst_resume - PCI resume function
++*
++* @pci: PCI device structure
++*
++* This function is called by OS when a power event occurs
++*/
++int intel_sst_resume(struct pci_dev *pci)
++{
++ int ret = 0;
++
++ pr_debug("sst: intel_sst_resume called\n");
++ if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
++ pr_err("sst: SST is not in suspended state\n");
++ return -EPERM;
++ }
++ sst_drv_ctx = pci_get_drvdata(pci);
++ pci_set_power_state(pci, PCI_D0);
++ pci_restore_state(pci);
++ ret = pci_enable_device(pci);
++ if (ret)
++ pr_err("sst: device cant be enabled\n");
++
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ return 0;
++}
++
++/* PCI Routines */
++static struct pci_device_id intel_sst_ids[] = {
++ { PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), 3},
++ { PCI_VDEVICE(INTEL, SST_MFLD_PCI_ID), 6},
++ { 0, }
++};
++MODULE_DEVICE_TABLE(pci, intel_sst_ids);
++
++static struct pci_driver driver = {
++ .name = SST_DRV_NAME,
++ .id_table = intel_sst_ids,
++ .probe = intel_sst_probe,
++ .remove = __devexit_p(intel_sst_remove),
++#ifdef CONFIG_PM
++ .suspend = intel_sst_suspend,
++ .resume = intel_sst_resume,
++#endif
++};
++
++/**
++* intel_sst_init - Module init function
++*
++* Registers with PCI
++* Registers with /dev
++* Init all data strutures
++*/
++static int __init intel_sst_init(void)
++{
++ /* Init all variables, data structure etc....*/
++ int ret = 0;
++ pr_debug("sst: INFO: ******** SST DRIVER loading.. Ver: %s\n",
++ SST_DRIVER_VERSION);
++
++ mutex_init(&drv_ctx_lock);
++ /* Register with PCI */
++ ret = pci_register_driver(&driver);
++ if (ret)
++ pr_err("sst: PCI register failed\n");
++ return ret;
++}
++
++/**
++* intel_sst_exit - Module exit function
++*
++* Unregisters with PCI
++* Unregisters with /dev
++* Frees all data strutures
++*/
++static void __exit intel_sst_exit(void)
++{
++ pci_unregister_driver(&driver);
++
++ pr_debug("sst: driver unloaded\n");
++ return;
++}
++
++module_init(intel_sst_init);
++module_exit(intel_sst_exit);
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_app_interface.c
+@@ -0,0 +1,1232 @@
++/*
++ * intel_sst_interface.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * Jeeja KP <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ * Upper layer interfaces (MAD driver, MMF) to SST driver
++ */
++
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/uio.h>
++#include <linux/aio.h>
++#include <linux/uaccess.h>
++#include <linux/firmware.h>
++#include <linux/ioctl.h>
++#ifdef CONFIG_MRST_RAR_HANDLER
++#include "../../../drivers/staging/rar_register/rar_register.h"
++#include "../../../drivers/staging/memrar/memrar.h"
++#endif
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++#define AM_MODULE 1
++#define STREAM_MODULE 0
++
++
++/**
++* intel_sst_check_device - checks SST device
++*
++* This utility function checks the state of SST device and downlaods FW if
++* not done, or resumes the device if suspended
++*/
++
++static int intel_sst_check_device(void)
++{
++ int retval = 0;
++ if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
++ pr_warn("sst: Sound card not availble\n ");
++ return -EIO;
++ }
++ if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
++ pr_debug("sst: Resuming from Suspended state\n");
++ retval = intel_sst_resume(sst_drv_ctx->pci);
++ if (retval) {
++ pr_debug("sst: Resume Failed= %#x,abort\n", retval);
++ return retval;
++ }
++ }
++
++ if (sst_drv_ctx->sst_state == SST_UN_INIT) {
++ /* FW is not downloaded */
++ retval = sst_download_fw();
++ if (retval)
++ return -ENODEV;
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
++ retval = sst_drv_ctx->rx_time_slot_status;
++ if (retval != RX_TIMESLOT_UNINIT
++ && sst_drv_ctx->pmic_vendor != SND_NC)
++ sst_enable_rx_timeslot(retval);
++ }
++ }
++ return 0;
++}
++
++/**
++ * intel_sst_open - opens a handle to driver
++ *
++ * @i_node: inode structure
++ * @file_ptr:pointer to file
++ *
++ * This function is called by OS when a user space component
++ * tries to get a driver handle. Only one handle at a time
++ * will be allowed
++ */
++int intel_sst_open(struct inode *i_node, struct file *file_ptr)
++{
++ unsigned int retval = intel_sst_check_device();
++ if (retval)
++ return retval;
++
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
++ struct ioctl_pvt_data *data =
++ kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
++ if (!data) {
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ return -ENOMEM;
++ }
++
++ sst_drv_ctx->encoded_cnt++;
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
++ data->str_id = 0;
++ file_ptr->private_data = (void *)data;
++ pr_debug("sst: pvt_id handle = %d!\n", data->pvt_id);
++ } else {
++ retval = -EUSERS;
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ }
++ return retval;
++}
++
++/**
++ * intel_sst_open_cntrl - opens a handle to driver
++ *
++ * @i_node: inode structure
++ * @file_ptr:pointer to file
++ *
++ * This function is called by OS when a user space component
++ * tries to get a driver handle to /dev/intel_sst_control.
++ * Only one handle at a time will be allowed
++ * This is for control operations only
++ */
++int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
++{
++ unsigned int retval = intel_sst_check_device();
++ if (retval)
++ return retval;
++
++ /* audio manager open */
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
++ sst_drv_ctx->am_cnt++;
++ pr_debug("sst: AM handle opened...\n");
++ file_ptr->private_data = NULL;
++ } else
++ retval = -EACCES;
++
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ return retval;
++}
++
++/**
++ * intel_sst_release - releases a handle to driver
++ *
++ * @i_node: inode structure
++ * @file_ptr: pointer to file
++ *
++ * This function is called by OS when a user space component
++ * tries to release a driver handle.
++ */
++int intel_sst_release(struct inode *i_node, struct file *file_ptr)
++{
++ struct ioctl_pvt_data *data = file_ptr->private_data;
++
++ pr_debug("sst: Release called, closing app handle\n");
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ sst_drv_ctx->encoded_cnt--;
++ sst_drv_ctx->stream_cnt--;
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ free_stream_context(data->str_id);
++ kfree(data);
++ return 0;
++}
++
++int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
++{
++ /* audio manager close */
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ sst_drv_ctx->am_cnt--;
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ pr_debug("sst: AM handle closed\n");
++ return 0;
++}
++
++/**
++* intel_sst_mmap - mmaps a kernel buffer to user space for copying data
++*
++* @vma: vm area structure instance
++* @file_ptr: pointer to file
++*
++* This function is called by OS when a user space component
++* tries to get mmap memory from driver
++*/
++int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
++{
++ int retval, length;
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)file_ptr->private_data;
++ int str_id = data->str_id;
++ void *mem_area;
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++
++ length = vma->vm_end - vma->vm_start;
++ pr_debug("sst: called for stream %d length 0x%x\n", str_id, length);
++
++ if (length > sst_drv_ctx->mmap_len)
++ return -ENOMEM;
++ if (!sst_drv_ctx->mmap_mem)
++ return -EIO;
++
++ /* round it up to the page bondary */
++ /*mem_area = (void *)((((unsigned long)sst_drv_ctx->mmap_mem)
++ + PAGE_SIZE - 1) & PAGE_MASK);*/
++ mem_area = (void *) PAGE_ALIGN((unsigned int) sst_drv_ctx->mmap_mem);
++
++ /* map the whole physically contiguous area in one piece */
++ retval = remap_pfn_range(vma,
++ vma->vm_start,
++ virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
++ length,
++ vma->vm_page_prot);
++ if (retval)
++ sst_drv_ctx->streams[str_id].mmapped = false;
++ else
++ sst_drv_ctx->streams[str_id].mmapped = true;
++
++ pr_debug("sst: mmap ret 0x%x\n", retval);
++ return retval;
++}
++
++/* sets mmap data buffers to play/capture*/
++static int intel_sst_mmap_play_capture(u32 str_id,
++ struct snd_sst_mmap_buffs *mmap_buf)
++{
++ struct sst_stream_bufs *bufs;
++ int retval, i;
++ struct stream_info *stream;
++ struct snd_sst_mmap_buff_entry *buf_entry;
++
++ pr_debug("sst:called for str_id %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++ BUG_ON(!mmap_buf);
++
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->mmapped != true)
++ return -EIO;
++
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ return -EBADRQC;
++ }
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++
++ pr_debug("sst:new buffers count %d status %d\n",
++ mmap_buf->entries, stream->status);
++ buf_entry = mmap_buf->buff;
++ for (i = 0; i < mmap_buf->entries; i++) {
++ BUG_ON(!buf_entry);
++ bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
++ if (!bufs)
++ return -ENOMEM;
++ bufs->size = buf_entry->size;
++ bufs->offset = buf_entry->offset;
++ bufs->addr = sst_drv_ctx->mmap_mem;
++ bufs->in_use = false;
++ buf_entry++;
++ /* locking here */
++ mutex_lock(&stream->lock);
++ list_add_tail(&bufs->node, &stream->bufs);
++ mutex_unlock(&stream->lock);
++ }
++
++ mutex_lock(&stream->lock);
++ stream->data_blk.condition = false;
++ stream->data_blk.ret_code = 0;
++ if (stream->status == STREAM_INIT &&
++ stream->prev != STREAM_UN_INIT &&
++ stream->need_draining != true) {
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ if (stream->ops == STREAM_OPS_PLAYBACK) {
++ if (sst_play_frame(str_id) < 0) {
++ pr_warn("sst: play frames fail\n");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ if (sst_capture_frame(str_id) < 0) {
++ pr_warn("sst: capture frame fail\n");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ }
++ }
++ mutex_unlock(&stream->lock);
++ /* Block the call for reply */
++ if (!list_empty(&stream->bufs)) {
++ stream->data_blk.on = true;
++ retval = sst_wait_interruptible(sst_drv_ctx,
++ &stream->data_blk);
++ }
++
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
++ return retval;
++}
++
++/*sets user data buffers to play/capture*/
++static int intel_sst_play_capture(struct stream_info *stream, int str_id)
++{
++ int retval;
++
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.on = true;
++ stream->data_blk.condition = false;
++
++ mutex_lock(&stream->lock);
++ if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT) {
++ /* stream is started */
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ }
++
++ if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
++ /* stream is not started yet */
++ pr_debug("sst: Stream isn't in started state %d, prev %d\n",
++ stream->status, stream->prev);
++ } else if ((stream->status == STREAM_RUNNING ||
++ stream->status == STREAM_PAUSED) &&
++ stream->need_draining != true) {
++ /* stream is started */
++ if (stream->ops == STREAM_OPS_PLAYBACK ||
++ stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ if (sst_play_frame(str_id) < 0) {
++ pr_warn("sst: play frames failed\n");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ if (sst_capture_frame(str_id) < 0) {
++ pr_warn("sst: capture frames failed\n ");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ }
++ } else {
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ mutex_unlock(&stream->lock);
++ /* Block the call for reply */
++
++ retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
++ if (retval) {
++ stream->status = STREAM_INIT;
++ pr_debug("sst: wait returned error...\n");
++ }
++ return retval;
++}
++
++/* fills kernel list with buffer addresses for SST DSP driver to process*/
++static int snd_sst_fill_kernel_list(struct stream_info *stream,
++ const struct iovec *iovec, unsigned long nr_segs,
++ struct list_head *copy_to_list)
++{
++ struct sst_stream_bufs *stream_bufs;
++ unsigned long index, data_not_copied, mmap_len;
++ unsigned char *bufp;
++ unsigned long size, copied_size;
++ int retval = 0, add_to_list = 0;
++ static int sent_offset;
++ static unsigned long sent_index;
++
++ stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
++ if (!stream_bufs)
++ return -ENOMEM;
++ stream_bufs->addr = sst_drv_ctx->mmap_mem;
++#ifdef CONFIG_MRST_RAR_HANDLER
++ if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ for (index = stream->sg_index; index < nr_segs; index++) {
++ __u32 rar_handle;
++ struct sst_stream_bufs *stream_bufs =
++ kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
++
++ stream->sg_index = index;
++ if (!stream_bufs)
++ return -ENOMEM;
++ retval = copy_from_user((void *) &rar_handle,
++ iovec[index].iov_base,
++ sizeof(__u32));
++ if (retval != 0)
++ return -EFAULT;
++ stream_bufs->addr = (char *)rar_handle;
++ stream_bufs->in_use = false;
++ stream_bufs->size = iovec[0].iov_len;
++ /* locking here */
++ mutex_lock(&stream->lock);
++ list_add_tail(&stream_bufs->node, &stream->bufs);
++ mutex_unlock(&stream->lock);
++ }
++ stream->sg_index = index;
++ return retval;
++ }
++#endif
++ mmap_len = sst_drv_ctx->mmap_len;
++ stream_bufs->addr = sst_drv_ctx->mmap_mem;
++ bufp = stream->cur_ptr;
++
++ copied_size = 0;
++
++ if (!stream->sg_index)
++ sent_index = sent_offset = 0;
++
++ for (index = stream->sg_index; index < nr_segs; index++) {
++ stream->sg_index = index;
++ if (!stream->cur_ptr)
++ bufp = iovec[index].iov_base;
++
++ size = ((unsigned long)iovec[index].iov_base
++ + iovec[index].iov_len) - (unsigned long) bufp;
++
++ if ((copied_size + size) > mmap_len)
++ size = mmap_len - copied_size;
++
++
++ if (stream->ops == STREAM_OPS_PLAYBACK) {
++ data_not_copied = copy_from_user(
++ (void *)(stream_bufs->addr + copied_size),
++ bufp, size);
++ if (data_not_copied > 0) {
++ /* Clean up the list and return error code */
++ retval = -EFAULT;
++ break;
++ }
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ struct snd_sst_user_cap_list *entry =
++ kzalloc(sizeof(*entry), GFP_KERNEL);
++
++ if (!entry) {
++ kfree(stream_bufs);
++ return -ENOMEM;
++ }
++ entry->iov_index = index;
++ entry->iov_offset = (unsigned long) bufp -
++ (unsigned long)iovec[index].iov_base;
++ entry->offset = copied_size;
++ entry->size = size;
++ list_add_tail(&entry->node, copy_to_list);
++ }
++
++ stream->cur_ptr = bufp + size;
++
++ if (((unsigned long)iovec[index].iov_base
++ + iovec[index].iov_len) <
++ ((unsigned long)iovec[index].iov_base)) {
++ pr_debug("sst: Buffer overflows");
++ kfree(stream_bufs);
++ return -EINVAL;
++ }
++
++ if (((unsigned long)iovec[index].iov_base
++ + iovec[index].iov_len) ==
++ (unsigned long)stream->cur_ptr) {
++ stream->cur_ptr = NULL;
++ stream->sg_index++;
++ }
++
++ copied_size += size;
++ pr_debug("sst: copied_size - %lx\n", copied_size);
++ if ((copied_size >= mmap_len) ||
++ (stream->sg_index == nr_segs)) {
++ add_to_list = 1;
++ }
++
++ if (add_to_list) {
++ stream_bufs->in_use = false;
++ stream_bufs->size = copied_size;
++ /* locking here */
++ mutex_lock(&stream->lock);
++ list_add_tail(&stream_bufs->node, &stream->bufs);
++ mutex_unlock(&stream->lock);
++ break;
++ }
++ }
++ return retval;
++}
++
++/* This function copies the captured data returned from SST DSP engine
++ * to the user buffers*/
++static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
++ const struct iovec *iovec,
++ struct list_head *copy_to_list)
++{
++ struct snd_sst_user_cap_list *entry, *_entry;
++ struct sst_stream_bufs *kbufs = NULL, *_kbufs;
++ int retval = 0;
++ unsigned long data_not_copied;
++
++ /* copy sent buffers */
++ pr_debug("sst: capture stream copying to user now...\n");
++ list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == true) {
++ /* copy to user */
++ list_for_each_entry_safe(entry, _entry,
++ copy_to_list, node) {
++ data_not_copied = copy_to_user((void *)
++ iovec[entry->iov_index].iov_base +
++ entry->iov_offset,
++ kbufs->addr + entry->offset,
++ entry->size);
++ if (data_not_copied > 0) {
++ /* Clean up the list and return error */
++ retval = -EFAULT;
++ break;
++ }
++ list_del(&entry->node);
++ kfree(entry);
++ }
++ }
++ }
++ pr_debug("sst: end of cap copy\n");
++ return retval;
++}
++
++/*
++ * snd_sst_userbufs_play_cap - constructs the list from user buffers
++ *
++ * @iovec:pointer to iovec structure
++ * @nr_segs:number entries in the iovec structure
++ * @str_id:stream id
++ * @stream:pointer to stream_info structure
++ *
++ * This function will traverse the user list and copy the data to the kernel
++ * space buffers.
++ */
++static int snd_sst_userbufs_play_cap(const struct iovec *iovec,
++ unsigned long nr_segs, unsigned int str_id,
++ struct stream_info *stream)
++{
++ int retval;
++ LIST_HEAD(copy_to_list);
++
++
++ retval = snd_sst_fill_kernel_list(stream, iovec, nr_segs,
++ &copy_to_list);
++
++ retval = intel_sst_play_capture(stream, str_id);
++ if (retval < 0)
++ return retval;
++
++ if (stream->ops == STREAM_OPS_CAPTURE) {
++ retval = snd_sst_copy_userbuf_capture(stream, iovec,
++ &copy_to_list);
++ }
++ return retval;
++}
++
++/* This function is common function across read/write
++ for user buffers called from system calls*/
++static int intel_sst_read_write(unsigned int str_id, char __user *buf,
++ size_t count)
++{
++ int retval;
++ struct stream_info *stream;
++ struct iovec iovec;
++ unsigned long nr_segs;
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->mmapped == true) {
++ pr_warn("sst: user write and stream is mapped");
++ return -EIO;
++ }
++ if (!count)
++ return -EINVAL;
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++ /* copy user buf details */
++ pr_debug("sst: new buffers %p, copy size %d, status %d\n" ,
++ buf, (int) count, (int) stream->status);
++
++ stream->buf_type = SST_BUF_USER_STATIC;
++ iovec.iov_base = (void *)buf;
++ iovec.iov_len = count;
++ nr_segs = 1;
++
++ do {
++ retval = snd_sst_userbufs_play_cap(
++ &iovec, nr_segs, str_id, stream);
++ if (retval < 0)
++ break;
++
++ } while (stream->sg_index < nr_segs);
++
++ stream->sg_index = 0;
++ stream->cur_ptr = NULL;
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
++ return retval;
++}
++
++/***
++ * intel_sst_write - This function is called when user tries to play out data
++ *
++ * @file_ptr:pointer to file
++ * @buf:user buffer to be played out
++ * @count:size of tthe buffer
++ * @offset:offset to start from
++ *
++ * writes the encoded data into DSP
++ */
++int intel_sst_write(struct file *file_ptr, const char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct ioctl_pvt_data *data = file_ptr->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream = &sst_drv_ctx->streams[str_id];
++
++ pr_debug("sst: called for %d\n", str_id);
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ return -EBADRQC;
++ }
++ return intel_sst_read_write(str_id, (char __user *)buf, count);
++}
++
++/*
++ * intel_sst_aio_write - write buffers
++ *
++ * @kiocb:pointer to a structure containing file pointer
++ * @iov:list of user buffer to be played out
++ * @nr_segs:number of entries
++ * @offset:offset to start from
++ *
++ * This function is called when user tries to play out multiple data buffers
++ */
++ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset)
++{
++ int retval;
++ struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream;
++
++ pr_debug("sst: entry - %ld\n", nr_segs);
++
++ if (is_sync_kiocb(kiocb) == false)
++ return -EINVAL;
++
++ pr_debug("sst: called for str_id %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->mmapped == true)
++ return -EIO;
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ return -EBADRQC;
++ }
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++ pr_debug("sst: new segs %ld, offset %d, status %d\n" ,
++ nr_segs, (int) offset, (int) stream->status);
++ stream->buf_type = SST_BUF_USER_STATIC;
++ do {
++ retval = snd_sst_userbufs_play_cap(iov, nr_segs,
++ str_id, stream);
++ if (retval < 0)
++ break;
++
++ } while (stream->sg_index < nr_segs);
++
++ stream->sg_index = 0;
++ stream->cur_ptr = NULL;
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
++ return retval;
++}
++
++/*
++ * intel_sst_read - read the encoded data
++ *
++ * @file_ptr: pointer to file
++ * @buf: user buffer to be filled with captured data
++ * @count: size of tthe buffer
++ * @offset: offset to start from
++ *
++ * This function is called when user tries to capture data
++ */
++int intel_sst_read(struct file *file_ptr, char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct ioctl_pvt_data *data = file_ptr->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream = &sst_drv_ctx->streams[str_id];
++
++ pr_debug("sst: called for %d\n", str_id);
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE)
++ return -EBADRQC;
++ return intel_sst_read_write(str_id, buf, count);
++}
++
++/*
++ * intel_sst_aio_read - aio read
++ *
++ * @kiocb: pointer to a structure containing file pointer
++ * @iov: list of user buffer to be filled with captured
++ * @nr_segs: number of entries
++ * @offset: offset to start from
++ *
++ * This function is called when user tries to capture out multiple data buffers
++ */
++ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset)
++{
++ int retval;
++ struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream;
++
++ pr_debug("sst: entry - %ld\n", nr_segs);
++
++ if (is_sync_kiocb(kiocb) == false) {
++ pr_debug("sst: aio_read from user space is not allowed\n");
++ return -EINVAL;
++ }
++
++ pr_debug("sst: called for str_id %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->mmapped == true)
++ return -EIO;
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE)
++ return -EBADRQC;
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++
++ pr_debug("sst: new segs %ld, offset %d, status %d\n" ,
++ nr_segs, (int) offset, (int) stream->status);
++ stream->buf_type = SST_BUF_USER_STATIC;
++ do {
++ retval = snd_sst_userbufs_play_cap(iov, nr_segs,
++ str_id, stream);
++ if (retval < 0)
++ break;
++
++ } while (stream->sg_index < nr_segs);
++
++ stream->sg_index = 0;
++ stream->cur_ptr = NULL;
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
++ return retval;
++}
++
++/* sst_print_stream_params - prints the stream parameters (debug fn)*/
++static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
++{
++ pr_debug("sst: codec params:result =%d\n",
++ get_prm->codec_params.result);
++ pr_debug("sst: codec params:stream = %d\n",
++ get_prm->codec_params.stream_id);
++ pr_debug("sst: codec params:codec = %d\n",
++ get_prm->codec_params.codec);
++ pr_debug("sst: codec params:ops = %d\n",
++ get_prm->codec_params.ops);
++ pr_debug("sst: codec params:stream_type= %d\n",
++ get_prm->codec_params.stream_type);
++ pr_debug("sst: pcmparams:sfreq= %d\n",
++ get_prm->pcm_params.sfreq);
++ pr_debug("sst: pcmparams:num_chan= %d\n",
++ get_prm->pcm_params.num_chan);
++ pr_debug("sst: pcmparams:pcm_wd_sz= %d\n",
++ get_prm->pcm_params.pcm_wd_sz);
++ return;
++}
++
++/**
++ * intel_sst_ioctl - recieves the device ioctl's
++ *
++ * @i_node:inode structure
++ * @file_ptr:pointer to file
++ * @cmd:Ioctl cmd
++ * @arg:data
++ *
++ * This function is called by OS when a user space component
++ * sends an Ioctl to SST driver
++ */
++int intel_sst_ioctl(struct inode *i_node, struct file *file_ptr,
++ unsigned int cmd, unsigned long arg)
++{
++ int retval = 0;
++ struct ioctl_pvt_data *data = NULL;
++ int str_id = 0, minor = 0;
++
++ data = file_ptr->private_data;
++ if (data) {
++ minor = 0;
++ str_id = data->str_id;
++ } else
++ minor = 1;
++
++ if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
++ return -EBUSY;
++
++ switch (_IOC_NR(cmd)) {
++ case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
++ pr_debug("sst: IOCTL_PAUSE recieved for %d!\n", str_id);
++ if (minor != STREAM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++ retval = sst_pause_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_RESUME):
++ pr_debug("sst: SNDRV_SST_IOCTL_RESUME recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++ retval = sst_resume_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
++ struct snd_sst_params *str_param = (struct snd_sst_params *)arg;
++
++ pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++
++ if (!str_id) {
++
++ retval = sst_get_stream(str_param);
++ if (retval > 0) {
++ struct stream_info *str_info;
++ sst_drv_ctx->stream_cnt++;
++ data->str_id = retval;
++ str_info = &sst_drv_ctx->streams[retval];
++ str_info->src = SST_DRV;
++ retval = copy_to_user(&str_param->stream_id,
++ &retval, sizeof(__u32));
++ } else {
++ if (retval == -SST_ERR_INVALID_PARAMS)
++ retval = -EINVAL;
++ }
++ } else {
++ pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
++ /* allocated set params only */
++ retval = sst_set_stream_param(str_id, str_param);
++ /* Block the call for reply */
++ if (!retval) {
++ int sfreq = 0, word_size = 0, num_channel = 0;
++ sfreq = str_param->sparams.uc.pcm_params.sfreq;
++ word_size = str_param->sparams.
++ uc.pcm_params.pcm_wd_sz;
++ num_channel = str_param->
++ sparams.uc.pcm_params.num_chan;
++ if (str_param->ops == STREAM_OPS_CAPTURE) {
++ sst_drv_ctx->scard_ops->\
++ set_pcm_audio_params(sfreq,
++ word_size, num_channel);
++ }
++ }
++ }
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_SET_VOL): {
++ struct snd_sst_vol *set_vol;
++ struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
++ pr_debug("sst: SET_VOLUME recieved for %d!\n",
++ rec_vol->stream_id);
++ if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
++ pr_debug("sst: invalid operation!\n");
++ retval = -EPERM;
++ break;
++ }
++ set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC);
++ if (!set_vol) {
++ pr_debug("sst: mem allocation failed\n");
++ retval = -ENOMEM;
++ break;
++ }
++ retval = copy_from_user(set_vol, rec_vol, sizeof(*set_vol));
++ if (retval) {
++ pr_debug("sst: copy failed\n");
++ retval = -EAGAIN;
++ break;
++ }
++ retval = sst_set_vol(set_vol);
++ kfree(set_vol);
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_GET_VOL): {
++ struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
++ struct snd_sst_vol get_vol;
++ pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
++ rec_vol->stream_id);
++ if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
++ pr_debug("sst: invalid operation!\n");
++ retval = -EPERM;
++ break;
++ }
++ get_vol.stream_id = rec_vol->stream_id;
++ retval = sst_get_vol(&get_vol);
++ if (retval) {
++ retval = -EIO;
++ break;
++ }
++ pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
++ get_vol.stream_id, get_vol.volume,
++ get_vol.ramp_duration, get_vol.ramp_type);
++ retval = copy_to_user((struct snd_sst_vol *)arg,
++ &get_vol, sizeof(get_vol));
++ if (retval) {
++ retval = -EIO;
++ break;
++ }
++ /*sst_print_get_vol_info(str_id, &get_vol);*/
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_MUTE): {
++ struct snd_sst_mute *set_mute;
++ struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg;
++ pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
++ rec_mute->stream_id);
++ if (minor == STREAM_MODULE && rec_mute->stream_id == 0) {
++ retval = -EPERM;
++ break;
++ }
++ set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
++ if (!set_mute) {
++ retval = -ENOMEM;
++ break;
++ }
++ retval = copy_from_user(set_mute, rec_mute, sizeof(*set_mute));
++ if (retval) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = sst_set_mute(set_mute);
++ kfree(set_mute);
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
++ struct snd_sst_get_stream_params get_params;
++
++ pr_debug("sst: IOCTL_GET_PARAMS recieved!\n");
++ if (minor != 0) {
++ retval = -EBADRQC;
++ break;
++ }
++
++ retval = sst_get_stream_params(str_id, &get_params);
++ if (retval) {
++ retval = -EIO;
++ break;
++ }
++ retval = copy_to_user((struct snd_sst_get_stream_params *)arg,
++ &get_params, sizeof(get_params));
++ if (retval) {
++ retval = -EBUSY;
++ break;
++ }
++ sst_print_stream_params(&get_params);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_MMAP_PLAY):
++ case _IOC_NR(SNDRV_SST_MMAP_CAPTURE):
++ pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++ retval = intel_sst_mmap_play_capture(str_id,
++ (struct snd_sst_mmap_buffs *)arg);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_DROP):
++ pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_drop_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
++ unsigned long long *ms = (unsigned long long *)arg;
++ struct snd_sst_tstamp tstamp = {0};
++ unsigned long long time, freq, mod;
++
++ pr_debug("sst: SNDRV_SST_STREAM_GET_TSTAMP recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++ memcpy_fromio(&tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
++ +(str_id * sizeof(tstamp))),
++ sizeof(tstamp));
++ time = tstamp.samples_rendered;
++ freq = (unsigned long long) tstamp.sampling_frequency;
++ time = time * 1000; /* converting it to ms */
++ mod = do_div(time, freq);
++ retval = copy_to_user(ms, &time, sizeof(*ms));
++ if (retval)
++ retval = -EFAULT;
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_STREAM_START):{
++ struct stream_info *stream;
++
++ pr_debug("sst: SNDRV_SST_STREAM_START recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ break;
++ stream = &sst_drv_ctx->streams[str_id];
++ mutex_lock(&stream->lock);
++ if (stream->status == STREAM_INIT &&
++ stream->need_draining != true) {
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ if (stream->ops == STREAM_OPS_PLAYBACK ||
++ stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ retval = sst_play_frame(str_id);
++ } else if (stream->ops == STREAM_OPS_CAPTURE)
++ retval = sst_capture_frame(str_id);
++ else {
++ retval = -EINVAL;
++ mutex_unlock(
++ &sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++ if (retval < 0) {
++ stream->status = STREAM_INIT;
++ mutex_unlock(
++ &sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++ } else {
++ retval = -EINVAL;
++ }
++ mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
++ struct snd_sst_target_device *target_device;
++
++ pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
++ target_device = (struct snd_sst_target_device *)arg;
++ BUG_ON(!target_device);
++ if (minor != AM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++ retval = sst_target_device_select(target_device);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
++ struct snd_sst_driver_info *info =
++ (struct snd_sst_driver_info *)arg;
++
++ pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
++ info->version = SST_VERSION_NUM;
++ /* hard coding, shud get sumhow later */
++ info->active_pcm_streams = sst_drv_ctx->stream_cnt -
++ sst_drv_ctx->encoded_cnt;
++ info->active_enc_streams = sst_drv_ctx->encoded_cnt;
++ info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
++ info->max_enc_streams = MAX_ENC_STREAM;
++ info->buf_per_stream = sst_drv_ctx->mmap_len;
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
++ struct snd_sst_dbufs *param =
++ (struct snd_sst_dbufs *)arg, dbufs_local;
++ int i;
++ struct snd_sst_buffs ibufs, obufs;
++ struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries],
++ obuf_temp[param->obufs->entries];
++
++ pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EBADRQC;
++ break;
++ }
++ if (!param) {
++ retval = -EINVAL;
++ break;
++ }
++
++ dbufs_local.input_bytes_consumed = param->input_bytes_consumed;
++ dbufs_local.output_bytes_produced =
++ param->output_bytes_produced;
++ dbufs_local.ibufs = &ibufs;
++ dbufs_local.obufs = &obufs;
++ dbufs_local.ibufs->entries = param->ibufs->entries;
++ dbufs_local.ibufs->type = param->ibufs->type;
++ dbufs_local.obufs->entries = param->obufs->entries;
++ dbufs_local.obufs->type = param->obufs->type;
++
++ dbufs_local.ibufs->buff_entry = ibuf_temp;
++ for (i = 0; i < dbufs_local.ibufs->entries; i++) {
++ ibuf_temp[i].buffer =
++ param->ibufs->buff_entry[i].buffer;
++ ibuf_temp[i].size =
++ param->ibufs->buff_entry[i].size;
++ }
++ dbufs_local.obufs->buff_entry = obuf_temp;
++ for (i = 0; i < dbufs_local.obufs->entries; i++) {
++ obuf_temp[i].buffer =
++ param->obufs->buff_entry[i].buffer;
++ obuf_temp[i].size =
++ param->obufs->buff_entry[i].size;
++ }
++ retval = sst_decode(str_id, &dbufs_local);
++ if (retval)
++ retval = -EAGAIN;
++ retval = copy_to_user(&param->input_bytes_consumed,
++ &dbufs_local.input_bytes_consumed,
++ sizeof(unsigned long long));
++ if (retval) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = copy_to_user(&param->output_bytes_produced,
++ &dbufs_local.output_bytes_produced,
++ sizeof(unsigned long long));
++ if (retval) {
++ retval = -EFAULT;
++ break;
++ }
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
++ pr_debug("sst: SNDRV_SST_STREAM_DRAIN recived\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_drain_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
++ unsigned long long *bytes = (unsigned long long *)arg;
++ struct snd_sst_tstamp tstamp = {0};
++
++ pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EINVAL;
++ break;
++ }
++ memcpy_fromio(&tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
++ +(str_id * sizeof(tstamp))),
++ sizeof(tstamp));
++ retval = copy_to_user(bytes, &tstamp.bytes_processed,
++ sizeof(*bytes));
++ if (retval)
++ retval = -EFAULT;
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_FW_INFO): {
++ struct snd_sst_fw_info *fw_info;
++
++ pr_debug("sst: SNDRV_SST_FW_INFO recived\n");
++
++ fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
++ if (!fw_info) {
++ retval = -ENOMEM;
++ break;
++ }
++ retval = sst_get_fw_info(fw_info);
++ if (retval) {
++ retval = -EIO;
++ kfree(fw_info);
++ break;
++ }
++ retval = copy_to_user((struct snd_sst_dbufs *)arg,
++ fw_info, sizeof(*fw_info));
++ if (retval) {
++ kfree(fw_info);
++ retval = -EFAULT;
++ break;
++ }
++ /*sst_print_fw_info(fw_info);*/
++ kfree(fw_info);
++ break;
++ }
++ default:
++ retval = -EINVAL;
++ }
++ pr_debug("sst: intel_sst_ioctl:complete ret code = %d\n", retval);
++
++ return retval;
++}
++
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_common.h
+@@ -0,0 +1,618 @@
++#ifndef __INTEL_SST_COMMON_H__
++#define __INTEL_SST_COMMON_H__
++/*
++ * intel_sst_common.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * Common private declarations for SST
++ */
++
++#define SST_DRIVER_VERSION "1.2.05"
++#define SST_VERSION_NUM 0x1205
++
++/* driver names */
++#define SST_DRV_NAME "intel_sst_driver"
++#define SST_FW_FILENAME_MRST "fw_sst_080a.bin"
++#define SST_FW_FILENAME_MFLD "fw_sst_082f.bin"
++#define SST_MRST_PCI_ID 0x080A
++#define SST_MFLD_PCI_ID 0x082F
++
++enum sst_states {
++ SST_FW_LOADED = 1,
++ SST_FW_RUNNING,
++ SST_UN_INIT,
++ SST_ERROR,
++ SST_SUSPENDED
++};
++
++#define MAX_ACTIVE_STREAM 3
++#define MAX_ENC_STREAM 1
++#define MAX_AM_HANDLES 1
++#define ALLOC_TIMEOUT 5000
++/* SST numbers */
++#define SST_BLOCK_TIMEOUT 5000
++#define TARGET_DEV_BLOCK_TIMEOUT 5000
++
++#define BLOCK_UNINIT -1
++#define RX_TIMESLOT_UNINIT -1
++
++/* SST register map */
++#define SST_CSR 0x00
++#define SST_PISR 0x08
++#define SST_PIMR 0x10
++#define SST_ISRX 0x18
++#define SST_IMRX 0x28
++#define SST_IPCX 0x38 /* IPC IA-SST */
++#define SST_IPCD 0x40 /* IPC SST-IA */
++#define SST_ISRD 0x20 /* dummy register for shim workaround */
++#define SST_SHIM_SIZE 0X44
++
++#define SPI_MODE_ENABLE_BASE_ADDR 0xffae4000
++#define FW_SIGNATURE_SIZE 4
++
++/* PMIC and SST hardware states */
++enum sst_mad_states {
++ SND_MAD_UN_INIT = 0,
++ SND_MAD_INIT_DONE,
++};
++
++/* stream states */
++enum sst_stream_states {
++ STREAM_UN_INIT = 0, /* Freed/Not used stream */
++ STREAM_RUNNING = 1, /* Running */
++ STREAM_PAUSED = 2, /* Paused stream */
++ STREAM_DECODE = 3, /* stream is in decoding only state */
++ STREAM_INIT = 4, /* stream init, waiting for data */
++};
++
++
++enum sst_ram_type {
++ SST_IRAM = 1,
++ SST_DRAM = 2,
++};
++/* SST shim registers to structure mapping */
++union config_status_reg {
++ struct {
++ u32 rsvd0:1;
++ u32 sst_reset:1;
++ u32 hw_rsvd:3;
++ u32 sst_clk:2;
++ u32 bypass:3;
++ u32 run_stall:1;
++ u32 rsvd1:2;
++ u32 strb_cntr_rst:1;
++ u32 rsvd:18;
++ } part;
++ u32 full;
++};
++
++union interrupt_reg {
++ struct {
++ u32 done_interrupt:1;
++ u32 busy_interrupt:1;
++ u32 rsvd:30;
++ } part;
++ u32 full;
++};
++
++union sst_pisr_reg {
++ struct {
++ u32 pssp0:1;
++ u32 pssp1:1;
++ u32 rsvd0:3;
++ u32 dmac:1;
++ u32 rsvd1:26;
++ } part;
++ u32 full;
++};
++
++union sst_pimr_reg {
++ struct {
++ u32 ssp0:1;
++ u32 ssp1:1;
++ u32 rsvd0:3;
++ u32 dmac:1;
++ u32 rsvd1:10;
++ u32 ssp0_sc:1;
++ u32 ssp1_sc:1;
++ u32 rsvd2:3;
++ u32 dmac_sc:1;
++ u32 rsvd3:10;
++ } part;
++ u32 full;
++};
++
++
++struct sst_stream_bufs {
++ struct list_head node;
++ u32 size;
++ const char *addr;
++ u32 data_copied;
++ bool in_use;
++ u32 offset;
++};
++
++struct snd_sst_user_cap_list {
++ unsigned int iov_index; /* index of iov */
++ unsigned long iov_offset; /* offset in iov */
++ unsigned long offset; /* offset in kmem */
++ unsigned long size; /* size copied */
++ struct list_head node;
++};
++/*
++This structure is used to block a user/fw data call to another
++fw/user call
++*/
++struct sst_block {
++ bool condition; /* condition for blocking check */
++ int ret_code; /* ret code when block is released */
++	void *data; /* data to be passed for block if any */
++ bool on;
++};
++
++enum snd_sst_buf_type {
++ SST_BUF_USER_STATIC = 1,
++ SST_BUF_USER_DYNAMIC,
++ SST_BUF_MMAP_STATIC,
++ SST_BUF_MMAP_DYNAMIC,
++};
++
++enum snd_src {
++ SST_DRV = 1,
++ MAD_DRV = 2
++};
++
++/**
++ * struct stream_info - structure that holds the stream information
++ *
++ * @status : stream current state
++ * @prev : stream prev state
++ * @codec : stream codec
++ * @sst_id : stream id
++ * @ops : stream operation pb/cp/drm...
++ * @bufs: stream buffer list
++ * @lock : stream mutex for protecting state
++ * @pcm_lock : spinlock for pcm path only
++ * @mmapped : is stream mmapped
++ * @sg_index : current stream user buffer index
++ * @cur_ptr : stream user buffer pointer
++ * @buf_entry : current user buffer
++ * @data_blk : stream block for data operations
++ * @ctrl_blk : stream block for ctrl operations
++ * @buf_type : stream user buffer type
++ * @pcm_substream : PCM substream
++ * @period_elapsed : PCM period elapsed callback
++ * @sfreq : stream sampling freq
++ * @decode_ibuf : Decoded i/p buffers pointer
++ * @decode_obuf : Decoded o/p buffers pointer
++ * @decode_isize : Decoded i/p buffers size
++ * @decode_osize : Decoded o/p buffers size
++ * @decode_ibuf_type : Decoded i/p buffer type
++ * @decode_obuf_type : Decoded o/p buffer type
++ * @idecode_alloc : Decode alloc index
++ * @need_draining : stream set for drain
++ * @str_type : stream type
++ * @curr_bytes : current bytes decoded
++ * @cumm_bytes : cumulative bytes decoded
++ * @str_type : stream type
++ * @src : stream source
++ * @device : output device type (medfield only)
++ * @pcm_slot : pcm slot value
++ */
++struct stream_info {
++ unsigned int status;
++ unsigned int prev;
++ u8 codec;
++ unsigned int sst_id;
++ unsigned int ops;
++ struct list_head bufs;
++ struct mutex lock; /* mutex */
++ spinlock_t pcm_lock;
++ bool mmapped;
++ unsigned int sg_index; /* current buf Index */
++ unsigned char *cur_ptr; /* Current static bufs */
++ struct snd_sst_buf_entry *buf_entry;
++ struct sst_block data_blk; /* stream ops block */
++ struct sst_block ctrl_blk; /* stream control cmd block */
++ enum snd_sst_buf_type buf_type;
++ void *pcm_substream;
++ void (*period_elapsed) (void *pcm_substream);
++ unsigned int sfreq;
++ void *decode_ibuf, *decode_obuf;
++ unsigned int decode_isize, decode_osize;
++ u8 decode_ibuf_type, decode_obuf_type;
++ unsigned int idecode_alloc;
++ unsigned int need_draining;
++ unsigned int str_type;
++ u32 curr_bytes;
++ u32 cumm_bytes;
++ u32 src;
++ enum snd_sst_audio_device_type device;
++ u8 pcm_slot;
++};
++
++/*
++ * struct stream_alloc_block - this structure is used for blocking the user's
++ * alloc calls to fw's response to alloc calls
++ *
++ * @sst_id : session id of blocked stream
++ * @ops_block : ops block structure
++ */
++struct stream_alloc_block {
++ int sst_id; /* session id of blocked stream */
++	struct sst_block ops_block; /* ops block structure */
++};
++
++#define SST_FW_SIGN "$SST"
++#define SST_FW_LIB_SIGN "$LIB"
++
++/*
++ * struct fw_header - FW file headers
++ *
++ * @signature : FW signature
++ * @modules : # of modules
++ * @file_format : version of header format
++ * @reserved : reserved fields
++ */
++struct fw_header {
++ unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
++ u32 file_size; /* size of fw minus this header */
++ u32 modules; /* # of modules */
++ u32 file_format; /* version of header format */
++ u32 reserved[4];
++};
++
++struct fw_module_header {
++ unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
++ u32 mod_size; /* size of module */
++ u32 blocks; /* # of blocks */
++ u32 type; /* codec type, pp lib */
++ u32 entry_point;
++};
++
++struct dma_block_info {
++ enum sst_ram_type type; /* IRAM/DRAM */
++ u32 size; /* Bytes */
++ u32 ram_offset; /* Offset in I/DRAM */
++ u32 rsvd; /* Reserved field */
++};
++
++struct ioctl_pvt_data {
++ int str_id;
++ int pvt_id;
++};
++
++struct sst_ipc_msg_wq {
++ union ipc_header header;
++ char mailbox[SST_MAILBOX_SIZE];
++ struct work_struct wq;
++};
++
++struct mad_ops_wq {
++ int stream_id;
++ enum sst_controls control_op;
++ struct work_struct wq;
++
++};
++
++#define SST_MMAP_PAGES (640*1024 / PAGE_SIZE)
++#define SST_MMAP_STEP (40*1024 / PAGE_SIZE)
++
++/***
++ * struct intel_sst_drv - driver ops
++ *
++ * @pmic_state : pmic state
++ * @pmic_vendor : pmic vendor detected
++ * @sst_state : current sst device state
++ * @pci_id : PCI device id loaded
++ * @shim : SST shim pointer
++ * @mailbox : SST mailbox pointer
++ * @iram : SST IRAM pointer
++ * @dram : SST DRAM pointer
++ * @shim_phy_add : SST shim phy addr
++ * @ipc_dispatch_list : ipc messages dispatched
++ * @ipc_post_msg_wq : wq to post IPC messages context
++ * @ipc_process_msg : wq to process msgs from FW context
++ * @ipc_process_reply : wq to process reply from FW context
++ * @ipc_post_msg : wq to post reply from FW context
++ * @mad_ops : MAD driver operations registered
++ * @mad_wq : MAD driver wq
++ * @post_msg_wq : wq to post IPC messages
++ * @process_msg_wq : wq to process msgs from FW
++ * @process_reply_wq : wq to process reply from FW
++ * @streams : sst stream contexts
++ * @alloc_block : block structure for alloc
++ * @tgt_dev_blk : block structure for target device
++ * @fw_info_blk : block structure for fw info block
++ * @vol_info_blk : block structure for vol info block
++ * @mute_info_blk : block structure for mute info block
++ * @hs_info_blk : block structure for hs info block
++ * @list_lock : sst driver list lock (deprecated)
++ * @list_spin_lock : sst driver spin lock block
++ * @scard_ops : sst card ops
++ * @pci : sst pci device structure
++ * @active_streams : sst active streams
++ * @sst_lock : sst device lock
++ * @stream_lock : sst stream lock
++ * @unique_id : sst unique id
++ * @stream_cnt : total sst active stream count
++ * @pb_streams : total active pb streams
++ * @cp_streams : total active cp streams
++ * @lpe_stalled : lpe stall status
++ * @pmic_port_instance : active pmic port instance
++ * @rx_time_slot_status : active rx slot
++ * @lpaudio_start : lpaudio status
++ * @audio_start : audio status
++ * @devt_d : pointer to /dev/lpe node
++ * @devt_c : pointer to /dev/lpe_ctrl node
++ * @max_streams : max streams allowed
++ */
++struct intel_sst_drv {
++ bool pmic_state;
++ int pmic_vendor;
++ int sst_state;
++ unsigned int pci_id;
++ void __iomem *shim;
++ void __iomem *mailbox;
++ void __iomem *iram;
++ void __iomem *dram;
++ unsigned int shim_phy_add;
++ struct list_head ipc_dispatch_list;
++ struct work_struct ipc_post_msg_wq;
++ struct sst_ipc_msg_wq ipc_process_msg;
++ struct sst_ipc_msg_wq ipc_process_reply;
++ struct sst_ipc_msg_wq ipc_post_msg;
++ struct mad_ops_wq mad_ops;
++ wait_queue_head_t wait_queue;
++ struct workqueue_struct *mad_wq;
++ struct workqueue_struct *post_msg_wq;
++ struct workqueue_struct *process_msg_wq;
++ struct workqueue_struct *process_reply_wq;
++
++ struct stream_info streams[MAX_NUM_STREAMS];
++ struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
++ struct sst_block tgt_dev_blk, fw_info_blk,
++ vol_info_blk, mute_info_blk, hs_info_blk;
++ struct mutex list_lock;/* mutex for IPC list locking */
++ spinlock_t list_spin_lock; /* mutex for IPC list locking */
++ struct snd_pmic_ops *scard_ops;
++ struct pci_dev *pci;
++ int active_streams[MAX_NUM_STREAMS];
++ void *mmap_mem;
++ struct mutex sst_lock;
++ struct mutex stream_lock;
++ unsigned int mmap_len;
++ unsigned int unique_id;
++ unsigned int stream_cnt; /* total streams */
++	unsigned int encoded_cnt; /* encoded streams only */
++ unsigned int am_cnt;
++ unsigned int pb_streams; /* pb streams active */
++ unsigned int cp_streams; /* cp streams active */
++ unsigned int lpe_stalled; /* LPE is stalled or not */
++ unsigned int pmic_port_instance; /*pmic port instance*/
++ int rx_time_slot_status;
++ unsigned int lpaudio_start;
++ /* 1 - LPA stream(MP3 pb) in progress*/
++ unsigned int audio_start;
++ dev_t devt_d, devt_c;
++ unsigned int max_streams;
++};
++
++extern struct intel_sst_drv *sst_drv_ctx;
++
++#define CHIP_REV_REG 0xff108000
++#define CHIP_REV_ADDR 0x78
++
++/* misc definitions */
++#define FW_DWNL_ID 0xFF
++#define LOOP1 0x11111111
++#define LOOP2 0x22222222
++#define LOOP3 0x33333333
++#define LOOP4 0x44444444
++
++#define SST_DEFAULT_PMIC_PORT 1 /*audio port*/
++/* NOTE: status will have +ve for good cases and -ve for error ones */
++#define MAX_STREAM_FIELD 255
++
++int sst_alloc_stream(char *params, unsigned int stream_ops, u8 codec,
++ unsigned int session_id);
++int sst_alloc_stream_response(unsigned int str_id,
++ struct snd_sst_alloc_response *response);
++int sst_stalled(void);
++int sst_pause_stream(int id);
++int sst_resume_stream(int id);
++int sst_enable_rx_timeslot(int status);
++int sst_drop_stream(int id);
++int sst_free_stream(int id);
++int sst_start_stream(int streamID);
++int sst_play_frame(int streamID);
++int sst_pcm_play_frame(int str_id, struct sst_stream_bufs *sst_buf);
++int sst_capture_frame(int streamID);
++int sst_set_stream_param(int streamID, struct snd_sst_params *str_param);
++int sst_target_device_select(struct snd_sst_target_device *target_device);
++int sst_decode(int str_id, struct snd_sst_dbufs *dbufs);
++int sst_get_decoded_bytes(int str_id, unsigned long long *bytes);
++int sst_get_fw_info(struct snd_sst_fw_info *info);
++int sst_get_stream_params(int str_id,
++ struct snd_sst_get_stream_params *get_params);
++int sst_get_stream(struct snd_sst_params *str_param);
++int sst_get_stream_allocated(struct snd_sst_params *str_param,
++ struct snd_sst_lib_download **lib_dnld);
++int sst_drain_stream(int str_id);
++int sst_get_vol(struct snd_sst_vol *set_vol);
++int sst_set_vol(struct snd_sst_vol *set_vol);
++int sst_set_mute(struct snd_sst_mute *set_mute);
++
++
++void sst_post_message(struct work_struct *work);
++void sst_process_message(struct work_struct *work);
++void sst_process_reply(struct work_struct *work);
++void sst_process_mad_ops(struct work_struct *work);
++void sst_process_mad_jack_detection(struct work_struct *work);
++
++int intel_sst_ioctl(struct inode *i_node, struct file *file_ptr,
++ unsigned int cmd, unsigned long arg);
++int intel_sst_open(struct inode *i_node, struct file *file_ptr);
++int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
++int intel_sst_release(struct inode *i_node, struct file *file_ptr);
++int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
++int intel_sst_read(struct file *file_ptr, char __user *buf,
++ size_t count, loff_t *ppos);
++int intel_sst_write(struct file *file_ptr, const char __user *buf,
++ size_t count, loff_t *ppos);
++int intel_sst_mmap(struct file *fp, struct vm_area_struct *vma);
++ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset);
++ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset);
++
++int sst_load_fw(const struct firmware *fw, void *context);
++int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
++int sst_spi_mode_enable(void);
++int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
++
++int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block);
++int sst_wait_interruptible_timeout(struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block, int timeout);
++int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
++ struct stream_alloc_block *block);
++int sst_create_large_msg(struct ipc_post **arg);
++int sst_create_short_msg(struct ipc_post **arg);
++void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
++ u8 sst_id, int status, void *data);
++void sst_clear_interrupt(void);
++int intel_sst_resume(struct pci_dev *pci);
++int sst_download_fw(void);
++void free_stream_context(unsigned int str_id);
++void sst_clean_stream(struct stream_info *stream);
++
++/*
++ * sst_fill_header - inline to fill sst header
++ *
++ * @header : ipc header
++ * @msg : IPC message to be sent
++ * @large : is ipc large msg
++ * @str_id : stream id
++ *
++ * this function is an inline function that sets the headers before
++ * sending a message
++ */
++static inline void sst_fill_header(union ipc_header *header,
++ int msg, int large, int str_id)
++{
++ header->part.msg_id = msg;
++ header->part.str_id = str_id;
++ header->part.large = large;
++ header->part.done = 0;
++ header->part.busy = 1;
++ header->part.data = 0;
++}
++
++/*
++ * sst_assign_pvt_id - assign a pvt id for stream
++ *
++ * @sst_drv_ctx : driver context
++ *
++ * this inline function assigns a private id for calls that don't have stream
++ * context yet, should be called with lock held
++ */
++static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
++{
++ sst_drv_ctx->unique_id++;
++ if (sst_drv_ctx->unique_id >= MAX_NUM_STREAMS)
++ sst_drv_ctx->unique_id = 1;
++ return sst_drv_ctx->unique_id;
++}
++
++/*
++ * sst_init_stream - this function initializes stream context
++ *
++ * @stream : stream structure
++ * @codec : codec for stream
++ * @sst_id : stream id
++ * @ops : stream operation
++ * @slot : stream pcm slot
++ * @device : device type
++ *
++ * this inline function initializes stream context for allocated stream
++ */
++static inline void sst_init_stream(struct stream_info *stream,
++ int codec, int sst_id, int ops, u8 slot,
++ enum snd_sst_audio_device_type device)
++{
++ stream->status = STREAM_INIT;
++ stream->prev = STREAM_UN_INIT;
++ stream->codec = codec;
++ stream->sst_id = sst_id;
++ stream->str_type = 0;
++ stream->ops = ops;
++ stream->data_blk.on = false;
++ stream->data_blk.condition = false;
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.data = NULL;
++ stream->ctrl_blk.on = false;
++ stream->ctrl_blk.condition = false;
++ stream->ctrl_blk.ret_code = 0;
++ stream->ctrl_blk.data = NULL;
++ stream->need_draining = false;
++ stream->decode_ibuf = NULL;
++ stream->decode_isize = 0;
++ stream->mmapped = false;
++ stream->pcm_slot = slot;
++ stream->device = device;
++}
++
++
++/*
++ * sst_validate_strid - this function validates the stream id
++ *
++ * @str_id : stream id to be validated
++ *
++ * returns 0 if valid stream
++ */
++static inline int sst_validate_strid(int str_id)
++{
++ if (str_id <= 0 || str_id > sst_drv_ctx->max_streams) {
++ pr_err("SST ERR: invalid stream id : %d MAX_STREAMS:%d\n",
++ str_id, sst_drv_ctx->max_streams);
++ return -EINVAL;
++ } else
++ return 0;
++}
++
++static inline int sst_shim_write(void __iomem *addr, int offset, int value)
++{
++
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ writel(value, addr + SST_ISRD); /*dummy*/
++ writel(value, addr + offset);
++ return 0;
++}
++
++static inline int sst_shim_read(void __iomem *addr, int offset)
++{
++ return readl(addr + offset);
++}
++#endif /* __INTEL_SST_COMMON_H__ */
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_drv_interface.c
+@@ -0,0 +1,492 @@
++/*
++ * intel_sst_interface.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ *		Dharageswari R <dharageswari.r@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ * Upper layer interfaces (MAD driver, MMF) to SST driver
++ */
++
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/firmware.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++
++/*
++ * sst_download_fw - download the audio firmware to DSP
++ *
++ * This function is called when the FW needs to be downloaded to SST DSP engine
++ */
++int sst_download_fw(void)
++{
++ int retval;
++ const struct firmware *fw_sst;
++ const char *name;
++ if (sst_drv_ctx->sst_state != SST_UN_INIT)
++ return -EPERM;
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ name = SST_FW_FILENAME_MRST;
++ else
++ name = SST_FW_FILENAME_MFLD;
++ pr_debug("sst: Downloading %s FW now...\n", name);
++ retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
++ if (retval) {
++ pr_err("sst: request fw failed %d\n", retval);
++ return retval;
++ }
++ sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
++ sst_drv_ctx->alloc_block[0].ops_block.condition = false;
++ retval = sst_load_fw(fw_sst, NULL);
++ if (retval)
++ goto end_restore;
++
++ retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
++ if (retval)
++ pr_err("sst: fw download failed %d\n" , retval);
++end_restore:
++ release_firmware(fw_sst);
++ sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
++ return retval;
++}
++
++
++/*
++ * sst_stalled - this function checks if the lpe is in stalled state
++ */
++int sst_stalled(void)
++{
++ int retry = 1000;
++ int retval = -1;
++
++ while (retry) {
++ if (!sst_drv_ctx->lpe_stalled)
++ return 0;
++ /*wait for time and re-check*/
++ msleep(1);
++
++ retry--;
++ }
++ pr_debug("sst: in Stalled State\n");
++ return retval;
++}
++
++void free_stream_context(unsigned int str_id)
++{
++ struct stream_info *stream;
++
++ if (!sst_validate_strid(str_id)) {
++		/* str_id is valid, so stream is allocated */
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->ops == STREAM_OPS_PLAYBACK ||
++ stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ sst_drv_ctx->pb_streams--;
++ if (sst_drv_ctx->pb_streams == 0)
++ sst_drv_ctx->scard_ops->power_down_pmic_pb();
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ sst_drv_ctx->cp_streams--;
++ if (sst_drv_ctx->cp_streams == 0)
++ sst_drv_ctx->scard_ops->power_down_pmic_cp();
++ }
++ if (sst_drv_ctx->pb_streams == 0
++ && sst_drv_ctx->cp_streams == 0)
++ sst_drv_ctx->scard_ops->power_down_pmic();
++ if (sst_free_stream(str_id))
++ sst_clean_stream(&sst_drv_ctx->streams[str_id]);
++ }
++}
++
++/*
++ * sst_get_stream_allocated - this function gets a stream allocated with
++ * the given params
++ *
++ * @str_param : stream params
++ * @lib_dnld : pointer to pointer of lib download struct
++ *
++ * This creates new stream id for a stream, in case lib is to be downloaded to
++ * DSP, it downloads that
++ */
++int sst_get_stream_allocated(struct snd_sst_params *str_param,
++ struct snd_sst_lib_download **lib_dnld)
++{
++ int retval, str_id;
++ struct stream_info *str_info;
++
++ retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
++ str_param->codec, str_param->device_type);
++ if (retval < 0) {
++ pr_err("sst: sst_alloc_stream failed %d\n", retval);
++ return retval;
++ }
++ pr_debug("sst: Stream allocated %d\n", retval);
++ str_id = retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++ /* Block the call for reply */
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
++ pr_debug("sst: FW alloc failed retval %d, ret_code %d\n",
++ retval, str_info->ctrl_blk.ret_code);
++ str_id = -str_info->ctrl_blk.ret_code; /*return error*/
++ *lib_dnld = str_info->ctrl_blk.data;
++ sst_clean_stream(str_info);
++ } else
++ pr_debug("sst: FW Stream allocated sucess\n");
++ return str_id; /*will ret either error (in above if) or correct str id*/
++}
++
++/*
++ * sst_get_sfreq - this function returns the frequency of the stream
++ *
++ * @str_param : stream params
++ */
++static int sst_get_sfreq(struct snd_sst_params *str_param)
++{
++ switch (str_param->codec) {
++ case SST_CODEC_TYPE_PCM:
++ return 48000; /*str_param->sparams.uc.pcm_params.sfreq;*/
++ case SST_CODEC_TYPE_MP3:
++ return str_param->sparams.uc.mp3_params.sfreq;
++ case SST_CODEC_TYPE_AAC:
++		return str_param->sparams.uc.aac_params.sfreq;
++ case SST_CODEC_TYPE_WMA9:
++		return str_param->sparams.uc.wma_params.sfreq;
++ default:
++ return 0;
++ }
++}
++
++/*
++ * sst_get_stream - this function prepares for stream allocation
++ *
++ * @str_param : stream param
++ */
++int sst_get_stream(struct snd_sst_params *str_param)
++{
++ int i, retval;
++ struct stream_info *str_info;
++ struct snd_sst_lib_download *lib_dnld;
++
++ /* stream is not allocated, we are allocating */
++ retval = sst_get_stream_allocated(str_param, &lib_dnld);
++ if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
++ /* codec download is required */
++ struct snd_sst_alloc_response *response;
++
++ pr_debug("sst: Codec is required.... trying that\n");
++ if (lib_dnld == NULL) {
++ pr_err("sst: lib download null!!! abort\n");
++ return -EIO;
++ }
++ i = sst_get_block_stream(sst_drv_ctx);
++ response = sst_drv_ctx->alloc_block[i].ops_block.data;
++ pr_debug("sst: alloc block allocated = %d\n", i);
++ if (i < 0) {
++ kfree(lib_dnld);
++ return -ENOMEM;
++ }
++ retval = sst_load_library(lib_dnld, str_param->ops);
++ kfree(lib_dnld);
++
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ if (!retval) {
++ pr_debug("sst: codec was downloaded sucesfully\n");
++
++ retval = sst_get_stream_allocated(str_param, &lib_dnld);
++ if (retval <= 0)
++ goto err;
++
++ pr_debug("sst: Alloc done stream id %d\n", retval);
++ } else {
++ pr_debug("sst: codec download failed\n");
++ retval = -EIO;
++ goto err;
++ }
++ } else if (retval <= 0)
++ goto err;
++ /*else
++ set_port_params(str_param, str_param->ops);*/
++
++ /* store sampling freq */
++ str_info = &sst_drv_ctx->streams[retval];
++ str_info->sfreq = sst_get_sfreq(str_param);
++
++ /* power on the analog, if reqd */
++ if (str_param->ops == STREAM_OPS_PLAYBACK ||
++ str_param->ops == STREAM_OPS_PLAYBACK_DRM) {
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(
++ sst_drv_ctx->pmic_port_instance);
++ else
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(
++ str_info->device);
++ /*Only if the playback is MP3 - Send a message*/
++ sst_drv_ctx->pb_streams++;
++ } else if (str_param->ops == STREAM_OPS_CAPTURE) {
++
++ sst_drv_ctx->scard_ops->power_up_pmic_cp(
++ sst_drv_ctx->pmic_port_instance);
++		/*Send a message if not sent already*/
++ sst_drv_ctx->cp_streams++;
++ }
++
++err:
++ return retval;
++}
++
++void sst_process_mad_ops(struct work_struct *work)
++{
++
++ struct mad_ops_wq *mad_ops =
++ container_of(work, struct mad_ops_wq, wq);
++ int retval = 0;
++
++ switch (mad_ops->control_op) {
++ case SST_SND_PAUSE:
++ retval = sst_pause_stream(mad_ops->stream_id);
++ break;
++ case SST_SND_RESUME:
++ retval = sst_resume_stream(mad_ops->stream_id);
++ break;
++ case SST_SND_DROP:
++/* retval = sst_drop_stream(mad_ops->stream_id);
++*/ break;
++ case SST_SND_START:
++ pr_debug("SST Debug: start stream\n");
++ retval = sst_start_stream(mad_ops->stream_id);
++ break;
++ case SST_SND_STREAM_PROCESS:
++ pr_debug("sst: play/capt frames...\n");
++ break;
++ default:
++ pr_err("sst: wrong control_ops reported\n");
++ }
++ return;
++}
++/*
++ * sst_control_set - Set Control params
++ *
++ * @control_list: list of controls to be set
++ *
++ * This function is called by MID sound card driver to set
++ * SST/Sound card controls. This is registered with MID driver
++ */
++int sst_control_set(int control_element, void *value)
++{
++ int retval = 0, str_id = 0;
++ struct stream_info *stream;
++
++ if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
++ /*LPE is suspended, resume it before proceding*/
++ pr_debug("sst: Resuming from Suspended state\n");
++ retval = intel_sst_resume(sst_drv_ctx->pci);
++ if (retval) {
++ pr_err("sst: Resume Failed = %#x, abort\n", retval);
++ return retval;
++ }
++ }
++ if (sst_drv_ctx->sst_state == SST_UN_INIT) {
++ /* FW is not downloaded */
++ pr_debug("sst: DSP Downloading FW now...\n");
++ retval = sst_download_fw();
++ if (retval) {
++ pr_err("sst: FW download fail %x, abort\n", retval);
++ return retval;
++ }
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
++ sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
++ && sst_drv_ctx->pmic_vendor != SND_NC)
++ sst_enable_rx_timeslot(
++ sst_drv_ctx->rx_time_slot_status);
++ }
++
++ switch (control_element) {
++ case SST_SND_ALLOC: {
++ struct snd_sst_params *str_param;
++ struct stream_info *str_info;
++
++ str_param = (struct snd_sst_params *)value;
++ BUG_ON(!str_param);
++ retval = sst_get_stream(str_param);
++ if (retval >= 0)
++ sst_drv_ctx->stream_cnt++;
++ str_info = &sst_drv_ctx->streams[retval];
++ str_info->src = MAD_DRV;
++ break;
++ }
++
++ case SST_SND_PAUSE:
++ case SST_SND_RESUME:
++ case SST_SND_DROP:
++ case SST_SND_START:
++ sst_drv_ctx->mad_ops.control_op = control_element;
++ sst_drv_ctx->mad_ops.stream_id = *(int *)value;
++ queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
++ break;
++
++ case SST_SND_FREE:
++ str_id = *(int *)value;
++ stream = &sst_drv_ctx->streams[str_id];
++ free_stream_context(str_id);
++ stream->pcm_substream = NULL;
++ stream->status = STREAM_UN_INIT;
++ stream->period_elapsed = NULL;
++ sst_drv_ctx->stream_cnt--;
++ break;
++
++ case SST_SND_STREAM_INIT: {
++ struct pcm_stream_info *str_info;
++ struct stream_info *stream;
++
++ pr_debug("sst: stream init called\n");
++ str_info = (struct pcm_stream_info *)value;
++ str_id = str_info->str_id;
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ break;
++
++ stream = &sst_drv_ctx->streams[str_id];
++ pr_debug("sst: setting the period ptrs\n");
++ stream->pcm_substream = str_info->mad_substream;
++ stream->period_elapsed = str_info->period_elapsed;
++ stream->sfreq = str_info->sfreq;
++ stream->prev = stream->status;
++ stream->status = STREAM_INIT;
++ break;
++ }
++
++ case SST_SND_BUFFER_POINTER: {
++ struct pcm_stream_info *stream_info;
++ struct snd_sst_tstamp fw_tstamp = {0,};
++ struct stream_info *stream;
++
++
++ stream_info = (struct pcm_stream_info *)value;
++ str_id = stream_info->str_id;
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ break;
++ stream = &sst_drv_ctx->streams[str_id];
++
++ if (!stream->pcm_substream)
++ break;
++ memcpy_fromio(&fw_tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
++ +(str_id * sizeof(fw_tstamp))),
++ sizeof(fw_tstamp));
++
++ pr_debug("sst: Pointer Query on strid = %d ops %d\n",
++ str_id, stream->ops);
++
++ if (stream->ops == STREAM_OPS_PLAYBACK)
++ stream_info->buffer_ptr = fw_tstamp.samples_rendered;
++ else
++ stream_info->buffer_ptr = fw_tstamp.samples_processed;
++ pr_debug("sst: Samples rendered = %llu, buffer ptr %llu\n",
++ fw_tstamp.samples_rendered, stream_info->buffer_ptr);
++ break;
++ }
++ case SST_ENABLE_RX_TIME_SLOT: {
++ int status = *(int *)value;
++		sst_drv_ctx->rx_time_slot_status = status;
++ sst_enable_rx_timeslot(status);
++ break;
++ }
++ default:
++ /* Illegal case */
++ pr_warn("sst: illegal req\n");
++ return -EINVAL;
++ }
++
++ return retval;
++}
++
++
++struct intel_sst_card_ops sst_pmic_ops = {
++ .control_set = sst_control_set,
++};
++
++/*
++ * register_sst_card - function for sound card to register
++ *
++ * @card: pointer to structure of operations
++ *
++ * This function is called card driver loads and is ready for registration
++ */
++int register_sst_card(struct intel_sst_card_ops *card)
++{
++ if (!sst_drv_ctx) {
++ pr_err("sst: No SST driver register card reject\n");
++ return -ENODEV;
++ }
++
++ if (!card || !card->module_name) {
++ pr_err("sst: Null Pointer Passed\n");
++ return -EINVAL;
++ }
++ if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
++ /* register this driver */
++ if ((strncmp(SST_CARD_NAMES, card->module_name,
++ strlen(SST_CARD_NAMES))) == 0) {
++ sst_drv_ctx->pmic_vendor = card->vendor_id;
++ sst_drv_ctx->scard_ops = card->scard_ops;
++ sst_pmic_ops.module_name = card->module_name;
++ sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
++ sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
++ card->control_set = sst_pmic_ops.control_set;
++ sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
++ return 0;
++ } else {
++ pr_err("sst: strcmp fail %s\n", card->module_name);
++ return -EINVAL;
++ }
++
++ } else {
++ /* already registered a driver */
++ pr_err("sst: Repeat for registeration..denied\n");
++ return -EBADRQC;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(register_sst_card);
++
++/*
++ * unregister_sst_card- function for sound card to un-register
++ *
++ * @card: pointer to structure of operations
++ *
++ * This function is called when card driver unloads
++ */
++void unregister_sst_card(struct intel_sst_card_ops *card)
++{
++ if (sst_pmic_ops.control_set == card->control_set) {
++ /* unreg */
++ sst_pmic_ops.module_name = "";
++ sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
++ pr_debug("sst: Unregistered %s\n", card->module_name);
++ }
++ return;
++}
++EXPORT_SYMBOL_GPL(unregister_sst_card);
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_dsp.c
+@@ -0,0 +1,486 @@
++/*
++ * intel_sst_dsp.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ *
++ * This file contains all dsp controlling functions like firmware download,
++ * setting/resetting dsp cores, etc
++ */
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/firmware.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++
++/**
++ * intel_sst_reset_dsp_mrst - Resetting SST DSP
++ *
++ * This resets DSP in case of MRST platfroms
++ */
++static int intel_sst_reset_dsp_mrst(void)
++{
++ union config_status_reg csr;
++
++ pr_debug("sst: Resetting the DSP in mrst\n");
++ csr.full = 0x3a2;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
++ csr.part.strb_cntr_rst = 0;
++ csr.part.run_stall = 0x1;
++ csr.part.bypass = 0x7;
++ csr.part.sst_reset = 0x1;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ return 0;
++}
++
++/**
++ * intel_sst_reset_dsp_medfield - Resetting SST DSP
++ *
++ * This resets DSP in case of Medfield platfroms
++ */
++static int intel_sst_reset_dsp_medfield(void)
++{
++ union config_status_reg csr;
++
++ pr_debug("sst: Resetting the DSP in medfield\n");
++ csr.full = 0x048303E2;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++
++ return 0;
++}
++
++/**
++ * sst_start_mrst - Start the SST DSP processor
++ *
++ * This starts the DSP in MRST platfroms
++ */
++static int sst_start_mrst(void)
++{
++ union config_status_reg csr;
++
++ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
++ csr.part.bypass = 0;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ csr.part.run_stall = 0;
++ csr.part.sst_reset = 0;
++ csr.part.strb_cntr_rst = 1;
++ pr_debug("sst: Setting SST to execute_mrst 0x%x\n", csr.full);
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++
++ return 0;
++}
++
++/**
++ * sst_start_medfield - Start the SST DSP processor
++ *
++ * This starts the DSP in MRST platfroms
++ */
++static int sst_start_medfield(void)
++{
++ union config_status_reg csr;
++
++ csr.full = 0x04830062;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ csr.full = 0x04830063;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ csr.full = 0x04830061;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++ pr_debug("sst: Starting the DSP_medfld\n");
++
++ return 0;
++}
++
++/**
++ * sst_parse_module - Parse audio FW modules
++ *
++ * @module: FW module header
++ *
++ * Parses modules that need to be placed in SST IRAM and DRAM
++ * returns error or 0 if module sizes are proper
++ */
++static int sst_parse_module(struct fw_module_header *module)
++{
++ struct dma_block_info *block;
++ u32 count;
++ void __iomem *ram;
++
++ pr_debug("sst: module sign %s size %x blocks %x type %x\n",
++ module->signature, module->mod_size,
++ module->blocks, module->type);
++ pr_debug("sst: module entrypoint 0x%x\n", module->entry_point);
++
++ block = (void *)module + sizeof(*module);
++
++ for (count = 0; count < module->blocks; count++) {
++ if (block->size <= 0) {
++ pr_err("sst: block size invalid\n");
++ return -EINVAL;
++ }
++ switch (block->type) {
++ case SST_IRAM:
++ ram = sst_drv_ctx->iram;
++ break;
++ case SST_DRAM:
++ ram = sst_drv_ctx->dram;
++ break;
++ default:
++ pr_err("sst: wrong ram type0x%x in block0x%x\n",
++ block->type, count);
++ return -EINVAL;
++ }
++ memcpy_toio(ram + block->ram_offset,
++ (void *)block + sizeof(*block), block->size);
++ block = (void *)block + sizeof(*block) + block->size;
++ }
++ return 0;
++}
++
++/**
++ * sst_parse_fw_image - parse and load FW
++ *
++ * @sst_fw: pointer to audio fw
++ *
++ * This function is called to parse and download the FW image
++ */
++static int sst_parse_fw_image(const struct firmware *sst_fw)
++{
++ struct fw_header *header;
++ u32 count;
++ int ret_val;
++ struct fw_module_header *module;
++
++ BUG_ON(!sst_fw);
++
++ /* Read the header information from the data pointer */
++ header = (struct fw_header *)sst_fw->data;
++
++ /* verify FW */
++ if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
++ (sst_fw->size != header->file_size + sizeof(*header))) {
++ /* Invalid FW signature */
++ pr_err("sst: InvalidFW sign/filesize mismatch\n");
++ return -EINVAL;
++ }
++ pr_debug("sst: header sign=%s size=%x modules=%x fmt=%x size=%x\n",
++ header->signature, header->file_size, header->modules,
++ header->file_format, sizeof(*header));
++ module = (void *)sst_fw->data + sizeof(*header);
++ for (count = 0; count < header->modules; count++) {
++ /* module */
++ ret_val = sst_parse_module(module);
++ if (ret_val)
++ return ret_val;
++ module = (void *)module + sizeof(*module) + module->mod_size ;
++ }
++
++ return 0;
++}
++
++/**
++ * sst_load_fw - function to load FW into DSP
++ *
++ * @fw: Pointer to driver loaded FW
++ * @context: driver context
++ *
++ * This function is called by OS when the FW is loaded into kernel
++ */
++int sst_load_fw(const struct firmware *fw, void *context)
++{
++ int ret_val;
++
++ pr_debug("sst: load_fw called\n");
++ BUG_ON(!fw);
++
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ ret_val = intel_sst_reset_dsp_mrst();
++ else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
++ ret_val = intel_sst_reset_dsp_medfield();
++ if (ret_val)
++ return ret_val;
++
++ ret_val = sst_parse_fw_image(fw);
++ if (ret_val)
++ return ret_val;
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_FW_LOADED;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ /* 7. ask scu to reset the bypass bits */
++ /* 8.bring sst out of reset */
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ ret_val = sst_start_mrst();
++ else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
++ ret_val = sst_start_medfield();
++ if (ret_val)
++ return ret_val;
++
++ pr_debug("sst: fw loaded successful!!!\n");
++ return ret_val;
++}
++
++/*This function is called when any codec/post processing library
++ needs to be downloaded*/
++static int sst_download_library(const struct firmware *fw_lib,
++ struct snd_sst_lib_download_info *lib)
++{
++ /* send IPC message and wait */
++ int i;
++ u8 pvt_id;
++ struct ipc_post *msg = NULL;
++ union config_status_reg csr;
++ struct snd_sst_str_type str_type = {0};
++ int retval = 0;
++
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
++ i = sst_get_block_stream(sst_drv_ctx);
++ pr_debug("sst: alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
++ if (i < 0) {
++ kfree(msg);
++ return -ENOMEM;
++ }
++ sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
++ sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
++ msg->header.part.data = sizeof(u32) + sizeof(str_type);
++ str_type.codec_type = lib->dload_lib.lib_info.lib_type;
++ /*str_type.pvt_id = pvt_id;*/
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
++ if (retval) {
++ /* error */
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ pr_err("sst: Prep codec downloaded failed %d\n",
++ retval);
++ return -EIO;
++ }
++ pr_debug("sst: FW responded, ready for download now...\n");
++ /* downloading on success */
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_FW_LOADED;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.run_stall = 1;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++
++ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
++ csr.part.bypass = 0x7;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++
++ sst_parse_fw_image(fw_lib);
++
++ /* set the FW to running again */
++ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
++ csr.part.bypass = 0x0;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++
++ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
++ csr.part.run_stall = 0;
++ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
++
++ /* send download complete and wait */
++ if (sst_create_large_msg(&msg)) {
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
++ sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
++ msg->header.part.data = sizeof(u32) + sizeof(*lib);
++ lib->pvt_id = pvt_id;
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ pr_debug("sst: Waiting for FW response Download complete\n");
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++ retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
++ if (retval) {
++ /* error */
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ return -EIO;
++ }
++
++ pr_debug("sst: FW sucess on Download complete\n");
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_FW_RUNNING;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ return 0;
++
++}
++
++/* This function is called befoer downloading the codec/postprocessing
++library is set for download to SST DSP*/
++static int sst_validate_library(const struct firmware *fw_lib,
++ struct lib_slot_info *slot,
++ u32 *entry_point)
++{
++ struct fw_header *header;
++ struct fw_module_header *module;
++ struct dma_block_info *block;
++ unsigned int n_blk, isize = 0, dsize = 0;
++ int err = 0;
++
++ header = (struct fw_header *)fw_lib->data;
++ if (header->modules != 1) {
++ pr_err("sst: Module no mismatch found\n ");
++ err = -EINVAL;
++ goto exit;
++ }
++ module = (void *)fw_lib->data + sizeof(*header);
++ *entry_point = module->entry_point;
++ pr_debug("sst: Module entry point 0x%x\n", *entry_point);
++ pr_debug("sst: Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
++ module->signature, module->mod_size,
++ module->blocks, module->type);
++
++ block = (void *)module + sizeof(*module);
++ for (n_blk = 0; n_blk < module->blocks; n_blk++) {
++ switch (block->type) {
++ case SST_IRAM:
++ isize += block->size;
++ break;
++ case SST_DRAM:
++ dsize += block->size;
++ break;
++ default:
++ pr_err("sst: Invalid block type for 0x%x\n", n_blk);
++ err = -EINVAL;
++ goto exit;
++ }
++ block = (void *)block + sizeof(*block) + block->size;
++ }
++ if (isize > slot->iram_size || dsize > slot->dram_size) {
++ pr_err("sst: library exceeds size allocated\n");
++ err = -EINVAL;
++ goto exit;
++ } else
++ pr_debug("sst: Library is safe for download...\n");
++
++ pr_debug("sst: iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
++ isize, dsize, slot->iram_size, slot->dram_size);
++exit:
++ return err;
++
++}
++
++/* This function is called when FW requests for a particular libary download
++This function prepares the library to download*/
++int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
++{
++ char buf[20];
++ const char *type, *dir;
++ int len = 0, error = 0;
++ u32 entry_point;
++ const struct firmware *fw_lib;
++ struct snd_sst_lib_download_info dload_info = {{{0},},};
++
++ memset(buf, 0, sizeof(buf));
++
++ pr_debug("sst: Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
++ lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
++ pr_debug("sst: Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
++ lib->lib_info.lib_version, lib->lib_info.lib_name,
++ lib->lib_info.lib_caps, lib->lib_info.media_type);
++
++ pr_debug("sst: IRAM Size 0x%x, offset 0x%x\n",
++ lib->slot_info.iram_size, lib->slot_info.iram_offset);
++ pr_debug("sst: DRAM Size 0x%x, offset 0x%x\n",
++ lib->slot_info.dram_size, lib->slot_info.dram_offset);
++
++ switch (lib->lib_info.lib_type) {
++ case SST_CODEC_TYPE_MP3:
++ type = "mp3_";
++ break;
++ case SST_CODEC_TYPE_AAC:
++ type = "aac_";
++ break;
++ case SST_CODEC_TYPE_AACP:
++ type = "aac_v1_";
++ break;
++ case SST_CODEC_TYPE_eAACP:
++ type = "aac_v2_";
++ break;
++ case SST_CODEC_TYPE_WMA9:
++ type = "wma9_";
++ break;
++ default:
++ pr_err("sst: Invalid codec type\n");
++ error = -EINVAL;
++ goto wake;
++ }
++
++ if (ops == STREAM_OPS_CAPTURE)
++ dir = "enc_";
++ else
++ dir = "dec_";
++ len = strlen(type) + strlen(dir);
++ strncpy(buf, type, sizeof(buf)-1);
++ strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
++ len += snprintf(buf + len, sizeof(buf) - len, "%d",
++ lib->slot_info.slot_num);
++ len += snprintf(buf + len, sizeof(buf) - len, ".bin");
++
++ pr_debug("sst: Requesting %s\n", buf);
++
++ error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
++ if (error) {
++ pr_err("sst: library load failed %d\n", error);
++ goto wake;
++ }
++ error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
++ if (error)
++ goto wake_free;
++
++ lib->mod_entry_pt = entry_point;
++ memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
++ error = sst_download_library(fw_lib, &dload_info);
++ if (error)
++ goto wake_free;
++
++ /* lib is downloaded and init send alloc again */
++ pr_debug("sst: Library is downloaded now...\n");
++wake_free:
++ /* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
++ release_firmware(fw_lib);
++wake:
++ return error;
++}
++
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_fw_ipc.h
+@@ -0,0 +1,393 @@
++#ifndef __INTEL_SST_FW_IPC_H__
++#define __INTEL_SST_FW_IPC_H__
++/*
++* intel_sst_fw_ipc.h - Intel SST Driver for audio engine
++*
++* Copyright (C) 2008-10 Intel Corporation
++* Author: Vinod Koul <vinod.koul@intel.com>
++* Harsha Priya <priya.harsha@intel.com>
++* Dharageswari R <dharageswari.r@intel.com>
++* KP Jeeja <jeeja.kp@intel.com>
++* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; version 2 of the License.
++*
++* This program is distributed in the hope that it will be useful, but
++* WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++* General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License along
++* with this program; if not, write to the Free Software Foundation, Inc.,
++* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++*
++* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++* This driver exposes the audio engine functionalities to the ALSA
++* and middleware.
++* This file has definitions shared between the firmware and driver
++*/
++
++#define MAX_NUM_STREAMS_MRST 3
++#define MAX_NUM_STREAMS_MFLD 6
++#define MAX_NUM_STREAMS 6
++#define MAX_DBG_RW_BYTES 80
++#define MAX_NUM_SCATTER_BUFFERS 8
++#define MAX_LOOP_BACK_DWORDS 8
++/* IPC base address and mailbox, timestamp offsets */
++#define SST_MAILBOX_SIZE 0x0400
++#define SST_MAILBOX_SEND 0x0000
++#define SST_MAILBOX_RCV 0x0804
++#define SST_TIME_STAMP 0x1800
++#define SST_RESERVED_OFFSET 0x1A00
++#define SST_CHEKPOINT_OFFSET 0x1C00
++#define REPLY_MSG 0x80
++
++/* Message ID's for IPC messages */
++/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
++
++/* I2L Firmware/Codec Download msgs */
++#define IPC_IA_PREP_LIB_DNLD 0x01
++#define IPC_IA_LIB_DNLD_CMPLT 0x02
++
++#define IPC_IA_SET_PMIC_TYPE 0x03
++#define IPC_IA_GET_FW_VERSION 0x04
++#define IPC_IA_GET_FW_BUILD_INF 0x05
++#define IPC_IA_GET_FW_INFO 0x06
++
++/* I2L Codec Config/control msgs */
++#define IPC_IA_SET_CODEC_PARAMS 0x10
++#define IPC_IA_GET_CODEC_PARAMS 0x11
++#define IPC_IA_SET_PPP_PARAMS 0x12
++#define IPC_IA_GET_PPP_PARAMS 0x13
++#define IPC_IA_PLAY_FRAMES 0x14
++#define IPC_IA_CAPT_FRAMES 0x15
++#define IPC_IA_PLAY_VOICE 0x16
++#define IPC_IA_CAPT_VOICE 0x17
++#define IPC_IA_DECODE_FRAMES 0x18
++
++/* I2L Stream config/control msgs */
++#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
++#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
++#define IPC_IA_SET_STREAM_PARAMS 0x22
++#define IPC_IA_GET_STREAM_PARAMS 0x23
++#define IPC_IA_PAUSE_STREAM 0x24
++#define IPC_IA_RESUME_STREAM 0x25
++#define IPC_IA_DROP_STREAM 0x26
++#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
++#define IPC_IA_TARGET_DEV_SELECT 0x28
++#define IPC_IA_CONTROL_ROUTING 0x29
++
++#define IPC_IA_SET_STREAM_VOL 0x2A /*Vol for stream, pre mixer */
++#define IPC_IA_GET_STREAM_VOL 0x2B
++#define IPC_IA_SET_STREAM_MUTE 0x2C
++#define IPC_IA_GET_STREAM_MUTE 0x2D
++#define IPC_IA_ENABLE_RX_TIME_SLOT 0x2E /* Enable Rx time slot 0 or 1 */
++
++#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
++
++/* Debug msgs */
++#define IPC_IA_DBG_MEM_READ 0x40
++#define IPC_IA_DBG_MEM_WRITE 0x41
++#define IPC_IA_DBG_LOOP_BACK 0x42
++
++/* L2I Firmware/Codec Download msgs */
++#define IPC_IA_FW_INIT_CMPLT 0x81
++#define IPC_IA_LPE_GETTING_STALLED 0x82
++#define IPC_IA_LPE_UNSTALLED 0x83
++
++/* L2I Codec Config/control msgs */
++#define IPC_SST_GET_PLAY_FRAMES 0x90 /* Request IA more data */
++#define IPC_SST_GET_CAPT_FRAMES 0x91 /* Request IA more data */
++#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
++#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Under run and stopped */
++#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
++#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
++#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
++#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
++#define IPC_IA_TARGET_DEV_CHNGD 0x98 /* error in processing a stream */
++
++#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occured */
++/* L2S messages */
++#define IPC_SC_DDR_LINK_UP 0xC0
++#define IPC_SC_DDR_LINK_DOWN 0xC1
++#define IPC_SC_SET_LPECLK_REQ 0xC2
++#define IPC_SC_SSP_BIT_BANG 0xC3
++
++/* L2I Error reporting msgs */
++#define IPC_IA_MEM_ALLOC_FAIL 0xE0
++#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
++ stream can be used by playback and
++ capture modules */
++
++/* L2I Debug msgs */
++#define IPC_IA_PRINT_STRING 0xF0
++
++
++
++/* Command Response or Acknowledge message to any IPC message will have
++ * same message ID and stream ID information which is sent.
++ * There is no specific Ack message ID. The data field is used as response
++ * meaning.
++ */
++enum ackData {
++ IPC_ACK_SUCCESS = 0,
++ IPC_ACK_FAILURE
++};
++
++
++enum sst_error_codes {
++ /* Error code,response to msgId: Description */
++ /* Common error codes */
++ SST_SUCCESS = 0, /* Success */
++ SST_ERR_INVALID_STREAM_ID, /* Invalid stream ID */
++ SST_ERR_INVALID_MSG_ID, /* Invalid message ID */
++ SST_ERR_INVALID_STREAM_OP, /* Invalid stream operation request */
++ SST_ERR_INVALID_PARAMS, /* Invalid params */
++ SST_ERR_INVALID_CODEC, /* Invalid codec type */
++ SST_ERR_INVALID_MEDIA_TYPE, /* Invalid media type */
++ SST_ERR_STREAM_ERR, /* ANY: Stream control or config or
++ processing error */
++
++ /* IPC specific error codes */
++ SST_IPC_ERR_CALL_BACK_NOT_REGD, /* Call back for msg not regd */
++ SST_IPC_ERR_STREAM_NOT_ALLOCATED, /* Stream is not allocated */
++ SST_IPC_ERR_STREAM_ALLOC_FAILED, /* ALLOC:Stream alloc failed */
++ SST_IPC_ERR_GET_STREAM_FAILED, /* ALLOC:Get stream id failed*/
++ SST_ERR_MOD_NOT_AVAIL, /* SET/GET: Mod(AEC/AGC/ALC) not available */
++ SST_ERR_MOD_DNLD_RQD, /* SET/GET: Mod(AEC/AGC/ALC) download required */
++ SST_ERR_STREAM_STOPPED, /* ANY: Stream is in stopped state */
++ SST_ERR_STREAM_IN_USE, /* ANY: Stream is already in use */
++
++ /* Capture specific error codes */
++ SST_CAP_ERR_INCMPLTE_CAPTURE_MSG,/* ANY:Incomplete message */
++ SST_CAP_ERR_CAPTURE_FAIL, /* ANY:Capture op failed */
++ SST_CAP_ERR_GET_DDR_NEW_SGLIST,
++ SST_CAP_ERR_UNDER_RUN, /* lack of input data */
++ SST_CAP_ERR_OVERFLOW, /* lack of output space */
++
++ /* Playback specific error codes*/
++ SST_PB_ERR_INCMPLTE_PLAY_MSG, /* ANY: Incomplete message */
++ SST_PB_ERR_PLAY_FAIL, /* ANY: Playback operation failed */
++ SST_PB_ERR_GET_DDR_NEW_SGLIST,
++
++ /* Codec manager specific error codes */
++ SST_LIB_ERR_LIB_DNLD_REQUIRED, /* ALLOC: Codec download required */
++ SST_LIB_ERR_LIB_NOT_SUPPORTED, /* Library is not supported */
++
++ /* Library manager specific error codes */
++ SST_SCC_ERR_PREP_DNLD_FAILED, /* Failed to prepare for codec download */
++ SST_SCC_ERR_LIB_DNLD_RES_FAILED, /* Lib download resume failed */
++ /* Scheduler specific error codes */
++ SST_SCH_ERR_FAIL, /* REPORT: */
++
++ /* DMA specific error codes */
++ SST_DMA_ERR_NO_CHNL_AVAILABLE, /* DMA Ch not available */
++ SST_DMA_ERR_INVALID_INPUT_PARAMS, /* Invalid input params */
++ SST_DMA_ERR_CHNL_ALREADY_SUSPENDED, /* Ch is suspended */
++ SST_DMA_ERR_CHNL_ALREADY_STARTED, /* Ch already started */
++ SST_DMA_ERR_CHNL_NOT_ENABLED, /* Ch not enabled */
++ SST_DMA_ERR_TRANSFER_FAILED, /* Transfer failed */
++ SST_SSP_ERR_ALREADY_ENABLED, /* REPORT: SSP already enabled */
++ SST_SSP_ERR_ALREADY_DISABLED, /* REPORT: SSP already disabled */
++ SST_SSP_ERR_NOT_INITIALIZED,
++
++ /* Other error codes */
++ SST_ERR_MOD_INIT_FAIL, /* Firmware Module init failed */
++
++ /* FW init error codes */
++ SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED,
++ SST_RDR_ERR_ROUTE_ALREADY_STARTED,
++ SST_RDR_PREP_CODEC_DNLD_FAILED,
++
++ /* Memory debug error codes */
++ SST_ERR_DBG_MEM_READ_FAIL,
++ SST_ERR_DBG_MEM_WRITE_FAIL,
++
++ /* Decode error codes */
++ SST_ERR_DEC_NEED_INPUT_BUF,
++
++};
++
++enum dbg_mem_data_type {
++ /* Data type of debug read/write */
++ DATA_TYPE_U32,
++ DATA_TYPE_U16,
++ DATA_TYPE_U8,
++};
++
++/* CAUTION NOTE: All IPC message body must be multiple of 32 bits.*/
++
++/* IPC Header */
++union ipc_header {
++ struct {
++ u32 msg_id:8; /* Message ID - Max 256 Message Types */
++ u32 str_id:5;
++ u32 large:1; /* Large Message if large = 1 */
++ u32 reserved:2; /* Reserved for future use */
++ u32 data:14; /* Ack/Info for msg, size of msg in Mailbox */
++ u32 done:1; /* bit 30 */
++ u32 busy:1; /* bit 31 */
++ } part;
++ u32 full;
++} __attribute__ ((packed));
++
++/* Firmware build info */
++struct sst_fw_build_info {
++ unsigned char date[16]; /* Firmware build date */
++ unsigned char time[16]; /* Firmware build time */
++} __attribute__ ((packed));
++
++struct ipc_header_fw_init {
++ struct snd_sst_fw_version fw_version;/* Firmware version details */
++ struct sst_fw_build_info build_info;
++ u16 result; /* Fw init result */
++ u8 module_id; /* Module ID in case of error */
++ u8 debug_info; /* Debug info from Module ID in case of fail */
++} __attribute__ ((packed));
++
++/* Address and size info of a frame buffer in DDR */
++struct sst_address_info {
++ u32 addr; /* Address at IA */
++ u32 size; /* Size of the buffer */
++} __attribute__ ((packed));
++
++/* Time stamp */
++struct snd_sst_tstamp {
++ u64 samples_processed;/* capture - data in DDR */
++ u64 samples_rendered;/* playback - data rendered */
++ u64 bytes_processed;/* bytes decoded or encoded */
++ u32 sampling_frequency;/* eg: 48000, 44100 */
++ u32 dma_base_address;/* DMA base address */
++ u16 dma_channel_no;/* DMA Channel used for the data transfer*/
++ u16 reserved;/* 32 bit alignment */
++};
++
++/* Frame info to play or capture */
++struct sst_frame_info {
++ u16 num_entries; /* number of entries to follow */
++ u16 rsrvd;
++ struct sst_address_info addr[MAX_NUM_SCATTER_BUFFERS];
++} __attribute__ ((packed));
++
++/* Frames info for decode */
++struct snd_sst_decode_info {
++ unsigned long long input_bytes_consumed;
++ unsigned long long output_bytes_produced;
++ struct sst_frame_info frames_in;
++ struct sst_frame_info frames_out;
++} __attribute__ ((packed));
++
++/* SST to IA print debug message*/
++struct ipc_sst_ia_print_params {
++ u32 string_size;/* Max value is 160 */
++ u8 prt_string[160];/* Null terminated Char string */
++} __attribute__ ((packed));
++
++/* Voice data message */
++struct snd_sst_voice_data {
++ u16 num_bytes;/* Number of valid voice data bytes */
++ u8 pcm_wd_size;/* 0=8 bit, 1=16 bit 2=32 bit */
++ u8 reserved;/* Reserved */
++ u8 voice_data_buf[0];/* Voice data buffer in bytes, little endian */
++} __attribute__ ((packed));
++
++/* SST to IA memory read debug message */
++struct ipc_sst_ia_dbg_mem_rw {
++ u16 num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
++ u16 data_type;/* enum: dbg_mem_data_type */
++ u32 address; /* Memory address of data memory of data_type */
++ u8 rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
++} __attribute__ ((packed));
++
++struct ipc_sst_ia_dbg_loop_back {
++ u16 num_dwords; /* Maximum of MAX_DBG_RW_BYTES */
++ u16 increment_val;/* Increments dwords by this value, 0- no increment */
++ u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
++} __attribute__ ((packed));
++
++/* Stream type params struture for Alloc stream */
++struct snd_sst_str_type {
++ u8 codec_type; /* Codec type */
++ u8 str_type; /* 1 = voice 2 = music */
++ u8 operation; /* Playback or Capture */
++ u8 protected_str; /* 0=Non DRM, 1=DRM */
++ u8 time_slots;
++ u8 reserved; /* Reserved */
++ u16 result; /* Result used for acknowledgment */
++} __attribute__ ((packed));
++
++/* Library info structure */
++struct module_info {
++ u32 lib_version;
++ u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
++ u32 media_type;
++ u8 lib_name[12];
++ u32 lib_caps;
++ unsigned char b_date[16]; /* Lib build date */
++ unsigned char b_time[16]; /* Lib build time */
++} __attribute__ ((packed));
++
++/* Library slot info */
++struct lib_slot_info {
++ u8 slot_num; /* 1 or 2 */
++ u8 reserved1;
++ u16 reserved2;
++ u32 iram_size; /* slot size in IRAM */
++ u32 dram_size; /* slot size in DRAM */
++ u32 iram_offset; /* starting offset of slot in IRAM */
++ u32 dram_offset; /* starting offset of slot in DRAM */
++} __attribute__ ((packed));
++
++struct snd_sst_lib_download {
++ struct module_info lib_info; /* library info type, capabilities etc */
++ struct lib_slot_info slot_info; /* slot info to be downloaded */
++ u32 mod_entry_pt;
++};
++
++struct snd_sst_lib_download_info {
++ struct snd_sst_lib_download dload_lib;
++ u16 result; /* Result used for acknowledgment */
++ u8 pvt_id; /* Private ID */
++ u8 reserved; /* for alignment */
++};
++
++/* Alloc stream params structure */
++struct snd_sst_alloc_params {
++ struct snd_sst_str_type str_type;
++ struct snd_sst_stream_params stream_params;
++};
++
++struct snd_sst_fw_get_stream_params {
++ struct snd_sst_stream_params codec_params;
++ struct snd_sst_pmic_config pcm_params;
++};
++
++/* Alloc stream response message */
++struct snd_sst_alloc_response {
++ struct snd_sst_str_type str_type; /* Stream type for allocation */
++ struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
++};
++
++/* Drop response */
++struct snd_sst_drop_response {
++ u32 result;
++ u32 bytes;
++};
++
++/* CSV Voice call routing structure */
++struct snd_sst_control_routing {
++ u8 control; /* 0=start, 1=Stop */
++ u8 reserved[3]; /* Reserved- for 32 bit alignment */
++};
++
++
++struct ipc_post {
++ struct list_head node;
++ union ipc_header header; /* driver specific */
++ char *mailbox_data;
++};
++
++#endif /* __INTEL_SST_FW_IPC_H__ */
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_ipc.c
+@@ -0,0 +1,656 @@
++/*
++ * intel_sst_ipc.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file defines all ipc functions
++ */
++
++#include <linux/pci.h>
++#include <linux/firmware.h>
++#include <linux/sched.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++/*
++ * sst_send_sound_card_type - send sound card type
++ *
++ * this function sends the sound card type to sst dsp engine
++ */
++static void sst_send_sound_card_type(void)
++{
++ struct ipc_post *msg = NULL;
++
++ if (sst_create_short_msg(&msg))
++ return;
++
++ sst_fill_header(&msg->header, IPC_IA_SET_PMIC_TYPE, 0, 0);
++ msg->header.part.data = sst_drv_ctx->pmic_vendor;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return;
++}
++
++/**
++* sst_post_message - Posts message to SST
++*
++* @work: Pointer to work structure
++*
++* This function is called by any component in driver which
++* wants to send an IPC message. This will post message only if
++* busy bit is free
++*/
++void sst_post_message(struct work_struct *work)
++{
++ struct ipc_post *msg;
++ union ipc_header header;
++ union interrupt_reg imr;
++ int retval = 0;
++ imr.full = 0;
++
++ /*To check if LPE is in stalled state.*/
++ retval = sst_stalled();
++ if (retval < 0) {
++ pr_err("sst: in stalled state\n");
++ return;
++ }
++ pr_debug("sst: post message called\n");
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++
++ /* check list */
++ if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
++ /* list is empty, mask imr */
++ pr_debug("sst: Empty msg queue... masking\n");
++ imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
++ imr.part.done_interrupt = 1;
++ /* dummy register for shim workaround */
++ sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ return;
++ }
++
++ /* check busy bit */
++ header.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCX);
++ if (header.part.busy) {
++ /* busy, unmask */
++ pr_debug("sst: Busy not free... unmasking\n");
++ imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
++ imr.part.done_interrupt = 0;
++ /* dummy register for shim workaround */
++ sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ return;
++ }
++ /* copy msg from list */
++ msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
++ struct ipc_post, node);
++ list_del(&msg->node);
++ pr_debug("sst: Post message: header = %x\n", msg->header.full);
++ pr_debug("sst: size: = %x\n", msg->header.part.data);
++ if (msg->header.part.large)
++ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
++ msg->mailbox_data, msg->header.part.data);
++ /* dummy register for shim workaround */
++
++ sst_shim_write(sst_drv_ctx->shim, SST_IPCX, msg->header.full);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++
++ kfree(msg->mailbox_data);
++ kfree(msg);
++ return;
++}
++
++/*
++ * sst_clear_interrupt - clear the SST FW interrupt
++ *
++ * This function clears the interrupt register after the interrupt
++ * bottom half is complete allowing next interrupt to arrive
++ */
++void sst_clear_interrupt(void)
++{
++ union interrupt_reg isr;
++ union interrupt_reg imr;
++ union ipc_header clear_ipc;
++
++ imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
++ isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
++ /* write 1 to clear */;
++ isr.part.busy_interrupt = 1;
++ sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
++ /* Set IA done bit */
++ clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCD);
++ clear_ipc.part.busy = 0;
++ clear_ipc.part.done = 1;
++ clear_ipc.part.data = IPC_ACK_SUCCESS;
++ sst_shim_write(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
++ /* un mask busy interrupt */
++ imr.part.busy_interrupt = 0;
++ sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
++}
++
++/*
++ * process_fw_init - process the FW init msg
++ *
++ * @msg: IPC message from FW
++ *
++ * This function processes the FW init msg from FW
++ * marks FW state and prints debug info of loaded FW
++ */
++int process_fw_init(struct sst_ipc_msg_wq *msg)
++{
++ struct ipc_header_fw_init *init =
++ (struct ipc_header_fw_init *)msg->mailbox;
++ int retval = 0;
++
++ pr_debug("sst: *** FW Init msg came***\n");
++ if (init->result) {
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_ERROR;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ pr_debug("sst: FW Init failed, Error %x\n", init->result);
++ pr_err("sst: FW Init failed, Error %x\n", init->result);
++ retval = -init->result;
++ return retval;
++ }
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
++ sst_send_sound_card_type();
++ mutex_lock(&sst_drv_ctx->sst_lock);
++ sst_drv_ctx->sst_state = SST_FW_RUNNING;
++ mutex_unlock(&sst_drv_ctx->sst_lock);
++ pr_debug("sst: FW Version %x.%x\n",
++ init->fw_version.major, init->fw_version.minor);
++ pr_debug("sst: Build No %x Type %x\n",
++ init->fw_version.build, init->fw_version.type);
++ pr_debug("sst: Build date %s Time %s\n",
++ init->build_info.date, init->build_info.time);
++ sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, retval, NULL);
++ return retval;
++}
++/**
++* sst_process_message - Processes message from SST
++*
++* @work: Pointer to work structure
++*
++* This function is scheduled by ISR
++* It takes a msg from process_queue and does action based on msg
++*/
++void sst_process_message(struct work_struct *work)
++{
++ struct sst_ipc_msg_wq *msg =
++ container_of(work, struct sst_ipc_msg_wq, wq);
++ int str_id = msg->header.part.str_id;
++
++ pr_debug("sst: IPC process for %x\n", msg->header.full);
++
++ /* based on msg in list call respective handler */
++ switch (msg->header.part.msg_id) {
++ case IPC_SST_BUF_UNDER_RUN:
++ case IPC_SST_BUF_OVER_RUN:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n", str_id);
++ break;
++ }
++ pr_err("sst: Buffer under/overrun for%d\n",
++ msg->header.part.str_id);
++ pr_err("sst: Got Underrun & not to send data...ignore\n");
++ break;
++
++ case IPC_SST_GET_PLAY_FRAMES:
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
++ struct stream_info *stream ;
++
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: strid %d invalid\n", str_id);
++ break;
++ }
++ /* call sst_play_frame */
++ stream = &sst_drv_ctx->streams[str_id];
++ pr_debug("sst: sst_play_frames for %d\n",
++ msg->header.part.str_id);
++ mutex_lock(&sst_drv_ctx->streams[str_id].lock);
++ sst_play_frame(msg->header.part.str_id);
++ mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
++ break;
++ } else
++ pr_err("sst: sst_play_frames for Penwell!!\n");
++
++ case IPC_SST_GET_CAPT_FRAMES:
++ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
++ struct stream_info *stream;
++ /* call sst_capture_frame */
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: str id %d invalid\n", str_id);
++ break;
++ }
++ stream = &sst_drv_ctx->streams[str_id];
++ pr_debug("sst: sst_capture_frames for %d\n",
++ msg->header.part.str_id);
++ mutex_lock(&stream->lock);
++ if (stream->mmapped == false &&
++ stream->src == SST_DRV) {
++ pr_debug("sst: waking up block for copy.\n");
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.condition = true;
++ stream->data_blk.on = false;
++ wake_up(&sst_drv_ctx->wait_queue);
++ } else
++ sst_capture_frame(msg->header.part.str_id);
++ mutex_unlock(&stream->lock);
++ } else
++ pr_err("sst: sst_play_frames for Penwell!!\n");
++ break;
++
++ case IPC_IA_PRINT_STRING:
++ pr_debug("sst: been asked to print something by fw\n");
++ /* TBD */
++ break;
++
++ case IPC_IA_FW_INIT_CMPLT: {
++ /* send next data to FW */
++ process_fw_init(msg);
++ break;
++ }
++
++ case IPC_SST_STREAM_PROCESS_FATAL_ERR:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n", str_id);
++ break;
++ }
++ pr_err("sst: codec fatal error %x stream %d...\n",
++ msg->header.full, msg->header.part.str_id);
++ pr_err("sst: Dropping the stream\n");
++ sst_drop_stream(msg->header.part.str_id);
++ break;
++ case IPC_IA_LPE_GETTING_STALLED:
++ sst_drv_ctx->lpe_stalled = 1;
++ break;
++ case IPC_IA_LPE_UNSTALLED:
++ sst_drv_ctx->lpe_stalled = 0;
++ break;
++ default:
++ /* Illegal case */
++ pr_err("sst: Unhandled msg %x header %x\n",
++ msg->header.part.msg_id, msg->header.full);
++ }
++ sst_clear_interrupt();
++ return;
++}
++
++/**
++* sst_process_reply - Processes reply message from SST
++*
++* @work: Pointer to work structure
++*
++* This function is scheduled by ISR
++* It takes a reply msg from response_queue and
++* does action based on msg
++*/
++void sst_process_reply(struct work_struct *work)
++{
++ struct sst_ipc_msg_wq *msg =
++ container_of(work, struct sst_ipc_msg_wq, wq);
++
++ int str_id = msg->header.part.str_id;
++ struct stream_info *str_info;
++
++ switch (msg->header.part.msg_id) {
++ case IPC_IA_TARGET_DEV_SELECT:
++ if (!msg->header.part.data) {
++ sst_drv_ctx->tgt_dev_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->tgt_dev_blk.ret_code =
++ -msg->header.part.data;
++ }
++
++ if (sst_drv_ctx->tgt_dev_blk.on == true) {
++ sst_drv_ctx->tgt_dev_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_GET_FW_INFO: {
++ struct snd_sst_fw_info *fw_info =
++ (struct snd_sst_fw_info *)msg->mailbox;
++ if (msg->header.part.large) {
++ int major = fw_info->fw_version.major;
++ int minor = fw_info->fw_version.minor;
++ int build = fw_info->fw_version.build;
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
++ major, minor, build);
++ memcpy_fromio(sst_drv_ctx->fw_info_blk.data,
++ ((struct snd_sst_fw_info *)(msg->mailbox)),
++ sizeof(struct snd_sst_fw_info));
++ sst_drv_ctx->fw_info_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->fw_info_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (sst_drv_ctx->fw_info_blk.on == true) {
++ pr_debug("sst: Memcopy succedded\n");
++ sst_drv_ctx->fw_info_blk.on = false;
++ sst_drv_ctx->fw_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ }
++ case IPC_IA_SET_STREAM_MUTE:
++ if (!msg->header.part.data) {
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ sst_drv_ctx->mute_info_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->mute_info_blk.ret_code =
++ -msg->header.part.data;
++
++ }
++ if (sst_drv_ctx->mute_info_blk.on == true) {
++ sst_drv_ctx->mute_info_blk.on = false;
++ sst_drv_ctx->mute_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_SET_STREAM_VOL:
++ if (!msg->header.part.data) {
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id,
++ msg->header.part.data);
++ sst_drv_ctx->vol_info_blk.ret_code =
++ -msg->header.part.data;
++
++ }
++
++ if (sst_drv_ctx->vol_info_blk.on == true) {
++ sst_drv_ctx->vol_info_blk.on = false;
++ sst_drv_ctx->vol_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_GET_STREAM_VOL:
++ if (msg->header.part.large) {
++ pr_debug("sst: Large Msg Received Successfully\n");
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
++ (void *) msg->mailbox,
++ sizeof(struct snd_sst_vol));
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->vol_info_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (sst_drv_ctx->vol_info_blk.on == true) {
++ sst_drv_ctx->vol_info_blk.on = false;
++ sst_drv_ctx->vol_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++
++ case IPC_IA_GET_STREAM_PARAMS:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (msg->header.part.large) {
++ pr_debug("sst: Get stream large success\n");
++ memcpy_fromio(str_info->ctrl_blk.data,
++ ((void *)(msg->mailbox)),
++ sizeof(struct snd_sst_fw_get_stream_params));
++ str_info->ctrl_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++ }
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_DECODE_FRAMES:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (msg->header.part.large) {
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ memcpy_fromio(str_info->data_blk.data,
++ ((void *)(msg->mailbox)),
++ sizeof(struct snd_sst_decode_info));
++ str_info->data_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->data_blk.ret_code = -msg->header.part.data;
++ }
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.on = false;
++ str_info->data_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_DRAIN_STREAM:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (!msg->header.part.data) {
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ str_info->ctrl_blk.ret_code = 0;
++
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.on = false;
++ str_info->data_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++
++ case IPC_IA_DROP_STREAM:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: str id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (msg->header.part.large) {
++ struct snd_sst_drop_response *drop_resp =
++ (struct snd_sst_drop_response *)msg->mailbox;
++
++ pr_debug("sst: Drop ret bytes %x\n", drop_resp->bytes);
++
++ str_info->curr_bytes = drop_resp->bytes;
++ str_info->ctrl_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++ }
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_ENABLE_RX_TIME_SLOT:
++ if (!msg->header.part.data) {
++ pr_debug("sst: RX_TIME_SLOT success\n");
++ sst_drv_ctx->hs_info_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id,
++ msg->header.part.data);
++ sst_drv_ctx->hs_info_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (sst_drv_ctx->hs_info_blk.on == true) {
++ sst_drv_ctx->hs_info_blk.on = false;
++ sst_drv_ctx->hs_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_PAUSE_STREAM:
++ case IPC_IA_RESUME_STREAM:
++ case IPC_IA_SET_STREAM_PARAMS:
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (!msg->header.part.data) {
++ pr_debug("sst: Msg succedded %x\n",
++ msg->header.part.msg_id);
++ str_info->ctrl_blk.ret_code = 0;
++ } else {
++ pr_err("sst: Msg %x reply error %x\n",
++ msg->header.part.msg_id,
++ msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++ }
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n", str_id);
++ break;
++ }
++
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++
++ case IPC_IA_FREE_STREAM:
++ if (!msg->header.part.data) {
++ pr_debug("sst: Stream %d freed\n", str_id);
++ } else {
++ pr_err("sst: Free for %d ret error %x\n",
++ str_id, msg->header.part.data);
++ }
++ break;
++ case IPC_IA_ALLOC_STREAM: {
++ /* map to stream, call play */
++ struct snd_sst_alloc_response *resp =
++ (struct snd_sst_alloc_response *)msg->mailbox;
++ if (resp->str_type.result)
++ pr_err("sst: error alloc stream = %x\n",
++ resp->str_type.result);
++ sst_alloc_stream_response(str_id, resp);
++ break;
++ }
++
++ case IPC_IA_PLAY_FRAMES:
++ case IPC_IA_CAPT_FRAMES:
++ if (sst_validate_strid(str_id)) {
++ pr_err("sst: stream id %d invalid\n" , str_id);
++ break;
++ }
++ pr_debug("sst: Ack for play/capt frames recived\n");
++ break;
++
++ case IPC_IA_PREP_LIB_DNLD: {
++ struct snd_sst_str_type *str_type =
++ (struct snd_sst_str_type *)msg->mailbox;
++ pr_debug("sst: Prep Lib download %x\n",
++ msg->header.part.msg_id);
++ if (str_type->result)
++ pr_err("sst: Prep lib download %x\n", str_type->result);
++ else
++ pr_debug("sst: Can download codec now...\n");
++ sst_wake_up_alloc_block(sst_drv_ctx, str_id,
++ str_type->result, NULL);
++ break;
++ }
++
++ case IPC_IA_LIB_DNLD_CMPLT: {
++ struct snd_sst_lib_download_info *resp =
++ (struct snd_sst_lib_download_info *)msg->mailbox;
++ int retval = resp->result;
++
++ pr_debug("sst: Lib downloaded %x\n", msg->header.part.msg_id);
++ if (resp->result) {
++ pr_err("sst: err in lib dload %x\n", resp->result);
++ } else {
++ pr_debug("sst: Codec download complete...\n");
++ pr_debug("sst: codec Type %d Ver %d Built %s: %s\n",
++ resp->dload_lib.lib_info.lib_type,
++ resp->dload_lib.lib_info.lib_version,
++ resp->dload_lib.lib_info.b_date,
++ resp->dload_lib.lib_info.b_time);
++ }
++ sst_wake_up_alloc_block(sst_drv_ctx, str_id,
++ retval, NULL);
++ break;
++ }
++
++ case IPC_IA_GET_FW_VERSION: {
++ struct ipc_header_fw_init *version =
++ (struct ipc_header_fw_init *)msg->mailbox;
++ int major = version->fw_version.major;
++ int minor = version->fw_version.minor;
++ int build = version->fw_version.build;
++ dev_info(&sst_drv_ctx->pci->dev,
++ "INFO: ***LOADED SST FW VERSION*** = %02d.%02d.%02d\n",
++ major, minor, build);
++ break;
++ }
++ case IPC_IA_GET_FW_BUILD_INF: {
++ struct sst_fw_build_info *build =
++ (struct sst_fw_build_info *)msg->mailbox;
++ pr_debug("sst: Build date:%sTime:%s", build->date, build->time);
++ break;
++ }
++ case IPC_IA_SET_PMIC_TYPE:
++ break;
++ case IPC_IA_START_STREAM:
++ pr_debug("sst: reply for START STREAM %x\n", msg->header.full);
++ break;
++ default:
++ /* Illegal case */
++ pr_err("sst: process reply:default = %x\n", msg->header.full);
++ }
++ sst_clear_interrupt();
++ return;
++}
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_pvt.c
+@@ -0,0 +1,311 @@
++/*
++ * intel_sst_pvt.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ *
++ * This file contains all private functions
++ */
++
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/firmware.h>
++#include <linux/sched.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++/*
++ * sst_get_block_stream - get a new block stream
++ *
++ * @sst_drv_ctx: Driver context structure
++ *
++ * This function assigns a block for the calls that don't have stream context yet
++ * the blocks are used for waiting on Firmware's response for any operation
++ * Should be called with stream lock held
++ */
++int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx)
++{
++ int i;
++
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ if (sst_drv_ctx->alloc_block[i].sst_id == BLOCK_UNINIT) {
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++ sst_drv_ctx->alloc_block[i].ops_block.ret_code = 0;
++ sst_drv_ctx->alloc_block[i].sst_id = 0;
++ break;
++ }
++ }
++ if (i == MAX_ACTIVE_STREAM) {
++ pr_err("sst: max alloc_stream reached");
++ i = -EBUSY; /* active stream limit reached */
++ }
++ return i;
++}
++
++/*
++ * sst_wait_interruptible - wait on event
++ *
++ * @sst_drv_ctx: Driver context
++ * @block: Driver block to wait on
++ *
++ * This function waits without a timeout (and is interruptible) for a
++ * given block event
++ */
++int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block)
++{
++ int retval = 0;
++
++ if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
++ block->condition)) {
++ /* event wake */
++ if (block->ret_code < 0) {
++ pr_err("sst: stream failed %d\n", block->ret_code);
++ retval = -EBUSY;
++ } else {
++ pr_debug("sst: event up\n");
++ retval = 0;
++ }
++ } else {
++ pr_err("sst: signal interrupted\n");
++ retval = -EINTR;
++ }
++ return retval;
++
++}
++
++
++/*
++ * sst_wait_interruptible_timeout - wait on event interruptible
++ *
++ * @sst_drv_ctx: Driver context
++ * @block: Driver block to wait on
++ * @timeout: time for wait on
++ *
++ * This function waits with a timeout value (and is interruptible) on a
++ * given block event
++ */
++int sst_wait_interruptible_timeout(
++ struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block, int timeout)
++{
++ int retval = 0;
++
++ pr_debug("sst: sst_wait_interruptible_timeout - waiting....\n");
++ if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
++ block->condition,
++ msecs_to_jiffies(timeout))) {
++ if (block->ret_code < 0)
++ pr_err("sst: stream failed %d\n", block->ret_code);
++ else
++ pr_debug("sst: event up\n");
++ retval = block->ret_code;
++ } else {
++ block->on = false;
++ pr_err("sst: timeout occured...\n");
++ /*setting firmware state as uninit so that the
++ firmware will get re-downloaded on next request
++ this is because firmware not responding for 5 sec
++ is equivalent to some unrecoverable error of FW
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ retval = -EBUSY;
++ }
++ return retval;
++
++}
++
++
++/*
++ * sst_wait_timeout - wait on event for timeout
++ *
++ * @sst_drv_ctx: Driver context
++ * @block: Driver block to wait on
++ *
++ * This function waits with a timeout value (and is not interruptible) on a
++ * given block event
++ */
++int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
++ struct stream_alloc_block *block)
++{
++ int retval = 0;
++
++ /* NOTE:
++ Observed that FW processes the alloc msg and replies even
++ before the alloc thread has finished execution */
++ pr_debug("sst: waiting for %x, condition %x\n",
++ block->sst_id, block->ops_block.condition);
++ if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
++ block->ops_block.condition,
++ msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
++ /* event wake */
++ pr_debug("sst: Event wake %x\n", block->ops_block.condition);
++ pr_debug("sst: message ret: %d\n", block->ops_block.ret_code);
++ retval = block->ops_block.ret_code;
++ } else {
++ block->ops_block.on = false;
++ pr_err("sst: Wait timed-out %x\n", block->ops_block.condition);
++ /* setting firmware state as uninit so that the
++ firmware will get re-downloaded on next request
++ this is because firmware not responding for 5 sec
++ is equivalent to some unrecoverable error of FW
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ retval = -EBUSY;
++ }
++ return retval;
++
++}
++
++/*
++ * sst_create_large_msg - create a large IPC message
++ *
++ * @arg: ipc message
++ *
++ * this function allocates structures to send a large message to the firmware
++ */
++int sst_create_large_msg(struct ipc_post **arg)
++{
++ struct ipc_post *msg;
++
++ msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
++ if (!msg) {
++ pr_err("sst: kzalloc msg failed\n");
++ return -ENOMEM;
++ }
++
++ msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
++ if (!msg->mailbox_data) {
++ kfree(msg);
++ pr_err("sst: kzalloc mailbox_data failed");
++ return -ENOMEM;
++ };
++ *arg = msg;
++ return 0;
++}
++
++/*
++ * sst_create_short_msg - create a short IPC message
++ *
++ * @arg: ipc message
++ *
++ * this function allocates structures to send a short message to the firmware
++ */
++int sst_create_short_msg(struct ipc_post **arg)
++{
++ struct ipc_post *msg;
++
++ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
++ if (!msg) {
++ pr_err("sst: kzalloc msg failed\n");
++ return -ENOMEM;
++ }
++ msg->mailbox_data = NULL;
++ *arg = msg;
++ return 0;
++}
++
++/*
++ * sst_clean_stream - clean the stream context
++ *
++ * @stream: stream structure
++ *
++ * this function resets the stream contexts
++ * should be called in free
++ */
++void sst_clean_stream(struct stream_info *stream)
++{
++ struct sst_stream_bufs *bufs = NULL, *_bufs;
++ stream->status = STREAM_UN_INIT;
++ stream->prev = STREAM_UN_INIT;
++ mutex_lock(&stream->lock);
++ list_for_each_entry_safe(bufs, _bufs, &stream->bufs, node) {
++ list_del(&bufs->node);
++ kfree(bufs);
++ }
++ mutex_unlock(&stream->lock);
++
++ if (stream->ops != STREAM_OPS_PLAYBACK_DRM)
++ kfree(stream->decode_ibuf);
++}
++
++/*
++ * sst_wake_up_alloc_block - wake up waiting block
++ *
++ * @sst_drv_ctx: Driver context
++ * @sst_id: stream id
++ * @status: status of wakeup
++ * @data: data pointer of wakeup
++ *
++ * This function wakes up a sleeping block event based on the response
++ */
++void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
++ u8 sst_id, int status, void *data)
++{
++ int i;
++
++ /* Unblock with retval code */
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ if (sst_id == sst_drv_ctx->alloc_block[i].sst_id) {
++ sst_drv_ctx->alloc_block[i].ops_block.condition = true;
++ sst_drv_ctx->alloc_block[i].ops_block.ret_code = status;
++ sst_drv_ctx->alloc_block[i].ops_block.data = data;
++ wake_up(&sst_drv_ctx->wait_queue);
++ break;
++ }
++ }
++}
++
++/*
++ * sst_enable_rx_timeslot - Send msg to enable the RX time slot
++ * @status: rx timeslot to be enabled
++ *
++ * This function is called when the RX timeslot is required to be enabled
++ */
++int sst_enable_rx_timeslot(int status)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ if (sst_create_short_msg(&msg)) {
++ pr_err("sst: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ pr_debug("sst: ipc message sending: ENABLE_RX_TIME_SLOT\n");
++ sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
++ msg->header.part.data = status;
++ sst_drv_ctx->hs_info_blk.condition = false;
++ sst_drv_ctx->hs_info_blk.ret_code = 0;
++ sst_drv_ctx->hs_info_blk.on = true;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->hs_info_blk, SST_BLOCK_TIMEOUT);
++ return retval;
++}
++
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_stream.c
+@@ -0,0 +1,575 @@
++/*
++ * intel_sst_stream.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the stream operations of SST driver
++ */
++
++#include <linux/pci.h>
++#include <linux/firmware.h>
++#include <linux/sched.h>
++#include <sound/intel_sst_ioctl.h>
++#include <sound/intel_sst.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++/*
++ * sst_check_device_type - Check the medfield device type
++ *
++ * @device: Device to be checked
++ * @num_ch: Number of channels queried
++ * @pcm_slot: slot to be enabled for this device
++ *
++ * This checks the device against the map and calculates pcm_slot value
++ */
++int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
++{
++ if (device > MAX_NUM_STREAMS_MFLD) {
++ pr_debug("sst: device type invalid %d\n", device);
++ return -EINVAL;
++ }
++ if (sst_drv_ctx->streams[device].status == STREAM_UN_INIT) {
++ if (device == SND_SST_DEVICE_VIBRA && num_chan == 1)
++ *pcm_slot = 0x10;
++ else if (device == SND_SST_DEVICE_HAPTIC && num_chan == 1)
++ *pcm_slot = 0x20;
++ else if (device == SND_SST_DEVICE_IHF && num_chan == 1)
++ *pcm_slot = 0x04;
++ else if (device == SND_SST_DEVICE_IHF && num_chan == 2)
++ *pcm_slot = 0x0C;
++ else if (device == SND_SST_DEVICE_HEADSET && num_chan == 1)
++ *pcm_slot = 0x01;
++ else if (device == SND_SST_DEVICE_HEADSET && num_chan == 2)
++ *pcm_slot = 0x03;
++ else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 1)
++ *pcm_slot = 0x01;
++ else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 2)
++ *pcm_slot = 0x03;
++ else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 3)
++ *pcm_slot = 0x07;
++ else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 4)
++ *pcm_slot = 0x0F;
++ else {
++ pr_debug("sst: No condition satisfied.. ret err\n");
++ return -EINVAL;
++ }
++ } else {
++ pr_debug("sst: this stream state is not uni-init, is %d\n",
++ sst_drv_ctx->streams[device].status);
++ return -EBADRQC;
++ }
++ pr_debug("sst: returning slot %x\n", *pcm_slot);
++ return 0;
++}
++/**
++ * get_mrst_stream_id - gets a new stream id for use
++ *
++ * This function searches the current streams and allocates an empty stream
++ * lock stream_lock required to be held before calling this
++ */
++static unsigned int get_mrst_stream_id(void)
++{
++ int i;
++
++ for (i = 1; i <= MAX_NUM_STREAMS_MRST; i++) {
++ if (sst_drv_ctx->streams[i].status == STREAM_UN_INIT)
++ return i;
++ }
++ pr_debug("sst: Didnt find empty stream for mrst\n");
++ return -EBUSY;
++}
++
++/**
++ * sst_alloc_stream - Send msg for a new stream ID
++ *
++ * @params: stream params
++ * @stream_ops: operation of stream PB/capture
++ * @codec: codec for stream
++ * @device: device stream to be allocated for
++ *
++ * This function is called by any function which wants to start
++ * a new stream. This also checks if a stream exists which is idle
++ * it initializes idle stream id to this request
++ */
++int sst_alloc_stream(char *params, unsigned int stream_ops,
++ u8 codec, unsigned int device)
++{
++ struct ipc_post *msg = NULL;
++ struct snd_sst_alloc_params alloc_param;
++ unsigned int pcm_slot = 0, num_ch, str_id;
++ struct snd_sst_stream_params *sparams;
++ struct stream_info *str_info;
++
++ pr_debug("SST DBG:entering sst_alloc_stream\n");
++ pr_debug("SST DBG:%d %d %d\n", stream_ops, codec, device);
++
++ BUG_ON(!params);
++ sparams = (struct snd_sst_stream_params *)params;
++ num_ch = sparams->uc.pcm_params.num_chan;
++ /*check the device type*/
++ if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
++ if (sst_check_device_type(device, num_ch, &pcm_slot))
++ return -EINVAL;
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ str_id = device;
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ pr_debug("SST_DBG: slot %x\n", pcm_slot);
++ } else {
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ str_id = get_mrst_stream_id();
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ if (str_id <= 0)
++ return -EBUSY;
++ }
++ /*allocate device type context*/
++ sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
++ str_id, stream_ops, pcm_slot, device);
++ /* send msg to FW to allocate a stream */
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
++ msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
++ alloc_param.str_type.codec_type = codec;
++ alloc_param.str_type.str_type = SST_STREAM_TYPE_MUSIC;
++ alloc_param.str_type.operation = stream_ops;
++ alloc_param.str_type.protected_str = 0; /* non drm */
++ alloc_param.str_type.time_slots = pcm_slot;
++ alloc_param.str_type.result = alloc_param.str_type.reserved = 0;
++ memcpy(&alloc_param.stream_params, params,
++ sizeof(struct snd_sst_stream_params));
++
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
++ sizeof(alloc_param));
++ str_info = &sst_drv_ctx->streams[str_id];
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ pr_debug("SST DBG:alloc stream done\n");
++ return str_id;
++}
++
++
++/*
++ * sst_alloc_stream_response - process alloc reply
++ *
++ * @str_id: stream id for which the stream has been allocated
++ * @resp: the stream response from firmware
++ *
++ * This function is called by firmware as a response to stream allocation
++ * request
++ */
++int sst_alloc_stream_response(unsigned int str_id,
++ struct snd_sst_alloc_response *resp)
++{
++ int retval = 0;
++ struct stream_info *str_info;
++ struct snd_sst_lib_download *lib_dnld;
++
++ pr_debug("SST DEBUG: stream number given = %d\n", str_id);
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (resp->str_type.result == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
++ lib_dnld = kzalloc(sizeof(*lib_dnld), GFP_KERNEL);
++ memcpy(lib_dnld, &resp->lib_dnld, sizeof(*lib_dnld));
++ } else
++ lib_dnld = NULL;
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.data = lib_dnld;
++ str_info->ctrl_blk.condition = true;
++ str_info->ctrl_blk.ret_code = resp->str_type.result;
++ pr_debug("SST DEBUG: sst_alloc_stream_response: waking up.\n");
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ return retval;
++}
++
++
++/**
++* sst_get_fw_info - Send msg to query for firmware configurations
++* @info: out param that holds the firmare configurations
++*
++* This function is called when the firmware configurations are queiried for
++*/
++int sst_get_fw_info(struct snd_sst_fw_info *info)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ pr_debug("SST DBG:sst_get_fw_info called\n");
++
++ if (sst_create_short_msg(&msg)) {
++ pr_err("SST ERR: message creation failed\n");
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_GET_FW_INFO, 0, 0);
++ sst_drv_ctx->fw_info_blk.condition = false;
++ sst_drv_ctx->fw_info_blk.ret_code = 0;
++ sst_drv_ctx->fw_info_blk.on = true;
++ sst_drv_ctx->fw_info_blk.data = info;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->fw_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ pr_err("SST ERR: error in fw_info = %d\n", retval);
++ retval = -EIO;
++ }
++ return retval;
++}
++
++
++/**
++* sst_pause_stream - Send msg for a pausing stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to pause
++* an already running stream.
++*/
++int sst_start_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ pr_debug("sst_start_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status != STREAM_INIT)
++ return -EBADRQC;
++ if (sst_create_short_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_START_STREAM, 0, str_id);
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return retval;
++}
++
++/*
++ * sst_pause_stream - Send msg for a pausing stream
++ * @str_id: stream ID
++ *
++ * This function is called by any function which wants to pause
++ * an already running stream.
++ */
++int sst_pause_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status == STREAM_PAUSED)
++ return 0;
++ if (str_info->status == STREAM_RUNNING ||
++ str_info->status == STREAM_INIT) {
++ if (str_info->prev == STREAM_UN_INIT)
++ return -EBADRQC;
++ if (str_info->ctrl_blk.on == true) {
++ pr_err("SST ERR: control path is in use\n ");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM, 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (retval == 0) {
++ str_info->prev = str_info->status;
++ str_info->status = STREAM_PAUSED;
++ } else if (retval == SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ sst_clean_stream(str_info);
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ }
++ } else {
++ retval = -EBADRQC;
++ pr_err("SST ERR:BADQRC for stream\n ");
++ }
++
++ return retval;
++}
++
++/**
++ * sst_resume_stream - Send msg for resuming stream
++ * @str_id: stream ID
++ *
++ * This function is called by any function which wants to resume
++ * an already paused stream.
++ */
++int sst_resume_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status == STREAM_RUNNING)
++ return 0;
++ if (str_info->status == STREAM_PAUSED) {
++ if (str_info->ctrl_blk.on == true) {
++ pr_err("SST ERR: control path in use\n");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg)) {
++ pr_err("SST ERR: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM, 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (!retval) {
++ if (str_info->prev == STREAM_RUNNING)
++ str_info->status = STREAM_RUNNING;
++ else
++ str_info->status = STREAM_INIT;
++ str_info->prev = STREAM_PAUSED;
++ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ sst_clean_stream(str_info);
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ }
++ } else {
++ retval = -EBADRQC;
++ pr_err("SST ERR: BADQRC for stream\n");
++ }
++
++ return retval;
++}
++
++
++/**
++ * sst_drop_stream - Send msg for stopping stream
++ * @str_id: stream ID
++ *
++ * This function is called by any function which wants to stop
++ * a stream.
++ */
++int sst_drop_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct sst_stream_bufs *bufs = NULL, *_bufs;
++ struct stream_info *str_info;
++
++ pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++
++ if (str_info->status != STREAM_UN_INIT &&
++ str_info->status != STREAM_DECODE) {
++ if (str_info->ctrl_blk.on == true) {
++ pr_err("SST ERR: control path in use\n");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg)) {
++ pr_err("SST ERR: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_DROP_STREAM, 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (!retval) {
++ pr_debug("SST DBG:drop success\n");
++ str_info->prev = STREAM_UN_INIT;
++ str_info->status = STREAM_INIT;
++ if (str_info->src != MAD_DRV) {
++ mutex_lock(&str_info->lock);
++ list_for_each_entry_safe(bufs, _bufs,
++ &str_info->bufs, node) {
++ list_del(&bufs->node);
++ kfree(bufs);
++ }
++ mutex_unlock(&str_info->lock);
++ }
++ str_info->cumm_bytes += str_info->curr_bytes;
++ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ sst_clean_stream(str_info);
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ }
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.condition = true;
++ str_info->data_blk.ret_code = retval;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ } else {
++ retval = -EBADRQC;
++ pr_err("SST ERR:BADQRC for stream\n");
++ }
++ return retval;
++}
++
++/**
++* sst_drain_stream - Send msg for draining stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to drain
++* a stream.
++*/
++int sst_drain_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++
++ if (str_info->status != STREAM_RUNNING &&
++ str_info->status != STREAM_INIT &&
++ str_info->status != STREAM_PAUSED) {
++ pr_err("SST ERR: BADQRC for stream = %d\n",
++ str_info->status);
++ return -EBADRQC;
++ }
++
++ if (str_info->status == STREAM_INIT) {
++ if (sst_create_short_msg(&msg)) {
++ pr_err("SST ERR: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ } else
++ str_info->need_draining = true;
++ str_info->data_blk.condition = false;
++ str_info->data_blk.ret_code = 0;
++ str_info->data_blk.on = true;
++ retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
++ str_info->need_draining = false;
++ if (retval == -SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ sst_clean_stream(str_info);
++ }
++ return retval;
++}
++
++/**
++ * sst_free_stream - Frees a stream
++ * @str_id: stream ID
++ *
++ * This function is called by any function which wants to free
++ * a stream.
++ */
++int sst_free_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++
++ if (str_info->status != STREAM_UN_INIT) {
++ if (sst_create_short_msg(&msg)) {
++ pr_err("SST ERR: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_FREE_STREAM, 0, str_id);
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ str_info->prev = str_info->status;
++ str_info->status = STREAM_UN_INIT;
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.condition = true;
++ str_info->data_blk.ret_code = 0;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ mutex_lock(&sst_drv_ctx->stream_lock);
++ sst_clean_stream(str_info);
++ mutex_unlock(&sst_drv_ctx->stream_lock);
++ pr_debug("SST DBG:Stream freed\n");
++ } else {
++ retval = -EBADRQC;
++ pr_debug("SST DBG:BADQRC for stream\n");
++ }
++
++ return retval;
++}
++
++
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_stream_encoded.c
+@@ -0,0 +1,1273 @@
++/*
++ * intel_sst_stream.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the stream operations of SST driver
++ */
++
++#include <linux/pci.h>
++#include <linux/syscalls.h>
++#include <linux/firmware.h>
++#include <linux/sched.h>
++#ifdef CONFIG_MRST_RAR_HANDLER
++#include "../../../drivers/staging/rar_register/rar_register.h"
++#include "../../../drivers/staging/memrar/memrar.h"
++#endif
++#include <sound/intel_sst_ioctl.h>
++#include <sound/intel_sst.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++/**
++* sst_get_stream_params - Send msg to query for stream parameters
++* @str_id: stream id for which the parameters are queried for
++* @get_params: out parameters to which the parameters are copied to
++*
++* This function is called when the stream parameters are queiried for
++*/
++int sst_get_stream_params(int str_id,
++ struct snd_sst_get_stream_params *get_params)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++ struct snd_sst_fw_get_stream_params *fw_params;
++
++ pr_debug("sst: get_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status != STREAM_UN_INIT) {
++ if (str_info->ctrl_blk.on == true) {
++ pr_err("sst: control path in use\n");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg)) {
++ pr_err("sst: message creation failed\n");
++ return -ENOMEM;
++ }
++ fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
++ if (!fw_params) {
++ pr_err("sst: mem allcoation failed\n ");
++ kfree(msg);
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_GET_STREAM_PARAMS,
++ 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ str_info->ctrl_blk.data = (void *) fw_params;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ get_params->codec_params.result = retval;
++ kfree(fw_params);
++ return -EIO;
++ }
++ memcpy(&get_params->pcm_params, &fw_params->pcm_params,
++ sizeof(fw_params->pcm_params));
++ memcpy(&get_params->codec_params.sparams,
++ &fw_params->codec_params,
++ sizeof(fw_params->codec_params));
++ get_params->codec_params.result = 0;
++ get_params->codec_params.stream_id = str_id;
++ get_params->codec_params.codec = str_info->codec;
++ get_params->codec_params.ops = str_info->ops;
++ get_params->codec_params.stream_type = str_info->str_type;
++ kfree(fw_params);
++ } else {
++ pr_debug("sst: Stream is not in the init state\n");
++ }
++ return retval;
++}
++
++/**
++ * sst_set_stream_param - Send msg for setting stream parameters
++ *
++ * @str_id: stream id
++ * @str_param: stream params
++ *
++ * This function sets stream params during runtime
++ */
++int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ BUG_ON(!str_param);
++ if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
++ pr_err("sst: Invalid operation\n");
++ return -EINVAL;
++ }
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ pr_debug("sst: set_stream for %d\n", str_id);
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
++ if (str_info->ctrl_blk.on == true) {
++ pr_err("sst: control path in use\n");
++ return -EAGAIN;
++ }
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header,
++ IPC_IA_SET_STREAM_PARAMS, 1, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ msg->header.part.data = sizeof(u32) +
++ sizeof(str_param->sparams);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &str_param->sparams,
++ sizeof(str_param->sparams));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (retval < 0) {
++ retval = -EIO;
++ sst_clean_stream(str_info);
++ }
++ } else {
++ retval = -EBADRQC;
++ pr_err("sst: BADQRC for stream\n");
++ }
++ return retval;
++}
++
++/**
++* sst_get_vol - This fuction allows to get the premix gain or gain of a stream
++*
++* @get_vol: this is an output param through which the volume
++* structure is passed back to user
++*
++* This function is called when the premix gain or stream gain is queried for
++*/
++int sst_get_vol(struct snd_sst_vol *get_vol)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct snd_sst_vol *fw_get_vol;
++ int str_id = get_vol->stream_id;
++
++ pr_debug("sst: get vol called\n");
++
++ if (sst_create_short_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header,
++ IPC_IA_GET_STREAM_VOL, 0, str_id);
++ sst_drv_ctx->vol_info_blk.condition = false;
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ sst_drv_ctx->vol_info_blk.on = true;
++ fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
++ if (!fw_get_vol) {
++ pr_err("sst: mem allocation failed\n");
++ kfree(msg);
++ return -ENOMEM;
++ }
++ sst_drv_ctx->vol_info_blk.data = (void *)fw_get_vol;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval)
++ retval = -EIO;
++ else {
++ pr_debug("sst: stream id %d\n", fw_get_vol->stream_id);
++ pr_debug("sst: volume %d\n", fw_get_vol->volume);
++ pr_debug("sst: ramp duration %d\n", fw_get_vol->ramp_duration);
++ pr_debug("sst: ramp_type %d\n", fw_get_vol->ramp_type);
++ memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
++ }
++ return retval;
++}
++
++/**
++* sst_set_vol - This fuction allows to set the premix gain or gain of a stream
++*
++* @set_vol: this holds the volume structure that needs to be set
++*
++* This function is called when premix gain or stream gain is requested to be set
++*/
++int sst_set_vol(struct snd_sst_vol *set_vol)
++{
++
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ pr_debug("sst: set vol called\n");
++
++ if (sst_create_large_msg(&msg)) {
++ pr_err("sst: message creation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
++ set_vol->stream_id);
++
++ msg->header.part.data = sizeof(u32) + sizeof(*set_vol);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), set_vol, sizeof(*set_vol));
++ sst_drv_ctx->vol_info_blk.condition = false;
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ sst_drv_ctx->vol_info_blk.on = true;
++ sst_drv_ctx->vol_info_blk.data = set_vol;
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ pr_err("sst: error in set_vol = %d\n", retval);
++ retval = -EIO;
++ }
++ return retval;
++}
++
++/**
++* sst_set_mute - This fuction sets premix mute or soft mute of a stream
++*
++* @set_mute: this holds the mute structure that needs to be set
++*
++* This function is called when premix mute or stream mute requested to be set
++*/
++int sst_set_mute(struct snd_sst_mute *set_mute)
++{
++
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ pr_debug("sst: set mute called\n");
++
++ if (sst_create_large_msg(&msg)) {
++ pr_err("sst: message creation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
++ set_mute->stream_id);
++ sst_drv_ctx->mute_info_blk.condition = false;
++ sst_drv_ctx->mute_info_blk.ret_code = 0;
++ sst_drv_ctx->mute_info_blk.on = true;
++ sst_drv_ctx->mute_info_blk.data = set_mute;
++
++ msg->header.part.data = sizeof(u32) + sizeof(*set_mute);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), set_mute,
++ sizeof(*set_mute));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ pr_err("sst: error in set_mute = %d\n", retval);
++ retval = -EIO;
++ }
++ return retval;
++}
++
++int sst_prepare_target(struct snd_sst_slot_info *slot)
++{
++ if (slot->target_device == SND_SST_TARGET_PMIC
++ && slot->device_instance == 1) {
++ /*music mode*/
++ if (sst_drv_ctx->pmic_port_instance == 0)
++ sst_drv_ctx->scard_ops->set_voice_port(
++ DEACTIVATE);
++ } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
++ slot->target_device == SND_SST_TARGET_MODEM) &&
++ slot->device_instance == 0) {
++ /*voip mode where pcm0 is active*/
++ if (sst_drv_ctx->pmic_port_instance == 1)
++ sst_drv_ctx->scard_ops->set_audio_port(
++ DEACTIVATE);
++ }
++ return 0;
++}
++
++int sst_activate_target(struct snd_sst_slot_info *slot)
++{
++ if (slot->target_device == SND_SST_TARGET_PMIC &&
++ slot->device_instance == 1) {
++ /*music mode*/
++ sst_drv_ctx->pmic_port_instance = 1;
++ sst_drv_ctx->scard_ops->set_audio_port(ACTIVATE);
++ sst_drv_ctx->scard_ops->set_pcm_audio_params(
++ slot->pcm_params.sfreq,
++ slot->pcm_params.pcm_wd_sz,
++ slot->pcm_params.num_chan);
++ if (sst_drv_ctx->pb_streams)
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(1);
++ if (sst_drv_ctx->cp_streams)
++ sst_drv_ctx->scard_ops->power_up_pmic_cp(1);
++ } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
++ slot->target_device == SND_SST_TARGET_MODEM) &&
++ slot->device_instance == 0) {
++ /*voip mode where pcm0 is active*/
++ sst_drv_ctx->pmic_port_instance = 0;
++ sst_drv_ctx->scard_ops->set_voice_port(
++ ACTIVATE);
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(0);
++ /*sst_drv_ctx->scard_ops->power_up_pmic_cp(0);*/
++ }
++ return 0;
++}
++
++int sst_parse_target(struct snd_sst_slot_info *slot)
++{
++ int retval = 0;
++
++ if (slot->action == SND_SST_PORT_ACTIVATE &&
++ slot->device_type == SND_SST_DEVICE_PCM) {
++ retval = sst_activate_target(slot);
++ if (retval)
++ pr_err("sst: SST_Activate_target_fail\n");
++ else
++ pr_err("sst: SST_Activate_target_pass\n");
++ return retval;
++ } else if (slot->action == SND_SST_PORT_PREPARE &&
++ slot->device_type == SND_SST_DEVICE_PCM) {
++ retval = sst_prepare_target(slot);
++ if (retval)
++ pr_err("sst: SST_prepare_target_fail\n");
++ else
++ pr_err("sst: SST_prepare_target_pass\n");
++ return retval;
++ } else {
++ pr_err("sst: slot_action : %d, device_type: %d\n",
++ slot->action, slot->device_type);
++ return retval;
++ }
++}
++
++int sst_send_target(struct snd_sst_target_device *target)
++{
++ int retval;
++ struct ipc_post *msg;
++
++ if (sst_create_large_msg(&msg)) {
++ pr_err("sst: message creation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
++ sst_drv_ctx->tgt_dev_blk.condition = false;
++ sst_drv_ctx->tgt_dev_blk.ret_code = 0;
++ sst_drv_ctx->tgt_dev_blk.on = true;
++
++ msg->header.part.data = sizeof(u32) + sizeof(*target);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), target,
++ sizeof(*target));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ pr_debug("sst: message sent- waiting\n");
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
++ if (retval)
++ pr_err("sst: target device ipc failed = 0x%x\n", retval);
++ return retval;
++
++}
++
++int sst_target_device_validate(struct snd_sst_target_device *target)
++{
++ int retval = 0;
++ int i;
++
++ for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
++ if (target->devices[i].device_type == SND_SST_DEVICE_PCM) {
++ /*pcm device, check params*/
++ if (target->devices[i].device_instance == 1) {
++ if ((target->devices[i].device_mode !=
++ SND_SST_DEV_MODE_PCM_MODE4_I2S) &&
++ (target->devices[i].device_mode !=
++ SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED)
++ && (target->devices[i].device_mode !=
++ SND_SST_DEV_MODE_PCM_MODE1))
++ goto err;
++ } else if (target->devices[i].device_instance == 0) {
++ if ((target->devices[i].device_mode !=
++ SND_SST_DEV_MODE_PCM_MODE2)
++ && (target->devices[i].device_mode !=
++ SND_SST_DEV_MODE_PCM_MODE4_I2S)
++ && (target->devices[i].device_mode !=
++ SND_SST_DEV_MODE_PCM_MODE1))
++ goto err;
++ if (target->devices[i].pcm_params.sfreq != 8000
++ || target->devices[i].pcm_params.num_chan != 1
++ || target->devices[i].pcm_params.pcm_wd_sz !=
++ 16)
++ goto err;
++ } else {
++err:
++ pr_err("sst: i/p params incorrect\n");
++ return -EINVAL;
++ }
++ }
++ }
++ return retval;
++}
++
++/**
++ * sst_target_device_select - This fuction sets the target device configurations
++ *
++ * @target: this parameter holds the configurations to be set
++ *
++ * This function is called when the user layer wants to change the target
++ * device's configurations
++ */
++
++int sst_target_device_select(struct snd_sst_target_device *target)
++{
++ int retval, i, prepare_count = 0;
++
++ pr_debug("sst: Target Device Select\n");
++
++ if (target->device_route < 0 || target->device_route > 2) {
++ pr_err("sst: device route is invalid\n");
++ return -EINVAL;
++ }
++
++ if (target->device_route != 0) {
++ pr_err("sst: Unsupported config\n");
++ return -EIO;
++ }
++ retval = sst_target_device_validate(target);
++ if (retval)
++ return retval;
++
++ retval = sst_send_target(target);
++ if (retval)
++ return retval;
++ for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
++ if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
++ pr_debug("sst: activate called in %d\n", i);
++ retval = sst_parse_target(&target->devices[i]);
++ if (retval)
++ return retval;
++ } else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
++ pr_debug("sst: PREPARE in %d, Forwading\n", i);
++ retval = sst_parse_target(&target->devices[i]);
++ if (retval) {
++ pr_err("sst: Parse Target fail %d", retval);
++ return retval;
++ }
++ pr_debug("sst: Parse Target successful %d", retval);
++ if (target->devices[i].device_type ==
++ SND_SST_DEVICE_PCM)
++ prepare_count++;
++ }
++ }
++ if (target->devices[0].action == SND_SST_PORT_PREPARE &&
++ prepare_count == 0)
++ sst_drv_ctx->scard_ops->power_down_pmic();
++
++ return retval;
++}
++#ifdef CONFIG_MRST_RAR_HANDLER
++/*This function gets the physical address of the secure memory from the handle*/
++static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
++{
++ int retval = 0, rar_status = 0;
++
++ rar_status = rar_handle_to_bus(buffers, count);
++
++ if (count != rar_status) {
++ pr_err("sst: The rar CALL Failed");
++ retval = -EIO;
++ }
++ if (buffers->info.type != RAR_TYPE_AUDIO) {
++ pr_err("sst: Invalid RAR type\n");
++ return -EINVAL;
++ }
++ return retval;
++}
++
++#endif
++
++/* This function creates the scatter gather list to be sent to firmware to
++capture/playback data*/
++static int sst_create_sg_list(struct stream_info *stream,
++ struct sst_frame_info *sg_list)
++{
++ struct sst_stream_bufs *kbufs = NULL;
++#ifdef CONFIG_MRST_RAR_HANDLER
++ struct RAR_buffer rar_buffers;
++#endif
++ int i = 0, retval = 0;
++ list_for_each_entry(kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == false) {
++ #ifdef CONFIG_MRST_RAR_HANDLER
++ if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ pr_debug("sst: DRM playback handling\n");
++ rar_buffers.info.handle = (__u32)kbufs->addr;
++ rar_buffers.info.size = kbufs->size;
++ pr_debug("sst: rar handle 0x%x size=0x%x",
++ rar_buffers.info.handle,
++ rar_buffers.info.size);
++ retval = sst_get_RAR(&rar_buffers, 1);
++
++ if (retval)
++ return retval;
++ sg_list->addr[i].addr = rar_buffers.bus_address;
++ /* rar_buffers.info.size; */
++ sg_list->addr[i].size = (__u32)kbufs->size;
++ pr_debug("sst: phyaddr[%d] 0x%x Size:0x%x\n"
++ , i, sg_list->addr[i].addr,
++ sg_list->addr[i].size);
++ }
++ #endif
++ if (stream->ops != STREAM_OPS_PLAYBACK_DRM) {
++ sg_list->addr[i].addr =
++ virt_to_phys((void *)
++ kbufs->addr + kbufs->offset);
++ sg_list->addr[i].size = kbufs->size;
++ pr_debug("sst: phyaddr[%d]:0x%x Size:0x%x\n"
++ , i , sg_list->addr[i].addr, kbufs->size);
++ }
++ stream->curr_bytes += sg_list->addr[i].size;
++ kbufs->in_use = true;
++ i++;
++ }
++ if (i >= MAX_NUM_SCATTER_BUFFERS)
++ break;
++ }
++
++ sg_list->num_entries = i;
++ pr_debug("sst:sg list entries = %d\n", sg_list->num_entries);
++ return i;
++}
++
++
++/**
++ * sst_play_frame - Send msg for sending stream frames
++ *
++ * @str_id: ID of stream
++ *
++ * This function is called to send data to be played out
++ * to the firmware
++ */
++int sst_play_frame(int str_id)
++{
++ int i = 0, retval = 0;
++ struct ipc_post *msg = NULL;
++ struct sst_frame_info sg_list = {0};
++ struct sst_stream_bufs *kbufs = NULL, *_kbufs;
++ struct stream_info *stream;
++
++ pr_debug("sst: play frame for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++
++ stream = &sst_drv_ctx->streams[str_id];
++ /* clear prev sent buffers */
++ list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == true) {
++ spin_lock(&stream->pcm_lock);
++ list_del(&kbufs->node);
++ spin_unlock(&stream->pcm_lock);
++ kfree(kbufs);
++ }
++ }
++ /* update bytes sent */
++ stream->cumm_bytes += stream->curr_bytes;
++ stream->curr_bytes = 0;
++ if (list_empty(&stream->bufs)) {
++ /* no user buffer available */
++ pr_debug("sst: Null buffer stream status %d\n", stream->status);
++ stream->prev = stream->status;
++ stream->status = STREAM_INIT;
++ pr_debug("sst:new stream status = %d\n", stream->status);
++ if (stream->need_draining == true) {
++ pr_debug("sst:draining stream\n");
++ if (sst_create_short_msg(&msg)) {
++ pr_err("sst: mem alloc failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
++ 0, str_id);
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ } else if (stream->data_blk.on == true) {
++ pr_debug("sst:user list empty.. wake\n");
++ /* unblock */
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.condition = true;
++ stream->data_blk.on = false;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ return 0;
++ }
++
++ /* create list */
++ i = sst_create_sg_list(stream, &sg_list);
++
++ /* post msg */
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_PLAY_FRAMES, 1, str_id);
++ msg->header.part.data = sizeof(u32) + sizeof(sg_list);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return 0;
++
++}
++
++/**
++ * sst_capture_frame - Send msg for sending stream frames
++ *
++ * @str_id: ID of stream
++ *
++ * This function is called to capture data from the firmware
++ */
++int sst_capture_frame(int str_id)
++{
++ int i = 0, retval = 0;
++ struct ipc_post *msg = NULL;
++ struct sst_frame_info sg_list = {0};
++ struct sst_stream_bufs *kbufs = NULL, *_kbufs;
++ struct stream_info *stream;
++
++
++ pr_debug("sst:capture frame for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ stream = &sst_drv_ctx->streams[str_id];
++ /* clear prev sent buffers */
++ list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == true) {
++ list_del(&kbufs->node);
++ kfree(kbufs);
++ pr_debug("sst:del node\n");
++ }
++ }
++ if (list_empty(&stream->bufs)) {
++ /* no user buffer available */
++ pr_debug("sst:Null buffer!!!!stream status %d\n",
++ stream->status);
++ stream->prev = stream->status;
++ stream->status = STREAM_INIT;
++ pr_debug("sst:new stream status = %d\n",
++ stream->status);
++ if (stream->data_blk.on == true) {
++ pr_debug("sst:user list empty.. wake\n");
++ /* unblock */
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.condition = true;
++ stream->data_blk.on = false;
++ wake_up(&sst_drv_ctx->wait_queue);
++
++ }
++ return 0;
++ }
++ /* create new sg list */
++ i = sst_create_sg_list(stream, &sg_list);
++
++ /* post msg */
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_CAPT_FRAMES, 1, str_id);
++ msg->header.part.data = sizeof(u32) + sizeof(sg_list);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++
++
++ /*update bytes received*/
++ stream->cumm_bytes += stream->curr_bytes;
++ stream->curr_bytes = 0;
++
++ pr_debug("sst:Cum bytes = %d\n", stream->cumm_bytes);
++ return 0;
++}
++
++/*This function is used to calculate the minimum size of input buffers given*/
++static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
++{
++ int i, min_val = bufs->buff_entry[0].size;
++ for (i = 1 ; i < bufs->entries; i++) {
++ if (bufs->buff_entry[i].size < min_val)
++ min_val = bufs->buff_entry[i].size;
++ }
++ pr_debug("sst:min_val = %d\n", min_val);
++ return min_val;
++}
++
++static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
++{
++ int i, max_val = bufs->buff_entry[0].size;
++ for (i = 1 ; i < bufs->entries; i++) {
++ if (bufs->buff_entry[i].size > max_val)
++ max_val = bufs->buff_entry[i].size;
++ }
++ pr_debug("sst:max_val = %d\n", max_val);
++ return max_val;
++}
++
++/*This function is used to allocate input and output buffers to be sent to
++the firmware that will take encoded data and return decoded data*/
++static int sst_allocate_decode_buf(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ unsigned int cum_input_given,
++ unsigned int cum_output_given)
++{
++#ifdef CONFIG_MRST_RAR_HANDLER
++ if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
++
++ if (dbufs->ibufs->type == SST_BUF_RAR &&
++ dbufs->obufs->type == SST_BUF_RAR) {
++ if (dbufs->ibufs->entries == dbufs->obufs->entries)
++ return 0;
++ else {
++ pr_err("sst: RAR entries dont match\n");
++ return -EINVAL;
++ }
++ } else
++ str_info->decode_osize = cum_output_given;
++ return 0;
++
++ }
++#endif
++ if (!str_info->decode_ibuf) {
++ pr_debug("sst:no i/p buffers, trying full size\n");
++ str_info->decode_isize = cum_input_given;
++ str_info->decode_ibuf = kzalloc(str_info->decode_isize,
++ GFP_KERNEL);
++ str_info->idecode_alloc = str_info->decode_isize;
++ }
++ if (!str_info->decode_ibuf) {
++ pr_debug("sst:buff alloc failed, try max size\n");
++ str_info->decode_isize = calculate_max_size(dbufs->ibufs);
++ str_info->decode_ibuf = kzalloc(
++ str_info->decode_isize, GFP_KERNEL);
++ str_info->idecode_alloc = str_info->decode_isize;
++ }
++ if (!str_info->decode_ibuf) {
++ pr_debug("sst:buff alloc failed, try min size\n");
++ str_info->decode_isize = calculate_min_size(dbufs->ibufs);
++ str_info->decode_ibuf = kzalloc(str_info->decode_isize,
++ GFP_KERNEL);
++ if (!str_info->decode_ibuf) {
++ pr_err("sst: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ str_info->idecode_alloc = str_info->decode_isize;
++ }
++ str_info->decode_osize = cum_output_given;
++ if (str_info->decode_osize > sst_drv_ctx->mmap_len)
++ str_info->decode_osize = sst_drv_ctx->mmap_len;
++ return 0;
++}
++
++/*This function is used to send the message to firmware to decode the data*/
++static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
++ struct snd_sst_decode_info *dec_info)
++{
++ struct ipc_post *msg = NULL;
++ int retval = 0;
++
++ pr_debug("SST DBGsst_set_mute:called\n");
++
++ if (str_info->decode_ibuf_type == SST_BUF_RAR) {
++#ifdef CONFIG_MRST_RAR_HANDLER
++ dec_info->frames_in.addr[0].addr =
++ (unsigned long)str_info->decode_ibuf;
++ dec_info->frames_in.addr[0].size =
++ str_info->decode_isize;
++#endif
++
++ } else {
++ dec_info->frames_in.addr[0].addr = virt_to_phys((void *)
++ str_info->decode_ibuf);
++ dec_info->frames_in.addr[0].size = str_info->decode_isize;
++ }
++
++
++ if (str_info->decode_obuf_type == SST_BUF_RAR) {
++#ifdef CONFIG_MRST_RAR_HANDLER
++ dec_info->frames_out.addr[0].addr =
++ (unsigned long)str_info->decode_obuf;
++ dec_info->frames_out.addr[0].size = str_info->decode_osize;
++#endif
++
++ } else {
++ dec_info->frames_out.addr[0].addr = virt_to_phys((void *)
++ str_info->decode_obuf) ;
++ dec_info->frames_out.addr[0].size = str_info->decode_osize;
++ }
++
++ dec_info->frames_in.num_entries = 1;
++ dec_info->frames_out.num_entries = 1;
++ dec_info->frames_in.rsrvd = 0;
++ dec_info->frames_out.rsrvd = 0;
++ dec_info->input_bytes_consumed = 0;
++ dec_info->output_bytes_produced = 0;
++ if (sst_create_large_msg(&msg)) {
++ pr_err("sst: message creation failed\n");
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_DECODE_FRAMES, 1, str_id);
++ msg->header.part.data = sizeof(u32) + sizeof(*dec_info);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), dec_info,
++ sizeof(*dec_info));
++ spin_lock(&sst_drv_ctx->list_spin_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ spin_unlock(&sst_drv_ctx->list_spin_lock);
++ str_info->data_blk.condition = false;
++ str_info->data_blk.ret_code = 0;
++ str_info->data_blk.on = true;
++ str_info->data_blk.data = dec_info;
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
++ return retval;
++}
++
++static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ int *input_index, int *in_copied,
++ int *input_index_valid_size, int *new_entry_flag)
++{
++ int retval = 0, i;
++
++#ifdef CONFIG_MRST_RAR_HANDLER
++ if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
++ struct RAR_buffer rar_buffers;
++ __u32 info;
++ retval = copy_from_user((void *) &info,
++ dbufs->ibufs->buff_entry[i].buffer,
++ sizeof(__u32));
++ if (retval) {
++ pr_err("sst:cpy from user fail\n");
++ return -EAGAIN;
++ }
++ rar_buffers.info.type = dbufs->ibufs->type;
++ rar_buffers.info.size = dbufs->ibufs->buff_entry[i].size;
++ rar_buffers.info.handle = info;
++ pr_debug("rar in DnR(input buffer function)=0x%x size=0x%x",
++ rar_buffers.info.handle,
++ rar_buffers.info.size);
++ retval = sst_get_RAR(&rar_buffers, 1);
++ if (retval) {
++ pr_debug("SST ERR: RAR API failed\n");
++ return retval;
++ }
++ str_info->decode_ibuf =
++ (void *) ((unsigned long) rar_buffers.bus_address);
++ pr_debug("RAR buf addr in DnR (input buffer function)0x%lu",
++ (unsigned long) str_info->decode_ibuf);
++ pr_debug("rar in DnR decode funtion/output b_add rar =0x%lu",
++ (unsigned long) rar_buffers.bus_address);
++ *input_index = i + 1;
++ str_info->decode_isize = dbufs->ibufs->buff_entry[i].size;
++ str_info->decode_ibuf_type = dbufs->ibufs->type;
++ *in_copied = str_info->decode_isize;
++ }
++#endif
++ return retval;
++}
++/*This function is used to prepare the kernel input buffers with contents
++before sending for decode*/
++static int sst_prepare_input_buffers(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ int *input_index, int *in_copied,
++ int *input_index_valid_size, int *new_entry_flag)
++{
++ int i, cpy_size, retval = 0;
++
++ pr_debug("sst:input_index = %d, input entries = %d\n",
++ *input_index, dbufs->ibufs->entries);
++ for (i = *input_index; i < dbufs->ibufs->entries; i++) {
++#ifdef CONFIG_MRST_RAR_HANDLER
++ retval = sst_prepare_input_buffers_rar(str_info,
++ dbufs, input_index, in_copied,
++ input_index_valid_size, new_entry_flag);
++ if (retval) {
++ pr_err("sst: In prepare input buffers for RAR\n");
++ return -EIO;
++ }
++#endif
++ *input_index = i;
++ if (*input_index_valid_size == 0)
++ *input_index_valid_size =
++ dbufs->ibufs->buff_entry[i].size;
++ pr_debug("sst:inout addr = %p, size = %d\n",
++ dbufs->ibufs->buff_entry[i].buffer,
++ *input_index_valid_size);
++ pr_debug("sst:decode_isize = %d, in_copied %d\n",
++ str_info->decode_isize, *in_copied);
++ if (*input_index_valid_size <=
++ (str_info->decode_isize - *in_copied))
++ cpy_size = *input_index_valid_size;
++ else
++ cpy_size = str_info->decode_isize - *in_copied;
++
++ pr_debug("sst:cpy size = %d\n", cpy_size);
++ if (!dbufs->ibufs->buff_entry[i].buffer) {
++ pr_err("sst: i/p buffer is null\n");
++ return -EINVAL;
++ }
++ pr_debug("sst:Try copy To %p, From %p, size %d\n",
++ str_info->decode_ibuf + *in_copied,
++ dbufs->ibufs->buff_entry[i].buffer, cpy_size);
++
++ retval =
++ copy_from_user((void *)(str_info->decode_ibuf + *in_copied),
++ (void *) dbufs->ibufs->buff_entry[i].buffer,
++ cpy_size);
++ if (retval) {
++ pr_err("sst: copy from user failed\n");
++ return -EIO;
++ }
++ *in_copied += cpy_size;
++ *input_index_valid_size -= cpy_size;
++ pr_debug("sst:in buff size = %d, in_copied = %d\n",
++ *input_index_valid_size, *in_copied);
++ if (*input_index_valid_size != 0) {
++ pr_debug("sst:more input buffers left\n");
++ dbufs->ibufs->buff_entry[i].buffer += cpy_size;
++ break;
++ }
++ if (*in_copied == str_info->decode_isize &&
++ *input_index_valid_size == 0 &&
++ (i+1) <= dbufs->ibufs->entries) {
++ pr_debug("sst:all input buffers copied\n");
++ *new_entry_flag = true;
++ *input_index = i + 1;
++ break;
++ }
++ }
++ return retval;
++}
++
++/* This function is used to copy the decoded data from kernel buffers to
++the user output buffers with contents after decode*/
++static int sst_prepare_output_buffers(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ int *output_index, int output_size,
++ int *out_copied)
++
++{
++ int i, cpy_size, retval = 0;
++ pr_debug("sst:output_index = %d, output entries = %d\n",
++ *output_index,
++ dbufs->obufs->entries);
++ for (i = *output_index; i < dbufs->obufs->entries; i++) {
++ *output_index = i;
++ pr_debug("sst:output addr = %p, size = %d\n",
++ dbufs->obufs->buff_entry[i].buffer,
++ dbufs->obufs->buff_entry[i].size);
++ pr_debug("sst:output_size = %d, out_copied = %d\n",
++ output_size, *out_copied);
++ if (dbufs->obufs->buff_entry[i].size <
++ (output_size - *out_copied))
++ cpy_size = dbufs->obufs->buff_entry[i].size;
++ else
++ cpy_size = output_size - *out_copied;
++ pr_debug("sst:cpy size = %d\n", cpy_size);
++ pr_debug("sst:Try copy To: %p, From %p, size %d\n",
++ dbufs->obufs->buff_entry[i].buffer,
++ sst_drv_ctx->mmap_mem + *out_copied,
++ cpy_size);
++ retval = copy_to_user(dbufs->obufs->buff_entry[i].buffer,
++ sst_drv_ctx->mmap_mem + *out_copied,
++ cpy_size);
++ if (retval) {
++ pr_err("sst: copy to user failed\n");
++ return -EIO;
++ } else
++ pr_debug("sst:copy to user passed\n");
++ *out_copied += cpy_size;
++ dbufs->obufs->buff_entry[i].size -= cpy_size;
++ pr_debug("sst:o/p buff size %d, out_copied %d\n",
++ dbufs->obufs->buff_entry[i].size, *out_copied);
++ if (dbufs->obufs->buff_entry[i].size != 0) {
++ *output_index = i;
++ dbufs->obufs->buff_entry[i].buffer += cpy_size;
++ break;
++ } else if (*out_copied == output_size) {
++ *output_index = i + 1;
++ break;
++ }
++ }
++ return retval;
++}
++
++/**
++ * sst_decode - Send msg for decoding frames
++ *
++ * @str_id: ID of stream
++ * @dbufs: param that holds the user input and output buffers and size
++ *
++ * This function is called to decode data from the firmware
++ */
++int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
++{
++ int retval = 0, i;
++ unsigned long long total_input = 0 , total_output = 0;
++ unsigned int cum_input_given = 0 , cum_output_given = 0;
++ int copy_in_done = false, copy_out_done = false;
++ int input_index = 0, output_index = 0;
++ int input_index_valid_size = 0;
++ int in_copied, out_copied;
++ int new_entry_flag;
++ u64 output_size;
++ struct stream_info *str_info;
++ struct snd_sst_decode_info dec_info;
++ unsigned long long input_bytes, output_bytes;
++
++ sst_drv_ctx->scard_ops->power_down_pmic();
++ pr_debug("sst: Powering_down_PMIC...\n");
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status != STREAM_INIT) {
++ pr_err("sst: invalid stream state = %d\n",
++ str_info->status);
++ return -EINVAL;
++ }
++
++ str_info->prev = str_info->status;
++ str_info->status = STREAM_DECODE;
++
++ for (i = 0; i < dbufs->ibufs->entries; i++)
++ cum_input_given += dbufs->ibufs->buff_entry[i].size;
++ for (i = 0; i < dbufs->obufs->entries; i++)
++ cum_output_given += dbufs->obufs->buff_entry[i].size;
++
++ /* input and output buffer allocation */
++ retval = sst_allocate_decode_buf(str_info, dbufs,
++ cum_input_given, cum_output_given);
++ if (retval) {
++ pr_err("sst: mem allocation failed, abort!!!\n");
++ retval = -ENOMEM;
++ goto finish;
++ }
++
++ str_info->decode_isize = str_info->idecode_alloc;
++ str_info->decode_ibuf_type = dbufs->ibufs->type;
++ str_info->decode_obuf_type = dbufs->obufs->type;
++
++ while ((copy_out_done == false) && (copy_in_done == false)) {
++ in_copied = 0;
++ new_entry_flag = false;
++ retval = sst_prepare_input_buffers(str_info,\
++ dbufs, &input_index, &in_copied,
++ &input_index_valid_size, &new_entry_flag);
++ if (retval) {
++ pr_err("sst: prepare in buffers failed\n");
++ goto finish;
++ }
++
++ if (str_info->ops != STREAM_OPS_PLAYBACK_DRM)
++ str_info->decode_obuf = sst_drv_ctx->mmap_mem;
++
++#ifdef CONFIG_MRST_RAR_HANDLER
++ else {
++ if (dbufs->obufs->type == SST_BUF_RAR) {
++ struct RAR_buffer rar_buffers;
++ __u32 info;
++
++ pr_debug("DRM");
++ retval = copy_from_user((void *) &info,
++ dbufs->obufs->
++ buff_entry[output_index].buffer,
++ sizeof(__u32));
++
++ rar_buffers.info.size = dbufs->obufs->
++ buff_entry[output_index].size;
++ rar_buffers.info.handle = info;
++ retval = sst_get_RAR(&rar_buffers, 1);
++ if (retval)
++ return retval;
++
++ str_info->decode_obuf = (void *)((unsigned long)
++ rar_buffers.bus_address);
++ str_info->decode_osize = dbufs->obufs->
++ buff_entry[output_index].size;
++ str_info->decode_obuf_type = dbufs->obufs->type;
++ pr_debug("sst:DRM handling\n");
++ pr_debug("o/p_add=0x%lu Size=0x%x",
++ (unsigned long) str_info->decode_obuf,
++ str_info->decode_osize);
++ } else {
++ str_info->decode_obuf = sst_drv_ctx->mmap_mem;
++ str_info->decode_osize = dbufs->obufs->
++ buff_entry[output_index].size;
++
++ }
++ }
++#endif
++ if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
++ if (str_info->decode_isize > in_copied) {
++ str_info->decode_isize = in_copied;
++ pr_debug("sst:i/p size = %d\n",
++ str_info->decode_isize);
++ }
++ }
++
++
++ retval = sst_send_decode_mess(str_id, str_info, &dec_info);
++ if (retval || dec_info.input_bytes_consumed == 0) {
++ pr_err(
++ "SST ERR: mess failed or no input consumed\n");
++ goto finish;
++ }
++ input_bytes = dec_info.input_bytes_consumed;
++ output_bytes = dec_info.output_bytes_produced;
++
++ pr_debug("sst:in_copied=%d, con=%lld, prod=%lld\n",
++ in_copied, input_bytes, output_bytes);
++ if (dbufs->obufs->type == SST_BUF_RAR) {
++ output_index += 1;
++ if (output_index == dbufs->obufs->entries) {
++ copy_in_done = true;
++ pr_debug("sst:all i/p cpy done\n");
++ }
++ total_output += output_bytes;
++ } else {
++ out_copied = 0;
++ output_size = output_bytes;
++ retval = sst_prepare_output_buffers(str_info, dbufs,
++ &output_index, output_size, &out_copied);
++ if (retval) {
++ pr_err("sst:prep out buff fail\n");
++ goto finish;
++ }
++ if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
++ if (in_copied != input_bytes) {
++ int bytes_left = in_copied -
++ input_bytes;
++ pr_debug("sst:bytes %d\n",
++ bytes_left);
++ if (new_entry_flag == true)
++ input_index--;
++ while (bytes_left) {
++ struct snd_sst_buffs *ibufs;
++ struct snd_sst_buff_entry
++ *buff_entry;
++ unsigned int size_sent;
++
++ ibufs = dbufs->ibufs;
++ buff_entry =
++ &ibufs->buff_entry[input_index];
++ size_sent = buff_entry->size -\
++ input_index_valid_size;
++ if (bytes_left == size_sent) {
++ bytes_left = 0;
++ } else if (bytes_left <
++ size_sent) {
++ buff_entry->buffer +=
++ (size_sent -
++ bytes_left);
++ buff_entry->size -=
++ (size_sent -
++ bytes_left);
++ bytes_left = 0;
++ } else {
++ bytes_left -= size_sent;
++ input_index--;
++ input_index_valid_size =
++ 0;
++ }
++ }
++
++ }
++ }
++
++ total_output += out_copied;
++ if (str_info->decode_osize != out_copied) {
++ str_info->decode_osize -= out_copied;
++ pr_debug("sst:output size modified = %d\n",
++ str_info->decode_osize);
++ }
++ }
++ total_input += input_bytes;
++
++ if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
++ if (total_input == cum_input_given)
++ copy_in_done = true;
++ copy_out_done = true;
++
++ } else {
++ if (total_output == cum_output_given) {
++ copy_out_done = true;
++ pr_debug("sst:all o/p cpy done\n");
++ }
++
++ if (total_input == cum_input_given) {
++ copy_in_done = true;
++ pr_debug("sst:all i/p cpy done\n");
++ }
++ }
++
++ pr_debug("sst:copy_out = %d, copy_in = %d\n",
++ copy_out_done, copy_in_done);
++ }
++
++finish:
++ dbufs->input_bytes_consumed = total_input;
++ dbufs->output_bytes_produced = total_output;
++ str_info->status = str_info->prev;
++ str_info->prev = STREAM_DECODE;
++ str_info->decode_ibuf = NULL;
++ kfree(str_info->decode_ibuf);
++ return retval;
++}
+--- /dev/null
++++ b/sound/pci/sst/intelmid.c
+@@ -0,0 +1,1233 @@
++/*
++ * intelmid.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver for Intel MID sound card chipset
++ */
++#include <linux/slab.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <sound/control.h>
++#include <asm/mrst.h>
++#include <sound/pcm.h>
++#include <sound/jack.h>
++#include <sound/pcm_params.h>
++#include <sound/initval.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intelmid_snd_control.h"
++#include "intelmid.h"
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
++MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
++MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
++MODULE_DESCRIPTION("Intel MAD Sound card driver");
++MODULE_LICENSE("GPL v2");
++MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}");
++
++
++static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */
++static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */
++
++module_param(card_index, int, 0444);
++MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard.");
++module_param(card_id, charp, 0444);
++MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard.");
++
++int sst_card_vendor_id;
++int intelmid_audio_interrupt_enable;/*checkpatch fix*/
++
++/* Data path functionalities */
++static struct snd_pcm_hardware snd_intelmad_stream = {
++ .info = (SNDRV_PCM_INFO_INTERLEAVED |
++ SNDRV_PCM_INFO_DOUBLE |
++ SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME |
++ SNDRV_PCM_INFO_MMAP|
++ SNDRV_PCM_INFO_MMAP_VALID |
++ SNDRV_PCM_INFO_BLOCK_TRANSFER |
++ SNDRV_PCM_INFO_SYNC_START),
++ .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
++ SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
++ SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
++ .rates = (SNDRV_PCM_RATE_8000|
++ SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_48000),
++ .rate_min = MIN_RATE,
++
++ .rate_max = MAX_RATE,
++ .channels_min = MIN_CHANNEL,
++ .channels_max = MAX_CHANNEL_AMIC,
++ .buffer_bytes_max = MAX_BUFFER,
++ .period_bytes_min = MIN_PERIOD_BYTES,
++ .period_bytes_max = MAX_PERIOD_BYTES,
++ .periods_min = MIN_PERIODS,
++ .periods_max = MAX_PERIODS,
++ .fifo_size = FIFO_SIZE,
++};
++
++
++/**
++ * snd_intelmad_pcm_trigger - stream activities are handled here
++ *
++ * @substream:substream for which the stream function is called
++ * @cmd: the stream command requested from the upper layer
++ *
++ * This function is called whenever a stream activity is invoked
++ */
++static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
++ int cmd)
++{
++ int ret_val = 0;
++ struct snd_intelmad *intelmaddata;
++ struct mad_stream_pvt *stream;
++ /*struct stream_buffer buffer_to_sst;*/
++
++
++
++ WARN_ON(!substream);
++
++ intelmaddata = snd_pcm_substream_chip(substream);
++ stream = substream->runtime->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++ WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ pr_debug("sst: Trigger Start\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_START,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = RUNNING;
++ stream->substream = substream;
++ stream->stream_status = RUNNING;
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ pr_debug("sst: in stop\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = DROPPED;
++ break;
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ pr_debug("sst: in pause\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_PAUSE,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = PAUSED;
++ break;
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ pr_debug("sst: in pause release\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_RESUME,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = RUNNING;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return ret_val;
++}
++
++/**
++* snd_intelmad_pcm_prepare- internal preparation before starting a stream
++*
++* @substream: substream for which the function is called
++*
++* This function is called when a stream is started for internal preparation.
++*/
++static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
++{
++ struct mad_stream_pvt *stream;
++ int ret_val = 0;
++ struct snd_intelmad *intelmaddata;
++
++ pr_debug("sst: pcm_prepare called\n");
++
++ WARN_ON(!substream);
++ stream = substream->runtime->private_data;
++ intelmaddata = snd_pcm_substream_chip(substream);
++ pr_debug("sst: pb cnt = %d cap cnt = %d\n",\
++ intelmaddata->playback_cnt,
++ intelmaddata->capture_cnt);
++
++ if (stream->stream_info.str_id) {
++ pr_debug("sst: Prepare called for already set stream\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
++ &stream->stream_info.str_id);
++ return ret_val;
++ }
++
++ ret_val = snd_intelmad_alloc_stream(substream);
++ if (ret_val < 0)
++ return ret_val;
++ stream->dbg_cum_bytes = 0;
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ intelmaddata->playback_cnt++;
++ else
++ intelmaddata->capture_cnt++;
++ /* return back the stream id */
++ snprintf(substream->pcm->id, sizeof(substream->pcm->id),
++ "%d", stream->stream_info.str_id);
++ pr_debug("sst: stream id to user = %s\n",
++ substream->pcm->id);
++
++ ret_val = snd_intelmad_init_stream(substream);
++ if (ret_val)
++ return ret_val;
++ substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
++ return ret_val;
++}
++
++static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *hw_params)
++{
++ int ret_val;
++
++ pr_debug("sst: snd_intelmad_hw_params called\n");
++ ret_val = snd_pcm_lib_malloc_pages(substream,
++ params_buffer_bytes(hw_params));
++ memset(substream->runtime->dma_area, 0,
++ params_buffer_bytes(hw_params));
++
++ return ret_val;
++}
++
++static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
++{
++ pr_debug("sst: snd_intelmad_hw_free called\n");
++ return snd_pcm_lib_free_pages(substream);
++}
++
++/**
++ * snd_intelmad_pcm_pointer- to send the current buffer pointer processed by hw
++ *
++ * @substream: substream for which the function is called
++ *
++ * This function is called by ALSA framework to get the current hw buffer ptr
++ * when a period is elapsed
++ */
++static snd_pcm_uframes_t snd_intelmad_pcm_pointer
++ (struct snd_pcm_substream *substream)
++{
++ /* struct snd_pcm_runtime *runtime = substream->runtime; */
++ struct mad_stream_pvt *stream;
++ struct snd_intelmad *intelmaddata;
++ int ret_val;
++
++ WARN_ON(!substream);
++
++ intelmaddata = snd_pcm_substream_chip(substream);
++ stream = substream->runtime->private_data;
++ if (stream->stream_status == INIT)
++ return 0;
++
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_BUFFER_POINTER,
++ &stream->stream_info);
++ if (ret_val) {
++ pr_err("sst: error code = 0x%x\n", ret_val);
++ return ret_val;
++ }
++ pr_debug("sst: samples reported out 0x%llx\n",
++ stream->stream_info.buffer_ptr);
++ pr_debug("sst: Frame bits:: %d period_count :: %d\n",
++ (int)substream->runtime->frame_bits,
++ (int)substream->runtime->period_size);
++
++ return stream->stream_info.buffer_ptr;
++
++}
++
++/**
++ * snd_intelmad_close- to free parameters when stream is stopped
++ *
++ * @substream: substream for which the function is called
++ *
++ * This function is called by ALSA framework when stream is stopped
++ */
++static int snd_intelmad_close(struct snd_pcm_substream *substream)
++{
++ struct snd_intelmad *intelmaddata;
++ struct mad_stream_pvt *stream;
++ int ret_val = 0;
++
++ WARN_ON(!substream);
++
++ stream = substream->runtime->private_data;
++
++ pr_debug("sst: snd_intelmad_close called\n");
++ intelmaddata = snd_pcm_substream_chip(substream);
++
++ pr_debug("sst: str id = %d\n", stream->stream_info.str_id);
++ if (stream->stream_info.str_id) {
++ /* SST API to actually stop/free the stream */
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_FREE,
++ &stream->stream_info.str_id);
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ intelmaddata->playback_cnt--;
++ else
++ intelmaddata->capture_cnt--;
++ }
++ pr_debug("sst: snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
++ intelmaddata->playback_cnt, intelmaddata->capture_cnt);
++ kfree(substream->runtime->private_data);
++ return ret_val;
++}
++
++/**
++ * snd_intelmad_open- to set runtime parameters during stream start
++ *
++ * @substream: substream for which the function is called
++ * @type: audio device type
++ *
++ * This function is called by ALSA framework when stream is started
++ */
++static int snd_intelmad_open(struct snd_pcm_substream *substream,
++ enum snd_sst_audio_device_type type)
++{
++ struct snd_intelmad *intelmaddata;
++ struct snd_pcm_runtime *runtime;
++ struct mad_stream_pvt *stream;
++
++ WARN_ON(!substream);
++
++ pr_debug("sst: snd_intelmad_open called\n");
++
++ intelmaddata = snd_pcm_substream_chip(substream);
++ runtime = substream->runtime;
++ /* set the runtime hw parameter with local snd_pcm_hardware struct */
++ runtime->hw = snd_intelmad_stream;
++ if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
++ runtime->hw = snd_intelmad_stream;
++ runtime->hw.rates = SNDRV_PCM_RATE_48000;
++ runtime->hw.rate_min = MAX_RATE;
++ runtime->hw.formats = (SNDRV_PCM_FMTBIT_S24 |
++ SNDRV_PCM_FMTBIT_U24);
++ if (intelmaddata->sstdrv_ops->scard_ops->input_dev_id == AMIC)
++ runtime->hw.channels_max = MAX_CHANNEL_AMIC;
++ else
++ runtime->hw.channels_max = MAX_CHANNEL_DMIC;
++
++ }
++ /* setup the internal data structure stream pointers based on it being
++ playback or capture stream */
++ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
++ if (!stream)
++ return -ENOMEM;
++ stream->stream_info.str_id = 0;
++ stream->device = type;
++ stream->stream_status = INIT;
++ runtime->private_data = stream;
++ return snd_pcm_hw_constraint_integer(runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS);
++}
++
++static int snd_intelmad_headset_open(struct snd_pcm_substream *substream)
++{
++ return snd_intelmad_open(substream, SND_SST_DEVICE_HEADSET);
++}
++
++static int snd_intelmad_ihf_open(struct snd_pcm_substream *substream)
++{
++ return snd_intelmad_open(substream, SND_SST_DEVICE_IHF);
++}
++
++static int snd_intelmad_vibra_open(struct snd_pcm_substream *substream)
++{
++ return snd_intelmad_open(substream, SND_SST_DEVICE_VIBRA);
++}
++
++static int snd_intelmad_haptic_open(struct snd_pcm_substream *substream)
++{
++ return snd_intelmad_open(substream, SND_SST_DEVICE_HAPTIC);
++}
++
++static struct snd_pcm_ops snd_intelmad_headset_ops = {
++ .open = snd_intelmad_headset_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++};
++
++static struct snd_pcm_ops snd_intelmad_ihf_ops = {
++ .open = snd_intelmad_ihf_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++};
++
++static struct snd_pcm_ops snd_intelmad_vibra_ops = {
++ .open = snd_intelmad_vibra_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++};
++
++static struct snd_pcm_ops snd_intelmad_haptic_ops = {
++ .open = snd_intelmad_haptic_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++};
++
++static struct snd_pcm_ops snd_intelmad_capture_ops = {
++ .open = snd_intelmad_headset_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++};
++
++
++/**
++ * snd_intelmad_intr_handler- interrupt handler
++ *
++ * @irq : irq number of the interrupt received
++ * @dev: device context
++ *
++ * This function is called when an interrupt is raised at the sound card
++ */
++static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev)
++{
++ struct snd_intelmad *intelmaddata =
++ (struct snd_intelmad *)dev;
++ u8 intsts;
++
++ memcpy_fromio(&intsts,
++ ((void *)(intelmaddata->int_base)),
++ sizeof(u8));
++ intelmaddata->mad_jack_msg.intsts = intsts;
++ intelmaddata->mad_jack_msg.intelmaddata = intelmaddata;
++
++ queue_work(intelmaddata->mad_jack_wq, &intelmaddata->mad_jack_msg.wq);
++
++ return IRQ_HANDLED;
++}
++
++void sst_mad_send_jack_report(struct snd_jack *jack,
++ int buttonpressevent , int status)
++{
++
++ if (!jack) {
++ pr_debug("sst: MAD error jack empty\n");
++
++ } else {
++ pr_debug("sst: MAD send jack report for = %d!!!\n", status);
++ pr_debug("sst: MAD send jack report %d\n", jack->type);
++ snd_jack_report(jack, status);
++
++ /*button pressed and released */
++ if (buttonpressevent)
++ snd_jack_report(jack, 0);
++ pr_debug("sst: MAD sending jack report Done !!!\n");
++ }
++
++
++
++}
++
++void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
++{
++ struct snd_jack *jack = NULL;
++ unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
++ struct sc_reg_access sc_access[] = {
++ {0x187, 0x00, MASK7},
++ {0x188, 0x10, MASK4},
++ {0x18b, 0x10, MASK4},
++ };
++
++ struct sc_reg_access sc_access_write[] = {
++ {0x198, 0x00, 0x0},
++ };
++
++ if (intsts & 0x4) {
++
++ if (!(intelmid_audio_interrupt_enable)) {
++ pr_debug("sst: Audio interrupt enable\n");
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
++ intelmid_audio_interrupt_enable = 1;
++ intelmaddata->jack[0].jack_status = 0;
++ intelmaddata->jack[1].jack_status = 0;
++
++ }
++ /* send headphone detect */
++ pr_debug("sst: MAD headphone %d\n", intsts & 0x4);
++ jack = &intelmaddata->jack[0].jack;
++ present = !(intelmaddata->jack[0].jack_status);
++ intelmaddata->jack[0].jack_status = present;
++ jack_event_flag = 1;
++
++ }
++
++ if (intsts & 0x2) {
++ /* send short push */
++ pr_debug("sst: MAD short push %d\n", intsts & 0x2);
++ jack = &intelmaddata->jack[2].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++ if (intsts & 0x1) {
++ /* send long push */
++ pr_debug("sst: MAD long push %d\n", intsts & 0x1);
++ jack = &intelmaddata->jack[3].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++ if (intsts & 0x8) {
++ if (!(intelmid_audio_interrupt_enable)) {
++ pr_debug("sst: Audio interrupt enable\n");
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
++ intelmid_audio_interrupt_enable = 1;
++ intelmaddata->jack[0].jack_status = 0;
++ intelmaddata->jack[1].jack_status = 0;
++ }
++ /* send headset detect */
++ pr_debug("sst: MAD headset = %d\n", intsts & 0x8);
++ jack = &intelmaddata->jack[1].jack;
++ present = !(intelmaddata->jack[1].jack_status);
++ intelmaddata->jack[1].jack_status = present;
++ jack_event_flag = 1;
++ }
++
++ if (jack_event_flag)
++ sst_mad_send_jack_report(jack, buttonpressflag, present);
++}
++
++
++void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
++{
++ u8 value = 0, jack_prev_state = 0;
++ struct snd_jack *jack = NULL;
++ unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
++ time_t timediff;
++ struct sc_reg_access sc_access_read = {0,};
++ struct snd_pmic_ops *scard_ops;
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ pr_debug("sst: previous value: %x\n", intelmaddata->jack_prev_state);
++
++ if (!(intelmid_audio_interrupt_enable)) {
++ pr_debug("sst: Audio interrupt enable\n");
++ intelmaddata->jack_prev_state = 0xC0;
++ intelmid_audio_interrupt_enable = 1;
++ }
++
++ if (intsts & 0x2) {
++ jack_prev_state = intelmaddata->jack_prev_state;
++ if (intelmaddata->pmic_status == PMIC_INIT) {
++ sc_access_read.reg_addr = 0x201;
++ sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
++ value = (sc_access_read.value);
++ pr_debug("sst: value returned = 0x%x\n", value);
++ }
++
++ if (jack_prev_state == 0xc0 && value == 0x40) {
++ /*headset detected. */
++ pr_debug("sst: MAD headset inserted\n");
++ jack = &intelmaddata->jack[1].jack;
++ present = 1;
++ jack_event_flag = 1;
++ intelmaddata->jack[1].jack_status = 1;
++
++ }
++
++ if (jack_prev_state == 0xc0 && value == 0x00) {
++ /* headphone detected. */
++ pr_debug("sst: MAD headphone inserted\n");
++ jack = &intelmaddata->jack[0].jack;
++ present = 1;
++ jack_event_flag = 1;
++
++ }
++
++ if (jack_prev_state == 0x40 && value == 0xc0) {
++ /*headset removed*/
++ pr_debug("sst: Jack headset status %d\n",
++ intelmaddata->jack[1].jack_status);
++ pr_debug("sst: MAD headset removed\n");
++ jack = &intelmaddata->jack[1].jack;
++ present = 0;
++ jack_event_flag = 1;
++ intelmaddata->jack[1].jack_status = 0;
++ }
++
++ if (jack_prev_state == 0x00 && value == 0xc0) {
++ /* headphone detected. */
++ pr_debug("sst: Jack headphone status %d\n",
++ intelmaddata->jack[0].jack_status);
++ pr_debug("sst: headphone removed\n");
++ jack = &intelmaddata->jack[0].jack;
++ present = 0;
++ jack_event_flag = 1;
++ }
++
++ if (jack_prev_state == 0x40 && value == 0x00) {
++ /*button pressed*/
++ do_gettimeofday(&intelmaddata->jack[1].buttonpressed);
++ pr_debug("sst: MAD button press detected n");
++ }
++
++
++ if (jack_prev_state == 0x00 && value == 0x40) {
++ if (intelmaddata->jack[1].jack_status) {
++ /*button pressed*/
++ do_gettimeofday(
++ &intelmaddata->jack[1].buttonreleased);
++ /*button pressed */
++ pr_debug("sst: Button Released detected\n");
++ timediff = intelmaddata->jack[1].
++ buttonreleased.tv_sec - intelmaddata->
++ jack[1].buttonpressed.tv_sec;
++ buttonpressflag = 1;
++ if (timediff > 1) {
++ pr_debug("sst: long press detected\n");
++ /* send headphone detect/undetect */
++ jack = &intelmaddata->jack[3].jack;
++ present = 1;
++ jack_event_flag = 1;
++ } else {
++ pr_debug("sst: short press detected\n");
++ /* send headphone detect/undetect */
++ jack = &intelmaddata->jack[2].jack;
++ present = 1;
++ jack_event_flag = 1;
++ }
++ }
++
++ }
++ intelmaddata->jack_prev_state = value ;
++
++ }
++ if ((mrst_platform_id() == MRST_PLATFORM_AAVA_SC) && jack) {
++ if (present) {
++ pr_debug("sst: Jack... YES\n");
++ scard_ops->set_output_dev(STEREO_HEADPHONE);
++
++ } else {
++ pr_debug("sst: Jack... NO\n");
++ scard_ops->set_output_dev(INTERNAL_SPKR);
++
++ }
++ }
++
++ if (jack_event_flag)
++ sst_mad_send_jack_report(jack, buttonpressflag, present);
++}
++
++
++void sst_mad_jackdetection_nec(u8 intsts, struct snd_intelmad *intelmaddata)
++{
++ u8 value = 0;
++ struct snd_jack *jack = NULL;
++ unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
++ struct sc_reg_access sc_access_read = {0,};
++
++ if (intelmaddata->pmic_status == PMIC_INIT) {
++ sc_access_read.reg_addr = 0x132;
++ sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
++ value = (sc_access_read.value);
++ pr_debug("sst: value returned = 0x%x\n", value);
++ }
++ if (intsts & 0x1) {
++ pr_debug("sst: headset detected\n");
++ /* send headset detect/undetect */
++ jack = &intelmaddata->jack[1].jack;
++ present = (value == 0x1) ? 1 : 0;
++ jack_event_flag = 1;
++ }
++ if (intsts & 0x2) {
++ pr_debug("sst: headphone detected\n");
++ /* send headphone detect/undetect */
++ jack = &intelmaddata->jack[0].jack;
++ present = (value == 0x2) ? 1 : 0;
++ jack_event_flag = 1;
++ }
++ if (intsts & 0x4) {
++ pr_debug("sst: short push detected\n");
++ /* send short push */
++ jack = &intelmaddata->jack[2].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++ if (intsts & 0x8) {
++ pr_debug("sst: long push detected\n");
++ /* send long push */
++ jack = &intelmaddata->jack[3].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++
++ if (jack_event_flag)
++ sst_mad_send_jack_report(jack, buttonpressflag, present);
++
++
++}
++
++void sst_process_mad_jack_detection(struct work_struct *work)
++{
++ u8 intsts;
++ struct mad_jack_msg_wq *mad_jack_detect =
++ container_of(work, struct mad_jack_msg_wq, wq);
++
++ struct snd_intelmad *intelmaddata =
++ mad_jack_detect->intelmaddata;
++
++ intsts = mad_jack_detect->intsts;
++
++ switch (intelmaddata->sstdrv_ops->vendor_id) {
++ case SND_FS:
++ sst_mad_jackdetection_fs(intsts , intelmaddata);
++ break;
++ case SND_MX:
++ sst_mad_jackdetection_mx(intsts , intelmaddata);
++ break;
++ case SND_NC:
++ sst_mad_jackdetection_nec(intsts , intelmaddata);
++ break;
++ }
++}
++
++
++static int __devinit snd_intelmad_register_irq(
++ struct snd_intelmad *intelmaddata)
++{
++ int ret_val;
++ u32 regbase = AUDINT_BASE, regsize = 8;
++ char *drv_name;
++
++ pr_debug("sst: irq reg done, regbase 0x%x, regsize 0x%x\n",
++ regbase, regsize);
++ intelmaddata->int_base = ioremap_nocache(regbase, regsize);
++ if (!intelmaddata->int_base)
++ pr_err("sst: Mapping of cache failed\n");
++ pr_debug("sst: irq = 0x%x\n", intelmaddata->irq);
++ if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
++ drv_name = DRIVER_NAME_MFLD;
++ else
++ drv_name = DRIVER_NAME_MRST;
++ ret_val = request_irq(intelmaddata->irq,
++ snd_intelmad_intr_handler,
++ IRQF_SHARED, drv_name,
++ intelmaddata);
++ if (ret_val)
++ pr_err("sst: cannot register IRQ\n");
++ return ret_val;
++}
++
++static int __devinit snd_intelmad_sst_register(
++ struct snd_intelmad *intelmaddata)
++{
++ int ret_val = 0;
++ struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = {
++ &snd_pmic_ops_fs,
++ &snd_pmic_ops_mx,
++ &snd_pmic_ops_nc,
++ &snd_msic_ops
++ };
++
++ struct sc_reg_access vendor_addr = {0x00, 0x00, 0x00};
++
++ if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
++ ret_val = sst_sc_reg_access(&vendor_addr, PMIC_READ, 1);
++ if (ret_val)
++ return ret_val;
++ sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
++ pr_debug("sst: orginal n extrated vendor id = 0x%x %d\n",
++ vendor_addr.value, sst_card_vendor_id);
++ if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
++ pr_err("sst: vendor card not supported!!\n");
++ return -EIO;
++ }
++ } else
++ sst_card_vendor_id = 0x3;
++
++ intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
++ intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id;
++ BUG_ON(!intelmad_vendor_ops[sst_card_vendor_id]);
++ intelmaddata->sstdrv_ops->scard_ops =
++ intelmad_vendor_ops[sst_card_vendor_id];
++
++ if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
++ intelmaddata->sstdrv_ops->scard_ops->pb_on = 0;
++ intelmaddata->sstdrv_ops->scard_ops->cap_on = 0;
++ intelmaddata->sstdrv_ops->scard_ops->input_dev_id = DMIC;
++ intelmaddata->sstdrv_ops->scard_ops->output_dev_id =
++ STEREO_HEADPHONE;
++ }
++
++ /* registering with SST driver to get access to SST APIs to use */
++ ret_val = register_sst_card(intelmaddata->sstdrv_ops);
++ if (ret_val) {
++ pr_err("sst: sst card registration failed\n");
++ return ret_val;
++ }
++
++ sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id;
++ intelmaddata->pmic_status = PMIC_UNINIT;
++ return ret_val;
++}
++
++/* Driver Init/exit functionalities */
++/**
++ * snd_intelmad_pcm_new - to setup pcm for the card
++ *
++ * @card: pointer to the sound card structure
++ * @intelmaddata: pointer to internal context
++ * @pb: playback count for this card
++ * @cap: capture count for this card
++ * @index: device index
++ *
++ * This function is called from probe function to set up pcm params
++ * and functions
++ */
++static int __devinit snd_intelmad_pcm_new(struct snd_card *card,
++ struct snd_intelmad *intelmaddata,
++ unsigned int pb, unsigned int cap, unsigned int index)
++{
++ int ret_val = 0;
++ struct snd_pcm *pcm;
++ char name[32] = INTEL_MAD;
++ struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
++
++ pr_debug("sst: called for pb %d, cp %d, idx %d\n", pb, cap, index);
++ ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
++ if (ret_val)
++ return ret_val;
++ /* setup the ops for playback and capture streams */
++ switch (index) {
++ case 0:
++ pb_ops = &snd_intelmad_headset_ops;
++ cap_ops = &snd_intelmad_capture_ops;
++ break;
++ case 1:
++ pb_ops = &snd_intelmad_ihf_ops;
++ cap_ops = &snd_intelmad_capture_ops;
++ break;
++ case 2:
++ pb_ops = &snd_intelmad_vibra_ops;
++ cap_ops = &snd_intelmad_capture_ops;
++ break;
++ case 3:
++ pb_ops = &snd_intelmad_haptic_ops;
++ cap_ops = &snd_intelmad_capture_ops;
++ break;
++ }
++ if (pb)
++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, pb_ops);
++ if (cap)
++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, cap_ops);
++ /* setup private data which can be retrieved when required */
++ pcm->private_data = intelmaddata;
++ pcm->info_flags = 0;
++ strncpy(pcm->name, card->shortname, strlen(card->shortname));
++ /* allocate dma pages for ALSA stream operations */
++ snd_pcm_lib_preallocate_pages_for_all(pcm,
++ SNDRV_DMA_TYPE_CONTINUOUS,
++ snd_dma_continuous_data(GFP_KERNEL),
++ MIN_BUFFER, MAX_BUFFER);
++ return ret_val;
++}
++
++static int __devinit snd_intelmad_pcm(struct snd_card *card,
++ struct snd_intelmad *intelmaddata)
++{
++ int ret_val = 0;
++
++ WARN_ON(!card);
++ WARN_ON(!intelmaddata);
++ pr_debug("sst: snd_intelmad_pcm called\n");
++ ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
++ if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
++ return ret_val;
++ ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 1);
++ if (ret_val)
++ return ret_val;
++ ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 2);
++ if (ret_val)
++ return ret_val;
++ return snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 3);
++}
++
++/**
++ * snd_intelmad_jack- to setup jack settings of the card
++ *
++ * @intelmaddata: pointer to internal context
++ *
++ * This function is called send jack events
++ */
++static int snd_intelmad_jack(struct snd_intelmad *intelmaddata)
++{
++ struct snd_jack *jack;
++ int retval;
++
++ pr_debug("sst: snd_intelmad_jack called\n");
++ jack = &intelmaddata->jack[0].jack;
++ retval = snd_jack_new(intelmaddata->card, "Headphone",
++ SND_JACK_HEADPHONE, &jack);
++ if (retval < 0)
++ return retval;
++ snd_jack_report(jack, 0);
++
++ jack->private_data = jack;
++ intelmaddata->jack[0].jack = *jack;
++
++
++ jack = &intelmaddata->jack[1].jack;
++ retval = snd_jack_new(intelmaddata->card, "Headset",
++ SND_JACK_HEADSET, &jack);
++ if (retval < 0)
++ return retval;
++
++
++
++ jack->private_data = jack;
++ intelmaddata->jack[1].jack = *jack;
++
++
++ jack = &intelmaddata->jack[2].jack;
++ retval = snd_jack_new(intelmaddata->card, "Short Press",
++ SND_JACK_HS_SHORT_PRESS, &jack);
++ if (retval < 0)
++ return retval;
++
++
++ jack->private_data = jack;
++ intelmaddata->jack[2].jack = *jack;
++
++
++ jack = &intelmaddata->jack[3].jack;
++ retval = snd_jack_new(intelmaddata->card, "Long Press",
++ SND_JACK_HS_LONG_PRESS, &jack);
++ if (retval < 0)
++ return retval;
++
++
++ jack->private_data = jack;
++ intelmaddata->jack[3].jack = *jack;
++
++ return retval;
++}
++
++/**
++ * snd_intelmad_mixer- to setup mixer settings of the card
++ *
++ * @intelmaddata: pointer to internal context
++ *
++ * This function is called from probe function to set up mixer controls
++ */
++static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
++{
++ struct snd_card *card;
++ unsigned int idx;
++ int ret_val = 0, max_controls = 0;
++ char *mixername = "IntelMAD Controls";
++ struct snd_kcontrol_new *controls;
++
++ WARN_ON(!intelmaddata);
++
++ card = intelmaddata->card;
++ strncpy(card->mixername, mixername, sizeof(card->mixername)-1);
++ /* add all widget controls and expose the same */
++ if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
++ max_controls = MAX_CTRL_MFLD;
++ controls = snd_intelmad_controls_mfld;
++ } else {
++ max_controls = MAX_CTRL_MRST;
++ controls = snd_intelmad_controls_mrst;
++ }
++ for (idx = 0; idx < max_controls; idx++) {
++ ret_val = snd_ctl_add(card,
++ snd_ctl_new1(&controls[idx],
++ intelmaddata));
++ pr_debug("sst: mixer[idx]=%d added\n", idx);
++ if (ret_val) {
++ pr_err("sst: in adding of control index = %d\n", idx);
++ break;
++ }
++ }
++ return ret_val;
++}
++
++static int snd_intelmad_dev_free(struct snd_device *device)
++{
++ struct snd_intelmad *intelmaddata;
++
++ WARN_ON(!device);
++
++ intelmaddata = device->device_data;
++
++ pr_debug("sst: snd_intelmad_dev_free called\n");
++ snd_card_free(intelmaddata->card);
++ /*genl_unregister_family(&audio_event_genl_family);*/
++ unregister_sst_card(intelmaddata->sstdrv_ops);
++
++ /* free allocated memory for internal context */
++ destroy_workqueue(intelmaddata->mad_jack_wq);
++ kfree(intelmaddata->sstdrv_ops);
++ kfree(intelmaddata);
++ return 0;
++}
++
++static int __devinit snd_intelmad_create(
++ struct snd_intelmad *intelmaddata,
++ struct snd_card *card)
++{
++ int ret_val;
++ static struct snd_device_ops ops = {
++ .dev_free = snd_intelmad_dev_free,
++ };
++
++ WARN_ON(!intelmaddata);
++ WARN_ON(!card);
++ /* ALSA api to register for the device */
++ ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops);
++ return ret_val;
++}
++
++/**
++* snd_intelmad_probe- function registered for init
++* @pdev : pointer to the device structure
++* This function is called when the device is initialized
++*/
++int __devinit snd_intelmad_probe(struct platform_device *pdev)
++{
++ struct snd_card *card;
++ int ret_val;
++ struct snd_intelmad *intelmaddata;
++ const struct platform_device_id *id = platform_get_device_id(pdev);
++ unsigned int cpu_id = (unsigned int)id->driver_data;
++
++ pr_debug("sst: probe for %s cpu_id %d\n", pdev->name, cpu_id);
++ if (!strcmp(pdev->name, DRIVER_NAME_MRST))
++ pr_debug("sst: detected MRST\n");
++ else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
++ pr_debug("sst: detected MFLD\n");
++ else {
++ pr_err("sst: detected unknown device abort!!\n");
++ return -EIO;
++ }
++ if ((cpu_id < CPU_CHIP_LINCROFT) || (cpu_id > CPU_CHIP_PENWELL)) {
++ pr_err("sst: detected unknown cpu_id abort!!\n");
++ return -EIO;
++ }
++ /* allocate memory for saving internal context and working */
++ intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
++ if (!intelmaddata) {
++ pr_debug("sst: mem alloctn fail\n");
++ return -ENOMEM;
++ }
++
++ /* allocate memory for LPE API set */
++ intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
++ GFP_KERNEL);
++ if (!intelmaddata->sstdrv_ops) {
++ pr_err("sst: mem allocation for ops fail\n");
++ kfree(intelmaddata);
++ return -ENOMEM;
++ }
++
++ intelmaddata->cpu_id = cpu_id;
++ /* create a card instance with ALSA framework */
++ ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
++ if (ret_val) {
++ pr_err("sst: snd_card_create fail\n");
++ goto free_allocs;
++ }
++
++ intelmaddata->pdev = pdev;
++ intelmaddata->irq = platform_get_irq(pdev, 0);
++ platform_set_drvdata(pdev, intelmaddata);
++ intelmaddata->card = card;
++ intelmaddata->card_id = card_id;
++ intelmaddata->card_index = card_index;
++ intelmaddata->master_mute = UNMUTE;
++ intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0;
++ strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD));
++ strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD));
++
++ intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
++ /* registering with LPE driver to get access to SST APIs to use */
++ ret_val = snd_intelmad_sst_register(intelmaddata);
++ if (ret_val) {
++ pr_err("sst: snd_intelmad_sst_register failed\n");
++ goto free_allocs;
++ }
++
++ intelmaddata->pmic_status = PMIC_INIT;
++
++ ret_val = snd_intelmad_pcm(card, intelmaddata);
++ if (ret_val) {
++ pr_err("sst: snd_intelmad_pcm failed\n");
++ goto free_allocs;
++ }
++
++ ret_val = snd_intelmad_mixer(intelmaddata);
++ if (ret_val) {
++ pr_err("sst: snd_intelmad_mixer failed\n");
++ goto free_allocs;
++ }
++
++ ret_val = snd_intelmad_jack(intelmaddata);
++ if (ret_val) {
++ pr_err("sst: snd_intelmad_jack failed\n");
++ goto free_allocs;
++ }
++
++ /*create work queue for jack interrupt*/
++ INIT_WORK(&intelmaddata->mad_jack_msg.wq,
++ sst_process_mad_jack_detection);
++
++ intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq");
++ if (!intelmaddata->mad_jack_wq)
++ goto free_mad_jack_wq;
++
++ ret_val = snd_intelmad_register_irq(intelmaddata);
++ if (ret_val) {
++ pr_err("sst: snd_intelmad_register_irq fail\n");
++ goto free_allocs;
++ }
++
++ /* internal function call to register device with ALSA */
++ ret_val = snd_intelmad_create(intelmaddata, card);
++ if (ret_val) {
++ pr_err("sst: snd_intelmad_create failed\n");
++ goto free_allocs;
++ }
++ card->private_data = intelmaddata;
++ snd_card_set_dev(card, &pdev->dev);
++ ret_val = snd_card_register(card);
++ if (ret_val) {
++ pr_err("sst: snd_card_register failed\n");
++ goto free_allocs;
++ }
++
++ pr_debug("sst:snd_intelmad_probe complete\n");
++ return ret_val;
++
++free_mad_jack_wq:
++ destroy_workqueue(intelmaddata->mad_jack_wq);
++free_allocs:
++ pr_err("sst: probe failed\n");
++ snd_card_free(card);
++ kfree(intelmaddata->sstdrv_ops);
++ kfree(intelmaddata);
++ return ret_val;
++}
++
++
++static int snd_intelmad_remove(struct platform_device *pdev)
++{
++ struct snd_intelmad *intelmaddata = platform_get_drvdata(pdev);
++
++ if (intelmaddata) {
++ snd_card_free(intelmaddata->card);
++ unregister_sst_card(intelmaddata->sstdrv_ops);
++ /* free allocated memory for internal context */
++ destroy_workqueue(intelmaddata->mad_jack_wq);
++ kfree(intelmaddata->sstdrv_ops);
++ kfree(intelmaddata);
++ }
++ return 0;
++}
++
++/*********************************************************************
++ * Driver initialization and exit
++ *********************************************************************/
++static const struct platform_device_id snd_intelmad_ids[] = {
++ {DRIVER_NAME_MRST, CPU_CHIP_LINCROFT},
++ {DRIVER_NAME_MFLD, CPU_CHIP_PENWELL},
++ {"", 0},
++
++};
++
++static struct platform_driver snd_intelmad_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "intel_mid_sound_card",
++ },
++ .id_table = snd_intelmad_ids,
++ .probe = snd_intelmad_probe,
++ .remove = __devexit_p(snd_intelmad_remove),
++};
++
++/*
++ * alsa_card_intelmad_init- driver init function
++ *
++ * This function is called when driver module is inserted
++ */
++static int __init alsa_card_intelmad_init(void)
++{
++ pr_debug("sst: mad_init called\n");
++ return platform_driver_register(&snd_intelmad_driver);
++}
++
++/**
++ * alsa_card_intelmad_exit- driver exit function
++ *
++ * This function is called when driver module is removed
++ */
++static void __exit alsa_card_intelmad_exit(void)
++{
++ pr_debug("sst:mad_exit called\n");
++ return platform_driver_unregister(&snd_intelmad_driver);
++}
++
++module_init(alsa_card_intelmad_init)
++module_exit(alsa_card_intelmad_exit)
++
+--- /dev/null
++++ b/sound/pci/sst/intelmid.h
+@@ -0,0 +1,181 @@
++/*
++ * intelmid.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver header for Intel MAD chipset
++ */
++#ifndef __INTELMID_H
++#define __INTELMID_H
++
++#include <linux/time.h>
++
++#define DRIVER_NAME_MFLD "msic_audio"
++#define DRIVER_NAME_MRST "pmic_audio"
++#define DRIVER_NAME "intelmid_audio"
++#define PMIC_SOUND_IRQ_TYPE_MASK (1 << 15)
++#define AUDINT_BASE (0xFFFFEFF8 + (6 * sizeof(u8)))
++#define REG_IRQ
++/* values #defined */
++/* will differ for different hw - to be taken from config */
++#define MAX_DEVICES 1
++#define MIN_RATE 8000
++#define MAX_RATE 48000
++#define MAX_BUFFER (800*1024) /* for PCM */
++#define MIN_BUFFER (800*1024)
++#define MAX_PERIODS (1024*2)
++#define MIN_PERIODS 1
++#define MAX_PERIOD_BYTES MAX_BUFFER
++#define MIN_PERIOD_BYTES 32
++/*#define MIN_PERIOD_BYTES 160*/
++#define MAX_MUTE 1
++#define MIN_MUTE 0
++#define MONO_CNTL 1
++#define STEREO_CNTL 2
++#define MIN_CHANNEL 1
++#define MAX_CHANNEL_AMIC 2
++#define MAX_CHANNEL_DMIC 4
++#define FIFO_SIZE 0 /* fifo not being used */
++#define INTEL_MAD "Intel MAD"
++#define MAX_CTRL_MRST 7
++#define MAX_CTRL_MFLD 2
++#define MAX_CTRL 7
++#define MAX_VENDORS 4
++/* TODO +6 db */
++#define MAX_VOL 64
++/* TODO -57 db */
++#define MIN_VOL 0
++#define PLAYBACK_COUNT 1
++#define CAPTURE_COUNT 1
++
++extern int sst_card_vendor_id;
++
++struct mad_jack {
++ struct snd_jack jack;
++ int jack_status;
++ struct timeval buttonpressed;
++ struct timeval buttonreleased;
++};
++
++struct mad_jack_msg_wq {
++ u8 intsts;
++ struct snd_intelmad *intelmaddata;
++ struct work_struct wq;
++
++};
++
++/**
++ * struct snd_intelmad - intelmad driver structure
++ *
++ * @card: ptr to the card details
++ * @card_index: sound card index
++ * @card_id: sound card id detected
++ * @sstdrv_ops: ptr to sst driver ops
++ * @pdev: ptr to platform device
++ * @irq: interrupt number detected
++ * @pmic_status: Device status of sound card
++ * @int_base: ptr to MMIO interrupt region
++ * @output_sel: device selected as o/p
++ * @input_sel: device selected as i/p
++ * @master_mute: master mute status
++ * @jack: jack status
++ * @playback_cnt: active pb streams
++ * @capture_cnt: active cp streams
++ * @mad_jack_msg: wq struct for jack interrupt processing
++ * @mad_jack_wq: wq for jack interrupt processing
++ * @jack_prev_state: Previous state of jack detected
++ * @cpu_id: current cpu id loaded for
++ */
++struct snd_intelmad {
++ struct snd_card *card; /* ptr to the card details */
++ int card_index;/* card index */
++ char *card_id; /* card id */
++ struct intel_sst_card_ops *sstdrv_ops;/* ptr to sst driver ops */
++ struct platform_device *pdev;
++ int irq;
++ int pmic_status;
++ void __iomem *int_base;
++ int output_sel;
++ int input_sel;
++ int master_mute;
++ struct mad_jack jack[4];
++ int playback_cnt;
++ int capture_cnt;
++ struct mad_jack_msg_wq mad_jack_msg;
++ struct workqueue_struct *mad_jack_wq;
++ u8 jack_prev_state;
++ unsigned int cpu_id;
++};
++
++struct snd_control_val {
++ int playback_vol_max;
++ int playback_vol_min;
++ int capture_vol_max;
++ int capture_vol_min;
++};
++
++struct mad_stream_pvt {
++ int stream_status;
++ int stream_ops;
++ struct snd_pcm_substream *substream;
++ struct pcm_stream_info stream_info;
++ ssize_t dbg_cum_bytes;
++ enum snd_sst_device_type device;
++};
++
++enum mad_drv_status {
++ INIT = 1,
++ STARTED,
++ RUNNING,
++ PAUSED,
++ DROPPED,
++};
++
++enum mad_pmic_status {
++ PMIC_UNINIT = 1,
++ PMIC_INIT,
++};
++enum _widget_ctrl {
++ OUTPUT_SEL = 1,
++ INPUT_SEL,
++ PLAYBACK_VOL,
++ PLAYBACK_MUTE,
++ CAPTURE_VOL,
++ CAPTURE_MUTE,
++ MASTER_MUTE
++};
++
++void period_elapsed(void *mad_substream);
++int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream);
++int snd_intelmad_init_stream(struct snd_pcm_substream *substream);
++
++int sst_sc_reg_access(struct sc_reg_access *sc_access,
++ int type, int num_val);
++#define CPU_CHIP_LINCROFT 1 /* System running lincroft */
++#define CPU_CHIP_PENWELL 2 /* System running penwell */
++
++extern struct snd_control_val intelmad_ctrl_val[];
++extern struct snd_kcontrol_new snd_intelmad_controls_mrst[];
++extern struct snd_kcontrol_new snd_intelmad_controls_mfld[];
++extern struct snd_pmic_ops *intelmad_vendor_ops[];
++
++#endif /* __INTELMID_H */
+--- /dev/null
++++ b/sound/pci/sst/intelmid_ctrl.c
+@@ -0,0 +1,629 @@
++/*
++ * intelmid_ctrl.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver handling mixer controls for Intel MAD chipset
++ */
++#include <sound/core.h>
++#include <sound/control.h>
++#include <sound/jack.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intelmid_snd_control.h"
++#include "intelmid.h"
++
++static char *out_names_mrst[] = {"Headphones",
++ "Internal speakers"};
++static char *in_names_mrst[] = {"AMIC",
++ "DMIC",
++ "HS_MIC"};
++static char *out_names_mfld[] = {"Headset ",
++ "EarPiece "};
++static char *in_names_mfld[] = {"AMIC",
++ "DMIC"};
++
++struct snd_control_val intelmad_ctrl_val[MAX_VENDORS] = {
++ {
++ .playback_vol_max = 63,
++ .playback_vol_min = 0,
++ .capture_vol_max = 63,
++ .capture_vol_min = 0,
++ },
++ {
++ .playback_vol_max = 0,
++ .playback_vol_min = -31,
++ .capture_vol_max = 0,
++ .capture_vol_min = -20,
++ },
++ {
++ .playback_vol_max = 0,
++ .playback_vol_min = -126,
++ .capture_vol_max = 0,
++ .capture_vol_min = -31,
++ },
++};
++
++/* control path functionalities */
++
++static inline int snd_intelmad_volume_info(struct snd_ctl_elem_info *uinfo,
++ int control_type, int max, int min)
++{
++ WARN_ON(!uinfo);
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = control_type;
++ uinfo->value.integer.min = min;
++ uinfo->value.integer.max = max;
++ return 0;
++}
++
++/**
++* snd_intelmad_mute_info - provides information about the mute controls
++*
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the control's info need
++* to be filled
++*
++* This function is called when a mixer application requests for control's info
++*/
++static int snd_intelmad_mute_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ WARN_ON(!uinfo);
++ WARN_ON(!kcontrol);
++
++ /* set up the mute as a boolean mono control with min-max values */
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
++ uinfo->count = MONO_CNTL;
++ uinfo->value.integer.min = MIN_MUTE;
++ uinfo->value.integer.max = MAX_MUTE;
++ return 0;
++}
++
++/**
++* snd_intelmad_capture_volume_info - provides info about the volume control
++*
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the control's info need
++* to be filled
++*
++* This function is called when a mixer application requests for control's info
++*/
++static int snd_intelmad_capture_volume_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ snd_intelmad_volume_info(uinfo, MONO_CNTL,
++ intelmad_ctrl_val[sst_card_vendor_id].capture_vol_max,
++ intelmad_ctrl_val[sst_card_vendor_id].capture_vol_min);
++ return 0;
++}
++
++/**
++* snd_intelmad_playback_volume_info - provides info about the volume control
++*
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the control's info need
++* to be filled
++*
++* This function is called when a mixer application requests for control's info
++*/
++static int snd_intelmad_playback_volume_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ snd_intelmad_volume_info(uinfo, STEREO_CNTL,
++ intelmad_ctrl_val[sst_card_vendor_id].playback_vol_max,
++ intelmad_ctrl_val[sst_card_vendor_id].playback_vol_min);
++ return 0;
++}
++
++/**
++* snd_intelmad_device_info_mrst - provides information about the devices available
++*
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the devices's info need
++* to be filled
++*
++* This function is called when a mixer application requests for device's info
++*/
++static int snd_intelmad_device_info_mrst(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++
++ WARN_ON(!kcontrol);
++ WARN_ON(!uinfo);
++
++ /* setup device select as drop down controls with different values */
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mrst);
++ else
++ uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mrst);
++ uinfo->count = MONO_CNTL;
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
++
++ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
++ uinfo->value.enumerated.item = 1;
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ strncpy(uinfo->value.enumerated.name,
++ out_names_mrst[uinfo->value.enumerated.item],
++ sizeof(uinfo->value.enumerated.name)-1);
++ else
++ strncpy(uinfo->value.enumerated.name,
++ in_names_mrst[uinfo->value.enumerated.item],
++ sizeof(uinfo->value.enumerated.name)-1);
++ return 0;
++}
++
++static int snd_intelmad_device_info_mfld(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ WARN_ON(!kcontrol);
++ WARN_ON(!uinfo);
++ /* setup device select as drop down controls with different values */
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mfld);
++ else
++ uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mfld);
++ uinfo->count = MONO_CNTL;
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
++
++ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
++ uinfo->value.enumerated.item = 1;
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ strncpy(uinfo->value.enumerated.name,
++ out_names_mfld[uinfo->value.enumerated.item],
++ sizeof(uinfo->value.enumerated.name)-1);
++ else
++ strncpy(uinfo->value.enumerated.name,
++ in_names_mfld[uinfo->value.enumerated.item],
++ sizeof(uinfo->value.enumerated.name)-1);
++ return 0;
++}
++
++/**
++* snd_intelmad_volume_get - gets the current volume for the control
++*
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info need
++* to be filled
++*
++* This function is called when .get function of a control is invoked from app
++*/
++static int snd_intelmad_volume_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ int ret_val = 0, cntl_list[2] = {0,};
++ int value = 0;
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ pr_debug("sst: snd_intelmad_volume_get called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_VOL:
++ cntl_list[0] = PMIC_SND_RIGHT_PB_VOL;
++ cntl_list[1] = PMIC_SND_LEFT_PB_VOL;
++ break;
++
++ case CAPTURE_VOL:
++ cntl_list[0] = PMIC_SND_CAPTURE_VOL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->get_vol(cntl_list[0], &value);
++ uval->value.integer.value[0] = value;
++
++ if (ret_val)
++ return ret_val;
++
++ if (kcontrol->id.numid == PLAYBACK_VOL) {
++ ret_val = scard_ops->get_vol(cntl_list[1], &value);
++ uval->value.integer.value[1] = value;
++ }
++ return ret_val;
++}
++
++/**
++* snd_intelmad_mute_get - gets the current mute status for the control
++*
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info need
++* to be filled
++*
++* This function is called when .get function of a control is invoked from app
++*/
++static int snd_intelmad_mute_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++
++ int cntl_list = 0, ret_val = 0;
++ u8 value = 0;
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ pr_debug("sst: Mute_get called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_MUTE:
++ if (intelmaddata->output_sel == STEREO_HEADPHONE)
++ cntl_list = PMIC_SND_LEFT_HP_MUTE;
++ else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
++ (intelmaddata->output_sel == MONO_EARPIECE))
++ cntl_list = PMIC_SND_LEFT_SPEAKER_MUTE;
++ break;
++
++ case CAPTURE_MUTE:
++ if (intelmaddata->input_sel == DMIC)
++ cntl_list = PMIC_SND_DMIC_MUTE;
++ else if (intelmaddata->input_sel == AMIC)
++ cntl_list = PMIC_SND_AMIC_MUTE;
++ else if (intelmaddata->input_sel == HS_MIC)
++ cntl_list = PMIC_SND_HP_MIC_MUTE;
++ break;
++ case MASTER_MUTE:
++ uval->value.integer.value[0] = intelmaddata->master_mute;
++ return 0;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->get_mute(cntl_list, &value);
++ uval->value.integer.value[0] = value;
++ return ret_val;
++}
++
++/**
++* snd_intelmad_volume_set - sets the volume control's info
++*
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* available to be set
++*
++* This function is called when .set function of a control is invoked from app
++*/
++static int snd_intelmad_volume_set(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++
++ int ret_val, cntl_list[2] = {0,};
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ pr_debug("sst: volume set called:%ld %ld\n",
++ uval->value.integer.value[0],
++ uval->value.integer.value[1]);
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_VOL:
++ cntl_list[0] = PMIC_SND_LEFT_PB_VOL;
++ cntl_list[1] = PMIC_SND_RIGHT_PB_VOL;
++ break;
++
++ case CAPTURE_VOL:
++ cntl_list[0] = PMIC_SND_CAPTURE_VOL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->set_vol(cntl_list[0],
++ uval->value.integer.value[0]);
++ if (ret_val)
++ return ret_val;
++
++ if (kcontrol->id.numid == PLAYBACK_VOL)
++ ret_val = scard_ops->set_vol(cntl_list[1],
++ uval->value.integer.value[1]);
++ return ret_val;
++}
++
++/**
++* snd_intelmad_mute_set - sets the mute control's info
++*
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* available to be set
++*
++* This function is called when .set function of a control is invoked from app
++*/
++static int snd_intelmad_mute_set(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ int cntl_list[2] = {0,}, ret_val;
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ pr_debug("sst: snd_intelmad_mute_set called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ kcontrol->private_value = uval->value.integer.value[0];
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_MUTE:
++ if (intelmaddata->output_sel == STEREO_HEADPHONE) {
++ cntl_list[0] = PMIC_SND_LEFT_HP_MUTE;
++ cntl_list[1] = PMIC_SND_RIGHT_HP_MUTE;
++ } else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
++ (intelmaddata->output_sel == MONO_EARPIECE)) {
++ cntl_list[0] = PMIC_SND_LEFT_SPEAKER_MUTE;
++ cntl_list[1] = PMIC_SND_RIGHT_SPEAKER_MUTE;
++ }
++ break;
++
++ case CAPTURE_MUTE:/*based on sel device mute the i/p dev*/
++ if (intelmaddata->input_sel == DMIC)
++ cntl_list[0] = PMIC_SND_DMIC_MUTE;
++ else if (intelmaddata->input_sel == AMIC)
++ cntl_list[0] = PMIC_SND_AMIC_MUTE;
++ else if (intelmaddata->input_sel == HS_MIC)
++ cntl_list[0] = PMIC_SND_HP_MIC_MUTE;
++ break;
++ case MASTER_MUTE:
++ cntl_list[0] = PMIC_SND_MUTE_ALL;
++ intelmaddata->master_mute = uval->value.integer.value[0];
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->set_mute(cntl_list[0],
++ uval->value.integer.value[0]);
++ if (ret_val)
++ return ret_val;
++
++ if (kcontrol->id.numid == PLAYBACK_MUTE)
++ ret_val = scard_ops->set_mute(cntl_list[1],
++ uval->value.integer.value[0]);
++ return ret_val;
++}
++
++/**
++* snd_intelmad_device_get - get the device select control's info
++*
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* to be filled
++*
++* This function is called when .get function of a control is invoked from app
++*/
++static int snd_intelmad_device_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++ pr_debug("sst: device_get called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++ if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ uval->value.enumerated.item[0] =
++ scard_ops->output_dev_id;
++ else if (kcontrol->id.numid == INPUT_SEL)
++ uval->value.enumerated.item[0] =
++ scard_ops->input_dev_id;
++ else
++ return -EINVAL;
++ } else
++ uval->value.enumerated.item[0] = kcontrol->private_value;
++ return 0;
++}
++
++/**
++* snd_intelmad_device_set - set the device select control's info
++*
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* available to be set
++*
++* This function is called when .set function of a control is invoked from app
++*/
++static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++ int ret_val = 0, vendor, status;
++
++ pr_debug("sst: snd_intelmad_device_set called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++ status = -1;
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ /* store value with driver */
++ kcontrol->private_value = uval->value.enumerated.item[0];
++
++ switch (kcontrol->id.numid) {
++ case OUTPUT_SEL:
++ ret_val = scard_ops->set_output_dev(
++ uval->value.enumerated.item[0]);
++ intelmaddata->output_sel = uval->value.enumerated.item[0];
++ break;
++ case INPUT_SEL:
++ vendor = intelmaddata->sstdrv_ops->vendor_id;
++ if ((vendor == SND_MX) || (vendor == SND_FS)) {
++ if (uval->value.enumerated.item[0] == HS_MIC) {
++ status = 1;
++ intelmaddata->sstdrv_ops->
++ control_set(SST_ENABLE_RX_TIME_SLOT, &status);
++ } else {
++ status = 0;
++ intelmaddata->sstdrv_ops->
++ control_set(SST_ENABLE_RX_TIME_SLOT, &status);
++ }
++ }
++ ret_val = scard_ops->set_input_dev(
++ uval->value.enumerated.item[0]);
++ intelmaddata->input_sel = uval->value.enumerated.item[0];
++ break;
++ default:
++ return -EINVAL;
++ }
++ kcontrol->private_value = uval->value.enumerated.item[0];
++ return ret_val;
++}
++
++struct snd_kcontrol_new snd_intelmad_controls_mrst[MAX_CTRL] __devinitdata = {
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Source",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_device_info_mrst,
++ .get = snd_intelmad_device_get,
++ .put = snd_intelmad_device_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Source",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_device_info_mrst,
++ .get = snd_intelmad_device_get,
++ .put = snd_intelmad_device_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Volume",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_playback_volume_info,
++ .get = snd_intelmad_volume_get,
++ .put = snd_intelmad_volume_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Switch",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_mute_info,
++ .get = snd_intelmad_mute_get,
++ .put = snd_intelmad_mute_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Volume",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_capture_volume_info,
++ .get = snd_intelmad_volume_get,
++ .put = snd_intelmad_volume_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Switch",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_mute_info,
++ .get = snd_intelmad_mute_get,
++ .put = snd_intelmad_mute_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "Master Playback Switch",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_mute_info,
++ .get = snd_intelmad_mute_get,
++ .put = snd_intelmad_mute_set,
++ .private_value = 0,
++},
++};
++
++struct snd_kcontrol_new
++snd_intelmad_controls_mfld[MAX_CTRL_MFLD] __devinitdata = {
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Source",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_device_info_mfld,
++ .get = snd_intelmad_device_get,
++ .put = snd_intelmad_device_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Source",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_device_info_mfld,
++ .get = snd_intelmad_device_get,
++ .put = snd_intelmad_device_set,
++ .private_value = 0,
++},
++};
++
+--- /dev/null
++++ b/sound/pci/sst/intelmid_msic_control.c
+@@ -0,0 +1,410 @@
++/*
++ * intelmid_vm_control.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2010 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of msic vendors
++ */
++
++#include <linux/pci.h>
++#include <linux/file.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intelmid_snd_control.h"
++
++static int msic_init_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ /* dmic configuration */
++ {0x241, 0x85, 0},
++ {0x242, 0x02, 0},
++ /* audio paths config */
++ {0x24C, 0x10, 0},
++ {0x24D, 0x32, 0},
++ /* PCM2 interface slots */
++ /* preconfigured slots for 0-5 both tx, rx */
++ {0x272, 0x10, 0},
++ {0x273, 0x32, 0},
++ {0x274, 0xFF, 0},
++ {0x275, 0x10, 0},
++ {0x276, 0x32, 0},
++ {0x277, 0x54, 0},
++ /*Sinc5 decimator*/
++ {0x24E, 0x28, 0},
++ /*TI vibra w/a settings*/
++ {0x384, 0x80, 0},
++ {0x385, 0x80, 0},
++ /*vibra settings*/
++ {0x267, 0x00, 0},
++ {0x26A, 0x10, 0},
++ {0x261, 0x00, 0},
++ {0x264, 0x10, 0},
++ /* pcm port setting */
++ {0x278, 0x00, 0},
++ {0x27B, 0x01, 0},
++ {0x27C, 0x0a, 0},
++ /* Set vol HSLRVOLCTRL, IHFVOL */
++ {0x259, 0x04, 0},
++ {0x25A, 0x04, 0},
++ {0x25B, 0x04, 0},
++ {0x25C, 0x04, 0},
++ /* HSEPRXCTRL Enable the headset left and right FIR filters */
++ {0x250, 0x30, 0},
++ /* HSMIXER */
++ {0x256, 0x11, 0},
++ /* amic configuration */
++ {0x249, 0x09, 0x0},
++ {0x24A, 0x09, 0x0},
++ /* unmask ocaudio/accdet interrupts */
++ {0x1d, 0x00, 0x00},
++ {0x1e, 0x00, 0x00},
++ };
++ snd_msic_ops.card_status = SND_CARD_INIT_DONE;
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 30);
++ snd_msic_ops.pb_on = 0;
++ snd_msic_ops.cap_on = 0;
++ snd_msic_ops.input_dev_id = DMIC; /*def dev*/
++ snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
++ pr_debug("sst: msic init complete!!\n");
++ return 0;
++}
++
++static int msic_power_up_pb(unsigned int device)
++{
++ struct sc_reg_access sc_access1[] = {
++ /* turn on the audio power supplies */
++ {0x0DB, 0x05, 0},
++ /* VHSP */
++ {0x0DC, 0xFF, 0},
++ /* VHSN */
++ {0x0DD, 0x3F, 0},
++ /* turn on PLL */
++ {0x240, 0x21, 0},
++ };
++ struct sc_reg_access sc_access2[] = {
++ /* disable driver */
++ {0x25D, 0x0, 0x43},
++ /* DAC CONFIG ; both HP, LP on */
++ {0x257, 0x03, 0x03},
++ };
++ struct sc_reg_access sc_access3[] = {
++ /* HSEPRXCTRL Enable the headset left and right FIR filters */
++ {0x250, 0x30, 0},
++ /* HSMIXER */
++ {0x256, 0x11, 0},
++ };
++ struct sc_reg_access sc_access4[] = {
++ /* enable driver */
++ {0x25D, 0x3, 0x3},
++ /* unmute the headset */
++ { 0x259, 0x80, 0x80},
++ { 0x25A, 0x80, 0x80},
++ };
++ struct sc_reg_access sc_access_vihf[] = {
++ /* VIHF ON */
++ {0x0C9, 0x2D, 0x00},
++ };
++ struct sc_reg_access sc_access22[] = {
++ /* disable driver */
++ {0x25D, 0x00, 0x0C},
++ /*Filer DAC enable*/
++ {0x251, 0x03, 0x03},
++ {0x257, 0x0C, 0x0C},
++ };
++ struct sc_reg_access sc_access32[] = {
++ /*enable drv*/
++ {0x25D, 0x0C, 0x0c},
++ };
++ struct sc_reg_access sc_access42[] = {
++ /*unmute headset*/
++ {0x25B, 0x80, 0x80},
++ {0x25C, 0x80, 0x80},
++ };
++ struct sc_reg_access sc_access23[] = {
++ /* disable driver */
++ {0x25D, 0x0, 0x43},
++ /* DAC CONFIG ; both HP, LP on */
++ {0x257, 0x03, 0x03},
++ };
++ struct sc_reg_access sc_access43[] = {
++ /* enable driver */
++ {0x25D, 0x40, 0x40},
++ /* unmute the headset */
++ { 0x259, 0x80, 0x80},
++ { 0x25A, 0x80, 0x80},
++ };
++ struct sc_reg_access sc_access_vib[] = {
++ /* enable driver, ADC */
++ {0x25D, 0x10, 0x10},
++ {0x264, 0x02, 0x02},
++ };
++ struct sc_reg_access sc_access_hap[] = {
++ /* enable driver, ADC */
++ {0x25D, 0x20, 0x20},
++ {0x26A, 0x02, 0x02},
++ };
++ struct sc_reg_access sc_access_pcm2[] = {
++ /* enable pcm 2 */
++ {0x27C, 0x1, 0x1},
++ };
++ int retval = 0;
++
++ if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
++ retval = msic_init_card();
++ if (retval)
++ return retval;
++ }
++
++ pr_debug("sst: powering up pb.... Device %d\n", device);
++ sst_sc_reg_access(sc_access1, PMIC_WRITE, 4);
++ switch (device) {
++ case SND_SST_DEVICE_HEADSET:
++ if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE) {
++ sst_sc_reg_access(sc_access2, PMIC_READ_MODIFY, 2);
++ sst_sc_reg_access(sc_access3, PMIC_WRITE, 2);
++ sst_sc_reg_access(sc_access4, PMIC_READ_MODIFY, 3);
++ } else {
++ sst_sc_reg_access(sc_access23, PMIC_READ_MODIFY, 2);
++ sst_sc_reg_access(sc_access3, PMIC_WRITE, 2);
++ sst_sc_reg_access(sc_access43, PMIC_READ_MODIFY, 3);
++ }
++ snd_msic_ops.pb_on = 1;
++ break;
++
++ case SND_SST_DEVICE_IHF:
++ sst_sc_reg_access(sc_access_vihf, PMIC_WRITE, 1);
++ sst_sc_reg_access(sc_access22, PMIC_READ_MODIFY, 3);
++ sst_sc_reg_access(sc_access32, PMIC_READ_MODIFY, 1);
++ sst_sc_reg_access(sc_access42, PMIC_READ_MODIFY, 2);
++ break;
++
++ case SND_SST_DEVICE_VIBRA:
++ sst_sc_reg_access(sc_access_vib, PMIC_READ_MODIFY, 2);
++ break;
++
++ case SND_SST_DEVICE_HAPTIC:
++ sst_sc_reg_access(sc_access_hap, PMIC_READ_MODIFY, 2);
++ break;
++
++ default:
++ pr_warn("sst: Wrong Device %d, selected %d\n",
++ device, snd_msic_ops.output_dev_id);
++ }
++ return sst_sc_reg_access(sc_access_pcm2, PMIC_READ_MODIFY, 1);
++}
++
++static int msic_power_up_cp(unsigned int device)
++{
++ struct sc_reg_access sc_access[] = {
++ /* turn on the audio power supplies */
++ {0x0DB, 0x05, 0},
++ /* VHSP */
++ {0x0DC, 0xFF, 0},
++ /* VHSN */
++ {0x0DD, 0x3F, 0},
++ /* turn on PLL */
++ {0x240, 0x21, 0},
++
++ /* Turn on DMIC supply */
++ {0x247, 0xA0, 0x0},
++ {0x240, 0x21, 0x0},
++ {0x24C, 0x10, 0x0},
++
++ /* mic demux enable */
++ {0x245, 0x3F, 0x0},
++ {0x246, 0x7, 0x0},
++
++ };
++ struct sc_reg_access sc_access_amic[] = {
++ /* turn on the audio power supplies */
++ {0x0DB, 0x05, 0},
++ /* VHSP */
++ {0x0DC, 0xFF, 0},
++ /* VHSN */
++ {0x0DD, 0x3F, 0},
++ /* turn on PLL */
++ {0x240, 0x21, 0},
++ /*ADC EN*/
++ {0x248, 0x05, 0x0},
++ {0x24C, 0x76, 0x0},
++ /*MIC EN*/
++ {0x249, 0x09, 0x0},
++ {0x24A, 0x09, 0x0},
++ /* Turn on AMIC supply */
++ {0x247, 0xFC, 0x0},
++
++ };
++ struct sc_reg_access sc_access2[] = {
++ /* enable pcm 2 */
++ {0x27C, 0x1, 0x1},
++ };
++ struct sc_reg_access sc_access3[] = {
++ /*wait for mic to stabalize before turning on audio channels*/
++ {0x24F, 0x3C, 0x0},
++ };
++ int retval = 0;
++
++ if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
++ retval = msic_init_card();
++ if (retval)
++ return retval;
++ }
++
++ pr_debug("sst: powering up cp....%d\n", snd_msic_ops.input_dev_id);
++ sst_sc_reg_access(sc_access2, PMIC_READ_MODIFY, 1);
++ snd_msic_ops.cap_on = 1;
++ if (snd_msic_ops.input_dev_id == AMIC)
++ sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 9);
++ else
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 9);
++ return sst_sc_reg_access(sc_access3, PMIC_WRITE, 1);
++
++}
++
++static int msic_power_down(void)
++{
++ int retval = 0;
++
++ pr_debug("sst: powering dn msic\n");
++ snd_msic_ops.pb_on = 0;
++ snd_msic_ops.cap_on = 0;
++ return retval;
++}
++
++static int msic_power_down_pb(void)
++{
++ int retval = 0;
++
++ pr_debug("sst: powering dn pb....\n");
++ snd_msic_ops.pb_on = 0;
++ return retval;
++}
++
++static int msic_power_down_cp(void)
++{
++ int retval = 0;
++
++ pr_debug("sst: powering dn cp....\n");
++ snd_msic_ops.cap_on = 0;
++ return retval;
++}
++
++static int msic_set_selected_output_dev(u8 value)
++{
++ int retval = 0;
++
++ pr_debug("sst: msic set selected output:%d\n", value);
++ snd_msic_ops.output_dev_id = value;
++ if (snd_msic_ops.pb_on)
++ msic_power_up_pb(SND_SST_DEVICE_HEADSET);
++ return retval;
++}
++
++static int msic_set_selected_input_dev(u8 value)
++{
++
++ struct sc_reg_access sc_access_dmic[] = {
++ {0x24C, 0x10, 0x0},
++ };
++ struct sc_reg_access sc_access_amic[] = {
++ {0x24C, 0x76, 0x0},
++
++ };
++ int retval = 0;
++
++ pr_debug("sst: msic_set_selected_input_dev:%d\n", value);
++ snd_msic_ops.input_dev_id = value;
++ switch (value) {
++ case AMIC:
++ pr_debug("sst: Selecting AMIC1\n");
++ retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
++ break;
++ case DMIC:
++ pr_debug("sst: Selecting DMIC1\n");
++ retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
++ break;
++ default:
++ return -EINVAL;
++
++ }
++ if (snd_msic_ops.cap_on)
++ retval = msic_power_up_cp(SND_SST_DEVICE_CAPTURE);
++ return retval;
++}
++
++static int msic_set_pcm_voice_params(void)
++{
++ return 0;
++}
++
++static int msic_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
++{
++ return 0;
++}
++
++static int msic_set_audio_port(int status)
++{
++ return 0;
++}
++
++static int msic_set_voice_port(int status)
++{
++ return 0;
++}
++
++static int msic_set_mute(int dev_id, u8 value)
++{
++ return 0;
++}
++
++static int msic_set_vol(int dev_id, int value)
++{
++ return 0;
++}
++
++static int msic_get_mute(int dev_id, u8 *value)
++{
++ return 0;
++}
++
++static int msic_get_vol(int dev_id, int *value)
++{
++ return 0;
++}
++
++struct snd_pmic_ops snd_msic_ops = {
++ .set_input_dev = msic_set_selected_input_dev,
++ .set_output_dev = msic_set_selected_output_dev,
++ .set_mute = msic_set_mute,
++ .get_mute = msic_get_mute,
++ .set_vol = msic_set_vol,
++ .get_vol = msic_get_vol,
++ .init_card = msic_init_card,
++ .set_pcm_audio_params = msic_set_pcm_audio_params,
++ .set_pcm_voice_params = msic_set_pcm_voice_params,
++ .set_voice_port = msic_set_voice_port,
++ .set_audio_port = msic_set_audio_port,
++ .power_up_pmic_pb = msic_power_up_pb,
++ .power_up_pmic_cp = msic_power_up_cp,
++ .power_down_pmic_pb = msic_power_down_pb,
++ .power_down_pmic_cp = msic_power_down_cp,
++ .power_down_pmic = msic_power_down,
++};
+--- /dev/null
++++ b/sound/pci/sst/intelmid_pvt.c
+@@ -0,0 +1,174 @@
++/*
++ * intelmid_pvt.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver for Intel MID sound card chipset - holding private functions
++ */
++#include <linux/io.h>
++#include <asm/intel_scu_ipc.h>
++#include <sound/core.h>
++#include <sound/control.h>
++#include <sound/pcm.h>
++#include <sound/jack.h>
++#include <sound/intel_sst.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intelmid_snd_control.h"
++#include "intelmid.h"
++
++
++void period_elapsed(void *mad_substream)
++{
++ struct snd_pcm_substream *substream = mad_substream;
++ struct mad_stream_pvt *stream;
++
++
++
++ if (!substream || !substream->runtime)
++ return;
++ stream = substream->runtime->private_data;
++ if (!stream)
++ return;
++
++ if (stream->stream_status != RUNNING)
++ return;
++ pr_debug("sst: calling period elapsed\n");
++ snd_pcm_period_elapsed(substream);
++ return;
++}
++
++
++int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
++{
++ struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
++ struct mad_stream_pvt *stream = substream->runtime->private_data;
++ struct snd_sst_stream_params param = {{{0,},},};
++ struct snd_sst_params str_params = {0};
++ int ret_val;
++
++ /* set codec params and inform SST driver the same */
++
++ param.uc.pcm_params.codec = SST_CODEC_TYPE_PCM;
++ param.uc.pcm_params.num_chan = (u8) substream->runtime->channels;
++ param.uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
++ param.uc.pcm_params.reserved = 0;
++ param.uc.pcm_params.sfreq = substream->runtime->rate;
++ param.uc.pcm_params.ring_buffer_size =
++ snd_pcm_lib_buffer_bytes(substream);
++ param.uc.pcm_params.period_count = substream->runtime->period_size;
++ param.uc.pcm_params.ring_buffer_addr =
++ virt_to_phys(substream->runtime->dma_area);
++ pr_debug("sst: period_cnt = %d\n", param.uc.pcm_params.period_count);
++ pr_debug("sst: sfreq= %d, wd_sz = %d\n",
++ param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
++
++ str_params.sparams = param;
++ str_params.codec = SST_CODEC_TYPE_PCM;
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ str_params.ops = STREAM_OPS_PLAYBACK;
++ pr_debug("sst: Playbck stream,Device %d\n", stream->device);
++ } else {
++ str_params.ops = STREAM_OPS_CAPTURE;
++ stream->device = SND_SST_DEVICE_CAPTURE;
++ pr_debug("sst: Capture stream,Device %d\n", stream->device);
++ }
++ str_params.device_type = stream->device;
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_ALLOC,
++ &str_params);
++ pr_debug("sst: SST_SND_PLAY/CAPTURE ret_val = %x\n",
++ ret_val);
++ if (ret_val < 0)
++ return ret_val;
++
++ stream->stream_info.str_id = ret_val;
++ stream->stream_status = INIT;
++ stream->stream_info.buffer_ptr = 0;
++ pr_debug("sst: str id : %d\n", stream->stream_info.str_id);
++
++ return ret_val;
++}
++
++int snd_intelmad_init_stream(struct snd_pcm_substream *substream)
++{
++ struct mad_stream_pvt *stream = substream->runtime->private_data;
++ struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
++ int ret_val;
++
++ pr_debug("sst: setting buffer ptr param\n");
++ stream->stream_info.period_elapsed = period_elapsed;
++ stream->stream_info.mad_substream = substream;
++ stream->stream_info.buffer_ptr = 0;
++ stream->stream_info.sfreq = substream->runtime->rate;
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_STREAM_INIT,
++ &stream->stream_info);
++ if (ret_val)
++ pr_err("sst: control_set ret error %d\n", ret_val);
++ return ret_val;
++
++}
++
++
++/**
++ * sst_sc_reg_access - IPC read/write wrapper
++ *
++ * @sc_access: array of data, addresses and mask
++ * @type: operation type
++ * @num_val: number of reg to opertae on
++ *
++ * Reads/writes/read-modify operations on registers accessed through SCU (sound
++ * card and few SST DSP regsisters that are not accissible to IA)
++ */
++int sst_sc_reg_access(struct sc_reg_access *sc_access,
++ int type, int num_val)
++{
++ int i, retval = 0;
++ if (type == PMIC_WRITE) {
++ for (i = 0; i < num_val; i++) {
++ retval = intel_scu_ipc_iowrite8(sc_access[i].reg_addr,
++ sc_access[i].value);
++ if (retval) {
++ pr_err("sst: IPC write failed!!! %d\n", retval);
++ return retval;
++ }
++ }
++ } else if (type == PMIC_READ) {
++ for (i = 0; i < num_val; i++) {
++ retval = intel_scu_ipc_ioread8(sc_access[i].reg_addr,
++ &(sc_access[i].value));
++ if (retval) {
++ pr_err("sst: IPC read failed!!!!!%d\n", retval);
++ return retval;
++ }
++ }
++ } else {
++ for (i = 0; i < num_val; i++) {
++ retval = intel_scu_ipc_update_register(
++ sc_access[i].reg_addr, sc_access[i].value,
++ sc_access[i].mask);
++ if (retval) {
++ pr_err("sst: IPC Modify failed!!!%d\n", retval);
++ return retval;
++ }
++ }
++ }
++ return retval;
++}
+--- /dev/null
++++ b/sound/pci/sst/intelmid_snd_control.h
+@@ -0,0 +1,114 @@
++#ifndef __INTELMID_SND_CTRL_H__
++#define __INTELMID_SND_CTRL_H__
++/*
++ * intelmid_snd_control.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file defines all snd control functions
++ */
++
++/*
++Mask bits
++*/
++#define MASK0 0x01 /* 0000 0001 */
++#define MASK1 0x02 /* 0000 0010 */
++#define MASK2 0x04 /* 0000 0100 */
++#define MASK3 0x08 /* 0000 1000 */
++#define MASK4 0x10 /* 0001 0000 */
++#define MASK5 0x20 /* 0010 0000 */
++#define MASK6 0x40 /* 0100 0000 */
++#define MASK7 0x80 /* 1000 0000 */
++/*
++value bits
++*/
++#define VALUE0 0x01 /* 0000 0001 */
++#define VALUE1 0x02 /* 0000 0010 */
++#define VALUE2 0x04 /* 0000 0100 */
++#define VALUE3 0x08 /* 0000 1000 */
++#define VALUE4 0x10 /* 0001 0000 */
++#define VALUE5 0x20 /* 0010 0000 */
++#define VALUE6 0x40 /* 0100 0000 */
++#define VALUE7 0x80 /* 1000 0000 */
++
++#define MUTE 0 /* ALSA Passes 0 for mute */
++#define UNMUTE 1 /* ALSA Passes 1 for unmute */
++
++#define MAX_VOL_PMIC_VENDOR0 0x3f /* max vol in dB for stereo & voice DAC */
++#define MIN_VOL_PMIC_VENDOR0 0 /* min vol in dB for stereo & voice DAC */
++/* Head phone volume control */
++#define MAX_HP_VOL_PMIC_VENDOR1 6 /* max volume in dB for HP */
++#define MIN_HP_VOL_PMIC_VENDOR1 (-84) /* min volume in dB for HP */
++#define MAX_HP_VOL_INDX_PMIC_VENDOR1 40 /* Number of HP volume control values */
++
++/* Mono Earpiece Volume control */
++#define MAX_EP_VOL_PMIC_VENDOR1 0 /* max volume in dB for EP */
++#define MIN_EP_VOL_PMIC_VENDOR1 (-75) /* min volume in dB for EP */
++#define MAX_EP_VOL_INDX_PMIC_VENDOR1 32 /* Number of EP volume control values */
++
++int sst_sc_reg_access(struct sc_reg_access *sc_access,
++ int type, int num_val);
++extern struct snd_pmic_ops snd_pmic_ops_fs;
++extern struct snd_pmic_ops snd_pmic_ops_mx;
++extern struct snd_pmic_ops snd_pmic_ops_nc;
++extern struct snd_pmic_ops snd_msic_ops;
++
++/* device */
++enum SND_INPUT_DEVICE {
++ AMIC,
++ DMIC,
++ HS_MIC,
++ IN_UNDEFINED
++};
++
++enum SND_OUTPUT_DEVICE {
++ STEREO_HEADPHONE,
++ MONO_EARPIECE,
++
++ INTERNAL_SPKR,
++ RECEIVER,
++ OUT_UNDEFINED
++};
++
++enum pmic_controls {
++ PMIC_SND_HP_MIC_MUTE = 0x0001,
++ PMIC_SND_AMIC_MUTE = 0x0002,
++ PMIC_SND_DMIC_MUTE = 0x0003,
++ PMIC_SND_CAPTURE_VOL = 0x0004,
++/* Output controls */
++ PMIC_SND_LEFT_PB_VOL = 0x0010,
++ PMIC_SND_RIGHT_PB_VOL = 0x0011,
++ PMIC_SND_LEFT_HP_MUTE = 0x0012,
++ PMIC_SND_RIGHT_HP_MUTE = 0x0013,
++ PMIC_SND_LEFT_SPEAKER_MUTE = 0x0014,
++ PMIC_SND_RIGHT_SPEAKER_MUTE = 0x0015,
++ PMIC_SND_RECEIVER_VOL = 0x0016,
++ PMIC_SND_RECEIVER_MUTE = 0x0017,
++/* Other controls */
++ PMIC_SND_MUTE_ALL = 0x0020,
++ PMIC_MAX_CONTROLS = 0x0020,
++};
++
++#endif /* __INTELMID_SND_CTRL_H__ */
++
++
+--- /dev/null
++++ b/sound/pci/sst/intelmid_v0_control.c
+@@ -0,0 +1,771 @@
++/*
++ * intel_sst_v0_control.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of vendor 1
++ */
++
++#include <linux/pci.h>
++#include <linux/file.h>
++#include <sound/intel_sst.h>
++#include "intelmid_snd_control.h"
++
++
++enum _reg_v1 {
++ VOICEPORT1 = 0x180,
++ VOICEPORT2 = 0x181,
++ AUDIOPORT1 = 0x182,
++ AUDIOPORT2 = 0x183,
++ MISCVOICECTRL = 0x184,
++ MISCAUDCTRL = 0x185,
++ DMICCTRL1 = 0x186,
++ AUDIOBIAS = 0x187,
++ MICCTRL = 0x188,
++ MICLICTRL1 = 0x189,
++ MICLICTRL2 = 0x18A,
++ MICLICTRL3 = 0x18B,
++ VOICEDACCTRL1 = 0x18C,
++ STEREOADCCTRL = 0x18D,
++ AUD15 = 0x18E,
++ AUD16 = 0x18F,
++ AUD17 = 0x190,
++ AUD18 = 0x191,
++ RMIXOUTSEL = 0x192,
++ ANALOGLBR = 0x193,
++ ANALOGLBL = 0x194,
++ POWERCTRL1 = 0x195,
++ POWERCTRL2 = 0x196,
++ HEADSETDETECTINT = 0x197,
++ HEADSETDETECTINTMASK = 0x198,
++ TRIMENABLE = 0x199,
++};
++
++int rev_id = 0x20;
++
++/****
++ * fs_init_card - initialize the sound card
++ *
++ * This initializes the audio paths to known values in case of this sound card
++ */
++static int fs_init_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x180, 0x00, 0x0},
++ {0x181, 0x00, 0x0},
++ {0x182, 0xF8, 0x0},
++ {0x183, 0x08, 0x0},
++ {0x184, 0x00, 0x0},
++ {0x185, 0x40, 0x0},
++ {0x186, 0x06, 0x0},
++ {0x187, 0x80, 0x0},
++ {0x188, 0x40, 0x0},
++ {0x189, 0x39, 0x0},
++ {0x18a, 0x39, 0x0},
++ {0x18b, 0x1F, 0x0},
++ {0x18c, 0x00, 0x0},
++ {0x18d, 0x00, 0x0},
++ {0x18e, 0x39, 0x0},
++ {0x18f, 0x39, 0x0},
++ {0x190, 0x39, 0x0},
++ {0x191, 0x11, 0x0},
++ {0x192, 0x0E, 0x0},
++ {0x193, 0x00, 0x0},
++ {0x194, 0x00, 0x0},
++ {0x195, 0x00, 0x0},
++ {0x196, 0x7C, 0x0},
++ {0x197, 0x00, 0x0},
++ {0x198, 0x0B, 0x0},
++ {0x199, 0x00, 0x0},
++ {0x037, 0x3F, 0x0},
++ };
++
++ snd_pmic_ops_fs.card_status = SND_CARD_INIT_DONE;
++ snd_pmic_ops_fs.master_mute = UNMUTE;
++ snd_pmic_ops_fs.mute_status = UNMUTE;
++ snd_pmic_ops_fs.num_channel = 2;
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
++}
++
++static int fs_enable_audiodac(int value)
++{
++ struct sc_reg_access sc_access[3];
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD17;
++ sc_access[2].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = sc_access[2].mask = MASK7;
++
++ if (snd_pmic_ops_fs.mute_status == MUTE)
++ return 0;
++ if (value == MUTE) {
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = 0x80;
++
++ } else {
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = 0x0;
++ }
++ if (snd_pmic_ops_fs.num_channel == 1)
++ sc_access[1].value = sc_access[2].value = 0x80;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++}
++
++static int fs_power_up_pb(unsigned int port)
++{
++ struct sc_reg_access sc_access[] = {
++ {AUDIOBIAS, 0x00, MASK7},
++ {POWERCTRL1, 0xC6, 0xC6},
++ {POWERCTRL2, 0x30, 0x30},
++
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ retval = fs_enable_audiodac(MUTE);
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ if (retval)
++ return retval;
++
++ pr_debug("sst: in fs power up pb\n");
++ return fs_enable_audiodac(UNMUTE);
++}
++
++static int fs_power_down_pb(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL1, 0x00, 0xC6},
++ {POWERCTRL2, 0x00, 0x30},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ retval = fs_enable_audiodac(MUTE);
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ if (retval)
++ return retval;
++
++ pr_debug("sst: in fsl power down pb\n");
++ return fs_enable_audiodac(UNMUTE);
++}
++
++static int fs_power_up_cp(unsigned int port)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL2, 0x32, 0x32}, /*NOTE power up A ADC only as*/
++ {AUDIOBIAS, 0x00, MASK7},
++ /*as turning on V ADC causes noise*/
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++}
++
++static int fs_power_down_cp(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL2, 0x00, 0x03},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++static int fs_power_down(void)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[] = {
++ {AUDIOBIAS, MASK7, MASK7},
++ };
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++static int fs_set_pcm_voice_params(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x180, 0xA0, 0},
++ {0x181, 0x04, 0},
++ {0x182, 0x0, 0},
++ {0x183, 0x0, 0},
++ {0x184, 0x18, 0},
++ {0x185, 0x40, 0},
++ {0x186, 0x06, 0},
++ {0x187, 0x0, 0},
++ {0x188, 0x10, 0},
++ {0x189, 0x39, 0},
++ {0x18a, 0x39, 0},
++ {0x18b, 0x02, 0},
++ {0x18c, 0x0, 0},
++ {0x18d, 0x0, 0},
++ {0x18e, 0x39, 0},
++ {0x18f, 0x0, 0},
++ {0x190, 0x0, 0},
++ {0x191, 0x20, 0},
++ {0x192, 0x20, 0},
++ {0x193, 0x0, 0},
++ {0x194, 0x0, 0},
++ {0x195, 0x06, 0},
++ {0x196, 0x25, 0},
++ {0x197, 0x0, 0},
++ {0x198, 0xF, 0},
++ {0x199, 0x0, 0},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
++}
++
++static int fs_set_audio_port(int status)
++{
++ struct sc_reg_access sc_access[2];
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ if (status == DEACTIVATE) {
++ /* Deactivate audio port-tristate and power */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = MASK4|MASK5;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else if (status == ACTIVATE) {
++ /* activate audio port */
++ sc_access[0].value = 0xC0;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ sc_access[1].value = 0x30;
++ sc_access[1].mask = MASK4|MASK5;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else
++ return -EINVAL;
++}
++
++static int fs_set_voice_port(int status)
++{
++ struct sc_reg_access sc_access[2];
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ if (status == DEACTIVATE) {
++ /* Deactivate audio port-tristate and power */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = VOICEPORT1;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = MASK0|MASK1;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else if (status == ACTIVATE) {
++ /* activate audio port */
++ sc_access[0].value = 0xC0;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = VOICEPORT1;
++ sc_access[1].value = 0x03;
++ sc_access[1].mask = MASK0|MASK1;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else
++ return -EINVAL;
++}
++
++static int fs_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
++{
++ u8 config1 = 0;
++ struct sc_reg_access sc_access[4];
++ int retval = 0, num_value = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ switch (sfreq) {
++ case 8000:
++ config1 = 0x00;
++ break;
++ case 11025:
++ config1 = 0x01;
++ break;
++ case 12000:
++ config1 = 0x02;
++ break;
++ case 16000:
++ config1 = 0x03;
++ break;
++ case 22050:
++ config1 = 0x04;
++ break;
++ case 24000:
++ config1 = 0x05;
++ break;
++ case 26000:
++ config1 = 0x06;
++ break;
++ case 32000:
++ config1 = 0x07;
++ break;
++ case 44100:
++ config1 = 0x08;
++ break;
++ case 48000:
++ config1 = 0x09;
++ break;
++ }
++ snd_pmic_ops_fs.num_channel = num_channel;
++ if (snd_pmic_ops_fs.num_channel == 1) {
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ sc_access[0].value = sc_access[1].value = 0x80;
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ } else {
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ sc_access[0].value = sc_access[1].value = 0x00;
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ }
++ pr_debug("sst: sfreq:%d,Register value = %x\n", sfreq, config1);
++
++ if (word_size == 24) {
++ sc_access[0].reg_addr = AUDIOPORT1;
++ sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
++ sc_access[0].value = 0xFB;
++
++
++ sc_access[1].reg_addr = AUDIOPORT2;
++ sc_access[1].value = config1 | 0x10;
++ sc_access[1].mask = MASK0 | MASK1 | MASK2 | MASK3
++ | MASK4 | MASK5 | MASK6;
++
++ sc_access[2].reg_addr = MISCAUDCTRL;
++ sc_access[2].value = 0x02;
++ sc_access[2].mask = 0x02;
++
++ num_value = 3 ;
++
++ } else {
++
++ sc_access[0].reg_addr = AUDIOPORT2;
++ sc_access[0].value = config1;
++ sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
++
++ sc_access[1].reg_addr = MISCAUDCTRL;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = 0x02;
++ num_value = 2;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_value);
++
++}
++
++static int fs_set_selected_input_dev(u8 value)
++{
++ struct sc_reg_access sc_access_dmic[] = {
++ {MICCTRL, 0x81, 0xf7},
++ {MICLICTRL3, 0x00, 0xE0},
++ };
++ struct sc_reg_access sc_access_mic[] = {
++ {MICCTRL, 0x40, MASK2|MASK4|MASK5|MASK6|MASK7},
++ {MICLICTRL3, 0x00, 0xE0},
++ };
++ struct sc_reg_access sc_access_hsmic[] = {
++ {MICCTRL, 0x10, MASK2|MASK4|MASK5|MASK6|MASK7},
++ {MICLICTRL3, 0x00, 0xE0},
++ };
++
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (value) {
++ case AMIC:
++ pr_debug("sst: Selecting amic not supported in mono cfg\n");
++ return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
++ break;
++
++ case HS_MIC:
++ pr_debug("sst: Selecting hsmic\n");
++ return sst_sc_reg_access(sc_access_hsmic,
++ PMIC_READ_MODIFY, 2);
++ break;
++
++ case DMIC:
++ pr_debug("sst: Selecting dmic\n");
++ return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
++ break;
++
++ default:
++ return -EINVAL;
++
++ }
++}
++
++static int fs_set_selected_output_dev(u8 value)
++{
++ struct sc_reg_access sc_access_hp[] = {
++ {0x191, 0x11, 0x0},
++ {0x192, 0x0E, 0x0},
++ };
++ struct sc_reg_access sc_access_is[] = {
++ {0x191, 0x17, 0xFF},
++ {0x192, 0x08, 0xFF},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (value) {
++ case STEREO_HEADPHONE:
++ pr_debug("SST DBG:Selecting headphone\n");
++ return sst_sc_reg_access(sc_access_hp, PMIC_WRITE, 2);
++ break;
++ case MONO_EARPIECE:
++ case INTERNAL_SPKR:
++ pr_debug("SST DBG:Selecting internal spkr\n");
++ return sst_sc_reg_access(sc_access_is, PMIC_READ_MODIFY, 2);
++ break;
++
++ default:
++ return -EINVAL;
++
++ }
++}
++
++static int fs_set_mute(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[6] = {{0,},};
++ int reg_num = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++
++ pr_debug("sst: dev_id:0x%x value:0x%x\n", dev_id, value);
++ switch (dev_id) {
++ case PMIC_SND_DMIC_MUTE:
++ sc_access[0].reg_addr = MICCTRL;
++ sc_access[1].reg_addr = MICLICTRL1;
++ sc_access[2].reg_addr = MICLICTRL2;
++ sc_access[0].mask = MASK5;
++ sc_access[1].mask = sc_access[2].mask = MASK6;
++ if (value == MUTE) {
++ sc_access[0].value = 0x20;
++ sc_access[2].value = sc_access[1].value = 0x40;
++ } else
++ sc_access[0].value = sc_access[1].value
++ = sc_access[2].value = 0x0;
++ reg_num = 3;
++ break;
++ case PMIC_SND_HP_MIC_MUTE:
++ case PMIC_SND_AMIC_MUTE:
++ sc_access[0].reg_addr = MICLICTRL1;
++ sc_access[1].reg_addr = MICLICTRL2;
++ sc_access[0].mask = sc_access[1].mask = MASK6;
++ if (value == MUTE)
++ sc_access[0].value = sc_access[1].value = 0x40;
++ else
++ sc_access[0].value = sc_access[1].value = 0x0;
++ reg_num = 2;
++ break;
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ case PMIC_SND_LEFT_HP_MUTE:
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD15;
++
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ if (value == MUTE)
++ sc_access[0].value = sc_access[1].value = 0x80;
++ else
++ sc_access[0].value = sc_access[1].value = 0x0;
++ reg_num = 2;
++ snd_pmic_ops_fs.mute_status = value;
++ break;
++ case PMIC_SND_RIGHT_HP_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ if (value == MUTE)
++ sc_access[0].value = sc_access[1].value = 0x80;
++ else
++ sc_access[0].value = sc_access[1].value = 0x0;
++ snd_pmic_ops_fs.mute_status = value;
++ if (snd_pmic_ops_fs.num_channel == 1)
++ sc_access[0].value = sc_access[1].value = 0x80;
++ reg_num = 2;
++ break;
++ case PMIC_SND_MUTE_ALL:
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD17;
++ sc_access[2].reg_addr = AUD15;
++ sc_access[3].reg_addr = MICCTRL;
++ sc_access[4].reg_addr = MICLICTRL1;
++ sc_access[5].reg_addr = MICLICTRL2;
++ sc_access[0].mask = sc_access[1].mask =
++ sc_access[2].mask = MASK7;
++ sc_access[3].mask = MASK5;
++ sc_access[4].mask = sc_access[5].mask = MASK6;
++
++ if (value == MUTE) {
++ sc_access[0].value =
++ sc_access[1].value = sc_access[2].value = 0x80;
++ sc_access[3].value = 0x20;
++ sc_access[4].value = sc_access[5].value = 0x40;
++
++ } else {
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = sc_access[3].value =
++ sc_access[4].value = sc_access[5].value = 0x0;
++ }
++ if (snd_pmic_ops_fs.num_channel == 1)
++ sc_access[1].value = sc_access[2].value = 0x80;
++ reg_num = 6;
++ snd_pmic_ops_fs.mute_status = value;
++ snd_pmic_ops_fs.master_mute = value;
++ break;
++
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
++}
++
++static int fs_set_vol(int dev_id, int value)
++{
++ struct sc_reg_access sc_acces, sc_access[4] = {{0},};
++ int reg_num = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_LEFT_PB_VOL:
++ pr_debug("sst: PMIC_SND_LEFT_PB_VOL:%d\n", value);
++ sc_access[0].value = sc_access[1].value = value;
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ reg_num = 2;
++ break;
++
++ case PMIC_SND_RIGHT_PB_VOL:
++ pr_debug("sst: PMIC_SND_RIGHT_PB_VOL:%d\n", value);
++ sc_access[0].value = sc_access[1].value = value;
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ if (snd_pmic_ops_fs.num_channel == 1) {
++ sc_access[0].value = sc_access[1].value = 0x80;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ }
++ reg_num = 2;
++ break;
++ case PMIC_SND_CAPTURE_VOL:
++ pr_debug("sst: PMIC_SND_CAPTURE_VOL:%d\n", value);
++ sc_access[0].reg_addr = MICLICTRL1;
++ sc_access[1].reg_addr = MICLICTRL2;
++ sc_access[2].reg_addr = DMICCTRL1;
++ sc_access[2].value = value;
++ sc_access[0].value = sc_access[1].value = value;
++ sc_acces.reg_addr = MICLICTRL3;
++ sc_acces.value = value;
++ sc_acces.mask = (MASK0|MASK1|MASK2|MASK3|MASK5|MASK6|MASK7);
++ retval = sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ sc_access[0].mask = sc_access[1].mask =
++ sc_access[2].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ reg_num = 3;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
++}
++
++static int fs_get_mute(int dev_id, u8 *value)
++{
++ struct sc_reg_access sc_access[6] = {{0,},};
++
++ int retval = 0, temp_value = 0, mask = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++
++ case PMIC_SND_AMIC_MUTE:
++ case PMIC_SND_HP_MIC_MUTE:
++ sc_access[0].reg_addr = MICLICTRL1;
++ mask = MASK6;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ if (sc_access[0].value & mask)
++ *value = MUTE;
++ else
++ *value = UNMUTE;
++ break;
++ case PMIC_SND_DMIC_MUTE:
++ sc_access[0].reg_addr = MICCTRL;
++ mask = MASK5;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ temp_value = (sc_access[0].value & mask);
++ if (temp_value == 0)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ break;
++
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = AUD16;
++ mask = MASK7;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ temp_value = sc_access[0].value & mask;
++ if (temp_value == 0)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ break;
++ case PMIC_SND_RIGHT_HP_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = AUD17;
++ mask = MASK7;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ temp_value = sc_access[0].value & mask;
++ if (temp_value == 0)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return retval;
++}
++
++static int fs_get_vol(int dev_id, int *value)
++{
++ struct sc_reg_access sc_access = {0,};
++ int retval = 0, mask = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ pr_debug("sst: PMIC_SND_CAPTURE_VOL\n");
++ sc_access.reg_addr = MICLICTRL1;
++ mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
++ break;
++ case PMIC_SND_LEFT_PB_VOL:
++ pr_debug("sst: PMIC_SND_LEFT_PB_VOL\n");
++ sc_access.reg_addr = AUD16;
++ mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
++ break;
++ case PMIC_SND_RIGHT_PB_VOL:
++ pr_debug("sst: PMIC_SND_RT_PB_VOL\n");
++ sc_access.reg_addr = AUD17;
++ mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
++ pr_debug("sst: value read = 0x%x\n", sc_access.value);
++ *value = (int) (sc_access.value & mask);
++ pr_debug("sst: value returned = 0x%x\n", *value);
++ return retval;
++}
++
++struct snd_pmic_ops snd_pmic_ops_fs = {
++ .set_input_dev = fs_set_selected_input_dev,
++ .set_output_dev = fs_set_selected_output_dev,
++ .set_mute = fs_set_mute,
++ .get_mute = fs_get_mute,
++ .set_vol = fs_set_vol,
++ .get_vol = fs_get_vol,
++ .init_card = fs_init_card,
++ .set_pcm_audio_params = fs_set_pcm_audio_params,
++ .set_pcm_voice_params = fs_set_pcm_voice_params,
++ .set_voice_port = fs_set_voice_port,
++ .set_audio_port = fs_set_audio_port,
++ .power_up_pmic_pb = fs_power_up_pb,
++ .power_up_pmic_cp = fs_power_up_cp,
++ .power_down_pmic_pb = fs_power_down_pb,
++ .power_down_pmic_cp = fs_power_down_cp,
++ .power_down_pmic = fs_power_down,
++};
+--- /dev/null
++++ b/sound/pci/sst/intelmid_v1_control.c
+@@ -0,0 +1,1066 @@
++/* intel_sst_v1_control.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of vendor 2
++ */
++
++#include <linux/pci.h>
++#include <linux/file.h>
++#include <asm/mrst.h>
++#include <sound/intel_sst.h>
++#include "intelmid_snd_control.h"
++
++#include <linux/gpio.h>
++#define KOSKI_VOICE_CODEC_ENABLE 46
++
++enum _reg_v2 {
++
++ MASTER_CLOCK_PRESCALAR = 0x205,
++ SET_MASTER_AND_LR_CLK1 = 0x20b,
++ SET_MASTER_AND_LR_CLK2 = 0x20c,
++ MASTER_MODE_AND_DATA_DELAY = 0x20d,
++ DIGITAL_INTERFACE_TO_DAI2 = 0x20e,
++ CLK_AND_FS1 = 0x208,
++ CLK_AND_FS2 = 0x209,
++ DAI2_TO_DAC_HP = 0x210,
++ HP_OP_SINGLE_ENDED = 0x224,
++ ENABLE_OPDEV_CTRL = 0x226,
++ ENABLE_DEV_AND_USE_XTAL = 0x227,
++
++ /* Max audio subsystem (PQ49) MAX 8921 */
++ AS_IP_MODE_CTL = 0xF9,
++ AS_LEFT_SPKR_VOL_CTL = 0xFA, /* Mono Earpiece volume control */
++ AS_RIGHT_SPKR_VOL_CTL = 0xFB,
++ AS_LEFT_HP_VOL_CTL = 0xFC,
++ AS_RIGHT_HP_VOL_CTL = 0xFD,
++ AS_OP_MIX_CTL = 0xFE,
++ AS_CONFIG = 0xFF,
++
++ /* Headphone volume control & mute registers */
++ VOL_CTRL_LT = 0x21c,
++ VOL_CTRL_RT = 0x21d,
++
++};
++/**
++ * mx_init_card - initilize the sound card
++ *
++ * This initilizes the audio paths to know values in case of this sound card
++ */
++static int mx_init_card(void)
++{
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++
++ struct sc_reg_access sc_access[] = {
++ {0x200, 0x00, 0x0},
++ {0x201, 0xC0, 0x0},
++ {0x202, 0x00, 0x0},
++ {0x203, 0x00, 0x0},
++ {0x204, 0x0e, 0x0},
++ {0x205, 0x20, 0x0},
++ {0x206, 0x00, 0x0},
++ {0x207, 0x00, 0x0},
++ {0x208, 0x00, 0x0},
++ {0x209, 0x51, 0x0},
++ {0x20a, 0x00, 0x0},
++ {0x20b, 0x5a, 0x0},
++ {0x20c, 0xbe, 0x0},
++ {0x20d, 0x90, 0x0},
++ {0x20e, 0x51, 0x0},
++ {0x20f, 0x00, 0x0},
++ {0x210, 0x21, 0x0},
++ {0x211, 0x00, 0x0},
++ {0x212, 0x00, 0x0},
++ {0x213, 0x00, 0x0},
++ {0x214, 0x41, 0x0},
++ {0x215, 0x81, 0x0},
++ {0x216, 0x00, 0x0},
++ {0x217, 0x00, 0x0},
++ {0x218, 0x00, 0x0},
++ {0x219, 0x00, 0x0},
++ {0x21a, 0x00, 0x0},
++ {0x21b, 0x00, 0x0},
++ {0x21c, 0x00, 0x0},
++ {0x21d, 0x00, 0x0},
++ {0x21e, 0x00, 0x0},
++ {0x21f, 0x00, 0x0},
++ {0x220, 0x00, 0x0},
++ {0x221, 0x00, 0x0},
++ {0x222, 0x51, 0x0},
++ {0x223, 0x20, 0x0}, /* Jack detection: 00 -> 01 */
++ {0x224, 0x40, 0x0},
++		{0x225, 0x80, 0x0}, /* Jack detection: 00 -> 80 */
++ {0x226, 0x00, 0x0},
++ {0x227, 0x00, 0x0},
++ {0xf9, 0x40, 0x0},
++ {0xfa, 0x1F, 0x0},
++ {0xfb, 0x1F, 0x0},
++ {0xfc, 0x1F, 0x0},
++ {0xfd, 0x1F, 0x0},
++ {0xfe, 0x00, 0x0},
++ {0xff, 0x00, 0x0}, /* Removed sel_output */
++ };
++ int retval;
++
++ /*init clock sig to voice codec*/
++ retval = gpio_request(KOSKI_VOICE_CODEC_ENABLE,
++ "sound_voice_codec");
++ if (retval) {
++ pr_err("sst: Error enabling voice codec clock\n");
++ } else {
++ gpio_direction_output(KOSKI_VOICE_CODEC_ENABLE, 1);
++ pr_debug("sst: Voice codec clock enabled\n");
++ }
++
++ snd_pmic_ops_mx.card_status = SND_CARD_INIT_DONE;
++ snd_pmic_ops_mx.master_mute = UNMUTE;
++ snd_pmic_ops_mx.mute_status = UNMUTE;
++ snd_pmic_ops_mx.num_channel = 2;
++ pr_debug("**************inside aava\n");
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
++ } else {
++ struct sc_reg_access sc_access[] = {
++ {0x200, 0x80, 0x00},
++ {0x201, 0xC0, 0x00},
++ {0x202, 0x00, 0x00},
++ {0x203, 0x00, 0x00},
++ {0x204, 0x02, 0x00},
++ {0x205, 0x10, 0x00},
++ {0x206, 0x60, 0x00},
++ {0x207, 0x00, 0x00},
++ {0x208, 0x90, 0x00},
++ {0x209, 0x51, 0x00},
++ {0x20a, 0x00, 0x00},
++ {0x20b, 0x10, 0x00},
++ {0x20c, 0x00, 0x00},
++ {0x20d, 0x00, 0x00},
++ {0x20e, 0x21, 0x00},
++ {0x20f, 0x00, 0x00},
++ {0x210, 0x84, 0x00},
++ {0x211, 0xB3, 0x00},
++ {0x212, 0x00, 0x00},
++ {0x213, 0x00, 0x00},
++ {0x214, 0x41, 0x00},
++ {0x215, 0x00, 0x00},
++ {0x216, 0x00, 0x00},
++ {0x217, 0x00, 0x00},
++ {0x218, 0x03, 0x00},
++ {0x219, 0x03, 0x00},
++ {0x21a, 0x00, 0x00},
++ {0x21b, 0x00, 0x00},
++ {0x21c, 0x00, 0x00},
++ {0x21d, 0x00, 0x00},
++ {0x21e, 0x00, 0x00},
++ {0x21f, 0x00, 0x00},
++ {0x220, 0x20, 0x00},
++ {0x221, 0x20, 0x00},
++ {0x222, 0x51, 0x00},
++ {0x223, 0x20, 0x00},
++ {0x224, 0x04, 0x00},
++ {0x225, 0x80, 0x00},
++ {0x226, 0x0F, 0x00},
++ {0x227, 0x08, 0x00},
++ {0xf9, 0x40, 0x00},
++ {0xfa, 0x1f, 0x00},
++ {0xfb, 0x1f, 0x00},
++ {0xfc, 0x1f, 0x00},
++ {0xfd, 0x1f, 0x00},
++ {0xfe, 0x00, 0x00},
++ {0xff, 0x0c, 0x00},
++ };
++ snd_pmic_ops_mx.card_status = SND_CARD_INIT_DONE;
++ snd_pmic_ops_mx.num_channel = 2;
++ snd_pmic_ops_mx.master_mute = UNMUTE;
++ snd_pmic_ops_mx.mute_status = UNMUTE;
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
++ }
++}
++
++static int mx_init_capture_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x206, 0x5a, 0x0},
++ {0x207, 0xbe, 0x0},
++ {0x208, 0x90, 0x0},
++ {0x209, 0x32, 0x0},
++ {0x20e, 0x22, 0x0},
++ {0x210, 0x84, 0x0},
++ {0x223, 0x20, 0x0},
++ {0x226, 0xC0, 0x0},
++ };
++
++ int retval = 0;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 8);
++ if (0 != retval) {
++ /* pmic communication fails */
++ pr_debug("sst: pmic commn failed\n");
++ return retval;
++ }
++
++ pr_debug("sst: Capture configuration complete!!\n");
++ return 0;
++}
++
++static int mx_init_playback_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x206, 0x00, 0x0},
++ {0x207, 0x00, 0x0},
++ {0x208, 0x00, 0x0},
++ {0x209, 0x51, 0x0},
++ {0x20e, 0x51, 0x0},
++ {0x210, 0x21, 0x0},
++ {0x223, 0x01, 0x0},
++ };
++ int retval = 0;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 9);
++ if (0 != retval) {
++ /* pmic communication fails */
++ pr_debug("sst: pmic commn failed\n");
++ return retval;
++ }
++
++ pr_debug("sst: Playback configuration complete!!\n");
++ return 0;
++}
++
++static int mx_enable_audiodac(int value)
++{
++ struct sc_reg_access sc_access[3];
++ int mute_val = 0;
++ int mute_val1 = 0;
++ int retval = 0;
++
++ sc_access[0].reg_addr = AS_LEFT_HP_VOL_CTL;
++ sc_access[1].reg_addr = AS_RIGHT_HP_VOL_CTL;
++
++ if (value == UNMUTE) {
++ mute_val = 0x1F;
++ mute_val1 = 0x00;
++ } else {
++ mute_val = 0x00;
++ mute_val1 = 0x40;
++ }
++ sc_access[0].mask = sc_access[1].mask = MASK0|MASK1|MASK2|MASK3|MASK4;
++ sc_access[0].value = sc_access[1].value = (u8)mute_val;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ if (retval)
++ return retval;
++ pr_debug("sst: mute status = %d", snd_pmic_ops_mx.mute_status);
++ if (snd_pmic_ops_mx.mute_status == MUTE ||
++ snd_pmic_ops_mx.master_mute == MUTE)
++ return retval;
++
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ sc_access[1].reg_addr = VOL_CTRL_RT;
++ sc_access[0].mask = sc_access[1].mask = MASK6;
++ sc_access[0].value = sc_access[1].value = mute_val1;
++ if (snd_pmic_ops_mx.num_channel == 1)
++ sc_access[1].value = 0x40;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++}
++
++static int mx_power_up_pb(unsigned int port)
++{
++
++ int retval = 0;
++ struct sc_reg_access sc_access[3];
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ if ((mrst_platform_id() == MRST_PLATFORM_AAVA_SC) && port == 1)
++ mx_init_playback_card();
++ retval = mx_enable_audiodac(MUTE);
++ if (retval)
++ return retval;
++
++ msleep(10);
++
++ sc_access[0].reg_addr = AS_CONFIG;
++ sc_access[0].mask = MASK7;
++ sc_access[0].value = 0x80;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
++ sc_access[0].mask = 0xff;
++ sc_access[0].value = 0x3C;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
++ sc_access[0].mask = 0x80;
++ sc_access[0].value = 0x80;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ return mx_enable_audiodac(UNMUTE);
++}
++
++static int mx_power_down_pb(void)
++{
++ struct sc_reg_access sc_access[3];
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ retval = mx_enable_audiodac(MUTE);
++ if (retval)
++ return retval;
++
++ sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
++ sc_access[0].mask = MASK3|MASK2;
++ sc_access[0].value = 0x00;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ return mx_enable_audiodac(UNMUTE);
++}
++
++static int mx_power_up_cp(unsigned int port)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[] = {
++ {ENABLE_DEV_AND_USE_XTAL, 0x80, MASK7},
++ {ENABLE_OPDEV_CTRL, 0x3, 0x3},
++ };
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++ retval = mx_init_capture_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++}
++
++static int mx_power_down_cp(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {ENABLE_OPDEV_CTRL, 0x00, MASK1|MASK0},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++static int mx_power_down(void)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[3];
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ retval = mx_enable_audiodac(MUTE);
++ if (retval)
++ return retval;
++
++ sc_access[0].reg_addr = AS_CONFIG;
++ sc_access[0].mask = MASK7;
++ sc_access[0].value = 0x00;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
++ sc_access[0].mask = MASK7;
++ sc_access[0].value = 0x00;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
++ sc_access[0].mask = MASK3|MASK2;
++ sc_access[0].value = 0x00;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ if (retval)
++ return retval;
++
++ return mx_enable_audiodac(UNMUTE);
++}
++
++static int mx_set_pcm_voice_params(void)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[] = {
++ {0x200, 0x80, 0x00},
++ {0x201, 0xC0, 0x00},
++ {0x202, 0x00, 0x00},
++ {0x203, 0x00, 0x00},
++ {0x204, 0x0e, 0x00},
++ {0x205, 0x20, 0x00},
++ {0x206, 0x8f, 0x00},
++ {0x207, 0x21, 0x00},
++ {0x208, 0x18, 0x00},
++ {0x209, 0x32, 0x00},
++ {0x20a, 0x00, 0x00},
++ {0x20b, 0x5A, 0x00},
++ {0x20c, 0xBE, 0x00},/* 0x00 -> 0xBE Koski */
++ {0x20d, 0x00, 0x00}, /* DAI2 'off' */
++ {0x20e, 0x40, 0x00},
++ {0x20f, 0x00, 0x00},
++ {0x210, 0x84, 0x00},
++ {0x211, 0x33, 0x00}, /* Voice filter */
++ {0x212, 0x00, 0x00},
++ {0x213, 0x00, 0x00},
++ {0x214, 0x41, 0x00},
++ {0x215, 0x00, 0x00},
++ {0x216, 0x00, 0x00},
++ {0x217, 0x20, 0x00},
++ {0x218, 0x00, 0x00},
++ {0x219, 0x00, 0x00},
++ {0x21a, 0x40, 0x00},
++ {0x21b, 0x40, 0x00},
++ {0x21c, 0x09, 0x00},
++ {0x21d, 0x09, 0x00},
++ {0x21e, 0x00, 0x00},
++ {0x21f, 0x00, 0x00},
++ {0x220, 0x00, 0x00}, /* Microphone configurations */
++ {0x221, 0x00, 0x00}, /* Microphone configurations */
++ {0x222, 0x50, 0x00}, /* Microphone configurations */
++ {0x223, 0x21, 0x00}, /* Microphone configurations */
++ {0x224, 0x00, 0x00},
++ {0x225, 0x80, 0x00},
++ {0xf9, 0x40, 0x00},
++ {0xfa, 0x19, 0x00},
++ {0xfb, 0x19, 0x00},
++ {0xfc, 0x12, 0x00},
++ {0xfd, 0x12, 0x00},
++ {0xfe, 0x00, 0x00},
++ };
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ pr_debug("sst: SST DBG mx_set_pcm_voice_params called\n");
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 44);
++}
++
++static int mx_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
++{
++ int retval = 0;
++
++ if (mrst_platform_id() != MRST_PLATFORM_AAVA_SC) {
++ int config1 = 0, config2 = 0, filter = 0xB3;
++ struct sc_reg_access sc_access[5];
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ switch (sfreq) {
++ case 8000:
++ config1 = 0x10;
++ config2 = 0x00;
++ filter = 0x33;
++ break;
++ case 11025:
++ config1 = 0x16;
++ config2 = 0x0d;
++ break;
++ case 12000:
++ config1 = 0x18;
++ config2 = 0x00;
++ break;
++ case 16000:
++ config1 = 0x20;
++ config2 = 0x00;
++ break;
++ case 22050:
++ config1 = 0x2c;
++ config2 = 0x1a;
++ break;
++ case 24000:
++ config1 = 0x30;
++ config2 = 0x00;
++ break;
++ case 32000:
++ config1 = 0x40;
++ config2 = 0x00;
++ break;
++ case 44100:
++ config1 = 0x58;
++ config2 = 0x33;
++ break;
++ case 48000:
++ config1 = 0x60;
++ config2 = 0x00;
++ break;
++ }
++
++ snd_pmic_ops_mx.num_channel = num_channel;
++ /*mute the right channel if MONO*/
++ if (snd_pmic_ops_mx.num_channel == 1) {
++
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = MASK6;
++
++ sc_access[1].reg_addr = 0x224;
++ sc_access[1].value = 0x05;
++ sc_access[1].mask = MASK0|MASK1|MASK2;
++
++ retval = sst_sc_reg_access(sc_access,
++ PMIC_READ_MODIFY, 2);
++ if (retval)
++ return retval;
++ } else {
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++
++ sc_access[1].reg_addr = 0x224;
++ sc_access[1].value = 0x04;
++ sc_access[1].mask = MASK0|MASK1|MASK2;
++
++ retval = sst_sc_reg_access(sc_access,
++ PMIC_READ_MODIFY, 2);
++ if (retval)
++ return retval;
++ }
++ sc_access[0].reg_addr = 0x206;
++ sc_access[0].value = config1;
++ sc_access[1].reg_addr = 0x207;
++ sc_access[1].value = config2;
++
++ if (word_size == 16) {
++ sc_access[2].value = 0x51;
++ sc_access[3].value = 0x31;
++ } else if (word_size == 24) {
++ sc_access[2].value = 0x52;
++ sc_access[3].value = 0x92;
++ }
++
++ sc_access[2].reg_addr = 0x209;
++ sc_access[3].reg_addr = 0x20e;
++
++ sc_access[4].reg_addr = 0x211;
++ sc_access[4].value = filter;
++
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 5);
++ } else {
++ int config1 = 0, config2 = 0, filter = 0x00;
++ struct sc_reg_access sc_access[5];
++
++ pr_debug("sst: mx_set_pcm_audio_params - inside AAVA\n");
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ switch (sfreq) {
++ case 8000:
++ config1 = 0x20;
++ config2 = 0x0f;
++ filter = 0x33;
++ break;
++ case 11025:
++ config1 = 0x14;
++ config2 = 0xd8;
++ break;
++ case 12000:
++ config1 = 0x16;
++ config2 = 0xaf;
++ break;
++ case 16000:
++ config1 = 0x1e;
++ config2 = 0x3f;
++ break;
++ case 22050:
++ config1 = 0x29;
++ config2 = 0xaf;
++ break;
++ case 24000:
++ config1 = 0x2d;
++ config2 = 0x5f;
++ break;
++ case 32000:
++ config1 = 0x3c;
++ config2 = 0x7f;
++ break;
++ case 44100:
++ config1 = 0x53;
++ config2 = 0x5f;
++ break;
++ case 48000:
++ config1 = 0x5a;
++ config2 = 0xbe;
++ break;
++ }
++
++ snd_pmic_ops_mx.num_channel = num_channel;
++ /*mute the right channel if MONO*/
++ sc_access[0].reg_addr = 0x20b;
++ sc_access[0].value = config1;
++ sc_access[1].reg_addr = 0x20c;
++ sc_access[1].value = config2;
++ if (word_size == 16) {
++ sc_access[2].value = 0x51;
++ sc_access[3].value = 0x51;
++ } else if (word_size == 24) {
++ sc_access[2].value = 0x52;
++ sc_access[3].value = 0x92;
++
++ }
++
++ sc_access[2].reg_addr = 0x209;
++ sc_access[3].reg_addr = 0x20e;
++ sc_access[4].reg_addr = 0x211;
++ sc_access[4].value = filter;
++
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 5);
++ }
++ return 0;
++}
++
++static int mx_set_selected_output_dev(u8 dev_id)
++{
++ struct sc_reg_access sc_access[2];
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++ pr_debug("sst: mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
++ snd_pmic_ops_mx.output_dev_id = dev_id;
++ switch (dev_id) {
++ case STEREO_HEADPHONE:
++ sc_access[0].reg_addr = 0xFF;
++ sc_access[0].value = 0x8C;
++ sc_access[0].mask =
++ MASK2|MASK3|MASK5|MASK6|MASK4;
++
++ num_reg = 1;
++ break;
++ case MONO_EARPIECE:
++ case INTERNAL_SPKR:
++ sc_access[0].reg_addr = 0xFF;
++ sc_access[0].value = 0xb0;
++ sc_access[0].mask = MASK2|MASK3|MASK5|MASK6|MASK4;
++
++ num_reg = 1;
++ break;
++ case RECEIVER:
++ pr_debug("sst: RECEIVER Koski selected\n");
++
++ /* configuration - AS enable, receiver enable */
++ sc_access[0].reg_addr = 0xFF;
++ sc_access[0].value = 0x81;
++ sc_access[0].mask = 0xff;
++
++ num_reg = 1;
++ break;
++ default:
++ pr_err("sst: Not a valid output dev\n");
++ return 0;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
++}
++
++
++static int mx_set_voice_port(int status)
++{
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ if (status == ACTIVATE)
++ retval = mx_set_pcm_voice_params();
++
++ return retval;
++}
++
++static int mx_set_audio_port(int status)
++{
++ int retval = 0;
++ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ if (status == ACTIVATE) {
++ mx_init_card();
++ mx_set_selected_output_dev
++ (snd_pmic_ops_mx.output_dev_id);
++ }
++ }
++ return retval;
++
++}
++
++static int mx_set_selected_input_dev(u8 dev_id)
++{
++ struct sc_reg_access sc_access[2];
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ snd_pmic_ops_mx.input_dev_id = dev_id;
++ pr_debug("sst: mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
++
++ switch (dev_id) {
++ case AMIC:
++ sc_access[0].reg_addr = 0x223;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ sc_access[1].reg_addr = 0x222;
++ sc_access[1].value = 0x50;
++ sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
++ num_reg = 2;
++ break;
++
++ case HS_MIC:
++ sc_access[0].reg_addr = 0x223;
++ sc_access[0].value = 0x20;
++ sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ sc_access[1].reg_addr = 0x222;
++ sc_access[1].value = 0x51;
++ sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
++ num_reg = 2;
++ break;
++ case DMIC:
++ sc_access[1].reg_addr = 0x222;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ sc_access[0].reg_addr = 0x223;
++ sc_access[0].value = 0x20;
++ sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ num_reg = 2;
++ break;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
++}
++
++static int mx_set_mute(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[5];
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++
++
++ pr_debug("sst: set_mute dev_id:0x%x , value:%d\n", dev_id, value);
++
++ switch (dev_id) {
++ case PMIC_SND_DMIC_MUTE:
++ case PMIC_SND_AMIC_MUTE:
++ case PMIC_SND_HP_MIC_MUTE:
++ sc_access[0].reg_addr = 0x220;
++ sc_access[1].reg_addr = 0x221;
++ sc_access[2].reg_addr = 0x223;
++ if (value == MUTE) {
++ sc_access[0].value = 0x00;
++ sc_access[1].value = 0x00;
++ if (snd_pmic_ops_mx.input_dev_id == DMIC)
++ sc_access[2].value = 0x00;
++ else
++ sc_access[2].value = 0x20;
++ } else {
++ sc_access[0].value = 0x20;
++ sc_access[1].value = 0x20;
++ if (snd_pmic_ops_mx.input_dev_id == DMIC)
++ sc_access[2].value = 0x20;
++ else
++ sc_access[2].value = 0x00;
++ }
++ sc_access[0].mask = MASK5|MASK6;
++ sc_access[1].mask = MASK5|MASK6;
++ sc_access[2].mask = MASK5|MASK6;
++ num_reg = 3;
++ break;
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ case PMIC_SND_LEFT_HP_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ if (value == MUTE)
++ sc_access[0].value = 0x40;
++ else
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++ num_reg = 1;
++ snd_pmic_ops_mx.mute_status = value;
++ break;
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ case PMIC_SND_RIGHT_HP_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ if (snd_pmic_ops_mx.num_channel == 1)
++ value = MUTE;
++ if (value == MUTE)
++ sc_access[0].value = 0x40;
++ else
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++ num_reg = 1;
++ snd_pmic_ops_mx.mute_status = value;
++ break;
++ case PMIC_SND_MUTE_ALL:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[1].reg_addr = VOL_CTRL_LT;
++ sc_access[2].reg_addr = 0x220;
++ sc_access[3].reg_addr = 0x221;
++ sc_access[4].reg_addr = 0x223;
++ snd_pmic_ops_mx.master_mute = value;
++ if (value == MUTE) {
++ sc_access[0].value = sc_access[1].value = 0x40;
++ sc_access[2].value = 0x00;
++ sc_access[3].value = 0x00;
++ if (snd_pmic_ops_mx.input_dev_id == DMIC)
++ sc_access[4].value = 0x00;
++ else
++ sc_access[4].value = 0x20;
++
++ } else {
++ sc_access[0].value = sc_access[1].value = 0x00;
++ sc_access[2].value = sc_access[3].value = 0x20;
++ sc_access[4].value = 0x20;
++ if (snd_pmic_ops_mx.input_dev_id == DMIC)
++ sc_access[4].value = 0x20;
++ else
++ sc_access[4].value = 0x00;
++
++
++ }
++ if (snd_pmic_ops_mx.num_channel == 1)
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = sc_access[1].mask = MASK6;
++ sc_access[2].mask = MASK5|MASK6;
++ sc_access[3].mask = MASK5|MASK6|MASK2|MASK4;
++ sc_access[4].mask = MASK5|MASK6|MASK4;
++
++ num_reg = 5;
++ break;
++ case PMIC_SND_RECEIVER_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ if (value == MUTE)
++ sc_access[0].value = 0x40;
++ else
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++ num_reg = 1;
++ break;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
++}
++
++static int mx_set_vol(int dev_id, int value)
++{
++ struct sc_reg_access sc_access[2] = {{0},};
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ pr_debug("sst: set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
++ switch (dev_id) {
++ case PMIC_SND_RECEIVER_VOL:
++ return 0;
++ break;
++ case PMIC_SND_CAPTURE_VOL:
++ sc_access[0].reg_addr = 0x220;
++ sc_access[1].reg_addr = 0x221;
++ sc_access[0].value = sc_access[1].value = -value;
++ sc_access[0].mask = sc_access[1].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4);
++ num_reg = 2;
++ break;
++ case PMIC_SND_LEFT_PB_VOL:
++ sc_access[0].value = -value;
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ num_reg = 1;
++ break;
++ case PMIC_SND_RIGHT_PB_VOL:
++ sc_access[0].value = -value;
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ if (snd_pmic_ops_mx.num_channel == 1) {
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = MASK6;
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ }
++ num_reg = 1;
++ break;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
++}
++
++static int mx_get_mute(int dev_id, u8 *value)
++{
++ struct sc_reg_access sc_access[4] = {{0},};
++ int retval = 0, num_reg = 0, mask = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ switch (dev_id) {
++ case PMIC_SND_DMIC_MUTE:
++ case PMIC_SND_AMIC_MUTE:
++ case PMIC_SND_HP_MIC_MUTE:
++ sc_access[0].reg_addr = 0x220;
++ mask = MASK5|MASK6;
++ num_reg = 1;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
++ if (retval)
++ return retval;
++ *value = sc_access[0].value & mask;
++ if (*value)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ return retval;
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ num_reg = 1;
++ mask = MASK6;
++ break;
++ case PMIC_SND_RIGHT_HP_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ num_reg = 1;
++ mask = MASK6;
++ break;
++ }
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
++ if (retval)
++ return retval;
++ *value = sc_access[0].value & mask;
++ if (*value)
++ *value = MUTE;
++ else
++ *value = UNMUTE;
++ return retval;
++}
++
++static int mx_get_vol(int dev_id, int *value)
++{
++ struct sc_reg_access sc_access = {0,};
++ int retval = 0, mask = 0, num_reg = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ }
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ sc_access.reg_addr = 0x220;
++ mask = MASK0|MASK1|MASK2|MASK3|MASK4;
++ num_reg = 1;
++ break;
++ case PMIC_SND_LEFT_PB_VOL:
++ sc_access.reg_addr = VOL_CTRL_LT;
++ mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
++ num_reg = 1;
++ break;
++ case PMIC_SND_RIGHT_PB_VOL:
++ sc_access.reg_addr = VOL_CTRL_RT;
++ mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
++ num_reg = 1;
++ break;
++ }
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, num_reg);
++ if (retval)
++ return retval;
++ *value = -(sc_access.value & mask);
++ pr_debug("sst: get volume value extracted %d\n", *value);
++ return retval;
++}
++
++struct snd_pmic_ops snd_pmic_ops_mx = {
++ .set_input_dev = mx_set_selected_input_dev,
++ .set_output_dev = mx_set_selected_output_dev,
++ .set_mute = mx_set_mute,
++ .get_mute = mx_get_mute,
++ .set_vol = mx_set_vol,
++ .get_vol = mx_get_vol,
++ .init_card = mx_init_card,
++ .set_pcm_audio_params = mx_set_pcm_audio_params,
++ .set_pcm_voice_params = mx_set_pcm_voice_params,
++ .set_voice_port = mx_set_voice_port,
++ .set_audio_port = mx_set_audio_port,
++ .power_up_pmic_pb = mx_power_up_pb,
++ .power_up_pmic_cp = mx_power_up_cp,
++ .power_down_pmic_pb = mx_power_down_pb,
++ .power_down_pmic_cp = mx_power_down_cp,
++ .power_down_pmic = mx_power_down,
++};
++
+--- /dev/null
++++ b/sound/pci/sst/intelmid_v2_control.c
+@@ -0,0 +1,1001 @@
++/*
++ * intelmid_v2_control.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of vendor 3
++ */
++
++#include <linux/pci.h>
++#include <linux/file.h>
++#include <sound/intel_sst.h>
++#include "intelmid_snd_control.h"
++
++enum reg_v3 {
++ VAUDIOCNT = 0x51,
++ VOICEPORT1 = 0x100,
++ VOICEPORT2 = 0x101,
++ AUDIOPORT1 = 0x102,
++ AUDIOPORT2 = 0x103,
++ ADCSAMPLERATE = 0x104,
++ DMICCTRL1 = 0x105,
++ DMICCTRL2 = 0x106,
++ MICCTRL = 0x107,
++ MICSELVOL = 0x108,
++ LILSEL = 0x109,
++ LIRSEL = 0x10a,
++ VOICEVOL = 0x10b,
++ AUDIOLVOL = 0x10c,
++ AUDIORVOL = 0x10d,
++ LMUTE = 0x10e,
++ RMUTE = 0x10f,
++ POWERCTRL1 = 0x110,
++ POWERCTRL2 = 0x111,
++ DRVPOWERCTRL = 0x112,
++ VREFPLL = 0x113,
++ PCMBUFCTRL = 0x114,
++ SOFTMUTE = 0x115,
++ DTMFPATH = 0x116,
++ DTMFVOL = 0x117,
++ DTMFFREQ = 0x118,
++ DTMFHFREQ = 0x119,
++ DTMFLFREQ = 0x11a,
++ DTMFCTRL = 0x11b,
++ DTMFASON = 0x11c,
++ DTMFASOFF = 0x11d,
++ DTMFASINUM = 0x11e,
++ CLASSDVOL = 0x11f,
++ VOICEDACAVOL = 0x120,
++ AUDDACAVOL = 0x121,
++ LOMUTEVOL = 0x122,
++ HPLVOL = 0x123,
++ HPRVOL = 0x124,
++ MONOVOL = 0x125,
++ LINEOUTMIXVOL = 0x126,
++ EPMIXVOL = 0x127,
++ LINEOUTLSEL = 0x128,
++ LINEOUTRSEL = 0x129,
++ EPMIXOUTSEL = 0x12a,
++ HPLMIXSEL = 0x12b,
++ HPRMIXSEL = 0x12c,
++ LOANTIPOP = 0x12d,
++};
++
++/****
++ * nc_init_card - initilize the sound card
++ *
++ * This initilizes the audio paths to know values in case of this sound card
++ */
++static int nc_init_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {VAUDIOCNT, 0x25, 0},
++ {VOICEPORT1, 0x00, 0},
++ {VOICEPORT2, 0x00, 0},
++ {AUDIOPORT1, 0x98, 0},
++ {AUDIOPORT2, 0x09, 0},
++ {AUDIOLVOL, 0x00, 0},
++ {AUDIORVOL, 0x00, 0},
++ {LMUTE, 0x03, 0},
++ {RMUTE, 0x03, 0},
++ {POWERCTRL1, 0x00, 0},
++ {POWERCTRL2, 0x00, 0},
++ {DRVPOWERCTRL, 0x00, 0},
++ {VREFPLL, 0x10, 0},
++ {HPLMIXSEL, 0xee, 0},
++ {HPRMIXSEL, 0xf6, 0},
++ {PCMBUFCTRL, 0x0, 0},
++ {VOICEVOL, 0x0e, 0},
++ {HPLVOL, 0x06, 0},
++ {HPRVOL, 0x06, 0},
++ {MICCTRL, 0x41, 0x00},
++ {ADCSAMPLERATE, 0x8B, 0x00},
++ {MICSELVOL, 0x5B, 0x00},
++ {LILSEL, 0x06, 0},
++ {LIRSEL, 0x46, 0},
++ {LOANTIPOP, 0x00, 0},
++ {DMICCTRL1, 0x40, 0},
++ };
++ snd_pmic_ops_nc.card_status = SND_CARD_INIT_DONE;
++ snd_pmic_ops_nc.master_mute = UNMUTE;
++ snd_pmic_ops_nc.mute_status = UNMUTE;
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
++ pr_debug("sst: init complete!!\n");
++ return 0;
++}
++
++static int nc_enable_audiodac(int value)
++{
++ struct sc_reg_access sc_access[3];
++ int mute_val = 0;
++
++ if (snd_pmic_ops_nc.mute_status == MUTE)
++ return 0;
++
++ if (((snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE) ||
++ (snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)) &&
++ (value == UNMUTE))
++ return 0;
++ if (value == UNMUTE) {
++ /* unmute the system, set the 7th bit to zero */
++ mute_val = 0x00;
++ } else {
++ /* MUTE:Set the seventh bit */
++ mute_val = 0x04;
++
++ }
++ sc_access[0].reg_addr = LMUTE;
++ sc_access[1].reg_addr = RMUTE;
++ sc_access[0].mask = sc_access[1].mask = MASK2;
++ sc_access[0].value = sc_access[1].value = mute_val;
++
++ if (snd_pmic_ops_nc.num_channel == 1)
++ sc_access[1].value = 0x04;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++}
++
++static int nc_power_up_pb(unsigned int port)
++{
++ struct sc_reg_access sc_access[7];
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ if (port == 0xFF)
++ return 0;
++ nc_enable_audiodac(MUTE);
++ msleep(30);
++
++ pr_debug("sst: powering up pb....\n");
++
++ sc_access[0].reg_addr = VAUDIOCNT;
++ sc_access[0].value = 0x27;
++ sc_access[0].mask = 0x27;
++ sc_access[1].reg_addr = VREFPLL;
++ if (port == 0) {
++ sc_access[1].value = 0x3A;
++ sc_access[1].mask = 0x3A;
++ } else if (port == 1) {
++ sc_access[1].value = 0x35;
++ sc_access[1].mask = 0x35;
++ }
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ if (port == 0) {
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = 0x40;
++ } else if (port == 1) {
++ sc_access[0].value = 0x01;
++ sc_access[0].mask = 0x01;
++ }
++ sc_access[1].reg_addr = POWERCTRL2;
++ sc_access[1].value = 0x0C;
++ sc_access[1].mask = 0x0C;
++
++ sc_access[2].reg_addr = DRVPOWERCTRL;
++ sc_access[2].value = 0x86;
++ sc_access[2].mask = 0x86;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ msleep(30);
++
++ return nc_enable_audiodac(UNMUTE);
++
++}
++
++static int nc_power_up_cp(unsigned int port)
++{
++ struct sc_reg_access sc_access[5];
++ int retval = 0;
++
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++
++ pr_debug("sst: powering up cp....\n");
++
++ if (port == 0xFF)
++ return 0;
++ sc_access[0].reg_addr = VAUDIOCNT;
++ sc_access[0].value = 0x27;
++ sc_access[0].mask = 0x27;
++ sc_access[1].reg_addr = VREFPLL;
++ if (port == 0) {
++ sc_access[1].value = 0x3E;
++ sc_access[1].mask = 0x3E;
++ } else if (port == 1) {
++ sc_access[1].value = 0x35;
++ sc_access[1].mask = 0x35;
++ }
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ if (port == 0) {
++ sc_access[0].value = 0xB4;
++ sc_access[0].mask = 0xB4;
++ } else if (port == 1) {
++ sc_access[0].value = 0xBF;
++ sc_access[0].mask = 0xBF;
++ }
++ sc_access[1].reg_addr = POWERCTRL2;
++ if (port == 0) {
++ sc_access[1].value = 0x0C;
++ sc_access[1].mask = 0x0C;
++ } else if (port == 1) {
++ sc_access[1].value = 0x02;
++ sc_access[1].mask = 0x02;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++}
++
++static int nc_power_down(void)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[5];
++
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ nc_enable_audiodac(MUTE);
++
++
++ pr_debug("sst: powering dn nc_power_down ....\n");
++
++ msleep(30);
++
++ sc_access[0].reg_addr = DRVPOWERCTRL;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x00;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x00;
++
++ sc_access[1].reg_addr = POWERCTRL2;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = 0x00;
++
++
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
++
++ msleep(30);
++ sc_access[0].reg_addr = VREFPLL;
++ sc_access[0].value = 0x10;
++ sc_access[0].mask = 0x10;
++
++ sc_access[1].reg_addr = VAUDIOCNT;
++ sc_access[1].value = 0x25;
++ sc_access[1].mask = 0x25;
++
++
++ retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
++
++ msleep(30);
++ return nc_enable_audiodac(UNMUTE);
++}
++
++static int nc_power_down_pb(void)
++{
++
++ int retval = 0;
++ struct sc_reg_access sc_access[5];
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ pr_debug("sst: powering dn pb....\n");
++
++ nc_enable_audiodac(MUTE);
++
++
++ msleep(30);
++
++
++ sc_access[0].reg_addr = DRVPOWERCTRL;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x00;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
++
++ msleep(30);
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x41;
++
++ sc_access[1].reg_addr = POWERCTRL2;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = 0x0C;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ msleep(30);
++
++ return nc_enable_audiodac(UNMUTE);
++
++
++}
++
++static int nc_power_down_cp(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL1, 0x00, 0xBE},
++ {POWERCTRL2, 0x00, 0x02},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ pr_debug("sst: powering dn cp....\n");
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++static int nc_set_pcm_voice_params(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x100, 0xD5, 0},
++ {0x101, 0x08, 0},
++ {0x104, 0x03, 0},
++ {0x107, 0x10, 0},
++ {0x10B, 0x0E, 0},
++ {0x10E, 0x03, 0},
++ {0x10F, 0x03, 0},
++ {0x114, 0x13, 0},
++ {0x115, 0x00, 0},
++ {0x128, 0xFE, 0},
++ {0x129, 0xFE, 0},
++ {0x12A, 0xFE, 0},
++ {0x12B, 0xDE, 0},
++ {0x12C, 0xDE, 0},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
++ pr_debug("sst: Voice parameters set successfully!!\n");
++ return 0;
++}
++
++
++static int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
++{
++ int config2 = 0;
++ struct sc_reg_access sc_access;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ switch (sfreq) {
++ case 8000:
++ config2 = 0x00;
++ break;
++ case 11025:
++ config2 = 0x01;
++ break;
++ case 12000:
++ config2 = 0x02;
++ break;
++ case 16000:
++ config2 = 0x03;
++ break;
++ case 22050:
++ config2 = 0x04;
++ break;
++ case 24000:
++ config2 = 0x05;
++ break;
++ case 32000:
++ config2 = 0x07;
++ break;
++ case 44100:
++ config2 = 0x08;
++ break;
++ case 48000:
++ config2 = 0x09;
++ break;
++ }
++
++ snd_pmic_ops_nc.num_channel = num_channel;
++ if (snd_pmic_ops_nc.num_channel == 1) {
++
++ sc_access.value = 0x07;
++ sc_access.reg_addr = RMUTE;
++ pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_access.value);
++ sc_access.mask = MASK2;
++ sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++ } else {
++ sc_access.value = 0x00;
++ sc_access.reg_addr = RMUTE;
++ pr_debug("sst: RIGHT_HP_MUTE value %d\n", sc_access.value);
++ sc_access.mask = MASK2;
++ sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++
++
++ }
++
++ pr_debug("sst: word_size = %d\n", word_size);
++
++ if (word_size == 24) {
++ sc_access.reg_addr = AUDIOPORT2;
++ sc_access.value = config2 | 0x10;
++ sc_access.mask = 0x1F;
++ } else {
++ sc_access.value = config2;
++ sc_access.mask = 0x1F;
++ sc_access.reg_addr = AUDIOPORT2;
++ }
++ sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++
++ pr_debug("sst: word_size = %d\n", word_size);
++ sc_access.reg_addr = AUDIOPORT1;
++ sc_access.mask = MASK5|MASK4|MASK1|MASK0;
++ if (word_size == 16)
++ sc_access.value = 0x98;
++ else if (word_size == 24)
++ sc_access.value = 0xAB;
++
++ return sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++
++
++
++}
++
++static int nc_set_selected_output_dev(u8 value)
++{
++ struct sc_reg_access sc_access_HP[] = {
++ {LMUTE, 0x02, 0x06},
++ {RMUTE, 0x02, 0x06}
++ };
++ struct sc_reg_access sc_access_IS[] = {
++ {LMUTE, 0x04, 0x06},
++ {RMUTE, 0x04, 0x06}
++ };
++ int retval = 0;
++
++ snd_pmic_ops_nc.output_dev_id = value;
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ pr_debug("sst: nc set selected output:%d\n", value);
++ switch (value) {
++ case STEREO_HEADPHONE:
++ retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
++ break;
++ case INTERNAL_SPKR:
++ retval = sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 2);
++ break;
++ default:
++ pr_err("sst: rcvd illegal request: %d\n", value);
++ return -EINVAL;
++ }
++ return retval;
++}
++
++static int nc_audio_init(void)
++{
++ struct sc_reg_access sc_acces, sc_access[] = {
++ {0x100, 0x00, 0},
++ {0x101, 0x00, 0},
++ {0x104, 0x8B, 0},
++ {0x107, 0x11, 0},
++ {0x10B, 0x0E, 0},
++ {0x114, 0x00, 0},
++ {0x115, 0x00, 0},
++ {0x128, 0x00, 0},
++ {0x129, 0x00, 0},
++ {0x12A, 0x00, 0},
++ {0x12B, 0xee, 0},
++ {0x12C, 0xf6, 0},
++ };
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 12);
++ pr_debug("sst: Audio Init successfully!!\n");
++
++ /*set output device */
++ nc_set_selected_output_dev(snd_pmic_ops_nc.output_dev_id);
++
++ if (snd_pmic_ops_nc.num_channel == 1) {
++ sc_acces.value = 0x07;
++ sc_acces.reg_addr = RMUTE;
++ pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_acces.value);
++ sc_acces.mask = MASK2;
++ sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ } else {
++ sc_acces.value = 0x00;
++ sc_acces.reg_addr = RMUTE;
++ pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_acces.value);
++ sc_acces.mask = MASK2;
++ sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ }
++
++ return 0;
++}
++
++static int nc_set_audio_port(int status)
++{
++ struct sc_reg_access sc_access[2] = {{0,},};
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ if (status == DEACTIVATE) {
++ /* Deactivate audio port-tristate and power */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK4|MASK5;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else if (status == ACTIVATE) {
++ /* activate audio port */
++ nc_audio_init();
++ sc_access[0].value = 0x10;
++ sc_access[0].mask = MASK4|MASK5 ;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else
++ return -EINVAL;
++
++}
++
++static int nc_set_voice_port(int status)
++{
++ struct sc_reg_access sc_access[2] = {{0,},};
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ if (status == DEACTIVATE) {
++ /* Activate Voice port */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK4;
++ sc_access[0].reg_addr = VOICEPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else if (status == ACTIVATE) {
++ /* Deactivate voice port */
++ nc_set_pcm_voice_params();
++ sc_access[0].value = 0x10;
++ sc_access[0].mask = MASK4;
++ sc_access[0].reg_addr = VOICEPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else
++ return -EINVAL;
++}
++
++static int nc_set_mute(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[3];
++ u8 mute_val, cap_mute;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ pr_debug("sst: set device id::%d, value %d\n", dev_id, value);
++
++ switch (dev_id) {
++ case PMIC_SND_MUTE_ALL:
++ pr_debug("sst: PMIC_SND_MUTE_ALL value %d\n", value);
++ snd_pmic_ops_nc.mute_status = value;
++ snd_pmic_ops_nc.master_mute = value;
++ if (value == UNMUTE) {
++ /* unmute the system, set the 7th bit to zero */
++ mute_val = cap_mute = 0x00;
++ } else {
++ /* MUTE:Set the seventh bit */
++ mute_val = 0x80;
++ cap_mute = 0x40;
++ }
++ sc_access[0].reg_addr = AUDIOLVOL;
++ sc_access[1].reg_addr = AUDIORVOL;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ sc_access[0].value = sc_access[1].value = mute_val;
++ if (snd_pmic_ops_nc.num_channel == 1)
++ sc_access[1].value = 0x80;
++ if (!sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2)) {
++ sc_access[0].reg_addr = 0x109;
++ sc_access[1].reg_addr = 0x10a;
++ sc_access[2].reg_addr = 0x105;
++ sc_access[0].mask = sc_access[1].mask =
++ sc_access[2].mask = MASK6;
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = cap_mute;
++
++ if ((snd_pmic_ops_nc.input_dev_id == AMIC) ||
++ (snd_pmic_ops_nc.input_dev_id == DMIC))
++ sc_access[1].value = 0x40;
++ if (snd_pmic_ops_nc.input_dev_id == HS_MIC)
++ sc_access[0].value = 0x40;
++ retval = sst_sc_reg_access(sc_access,
++ PMIC_READ_MODIFY, 3);
++ }
++ break;
++ case PMIC_SND_HP_MIC_MUTE:
++ pr_debug("sst: PMIC_SND_HPMIC_MUTE value %d\n", value);
++ if (value == UNMUTE) {
++ /* unmute the system, set the 6th bit to one */
++ sc_access[0].value = 0x00;
++ } else {
++ /* mute the system, reset the 6th bit to zero */
++ sc_access[0].value = 0x40;
++ }
++ sc_access[0].reg_addr = LIRSEL;
++ sc_access[0].mask = MASK6;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ break;
++ case PMIC_SND_AMIC_MUTE:
++ pr_debug("sst: PMIC_SND_AMIC_MUTE value %d\n", value);
++ if (value == UNMUTE) {
++ /* unmute the system, set the 6th bit to one */
++ sc_access[0].value = 0x00;
++ } else {
++ /* mute the system, reset the 6th bit to zero */
++ sc_access[0].value = 0x40;
++ }
++ sc_access[0].reg_addr = LILSEL;
++ sc_access[0].mask = MASK6;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ break;
++
++ case PMIC_SND_DMIC_MUTE:
++ pr_debug("sst: INPUT_MUTE_DMIC value%d\n", value);
++ if (value == UNMUTE) {
++ /* unmute the system, set the 6th bit to one */
++ sc_access[1].value = 0x00;
++ sc_access[0].value = 0x00;
++ } else {
++ /* mute the system, reset the 6th bit to zero */
++ sc_access[1].value = 0x40;
++ sc_access[0].value = 0x40;
++ }
++ sc_access[0].reg_addr = DMICCTRL1;
++ sc_access[0].mask = MASK6;
++ sc_access[1].reg_addr = LILSEL;
++ sc_access[1].mask = MASK6;
++ retval = sst_sc_reg_access(sc_access,
++ PMIC_READ_MODIFY, 2);
++ break;
++
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_RIGHT_HP_MUTE:
++ snd_pmic_ops_nc.mute_status = value;
++ if (value == UNMUTE)
++ sc_access[0].value = 0x0;
++ else
++ sc_access[0].value = 0x04;
++
++ if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
++ sc_access[0].reg_addr = LMUTE;
++ pr_debug("sst: LEFT_HP_MUTE value %d\n",
++ sc_access[0].value);
++ } else {
++ if (snd_pmic_ops_nc.num_channel == 1)
++ sc_access[0].value = 0x04;
++ sc_access[0].reg_addr = RMUTE;
++ pr_debug("sst: RIGHT_HP_MUTE value %d\n",
++ sc_access[0].value);
++ }
++ sc_access[0].mask = MASK2;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ break;
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ if (value == UNMUTE)
++ sc_access[0].value = 0x00;
++ else
++ sc_access[0].value = 0x03;
++ sc_access[0].reg_addr = LMUTE;
++ pr_debug("sst: SPEAKER_MUTE %d\n", sc_access[0].value);
++ sc_access[0].mask = MASK1;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ break;
++ default:
++ return -EINVAL;
++ }
++ return retval ;
++
++}
++
++static int nc_set_vol(int dev_id, int value)
++{
++ struct sc_reg_access sc_access[3];
++ int retval = 0, entries = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ pr_debug("sst: set volume:%d\n", dev_id);
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ pr_debug("sst: PMIC_SND_CAPTURE_VOL:value::%d\n", value);
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = -value;
++ sc_access[0].mask = sc_access[1].mask = sc_access[2].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ sc_access[0].reg_addr = 0x10a;
++ sc_access[1].reg_addr = 0x109;
++ sc_access[2].reg_addr = 0x105;
++ entries = 3;
++ break;
++
++ case PMIC_SND_LEFT_PB_VOL:
++ pr_debug("sst: PMIC_SND_LEFT_HP_VOL %d\n", value);
++ sc_access[0].value = -value;
++ sc_access[0].reg_addr = AUDIOLVOL;
++ sc_access[0].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ entries = 1;
++ break;
++
++ case PMIC_SND_RIGHT_PB_VOL:
++ pr_debug("sst: PMIC_SND_RIGHT_HP_VOL value %d\n", value);
++ if (snd_pmic_ops_nc.num_channel == 1) {
++ sc_access[0].value = 0x04;
++ sc_access[0].reg_addr = RMUTE;
++ sc_access[0].mask = MASK2;
++ } else {
++ sc_access[0].value = -value;
++ sc_access[0].reg_addr = AUDIORVOL;
++ sc_access[0].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ entries = 1;
++ }
++ break;
++
++ default:
++ return -EINVAL;
++
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, entries);
++}
++
++static int nc_set_selected_input_dev(u8 value)
++{
++ struct sc_reg_access sc_access[6];
++ u8 num_val;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ snd_pmic_ops_nc.input_dev_id = value;
++
++ pr_debug("sst: nc set selected input:%d\n", value);
++
++ switch (value) {
++ case AMIC:
++ pr_debug("sst: Selecting AMIC\n");
++ sc_access[0].reg_addr = 0x107;
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
++ sc_access[1].reg_addr = 0x10a;
++ sc_access[1].value = 0x40;
++ sc_access[1].mask = MASK6;
++ sc_access[2].reg_addr = 0x109;
++ sc_access[2].value = 0x00;
++ sc_access[2].mask = MASK6;
++ sc_access[3].reg_addr = 0x105;
++ sc_access[3].value = 0x40;
++ sc_access[3].mask = MASK6;
++ num_val = 4;
++ break;
++
++ case HS_MIC:
++ pr_debug("sst: Selecting HS_MIC\n");
++ sc_access[0].reg_addr = 0x107;
++ sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
++ sc_access[0].value = 0x10;
++ sc_access[1].reg_addr = 0x109;
++ sc_access[1].mask = MASK6;
++ sc_access[1].value = 0x40;
++ sc_access[2].reg_addr = 0x10a;
++ sc_access[2].mask = MASK6;
++ sc_access[2].value = 0x00;
++ sc_access[3].reg_addr = 0x105;
++ sc_access[3].value = 0x40;
++ sc_access[3].mask = MASK6;
++ num_val = 4;
++ break;
++
++ case DMIC:
++ pr_debug("sst: DMIC\n");
++ sc_access[0].reg_addr = 0x107;
++ sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
++ sc_access[0].value = 0x0B;
++ sc_access[1].reg_addr = 0x105;
++ sc_access[1].value = 0x80;
++ sc_access[1].mask = MASK7|MASK6;
++ sc_access[2].reg_addr = 0x10a;
++ sc_access[2].value = 0x40;
++ sc_access[2].mask = MASK6;
++ sc_access[3].reg_addr = 0x109;
++ sc_access[3].mask = MASK6;
++ sc_access[3].value = 0x40;
++ num_val = 4;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_val);
++}
++
++static int nc_get_mute(int dev_id, u8 *value)
++{
++ int retval = 0, mask = 0;
++ struct sc_reg_access sc_access = {0,};
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ pr_debug("sst: get mute::%d\n", dev_id);
++
++ switch (dev_id) {
++ case PMIC_SND_AMIC_MUTE:
++ pr_debug("sst: PMIC_SND_INPUT_MUTE_MIC1\n");
++ sc_access.reg_addr = LILSEL;
++ mask = MASK6;
++ break;
++ case PMIC_SND_HP_MIC_MUTE:
++ pr_debug("sst: PMIC_SND_INPUT_MUTE_MIC2\n");
++ sc_access.reg_addr = LIRSEL;
++ mask = MASK6;
++ break;
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_RIGHT_HP_MUTE:
++ mask = MASK2;
++ pr_debug("sst: PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
++ if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
++ sc_access.reg_addr = RMUTE;
++ else
++ sc_access.reg_addr = LMUTE;
++ break;
++
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ pr_debug("sst: PMIC_MONO_EARPIECE_MUTE\n");
++ sc_access.reg_addr = RMUTE;
++ mask = MASK1;
++ break;
++ case PMIC_SND_DMIC_MUTE:
++ pr_debug("sst: PMIC_SND_INPUT_MUTE_DMIC\n");
++ sc_access.reg_addr = 0x105;
++ mask = MASK6;
++ break;
++ default:
++ return -EINVAL;
++
++ }
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
++ pr_debug("sst: reg value = %d\n", sc_access.value);
++ if (retval)
++ return retval;
++ *value = (sc_access.value) & mask;
++ pr_debug("sst: masked value = %d\n", *value);
++ if (*value)
++ *value = 0;
++ else
++ *value = 1;
++ pr_debug("sst: value returned = 0x%x\n", *value);
++ return retval;
++}
++
++static int nc_get_vol(int dev_id, int *value)
++{
++ int retval = 0, mask = 0;
++ struct sc_reg_access sc_access = {0,};
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ pr_debug("sst: PMIC_SND_INPUT_CAPTURE_VOL\n");
++ sc_access.reg_addr = LILSEL;
++ mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ break;
++
++ case PMIC_SND_RIGHT_PB_VOL:
++ pr_debug("sst: GET_VOLUME_PMIC_LEFT_HP_VOL\n");
++ sc_access.reg_addr = AUDIOLVOL;
++ mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ break;
++
++ case PMIC_SND_LEFT_PB_VOL:
++ pr_debug("sst: GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
++ sc_access.reg_addr = AUDIORVOL;
++ mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ break;
++
++ default:
++ return -EINVAL;
++
++ }
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
++ pr_debug("sst: value read = 0x%x\n", sc_access.value);
++ *value = -((sc_access.value) & mask);
++ pr_debug("sst: get vol value returned = %d\n", *value);
++ return retval;
++}
++
++struct snd_pmic_ops snd_pmic_ops_nc = {
++ .set_input_dev = nc_set_selected_input_dev,
++ .set_output_dev = nc_set_selected_output_dev,
++ .set_mute = nc_set_mute,
++ .get_mute = nc_get_mute,
++ .set_vol = nc_set_vol,
++ .get_vol = nc_get_vol,
++ .init_card = nc_init_card,
++ .set_pcm_audio_params = nc_set_pcm_audio_params,
++ .set_pcm_voice_params = nc_set_pcm_voice_params,
++ .set_voice_port = nc_set_voice_port,
++ .set_audio_port = nc_set_audio_port,
++ .power_up_pmic_pb = nc_power_up_pb,
++ .power_up_pmic_cp = nc_power_up_cp,
++ .power_down_pmic_pb = nc_power_down_pb,
++ .power_down_pmic_cp = nc_power_down_cp,
++ .power_down_pmic = nc_power_down,
++};
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-pending.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-pending.patch
new file mode 100644
index 0000000000..b7a28d1dec
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-pending.patch
@@ -0,0 +1,26 @@
+--- linux-2.6.34/drivers/staging/mrst/drv/psb_intel_display2.c~ 2010-07-26 11:39:36.000000000 -0400
++++ linux-2.6.34/drivers/staging/mrst/drv/psb_intel_display2.c 2010-07-26 12:17:17.904000652 -0400
+@@ -798,7 +798,7 @@
+
+ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
+ {
+- const struct mrst_limit_t *limit;
++ const struct mrst_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
+index a918553..4759062 100644
+--- a/arch/x86/pci/mmconfig-shared.c
++++ b/arch/x86/pci/mmconfig-shared.c
+@@ -480,6 +480,10 @@ static void __init pci_mmcfg_reject_broken(int early)
+ {
+ struct pci_mmcfg_region *cfg;
+
++#ifdef CONFIG_X86_MRST
++ return;
++#endif
++
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ int valid = 0;
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-revert-mmc-hacks.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-revert-mmc-hacks.patch
new file mode 100644
index 0000000000..af565e8d8d
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-ac-revert-mmc-hacks.patch
@@ -0,0 +1,403 @@
+reverted:
+
+commit 9502a9dbda0d5ba7c9785c8d2a70fef6e2a205d4
+Author: Yunpeng Gao <yunpeng.gao@intel.com>
+Date: Fri Aug 6 12:34:21 2010 +0100
+
+ From c17ae9aa3afa9fcb357337e3abd941700884f0b6 Mon Sep 17 00:00:00 2001
+ Subject: [PATCH] Implement Dekker algorithm as two new callbacks in mmc_host_ops structure
+
+ In Medfield, the SCU cpu doesn't have any local storage. And the IA cpu can
+ not access eMMC0 boot partition due to hardware protection. So the SCU cpu has to access
+ eMMC0 device (pci device id 0x0823) boot partition for security requirments
+ during IA cpu running.
+
+ To avoid the concurrent eMMC0 host accessing, the Dekker algorithm has been
+ introduced. Both SCU side and IA side should implemented it respectively. And
+ this patch implemented it on IA side.
+
+ The Dekker algorithm needs 3 shared SRAM variables. These address of these
+ variables were get by IPC call.
+
+ Signed-off-by: Yunpeng Gao <yunpeng.gao@intel.com>
+
+--- b/drivers/mmc/host/sdhci-pci.c
++++ a/drivers/mmc/host/sdhci-pci.c
+@@ -22,7 +22,6 @@
+
+ #include <asm/scatterlist.h>
+ #include <asm/io.h>
+-#include <asm/intel_scu_ipc.h>
+
+ #include "sdhci.h"
+
+@@ -393,57 +392,6 @@
+ .probe = single_slot
+ };
+
+-/*
+- * Get the base address in shared SRAM for eMMC mutex
+- * (Dekker's algorithm) through IPC call.
+- *
+- * Please note it'll always return 0 whether the address requesting
+- * success or not. So, the mmc driver will still work well if the scu
+- * firmware is not ready yet.
+-*/
+-static int mfld_sdio3_probe_slot(struct sdhci_pci_slot *slot)
+-{
+- u32 mutex_base_addr = 0;
+- int ret = -EIO; /* Assume IPC call fails */
+-
+- /*
+- * Currently, the SCU firmware and interface in IPC driver is
+- * not ready yet. So just disable it by always set 'ret = -EIO' here.
+- * Will submit a patch to enable it once the SCU firmware and
+- * IPC driver interface is ready.
+- */
+- /* ret = intel_scu_ipc_get_emmc_mutex_addr(&mutex_base_addr); */
+- if (ret) {
+- dev_err(&slot->chip->pdev->dev, "IPC error: %d\n", ret);
+- slot->host->sram_addr = 0;
+- } else {
+- /* 3 housekeeping mutex variables, 12 bytes length */
+- slot->host->sram_addr = ioremap_nocache(mutex_base_addr, 16);
+- if (!slot->host->sram_addr) {
+- dev_err(&slot->chip->pdev->dev, "ioremap failed!\n");
+- } else {
+- dev_info(&slot->chip->pdev->dev, "mapped addr: %p\n",
+- slot->host->sram_addr);
+- dev_info(&slot->chip->pdev->dev, "current eMMC owner:"
+- " %d, IA req: %d, SCU req: %d\n",
+- readl(slot->host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+- readl(slot->host->sram_addr + DEKKER_IA_REQ_OFFSET),
+- readl(slot->host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+- }
+- }
+-
+- return 0;
+-}
+-
+-static void mfld_sdio3_remove_slot(struct sdhci_pci_slot *slot, int dead)
+-{
+- if (dead)
+- return;
+-
+- if (slot->host->sram_addr)
+- iounmap(slot->host->sram_addr);
+-}
+-
+ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
+ .quirks = SDHCI_QUIRK_MFD_SD_RESTRICTION |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+@@ -454,14 +402,6 @@
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ };
+
+-static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio3 = {
+- .quirks = SDHCI_QUIRK_NEED_DEKKER_MUTEX |
+- SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION |
+- SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+- .probe_slot = mfld_sdio3_probe_slot,
+- .remove_slot = mfld_sdio3_remove_slot,
+-};
+-
+ static const struct pci_device_id pci_ids[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+@@ -588,7 +528,7 @@
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio3,
+ },
+
+ {
+@@ -754,7 +694,9 @@
+ {
+ struct sdhci_pci_slot *slot;
+ struct sdhci_host *host;
++
+ resource_size_t addr;
++
+ int ret;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+reverted:
+--- b/drivers/mmc/host/sdhci.c
++++ a/drivers/mmc/host/sdhci.c
+@@ -1299,147 +1299,11 @@
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+-/*
+- * One of the Medfield eMMC controller (PCI device id 0x0823, SDIO3) is
+- * a shared resource used by the SCU and the IA processors. SCU primarily
+- * uses the eMMC host controller to access the eMMC device's Boot Partition,
+- * while the IA CPU uses the eMMC host controller to access the eMMC device's
+- * User Partition.
+- *
+- * After the SCU hands off the system to the IA processor, the IA processor assumes
+- * ownership to the eMMC host controller. Due to absence of any arbitration at the
+- * eMMC host controller, this could result in concurrent eMMC host accesses resulting in
+- * bus contention and garbage data ending up in either of the partitions.
+- *
+- * To circumvent this from happening, eMMC host controller locking mechanism
+- * is employed, where at any one given time, only one agent, SCU or IA, may be
+- * allowed to access the host. This is achieved by implementing Dekker's Algorithm
+- * (http://en.wikipedia.org/wiki/Dekker's_algorithm) between the two processors.
+- *
+- * Before handing off the system to the IA processor, SCU must set up three
+- * housekeeping mutex variables allocated in the shared SRAM as follows:
+- *
+- * eMMC_Owner = IA (SCU and IA processors - RW, 32bit)
+- * IA_Req = FALSE (IA -RW, SCU - RO, 32bit)
+- * SCU_Req = FALSE (IA - RO, SCU - R/W, 32bit)
+- *
+- * There is no hardware based access control to these variables and so code executing
+- * on SCU and IA processors must follow below access rules (Dekker's algorithm):
+- *
+- * -----------------------------------------
+- * SCU Processor Implementation
+- * -----------------------------------------
+- * SCU_Req = TRUE;
+- * while (IA_Req == TRUE) {
+- * if (eMMC_Owner != SCU){
+- * SCU_Req = FALSE;
+- * while (eMMC_Owner != SCU);
+- * SCU_Req = TRUE;
+- * }
+- * }
+- * // SCU now performs eMMC transactions here
+- * ...
+- * // When done, relinquish control to IA
+- * eMMC_Owner = IA;
+- * SCU_Req = FALSE;
+- *
+- * -----------------------------------------
+- * IA Processor Implementation
+- * -----------------------------------------
+- * IA_Req = TRUE;
+- * while (SCU_Req == TRUE) {
+- * if (eMMC_Owner != IA){
+- * IA_Req = FALSE;
+- * while (eMMC_Owner != IA);
+- * IA_Req = TRUE;
+- * }
+- * }
+- * //IA now performs eMMC transactions here
+- * ¡­
+- * //When done, relinquish control to SCU
+- * eMMC_Owner = SCU;
+- * IA_Req = FALSE;
+- *
+- * ----------------------------------------
+-*/
+-
+-/* Implement the Dekker's algorithm on the IA process side */
+-static int sdhci_acquire_ownership(struct mmc_host *mmc)
+-{
+- struct sdhci_host *host;
+- unsigned long t1, t2;
+-
+- host = mmc_priv(mmc);
+-
+- if (!((host->quirks & SDHCI_QUIRK_NEED_DEKKER_MUTEX) && (host->sram_addr)))
+- return 0;
+-
+- DBG("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+- readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+- readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+- readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+-
+- writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+-
+- t1 = jiffies + 10 * HZ;
+- t2 = 500;
+-
+- while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+- if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) != DEKKER_OWNER_IA) {
+- writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+- while (t2) {
+- if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) == DEKKER_OWNER_IA)
+- break;
+- msleep(10);
+- t2--;
+- }
+- if (t2) {
+- writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+- } else {
+- pr_err("eMMC mutex timeout (owner)!\n");
+- goto timeout;
+- }
+- }
+- if (time_after(jiffies, t1)) {
+- pr_err("eMMC mutex timeout (req)!\n");
+- goto timeout;
+- }
+- cpu_relax();
+- }
+-
+- return 0;
+-
+-timeout:
+- writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+- writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+- return -EBUSY;
+-}
+-
+-static void sdhci_release_ownership(struct mmc_host *mmc)
+-{
+- struct sdhci_host *host;
+-
+- host = mmc_priv(mmc);
+-
+- if (!((host->quirks & SDHCI_QUIRK_NEED_DEKKER_MUTEX) && (host->sram_addr)))
+- return;
+-
+- writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+- writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+-
+- DBG("Exit ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+- readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+- readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+- readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+-}
+-
+ static const struct mmc_host_ops sdhci_ops = {
+ .request = sdhci_request,
+ .set_ios = sdhci_set_ios,
+ .get_ro = sdhci_get_ro,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+- .acquire_ownership = sdhci_acquire_ownership,
+- .release_ownership = sdhci_release_ownership,
+ };
+
+ /*****************************************************************************\
+@@ -2025,7 +1889,6 @@
+ * Set host parameters.
+ */
+ mmc->ops = &sdhci_ops;
+-
+ if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
+ host->ops->set_clock && host->ops->get_min_clock)
+ mmc->f_min = host->ops->get_min_clock(host);
+reverted:
+--- b/drivers/mmc/host/sdhci.h
++++ a/drivers/mmc/host/sdhci.h
+@@ -250,21 +250,10 @@
+ /* Controller of Medfield specific restriction */
+ #define SDHCI_QUIRK_MFD_SD_RESTRICTION 0x40000000ULL
+ #define SDHCI_QUIRK_MFD_EMMC_SDIO_RESTRICTION 0x80000000ULL
+-/* One controller port will be accessed by driver and fw at the same time */
+-#define SDHCI_QUIRK_NEED_DEKKER_MUTEX 0x100000000ULL
+
+ int irq; /* Device IRQ */
+ void __iomem * ioaddr; /* Mapped address */
+
+- /* XXX: SCU/X86 mutex variables base address in shared SRAM */
+- void __iomem * sram_addr; /* Shared SRAM address */
+-
+-#define DEKKER_EMMC_OWNER_OFFSET 0
+-#define DEKKER_IA_REQ_OFFSET 0x04
+-#define DEKKER_SCU_REQ_OFFSET 0x08
+-#define DEKKER_OWNER_IA 0
+-#define DEKKER_OWNER_SCU 1
+-
+ const struct sdhci_ops *ops; /* Low level hw interface */
+
+ /* Internal data */
+@@ -316,6 +305,7 @@
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
++
+ struct sdhci_ops {
+ #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+ u32 (*read_l)(struct sdhci_host *host, int reg);
+reverted:
+--- b/include/linux/mmc/host.h
++++ a/include/linux/mmc/host.h
+@@ -111,10 +111,6 @@
+
+ /* optional callback for HC quirks */
+ void (*init_card)(struct mmc_host *host, struct mmc_card *card);
+-
+- /* optional callback for HC mutex (Dekker algorithm) */
+- int (*acquire_ownership)(struct mmc_host *host);
+- void (*release_ownership)(struct mmc_host *host);
+ };
+
+ struct mmc_card;
+reverted:
+--- b/drivers/mmc/card/queue.c
++++ a/drivers/mmc/card/queue.c
+@@ -42,29 +42,6 @@
+ return BLKPREP_OK;
+ }
+
+-static void acquire_ownership(struct mmc_queue *mq)
+-{
+- struct mmc_card *card = mq->card;
+- struct mmc_host *host = card->host;
+-
+- mmc_claim_host(host);
+- if (host->ops->acquire_ownership)
+- host->ops->acquire_ownership(host);
+- mmc_release_host(host);
+-}
+-
+-static void release_ownership(struct mmc_queue *mq)
+-{
+- struct mmc_card *card = mq->card;
+- struct mmc_host *host = card->host;
+-
+- mmc_claim_host(host);
+- if (host->ops->release_ownership)
+- host->ops->release_ownership(host);
+- mmc_release_host(host);
+-}
+-
+-
+ static int mmc_queue_thread(void *d)
+ {
+ struct mmc_queue *mq = d;
+@@ -89,14 +66,12 @@
+ break;
+ }
+ up(&mq->thread_sem);
+- release_ownership(mq);
+ schedule();
+ down(&mq->thread_sem);
+ continue;
+ }
++ set_current_state(TASK_RUNNING);
+
+- set_current_state(TASK_RUNNING);
+- acquire_ownership(mq);
+ mq->issue_fn(mq, req);
+ } while (1);
+ up(&mq->thread_sem);
+reverted:
+--- b/drivers/mmc/core/core.c
++++ a/drivers/mmc/core/core.c
+@@ -1072,9 +1072,6 @@
+ u32 ocr;
+ int err;
+
+- if (host->ops->acquire_ownership)
+- host->ops->acquire_ownership(host);
+-
+ mmc_bus_get(host);
+
+ /* if there is a card registered, check whether it is still present */
+@@ -1147,9 +1144,6 @@
+ out:
+ if (host->caps & MMC_CAP_NEEDS_POLL)
+ mmc_schedule_delayed_work(&host->detect, HZ);
+-
+- if (host->ops->release_ownership)
+- host->ops->release_ownership(host);
+ }
+
+ void mmc_start_host(struct mmc_host *host)
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-dont-skew-the-tick.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-dont-skew-the-tick.patch
new file mode 100644
index 0000000000..e329ea1c1f
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-dont-skew-the-tick.patch
@@ -0,0 +1,33 @@
+Subject: [patch] Remove the per cpu tick skew
+
+Historically, Linux has tried to make the regular timer tick on the various
+CPUs not happen at the same time, to avoid contention on xtime_lock.
+
+Nowadays, with the tickless kernel, this contention no longer happens
+since time keeping and updating are done differently. In addition,
+this skew is actually hurting power consumption in a measurable
+way on many-core systems.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+
+--- linux.trees.git/kernel/time/tick-sched.c~ 2010-07-16 09:40:50.000000000 -0400
++++ linux.trees.git/kernel/time/tick-sched.c 2010-07-26 11:18:51.138003329 -0400
+@@ -780,7 +780,6 @@
+ {
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ ktime_t now = ktime_get();
+- u64 offset;
+
+ /*
+ * Emulate tick processing via per-CPU hrtimers:
+@@ -790,10 +789,6 @@
+
+ /* Get the next period (per cpu) */
+ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
+- offset = ktime_to_ns(tick_period) >> 1;
+- do_div(offset, num_possible_cpus());
+- offset *= smp_processor_id();
+- hrtimer_add_expires_ns(&ts->sched_timer, offset);
+
+ for (;;) {
+ hrtimer_forward(&ts->sched_timer, now, tick_period);
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-make-gma600-work-on-IA.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-make-gma600-work-on-IA.patch
new file mode 100644
index 0000000000..c01e1f68c3
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-make-gma600-work-on-IA.patch
@@ -0,0 +1,148 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Subject: Make the GMA600 driver work on IA platform kernels
+
+The GMA600 driver does not currently compile on IA kernels;
+this patch adds a few strategic ifdefs to make it at least compile
+so that people can test with it.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+
+diff --git a/drivers/staging/mrst/drv/psb_intel_dsi.c b/drivers/staging/mrst/drv/psb_intel_dsi.c
+index 30a3770..0e10a23 100644
+--- a/drivers/staging/mrst/drv/psb_intel_dsi.c
++++ b/drivers/staging/mrst/drv/psb_intel_dsi.c
+@@ -2236,10 +2236,12 @@ void mrst_dsi_init(struct drm_device *dev,
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
++#ifdef CONFIG_X86_MRST
+ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
+ aava_koski_dsi_init(dev, mode_dev);
+ return;
+ }
++#endif
+
+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+ if (!psb_intel_output)
+diff --git a/drivers/staging/mrst/drv/psb_intel_dsi2.c b/drivers/staging/mrst/drv/psb_intel_dsi2.c
+index cca74d0..3de2699 100644
+--- a/drivers/staging/mrst/drv/psb_intel_dsi2.c
++++ b/drivers/staging/mrst/drv/psb_intel_dsi2.c
+@@ -3521,13 +3521,13 @@ void mid_dsi_init(struct drm_device *dev,
+ /* GPIO control to reset MIP */
+ gpio_request(128, "gfx");
+ gpio_direction_output(128, 1);
+- __gpio_get_value(128);
++ gpio_get_value(128);
+ mode_dev->panel_fixed_mode = panel_fixed_mode;
+ } else {
+ /* GPIO control to reset MIP */
+ gpio_request(34, "gfx");
+ gpio_direction_output(34, 1);
+- __gpio_get_value(128);
++ gpio_get_value(128);
+ mode_dev->panel_fixed_mode2 = panel_fixed_mode;
+ }
+
+diff --git a/drivers/staging/mrst/drv/psb_intel_dsi_aava.c b/drivers/staging/mrst/drv/psb_intel_dsi_aava.c
+index 6c27305..3ea8637 100644
+--- a/drivers/staging/mrst/drv/psb_intel_dsi_aava.c
++++ b/drivers/staging/mrst/drv/psb_intel_dsi_aava.c
+@@ -152,6 +152,7 @@ static int dsi_wait_hs_ctrl_fifo(struct drm_device *dev)
+
+ static void dsi_set_backlight_state(int state)
+ {
++#ifdef CONFIG_X86_MRST
+ u8 addr[2], value[2];
+
+ addr[0] = 0x2a;
+@@ -171,6 +172,7 @@ static void dsi_set_backlight_state(int state)
+
+ intel_scu_ipc_iowrite8(addr[0], value[0]);
+ intel_scu_ipc_iowrite8(addr[1], value[1]);
++#endif
+ }
+
+
+@@ -225,10 +227,12 @@ static void dsi_set_panel_reset_state(int state)
+
+ value &= 0xbf;
+ #endif /*AAVA_EV_0_5*/
++#ifdef CONFIG_X86_MRST
+ if(intel_scu_ipc_iowrite8(addr, value)) {
+ printk("panel_reset_on: failed to write pmic reg 0xf4!\n");
+ return;
+ }
++#endif
+ #endif /*LINUX_VERSION_CODE*/
+ /* Minimum active time to trigger reset is 10us */
+ udelay(10);
+@@ -271,9 +275,11 @@ static void dsi_set_panel_reset_state(int state)
+
+ value |= 0x40;
+ #endif
++#ifdef CONFIG_X86_MRST
+ if (intel_scu_ipc_iowrite8(addr, value)) {
+ printk("panel_reset_off: failed to write pmic reg 0xe6!\n");
+ }
++#endif
+ #endif
+ /* Maximum startup time from reset is 120ms */
+ msleep(120);
+diff --git a/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+index 8b457c4..2feef07 100644
+--- a/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
++++ b/drivers/staging/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+@@ -1990,7 +1990,7 @@ PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
+ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
+ struct drm_device* dev = psDevInfo->psDrmDevice;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+- int pp_stat, ret;
++ int pp_stat;
+
+ if ((eNewPowerState == eCurrentPowerState) ||
+ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON))
+@@ -2021,6 +2021,8 @@ PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
+ /*turn off PLLs*/
+ PSB_WVDC32(0, MRST_DPLL_A);
+ } else {
++#ifdef CONFIG_X86_MRST
++ int ret;
+ if (dev_priv->dsi_prePowerState == NULL) {
+ PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
+ PSB_WVDC32(0x0, PIPEACONF);
+@@ -2040,6 +2042,7 @@ PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
+ #endif
+ if (ret)
+ printk(KERN_WARNING "IPC 0xE9 failed to turn off pnl pwr. Error is: %x\n", ret);
++#endif
+ }
+
+ return PVRSRV_OK;
+@@ -2054,7 +2057,6 @@ PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
+ struct drm_device* dev = psDevInfo->psDrmDevice;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_gtt *pg = dev_priv->pg;
+- int ret;
+
+ if ((eNewPowerState == eCurrentPowerState) ||
+ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
+@@ -2072,6 +2074,8 @@ PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
+ /*psb_gtt_init(dev_priv->pg, 1);*/
+
+ if (!dev_priv->iLVDS_enable) {
++#ifdef CONFIG_X86_MRST
++ int ret;
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
+ /* turn on mipi panel power */
+ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_ON, 0, 0);
+@@ -2081,8 +2085,8 @@ PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
+ if (ret)
+ printk(KERN_WARNING "IPC 0xE9 failed to turn on pnl pwr. Error is: %x\n", ret);
+ msleep(2000); /* wait 2 seconds */
++#endif
+ }
+-
+ restore_display_registers(dev);
+
+ if (!dev_priv->iLVDS_enable && dev_priv->dsi_postPowerState != NULL)
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-mrst-rtc.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-mrst-rtc.patch
new file mode 100644
index 0000000000..5e7e301369
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-mrst-rtc.patch
@@ -0,0 +1,28 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Subject: mrst: fix argument inverstion
+
+ipc_simple_command takes "subsystem, command" as arguments
+not "command, subsystem".
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+
+--- linux-2.6.35/drivers/rtc/rtc-mrst.c~ 2010-08-24 11:58:41.000000000 -0400
++++ linux-2.6.35/drivers/rtc/rtc-mrst.c 2010-08-24 12:41:01.150003160 -0400
+@@ -123,7 +123,7 @@
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+- ret = intel_scu_ipc_simple_command(IPC_CMD_VRTC_SETTIME, IPCMSG_VRTC);
++ ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME);
+ return ret;
+ }
+
+@@ -223,7 +223,7 @@
+
+ spin_unlock_irq(&rtc_lock);
+
+- ret = intel_scu_ipc_simple_command(IPC_CMD_VRTC_SETALARM, IPCMSG_VRTC);
++ ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETALARM);
+ if (ret)
+ return ret;
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-rc4-annotate-device-pm.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-rc4-annotate-device-pm.patch
new file mode 100644
index 0000000000..b41edede59
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-rc4-annotate-device-pm.patch
@@ -0,0 +1,224 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Subject: [PATCH v2] pm: Add runtime PM statistics
+
+In order for PowerTOP to be able to report how well the new runtime PM is
+working for the various drivers, the kernel needs to export some basic
+statistics in sysfs.
+
+This patch adds two sysfs files in the runtime PM domain that expose the
+total time a device has been active, and the time a device has been
+suspended.
+
+With this PowerTOP can compute the activity percentage
+
+Active %age = 100 * (delta active) / (delta active + delta suspended)
+
+and present the information to the user.
+
+I've written the PowerTOP code (slated for version 1.12) already, and the
+output looks like this:
+
+Runtime Device Power Management statistics
+Active Device name
+ 10.0% 06:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8101E/RTL8102E PCI Express Fast Ethernet controller
+
+
+[version 2: fix stat update bugs noticed by Alan Stern]
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index b0ec0e9..b78c401 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -123,6 +123,45 @@ int pm_runtime_idle(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(pm_runtime_idle);
+
++
++/**
++ * update_pm_runtime_accounting - Update the time accounting of power states
++ * @dev: Device to update the accounting for
++ *
++ * In order to be able to have time accounting of the various power states
++ * (as used by programs such as PowerTOP to show the effectiveness of runtime
++ * PM), we need to track the time spent in each state.
++ * update_pm_runtime_accounting must be called each time before the
++ * runtime_status field is updated, to account the time in the old state
++ * correctly.
++ */
++void update_pm_runtime_accounting(struct device *dev)
++{
++ unsigned long now = jiffies;
++ int delta;
++
++ delta = now - dev->power.accounting_timestamp;
++
++ if (delta < 0)
++ delta = 0;
++
++ dev->power.accounting_timestamp = now;
++
++ if (dev->power.disable_depth > 0)
++ return;
++
++ if (dev->power.runtime_status == RPM_SUSPENDED)
++ dev->power.suspended_jiffies += delta;
++ else
++ dev->power.active_jiffies += delta;
++}
++
++static void __update_runtime_status(struct device *dev, enum rpm_status status)
++{
++ update_pm_runtime_accounting(dev);
++ dev->power.runtime_status = status;
++}
++
+ /**
+ * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * @dev: Device to suspend.
+@@ -197,7 +236,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
+ goto repeat;
+ }
+
+- dev->power.runtime_status = RPM_SUSPENDING;
++ __update_runtime_status(dev, RPM_SUSPENDING);
+ dev->power.deferred_resume = false;
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
+@@ -228,7 +267,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
+ }
+
+ if (retval) {
+- dev->power.runtime_status = RPM_ACTIVE;
++ __update_runtime_status(dev, RPM_ACTIVE);
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ if (dev->power.timer_expires == 0)
+ notify = true;
+@@ -237,7 +276,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
+ pm_runtime_cancel_pending(dev);
+ }
+ } else {
+- dev->power.runtime_status = RPM_SUSPENDED;
++ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->parent) {
+@@ -381,7 +420,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
+ goto repeat;
+ }
+
+- dev->power.runtime_status = RPM_RESUMING;
++ __update_runtime_status(dev, RPM_RESUMING);
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
+ spin_unlock_irq(&dev->power.lock);
+@@ -411,10 +450,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
+ }
+
+ if (retval) {
+- dev->power.runtime_status = RPM_SUSPENDED;
++ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_cancel_pending(dev);
+ } else {
+- dev->power.runtime_status = RPM_ACTIVE;
++ __update_runtime_status(dev, RPM_ACTIVE);
+ if (parent)
+ atomic_inc(&parent->power.child_count);
+ }
+@@ -848,7 +887,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
+ }
+
+ out_set:
+- dev->power.runtime_status = status;
++ __update_runtime_status(dev, status);
+ dev->power.runtime_error = 0;
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+@@ -1077,6 +1116,7 @@ void pm_runtime_init(struct device *dev)
+ dev->power.request_pending = false;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.deferred_resume = false;
++ dev->power.accounting_timestamp = jiffies;
+ INIT_WORK(&dev->power.work, pm_runtime_work);
+
+ dev->power.timer_expires = 0;
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index a4c33bc..f45c316 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -6,6 +6,7 @@
+ #include <linux/string.h>
+ #include <linux/pm_runtime.h>
+ #include <asm/atomic.h>
++#include <linux/jiffies.h>
+ #include "power.h"
+
+ /*
+@@ -190,9 +191,34 @@ static ssize_t rtpm_status_show(struct device *dev,
+ return -EIO;
+ }
+
++static ssize_t rtpm_active_time_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int ret;
++ spin_lock_irq(&dev->power.lock);
++ update_pm_runtime_accounting(dev);
++ ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
++ spin_unlock_irq(&dev->power.lock);
++ return ret;
++}
++
++static ssize_t rtpm_suspended_time_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int ret;
++ spin_lock_irq(&dev->power.lock);
++ update_pm_runtime_accounting(dev);
++ ret = sprintf(buf, "%i\n",
++ jiffies_to_msecs(dev->power.suspended_jiffies));
++ spin_unlock_irq(&dev->power.lock);
++ return ret;
++}
++
+ static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+ static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+ static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
++static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
++static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+ #endif
+@@ -234,6 +260,8 @@ static struct attribute * power_attrs[] = {
+ &dev_attr_async.attr,
+ #ifdef CONFIG_PM_RUNTIME
+ &dev_attr_runtime_usage.attr,
++ &dev_attr_runtime_suspended_time.attr,
++ &dev_attr_runtime_active_time.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_enabled.attr,
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 8e258c7..dca597f 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -476,9 +476,15 @@ struct dev_pm_info {
+ enum rpm_request request;
+ enum rpm_status runtime_status;
+ int runtime_error;
++ unsigned long active_jiffies;
++ unsigned long suspended_jiffies;
++ unsigned long accounting_timestamp;
+ #endif
+ };
+
++extern void update_pm_runtime_accounting(struct device *dev);
++
++
+ /*
+ * The PM_EVENT_ messages are also used by drivers implementing the legacy
+ * suspend framework, based on the ->suspend() and ->resume() callbacks common
+
+ \ No newline at end of file
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-slab-timer.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-slab-timer.patch
new file mode 100644
index 0000000000..6f9437e99c
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-slab-timer.patch
@@ -0,0 +1,38 @@
+From 1889a44e1ae886cc6bf1ddc903251d4c8cd2b909 Mon Sep 17 00:00:00 2001
+From: arjan <arjan@localhost.localdomain>
+Date: Wed, 6 Jan 2010 10:52:08 -0500
+Subject: [PATCH] slab: use deferred timers for its periodic housekeeping
+
+slab has a "once every 2 second" timer for its housekeeping.
+As the number of logical processors is growing, its more and more
+common that this 2 second timer becomes the primary wakeup source.
+
+This patch turns this housekeeping timer into a deferred timer,
+which means that the timer does not interrupt idle, but just runs
+at the next event that wakes the cpu up.
+
+The impact is that the timer likely runs a bit later, but during the
+delay no code is running so there's not all that much reason for
+a difference in housekeeping to occur because of this delay.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ mm/slab.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index 7451bda..ef3cd3d 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -884,7 +884,7 @@ static void __cpuinit start_cpu_timer(int cpu)
+ */
+ if (keventd_up() && reap_work->work.func == NULL) {
+ init_reap_node(cpu);
+- INIT_DELAYED_WORK(reap_work, cache_reap);
++ INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
+ schedule_delayed_work_on(cpu, reap_work,
+ __round_jiffies_relative(HZ, cpu));
+ }
+--
+1.6.1.3
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-stable-cherry-picks.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-stable-cherry-picks.patch
new file mode 100644
index 0000000000..3dc5427b45
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.35-stable-cherry-picks.patch
@@ -0,0 +1,4289 @@
+diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
+index 9dcb11e..bf62c44 100644
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -158,15 +158,24 @@ struct pt_regs {
+ */
+ static inline int valid_user_regs(struct pt_regs *regs)
+ {
+- if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
+- regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+- return 1;
++ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
++
++ /*
++ * Always clear the F (FIQ) and A (delayed abort) bits
++ */
++ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
++
++ if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
++ if (mode == USR_MODE)
++ return 1;
++ if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
++ return 1;
+ }
+
+ /*
+ * Force CPSR to something logical...
+ */
+- regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
++ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+ if (!(elf_hwcap & HWCAP_26BIT))
+ regs->ARM_cpsr |= USR_MODE;
+
+diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
+index 827cbc4..ea9ee4e 100644
+--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
++++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
+@@ -100,6 +100,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+
+ static struct platform_nand_data ixdp425_flash_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 30,
+ .options = NAND_NO_AUTOINCR,
+ #ifdef CONFIG_MTD_PARTITIONS
+diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c
+index e5b5b83..1f9363f 100644
+--- a/arch/arm/mach-mx3/mach-qong.c
++++ b/arch/arm/mach-mx3/mach-qong.c
+@@ -169,6 +169,7 @@ static void qong_nand_select_chip(struct mtd_info *mtd, int chip)
+
+ static struct platform_nand_data qong_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 20,
+ .options = 0,
+ },
+diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
+index 5041d1b..696b1a9 100644
+--- a/arch/arm/mach-orion5x/ts78xx-setup.c
++++ b/arch/arm/mach-orion5x/ts78xx-setup.c
+@@ -216,6 +216,7 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = {
+
+ static struct platform_nand_data ts78xx_ts_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .part_probe_types = ts_nand_part_probes,
+ .partitions = ts78xx_ts_nand_parts,
+ .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts),
+diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
+index 9eaf5b0..68a27bc 100644
+--- a/arch/blackfin/mach-bf537/boards/stamp.c
++++ b/arch/blackfin/mach-bf537/boards/stamp.c
+@@ -400,6 +400,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
+
+ static struct platform_nand_data bfin_plat_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 30,
+ #ifdef CONFIG_MTD_PARTITIONS
+ .part_probe_types = part_probes,
+diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
+index bfcfa86..35b6d12 100644
+--- a/arch/blackfin/mach-bf561/boards/acvilon.c
++++ b/arch/blackfin/mach-bf561/boards/acvilon.c
+@@ -284,6 +284,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
+
+ static struct platform_nand_data bfin_plat_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 30,
+ #ifdef CONFIG_MTD_PARTITIONS
+ .part_probe_types = part_probes,
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index dcb0593..f942bb7 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -247,6 +247,11 @@ config ARCH_HWEIGHT_CFLAGS
+
+ config KTIME_SCALAR
+ def_bool X86_32
++
++config ARCH_CPU_PROBE_RELEASE
++ def_bool y
++ depends on HOTPLUG_CPU
++
+ source "init/Kconfig"
+ source "kernel/Kconfig.freezer"
+
+diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
+index c1cf59d..20955ea 100644
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -53,60 +53,33 @@ struct __xchg_dummy {
+ __xchg((v), (ptr), sizeof(*ptr))
+
+ /*
+- * The semantics of XCHGCMP8B are a bit strange, this is why
+- * there is a loop and the loading of %%eax and %%edx has to
+- * be inside. This inlines well in most cases, the cached
+- * cost is around ~38 cycles. (in the future we might want
+- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+- * might have an implicit FPU-save as a cost, so it's not
+- * clear which path to go.)
++ * CMPXCHG8B only writes to the target if we had the previous
++ * value in registers, otherwise it acts as a read and gives us the
++ * "new previous" value. That is why there is a loop. Preloading
++ * EDX:EAX is a performance optimization: in the common case it means
++ * we need only one locked operation.
+ *
+- * cmpxchg8b must be used with the lock prefix here to allow
+- * the instruction to be executed atomically, see page 3-102
+- * of the instruction set reference 24319102.pdf. We need
+- * the reader side to see the coherent 64bit value.
++ * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
++ * least an FPU save and/or %cr0.ts manipulation.
++ *
++ * cmpxchg8b must be used with the lock prefix here to allow the
++ * instruction to be executed atomically. We need to have the reader
++ * side to see the coherent 64bit value.
+ */
+-static inline void __set_64bit(unsigned long long *ptr,
+- unsigned int low, unsigned int high)
++static inline void set_64bit(volatile u64 *ptr, u64 value)
+ {
++ u32 low = value;
++ u32 high = value >> 32;
++ u64 prev = *ptr;
++
+ asm volatile("\n1:\t"
+- "movl (%1), %%eax\n\t"
+- "movl 4(%1), %%edx\n\t"
+- LOCK_PREFIX "cmpxchg8b (%1)\n\t"
++ LOCK_PREFIX "cmpxchg8b %0\n\t"
+ "jnz 1b"
+- : "=m" (*ptr)
+- : "D" (ptr),
+- "b" (low),
+- "c" (high)
+- : "ax", "dx", "memory");
+-}
+-
+-static inline void __set_64bit_constant(unsigned long long *ptr,
+- unsigned long long value)
+-{
+- __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
+-}
+-
+-#define ll_low(x) *(((unsigned int *)&(x)) + 0)
+-#define ll_high(x) *(((unsigned int *)&(x)) + 1)
+-
+-static inline void __set_64bit_var(unsigned long long *ptr,
+- unsigned long long value)
+-{
+- __set_64bit(ptr, ll_low(value), ll_high(value));
++ : "=m" (*ptr), "+A" (prev)
++ : "b" (low), "c" (high)
++ : "memory");
+ }
+
+-#define set_64bit(ptr, value) \
+- (__builtin_constant_p((value)) \
+- ? __set_64bit_constant((ptr), (value)) \
+- : __set_64bit_var((ptr), (value)))
+-
+-#define _set_64bit(ptr, value) \
+- (__builtin_constant_p(value) \
+- ? __set_64bit(ptr, (unsigned int)(value), \
+- (unsigned int)((value) >> 32)) \
+- : __set_64bit(ptr, ll_low((value)), ll_high((value))))
+-
+ extern void __cmpxchg_wrong_size(void);
+
+ /*
+diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
+index b92f147..9596e7c 100644
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -5,13 +5,11 @@
+
+ #define __xg(x) ((volatile long *)(x))
+
+-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
++static inline void set_64bit(volatile u64 *ptr, u64 val)
+ {
+ *ptr = val;
+ }
+
+-#define _set_64bit set_64bit
+-
+ extern void __xchg_wrong_size(void);
+ extern void __cmpxchg_wrong_size(void);
+
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index a96489e..c07e513 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void)
+ * acpi lapic path already maps that address in
+ * acpi_register_lapic_address()
+ */
+- if (!acpi_lapic)
++ if (!acpi_lapic && !smp_found_config)
+ set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+
+ apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index e41ed24..2b18af1 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
+ struct irq_pin_list *entry;
+
+ cfg = desc->chip_data;
++ if (!cfg)
++ continue;
+ entry = cfg->irq_2_pin;
+ if (!entry)
+ continue;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 214ac86..d8d86d0 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added)
+ * Intel Errata AAP53 (model 30)
+ * Intel Errata BD53 (model 44)
+ *
+- * These chips need to be 'reset' when adding counters by programming
+- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+- * either in sequence on the same PMC or on different PMCs.
++ * The official story:
++ * These chips need to be 'reset' when adding counters by programming the
++ * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
++ * in sequence on the same PMC or on different PMCs.
++ *
++ * In practise it appears some of these events do in fact count, and
++ * we need to programm all 4 events.
+ */
+-static void intel_pmu_nhm_enable_all(int added)
++static void intel_pmu_nhm_workaround(void)
+ {
+- if (added) {
+- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+- int i;
++ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
++ static const unsigned long nhm_magic[4] = {
++ 0x4300B5,
++ 0x4300D2,
++ 0x4300B1,
++ 0x4300B1
++ };
++ struct perf_event *event;
++ int i;
++
++ /*
++ * The Errata requires below steps:
++ * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
++ * 2) Configure 4 PERFEVTSELx with the magic events and clear
++ * the corresponding PMCx;
++ * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
++ * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
++ * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
++ */
++
++ /*
++ * The real steps we choose are a little different from above.
++ * A) To reduce MSR operations, we don't run step 1) as they
++ * are already cleared before this function is called;
++ * B) Call x86_perf_event_update to save PMCx before configuring
++ * PERFEVTSELx with magic number;
++ * C) With step 5), we do clear only when the PERFEVTSELx is
++ * not used currently.
++ * D) Call x86_perf_event_set_period to restore PMCx;
++ */
+
+- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
++ /* We always operate 4 pairs of PERF Counters */
++ for (i = 0; i < 4; i++) {
++ event = cpuc->events[i];
++ if (event)
++ x86_perf_event_update(event);
++ }
+
+- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
++ for (i = 0; i < 4; i++) {
++ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
++ wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
++ }
+
+- for (i = 0; i < 3; i++) {
+- struct perf_event *event = cpuc->events[i];
++ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
++ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+- if (!event)
+- continue;
++ for (i = 0; i < 4; i++) {
++ event = cpuc->events[i];
+
++ if (event) {
++ x86_perf_event_set_period(event);
+ __x86_pmu_enable_event(&event->hw,
+- ARCH_PERFMON_EVENTSEL_ENABLE);
+- }
++ ARCH_PERFMON_EVENTSEL_ENABLE);
++ } else
++ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
+ }
++}
++
++static void intel_pmu_nhm_enable_all(int added)
++{
++ if (added)
++ intel_pmu_nhm_workaround();
+ intel_pmu_enable_all(added);
+ }
+
+diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
+index ae85d69..0ffe19e 100644
+--- a/arch/x86/kernel/cpu/perf_event_p4.c
++++ b/arch/x86/kernel/cpu/perf_event_p4.c
+@@ -581,6 +581,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++ int overflow;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+@@ -591,12 +592,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
+ WARN_ON_ONCE(hwc->idx != idx);
+
+ /* it might be unflagged overflow */
+- handled = p4_pmu_clear_cccr_ovf(hwc);
++ overflow = p4_pmu_clear_cccr_ovf(hwc);
+
+ val = x86_perf_event_update(event);
+- if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
++ if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
+ continue;
+
++ handled += overflow;
++
+ /* event overflow for sure */
+ data.period = event->hw.last_period;
+
+@@ -612,7 +615,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
+ inc_irq_stat(apic_perf_irqs);
+ }
+
+- return handled;
++ return handled > 0;
+ }
+
+ /*
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index d86dbf7..d7b6f7f 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
+
+ void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
+
++static void __init smp_register_lapic_address(unsigned long address)
++{
++ mp_lapic_addr = address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, address);
++ if (boot_cpu_physical_apicid == -1U) {
++ boot_cpu_physical_apicid = read_apic_id();
++ apic_version[boot_cpu_physical_apicid] =
++ GET_APIC_VERSION(apic_read(APIC_LVR));
++ }
++}
++
+ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
+ {
+ char str[16];
+@@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
+ if (early)
+ return 1;
+
++ /* Initialize the lapic mapping */
++ if (!acpi_lapic)
++ smp_register_lapic_address(mpc->lapic);
++
+ if (mpc->oemptr)
+ x86_init.mpparse.smp_read_mpc_oem(mpc);
+
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 11015fd..0bf2ece 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -91,6 +91,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
+ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+ #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
+ #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
++
++/*
++ * We need this for trampoline_base protection from concurrent accesses when
++ * off- and onlining cores wildly.
++ */
++static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
++
++void cpu_hotplug_driver_lock()
++{
++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
++}
++
++void cpu_hotplug_driver_unlock()
++{
++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
++}
++
++ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
++ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
+ #else
+ static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+ #define get_idle_for_cpu(x) (idle_thread_array[(x)])
+diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
+index 4a5979a..78ee8e0 100644
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -25,150 +25,170 @@
+ CFI_ADJUST_CFA_OFFSET -4
+ .endm
+
+-.macro BEGIN func reg
+-$v = \reg
+-
+-ENTRY(atomic64_\func\()_386)
+- CFI_STARTPROC
+- LOCK $v
+-
+-.macro RETURN
+- UNLOCK $v
++#define BEGIN(op) \
++.macro END; \
++ CFI_ENDPROC; \
++ENDPROC(atomic64_##op##_386); \
++.purgem END; \
++.endm; \
++ENTRY(atomic64_##op##_386); \
++ CFI_STARTPROC; \
++ LOCK v;
++
++#define RET \
++ UNLOCK v; \
+ ret
+-.endm
+-
+-.macro END_
+- CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_386)
+-.purgem RETURN
+-.purgem END_
+-.purgem END
+-.endm
+-
+-.macro END
+-RETURN
+-END_
+-.endm
+-.endm
+-
+-BEGIN read %ecx
+- movl ($v), %eax
+- movl 4($v), %edx
+-END
+-
+-BEGIN set %esi
+- movl %ebx, ($v)
+- movl %ecx, 4($v)
+-END
+-
+-BEGIN xchg %esi
+- movl ($v), %eax
+- movl 4($v), %edx
+- movl %ebx, ($v)
+- movl %ecx, 4($v)
+-END
+-
+-BEGIN add %ecx
+- addl %eax, ($v)
+- adcl %edx, 4($v)
+-END
+
+-BEGIN add_return %ecx
+- addl ($v), %eax
+- adcl 4($v), %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
+-
+-BEGIN sub %ecx
+- subl %eax, ($v)
+- sbbl %edx, 4($v)
+-END
+-
+-BEGIN sub_return %ecx
++#define RET_END \
++ RET; \
++ END
++
++#define v %ecx
++BEGIN(read)
++ movl (v), %eax
++ movl 4(v), %edx
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(set)
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(xchg)
++ movl (v), %eax
++ movl 4(v), %edx
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(add)
++ addl %eax, (v)
++ adcl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(add_return)
++ addl (v), %eax
++ adcl 4(v), %edx
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(sub)
++ subl %eax, (v)
++ sbbl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(sub_return)
+ negl %edx
+ negl %eax
+ sbbl $0, %edx
+- addl ($v), %eax
+- adcl 4($v), %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
+-
+-BEGIN inc %esi
+- addl $1, ($v)
+- adcl $0, 4($v)
+-END
+-
+-BEGIN inc_return %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++ addl (v), %eax
++ adcl 4(v), %edx
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(inc)
++ addl $1, (v)
++ adcl $0, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(inc_return)
++ movl (v), %eax
++ movl 4(v), %edx
+ addl $1, %eax
+ adcl $0, %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
+-
+-BEGIN dec %esi
+- subl $1, ($v)
+- sbbl $0, 4($v)
+-END
+-
+-BEGIN dec_return %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(dec)
++ subl $1, (v)
++ sbbl $0, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(dec_return)
++ movl (v), %eax
++ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
+
+-BEGIN add_unless %ecx
++#define v %ecx
++BEGIN(add_unless)
+ addl %eax, %esi
+ adcl %edx, %edi
+- addl ($v), %eax
+- adcl 4($v), %edx
++ addl (v), %eax
++ adcl 4(v), %edx
+ cmpl %eax, %esi
+ je 3f
+ 1:
+- movl %eax, ($v)
+- movl %edx, 4($v)
++ movl %eax, (v)
++ movl %edx, 4(v)
+ movl $1, %eax
+ 2:
+-RETURN
++ RET
+ 3:
+ cmpl %edx, %edi
+ jne 1b
+ xorl %eax, %eax
+ jmp 2b
+-END_
++END
++#undef v
+
+-BEGIN inc_not_zero %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++#define v %esi
++BEGIN(inc_not_zero)
++ movl (v), %eax
++ movl 4(v), %edx
+ testl %eax, %eax
+ je 3f
+ 1:
+ addl $1, %eax
+ adcl $0, %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
++ movl %eax, (v)
++ movl %edx, 4(v)
+ movl $1, %eax
+ 2:
+-RETURN
++ RET
+ 3:
+ testl %edx, %edx
+ jne 1b
+ jmp 2b
+-END_
++END
++#undef v
+
+-BEGIN dec_if_positive %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++#define v %esi
++BEGIN(dec_if_positive)
++ movl (v), %eax
++ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ js 1f
+- movl %eax, ($v)
+- movl %edx, 4($v)
++ movl %eax, (v)
++ movl %edx, 4(v)
+ 1:
+-END
++RET_END
++#undef v
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index b28d2f1..f6b48f6 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -634,6 +634,18 @@ static int __init ppro_init(char **cpu_type)
+ if (force_arch_perfmon && cpu_has_arch_perfmon)
+ return 0;
+
++ /*
++ * Documentation on identifying Intel processors by CPU family
++ * and model can be found in the Intel Software Developer's
++ * Manuals (SDM):
++ *
++ * http://www.intel.com/products/processor/manuals/
++ *
++ * As of May 2010 the documentation for this was in the:
++ * "Intel 64 and IA-32 Architectures Software Developer's
++ * Manual Volume 3B: System Programming Guide", "Table B-1
++ * CPUID Signature Values of DisplayFamily_DisplayModel".
++ */
+ switch (cpu_model) {
+ case 0 ... 2:
+ *cpu_type = "i386/ppro";
+@@ -655,12 +667,13 @@ static int __init ppro_init(char **cpu_type)
+ case 15: case 23:
+ *cpu_type = "i386/core_2";
+ break;
++ case 0x1a:
++ case 0x1e:
+ case 0x2e:
+- case 26:
+ spec = &op_arch_perfmon_spec;
+ *cpu_type = "i386/core_i7";
+ break;
+- case 28:
++ case 0x1c:
+ *cpu_type = "i386/atom";
+ break;
+ default:
+diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
+index 864dd46..18645f4 100644
+--- a/drivers/acpi/apei/erst.c
++++ b/drivers/acpi/apei/erst.c
+@@ -33,6 +33,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/cper.h>
+ #include <linux/nmi.h>
++#include <linux/hardirq.h>
+ #include <acpi/apei.h>
+
+ #include "apei-internal.h"
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index a754715..d84af6c 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -25,6 +25,10 @@
+ #define USE_PCI_DMA_API 1
+ #endif
+
++/* Max amount of stolen space, anything above will be returned to Linux */
++int intel_max_stolen = 32 * 1024 * 1024;
++EXPORT_SYMBOL(intel_max_stolen);
++
+ static const struct aper_size_info_fixed intel_i810_sizes[] =
+ {
+ {64, 16384, 4},
+@@ -710,7 +714,12 @@ static void intel_i830_init_gtt_entries(void)
+ break;
+ }
+ }
+- if (gtt_entries > 0) {
++ if (!local && gtt_entries > intel_max_stolen) {
++ dev_info(&agp_bridge->dev->dev,
++ "detected %dK stolen memory, trimming to %dK\n",
++ gtt_entries / KB(1), intel_max_stolen / KB(1));
++ gtt_entries = intel_max_stolen / KB(4);
++ } else if (gtt_entries > 0) {
+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+ gtt_entries / KB(1), local ? "local" : "stolen");
+ gtt_entries /= KB(4);
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index f54dab8..a398ecd 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -916,7 +916,7 @@ static int __init chr_dev_init(void)
+ NULL, devlist[minor].name);
+ }
+
+- return 0;
++ return tty_init();
+ }
+
+ fs_initcall(chr_dev_init);
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index d71f0fc..507441a 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -3128,7 +3128,7 @@ static struct cdev tty_cdev, console_cdev;
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+-static int __init tty_init(void)
++int __init tty_init(void)
+ {
+ cdev_init(&tty_cdev, &tty_fops);
+ if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+@@ -3149,4 +3149,4 @@ static int __init tty_init(void)
+ #endif
+ return 0;
+ }
+-module_init(tty_init);
++
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 4a66201..c9736ed 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -502,7 +502,9 @@ long drm_ioctl(struct file *filp,
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+- }
++ } else
++ memset(kdata, 0, _IOC_SIZE(cmd));
++
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 2305a12..013a0ae 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -40,6 +40,8 @@
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
+
++extern int intel_max_stolen; /* from AGP driver */
++
+ /**
+ * Sets up the hardware status page for devices that need a physical address
+ * in the register.
+@@ -2104,6 +2106,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ if (ret)
+ goto out_iomapfree;
+
++ if (prealloc_size > intel_max_stolen) {
++ DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
++ prealloc_size >> 20, intel_max_stolen >> 20);
++ prealloc_size = intel_max_stolen;
++ }
++
+ dev_priv->wq = create_singlethread_workqueue("i915");
+ if (dev_priv->wq == NULL) {
+ DRM_ERROR("Failed to create our workqueue.\n");
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 8a84306..e9a4b12 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1502,6 +1502,7 @@ static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
++ POSTING_READ(DP_A);
+ udelay(200);
+ }
+
+@@ -4816,14 +4817,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ work->pending_flip_obj = obj;
+
+ if (intel_crtc->plane)
+- flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
++ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+- flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
++ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+- /* Wait for any previous flip to finish */
+- if (IS_GEN3(dev))
+- while (I915_READ(ISR) & flip_mask)
+- ;
++ if (IS_GEN3(dev) || IS_GEN2(dev)) {
++ BEGIN_LP_RING(2);
++ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
++ OUT_RING(0);
++ ADVANCE_LP_RING();
++ }
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = obj_priv->gtt_offset;
+@@ -4837,12 +4840,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ OUT_RING(offset | obj_priv->tiling_mode);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
+- } else {
++ } else if (IS_GEN3(dev)) {
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(offset);
+ OUT_RING(MI_NOOP);
++ } else {
++ OUT_RING(MI_DISPLAY_FLIP |
++ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
++ OUT_RING(fb->pitch);
++ OUT_RING(offset);
++ OUT_RING(MI_NOOP);
+ }
+ ADVANCE_LP_RING();
+
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 10673ae..6bfef51 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -206,6 +206,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ uint16_t *line_mux,
+ struct radeon_hpd *hpd)
+ {
++ struct radeon_device *rdev = dev->dev_private;
+
+ /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x791e) &&
+@@ -308,13 +309,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+ }
+
+- /* Acer laptop reports DVI-D as DVI-I */
++ /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
++ struct radeon_gpio_rec gpio;
++
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+- (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
++ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
++ gpio = radeon_lookup_gpio(rdev, 6);
++ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
++ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
++ gpio = radeon_lookup_gpio(rdev, 7);
++ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
++ }
+ }
+
+ /* XFX Pine Group device rv730 reports no VGA DDC lines
+@@ -1049,7 +1059,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+ }
+ break;
+ case 2:
+- if (igp_info->info_2.ucMemoryType & 0x0f)
++ if (igp_info->info_2.ulBootUpSidePortClock)
+ return true;
+ break;
+ default:
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index dd279da..a718463 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+- if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
++ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index 5def6f5..0cd2704 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -95,6 +95,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
+ }
+ }
+
++ /* switch the pads to ddc mode */
++ if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
++ temp = RREG32(rec->mask_clk_reg);
++ temp &= ~(1 << 16);
++ WREG32(rec->mask_clk_reg, temp);
++ }
++
+ /* clear the output pin values */
+ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, temp);
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 059bfa4..a108c7e 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
+ * chips. Disable MSI on them for now.
+ */
+ if ((rdev->family >= CHIP_RV380) &&
+- (!(rdev->flags & RADEON_IS_IGP))) {
++ (!(rdev->flags & RADEON_IS_IGP)) &&
++ (!(rdev->flags & RADEON_IS_AGP))) {
+ int ret = pci_enable_msi(rdev->pdev);
+ if (!ret) {
+ rdev->msi_enabled = 1;
+- DRM_INFO("radeon: using MSI.\n");
++ dev_info(rdev->dev, "radeon: using MSI.\n");
+ }
+ }
+ rdev->irq.installed = true;
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index ab389f8..b20379e 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -106,7 +106,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+
+ info = data;
+ value_ptr = (uint32_t *)((unsigned long)info->value);
+- value = *value_ptr;
++ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
++ return -EFAULT;
++
+ switch (info->request) {
+ case RADEON_INFO_DEVICE_ID:
+ value = dev->pci_device;
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index e1e5255..cf3a51f 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+ if (!ref_div)
+ return 1;
+
+- vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
++ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+ /*
+ * This is horribly crude: the VCO frequency range is divided into
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 3fa6984..c91b741 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -224,6 +224,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
+ {
+ int i;
+
++ /* no need to take locks, etc. if nothing's going to change */
++ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
++ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
++ return;
++
+ mutex_lock(&rdev->ddev->struct_mutex);
+ mutex_lock(&rdev->vram_mutex);
+ mutex_lock(&rdev->cp.mutex);
+diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
+index 4a64b85..68e69a4 100644
+--- a/drivers/hwmon/pc87360.c
++++ b/drivers/hwmon/pc87360.c
+@@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
+
+ static int __init pc87360_device_add(unsigned short address)
+ {
+- struct resource res = {
+- .name = "pc87360",
+- .flags = IORESOURCE_IO,
+- };
+- int err, i;
++ struct resource res[3];
++ int err, i, res_count;
+
+ pdev = platform_device_alloc("pc87360", address);
+ if (!pdev) {
+@@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address)
+ goto exit;
+ }
+
++ memset(res, 0, 3 * sizeof(struct resource));
++ res_count = 0;
+ for (i = 0; i < 3; i++) {
+ if (!extra_isa[i])
+ continue;
+- res.start = extra_isa[i];
+- res.end = extra_isa[i] + PC87360_EXTENT - 1;
++ res[res_count].start = extra_isa[i];
++ res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
++ res[res_count].name = "pc87360",
++ res[res_count].flags = IORESOURCE_IO,
+
+- err = acpi_check_resource_conflict(&res);
++ err = acpi_check_resource_conflict(&res[res_count]);
+ if (err)
+ goto exit_device_put;
+
+- err = platform_device_add_resources(pdev, &res, 1);
+- if (err) {
+- printk(KERN_ERR "pc87360: Device resource[%d] "
+- "addition failed (%d)\n", i, err);
+- goto exit_device_put;
+- }
++ res_count++;
++ }
++
++ err = platform_device_add_resources(pdev, res, res_count);
++ if (err) {
++ printk(KERN_ERR "pc87360: Device resources addition failed "
++ "(%d)\n", err);
++ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
+index 2b7907b..0bdb201 100644
+--- a/drivers/md/dm-exception-store.c
++++ b/drivers/md/dm-exception-store.c
+@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+
+ /* Validate the chunk size against the device block size */
+ if (chunk_size %
+- (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
++ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
++ chunk_size %
++ (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
+ *error = "Chunk size is not a multiple of device blocksize";
+ return -EINVAL;
+ }
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index e8dfa06..0b25362 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -126,8 +126,9 @@ struct dm_exception_store {
+ };
+
+ /*
+- * Obtain the cow device used by a given snapshot.
++ * Obtain the origin or cow device used by a given snapshot.
+ */
++struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
+ struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
+
+ /*
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index d7500e1..bb6bdc8 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -249,40 +249,50 @@ static void __hash_remove(struct hash_cell *hc)
+
+ static void dm_hash_remove_all(int keep_open_devices)
+ {
+- int i, dev_skipped, dev_removed;
++ int i, dev_skipped;
+ struct hash_cell *hc;
+- struct list_head *tmp, *n;
++ struct mapped_device *md;
++
++retry:
++ dev_skipped = 0;
+
+ down_write(&_hash_lock);
+
+-retry:
+- dev_skipped = dev_removed = 0;
+ for (i = 0; i < NUM_BUCKETS; i++) {
+- list_for_each_safe (tmp, n, _name_buckets + i) {
+- hc = list_entry(tmp, struct hash_cell, name_list);
++ list_for_each_entry(hc, _name_buckets + i, name_list) {
++ md = hc->md;
++ dm_get(md);
+
+- if (keep_open_devices &&
+- dm_lock_for_deletion(hc->md)) {
++ if (keep_open_devices && dm_lock_for_deletion(md)) {
++ dm_put(md);
+ dev_skipped++;
+ continue;
+ }
++
+ __hash_remove(hc);
+- dev_removed = 1;
+- }
+- }
+
+- /*
+- * Some mapped devices may be using other mapped devices, so if any
+- * still exist, repeat until we make no further progress.
+- */
+- if (dev_skipped) {
+- if (dev_removed)
+- goto retry;
++ up_write(&_hash_lock);
+
+- DMWARN("remove_all left %d open device(s)", dev_skipped);
++ dm_put(md);
++ if (likely(keep_open_devices))
++ dm_destroy(md);
++ else
++ dm_destroy_immediate(md);
++
++ /*
++ * Some mapped devices may be using other mapped
++ * devices, so repeat until we make no further
++ * progress. If a new mapped device is created
++ * here it will also get removed.
++ */
++ goto retry;
++ }
+ }
+
+ up_write(&_hash_lock);
++
++ if (dev_skipped)
++ DMWARN("remove_all left %d open device(s)", dev_skipped);
+ }
+
+ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
+@@ -640,6 +650,7 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
+ r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
+ if (r) {
+ dm_put(md);
++ dm_destroy(md);
+ return r;
+ }
+
+@@ -742,6 +753,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
+
+ dm_put(md);
++ dm_destroy(md);
+ return 0;
+ }
+
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 5485377..a1f2ab5 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -148,6 +148,12 @@ struct dm_snapshot {
+ #define RUNNING_MERGE 0
+ #define SHUTDOWN_MERGE 1
+
++struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
++{
++ return s->origin;
++}
++EXPORT_SYMBOL(dm_snap_origin);
++
+ struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+ {
+ return s->cow;
+@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ origin_mode = FMODE_WRITE;
+ }
+
+- origin_path = argv[0];
+- argv++;
+- argc--;
+-
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ti->error = "Cannot allocate snapshot context private "
+@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ goto bad;
+ }
+
++ origin_path = argv[0];
++ argv++;
++ argc--;
++
++ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
++ if (r) {
++ ti->error = "Cannot get origin device";
++ goto bad_origin;
++ }
++
+ cow_path = argv[0];
+ argv++;
+ argc--;
+@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ argv += args_used;
+ argc -= args_used;
+
+- r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+- if (r) {
+- ti->error = "Cannot get origin device";
+- goto bad_origin;
+- }
+-
+ s->ti = ti;
+ s->valid = 1;
+ s->active = 0;
+@@ -1212,15 +1218,15 @@ bad_kcopyd:
+ dm_exception_table_exit(&s->complete, exception_cache);
+
+ bad_hash_tables:
+- dm_put_device(ti, s->origin);
+-
+-bad_origin:
+ dm_exception_store_destroy(s->store);
+
+ bad_store:
+ dm_put_device(ti, s->cow);
+
+ bad_cow:
++ dm_put_device(ti, s->origin);
++
++bad_origin:
+ kfree(s);
+
+ bad:
+@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
+
+ mempool_destroy(s->pending_pool);
+
+- dm_put_device(ti, s->origin);
+-
+ dm_exception_store_destroy(s->store);
+
+ dm_put_device(ti, s->cow);
+
++ dm_put_device(ti, s->origin);
++
+ kfree(s);
+ }
+
+@@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+ {
+ struct dm_snapshot *snap = ti->private;
++ int r;
++
++ r = fn(ti, snap->origin, 0, ti->len, data);
+
+- return fn(ti, snap->origin, 0, ti->len, data);
++ if (!r)
++ r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
++
++ return r;
+ }
+
+
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d21e128..e3a512d 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -19,6 +19,7 @@
+ #include <linux/slab.h>
+ #include <linux/idr.h>
+ #include <linux/hdreg.h>
++#include <linux/delay.h>
+
+ #include <trace/events/block.h>
+
+@@ -2141,6 +2142,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
+ md = idr_find(&_minor_idr, minor);
+ if (md && (md == MINOR_ALLOCED ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
++ dm_deleting_md(md) ||
+ test_bit(DMF_FREEING, &md->flags))) {
+ md = NULL;
+ goto out;
+@@ -2175,6 +2177,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr)
+ void dm_get(struct mapped_device *md)
+ {
+ atomic_inc(&md->holders);
++ BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ }
+
+ const char *dm_device_name(struct mapped_device *md)
+@@ -2183,27 +2186,55 @@ const char *dm_device_name(struct mapped_device *md)
+ }
+ EXPORT_SYMBOL_GPL(dm_device_name);
+
+-void dm_put(struct mapped_device *md)
++static void __dm_destroy(struct mapped_device *md, bool wait)
+ {
+ struct dm_table *map;
+
+- BUG_ON(test_bit(DMF_FREEING, &md->flags));
++ might_sleep();
+
+- if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
+- map = dm_get_live_table(md);
+- idr_replace(&_minor_idr, MINOR_ALLOCED,
+- MINOR(disk_devt(dm_disk(md))));
+- set_bit(DMF_FREEING, &md->flags);
+- spin_unlock(&_minor_lock);
+- if (!dm_suspended_md(md)) {
+- dm_table_presuspend_targets(map);
+- dm_table_postsuspend_targets(map);
+- }
+- dm_sysfs_exit(md);
+- dm_table_put(map);
+- dm_table_destroy(__unbind(md));
+- free_dev(md);
++ spin_lock(&_minor_lock);
++ map = dm_get_live_table(md);
++ idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
++ set_bit(DMF_FREEING, &md->flags);
++ spin_unlock(&_minor_lock);
++
++ if (!dm_suspended_md(md)) {
++ dm_table_presuspend_targets(map);
++ dm_table_postsuspend_targets(map);
+ }
++
++ /*
++ * Rare, but there may be I/O requests still going to complete,
++ * for example. Wait for all references to disappear.
++ * No one should increment the reference count of the mapped_device,
++ * after the mapped_device state becomes DMF_FREEING.
++ */
++ if (wait)
++ while (atomic_read(&md->holders))
++ msleep(1);
++ else if (atomic_read(&md->holders))
++ DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
++ dm_device_name(md), atomic_read(&md->holders));
++
++ dm_sysfs_exit(md);
++ dm_table_put(map);
++ dm_table_destroy(__unbind(md));
++ free_dev(md);
++}
++
++void dm_destroy(struct mapped_device *md)
++{
++ __dm_destroy(md, true);
++}
++
++void dm_destroy_immediate(struct mapped_device *md)
++{
++ __dm_destroy(md, false);
++}
++
++void dm_put(struct mapped_device *md)
++{
++ atomic_dec(&md->holders);
+ }
+ EXPORT_SYMBOL_GPL(dm_put);
+
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index bad1724..8223671 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -122,6 +122,11 @@ void dm_linear_exit(void);
+ int dm_stripe_init(void);
+ void dm_stripe_exit(void);
+
++/*
++ * mapped_device operations
++ */
++void dm_destroy(struct mapped_device *md);
++void dm_destroy_immediate(struct mapped_device *md);
+ int dm_open_count(struct mapped_device *md);
+ int dm_lock_for_deletion(struct mapped_device *md);
+
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index 8327e24..300ec15 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -1040,6 +1040,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
+ snprintf(s_attr->name, sizeof(s_attr->name),
+ "attr_x%02x", attr->entries[cnt].id);
+
++ sysfs_attr_init(&s_attr->dev_attr.attr);
+ s_attr->dev_attr.attr.name = s_attr->name;
+ s_attr->dev_attr.attr.mode = S_IRUGO;
+ s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
+@@ -1330,13 +1331,14 @@ static void mspro_block_remove(struct memstick_dev *card)
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+- del_gendisk(msb->disk);
+- dev_dbg(&card->dev, "mspro block remove\n");
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->eject = 1;
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
++ del_gendisk(msb->disk);
++ dev_dbg(&card->dev, "mspro block remove\n");
++
+ blk_cleanup_queue(msb->queue);
+ msb->queue = NULL;
+
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 62f3ea9..3364061 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -717,7 +717,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+ chip = &newcfi->chips[0];
+ for (i = 0; i < cfi->numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+- spin_lock_init(&shared[i].lock);
++ mutex_init(&shared[i].lock);
+ for (j = 0; j < numparts; j++) {
+ *chip = cfi->chips[i];
+ chip->start += j << partshift;
+@@ -886,7 +886,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+@@ -899,7 +899,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+@@ -914,7 +914,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already
+ * in FL_SYNCING state. Put contender and retry. */
+@@ -930,7 +930,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ * on this chip. Sleep. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+@@ -944,7 +944,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+ ret = chip_ready(map, chip, adr, mode);
+ if (ret == -EAGAIN)
+@@ -959,7 +959,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+@@ -967,7 +967,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+ /* give back ownership to who we loaned it from */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner, loaner->start);
+ mutex_lock(&chip->mutex);
+@@ -985,11 +985,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+
+ switch(chip->oldstate) {
+diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
+index fece5be..04fdfcc 100644
+--- a/drivers/mtd/lpddr/lpddr_cmds.c
++++ b/drivers/mtd/lpddr/lpddr_cmds.c
+@@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
+ numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
+ for (i = 0; i < numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+- spin_lock_init(&shared[i].lock);
++ mutex_init(&shared[i].lock);
+ for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
+ *chip = lpddr->chips[i];
+ chip->start += j << lpddr->chipshift;
+@@ -217,7 +217,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+@@ -230,7 +230,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+@@ -245,7 +245,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already in FL_SYNCING
+ * state. Put contender and retry. */
+@@ -261,7 +261,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+ Must sleep in such a case. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+@@ -275,7 +275,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+
+ ret = chip_ready(map, chip, mode);
+@@ -348,7 +348,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
+ {
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+@@ -356,7 +356,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
+ /* give back the ownership */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner);
+ mutex_lock(&chip->mutex);
+@@ -374,11 +374,11 @@ static void put_chip(struct map_info *map, struct flchip *chip)
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+
+ switch (chip->oldstate) {
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 4a7b864..5bcc34a 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2852,6 +2852,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ */
+ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
+ id_data[0] == NAND_MFR_SAMSUNG &&
++ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
+index 90e143e..317aff4 100644
+--- a/drivers/mtd/nand/plat_nand.c
++++ b/drivers/mtd/nand/plat_nand.c
+@@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
+ struct resource *res;
+ int err = 0;
+
++ if (pdata->chip.nr_chips < 1) {
++ dev_err(&pdev->dev, "invalid number of chips specified\n");
++ return -EINVAL;
++ }
++
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index e02fa4f..4d89f37 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
+ #define tAR_NDTR1(r) (((r) >> 0) & 0xf)
+
+ /* convert nano-seconds to nand flash controller clock cycles */
+-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
++#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+
+ /* convert nand flash controller clock cycles to nano-seconds */
+ #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
+diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
+index f654db9..d206f21 100644
+--- a/drivers/net/e1000e/82571.c
++++ b/drivers/net/e1000e/82571.c
+@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+ ew32(IMC, 0xffffffff);
+ icr = er32(ICR);
+
+- /* Install any alternate MAC address into RAR0 */
+- ret_val = e1000_check_alt_mac_addr_generic(hw);
+- if (ret_val)
+- return ret_val;
++ if (hw->mac.type == e1000_82571) {
++ /* Install any alternate MAC address into RAR0 */
++ ret_val = e1000_check_alt_mac_addr_generic(hw);
++ if (ret_val)
++ return ret_val;
+
+- e1000e_set_laa_state_82571(hw, true);
++ e1000e_set_laa_state_82571(hw, true);
++ }
+
+ /* Reinitialize the 82571 serdes link state machine */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+ {
+ s32 ret_val = 0;
+
+- /*
+- * If there's an alternate MAC address place it in RAR0
+- * so that it will override the Si installed default perm
+- * address.
+- */
+- ret_val = e1000_check_alt_mac_addr_generic(hw);
+- if (ret_val)
+- goto out;
++ if (hw->mac.type == e1000_82571) {
++ /*
++ * If there's an alternate MAC address place it in RAR0
++ * so that it will override the Si installed default perm
++ * address.
++ */
++ ret_val = e1000_check_alt_mac_addr_generic(hw);
++ if (ret_val)
++ goto out;
++ }
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_SWSM_ON_LOAD,
++ .flags2 = FLAG2_DISABLE_ASPM_L1,
+ .pba = 20,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .get_variants = e1000_get_variants_82571,
+diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
+index 4dc02c7..75289ca 100644
+--- a/drivers/net/e1000e/defines.h
++++ b/drivers/net/e1000e/defines.h
+@@ -620,6 +620,7 @@
+ #define E1000_FLASH_UPDATES 2000
+
+ /* NVM Word Offsets */
++#define NVM_COMPAT 0x0003
+ #define NVM_ID_LED_SETTINGS 0x0004
+ #define NVM_INIT_CONTROL2_REG 0x000F
+ #define NVM_INIT_CONTROL3_PORT_B 0x0014
+@@ -642,6 +643,9 @@
+ /* Mask bits for fields in Word 0x1a of the NVM */
+ #define NVM_WORD1A_ASPM_MASK 0x000C
+
++/* Mask bits for fields in Word 0x03 of the EEPROM */
++#define NVM_COMPAT_LOM 0x0800
++
+ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+ #define NVM_SUM 0xBABA
+
+diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
+index a968e3a..768c105 100644
+--- a/drivers/net/e1000e/lib.c
++++ b/drivers/net/e1000e/lib.c
+@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
++ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
++ if (ret_val)
++ goto out;
++
++ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
++ if (!((nvm_data & NVM_COMPAT_LOM) ||
++ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
++ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
++ goto out;
++
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
+index 648972d..ab9fe22 100644
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -48,6 +48,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/cache.h>
+ #include <linux/pci.h>
++#include <linux/pci-aspm.h>
+ #include <linux/ethtool.h>
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
+@@ -472,6 +473,26 @@ ath5k_pci_probe(struct pci_dev *pdev,
+ int ret;
+ u8 csz;
+
++ /*
++ * L0s needs to be disabled on all ath5k cards.
++ *
++ * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
++ * by default in the future in 2.6.36) this will also mean both L1 and
++ * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
++ * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
++ * though but cannot currently undue the effect of a blacklist, for
++ * details you can read pcie_aspm_sanity_check() and see how it adjusts
++ * the device link capability.
++ *
++ * It may be possible in the future to implement some PCI API to allow
++ * drivers to override blacklists for pre 1.1 PCIe but for now it is
++ * best to accept that both L0s and L1 will be disabled completely for
++ * distributions shipping with CONFIG_PCIEASPM rather than having this
++ * issue present. Motivation for adding this new API will be to help
++ * with power consumption for some of these devices.
++ */
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
++
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable device\n");
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index 2571b44..5fcbc2f 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -68,18 +68,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ath9k_htc_sta *ista;
+- struct ath9k_htc_vif *avp;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ enum htc_endpoint_id epid;
+ u16 qnum, hw_qnum;
+ __le16 fc;
+ u8 *tx_fhdr;
+- u8 sta_idx;
++ u8 sta_idx, vif_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+- avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
++ if (tx_info->control.vif &&
++ (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
++ vif_idx = ((struct ath9k_htc_vif *)
++ tx_info->control.vif->drv_priv)->index;
++ else
++ vif_idx = priv->nvifs;
++
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+@@ -96,7 +101,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+- tx_hdr.vif_idx = avp->index;
++ tx_hdr.vif_idx = vif_idx;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl.type = ATH9K_HTC_AMPDU;
+@@ -156,7 +161,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+
+ mgmt_hdr.node_idx = sta_idx;
+- mgmt_hdr.vif_idx = avp->index;
++ mgmt_hdr.vif_idx = vif_idx;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
+index c44a303..2a9480d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
+@@ -915,22 +915,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+- if (ieee80211_is_mgmt(fc)) {
+- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+- case cpu_to_le16(IEEE80211_STYPE_AUTH):
+- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+- if (tx_flags & TX_CMD_FLG_RTS_MSK) {
+- tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+- tx_flags |= TX_CMD_FLG_CTS_MSK;
+- }
+- break;
+- default:
+- break;
+- }
+- }
+-
+ tx_cmd->rate = rate;
+ tx_cmd->tx_flags = tx_flags;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+index 01658cf..2a30397 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+@@ -209,10 +209,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+ }
+ }
+
+-static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+- __le32 *tx_flags)
++static void iwlagn_rts_tx_cmd_flag(struct iwl_priv *priv,
++ struct ieee80211_tx_info *info,
++ __le16 fc, __le32 *tx_flags)
+ {
+- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
++ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
++ info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
++ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
++ return;
++ }
++
++ if (priv->cfg->use_rts_for_ht &&
++ info->flags & IEEE80211_TX_CTL_AMPDU) {
++ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
++ return;
++ }
+ }
+
+ /* Calc max signal level (dBm) among 3 possible receivers */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+index cf4a95b..ca46831 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+@@ -325,18 +325,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
+ struct iwl_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+ {
+- if ((tid < TID_MAX_LOAD_COUNT) &&
+- !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
+- if (priv->cfg->use_rts_for_ht) {
+- /*
+- * switch to RTS/CTS if it is the prefer protection
+- * method for HT traffic
+- */
+- IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
+- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+- iwlcore_commit_rxon(priv);
+- }
+- }
++ if (tid < TID_MAX_LOAD_COUNT)
++ rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
++ else
++ IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
++ tid, TID_MAX_LOAD_COUNT);
+ }
+
+ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+index 7d614c4..3a3d27c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+@@ -376,10 +376,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
+-
+- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
++ priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+@@ -453,21 +450,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+- /* Set up RTS and CTS flags for certain packets */
+- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+- case cpu_to_le16(IEEE80211_STYPE_AUTH):
+- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+- }
+- break;
+- default:
+- break;
+- }
+-
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 24aff65..c7f56b4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -200,13 +200,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
+
+ priv->start_calib = 0;
+ if (new_assoc) {
+- /*
+- * allow CTS-to-self if possible for new association.
+- * this is relevant only for 5000 series and up,
+- * but will not damage 4965
+- */
+- priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+-
+ /* Apply the new configuration
+ * RXON assoc doesn't clear the station table in uCode,
+ */
+@@ -3336,13 +3329,40 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
++ if (priv->cfg->use_rts_for_ht) {
++ struct iwl_station_priv *sta_priv =
++ (void *) sta->drv_priv;
++ /*
++ * switch off RTS/CTS if it was previously enabled
++ */
++
++ sta_priv->lq_sta.lq.general_params.flags &=
++ ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
++ iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
++ CMD_ASYNC, false);
++ }
++ break;
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return 0;
+ else
+ return ret;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+- /* do nothing */
+- return -EOPNOTSUPP;
++ if (priv->cfg->use_rts_for_ht) {
++ struct iwl_station_priv *sta_priv =
++ (void *) sta->drv_priv;
++
++ /*
++ * switch to RTS/CTS if it is the prefer protection
++ * method for HT traffic
++ */
++
++ sta_priv->lq_sta.lq.general_params.flags |=
++ LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
++ iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
++ CMD_ASYNC, false);
++ }
++ ret = 0;
++ break;
+ default:
+ IWL_DEBUG_HT(priv, "unknown\n");
+ return -EINVAL;
+@@ -3423,6 +3443,49 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ return 0;
+ }
+
++static void iwlagn_configure_filter(struct ieee80211_hw *hw,
++ unsigned int changed_flags,
++ unsigned int *total_flags,
++ u64 multicast)
++{
++ struct iwl_priv *priv = hw->priv;
++ __le32 filter_or = 0, filter_nand = 0;
++
++#define CHK(test, flag) do { \
++ if (*total_flags & (test)) \
++ filter_or |= (flag); \
++ else \
++ filter_nand |= (flag); \
++ } while (0)
++
++ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
++ changed_flags, *total_flags);
++
++ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
++ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
++ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
++
++#undef CHK
++
++ mutex_lock(&priv->mutex);
++
++ priv->staging_rxon.filter_flags &= ~filter_nand;
++ priv->staging_rxon.filter_flags |= filter_or;
++
++ iwlcore_commit_rxon(priv);
++
++ mutex_unlock(&priv->mutex);
++
++ /*
++ * Receiving all multicast frames is always enabled by the
++ * default flags setup in iwl_connection_init_rx_config()
++ * since we currently do not support programming multicast
++ * filters into the device.
++ */
++ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
++ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
++}
++
+ /*****************************************************************************
+ *
+ * driver setup and teardown
+@@ -3583,7 +3646,7 @@ static struct ieee80211_ops iwl_hw_ops = {
+ .add_interface = iwl_mac_add_interface,
+ .remove_interface = iwl_mac_remove_interface,
+ .config = iwl_mac_config,
+- .configure_filter = iwl_configure_filter,
++ .configure_filter = iwlagn_configure_filter,
+ .set_key = iwl_mac_set_key,
+ .update_tkip_key = iwl_mac_update_tkip_key,
+ .conf_tx = iwl_mac_conf_tx,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index 5bbc529..cd5b664 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -403,19 +403,36 @@ EXPORT_SYMBOL(iwlcore_free_geos);
+ * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
+ * function.
+ */
+-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+- __le32 *tx_flags)
++void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv,
++ struct ieee80211_tx_info *info,
++ __le16 fc, __le32 *tx_flags)
+ {
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
++ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
++
++ if (!ieee80211_is_mgmt(fc))
++ return;
++
++ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
++ case cpu_to_le16(IEEE80211_STYPE_AUTH):
++ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
++ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
++ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
++ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
++ *tx_flags |= TX_CMD_FLG_CTS_MSK;
++ break;
++ }
+ } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
++ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+ }
+ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
+
++
+ static bool is_single_rx_stream(struct iwl_priv *priv)
+ {
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+@@ -1294,51 +1311,6 @@ out:
+ EXPORT_SYMBOL(iwl_apm_init);
+
+
+-
+-void iwl_configure_filter(struct ieee80211_hw *hw,
+- unsigned int changed_flags,
+- unsigned int *total_flags,
+- u64 multicast)
+-{
+- struct iwl_priv *priv = hw->priv;
+- __le32 filter_or = 0, filter_nand = 0;
+-
+-#define CHK(test, flag) do { \
+- if (*total_flags & (test)) \
+- filter_or |= (flag); \
+- else \
+- filter_nand |= (flag); \
+- } while (0)
+-
+- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+- changed_flags, *total_flags);
+-
+- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+-
+-#undef CHK
+-
+- mutex_lock(&priv->mutex);
+-
+- priv->staging_rxon.filter_flags &= ~filter_nand;
+- priv->staging_rxon.filter_flags |= filter_or;
+-
+- iwlcore_commit_rxon(priv);
+-
+- mutex_unlock(&priv->mutex);
+-
+- /*
+- * Receiving all multicast frames is always enabled by the
+- * default flags setup in iwl_connection_init_rx_config()
+- * since we currently do not support programming multicast
+- * filters into the device.
+- */
+- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+-}
+-EXPORT_SYMBOL(iwl_configure_filter);
+-
+ int iwl_set_hw_params(struct iwl_priv *priv)
+ {
+ priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+@@ -1936,6 +1908,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
+ priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
++ if (bss_conf->use_cts_prot)
++ priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
++ else
++ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
+index 31775bd..e8ef317 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.h
++++ b/drivers/net/wireless/iwlwifi/iwl-core.h
+@@ -102,8 +102,9 @@ struct iwl_hcmd_utils_ops {
+ u32 min_average_noise,
+ u8 default_chain);
+ void (*chain_noise_reset)(struct iwl_priv *priv);
+- void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
+- __le32 *tx_flags);
++ void (*rts_tx_cmd_flag)(struct iwl_priv *priv,
++ struct ieee80211_tx_info *info,
++ __le16 fc, __le32 *tx_flags);
+ int (*calc_rssi)(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp);
+ void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
+@@ -355,9 +356,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats);
+ void iwl_irq_handle_error(struct iwl_priv *priv);
+-void iwl_configure_filter(struct ieee80211_hw *hw,
+- unsigned int changed_flags,
+- unsigned int *total_flags, u64 multicast);
+ int iwl_set_hw_params(struct iwl_priv *priv);
+ void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
+ void iwl_bss_info_changed(struct ieee80211_hw *hw,
+@@ -375,8 +373,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
+ void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
+ int iwl_alloc_txq_mem(struct iwl_priv *priv);
+ void iwl_free_txq_mem(struct iwl_priv *priv);
+-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+- __le32 *tx_flags);
++void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv,
++ struct ieee80211_tx_info *info,
++ __le16 fc, __le32 *tx_flags);
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ int iwl_alloc_traffic_mem(struct iwl_priv *priv);
+ void iwl_free_traffic_mem(struct iwl_priv *priv);
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index a27872d..39c0d2d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -434,10 +434,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
+-
+- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
++ priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+@@ -3465,6 +3462,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
+
+ return 0;
+ }
++
++static void iwl3945_configure_filter(struct ieee80211_hw *hw,
++ unsigned int changed_flags,
++ unsigned int *total_flags,
++ u64 multicast)
++{
++ struct iwl_priv *priv = hw->priv;
++ __le32 filter_or = 0, filter_nand = 0;
++
++#define CHK(test, flag) do { \
++ if (*total_flags & (test)) \
++ filter_or |= (flag); \
++ else \
++ filter_nand |= (flag); \
++ } while (0)
++
++ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
++ changed_flags, *total_flags);
++
++ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
++ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
++ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
++
++#undef CHK
++
++ mutex_lock(&priv->mutex);
++
++ priv->staging_rxon.filter_flags &= ~filter_nand;
++ priv->staging_rxon.filter_flags |= filter_or;
++
++ /*
++ * Committing directly here breaks for some reason,
++ * but we'll eventually commit the filter flags
++ * change anyway.
++ */
++
++ mutex_unlock(&priv->mutex);
++
++ /*
++ * Receiving all multicast frames is always enabled by the
++ * default flags setup in iwl_connection_init_rx_config()
++ * since we currently do not support programming multicast
++ * filters into the device.
++ */
++ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
++ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
++}
++
++
+ /*****************************************************************************
+ *
+ * sysfs attributes
+@@ -3870,7 +3916,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
+ .add_interface = iwl_mac_add_interface,
+ .remove_interface = iwl_mac_remove_interface,
+ .config = iwl_mac_config,
+- .configure_filter = iwl_configure_filter,
++ .configure_filter = iwl3945_configure_filter,
+ .set_key = iwl3945_mac_set_key,
+ .conf_tx = iwl_mac_conf_tx,
+ .reset_tsf = iwl_mac_reset_tsf,
+diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
+index a37b30c..ce3722f 100644
+--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
++++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
+@@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+
+ cmd->timeout = timeout;
+
+- ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
++ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index 71ff154..90111d7 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -259,6 +259,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
+ .callback = dmi_check_cb
+ },
+ {
++ .ident = "Dell Mini 1012",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
++ },
++ .callback = dmi_check_cb
++ },
++ {
+ .ident = "Dell Inspiron 11z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+@@ -375,5 +383,6 @@ MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
++MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index 661e3ac..6110601 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -116,6 +116,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
+ },
+ },
+ {
++ .ident = "Dell Mini 1012",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
++ },
++ },
++ {
+ .ident = "Dell Inspiron 11z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
+index 5a1dc8a..03713bc 100644
+--- a/drivers/regulator/wm8994-regulator.c
++++ b/drivers/regulator/wm8994-regulator.c
+@@ -219,8 +219,6 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
+
+ ldo->wm8994 = wm8994;
+
+- ldo->is_enabled = true;
+-
+ if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
+ ldo->enable = pdata->ldo[id].enable;
+
+@@ -237,7 +235,8 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
+ ret);
+ goto err_gpio;
+ }
+- }
++ } else
++ ldo->is_enabled = true;
+
+ ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
+ pdata->ldo[id].init_data, ldo);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index bfc99a9..221f999 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+- *trb = (*trb)++;
++ (*trb)++;
+ }
+ }
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 2bef441..80bf833 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -222,8 +222,8 @@ static struct usb_serial_driver cp210x_device = {
+ #define BITS_STOP_2 0x0002
+
+ /* CP210X_SET_BREAK */
+-#define BREAK_ON 0x0000
+-#define BREAK_OFF 0x0001
++#define BREAK_ON 0x0001
++#define BREAK_OFF 0x0000
+
+ /* CP210X_(SET_MHS|GET_MDMSTS) */
+ #define CONTROL_DTR 0x0001
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index eb12d9b..63ddb2f 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
++ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
+@@ -750,6 +751,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
++ { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+@@ -1376,7 +1379,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+ }
+
+ /* set max packet size based on descriptor */
+- priv->max_packet_size = ep_desc->wMaxPacketSize;
++ priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
+
+ dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
+ }
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 6e612c5..2e95857 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -110,6 +110,9 @@
+ /* Propox devices */
+ #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+
++/* Lenz LI-USB Computer Interface. */
++#define FTDI_LENZ_LIUSB_PID 0xD780
++
+ /*
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+@@ -989,6 +992,12 @@
+ #define ALTI2_N3_PID 0x6001 /* Neptune 3 */
+
+ /*
++ * Ionics PlugComputer
++ */
++#define IONICS_VID 0x1c0c
++#define IONICS_PLUGCOMPUTER_PID 0x0102
++
++/*
+ * Dresden Elektronik Sensor Terminal Board
+ */
+ #define DE_VID 0x1cf1 /* Vendor ID */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 0fca265..9991063 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
+
+ /* Check if we have an old version in the I2C and
+ update if necessary */
+- if (download_cur_ver != download_new_ver) {
++ if (download_cur_ver < download_new_ver) {
+ dbg("%s - Update I2C dld from %d.%d to %d.%d",
+ __func__,
+ firmware_version->Ver_Major,
+diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
+index a6b207c..1f00f24 100644
+--- a/drivers/usb/serial/navman.c
++++ b/drivers/usb/serial/navman.c
+@@ -25,6 +25,7 @@ static int debug;
+
+ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
++ { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5c35b3a..80c74d4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -368,6 +368,10 @@ static void option_instat_callback(struct urb *urb);
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
+
++/* Celot products */
++#define CELOT_VENDOR_ID 0x211f
++#define CELOT_PRODUCT_CT680M 0x6801
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -891,10 +895,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
+-
+ { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+-
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 6b60018..c98f0fb 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+ { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
++ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
+ { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+ { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
+ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index a871645..43eb9bd 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -128,6 +128,10 @@
+ #define CRESSI_VENDOR_ID 0x04b8
+ #define CRESSI_EDY_PRODUCT_ID 0x0521
+
++/* Zeagle dive computer interface */
++#define ZEAGLE_VENDOR_ID 0x04b8
++#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
++
+ /* Sony, USB data cable for CMD-Jxx mobile phones */
+ #define SONY_VENDOR_ID 0x054c
+ #define SONY_QN3USB_PRODUCT_ID 0x0437
+diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
+index f3a4e15..f96a471 100644
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
+ static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
+ #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
+ /*
+- * memcpy_toio works for us if:
++ * iowrite32_rep works for us if:
+ * (1) Copies data as 32bit quantities, not byte after byte,
+ * (2) Performs LE ordered stores, and
+ * (3) It copes with unaligned source (destination is guaranteed to be page
+ * aligned and length is guaranteed to be multiple of 4).
+ */
+- memcpy_toio(va.vaddr, src, len);
++ iowrite32_rep(va.vaddr, src, len >> 2);
+ #else
+ u_int32_t __iomem* addr = va.vaddr;
+
+diff --git a/firmware/Makefile b/firmware/Makefile
+index 020e629..99955ed 100644
+--- a/firmware/Makefile
++++ b/firmware/Makefile
+@@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
+ fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
+
+ # Directories which we _might_ need to create, so we have a rule for them.
+-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
++firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
+
+ quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@)
+ cmd_mkdir = mkdir -p $@
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index d6db933..f80a4f2 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -20,6 +20,7 @@
+ #include <linux/cdev.h>
+ #include <linux/mutex.h>
+ #include <linux/backing-dev.h>
++#include <linux/tty.h>
+
+ #include "internal.h"
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index e60416d..d69551e 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1103,7 +1103,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+ if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+ goto no_open_dput;
+ /* We can't create new files, or truncate existing ones here */
+- openflags &= ~(O_CREAT|O_TRUNC);
++ openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
+
+ /*
+ * Note: we're not holding inode->i_mutex and so may be racing with
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 70015dd..330a3c9 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2023,7 +2023,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ struct rpc_cred *cred;
+ struct nfs4_state *state;
+ struct dentry *res;
+- fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
++ int open_flags = nd->intent.open.flags;
++ fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+
+ if (nd->flags & LOOKUP_CREATE) {
+ attr.ia_mode = nd->intent.open.create_mode;
+@@ -2031,8 +2032,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ if (!IS_POSIXACL(dir))
+ attr.ia_mode &= ~current_umask();
+ } else {
++ open_flags &= ~O_EXCL;
+ attr.ia_valid = 0;
+- BUG_ON(nd->intent.open.flags & O_CREAT);
++ BUG_ON(open_flags & O_CREAT);
+ }
+
+ cred = rpc_lookup_cred();
+@@ -2041,7 +2043,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ parent = dentry->d_parent;
+ /* Protect against concurrent sillydeletes */
+ nfs_block_sillyrename(parent);
+- state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
++ state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
+ put_rpccred(cred);
+ if (IS_ERR(state)) {
+ if (PTR_ERR(state) == -ENOENT) {
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index f9df16d..6bf11d7 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -652,6 +652,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
+
+ if (nfss->options & NFS_OPTION_FSCACHE)
+ seq_printf(m, ",fsc");
++
++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
++ seq_printf(m, ",lookupcache=none");
++ else
++ seq_printf(m, ",lookupcache=pos");
++ }
+ }
+
+ /*
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 414ef68..fbb354c 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -336,9 +336,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
+ list_add(&sbi->s_list, &nilfs->ns_supers);
+ up_write(&nilfs->ns_super_sem);
+
++ err = -ENOMEM;
+ sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
+ if (!sbi->s_ifile)
+- return -ENOMEM;
++ goto delist;
+
+ down_read(&nilfs->ns_segctor_sem);
+ err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
+@@ -369,6 +370,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
+ nilfs_mdt_destroy(sbi->s_ifile);
+ sbi->s_ifile = NULL;
+
++ delist:
+ down_write(&nilfs->ns_super_sem);
+ list_del_init(&sbi->s_list);
+ up_write(&nilfs->ns_super_sem);
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index da70229..a76e0aa 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle,
+
+ int ocfs2_check_acl(struct inode *inode, int mask)
+ {
+- struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++ struct buffer_head *di_bh = NULL;
++ struct posix_acl *acl;
++ int ret = -EAGAIN;
+
+- if (IS_ERR(acl))
++ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++ return ret;
++
++ ret = ocfs2_read_inode_block(inode, &di_bh);
++ if (ret < 0) {
++ mlog_errno(ret);
++ return ret;
++ }
++
++ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
++
++ brelse(di_bh);
++
++ if (IS_ERR(acl)) {
++ mlog_errno(PTR_ERR(acl));
+ return PTR_ERR(acl);
++ }
+ if (acl) {
+- int ret = posix_acl_permission(inode, acl, mask);
++ ret = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return ret;
+ }
+@@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle,
+ {
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct posix_acl *acl = NULL;
+- int ret = 0;
++ int ret = 0, ret2;
+ mode_t mode;
+
+ if (!S_ISLNK(inode->i_mode)) {
+@@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle,
+ mode = inode->i_mode;
+ ret = posix_acl_create_masq(clone, &mode);
+ if (ret >= 0) {
+- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ if (ret2) {
++ mlog_errno(ret2);
++ ret = ret2;
++ goto cleanup;
++ }
+ if (ret > 0) {
+ ret = ocfs2_set_acl(handle, inode,
+ di_bh, ACL_TYPE_ACCESS,
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 94b97fc..ffb4c68 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref)
+
+ atomic_dec(&dlm->res_cur_count);
+
+- dlm_put(dlm);
+-
+ if (!hlist_unhashed(&res->hash_node) ||
+ !list_empty(&res->granted) ||
+ !list_empty(&res->converting) ||
+@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
+ res->migration_pending = 0;
+ res->inflight_locks = 0;
+
+- /* put in dlm_lockres_release */
+- dlm_grab(dlm);
+ res->dlm = dlm;
+
+ kref_init(&res->refs);
+@@ -3050,8 +3046,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ /* check for pre-existing lock */
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+- spin_lock(&dlm->master_lock);
+-
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+@@ -3069,14 +3063,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ spin_unlock(&res->spinlock);
+ }
+
++ spin_lock(&dlm->master_lock);
+ /* ignore status. only nonzero status would BUG. */
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+ name, namelen,
+ migrate->new_master,
+ migrate->master);
+
+-unlock:
+ spin_unlock(&dlm->master_lock);
++unlock:
+ spin_unlock(&dlm->spinlock);
+
+ if (oldmle) {
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 9dfaac7..aaaffbc 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ struct list_head *queue;
+ struct dlm_lock *lock, *next;
+
++ assert_spin_locked(&dlm->spinlock);
++ assert_spin_locked(&res->spinlock);
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ if (!list_empty(&res->recovering)) {
+ mlog(0,
+@@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+ /* zero the lvb if necessary */
+ dlm_revalidate_lvb(dlm, res, dead_node);
+ if (res->owner == dead_node) {
+- if (res->state & DLM_LOCK_RES_DROPPING_REF)
+- mlog(0, "%s:%.*s: owned by "
+- "dead node %u, this node was "
+- "dropping its ref when it died. "
+- "continue, dropping the flag.\n",
+- dlm->name, res->lockname.len,
+- res->lockname.name, dead_node);
+-
+- /* the wake_up for this will happen when the
+- * RECOVERING flag is dropped later */
+- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
++ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
++ mlog(ML_NOTICE, "Ignore %.*s for "
++ "recovery as it is being freed\n",
++ res->lockname.len,
++ res->lockname.name);
++ } else
++ dlm_move_lockres_to_recovery_list(dlm,
++ res);
+
+- dlm_move_lockres_to_recovery_list(dlm, res);
+ } else if (res->owner == dlm->node_num) {
+ dlm_free_dead_locks(dlm, res, dead_node);
+ __dlm_lockres_calc_usage(dlm, res);
+diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
+index d4f73ca..2211acf 100644
+--- a/fs/ocfs2/dlm/dlmthread.c
++++ b/fs/ocfs2/dlm/dlmthread.c
+@@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
+ * truly ready to be freed. */
+ int __dlm_lockres_unused(struct dlm_lock_resource *res)
+ {
+- if (!__dlm_lockres_has_locks(res) &&
+- (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
+- /* try not to scan the bitmap unless the first two
+- * conditions are already true */
+- int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+- if (bit >= O2NM_MAX_NODES) {
+- /* since the bit for dlm->node_num is not
+- * set, inflight_locks better be zero */
+- BUG_ON(res->inflight_locks != 0);
+- return 1;
+- }
+- }
+- return 0;
++ int bit;
++
++ if (__dlm_lockres_has_locks(res))
++ return 0;
++
++ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
++ return 0;
++
++ if (res->state & DLM_LOCK_RES_RECOVERING)
++ return 0;
++
++ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
++ if (bit < O2NM_MAX_NODES)
++ return 0;
++
++ /*
++ * since the bit for dlm->node_num is not set, inflight_locks better
++ * be zero
++ */
++ BUG_ON(res->inflight_locks != 0);
++ return 1;
+ }
+
+
+@@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ spin_unlock(&dlm->spinlock);
+ }
+
+-static int dlm_purge_lockres(struct dlm_ctxt *dlm,
++static void dlm_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+ {
+ int master;
+ int ret = 0;
+
+- spin_lock(&res->spinlock);
+- if (!__dlm_lockres_unused(res)) {
+- mlog(0, "%s:%.*s: tried to purge but not unused\n",
+- dlm->name, res->lockname.len, res->lockname.name);
+- __dlm_print_one_lock_resource(res);
+- spin_unlock(&res->spinlock);
+- BUG();
+- }
+-
+- if (res->state & DLM_LOCK_RES_MIGRATING) {
+- mlog(0, "%s:%.*s: Delay dropref as this lockres is "
+- "being remastered\n", dlm->name, res->lockname.len,
+- res->lockname.name);
+- /* Re-add the lockres to the end of the purge list */
+- if (!list_empty(&res->purge)) {
+- list_del_init(&res->purge);
+- list_add_tail(&res->purge, &dlm->purge_list);
+- }
+- spin_unlock(&res->spinlock);
+- return 0;
+- }
++ assert_spin_locked(&dlm->spinlock);
++ assert_spin_locked(&res->spinlock);
+
+ master = (res->owner == dlm->node_num);
+
+- if (!master)
+- res->state |= DLM_LOCK_RES_DROPPING_REF;
+- spin_unlock(&res->spinlock);
+
+ mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
+ res->lockname.name, master);
+
+ if (!master) {
++ res->state |= DLM_LOCK_RES_DROPPING_REF;
+ /* drop spinlock... retake below */
++ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+
+ spin_lock(&res->spinlock);
+@@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
+ mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
+ dlm->name, res->lockname.len, res->lockname.name, ret);
+ spin_lock(&dlm->spinlock);
++ spin_lock(&res->spinlock);
+ }
+
+- spin_lock(&res->spinlock);
+ if (!list_empty(&res->purge)) {
+ mlog(0, "removing lockres %.*s:%p from purgelist, "
+ "master = %d\n", res->lockname.len, res->lockname.name,
+ res, master);
+ list_del_init(&res->purge);
+- spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+- } else
+- spin_unlock(&res->spinlock);
++ }
++
++ if (!__dlm_lockres_unused(res)) {
++ mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
++ dlm->name, res->lockname.len, res->lockname.name);
++ __dlm_print_one_lock_resource(res);
++ BUG();
++ }
+
+ __dlm_unhash_lockres(res);
+
+ /* lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource. */
+ if (!master) {
+- spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+- }
+- return 0;
++ } else
++ spin_unlock(&res->spinlock);
+ }
+
+ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+@@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+ lockres = list_entry(dlm->purge_list.next,
+ struct dlm_lock_resource, purge);
+
+- /* Status of the lockres *might* change so double
+- * check. If the lockres is unused, holding the dlm
+- * spinlock will prevent people from getting and more
+- * refs on it -- there's no need to keep the lockres
+- * spinlock. */
+ spin_lock(&lockres->spinlock);
+- unused = __dlm_lockres_unused(lockres);
+- spin_unlock(&lockres->spinlock);
+-
+- if (!unused)
+- continue;
+
+ purge_jiffies = lockres->last_used +
+ msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
+@@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+ * in tail order, we can stop at the first
+ * unpurgable resource -- anyone added after
+ * him will have a greater last_used value */
++ spin_unlock(&lockres->spinlock);
+ break;
+ }
+
++ /* Status of the lockres *might* change so double
++ * check. If the lockres is unused, holding the dlm
++ * spinlock will prevent people from getting and more
++ * refs on it. */
++ unused = __dlm_lockres_unused(lockres);
++ if (!unused ||
++ (lockres->state & DLM_LOCK_RES_MIGRATING)) {
++ mlog(0, "lockres %s:%.*s: is in use or "
++ "being remastered, used %d, state %d\n",
++ dlm->name, lockres->lockname.len,
++ lockres->lockname.name, !unused, lockres->state);
++ list_move_tail(&dlm->purge_list, &lockres->purge);
++ spin_unlock(&lockres->spinlock);
++ continue;
++ }
++
+ dlm_lockres_get(lockres);
+
+- /* This may drop and reacquire the dlm spinlock if it
+- * has to do migration. */
+- if (dlm_purge_lockres(dlm, lockres))
+- BUG();
++ dlm_purge_lockres(dlm, lockres);
+
+ dlm_lockres_put(lockres);
+
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index 3ac5aa7..73a11cc 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
+ len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters)) - cpos;
+ /*
+- * If the refcount rec already exist, cool. We just need
+- * to check whether there is a split. Otherwise we just need
+- * to increase the refcount.
+- * If we will insert one, increases recs_add.
+- *
+ * We record all the records which will be inserted to the
+ * same refcount block, so that we can tell exactly whether
+ * we need a new refcount block or not.
++ *
++ * If we will insert a new one, this is easy and only happens
++ * during adding refcounted flag to the extent, so we don't
++ * have a chance of spliting. We just need one record.
++ *
++ * If the refcount rec already exists, that would be a little
++ * complicated. we may have to:
++ * 1) split at the beginning if the start pos isn't aligned.
++ * we need 1 more record in this case.
++ * 2) split int the end if the end pos isn't aligned.
++ * we need 1 more record in this case.
++ * 3) split in the middle because of file system fragmentation.
++ * we need 2 more records in this case(we can't detect this
++ * beforehand, so always think of the worst case).
+ */
+ if (rec.r_refcount) {
++ recs_add += 2;
+ /* Check whether we need a split at the beginning. */
+ if (cpos == start_cpos &&
+ cpos != le64_to_cpu(rec.r_cpos))
+diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
+index e5039a2..103f08a 100644
+--- a/include/acpi/platform/aclinux.h
++++ b/include/acpi/platform/aclinux.h
+@@ -148,13 +148,17 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
+ #define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
+ #define ACPI_FREE(a) kfree(a)
+
+-/* Used within ACPICA to show where it is safe to preempt execution */
+-#include <linux/hardirq.h>
++#ifndef CONFIG_PREEMPT
++/*
++ * Used within ACPICA to show where it is safe to preempt execution
++ * when CONFIG_PREEMPT=n
++ */
+ #define ACPI_PREEMPTION_POINT() \
+ do { \
+- if (!in_atomic_preempt_off() && !irqs_disabled()) \
++ if (!irqs_disabled()) \
+ cond_resched(); \
+ } while (0)
++#endif
+
+ #endif /* __KERNEL__ */
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index b8bb9a6..ee7e258 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -134,7 +134,7 @@ struct vm_area_struct {
+ within vm_mm. */
+
+ /* linked list of VM areas per task, sorted by address */
+- struct vm_area_struct *vm_next;
++ struct vm_area_struct *vm_next, *vm_prev;
+
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, see mm.h. */
+diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
+index f43e9b4..23cc10f 100644
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -92,7 +92,7 @@ struct flchip {
+ /* This is used to handle contention on write/erase operations
+ between partitions of the same physical chip. */
+ struct flchip_shared {
+- spinlock_t lock;
++ struct mutex lock;
+ struct flchip *writing;
+ struct flchip *erasing;
+ };
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f89e7fd..eb674b7 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -169,6 +169,7 @@ struct skb_shared_hwtstamps {
+ * @software: generate software time stamp
+ * @in_progress: device driver is going to provide
+ * hardware time stamp
++ * @prevent_sk_orphan: make sk reference available on driver level
+ * @flags: all shared_tx flags
+ *
+ * These flags are attached to packets as part of the
+@@ -178,7 +179,8 @@ union skb_shared_tx {
+ struct {
+ __u8 hardware:1,
+ software:1,
+- in_progress:1;
++ in_progress:1,
++ prevent_sk_orphan:1;
+ };
+ __u8 flags;
+ };
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 931078b..7802a24 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -552,6 +552,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk,
+ }
+ #endif
+
++/* tty_io.c */
++extern int __init tty_init(void);
++
+ /* tty_ioctl.c */
+ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
+index 6a664c3..7dc97d1 100644
+--- a/include/sound/emu10k1.h
++++ b/include/sound/emu10k1.h
+@@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
+ unsigned int card_type; /* EMU10K1_CARD_* */
+ unsigned int ecard_ctrl; /* ecard control bits */
+ unsigned long dma_mask; /* PCI DMA mask */
++ unsigned int delay_pcm_irq; /* in samples */
+ int max_cache_pages; /* max memory size / PAGE_SIZE */
+ struct snd_dma_buffer silent_page; /* silent page */
+ struct snd_dma_buffer ptb_pages; /* page table pages */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index b6cce14..e96c0cd 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -300,7 +300,7 @@ out:
+ #ifdef CONFIG_MMU
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+- struct vm_area_struct *mpnt, *tmp, **pprev;
++ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+ struct rb_node **rb_link, *rb_parent;
+ int retval;
+ unsigned long charge;
+@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ if (retval)
+ goto out;
+
++ prev = NULL;
+ for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+ struct file *file;
+
+@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ goto fail_nomem_anon_vma_fork;
+ tmp->vm_flags &= ~VM_LOCKED;
+ tmp->vm_mm = mm;
+- tmp->vm_next = NULL;
++ tmp->vm_next = tmp->vm_prev = NULL;
+ file = tmp->vm_file;
+ if (file) {
+ struct inode *inode = file->f_path.dentry->d_inode;
+@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ */
+ *pprev = tmp;
+ pprev = &tmp->vm_next;
++ tmp->vm_prev = prev;
++ prev = tmp;
+
+ __vma_link_rb(mm, tmp, rb_link, rb_parent);
+ rb_link = &tmp->vm_rb.rb_right;
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 63b4a14..6d0dbeb 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3694,8 +3694,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+ /*
+ * Owner changed, break to re-assess state.
+ */
+- if (lock->owner != owner)
++ if (lock->owner != owner) {
++ /*
++ * If the lock has switched to a different owner,
++ * we likely have heavy contention. Return 0 to quit
++ * optimistic spinning and not contend further:
++ */
++ if (lock->owner)
++ return 0;
+ break;
++ }
+
+ /*
+ * Is that owner really running on that cpu?
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index caf8d4d..b87c22f 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -736,6 +736,7 @@ static void timekeeping_adjust(s64 offset)
+ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+ {
+ u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
++ u64 raw_nsecs;
+
+ /* If the offset is smaller then a shifted interval, do nothing */
+ if (offset < timekeeper.cycle_interval<<shift)
+@@ -752,12 +753,15 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+ second_overflow();
+ }
+
+- /* Accumulate into raw time */
+- raw_time.tv_nsec += timekeeper.raw_interval << shift;;
+- while (raw_time.tv_nsec >= NSEC_PER_SEC) {
+- raw_time.tv_nsec -= NSEC_PER_SEC;
+- raw_time.tv_sec++;
++ /* Accumulate raw time */
++ raw_nsecs = timekeeper.raw_interval << shift;
++ raw_nsecs += raw_time.tv_nsec;
++ if (raw_nsecs >= NSEC_PER_SEC) {
++ u64 raw_secs = raw_nsecs;
++ raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
++ raw_time.tv_sec += raw_secs;
+ }
++ raw_time.tv_nsec = raw_nsecs;
+
+ /* Accumulate error between NTP and clock interval */
+ timekeeper.ntp_error += tick_length << shift;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 1da7b6e..5ec8f1d 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3868,6 +3868,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ rpos = reader->read;
+ pos += size;
+
++ if (rpos >= commit)
++ break;
++
+ event = rb_reader_event(cpu_buffer);
+ size = rb_event_length(event);
+ } while (len > size);
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 79f4bac..b4c179a 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
+ * if the output fails.
+ */
+ data->ent = *curr;
+- data->ret = *next;
++ /*
++ * If the next event is not a return type, then
++ * we only care about what type it is. Otherwise we can
++ * safely copy the entire event.
++ */
++ if (next->ent.type == TRACE_GRAPH_RET)
++ data->ret = *next;
++ else
++ data->ret.ent.type = next->ent.type;
+ }
+ }
+
+diff --git a/mm/memory.c b/mm/memory.c
+index 307bf77..53cf85d 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
+ {
+ address &= PAGE_MASK;
+ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- address -= PAGE_SIZE;
+- if (find_vma(vma->vm_mm, address) != vma)
+- return -ENOMEM;
++ struct vm_area_struct *prev = vma->vm_prev;
++
++ /*
++ * Is there a mapping abutting this one below?
++ *
++ * That's only ok if it's the same stack mapping
++ * that has gotten split..
++ */
++ if (prev && prev->vm_end == address)
++ return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+- expand_stack(vma, address);
++ expand_stack(vma, address - PAGE_SIZE);
+ }
+ return 0;
+ }
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 49e5e4c..cbae7c5 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
+ }
+ }
+
++/* Is the vma a continuation of the stack vma above it? */
++static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
++{
++ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
++}
++
++static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
++{
++ return (vma->vm_flags & VM_GROWSDOWN) &&
++ (vma->vm_start == addr) &&
++ !vma_stack_continue(vma->vm_prev, addr);
++}
++
+ /**
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+ * @vma: target vma
+@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
+ gup_flags |= FOLL_WRITE;
+
+ /* We don't try to access the guard page of a stack vma */
+- if (vma->vm_flags & VM_GROWSDOWN) {
+- if (start == vma->vm_start) {
+- start += PAGE_SIZE;
+- nr_pages--;
+- }
++ if (stack_guard_page(vma, start)) {
++ addr += PAGE_SIZE;
++ nr_pages--;
+ }
+
+ while (nr_pages > 0) {
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 456ec6f..3867cfc 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -388,17 +388,23 @@ static inline void
+ __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct rb_node *rb_parent)
+ {
++ struct vm_area_struct *next;
++
++ vma->vm_prev = prev;
+ if (prev) {
+- vma->vm_next = prev->vm_next;
++ next = prev->vm_next;
+ prev->vm_next = vma;
+ } else {
+ mm->mmap = vma;
+ if (rb_parent)
+- vma->vm_next = rb_entry(rb_parent,
++ next = rb_entry(rb_parent,
+ struct vm_area_struct, vm_rb);
+ else
+- vma->vm_next = NULL;
++ next = NULL;
+ }
++ vma->vm_next = next;
++ if (next)
++ next->vm_prev = vma;
+ }
+
+ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -485,7 +491,11 @@ static inline void
+ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev)
+ {
+- prev->vm_next = vma->vm_next;
++ struct vm_area_struct *next = vma->vm_next;
++
++ prev->vm_next = next;
++ if (next)
++ next->vm_prev = prev;
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = prev;
+@@ -1900,6 +1910,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr;
+
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
++ vma->vm_prev = NULL;
+ do {
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
+@@ -1907,6 +1918,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+ vma = vma->vm_next;
+ } while (vma && vma->vm_start < end);
+ *insertion_point = vma;
++ if (vma)
++ vma->vm_prev = prev;
+ tail_vma->vm_next = NULL;
+ if (mm->unmap_area == arch_unmap_area)
+ addr = prev ? prev->vm_end : mm->mmap_base;
+diff --git a/mm/nommu.c b/mm/nommu.c
+index b76f3ee..e48b38c 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -609,7 +609,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+ */
+ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+ {
+- struct vm_area_struct *pvma, **pp;
++ struct vm_area_struct *pvma, **pp, *next;
+ struct address_space *mapping;
+ struct rb_node **p, *parent;
+
+@@ -669,8 +669,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+ break;
+ }
+
+- vma->vm_next = *pp;
++ next = *pp;
+ *pp = vma;
++ vma->vm_next = next;
++ if (next)
++ next->vm_prev = vma;
+ }
+
+ /*
+diff --git a/mm/slab.c b/mm/slab.c
+index e49f8f4..e4f747f 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2331,8 +2331,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+ }
+ #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+ if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+- && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+- cachep->obj_offset += PAGE_SIZE - size;
++ && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
++ cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+ size = PAGE_SIZE;
+ }
+ #endif
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 753fc42..f49bcd9 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -22,7 +22,7 @@
+ #include <asm/uaccess.h>
+ #include "br_private.h"
+
+-/* net device transmit always called with no BH (preempt_disabled) */
++/* net device transmit always called with BH disabled */
+ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct net_bridge *br = netdev_priv(dev);
+@@ -46,9 +46,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb_reset_mac_header(skb);
+ skb_pull(skb, ETH_HLEN);
+
++ rcu_read_lock();
+ if (is_multicast_ether_addr(dest)) {
+- if (br_multicast_rcv(br, NULL, skb))
++ if (br_multicast_rcv(br, NULL, skb)) {
++ kfree_skb(skb);
+ goto out;
++ }
+
+ mdst = br_mdb_get(br, skb);
+ if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
+@@ -61,6 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ br_flood_deliver(br, skb);
+
+ out:
++ rcu_read_unlock();
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index b01dde3..7204ad3 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
+ spin_unlock_bh(&br->hash_lock);
+ }
+
+-/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
++/* No locking or refcounting, assumes caller has rcu_read_lock */
+ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+ const unsigned char *addr)
+ {
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index d36e700..114365c 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -37,7 +37,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
+ netif_receive_skb);
+ }
+
+-/* note: already called with rcu_read_lock (preempt_disabled) */
++/* note: already called with rcu_read_lock */
+ int br_handle_frame_finish(struct sk_buff *skb)
+ {
+ const unsigned char *dest = eth_hdr(skb)->h_dest;
+@@ -108,7 +108,7 @@ drop:
+ goto out;
+ }
+
+-/* note: already called with rcu_read_lock (preempt_disabled) */
++/* note: already called with rcu_read_lock */
+ static int br_handle_local_finish(struct sk_buff *skb)
+ {
+ struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+@@ -133,7 +133,7 @@ static inline int is_link_local(const unsigned char *dest)
+ /*
+ * Called via br_handle_frame_hook.
+ * Return NULL if skb is handled
+- * note: already called with rcu_read_lock (preempt_disabled)
++ * note: already called with rcu_read_lock
+ */
+ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
+ {
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index 217bd22..5854e82 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -131,7 +131,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
+ /*
+ * Called from llc.
+ *
+- * NO locks, but rcu_read_lock (preempt_disabled)
++ * NO locks, but rcu_read_lock
+ */
+ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+ struct net_device *dev)
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 9c65e9d..08ffe9e 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -60,6 +60,13 @@
+ #include <net/sock.h>
+ #include <net/net_namespace.h>
+
++/*
++ * To send multiple CAN frame content within TX_SETUP or to filter
++ * CAN messages with multiplex index within RX_SETUP, the number of
++ * different filters is limited to 256 due to the one byte index value.
++ */
++#define MAX_NFRAMES 256
++
+ /* use of last_frames[index].can_dlc */
+ #define RX_RECV 0x40 /* received data for this element */
+ #define RX_THR 0x80 /* element not been sent due to throttle feature */
+@@ -89,16 +96,16 @@ struct bcm_op {
+ struct list_head list;
+ int ifindex;
+ canid_t can_id;
+- int flags;
++ u32 flags;
+ unsigned long frames_abs, frames_filtered;
+ struct timeval ival1, ival2;
+ struct hrtimer timer, thrtimer;
+ struct tasklet_struct tsklet, thrtsklet;
+ ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
+ int rx_ifindex;
+- int count;
+- int nframes;
+- int currframe;
++ u32 count;
++ u32 nframes;
++ u32 currframe;
+ struct can_frame *frames;
+ struct can_frame *last_frames;
+ struct can_frame sframe;
+@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
+
+ seq_printf(m, "rx_op: %03X %-5s ",
+ op->can_id, bcm_proc_getifname(ifname, op->ifindex));
+- seq_printf(m, "[%d]%c ", op->nframes,
++ seq_printf(m, "[%u]%c ", op->nframes,
+ (op->flags & RX_CHECK_DLC)?'d':' ');
+ if (op->kt_ival1.tv64)
+ seq_printf(m, "timeo=%lld ",
+@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
+
+ list_for_each_entry(op, &bo->tx_ops, list) {
+
+- seq_printf(m, "tx_op: %03X %s [%d] ",
++ seq_printf(m, "tx_op: %03X %s [%u] ",
+ op->can_id,
+ bcm_proc_getifname(ifname, op->ifindex),
+ op->nframes);
+@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
+ struct can_frame *firstframe;
+ struct sockaddr_can *addr;
+ struct sock *sk = op->sk;
+- int datalen = head->nframes * CFSIZ;
++ unsigned int datalen = head->nframes * CFSIZ;
+ int err;
+
+ skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
+@@ -468,7 +475,7 @@ rx_changed_settime:
+ * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
+ * received data stored in op->last_frames[]
+ */
+-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
++static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
+ const struct can_frame *rxdata)
+ {
+ /*
+@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+ /*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
++static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
++ unsigned int index)
+ {
+ if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
+ if (update)
+@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
+ int updated = 0;
+
+ if (op->nframes > 1) {
+- int i;
++ unsigned int i;
+
+ /* for MUX filter we start at index 1 */
+ for (i = 1; i < op->nframes; i++)
+@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
+ {
+ struct bcm_op *op = (struct bcm_op *)data;
+ const struct can_frame *rxframe = (struct can_frame *)skb->data;
+- int i;
++ unsigned int i;
+
+ /* disable timeout */
+ hrtimer_cancel(&op->timer);
+@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ {
+ struct bcm_sock *bo = bcm_sk(sk);
+ struct bcm_op *op;
+- int i, err;
++ unsigned int i;
++ int err;
+
+ /* we need a real device to send frames */
+ if (!ifindex)
+ return -ENODEV;
+
+- /* we need at least one can_frame */
+- if (msg_head->nframes < 1)
++ /* check nframes boundaries - we need at least one can_frame */
++ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
+ return -EINVAL;
+
+ /* check the given can_id */
+@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ msg_head->nframes = 0;
+ }
+
++ /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
++ if (msg_head->nframes > MAX_NFRAMES + 1)
++ return -EINVAL;
++
+ if ((msg_head->flags & RX_RTR_FRAME) &&
+ ((msg_head->nframes != 1) ||
+ (!(msg_head->can_id & CAN_RTR_FLAG))))
+diff --git a/net/can/raw.c b/net/can/raw.c
+index da99cf1..1650599 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -655,6 +655,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
+ err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ if (err < 0)
+ goto free_skb;
++
++ /* to be able to check the received tx sock reference in raw_rcv() */
++ skb_tx(skb)->prevent_sk_orphan = 1;
++
+ skb->dev = dev;
+ skb->sk = sk;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1f466e8..95cc486 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2504,6 +2504,7 @@ int netif_rx(struct sk_buff *skb)
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
+
++ preempt_disable();
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+@@ -2513,6 +2514,7 @@ int netif_rx(struct sk_buff *skb)
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+
+ rcu_read_unlock();
++ preempt_enable();
+ }
+ #else
+ {
+@@ -3064,7 +3066,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+ int mac_len;
+ enum gro_result ret;
+
+- if (!(skb->dev->features & NETIF_F_GRO))
++ if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
+ goto normal;
+
+ if (skb_is_gso(skb) || skb_has_frags(skb))
+@@ -3133,7 +3135,7 @@ pull:
+ put_page(skb_shinfo(skb)->frags[0].page);
+ memmove(skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->frags + 1,
+- --skb_shinfo(skb)->nr_frags);
++ --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+ }
+ }
+
+@@ -3151,9 +3153,6 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+ {
+ struct sk_buff *p;
+
+- if (netpoll_rx_on(skb))
+- return GRO_NORMAL;
+-
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow =
+ (p->dev == skb->dev) &&
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 65afeae..c259714 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2176,6 +2176,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ GFP_KERNEL);
+ if (cvp == NULL)
+ return -ENOMEM;
++
++ kref_init(&cvp->kref);
+ }
+ lock_sock(sk);
+ tp->rx_opt.cookie_in_always =
+@@ -2190,12 +2192,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ */
+ kref_put(&tp->cookie_values->kref,
+ tcp_cookie_values_release);
+- kref_init(&cvp->kref);
+- tp->cookie_values = cvp;
+ } else {
+ cvp = tp->cookie_values;
+ }
+ }
++
+ if (cvp != NULL) {
+ cvp->cookie_desired = ctd.tcpct_cookie_desired;
+
+@@ -2209,6 +2210,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ cvp->s_data_desired = ctd.tcpct_s_data_desired;
+ cvp->s_data_constant = 0; /* false */
+ }
++
++ tp->cookie_values = cvp;
+ }
+ release_sock(sk);
+ return err;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a2eb965..54d7308 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1400,7 +1400,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ struct netlink_sock *nlk = nlk_sk(sk);
+ int noblock = flags&MSG_DONTWAIT;
+ size_t copied;
+- struct sk_buff *skb, *frag __maybe_unused = NULL;
++ struct sk_buff *skb, *data_skb;
+ int err;
+
+ if (flags&MSG_OOB)
+@@ -1412,45 +1412,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ if (skb == NULL)
+ goto out;
+
++ data_skb = skb;
++
+ #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+ if (unlikely(skb_shinfo(skb)->frag_list)) {
+- bool need_compat = !!(flags & MSG_CMSG_COMPAT);
+-
+ /*
+- * If this skb has a frag_list, then here that means that
+- * we will have to use the frag_list skb for compat tasks
+- * and the regular skb for non-compat tasks.
++ * If this skb has a frag_list, then here that means that we
++ * will have to use the frag_list skb's data for compat tasks
++ * and the regular skb's data for normal (non-compat) tasks.
+ *
+- * The skb might (and likely will) be cloned, so we can't
+- * just reset frag_list and go on with things -- we need to
+- * keep that. For the compat case that's easy -- simply get
+- * a reference to the compat skb and free the regular one
+- * including the frag. For the non-compat case, we need to
+- * avoid sending the frag to the user -- so assign NULL but
+- * restore it below before freeing the skb.
++ * If we need to send the compat skb, assign it to the
++ * 'data_skb' variable so that it will be used below for data
++ * copying. We keep 'skb' for everything else, including
++ * freeing both later.
+ */
+- if (need_compat) {
+- struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
+- skb_get(compskb);
+- kfree_skb(skb);
+- skb = compskb;
+- } else {
+- frag = skb_shinfo(skb)->frag_list;
+- skb_shinfo(skb)->frag_list = NULL;
+- }
++ if (flags & MSG_CMSG_COMPAT)
++ data_skb = skb_shinfo(skb)->frag_list;
+ }
+ #endif
+
+ msg->msg_namelen = 0;
+
+- copied = skb->len;
++ copied = data_skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+- skb_reset_transport_header(skb);
+- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++ skb_reset_transport_header(data_skb);
++ err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
+
+ if (msg->msg_name) {
+ struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
+@@ -1470,11 +1460,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ }
+ siocb->scm->creds = *NETLINK_CREDS(skb);
+ if (flags & MSG_TRUNC)
+- copied = skb->len;
+-
+-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+- skb_shinfo(skb)->frag_list = frag;
+-#endif
++ copied = data_skb->len;
+
+ skb_free_datagram(sk, skb);
+
+diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
+index 724553e..abbf4fa 100644
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -218,6 +218,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
+ if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+ goto drop;
+
++ icmph = (void *)(skb_network_header(skb) + ihl);
+ iph = (void *)(icmph + 1);
+ if (egress)
+ addr = iph->daddr;
+@@ -246,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
+ iph->saddr = new_addr;
+
+ inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
+- 1);
++ 0);
+ break;
+ }
+ default:
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index c657628..a9be0ef 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -497,11 +497,22 @@ nla_put_failure:
+ return -1;
+ }
+
++static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
++{
++ return NULL;
++}
++
+ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
+ {
+ return 0;
+ }
+
++static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
++ u32 classid)
++{
++ return 0;
++}
++
+ static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
+ {
+ struct sfq_sched_data *q = qdisc_priv(sch);
+@@ -554,8 +565,10 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+ }
+
+ static const struct Qdisc_class_ops sfq_class_ops = {
++ .leaf = sfq_leaf,
+ .get = sfq_get,
+ .tcf_chain = sfq_find_tcf,
++ .bind_tcf = sfq_bind,
+ .dump = sfq_dump_class,
+ .dump_stats = sfq_dump_class_stats,
+ .walk = sfq_walk,
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index ef17fcf..e4be688 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -842,12 +842,18 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
+ return -EINVAL;
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
+ /* Verify that we are associated with the destination AP */
++ wdev_lock(wdev);
++
+ if (!wdev->current_bss ||
+ memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
+ ETH_ALEN) != 0 ||
+ memcmp(wdev->current_bss->pub.bssid, mgmt->da,
+- ETH_ALEN) != 0)
++ ETH_ALEN) != 0) {
++ wdev_unlock(wdev);
+ return -ENOTCONN;
++ }
++ wdev_unlock(wdev);
++
+ }
+
+ if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
+diff --git a/scripts/mkmakefile b/scripts/mkmakefile
+index 67d59c7..5325423 100644
+--- a/scripts/mkmakefile
++++ b/scripts/mkmakefile
+@@ -44,7 +44,9 @@ all:
+
+ Makefile:;
+
+-\$(all) %/: all
++\$(all): all
+ @:
+
++%/: all
++ @:
+ EOF
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 303ac04..1990918 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -981,6 +981,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
+ {
+ if (substream->runtime->trigger_master != substream)
+ return 0;
++ /* some drivers might use hw_ptr to recover from the pause -
++ update the hw_ptr now */
++ if (push)
++ snd_pcm_update_hw_ptr(substream);
+ /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
+ * a delta betwen the current jiffies, this gives a large enough
+ * delta, effectively to skip the check once.
+diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
+index 4203782..aff8387 100644
+--- a/sound/pci/emu10k1/emu10k1.c
++++ b/sound/pci/emu10k1/emu10k1.c
+@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
+ static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
+ static int enable_ir[SNDRV_CARDS];
+ static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
++static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
+
+ module_param_array(index, int, NULL, 0444);
+ MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
+@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444);
+ MODULE_PARM_DESC(enable_ir, "Enable IR.");
+ module_param_array(subsystem, uint, NULL, 0444);
+ MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
++module_param_array(delay_pcm_irq, uint, NULL, 0444);
++MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
+ /*
+ * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
+ */
+@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
+ &emu)) < 0)
+ goto error;
+ card->private_data = emu;
++ emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
+ if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
+ goto error;
+ if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index 55b83ef..622bace 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
+ evoice->epcm->ccca_start_addr = start_addr + ccis;
+ if (extra) {
+ start_addr += ccis;
+- end_addr += ccis;
++ end_addr += ccis + emu->delay_pcm_irq;
+ }
+ if (stereo && !extra) {
+ snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
+@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
+ /* Assumption that PT is already 0 so no harm overwriting */
+ snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
+ snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
+- snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
++ snd_emu10k1_ptr_write(emu, PSST, voice,
++ (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
++ (send_amount[2] << 24));
+ if (emu->card_capabilities->emu_model)
+ pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
+ else
+@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
+ snd_emu10k1_ptr_write(emu, IP, voice, 0);
+ }
+
++static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
++ struct snd_emu10k1_pcm *epcm,
++ struct snd_pcm_substream *substream,
++ struct snd_pcm_runtime *runtime)
++{
++ unsigned int ptr, period_pos;
++
++ /* try to sychronize the current position for the interrupt
++ source voice */
++ period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
++ period_pos %= runtime->period_size;
++ ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
++ ptr &= ~0x00ffffff;
++ ptr |= epcm->ccca_start_addr + period_pos;
++ snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
++}
++
+ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+ {
+@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
+ /* follow thru */
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
++ if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
++ snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
+ mix = &emu->pcm_mixer[substream->number];
+ snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
+ snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
+@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
+ #endif
+ /*
+ printk(KERN_DEBUG
+- "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
+- ptr, runtime->buffer_size, runtime->period_size);
++ "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
++ (long)ptr, (long)runtime->buffer_size,
++ (long)runtime->period_size);
+ */
+ return ptr;
+ }
+diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
+index ffb1ddb..957a311 100644
+--- a/sound/pci/emu10k1/memory.c
++++ b/sound/pci/emu10k1/memory.c
+@@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
+ if (snd_BUG_ON(!hdr))
+ return NULL;
+
++ idx = runtime->period_size >= runtime->buffer_size ?
++ (emu->delay_pcm_irq * 2) : 0;
+ mutex_lock(&hdr->block_mutex);
+- blk = search_empty(emu, runtime->dma_bytes);
++ blk = search_empty(emu, runtime->dma_bytes + idx);
+ if (blk == NULL) {
+ mutex_unlock(&hdr->block_mutex);
+ return NULL;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 2bf2cb5..baadda4 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -2970,6 +2970,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
+ CXT5066_DELL_LAPTOP),
+ SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
++ SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
+ SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
+ SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index aa7cc51..6d9a542 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6864,6 +6864,7 @@ static int patch_alc260(struct hda_codec *codec)
+
+ spec->stream_analog_playback = &alc260_pcm_analog_playback;
+ spec->stream_analog_capture = &alc260_pcm_analog_capture;
++ spec->stream_analog_alt_capture = &alc260_pcm_analog_capture;
+
+ spec->stream_digital_playback = &alc260_pcm_digital_playback;
+ spec->stream_digital_capture = &alc260_pcm_digital_capture;
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 6433e65..4677492 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
+ },
+ {
+ .subvendor = 0x1014,
++ .subdevice = 0x0534,
++ .name = "ThinkPad X31",
++ .type = AC97_TUNE_INV_EAPD
++ },
++ {
++ .subvendor = 0x1014,
+ .subdevice = 0x1f00,
+ .name = "MS-9128",
+ .type = AC97_TUNE_ALC_JACK
+diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
+index ad44626..c737287 100644
+--- a/sound/pci/riptide/riptide.c
++++ b/sound/pci/riptide/riptide.c
+@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
+ firmware.firmware.ASIC, firmware.firmware.CODEC,
+ firmware.firmware.AUXDSP, firmware.firmware.PROG);
+
++ if (!chip)
++ return 1;
++
+ for (i = 0; i < FIRMWARE_VERSIONS; i++) {
+ if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
+- break;
+- }
+- if (i >= FIRMWARE_VERSIONS)
+- return 0; /* no match */
++ return 1; /* OK */
+
+- if (!chip)
+- return 1; /* OK */
++ }
+
+ snd_printdd("Writing Firmware\n");
+ if (!chip->fw_entry) {
+diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
+index c3571ee..72deeab 100644
+--- a/sound/soc/codecs/wm8580.c
++++ b/sound/soc/codecs/wm8580.c
+@@ -269,9 +269,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4, 2, 3, 1, 0),
+ SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0),
+
+ SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
+-SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0),
+-SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0),
+-SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0),
++SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
++SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
++SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
+
+ SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0),
+ SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
+diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
+index 4e212ed..f8154e6 100644
+--- a/sound/soc/codecs/wm8776.c
++++ b/sound/soc/codecs/wm8776.c
+@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface |= 0x0001;
+ break;
+- /* FIXME: CHECK A/B */
+- case SND_SOC_DAIFMT_DSP_A:
+- iface |= 0x0003;
+- break;
+- case SND_SOC_DAIFMT_DSP_B:
+- iface |= 0x0007;
+- break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
+index 472af38..adbc68c 100644
+--- a/sound/soc/soc-cache.c
++++ b/sound/soc/soc-cache.c
+@@ -340,7 +340,7 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
+ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+ {
+- u16 *cache = codec->reg_cache;
++ u8 *cache = codec->reg_cache;
+
+ reg &= 0xff;
+ if (reg >= codec->reg_cache_size)
+@@ -351,7 +351,7 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
+ static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+ {
+- u16 *cache = codec->reg_cache;
++ u8 *cache = codec->reg_cache;
+ u8 data[3];
+ int ret;
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-FM-TX-headphone-TV-out-and-basic-jack-detection-supp.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-FM-TX-headphone-TV-out-and-basic-jack-detection-supp.patch
new file mode 100644
index 0000000000..6dcabd83c5
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-FM-TX-headphone-TV-out-and-basic-jack-detection-supp.patch
@@ -0,0 +1,810 @@
+From ae7c5b5bcde720340330336825b57aa5a88ca618 Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jhnikula@gmail.com>
+Date: Wed, 5 May 2010 13:57:00 +0300
+Subject: [PATCH 08/11] FM TX, headphone, TV-out and basic jack detection support for N900
+
+This patch is combination of following patches:
+
+1. omap: rx51: Add platform_data for tlv320aic3x with reset gpio number
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+2. omap: rx51: Use REGULATOR_SUPPLY macro when initializing regulator consumers
+
+There is REGULATOR_SUPPLY macro available for initializing the struct
+regulator_consumer_supply so use it where applicable (all other supplies
+than vdds_sdi) as it improves the readability.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+Acked-by: Eduardo Valentin <eduardo.valentin@nokia.com>
+
+3. omap: rx51: Add supply and data for the tpa6130a2 headphone amplifier
+
+With these and upcoming change to tpa6130a2 driver it's possible to add
+support for the TPA6130A2 headphone amplifier.
+
+[jhnikula@gmail.com: Include statement adjusted for nokia-n900-kernel.git]
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+4. ASoC: RX-51: Add Jack Function kcontrol
+
+Nokia RX-51/N900 has multifunction 4-pole audio-video jack that can be used
+as headphone, headset or audio-video connector. This patch implements the
+control 'Jack Function' which is used to select the desired function.
+At the moment only TV-out without audio is supported.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+5. ASoC: RX-51: Add basic jack detection
+
+This patch adds GPIO jack detection to Nokia N900/RX-51. At the moment only
+SND_JACK_VIDEOOUT type is reported. More types could be reported after
+getting more audio features supported and necessary drivers integrated for
+implementing automated accessory detection.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+6. ASoC: RX-51: Add stereo audio output to AV jack
+
+The TPA6130A2 headphone amplifier is used in Nokia N900/RX-51 to drive the
+audio output pins in 4-pole multifunction audio-video jack. This patch
+implements audio output functionality, extends the control 'Jack Function'
+and limits the TPA6130A2 output level.
+
+Limiting is done because the TPA6130A2 can output very high audio levels to
+headphones. A value for the maximum volume is found from the Maemo 2.6.28
+kernel sources.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+7. V4L/DVB: radio-si4713: Release i2c adapter in driver cleanup paths
+
+Call to i2c_put_adapter was missing in radio_si4713_pdriver_probe and
+radio_si4713_pdriver_remove.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+Cc: Eduardo Valentin <eduardo.valentin@nokia.com>
+
+8. V4L/DVB: radio-si4713: Add regulator framework support
+
+Convert the driver to use regulator framework instead of set_power callback.
+This with gpio_reset platform data provide cleaner way to manage chip VIO,
+VDD and reset signal inside the driver.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+Cc: Eduardo Valentin <eduardo.valentin@nokia.com>
+
+9. omap: rx51: Set regulator V28 always on
+
+It seems that the battery cover sensor in Nokia N900 is powered from the
+V28 domain. Now if this regulator is disabled it causes that the gpio 160
+reads only zero which effectively causes uSD removal detection.
+
+Currently the bootloader enabled V28 is kept on but this may change in the
+future according to comment in
+drivers/regulator/core.c: regulator_has_full_constraints.
+
+Also if there are any consumers on the V28 domain doing regulator_enable
+regulator_disable cycle the V28 will be disabled after that.
+
+Prepare for these by defining the V28 as always_on regulator.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+Cc: Adrian Hunter <adrian.hunter@nokia.com>
+
+10. omap: rx51: Add initialization and platform data for Si4713 FM transmitter
+
+This patch adds supplies, gpio and platform data for the the Si4713 FM
+transmitter.
+
+[jhnikula@gmail.com: Two minor line adjustment for nokia-n900-kernel.git]
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+
+11. ASoC: RX-51: Add support for FM transmitter
+
+Nokia N900/RX-51 has Si4713 FM transmitter which shares the same codec
+line-out output than the TPA6130A2 amplifier. Add route to transmitter
+and kcontrol 'FMTX Function' which used to indicate to DAPM when the route
+to FM transmitter should be active.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 108 +++++++++++++++------
+ drivers/media/radio/radio-si4713.c | 30 +++++-
+ drivers/media/radio/si4713-i2c.c | 48 ++++++++--
+ drivers/media/radio/si4713-i2c.h | 3 +-
+ include/media/si4713.h | 3 +-
+ sound/soc/omap/Kconfig | 1 +
+ sound/soc/omap/rx51.c | 132 +++++++++++++++++++++++++-
+ 7 files changed, 276 insertions(+), 49 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 8989119..0aab1a0 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -25,6 +25,8 @@
+ #include <linux/gpio_keys.h>
+ #include <linux/mmc/host.h>
+ #include <linux/bluetooth/hci_h4p.h>
++#include <media/radio-si4713.h>
++#include <media/si4713.h>
+
+ #include <plat/mcspi.h>
+ #include <plat/mux.h>
+@@ -39,6 +41,9 @@
+ #include <../drivers/staging/iio/light/tsl2563.h>
+ #include <linux/lis3lv02d.h>
+
++#include <sound/tlv320aic3x.h>
++#include <sound/tpa6130a2-plat.h>
++
+ #include "mux.h"
+ #include "hsmmc.h"
+
+@@ -47,6 +52,8 @@
+
+ #define RX51_WL1251_POWER_GPIO 87
+ #define RX51_WL1251_IRQ_GPIO 42
++#define RX51_FMTX_RESET_GPIO 163
++#define RX51_FMTX_IRQ 53
+
+ #define RX51_TSC2005_RESET_GPIO 104
+ #define RX51_TSC2005_IRQ_GPIO 100
+@@ -394,48 +401,31 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
+ {} /* Terminator */
+ };
+
+-static struct regulator_consumer_supply rx51_vmmc1_supply = {
+- .supply = "vmmc",
+- .dev_name = "mmci-omap-hs.0",
+-};
++static struct regulator_consumer_supply rx51_vmmc1_supply =
++ REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
+
+-static struct regulator_consumer_supply rx51_vaux3_supply = {
+- .supply = "vmmc",
+- .dev_name = "mmci-omap-hs.1",
+-};
++static struct regulator_consumer_supply rx51_vaux3_supply =
++ REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+
+-static struct regulator_consumer_supply rx51_vsim_supply = {
+- .supply = "vmmc_aux",
+- .dev_name = "mmci-omap-hs.1",
+-};
++static struct regulator_consumer_supply rx51_vsim_supply =
++ REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+
+ static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
+ /* tlv320aic3x analog supplies */
+- {
+- .supply = "AVDD",
+- .dev_name = "2-0018",
+- },
+- {
+- .supply = "DRVDD",
+- .dev_name = "2-0018",
+- },
++ REGULATOR_SUPPLY("AVDD", "2-0018"),
++ REGULATOR_SUPPLY("DRVDD", "2-0018"),
++ /* tpa6130a2 */
++ REGULATOR_SUPPLY("Vdd", "2-0060"),
+ /* Keep vmmc as last item. It is not iterated for newer boards */
+- {
+- .supply = "vmmc",
+- .dev_name = "mmci-omap-hs.1",
+- },
++ REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"),
+ };
+
+ static struct regulator_consumer_supply rx51_vio_supplies[] = {
+ /* tlv320aic3x digital supplies */
+- {
+- .supply = "IOVDD",
+- .dev_name = "2-0018"
+- },
+- {
+- .supply = "DVDD",
+- .dev_name = "2-0018"
+- },
++ REGULATOR_SUPPLY("IOVDD", "2-0018"),
++ REGULATOR_SUPPLY("DVDD", "2-0018"),
++ /* Si4713 IO supply */
++ REGULATOR_SUPPLY("vio", "radio-si4713"),
+ };
+
+ #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+@@ -449,6 +439,8 @@ static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
+ .dev = &rx51_display_device.dev,
+ },
+ #endif
++ /* Si4713 supply */
++ REGULATOR_SUPPLY("vdd", "2-0063"),
+ };
+
+ static struct regulator_init_data rx51_vaux1 = {
+@@ -456,6 +448,7 @@ static struct regulator_init_data rx51_vaux1 = {
+ .name = "V28",
+ .min_uV = 2800000,
+ .max_uV = 2800000,
++ .always_on = true, /* due battery cover sensor */
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+@@ -591,6 +584,42 @@ static struct regulator_init_data rx51_vio = {
+ .consumer_supplies = rx51_vio_supplies,
+ };
+
++static struct si4713_platform_data rx51_si4713_i2c_data = {
++ .gpio_reset = RX51_FMTX_RESET_GPIO,
++};
++
++static struct i2c_board_info rx51_si4713_board_info = {
++ I2C_BOARD_INFO("si4713", SI4713_I2C_ADDR_BUSEN_HIGH),
++ .platform_data = &rx51_si4713_i2c_data,
++};
++
++static struct radio_si4713_platform_data rx51_si4713_data = {
++ .i2c_bus = 2,
++ .subdev_board_info = &rx51_si4713_board_info,
++};
++
++static struct platform_device rx51_si4713_dev = {
++ .name = "radio-si4713",
++ .id = -1,
++ .dev = {
++ .platform_data = &rx51_si4713_data,
++ },
++};
++
++static __init void rx51_init_si4713(void)
++{
++ int err;
++
++ err = gpio_request(RX51_FMTX_IRQ, "si4713");
++ if (err) {
++ printk(KERN_ERR "Cannot request gpio %d\n", RX51_FMTX_IRQ);
++ return;
++ }
++ gpio_direction_input(RX51_FMTX_IRQ);
++ rx51_si4713_board_info.irq = gpio_to_irq(RX51_FMTX_IRQ);
++ platform_device_register(&rx51_si4713_dev);
++}
++
+ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
+ {
+ /* FIXME this gpio setup is just a placeholder for now */
+@@ -801,6 +830,15 @@ static struct twl4030_platform_data rx51_twldata __initdata = {
+ .vio = &rx51_vio,
+ };
+
++static struct aic3x_pdata rx51_aic3x_data __initdata = {
++ .gpio_reset = 60,
++};
++
++static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata = {
++ .id = TPA6130A2,
++ .power_gpio = 98,
++};
++
+ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
+ {
+ I2C_BOARD_INFO("twl5030", 0x48),
+@@ -813,6 +851,7 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
+ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
++ .platform_data = &rx51_aic3x_data,
+ },
+ #if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
+ {
+@@ -820,6 +859,10 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ .platform_data = &rx51_tsl2563_platform_data,
+ },
+ #endif
++ {
++ I2C_BOARD_INFO("tpa6130a2", 0x60),
++ .platform_data = &rx51_tpa6130a2_data,
++ },
+ };
+
+ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
+@@ -1098,6 +1141,7 @@ void __init rx51_peripherals_init(void)
+ rx51_init_wl1251();
+ rx51_init_tsc2005();
+ rx51_bt_init();
++ rx51_init_si4713();
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ omap2_hsmmc_init(mmc);
+diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
+index 13554ab..c666012 100644
+--- a/drivers/media/radio/radio-si4713.c
++++ b/drivers/media/radio/radio-si4713.c
+@@ -28,6 +28,7 @@
+ #include <linux/i2c.h>
+ #include <linux/videodev2.h>
+ #include <linux/slab.h>
++#include <linux/regulator/consumer.h>
+ #include <media/v4l2-device.h>
+ #include <media/v4l2-common.h>
+ #include <media/v4l2-ioctl.h>
+@@ -48,6 +49,7 @@ MODULE_VERSION("0.0.1");
+ struct radio_si4713_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device *radio_dev;
++ struct regulator *reg_vio;
+ };
+
+ /* radio_si4713_fops - file operations interface */
+@@ -283,12 +285,22 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
+ goto free_rsdev;
+ }
+
++ rsdev->reg_vio = regulator_get(&pdev->dev, "vio");
++ if (IS_ERR(rsdev->reg_vio)) {
++ dev_err(&pdev->dev, "Cannot get vio regulator\n");
++ rval = PTR_ERR(rsdev->reg_vio);
++ goto unregister_v4l2_dev;
++ }
++ rval = regulator_enable(rsdev->reg_vio);
++ if (rval)
++ goto reg_put;
++
+ adapter = i2c_get_adapter(pdata->i2c_bus);
+ if (!adapter) {
+ dev_err(&pdev->dev, "Cannot get i2c adapter %d\n",
+ pdata->i2c_bus);
+ rval = -ENODEV;
+- goto unregister_v4l2_dev;
++ goto reg_disable;
+ }
+
+ sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, "si4713_i2c",
+@@ -296,14 +308,14 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
+ if (!sd) {
+ dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n");
+ rval = -ENODEV;
+- goto unregister_v4l2_dev;
++ goto put_adapter;
+ }
+
+ rsdev->radio_dev = video_device_alloc();
+ if (!rsdev->radio_dev) {
+ dev_err(&pdev->dev, "Failed to alloc video device.\n");
+ rval = -ENOMEM;
+- goto unregister_v4l2_dev;
++ goto put_adapter;
+ }
+
+ memcpy(rsdev->radio_dev, &radio_si4713_vdev_template,
+@@ -320,6 +332,12 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
+
+ free_vdev:
+ video_device_release(rsdev->radio_dev);
++put_adapter:
++ i2c_put_adapter(adapter);
++reg_disable:
++ regulator_disable(rsdev->reg_vio);
++reg_put:
++ regulator_put(rsdev->reg_vio);
+ unregister_v4l2_dev:
+ v4l2_device_unregister(&rsdev->v4l2_dev);
+ free_rsdev:
+@@ -335,8 +353,14 @@ static int __exit radio_si4713_pdriver_remove(struct platform_device *pdev)
+ struct radio_si4713_device *rsdev = container_of(v4l2_dev,
+ struct radio_si4713_device,
+ v4l2_dev);
++ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
++ struct v4l2_subdev, list);
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ video_unregister_device(rsdev->radio_dev);
++ i2c_put_adapter(client->adapter);
++ regulator_disable(rsdev->reg_vio);
++ regulator_put(rsdev->reg_vio);
+ v4l2_device_unregister(&rsdev->v4l2_dev);
+ kfree(rsdev);
+
+diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
+index ab63dd5..4b5470c 100644
+--- a/drivers/media/radio/si4713-i2c.c
++++ b/drivers/media/radio/si4713-i2c.c
+@@ -27,6 +27,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/i2c.h>
+ #include <linux/slab.h>
++#include <linux/gpio.h>
++#include <linux/regulator/consumer.h>
+ #include <media/v4l2-device.h>
+ #include <media/v4l2-ioctl.h>
+ #include <media/v4l2-common.h>
+@@ -369,7 +371,12 @@ static int si4713_powerup(struct si4713_device *sdev)
+ if (sdev->power_state)
+ return 0;
+
+- sdev->platform_data->set_power(1);
++ regulator_enable(sdev->reg_vdd);
++ if (gpio_is_valid(sdev->gpio_reset)) {
++ udelay(50);
++ gpio_set_value(sdev->gpio_reset, 1);
++ }
++
+ err = si4713_send_command(sdev, SI4713_CMD_POWER_UP,
+ args, ARRAY_SIZE(args),
+ resp, ARRAY_SIZE(resp),
+@@ -384,7 +391,9 @@ static int si4713_powerup(struct si4713_device *sdev)
+ err = si4713_write_property(sdev, SI4713_GPO_IEN,
+ SI4713_STC_INT | SI4713_CTS);
+ } else {
+- sdev->platform_data->set_power(0);
++ if (gpio_is_valid(sdev->gpio_reset))
++ gpio_set_value(sdev->gpio_reset, 0);
++ regulator_disable(sdev->reg_vdd);
+ }
+
+ return err;
+@@ -411,7 +420,9 @@ static int si4713_powerdown(struct si4713_device *sdev)
+ v4l2_dbg(1, debug, &sdev->sd, "Power down response: 0x%02x\n",
+ resp[0]);
+ v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
+- sdev->platform_data->set_power(0);
++ if (gpio_is_valid(sdev->gpio_reset))
++ gpio_set_value(sdev->gpio_reset, 0);
++ regulator_disable(sdev->reg_vdd);
+ sdev->power_state = POWER_OFF;
+ }
+
+@@ -1959,6 +1970,7 @@ static int si4713_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+ struct si4713_device *sdev;
++ struct si4713_platform_data *pdata = client->dev.platform_data;
+ int rval;
+
+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+@@ -1968,11 +1980,20 @@ static int si4713_probe(struct i2c_client *client,
+ goto exit;
+ }
+
+- sdev->platform_data = client->dev.platform_data;
+- if (!sdev->platform_data) {
+- v4l2_err(&sdev->sd, "No platform data registered.\n");
+- rval = -ENODEV;
+- goto free_sdev;
++ sdev->gpio_reset = -1;
++ if (pdata && gpio_is_valid(pdata->gpio_reset)) {
++ rval = gpio_request(pdata->gpio_reset, "si4713 reset");
++ if (rval)
++ goto free_sdev;
++ sdev->gpio_reset = pdata->gpio_reset;
++ gpio_direction_output(sdev->gpio_reset, 0);
++ }
++
++ sdev->reg_vdd = regulator_get(&client->dev, "vdd");
++ if (IS_ERR(sdev->reg_vdd)) {
++ dev_err(&client->dev, "Cannot get vdd regulator\n");
++ rval = PTR_ERR(sdev->reg_vdd);
++ goto free_gpio;
+ }
+
+ v4l2_i2c_subdev_init(&sdev->sd, client, &si4713_subdev_ops);
+@@ -1986,7 +2007,7 @@ static int si4713_probe(struct i2c_client *client,
+ client->name, sdev);
+ if (rval < 0) {
+ v4l2_err(&sdev->sd, "Could not request IRQ\n");
+- goto free_sdev;
++ goto put_reg;
+ }
+ v4l2_dbg(1, debug, &sdev->sd, "IRQ requested.\n");
+ } else {
+@@ -2004,6 +2025,11 @@ static int si4713_probe(struct i2c_client *client,
+ free_irq:
+ if (client->irq)
+ free_irq(client->irq, sdev);
++put_reg:
++ regulator_put(sdev->reg_vdd);
++free_gpio:
++ if (gpio_is_valid(sdev->gpio_reset))
++ gpio_free(sdev->gpio_reset);
+ free_sdev:
+ kfree(sdev);
+ exit:
+@@ -2023,7 +2049,9 @@ static int si4713_remove(struct i2c_client *client)
+ free_irq(client->irq, sdev);
+
+ v4l2_device_unregister_subdev(sd);
+-
++ regulator_put(sdev->reg_vdd);
++ if (gpio_is_valid(sdev->gpio_reset))
++ gpio_free(sdev->gpio_reset);
+ kfree(sdev);
+
+ return 0;
+diff --git a/drivers/media/radio/si4713-i2c.h b/drivers/media/radio/si4713-i2c.h
+index faf8cff..cf79f6e 100644
+--- a/drivers/media/radio/si4713-i2c.h
++++ b/drivers/media/radio/si4713-i2c.h
+@@ -220,11 +220,12 @@ struct si4713_device {
+ /* private data structures */
+ struct mutex mutex;
+ struct completion work;
+- struct si4713_platform_data *platform_data;
+ struct rds_info rds_info;
+ struct limiter_info limiter_info;
+ struct pilot_info pilot_info;
+ struct acomp_info acomp_info;
++ struct regulator *reg_vdd;
++ int gpio_reset;
+ u32 frequency;
+ u32 preemphasis;
+ u32 mute;
+diff --git a/include/media/si4713.h b/include/media/si4713.h
+index 99850a5..ed7353e 100644
+--- a/include/media/si4713.h
++++ b/include/media/si4713.h
+@@ -23,8 +23,7 @@
+ * Platform dependent definition
+ */
+ struct si4713_platform_data {
+- /* Set power state, zero is off, non-zero is on. */
+- int (*set_power)(int power);
++ int gpio_reset; /* < 0 if not used */
+ };
+
+ /*
+diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
+index d542ea2..db88dd0 100644
+--- a/sound/soc/omap/Kconfig
++++ b/sound/soc/omap/Kconfig
+@@ -24,6 +24,7 @@ config SND_OMAP_SOC_RX51
+ select OMAP_MCBSP
+ select SND_OMAP_SOC_MCBSP
+ select SND_SOC_TLV320AIC3X
++ select SND_SOC_TPA6130A2
+ help
+ Say Y if you want to add support for SoC audio on Nokia RX-51
+ hardware. This is also known as Nokia N900 product.
+diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
+index 47d831e..9b536da 100644
+--- a/sound/soc/omap/rx51.c
++++ b/sound/soc/omap/rx51.c
+@@ -27,6 +27,7 @@
+ #include <linux/gpio.h>
+ #include <linux/platform_device.h>
+ #include <sound/core.h>
++#include <sound/jack.h>
+ #include <sound/pcm.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+@@ -36,18 +37,38 @@
+ #include "omap-mcbsp.h"
+ #include "omap-pcm.h"
+ #include "../codecs/tlv320aic3x.h"
++#include "../codecs/tpa6130a2.h"
+
++#define RX51_TVOUT_SEL_GPIO 40
++#define RX51_JACK_DETECT_GPIO 177
+ /*
+ * REVISIT: TWL4030 GPIO base in RX-51. Now statically defined to 192. This
+ * gpio is reserved in arch/arm/mach-omap2/board-rx51-peripherals.c
+ */
+ #define RX51_SPEAKER_AMP_TWL_GPIO (192 + 7)
+
++enum {
++ RX51_JACK_DISABLED,
++ RX51_JACK_TVOUT, /* tv-out with stereo audio */
++ RX51_JACK_HP, /* stereo output, no mic */
++};
++
+ static int rx51_spk_func;
+ static int rx51_dmic_func;
++static int rx51_jack_func;
++static int rx51_fmtx_func;
+
+ static void rx51_ext_control(struct snd_soc_codec *codec)
+ {
++ int hp = 0;
++
++ switch (rx51_jack_func) {
++ case RX51_JACK_TVOUT:
++ case RX51_JACK_HP:
++ hp = 1;
++ break;
++ }
++
+ if (rx51_spk_func)
+ snd_soc_dapm_enable_pin(codec, "Ext Spk");
+ else
+@@ -56,6 +77,17 @@ static void rx51_ext_control(struct snd_soc_codec *codec)
+ snd_soc_dapm_enable_pin(codec, "DMic");
+ else
+ snd_soc_dapm_disable_pin(codec, "DMic");
++ if (hp)
++ snd_soc_dapm_enable_pin(codec, "Headphone Jack");
++ else
++ snd_soc_dapm_disable_pin(codec, "Headphone Jack");
++ if (rx51_fmtx_func)
++ snd_soc_dapm_enable_pin(codec, "FM Transmitter");
++ else
++ snd_soc_dapm_disable_pin(codec, "FM Transmitter");
++
++ gpio_set_value(RX51_TVOUT_SEL_GPIO,
++ rx51_jack_func == RX51_JACK_TVOUT);
+
+ snd_soc_dapm_sync(codec);
+ }
+@@ -162,9 +194,67 @@ static int rx51_set_input(struct snd_kcontrol *kcontrol,
+ return 1;
+ }
+
++static int rx51_get_jack(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ ucontrol->value.integer.value[0] = rx51_jack_func;
++
++ return 0;
++}
++
++static int rx51_set_jack(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++
++ if (rx51_jack_func == ucontrol->value.integer.value[0])
++ return 0;
++
++ rx51_jack_func = ucontrol->value.integer.value[0];
++ rx51_ext_control(codec);
++
++ return 1;
++}
++
++static int rx51_get_fmtx(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ ucontrol->value.integer.value[0] = rx51_fmtx_func;
++
++ return 0;
++}
++
++static int rx51_set_fmtx(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++
++ if (rx51_fmtx_func == ucontrol->value.integer.value[0])
++ return 0;
++
++ rx51_fmtx_func = ucontrol->value.integer.value[0];
++ rx51_ext_control(codec);
++
++ return 1;
++}
++
++static struct snd_soc_jack rx51_av_jack;
++
++static struct snd_soc_jack_gpio rx51_av_jack_gpios[] = {
++ {
++ .gpio = RX51_JACK_DETECT_GPIO,
++ .name = "avdet-gpio",
++ .report = SND_JACK_VIDEOOUT,
++ .invert = 1,
++ .debounce_time = 200,
++ },
++};
++
+ static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event),
+ SND_SOC_DAPM_MIC("DMic", NULL),
++ SND_SOC_DAPM_HP("Headphone Jack", NULL),
++ SND_SOC_DAPM_LINE("FM Transmitter", NULL),
+ };
+
+ static const struct snd_soc_dapm_route audio_map[] = {
+@@ -173,14 +263,26 @@ static const struct snd_soc_dapm_route audio_map[] = {
+
+ {"DMic Rate 64", NULL, "Mic Bias 2V"},
+ {"Mic Bias 2V", NULL, "DMic"},
++
++ {"Headphone Jack", NULL, "TPA6130A2 Headphone Left"},
++ {"Headphone Jack", NULL, "TPA6130A2 Headphone Right"},
++ {"TPA6130A2 Left", NULL, "LLOUT"},
++ {"TPA6130A2 Right", NULL, "RLOUT"},
++
++ {"FM Transmitter", NULL, "LLOUT"},
++ {"FM Transmitter", NULL, "RLOUT"},
+ };
+
+ static const char *spk_function[] = {"Off", "On"};
+ static const char *input_function[] = {"ADC", "Digital Mic"};
++static const char *jack_function[] = {"Off", "TV-OUT", "Headphone"};
++static const char *fmtx_function[] = {"Off", "On"};
+
+ static const struct soc_enum rx51_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function),
++ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(jack_function), jack_function),
++ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(fmtx_function), fmtx_function),
+ };
+
+ static const struct snd_kcontrol_new aic34_rx51_controls[] = {
+@@ -188,10 +290,15 @@ static const struct snd_kcontrol_new aic34_rx51_controls[] = {
+ rx51_get_spk, rx51_set_spk),
+ SOC_ENUM_EXT("Input Select", rx51_enum[1],
+ rx51_get_input, rx51_set_input),
++ SOC_ENUM_EXT("Jack Function", rx51_enum[2],
++ rx51_get_jack, rx51_set_jack),
++ SOC_ENUM_EXT("FMTX Function", rx51_enum[3],
++ rx51_get_fmtx, rx51_set_fmtx),
+ };
+
+ static int rx51_aic34_init(struct snd_soc_codec *codec)
+ {
++ struct snd_soc_card *card = codec->socdev->card;
+ int err;
+
+ /* Set up NC codec pins */
+@@ -209,12 +316,24 @@ static int rx51_aic34_init(struct snd_soc_codec *codec)
+ snd_soc_dapm_new_controls(codec, aic34_dapm_widgets,
+ ARRAY_SIZE(aic34_dapm_widgets));
+
++ tpa6130a2_add_controls(codec);
++ snd_soc_limit_volume(codec, "TPA6130A2 Headphone Playback Volume", 42);
++
+ /* Set up RX-51 specific audio path audio_map */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ snd_soc_dapm_sync(codec);
+
+- return 0;
++ /* AV jack detection */
++ err = snd_soc_jack_new(card, "AV Jack",
++ SND_JACK_VIDEOOUT, &rx51_av_jack);
++ if (err)
++ return err;
++ err = snd_soc_jack_add_gpios(&rx51_av_jack,
++ ARRAY_SIZE(rx51_av_jack_gpios),
++ rx51_av_jack_gpios);
++
++ return err;
+ }
+
+ /* Digital audio interface glue - connects codec <--> CPU */
+@@ -259,6 +378,11 @@ static int __init rx51_soc_init(void)
+ if (!machine_is_nokia_rx51())
+ return -ENODEV;
+
++ err = gpio_request(RX51_TVOUT_SEL_GPIO, "tvout_sel");
++ if (err)
++ goto err_gpio_tvout_sel;
++ gpio_direction_output(RX51_TVOUT_SEL_GPIO, 0);
++
+ rx51_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!rx51_snd_device) {
+ err = -ENOMEM;
+@@ -277,13 +401,19 @@ static int __init rx51_soc_init(void)
+ err2:
+ platform_device_put(rx51_snd_device);
+ err1:
++ gpio_free(RX51_TVOUT_SEL_GPIO);
++err_gpio_tvout_sel:
+
+ return err;
+ }
+
+ static void __exit rx51_soc_exit(void)
+ {
++ snd_soc_jack_free_gpios(&rx51_av_jack, ARRAY_SIZE(rx51_av_jack_gpios),
++ rx51_av_jack_gpios);
++
+ platform_device_unregister(rx51_snd_device);
++ gpio_free(RX51_TVOUT_SEL_GPIO);
+ }
+
+ module_init(rx51_soc_init);
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-Introduce-and-enable-tsc2005-driver.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-Introduce-and-enable-tsc2005-driver.patch
new file mode 100644
index 0000000000..f7ece39690
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-Introduce-and-enable-tsc2005-driver.patch
@@ -0,0 +1,912 @@
+From a0fe4bb1863c93dd365dab70e07f62f1eb024a76 Mon Sep 17 00:00:00 2001
+From: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+Date: Fri, 12 Mar 2010 16:54:33 +0000
+Subject: [PATCH 02/11] Introduce and enable tsc2005 driver
+
+This patch is combination of following patches:
+
+1. input: touchscreen: introduce tsc2005 driver
+
+Patch-mainline: 2.6.35?
+Discussions: http://www.mail-archive.com/linux-omap@vger.kernel.org/msg26748.html
+
+Introduce a driver for the Texas Instruments TSC2005 touchscreen
+controller (http://focus.ti.com/docs/prod/folders/print/tsc2005.html).
+
+The patch is based on a driver by Lauri Leukkunen, with modifications
+by David Brownell, Phil Carmody, Imre Deak, Hiroshi DOYU, Ari Kauppi,
+Tony Lindgren, Jarkko Nikula, Eero Nurkkala and Roman Tereshonkov.
+
+Signed-off-by: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+[aaro.koskinen@nokia.com: patch description, rebasing & cleanup]
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+Cc: David Brownell <dbrownell@users.sourceforge.net>
+Cc: Phil Carmody <ext-phil.2.carmody@nokia.com>
+Cc: Imre Deak <imre.deak@nokia.com>
+Cc: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+Cc: Ari Kauppi <Ext-Ari.Kauppi@nokia.com>
+Cc: Tony Lindgren <tony@atomide.com>
+Cc: Jarkko Nikula <jhnikula@gmail.com>
+Cc: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+Cc: Roman Tereshonkov <roman.tereshonkov@nokia.com>
+
+2. omap: rx-51: enable tsc2005
+
+Patch-mainline: 2.6.35
+Discussions: http://www.mail-archive.com/linux-omap@vger.kernel.org/msg26749.html
+
+Enable TSC2005 touchscreen driver on the RX-51 board.
+
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+---
+ arch/arm/configs/rx51_defconfig | 1 +
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 46 ++-
+ drivers/input/touchscreen/Kconfig | 11 +
+ drivers/input/touchscreen/Makefile | 1 +
+ drivers/input/touchscreen/tsc2005.c | 678 ++++++++++++++++++++++++++
+ include/linux/spi/tsc2005.h | 41 ++
+ 6 files changed, 776 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/input/touchscreen/tsc2005.c
+ create mode 100644 include/linux/spi/tsc2005.h
+
+index abdf321..f4b1c1c 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -14,6 +14,7 @@
+ #include <linux/input.h>
+ #include <linux/input/matrix_keypad.h>
+ #include <linux/spi/spi.h>
++#include <linux/spi/tsc2005.h>
+ #include <linux/spi/wl12xx.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/twl.h>
+@@ -42,6 +43,9 @@
+ #define RX51_WL1251_POWER_GPIO 87
+ #define RX51_WL1251_IRQ_GPIO 42
+
++#define RX51_TSC2005_RESET_GPIO 104
++#define RX51_TSC2005_IRQ_GPIO 100
++
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
+@@ -50,6 +54,7 @@ enum {
+ };
+
+ static struct wl12xx_platform_data wl1251_pdata;
++static struct tsc2005_platform_data tsc2005_pdata;
+
+ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
+ .turbo_mode = 0,
+@@ -87,10 +92,10 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+ .modalias = "tsc2005",
+ .bus_num = 1,
+ .chip_select = 0,
+- /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
++ .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),
+ .max_speed_hz = 6000000,
+ .controller_data = &tsc2005_mcspi_config,
+- /* .platform_data = &tsc2005_config,*/
++ .platform_data = &tsc2005_pdata,
+ },
+ };
+
+@@ -799,6 +804,42 @@ static inline void board_onenand_init(void)
+
+ #endif
+
++static struct tsc2005_platform_data tsc2005_pdata = {
++ .ts_pressure_max = 2048,
++ .ts_pressure_fudge = 2,
++ .ts_x_max = 4096,
++ .ts_x_fudge = 4,
++ .ts_y_max = 4096,
++ .ts_y_fudge = 7,
++ .ts_x_plate_ohm = 280,
++ .esd_timeout_ms = 8000,
++};
++
++static void rx51_tsc2005_set_reset(bool enable)
++{
++ gpio_set_value(RX51_TSC2005_RESET_GPIO, enable);
++}
++
++static void __init rx51_init_tsc2005(void)
++{
++ int r;
++
++ r = gpio_request(RX51_TSC2005_IRQ_GPIO, "tsc2005 IRQ");
++ if (r >= 0)
++ gpio_direction_input(RX51_TSC2005_IRQ_GPIO);
++ else
++ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 IRQ");
++
++ r = gpio_request(RX51_TSC2005_RESET_GPIO, "tsc2005 reset");
++ if (r >= 0) {
++ gpio_direction_output(RX51_TSC2005_RESET_GPIO, 1);
++ tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
++ } else {
++ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 reset");
++ tsc2005_pdata.esd_timeout_ms = 0;
++ }
++}
++
+ #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+
+ static struct omap_smc91x_platform_data board_smc91x_data = {
+@@ -883,6 +924,7 @@ void __init rx51_peripherals_init(void)
+ board_smc91x_init();
+ rx51_add_gpio_keys();
+ rx51_init_wl1251();
++ rx51_init_tsc2005();
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ omap2_hsmmc_init(mmc);
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index 3b9d5e2..9de7ea8 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -561,6 +561,17 @@ config TOUCHSCREEN_TOUCHIT213
+ To compile this driver as a module, choose M here: the
+ module will be called touchit213.
+
++config TOUCHSCREEN_TSC2005
++ tristate "TSC2005 based touchscreens"
++ depends on SPI_MASTER
++ help
++ Say Y here if you have a TSC2005 based touchscreen.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called tsc2005.
++
+ config TOUCHSCREEN_TSC2007
+ tristate "TSC2007 based touchscreens"
+ depends on I2C
+diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
+index 497964a..317a746 100644
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -34,6 +34,7 @@ obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
++obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
+ obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
+ obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
+new file mode 100644
+index 0000000..27ee361
+--- /dev/null
++++ b/drivers/input/touchscreen/tsc2005.c
+@@ -0,0 +1,678 @@
++/*
++ * TSC2005 touchscreen driver
++ *
++ * Copyright (C) 2006-2010 Nokia Corporation
++ *
++ * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
++ * based on TSC2301 driver by Klaus K. Pedersen <klaus.k.pedersen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/tsc2005.h>
++
++/*
++ * The touchscreen interface operates as follows:
++ *
++ * 1) Pen is pressed against the touchscreen.
++ * 2) TSC2005 performs AD conversion.
++ * 3) After the conversion is done TSC2005 drives DAV line down.
++ * 4) GPIO IRQ is received and tsc2005_irq_thread() is scheduled.
++ * 5) tsc2005_irq_thread() queues up an spi transfer to fetch the x, y, z1, z2
++ * values.
++ * 6) tsc2005_irq_thread() reports coordinates to input layer and sets up
++ * tsc2005_penup_timer() to be called after TSC2005_PENUP_TIME_MS (40ms).
++ * 7) When the penup timer expires, there have not been touch or DAV interrupts
++ * during the last 40ms which means the pen has been lifted.
++ *
++ * ESD recovery via a hardware reset is done if the TSC2005 doesn't respond
++ * after a configurable period (in ms) of activity. If esd_timeout is 0, the
++ * watchdog is disabled.
++ */
++
++/* control byte 1 */
++#define TSC2005_CMD 0x80
++#define TSC2005_CMD_NORMAL 0x00
++#define TSC2005_CMD_STOP 0x01
++#define TSC2005_CMD_12BIT 0x04
++
++/* control byte 0 */
++#define TSC2005_REG_READ 0x0001
++#define TSC2005_REG_PND0 0x0002
++#define TSC2005_REG_X 0x0000
++#define TSC2005_REG_Y 0x0008
++#define TSC2005_REG_Z1 0x0010
++#define TSC2005_REG_Z2 0x0018
++#define TSC2005_REG_TEMP_HIGH 0x0050
++#define TSC2005_REG_CFR0 0x0060
++#define TSC2005_REG_CFR1 0x0068
++#define TSC2005_REG_CFR2 0x0070
++
++/* configuration register 0 */
++#define TSC2005_CFR0_PRECHARGE_276US 0x0040
++#define TSC2005_CFR0_STABTIME_1MS 0x0300
++#define TSC2005_CFR0_CLOCK_1MHZ 0x1000
++#define TSC2005_CFR0_RESOLUTION12 0x2000
++#define TSC2005_CFR0_PENMODE 0x8000
++#define TSC2005_CFR0_INITVALUE (TSC2005_CFR0_STABTIME_1MS | \
++ TSC2005_CFR0_CLOCK_1MHZ | \
++ TSC2005_CFR0_RESOLUTION12 | \
++ TSC2005_CFR0_PRECHARGE_276US | \
++ TSC2005_CFR0_PENMODE)
++
++/* bits common to both read and write of configuration register 0 */
++#define TSC2005_CFR0_RW_MASK 0x3fff
++
++/* configuration register 1 */
++#define TSC2005_CFR1_BATCHDELAY_4MS 0x0003
++#define TSC2005_CFR1_INITVALUE TSC2005_CFR1_BATCHDELAY_4MS
++
++/* configuration register 2 */
++#define TSC2005_CFR2_MAVE_Z 0x0004
++#define TSC2005_CFR2_MAVE_Y 0x0008
++#define TSC2005_CFR2_MAVE_X 0x0010
++#define TSC2005_CFR2_AVG_7 0x0800
++#define TSC2005_CFR2_MEDIUM_15 0x3000
++#define TSC2005_CFR2_INITVALUE (TSC2005_CFR2_MAVE_X | \
++ TSC2005_CFR2_MAVE_Y | \
++ TSC2005_CFR2_MAVE_Z | \
++ TSC2005_CFR2_MEDIUM_15 | \
++ TSC2005_CFR2_AVG_7)
++
++#define MAX_12BIT 0xfff
++#define TSC2005_SPI_MAX_SPEED_HZ 10000000
++#define TSC2005_PENUP_TIME_MS 40
++
++struct tsc2005_spi_rd {
++ struct spi_transfer spi_xfer;
++ u32 spi_tx;
++ u32 spi_rx;
++};
++
++struct tsc2005 {
++ struct spi_device *spi;
++
++ struct spi_message spi_read_msg;
++ struct tsc2005_spi_rd spi_x;
++ struct tsc2005_spi_rd spi_y;
++ struct tsc2005_spi_rd spi_z1;
++ struct tsc2005_spi_rd spi_z2;
++
++ struct input_dev *idev;
++ char phys[32];
++
++ struct mutex mutex;
++
++ struct timer_list penup_timer;
++ struct work_struct penup_work;
++
++ unsigned int esd_timeout;
++ struct timer_list esd_timer;
++ struct work_struct esd_work;
++
++ unsigned int x_plate_ohm;
++
++ bool disabled;
++ unsigned int disable_depth;
++
++ void (*set_reset)(bool enable);
++};
++
++static void tsc2005_cmd(struct tsc2005 *ts, u8 cmd)
++{
++ u8 tx;
++ struct spi_message msg;
++ struct spi_transfer xfer = { 0 };
++
++ tx = TSC2005_CMD | TSC2005_CMD_12BIT | cmd;
++
++ xfer.tx_buf = &tx;
++ xfer.rx_buf = NULL;
++ xfer.len = 1;
++ xfer.bits_per_word = 8;
++
++ spi_message_init(&msg);
++ spi_message_add_tail(&xfer, &msg);
++ spi_sync(ts->spi, &msg);
++}
++
++static void tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
++{
++ u32 tx;
++ struct spi_message msg;
++ struct spi_transfer xfer = { 0 };
++
++ tx = (reg | TSC2005_REG_PND0) << 16;
++ tx |= value;
++
++ xfer.tx_buf = &tx;
++ xfer.rx_buf = NULL;
++ xfer.len = 4;
++ xfer.bits_per_word = 24;
++
++ spi_message_init(&msg);
++ spi_message_add_tail(&xfer, &msg);
++ spi_sync(ts->spi, &msg);
++}
++
++static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
++{
++ rd->spi_tx = (reg | TSC2005_REG_READ) << 16;
++ rd->spi_xfer.tx_buf = &rd->spi_tx;
++ rd->spi_xfer.rx_buf = &rd->spi_rx;
++ rd->spi_xfer.len = 4;
++ rd->spi_xfer.bits_per_word = 24;
++ rd->spi_xfer.cs_change = !last;
++}
++
++static void tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
++{
++ struct spi_message msg;
++ struct tsc2005_spi_rd spi_rd = { { 0 }, 0, 0 };
++
++ tsc2005_setup_read(&spi_rd, reg, 1);
++
++ spi_message_init(&msg);
++ spi_message_add_tail(&spi_rd.spi_xfer, &msg);
++ spi_sync(ts->spi, &msg);
++ *value = spi_rd.spi_rx;
++}
++
++static void tsc2005_update_pen_state(struct tsc2005 *ts,
++ int x, int y, int pressure)
++{
++ if (pressure) {
++ input_report_abs(ts->idev, ABS_X, x);
++ input_report_abs(ts->idev, ABS_Y, y);
++ }
++ input_report_abs(ts->idev, ABS_PRESSURE, pressure);
++ input_report_key(ts->idev, BTN_TOUCH, !!pressure);
++ input_sync(ts->idev);
++ dev_dbg(&ts->spi->dev, "point(%4d,%4d), pressure (%4d)\n", x, y,
++ pressure);
++}
++
++static irqreturn_t tsc2005_irq_handler(int irq, void *dev_id)
++{
++ struct tsc2005 *ts = dev_id;
++
++ /* update the penup timer only if it's pending */
++ mod_timer_pending(&ts->penup_timer,
++ jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
++
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
++{
++ struct tsc2005 *ts = _ts;
++ unsigned int pressure;
++ u32 x;
++ u32 y;
++ u32 z1;
++ u32 z2;
++
++ mutex_lock(&ts->mutex);
++
++ if (unlikely(ts->disable_depth))
++ goto out;
++
++ /* read the coordinates */
++ spi_sync(ts->spi, &ts->spi_read_msg);
++ x = ts->spi_x.spi_rx;
++ y = ts->spi_y.spi_rx;
++ z1 = ts->spi_z1.spi_rx;
++ z2 = ts->spi_z2.spi_rx;
++
++ /* validate position */
++ if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
++ goto out;
++
++ /* skip coords if the pressure components are out of range */
++ if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
++ goto out;
++
++ /* compute touch pressure resistance using equation #1 */
++ pressure = x * (z2 - z1) / z1;
++ pressure = pressure * ts->x_plate_ohm / 4096;
++ if (unlikely(pressure > MAX_12BIT))
++ goto out;
++
++ tsc2005_update_pen_state(ts, x, y, pressure);
++
++ /* set the penup timer */
++ mod_timer(&ts->penup_timer,
++ jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
++
++ if (!ts->esd_timeout)
++ goto out;
++
++ /* update the watchdog timer */
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++
++out:
++ mutex_unlock(&ts->mutex);
++ return IRQ_HANDLED;
++}
++
++static void tsc2005_penup_timer(unsigned long data)
++{
++ struct tsc2005 *ts = (struct tsc2005 *)data;
++
++ schedule_work(&ts->penup_work);
++}
++
++static void tsc2005_penup_work(struct work_struct *work)
++{
++ struct tsc2005 *ts = container_of(work, struct tsc2005, penup_work);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_update_pen_state(ts, 0, 0, 0);
++ mutex_unlock(&ts->mutex);
++}
++
++static void tsc2005_start_scan(struct tsc2005 *ts)
++{
++ tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
++ tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
++ tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
++ tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
++}
++
++static void tsc2005_stop_scan(struct tsc2005 *ts)
++{
++ tsc2005_cmd(ts, TSC2005_CMD_STOP);
++}
++
++/* must be called with mutex held */
++static void tsc2005_disable(struct tsc2005 *ts)
++{
++ if (ts->disable_depth++ != 0)
++ return;
++ disable_irq(ts->spi->irq);
++ if (ts->esd_timeout)
++ del_timer_sync(&ts->esd_timer);
++ del_timer_sync(&ts->penup_timer);
++ tsc2005_stop_scan(ts);
++}
++
++/* must be called with mutex held */
++static void tsc2005_enable(struct tsc2005 *ts)
++{
++ if (--ts->disable_depth != 0)
++ return;
++ tsc2005_start_scan(ts);
++ enable_irq(ts->spi->irq);
++ if (!ts->esd_timeout)
++ return;
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++}
++
++static ssize_t tsc2005_disable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct tsc2005 *ts = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%u\n", ts->disabled);
++}
++
++static ssize_t tsc2005_disable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct tsc2005 *ts = dev_get_drvdata(dev);
++ unsigned long res;
++ int i;
++
++ if (strict_strtoul(buf, 10, &res) < 0)
++ return -EINVAL;
++ i = res ? 1 : 0;
++
++ mutex_lock(&ts->mutex);
++ if (i == ts->disabled)
++ goto out;
++ ts->disabled = i;
++ if (i)
++ tsc2005_disable(ts);
++ else
++ tsc2005_enable(ts);
++out:
++ mutex_unlock(&ts->mutex);
++ return count;
++}
++static DEVICE_ATTR(disable, 0664, tsc2005_disable_show, tsc2005_disable_store);
++
++static ssize_t tsc2005_selftest_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct tsc2005 *ts = dev_get_drvdata(dev);
++ u16 temp_high;
++ u16 temp_high_orig;
++ u16 temp_high_test;
++ unsigned int result;
++
++ if (!ts->set_reset) {
++ dev_warn(&ts->spi->dev,
++ "unable to selftest: no reset function\n");
++ result = 0;
++ goto out;
++ }
++
++ mutex_lock(&ts->mutex);
++
++ /*
++ * Test TSC2005 communications via temp high register.
++ */
++ tsc2005_disable(ts);
++ result = 1;
++ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
++ temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
++ tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
++ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
++ if (temp_high != temp_high_test) {
++ dev_warn(dev, "selftest failed: %d != %d\n",
++ temp_high, temp_high_test);
++ result = 0;
++ }
++
++ /* hardware reset */
++ ts->set_reset(0);
++ msleep(1); /* only 10us required */
++ ts->set_reset(1);
++ tsc2005_enable(ts);
++
++ /* test that the reset really happened */
++ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
++ if (temp_high != temp_high_orig) {
++ dev_warn(dev, "selftest failed after reset: %d != %d\n",
++ temp_high, temp_high_orig);
++ result = 0;
++ }
++
++ mutex_unlock(&ts->mutex);
++
++out:
++ return sprintf(buf, "%u\n", result);
++}
++static DEVICE_ATTR(selftest, S_IRUGO, tsc2005_selftest_show, NULL);
++
++static void tsc2005_esd_timer(unsigned long data)
++{
++ struct tsc2005 *ts = (struct tsc2005 *)data;
++
++ schedule_work(&ts->esd_work);
++}
++
++static void tsc2005_esd_work(struct work_struct *work)
++{
++ struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work);
++ u16 r;
++
++ mutex_lock(&ts->mutex);
++
++ if (ts->disable_depth)
++ goto out;
++
++ /*
++ * If we cannot read our known value from configuration register 0 then
++ * reset the controller as if from power-up and start scanning again.
++ */
++ tsc2005_read(ts, TSC2005_REG_CFR0, &r);
++ if ((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK) {
++ dev_info(&ts->spi->dev, "TSC2005 not responding - resetting\n");
++ ts->set_reset(0);
++ msleep(1); /* only 10us required */
++ ts->set_reset(1);
++ tsc2005_start_scan(ts);
++ }
++
++ /* re-arm the watchdog */
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++
++out:
++ mutex_unlock(&ts->mutex);
++}
++
++static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
++{
++ tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, 0);
++ tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, 0);
++ tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, 0);
++ tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, 1);
++
++ spi_message_init(&ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
++}
++
++static struct attribute *tsc2005_attrs[] = {
++ &dev_attr_disable.attr,
++ &dev_attr_selftest.attr,
++ NULL
++};
++
++static struct attribute_group tsc2005_attr_group = {
++ .attrs = tsc2005_attrs,
++};
++
++static int __devinit tsc2005_setup(struct tsc2005 *ts,
++ struct tsc2005_platform_data *pdata)
++{
++ int r;
++ int fudge_x;
++ int fudge_y;
++ int fudge_p;
++ int p_max;
++ int x_max;
++ int y_max;
++
++ mutex_init(&ts->mutex);
++
++ tsc2005_setup_spi_xfer(ts);
++
++ init_timer(&ts->penup_timer);
++ setup_timer(&ts->penup_timer, tsc2005_penup_timer, (unsigned long)ts);
++ INIT_WORK(&ts->penup_work, tsc2005_penup_work);
++
++ fudge_x = pdata->ts_x_fudge ? : 0;
++ fudge_y = pdata->ts_y_fudge ? : 0;
++ fudge_p = pdata->ts_pressure_fudge ? : 0;
++ x_max = pdata->ts_x_max ? : MAX_12BIT;
++ y_max = pdata->ts_y_max ? : MAX_12BIT;
++ p_max = pdata->ts_pressure_max ? : MAX_12BIT;
++ ts->x_plate_ohm = pdata->ts_x_plate_ohm ? : 0;
++ ts->esd_timeout = pdata->esd_timeout_ms;
++ ts->set_reset = pdata->set_reset;
++
++ ts->idev = input_allocate_device();
++ if (ts->idev == NULL)
++ return -ENOMEM;
++ ts->idev->name = "TSC2005 touchscreen";
++ snprintf(ts->phys, sizeof(ts->phys), "%s/input-ts",
++ dev_name(&ts->spi->dev));
++ ts->idev->phys = ts->phys;
++ ts->idev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
++ ts->idev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE);
++ ts->idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++
++ input_set_abs_params(ts->idev, ABS_X, 0, x_max, fudge_x, 0);
++ input_set_abs_params(ts->idev, ABS_Y, 0, y_max, fudge_y, 0);
++ input_set_abs_params(ts->idev, ABS_PRESSURE, 0, p_max, fudge_p, 0);
++
++ r = request_threaded_irq(ts->spi->irq, tsc2005_irq_handler,
++ tsc2005_irq_thread, IRQF_TRIGGER_RISING,
++ "tsc2005", ts);
++ if (r) {
++ dev_err(&ts->spi->dev, "request_threaded_irq(): %d\n", r);
++ goto err1;
++ }
++ set_irq_wake(ts->spi->irq, 1);
++
++ r = input_register_device(ts->idev);
++ if (r) {
++ dev_err(&ts->spi->dev, "input_register_device(): %d\n", r);
++ goto err2;
++ }
++
++ r = sysfs_create_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
++ if (r)
++ dev_warn(&ts->spi->dev, "sysfs entry creation failed: %d\n", r);
++
++ tsc2005_start_scan(ts);
++
++ if (!ts->esd_timeout || !ts->set_reset)
++ goto done;
++
++ /* start the optional ESD watchdog */
++ setup_timer(&ts->esd_timer, tsc2005_esd_timer, (unsigned long)ts);
++ INIT_WORK(&ts->esd_work, tsc2005_esd_work);
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++
++done:
++ return 0;
++
++err2:
++ free_irq(ts->spi->irq, ts);
++
++err1:
++ input_free_device(ts->idev);
++ return r;
++}
++
++static int __devinit tsc2005_probe(struct spi_device *spi)
++{
++ struct tsc2005_platform_data *pdata = spi->dev.platform_data;
++ struct tsc2005 *ts;
++ int r;
++
++ if (spi->irq < 0) {
++ dev_dbg(&spi->dev, "no irq\n");
++ return -ENODEV;
++ }
++
++ if (!pdata) {
++ dev_dbg(&spi->dev, "no platform data\n");
++ return -ENODEV;
++ }
++
++ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
++ if (ts == NULL)
++ return -ENOMEM;
++
++ dev_set_drvdata(&spi->dev, ts);
++ ts->spi = spi;
++ spi->dev.power.power_state = PMSG_ON;
++ spi->mode = SPI_MODE_0;
++ spi->bits_per_word = 8;
++ if (!spi->max_speed_hz)
++ spi->max_speed_hz = TSC2005_SPI_MAX_SPEED_HZ;
++ spi_setup(spi);
++
++ r = tsc2005_setup(ts, pdata);
++ if (r)
++ kfree(ts);
++ return r;
++}
++
++static int __devexit tsc2005_remove(struct spi_device *spi)
++{
++ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_disable(ts);
++ mutex_unlock(&ts->mutex);
++
++ if (ts->esd_timeout)
++ del_timer_sync(&ts->esd_timer);
++ del_timer_sync(&ts->penup_timer);
++
++ flush_work(&ts->esd_work);
++ flush_work(&ts->penup_work);
++
++ sysfs_remove_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
++ free_irq(ts->spi->irq, ts);
++ input_unregister_device(ts->idev);
++ kfree(ts);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int tsc2005_suspend(struct spi_device *spi, pm_message_t mesg)
++{
++ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_disable(ts);
++ mutex_unlock(&ts->mutex);
++
++ return 0;
++}
++
++static int tsc2005_resume(struct spi_device *spi)
++{
++ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_enable(ts);
++ mutex_unlock(&ts->mutex);
++
++ return 0;
++}
++#endif
++
++static struct spi_driver tsc2005_driver = {
++ .driver = {
++ .name = "tsc2005",
++ .owner = THIS_MODULE,
++ },
++#ifdef CONFIG_PM
++ .suspend = tsc2005_suspend,
++ .resume = tsc2005_resume,
++#endif
++ .probe = tsc2005_probe,
++ .remove = __devexit_p(tsc2005_remove),
++};
++
++static int __init tsc2005_init(void)
++{
++ printk(KERN_INFO "TSC2005 driver initializing\n");
++ return spi_register_driver(&tsc2005_driver);
++}
++module_init(tsc2005_init);
++
++static void __exit tsc2005_exit(void)
++{
++ spi_unregister_driver(&tsc2005_driver);
++}
++module_exit(tsc2005_exit);
++
++MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:tsc2005");
+diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
+new file mode 100644
+index 0000000..d9b0c84
+--- /dev/null
++++ b/include/linux/spi/tsc2005.h
+@@ -0,0 +1,41 @@
++/*
++ * This file is part of TSC2005 touchscreen driver
++ *
++ * Copyright (C) 2009-2010 Nokia Corporation
++ *
++ * Contact: Aaro Koskinen <aaro.koskinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef _LINUX_SPI_TSC2005_H
++#define _LINUX_SPI_TSC2005_H
++
++#include <linux/types.h>
++
++struct tsc2005_platform_data {
++ int ts_pressure_max;
++ int ts_pressure_fudge;
++ int ts_x_max;
++ int ts_x_fudge;
++ int ts_y_max;
++ int ts_y_fudge;
++ int ts_x_plate_ohm;
++ unsigned int esd_timeout_ms;
++ void (*set_reset)(bool enable);
++};
++
++#endif
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery.patch
new file mode 100644
index 0000000000..0f0020d991
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery.patch
@@ -0,0 +1,87 @@
+From: Matthew Garrett <mjg@redhat.com>
+Subject: [PATCH] acpi: Update battery information on notification 0x81
+Message-Id: <1282588802-2962-1-git-send-email-mjg@redhat.com>
+
+A notification event 0x81 from an ACPI battery device requires us to
+re-read the battery information structure. Do so, and if the battery's
+reporting units have changed (as is the case on some Thinkpads) destroy
+and recreate the battery in order to populate the fields correctly.
+
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+---
+ drivers/acpi/battery.c | 22 +++++++++++++++++-----
+ 1 files changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index dc58402..69638c4 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -562,9 +562,10 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
+ }
+ }
+
+-static int acpi_battery_update(struct acpi_battery *battery)
++static int acpi_battery_update(struct acpi_battery *battery, bool get_info)
+ {
+ int result, old_present = acpi_battery_present(battery);
++ int old_power_unit = battery->power_unit;
+ result = acpi_battery_get_status(battery);
+ if (result)
+ return result;
+@@ -587,6 +588,16 @@ static int acpi_battery_update(struct acpi_battery *battery)
+ if (!battery->bat.dev)
+ sysfs_add_battery(battery);
+ #endif
++ if (get_info) {
++ acpi_battery_get_info(battery);
++#ifdef CONFIG_ACPI_SYSFS_POWER
++ if (old_power_unit != battery->power_unit) {
++ /* The battery has changed its reporting units */
++ sysfs_remove_battery(battery);
++ sysfs_add_battery(battery);
++ }
++#endif
++ }
+ return acpi_battery_get_state(battery);
+ }
+
+@@ -762,7 +773,7 @@ static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+ static int acpi_battery_read(int fid, struct seq_file *seq)
+ {
+ struct acpi_battery *battery = seq->private;
+- int result = acpi_battery_update(battery);
++ int result = acpi_battery_update(battery, false);
+ return acpi_print_funcs[fid](seq, result);
+ }
+
+@@ -877,7 +888,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
+ #ifdef CONFIG_ACPI_SYSFS_POWER
+ old = battery->bat.dev;
+ #endif
+- acpi_battery_update(battery);
++ acpi_battery_update(battery, (event == ACPI_BATTERY_NOTIFY_INFO ? true
++ : false));
+ acpi_bus_generate_proc_event(device, event,
+ acpi_battery_present(battery));
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+@@ -908,7 +920,7 @@ static int acpi_battery_add(struct acpi_device *device)
+ if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
+ "_BIX", &handle)))
+ set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+- acpi_battery_update(battery);
++ acpi_battery_update(battery, false);
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_battery_add_fs(device);
+ #endif
+@@ -951,7 +963,7 @@ static int acpi_battery_resume(struct acpi_device *device)
+ return -EINVAL;
+ battery = acpi_driver_data(device);
+ battery->update_time = 0;
+- acpi_battery_update(battery);
++ acpi_battery_update(battery, true);
+ return 0;
+ }
+
+--
+1.7.2.1
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery2.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery2.patch
new file mode 100644
index 0000000000..08798e1ac8
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-battery2.patch
@@ -0,0 +1,33 @@
+From: Matthew Garrett <mjg@redhat.com>
+To: linux-acpi@vger.kernel.org
+Subject: [PATCH] ACPI: Don't report current_now if battery reports in mWh
+Date: Mon, 23 Aug 2010 16:25:32 -0400
+Message-Id: <1282595132-5026-1-git-send-email-mjg@redhat.com>
+
+ACPI batteries can report in units of either current or energy. Right
+now we expose the current_now file even if the battery is reporting
+energy units, resulting in a file that should contain mA instead
+containing mW. Don't expose this value unless the battery is reporting
+current.
+
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+---
+ drivers/acpi/battery.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index dc58402..9841720 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+- POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+--
+1.7.2.1
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-fix-unprotected-acess-to-task-credentials.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-fix-unprotected-acess-to-task-credentials.patch
new file mode 100644
index 0000000000..479faa35c1
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-fix-unprotected-acess-to-task-credentials.patch
@@ -0,0 +1,98 @@
+From f362b73244fb16ea4ae127ced1467dd8adaa7733 Mon Sep 17 00:00:00 2001
+From: Daniel J Blueman <daniel.blueman@gmail.com>
+Date: Tue, 17 Aug 2010 23:56:55 +0100
+Subject: [PATCH] Fix unprotected access to task credentials in waitid()
+
+Using a program like the following:
+
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+
+ int main() {
+ id_t id;
+ siginfo_t infop;
+ pid_t res;
+
+ id = fork();
+ if (id == 0) { sleep(1); exit(0); }
+ kill(id, SIGSTOP);
+ alarm(1);
+ waitid(P_PID, id, &infop, WCONTINUED);
+ return 0;
+ }
+
+to call waitid() on a stopped process results in access to the child task's
+credentials without the RCU read lock being held - which may be replaced in the
+meantime - eliciting the following warning:
+
+ ===================================================
+ [ INFO: suspicious rcu_dereference_check() usage. ]
+ ---------------------------------------------------
+ kernel/exit.c:1460 invoked rcu_dereference_check() without protection!
+
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 1, debug_locks = 1
+ 2 locks held by waitid02/22252:
+ #0: (tasklist_lock){.?.?..}, at: [<ffffffff81061ce5>] do_wait+0xc5/0x310
+ #1: (&(&sighand->siglock)->rlock){-.-...}, at: [<ffffffff810611da>]
+ wait_consider_task+0x19a/0xbe0
+
+ stack backtrace:
+ Pid: 22252, comm: waitid02 Not tainted 2.6.35-323cd+ #3
+ Call Trace:
+ [<ffffffff81095da4>] lockdep_rcu_dereference+0xa4/0xc0
+ [<ffffffff81061b31>] wait_consider_task+0xaf1/0xbe0
+ [<ffffffff81061d15>] do_wait+0xf5/0x310
+ [<ffffffff810620b6>] sys_waitid+0x86/0x1f0
+ [<ffffffff8105fce0>] ? child_wait_callback+0x0/0x70
+ [<ffffffff81003282>] system_call_fastpath+0x16/0x1b
+
+This is fixed by holding the RCU read lock in wait_task_continued() to ensure
+that the task's current credentials aren't destroyed between us reading the
+cred pointer and us reading the UID from those credentials.
+
+Furthermore, protect wait_task_stopped() in the same way.
+
+We don't need to keep holding the RCU read lock once we've read the UID from
+the credentials as holding the RCU read lock doesn't stop the target task from
+changing its creds under us - so the credentials may be outdated immediately
+after we've read the pointer, lock or no lock.
+
+Signed-off-by: Daniel J Blueman <daniel.blueman@gmail.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ kernel/exit.c | 5 ++---
+ 1 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 671ed56..0312022 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo,
+ if (!unlikely(wo->wo_flags & WNOWAIT))
+ *p_code = 0;
+
+- /* don't need the RCU readlock here as we're holding a spinlock */
+- uid = __task_cred(p)->uid;
++ uid = task_uid(p);
+ unlock_sig:
+ spin_unlock_irq(&p->sighand->siglock);
+ if (!exit_code)
+@@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
+ }
+ if (!unlikely(wo->wo_flags & WNOWAIT))
+ p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
+- uid = __task_cred(p)->uid;
++ uid = task_uid(p);
+ spin_unlock_irq(&p->sighand->siglock);
+
+ pid = task_pid_vnr(p);
+--
+1.7.2.1
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-lis3lv02d-acceleromet.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-lis3lv02d-acceleromet.patch
new file mode 100644
index 0000000000..4628b39b14
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-lis3lv02d-acceleromet.patch
@@ -0,0 +1,140 @@
+From 7ac65416a444cf52af9e16b2ace3f26765f9c7a1 Mon Sep 17 00:00:00 2001
+From: Ameya Palande <ameya.palande@nokia.com>
+Date: Thu, 17 Jun 2010 18:18:42 +0300
+Subject: [PATCH 07/11] omap: rx51: Platform support for lis3lv02d accelerometer
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 88 +++++++++++++++++++++++++-
+ 1 files changed, 87 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index bec0d39..8989119 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -37,6 +37,7 @@
+ #include <plat/serial.h>
+
+ #include <../drivers/staging/iio/light/tsl2563.h>
++#include <linux/lis3lv02d.h>
+
+ #include "mux.h"
+ #include "hsmmc.h"
+@@ -54,6 +55,9 @@
+ #define RX51_HCI_H4P_HOSTWU_GPIO 101
+ #define RX51_HCI_H4P_BTWU_GPIO 37
+
++#define LIS302_IRQ1_GPIO 181
++#define LIS302_IRQ2_GPIO 180 /* Not yet in use */
++
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
+@@ -64,6 +68,77 @@ enum {
+ static struct wl12xx_platform_data wl1251_pdata;
+ static struct tsc2005_platform_data tsc2005_pdata;
+
++#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
++static int lis302_setup(void)
++{
++ int err;
++ int irq1 = LIS302_IRQ1_GPIO;
++ int irq2 = LIS302_IRQ2_GPIO;
++
++ /* gpio for interrupt pin 1 */
++ err = gpio_request(irq1, "lis3lv02dl_irq1");
++ if (err) {
++ printk(KERN_ERR "lis3lv02dl: gpio request failed\n");
++ goto out;
++ }
++
++ /* gpio for interrupt pin 2 */
++ err = gpio_request(irq2, "lis3lv02dl_irq2");
++ if (err) {
++ gpio_free(irq1);
++ printk(KERN_ERR "lis3lv02dl: gpio request failed\n");
++ goto out;
++ }
++
++ gpio_direction_input(irq1);
++ gpio_direction_input(irq2);
++
++out:
++ return err;
++}
++
++static int lis302_release(void)
++{
++ gpio_free(LIS302_IRQ1_GPIO);
++ gpio_free(LIS302_IRQ2_GPIO);
++
++ return 0;
++}
++
++static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
++ .click_flags = LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y |
++ LIS3_CLICK_SINGLE_Z,
++ /* Limits are 0.5g * value */
++ .click_thresh_x = 8,
++ .click_thresh_y = 8,
++ .click_thresh_z = 10,
++ /* Click must be longer than time limit */
++ .click_time_limit = 9,
++ /* Kind of debounce filter */
++ .click_latency = 50,
++
++ /* Limits for all axis. millig-value / 18 to get HW values */
++ .wakeup_flags = LIS3_WAKEUP_X_HI | LIS3_WAKEUP_Y_HI,
++ .wakeup_thresh = 800 / 18,
++ .wakeup_flags2 = LIS3_WAKEUP_Z_HI ,
++ .wakeup_thresh2 = 900 / 18,
++
++ .hipass_ctrl = LIS3_HIPASS1_DISABLE | LIS3_HIPASS2_DISABLE,
++
++ /* Interrupt line 2 for click detection, line 1 for thresholds */
++ .irq_cfg = LIS3_IRQ2_CLICK | LIS3_IRQ1_FF_WU_12,
++
++ .axis_x = LIS3_DEV_X,
++ .axis_y = LIS3_INV_DEV_Y,
++ .axis_z = LIS3_INV_DEV_Z,
++ .setup_resources = lis302_setup,
++ .release_resources = lis302_release,
++ .st_min_limits = {-32, 3, 3},
++ .st_max_limits = {-3, 32, 32},
++ .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
++};
++#endif
++
+ #if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
+ static struct tsl2563_platform_data rx51_tsl2563_platform_data = {
+ .cover_comp_gain = 16,
+@@ -747,6 +822,16 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ #endif
+ };
+
++static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
++#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
++ {
++ I2C_BOARD_INFO("lis3lv02d", 0x1d),
++ .platform_data = &rx51_lis3lv02d_data,
++ .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
++ },
++#endif
++};
++
+ static int __init rx51_i2c_init(void)
+ {
+ if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) ||
+@@ -762,7 +847,8 @@ static int __init rx51_i2c_init(void)
+ ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
+ omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
+ ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
+- omap_register_i2c_bus(3, 400, NULL, 0);
++ omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
++ ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
+ return 0;
+ }
+
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-tsl2563-ALS.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-tsl2563-ALS.patch
new file mode 100644
index 0000000000..559932f97e
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-omap-rx51-Platform-support-for-tsl2563-ALS.patch
@@ -0,0 +1,52 @@
+From f0bff13906a6332549403dcdd5cccf952e4265ec Mon Sep 17 00:00:00 2001
+From: Ameya Palande <ameya.palande@nokia.com>
+Date: Tue, 8 Jun 2010 14:08:55 +0300
+Subject: [PATCH 06/11] omap: rx51: Platform support for tsl2563 ALS
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 14 ++++++++++++++
+ 1 files changed, 14 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index e8d8ff5..bec0d39 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -36,6 +36,8 @@
+ #include <plat/gpmc-smc91x.h>
+ #include <plat/serial.h>
+
++#include <../drivers/staging/iio/light/tsl2563.h>
++
+ #include "mux.h"
+ #include "hsmmc.h"
+
+@@ -62,6 +64,12 @@ enum {
+ static struct wl12xx_platform_data wl1251_pdata;
+ static struct tsc2005_platform_data tsc2005_pdata;
+
++#if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
++static struct tsl2563_platform_data rx51_tsl2563_platform_data = {
++ .cover_comp_gain = 16,
++};
++#endif
++
+ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+@@ -731,6 +739,12 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ },
++#if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
++ {
++ I2C_BOARD_INFO("tsl2563", 0x29),
++ .platform_data = &rx51_tsl2563_platform_data,
++ },
++#endif
+ };
+
+ static int __init rx51_i2c_init(void)
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-powertop-timer-tracing.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-powertop-timer-tracing.patch
new file mode 100644
index 0000000000..1d8e21b99c
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-powertop-timer-tracing.patch
@@ -0,0 +1,64 @@
+From fe9633af11395d339880417439a1931bb9e7e493 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Wed, 18 Aug 2010 15:28:59 -0400
+Subject: [PATCH] tracing: Make timer tracing actually useful
+
+PowerTOP would like to be able to trace timers.
+Unfortunately, the current timer tracing is not very useful, the actual
+timer function is not recorded in the trace at the start of timer execution.
+
+Although this is recorded for timer "start" time (when it gets armed), this
+is not useful; most timers get started early, and a tracer like PowerTOP
+will never see this event, but will only see the actual running of the timer.
+
+This patch just adds the function to the timer tracing; I've verified with
+PowerTOP that now it can get useful information about timers.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ include/trace/events/timer.h | 8 ++++++--
+ 1 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index c624126..94511c8 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -81,14 +81,16 @@ TRACE_EVENT(timer_expire_entry,
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( unsigned long, now )
++ __field( void *, function )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->now = jiffies;
++ __entry->function = timer->function;
+ ),
+
+- TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
++ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
+ );
+
+ /**
+@@ -200,14 +202,16 @@ TRACE_EVENT(hrtimer_expire_entry,
+ TP_STRUCT__entry(
+ __field( void *, hrtimer )
+ __field( s64, now )
++ __field( void *, function )
+ ),
+
+ TP_fast_assign(
+ __entry->hrtimer = hrtimer;
+ __entry->now = now->tv64;
++ __entry->function = hrtimer->function;
+ ),
+
+- TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
++ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
+ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
+ );
+
+--
+1.6.1.3
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-tidspbridge.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-tidspbridge.patch
new file mode 100644
index 0000000000..c51d8a05c5
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-tidspbridge.patch
@@ -0,0 +1,51086 @@
+From e14ce7b80df7964074618c61e102e42c07490088 Mon Sep 17 00:00:00 2001
+From: Omar Ramirez Luna <omar.ramirez@ti.com>
+Date: Wed, 23 Jun 2010 16:01:55 +0300
+Subject: [PATCH 2/2] tidspbridge driver
+
+top commit:
+OUT-OF-TREE: tidspbridge: move platform_device_register under mach-omap2
+
+taken from:
+git://dev.omapzoom.org/pub/scm/tidspbridge/kernel-dspbridge.git
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ arch/arm/mach-omap2/Makefile | 4 +
+ arch/arm/mach-omap2/dspbridge.c | 109 +
+ arch/arm/mach-omap2/io.c | 3 +
+ drivers/staging/Kconfig | 2 +
+ drivers/staging/Makefile | 1 +
+ .../staging/tidspbridge/Documentation/CONTRIBUTORS | 45 +
+ drivers/staging/tidspbridge/Documentation/README | 70 +
+ .../staging/tidspbridge/Documentation/error-codes | 157 +
+ drivers/staging/tidspbridge/Kconfig | 90 +
+ drivers/staging/tidspbridge/Makefile | 34 +
+ drivers/staging/tidspbridge/TODO | 18 +
+ drivers/staging/tidspbridge/core/_cmm.h | 45 +
+ drivers/staging/tidspbridge/core/_deh.h | 35 +
+ drivers/staging/tidspbridge/core/_msg_sm.h | 142 +
+ drivers/staging/tidspbridge/core/_tiomap.h | 371 +++
+ drivers/staging/tidspbridge/core/_tiomap_pwr.h | 85 +
+ drivers/staging/tidspbridge/core/chnl_sm.c | 1014 ++++++
+ drivers/staging/tidspbridge/core/dsp-clock.c | 422 +++
+ drivers/staging/tidspbridge/core/io_sm.c | 2333 ++++++++++++++
+ drivers/staging/tidspbridge/core/msg_sm.c | 673 ++++
+ drivers/staging/tidspbridge/core/tiomap3430.c | 1802 +++++++++++
+ drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 550 ++++
+ drivers/staging/tidspbridge/core/tiomap_io.c | 455 +++
+ drivers/staging/tidspbridge/core/tiomap_io.h | 104 +
+ drivers/staging/tidspbridge/core/ue_deh.c | 273 ++
+ drivers/staging/tidspbridge/core/wdt.c | 150 +
+ drivers/staging/tidspbridge/dynload/cload.c | 1953 ++++++++++++
+ .../staging/tidspbridge/dynload/dload_internal.h | 344 +++
+ drivers/staging/tidspbridge/dynload/doff.h | 354 +++
+ drivers/staging/tidspbridge/dynload/getsection.c | 407 +++
+ drivers/staging/tidspbridge/dynload/header.h | 49 +
+ drivers/staging/tidspbridge/dynload/module_list.h | 159 +
+ drivers/staging/tidspbridge/dynload/params.h | 226 ++
+ drivers/staging/tidspbridge/dynload/reloc.c | 484 +++
+ drivers/staging/tidspbridge/dynload/reloc_table.h | 102 +
+ .../tidspbridge/dynload/reloc_table_c6000.c | 257 ++
+ drivers/staging/tidspbridge/dynload/tramp.c | 1143 +++++++
+ .../tidspbridge/dynload/tramp_table_c6000.c | 164 +
+ drivers/staging/tidspbridge/gen/gb.c | 167 +
+ drivers/staging/tidspbridge/gen/gh.c | 215 ++
+ drivers/staging/tidspbridge/gen/gs.c | 89 +
+ drivers/staging/tidspbridge/gen/uuidutil.c | 113 +
+ drivers/staging/tidspbridge/hw/EasiGlobal.h | 41 +
+ drivers/staging/tidspbridge/hw/MMUAccInt.h | 76 +
+ drivers/staging/tidspbridge/hw/MMURegAcM.h | 225 ++
+ drivers/staging/tidspbridge/hw/hw_defs.h | 58 +
+ drivers/staging/tidspbridge/hw/hw_mmu.c | 562 ++++
+ drivers/staging/tidspbridge/hw/hw_mmu.h | 163 +
+ .../tidspbridge/include/dspbridge/_chnl_sm.h | 181 ++
+ .../tidspbridge/include/dspbridge/brddefs.h | 39 +
+ .../staging/tidspbridge/include/dspbridge/cfg.h | 222 ++
+ .../tidspbridge/include/dspbridge/cfgdefs.h | 81 +
+ .../staging/tidspbridge/include/dspbridge/chnl.h | 130 +
+ .../tidspbridge/include/dspbridge/chnldefs.h | 66 +
+ .../tidspbridge/include/dspbridge/chnlpriv.h | 98 +
+ .../staging/tidspbridge/include/dspbridge/clk.h | 101 +
+ .../staging/tidspbridge/include/dspbridge/cmm.h | 386 +++
+ .../tidspbridge/include/dspbridge/cmmdefs.h | 105 +
+ .../staging/tidspbridge/include/dspbridge/cod.h | 369 +++
+ .../staging/tidspbridge/include/dspbridge/dbc.h | 46 +
+ .../staging/tidspbridge/include/dspbridge/dbdcd.h | 358 +++
+ .../tidspbridge/include/dspbridge/dbdcddef.h | 78 +
+ .../staging/tidspbridge/include/dspbridge/dbdefs.h | 514 ++++
+ .../tidspbridge/include/dspbridge/dbldefs.h | 141 +
+ .../staging/tidspbridge/include/dspbridge/dbll.h | 62 +
+ .../tidspbridge/include/dspbridge/dblldefs.h | 496 +++
+ .../tidspbridge/include/dspbridge/dehdefs.h | 32 +
+ .../staging/tidspbridge/include/dspbridge/dev.h | 702 +++++
+ .../tidspbridge/include/dspbridge/devdefs.h | 26 +
+ .../staging/tidspbridge/include/dspbridge/disp.h | 204 ++
+ .../tidspbridge/include/dspbridge/dispdefs.h | 35 +
+ .../staging/tidspbridge/include/dspbridge/dmm.h | 75 +
+ .../staging/tidspbridge/include/dspbridge/drv.h | 521 ++++
+ .../tidspbridge/include/dspbridge/drvdefs.h | 25 +
+ .../tidspbridge/include/dspbridge/dspapi-ioctl.h | 475 +++
+ .../staging/tidspbridge/include/dspbridge/dspapi.h | 167 +
+ .../tidspbridge/include/dspbridge/dspchnl.h | 72 +
+ .../tidspbridge/include/dspbridge/dspdefs.h | 1054 +++++++
+ .../staging/tidspbridge/include/dspbridge/dspdeh.h | 43 +
+ .../staging/tidspbridge/include/dspbridge/dspdrv.h | 62 +
+ .../staging/tidspbridge/include/dspbridge/dspio.h | 41 +
+ .../tidspbridge/include/dspbridge/dspioctl.h | 73 +
+ .../staging/tidspbridge/include/dspbridge/dspmsg.h | 56 +
+ .../tidspbridge/include/dspbridge/dynamic_loader.h | 492 +++
+ drivers/staging/tidspbridge/include/dspbridge/gb.h | 79 +
+ .../tidspbridge/include/dspbridge/getsection.h | 108 +
+ drivers/staging/tidspbridge/include/dspbridge/gh.h | 34 +
+ drivers/staging/tidspbridge/include/dspbridge/gs.h | 59 +
+ .../tidspbridge/include/dspbridge/host_os.h | 88 +
+ drivers/staging/tidspbridge/include/dspbridge/io.h | 114 +
+ .../staging/tidspbridge/include/dspbridge/io_sm.h | 298 ++
+ .../staging/tidspbridge/include/dspbridge/iodefs.h | 36 +
+ .../staging/tidspbridge/include/dspbridge/ldr.h | 29 +
+ .../staging/tidspbridge/include/dspbridge/list.h | 225 ++
+ .../staging/tidspbridge/include/dspbridge/mbx_sh.h | 184 ++
+ .../tidspbridge/include/dspbridge/memdefs.h | 30 +
+ .../staging/tidspbridge/include/dspbridge/mgr.h | 205 ++
+ .../tidspbridge/include/dspbridge/mgrpriv.h | 45 +
+ .../staging/tidspbridge/include/dspbridge/msg.h | 86 +
+ .../tidspbridge/include/dspbridge/msgdefs.h | 29 +
+ .../staging/tidspbridge/include/dspbridge/nldr.h | 57 +
+ .../tidspbridge/include/dspbridge/nldrdefs.h | 293 ++
+ .../staging/tidspbridge/include/dspbridge/node.h | 583 ++++
+ .../tidspbridge/include/dspbridge/nodedefs.h | 28 +
+ .../tidspbridge/include/dspbridge/nodepriv.h | 182 ++
+ .../staging/tidspbridge/include/dspbridge/ntfy.h | 217 ++
+ .../staging/tidspbridge/include/dspbridge/proc.h | 621 ++++
+ .../tidspbridge/include/dspbridge/procpriv.h | 25 +
+ .../staging/tidspbridge/include/dspbridge/pwr.h | 107 +
+ .../staging/tidspbridge/include/dspbridge/pwr_sh.h | 33 +
+ .../include/dspbridge/resourcecleanup.h | 52 +
+ .../staging/tidspbridge/include/dspbridge/rmm.h | 181 ++
+ .../staging/tidspbridge/include/dspbridge/rms_sh.h | 95 +
+ .../tidspbridge/include/dspbridge/rmstypes.h | 24 +
+ .../tidspbridge/include/dspbridge/services.h | 50 +
+ .../staging/tidspbridge/include/dspbridge/strm.h | 404 +++
+ .../tidspbridge/include/dspbridge/strmdefs.h | 46 +
+ .../staging/tidspbridge/include/dspbridge/sync.h | 109 +
+ .../tidspbridge/include/dspbridge/utildefs.h | 39 +
+ .../tidspbridge/include/dspbridge/uuidutil.h | 62 +
+ .../staging/tidspbridge/include/dspbridge/wdt.h | 79 +
+ drivers/staging/tidspbridge/pmgr/chnl.c | 163 +
+ drivers/staging/tidspbridge/pmgr/chnlobj.h | 46 +
+ drivers/staging/tidspbridge/pmgr/cmm.c | 1154 +++++++
+ drivers/staging/tidspbridge/pmgr/cod.c | 652 ++++
+ drivers/staging/tidspbridge/pmgr/dbll.c | 1585 ++++++++++
+ drivers/staging/tidspbridge/pmgr/dev.c | 1151 +++++++
+ drivers/staging/tidspbridge/pmgr/dmm.c | 533 ++++
+ drivers/staging/tidspbridge/pmgr/dspapi.c | 1906 ++++++++++++
+ drivers/staging/tidspbridge/pmgr/io.c | 142 +
+ drivers/staging/tidspbridge/pmgr/ioobj.h | 38 +
+ drivers/staging/tidspbridge/pmgr/msg.c | 129 +
+ drivers/staging/tidspbridge/pmgr/msgobj.h | 38 +
+ drivers/staging/tidspbridge/rmgr/dbdcd.c | 1512 +++++++++
+ drivers/staging/tidspbridge/rmgr/disp.c | 752 +++++
+ drivers/staging/tidspbridge/rmgr/drv.c | 929 ++++++
+ drivers/staging/tidspbridge/rmgr/drv_interface.c | 656 ++++
+ drivers/staging/tidspbridge/rmgr/drv_interface.h | 28 +
+ drivers/staging/tidspbridge/rmgr/dspdrv.c | 142 +
+ drivers/staging/tidspbridge/rmgr/mgr.c | 375 +++
+ drivers/staging/tidspbridge/rmgr/nldr.c | 1974 ++++++++++++
+ drivers/staging/tidspbridge/rmgr/node.c | 3234 ++++++++++++++++++++
+ drivers/staging/tidspbridge/rmgr/proc.c | 1936 ++++++++++++
+ drivers/staging/tidspbridge/rmgr/pwr.c | 176 ++
+ drivers/staging/tidspbridge/rmgr/rmm.c | 537 ++++
+ drivers/staging/tidspbridge/rmgr/strm.c | 853 ++++++
+ drivers/staging/tidspbridge/services/cfg.c | 253 ++
+ drivers/staging/tidspbridge/services/ntfy.c | 31 +
+ drivers/staging/tidspbridge/services/services.c | 70 +
+ drivers/staging/tidspbridge/services/sync.c | 104 +
+ 150 files changed, 50001 insertions(+), 0 deletions(-)
+ create mode 100644 arch/arm/mach-omap2/dspbridge.c
+ create mode 100644 drivers/staging/tidspbridge/Documentation/CONTRIBUTORS
+ create mode 100644 drivers/staging/tidspbridge/Documentation/README
+ create mode 100644 drivers/staging/tidspbridge/Documentation/error-codes
+ create mode 100644 drivers/staging/tidspbridge/Kconfig
+ create mode 100644 drivers/staging/tidspbridge/Makefile
+ create mode 100644 drivers/staging/tidspbridge/TODO
+ create mode 100644 drivers/staging/tidspbridge/core/_cmm.h
+ create mode 100644 drivers/staging/tidspbridge/core/_deh.h
+ create mode 100644 drivers/staging/tidspbridge/core/_msg_sm.h
+ create mode 100644 drivers/staging/tidspbridge/core/_tiomap.h
+ create mode 100644 drivers/staging/tidspbridge/core/_tiomap_pwr.h
+ create mode 100644 drivers/staging/tidspbridge/core/chnl_sm.c
+ create mode 100644 drivers/staging/tidspbridge/core/dsp-clock.c
+ create mode 100644 drivers/staging/tidspbridge/core/io_sm.c
+ create mode 100644 drivers/staging/tidspbridge/core/msg_sm.c
+ create mode 100644 drivers/staging/tidspbridge/core/tiomap3430.c
+ create mode 100644 drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+ create mode 100644 drivers/staging/tidspbridge/core/tiomap_io.c
+ create mode 100644 drivers/staging/tidspbridge/core/tiomap_io.h
+ create mode 100644 drivers/staging/tidspbridge/core/ue_deh.c
+ create mode 100644 drivers/staging/tidspbridge/core/wdt.c
+ create mode 100644 drivers/staging/tidspbridge/dynload/cload.c
+ create mode 100644 drivers/staging/tidspbridge/dynload/dload_internal.h
+ create mode 100644 drivers/staging/tidspbridge/dynload/doff.h
+ create mode 100644 drivers/staging/tidspbridge/dynload/getsection.c
+ create mode 100644 drivers/staging/tidspbridge/dynload/header.h
+ create mode 100644 drivers/staging/tidspbridge/dynload/module_list.h
+ create mode 100644 drivers/staging/tidspbridge/dynload/params.h
+ create mode 100644 drivers/staging/tidspbridge/dynload/reloc.c
+ create mode 100644 drivers/staging/tidspbridge/dynload/reloc_table.h
+ create mode 100644 drivers/staging/tidspbridge/dynload/reloc_table_c6000.c
+ create mode 100644 drivers/staging/tidspbridge/dynload/tramp.c
+ create mode 100644 drivers/staging/tidspbridge/dynload/tramp_table_c6000.c
+ create mode 100644 drivers/staging/tidspbridge/gen/gb.c
+ create mode 100644 drivers/staging/tidspbridge/gen/gh.c
+ create mode 100644 drivers/staging/tidspbridge/gen/gs.c
+ create mode 100644 drivers/staging/tidspbridge/gen/uuidutil.c
+ create mode 100644 drivers/staging/tidspbridge/hw/EasiGlobal.h
+ create mode 100644 drivers/staging/tidspbridge/hw/MMUAccInt.h
+ create mode 100644 drivers/staging/tidspbridge/hw/MMURegAcM.h
+ create mode 100644 drivers/staging/tidspbridge/hw/hw_defs.h
+ create mode 100644 drivers/staging/tidspbridge/hw/hw_mmu.c
+ create mode 100644 drivers/staging/tidspbridge/hw/hw_mmu.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/brddefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/cfg.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/chnl.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/clk.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/cmm.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/cod.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dbc.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dbdcd.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dbldefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dbll.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dehdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dev.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/devdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/disp.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dispdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dmm.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/drv.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/drvdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspapi.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspchnl.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspdeh.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspio.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dspmsg.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/gb.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/getsection.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/gh.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/gs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/host_os.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/io.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/iodefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/ldr.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/list.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/memdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/mgr.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/msg.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/msgdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/nldr.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/node.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/nodedefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/ntfy.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/proc.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/procpriv.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/pwr.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/rmm.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/rmstypes.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/services.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/strm.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/sync.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/utildefs.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
+ create mode 100644 drivers/staging/tidspbridge/include/dspbridge/wdt.h
+ create mode 100644 drivers/staging/tidspbridge/pmgr/chnl.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/chnlobj.h
+ create mode 100644 drivers/staging/tidspbridge/pmgr/cmm.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/cod.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/dbll.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/dev.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/dmm.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/dspapi.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/io.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/ioobj.h
+ create mode 100644 drivers/staging/tidspbridge/pmgr/msg.c
+ create mode 100644 drivers/staging/tidspbridge/pmgr/msgobj.h
+ create mode 100644 drivers/staging/tidspbridge/rmgr/dbdcd.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/disp.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/drv.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/drv_interface.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/drv_interface.h
+ create mode 100644 drivers/staging/tidspbridge/rmgr/dspdrv.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/mgr.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/nldr.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/node.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/proc.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/pwr.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/rmm.c
+ create mode 100644 drivers/staging/tidspbridge/rmgr/strm.c
+ create mode 100644 drivers/staging/tidspbridge/services/cfg.c
+ create mode 100644 drivers/staging/tidspbridge/services/ntfy.c
+ create mode 100644 drivers/staging/tidspbridge/services/services.c
+ create mode 100644 drivers/staging/tidspbridge/services/sync.c
+
+Index: linux-2.6.35-master/arch/arm/mach-omap2/Makefile
+===================================================================
+--- linux-2.6.35-master.orig/arch/arm/mach-omap2/Makefile 2010-08-18 11:18:19.000000000 +0300
++++ linux-2.6.35-master/arch/arm/mach-omap2/Makefile 2010-08-18 11:24:23.146051246 +0300
+@@ -162,3 +162,7 @@
+
+ smc91x-$(CONFIG_SMC91X) := gpmc-smc91x.o
+ obj-y += $(smc91x-m) $(smc91x-y)
++
++ifneq ($(CONFIG_TIDSPBRIDGE),)
++obj-y += dspbridge.o
++endif
+Index: linux-2.6.35-master/arch/arm/mach-omap2/dspbridge.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/arch/arm/mach-omap2/dspbridge.c 2010-08-18 11:24:23.146051246 +0300
+@@ -0,0 +1,109 @@
++/*
++ * TI's dspbridge platform device registration
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ * Copyright (C) 2009 Nokia Corporation
++ *
++ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/platform_device.h>
++//#include <linux/lmb.h>
++#include <linux/bootmem.h>
++
++#include "prm.h"
++#include "cm.h"
++#ifdef CONFIG_BRIDGE_DVFS
++#include <plat/omap-pm.h>
++#endif
++
++#include "../../../drivers/staging/tidspbridge/include/dspbridge/host_os.h"
++
++static struct platform_device *dspbridge_pdev;
++
++static struct dspbridge_platform_data dspbridge_pdata __initdata = {
++#ifdef CONFIG_BRIDGE_DVFS
++ .dsp_set_min_opp = omap_pm_dsp_set_min_opp,
++ .dsp_get_opp = omap_pm_dsp_get_opp,
++ .cpu_set_freq = omap_pm_cpu_set_freq,
++ .cpu_get_freq = omap_pm_cpu_get_freq,
++#endif
++ .dsp_prm_read = prm_read_mod_reg,
++ .dsp_prm_write = prm_write_mod_reg,
++ .dsp_prm_rmw_bits = prm_rmw_mod_reg_bits,
++ .dsp_cm_read = cm_read_mod_reg,
++ .dsp_cm_write = cm_write_mod_reg,
++ .dsp_cm_rmw_bits = cm_rmw_mod_reg_bits,
++};
++
++static unsigned long dspbridge_phys_mempool_base;
++
++void __init dspbridge_reserve_sdram(void)
++{
++// unsigned long va, size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE; // LMB
++ void *va;
++ unsigned long size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
++
++ if (!size)
++ return;
++
++// va = lmb_alloc(size, SZ_1M); //LMB
++ va = __alloc_bootmem_nopanic(size, SZ_1M, 0);
++ if (!va) {
++ pr_err("%s: Failed to bootmem allocation(%lu bytes)\n",
++ __func__, size);
++ return;
++ }
++
++// dspbridge_phys_mempool_base = va; //LMB
++ dspbridge_phys_mempool_base = virt_to_phys(va);
++}
++
++static int __init dspbridge_init(void)
++{
++ struct platform_device *pdev;
++ int err = -ENOMEM;
++ struct dspbridge_platform_data *pdata = &dspbridge_pdata;
++
++ pdata->phys_mempool_base = dspbridge_phys_mempool_base;
++ if (pdata->phys_mempool_base) {
++ pdata->phys_mempool_size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
++ pr_info("%s: %x bytes @ %x\n", __func__,
++ pdata->phys_mempool_size, pdata->phys_mempool_base);
++ }
++
++ pdev = platform_device_alloc("C6410", -1);
++ if (!pdev)
++ goto err_out;
++
++ err = platform_device_add_data(pdev, pdata, sizeof(*pdata));
++ if (err)
++ goto err_out;
++
++ err = platform_device_add(pdev);
++ if (err)
++ goto err_out;
++
++ dspbridge_pdev = pdev;
++ return 0;
++
++err_out:
++ platform_device_put(pdev);
++ return err;
++}
++module_init(dspbridge_init);
++
++static void __exit dspbridge_exit(void)
++{
++ platform_device_unregister(dspbridge_pdev);
++}
++module_exit(dspbridge_exit);
++
++MODULE_AUTHOR("Hiroshi DOYU");
++MODULE_DESCRIPTION("TI's dspbridge platform device registration");
++MODULE_LICENSE("GPL v2");
++
+Index: linux-2.6.35-master/arch/arm/mach-omap2/io.c
+===================================================================
+--- linux-2.6.35-master.orig/arch/arm/mach-omap2/io.c 2010-08-18 11:18:19.000000000 +0300
++++ linux-2.6.35-master/arch/arm/mach-omap2/io.c 2010-08-18 11:24:23.146051246 +0300
+@@ -47,6 +47,8 @@
+ #include "clockdomains.h"
+ #include <plat/omap_hwmod.h>
+
++#include "../../../drivers/staging/tidspbridge/include/dspbridge/host_os.h"
++
+ /*
+ * The machine specific code may provide the extra mapping besides the
+ * default mapping provided here.
+@@ -243,6 +245,7 @@
+ omap_sram_init();
+ omapfb_reserve_sdram();
+ omap_vram_reserve_sdram();
++ dspbridge_reserve_sdram();
+ }
+
+ #ifdef CONFIG_ARCH_OMAP2420
+Index: linux-2.6.35-master/drivers/staging/Kconfig
+===================================================================
+--- linux-2.6.35-master.orig/drivers/staging/Kconfig 2010-08-18 11:18:19.000000000 +0300
++++ linux-2.6.35-master/drivers/staging/Kconfig 2010-08-18 11:24:23.146051246 +0300
+@@ -161,5 +161,7 @@
+
+ source "drivers/staging/ifx-mux/Kconfig"
+
++source "drivers/staging/tidspbridge/Kconfig"
++
+ endif # !STAGING_EXCLUDE_BUILD
+ endif # STAGING
+Index: linux-2.6.35-master/drivers/staging/Makefile
+===================================================================
+--- linux-2.6.35-master.orig/drivers/staging/Makefile 2010-08-18 11:18:18.000000000 +0300
++++ linux-2.6.35-master/drivers/staging/Makefile 2010-08-18 11:28:07.935063108 +0300
+@@ -61,3 +61,4 @@
+ obj-$(CONFIG_X86_INTEL_CE) += ice4100/
+ obj-$(CONFIG_VIDEO_MRSTCI) += mrstci/
+ obj-$(CONFIG_N_IFX_MUX) += ifx-mux/
++obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS 2010-08-18 11:24:23.146051246 +0300
+@@ -0,0 +1,45 @@
++TI DSP/Bridge Driver - Contributors File
++
++The DSP/Bridge project wish to thank all of its contributors, current bridge
++driver is the result of the work of all of them. If any name is accidentally
++omitted, let us know by sending a mail to omar.ramirez@ti.com or
++x095840@ti.com.
++
++Please keep the following list in alphabetical order.
++
++ Suman Anna
++ Sripal Bagadia
++ Felipe Balbi
++ Ohad Ben-Cohen
++ Phil Carmody
++ Deepak Chitriki
++ Felipe Contreras
++ Hiroshi Doyu
++ Seth Forshee
++ Ivan Gomez Castellanos
++ Mark Grosen
++ Ramesh Gupta G
++ Fernando Guzman Lugo
++ Axel Haslam
++ Janet Head
++ Shivananda Hebbar
++ Hari Kanigeri
++ Tony Lindgren
++ Antonio Luna
++ Hari Nagalla
++ Nishanth Menon
++ Ameya Palande
++ Vijay Pasam
++ Gilbert Pitney
++ Omar Ramirez Luna
++ Ernesto Ramos
++ Chris Ring
++ Larry Schiefer
++ Rebecca Schultz Zavin
++ Bhavin Shah
++ Andy Shevchenko
++ Jeff Taylor
++ Roman Tereshonkov
++ Armando Uribe de Leon
++ Nischal Varide
++ Wenbiao Wang
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/Documentation/README
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/Documentation/README 2010-08-18 11:24:23.146051246 +0300
+@@ -0,0 +1,70 @@
++ Linux DSP/BIOS Bridge release
++
++DSP/BIOS Bridge overview
++========================
++
++DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more
++attached DSPs. The GPP is considered the master or "host" processor, and the
++attached DSPs are processing resources that can be utilized by applications
++and drivers running on the GPP.
++
++The abstraction that DSP/BIOS Bridge supplies, is a direct link between a GPP
++program and a DSP task. This communication link is partitioned into two
++types of sub-links: messaging (short, fixed-length packets) and data
++streaming (multiple, large buffers). Each sub-link operates independently,
++and features in-order delivery of data, meaning that messages are delivered
++in the order they were submitted to the message link, and stream buffers are
++delivered in the order they were submitted to the stream link.
++
++In addition, a GPP client can specify what inputs and outputs a DSP task
++uses. DSP tasks typically use message objects for passing control and status
++information and stream objects for efficient streaming of real-time data.
++
++GPP Software Architecture
++=========================
++
++A GPP application communicates with its associated DSP task running on the
++DSP subsystem using the DSP/BIOS Bridge API. For example, a GPP audio
++application can use the API to pass messages to a DSP task that is managing
++data flowing from analog-to-digital converters (ADCs) to digital-to-analog
++converters (DACs).
++
++From the perspective of the GPP OS, the DSP is treated as just another
++peripheral device. Most high level GPP OS typically support a device driver
++model, whereby applications can safely access and share a hardware peripheral
++through standard driver interfaces. Therefore, to allow multiple GPP
++applications to share access to the DSP, the GPP side of DSP/BIOS Bridge
++implements a device driver for the DSP.
++
++Since driver interfaces are not always standard across GPP OS, and to provide
++some level of interoperability of application code using DSP/BIOS Bridge
++between GPP OS, DSP/BIOS Bridge provides a standard library of APIs which
++wrap calls into the device driver. So, rather than calling GPP OS specific
++driver interfaces, applications (and even other device drivers) can use the
++standard API library directly.
++
++DSP Software Architecture
++=========================
++
++For DSP/BIOS, DSP/BIOS Bridge adds a device-independent streaming I/O (STRM)
++interface, a messaging interface (NODE), and a Resource Manager (RM) Server.
++The RM Server runs as a task of DSP/BIOS and is subservient to commands
++and queries from the GPP. It executes commands to start and stop DSP signal
++processing nodes in response to GPP programs making requests through the
++(GPP-side) API.
++
++DSP tasks started by the RM Server are similar to any other DSP task with two
++important differences: they must follow a specific task model consisting of
++three C-callable functions (node create, execute, and delete), with specific
++sets of arguments, and they have a pre-defined task environment established
++by the RM Server.
++
++Tasks started by the RM Server communicate using the STRM and NODE interfaces
++and act as servers for their corresponding GPP clients, performing signal
++processing functions as requested by messages sent by their GPP client.
++Typically, a DSP task moves data from source devices to sink devices using
++device independent I/O streams, performing application-specific processing
++and transformations on the data while it is moved. For example, an audio
++task might perform audio decompression (ADPCM, MPEG, CELP) on data received
++from a GPP audio driver and then send the decompressed linear samples to a
++digital-to-analog converter.
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/Documentation/error-codes
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/Documentation/error-codes 2010-08-18 11:24:23.150060763 +0300
+@@ -0,0 +1,157 @@
++ DSP/Bridge Error Code Guide
++
++
++Success code is always taken as 0, except for one case where a success status
++different than 0 can be possible, this is when enumerating a series of dsp
++objects, if the enumeration doesn't have any more objects it is considered as a
++successful case. In this case a positive ENODATA is returned (TODO: Change to
++avoid this case).
++
++Error codes are returned as a negative 1, if an specific code is expected, it
++can be propagated to user space by reading errno symbol defined in errno.h, for
++specific details on the implementation a copy of the standard used should be
++read first.
++
++The error codes used by this driver are:
++
++[EPERM]
++ General driver failure.
++
++ According to the use case the following might apply:
++ - Device is in 'sleep/suspend' mode due to DPM.
++ - User cannot mark end of stream on an input channel.
++ - Requested operation is invalid for the node type.
++ - Invalid alignment for the node messaging buffer.
++ - The specified direction is invalid for the stream.
++ - Invalid stream mode.
++
++[ENOENT]
++ The specified object or file was not found.
++
++[ESRCH]
++ A shared memory buffer contained in a message or stream could not be mapped
++ to the GPP client process's virtual space.
++
++[EIO]
++ Driver interface I/O error.
++
++ or:
++ - Unable to plug channel ISR for configured IRQ.
++ - No free I/O request packets are available.
++
++[ENXIO]
++ Unable to find a named section in DSP executable or a non-existent memory
++ segment identifier was specified.
++
++[EBADF]
++ General error for file handling:
++
++ - Unable to open file.
++ - Unable to read file.
++ - An error occurred while parsing the DSP executable file.
++
++[ENOMEM]
++ A memory allocation failure occurred.
++
++[EACCES]
++ - Unable to read content of DCD data section; this is typically caused by
++ improperly configured nodes.
++ - Unable to decode DCD data section content; this is typically caused by
++ changes to DSP/BIOS Bridge data structures.
++ - Unable to get pointer to DCD data section; this is typically caused by
++ improperly configured UUIDs.
++ - Unable to load file containing DCD data section; this is typically
++ caused by a missing COFF file.
++ - The specified COFF file does not contain a valid node registration
++ section.
++
++[EFAULT]
++ Invalid pointer or handler.
++
++[EEXIST]
++ Attempted to create a channel manager when one already exists.
++
++[EINVAL]
++ Invalid argument.
++
++[ESPIPE]
++ Symbol not found in the COFF file. DSPNode_Create will return this if
++ the iAlg function table for an xDAIS socket is not found in the COFF file.
++ In this case, force the symbol to be linked into the COFF file.
++ DSPNode_Create, DSPNode_Execute, and DSPNode_Delete will return this if
++ the create, execute, or delete phase function, respectively, could not be
++ found in the COFF file.
++
++ - No symbol table is loaded/found for this board.
++ - Unable to initialize the ZL COFF parsing module.
++
++[EPIPE]
++ I/O is currently pending.
++
++ - End of stream was already requested on this output channel.
++
++[EDOM]
++ A parameter is specified outside its valid range.
++
++[ENOSYS]
++ The indicated operation is not supported.
++
++[EIDRM]
++ During enumeration a change in the number or properties of the objects
++ has occurred.
++
++[ECHRNG]
++ Attempt to created channel manager with too many channels or channel ID out
++ of range.
++
++[EBADR]
++ The state of the specified object is incorrect for the requested operation.
++
++ - Invalid segment ID.
++
++[ENODATA]
++ Unable to retrieve resource information from the registry.
++
++ - No more registry values.
++
++[ETIME]
++ A timeout occurred before the requested operation could complete.
++
++[ENOSR]
++ A stream has been issued the maximum number of buffers allowed in the
++ stream at once; buffers must be reclaimed from the stream before any more
++ can be issued.
++
++ - No free channels are available.
++
++[EILSEQ]
++ Error occurred in a dynamic loader library function.
++
++[EISCONN]
++ The Specified Connection already exists.
++
++[ENOTCONN]
++ Nodes not connected.
++
++[ETIMEDOUT]
++ Timeout occurred waiting for a response from the hardware.
++
++ - Wait for flush operation on an output channel timed out.
++
++[ECONNREFUSED]
++ No more connections can be made for this node.
++
++[EALREADY]
++ Channel is already in use.
++
++[EREMOTEIO]
++ dwTimeOut parameter was CHNL_IOCNOWAIT, yet no I/O completions were
++ queued.
++
++[ECANCELED]
++ I/O has been cancelled on this channel.
++
++[ENOKEY]
++ Invalid subkey parameter.
++
++ - UUID not found in registry.
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/Kconfig
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/Kconfig 2010-08-18 11:24:23.150060763 +0300
+@@ -0,0 +1,90 @@
++#
++# DSP Bridge Driver Support
++#
++
++menuconfig TIDSPBRIDGE
++ tristate "DSP Bridge driver"
++ depends on ARCH_OMAP3
++ select OMAP_MBOX_FWK
++ help
++ DSP/BIOS Bridge is designed for platforms that contain a GPP and
++ one or more attached DSPs. The GPP is considered the master or
++ "host" processor, and the attached DSPs are processing resources
++ that can be utilized by applications and drivers running on the GPP.
++
++ This driver depends on OMAP Mailbox (OMAP_MBOX_FWK).
++
++config TIDSPBRIDGE_DVFS
++ bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)"
++ depends on TIDSPBRIDGE && OMAP_PM_SRF && CPU_FREQ
++ help
++ DVFS allows DSP Bridge to initiate the operating point change to
++ scale the chip voltage and frequency in order to match the
++ performance and power consumption to the current processing
++ requirements.
++
++config TIDSPBRIDGE_MEMPOOL_SIZE
++ hex "Physical memory pool size (Byte)"
++ depends on TIDSPBRIDGE
++ default 0x600000
++ help
++ Allocate specified size of memory at booting time to avoid allocation
++ failure under heavy memory fragmentation after some use time.
++
++config TIDSPBRIDGE_DEBUG
++ bool "Debug Support"
++ depends on TIDSPBRIDGE
++ help
++ Say Y to enable Bridge debugging capabilities
++
++config TIDSPBRIDGE_RECOVERY
++ bool "Recovery Support"
++ depends on TIDSPBRIDGE
++ default y
++ help
++ In case of DSP fatal error, BRIDGE driver will try to
++ recover itself.
++
++config TIDSPBRIDGE_CACHE_LINE_CHECK
++ bool "Check buffers to be 128 byte aligned"
++ depends on TIDSPBRIDGE
++ help
++ When the DSP processes data, the DSP cache controller loads 128-Byte
++ chunks (lines) from SDRAM and writes the data back in 128-Byte chunks.
++ If a DMM buffer does not start and end on a 128-Byte boundary, the data
++ preceding the start address (SA) from the 128-Byte boundary to the SA
++ and the data at addresses trailing the end address (EA) from the EA to
++ the next 128-Byte boundary will be loaded and written back as well.
++ This can lead to heap corruption. Say Y, to enforce the check for 128
++ byte alignment, buffers failing this check will be rejected.
++
++config TIDSPBRIDGE_WDT3
++ bool "Enable watchdog timer"
++ depends on TIDSPBRIDGE
++ help
++ WTD3 is managed by DSP and once it is enabled, DSP side bridge is in
++ charge of refreshing the timer before overflow, if the DSP hangs MPU
++ will caught the interrupt and try to recover DSP.
++
++config TIDSPBRIDGE_WDT_TIMEOUT
++ int "Watchdog timer timeout (in secs)"
++ depends on TIDSPBRIDGE && TIDSPBRIDGE_WDT3
++ default 5
++ help
++ Watchdog timer timeout value, after that time if the watchdog timer
++ counter is not reset the wdt overflow interrupt will be triggered
++
++config TIDSPBRIDGE_NTFY_PWRERR
++ bool "Notify power errors"
++ depends on TIDSPBRIDGE
++ help
++ Enable notifications to registered clients on the event of power errror
++ trying to suspend bridge driver. Say Y, to signal this event as a fatal
++ error, this will require a bridge restart to recover.
++
++config TIDSPBRIDGE_BACKTRACE
++ bool "Dump backtraces on fatal errors"
++ depends on TIDSPBRIDGE
++ help
++ Enable useful information to backtrace fatal errors. Say Y if you
++ want to dump information for testing purposes.
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/Makefile 2010-08-18 11:24:23.150060763 +0300
+@@ -0,0 +1,34 @@
++obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
++
++libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
++libservices = services/sync.o services/cfg.o \
++ services/ntfy.o services/services.o
++libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
++ core/tiomap3430_pwr.o core/tiomap_io.o \
++ core/ue_deh.o core/wdt.o core/dsp-clock.o
++libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
++ pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
++librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
++ rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
++ rmgr/nldr.o rmgr/drv_interface.o
++libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
++ dynload/tramp.o
++libhw = hw/hw_mmu.o
++
++bridgedriver-objs = $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
++ $(libdload) $(libhw)
++
++#Machine dependent
++ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
++ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \
++ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS
++
++ccflags-y += -Idrivers/staging/tidspbridge/include
++ccflags-y += -Idrivers/staging/tidspbridge/services
++ccflags-y += -Idrivers/staging/tidspbridge/core
++ccflags-y += -Idrivers/staging/tidspbridge/pmgr
++ccflags-y += -Idrivers/staging/tidspbridge/rmgr
++ccflags-y += -Idrivers/staging/tidspbridge/dynload
++ccflags-y += -Idrivers/staging/tidspbridge/hw
++ccflags-y += -Iarch/arm
++
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/TODO
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/TODO 2010-08-18 11:28:46.111073340 +0300
+@@ -0,0 +1,18 @@
++* Migrate to (and if necessary, extend) existing upstream code such as
++ iommu, wdt, mcbsp, gptimers
++* Decouple hardware-specific code (e.g. bridge_brd_start/stop/delete/monitor)
++* DOFF binary loader: consider pushing to user space. at the very least
++ eliminate the direct filesystem access
++* Eliminate general services and libraries - use or extend existing kernel
++ libraries instead (e.g. gcf/lcm in nldr.c, global helpers in gen/)
++* Eliminate direct manipulation of OMAP_SYSC_BASE
++* Eliminate list.h : seem like a redundant wrapper to existing kernel lists
++* Eliminate DSP_SUCCEEDED macros and their imposed redundant indentations
++ (adopt the kernel way of checking for return values)
++* Audit interfaces exposed to user space
++* Audit and clean up header files folder
++* Use kernel coding style
++* checkpatch.pl fixes
++
++Please send any patches to Greg Kroah-Hartman <greg@kroah.com>
++and Omar Ramirez Luna <omar.ramirez@ti.com>.
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/_cmm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/_cmm.h 2010-08-18 11:24:23.150060763 +0300
+@@ -0,0 +1,45 @@
++/*
++ * _cmm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Private header file defining CMM manager objects and defines needed
++ * by IO manager to register shared memory regions when DSP base image
++ * is loaded(bridge_io_on_loaded).
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _CMM_
++#define _CMM_
++
++/*
++ * These target side symbols define the beginning and ending addresses
++ * of the section of shared memory used for shared memory manager CMM.
++ * They are defined in the *cfg.cmd file by cdb code.
++ */
++#define SHM0_SHARED_BASE_SYM "_SHM0_BEG"
++#define SHM0_SHARED_END_SYM "_SHM0_END"
++#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT"
++
++/*
++ * Shared Memory Region #0(SHMSEG0) is used in the following way:
++ *
++ * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END)
++ * V V V
++ * ------------------------------------------------------------
++ * | DSP-side allocations | GPP-side allocations |
++ * ------------------------------------------------------------
++ *
++ *
++ */
++
++#endif /* _CMM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/_deh.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/_deh.h 2010-08-18 11:24:23.154051495 +0300
+@@ -0,0 +1,35 @@
++/*
++ * _deh.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Private header for DEH module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ * Copyright (C) 2010 Felipe Contreras
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _DEH_
++#define _DEH_
++
++#include <dspbridge/ntfy.h>
++#include <dspbridge/dspdefs.h>
++
++/* DEH Manager: only one created per board: */
++struct deh_mgr {
++ struct bridge_dev_context *hbridge_context; /* Bridge context. */
++ struct ntfy_object *ntfy_obj; /* NTFY object */
++
++ /* MMU Fault DPC */
++ struct tasklet_struct dpc_tasklet;
++};
++
++#endif /* _DEH_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/_msg_sm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/_msg_sm.h 2010-08-18 11:24:23.154051495 +0300
+@@ -0,0 +1,142 @@
++/*
++ * _msg_sm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Private header file defining msg_ctrl manager objects and defines needed
++ * by IO manager.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _MSG_SM_
++#define _MSG_SM_
++
++#include <dspbridge/list.h>
++#include <dspbridge/msgdefs.h>
++
++/*
++ * These target side symbols define the beginning and ending addresses
++ * of the section of shared memory used for messages. They are
++ * defined in the *cfg.cmd file by cdb code.
++ */
++#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG"
++#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END"
++
++#ifndef _CHNL_WORDSIZE
++#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 2 bytes/word */
++#endif
++
++/*
++ * ======== msg_ctrl ========
++ * There is a control structure for messages to the DSP, and a control
++ * structure for messages from the DSP. The shared memory region for
++ * transferring messages is partitioned as follows:
++ *
++ * ----------------------------------------------------------
++ * |Control | Messages from DSP | Control | Messages to DSP |
++ * ----------------------------------------------------------
++ *
++ * msg_ctrl control structure for messages to the DSP is used in the following
++ * way:
++ *
++ * buf_empty - This flag is set to FALSE by the GPP after it has output
++ * messages for the DSP. The DSP host driver sets it to
++ * TRUE after it has copied the messages.
++ * post_swi - Set to 1 by the GPP after it has written the messages,
++ * set the size, and set buf_empty to FALSE.
++ * The DSP Host driver uses SWI_andn of the post_swi field
++ * when a host interrupt occurs. The host driver clears
++ * this after posting the SWI.
++ * size - Number of messages to be read by the DSP.
++ *
++ * For messages from the DSP:
++ * buf_empty - This flag is set to FALSE by the DSP after it has output
++ * messages for the GPP. The DPC on the GPP sets it to
++ * TRUE after it has copied the messages.
++ * post_swi - Set to 1 the DPC on the GPP after copying the messages.
++ * size - Number of messages to be read by the GPP.
++ */
++struct msg_ctrl {
++ u32 buf_empty; /* to/from DSP buffer is empty */
++ u32 post_swi; /* Set to "1" to post msg_ctrl SWI */
++ u32 size; /* Number of messages to/from the DSP */
++ u32 resvd;
++};
++
++/*
++ * ======== msg_mgr ========
++ * The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can
++ * have msg_queue to hold all messages that come up from the corresponding
++ * node on the DSP. The msg_mgr also has a shared queue of messages
++ * ready to go to the DSP.
++ */
++struct msg_mgr {
++ /* The first field must match that in msgobj.h */
++
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++
++ struct io_mgr *hio_mgr; /* IO manager */
++ struct lst_list *queue_list; /* List of MSG_QUEUEs */
++ spinlock_t msg_mgr_lock; /* For critical sections */
++ /* Signalled when MsgFrame is available */
++ struct sync_object *sync_event;
++ struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
++ struct lst_list *msg_used_list; /* MsgFrames ready to go to DSP */
++ u32 msgs_pending; /* # of queued messages to go to DSP */
++ u32 max_msgs; /* Max # of msgs that fit in buffer */
++ msg_onexit on_exit; /* called when RMS_EXIT is received */
++};
++
++/*
++ * ======== msg_queue ========
++ * Each NODE has a msg_queue for receiving messages from the
++ * corresponding node on the DSP. The msg_queue object maintains a list
++ * of messages that have been sent to the host, but not yet read (MSG_Get),
++ * and a list of free frames that can be filled when new messages arrive
++ * from the DSP.
++ * The msg_queue's hSynEvent gets posted when a message is ready.
++ */
++struct msg_queue {
++ struct list_head list_elem;
++ struct msg_mgr *hmsg_mgr;
++ u32 max_msgs; /* Node message depth */
++ u32 msgq_id; /* Node environment pointer */
++ struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
++ /* Filled MsgFramess waiting to be read */
++ struct lst_list *msg_used_list;
++ void *arg; /* Handle passed to mgr on_exit callback */
++ struct sync_object *sync_event; /* Signalled when message is ready */
++ struct sync_object *sync_done; /* For synchronizing cleanup */
++ struct sync_object *sync_done_ack; /* For synchronizing cleanup */
++ struct ntfy_object *ntfy_obj; /* For notification of message ready */
++ bool done; /* TRUE <==> deleting the object */
++ u32 io_msg_pend; /* Number of pending MSG_get/put calls */
++};
++
++/*
++ * ======== msg_dspmsg ========
++ */
++struct msg_dspmsg {
++ struct dsp_msg msg;
++ u32 msgq_id; /* Identifies the node the message goes to */
++};
++
++/*
++ * ======== msg_frame ========
++ */
++struct msg_frame {
++ struct list_head list_elem;
++ struct msg_dspmsg msg_data;
++};
++
++#endif /* _MSG_SM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/_tiomap.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/_tiomap.h 2010-08-18 11:24:23.154051495 +0300
+@@ -0,0 +1,371 @@
++/*
++ * _tiomap.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definitions and types private to this Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _TIOMAP_
++#define _TIOMAP_
++
++#include <plat/powerdomain.h>
++#include <plat/clockdomain.h>
++#include <mach-omap2/prm-regbits-34xx.h>
++#include <mach-omap2/cm-regbits-34xx.h>
++#include <dspbridge/devdefs.h>
++#include <hw_defs.h>
++#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
++#include <dspbridge/sync.h>
++#include <dspbridge/clk.h>
++
++struct map_l4_peripheral {
++ u32 phys_addr;
++ u32 dsp_virt_addr;
++};
++
++#define ARM_MAILBOX_START 0xfffcf000
++#define ARM_MAILBOX_LENGTH 0x800
++
++/* New Registers in OMAP3.1 */
++
++#define TESTBLOCK_ID_START 0xfffed400
++#define TESTBLOCK_ID_LENGTH 0xff
++
++/* ID Returned by OMAP1510 */
++#define TBC_ID_VALUE 0xB47002F
++
++#define SPACE_LENGTH 0x2000
++#define API_CLKM_DPLL_DMA 0xfffec000
++#define ARM_INTERRUPT_OFFSET 0xb00
++
++#define BIOS24XX
++
++#define L4_PERIPHERAL_NULL 0x0
++#define DSPVA_PERIPHERAL_NULL 0x0
++
++#define MAX_LOCK_TLB_ENTRIES 15
++
++#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */
++#define DSPVA_PERIPHERAL_PRM 0x1181e000
++#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */
++#define DSPVA_PERIPHERAL_SCM 0x1181f000
++#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */
++#define DSPVA_PERIPHERAL_MMU 0x11820000
++#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */
++#define DSPVA_PERIPHERAL_CM 0x1181c000
++#define L4_PERIPHERAL_PER 0x48005000 /* PER */
++#define DSPVA_PERIPHERAL_PER 0x1181d000
++
++#define L4_PERIPHERAL_GPIO1 0x48310000
++#define DSPVA_PERIPHERAL_GPIO1 0x11809000
++#define L4_PERIPHERAL_GPIO2 0x49050000
++#define DSPVA_PERIPHERAL_GPIO2 0x1180a000
++#define L4_PERIPHERAL_GPIO3 0x49052000
++#define DSPVA_PERIPHERAL_GPIO3 0x1180b000
++#define L4_PERIPHERAL_GPIO4 0x49054000
++#define DSPVA_PERIPHERAL_GPIO4 0x1180c000
++#define L4_PERIPHERAL_GPIO5 0x49056000
++#define DSPVA_PERIPHERAL_GPIO5 0x1180d000
++
++#define L4_PERIPHERAL_IVA2WDT 0x49030000
++#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000
++
++#define L4_PERIPHERAL_DISPLAY 0x48050000
++#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000
++
++#define L4_PERIPHERAL_SSI 0x48058000
++#define DSPVA_PERIPHERAL_SSI 0x11804000
++#define L4_PERIPHERAL_GDD 0x48059000
++#define DSPVA_PERIPHERAL_GDD 0x11805000
++#define L4_PERIPHERAL_SS1 0x4805a000
++#define DSPVA_PERIPHERAL_SS1 0x11806000
++#define L4_PERIPHERAL_SS2 0x4805b000
++#define DSPVA_PERIPHERAL_SS2 0x11807000
++
++#define L4_PERIPHERAL_CAMERA 0x480BC000
++#define DSPVA_PERIPHERAL_CAMERA 0x11819000
++
++#define L4_PERIPHERAL_SDMA 0x48056000
++#define DSPVA_PERIPHERAL_SDMA 0x11810000 /* 0x1181d000 conflict w/ PER */
++
++#define L4_PERIPHERAL_UART1 0x4806a000
++#define DSPVA_PERIPHERAL_UART1 0x11811000
++#define L4_PERIPHERAL_UART2 0x4806c000
++#define DSPVA_PERIPHERAL_UART2 0x11812000
++#define L4_PERIPHERAL_UART3 0x49020000
++#define DSPVA_PERIPHERAL_UART3 0x11813000
++
++#define L4_PERIPHERAL_MCBSP1 0x48074000
++#define DSPVA_PERIPHERAL_MCBSP1 0x11814000
++#define L4_PERIPHERAL_MCBSP2 0x49022000
++#define DSPVA_PERIPHERAL_MCBSP2 0x11815000
++#define L4_PERIPHERAL_MCBSP3 0x49024000
++#define DSPVA_PERIPHERAL_MCBSP3 0x11816000
++#define L4_PERIPHERAL_MCBSP4 0x49026000
++#define DSPVA_PERIPHERAL_MCBSP4 0x11817000
++#define L4_PERIPHERAL_MCBSP5 0x48096000
++#define DSPVA_PERIPHERAL_MCBSP5 0x11818000
++
++#define L4_PERIPHERAL_GPTIMER5 0x49038000
++#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000
++#define L4_PERIPHERAL_GPTIMER6 0x4903a000
++#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000
++#define L4_PERIPHERAL_GPTIMER7 0x4903c000
++#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000
++#define L4_PERIPHERAL_GPTIMER8 0x4903e000
++#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000
++
++#define L4_PERIPHERAL_SPI1 0x48098000
++#define DSPVA_PERIPHERAL_SPI1 0x1181a000
++#define L4_PERIPHERAL_SPI2 0x4809a000
++#define DSPVA_PERIPHERAL_SPI2 0x1181b000
++
++#define L4_PERIPHERAL_MBOX 0x48094000
++#define DSPVA_PERIPHERAL_MBOX 0x11808000
++
++#define PM_GRPSEL_BASE 0x48307000
++#define DSPVA_GRPSEL_BASE 0x11821000
++
++#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000
++#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000
++#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000
++#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000
++
++/* define a static array with L4 mappings */
++static const struct map_l4_peripheral l4_peripheral_table[] = {
++ {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX},
++ {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM},
++ {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU},
++ {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5},
++ {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6},
++ {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7},
++ {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8},
++ {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1},
++ {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2},
++ {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3},
++ {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4},
++ {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5},
++ {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT},
++ {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY},
++ {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI},
++ {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD},
++ {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1},
++ {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2},
++ {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1},
++ {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2},
++ {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3},
++ {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1},
++ {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2},
++ {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3},
++ {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4},
++ {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5},
++ {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA},
++ {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1},
++ {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2},
++ {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM},
++ {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM},
++ {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER},
++ {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE},
++ {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2},
++ {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3},
++ {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL}
++};
++
++/*
++ * 15 10 0
++ * ---------------------------------
++ * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i|
++ * ---------------------------------
++ * | (class) | (module specific) |
++ *
++ * where c -> Externel Clock Command: Clk & Autoidle Disable/Enable
++ * i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3
++ */
++
++/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */
++#define MBX_PM_CLK_IDMASK 0x7F
++
++/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */
++#define MBX_PM_CLK_CMDSHIFT 7
++
++/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */
++#define MBX_PM_CLK_CMDMASK 7
++
++/* MBX_PM_MAX_RESOURCES: CORE 1 Clock resources. */
++#define MBX_CORE1_RESOURCES 7
++
++/* MBX_PM_MAX_RESOURCES: CORE 2 Clock Resources. */
++#define MBX_CORE2_RESOURCES 1
++
++/* MBX_PM_MAX_RESOURCES: TOTAL Clock Reosurces. */
++#define MBX_PM_MAX_RESOURCES 11
++
++/* Power Management Commands */
++#define BPWR_DISABLE_CLOCK 0
++#define BPWR_ENABLE_CLOCK 1
++
++/* OMAP242x specific resources */
++enum bpwr_ext_clock_id {
++ BPWR_GP_TIMER5 = 0x10,
++ BPWR_GP_TIMER6,
++ BPWR_GP_TIMER7,
++ BPWR_GP_TIMER8,
++ BPWR_WD_TIMER3,
++ BPWR_MCBSP1,
++ BPWR_MCBSP2,
++ BPWR_MCBSP3,
++ BPWR_MCBSP4,
++ BPWR_MCBSP5,
++ BPWR_SSI = 0x20
++};
++
++static const u32 bpwr_clkid[] = {
++ (u32) BPWR_GP_TIMER5,
++ (u32) BPWR_GP_TIMER6,
++ (u32) BPWR_GP_TIMER7,
++ (u32) BPWR_GP_TIMER8,
++ (u32) BPWR_WD_TIMER3,
++ (u32) BPWR_MCBSP1,
++ (u32) BPWR_MCBSP2,
++ (u32) BPWR_MCBSP3,
++ (u32) BPWR_MCBSP4,
++ (u32) BPWR_MCBSP5,
++ (u32) BPWR_SSI
++};
++
++struct bpwr_clk_t {
++ u32 clk_id;
++ enum dsp_clk_id clk;
++};
++
++static const struct bpwr_clk_t bpwr_clks[] = {
++ {(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5},
++ {(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6},
++ {(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7},
++ {(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8},
++ {(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3},
++ {(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1},
++ {(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2},
++ {(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3},
++ {(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4},
++ {(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5},
++ {(u32) BPWR_SSI, DSP_CLK_SSI}
++};
++
++/* Interrupt Register Offsets */
++#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */
++#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */
++
++#define DSP_MAILBOX1_INT 10
++/*
++ * Bit definition of Interrupt Level Registers
++ */
++
++/* Mail Box defines */
++#define MB_ARM2DSP1_REG_OFFSET 0x00
++
++#define MB_ARM2DSP1B_REG_OFFSET 0x04
++
++#define MB_DSP2ARM1B_REG_OFFSET 0x0C
++
++#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18
++
++#define MB_ARM2DSP_FLAG 0x0001
++
++#define MBOX_ARM2DSP HW_MBOX_ID0
++#define MBOX_DSP2ARM HW_MBOX_ID1
++#define MBOX_ARM HW_MBOX_U0_ARM
++#define MBOX_DSP HW_MBOX_U1_DSP1
++
++#define ENABLE true
++#define DISABLE false
++
++#define HIGH_LEVEL true
++#define LOW_LEVEL false
++
++/* Macro's */
++#define CLEAR_BIT(reg, mask) (reg &= ~mask)
++#define SET_BIT(reg, mask) (reg |= mask)
++
++#define SET_GROUP_BITS16(reg, position, width, value) \
++ do {\
++ reg &= ~((0xFFFF >> (16 - (width))) << (position)) ; \
++ reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \
++ } while (0);
++
++#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
++
++/* This Bridge driver's device context: */
++struct bridge_dev_context {
++ struct dev_object *hdev_obj; /* Handle to Bridge device object. */
++ u32 dw_dsp_base_addr; /* Arm's API to DSP virt base addr */
++ /*
++ * DSP External memory prog address as seen virtually by the OS on
++ * the host side.
++ */
++ u32 dw_dsp_ext_base_addr; /* See the comment above */
++ u32 dw_api_reg_base; /* API mem map'd registers */
++ void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
++ u32 dw_api_clk_base; /* CLK Registers */
++ u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
++ u32 dw_public_rhea; /* Pub Rhea */
++ u32 dw_int_addr; /* MB INTR reg */
++ u32 dw_tc_endianism; /* TC Endianism register */
++ u32 dw_test_base; /* DSP MMU Mapped registers */
++ u32 dw_self_loop; /* Pointer to the selfloop */
++ u32 dw_dsp_start_add; /* API Boot vector */
++ u32 dw_internal_size; /* Internal memory size */
++
++ struct omap_mbox *mbox; /* Mail box handle */
++
++ struct cfg_hostres *resources; /* Host Resources */
++
++ /*
++ * Processor specific info is set when prog loaded and read from DCD.
++ * [See bridge_dev_ctrl()] PROC info contains DSP-MMU TLB entries.
++ */
++ /* DMMU TLB entries */
++ struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
++ u32 dw_brd_state; /* Last known board state. */
++
++ /* TC Settings */
++ bool tc_word_swap_on; /* Traffic Controller Word Swap */
++ struct pg_table_attrs *pt_attrs;
++ u32 dsp_per_clks;
++};
++
++/*
++ * If dsp_debug is true, do not branch to the DSP entry
++ * point and wait for DSP to boot.
++ */
++extern s32 dsp_debug;
++
++/*
++ * ======== sm_interrupt_dsp ========
++ * Purpose:
++ * Set interrupt value & send an interrupt to the DSP processor(s).
++ * This is typicaly used when mailbox interrupt mechanisms allow data
++ * to be associated with interrupt such as for OMAP's CMD/DATA regs.
++ * Parameters:
++ * dev_context: Handle to Bridge driver defined device info.
++ * mb_val: Value associated with interrupt(e.g. mailbox value).
++ * Returns:
++ * 0: Interrupt sent;
++ * else: Unable to send interrupt.
++ * Requires:
++ * Ensures:
++ */
++int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val);
++
++#endif /* _TIOMAP_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/_tiomap_pwr.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/_tiomap_pwr.h 2010-08-18 11:24:23.154051495 +0300
+@@ -0,0 +1,85 @@
++/*
++ * _tiomap_pwr.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definitions and types for the DSP wake/sleep routines.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _TIOMAP_PWR_
++#define _TIOMAP_PWR_
++
++#ifdef CONFIG_PM
++extern s32 dsp_test_sleepstate;
++#endif
++
++extern struct mailbox_context mboxsetting;
++
++/*
++ * ======== wake_dsp =========
++ * Wakes up the DSP from DeepSleep
++ */
++extern int wake_dsp(struct bridge_dev_context *dev_context,
++ void *pargs);
++
++/*
++ * ======== sleep_dsp =========
++ * Places the DSP in DeepSleep.
++ */
++extern int sleep_dsp(struct bridge_dev_context *dev_context,
++ u32 dw_cmd, void *pargs);
++/*
++ * ========interrupt_dsp========
++ * Sends an interrupt to DSP unconditionally.
++ */
++extern void interrupt_dsp(struct bridge_dev_context *dev_context,
++ u16 mb_val);
++
++/*
++ * ======== wake_dsp =========
++ * Wakes up the DSP from DeepSleep
++ */
++extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context
++ *dev_context, void *pargs);
++/*
++ * ======== handle_hibernation_from_dsp ========
++ * Handle Hibernation requested from DSP
++ */
++int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context);
++/*
++ * ======== post_scale_dsp ========
++ * Handle Post Scale notification to DSP
++ */
++int post_scale_dsp(struct bridge_dev_context *dev_context,
++ void *pargs);
++/*
++ * ======== pre_scale_dsp ========
++ * Handle Pre Scale notification to DSP
++ */
++int pre_scale_dsp(struct bridge_dev_context *dev_context,
++ void *pargs);
++/*
++ * ======== handle_constraints_set ========
++ * Handle constraints request from DSP
++ */
++int handle_constraints_set(struct bridge_dev_context *dev_context,
++ void *pargs);
++
++/*
++ * ======== dsp_clk_wakeup_event_ctrl ========
++ * This function sets the group selction bits for while
++ * enabling/disabling.
++ */
++void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable);
++
++#endif /* _TIOMAP_PWR_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/chnl_sm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/chnl_sm.c 2010-08-18 11:24:23.154051495 +0300
+@@ -0,0 +1,1014 @@
++/*
++ * chnl_sm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implements upper edge functions for Bridge driver channel module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/*
++ * The lower edge functions must be implemented by the Bridge driver
++ * writer, and are declared in chnl_sm.h.
++ *
++ * Care is taken in this code to prevent simulataneous access to channel
++ * queues from
++ * 1. Threads.
++ * 2. io_dpc(), scheduled from the io_isr() as an event.
++ *
++ * This is done primarily by:
++ * - Semaphores.
++ * - state flags in the channel object; and
++ * - ensuring the IO_Dispatch() routine, which is called from both
++ * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered.
++ *
++ * Channel Invariant:
++ * There is an important invariant condition which must be maintained per
++ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
++ * which may cause timeouts and/or failure offunction sync_wait_on_event.
++ * This invariant condition is:
++ *
++ * LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
++ * and
++ * !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Bridge Driver */
++#include <dspbridge/dspdefs.h>
++#include <dspbridge/dspchnl.h>
++#include "_tiomap.h"
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/io_sm.h>
++
++/* ----------------------------------- Define for This */
++#define USERMODE_ADDR PAGE_OFFSET
++
++#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
++
++/* ----------------------------------- Function Prototypes */
++static struct lst_list *create_chirp_list(u32 chirps);
++
++static void free_chirp_list(struct lst_list *chirp_list);
++
++static struct chnl_irp *make_new_chirp(void);
++
++static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
++ u32 *chnl);
++
++/*
++ * ======== bridge_chnl_add_io_req ========
++ * Enqueue an I/O request for data transfer on a channel to the DSP.
++ * The direction (mode) is specified in the channel object. Note the DSP
++ * address is specified for channels opened in direct I/O mode.
++ */
++int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
++ u32 byte_size, u32 buf_size,
++ u32 dw_dsp_addr, u32 dw_arg)
++{
++ int status = 0;
++ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
++ struct chnl_irp *chnl_packet_obj = NULL;
++ struct bridge_dev_context *dev_ctxt;
++ struct dev_object *dev_obj;
++ u8 dw_state;
++ bool is_eos;
++ struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
++ u8 *host_sys_buf = NULL;
++ bool sched_dpc = false;
++ u16 mb_val = 0;
++
++ is_eos = (byte_size == 0);
++
++ /* Validate args */
++ if (!host_buf || !pchnl) {
++ status = -EFAULT;
++ } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
++ status = -EPERM;
++ } else {
++ /*
++ * Check the channel state: only queue chirp if channel state
++ * allows it.
++ */
++ dw_state = pchnl->dw_state;
++ if (dw_state != CHNL_STATEREADY) {
++ if (dw_state & CHNL_STATECANCEL)
++ status = -ECANCELED;
++ else if ((dw_state & CHNL_STATEEOS) &&
++ CHNL_IS_OUTPUT(pchnl->chnl_mode))
++ status = -EPIPE;
++ else
++ /* No other possible states left */
++ DBC_ASSERT(0);
++ }
++ }
++
++ dev_obj = dev_get_first();
++ dev_get_bridge_context(dev_obj, &dev_ctxt);
++ if (!dev_ctxt)
++ status = -EFAULT;
++
++ if (status)
++ goto func_end;
++
++ if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
++ if (!(host_buf < (void *)USERMODE_ADDR)) {
++ host_sys_buf = host_buf;
++ goto func_cont;
++ }
++ /* if addr in user mode, then copy to kernel space */
++ host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
++ if (host_sys_buf == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
++ status = copy_from_user(host_sys_buf, host_buf,
++ buf_size);
++ if (status) {
++ kfree(host_sys_buf);
++ host_sys_buf = NULL;
++ status = -EFAULT;
++ goto func_end;
++ }
++ }
++ }
++func_cont:
++ /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
++ * channels. DPCCS is held to avoid race conditions with PCPY channels.
++ * If DPC is scheduled in process context (iosm_schedule) and any
++ * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
++ * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
++ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
++ omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
++ if (pchnl->chnl_type == CHNL_PCPY) {
++ /* This is a processor-copy channel. */
++ if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
++ /* Check buffer size on output channels for fit. */
++ if (byte_size >
++ io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
++ status = -EINVAL;
++
++ }
++ }
++ if (!status) {
++ /* Get a free chirp: */
++ chnl_packet_obj =
++ (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
++ if (chnl_packet_obj == NULL)
++ status = -EIO;
++
++ }
++ if (!status) {
++ /* Enqueue the chirp on the chnl's IORequest queue: */
++ chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
++ host_buf;
++ if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
++ chnl_packet_obj->host_sys_buf = host_sys_buf;
++
++ /*
++ * Note: for dma chans dw_dsp_addr contains dsp address
++ * of SM buffer.
++ */
++ DBC_ASSERT(chnl_mgr_obj->word_size != 0);
++ /* DSP address */
++ chnl_packet_obj->dsp_tx_addr =
++ dw_dsp_addr / chnl_mgr_obj->word_size;
++ chnl_packet_obj->byte_size = byte_size;
++ chnl_packet_obj->buf_size = buf_size;
++ /* Only valid for output channel */
++ chnl_packet_obj->dw_arg = dw_arg;
++ chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
++ CHNL_IOCSTATCOMPLETE);
++ lst_put_tail(pchnl->pio_requests,
++ (struct list_head *)chnl_packet_obj);
++ pchnl->cio_reqs++;
++ DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
++ /*
++ * If end of stream, update the channel state to prevent
++ * more IOR's.
++ */
++ if (is_eos)
++ pchnl->dw_state |= CHNL_STATEEOS;
++
++ /* Legacy DSM Processor-Copy */
++ DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
++ /* Request IO from the DSP */
++ io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
++ (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
++ IO_OUTPUT), &mb_val);
++ sched_dpc = true;
++
++ }
++ omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
++ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
++ if (mb_val != 0)
++ sm_interrupt_dsp(dev_ctxt, mb_val);
++
++ /* Schedule a DPC, to do the actual data transfer */
++ if (sched_dpc)
++ iosm_schedule(chnl_mgr_obj->hio_mgr);
++
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_cancel_io ========
++ * Return all I/O requests to the client which have not yet been
++ * transferred. The channel's I/O completion object is
++ * signalled, and all the I/O requests are queued as IOC's, with the
++ * status field set to CHNL_IOCSTATCANCEL.
++ * This call is typically used in abort situations, and is a prelude to
++ * chnl_close();
++ */
++int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
++{
++ int status = 0;
++ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
++ u32 chnl_id = -1;
++ s8 chnl_mode;
++ struct chnl_irp *chnl_packet_obj;
++ struct chnl_mgr *chnl_mgr_obj = NULL;
++
++ /* Check args: */
++ if (pchnl && pchnl->chnl_mgr_obj) {
++ chnl_id = pchnl->chnl_id;
++ chnl_mode = pchnl->chnl_mode;
++ chnl_mgr_obj = pchnl->chnl_mgr_obj;
++ } else {
++ status = -EFAULT;
++ }
++ if (status)
++ goto func_end;
++
++ /* Mark this channel as cancelled, to prevent further IORequests or
++ * IORequests or dispatching. */
++ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
++ pchnl->dw_state |= CHNL_STATECANCEL;
++ if (LST_IS_EMPTY(pchnl->pio_requests))
++ goto func_cont;
++
++ if (pchnl->chnl_type == CHNL_PCPY) {
++ /* Indicate we have no more buffers available for transfer: */
++ if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
++ io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
++ } else {
++ /* Record that we no longer have output buffers
++ * available: */
++ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
++ }
++ }
++ /* Move all IOR's to IOC queue: */
++ while (!LST_IS_EMPTY(pchnl->pio_requests)) {
++ chnl_packet_obj =
++ (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
++ if (chnl_packet_obj) {
++ chnl_packet_obj->byte_size = 0;
++ chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
++ lst_put_tail(pchnl->pio_completions,
++ (struct list_head *)chnl_packet_obj);
++ pchnl->cio_cs++;
++ pchnl->cio_reqs--;
++ DBC_ASSERT(pchnl->cio_reqs >= 0);
++ }
++ }
++func_cont:
++ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_close ========
++ * Purpose:
++ * Ensures all pending I/O on this channel is cancelled, discards all
++ * queued I/O completion notifications, then frees the resources allocated
++ * for this channel, and makes the corresponding logical channel id
++ * available for subsequent use.
++ */
++int bridge_chnl_close(struct chnl_object *chnl_obj)
++{
++ int status;
++ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
++
++ /* Check args: */
++ if (!pchnl) {
++ status = -EFAULT;
++ goto func_cont;
++ }
++ {
++ /* Cancel IO: this ensures no further IO requests or
++ * notifications. */
++ status = bridge_chnl_cancel_io(chnl_obj);
++ }
++func_cont:
++ if (!status) {
++ /* Assert I/O on this channel is now cancelled: Protects
++ * from io_dpc. */
++ DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
++ /* Invalidate channel object: Protects from
++ * CHNL_GetIOCompletion(). */
++ /* Free the slot in the channel manager: */
++ pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
++ spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
++ pchnl->chnl_mgr_obj->open_channels -= 1;
++ spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
++ if (pchnl->ntfy_obj) {
++ ntfy_delete(pchnl->ntfy_obj);
++ kfree(pchnl->ntfy_obj);
++ pchnl->ntfy_obj = NULL;
++ }
++ /* Reset channel event: (NOTE: user_event freed in user
++ * context.). */
++ if (pchnl->sync_event) {
++ sync_reset_event(pchnl->sync_event);
++ kfree(pchnl->sync_event);
++ pchnl->sync_event = NULL;
++ }
++ /* Free I/O request and I/O completion queues: */
++ if (pchnl->pio_completions) {
++ free_chirp_list(pchnl->pio_completions);
++ pchnl->pio_completions = NULL;
++ pchnl->cio_cs = 0;
++ }
++ if (pchnl->pio_requests) {
++ free_chirp_list(pchnl->pio_requests);
++ pchnl->pio_requests = NULL;
++ pchnl->cio_reqs = 0;
++ }
++ if (pchnl->free_packets_list) {
++ free_chirp_list(pchnl->free_packets_list);
++ pchnl->free_packets_list = NULL;
++ }
++ /* Release channel object. */
++ kfree(pchnl);
++ pchnl = NULL;
++ }
++ DBC_ENSURE(status || !pchnl);
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_create ========
++ * Create a channel manager object, responsible for opening new channels
++ * and closing old ones for a given board.
++ */
++int bridge_chnl_create(struct chnl_mgr **channel_mgr,
++ struct dev_object *hdev_obj,
++ const struct chnl_mgrattrs *mgr_attrts)
++{
++ int status = 0;
++ struct chnl_mgr *chnl_mgr_obj = NULL;
++ u8 max_channels;
++
++ /* Check DBC requirements: */
++ DBC_REQUIRE(channel_mgr != NULL);
++ DBC_REQUIRE(mgr_attrts != NULL);
++ DBC_REQUIRE(mgr_attrts->max_channels > 0);
++ DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
++ DBC_REQUIRE(mgr_attrts->word_size != 0);
++
++ /* Allocate channel manager object */
++ chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
++ if (chnl_mgr_obj) {
++ /*
++ * The max_channels attr must equal the # of supported chnls for
++ * each transport(# chnls for PCPY = DDMA = ZCPY): i.e.
++ * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
++ * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
++ */
++ DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
++ max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
++ /* Create array of channels */
++ chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
++ * max_channels, GFP_KERNEL);
++ if (chnl_mgr_obj->ap_channel) {
++ /* Initialize chnl_mgr object */
++ chnl_mgr_obj->dw_type = CHNL_TYPESM;
++ chnl_mgr_obj->word_size = mgr_attrts->word_size;
++ /* Total # chnls supported */
++ chnl_mgr_obj->max_channels = max_channels;
++ chnl_mgr_obj->open_channels = 0;
++ chnl_mgr_obj->dw_output_mask = 0;
++ chnl_mgr_obj->dw_last_output = 0;
++ chnl_mgr_obj->hdev_obj = hdev_obj;
++ spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
++ } else {
++ status = -ENOMEM;
++ }
++ } else {
++ status = -ENOMEM;
++ }
++
++ if (status) {
++ bridge_chnl_destroy(chnl_mgr_obj);
++ *channel_mgr = NULL;
++ } else {
++ /* Return channel manager object to caller... */
++ *channel_mgr = chnl_mgr_obj;
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_destroy ========
++ * Purpose:
++ * Close all open channels, and destroy the channel manager.
++ */
++int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
++{
++ int status = 0;
++ struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
++ u32 chnl_id;
++
++ if (hchnl_mgr) {
++ /* Close all open channels: */
++ for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
++ chnl_id++) {
++ status =
++ bridge_chnl_close(chnl_mgr_obj->ap_channel
++ [chnl_id]);
++ if (status)
++ dev_dbg(bridge, "%s: Error status 0x%x\n",
++ __func__, status);
++ }
++
++ /* Free channel manager object: */
++ kfree(chnl_mgr_obj->ap_channel);
++
++ /* Set hchnl_mgr to NULL in device object. */
++ dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
++ /* Free this Chnl Mgr object: */
++ kfree(hchnl_mgr);
++ } else {
++ status = -EFAULT;
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_flush_io ========
++ * purpose:
++ * Flushes all the outstanding data requests on a channel.
++ */
++int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
++{
++ int status = 0;
++ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
++ s8 chnl_mode = -1;
++ struct chnl_mgr *chnl_mgr_obj;
++ struct chnl_ioc chnl_ioc_obj;
++ /* Check args: */
++ if (pchnl) {
++ if ((timeout == CHNL_IOCNOWAIT)
++ && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
++ status = -EINVAL;
++ } else {
++ chnl_mode = pchnl->chnl_mode;
++ chnl_mgr_obj = pchnl->chnl_mgr_obj;
++ }
++ } else {
++ status = -EFAULT;
++ }
++ if (!status) {
++ /* Note: Currently, if another thread continues to add IO
++ * requests to this channel, this function will continue to
++ * flush all such queued IO requests. */
++ if (CHNL_IS_OUTPUT(chnl_mode)
++ && (pchnl->chnl_type == CHNL_PCPY)) {
++ /* Wait for IO completions, up to the specified
++ * timeout: */
++ while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
++ status = bridge_chnl_get_ioc(chnl_obj,
++ timeout, &chnl_ioc_obj);
++ if (status)
++ continue;
++
++ if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
++ status = -ETIMEDOUT;
++
++ }
++ } else {
++ status = bridge_chnl_cancel_io(chnl_obj);
++ /* Now, leave the channel in the ready state: */
++ pchnl->dw_state &= ~CHNL_STATECANCEL;
++ }
++ }
++ DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_get_info ========
++ * Purpose:
++ * Retrieve information related to a channel.
++ */
++int bridge_chnl_get_info(struct chnl_object *chnl_obj,
++ struct chnl_info *channel_info)
++{
++ int status = 0;
++ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
++ if (channel_info != NULL) {
++ if (pchnl) {
++ /* Return the requested information: */
++ channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
++ channel_info->event_obj = pchnl->user_event;
++ channel_info->cnhl_id = pchnl->chnl_id;
++ channel_info->dw_mode = pchnl->chnl_mode;
++ channel_info->bytes_tx = pchnl->bytes_moved;
++ channel_info->process = pchnl->process;
++ channel_info->sync_event = pchnl->sync_event;
++ channel_info->cio_cs = pchnl->cio_cs;
++ channel_info->cio_reqs = pchnl->cio_reqs;
++ channel_info->dw_state = pchnl->dw_state;
++ } else {
++ status = -EFAULT;
++ }
++ } else {
++ status = -EFAULT;
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_get_ioc ========
++ * Optionally wait for I/O completion on a channel. Dequeue an I/O
++ * completion record, which contains information about the completed
++ * I/O request.
++ * Note: Ensures Channel Invariant (see notes above).
++ */
++int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
++ struct chnl_ioc *chan_ioc)
++{
++ int status = 0;
++ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
++ struct chnl_irp *chnl_packet_obj;
++ int stat_sync;
++ bool dequeue_ioc = true;
++ struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
++ u8 *host_sys_buf = NULL;
++ struct bridge_dev_context *dev_ctxt;
++ struct dev_object *dev_obj;
++
++ /* Check args: */
++ if (!chan_ioc || !pchnl) {
++ status = -EFAULT;
++ } else if (timeout == CHNL_IOCNOWAIT) {
++ if (LST_IS_EMPTY(pchnl->pio_completions))
++ status = -EREMOTEIO;
++
++ }
++
++ dev_obj = dev_get_first();
++ dev_get_bridge_context(dev_obj, &dev_ctxt);
++ if (!dev_ctxt)
++ status = -EFAULT;
++
++ if (status)
++ goto func_end;
++
++ ioc.status = CHNL_IOCSTATCOMPLETE;
++ if (timeout !=
++ CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
++ if (timeout == CHNL_IOCINFINITE)
++ timeout = SYNC_INFINITE;
++
++ stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
++ if (stat_sync == -ETIME) {
++ /* No response from DSP */
++ ioc.status |= CHNL_IOCSTATTIMEOUT;
++ dequeue_ioc = false;
++ } else if (stat_sync == -EPERM) {
++ /* This can occur when the user mode thread is
++ * aborted (^C), or when _VWIN32_WaitSingleObject()
++ * fails due to unkown causes. */
++ /* Even though Wait failed, there may be something in
++ * the Q: */
++ if (LST_IS_EMPTY(pchnl->pio_completions)) {
++ ioc.status |= CHNL_IOCSTATCANCEL;
++ dequeue_ioc = false;
++ }
++ }
++ }
++ /* See comment in AddIOReq */
++ spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
++ omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
++ if (dequeue_ioc) {
++ /* Dequeue IOC and set chan_ioc; */
++ DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
++ chnl_packet_obj =
++ (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
++ /* Update chan_ioc from channel state and chirp: */
++ if (chnl_packet_obj) {
++ pchnl->cio_cs--;
++ /* If this is a zero-copy channel, then set IOC's pbuf
++ * to the DSP's address. This DSP address will get
++ * translated to user's virtual addr later. */
++ {
++ host_sys_buf = chnl_packet_obj->host_sys_buf;
++ ioc.pbuf = chnl_packet_obj->host_user_buf;
++ }
++ ioc.byte_size = chnl_packet_obj->byte_size;
++ ioc.buf_size = chnl_packet_obj->buf_size;
++ ioc.dw_arg = chnl_packet_obj->dw_arg;
++ ioc.status |= chnl_packet_obj->status;
++ /* Place the used chirp on the free list: */
++ lst_put_tail(pchnl->free_packets_list,
++ (struct list_head *)chnl_packet_obj);
++ } else {
++ ioc.pbuf = NULL;
++ ioc.byte_size = 0;
++ }
++ } else {
++ ioc.pbuf = NULL;
++ ioc.byte_size = 0;
++ ioc.dw_arg = 0;
++ ioc.buf_size = 0;
++ }
++ /* Ensure invariant: If any IOC's are queued for this channel... */
++ if (!LST_IS_EMPTY(pchnl->pio_completions)) {
++ /* Since DSPStream_Reclaim() does not take a timeout
++ * parameter, we pass the stream's timeout value to
++ * bridge_chnl_get_ioc. We cannot determine whether or not
++ * we have waited in User mode. Since the stream's timeout
++ * value may be non-zero, we still have to set the event.
++ * Therefore, this optimization is taken out.
++ *
++ * if (timeout == CHNL_IOCNOWAIT) {
++ * ... ensure event is set..
++ * sync_set_event(pchnl->sync_event);
++ * } */
++ sync_set_event(pchnl->sync_event);
++ } else {
++ /* else, if list is empty, ensure event is reset. */
++ sync_reset_event(pchnl->sync_event);
++ }
++ omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
++ spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
++ if (dequeue_ioc
++ && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
++ if (!(ioc.pbuf < (void *)USERMODE_ADDR))
++ goto func_cont;
++
++ /* If the addr is in user mode, then copy it */
++ if (!host_sys_buf || !ioc.pbuf) {
++ status = -EFAULT;
++ goto func_cont;
++ }
++ if (!CHNL_IS_INPUT(pchnl->chnl_mode))
++ goto func_cont1;
++
++ /*host_user_buf */
++ status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
++ if (status) {
++ if (current->flags & PF_EXITING)
++ status = 0;
++ }
++ if (status)
++ status = -EFAULT;
++func_cont1:
++ kfree(host_sys_buf);
++ }
++func_cont:
++ /* Update User's IOC block: */
++ *chan_ioc = ioc;
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_get_mgr_info ========
++ * Retrieve information related to the channel manager.
++ */
++int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
++ struct chnl_mgrinfo *mgr_info)
++{
++ int status = 0;
++ struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
++
++ if (mgr_info != NULL) {
++ if (ch_id <= CHNL_MAXCHANNELS) {
++ if (hchnl_mgr) {
++ /* Return the requested information: */
++ mgr_info->chnl_obj =
++ chnl_mgr_obj->ap_channel[ch_id];
++ mgr_info->open_channels =
++ chnl_mgr_obj->open_channels;
++ mgr_info->dw_type = chnl_mgr_obj->dw_type;
++ /* total # of chnls */
++ mgr_info->max_channels =
++ chnl_mgr_obj->max_channels;
++ } else {
++ status = -EFAULT;
++ }
++ } else {
++ status = -ECHRNG;
++ }
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_idle ========
++ * Idles a particular channel.
++ */
++int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
++ bool flush_data)
++{
++ s8 chnl_mode;
++ struct chnl_mgr *chnl_mgr_obj;
++ int status = 0;
++
++ DBC_REQUIRE(chnl_obj);
++
++ chnl_mode = chnl_obj->chnl_mode;
++ chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
++
++ if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
++ /* Wait for IO completions, up to the specified timeout: */
++ status = bridge_chnl_flush_io(chnl_obj, timeout);
++ } else {
++ status = bridge_chnl_cancel_io(chnl_obj);
++
++ /* Reset the byte count and put channel back in ready state. */
++ chnl_obj->bytes_moved = 0;
++ chnl_obj->dw_state &= ~CHNL_STATECANCEL;
++ }
++
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_open ========
++ * Open a new half-duplex channel to the DSP board.
++ */
++int bridge_chnl_open(struct chnl_object **chnl,
++ struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
++ u32 ch_id, const struct chnl_attr *pattrs)
++{
++ int status = 0;
++ struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
++ struct chnl_object *pchnl = NULL;
++ struct sync_object *sync_event = NULL;
++ /* Ensure DBC requirements: */
++ DBC_REQUIRE(chnl != NULL);
++ DBC_REQUIRE(pattrs != NULL);
++ DBC_REQUIRE(hchnl_mgr != NULL);
++ *chnl = NULL;
++ /* Validate Args: */
++ if (pattrs->uio_reqs == 0) {
++ status = -EINVAL;
++ } else {
++ if (!hchnl_mgr) {
++ status = -EFAULT;
++ } else {
++ if (ch_id != CHNL_PICKFREE) {
++ if (ch_id >= chnl_mgr_obj->max_channels)
++ status = -ECHRNG;
++ else if (chnl_mgr_obj->ap_channel[ch_id] !=
++ NULL)
++ status = -EALREADY;
++ } else {
++ /* Check for free channel */
++ status =
++ search_free_channel(chnl_mgr_obj, &ch_id);
++ }
++ }
++ }
++ if (status)
++ goto func_end;
++
++ DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
++ /* Create channel object: */
++ pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
++ if (!pchnl) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ /* Protect queues from io_dpc: */
++ pchnl->dw_state = CHNL_STATECANCEL;
++ /* Allocate initial IOR and IOC queues: */
++ pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
++ pchnl->pio_requests = create_chirp_list(0);
++ pchnl->pio_completions = create_chirp_list(0);
++ pchnl->chnl_packets = pattrs->uio_reqs;
++ pchnl->cio_cs = 0;
++ pchnl->cio_reqs = 0;
++ sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
++ if (sync_event)
++ sync_init_event(sync_event);
++ else
++ status = -ENOMEM;
++
++ if (!status) {
++ pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
++ GFP_KERNEL);
++ if (pchnl->ntfy_obj)
++ ntfy_init(pchnl->ntfy_obj);
++ else
++ status = -ENOMEM;
++ }
++
++ if (!status) {
++ if (pchnl->pio_completions && pchnl->pio_requests &&
++ pchnl->free_packets_list) {
++ /* Initialize CHNL object fields: */
++ pchnl->chnl_mgr_obj = chnl_mgr_obj;
++ pchnl->chnl_id = ch_id;
++ pchnl->chnl_mode = chnl_mode;
++ pchnl->user_event = sync_event;
++ pchnl->sync_event = sync_event;
++ /* Get the process handle */
++ pchnl->process = current->tgid;
++ pchnl->pcb_arg = 0;
++ pchnl->bytes_moved = 0;
++ /* Default to proc-copy */
++ pchnl->chnl_type = CHNL_PCPY;
++ } else {
++ status = -ENOMEM;
++ }
++ }
++
++ if (status) {
++ /* Free memory */
++ if (pchnl->pio_completions) {
++ free_chirp_list(pchnl->pio_completions);
++ pchnl->pio_completions = NULL;
++ pchnl->cio_cs = 0;
++ }
++ if (pchnl->pio_requests) {
++ free_chirp_list(pchnl->pio_requests);
++ pchnl->pio_requests = NULL;
++ }
++ if (pchnl->free_packets_list) {
++ free_chirp_list(pchnl->free_packets_list);
++ pchnl->free_packets_list = NULL;
++ }
++ kfree(sync_event);
++ sync_event = NULL;
++
++ if (pchnl->ntfy_obj) {
++ ntfy_delete(pchnl->ntfy_obj);
++ kfree(pchnl->ntfy_obj);
++ pchnl->ntfy_obj = NULL;
++ }
++ kfree(pchnl);
++ } else {
++ /* Insert channel object in channel manager: */
++ chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
++ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
++ chnl_mgr_obj->open_channels++;
++ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
++ /* Return result... */
++ pchnl->dw_state = CHNL_STATEREADY;
++ *chnl = pchnl;
++ }
++func_end:
++ DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
++ return status;
++}
++
++/*
++ * ======== bridge_chnl_register_notify ========
++ * Registers for events on a particular channel.
++ */
++int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
++ u32 event_mask, u32 notify_type,
++ struct dsp_notification *hnotification)
++{
++ int status = 0;
++
++ DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
++
++ if (event_mask)
++ status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
++ event_mask, notify_type);
++ else
++ status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
++
++ return status;
++}
++
++/*
++ * ======== create_chirp_list ========
++ * Purpose:
++ * Initialize a queue of channel I/O Request/Completion packets.
++ * Parameters:
++ * chirps: Number of Chirps to allocate.
++ * Returns:
++ * Pointer to queue of IRPs, or NULL.
++ * Requires:
++ * Ensures:
++ */
++static struct lst_list *create_chirp_list(u32 chirps)
++{
++ struct lst_list *chirp_list;
++ struct chnl_irp *chnl_packet_obj;
++ u32 i;
++
++ chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
++
++ if (chirp_list) {
++ INIT_LIST_HEAD(&chirp_list->head);
++ /* Make N chirps and place on queue. */
++ for (i = 0; (i < chirps)
++ && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
++ lst_put_tail(chirp_list,
++ (struct list_head *)chnl_packet_obj);
++ }
++
++ /* If we couldn't allocate all chirps, free those allocated: */
++ if (i != chirps) {
++ free_chirp_list(chirp_list);
++ chirp_list = NULL;
++ }
++ }
++
++ return chirp_list;
++}
++
++/*
++ * ======== free_chirp_list ========
++ * Purpose:
++ * Free the queue of Chirps.
++ */
++static void free_chirp_list(struct lst_list *chirp_list)
++{
++ DBC_REQUIRE(chirp_list != NULL);
++
++ while (!LST_IS_EMPTY(chirp_list))
++ kfree(lst_get_head(chirp_list));
++
++ kfree(chirp_list);
++}
++
++/*
++ * ======== make_new_chirp ========
++ * Allocate the memory for a new channel IRP.
++ */
++static struct chnl_irp *make_new_chirp(void)
++{
++ struct chnl_irp *chnl_packet_obj;
++
++ chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
++ if (chnl_packet_obj != NULL) {
++ /* lst_init_elem only resets the list's member values. */
++ lst_init_elem(&chnl_packet_obj->link);
++ }
++
++ return chnl_packet_obj;
++}
++
++/*
++ * ======== search_free_channel ========
++ * Search for a free channel slot in the array of channel pointers.
++ */
++static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
++ u32 *chnl)
++{
++ int status = -ENOSR;
++ u32 i;
++
++ DBC_REQUIRE(chnl_mgr_obj);
++
++ for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
++ if (chnl_mgr_obj->ap_channel[i] == NULL) {
++ status = 0;
++ *chnl = i;
++ break;
++ }
++ }
++
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/dsp-clock.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/dsp-clock.c 2010-08-18 11:24:23.154051495 +0300
+@@ -0,0 +1,422 @@
++/*
++ * clk.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Clock and Timer services.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++#include <plat/dmtimer.h>
++#include <plat/mcbsp.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/cfg.h>
++#include <dspbridge/drv.h>
++#include <dspbridge/dev.h>
++#include "_tiomap.h"
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/clk.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++
++#define OMAP_SSI_OFFSET 0x58000
++#define OMAP_SSI_SIZE 0x1000
++#define OMAP_SSI_SYSCONFIG_OFFSET 0x10
++
++#define SSI_AUTOIDLE (1 << 0)
++#define SSI_SIDLE_SMARTIDLE (2 << 3)
++#define SSI_MIDLE_NOIDLE (1 << 12)
++
++/* Clk types requested by the dsp */
++#define IVA2_CLK 0
++#define GPT_CLK 1
++#define WDT_CLK 2
++#define MCBSP_CLK 3
++#define SSI_CLK 4
++
++/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
++#define DMT_ID(id) ((id) + 4)
++
++/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
++#define MCBSP_ID(id) ((id) - 6)
++
++static struct omap_dm_timer *timer[4];
++
++struct clk *iva2_clk;
++
++struct dsp_ssi {
++ struct clk *sst_fck;
++ struct clk *ssr_fck;
++ struct clk *ick;
++};
++
++static struct dsp_ssi ssi;
++
++static u32 dsp_clocks;
++
++static inline u32 is_dsp_clk_active(u32 clk, u8 id)
++{
++ return clk & (1 << id);
++}
++
++static inline void set_dsp_clk_active(u32 *clk, u8 id)
++{
++ *clk |= (1 << id);
++}
++
++static inline void set_dsp_clk_inactive(u32 *clk, u8 id)
++{
++ *clk &= ~(1 << id);
++}
++
++static s8 get_clk_type(u8 id)
++{
++ s8 type;
++
++ if (id == DSP_CLK_IVA2)
++ type = IVA2_CLK;
++ else if (id <= DSP_CLK_GPT8)
++ type = GPT_CLK;
++ else if (id == DSP_CLK_WDT3)
++ type = WDT_CLK;
++ else if (id <= DSP_CLK_MCBSP5)
++ type = MCBSP_CLK;
++ else if (id == DSP_CLK_SSI)
++ type = SSI_CLK;
++ else
++ type = -1;
++
++ return type;
++}
++
++/*
++ * ======== dsp_clk_exit ========
++ * Purpose:
++ * Cleanup CLK module.
++ */
++void dsp_clk_exit(void)
++{
++ dsp_clock_disable_all(dsp_clocks);
++
++ clk_put(iva2_clk);
++ clk_put(ssi.sst_fck);
++ clk_put(ssi.ssr_fck);
++ clk_put(ssi.ick);
++}
++
++/*
++ * ======== dsp_clk_init ========
++ * Purpose:
++ * Initialize CLK module.
++ */
++void dsp_clk_init(void)
++{
++ static struct platform_device dspbridge_device;
++
++ dspbridge_device.dev.bus = &platform_bus_type;
++
++ iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
++ if (IS_ERR(iva2_clk))
++ dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
++
++ ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
++ ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
++ ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
++
++ if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
++ dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
++ ssi.sst_fck, ssi.ssr_fck, ssi.ick);
++}
++
++#ifdef CONFIG_OMAP_MCBSP
++static void mcbsp_clk_prepare(bool flag, u8 id)
++{
++ struct cfg_hostres *resources;
++ struct dev_object *hdev_object = NULL;
++ struct bridge_dev_context *bridge_context = NULL;
++ u32 val;
++
++ hdev_object = (struct dev_object *)drv_get_first_dev_object();
++ if (!hdev_object)
++ return;
++
++ dev_get_bridge_context(hdev_object, &bridge_context);
++ if (!bridge_context)
++ return;
++
++ resources = bridge_context->resources;
++ if (!resources)
++ return;
++
++ if (flag) {
++ if (id == DSP_CLK_MCBSP1) {
++ /* set MCBSP1_CLKS, on McBSP1 ON */
++ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
++ val |= 1 << 2;
++ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
++ } else if (id == DSP_CLK_MCBSP2) {
++ /* set MCBSP2_CLKS, on McBSP2 ON */
++ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
++ val |= 1 << 6;
++ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
++ }
++ } else {
++ if (id == DSP_CLK_MCBSP1) {
++ /* clear MCBSP1_CLKS, on McBSP1 OFF */
++ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
++ val &= ~(1 << 2);
++ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
++ } else if (id == DSP_CLK_MCBSP2) {
++ /* clear MCBSP2_CLKS, on McBSP2 OFF */
++ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
++ val &= ~(1 << 6);
++ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
++ }
++ }
++}
++#endif
++
++/**
++ * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout
++ * @clk_id: GP Timer clock id.
++ * @load: Overflow value.
++ *
++ * Sets an overflow interrupt for the desired GPT waiting for a timeout
++ * of 5 msecs for the interrupt to occur.
++ */
++void dsp_gpt_wait_overflow(short int clk_id, unsigned int load)
++{
++ struct omap_dm_timer *gpt = timer[clk_id - 1];
++ unsigned long timeout;
++
++ if (!gpt)
++ return;
++
++ /* Enable overflow interrupt */
++ omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
++
++ /*
++ * Set counter value to overflow counter after
++ * one tick and start timer.
++ */
++ omap_dm_timer_set_load_start(gpt, 0, load);
++
++ /* Wait 80us for timer to overflow */
++ udelay(80);
++
++ timeout = msecs_to_jiffies(5);
++ /* Check interrupt status and wait for interrupt */
++ while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) {
++ if (time_is_after_jiffies(timeout)) {
++ pr_err("%s: GPTimer interrupt failed\n", __func__);
++ break;
++ }
++ }
++}
++
++/*
++ * ======== dsp_clk_enable ========
++ * Purpose:
++ * Enable Clock .
++ *
++ */
++int dsp_clk_enable(enum dsp_clk_id clk_id)
++{
++ int status = 0;
++
++ if (is_dsp_clk_active(dsp_clocks, clk_id)) {
++ dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id);
++ goto out;
++ }
++
++ switch (get_clk_type(clk_id)) {
++ case IVA2_CLK:
++ clk_enable(iva2_clk);
++ break;
++ case GPT_CLK:
++ timer[clk_id - 1] =
++ omap_dm_timer_request_specific(DMT_ID(clk_id));
++ break;
++#ifdef CONFIG_OMAP_MCBSP
++ case MCBSP_CLK:
++ mcbsp_clk_prepare(true, clk_id);
++ omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
++ omap_mcbsp_request(MCBSP_ID(clk_id));
++ break;
++#endif
++ case WDT_CLK:
++ dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n");
++ break;
++ case SSI_CLK:
++ clk_enable(ssi.sst_fck);
++ clk_enable(ssi.ssr_fck);
++ clk_enable(ssi.ick);
++
++ /*
++ * The SSI module need to configured not to have the Forced
++ * idle for master interface. If it is set to forced idle,
++ * the SSI module is transitioning to standby thereby causing
++ * the client in the DSP hang waiting for the SSI module to
++ * be active after enabling the clocks
++ */
++ ssi_clk_prepare(true);
++ break;
++ default:
++ dev_err(bridge, "Invalid clock id for enable\n");
++ status = -EPERM;
++ }
++
++ if (!status)
++ set_dsp_clk_active(&dsp_clocks, clk_id);
++
++out:
++ return status;
++}
++
++/**
++ * dsp_clock_enable_all - Enable clocks used by the DSP
++ * @dev_context Driver's device context strucure
++ *
++ * This function enables all the peripheral clocks that were requested by DSP.
++ */
++u32 dsp_clock_enable_all(u32 dsp_per_clocks)
++{
++ u32 clk_id;
++ u32 status = -EPERM;
++
++ for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
++ if (is_dsp_clk_active(dsp_per_clocks, clk_id))
++ status = dsp_clk_enable(clk_id);
++ }
++
++ return status;
++}
++
++/*
++ * ======== dsp_clk_disable ========
++ * Purpose:
++ * Disable the clock.
++ *
++ */
++int dsp_clk_disable(enum dsp_clk_id clk_id)
++{
++ int status = 0;
++
++ if (!is_dsp_clk_active(dsp_clocks, clk_id)) {
++ dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id);
++ goto out;
++ }
++
++ switch (get_clk_type(clk_id)) {
++ case IVA2_CLK:
++ clk_disable(iva2_clk);
++ break;
++ case GPT_CLK:
++ omap_dm_timer_free(timer[clk_id - 1]);
++ break;
++#ifdef CONFIG_OMAP_MCBSP
++ case MCBSP_CLK:
++ mcbsp_clk_prepare(false, clk_id);
++ omap_mcbsp_free(MCBSP_ID(clk_id));
++ break;
++#endif
++ case WDT_CLK:
++ dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n");
++ break;
++ case SSI_CLK:
++ ssi_clk_prepare(false);
++ ssi_clk_prepare(false);
++ clk_disable(ssi.sst_fck);
++ clk_disable(ssi.ssr_fck);
++ clk_disable(ssi.ick);
++ break;
++ default:
++ dev_err(bridge, "Invalid clock id for disable\n");
++ status = -EPERM;
++ }
++
++ if (!status)
++ set_dsp_clk_inactive(&dsp_clocks, clk_id);
++
++out:
++ return status;
++}
++
++/**
++ * dsp_clock_disable_all - Disable all active clocks
++ * @dev_context Driver's device context structure
++ *
++ * This function disables all the peripheral clocks that were enabled by DSP.
++ * It is meant to be called only when DSP is entering hibernation or when DSP
++ * is in error state.
++ */
++u32 dsp_clock_disable_all(u32 dsp_per_clocks)
++{
++ u32 clk_id;
++ u32 status = -EPERM;
++
++ for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
++ if (is_dsp_clk_active(dsp_per_clocks, clk_id))
++ status = dsp_clk_disable(clk_id);
++ }
++
++ return status;
++}
++
++u32 dsp_clk_get_iva2_rate(void)
++{
++ u32 clk_speed_khz;
++
++ clk_speed_khz = clk_get_rate(iva2_clk);
++ clk_speed_khz /= 1000;
++ dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, clk_speed_khz);
++
++ return clk_speed_khz;
++}
++
++void ssi_clk_prepare(bool FLAG)
++{
++ void __iomem *ssi_base;
++ unsigned int value;
++
++ ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE);
++ if (!ssi_base) {
++ pr_err("%s: error, SSI not configured\n", __func__);
++ return;
++ }
++
++ if (FLAG) {
++ /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to
++ * no idle
++ */
++ value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE;
++ } else {
++ /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to
++ * forced idle
++ */
++ value = SSI_AUTOIDLE;
++ }
++
++ __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET);
++ iounmap(ssi_base);
++}
++
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/io_sm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/io_sm.c 2010-08-18 11:24:23.158050746 +0300
+@@ -0,0 +1,2333 @@
++/*
++ * io_sm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * IO dispatcher for a shared memory channel driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/*
++ * Channel Invariant:
++ * There is an important invariant condition which must be maintained per
++ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
++ * which may cause timeouts and/or failure of the sync_wait_on_event
++ * function.
++ */
++#include <linux/types.h>
++
++/* Host OS */
++#include <dspbridge/host_os.h>
++#include <linux/workqueue.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* Services Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/ntfy.h>
++#include <dspbridge/sync.h>
++
++/* Hardware Abstraction Layer */
++#include <hw_defs.h>
++#include <hw_mmu.h>
++
++/* Bridge Driver */
++#include <dspbridge/dspdeh.h>
++#include <dspbridge/dspio.h>
++#include <dspbridge/dspioctl.h>
++#include <dspbridge/wdt.h>
++#include <_tiomap.h>
++#include <tiomap_io.h>
++#include <_tiomap_pwr.h>
++
++/* Platform Manager */
++#include <dspbridge/cod.h>
++#include <dspbridge/node.h>
++#include <dspbridge/dev.h>
++
++/* Others */
++#include <dspbridge/rms_sh.h>
++#include <dspbridge/mgr.h>
++#include <dspbridge/drv.h>
++#include "_cmm.h"
++#include "module_list.h"
++
++/* This */
++#include <dspbridge/io_sm.h>
++#include "_msg_sm.h"
++
++/* Defines, Data Structures, Typedefs */
++#define OUTPUTNOTREADY 0xffff
++#define NOTENABLED 0xffff /* Channel(s) not enabled */
++
++#define EXTEND "_EXT_END"
++
++#define SWAP_WORD(x) (x)
++#define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
++
++#define MAX_PM_REQS 32
++
++#define MMU_FAULT_HEAD1 0xa5a5a5a5
++#define MMU_FAULT_HEAD2 0x96969696
++#define POLL_MAX 1000
++#define MAX_MMU_DBGBUFF 10240
++
++/* IO Manager: only one created per board */
++struct io_mgr {
++ /* These four fields must be the first fields in a io_mgr_ struct */
++ /* Bridge device context */
++ struct bridge_dev_context *hbridge_context;
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++ struct dev_object *hdev_obj; /* Device this board represents */
++
++ /* These fields initialized in bridge_io_create() */
++ struct chnl_mgr *hchnl_mgr;
++ struct shm *shared_mem; /* Shared Memory control */
++ u8 *input; /* Address of input channel */
++ u8 *output; /* Address of output channel */
++ struct msg_mgr *hmsg_mgr; /* Message manager */
++ /* Msg control for from DSP messages */
++ struct msg_ctrl *msg_input_ctrl;
++ /* Msg control for to DSP messages */
++ struct msg_ctrl *msg_output_ctrl;
++ u8 *msg_input; /* Address of input messages */
++ u8 *msg_output; /* Address of output messages */
++ u32 usm_buf_size; /* Size of a shared memory I/O channel */
++ bool shared_irq; /* Is this IRQ shared? */
++ u32 word_size; /* Size in bytes of DSP word */
++ u16 intr_val; /* Interrupt value */
++ /* Private extnd proc info; mmu setup */
++ struct mgr_processorextinfo ext_proc_info;
++ struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */
++ struct work_struct io_workq; /* workqueue */
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++ u32 ul_trace_buffer_begin; /* Trace message start address */
++ u32 ul_trace_buffer_end; /* Trace message end address */
++ u32 ul_trace_buffer_current; /* Trace message current address */
++ u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
++ u8 *pmsg;
++ u32 ul_gpp_va;
++ u32 ul_dsp_va;
++#endif
++ /* IO Dpc */
++ u32 dpc_req; /* Number of requested DPC's. */
++ u32 dpc_sched; /* Number of executed DPC's. */
++ struct tasklet_struct dpc_tasklet;
++ spinlock_t dpc_lock;
++
++};
++
++/* Function Prototypes */
++static void io_dispatch_pm(struct io_mgr *pio_mgr);
++static void notify_chnl_complete(struct chnl_object *pchnl,
++ struct chnl_irp *chnl_packet_obj);
++static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
++ u8 io_mode);
++static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
++ u8 io_mode);
++static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
++static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
++static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
++ struct chnl_object *pchnl, u32 mask);
++
++/* Bus Addr (cached kernel) */
++static int register_shm_segs(struct io_mgr *hio_mgr,
++ struct cod_manager *cod_man,
++ u32 dw_gpp_base_pa);
++
++static inline void set_chnl_free(struct shm *sm, u32 chnl)
++{
++ sm->host_free_mask &= ~(1 << chnl);
++}
++
++static inline void set_chnl_busy(struct shm *sm, u32 chnl)
++{
++ sm->host_free_mask |= 1 << chnl;
++}
++
++
++/*
++ * ======== bridge_io_create ========
++ * Create an IO manager object.
++ */
++int bridge_io_create(struct io_mgr **io_man,
++ struct dev_object *hdev_obj,
++ const struct io_attrs *mgr_attrts)
++{
++ int status = 0;
++ struct io_mgr *pio_mgr = NULL;
++ struct shm *shared_mem = NULL;
++ struct bridge_dev_context *hbridge_context = NULL;
++ struct cfg_devnode *dev_node_obj;
++ struct chnl_mgr *hchnl_mgr;
++ u8 dev_type;
++
++ /* Check requirements */
++ if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
++ if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /*
++ * Message manager will be created when a file is loaded, since
++ * size of message buffer in shared memory is configurable in
++ * the base image.
++ */
++ dev_get_bridge_context(hdev_obj, &hbridge_context);
++ if (!hbridge_context) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ dev_get_dev_type(hdev_obj, &dev_type);
++ /*
++ * DSP shared memory area will get set properly when
++ * a program is loaded. They are unknown until a COFF file is
++ * loaded. I chose the value -1 because it was less likely to be
++ * a valid address than 0.
++ */
++ shared_mem = (struct shm *)-1;
++
++ /* Allocate IO manager object */
++ pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
++ if (pio_mgr == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ /* Initialize chnl_mgr object */
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++ pio_mgr->pmsg = NULL;
++#endif
++ pio_mgr->hchnl_mgr = hchnl_mgr;
++ pio_mgr->word_size = mgr_attrts->word_size;
++ pio_mgr->shared_mem = shared_mem;
++
++ if (dev_type == DSP_UNIT) {
++ /* Create an IO DPC */
++ tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
++
++ /* Initialize DPC counters */
++ pio_mgr->dpc_req = 0;
++ pio_mgr->dpc_sched = 0;
++
++ spin_lock_init(&pio_mgr->dpc_lock);
++
++ status = dev_get_dev_node(hdev_obj, &dev_node_obj);
++ }
++
++ if (!status) {
++ pio_mgr->hbridge_context = hbridge_context;
++ pio_mgr->shared_irq = mgr_attrts->irq_shared;
++ if (dsp_wdt_init())
++ status = -EPERM;
++ } else {
++ status = -EIO;
++ }
++func_end:
++ if (status) {
++ /* Cleanup */
++ bridge_io_destroy(pio_mgr);
++ if (io_man)
++ *io_man = NULL;
++ } else {
++ /* Return IO manager object to caller... */
++ hchnl_mgr->hio_mgr = pio_mgr;
++ *io_man = pio_mgr;
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_io_destroy ========
++ * Purpose:
++ * Disable interrupts, destroy the IO manager.
++ */
++int bridge_io_destroy(struct io_mgr *hio_mgr)
++{
++ int status = 0;
++ if (hio_mgr) {
++ /* Free IO DPC object */
++ tasklet_kill(&hio_mgr->dpc_tasklet);
++
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++ kfree(hio_mgr->pmsg);
++#endif
++ dsp_wdt_exit();
++ /* Free this IO manager object */
++ kfree(hio_mgr);
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== bridge_io_on_loaded ========
++ * Purpose:
++ * Called when a new program is loaded to get shared memory buffer
++ * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
++ * are in DSP address units.
++ */
++int bridge_io_on_loaded(struct io_mgr *hio_mgr)
++{
++ struct cod_manager *cod_man;
++ struct chnl_mgr *hchnl_mgr;
++ struct msg_mgr *hmsg_mgr;
++ u32 ul_shm_base;
++ u32 ul_shm_base_offset;
++ u32 ul_shm_limit;
++ u32 ul_shm_length = -1;
++ u32 ul_mem_length = -1;
++ u32 ul_msg_base;
++ u32 ul_msg_limit;
++ u32 ul_msg_length = -1;
++ u32 ul_ext_end;
++ u32 ul_gpp_pa = 0;
++ u32 ul_gpp_va = 0;
++ u32 ul_dsp_va = 0;
++ u32 ul_seg_size = 0;
++ u32 ul_pad_size = 0;
++ u32 i;
++ int status = 0;
++ u8 num_procs = 0;
++ s32 ndx = 0;
++ /* DSP MMU setup table */
++ struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
++ struct cfg_hostres *host_res;
++ struct bridge_dev_context *pbridge_context;
++ u32 map_attrs;
++ u32 shm0_end;
++ u32 ul_dyn_ext_base;
++ u32 ul_seg1_size = 0;
++ u32 pa_curr = 0;
++ u32 va_curr = 0;
++ u32 gpp_va_curr = 0;
++ u32 num_bytes = 0;
++ u32 all_bits = 0;
++ u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
++ HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
++ };
++
++ status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
++ if (!pbridge_context) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ host_res = pbridge_context->resources;
++ if (!host_res) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
++ if (!cod_man) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ hchnl_mgr = hio_mgr->hchnl_mgr;
++ /* The message manager is destroyed when the board is stopped. */
++ dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
++ hmsg_mgr = hio_mgr->hmsg_mgr;
++ if (!hchnl_mgr || !hmsg_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ if (hio_mgr->shared_mem)
++ hio_mgr->shared_mem = NULL;
++
++ /* Get start and length of channel part of shared memory */
++ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
++ &ul_shm_base);
++ if (status) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
++ &ul_shm_limit);
++ if (status) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ if (ul_shm_limit <= ul_shm_base) {
++ status = -EINVAL;
++ goto func_end;
++ }
++ /* Get total length in bytes */
++ ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
++ /* Calculate size of a PROCCOPY shared memory region */
++ dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
++ __func__, (ul_shm_length - sizeof(struct shm)));
++
++ /* Get start and length of message part of shared memory */
++ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
++ &ul_msg_base);
++ if (!status) {
++ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
++ &ul_msg_limit);
++ if (!status) {
++ if (ul_msg_limit <= ul_msg_base) {
++ status = -EINVAL;
++ } else {
++ /*
++ * Length (bytes) of messaging part of shared
++ * memory.
++ */
++ ul_msg_length =
++ (ul_msg_limit - ul_msg_base +
++ 1) * hio_mgr->word_size;
++ /*
++ * Total length (bytes) of shared memory:
++ * chnl + msg.
++ */
++ ul_mem_length = ul_shm_length + ul_msg_length;
++ }
++ } else {
++ status = -EFAULT;
++ }
++ } else {
++ status = -EFAULT;
++ }
++ if (!status) {
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++ status =
++ cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
++#else
++ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
++ &shm0_end);
++#endif
++ if (status)
++ status = -EFAULT;
++ }
++ if (!status) {
++ status =
++ cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
++ if (status)
++ status = -EFAULT;
++ }
++ if (!status) {
++ status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
++ if (status)
++ status = -EFAULT;
++ }
++ if (!status) {
++ /* Get memory reserved in host resources */
++ (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
++ &hio_mgr->ext_proc_info,
++ sizeof(struct
++ mgr_processorextinfo),
++ &num_procs);
++
++ /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
++ ndx = 0;
++ ul_gpp_pa = host_res->dw_mem_phys[1];
++ ul_gpp_va = host_res->dw_mem_base[1];
++ /* This is the virtual uncached ioremapped address!!! */
++ /* Why can't we directly take the DSPVA from the symbols? */
++ ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
++ ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
++ ul_seg1_size =
++ (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
++ /* 4K align */
++ ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
++ /* 64K align */
++ ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
++ ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
++ UL_PAGE_ALIGN_SIZE);
++ if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
++ ul_pad_size = 0x0;
++
++ dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
++ "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
++ "ul_seg_size %x ul_seg1_size %x \n", __func__,
++ ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
++ ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
++
++ if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
++ host_res->dw_mem_length[1]) {
++ pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
++ __func__, host_res->dw_mem_length[1],
++ ul_seg_size + ul_seg1_size + ul_pad_size);
++ status = -ENOMEM;
++ }
++ }
++ if (status)
++ goto func_end;
++
++ pa_curr = ul_gpp_pa;
++ va_curr = ul_dyn_ext_base * hio_mgr->word_size;
++ gpp_va_curr = ul_gpp_va;
++ num_bytes = ul_seg1_size;
++
++ /*
++ * Try to fit into TLB entries. If not possible, push them to page
++ * tables. It is quite possible that if sections are not on
++ * bigger page boundary, we may end up making several small pages.
++ * So, push them onto page tables, if that is the case.
++ */
++ map_attrs = 0x00000000;
++ map_attrs = DSP_MAPLITTLEENDIAN;
++ map_attrs |= DSP_MAPPHYSICALADDR;
++ map_attrs |= DSP_MAPELEMSIZE32;
++ map_attrs |= DSP_MAPDONOTLOCK;
++
++ while (num_bytes) {
++ /*
++ * To find the max. page size with which both PA & VA are
++ * aligned.
++ */
++ all_bits = pa_curr | va_curr;
++ dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
++ "num_bytes %x\n", all_bits, pa_curr, va_curr,
++ num_bytes);
++ for (i = 0; i < 4; i++) {
++ if ((num_bytes >= page_size[i]) && ((all_bits &
++ (page_size[i] -
++ 1)) == 0)) {
++ status =
++ hio_mgr->intf_fxns->
++ pfn_brd_mem_map(hio_mgr->hbridge_context,
++ pa_curr, va_curr,
++ page_size[i], map_attrs,
++ NULL);
++ if (status)
++ goto func_end;
++ pa_curr += page_size[i];
++ va_curr += page_size[i];
++ gpp_va_curr += page_size[i];
++ num_bytes -= page_size[i];
++ /*
++ * Don't try smaller sizes. Hopefully we have
++ * reached an address aligned to a bigger page
++ * size.
++ */
++ break;
++ }
++ }
++ }
++ pa_curr += ul_pad_size;
++ va_curr += ul_pad_size;
++ gpp_va_curr += ul_pad_size;
++
++ /* Configure the TLB entries for the next cacheable segment */
++ num_bytes = ul_seg_size;
++ va_curr = ul_dsp_va * hio_mgr->word_size;
++ while (num_bytes) {
++ /*
++ * To find the max. page size with which both PA & VA are
++ * aligned.
++ */
++ all_bits = pa_curr | va_curr;
++ dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
++ "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
++ va_curr, num_bytes);
++ for (i = 0; i < 4; i++) {
++ if (!(num_bytes >= page_size[i]) ||
++ !((all_bits & (page_size[i] - 1)) == 0))
++ continue;
++ if (ndx < MAX_LOCK_TLB_ENTRIES) {
++ /*
++ * This is the physical address written to
++ * DSP MMU.
++ */
++ ae_proc[ndx].ul_gpp_pa = pa_curr;
++ /*
++ * This is the virtual uncached ioremapped
++ * address!!!
++ */
++ ae_proc[ndx].ul_gpp_va = gpp_va_curr;
++ ae_proc[ndx].ul_dsp_va =
++ va_curr / hio_mgr->word_size;
++ ae_proc[ndx].ul_size = page_size[i];
++ ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
++ ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
++ ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
++ dev_dbg(bridge, "shm MMU TLB entry PA %x"
++ " VA %x DSP_VA %x Size %x\n",
++ ae_proc[ndx].ul_gpp_pa,
++ ae_proc[ndx].ul_gpp_va,
++ ae_proc[ndx].ul_dsp_va *
++ hio_mgr->word_size, page_size[i]);
++ ndx++;
++ } else {
++ status =
++ hio_mgr->intf_fxns->
++ pfn_brd_mem_map(hio_mgr->hbridge_context,
++ pa_curr, va_curr,
++ page_size[i], map_attrs,
++ NULL);
++ dev_dbg(bridge,
++ "shm MMU PTE entry PA %x"
++ " VA %x DSP_VA %x Size %x\n",
++ ae_proc[ndx].ul_gpp_pa,
++ ae_proc[ndx].ul_gpp_va,
++ ae_proc[ndx].ul_dsp_va *
++ hio_mgr->word_size, page_size[i]);
++ if (status)
++ goto func_end;
++ }
++ pa_curr += page_size[i];
++ va_curr += page_size[i];
++ gpp_va_curr += page_size[i];
++ num_bytes -= page_size[i];
++ /*
++ * Don't try smaller sizes. Hopefully we have reached
++ * an address aligned to a bigger page size.
++ */
++ break;
++ }
++ }
++
++ /*
++ * Copy remaining entries from CDB. All entries are 1 MB and
++ * should not conflict with shm entries on MPU or DSP side.
++ */
++ for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
++ if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
++ continue;
++
++ if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
++ ul_gpp_pa - 0x100000
++ && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
++ ul_gpp_pa + ul_seg_size)
++ || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
++ ul_dsp_va - 0x100000 / hio_mgr->word_size
++ && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
++ ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
++ dev_dbg(bridge,
++ "CDB MMU entry %d conflicts with "
++ "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
++ "GppPa %x, DspVa %x, Bytes %x.\n", i,
++ hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
++ hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
++ ul_gpp_pa, ul_dsp_va, ul_seg_size);
++ status = -EPERM;
++ } else {
++ if (ndx < MAX_LOCK_TLB_ENTRIES) {
++ ae_proc[ndx].ul_dsp_va =
++ hio_mgr->ext_proc_info.ty_tlb[i].
++ ul_dsp_virt;
++ ae_proc[ndx].ul_gpp_pa =
++ hio_mgr->ext_proc_info.ty_tlb[i].
++ ul_gpp_phys;
++ ae_proc[ndx].ul_gpp_va = 0;
++ /* 1 MB */
++ ae_proc[ndx].ul_size = 0x100000;
++ dev_dbg(bridge, "shm MMU entry PA %x "
++ "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
++ ae_proc[ndx].ul_dsp_va);
++ ndx++;
++ } else {
++ status = hio_mgr->intf_fxns->pfn_brd_mem_map
++ (hio_mgr->hbridge_context,
++ hio_mgr->ext_proc_info.ty_tlb[i].
++ ul_gpp_phys,
++ hio_mgr->ext_proc_info.ty_tlb[i].
++ ul_dsp_virt, 0x100000, map_attrs,
++ NULL);
++ }
++ }
++ if (status)
++ goto func_end;
++ }
++
++ map_attrs = 0x00000000;
++ map_attrs = DSP_MAPLITTLEENDIAN;
++ map_attrs |= DSP_MAPPHYSICALADDR;
++ map_attrs |= DSP_MAPELEMSIZE32;
++ map_attrs |= DSP_MAPDONOTLOCK;
++
++ /* Map the L4 peripherals */
++ i = 0;
++ while (l4_peripheral_table[i].phys_addr) {
++ status = hio_mgr->intf_fxns->pfn_brd_mem_map
++ (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
++ l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
++ map_attrs, NULL);
++ if (status)
++ goto func_end;
++ i++;
++ }
++
++ for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
++ ae_proc[i].ul_dsp_va = 0;
++ ae_proc[i].ul_gpp_pa = 0;
++ ae_proc[i].ul_gpp_va = 0;
++ ae_proc[i].ul_size = 0;
++ }
++ /*
++ * Set the shm physical address entry (grayed out in CDB file)
++ * to the virtual uncached ioremapped address of shm reserved
++ * on MPU.
++ */
++ hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
++ (ul_gpp_va + ul_seg1_size + ul_pad_size);
++
++ /*
++ * Need shm Phys addr. IO supports only one DSP for now:
++ * num_procs = 1.
++ */
++ if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
++ status = -EFAULT;
++ goto func_end;
++ } else {
++ if (ae_proc[0].ul_dsp_va > ul_shm_base) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /* ul_shm_base may not be at ul_dsp_va address */
++ ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
++ hio_mgr->word_size;
++ /*
++ * bridge_dev_ctrl() will set dev context dsp-mmu info. In
++ * bridge_brd_start() the MMU will be re-programed with MMU
++ * DSPVa-GPPPa pair info while DSP is in a known
++ * (reset) state.
++ */
++
++ status =
++ hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
++ BRDIOCTL_SETMMUCONFIG,
++ ae_proc);
++ if (status)
++ goto func_end;
++ ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
++ ul_shm_base += ul_shm_base_offset;
++ ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
++ ul_mem_length);
++ if (ul_shm_base == 0) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /* Register SM */
++ status =
++ register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
++ }
++
++ hio_mgr->shared_mem = (struct shm *)ul_shm_base;
++ hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
++ hio_mgr->output = hio_mgr->input + (ul_shm_length -
++ sizeof(struct shm)) / 2;
++ hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
++
++ /* Set up Shared memory addresses for messaging. */
++ hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
++ + ul_shm_length);
++ hio_mgr->msg_input =
++ (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
++ hio_mgr->msg_output_ctrl =
++ (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
++ ul_msg_length / 2);
++ hio_mgr->msg_output =
++ (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
++ hmsg_mgr->max_msgs =
++ ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
++ / sizeof(struct msg_dspmsg);
++ dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
++ "output %p, msg_input_ctrl %p, msg_input %p, "
++ "msg_output_ctrl %p, msg_output %p\n",
++ (u8 *) hio_mgr->shared_mem, hio_mgr->input,
++ hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
++ hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
++ hio_mgr->msg_output);
++ dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
++ hmsg_mgr->max_msgs);
++ memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
++
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++ /* Get the start address of trace buffer */
++ status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
++ &hio_mgr->ul_trace_buffer_begin);
++ if (status) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
++ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
++ (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
++ /* Get the end address of trace buffer */
++ status = cod_get_sym_value(cod_man, SYS_PUTCEND,
++ &hio_mgr->ul_trace_buffer_end);
++ if (status) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ hio_mgr->ul_trace_buffer_end =
++ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
++ (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
++ /* Get the current address of DSP write pointer */
++ status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
++ &hio_mgr->ul_trace_buffer_current);
++ if (status) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ hio_mgr->ul_trace_buffer_current =
++ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
++ (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
++ /* Calculate the size of trace buffer */
++ kfree(hio_mgr->pmsg);
++ hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
++ hio_mgr->ul_trace_buffer_begin) *
++ hio_mgr->word_size) + 2, GFP_KERNEL);
++ if (!hio_mgr->pmsg)
++ status = -ENOMEM;
++
++ hio_mgr->ul_dsp_va = ul_dsp_va;
++ hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
++
++#endif
++func_end:
++ return status;
++}
++
++/*
++ * ======== io_buf_size ========
++ * Size of shared memory I/O channel.
++ */
++u32 io_buf_size(struct io_mgr *hio_mgr)
++{
++ if (hio_mgr)
++ return hio_mgr->usm_buf_size;
++ else
++ return 0;
++}
++
++/*
++ * ======== io_cancel_chnl ========
++ * Cancel IO on a given PCPY channel.
++ */
++void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
++{
++ struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
++ struct shm *sm;
++
++ if (!hio_mgr)
++ goto func_end;
++ sm = hio_mgr->shared_mem;
++
++ /* Inform DSP that we have no more buffers on this channel */
++ set_chnl_free(sm, chnl);
++
++ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
++func_end:
++ return;
++}
++
++
++/*
++ * ======== io_dispatch_pm ========
++ * Performs I/O dispatch on PM related messages from DSP
++ */
++static void io_dispatch_pm(struct io_mgr *pio_mgr)
++{
++ int status;
++ u32 parg[2];
++
++ /* Perform Power message processing here */
++ parg[0] = pio_mgr->intr_val;
++
++ /* Send the command to the Bridge clk/pwr manager to handle */
++ if (parg[0] == MBX_PM_HIBERNATE_EN) {
++ dev_dbg(bridge, "PM: Hibernate command\n");
++ status = pio_mgr->intf_fxns->
++ pfn_dev_cntrl(pio_mgr->hbridge_context,
++ BRDIOCTL_PWR_HIBERNATE, parg);
++ if (status)
++ pr_err("%s: hibernate cmd failed 0x%x\n",
++ __func__, status);
++ } else if (parg[0] == MBX_PM_OPP_REQ) {
++ parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
++ dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
++ status = pio_mgr->intf_fxns->
++ pfn_dev_cntrl(pio_mgr->hbridge_context,
++ BRDIOCTL_CONSTRAINT_REQUEST, parg);
++ if (status)
++ dev_dbg(bridge, "PM: Failed to set constraint "
++ "= 0x%x\n", parg[1]);
++ } else {
++ dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
++ parg[0]);
++ status = pio_mgr->intf_fxns->
++ pfn_dev_cntrl(pio_mgr->hbridge_context,
++ BRDIOCTL_CLK_CTRL, parg);
++ if (status)
++ dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
++ "= 0x%x\n", *parg);
++ }
++}
++
++/*
++ * ======== io_dpc ========
++ * Deferred procedure call for shared memory channel driver ISR. Carries
++ * out the dispatch of I/O as a non-preemptible event.It can only be
++ * pre-empted by an ISR.
++ */
++void io_dpc(unsigned long ref_data)
++{
++ struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
++ struct chnl_mgr *chnl_mgr_obj;
++ struct msg_mgr *msg_mgr_obj;
++ struct deh_mgr *hdeh_mgr;
++ u32 requested;
++ u32 serviced;
++
++ if (!pio_mgr)
++ goto func_end;
++ chnl_mgr_obj = pio_mgr->hchnl_mgr;
++ dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
++ dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
++ if (!chnl_mgr_obj)
++ goto func_end;
++
++ requested = pio_mgr->dpc_req;
++ serviced = pio_mgr->dpc_sched;
++
++ if (serviced == requested)
++ goto func_end;
++
++ /* Process pending DPC's */
++ do {
++ /* Check value of interrupt reg to ensure it's a valid error */
++ if ((pio_mgr->intr_val > DEH_BASE) &&
++ (pio_mgr->intr_val < DEH_LIMIT)) {
++ /* Notify DSP/BIOS exception */
++ if (hdeh_mgr) {
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++ print_dsp_debug_trace(pio_mgr);
++#endif
++ bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
++ pio_mgr->intr_val);
++ }
++ }
++ /* Proc-copy chanel dispatch */
++ input_chnl(pio_mgr, NULL, IO_SERVICE);
++ output_chnl(pio_mgr, NULL, IO_SERVICE);
++
++#ifdef CHNL_MESSAGES
++ if (msg_mgr_obj) {
++ /* Perform I/O dispatch on message queues */
++ input_msg(pio_mgr, msg_mgr_obj);
++ output_msg(pio_mgr, msg_mgr_obj);
++ }
++
++#endif
++#ifdef CONFIG_TIDSPBRIDGE_DEBUG
++ if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
++ /* Notify DSP Trace message */
++ print_dsp_debug_trace(pio_mgr);
++ }
++#endif
++ serviced++;
++ } while (serviced != requested);
++ pio_mgr->dpc_sched = requested;
++func_end:
++ return;
++}
++
++/*
++ * ======== io_mbox_msg ========
++ * Main interrupt handler for the shared memory IO manager.
++ * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
++ * schedules a DPC to dispatch I/O.
++ */
++void io_mbox_msg(u32 msg)
++{
++ struct io_mgr *pio_mgr;
++ struct dev_object *dev_obj;
++ unsigned long flags;
++
++ dev_obj = dev_get_first();
++ dev_get_io_mgr(dev_obj, &pio_mgr);
++
++ if (!pio_mgr)
++ return;
++
++ pio_mgr->intr_val = (u16)msg;
++ if (pio_mgr->intr_val & MBX_PM_CLASS)
++ io_dispatch_pm(pio_mgr);
++
++ if (pio_mgr->intr_val == MBX_DEH_RESET) {
++ pio_mgr->intr_val = 0;
++ } else {
++ spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
++ pio_mgr->dpc_req++;
++ spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
++ tasklet_schedule(&pio_mgr->dpc_tasklet);
++ }
++ return;
++}
++
++/*
++ * ======== io_request_chnl ========
++ * Purpose:
++ * Request chanenel I/O from the DSP. Sets flags in shared memory, then
++ * interrupts the DSP.
++ */
++void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
++ u8 io_mode, u16 *mbx_val)
++{
++ struct chnl_mgr *chnl_mgr_obj;
++ struct shm *sm;
++
++ if (!pchnl || !mbx_val)
++ goto func_end;
++ chnl_mgr_obj = io_manager->hchnl_mgr;
++ sm = io_manager->shared_mem;
++ if (io_mode == IO_INPUT) {
++ /*
++ * Assertion fires if CHNL_AddIOReq() called on a stream
++ * which was cancelled, or attached to a dead board.
++ */
++ DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
++ (pchnl->dw_state == CHNL_STATEEOS));
++ /* Indicate to the DSP we have a buffer available for input */
++ set_chnl_busy(sm, pchnl->chnl_id);
++ *mbx_val = MBX_PCPY_CLASS;
++ } else if (io_mode == IO_OUTPUT) {
++ /*
++ * This assertion fails if CHNL_AddIOReq() was called on a
++ * stream which was cancelled, or attached to a dead board.
++ */
++ DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
++ CHNL_STATEREADY);
++ /*
++ * Record the fact that we have a buffer available for
++ * output.
++ */
++ chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
++ } else {
++ DBC_ASSERT(io_mode); /* Shouldn't get here. */
++ }
++func_end:
++ return;
++}
++
++/*
++ * ======== iosm_schedule ========
++ * Schedule DPC for IO.
++ */
++void iosm_schedule(struct io_mgr *io_manager)
++{
++ unsigned long flags;
++
++ if (!io_manager)
++ return;
++
++ /* Increment count of DPC's pending. */
++ spin_lock_irqsave(&io_manager->dpc_lock, flags);
++ io_manager->dpc_req++;
++ spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
++
++ /* Schedule DPC */
++ tasklet_schedule(&io_manager->dpc_tasklet);
++}
++
++/*
++ * ======== find_ready_output ========
++ * Search for a host output channel which is ready to send. If this is
++ * called as a result of servicing the DPC, then implement a round
++ * robin search; otherwise, this was called by a client thread (via
++ * IO_Dispatch()), so just start searching from the current channel id.
++ */
++static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
++ struct chnl_object *pchnl, u32 mask)
++{
++ u32 ret = OUTPUTNOTREADY;
++ u32 id, start_id;
++ u32 shift;
++
++ id = (pchnl !=
++ NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
++ id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
++ if (id >= CHNL_MAXCHANNELS)
++ goto func_end;
++ if (mask) {
++ shift = (1 << id);
++ start_id = id;
++ do {
++ if (mask & shift) {
++ ret = id;
++ if (pchnl == NULL)
++ chnl_mgr_obj->dw_last_output = id;
++ break;
++ }
++ id = id + 1;
++ id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
++ shift = (1 << id);
++ } while (id != start_id);
++ }
++func_end:
++ return ret;
++}
++
++/*
++ * ======== input_chnl ========
++ * Dispatch a buffer on an input channel.
++ */
++static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
++ u8 io_mode)
++{
++ struct chnl_mgr *chnl_mgr_obj;
++ struct shm *sm;
++ u32 chnl_id;
++ u32 bytes;
++ struct chnl_irp *chnl_packet_obj = NULL;
++ u32 dw_arg;
++ bool clear_chnl = false;
++ bool notify_client = false;
++
++ sm = pio_mgr->shared_mem;
++ chnl_mgr_obj = pio_mgr->hchnl_mgr;
++
++ /* Attempt to perform input */
++ if (!sm->input_full)
++ goto func_end;
++
++ bytes = sm->input_size * chnl_mgr_obj->word_size;
++ chnl_id = sm->input_id;
++ dw_arg = sm->arg;
++ if (chnl_id >= CHNL_MAXCHANNELS) {
++ /* Shouldn't be here: would indicate corrupted shm. */
++ DBC_ASSERT(chnl_id);
++ goto func_end;
++ }
++ pchnl = chnl_mgr_obj->ap_channel[chnl_id];
++ if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
++ if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
++ if (!pchnl->pio_requests)
++ goto func_end;
++ /* Get the I/O request, and attempt a transfer */
++ chnl_packet_obj = (struct chnl_irp *)
++ lst_get_head(pchnl->pio_requests);
++ if (chnl_packet_obj) {
++ pchnl->cio_reqs--;
++ if (pchnl->cio_reqs < 0)
++ goto func_end;
++ /*
++ * Ensure we don't overflow the client's
++ * buffer.
++ */
++ bytes = min(bytes, chnl_packet_obj->byte_size);
++ memcpy(chnl_packet_obj->host_sys_buf,
++ pio_mgr->input, bytes);
++ pchnl->bytes_moved += bytes;
++ chnl_packet_obj->byte_size = bytes;
++ chnl_packet_obj->dw_arg = dw_arg;
++ chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
++
++ if (bytes == 0) {
++ /*
++ * This assertion fails if the DSP
++ * sends EOS more than once on this
++ * channel.
++ */
++ if (pchnl->dw_state & CHNL_STATEEOS)
++ goto func_end;
++ /*
++ * Zero bytes indicates EOS. Update
++ * IOC status for this chirp, and also
++ * the channel state.
++ */
++ chnl_packet_obj->status |=
++ CHNL_IOCSTATEOS;
++ pchnl->dw_state |= CHNL_STATEEOS;
++ /*
++ * Notify that end of stream has
++ * occurred.
++ */
++ ntfy_notify(pchnl->ntfy_obj,
++ DSP_STREAMDONE);
++ }
++ /* Tell DSP if no more I/O buffers available */
++ if (!pchnl->pio_requests)
++ goto func_end;
++ if (LST_IS_EMPTY(pchnl->pio_requests)) {
++ set_chnl_free(sm, pchnl->chnl_id);
++ }
++ clear_chnl = true;
++ notify_client = true;
++ } else {
++ /*
++ * Input full for this channel, but we have no
++ * buffers available. The channel must be
++ * "idling". Clear out the physical input
++ * channel.
++ */
++ clear_chnl = true;
++ }
++ } else {
++ /* Input channel cancelled: clear input channel */
++ clear_chnl = true;
++ }
++ } else {
++ /* DPC fired after host closed channel: clear input channel */
++ clear_chnl = true;
++ }
++ if (clear_chnl) {
++ /* Indicate to the DSP we have read the input */
++ sm->input_full = 0;
++ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
++ }
++ if (notify_client) {
++ /* Notify client with IO completion record */
++ notify_chnl_complete(pchnl, chnl_packet_obj);
++ }
++func_end:
++ return;
++}
++
++/*
++ * ======== input_msg ========
++ * Copies messages from shared memory to the message queues.
++ */
++static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
++{
++ u32 num_msgs;
++ u32 i;
++ u8 *msg_input;
++ struct msg_queue *msg_queue_obj;
++ struct msg_frame *pmsg;
++ struct msg_dspmsg msg;
++ struct msg_ctrl *msg_ctr_obj;
++ u32 input_empty;
++ u32 addr;
++
++ msg_ctr_obj = pio_mgr->msg_input_ctrl;
++ /* Get the number of input messages to be read */
++ input_empty = msg_ctr_obj->buf_empty;
++ num_msgs = msg_ctr_obj->size;
++ if (input_empty)
++ goto func_end;
++
++ msg_input = pio_mgr->msg_input;
++ for (i = 0; i < num_msgs; i++) {
++ /* Read the next message */
++ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
++ msg.msg.dw_cmd =
++ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
++ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
++ msg.msg.dw_arg1 =
++ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
++ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
++ msg.msg.dw_arg2 =
++ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
++ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
++ msg.msgq_id =
++ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
++ msg_input += sizeof(struct msg_dspmsg);
++ if (!hmsg_mgr->queue_list)
++ goto func_end;
++
++ /* Determine which queue to put the message in */
++ msg_queue_obj =
++ (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
++ dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
++ "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
++ msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
++ /*
++ * Interrupt may occur before shared memory and message
++ * input locations have been set up. If all nodes were
++ * cleaned up, hmsg_mgr->max_msgs should be 0.
++ */
++ while (msg_queue_obj != NULL) {
++ if (msg.msgq_id == msg_queue_obj->msgq_id) {
++ /* Found it */
++ if (msg.msg.dw_cmd == RMS_EXITACK) {
++ /*
++ * Call the node exit notification.
++ * The exit message does not get
++ * queued.
++ */
++ (*hmsg_mgr->on_exit) ((void *)
++ msg_queue_obj->arg,
++ msg.msg.dw_arg1);
++ } else {
++ /*
++ * Not an exit acknowledgement, queue
++ * the message.
++ */
++ if (!msg_queue_obj->msg_free_list)
++ goto func_end;
++ pmsg = (struct msg_frame *)lst_get_head
++ (msg_queue_obj->msg_free_list);
++ if (msg_queue_obj->msg_used_list
++ && pmsg) {
++ pmsg->msg_data = msg;
++ lst_put_tail
++ (msg_queue_obj->msg_used_list,
++ (struct list_head *)pmsg);
++ ntfy_notify
++ (msg_queue_obj->ntfy_obj,
++ DSP_NODEMESSAGEREADY);
++ sync_set_event
++ (msg_queue_obj->sync_event);
++ } else {
++ /*
++ * No free frame to copy the
++ * message into.
++ */
++ pr_err("%s: no free msg frames,"
++ " discarding msg\n",
++ __func__);
++ }
++ }
++ break;
++ }
++
++ if (!hmsg_mgr->queue_list || !msg_queue_obj)
++ goto func_end;
++ msg_queue_obj =
++ (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
++ (struct list_head *)
++ msg_queue_obj);
++ }
++ }
++ /* Set the post SWI flag */
++ if (num_msgs > 0) {
++ /* Tell the DSP we've read the messages */
++ msg_ctr_obj->buf_empty = true;
++ msg_ctr_obj->post_swi = true;
++ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
++ }
++func_end:
++ return;
++}
++
++/*
++ * ======== notify_chnl_complete ========
++ * Purpose:
++ * Signal the channel event, notifying the client that I/O has completed.
++ */
++static void notify_chnl_complete(struct chnl_object *pchnl,
++ struct chnl_irp *chnl_packet_obj)
++{
++ bool signal_event;
++
++ if (!pchnl || !pchnl->sync_event ||
++ !pchnl->pio_completions || !chnl_packet_obj)
++ goto func_end;
++
++ /*
++ * Note: we signal the channel event only if the queue of IO
++ * completions is empty. If it is not empty, the event is sure to be
++ * signalled by the only IO completion list consumer:
++ * bridge_chnl_get_ioc().
++ */
++ signal_event = LST_IS_EMPTY(pchnl->pio_completions);
++ /* Enqueue the IO completion info for the client */
++ lst_put_tail(pchnl->pio_completions,
++ (struct list_head *)chnl_packet_obj);
++ pchnl->cio_cs++;
++
++ if (pchnl->cio_cs > pchnl->chnl_packets)
++ goto func_end;
++ /* Signal the channel event (if not already set) that IO is complete */
++ if (signal_event)
++ sync_set_event(pchnl->sync_event);
++
++ /* Notify that IO is complete */
++ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
++func_end:
++ return;
++}
++
++/*
++ * ======== output_chnl ========
++ * Purpose:
++ * Dispatch a buffer on an output channel.
++ */
++static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
++ u8 io_mode)
++{
++ struct chnl_mgr *chnl_mgr_obj;
++ struct shm *sm;
++ u32 chnl_id;
++ struct chnl_irp *chnl_packet_obj;
++ u32 dw_dsp_f_mask;
++
++ chnl_mgr_obj = pio_mgr->hchnl_mgr;
++ sm = pio_mgr->shared_mem;
++ /* Attempt to perform output */
++ if (sm->output_full)
++ goto func_end;
++
++ if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
++ goto func_end;
++
++ /* Look to see if both a PC and DSP output channel are ready */
++ dw_dsp_f_mask = sm->dsp_free_mask;
++ chnl_id =
++ find_ready_output(chnl_mgr_obj, pchnl,
++ (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
++ if (chnl_id == OUTPUTNOTREADY)
++ goto func_end;
++
++ pchnl = chnl_mgr_obj->ap_channel[chnl_id];
++ if (!pchnl || !pchnl->pio_requests) {
++ /* Shouldn't get here */
++ goto func_end;
++ }
++ /* Get the I/O request, and attempt a transfer */
++ chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
++ if (!chnl_packet_obj)
++ goto func_end;
++
++ pchnl->cio_reqs--;
++ if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
++ goto func_end;
++
++ /* Record fact that no more I/O buffers available */
++ if (LST_IS_EMPTY(pchnl->pio_requests))
++ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
++
++ /* Transfer buffer to DSP side */
++ chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
++ chnl_packet_obj->byte_size);
++ memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
++ chnl_packet_obj->byte_size);
++ pchnl->bytes_moved += chnl_packet_obj->byte_size;
++ /* Write all 32 bits of arg */
++ sm->arg = chnl_packet_obj->dw_arg;
++#if _CHNL_WORDSIZE == 2
++ /* Access can be different SM access word size (e.g. 16/32 bit words) */
++ sm->output_id = (u16) chnl_id;
++ sm->output_size = (u16) (chnl_packet_obj->byte_size +
++ chnl_mgr_obj->word_size - 1) /
++ (u16) chnl_mgr_obj->word_size;
++#else
++ sm->output_id = chnl_id;
++ sm->output_size = (chnl_packet_obj->byte_size +
++ chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
++#endif
++ sm->output_full = 1;
++ /* Indicate to the DSP we have written the output */
++ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
++ /* Notify client with IO completion record (keep EOS) */
++ chnl_packet_obj->status &= CHNL_IOCSTATEOS;
++ notify_chnl_complete(pchnl, chnl_packet_obj);
++ /* Notify if stream is done. */
++ if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
++ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
++
++func_end:
++ return;
++}
++
++/*
++ * ======== output_msg ========
++ * Copies messages from the message queues to the shared memory.
++ */
++static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
++{
++ u32 num_msgs = 0;
++ u32 i;
++ u8 *msg_output;
++ struct msg_frame *pmsg;
++ struct msg_ctrl *msg_ctr_obj;
++ u32 output_empty;
++ u32 val;
++ u32 addr;
++
++ msg_ctr_obj = pio_mgr->msg_output_ctrl;
++
++ /* Check if output has been cleared */
++ output_empty = msg_ctr_obj->buf_empty;
++ if (output_empty) {
++ num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
++ hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
++ msg_output = pio_mgr->msg_output;
++ /* Copy num_msgs messages into shared memory */
++ for (i = 0; i < num_msgs; i++) {
++ if (!hmsg_mgr->msg_used_list) {
++ pmsg = NULL;
++ goto func_end;
++ } else {
++ pmsg = (struct msg_frame *)
++ lst_get_head(hmsg_mgr->msg_used_list);
++ }
++ if (pmsg != NULL) {
++ val = (pmsg->msg_data).msgq_id;
++ addr = (u32) &(((struct msg_dspmsg *)
++ msg_output)->msgq_id);
++ write_ext32_bit_dsp_data(
++ pio_mgr->hbridge_context, addr, val);
++ val = (pmsg->msg_data).msg.dw_cmd;
++ addr = (u32) &((((struct msg_dspmsg *)
++ msg_output)->msg).dw_cmd);
++ write_ext32_bit_dsp_data(
++ pio_mgr->hbridge_context, addr, val);
++ val = (pmsg->msg_data).msg.dw_arg1;
++ addr = (u32) &((((struct msg_dspmsg *)
++ msg_output)->msg).dw_arg1);
++ write_ext32_bit_dsp_data(
++ pio_mgr->hbridge_context, addr, val);
++ val = (pmsg->msg_data).msg.dw_arg2;
++ addr = (u32) &((((struct msg_dspmsg *)
++ msg_output)->msg).dw_arg2);
++ write_ext32_bit_dsp_data(
++ pio_mgr->hbridge_context, addr, val);
++ msg_output += sizeof(struct msg_dspmsg);
++ if (!hmsg_mgr->msg_free_list)
++ goto func_end;
++ lst_put_tail(hmsg_mgr->msg_free_list,
++ (struct list_head *)pmsg);
++ sync_set_event(hmsg_mgr->sync_event);
++ }
++ }
++
++ if (num_msgs > 0) {
++ hmsg_mgr->msgs_pending -= num_msgs;
++#if _CHNL_WORDSIZE == 2
++ /*
++ * Access can be different SM access word size
++ * (e.g. 16/32 bit words)
++ */
++ msg_ctr_obj->size = (u16) num_msgs;
++#else
++ msg_ctr_obj->size = num_msgs;
++#endif
++ msg_ctr_obj->buf_empty = false;
++ /* Set the post SWI flag */
++ msg_ctr_obj->post_swi = true;
++ /* Tell the DSP we have written the output. */
++ sm_interrupt_dsp(pio_mgr->hbridge_context,
++ MBX_PCPY_CLASS);
++ }
++ }
++func_end:
++ return;
++}
++
++/*
++ * ======== register_shm_segs ========
++ * purpose:
++ * Registers GPP SM segment with CMM.
++ */
++static int register_shm_segs(struct io_mgr *hio_mgr,
++ struct cod_manager *cod_man,
++ u32 dw_gpp_base_pa)
++{
++ int status = 0;
++ u32 ul_shm0_base = 0;
++ u32 shm0_end = 0;
++ u32 ul_shm0_rsrvd_start = 0;
++ u32 ul_rsrvd_size = 0;
++ u32 ul_gpp_phys;
++ u32 ul_dsp_virt;
++ u32 ul_shm_seg_id0 = 0;
++ u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
++
++ /*
++ * Read address and size info for first SM region.
++ * Get start of 1st SM Heap region.
++ */
++ status =
++ cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
++ if (ul_shm0_base == 0) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /* Get end of 1st SM Heap region */
++ if (!status) {
++ /* Get start and length of message part of shared memory */
++ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
++ &shm0_end);
++ if (shm0_end == 0) {
++ status = -EPERM;
++ goto func_end;
++ }
++ }
++ /* Start of Gpp reserved region */
++ if (!status) {
++ /* Get start and length of message part of shared memory */
++ status =
++ cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
++ &ul_shm0_rsrvd_start);
++ if (ul_shm0_rsrvd_start == 0) {
++ status = -EPERM;
++ goto func_end;
++ }
++ }
++ /* Register with CMM */
++ if (!status) {
++ status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
++ if (!status) {
++ status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
++ CMM_ALLSEGMENTS);
++ }
++ }
++ /* Register new SM region(s) */
++ if (!status && (shm0_end - ul_shm0_base) > 0) {
++ /* Calc size (bytes) of SM the GPP can alloc from */
++ ul_rsrvd_size =
++ (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
++ if (ul_rsrvd_size <= 0) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /* Calc size of SM DSP can alloc from */
++ ul_dsp_size =
++ (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
++ if (ul_dsp_size <= 0) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /* First TLB entry reserved for Bridge SM use. */
++ ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
++ /* Get size in bytes */
++ ul_dsp_virt =
++ hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
++ hio_mgr->word_size;
++ /*
++ * Calc byte offset used to convert GPP phys <-> DSP byte
++ * address.
++ */
++ if (dw_gpp_base_pa > ul_dsp_virt)
++ dw_offset = dw_gpp_base_pa - ul_dsp_virt;
++ else
++ dw_offset = ul_dsp_virt - dw_gpp_base_pa;
++
++ if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /*
++ * Calc Gpp phys base of SM region.
++ * This is actually uncached kernel virtual address.
++ */
++ dw_gpp_base_va =
++ ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
++ ul_dsp_virt;
++ /*
++ * Calc Gpp phys base of SM region.
++ * This is the physical address.
++ */
++ dw_gpp_base_pa =
++ dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
++ ul_dsp_virt;
++ /* Register SM Segment 0. */
++ status =
++ cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
++ ul_rsrvd_size, dw_offset,
++ (dw_gpp_base_pa >
++ ul_dsp_virt) ? CMM_ADDTODSPPA :
++ CMM_SUBFROMDSPPA,
++ (u32) (ul_shm0_base *
++ hio_mgr->word_size),
++ ul_dsp_size, &ul_shm_seg_id0,
++ dw_gpp_base_va);
++ /* First SM region is seg_id = 1 */
++ if (ul_shm_seg_id0 != 1)
++ status = -EPERM;
++ }
++func_end:
++ return status;
++}
++
++/* ZCPY IO routines. */
++/*
++ * ======== IO_SHMcontrol ========
++ * Sets the requested shm setting.
++ */
++int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
++{
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ u32 i;
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ switch (desc) {
++ case SHM_CURROPP:
++ /* Update the shared memory with requested OPP information */
++ if (pargs != NULL)
++ hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
++ *(u32 *) pargs;
++ else
++ return -EPERM;
++ break;
++ case SHM_OPPINFO:
++ /*
++ * Update the shared memory with the voltage, frequency,
++ * min and max frequency values for an OPP.
++ */
++ for (i = 0; i <= dsp_max_opps; i++) {
++ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
++ voltage = vdd1_dsp_freq[i][0];
++ dev_dbg(bridge, "OPP-shm: voltage: %d\n",
++ vdd1_dsp_freq[i][0]);
++ hio_mgr->shared_mem->opp_table_struct.
++ opp_point[i].frequency = vdd1_dsp_freq[i][1];
++ dev_dbg(bridge, "OPP-shm: frequency: %d\n",
++ vdd1_dsp_freq[i][1]);
++ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
++ min_freq = vdd1_dsp_freq[i][2];
++ dev_dbg(bridge, "OPP-shm: min freq: %d\n",
++ vdd1_dsp_freq[i][2]);
++ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
++ max_freq = vdd1_dsp_freq[i][3];
++ dev_dbg(bridge, "OPP-shm: max freq: %d\n",
++ vdd1_dsp_freq[i][3]);
++ }
++ hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
++ dsp_max_opps;
++ dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
++ /* Update the current OPP number */
++ if (pdata->dsp_get_opp)
++ i = (*pdata->dsp_get_opp) ();
++ hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
++ dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
++ break;
++ case SHM_GETOPP:
++ /* Get the OPP that DSP has requested */
++ *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
++ break;
++ default:
++ break;
++ }
++#endif
++ return 0;
++}
++
++/*
++ * ======== bridge_io_get_proc_load ========
++ * Gets the Processor's Load information
++ */
++int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
++ struct dsp_procloadstat *proc_lstat)
++{
++ proc_lstat->curr_load =
++ hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
++ proc_lstat->predicted_load =
++ hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
++ proc_lstat->curr_dsp_freq =
++ hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
++ proc_lstat->predicted_freq =
++ hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
++
++ dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
++ "Pred Freq = %d\n", proc_lstat->curr_load,
++ proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
++ proc_lstat->predicted_freq);
++ return 0;
++}
++
++void io_sm_init(void)
++{
++ /* Do nothing */
++}
++
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++void print_dsp_debug_trace(struct io_mgr *hio_mgr)
++{
++ u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
++
++ while (true) {
++ /* Get the DSP current pointer */
++ ul_gpp_cur_pointer =
++ *(u32 *) (hio_mgr->ul_trace_buffer_current);
++ ul_gpp_cur_pointer =
++ hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
++ hio_mgr->ul_dsp_va);
++
++ /* No new debug messages available yet */
++ if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
++ break;
++ } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
++ /* Continuous data */
++ ul_new_message_length =
++ ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
++
++ memcpy(hio_mgr->pmsg,
++ (char *)hio_mgr->ul_gpp_read_pointer,
++ ul_new_message_length);
++ hio_mgr->pmsg[ul_new_message_length] = '\0';
++ /*
++ * Advance the GPP trace pointer to DSP current
++ * pointer.
++ */
++ hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
++ /* Print the trace messages */
++ pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
++ } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
++ /* Handle trace buffer wraparound */
++ memcpy(hio_mgr->pmsg,
++ (char *)hio_mgr->ul_gpp_read_pointer,
++ hio_mgr->ul_trace_buffer_end -
++ hio_mgr->ul_gpp_read_pointer);
++ ul_new_message_length =
++ ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
++ memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
++ hio_mgr->ul_gpp_read_pointer],
++ (char *)hio_mgr->ul_trace_buffer_begin,
++ ul_new_message_length);
++ hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
++ hio_mgr->ul_gpp_read_pointer +
++ ul_new_message_length] = '\0';
++ /*
++ * Advance the GPP trace pointer to DSP current
++ * pointer.
++ */
++ hio_mgr->ul_gpp_read_pointer =
++ hio_mgr->ul_trace_buffer_begin +
++ ul_new_message_length;
++ /* Print the trace messages */
++ pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
++ }
++ }
++}
++#endif
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/*
++ * ======== print_dsp_trace_buffer ========
++ * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
++ * Parameters:
++ * hdeh_mgr: Handle to DEH manager object
++ * number of extra carriage returns to generate.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Unable to allocate memory.
++ * Requires:
++ * hdeh_mgr muse be valid. Checked in bridge_deh_notify.
++ */
++int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
++{
++ int status = 0;
++ struct cod_manager *cod_mgr;
++ u32 ul_trace_end;
++ u32 ul_trace_begin;
++ u32 trace_cur_pos;
++ u32 ul_num_bytes = 0;
++ u32 ul_num_words = 0;
++ u32 ul_word_size = 2;
++ char *psz_buf;
++ char *str_beg;
++ char *trace_end;
++ char *buf_end;
++ char *new_line;
++
++ struct bridge_dev_context *pbridge_context = hbridge_context;
++ struct bridge_drv_interface *intf_fxns;
++ struct dev_object *dev_obj = (struct dev_object *)
++ pbridge_context->hdev_obj;
++
++ status = dev_get_cod_mgr(dev_obj, &cod_mgr);
++
++ if (cod_mgr) {
++ /* Look for SYS_PUTCBEG/SYS_PUTCEND */
++ status =
++ cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
++ } else {
++ status = -EFAULT;
++ }
++ if (!status)
++ status =
++ cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
++
++ if (!status)
++ /* trace_cur_pos will hold the address of a DSP pointer */
++ status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
++ &trace_cur_pos);
++
++ if (status)
++ goto func_end;
++
++ ul_num_bytes = (ul_trace_end - ul_trace_begin);
++
++ ul_num_words = ul_num_bytes * ul_word_size;
++ status = dev_get_intf_fxns(dev_obj, &intf_fxns);
++
++ if (status)
++ goto func_end;
++
++ psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
++ if (psz_buf != NULL) {
++ /* Read trace buffer data */
++ status = (*intf_fxns->pfn_brd_read)(pbridge_context,
++ (u8 *)psz_buf, (u32)ul_trace_begin,
++ ul_num_bytes, 0);
++
++ if (status)
++ goto func_end;
++
++ /* Pack and do newline conversion */
++ pr_debug("PrintDspTraceBuffer: "
++ "before pack and unpack.\n");
++ pr_debug("%s: DSP Trace Buffer Begin:\n"
++ "=======================\n%s\n",
++ __func__, psz_buf);
++
++ /* Read the value at the DSP address in trace_cur_pos. */
++ status = (*intf_fxns->pfn_brd_read)(pbridge_context,
++ (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
++ 4, 0);
++ if (status)
++ goto func_end;
++ /* Pack and do newline conversion */
++ pr_info("DSP Trace Buffer Begin:\n"
++ "=======================\n%s\n",
++ psz_buf);
++
++
++ /* convert to offset */
++ trace_cur_pos = trace_cur_pos - ul_trace_begin;
++
++ if (ul_num_bytes) {
++ /*
++ * The buffer is not full, find the end of the
++ * data -- buf_end will be >= psz_buf after
++ * while.
++ */
++ buf_end = &psz_buf[ul_num_bytes+1];
++ /* DSP print position */
++ trace_end = &psz_buf[trace_cur_pos];
++
++ /*
++ * Search buffer for a new_line and replace it
++ * with '\0', then print as string.
++ * Continue until end of buffer is reached.
++ */
++ str_beg = trace_end;
++ ul_num_bytes = buf_end - str_beg;
++
++ while (str_beg < buf_end) {
++ new_line = strnchr(str_beg, ul_num_bytes,
++ '\n');
++ if (new_line && new_line < buf_end) {
++ *new_line = 0;
++ pr_debug("%s\n", str_beg);
++ str_beg = ++new_line;
++ ul_num_bytes = buf_end - str_beg;
++ } else {
++ /*
++ * Assume buffer empty if it contains
++ * a zero
++ */
++ if (*str_beg != '\0') {
++ str_beg[ul_num_bytes] = 0;
++ pr_debug("%s\n", str_beg);
++ }
++ str_beg = buf_end;
++ ul_num_bytes = 0;
++ }
++ }
++ /*
++ * Search buffer for a new_line and replace it
++ * with '\0', then print as string.
++ * Continue until buffer is exhausted.
++ */
++ str_beg = psz_buf;
++ ul_num_bytes = trace_end - str_beg;
++
++ while (str_beg < trace_end) {
++ new_line = strnchr(str_beg, ul_num_bytes, '\n');
++ if (new_line != NULL && new_line < trace_end) {
++ *new_line = 0;
++ pr_debug("%s\n", str_beg);
++ str_beg = ++new_line;
++ ul_num_bytes = trace_end - str_beg;
++ } else {
++ /*
++ * Assume buffer empty if it contains
++ * a zero
++ */
++ if (*str_beg != '\0') {
++ str_beg[ul_num_bytes] = 0;
++ pr_debug("%s\n", str_beg);
++ }
++ str_beg = trace_end;
++ ul_num_bytes = 0;
++ }
++ }
++ }
++ pr_info("\n=======================\n"
++ "DSP Trace Buffer End:\n");
++ kfree(psz_buf);
++ } else {
++ status = -ENOMEM;
++ }
++func_end:
++ if (status)
++ dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
++ return status;
++}
++
++/**
++ * dump_dsp_stack() - This function dumps the data on the DSP stack.
++ * @bridge_context: Bridge driver's device context pointer.
++ *
++ */
++int dump_dsp_stack(struct bridge_dev_context *bridge_context)
++{
++ int status = 0;
++ struct cod_manager *code_mgr;
++ struct node_mgr *node_mgr;
++ u32 trace_begin;
++ char name[256];
++ struct {
++ u32 head[2];
++ u32 size;
++ } mmu_fault_dbg_info;
++ u32 *buffer;
++ u32 *buffer_beg;
++ u32 *buffer_end;
++ u32 exc_type;
++ u32 dyn_ext_base;
++ u32 i;
++ u32 offset_output;
++ u32 total_size;
++ u32 poll_cnt;
++ const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
++ "IRP", "NRP", "AMR", "SSR",
++ "ILC", "RILC", "IER", "CSR"};
++ const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
++ struct bridge_drv_interface *intf_fxns;
++ struct dev_object *dev_object = bridge_context->hdev_obj;
++
++ status = dev_get_cod_mgr(dev_object, &code_mgr);
++ if (!code_mgr) {
++ pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
++ status = -EFAULT;
++ }
++
++ if (!status) {
++ status = dev_get_node_manager(dev_object, &node_mgr);
++ if (!node_mgr) {
++ pr_debug("%s: Failed on dev_get_node_manager.\n",
++ __func__);
++ status = -EFAULT;
++ }
++ }
++
++ if (!status) {
++ /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
++ status =
++ cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
++ pr_debug("%s: trace_begin Value 0x%x\n",
++ __func__, trace_begin);
++ if (status)
++ pr_debug("%s: Failed on cod_get_sym_value.\n",
++ __func__);
++ }
++ if (!status)
++ status = dev_get_intf_fxns(dev_object, &intf_fxns);
++ /*
++ * Check for the "magic number" in the trace buffer. If it has
++ * yet to appear then poll the trace buffer to wait for it. Its
++ * appearance signals that the DSP has finished dumping its state.
++ */
++ mmu_fault_dbg_info.head[0] = 0;
++ mmu_fault_dbg_info.head[1] = 0;
++ if (!status) {
++ poll_cnt = 0;
++ while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
++ mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
++ poll_cnt < POLL_MAX) {
++
++ /* Read DSP dump size from the DSP trace buffer... */
++ status = (*intf_fxns->pfn_brd_read)(bridge_context,
++ (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
++ sizeof(mmu_fault_dbg_info), 0);
++
++ if (status)
++ break;
++
++ poll_cnt++;
++ }
++
++ if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 &&
++ mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
++ status = -ETIME;
++ pr_err("%s:No DSP MMU-Fault information available.\n",
++ __func__);
++ }
++ }
++
++ if (!status) {
++ total_size = mmu_fault_dbg_info.size;
++ /* Limit the size in case DSP went crazy */
++ if (total_size > MAX_MMU_DBGBUFF)
++ total_size = MAX_MMU_DBGBUFF;
++
++ buffer = kzalloc(total_size, GFP_ATOMIC);
++ if (!buffer) {
++ status = -ENOMEM;
++ pr_debug("%s: Failed to "
++ "allocate stack dump buffer.\n", __func__);
++ goto func_end;
++ }
++
++ buffer_beg = buffer;
++ buffer_end = buffer + total_size / 4;
++
++ /* Read bytes from the DSP trace buffer... */
++ status = (*intf_fxns->pfn_brd_read)(bridge_context,
++ (u8 *)buffer, (u32)trace_begin,
++ total_size, 0);
++ if (status) {
++ pr_debug("%s: Failed to Read Trace Buffer.\n",
++ __func__);
++ goto func_end;
++ }
++
++ pr_err("\nAproximate Crash Position:\n"
++ "--------------------------\n");
++
++ exc_type = buffer[3];
++ if (!exc_type)
++ i = buffer[79]; /* IRP */
++ else
++ i = buffer[80]; /* NRP */
++
++ status =
++ cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
++ if (status) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
++ 0x1000, &offset_output, name) == 0))
++ pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
++ i - offset_output);
++ else
++ pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
++
++ buffer += 4;
++
++ pr_err("\nExecution Info:\n"
++ "---------------\n");
++
++ if (*buffer < ARRAY_SIZE(exec_ctxt)) {
++ pr_err("Execution context \t%s\n",
++ exec_ctxt[*buffer++]);
++ } else {
++ pr_err("Execution context corrupt\n");
++ kfree(buffer_beg);
++ return -EFAULT;
++ }
++ pr_err("Task Handle\t\t0x%x\n", *buffer++);
++ pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
++ pr_err("Stack Top\t\t0x%x\n", *buffer++);
++ pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
++ pr_err("Stack Size\t\t0x%x\n", *buffer++);
++ pr_err("Stack Size In Use\t0x%x\n", *buffer++);
++
++ pr_err("\nCPU Registers\n"
++ "---------------\n");
++
++ for (i = 0; i < 32; i++) {
++ if (i == 4 || i == 6 || i == 8)
++ pr_err("A%d 0x%-8x [Function Argument %d]\n",
++ i, *buffer++, i-3);
++ else if (i == 15)
++ pr_err("A15 0x%-8x [Frame Pointer]\n",
++ *buffer++);
++ else
++ pr_err("A%d 0x%x\n", i, *buffer++);
++ }
++
++ pr_err("\nB0 0x%x\n", *buffer++);
++ pr_err("B1 0x%x\n", *buffer++);
++ pr_err("B2 0x%x\n", *buffer++);
++
++ if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
++ *buffer, 0x1000, &offset_output, name) == 0))
++
++ pr_err("B3 0x%-8x [Function Return Pointer:"
++ " \"%s\" + 0x%x]\n", *buffer, name,
++ *buffer - offset_output);
++ else
++ pr_err("B3 0x%-8x [Function Return Pointer:"
++ "Unable to match to a symbol.]\n", *buffer);
++
++ buffer++;
++
++ for (i = 4; i < 32; i++) {
++ if (i == 4 || i == 6 || i == 8)
++ pr_err("B%d 0x%-8x [Function Argument %d]\n",
++ i, *buffer++, i-2);
++ else if (i == 14)
++ pr_err("B14 0x%-8x [Data Page Pointer]\n",
++ *buffer++);
++ else
++ pr_err("B%d 0x%x\n", i, *buffer++);
++ }
++
++ pr_err("\n");
++
++ for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
++ pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
++
++ pr_err("\nStack:\n"
++ "------\n");
++
++ for (i = 0; buffer < buffer_end; i++, buffer++) {
++ if ((*buffer > dyn_ext_base) && (
++ node_find_addr(node_mgr, *buffer , 0x600,
++ &offset_output, name) == 0))
++ pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
++ i, *buffer, name,
++ *buffer - offset_output);
++ else
++ pr_err("[%d] 0x%x\n", i, *buffer);
++ }
++ kfree(buffer_beg);
++ }
++func_end:
++ return status;
++}
++
++/**
++ * dump_dl_modules() - This functions dumps the _DLModules loaded in DSP side
++ * @bridge_context: Bridge driver's device context pointer.
++ *
++ */
++void dump_dl_modules(struct bridge_dev_context *bridge_context)
++{
++ struct cod_manager *code_mgr;
++ struct bridge_drv_interface *intf_fxns;
++ struct bridge_dev_context *bridge_ctxt = bridge_context;
++ struct dev_object *dev_object = bridge_ctxt->hdev_obj;
++ struct modules_header modules_hdr;
++ struct dll_module *module_struct = NULL;
++ u32 module_dsp_addr;
++ u32 module_size;
++ u32 module_struct_size = 0;
++ u32 sect_ndx;
++ char *sect_str ;
++ int status = 0;
++
++ status = dev_get_intf_fxns(dev_object, &intf_fxns);
++ if (status) {
++ pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
++ goto func_end;
++ }
++
++ status = dev_get_cod_mgr(dev_object, &code_mgr);
++ if (!code_mgr) {
++ pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ /* Lookup the address of the modules_header structure */
++ status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
++ if (status) {
++ pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
++ __func__);
++ goto func_end;
++ }
++
++ pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
++
++ /* Copy the modules_header structure from DSP memory. */
++ status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
++ (u32) module_dsp_addr, sizeof(modules_hdr), 0);
++
++ if (status) {
++ pr_debug("%s: Failed failed to read modules header.\n",
++ __func__);
++ goto func_end;
++ }
++
++ module_dsp_addr = modules_hdr.first_module;
++ module_size = modules_hdr.first_module_size;
++
++ pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
++ module_size);
++
++ pr_err("\nDynamically Loaded Modules:\n"
++ "---------------------------\n");
++
++ /* For each dll_module structure in the list... */
++ while (module_size) {
++ /*
++ * Allocate/re-allocate memory to hold the dll_module
++ * structure. The memory is re-allocated only if the existing
++ * allocation is too small.
++ */
++ if (module_size > module_struct_size) {
++ kfree(module_struct);
++ module_struct = kzalloc(module_size+128, GFP_ATOMIC);
++ module_struct_size = module_size+128;
++ pr_debug("%s: allocated module struct %p %d\n",
++ __func__, module_struct, module_struct_size);
++ if (!module_struct)
++ goto func_end;
++ }
++ /* Copy the dll_module structure from DSP memory */
++ status = (*intf_fxns->pfn_brd_read)(bridge_context,
++ (u8 *)module_struct, module_dsp_addr, module_size, 0);
++
++ if (status) {
++ pr_debug(
++ "%s: Failed to read dll_module stuct for 0x%x.\n",
++ __func__, module_dsp_addr);
++ break;
++ }
++
++ /* Update info regarding the _next_ module in the list. */
++ module_dsp_addr = module_struct->next_module;
++ module_size = module_struct->next_module_size;
++
++ pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
++ __func__, module_dsp_addr, module_size,
++ module_struct->num_sects);
++
++ /*
++ * The section name strings start immediately following
++ * the array of dll_sect structures.
++ */
++ sect_str = (char *) &module_struct->
++ sects[module_struct->num_sects];
++ pr_err("%s\n", sect_str);
++
++ /*
++ * Advance to the first section name string.
++ * Each string follows the one before.
++ */
++ sect_str += strlen(sect_str) + 1;
++
++ /* Access each dll_sect structure and its name string. */
++ for (sect_ndx = 0;
++ sect_ndx < module_struct->num_sects; sect_ndx++) {
++ pr_err(" Section: 0x%x ",
++ module_struct->sects[sect_ndx].sect_load_adr);
++
++ if (((u32) sect_str - (u32) module_struct) <
++ module_struct_size) {
++ pr_err("%s\n", sect_str);
++ /* Each string follows the one before. */
++ sect_str += strlen(sect_str)+1;
++ } else {
++ pr_err("<string error>\n");
++ pr_debug("%s: section name sting address "
++ "is invalid %p\n", __func__, sect_str);
++ }
++ }
++ }
++func_end:
++ kfree(module_struct);
++}
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/msg_sm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/msg_sm.c 2010-08-18 11:24:23.158050746 +0300
+@@ -0,0 +1,673 @@
++/*
++ * msg_sm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implements upper edge functions for Bridge message module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/list.h>
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/io_sm.h>
++
++/* ----------------------------------- This */
++#include <_msg_sm.h>
++#include <dspbridge/dspmsg.h>
++
++/* ----------------------------------- Function Prototypes */
++static int add_new_msg(struct lst_list *msg_list);
++static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
++static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
++static void free_msg_list(struct lst_list *msg_list);
++
++/*
++ * ======== bridge_msg_create ========
++ * Create an object to manage message queues. Only one of these objects
++ * can exist per device object.
++ */
++int bridge_msg_create(struct msg_mgr **msg_man,
++ struct dev_object *hdev_obj,
++ msg_onexit msg_callback)
++{
++ struct msg_mgr *msg_mgr_obj;
++ struct io_mgr *hio_mgr;
++ int status = 0;
++
++ if (!msg_man || !msg_callback || !hdev_obj) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ dev_get_io_mgr(hdev_obj, &hio_mgr);
++ if (!hio_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ *msg_man = NULL;
++ /* Allocate msg_ctrl manager object */
++ msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
++
++ if (msg_mgr_obj) {
++ msg_mgr_obj->on_exit = msg_callback;
++ msg_mgr_obj->hio_mgr = hio_mgr;
++ /* List of MSG_QUEUEs */
++ msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ /* Queues of message frames for messages to the DSP. Message
++ * frames will only be added to the free queue when a
++ * msg_queue object is created. */
++ msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ if (msg_mgr_obj->queue_list == NULL ||
++ msg_mgr_obj->msg_free_list == NULL ||
++ msg_mgr_obj->msg_used_list == NULL) {
++ status = -ENOMEM;
++ } else {
++ INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
++ INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
++ INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
++ spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
++ }
++
++ /* Create an event to be used by bridge_msg_put() in waiting
++ * for an available free frame from the message manager. */
++ msg_mgr_obj->sync_event =
++ kzalloc(sizeof(struct sync_object), GFP_KERNEL);
++ if (!msg_mgr_obj->sync_event)
++ status = -ENOMEM;
++ else
++ sync_init_event(msg_mgr_obj->sync_event);
++
++ if (!status)
++ *msg_man = msg_mgr_obj;
++ else
++ delete_msg_mgr(msg_mgr_obj);
++
++ } else {
++ status = -ENOMEM;
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_msg_create_queue ========
++ * Create a msg_queue for sending/receiving messages to/from a node
++ * on the DSP.
++ */
++int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
++ struct msg_queue **msgq,
++ u32 msgq_id, u32 max_msgs, void *arg)
++{
++ u32 i;
++ u32 num_allocated = 0;
++ struct msg_queue *msg_q;
++ int status = 0;
++
++ if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ *msgq = NULL;
++ /* Allocate msg_queue object */
++ msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
++ if (!msg_q) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ lst_init_elem((struct list_head *)msg_q);
++ msg_q->max_msgs = max_msgs;
++ msg_q->hmsg_mgr = hmsg_mgr;
++ msg_q->arg = arg; /* Node handle */
++ msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
++ /* Queues of Message frames for messages from the DSP */
++ msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
++ msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
++ if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
++ status = -ENOMEM;
++ else {
++ INIT_LIST_HEAD(&msg_q->msg_free_list->head);
++ INIT_LIST_HEAD(&msg_q->msg_used_list->head);
++ }
++
++ /* Create event that will be signalled when a message from
++ * the DSP is available. */
++ if (!status) {
++ msg_q->sync_event = kzalloc(sizeof(struct sync_object),
++ GFP_KERNEL);
++ if (msg_q->sync_event)
++ sync_init_event(msg_q->sync_event);
++ else
++ status = -ENOMEM;
++ }
++
++ /* Create a notification list for message ready notification. */
++ if (!status) {
++ msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
++ GFP_KERNEL);
++ if (msg_q->ntfy_obj)
++ ntfy_init(msg_q->ntfy_obj);
++ else
++ status = -ENOMEM;
++ }
++
++ /* Create events that will be used to synchronize cleanup
++ * when the object is deleted. sync_done will be set to
++ * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
++ * will be set by the unblocked thread to signal that it
++ * is unblocked and will no longer reference the object. */
++ if (!status) {
++ msg_q->sync_done = kzalloc(sizeof(struct sync_object),
++ GFP_KERNEL);
++ if (msg_q->sync_done)
++ sync_init_event(msg_q->sync_done);
++ else
++ status = -ENOMEM;
++ }
++
++ if (!status) {
++ msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
++ GFP_KERNEL);
++ if (msg_q->sync_done_ack)
++ sync_init_event(msg_q->sync_done_ack);
++ else
++ status = -ENOMEM;
++ }
++
++ if (!status) {
++ /* Enter critical section */
++ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
++ /* Initialize message frames and put in appropriate queues */
++ for (i = 0; i < max_msgs && !status; i++) {
++ status = add_new_msg(hmsg_mgr->msg_free_list);
++ if (!status) {
++ num_allocated++;
++ status = add_new_msg(msg_q->msg_free_list);
++ }
++ }
++ if (status) {
++ /* Stay inside CS to prevent others from taking any
++ * of the newly allocated message frames. */
++ delete_msg_queue(msg_q, num_allocated);
++ } else {
++ lst_put_tail(hmsg_mgr->queue_list,
++ (struct list_head *)msg_q);
++ *msgq = msg_q;
++ /* Signal that free frames are now available */
++ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
++ sync_set_event(hmsg_mgr->sync_event);
++
++ }
++ /* Exit critical section */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ } else {
++ delete_msg_queue(msg_q, 0);
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_msg_delete ========
++ * Delete a msg_ctrl manager allocated in bridge_msg_create().
++ */
++void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
++{
++ if (hmsg_mgr)
++ delete_msg_mgr(hmsg_mgr);
++}
++
++/*
++ * ======== bridge_msg_delete_queue ========
++ * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
++ */
++void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
++{
++ struct msg_mgr *hmsg_mgr;
++ u32 io_msg_pend;
++
++ if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
++ goto func_end;
++
++ hmsg_mgr = msg_queue_obj->hmsg_mgr;
++ msg_queue_obj->done = true;
++ /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
++ io_msg_pend = msg_queue_obj->io_msg_pend;
++ while (io_msg_pend) {
++ /* Unblock thread */
++ sync_set_event(msg_queue_obj->sync_done);
++ /* Wait for acknowledgement */
++ sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
++ io_msg_pend = msg_queue_obj->io_msg_pend;
++ }
++ /* Remove message queue from hmsg_mgr->queue_list */
++ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
++ lst_remove_elem(hmsg_mgr->queue_list,
++ (struct list_head *)msg_queue_obj);
++ /* Free the message queue object */
++ delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
++ if (!hmsg_mgr->msg_free_list)
++ goto func_cont;
++ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
++ sync_reset_event(hmsg_mgr->sync_event);
++func_cont:
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++func_end:
++ return;
++}
++
++/*
++ * ======== bridge_msg_get ========
++ * Get a message from a msg_ctrl queue.
++ */
++int bridge_msg_get(struct msg_queue *msg_queue_obj,
++ struct dsp_msg *pmsg, u32 utimeout)
++{
++ struct msg_frame *msg_frame_obj;
++ struct msg_mgr *hmsg_mgr;
++ bool got_msg = false;
++ struct sync_object *syncs[2];
++ u32 index;
++ int status = 0;
++
++ if (!msg_queue_obj || pmsg == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ hmsg_mgr = msg_queue_obj->hmsg_mgr;
++ if (!msg_queue_obj->msg_used_list) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ /* Enter critical section */
++ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
++ /* If a message is already there, get it */
++ if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
++ msg_frame_obj = (struct msg_frame *)
++ lst_get_head(msg_queue_obj->msg_used_list);
++ if (msg_frame_obj != NULL) {
++ *pmsg = msg_frame_obj->msg_data.msg;
++ lst_put_tail(msg_queue_obj->msg_free_list,
++ (struct list_head *)msg_frame_obj);
++ if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
++ sync_reset_event(msg_queue_obj->sync_event);
++
++ got_msg = true;
++ }
++ } else {
++ if (msg_queue_obj->done)
++ status = -EPERM;
++ else
++ msg_queue_obj->io_msg_pend++;
++
++ }
++ /* Exit critical section */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ if (!status && !got_msg) {
++ /* Wait til message is available, timeout, or done. We don't
++ * have to schedule the DPC, since the DSP will send messages
++ * when they are available. */
++ syncs[0] = msg_queue_obj->sync_event;
++ syncs[1] = msg_queue_obj->sync_done;
++ status = sync_wait_on_multiple_events(syncs, 2, utimeout,
++ &index);
++ /* Enter critical section */
++ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
++ if (msg_queue_obj->done) {
++ msg_queue_obj->io_msg_pend--;
++ /* Exit critical section */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ /* Signal that we're not going to access msg_queue_obj
++ * anymore, so it can be deleted. */
++ (void)sync_set_event(msg_queue_obj->sync_done_ack);
++ status = -EPERM;
++ } else {
++ if (!status) {
++ DBC_ASSERT(!LST_IS_EMPTY
++ (msg_queue_obj->msg_used_list));
++ /* Get msg from used list */
++ msg_frame_obj = (struct msg_frame *)
++ lst_get_head(msg_queue_obj->msg_used_list);
++ /* Copy message into pmsg and put frame on the
++ * free list */
++ if (msg_frame_obj != NULL) {
++ *pmsg = msg_frame_obj->msg_data.msg;
++ lst_put_tail
++ (msg_queue_obj->msg_free_list,
++ (struct list_head *)
++ msg_frame_obj);
++ }
++ }
++ msg_queue_obj->io_msg_pend--;
++ /* Reset the event if there are still queued messages */
++ if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
++ sync_set_event(msg_queue_obj->sync_event);
++
++ /* Exit critical section */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_msg_put ========
++ * Put a message onto a msg_ctrl queue.
++ */
++int bridge_msg_put(struct msg_queue *msg_queue_obj,
++ const struct dsp_msg *pmsg, u32 utimeout)
++{
++ struct msg_frame *msg_frame_obj;
++ struct msg_mgr *hmsg_mgr;
++ bool put_msg = false;
++ struct sync_object *syncs[2];
++ u32 index;
++ int status = 0;
++
++ if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ hmsg_mgr = msg_queue_obj->hmsg_mgr;
++ if (!hmsg_mgr->msg_free_list) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
++
++ /* If a message frame is available, use it */
++ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
++ msg_frame_obj =
++ (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
++ if (msg_frame_obj != NULL) {
++ msg_frame_obj->msg_data.msg = *pmsg;
++ msg_frame_obj->msg_data.msgq_id =
++ msg_queue_obj->msgq_id;
++ lst_put_tail(hmsg_mgr->msg_used_list,
++ (struct list_head *)msg_frame_obj);
++ hmsg_mgr->msgs_pending++;
++ put_msg = true;
++ }
++ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
++ sync_reset_event(hmsg_mgr->sync_event);
++
++ /* Release critical section before scheduling DPC */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ /* Schedule a DPC, to do the actual data transfer: */
++ iosm_schedule(hmsg_mgr->hio_mgr);
++ } else {
++ if (msg_queue_obj->done)
++ status = -EPERM;
++ else
++ msg_queue_obj->io_msg_pend++;
++
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ }
++ if (!status && !put_msg) {
++ /* Wait til a free message frame is available, timeout,
++ * or done */
++ syncs[0] = hmsg_mgr->sync_event;
++ syncs[1] = msg_queue_obj->sync_done;
++ status = sync_wait_on_multiple_events(syncs, 2, utimeout,
++ &index);
++ if (status)
++ goto func_end;
++ /* Enter critical section */
++ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
++ if (msg_queue_obj->done) {
++ msg_queue_obj->io_msg_pend--;
++ /* Exit critical section */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ /* Signal that we're not going to access msg_queue_obj
++ * anymore, so it can be deleted. */
++ (void)sync_set_event(msg_queue_obj->sync_done_ack);
++ status = -EPERM;
++ } else {
++ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
++ status = -EFAULT;
++ goto func_cont;
++ }
++ /* Get msg from free list */
++ msg_frame_obj = (struct msg_frame *)
++ lst_get_head(hmsg_mgr->msg_free_list);
++ /*
++ * Copy message into pmsg and put frame on the
++ * used list.
++ */
++ if (msg_frame_obj) {
++ msg_frame_obj->msg_data.msg = *pmsg;
++ msg_frame_obj->msg_data.msgq_id =
++ msg_queue_obj->msgq_id;
++ lst_put_tail(hmsg_mgr->msg_used_list,
++ (struct list_head *)msg_frame_obj);
++ hmsg_mgr->msgs_pending++;
++ /*
++ * Schedule a DPC, to do the actual
++ * data transfer.
++ */
++ iosm_schedule(hmsg_mgr->hio_mgr);
++ }
++
++ msg_queue_obj->io_msg_pend--;
++ /* Reset event if there are still frames available */
++ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
++ sync_set_event(hmsg_mgr->sync_event);
++func_cont:
++ /* Exit critical section */
++ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_msg_register_notify ========
++ */
++int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
++ u32 event_mask, u32 notify_type,
++ struct dsp_notification *hnotification)
++{
++ int status = 0;
++
++ if (!msg_queue_obj || !hnotification) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
++ status = -EPERM;
++ goto func_end;
++ }
++
++ if (notify_type != DSP_SIGNALEVENT) {
++ status = -EBADR;
++ goto func_end;
++ }
++
++ if (event_mask)
++ status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
++ event_mask, notify_type);
++ else
++ status = ntfy_unregister(msg_queue_obj->ntfy_obj,
++ hnotification);
++
++ if (status == -EINVAL) {
++ /* Not registered. Ok, since we couldn't have known. Node
++ * notifications are split between node state change handled
++ * by NODE, and message ready handled by msg_ctrl. */
++ status = 0;
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_msg_set_queue_id ========
++ */
++void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
++{
++ /*
++ * A message queue must be created when a node is allocated,
++ * so that node_register_notify() can be called before the node
++ * is created. Since we don't know the node environment until the
++ * node is created, we need this function to set msg_queue_obj->msgq_id
++ * to the node environment, after the node is created.
++ */
++ if (msg_queue_obj)
++ msg_queue_obj->msgq_id = msgq_id;
++}
++
++/*
++ * ======== add_new_msg ========
++ * Must be called in message manager critical section.
++ */
++static int add_new_msg(struct lst_list *msg_list)
++{
++ struct msg_frame *pmsg;
++ int status = 0;
++
++ pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
++ if (pmsg != NULL) {
++ lst_init_elem((struct list_head *)pmsg);
++ lst_put_tail(msg_list, (struct list_head *)pmsg);
++ } else {
++ status = -ENOMEM;
++ }
++
++ return status;
++}
++
++/*
++ * ======== delete_msg_mgr ========
++ */
++static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
++{
++ if (!hmsg_mgr)
++ goto func_end;
++
++ if (hmsg_mgr->queue_list) {
++ if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
++ kfree(hmsg_mgr->queue_list);
++ hmsg_mgr->queue_list = NULL;
++ }
++ }
++
++ if (hmsg_mgr->msg_free_list) {
++ free_msg_list(hmsg_mgr->msg_free_list);
++ hmsg_mgr->msg_free_list = NULL;
++ }
++
++ if (hmsg_mgr->msg_used_list) {
++ free_msg_list(hmsg_mgr->msg_used_list);
++ hmsg_mgr->msg_used_list = NULL;
++ }
++
++ kfree(hmsg_mgr->sync_event);
++
++ kfree(hmsg_mgr);
++func_end:
++ return;
++}
++
++/*
++ * ======== delete_msg_queue ========
++ */
++static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
++{
++ struct msg_mgr *hmsg_mgr;
++ struct msg_frame *pmsg;
++ u32 i;
++
++ if (!msg_queue_obj ||
++ !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
++ goto func_end;
++
++ hmsg_mgr = msg_queue_obj->hmsg_mgr;
++
++ /* Pull off num_to_dsp message frames from Msg manager and free */
++ for (i = 0; i < num_to_dsp; i++) {
++
++ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
++ pmsg = (struct msg_frame *)
++ lst_get_head(hmsg_mgr->msg_free_list);
++ kfree(pmsg);
++ } else {
++ /* Cannot free all of the message frames */
++ break;
++ }
++ }
++
++ if (msg_queue_obj->msg_free_list) {
++ free_msg_list(msg_queue_obj->msg_free_list);
++ msg_queue_obj->msg_free_list = NULL;
++ }
++
++ if (msg_queue_obj->msg_used_list) {
++ free_msg_list(msg_queue_obj->msg_used_list);
++ msg_queue_obj->msg_used_list = NULL;
++ }
++
++ if (msg_queue_obj->ntfy_obj) {
++ ntfy_delete(msg_queue_obj->ntfy_obj);
++ kfree(msg_queue_obj->ntfy_obj);
++ }
++
++ kfree(msg_queue_obj->sync_event);
++ kfree(msg_queue_obj->sync_done);
++ kfree(msg_queue_obj->sync_done_ack);
++
++ kfree(msg_queue_obj);
++func_end:
++ return;
++
++}
++
++/*
++ * ======== free_msg_list ========
++ */
++static void free_msg_list(struct lst_list *msg_list)
++{
++ struct msg_frame *pmsg;
++
++ if (!msg_list)
++ goto func_end;
++
++ while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
++ kfree(pmsg);
++
++ DBC_ASSERT(LST_IS_EMPTY(msg_list));
++
++ kfree(msg_list);
++func_end:
++ return;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap3430.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap3430.c 2010-08-18 11:24:23.162056214 +0300
+@@ -0,0 +1,1802 @@
++/*
++ * tiomap.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Processor Manager Driver for TI OMAP3430 EVM.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++#include <linux/mm.h>
++#include <linux/mmzone.h>
++#include <plat/control.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/drv.h>
++#include <dspbridge/sync.h>
++
++/* ------------------------------------ Hardware Abstraction Layer */
++#include <hw_defs.h>
++#include <hw_mmu.h>
++
++/* ----------------------------------- Link Driver */
++#include <dspbridge/dspdefs.h>
++#include <dspbridge/dspchnl.h>
++#include <dspbridge/dspdeh.h>
++#include <dspbridge/dspio.h>
++#include <dspbridge/dspmsg.h>
++#include <dspbridge/pwr.h>
++#include <dspbridge/io_sm.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++#include <dspbridge/dspapi.h>
++#include <dspbridge/dmm.h>
++#include <dspbridge/wdt.h>
++
++/* ----------------------------------- Local */
++#include "_tiomap.h"
++#include "_tiomap_pwr.h"
++#include "tiomap_io.h"
++
++/* Offset in shared mem to write to in order to synchronize start with DSP */
++#define SHMSYNCOFFSET 4 /* GPP byte offset */
++
++#define BUFFERSIZE 1024
++
++#define TIHELEN_ACKTIMEOUT 10000
++
++#define MMU_SECTION_ADDR_MASK 0xFFF00000
++#define MMU_SSECTION_ADDR_MASK 0xFF000000
++#define MMU_LARGE_PAGE_MASK 0xFFFF0000
++#define MMU_SMALL_PAGE_MASK 0xFFFFF000
++#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
++#define PAGES_II_LVL_TABLE 512
++#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
++
++/* Forward Declarations: */
++static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
++static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff,
++ u32 dsp_addr, u32 ul_num_bytes,
++ u32 mem_type);
++static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr);
++static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
++ int *board_state);
++static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
++static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff,
++ u32 dsp_addr, u32 ul_num_bytes,
++ u32 mem_type);
++static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
++ u32 brd_state);
++static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_dest_addr, u32 dsp_src_addr,
++ u32 ul_num_bytes, u32 mem_type);
++static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type);
++static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
++ u32 ul_mpu_addr, u32 virt_addr,
++ u32 ul_num_bytes, u32 ul_map_attr,
++ struct page **mapped_pages);
++static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
++ u32 virt_addr, u32 ul_num_bytes);
++static int bridge_dev_create(struct bridge_dev_context
++ **dev_cntxt,
++ struct dev_object *hdev_obj,
++ struct cfg_hostres *config_param);
++static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
++ u32 dw_cmd, void *pargs);
++static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
++static u32 user_va2_pa(struct mm_struct *mm, u32 address);
++static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
++ u32 va, u32 size,
++ struct hw_mmu_map_attrs_t *map_attrs);
++static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
++ u32 size, struct hw_mmu_map_attrs_t *attrs);
++static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
++ u32 ul_mpu_addr, u32 virt_addr,
++ u32 ul_num_bytes,
++ struct hw_mmu_map_attrs_t *hw_attrs);
++
++bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
++
++/* ----------------------------------- Globals */
++
++/* Attributes of L2 page tables for DSP MMU */
++struct page_info {
++ u32 num_entries; /* Number of valid PTEs in the L2 PT */
++};
++
++/* Attributes used to manage the DSP MMU page tables */
++struct pg_table_attrs {
++ spinlock_t pg_lock; /* Critical section object handle */
++
++ u32 l1_base_pa; /* Physical address of the L1 PT */
++ u32 l1_base_va; /* Virtual address of the L1 PT */
++ u32 l1_size; /* Size of the L1 PT */
++ u32 l1_tbl_alloc_pa;
++ /* Physical address of Allocated mem for L1 table. May not be aligned */
++ u32 l1_tbl_alloc_va;
++ /* Virtual address of Allocated mem for L1 table. May not be aligned */
++ u32 l1_tbl_alloc_sz;
++ /* Size of consistent memory allocated for L1 table.
++ * May not be aligned */
++
++ u32 l2_base_pa; /* Physical address of the L2 PT */
++ u32 l2_base_va; /* Virtual address of the L2 PT */
++ u32 l2_size; /* Size of the L2 PT */
++ u32 l2_tbl_alloc_pa;
++ /* Physical address of Allocated mem for L2 table. May not be aligned */
++ u32 l2_tbl_alloc_va;
++ /* Virtual address of Allocated mem for L2 table. May not be aligned */
++ u32 l2_tbl_alloc_sz;
++ /* Size of consistent memory allocated for L2 table.
++ * May not be aligned */
++
++ u32 l2_num_pages; /* Number of allocated L2 PT */
++ /* Array [l2_num_pages] of L2 PT info structs */
++ struct page_info *pg_info;
++};
++
++/*
++ * This Bridge driver's function interface table.
++ */
++static struct bridge_drv_interface drv_interface_fxns = {
++ /* Bridge API ver. for which this bridge driver is built. */
++ BRD_API_MAJOR_VERSION,
++ BRD_API_MINOR_VERSION,
++ bridge_dev_create,
++ bridge_dev_destroy,
++ bridge_dev_ctrl,
++ bridge_brd_monitor,
++ bridge_brd_start,
++ bridge_brd_stop,
++ bridge_brd_status,
++ bridge_brd_read,
++ bridge_brd_write,
++ bridge_brd_set_state,
++ bridge_brd_mem_copy,
++ bridge_brd_mem_write,
++ bridge_brd_mem_map,
++ bridge_brd_mem_un_map,
++ /* The following CHNL functions are provided by chnl_io.lib: */
++ bridge_chnl_create,
++ bridge_chnl_destroy,
++ bridge_chnl_open,
++ bridge_chnl_close,
++ bridge_chnl_add_io_req,
++ bridge_chnl_get_ioc,
++ bridge_chnl_cancel_io,
++ bridge_chnl_flush_io,
++ bridge_chnl_get_info,
++ bridge_chnl_get_mgr_info,
++ bridge_chnl_idle,
++ bridge_chnl_register_notify,
++ /* The following IO functions are provided by chnl_io.lib: */
++ bridge_io_create,
++ bridge_io_destroy,
++ bridge_io_on_loaded,
++ bridge_io_get_proc_load,
++ /* The following msg_ctrl functions are provided by chnl_io.lib: */
++ bridge_msg_create,
++ bridge_msg_create_queue,
++ bridge_msg_delete,
++ bridge_msg_delete_queue,
++ bridge_msg_get,
++ bridge_msg_put,
++ bridge_msg_register_notify,
++ bridge_msg_set_queue_id,
++};
++
++static inline void flush_all(struct bridge_dev_context *dev_context)
++{
++ if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
++ dev_context->dw_brd_state == BRD_HIBERNATION)
++ wake_dsp(dev_context, NULL);
++
++ hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
++}
++
++static void bad_page_dump(u32 pa, struct page *pg)
++{
++ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
++ pr_emerg("Bad page state in process '%s'\n"
++ "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
++ "Backtrace:\n",
++ current->comm, pg, (int)(2 * sizeof(unsigned long)),
++ (unsigned long)pg->flags, pg->mapping,
++ page_mapcount(pg), page_count(pg));
++ dump_stack();
++}
++
++/*
++ * ======== bridge_drv_entry ========
++ * purpose:
++ * Bridge Driver entry point.
++ */
++void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
++ const char *driver_file_name)
++{
++
++ DBC_REQUIRE(driver_file_name != NULL);
++
++ io_sm_init(); /* Initialization of io_sm module */
++
++ if (strcmp(driver_file_name, "UMA") == 0)
++ *drv_intf = &drv_interface_fxns;
++ else
++ dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
++
++}
++
++/*
++ * ======== bridge_brd_monitor ========
++ * purpose:
++ * This bridge_brd_monitor puts DSP into a Loadable state.
++ * i.e Application can load and start the device.
++ *
++ * Preconditions:
++ * Device in 'OFF' state.
++ */
++static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
++{
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ u32 temp;
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
++ OMAP_POWERSTATEST_MASK;
++ if (!(temp & 0x02)) {
++ /* IVA2 is not in ON state */
++ /* Read and set PM_PWSTCTRL_IVA2 to ON */
++ (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
++ PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
++ /* Set the SW supervised state transition */
++ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
++ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
++
++ /* Wait until the state has moved to ON */
++ while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
++ OMAP_INTRANSITION_MASK)
++ ;
++ /* Disable Automatic transition */
++ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
++ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
++ }
++ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
++ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
++ dsp_clk_enable(DSP_CLK_IVA2);
++
++ /* set the device state to IDLE */
++ dev_context->dw_brd_state = BRD_IDLE;
++
++ return 0;
++}
++
++/*
++ * ======== bridge_brd_read ========
++ * purpose:
++ * Reads buffers for DSP memory.
++ */
++static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ u32 offset;
++ u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
++
++ if (dsp_addr < dev_context->dw_dsp_start_add) {
++ status = -EPERM;
++ return status;
++ }
++ /* change here to account for the 3 bands of the DSP internal memory */
++ if ((dsp_addr - dev_context->dw_dsp_start_add) <
++ dev_context->dw_internal_size) {
++ offset = dsp_addr - dev_context->dw_dsp_start_add;
++ } else {
++ status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
++ ul_num_bytes, mem_type);
++ return status;
++ }
++ /* copy the data from DSP memory, */
++ memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
++ return status;
++}
++
++/*
++ * ======== bridge_brd_set_state ========
++ * purpose:
++ * This routine updates the Board status.
++ */
++static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
++ u32 brd_state)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++
++ dev_context->dw_brd_state = brd_state;
++ return status;
++}
++
++/*
++ * ======== bridge_brd_start ========
++ * purpose:
++ * Initializes DSP MMU and Starts DSP.
++ *
++ * Preconditions:
++ * a) DSP domain is 'ACTIVE'.
++ * b) DSP_RST1 is asserted.
++ * b) DSP_RST2 is released.
++ */
++static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ u32 dw_sync_addr = 0;
++ u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
++ u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
++ u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
++ /* Offset of shm_base_virt from tlb_base_virt */
++ u32 ul_shm_offset_virt;
++ s32 entry_ndx;
++ s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
++ struct cfg_hostres *resources = NULL;
++ u32 temp;
++ u32 ul_dsp_clk_rate;
++ u32 ul_dsp_clk_addr;
++ u32 ul_bios_gp_timer;
++ u32 clk_cmd;
++ struct io_mgr *hio_mgr;
++ u32 ul_load_monitor_timer;
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ /* The device context contains all the mmu setup info from when the
++ * last dsp base image was loaded. The first entry is always
++ * SHMMEM base. */
++ /* Get SHM_BEG - convert to byte address */
++ (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
++ &ul_shm_base_virt);
++ ul_shm_base_virt *= DSPWORDSIZE;
++ DBC_ASSERT(ul_shm_base_virt != 0);
++ /* DSP Virtual address */
++ ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
++ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
++ ul_shm_offset_virt =
++ ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
++ /* Kernel logical address */
++ ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
++
++ DBC_ASSERT(ul_shm_base != 0);
++ /* 2nd wd is used as sync field */
++ dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
++ /* Write a signature into the shm base + offset; this will
++ * get cleared when the DSP program starts. */
++ if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
++ pr_err("%s: Illegal SM base\n", __func__);
++ status = -EPERM;
++ } else
++ __raw_writel(0xffffffff, dw_sync_addr);
++
++ if (!status) {
++ resources = dev_context->resources;
++ if (!resources)
++ status = -EPERM;
++
++ /* Assert RST1 i.e only the RST only for DSP megacell */
++ if (!status) {
++ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
++ OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
++ OMAP2_RM_RSTCTRL);
++ /* Mask address with 1K for compatibility */
++ __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
++ OMAP343X_CTRL_REGADDR(
++ OMAP343X_CONTROL_IVA2_BOOTADDR));
++ /*
++ * Set bootmode to self loop if dsp_debug flag is true
++ */
++ __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
++ OMAP343X_CTRL_REGADDR(
++ OMAP343X_CONTROL_IVA2_BOOTMOD));
++ }
++ }
++ if (!status) {
++ /* Reset and Unreset the RST2, so that BOOTADDR is copied to
++ * IVA2 SYSC register */
++ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
++ OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
++ udelay(100);
++ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
++ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
++ udelay(100);
++
++ /* Disbale the DSP MMU */
++ hw_mmu_disable(resources->dw_dmmu_base);
++ /* Disable TWL */
++ hw_mmu_twl_disable(resources->dw_dmmu_base);
++
++ /* Only make TLB entry if both addresses are non-zero */
++ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
++ entry_ndx++) {
++ struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
++ struct hw_mmu_map_attrs_t map_attrs = {
++ .endianism = e->endianism,
++ .element_size = e->elem_size,
++ .mixed_size = e->mixed_mode,
++ };
++
++ if (!e->ul_gpp_pa || !e->ul_dsp_va)
++ continue;
++
++ dev_dbg(bridge,
++ "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
++ itmp_entry_ndx,
++ e->ul_gpp_pa,
++ e->ul_dsp_va,
++ e->ul_size);
++
++ hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
++ e->ul_gpp_pa,
++ e->ul_dsp_va,
++ e->ul_size,
++ itmp_entry_ndx,
++ &map_attrs, 1, 1);
++
++ itmp_entry_ndx++;
++ }
++ }
++
++ /* Lock the above TLB entries and get the BIOS and load monitor timer
++ * information */
++ if (!status) {
++ hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
++ hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
++ hw_mmu_ttb_set(resources->dw_dmmu_base,
++ dev_context->pt_attrs->l1_base_pa);
++ hw_mmu_twl_enable(resources->dw_dmmu_base);
++ /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
++
++ temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
++ temp = (temp & 0xFFFFFFEF) | 0x11;
++ __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
++
++ /* Let the DSP MMU run */
++ hw_mmu_enable(resources->dw_dmmu_base);
++
++ /* Enable the BIOS clock */
++ (void)dev_get_symbol(dev_context->hdev_obj,
++ BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
++ (void)dev_get_symbol(dev_context->hdev_obj,
++ BRIDGEINIT_LOADMON_GPTIMER,
++ &ul_load_monitor_timer);
++ }
++
++ if (!status) {
++ if (ul_load_monitor_timer != 0xFFFF) {
++ clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
++ ul_load_monitor_timer;
++ dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
++ } else {
++ dev_dbg(bridge, "Not able to get the symbol for Load "
++ "Monitor Timer\n");
++ }
++ }
++
++ if (!status) {
++ if (ul_bios_gp_timer != 0xFFFF) {
++ clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
++ ul_bios_gp_timer;
++ dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
++ } else {
++ dev_dbg(bridge,
++ "Not able to get the symbol for BIOS Timer\n");
++ }
++ }
++
++ if (!status) {
++ /* Set the DSP clock rate */
++ (void)dev_get_symbol(dev_context->hdev_obj,
++ "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
++ /*Set Autoidle Mode for IVA2 PLL */
++ (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
++ OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
++
++ if ((unsigned int *)ul_dsp_clk_addr != NULL) {
++ /* Get the clock rate */
++ ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
++ dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
++ __func__, ul_dsp_clk_rate);
++ (void)bridge_brd_write(dev_context,
++ (u8 *) &ul_dsp_clk_rate,
++ ul_dsp_clk_addr, sizeof(u32), 0);
++ }
++ /*
++ * Enable Mailbox events and also drain any pending
++ * stale messages.
++ */
++ dev_context->mbox = omap_mbox_get("dsp");
++ if (IS_ERR(dev_context->mbox)) {
++ dev_context->mbox = NULL;
++ pr_err("%s: Failed to get dsp mailbox handle\n",
++ __func__);
++ status = -EPERM;
++ }
++
++ }
++ if (!status) {
++ dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
++
++/*PM_IVA2GRPSEL_PER = 0xC0;*/
++ temp = readl(resources->dw_per_pm_base + 0xA8);
++ temp = (temp & 0xFFFFFF30) | 0xC0;
++ writel(temp, resources->dw_per_pm_base + 0xA8);
++
++/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
++ temp = readl(resources->dw_per_pm_base + 0xA4);
++ temp = (temp & 0xFFFFFF3F);
++ writel(temp, resources->dw_per_pm_base + 0xA4);
++/*CM_SLEEPDEP_PER |= 0x04; */
++ temp = readl(resources->dw_per_base + 0x44);
++ temp = (temp & 0xFFFFFFFB) | 0x04;
++ writel(temp, resources->dw_per_base + 0x44);
++
++/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
++ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
++ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
++
++ /* Let DSP go */
++ dev_dbg(bridge, "%s Unreset\n", __func__);
++ /* Enable DSP MMU Interrupts */
++ hw_mmu_event_enable(resources->dw_dmmu_base,
++ HW_MMU_ALL_INTERRUPTS);
++ /* release the RST1, DSP starts executing now .. */
++ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
++ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
++
++ dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
++ dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
++ if (dsp_debug)
++ while (__raw_readw(dw_sync_addr))
++ ;;
++
++ /* Wait for DSP to clear word in shared memory */
++ /* Read the Location */
++ if (!wait_for_start(dev_context, dw_sync_addr))
++ status = -ETIMEDOUT;
++
++ /* Start wdt */
++ dsp_wdt_sm_set((void *)ul_shm_base);
++ dsp_wdt_enable(true);
++
++ status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
++ if (hio_mgr) {
++ io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
++ /* Write the synchronization bit to indicate the
++ * completion of OPP table update to DSP
++ */
++ __raw_writel(0XCAFECAFE, dw_sync_addr);
++
++ /* update board state */
++ dev_context->dw_brd_state = BRD_RUNNING;
++ /* (void)chnlsm_enable_interrupt(dev_context); */
++ } else {
++ dev_context->dw_brd_state = BRD_UNKNOWN;
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_brd_stop ========
++ * purpose:
++ * Puts DSP in self loop.
++ *
++ * Preconditions :
++ * a) None
++ */
++static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ struct pg_table_attrs *pt_attrs;
++ u32 dsp_pwr_state;
++ int clk_status;
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ if (dev_context->dw_brd_state == BRD_STOPPED)
++ return status;
++
++ /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
++ * before turning off the clocks.. This is to ensure that there are no
++ * pending L3 or other transactons from IVA2 */
++ dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
++ OMAP_POWERSTATEST_MASK;
++ if (dsp_pwr_state != PWRDM_POWER_OFF) {
++ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
++ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
++ sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
++ mdelay(10);
++
++ /* IVA2 is not in OFF state */
++ /* Set PM_PWSTCTRL_IVA2 to OFF */
++ (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
++ PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
++ /* Set the SW supervised state transition for Sleep */
++ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
++ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
++ }
++ udelay(10);
++ /* Release the Ext Base virtual Address as the next DSP Program
++ * may have a different load address */
++ if (dev_context->dw_dsp_ext_base_addr)
++ dev_context->dw_dsp_ext_base_addr = 0;
++
++ dev_context->dw_brd_state = BRD_STOPPED; /* update board state */
++
++ dsp_wdt_enable(false);
++
++ /* This is a good place to clear the MMU page tables as well */
++ if (dev_context->pt_attrs) {
++ pt_attrs = dev_context->pt_attrs;
++ memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
++ memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
++ memset((u8 *) pt_attrs->pg_info, 0x00,
++ (pt_attrs->l2_num_pages * sizeof(struct page_info)));
++ }
++ /* Disable the mailbox interrupts */
++ if (dev_context->mbox) {
++ omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
++ omap_mbox_put(dev_context->mbox);
++ dev_context->mbox = NULL;
++ }
++ /* Reset IVA2 clocks*/
++ (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
++ OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
++
++ clk_status = dsp_clk_disable(DSP_CLK_IVA2);
++
++ return status;
++}
++
++/*
++ * ======== bridge_brd_status ========
++ * Returns the board status.
++ */
++static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
++ int *board_state)
++{
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ *board_state = dev_context->dw_brd_state;
++ return 0;
++}
++
++/*
++ * ======== bridge_brd_write ========
++ * Copies the buffers to DSP internal or external memory.
++ */
++static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++
++ if (dsp_addr < dev_context->dw_dsp_start_add) {
++ status = -EPERM;
++ return status;
++ }
++ if ((dsp_addr - dev_context->dw_dsp_start_add) <
++ dev_context->dw_internal_size) {
++ status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
++ ul_num_bytes, mem_type);
++ } else {
++ status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
++ ul_num_bytes, mem_type, false);
++ }
++
++ return status;
++}
++
++/*
++ * ======== bridge_dev_create ========
++ * Creates a driver object. Puts DSP in self loop.
++ */
++static int bridge_dev_create(struct bridge_dev_context
++ **dev_cntxt,
++ struct dev_object *hdev_obj,
++ struct cfg_hostres *config_param)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = NULL;
++ s32 entry_ndx;
++ struct cfg_hostres *resources = config_param;
++ struct pg_table_attrs *pt_attrs;
++ u32 pg_tbl_pa;
++ u32 pg_tbl_va;
++ u32 align_size;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ /* Allocate and initialize a data structure to contain the bridge driver
++ * state, which becomes the context for later calls into this driver */
++ dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
++ if (!dev_context) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
++ dev_context->dw_self_loop = (u32) NULL;
++ dev_context->dsp_per_clks = 0;
++ dev_context->dw_internal_size = OMAP_DSP_SIZE;
++ /* Clear dev context MMU table entries.
++ * These get set on bridge_io_on_loaded() call after program loaded. */
++ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
++ dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
++ dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
++ }
++ dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
++ (config_param->
++ dw_mem_base
++ [3]),
++ config_param->
++ dw_mem_length
++ [3]);
++ if (!dev_context->dw_dsp_base_addr)
++ status = -EPERM;
++
++ pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
++ if (pt_attrs != NULL) {
++ /* Assuming that we use only DSP's memory map
++ * until 0x4000:0000 , we would need only 1024
++ * L1 enties i.e L1 size = 4K */
++ pt_attrs->l1_size = 0x1000;
++ align_size = pt_attrs->l1_size;
++ /* Align sizes are expected to be power of 2 */
++ /* we like to get aligned on L1 table size */
++ pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
++ align_size, &pg_tbl_pa);
++
++ /* Check if the PA is aligned for us */
++ if ((pg_tbl_pa) & (align_size - 1)) {
++ /* PA not aligned to page table size ,
++ * try with more allocation and align */
++ mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
++ pt_attrs->l1_size);
++ /* we like to get aligned on L1 table size */
++ pg_tbl_va =
++ (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
++ align_size, &pg_tbl_pa);
++ /* We should be able to get aligned table now */
++ pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
++ pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
++ pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
++ /* Align the PA to the next 'align' boundary */
++ pt_attrs->l1_base_pa =
++ ((pg_tbl_pa) +
++ (align_size - 1)) & (~(align_size - 1));
++ pt_attrs->l1_base_va =
++ pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
++ } else {
++ /* We got aligned PA, cool */
++ pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
++ pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
++ pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
++ pt_attrs->l1_base_pa = pg_tbl_pa;
++ pt_attrs->l1_base_va = pg_tbl_va;
++ }
++ if (pt_attrs->l1_base_va)
++ memset((u8 *) pt_attrs->l1_base_va, 0x00,
++ pt_attrs->l1_size);
++
++ /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
++ * L4 pages */
++ pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
++ pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
++ pt_attrs->l2_num_pages;
++ align_size = 4; /* Make it u32 aligned */
++ /* we like to get aligned on L1 table size */
++ pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
++ align_size, &pg_tbl_pa);
++ pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
++ pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
++ pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
++ pt_attrs->l2_base_pa = pg_tbl_pa;
++ pt_attrs->l2_base_va = pg_tbl_va;
++
++ if (pt_attrs->l2_base_va)
++ memset((u8 *) pt_attrs->l2_base_va, 0x00,
++ pt_attrs->l2_size);
++
++ pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
++ sizeof(struct page_info), GFP_KERNEL);
++ dev_dbg(bridge,
++ "L1 pa %x, va %x, size %x\n L2 pa %x, va "
++ "%x, size %x\n", pt_attrs->l1_base_pa,
++ pt_attrs->l1_base_va, pt_attrs->l1_size,
++ pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
++ pt_attrs->l2_size);
++ dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
++ pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
++ }
++ if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
++ (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
++ dev_context->pt_attrs = pt_attrs;
++ else
++ status = -ENOMEM;
++
++ if (!status) {
++ spin_lock_init(&pt_attrs->pg_lock);
++ dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
++
++ /* Set the Clock Divisor for the DSP module */
++ udelay(5);
++ /* MMU address is obtained from the host
++ * resources struct */
++ dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
++ }
++ if (!status) {
++ dev_context->hdev_obj = hdev_obj;
++ /* Store current board state. */
++ dev_context->dw_brd_state = BRD_UNKNOWN;
++ dev_context->resources = resources;
++ dsp_clk_enable(DSP_CLK_IVA2);
++ bridge_brd_stop(dev_context);
++ /* Return ptr to our device state to the DSP API for storage */
++ *dev_cntxt = dev_context;
++ } else {
++ if (pt_attrs != NULL) {
++ kfree(pt_attrs->pg_info);
++
++ if (pt_attrs->l2_tbl_alloc_va) {
++ mem_free_phys_mem((void *)
++ pt_attrs->l2_tbl_alloc_va,
++ pt_attrs->l2_tbl_alloc_pa,
++ pt_attrs->l2_tbl_alloc_sz);
++ }
++ if (pt_attrs->l1_tbl_alloc_va) {
++ mem_free_phys_mem((void *)
++ pt_attrs->l1_tbl_alloc_va,
++ pt_attrs->l1_tbl_alloc_pa,
++ pt_attrs->l1_tbl_alloc_sz);
++ }
++ }
++ kfree(pt_attrs);
++ kfree(dev_context);
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== bridge_dev_ctrl ========
++ * Receives device specific commands.
++ */
++static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
++ u32 dw_cmd, void *pargs)
++{
++ int status = 0;
++ struct bridge_ioctl_extproc *pa_ext_proc =
++ (struct bridge_ioctl_extproc *)pargs;
++ s32 ndx;
++
++ switch (dw_cmd) {
++ case BRDIOCTL_CHNLREAD:
++ break;
++ case BRDIOCTL_CHNLWRITE:
++ break;
++ case BRDIOCTL_SETMMUCONFIG:
++ /* store away dsp-mmu setup values for later use */
++ for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
++ dev_context->atlb_entry[ndx] = *pa_ext_proc;
++ break;
++ case BRDIOCTL_DEEPSLEEP:
++ case BRDIOCTL_EMERGENCYSLEEP:
++ /* Currently only DSP Idle is supported Need to update for
++ * later releases */
++ status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
++ break;
++ case BRDIOCTL_WAKEUP:
++ status = wake_dsp(dev_context, pargs);
++ break;
++ case BRDIOCTL_CLK_CTRL:
++ status = 0;
++ /* Looking For Baseport Fix for Clocks */
++ status = dsp_peripheral_clk_ctrl(dev_context, pargs);
++ break;
++ case BRDIOCTL_PWR_HIBERNATE:
++ status = handle_hibernation_from_dsp(dev_context);
++ break;
++ case BRDIOCTL_PRESCALE_NOTIFY:
++ status = pre_scale_dsp(dev_context, pargs);
++ break;
++ case BRDIOCTL_POSTSCALE_NOTIFY:
++ status = post_scale_dsp(dev_context, pargs);
++ break;
++ case BRDIOCTL_CONSTRAINT_REQUEST:
++ status = handle_constraints_set(dev_context, pargs);
++ break;
++ default:
++ status = -EPERM;
++ break;
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_dev_destroy ========
++ * Destroys the driver object.
++ */
++static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
++{
++ struct pg_table_attrs *pt_attrs;
++ int status = 0;
++ struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
++ dev_ctxt;
++ struct cfg_hostres *host_res;
++ u32 shm_size;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ /* It should never happen */
++ if (!dev_ctxt)
++ return -EFAULT;
++
++ /* first put the device to stop state */
++ bridge_brd_stop(dev_context);
++ if (dev_context->pt_attrs) {
++ pt_attrs = dev_context->pt_attrs;
++ kfree(pt_attrs->pg_info);
++
++ if (pt_attrs->l2_tbl_alloc_va) {
++ mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
++ pt_attrs->l2_tbl_alloc_pa,
++ pt_attrs->l2_tbl_alloc_sz);
++ }
++ if (pt_attrs->l1_tbl_alloc_va) {
++ mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
++ pt_attrs->l1_tbl_alloc_pa,
++ pt_attrs->l1_tbl_alloc_sz);
++ }
++ kfree(pt_attrs);
++
++ }
++
++ if (dev_context->resources) {
++ host_res = dev_context->resources;
++ shm_size = drv_datap->shm_size;
++ if (shm_size >= 0x10000) {
++ if ((host_res->dw_mem_base[1]) &&
++ (host_res->dw_mem_phys[1])) {
++ mem_free_phys_mem((void *)
++ host_res->dw_mem_base
++ [1],
++ host_res->dw_mem_phys
++ [1], shm_size);
++ }
++ } else {
++ dev_dbg(bridge, "%s: Error getting shm size "
++ "from registry: %x. Not calling "
++ "mem_free_phys_mem\n", __func__,
++ status);
++ }
++ host_res->dw_mem_base[1] = 0;
++ host_res->dw_mem_phys[1] = 0;
++
++ if (host_res->dw_mem_base[0])
++ iounmap((void *)host_res->dw_mem_base[0]);
++ if (host_res->dw_mem_base[2])
++ iounmap((void *)host_res->dw_mem_base[2]);
++ if (host_res->dw_mem_base[3])
++ iounmap((void *)host_res->dw_mem_base[3]);
++ if (host_res->dw_mem_base[4])
++ iounmap((void *)host_res->dw_mem_base[4]);
++ if (host_res->dw_dmmu_base)
++ iounmap(host_res->dw_dmmu_base);
++ if (host_res->dw_per_base)
++ iounmap(host_res->dw_per_base);
++ if (host_res->dw_per_pm_base)
++ iounmap((void *)host_res->dw_per_pm_base);
++ if (host_res->dw_core_pm_base)
++ iounmap((void *)host_res->dw_core_pm_base);
++ if (host_res->dw_sys_ctrl_base)
++ iounmap(host_res->dw_sys_ctrl_base);
++
++ host_res->dw_mem_base[0] = (u32) NULL;
++ host_res->dw_mem_base[2] = (u32) NULL;
++ host_res->dw_mem_base[3] = (u32) NULL;
++ host_res->dw_mem_base[4] = (u32) NULL;
++ host_res->dw_dmmu_base = NULL;
++ host_res->dw_sys_ctrl_base = NULL;
++
++ kfree(host_res);
++ }
++
++ /* Free the driver's device context: */
++ kfree(drv_datap->base_img);
++ kfree(drv_datap);
++ dev_set_drvdata(bridge, NULL);
++ kfree((void *)dev_ctxt);
++ return status;
++}
++
++static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_dest_addr, u32 dsp_src_addr,
++ u32 ul_num_bytes, u32 mem_type)
++{
++ int status = 0;
++ u32 src_addr = dsp_src_addr;
++ u32 dest_addr = dsp_dest_addr;
++ u32 copy_bytes = 0;
++ u32 total_bytes = ul_num_bytes;
++ u8 host_buf[BUFFERSIZE];
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ while (total_bytes > 0 && !status) {
++ copy_bytes =
++ total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
++ /* Read from External memory */
++ status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
++ copy_bytes, mem_type);
++ if (!status) {
++ if (dest_addr < (dev_context->dw_dsp_start_add +
++ dev_context->dw_internal_size)) {
++ /* Write to Internal memory */
++ status = write_dsp_data(dev_ctxt, host_buf,
++ dest_addr, copy_bytes,
++ mem_type);
++ } else {
++ /* Write to External memory */
++ status =
++ write_ext_dsp_data(dev_ctxt, host_buf,
++ dest_addr, copy_bytes,
++ mem_type, false);
++ }
++ }
++ total_bytes -= copy_bytes;
++ src_addr += copy_bytes;
++ dest_addr += copy_bytes;
++ }
++ return status;
++}
++
++/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
++static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ u32 ul_remain_bytes = 0;
++ u32 ul_bytes = 0;
++ ul_remain_bytes = ul_num_bytes;
++ while (ul_remain_bytes > 0 && !status) {
++ ul_bytes =
++ ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
++ if (dsp_addr < (dev_context->dw_dsp_start_add +
++ dev_context->dw_internal_size)) {
++ status =
++ write_dsp_data(dev_ctxt, host_buff, dsp_addr,
++ ul_bytes, mem_type);
++ } else {
++ status = write_ext_dsp_data(dev_ctxt, host_buff,
++ dsp_addr, ul_bytes,
++ mem_type, true);
++ }
++ ul_remain_bytes -= ul_bytes;
++ dsp_addr += ul_bytes;
++ host_buff = host_buff + ul_bytes;
++ }
++ return status;
++}
++
++/*
++ * ======== bridge_brd_mem_map ========
++ * This function maps MPU buffer to the DSP address space. It performs
++ * linear to physical address translation if required. It translates each
++ * page since linear addresses can be physically non-contiguous
++ * All address & size arguments are assumed to be page aligned (in proc.c)
++ *
++ * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
++ */
++static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
++ u32 ul_mpu_addr, u32 virt_addr,
++ u32 ul_num_bytes, u32 ul_map_attr,
++ struct page **mapped_pages)
++{
++ u32 attrs;
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ struct hw_mmu_map_attrs_t hw_attrs;
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ u32 write = 0;
++ u32 num_usr_pgs = 0;
++ struct page *mapped_page, *pg;
++ s32 pg_num;
++ u32 va = virt_addr;
++ struct task_struct *curr_task = current;
++ u32 pg_i = 0;
++ u32 mpu_addr, pa;
++
++ dev_dbg(bridge,
++ "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
++ __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
++ ul_map_attr);
++ if (ul_num_bytes == 0)
++ return -EINVAL;
++
++ if (ul_map_attr & DSP_MAP_DIR_MASK) {
++ attrs = ul_map_attr;
++ } else {
++ /* Assign default attributes */
++ attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
++ }
++ /* Take mapping properties */
++ if (attrs & DSP_MAPBIGENDIAN)
++ hw_attrs.endianism = HW_BIG_ENDIAN;
++ else
++ hw_attrs.endianism = HW_LITTLE_ENDIAN;
++
++ hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
++ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
++ /* Ignore element_size if mixed_size is enabled */
++ if (hw_attrs.mixed_size == 0) {
++ if (attrs & DSP_MAPELEMSIZE8) {
++ /* Size is 8 bit */
++ hw_attrs.element_size = HW_ELEM_SIZE8BIT;
++ } else if (attrs & DSP_MAPELEMSIZE16) {
++ /* Size is 16 bit */
++ hw_attrs.element_size = HW_ELEM_SIZE16BIT;
++ } else if (attrs & DSP_MAPELEMSIZE32) {
++ /* Size is 32 bit */
++ hw_attrs.element_size = HW_ELEM_SIZE32BIT;
++ } else if (attrs & DSP_MAPELEMSIZE64) {
++ /* Size is 64 bit */
++ hw_attrs.element_size = HW_ELEM_SIZE64BIT;
++ } else {
++ /*
++ * Mixedsize isn't enabled, so size can't be
++ * zero here
++ */
++ return -EINVAL;
++ }
++ }
++ if (attrs & DSP_MAPDONOTLOCK)
++ hw_attrs.donotlockmpupage = 1;
++ else
++ hw_attrs.donotlockmpupage = 0;
++
++ if (attrs & DSP_MAPVMALLOCADDR) {
++ return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
++ ul_num_bytes, &hw_attrs);
++ }
++ /*
++ * Do OS-specific user-va to pa translation.
++ * Combine physically contiguous regions to reduce TLBs.
++ * Pass the translated pa to pte_update.
++ */
++ if ((attrs & DSP_MAPPHYSICALADDR)) {
++ status = pte_update(dev_context, ul_mpu_addr, virt_addr,
++ ul_num_bytes, &hw_attrs);
++ goto func_cont;
++ }
++
++ /*
++ * Important Note: ul_mpu_addr is mapped from user application process
++ * to current process - it must lie completely within the current
++ * virtual memory address space in order to be of use to us here!
++ */
++ down_read(&mm->mmap_sem);
++ vma = find_vma(mm, ul_mpu_addr);
++ if (vma)
++ dev_dbg(bridge,
++ "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
++ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
++ ul_num_bytes, vma->vm_start, vma->vm_end,
++ vma->vm_flags);
++
++ /*
++ * It is observed that under some circumstances, the user buffer is
++ * spread across several VMAs. So loop through and check if the entire
++ * user buffer is covered
++ */
++ while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
++ /* jump to the next VMA region */
++ vma = find_vma(mm, vma->vm_end + 1);
++ dev_dbg(bridge,
++ "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
++ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
++ ul_num_bytes, vma->vm_start, vma->vm_end,
++ vma->vm_flags);
++ }
++ if (!vma) {
++ pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
++ __func__, ul_mpu_addr, ul_num_bytes);
++ status = -EINVAL;
++ up_read(&mm->mmap_sem);
++ goto func_cont;
++ }
++
++ if (vma->vm_flags & VM_IO) {
++ num_usr_pgs = ul_num_bytes / PG_SIZE4K;
++ mpu_addr = ul_mpu_addr;
++
++ /* Get the physical addresses for user buffer */
++ for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
++ pa = user_va2_pa(mm, mpu_addr);
++ if (!pa) {
++ status = -EPERM;
++ pr_err("DSPBRIDGE: VM_IO mapping physical"
++ "address is invalid\n");
++ break;
++ }
++ if (pfn_valid(__phys_to_pfn(pa))) {
++ pg = PHYS_TO_PAGE(pa);
++ get_page(pg);
++ if (page_count(pg) < 1) {
++ pr_err("Bad page in VM_IO buffer\n");
++ bad_page_dump(pa, pg);
++ }
++ }
++ status = pte_set(dev_context->pt_attrs, pa,
++ va, HW_PAGE_SIZE4KB, &hw_attrs);
++ if (status)
++ break;
++
++ va += HW_PAGE_SIZE4KB;
++ mpu_addr += HW_PAGE_SIZE4KB;
++ pa += HW_PAGE_SIZE4KB;
++ }
++ } else {
++ num_usr_pgs = ul_num_bytes / PG_SIZE4K;
++ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
++ write = 1;
++
++ for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
++ pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
++ write, 1, &mapped_page, NULL);
++ if (pg_num > 0) {
++ if (page_count(mapped_page) < 1) {
++ pr_err("Bad page count after doing"
++ "get_user_pages on"
++ "user buffer\n");
++ bad_page_dump(page_to_phys(mapped_page),
++ mapped_page);
++ }
++ status = pte_set(dev_context->pt_attrs,
++ page_to_phys(mapped_page), va,
++ HW_PAGE_SIZE4KB, &hw_attrs);
++ if (status)
++ break;
++
++ if (mapped_pages)
++ mapped_pages[pg_i] = mapped_page;
++
++ va += HW_PAGE_SIZE4KB;
++ ul_mpu_addr += HW_PAGE_SIZE4KB;
++ } else {
++ pr_err("DSPBRIDGE: get_user_pages FAILED,"
++ "MPU addr = 0x%x,"
++ "vma->vm_flags = 0x%lx,"
++ "get_user_pages Err"
++ "Value = %d, Buffer"
++ "size=0x%x\n", ul_mpu_addr,
++ vma->vm_flags, pg_num, ul_num_bytes);
++ status = -EPERM;
++ break;
++ }
++ }
++ }
++ up_read(&mm->mmap_sem);
++func_cont:
++ if (status) {
++ /*
++ * Roll out the mapped pages incase it failed in middle of
++ * mapping
++ */
++ if (pg_i) {
++ bridge_brd_mem_un_map(dev_context, virt_addr,
++ (pg_i * PG_SIZE4K));
++ }
++ status = -EPERM;
++ }
++ /*
++ * In any case, flush the TLB
++ * This is called from here instead from pte_update to avoid unnecessary
++ * repetition while mapping non-contiguous physical regions of a virtual
++ * region
++ */
++ flush_all(dev_context);
++ dev_dbg(bridge, "%s status %x\n", __func__, status);
++ return status;
++}
++
++/*
++ * ======== bridge_brd_mem_un_map ========
++ * Invalidate the PTEs for the DSP VA block to be unmapped.
++ *
++ * PTEs of a mapped memory block are contiguous in any page table
++ * So, instead of looking up the PTE address for every 4K block,
++ * we clear consecutive PTEs until we unmap all the bytes
++ */
++static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
++ u32 virt_addr, u32 ul_num_bytes)
++{
++ u32 l1_base_va;
++ u32 l2_base_va;
++ u32 l2_base_pa;
++ u32 l2_page_num;
++ u32 pte_val;
++ u32 pte_size;
++ u32 pte_count;
++ u32 pte_addr_l1;
++ u32 pte_addr_l2 = 0;
++ u32 rem_bytes;
++ u32 rem_bytes_l2;
++ u32 va_curr;
++ struct page *pg = NULL;
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ struct pg_table_attrs *pt = dev_context->pt_attrs;
++ u32 temp;
++ u32 paddr;
++ u32 numof4k_pages = 0;
++
++ va_curr = virt_addr;
++ rem_bytes = ul_num_bytes;
++ rem_bytes_l2 = 0;
++ l1_base_va = pt->l1_base_va;
++ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
++ dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
++ "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
++ ul_num_bytes, l1_base_va, pte_addr_l1);
++
++ while (rem_bytes && !status) {
++ u32 va_curr_orig = va_curr;
++ /* Find whether the L1 PTE points to a valid L2 PT */
++ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
++ pte_val = *(u32 *) pte_addr_l1;
++ pte_size = hw_mmu_pte_size_l1(pte_val);
++
++ if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
++ goto skip_coarse_page;
++
++ /*
++ * Get the L2 PA from the L1 PTE, and find
++ * corresponding L2 VA
++ */
++ l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
++ l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
++ l2_page_num =
++ (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
++ /*
++ * Find the L2 PTE address from which we will start
++ * clearing, the number of PTEs to be cleared on this
++ * page, and the size of VA space that needs to be
++ * cleared on this L2 page
++ */
++ pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
++ pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
++ pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
++ if (rem_bytes < (pte_count * PG_SIZE4K))
++ pte_count = rem_bytes / PG_SIZE4K;
++ rem_bytes_l2 = pte_count * PG_SIZE4K;
++
++ /*
++ * Unmap the VA space on this L2 PT. A quicker way
++ * would be to clear pte_count entries starting from
++ * pte_addr_l2. However, below code checks that we don't
++ * clear invalid entries or less than 64KB for a 64KB
++ * entry. Similar checking is done for L1 PTEs too
++ * below
++ */
++ while (rem_bytes_l2 && !status) {
++ pte_val = *(u32 *) pte_addr_l2;
++ pte_size = hw_mmu_pte_size_l2(pte_val);
++ /* va_curr aligned to pte_size? */
++ if (pte_size == 0 || rem_bytes_l2 < pte_size ||
++ va_curr & (pte_size - 1)) {
++ status = -EPERM;
++ break;
++ }
++
++ /* Collect Physical addresses from VA */
++ paddr = (pte_val & ~(pte_size - 1));
++ if (pte_size == HW_PAGE_SIZE64KB)
++ numof4k_pages = 16;
++ else
++ numof4k_pages = 1;
++ temp = 0;
++ while (temp++ < numof4k_pages) {
++ if (!pfn_valid(__phys_to_pfn(paddr))) {
++ paddr += HW_PAGE_SIZE4KB;
++ continue;
++ }
++ pg = PHYS_TO_PAGE(paddr);
++ if (page_count(pg) < 1) {
++ pr_info("DSPBRIDGE: UNMAP function: "
++ "COUNT 0 FOR PA 0x%x, size = "
++ "0x%x\n", paddr, ul_num_bytes);
++ bad_page_dump(paddr, pg);
++ } else {
++ set_page_dirty(pg);
++ page_cache_release(pg);
++ }
++ paddr += HW_PAGE_SIZE4KB;
++ }
++ if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
++ status = -EPERM;
++ goto EXIT_LOOP;
++ }
++
++ status = 0;
++ rem_bytes_l2 -= pte_size;
++ va_curr += pte_size;
++ pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
++ }
++ spin_lock(&pt->pg_lock);
++ if (rem_bytes_l2 == 0) {
++ pt->pg_info[l2_page_num].num_entries -= pte_count;
++ if (pt->pg_info[l2_page_num].num_entries == 0) {
++ /*
++ * Clear the L1 PTE pointing to the L2 PT
++ */
++ if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
++ HW_MMU_COARSE_PAGE_SIZE))
++ status = 0;
++ else {
++ status = -EPERM;
++ spin_unlock(&pt->pg_lock);
++ goto EXIT_LOOP;
++ }
++ }
++ rem_bytes -= pte_count * PG_SIZE4K;
++ } else
++ status = -EPERM;
++
++ spin_unlock(&pt->pg_lock);
++ continue;
++skip_coarse_page:
++ /* va_curr aligned to pte_size? */
++ /* pte_size = 1 MB or 16 MB */
++ if (pte_size == 0 || rem_bytes < pte_size ||
++ va_curr & (pte_size - 1)) {
++ status = -EPERM;
++ break;
++ }
++
++ if (pte_size == HW_PAGE_SIZE1MB)
++ numof4k_pages = 256;
++ else
++ numof4k_pages = 4096;
++ temp = 0;
++ /* Collect Physical addresses from VA */
++ paddr = (pte_val & ~(pte_size - 1));
++ while (temp++ < numof4k_pages) {
++ if (pfn_valid(__phys_to_pfn(paddr))) {
++ pg = PHYS_TO_PAGE(paddr);
++ if (page_count(pg) < 1) {
++ pr_info("DSPBRIDGE: UNMAP function: "
++ "COUNT 0 FOR PA 0x%x, size = "
++ "0x%x\n", paddr, ul_num_bytes);
++ bad_page_dump(paddr, pg);
++ } else {
++ set_page_dirty(pg);
++ page_cache_release(pg);
++ }
++ }
++ paddr += HW_PAGE_SIZE4KB;
++ }
++ if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
++ status = 0;
++ rem_bytes -= pte_size;
++ va_curr += pte_size;
++ } else {
++ status = -EPERM;
++ goto EXIT_LOOP;
++ }
++ }
++ /*
++ * It is better to flush the TLB here, so that any stale old entries
++ * get flushed
++ */
++EXIT_LOOP:
++ flush_all(dev_context);
++ dev_dbg(bridge,
++ "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
++ " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
++ pte_addr_l2, rem_bytes, rem_bytes_l2, status);
++ return status;
++}
++
++/*
++ * ======== user_va2_pa ========
++ * Purpose:
++ * This function walks through the page tables to convert a userland
++ * virtual address to physical address
++ */
++static u32 user_va2_pa(struct mm_struct *mm, u32 address)
++{
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *ptep, pte;
++
++ pgd = pgd_offset(mm, address);
++ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
++ pmd = pmd_offset(pgd, address);
++ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
++ ptep = pte_offset_map(pmd, address);
++ if (ptep) {
++ pte = *ptep;
++ if (pte_present(pte))
++ return pte & PAGE_MASK;
++ }
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * ======== pte_update ========
++ * This function calculates the optimum page-aligned addresses and sizes
++ * Caller must pass page-aligned values
++ */
++static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
++ u32 va, u32 size,
++ struct hw_mmu_map_attrs_t *map_attrs)
++{
++ u32 i;
++ u32 all_bits;
++ u32 pa_curr = pa;
++ u32 va_curr = va;
++ u32 num_bytes = size;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ int status = 0;
++ u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
++ HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
++ };
++
++ while (num_bytes && !status) {
++ /* To find the max. page size with which both PA & VA are
++ * aligned */
++ all_bits = pa_curr | va_curr;
++
++ for (i = 0; i < 4; i++) {
++ if ((num_bytes >= page_size[i]) && ((all_bits &
++ (page_size[i] -
++ 1)) == 0)) {
++ status =
++ pte_set(dev_context->pt_attrs, pa_curr,
++ va_curr, page_size[i], map_attrs);
++ pa_curr += page_size[i];
++ va_curr += page_size[i];
++ num_bytes -= page_size[i];
++ /* Don't try smaller sizes. Hopefully we have
++ * reached an address aligned to a bigger page
++ * size */
++ break;
++ }
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== pte_set ========
++ * This function calculates PTE address (MPU virtual) to be updated
++ * It also manages the L2 page tables
++ */
++static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
++ u32 size, struct hw_mmu_map_attrs_t *attrs)
++{
++ u32 i;
++ u32 pte_val;
++ u32 pte_addr_l1;
++ u32 pte_size;
++ /* Base address of the PT that will be updated */
++ u32 pg_tbl_va;
++ u32 l1_base_va;
++ /* Compiler warns that the next three variables might be used
++ * uninitialized in this function. Doesn't seem so. Working around,
++ * anyways. */
++ u32 l2_base_va = 0;
++ u32 l2_base_pa = 0;
++ u32 l2_page_num = 0;
++ int status = 0;
++
++ l1_base_va = pt->l1_base_va;
++ pg_tbl_va = l1_base_va;
++ if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
++ /* Find whether the L1 PTE points to a valid L2 PT */
++ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
++ if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
++ pte_val = *(u32 *) pte_addr_l1;
++ pte_size = hw_mmu_pte_size_l1(pte_val);
++ } else {
++ return -EPERM;
++ }
++ spin_lock(&pt->pg_lock);
++ if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
++ /* Get the L2 PA from the L1 PTE, and find
++ * corresponding L2 VA */
++ l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
++ l2_base_va =
++ l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
++ l2_page_num =
++ (l2_base_pa -
++ pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
++ } else if (pte_size == 0) {
++ /* L1 PTE is invalid. Allocate a L2 PT and
++ * point the L1 PTE to it */
++ /* Find a free L2 PT. */
++ for (i = 0; (i < pt->l2_num_pages) &&
++ (pt->pg_info[i].num_entries != 0); i++)
++ ;;
++ if (i < pt->l2_num_pages) {
++ l2_page_num = i;
++ l2_base_pa = pt->l2_base_pa + (l2_page_num *
++ HW_MMU_COARSE_PAGE_SIZE);
++ l2_base_va = pt->l2_base_va + (l2_page_num *
++ HW_MMU_COARSE_PAGE_SIZE);
++ /* Endianness attributes are ignored for
++ * HW_MMU_COARSE_PAGE_SIZE */
++ status =
++ hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
++ HW_MMU_COARSE_PAGE_SIZE,
++ attrs);
++ } else {
++ status = -ENOMEM;
++ }
++ } else {
++ /* Found valid L1 PTE of another size.
++ * Should not overwrite it. */
++ status = -EPERM;
++ }
++ if (!status) {
++ pg_tbl_va = l2_base_va;
++ if (size == HW_PAGE_SIZE64KB)
++ pt->pg_info[l2_page_num].num_entries += 16;
++ else
++ pt->pg_info[l2_page_num].num_entries++;
++ dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
++ "%x, num_entries %x\n", l2_base_va,
++ l2_base_pa, l2_page_num,
++ pt->pg_info[l2_page_num].num_entries);
++ }
++ spin_unlock(&pt->pg_lock);
++ }
++ if (!status) {
++ dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
++ pg_tbl_va, pa, va, size);
++ dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
++ "mixed_size %x\n", attrs->endianism,
++ attrs->element_size, attrs->mixed_size);
++ status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
++ }
++
++ return status;
++}
++
++/* Memory map kernel VA -- memory allocated with vmalloc */
++static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
++ u32 ul_mpu_addr, u32 virt_addr,
++ u32 ul_num_bytes,
++ struct hw_mmu_map_attrs_t *hw_attrs)
++{
++ int status = 0;
++ struct page *page[1];
++ u32 i;
++ u32 pa_curr;
++ u32 pa_next;
++ u32 va_curr;
++ u32 size_curr;
++ u32 num_pages;
++ u32 pa;
++ u32 num_of4k_pages;
++ u32 temp = 0;
++
++ /*
++ * Do Kernel va to pa translation.
++ * Combine physically contiguous regions to reduce TLBs.
++ * Pass the translated pa to pte_update.
++ */
++ num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
++ i = 0;
++ va_curr = ul_mpu_addr;
++ page[0] = vmalloc_to_page((void *)va_curr);
++ pa_next = page_to_phys(page[0]);
++ while (!status && (i < num_pages)) {
++ /*
++ * Reuse pa_next from the previous iteraion to avoid
++ * an extra va2pa call
++ */
++ pa_curr = pa_next;
++ size_curr = PAGE_SIZE;
++ /*
++ * If the next page is physically contiguous,
++ * map it with the current one by increasing
++ * the size of the region to be mapped
++ */
++ while (++i < num_pages) {
++ page[0] =
++ vmalloc_to_page((void *)(va_curr + size_curr));
++ pa_next = page_to_phys(page[0]);
++
++ if (pa_next == (pa_curr + size_curr))
++ size_curr += PAGE_SIZE;
++ else
++ break;
++
++ }
++ if (pa_next == 0) {
++ status = -ENOMEM;
++ break;
++ }
++ pa = pa_curr;
++ num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
++ while (temp++ < num_of4k_pages) {
++ get_page(PHYS_TO_PAGE(pa));
++ pa += HW_PAGE_SIZE4KB;
++ }
++ status = pte_update(dev_context, pa_curr, virt_addr +
++ (va_curr - ul_mpu_addr), size_curr,
++ hw_attrs);
++ va_curr += size_curr;
++ }
++ /*
++ * In any case, flush the TLB
++ * This is called from here instead from pte_update to avoid unnecessary
++ * repetition while mapping non-contiguous physical regions of a virtual
++ * region
++ */
++ flush_all(dev_context);
++ dev_dbg(bridge, "%s status %x\n", __func__, status);
++ return status;
++}
++
++/*
++ * ======== wait_for_start ========
++ * Wait for the singal from DSP that it has started, or time out.
++ */
++bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
++{
++ u16 timeout = TIHELEN_ACKTIMEOUT;
++
++ /* Wait for response from board */
++ while (__raw_readw(dw_sync_addr) && --timeout)
++ udelay(10);
++
++ /* If timed out: return false */
++ if (!timeout) {
++ pr_err("%s: Timed out waiting DSP to Start\n", __func__);
++ return false;
++ }
++ return true;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap3430_pwr.c 2010-08-18 11:24:23.162056214 +0300
+@@ -0,0 +1,550 @@
++/*
++ * tiomap_pwr.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implementation of DSP wake/sleep routines.
++ *
++ * Copyright (C) 2007-2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/cfg.h>
++#include <dspbridge/drv.h>
++#include <dspbridge/io_sm.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/brddefs.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/iodefs.h>
++
++/* ------------------------------------ Hardware Abstraction Layer */
++#include <hw_defs.h>
++#include <hw_mmu.h>
++
++#include <dspbridge/pwr_sh.h>
++
++/* ----------------------------------- Bridge Driver */
++#include <dspbridge/dspdeh.h>
++#include <dspbridge/wdt.h>
++
++/* ----------------------------------- specific to this file */
++#include "_tiomap.h"
++#include "_tiomap_pwr.h"
++#include <mach-omap2/prm-regbits-34xx.h>
++#include <mach-omap2/cm-regbits-34xx.h>
++
++#define PWRSTST_TIMEOUT 200
++
++/*
++ * ======== handle_constraints_set ========
++ * Sets new DSP constraint
++ */
++int handle_constraints_set(struct bridge_dev_context *dev_context,
++ void *pargs)
++{
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ u32 *constraint_val;
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ constraint_val = (u32 *) (pargs);
++ /* Read the target value requested by DSP */
++ dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
++ (u32) *(constraint_val + 1));
++
++ /* Set the new opp value */
++ if (pdata->dsp_set_min_opp)
++ (*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
++#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
++ return 0;
++}
++
++/*
++ * ======== handle_hibernation_from_dsp ========
++ * Handle Hibernation requested from DSP
++ */
++int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
++{
++ int status = 0;
++#ifdef CONFIG_PM
++ u16 timeout = PWRSTST_TIMEOUT / 10;
++ u32 pwr_state;
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ u32 opplevel;
++ struct io_mgr *hio_mgr;
++#endif
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
++ OMAP_POWERSTATEST_MASK;
++ /* Wait for DSP to move into OFF state */
++ while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
++ if (msleep_interruptible(10)) {
++ pr_err("Waiting for DSP OFF mode interrupted\n");
++ return -EPERM;
++ }
++ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
++ OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
++ }
++ if (timeout == 0) {
++ pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
++ status = -ETIMEDOUT;
++ return status;
++ } else {
++
++ /* Save mailbox settings */
++ omap_mbox_save_ctx(dev_context->mbox);
++
++ /* Turn off DSP Peripheral clocks and DSP Load monitor timer */
++ status = dsp_clock_disable_all(dev_context->dsp_per_clks);
++
++ /* Disable wdt on hibernation. */
++ dsp_wdt_enable(false);
++
++ if (!status) {
++ /* Update the Bridger Driver state */
++ dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ status =
++ dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
++ if (!hio_mgr) {
++ status = DSP_EHANDLE;
++ return status;
++ }
++ io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);
++
++ /*
++ * Set the OPP to low level before moving to OFF
++ * mode
++ */
++ if (pdata->dsp_set_min_opp)
++ (*pdata->dsp_set_min_opp) (VDD1_OPP1);
++ status = 0;
++#endif /* CONFIG_TIDSPBRIDGE_DVFS */
++ }
++ }
++#endif
++ return status;
++}
++
++/*
++ * ======== sleep_dsp ========
++ * Put DSP in low power consuming state.
++ */
++int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
++ void *pargs)
++{
++ int status = 0;
++#ifdef CONFIG_PM
++#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
++ struct deh_mgr *hdeh_mgr;
++#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
++ u16 timeout = PWRSTST_TIMEOUT / 10;
++ u32 pwr_state, target_pwr_state;
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ /* Check if sleep code is valid */
++ if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
++ return -EINVAL;
++
++ switch (dev_context->dw_brd_state) {
++ case BRD_RUNNING:
++ omap_mbox_save_ctx(dev_context->mbox);
++ if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
++ sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
++ dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
++ __func__);
++ target_pwr_state = PWRDM_POWER_OFF;
++ } else {
++ sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
++ target_pwr_state = PWRDM_POWER_RET;
++ }
++ break;
++ case BRD_RETENTION:
++ omap_mbox_save_ctx(dev_context->mbox);
++ if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
++ sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
++ target_pwr_state = PWRDM_POWER_OFF;
++ } else
++ return 0;
++ break;
++ case BRD_HIBERNATION:
++ case BRD_DSP_HIBERNATION:
++ /* Already in Hibernation, so just return */
++ dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
++ __func__);
++ return 0;
++ case BRD_STOPPED:
++ dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
++ return 0;
++ default:
++ dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
++ return -EPERM;
++ }
++
++ /* Get the PRCM DSP power domain status */
++ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
++ OMAP_POWERSTATEST_MASK;
++
++ /* Wait for DSP to move into target power state */
++ while ((pwr_state != target_pwr_state) && --timeout) {
++ if (msleep_interruptible(10)) {
++ pr_err("Waiting for DSP to Suspend interrupted\n");
++ return -EPERM;
++ }
++ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
++ OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
++ }
++
++ if (!timeout) {
++ pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
++ __func__, pwr_state);
++#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
++ dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr);
++ bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
++#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
++ return -ETIMEDOUT;
++ } else {
++ /* Update the Bridger Driver state */
++ if (dsp_test_sleepstate == PWRDM_POWER_OFF)
++ dev_context->dw_brd_state = BRD_HIBERNATION;
++ else
++ dev_context->dw_brd_state = BRD_RETENTION;
++
++ /* Disable wdt on hibernation. */
++ dsp_wdt_enable(false);
++
++ /* Turn off DSP Peripheral clocks */
++ status = dsp_clock_disable_all(dev_context->dsp_per_clks);
++ if (status)
++ return status;
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ else if (target_pwr_state == PWRDM_POWER_OFF) {
++ /*
++ * Set the OPP to low level before moving to OFF mode
++ */
++ if (pdata->dsp_set_min_opp)
++ (*pdata->dsp_set_min_opp) (VDD1_OPP1);
++ }
++#endif /* CONFIG_TIDSPBRIDGE_DVFS */
++ }
++#endif /* CONFIG_PM */
++ return status;
++}
++
++/*
++ * ======== wake_dsp ========
++ * Wake up DSP from sleep.
++ */
++int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
++{
++ int status = 0;
++#ifdef CONFIG_PM
++
++ /* Check the board state, if it is not 'SLEEP' then return */
++ if (dev_context->dw_brd_state == BRD_RUNNING ||
++ dev_context->dw_brd_state == BRD_STOPPED) {
++ /* The Device is in 'RET' or 'OFF' state and Bridge state is not
++ * 'SLEEP', this means state inconsistency, so return */
++ return 0;
++ }
++
++ /* Send a wakeup message to DSP */
++ sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
++
++ /* Set the device state to RUNNIG */
++ dev_context->dw_brd_state = BRD_RUNNING;
++#endif /* CONFIG_PM */
++ return status;
++}
++
++/*
++ * ======== dsp_peripheral_clk_ctrl ========
++ * Enable/Disable the DSP peripheral clocks as needed..
++ */
++int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
++ void *pargs)
++{
++ u32 ext_clk = 0;
++ u32 ext_clk_id = 0;
++ u32 ext_clk_cmd = 0;
++ u32 clk_id_index = MBX_PM_MAX_RESOURCES;
++ u32 tmp_index;
++ u32 dsp_per_clks_before;
++ int status = 0;
++
++ dsp_per_clks_before = dev_context->dsp_per_clks;
++
++ ext_clk = (u32) *((u32 *) pargs);
++ ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;
++
++ /* process the power message -- TODO, keep it in a separate function */
++ for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
++ if (ext_clk_id == bpwr_clkid[tmp_index]) {
++ clk_id_index = tmp_index;
++ break;
++ }
++ }
++ /* TODO -- Assert may be a too hard restriction here.. May be we should
++ * just return with failure when the CLK ID does not match */
++ /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
++ if (clk_id_index == MBX_PM_MAX_RESOURCES) {
++ /* return with a more meaningfull error code */
++ return -EPERM;
++ }
++ ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
++ switch (ext_clk_cmd) {
++ case BPWR_DISABLE_CLOCK:
++ status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
++ dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
++ false);
++ if (!status) {
++ (dev_context->dsp_per_clks) &=
++ (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
++ }
++ break;
++ case BPWR_ENABLE_CLOCK:
++ status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
++ dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
++ if (!status)
++ (dev_context->dsp_per_clks) |=
++ (1 << bpwr_clks[clk_id_index].clk);
++ break;
++ default:
++ dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
++ /* unsupported cmd */
++ /* TODO -- provide support for AUTOIDLE Enable/Disable
++ * commands */
++ }
++ return status;
++}
++
++/*
++ * ========pre_scale_dsp========
++ * Sends prescale notification to DSP
++ *
++ */
++int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
++{
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ u32 level;
++ u32 voltage_domain;
++
++ voltage_domain = *((u32 *) pargs);
++ level = *((u32 *) pargs + 1);
++
++ dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
++ __func__, voltage_domain, level);
++ if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
++ (dev_context->dw_brd_state == BRD_RETENTION) ||
++ (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
++ dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
++ return 0;
++ } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
++ /* Send a prenotificatio to DSP */
++ dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
++ sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
++ return 0;
++ } else {
++ return -EPERM;
++ }
++#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
++ return 0;
++}
++
++/*
++ * ========post_scale_dsp========
++ * Sends postscale notification to DSP
++ *
++ */
++int post_scale_dsp(struct bridge_dev_context *dev_context,
++ void *pargs)
++{
++ int status = 0;
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ u32 level;
++ u32 voltage_domain;
++ struct io_mgr *hio_mgr;
++
++ status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
++ if (!hio_mgr)
++ return -EFAULT;
++
++ voltage_domain = *((u32 *) pargs);
++ level = *((u32 *) pargs + 1);
++ dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
++ __func__, voltage_domain, level);
++ if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
++ (dev_context->dw_brd_state == BRD_RETENTION) ||
++ (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
++ /* Update the OPP value in shared memory */
++ io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
++ dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
++ __func__);
++ } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
++ /* Update the OPP value in shared memory */
++ io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
++ /* Send a post notification to DSP */
++ sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
++ dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
++ "to DSP\n", __func__);
++ } else {
++ status = -EPERM;
++ }
++#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
++ return status;
++}
++
++void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
++{
++ struct cfg_hostres *resources;
++ int status = 0;
++ u32 iva2_grpsel;
++ u32 mpu_grpsel;
++ struct dev_object *hdev_object = NULL;
++ struct bridge_dev_context *bridge_context = NULL;
++
++ hdev_object = (struct dev_object *)drv_get_first_dev_object();
++ if (!hdev_object)
++ return;
++
++ status = dev_get_bridge_context(hdev_object, &bridge_context);
++ if (!bridge_context)
++ return;
++
++ resources = bridge_context->resources;
++ if (!resources)
++ return;
++
++ switch (clock_id) {
++ case BPWR_GP_TIMER5:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_GP_TIMER6:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_GP_TIMER7:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_GP_TIMER8:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_MCBSP1:
++ iva2_grpsel = readl(resources->dw_core_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_core_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_core_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_core_pm_base + 0xA4);
++ break;
++ case BPWR_MCBSP2:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_MCBSP3:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_MCBSP4:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ case BPWR_MCBSP5:
++ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
++ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
++ if (enable) {
++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
++ } else {
++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
++ }
++ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
++ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
++ break;
++ }
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap_io.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap_io.c 2010-08-18 11:24:23.162056214 +0300
+@@ -0,0 +1,455 @@
++/*
++ * tiomap_io.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implementation for the io read/write routines.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++#include <dspbridge/drv.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/wdt.h>
++
++/* ----------------------------------- specific to this file */
++#include "_tiomap.h"
++#include "_tiomap_pwr.h"
++#include "tiomap_io.h"
++
++static u32 ul_ext_base;
++static u32 ul_ext_end;
++
++static u32 shm0_end;
++static u32 ul_dyn_ext_base;
++static u32 ul_trace_sec_beg;
++static u32 ul_trace_sec_end;
++static u32 ul_shm_base_virt;
++
++bool symbols_reloaded = true;
++
++/*
++ * ======== read_ext_dsp_data ========
++ * Copies DSP external memory buffers to the host side buffers.
++ */
++int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type)
++{
++ int status = 0;
++ struct bridge_dev_context *dev_context = dev_ctxt;
++ u32 offset;
++ u32 ul_tlb_base_virt = 0;
++ u32 ul_shm_offset_virt = 0;
++ u32 dw_ext_prog_virt_mem;
++ u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
++ bool trace_read = false;
++
++ if (!ul_shm_base_virt) {
++ status = dev_get_symbol(dev_context->hdev_obj,
++ SHMBASENAME, &ul_shm_base_virt);
++ }
++ DBC_ASSERT(ul_shm_base_virt != 0);
++
++ /* Check if it is a read of Trace section */
++ if (!status && !ul_trace_sec_beg) {
++ status = dev_get_symbol(dev_context->hdev_obj,
++ DSP_TRACESEC_BEG, &ul_trace_sec_beg);
++ }
++ DBC_ASSERT(ul_trace_sec_beg != 0);
++
++ if (!status && !ul_trace_sec_end) {
++ status = dev_get_symbol(dev_context->hdev_obj,
++ DSP_TRACESEC_END, &ul_trace_sec_end);
++ }
++ DBC_ASSERT(ul_trace_sec_end != 0);
++
++ if (!status) {
++ if ((dsp_addr <= ul_trace_sec_end) &&
++ (dsp_addr >= ul_trace_sec_beg))
++ trace_read = true;
++ }
++
++ /* If reading from TRACE, force remap/unmap */
++ if (trace_read && dw_base_addr) {
++ dw_base_addr = 0;
++ dev_context->dw_dsp_ext_base_addr = 0;
++ }
++
++ if (!dw_base_addr) {
++ /* Initialize ul_ext_base and ul_ext_end */
++ ul_ext_base = 0;
++ ul_ext_end = 0;
++
++ /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
++ if (!status && !ul_dyn_ext_base) {
++ status = dev_get_symbol(dev_context->hdev_obj,
++ DYNEXTBASE, &ul_dyn_ext_base);
++ }
++ DBC_ASSERT(ul_dyn_ext_base != 0);
++
++ if (!status) {
++ status = dev_get_symbol(dev_context->hdev_obj,
++ EXTBASE, &ul_ext_base);
++ }
++ DBC_ASSERT(ul_ext_base != 0);
++
++ if (!status) {
++ status = dev_get_symbol(dev_context->hdev_obj,
++ EXTEND, &ul_ext_end);
++ }
++ DBC_ASSERT(ul_ext_end != 0);
++
++ /* Trace buffer is right after the shm SEG0,
++ * so set the base address to SHMBASE */
++ if (trace_read) {
++ ul_ext_base = ul_shm_base_virt;
++ ul_ext_end = ul_trace_sec_end;
++ }
++
++ DBC_ASSERT(ul_ext_end != 0);
++ DBC_ASSERT(ul_ext_end > ul_ext_base);
++
++ if (ul_ext_end < ul_ext_base)
++ status = -EPERM;
++
++ if (!status) {
++ ul_tlb_base_virt =
++ dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
++ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
++ dw_ext_prog_virt_mem =
++ dev_context->atlb_entry[0].ul_gpp_va;
++
++ if (!trace_read) {
++ ul_shm_offset_virt =
++ ul_shm_base_virt - ul_tlb_base_virt;
++ ul_shm_offset_virt +=
++ PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
++ 1, HW_PAGE_SIZE64KB);
++ dw_ext_prog_virt_mem -= ul_shm_offset_virt;
++ dw_ext_prog_virt_mem +=
++ (ul_ext_base - ul_dyn_ext_base);
++ dev_context->dw_dsp_ext_base_addr =
++ dw_ext_prog_virt_mem;
++
++ /*
++ * This dw_dsp_ext_base_addr will get cleared
++ * only when the board is stopped.
++ */
++ if (!dev_context->dw_dsp_ext_base_addr)
++ status = -EPERM;
++ }
++
++ dw_base_addr = dw_ext_prog_virt_mem;
++ }
++ }
++
++ if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
++ status = -EPERM;
++
++ offset = dsp_addr - ul_ext_base;
++
++ if (!status)
++ memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes);
++
++ return status;
++}
++
++/*
++ * ======== write_dsp_data ========
++ * purpose:
++ * Copies buffers to the DSP internal/external memory.
++ */
++int write_dsp_data(struct bridge_dev_context *dev_context,
++ u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes,
++ u32 mem_type)
++{
++ u32 offset;
++ u32 dw_base_addr = dev_context->dw_dsp_base_addr;
++ struct cfg_hostres *resources = dev_context->resources;
++ int status = 0;
++ u32 base1, base2, base3;
++ base1 = OMAP_DSP_MEM1_SIZE;
++ base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
++ base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;
++
++ if (!resources)
++ return -EPERM;
++
++ offset = dsp_addr - dev_context->dw_dsp_start_add;
++ if (offset < base1) {
++ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
++ resources->dw_mem_length[2]);
++ } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
++ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
++ resources->dw_mem_length[3]);
++ offset = offset - base2;
++ } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
++ offset < base3 + OMAP_DSP_MEM3_SIZE) {
++ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
++ resources->dw_mem_length[4]);
++ offset = offset - base3;
++ } else {
++ return -EPERM;
++ }
++ if (ul_num_bytes)
++ memcpy((u8 *) (dw_base_addr + offset), host_buff, ul_num_bytes);
++ else
++ *((u32 *) host_buff) = dw_base_addr + offset;
++
++ return status;
++}
++
++/*
++ * ======== write_ext_dsp_data ========
++ * purpose:
++ * Copies buffers to the external memory.
++ *
++ */
++int write_ext_dsp_data(struct bridge_dev_context *dev_context,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type,
++ bool dynamic_load)
++{
++ u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
++ u32 dw_offset = 0;
++ u8 temp_byte1, temp_byte2;
++ u8 remain_byte[4];
++ s32 i;
++ int ret = 0;
++ u32 dw_ext_prog_virt_mem;
++ u32 ul_tlb_base_virt = 0;
++ u32 ul_shm_offset_virt = 0;
++ struct cfg_hostres *host_res = dev_context->resources;
++ bool trace_load = false;
++ temp_byte1 = 0x0;
++ temp_byte2 = 0x0;
++
++ if (symbols_reloaded) {
++ /* Check if it is a load to Trace section */
++ ret = dev_get_symbol(dev_context->hdev_obj,
++ DSP_TRACESEC_BEG, &ul_trace_sec_beg);
++ if (!ret)
++ ret = dev_get_symbol(dev_context->hdev_obj,
++ DSP_TRACESEC_END,
++ &ul_trace_sec_end);
++ }
++ if (!ret) {
++ if ((dsp_addr <= ul_trace_sec_end) &&
++ (dsp_addr >= ul_trace_sec_beg))
++ trace_load = true;
++ }
++
++ /* If dynamic, force remap/unmap */
++ if ((dynamic_load || trace_load) && dw_base_addr) {
++ dw_base_addr = 0;
++ MEM_UNMAP_LINEAR_ADDRESS((void *)
++ dev_context->dw_dsp_ext_base_addr);
++ dev_context->dw_dsp_ext_base_addr = 0x0;
++ }
++ if (!dw_base_addr) {
++ if (symbols_reloaded)
++ /* Get SHM_BEG EXT_BEG and EXT_END. */
++ ret = dev_get_symbol(dev_context->hdev_obj,
++ SHMBASENAME, &ul_shm_base_virt);
++ DBC_ASSERT(ul_shm_base_virt != 0);
++ if (dynamic_load) {
++ if (!ret) {
++ if (symbols_reloaded)
++ ret =
++ dev_get_symbol
++ (dev_context->hdev_obj, DYNEXTBASE,
++ &ul_ext_base);
++ }
++ DBC_ASSERT(ul_ext_base != 0);
++ if (!ret) {
++ /* DR OMAPS00013235 : DLModules array may be
++ * in EXTMEM. It is expected that DYNEXTMEM and
++ * EXTMEM are contiguous, so checking for the
++ * upper bound at EXTEND should be Ok. */
++ if (symbols_reloaded)
++ ret =
++ dev_get_symbol
++ (dev_context->hdev_obj, EXTEND,
++ &ul_ext_end);
++ }
++ } else {
++ if (symbols_reloaded) {
++ if (!ret)
++ ret =
++ dev_get_symbol
++ (dev_context->hdev_obj, EXTBASE,
++ &ul_ext_base);
++ DBC_ASSERT(ul_ext_base != 0);
++ if (!ret)
++ ret =
++ dev_get_symbol
++ (dev_context->hdev_obj, EXTEND,
++ &ul_ext_end);
++ }
++ }
++ /* Trace buffer it right after the shm SEG0, so set the
++ * base address to SHMBASE */
++ if (trace_load)
++ ul_ext_base = ul_shm_base_virt;
++
++ DBC_ASSERT(ul_ext_end != 0);
++ DBC_ASSERT(ul_ext_end > ul_ext_base);
++ if (ul_ext_end < ul_ext_base)
++ ret = -EPERM;
++
++ if (!ret) {
++ ul_tlb_base_virt =
++ dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
++ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
++
++ if (symbols_reloaded) {
++ ret = dev_get_symbol
++ (dev_context->hdev_obj,
++ DSP_TRACESEC_END, &shm0_end);
++ if (!ret) {
++ ret =
++ dev_get_symbol
++ (dev_context->hdev_obj, DYNEXTBASE,
++ &ul_dyn_ext_base);
++ }
++ }
++ ul_shm_offset_virt =
++ ul_shm_base_virt - ul_tlb_base_virt;
++ if (trace_load) {
++ dw_ext_prog_virt_mem =
++ dev_context->atlb_entry[0].ul_gpp_va;
++ } else {
++ dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
++ dw_ext_prog_virt_mem +=
++ (ul_ext_base - ul_dyn_ext_base);
++ }
++
++ dev_context->dw_dsp_ext_base_addr =
++ (u32) MEM_LINEAR_ADDRESS((void *)
++ dw_ext_prog_virt_mem,
++ ul_ext_end - ul_ext_base);
++ dw_base_addr += dev_context->dw_dsp_ext_base_addr;
++ /* This dw_dsp_ext_base_addr will get cleared only when
++ * the board is stopped. */
++ if (!dev_context->dw_dsp_ext_base_addr)
++ ret = -EPERM;
++ }
++ }
++ if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
++ ret = -EPERM;
++
++ if (!ret) {
++ for (i = 0; i < 4; i++)
++ remain_byte[i] = 0x0;
++
++ dw_offset = dsp_addr - ul_ext_base;
++ /* Also make sure the dsp_addr is < ul_ext_end */
++ if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
++ ret = -EPERM;
++ }
++ if (!ret) {
++ if (ul_num_bytes)
++ memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
++ ul_num_bytes);
++ else
++ *((u32 *) host_buff) = dw_base_addr + dw_offset;
++ }
++ /* Unmap here to force remap for other Ext loads */
++ if ((dynamic_load || trace_load) && dev_context->dw_dsp_ext_base_addr) {
++ MEM_UNMAP_LINEAR_ADDRESS((void *)
++ dev_context->dw_dsp_ext_base_addr);
++ dev_context->dw_dsp_ext_base_addr = 0x0;
++ }
++ symbols_reloaded = false;
++ return ret;
++}
++
++int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
++{
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ u32 opplevel = 0;
++#endif
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++ struct cfg_hostres *resources = dev_context->resources;
++ int status = 0;
++ u32 temp;
++
++ if (!dev_context->mbox)
++ return 0;
++
++ if (!resources)
++ return -EPERM;
++
++ if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
++ dev_context->dw_brd_state == BRD_HIBERNATION) {
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ if (pdata->dsp_get_opp)
++ opplevel = (*pdata->dsp_get_opp) ();
++ if (opplevel == VDD1_OPP1) {
++ if (pdata->dsp_set_min_opp)
++ (*pdata->dsp_set_min_opp) (VDD1_OPP2);
++ }
++#endif
++ /* Restart the peripheral clocks */
++ dsp_clock_enable_all(dev_context->dsp_per_clks);
++ dsp_wdt_enable(true);
++
++ /*
++ * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
++ * in CM_AUTOIDLE_PLL_IVA2 register
++ */
++ (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
++ OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
++
++ /*
++ * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
++ * 0.75 MHz - 1.0 MHz
++ * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
++ */
++ (*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
++ OMAP3430_EN_IVA2_DPLL_MASK,
++ 0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
++ 0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
++ OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
++
++ /* Restore mailbox settings */
++ omap_mbox_restore_ctx(dev_context->mbox);
++
++ /* Access MMU SYS CONFIG register to generate a short wakeup */
++ temp = readl(resources->dw_dmmu_base + 0x10);
++
++ dev_context->dw_brd_state = BRD_RUNNING;
++ } else if (dev_context->dw_brd_state == BRD_RETENTION) {
++ /* Restart the peripheral clocks */
++ dsp_clock_enable_all(dev_context->dsp_per_clks);
++ }
++
++ status = omap_mbox_msg_send(dev_context->mbox, mb_val);
++
++ if (status) {
++ pr_err("omap_mbox_msg_send Fail and status = %d\n", status);
++ status = -EPERM;
++ }
++
++ return 0;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap_io.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/tiomap_io.h 2010-08-18 11:24:23.162056214 +0300
+@@ -0,0 +1,104 @@
++/*
++ * tiomap_io.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definitions, types and function prototypes for the io (r/w external mem).
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _TIOMAP_IO_
++#define _TIOMAP_IO_
++
++/*
++ * Symbol that defines beginning of shared memory.
++ * For OMAP (Helen) this is the DSP Virtual base address of SDRAM.
++ * This will be used to program DSP MMU to map DSP Virt to GPP phys.
++ * (see dspMmuTlbEntry()).
++ */
++#define SHMBASENAME "SHM_BEG"
++#define EXTBASE "EXT_BEG"
++#define EXTEND "_EXT_END"
++#define DYNEXTBASE "_DYNEXT_BEG"
++#define DYNEXTEND "_DYNEXT_END"
++#define IVAEXTMEMBASE "_IVAEXTMEM_BEG"
++#define IVAEXTMEMEND "_IVAEXTMEM_END"
++
++#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG"
++#define DSP_TRACESEC_END "_BRIDGE_TRACE_END"
++
++#define SYS_PUTCBEG "_SYS_PUTCBEG"
++#define SYS_PUTCEND "_SYS_PUTCEND"
++#define BRIDGE_SYS_PUTC_CURRENT "_BRIDGE_SYS_PUTC_current"
++
++#define WORDSWAP_ENABLE 0x3 /* Enable word swap */
++
++/*
++ * ======== read_ext_dsp_data ========
++ * Reads it from DSP External memory. The external memory for the DSP
++ * is configured by the combination of DSP MMU and shm Memory manager in the CDB
++ */
++extern int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type);
++
++/*
++ * ======== write_dsp_data ========
++ */
++extern int write_dsp_data(struct bridge_dev_context *dev_context,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type);
++
++/*
++ * ======== write_ext_dsp_data ========
++ * Writes to the DSP External memory for external program.
++ * The ext mem for progra is configured by the combination of DSP MMU and
++ * shm Memory manager in the CDB
++ */
++extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
++ u8 *host_buff, u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type,
++ bool dynamic_load);
++
++/*
++ * ======== write_ext32_bit_dsp_data ========
++ * Writes 32 bit data to the external memory
++ */
++extern inline void write_ext32_bit_dsp_data(const
++ struct bridge_dev_context *dev_context,
++ u32 dsp_addr, u32 val)
++{
++ *(u32 *) dsp_addr = ((dev_context->tc_word_swap_on) ? (((val << 16) &
++ 0xFFFF0000) |
++ ((val >> 16) &
++ 0x0000FFFF)) :
++ val);
++}
++
++/*
++ * ======== read_ext32_bit_dsp_data ========
++ * Reads 32 bit data from the external memory
++ */
++extern inline u32 read_ext32_bit_dsp_data(const struct bridge_dev_context
++ *dev_context, u32 dsp_addr)
++{
++ u32 ret;
++ ret = *(u32 *) dsp_addr;
++
++ ret = ((dev_context->tc_word_swap_on) ? (((ret << 16)
++ & 0xFFFF0000) | ((ret >> 16) &
++ 0x0000FFFF))
++ : ret);
++ return ret;
++}
++
++#endif /* _TIOMAP_IO_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/ue_deh.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/ue_deh.c 2010-08-18 11:24:23.162056214 +0300
+@@ -0,0 +1,273 @@
++/*
++ * ue_deh.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implements upper edge DSP exception handling (DEH) functions.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ * Copyright (C) 2010 Felipe Contreras
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <plat/dmtimer.h>
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/dspdeh.h>
++#include <dspbridge/dev.h>
++#include "_tiomap.h"
++#include "_deh.h"
++
++#include <dspbridge/io_sm.h>
++#include <dspbridge/drv.h>
++#include <dspbridge/wdt.h>
++
++static u32 fault_addr;
++
++static void mmu_fault_dpc(unsigned long data)
++{
++ struct deh_mgr *deh = (void *)data;
++
++ if (!deh)
++ return;
++
++ bridge_deh_notify(deh, DSP_MMUFAULT, 0);
++}
++
++static irqreturn_t mmu_fault_isr(int irq, void *data)
++{
++ struct deh_mgr *deh = data;
++ struct cfg_hostres *resources;
++ u32 event;
++
++ if (!deh)
++ return IRQ_HANDLED;
++
++ resources = deh->hbridge_context->resources;
++ if (!resources) {
++ dev_dbg(bridge, "%s: Failed to get Host Resources\n",
++ __func__);
++ return IRQ_HANDLED;
++ }
++
++ hw_mmu_event_status(resources->dw_dmmu_base, &event);
++ if (event == HW_MMU_TRANSLATION_FAULT) {
++ hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
++ dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
++ event, fault_addr);
++ /*
++ * Schedule a DPC directly. In the future, it may be
++ * necessary to check if DSP MMU fault is intended for
++ * Bridge.
++ */
++ tasklet_schedule(&deh->dpc_tasklet);
++
++ /* Disable the MMU events, else once we clear it will
++ * start to raise INTs again */
++ hw_mmu_event_disable(resources->dw_dmmu_base,
++ HW_MMU_TRANSLATION_FAULT);
++ } else {
++ hw_mmu_event_disable(resources->dw_dmmu_base,
++ HW_MMU_ALL_INTERRUPTS);
++ }
++ return IRQ_HANDLED;
++}
++
++int bridge_deh_create(struct deh_mgr **ret_deh,
++ struct dev_object *hdev_obj)
++{
++ int status;
++ struct deh_mgr *deh;
++ struct bridge_dev_context *hbridge_context = NULL;
++
++ /* Message manager will be created when a file is loaded, since
++ * size of message buffer in shared memory is configurable in
++ * the base image. */
++ /* Get Bridge context info. */
++ dev_get_bridge_context(hdev_obj, &hbridge_context);
++ /* Allocate IO manager object: */
++ deh = kzalloc(sizeof(*deh), GFP_KERNEL);
++ if (!deh) {
++ status = -ENOMEM;
++ goto err;
++ }
++
++ /* Create an NTFY object to manage notifications */
++ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
++ if (!deh->ntfy_obj) {
++ status = -ENOMEM;
++ goto err;
++ }
++ ntfy_init(deh->ntfy_obj);
++
++ /* Create a MMUfault DPC */
++ tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
++
++ /* Fill in context structure */
++ deh->hbridge_context = hbridge_context;
++
++ /* Install ISR function for DSP MMU fault */
++ status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
++ "DspBridge\tiommu fault", deh);
++ if (status < 0)
++ goto err;
++
++ *ret_deh = deh;
++ return 0;
++
++err:
++ bridge_deh_destroy(deh);
++ *ret_deh = NULL;
++ return status;
++}
++
++int bridge_deh_destroy(struct deh_mgr *deh)
++{
++ if (!deh)
++ return -EFAULT;
++
++ /* If notification object exists, delete it */
++ if (deh->ntfy_obj) {
++ ntfy_delete(deh->ntfy_obj);
++ kfree(deh->ntfy_obj);
++ }
++ /* Disable DSP MMU fault */
++ free_irq(INT_DSP_MMU_IRQ, deh);
++
++ /* Free DPC object */
++ tasklet_kill(&deh->dpc_tasklet);
++
++ /* Deallocate the DEH manager object */
++ kfree(deh);
++
++ return 0;
++}
++
++int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
++ u32 notify_type,
++ struct dsp_notification *hnotification)
++{
++ if (!deh)
++ return -EFAULT;
++
++ if (event_mask)
++ return ntfy_register(deh->ntfy_obj, hnotification,
++ event_mask, notify_type);
++ else
++ return ntfy_unregister(deh->ntfy_obj, hnotification);
++}
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
++{
++ struct cfg_hostres *resources;
++ struct hw_mmu_map_attrs_t map_attrs = {
++ .endianism = HW_LITTLE_ENDIAN,
++ .element_size = HW_ELEM_SIZE16BIT,
++ .mixed_size = HW_MMU_CPUES,
++ };
++ void *dummy_va_addr;
++
++ resources = dev_context->resources;
++ dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
++
++ /*
++ * Before acking the MMU fault, let's make sure MMU can only
++ * access entry #0. Then add a new entry so that the DSP OS
++ * can continue in order to dump the stack.
++ */
++ hw_mmu_twl_disable(resources->dw_dmmu_base);
++ hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
++
++ hw_mmu_tlb_add(resources->dw_dmmu_base,
++ virt_to_phys(dummy_va_addr), fault_addr,
++ HW_PAGE_SIZE4KB, 1,
++ &map_attrs, HW_SET, HW_SET);
++
++ dsp_clk_enable(DSP_CLK_GPT8);
++
++ dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
++
++ /* Clear MMU interrupt */
++ hw_mmu_event_ack(resources->dw_dmmu_base,
++ HW_MMU_TRANSLATION_FAULT);
++ dump_dsp_stack(dev_context);
++ dsp_clk_disable(DSP_CLK_GPT8);
++
++ hw_mmu_disable(resources->dw_dmmu_base);
++ free_page((unsigned long)dummy_va_addr);
++}
++#endif
++
++static inline const char *event_to_string(int event)
++{
++ switch (event) {
++ case DSP_SYSERROR: return "DSP_SYSERROR"; break;
++ case DSP_MMUFAULT: return "DSP_MMUFAULT"; break;
++ case DSP_PWRERROR: return "DSP_PWRERROR"; break;
++ case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; break;
++ default: return "unkown event"; break;
++ }
++}
++
++void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
++{
++ struct bridge_dev_context *dev_context;
++ const char *str = event_to_string(event);
++
++ if (!deh)
++ return;
++
++ dev_dbg(bridge, "%s: device exception", __func__);
++ dev_context = deh->hbridge_context;
++
++ switch (event) {
++ case DSP_SYSERROR:
++ dev_err(bridge, "%s: %s, info=0x%x", __func__,
++ str, info);
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++ dump_dl_modules(dev_context);
++ dump_dsp_stack(dev_context);
++#endif
++ break;
++ case DSP_MMUFAULT:
++ dev_err(bridge, "%s: %s, addr=0x%x", __func__,
++ str, fault_addr);
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++ print_dsp_trace_buffer(dev_context);
++ dump_dl_modules(dev_context);
++ mmu_fault_print_stack(dev_context);
++#endif
++ break;
++ default:
++ dev_err(bridge, "%s: %s", __func__, str);
++ break;
++ }
++
++ /* Filter subsequent notifications when an error occurs */
++ if (dev_context->dw_brd_state != BRD_ERROR) {
++ ntfy_notify(deh->ntfy_obj, event);
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++ bridge_recover_schedule();
++#endif
++ }
++
++ /* Set the Board state as ERROR */
++ dev_context->dw_brd_state = BRD_ERROR;
++ /* Disable all the clocks that were enabled by DSP */
++ dsp_clock_disable_all(dev_context->dsp_per_clks);
++ /*
++ * Avoid the subsequent WDT if it happens once,
++ * also if fatal error occurs.
++ */
++ dsp_wdt_enable(false);
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/core/wdt.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/core/wdt.c 2010-08-18 11:24:23.162056214 +0300
+@@ -0,0 +1,150 @@
++/*
++ * wdt.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * IO dispatcher for a shared memory channel driver.
++ *
++ * Copyright (C) 2010 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/dspdeh.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/_chnl_sm.h>
++#include <dspbridge/wdt.h>
++#include <dspbridge/host_os.h>
++
++
++#ifdef CONFIG_TIDSPBRIDGE_WDT3
++
++#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
++
++static struct dsp_wdt_setting dsp_wdt;
++
++void dsp_wdt_dpc(unsigned long data)
++{
++ struct deh_mgr *deh_mgr;
++ dev_get_deh_mgr(dev_get_first(), &deh_mgr);
++ if (deh_mgr)
++ bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
++}
++
++irqreturn_t dsp_wdt_isr(int irq, void *data)
++{
++ u32 value;
++ /* ack wdt3 interrupt */
++ value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
++ __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
++
++ tasklet_schedule(&dsp_wdt.wdt3_tasklet);
++ return IRQ_HANDLED;
++}
++
++int dsp_wdt_init(void)
++{
++ int ret = 0;
++
++ dsp_wdt.sm_wdt = NULL;
++ dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
++ tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
++
++ dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
++
++ if (dsp_wdt.fclk) {
++ dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
++ if (!dsp_wdt.iclk) {
++ clk_put(dsp_wdt.fclk);
++ dsp_wdt.fclk = NULL;
++ ret = -EFAULT;
++ }
++ } else
++ ret = -EFAULT;
++
++ if (!ret)
++ ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0,
++ "dsp_wdt", &dsp_wdt);
++
++ /* Disable at this moment, it will be enabled when DSP starts */
++ if (!ret)
++ disable_irq(INT_34XX_WDT3_IRQ);
++
++ return ret;
++}
++
++void dsp_wdt_sm_set(void *data)
++{
++ dsp_wdt.sm_wdt = data;
++ dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT;
++}
++
++
++void dsp_wdt_exit(void)
++{
++ free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
++ tasklet_kill(&dsp_wdt.wdt3_tasklet);
++
++ if (dsp_wdt.fclk)
++ clk_put(dsp_wdt.fclk);
++ if (dsp_wdt.iclk)
++ clk_put(dsp_wdt.iclk);
++
++ dsp_wdt.fclk = NULL;
++ dsp_wdt.iclk = NULL;
++ dsp_wdt.sm_wdt = NULL;
++ dsp_wdt.reg_base = NULL;
++}
++
++void dsp_wdt_enable(bool enable)
++{
++ u32 tmp;
++ static bool wdt_enable;
++
++ if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk)
++ return;
++
++ wdt_enable = enable;
++
++ if (enable) {
++ clk_enable(dsp_wdt.fclk);
++ clk_enable(dsp_wdt.iclk);
++ dsp_wdt.sm_wdt->wdt_setclocks = 1;
++ tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
++ __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
++ enable_irq(INT_34XX_WDT3_IRQ);
++ } else {
++ disable_irq(INT_34XX_WDT3_IRQ);
++ dsp_wdt.sm_wdt->wdt_setclocks = 0;
++ clk_disable(dsp_wdt.iclk);
++ clk_disable(dsp_wdt.fclk);
++ }
++}
++
++#else
++void dsp_wdt_enable(bool enable)
++{
++}
++
++void dsp_wdt_sm_set(void *data)
++{
++}
++
++int dsp_wdt_init(void)
++{
++ return 0;
++}
++
++void dsp_wdt_exit(void)
++{
++}
++#endif
++
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/cload.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/cload.c 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,1953 @@
++/*
++ * cload.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include "header.h"
++
++#include "module_list.h"
++#define LINKER_MODULES_HEADER ("_" MODULES_HEADER)
++
++/*
++ * forward references
++ */
++static void dload_symbols(struct dload_state *dlthis);
++static void dload_data(struct dload_state *dlthis);
++static void allocate_sections(struct dload_state *dlthis);
++static void string_table_free(struct dload_state *dlthis);
++static void symbol_table_free(struct dload_state *dlthis);
++static void section_table_free(struct dload_state *dlthis);
++static void init_module_handle(struct dload_state *dlthis);
++#if BITS_PER_AU > BITS_PER_BYTE
++static char *unpack_name(struct dload_state *dlthis, u32 soffset);
++#endif
++
++static const char cinitname[] = { ".cinit" };
++static const char loader_dllview_root[] = { "?DLModules?" };
++
++/*
++ * Error strings
++ */
++static const char readstrm[] = { "Error reading %s from input stream" };
++static const char err_alloc[] = { "Syms->dload_allocate( %d ) failed" };
++static const char tgtalloc[] = {
++ "Target memory allocate failed, section %s size " FMT_UI32 };
++static const char initfail[] = { "%s to target address " FMT_UI32 " failed" };
++static const char dlvwrite[] = { "Write to DLLview list failed" };
++static const char iconnect[] = { "Connect call to init interface failed" };
++static const char err_checksum[] = { "Checksum failed on %s" };
++
++/*************************************************************************
++ * Procedure dload_error
++ *
++ * Parameters:
++ * errtxt description of the error, printf style
++ * ... additional information
++ *
++ * Effect:
++ * Reports or records the error as appropriate.
++ *********************************************************************** */
++void dload_error(struct dload_state *dlthis, const char *errtxt, ...)
++{
++ va_list args;
++
++ va_start(args, errtxt);
++ dlthis->mysym->error_report(dlthis->mysym, errtxt, args);
++ va_end(args);
++ dlthis->dload_errcount += 1;
++
++} /* dload_error */
++
++#define DL_ERROR(zza, zzb) dload_error(dlthis, zza, zzb)
++
++/*************************************************************************
++ * Procedure dload_syms_error
++ *
++ * Parameters:
++ * errtxt description of the error, printf style
++ * ... additional information
++ *
++ * Effect:
++ * Reports or records the error as appropriate.
++ *********************************************************************** */
++void dload_syms_error(struct dynamic_loader_sym *syms, const char *errtxt, ...)
++{
++ va_list args;
++
++ va_start(args, errtxt);
++ syms->error_report(syms, errtxt, args);
++ va_end(args);
++}
++
++/*************************************************************************
++ * Procedure dynamic_load_module
++ *
++ * Parameters:
++ * module The input stream that supplies the module image
++ * syms Host-side symbol table and malloc/free functions
++ * alloc Target-side memory allocation
++ * init Target-side memory initialization
++ * options Option flags DLOAD_*
++ * mhandle A module handle for use with Dynamic_Unload
++ *
++ * Effect:
++ * The module image is read using *module. Target storage for the new
++ * image is
++ * obtained from *alloc. Symbols defined and referenced by the module are
++ * managed using *syms. The image is then relocated and references
++ * resolved as necessary, and the resulting executable bits are placed
++ * into target memory using *init.
++ *
++ * Returns:
++ * On a successful load, a module handle is placed in *mhandle,
++ * and zero is returned. On error, the number of errors detected is
++ * returned. Individual errors are reported during the load process
++ * using syms->error_report().
++ ********************************************************************** */
++int dynamic_load_module(struct dynamic_loader_stream *module,
++ struct dynamic_loader_sym *syms,
++ struct dynamic_loader_allocate *alloc,
++ struct dynamic_loader_initialize *init,
++ unsigned options, void **mhandle)
++{
++ register unsigned *dp, sz;
++ struct dload_state dl_state; /* internal state for this call */
++
++ /* blast our internal state */
++ dp = (unsigned *)&dl_state;
++ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1)
++ *dp++ = 0;
++
++ /* Enable _only_ BSS initialization if enabled by user */
++ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS)
++ dl_state.myoptions = DLOAD_INITBSS;
++
++ /* Check that mandatory arguments are present */
++ if (!module || !syms) {
++ dload_error(&dl_state, "Required parameter is NULL");
++ } else {
++ dl_state.strm = module;
++ dl_state.mysym = syms;
++ dload_headers(&dl_state);
++ if (!dl_state.dload_errcount)
++ dload_strings(&dl_state, false);
++ if (!dl_state.dload_errcount)
++ dload_sections(&dl_state);
++
++ if (init && !dl_state.dload_errcount) {
++ if (init->connect(init)) {
++ dl_state.myio = init;
++ dl_state.myalloc = alloc;
++ /* do now, before reducing symbols */
++ allocate_sections(&dl_state);
++ } else
++ dload_error(&dl_state, iconnect);
++ }
++
++ if (!dl_state.dload_errcount) {
++ /* fix up entry point address */
++ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1;
++ if (sref < dl_state.allocated_secn_count)
++ dl_state.dfile_hdr.df_entrypt +=
++ dl_state.ldr_sections[sref].run_addr;
++
++ dload_symbols(&dl_state);
++ }
++
++ if (init && !dl_state.dload_errcount)
++ dload_data(&dl_state);
++
++ init_module_handle(&dl_state);
++
++ /* dl_state.myio is init or 0 at this point. */
++ if (dl_state.myio) {
++ if ((!dl_state.dload_errcount) &&
++ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) &&
++ (!init->execute(init,
++ dl_state.dfile_hdr.df_entrypt)))
++ dload_error(&dl_state, "Init->Execute Failed");
++ init->release(init);
++ }
++
++ symbol_table_free(&dl_state);
++ section_table_free(&dl_state);
++ string_table_free(&dl_state);
++ dload_tramp_cleanup(&dl_state);
++
++ if (dl_state.dload_errcount) {
++ dynamic_unload_module(dl_state.myhandle, syms, alloc,
++ init);
++ dl_state.myhandle = NULL;
++ }
++ }
++
++ if (mhandle)
++ *mhandle = dl_state.myhandle; /* give back the handle */
++
++ return dl_state.dload_errcount;
++} /* DLOAD_File */
++
++/*************************************************************************
++ * Procedure dynamic_open_module
++ *
++ * Parameters:
++ * module The input stream that supplies the module image
++ * syms Host-side symbol table and malloc/free functions
++ * alloc Target-side memory allocation
++ * init Target-side memory initialization
++ * options Option flags DLOAD_*
++ * mhandle A module handle for use with Dynamic_Unload
++ *
++ * Effect:
++ * The module image is read using *module. Target storage for the new
++ * image is
++ * obtained from *alloc. Symbols defined and referenced by the module are
++ * managed using *syms. The image is then relocated and references
++ * resolved as necessary, and the resulting executable bits are placed
++ * into target memory using *init.
++ *
++ * Returns:
++ * On a successful load, a module handle is placed in *mhandle,
++ * and zero is returned. On error, the number of errors detected is
++ * returned. Individual errors are reported during the load process
++ * using syms->error_report().
++ ********************************************************************** */
++int
++dynamic_open_module(struct dynamic_loader_stream *module,
++ struct dynamic_loader_sym *syms,
++ struct dynamic_loader_allocate *alloc,
++ struct dynamic_loader_initialize *init,
++ unsigned options, void **mhandle)
++{
++ register unsigned *dp, sz;
++ struct dload_state dl_state; /* internal state for this call */
++
++ /* blast our internal state */
++ dp = (unsigned *)&dl_state;
++ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1)
++ *dp++ = 0;
++
++ /* Enable _only_ BSS initialization if enabled by user */
++ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS)
++ dl_state.myoptions = DLOAD_INITBSS;
++
++ /* Check that mandatory arguments are present */
++ if (!module || !syms) {
++ dload_error(&dl_state, "Required parameter is NULL");
++ } else {
++ dl_state.strm = module;
++ dl_state.mysym = syms;
++ dload_headers(&dl_state);
++ if (!dl_state.dload_errcount)
++ dload_strings(&dl_state, false);
++ if (!dl_state.dload_errcount)
++ dload_sections(&dl_state);
++
++ if (init && !dl_state.dload_errcount) {
++ if (init->connect(init)) {
++ dl_state.myio = init;
++ dl_state.myalloc = alloc;
++ /* do now, before reducing symbols */
++ allocate_sections(&dl_state);
++ } else
++ dload_error(&dl_state, iconnect);
++ }
++
++ if (!dl_state.dload_errcount) {
++ /* fix up entry point address */
++ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1;
++ if (sref < dl_state.allocated_secn_count)
++ dl_state.dfile_hdr.df_entrypt +=
++ dl_state.ldr_sections[sref].run_addr;
++
++ dload_symbols(&dl_state);
++ }
++
++ init_module_handle(&dl_state);
++
++ /* dl_state.myio is either 0 or init at this point. */
++ if (dl_state.myio) {
++ if ((!dl_state.dload_errcount) &&
++ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) &&
++ (!init->execute(init,
++ dl_state.dfile_hdr.df_entrypt)))
++ dload_error(&dl_state, "Init->Execute Failed");
++ init->release(init);
++ }
++
++ symbol_table_free(&dl_state);
++ section_table_free(&dl_state);
++ string_table_free(&dl_state);
++
++ if (dl_state.dload_errcount) {
++ dynamic_unload_module(dl_state.myhandle, syms, alloc,
++ init);
++ dl_state.myhandle = NULL;
++ }
++ }
++
++ if (mhandle)
++ *mhandle = dl_state.myhandle; /* give back the handle */
++
++ return dl_state.dload_errcount;
++} /* DLOAD_File */
++
++/*************************************************************************
++ * Procedure dload_headers
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Loads the DOFF header and verify record. Deals with any byte-order
++ * issues and checks them for validity.
++ *********************************************************************** */
++#define COMBINED_HEADER_SIZE (sizeof(struct doff_filehdr_t)+ \
++ sizeof(struct doff_verify_rec_t))
++
++void dload_headers(struct dload_state *dlthis)
++{
++ u32 map;
++
++ /* Read the header and the verify record as one. If we don't get it
++ all, we're done */
++ if (dlthis->strm->read_buffer(dlthis->strm, &dlthis->dfile_hdr,
++ COMBINED_HEADER_SIZE) !=
++ COMBINED_HEADER_SIZE) {
++ DL_ERROR(readstrm, "File Headers");
++ return;
++ }
++ /*
++ * Verify that we have the byte order of the file correct.
++ * If not, must fix it before we can continue
++ */
++ map = REORDER_MAP(dlthis->dfile_hdr.df_byte_reshuffle);
++ if (map != REORDER_MAP(BYTE_RESHUFFLE_VALUE)) {
++ /* input is either byte-shuffled or bad */
++ if ((map & 0xFCFCFCFC) == 0) { /* no obviously bogus bits */
++ dload_reorder(&dlthis->dfile_hdr, COMBINED_HEADER_SIZE,
++ map);
++ }
++ if (dlthis->dfile_hdr.df_byte_reshuffle !=
++ BYTE_RESHUFFLE_VALUE) {
++ /* didn't fix the problem, the byte swap map is bad */
++ dload_error(dlthis,
++ "Bad byte swap map " FMT_UI32 " in header",
++ dlthis->dfile_hdr.df_byte_reshuffle);
++ return;
++ }
++ dlthis->reorder_map = map; /* keep map for future use */
++ }
++
++ /*
++ * Verify checksum of header and verify record
++ */
++ if (~dload_checksum(&dlthis->dfile_hdr,
++ sizeof(struct doff_filehdr_t)) ||
++ ~dload_checksum(&dlthis->verify,
++ sizeof(struct doff_verify_rec_t))) {
++ DL_ERROR(err_checksum, "header or verify record");
++ return;
++ }
++#if HOST_ENDIANNESS
++ dlthis->dfile_hdr.df_byte_reshuffle = map; /* put back for later */
++#endif
++
++ /* Check for valid target ID */
++ if ((dlthis->dfile_hdr.df_target_id != TARGET_ID) &&
++ -(dlthis->dfile_hdr.df_target_id != TMS470_ID)) {
++ dload_error(dlthis, "Bad target ID 0x%x and TARGET_ID 0x%x",
++ dlthis->dfile_hdr.df_target_id, TARGET_ID);
++ return;
++ }
++ /* Check for valid file format */
++ if ((dlthis->dfile_hdr.df_doff_version != DOFF0)) {
++ dload_error(dlthis, "Bad DOFF version 0x%x",
++ dlthis->dfile_hdr.df_doff_version);
++ return;
++ }
++
++ /*
++ * Apply reasonableness checks to count fields
++ */
++ if (dlthis->dfile_hdr.df_strtab_size > MAX_REASONABLE_STRINGTAB) {
++ dload_error(dlthis, "Excessive string table size " FMT_UI32,
++ dlthis->dfile_hdr.df_strtab_size);
++ return;
++ }
++ if (dlthis->dfile_hdr.df_no_scns > MAX_REASONABLE_SECTIONS) {
++ dload_error(dlthis, "Excessive section count 0x%x",
++ dlthis->dfile_hdr.df_no_scns);
++ return;
++ }
++#ifndef TARGET_ENDIANNESS
++ /*
++ * Check that endianness does not disagree with explicit specification
++ */
++ if ((dlthis->dfile_hdr.df_flags >> ALIGN_COFF_ENDIANNESS) &
++ dlthis->myoptions & ENDIANNESS_MASK) {
++ dload_error(dlthis,
++ "Input endianness disagrees with specified option");
++ return;
++ }
++ dlthis->big_e_target = dlthis->dfile_hdr.df_flags & DF_BIG;
++#endif
++
++} /* dload_headers */
++
++/* COFF Section Processing
++ *
++ * COFF sections are read in and retained intact. Each record is embedded
++ * in a new structure that records the updated load and
++ * run addresses of the section */
++
++static const char secn_errid[] = { "section" };
++
++/*************************************************************************
++ * Procedure dload_sections
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Loads the section records into an internal table.
++ *********************************************************************** */
++void dload_sections(struct dload_state *dlthis)
++{
++ s16 siz;
++ struct doff_scnhdr_t *shp;
++ unsigned nsecs = dlthis->dfile_hdr.df_no_scns;
++
++ /* allocate space for the DOFF section records */
++ siz = nsecs * sizeof(struct doff_scnhdr_t);
++ shp =
++ (struct doff_scnhdr_t *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ siz);
++ if (!shp) { /* not enough storage */
++ DL_ERROR(err_alloc, siz);
++ return;
++ }
++ dlthis->sect_hdrs = shp;
++
++ /* read in the section records */
++ if (dlthis->strm->read_buffer(dlthis->strm, shp, siz) != siz) {
++ DL_ERROR(readstrm, secn_errid);
++ return;
++ }
++
++ /* if we need to fix up byte order, do it now */
++ if (dlthis->reorder_map)
++ dload_reorder(shp, siz, dlthis->reorder_map);
++
++ /* check for validity */
++ if (~dload_checksum(dlthis->sect_hdrs, siz) !=
++ dlthis->verify.dv_scn_rec_checksum) {
++ DL_ERROR(err_checksum, secn_errid);
++ return;
++ }
++
++} /* dload_sections */
++
++/*****************************************************************************
++ * Procedure allocate_sections
++ *
++ * Parameters:
++ * alloc target memory allocator class
++ *
++ * Effect:
++ * Assigns new (target) addresses for sections
++ **************************************************************************** */
++static void allocate_sections(struct dload_state *dlthis)
++{
++ u16 curr_sect, nsecs, siz;
++ struct doff_scnhdr_t *shp;
++ struct ldr_section_info *asecs;
++ struct my_handle *hndl;
++ nsecs = dlthis->dfile_hdr.df_no_scns;
++ if (!nsecs)
++ return;
++ if ((dlthis->myalloc == NULL) &&
++ (dlthis->dfile_hdr.df_target_scns > 0)) {
++ DL_ERROR("Arg 3 (alloc) required but NULL", 0);
++ return;
++ }
++ /*
++ * allocate space for the module handle, which we will keep for unload
++ * purposes include an additional section store for an auto-generated
++ * trampoline section in case we need it.
++ */
++ siz = (dlthis->dfile_hdr.df_target_scns + 1) *
++ sizeof(struct ldr_section_info) + MY_HANDLE_SIZE;
++
++ hndl =
++ (struct my_handle *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ siz);
++ if (!hndl) { /* not enough storage */
++ DL_ERROR(err_alloc, siz);
++ return;
++ }
++ /* initialize the handle header */
++ hndl->dm.hnext = hndl->dm.hprev = hndl; /* circular list */
++ hndl->dm.hroot = NULL;
++ hndl->dm.dbthis = 0;
++ dlthis->myhandle = hndl; /* save away for return */
++ /* pointer to the section list of allocated sections */
++ dlthis->ldr_sections = asecs = hndl->secns;
++ /* * Insert names into all sections, make copies of
++ the sections we allocate */
++ shp = dlthis->sect_hdrs;
++ for (curr_sect = 0; curr_sect < nsecs; curr_sect++) {
++ u32 soffset = shp->ds_offset;
++#if BITS_PER_AU <= BITS_PER_BYTE
++ /* attempt to insert the name of this section */
++ if (soffset < dlthis->dfile_hdr.df_strtab_size)
++ ((struct ldr_section_info *)shp)->name =
++ dlthis->str_head + soffset;
++ else {
++ dload_error(dlthis, "Bad name offset in section %d",
++ curr_sect);
++ ((struct ldr_section_info *)shp)->name = NULL;
++ }
++#endif
++ /* allocate target storage for sections that require it */
++ if (ds_needs_allocation(shp)) {
++ *asecs = *(struct ldr_section_info *)shp;
++ asecs->context = 0; /* zero the context field */
++#if BITS_PER_AU > BITS_PER_BYTE
++ asecs->name = unpack_name(dlthis, soffset);
++ dlthis->debug_string_size = soffset + dlthis->temp_len;
++#else
++ dlthis->debug_string_size = soffset;
++#endif
++ if (dlthis->myalloc != NULL) {
++ if (!dlthis->myalloc->
++ dload_allocate(dlthis->myalloc, asecs,
++ ds_alignment(asecs->type))) {
++ dload_error(dlthis, tgtalloc,
++ asecs->name, asecs->size);
++ return;
++ }
++ }
++ /* keep address deltas in original section table */
++ shp->ds_vaddr = asecs->load_addr - shp->ds_vaddr;
++ shp->ds_paddr = asecs->run_addr - shp->ds_paddr;
++ dlthis->allocated_secn_count += 1;
++ } /* allocate target storage */
++ shp += 1;
++ asecs += 1;
++ }
++#if BITS_PER_AU <= BITS_PER_BYTE
++ dlthis->debug_string_size +=
++ strlen(dlthis->str_head + dlthis->debug_string_size) + 1;
++#endif
++} /* allocate sections */
++
++/*************************************************************************
++ * Procedure section_table_free
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Frees any state used by the symbol table.
++ *
++ * WARNING:
++ * This routine is not allowed to declare errors!
++ *********************************************************************** */
++static void section_table_free(struct dload_state *dlthis)
++{
++ struct doff_scnhdr_t *shp;
++
++ shp = dlthis->sect_hdrs;
++ if (shp)
++ dlthis->mysym->dload_deallocate(dlthis->mysym, shp);
++
++} /* section_table_free */
++
++/*************************************************************************
++ * Procedure dload_strings
++ *
++ * Parameters:
++ * sec_names_only If true only read in the "section names"
++ * portion of the string table
++ *
++ * Effect:
++ * Loads the DOFF string table into memory. DOFF keeps all strings in a
++ * big unsorted array. We just read that array into memory in bulk.
++ *********************************************************************** */
++static const char stringtbl[] = { "string table" };
++
++void dload_strings(struct dload_state *dlthis, bool sec_names_only)
++{
++ u32 ssiz;
++ char *strbuf;
++
++ if (sec_names_only) {
++ ssiz = BYTE_TO_HOST(DOFF_ALIGN
++ (dlthis->dfile_hdr.df_scn_name_size));
++ } else {
++ ssiz = BYTE_TO_HOST(DOFF_ALIGN
++ (dlthis->dfile_hdr.df_strtab_size));
++ }
++ if (ssiz == 0)
++ return;
++
++ /* get some memory for the string table */
++#if BITS_PER_AU > BITS_PER_BYTE
++ strbuf = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, ssiz +
++ dlthis->dfile_hdr.
++ df_max_str_len);
++#else
++ strbuf = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, ssiz);
++#endif
++ if (strbuf == NULL) {
++ DL_ERROR(err_alloc, ssiz);
++ return;
++ }
++ dlthis->str_head = strbuf;
++#if BITS_PER_AU > BITS_PER_BYTE
++ dlthis->str_temp = strbuf + ssiz;
++#endif
++ /* read in the strings and verify them */
++ if ((unsigned)(dlthis->strm->read_buffer(dlthis->strm, strbuf,
++ ssiz)) != ssiz) {
++ DL_ERROR(readstrm, stringtbl);
++ }
++ /* if we need to fix up byte order, do it now */
++#ifndef _BIG_ENDIAN
++ if (dlthis->reorder_map)
++ dload_reorder(strbuf, ssiz, dlthis->reorder_map);
++
++ if ((!sec_names_only) && (~dload_checksum(strbuf, ssiz) !=
++ dlthis->verify.dv_str_tab_checksum)) {
++ DL_ERROR(err_checksum, stringtbl);
++ }
++#else
++ if (dlthis->dfile_hdr.df_byte_reshuffle !=
++ HOST_BYTE_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) {
++ /* put strings in big-endian order, not in PC order */
++ dload_reorder(strbuf, ssiz,
++ HOST_BYTE_ORDER(dlthis->
++ dfile_hdr.df_byte_reshuffle));
++ }
++ if ((!sec_names_only) && (~dload_reverse_checksum(strbuf, ssiz) !=
++ dlthis->verify.dv_str_tab_checksum)) {
++ DL_ERROR(err_checksum, stringtbl);
++ }
++#endif
++} /* dload_strings */
++
++/*************************************************************************
++ * Procedure string_table_free
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Frees any state used by the string table.
++ *
++ * WARNING:
++ * This routine is not allowed to declare errors!
++ ************************************************************************ */
++static void string_table_free(struct dload_state *dlthis)
++{
++ if (dlthis->str_head)
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ dlthis->str_head);
++
++} /* string_table_free */
++
++/*
++ * Symbol Table Maintenance Functions
++ *
++ * COFF symbols are read by dload_symbols(), which is called after
++ * sections have been allocated. Symbols which might be used in
++ * relocation (ie, not debug info) are retained in an internal temporary
++ * compressed table (type local_symbol). A particular symbol is recovered
++ * by index by calling dload_find_symbol(). dload_find_symbol
++ * reconstructs a more explicit representation (type SLOTVEC) which is
++ * used by reloc.c
++ */
++/* real size of debug header */
++#define DBG_HDR_SIZE (sizeof(struct dll_module) - sizeof(struct dll_sect))
++
++static const char sym_errid[] = { "symbol" };
++
++/**************************************************************************
++ * Procedure dload_symbols
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Reads in symbols and retains ones that might be needed for relocation
++ * purposes.
++ *********************************************************************** */
++/* size of symbol buffer no bigger than target data buffer, to limit stack
++ * usage */
++#define MY_SYM_BUF_SIZ (BYTE_TO_HOST(IMAGE_PACKET_SIZE)/\
++ sizeof(struct doff_syment_t))
++
++static void dload_symbols(struct dload_state *dlthis)
++{
++ u32 sym_count, siz, dsiz, symbols_left;
++ u32 checks;
++ struct local_symbol *sp;
++ struct dynload_symbol *symp;
++ struct dynload_symbol *newsym;
++
++ sym_count = dlthis->dfile_hdr.df_no_syms;
++ if (sym_count == 0)
++ return;
++
++ /*
++ * We keep a local symbol table for all of the symbols in the input.
++ * This table contains only section & value info, as we do not have
++ * to do any name processing for locals. We reuse this storage
++ * as a temporary for .dllview record construction.
++ * Allocate storage for the whole table. Add 1 to the section count
++ * in case a trampoline section is auto-generated as well as the
++ * size of the trampoline section name so DLLView doens't get lost.
++ */
++
++ siz = sym_count * sizeof(struct local_symbol);
++ dsiz = DBG_HDR_SIZE +
++ (sizeof(struct dll_sect) * dlthis->allocated_secn_count) +
++ BYTE_TO_HOST_ROUND(dlthis->debug_string_size + 1);
++ if (dsiz > siz)
++ siz = dsiz; /* larger of symbols and .dllview temp */
++ sp = (struct local_symbol *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ siz);
++ if (!sp) {
++ DL_ERROR(err_alloc, siz);
++ return;
++ }
++ dlthis->local_symtab = sp;
++ /* Read the symbols in the input, store them in the table, and post any
++ * globals to the global symbol table. In the process, externals
++ become defined from the global symbol table */
++ checks = dlthis->verify.dv_sym_tab_checksum;
++ symbols_left = sym_count;
++ do { /* read all symbols */
++ char *sname;
++ u32 val;
++ s32 delta;
++ struct doff_syment_t *input_sym;
++ unsigned syms_in_buf;
++ struct doff_syment_t my_sym_buf[MY_SYM_BUF_SIZ];
++ input_sym = my_sym_buf;
++ syms_in_buf = symbols_left > MY_SYM_BUF_SIZ ?
++ MY_SYM_BUF_SIZ : symbols_left;
++ siz = syms_in_buf * sizeof(struct doff_syment_t);
++ if (dlthis->strm->read_buffer(dlthis->strm, input_sym, siz) !=
++ siz) {
++ DL_ERROR(readstrm, sym_errid);
++ return;
++ }
++ if (dlthis->reorder_map)
++ dload_reorder(input_sym, siz, dlthis->reorder_map);
++
++ checks += dload_checksum(input_sym, siz);
++ do { /* process symbols in buffer */
++ symbols_left -= 1;
++ /* attempt to derive the name of this symbol */
++ sname = NULL;
++ if (input_sym->dn_offset > 0) {
++#if BITS_PER_AU <= BITS_PER_BYTE
++ if ((u32) input_sym->dn_offset <
++ dlthis->dfile_hdr.df_strtab_size)
++ sname = dlthis->str_head +
++ BYTE_TO_HOST(input_sym->dn_offset);
++ else
++ dload_error(dlthis,
++ "Bad name offset in symbol "
++ " %d", symbols_left);
++#else
++ sname = unpack_name(dlthis,
++ input_sym->dn_offset);
++#endif
++ }
++ val = input_sym->dn_value;
++ delta = 0;
++ sp->sclass = input_sym->dn_sclass;
++ sp->secnn = input_sym->dn_scnum;
++ /* if this is an undefined symbol,
++ * define it (or fail) now */
++ if (sp->secnn == DN_UNDEF) {
++ /* pointless for static undefined */
++ if (input_sym->dn_sclass != DN_EXT)
++ goto loop_cont;
++
++ /* try to define symbol from previously
++ * loaded images */
++ symp = dlthis->mysym->find_matching_symbol
++ (dlthis->mysym, sname);
++ if (!symp) {
++ DL_ERROR
++ ("Undefined external symbol %s",
++ sname);
++ goto loop_cont;
++ }
++ val = delta = symp->value;
++#ifdef ENABLE_TRAMP_DEBUG
++ dload_syms_error(dlthis->mysym,
++ "===> ext sym [%s] at %x",
++ sname, val);
++#endif
++
++ goto loop_cont;
++ }
++ /* symbol defined by this module */
++ if (sp->secnn > 0) {
++ /* symbol references a section */
++ if ((unsigned)sp->secnn <=
++ dlthis->allocated_secn_count) {
++ /* section was allocated */
++ struct doff_scnhdr_t *srefp =
++ &dlthis->sect_hdrs[sp->secnn - 1];
++
++ if (input_sym->dn_sclass ==
++ DN_STATLAB ||
++ input_sym->dn_sclass == DN_EXTLAB) {
++ /* load */
++ delta = srefp->ds_vaddr;
++ } else {
++ /* run */
++ delta = srefp->ds_paddr;
++ }
++ val += delta;
++ }
++ goto loop_itr;
++ }
++ /* This symbol is an absolute symbol */
++ if (sp->secnn == DN_ABS && ((sp->sclass == DN_EXT) ||
++ (sp->sclass ==
++ DN_EXTLAB))) {
++ symp =
++ dlthis->mysym->find_matching_symbol(dlthis->
++ mysym,
++ sname);
++ if (!symp)
++ goto loop_itr;
++ /* This absolute symbol is already defined. */
++ if (symp->value == input_sym->dn_value) {
++ /* If symbol values are equal, continue
++ * but don't add to the global symbol
++ * table */
++ sp->value = val;
++ sp->delta = delta;
++ sp += 1;
++ input_sym += 1;
++ continue;
++ } else {
++ /* If symbol values are not equal,
++ * return with redefinition error */
++ DL_ERROR("Absolute symbol %s is "
++ "defined multiple times with "
++ "different values", sname);
++ return;
++ }
++ }
++loop_itr:
++ /* if this is a global symbol, post it to the
++ * global table */
++ if (input_sym->dn_sclass == DN_EXT ||
++ input_sym->dn_sclass == DN_EXTLAB) {
++ /* Keep this global symbol for subsequent
++ * modules. Don't complain on error, to allow
++ * symbol API to suppress global symbols */
++ if (!sname)
++ goto loop_cont;
++
++ newsym = dlthis->mysym->add_to_symbol_table
++ (dlthis->mysym, sname,
++ (unsigned)dlthis->myhandle);
++ if (newsym)
++ newsym->value = val;
++
++ } /* global */
++loop_cont:
++ sp->value = val;
++ sp->delta = delta;
++ sp += 1;
++ input_sym += 1;
++ } while ((syms_in_buf -= 1) > 0); /* process sym in buf */
++ } while (symbols_left > 0); /* read all symbols */
++ if (~checks)
++ dload_error(dlthis, "Checksum of symbols failed");
++
++} /* dload_symbols */
++
++/*****************************************************************************
++ * Procedure symbol_table_free
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Frees any state used by the symbol table.
++ *
++ * WARNING:
++ * This routine is not allowed to declare errors!
++ **************************************************************************** */
++static void symbol_table_free(struct dload_state *dlthis)
++{
++ if (dlthis->local_symtab) {
++ if (dlthis->dload_errcount) { /* blow off our symbols */
++ dlthis->mysym->purge_symbol_table(dlthis->mysym,
++ (unsigned)
++ dlthis->myhandle);
++ }
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ dlthis->local_symtab);
++ }
++} /* symbol_table_free */
++
++/* .cinit Processing
++ *
++ * The dynamic loader does .cinit interpretation. cload_cinit()
++ * acts as a special write-to-target function, in that it takes relocated
++ * data from the normal data flow, and interprets it as .cinit actions.
++ * Because the normal data flow does not necessarily process the whole
++ * .cinit section in one buffer, cload_cinit() must be prepared to
++ * interpret the data piecemeal. A state machine is used for this
++ * purpose.
++ */
++
++/* The following are only for use by reloc.c and things it calls */
++static const struct ldr_section_info cinit_info_init = { cinitname, 0, 0,
++ (ldr_addr)-1, 0, DLOAD_BSS, 0
++};
++
++/*************************************************************************
++ * Procedure cload_cinit
++ *
++ * Parameters:
++ * ipacket Pointer to data packet to be loaded
++ *
++ * Effect:
++ * Interprets the data in the buffer as .cinit data, and performs the
++ * appropriate initializations.
++ *********************************************************************** */
++static void cload_cinit(struct dload_state *dlthis,
++ struct image_packet_t *ipacket)
++{
++#if TDATA_TO_HOST(CINIT_COUNT)*BITS_PER_AU > 16
++ s32 init_count, left;
++#else
++ s16 init_count, left;
++#endif
++ unsigned char *pktp = ipacket->img_data;
++ unsigned char *pktend = pktp + BYTE_TO_HOST_ROUND(ipacket->packet_size);
++ int temp;
++ ldr_addr atmp;
++ struct ldr_section_info cinit_info;
++
++ /* PROCESS ALL THE INITIALIZATION RECORDS THE BUFFER. */
++ while (true) {
++ left = pktend - pktp;
++ switch (dlthis->cinit_state) {
++ case CI_COUNT: /* count field */
++ if (left < TDATA_TO_HOST(CINIT_COUNT))
++ goto loopexit;
++ temp = dload_unpack(dlthis, (tgt_au_t *) pktp,
++ CINIT_COUNT * TDATA_AU_BITS, 0,
++ ROP_SGN);
++ pktp += TDATA_TO_HOST(CINIT_COUNT);
++ /* negative signifies BSS table, zero means done */
++ if (temp <= 0) {
++ dlthis->cinit_state = CI_DONE;
++ break;
++ }
++ dlthis->cinit_count = temp;
++ dlthis->cinit_state = CI_ADDRESS;
++ break;
++#if CINIT_ALIGN < CINIT_ADDRESS
++ case CI_PARTADDRESS:
++ pktp -= TDATA_TO_HOST(CINIT_ALIGN);
++ /* back up pointer into space courtesy of caller */
++ *(uint16_t *) pktp = dlthis->cinit_addr;
++ /* stuff in saved bits !! FALL THRU !! */
++#endif
++ case CI_ADDRESS: /* Address field for a copy packet */
++ if (left < TDATA_TO_HOST(CINIT_ADDRESS)) {
++#if CINIT_ALIGN < CINIT_ADDRESS
++ if (left == TDATA_TO_HOST(CINIT_ALIGN)) {
++ /* address broken into halves */
++ dlthis->cinit_addr = *(uint16_t *) pktp;
++ /* remember 1st half */
++ dlthis->cinit_state = CI_PARTADDRESS;
++ left = 0;
++ }
++#endif
++ goto loopexit;
++ }
++ atmp = dload_unpack(dlthis, (tgt_au_t *) pktp,
++ CINIT_ADDRESS * TDATA_AU_BITS, 0,
++ ROP_UNS);
++ pktp += TDATA_TO_HOST(CINIT_ADDRESS);
++#if CINIT_PAGE_BITS > 0
++ dlthis->cinit_page = atmp &
++ ((1 << CINIT_PAGE_BITS) - 1);
++ atmp >>= CINIT_PAGE_BITS;
++#else
++ dlthis->cinit_page = CINIT_DEFAULT_PAGE;
++#endif
++ dlthis->cinit_addr = atmp;
++ dlthis->cinit_state = CI_COPY;
++ break;
++ case CI_COPY: /* copy bits to the target */
++ init_count = HOST_TO_TDATA(left);
++ if (init_count > dlthis->cinit_count)
++ init_count = dlthis->cinit_count;
++ if (init_count == 0)
++ goto loopexit; /* get more bits */
++ cinit_info = cinit_info_init;
++ cinit_info.page = dlthis->cinit_page;
++ if (!dlthis->myio->writemem(dlthis->myio, pktp,
++ TDATA_TO_TADDR
++ (dlthis->cinit_addr),
++ &cinit_info,
++ TDATA_TO_HOST(init_count))) {
++ dload_error(dlthis, initfail, "write",
++ dlthis->cinit_addr);
++ }
++ dlthis->cinit_count -= init_count;
++ if (dlthis->cinit_count <= 0) {
++ dlthis->cinit_state = CI_COUNT;
++ init_count = (init_count + CINIT_ALIGN - 1) &
++ -CINIT_ALIGN;
++ /* align to next init */
++ }
++ pktp += TDATA_TO_HOST(init_count);
++ dlthis->cinit_addr += init_count;
++ break;
++ case CI_DONE: /* no more .cinit to do */
++ return;
++ } /* switch (cinit_state) */
++ } /* while */
++
++loopexit:
++ if (left > 0) {
++ dload_error(dlthis, "%d bytes left over in cinit packet", left);
++ dlthis->cinit_state = CI_DONE; /* left over bytes are bad */
++ }
++} /* cload_cinit */
++
++/* Functions to interface to reloc.c
++ *
++ * reloc.c is the relocation module borrowed from the linker, with
++ * minimal (we hope) changes for our purposes. cload_sect_data() invokes
++ * this module on a section to relocate and load the image data for that
++ * section. The actual read and write actions are supplied by the global
++ * routines below.
++ */
++
++/************************************************************************
++ * Procedure relocate_packet
++ *
++ * Parameters:
++ * ipacket Pointer to an image packet to relocate
++ *
++ * Effect:
++ * Performs the required relocations on the packet. Returns a checksum
++ * of the relocation operations.
++ *********************************************************************** */
++#define MY_RELOC_BUF_SIZ 8
++/* careful! exists at the same time as the image buffer */
++static int relocate_packet(struct dload_state *dlthis,
++ struct image_packet_t *ipacket,
++ u32 *checks, bool *tramps_generated)
++{
++ u32 rnum;
++ *tramps_generated = false;
++
++ rnum = ipacket->num_relocs;
++ do { /* all relocs */
++ unsigned rinbuf;
++ int siz;
++ struct reloc_record_t *rp, rrec[MY_RELOC_BUF_SIZ];
++ rp = rrec;
++ rinbuf = rnum > MY_RELOC_BUF_SIZ ? MY_RELOC_BUF_SIZ : rnum;
++ siz = rinbuf * sizeof(struct reloc_record_t);
++ if (dlthis->strm->read_buffer(dlthis->strm, rp, siz) != siz) {
++ DL_ERROR(readstrm, "relocation");
++ return 0;
++ }
++ /* reorder the bytes if need be */
++ if (dlthis->reorder_map)
++ dload_reorder(rp, siz, dlthis->reorder_map);
++
++ *checks += dload_checksum(rp, siz);
++ do {
++ /* perform the relocation operation */
++ dload_relocate(dlthis, (tgt_au_t *) ipacket->img_data,
++ rp, tramps_generated, false);
++ rp += 1;
++ rnum -= 1;
++ } while ((rinbuf -= 1) > 0);
++ } while (rnum > 0); /* all relocs */
++ /* If trampoline(s) were generated, we need to do an update of the
++ * trampoline copy of the packet since a 2nd phase relo will be done
++ * later. */
++ if (*tramps_generated == true) {
++ dload_tramp_pkt_udpate(dlthis,
++ (dlthis->image_secn -
++ dlthis->ldr_sections),
++ dlthis->image_offset, ipacket);
++ }
++
++ return 1;
++} /* dload_read_reloc */
++
++#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32))
++
++/* VERY dangerous */
++static const char imagepak[] = { "image packet" };
++
++/*************************************************************************
++ * Procedure dload_data
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Read image data from input file, relocate it, and download it to the
++ * target.
++ *********************************************************************** */
++static void dload_data(struct dload_state *dlthis)
++{
++ u16 curr_sect;
++ struct doff_scnhdr_t *sptr = dlthis->sect_hdrs;
++ struct ldr_section_info *lptr = dlthis->ldr_sections;
++#ifdef OPT_ZERO_COPY_LOADER
++ bool zero_copy = false;
++#endif
++ u8 *dest;
++
++ struct {
++ struct image_packet_t ipacket;
++ u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)];
++ } ibuf;
++
++ /* Indicates whether CINIT processing has occurred */
++ bool cinit_processed = false;
++
++ /* Loop through the sections and load them one at a time.
++ */
++ for (curr_sect = 0; curr_sect < dlthis->dfile_hdr.df_no_scns;
++ curr_sect += 1) {
++ if (ds_needs_download(sptr)) {
++ s32 nip;
++ ldr_addr image_offset = 0;
++ /* set relocation info for this section */
++ if (curr_sect < dlthis->allocated_secn_count)
++ dlthis->delta_runaddr = sptr->ds_paddr;
++ else {
++ lptr = (struct ldr_section_info *)sptr;
++ dlthis->delta_runaddr = 0;
++ }
++ dlthis->image_secn = lptr;
++#if BITS_PER_AU > BITS_PER_BYTE
++ lptr->name = unpack_name(dlthis, sptr->ds_offset);
++#endif
++ nip = sptr->ds_nipacks;
++ while ((nip -= 1) >= 0) { /* process packets */
++
++ s32 ipsize;
++ u32 checks;
++ bool tramp_generated = false;
++
++ /* get the fixed header bits */
++ if (dlthis->strm->read_buffer(dlthis->strm,
++ &ibuf.ipacket,
++ IPH_SIZE) !=
++ IPH_SIZE) {
++ DL_ERROR(readstrm, imagepak);
++ return;
++ }
++ /* reorder the header if need be */
++ if (dlthis->reorder_map) {
++ dload_reorder(&ibuf.ipacket, IPH_SIZE,
++ dlthis->reorder_map);
++ }
++ /* now read the rest of the packet */
++ ipsize =
++ BYTE_TO_HOST(DOFF_ALIGN
++ (ibuf.ipacket.packet_size));
++ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) {
++ DL_ERROR("Bad image packet size %d",
++ ipsize);
++ return;
++ }
++ dest = ibuf.bufr;
++#ifdef OPT_ZERO_COPY_LOADER
++ zero_copy = false;
++ if (!dload_check_type(sptr, DLOAD_CINIT) {
++ dlthis->myio->writemem(dlthis->myio,
++ &dest,
++ lptr->load_addr +
++ image_offset,
++ lptr, 0);
++ zero_copy = (dest != ibuf.bufr);
++ }
++#endif
++ /* End of determination */
++
++ if (dlthis->strm->read_buffer(dlthis->strm,
++ ibuf.bufr,
++ ipsize) !=
++ ipsize) {
++ DL_ERROR(readstrm, imagepak);
++ return;
++ }
++ ibuf.ipacket.img_data = dest;
++
++ /* reorder the bytes if need be */
++#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16)
++ if (dlthis->reorder_map) {
++ dload_reorder(dest, ipsize,
++ dlthis->reorder_map);
++ }
++ checks = dload_checksum(dest, ipsize);
++#else
++ if (dlthis->dfile_hdr.df_byte_reshuffle !=
++ TARGET_ORDER(REORDER_MAP
++ (BYTE_RESHUFFLE_VALUE))) {
++ /* put image bytes in big-endian order,
++ * not PC order */
++ dload_reorder(dest, ipsize,
++ TARGET_ORDER
++ (dlthis->dfile_hdr.
++ df_byte_reshuffle));
++ }
++#if TARGET_AU_BITS > 8
++ checks = dload_reverse_checksum16(dest, ipsize);
++#else
++ checks = dload_reverse_checksum(dest, ipsize);
++#endif
++#endif
++
++ checks += dload_checksum(&ibuf.ipacket,
++ IPH_SIZE);
++ /* relocate the image bits as needed */
++ if (ibuf.ipacket.num_relocs) {
++ dlthis->image_offset = image_offset;
++ if (!relocate_packet(dlthis,
++ &ibuf.ipacket,
++ &checks,
++ &tramp_generated))
++ return; /* serious error */
++ }
++ if (~checks)
++ DL_ERROR(err_checksum, imagepak);
++ /* Only write the result to the target if no
++ * trampoline was generated. Otherwise it
++ *will be done during trampoline finalize. */
++
++ if (tramp_generated == false) {
++
++ /* stuff the result into target
++ * memory */
++ if (dload_check_type(sptr,
++ DLOAD_CINIT)) {
++ cload_cinit(dlthis,
++ &ibuf.ipacket);
++ cinit_processed = true;
++ } else {
++#ifdef OPT_ZERO_COPY_LOADER
++ if (!zero_copy) {
++#endif
++ /* FIXME */
++ if (!dlthis->myio->
++ writemem(dlthis->
++ myio,
++ ibuf.bufr,
++ lptr->
++ load_addr +
++ image_offset,
++ lptr,
++ BYTE_TO_HOST
++ (ibuf.
++ ipacket.
++ packet_size))) {
++ DL_ERROR
++ ("Write to "
++ FMT_UI32
++ " failed",
++ lptr->
++ load_addr +
++ image_offset);
++ }
++#ifdef OPT_ZERO_COPY_LOADER
++ }
++#endif
++ }
++ }
++ image_offset +=
++ BYTE_TO_TADDR(ibuf.ipacket.packet_size);
++ } /* process packets */
++ /* if this is a BSS section, we may want to fill it */
++ if (!dload_check_type(sptr, DLOAD_BSS))
++ goto loop_cont;
++
++ if (!(dlthis->myoptions & DLOAD_INITBSS))
++ goto loop_cont;
++
++ if (cinit_processed) {
++ /* Don't clear BSS after load-time
++ * initialization */
++ DL_ERROR
++ ("Zero-initialization at " FMT_UI32
++ " after " "load-time initialization!",
++ lptr->load_addr);
++ goto loop_cont;
++ }
++ /* fill the .bss area */
++ dlthis->myio->fillmem(dlthis->myio,
++ TADDR_TO_HOST(lptr->load_addr),
++ lptr, TADDR_TO_HOST(lptr->size),
++ DLOAD_FILL_BSS);
++ goto loop_cont;
++ }
++ /* if DS_DOWNLOAD_MASK */
++ /* If not loading, but BSS, zero initialize */
++ if (!dload_check_type(sptr, DLOAD_BSS))
++ goto loop_cont;
++
++ if (!(dlthis->myoptions & DLOAD_INITBSS))
++ goto loop_cont;
++
++ if (curr_sect >= dlthis->allocated_secn_count)
++ lptr = (struct ldr_section_info *)sptr;
++
++ if (cinit_processed) {
++ /*Don't clear BSS after load-time initialization */
++ DL_ERROR("Zero-initialization at " FMT_UI32
++ " attempted after "
++ "load-time initialization!", lptr->load_addr);
++ goto loop_cont;
++ }
++ /* fill the .bss area */
++ dlthis->myio->fillmem(dlthis->myio,
++ TADDR_TO_HOST(lptr->load_addr), lptr,
++ TADDR_TO_HOST(lptr->size),
++ DLOAD_FILL_BSS);
++loop_cont:
++ sptr += 1;
++ lptr += 1;
++ } /* load sections */
++
++ /* Finalize any trampolines that were created during the load */
++ if (dload_tramp_finalize(dlthis) == 0) {
++ DL_ERROR("Finalization of auto-trampolines (size = " FMT_UI32
++ ") failed", dlthis->tramp.tramp_sect_next_addr);
++ }
++} /* dload_data */
++
++/*************************************************************************
++ * Procedure dload_reorder
++ *
++ * Parameters:
++ * data 32-bit aligned pointer to data to be byte-swapped
++ * dsiz size of the data to be reordered in sizeof() units.
++ * map 32-bit map defining how to reorder the data. Value
++ * must be REORDER_MAP() of some permutation
++ * of 0x00 01 02 03
++ *
++ * Effect:
++ * Re-arranges the bytes in each word according to the map specified.
++ *
++ *********************************************************************** */
++/* mask for byte shift count */
++#define SHIFT_COUNT_MASK (3 << LOG_BITS_PER_BYTE)
++
++void dload_reorder(void *data, int dsiz, unsigned int map)
++{
++ register u32 tmp, tmap, datv;
++ u32 *dp = (u32 *) data;
++
++ map <<= LOG_BITS_PER_BYTE; /* align map with SHIFT_COUNT_MASK */
++ do {
++ tmp = 0;
++ datv = *dp;
++ tmap = map;
++ do {
++ tmp |= (datv & BYTE_MASK) << (tmap & SHIFT_COUNT_MASK);
++ tmap >>= BITS_PER_BYTE;
++ } while (datv >>= BITS_PER_BYTE);
++ *dp++ = tmp;
++ } while ((dsiz -= sizeof(u32)) > 0);
++} /* dload_reorder */
++
++/*************************************************************************
++ * Procedure dload_checksum
++ *
++ * Parameters:
++ * data 32-bit aligned pointer to data to be checksummed
++ * siz size of the data to be checksummed in sizeof() units.
++ *
++ * Effect:
++ * Returns a checksum of the specified block
++ *
++ *********************************************************************** */
++u32 dload_checksum(void *data, unsigned siz)
++{
++ u32 sum;
++ u32 *dp;
++ int left;
++
++ sum = 0;
++ dp = (u32 *) data;
++ for (left = siz; left > 0; left -= sizeof(u32))
++ sum += *dp++;
++ return sum;
++} /* dload_checksum */
++
++#if HOST_ENDIANNESS
++/*************************************************************************
++ * Procedure dload_reverse_checksum
++ *
++ * Parameters:
++ * data 32-bit aligned pointer to data to be checksummed
++ * siz size of the data to be checksummed in sizeof() units.
++ *
++ * Effect:
++ * Returns a checksum of the specified block, which is assumed to be bytes
++ * in big-endian order.
++ *
++ * Notes:
++ * In a big-endian host, things like the string table are stored as bytes
++ * in host order. But dllcreate always checksums in little-endian order.
++ * It is most efficient to just handle the difference a word at a time.
++ *
++ ********************************************************************** */
++u32 dload_reverse_checksum(void *data, unsigned siz)
++{
++ u32 sum, temp;
++ u32 *dp;
++ int left;
++
++ sum = 0;
++ dp = (u32 *) data;
++
++ for (left = siz; left > 0; left -= sizeof(u32)) {
++ temp = *dp++;
++ sum += temp << BITS_PER_BYTE * 3;
++ sum += temp >> BITS_PER_BYTE * 3;
++ sum += (temp >> BITS_PER_BYTE) & (BYTE_MASK << BITS_PER_BYTE);
++ sum += (temp & (BYTE_MASK << BITS_PER_BYTE)) << BITS_PER_BYTE;
++ }
++
++ return sum;
++} /* dload_reverse_checksum */
++
++#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32)
++u32 dload_reverse_checksum16(void *data, unsigned siz)
++{
++ uint_fast32_t sum, temp;
++ u32 *dp;
++ int left;
++
++ sum = 0;
++ dp = (u32 *) data;
++
++ for (left = siz; left > 0; left -= sizeof(u32)) {
++ temp = *dp++;
++ sum += temp << BITS_PER_BYTE * 2;
++ sum += temp >> BITS_PER_BYTE * 2;
++ }
++
++ return sum;
++} /* dload_reverse_checksum16 */
++#endif
++#endif
++
++/*************************************************************************
++ * Procedure swap_words
++ *
++ * Parameters:
++ * data 32-bit aligned pointer to data to be swapped
++ * siz size of the data to be swapped.
++ * bitmap Bit map of how to swap each 32-bit word; 1 => 2 shorts,
++ * 0 => 1 long
++ *
++ * Effect:
++ * Swaps the specified data according to the specified map
++ *
++ *********************************************************************** */
++static void swap_words(void *data, unsigned siz, unsigned bitmap)
++{
++ register int i;
++#if TARGET_AU_BITS < 16
++ register u16 *sp;
++#endif
++ register u32 *lp;
++
++ siz /= sizeof(u16);
++
++#if TARGET_AU_BITS < 16
++ /* pass 1: do all the bytes */
++ i = siz;
++ sp = (u16 *) data;
++ do {
++ register u16 tmp;
++ tmp = *sp;
++ *sp++ = SWAP16BY8(tmp);
++ } while ((i -= 1) > 0);
++#endif
++
++#if TARGET_AU_BITS < 32
++ /* pass 2: fixup the 32-bit words */
++ i = siz >> 1;
++ lp = (u32 *) data;
++ do {
++ if ((bitmap & 1) == 0) {
++ register u32 tmp;
++ tmp = *lp;
++ *lp = SWAP32BY16(tmp);
++ }
++ lp += 1;
++ bitmap >>= 1;
++ } while ((i -= 1) > 0);
++#endif
++} /* swap_words */
++
++/*************************************************************************
++ * Procedure copy_tgt_strings
++ *
++ * Parameters:
++ * dstp Destination address. Assumed to be 32-bit aligned
++ * srcp Source address. Assumed to be 32-bit aligned
++ * charcount Number of characters to copy.
++ *
++ * Effect:
++ * Copies strings from the source (which is in usual .dof file order on
++ * the loading processor) to the destination buffer (which should be in proper
++ * target addressable unit order). Makes sure the last string in the
++ * buffer is NULL terminated (for safety).
++ * Returns the first unused destination address.
++ *********************************************************************** */
++static char *copy_tgt_strings(void *dstp, void *srcp, unsigned charcount)
++{
++ register tgt_au_t *src = (tgt_au_t *) srcp;
++ register tgt_au_t *dst = (tgt_au_t *) dstp;
++ register int cnt = charcount;
++ do {
++#if TARGET_AU_BITS <= BITS_PER_AU
++ /* byte-swapping issues may exist for strings on target */
++ *dst++ = *src++;
++#else
++ *dst++ = *src++;
++#endif
++ } while ((cnt -= (sizeof(tgt_au_t) * BITS_PER_AU / BITS_PER_BYTE)) > 0);
++ /*apply force to make sure that the string table has null terminator */
++#if (BITS_PER_AU == BITS_PER_BYTE) && (TARGET_AU_BITS == BITS_PER_BYTE)
++ dst[-1] = 0;
++#else
++ /* little endian */
++ dst[-1] &= (1 << (BITS_PER_AU - BITS_PER_BYTE)) - 1;
++#endif
++ return (char *)dst;
++} /* copy_tgt_strings */
++
++/*************************************************************************
++ * Procedure init_module_handle
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Initializes the module handle we use to enable unloading, and installs
++ * the debug information required by the target.
++ *
++ * Notes:
++ * The handle returned from dynamic_load_module needs to encapsulate all the
++ * allocations done for the module, and enable them plus the modules symbols to
++ * be deallocated.
++ *
++ *********************************************************************** */
++#ifndef _BIG_ENDIAN
++static const struct ldr_section_info dllview_info_init = { ".dllview", 0, 0,
++ (ldr_addr)-1, DBG_LIST_PAGE, DLOAD_DATA, 0
++};
++#else
++static const struct ldr_section_info dllview_info_init = { ".dllview", 0, 0,
++ (ldr_addr)-1, DLOAD_DATA, DBG_LIST_PAGE, 0
++};
++#endif
++static void init_module_handle(struct dload_state *dlthis)
++{
++ struct my_handle *hndl;
++ u16 curr_sect;
++ struct ldr_section_info *asecs;
++ struct dll_module *dbmod;
++ struct dll_sect *dbsec;
++ struct dbg_mirror_root *mlist;
++ register char *cp;
++ struct modules_header mhdr;
++ struct ldr_section_info dllview_info;
++ struct dynload_symbol *debug_mirror_sym;
++ hndl = dlthis->myhandle;
++ if (!hndl)
++ return; /* must be errors detected, so forget it */
++
++ /* Store the section count */
++ hndl->secn_count = dlthis->allocated_secn_count;
++
++ /* If a trampoline section was created, add it in */
++ if (dlthis->tramp.tramp_sect_next_addr != 0)
++ hndl->secn_count += 1;
++
++ hndl->secn_count = hndl->secn_count << 1;
++
++ hndl->secn_count = dlthis->allocated_secn_count << 1;
++#ifndef TARGET_ENDIANNESS
++ if (dlthis->big_e_target)
++ hndl->secn_count += 1; /* flag for big-endian */
++#endif
++ if (dlthis->dload_errcount)
++ return; /* abandon if errors detected */
++ /* Locate the symbol that names the header for the CCS debug list
++ of modules. If not found, we just don't generate the debug record.
++ If found, we create our modules list. We make sure to create the
++ loader_dllview_root even if there is no relocation info to record,
++ just to try to put both symbols in the same symbol table and
++ module. */
++ debug_mirror_sym = dlthis->mysym->find_matching_symbol(dlthis->mysym,
++ loader_dllview_root);
++ if (!debug_mirror_sym) {
++ struct dynload_symbol *dlmodsym;
++ struct dbg_mirror_root *mlst;
++
++ /* our root symbol is not yet present;
++ check if we have DLModules defined */
++ dlmodsym = dlthis->mysym->find_matching_symbol(dlthis->mysym,
++ LINKER_MODULES_HEADER);
++ if (!dlmodsym)
++ return; /* no DLModules list so no debug info */
++ /* if we have DLModules defined, construct our header */
++ mlst = (struct dbg_mirror_root *)
++ dlthis->mysym->dload_allocate(dlthis->mysym,
++ sizeof(struct
++ dbg_mirror_root));
++ if (!mlst) {
++ DL_ERROR(err_alloc, sizeof(struct dbg_mirror_root));
++ return;
++ }
++ mlst->hnext = NULL;
++ mlst->changes = 0;
++ mlst->refcount = 0;
++ mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value);
++ /* add our root symbol */
++ debug_mirror_sym = dlthis->mysym->add_to_symbol_table
++ (dlthis->mysym, loader_dllview_root,
++ (unsigned)dlthis->myhandle);
++ if (!debug_mirror_sym) {
++ /* failed, recover memory */
++ dlthis->mysym->dload_deallocate(dlthis->mysym, mlst);
++ return;
++ }
++ debug_mirror_sym->value = (u32) mlst;
++ }
++ /* First create the DLLview record and stuff it into the buffer.
++ Then write it to the DSP. Record pertinent locations in our hndl,
++ and add it to the per-processor list of handles with debug info. */
++#ifndef DEBUG_HEADER_IN_LOADER
++ mlist = (struct dbg_mirror_root *)debug_mirror_sym->value;
++ if (!mlist)
++ return;
++#else
++ mlist = (struct dbg_mirror_root *)&debug_list_header;
++#endif
++ hndl->dm.hroot = mlist; /* set pointer to root into our handle */
++ if (!dlthis->allocated_secn_count)
++ return; /* no load addresses to be recorded */
++ /* reuse temporary symbol storage */
++ dbmod = (struct dll_module *)dlthis->local_symtab;
++ /* Create the DLLview record in the memory we retain for our handle */
++ dbmod->num_sects = dlthis->allocated_secn_count;
++ dbmod->timestamp = dlthis->verify.dv_timdat;
++ dbmod->version = INIT_VERSION;
++ dbmod->verification = VERIFICATION;
++ asecs = dlthis->ldr_sections;
++ dbsec = dbmod->sects;
++ for (curr_sect = dlthis->allocated_secn_count;
++ curr_sect > 0; curr_sect -= 1) {
++ dbsec->sect_load_adr = asecs->load_addr;
++ dbsec->sect_run_adr = asecs->run_addr;
++ dbsec += 1;
++ asecs += 1;
++ }
++
++ /* If a trampoline section was created go ahead and add its info */
++ if (dlthis->tramp.tramp_sect_next_addr != 0) {
++ dbmod->num_sects++;
++ dbsec->sect_load_adr = asecs->load_addr;
++ dbsec->sect_run_adr = asecs->run_addr;
++ dbsec++;
++ asecs++;
++ }
++
++ /* now cram in the names */
++ cp = copy_tgt_strings(dbsec, dlthis->str_head,
++ dlthis->debug_string_size);
++
++ /* If a trampoline section was created, add its name so DLLView
++ * can show the user the section info. */
++ if (dlthis->tramp.tramp_sect_next_addr != 0) {
++ cp = copy_tgt_strings(cp,
++ dlthis->tramp.final_string_table,
++ strlen(dlthis->tramp.final_string_table) +
++ 1);
++ }
++
++ /* round off the size of the debug record, and remember same */
++ hndl->dm.dbsiz = HOST_TO_TDATA_ROUND(cp - (char *)dbmod);
++ *cp = 0; /* strictly to make our test harness happy */
++ dllview_info = dllview_info_init;
++ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz);
++ /* Initialize memory context to default heap */
++ dllview_info.context = 0;
++ hndl->dm.context = 0;
++ /* fill in next pointer and size */
++ if (mlist->hnext) {
++ dbmod->next_module = TADDR_TO_TDATA(mlist->hnext->dm.dbthis);
++ dbmod->next_module_size = mlist->hnext->dm.dbsiz;
++ } else {
++ dbmod->next_module_size = 0;
++ dbmod->next_module = 0;
++ }
++ /* allocate memory for on-DSP DLLview debug record */
++ if (!dlthis->myalloc)
++ return;
++ if (!dlthis->myalloc->dload_allocate(dlthis->myalloc, &dllview_info,
++ HOST_TO_TADDR(sizeof(u32)))) {
++ return;
++ }
++ /* Store load address of .dllview section */
++ hndl->dm.dbthis = dllview_info.load_addr;
++ /* Store memory context (segid) in which .dllview section
++ * was allocated */
++ hndl->dm.context = dllview_info.context;
++ mlist->refcount += 1;
++ /* swap bytes in the entire debug record, but not the string table */
++ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) {
++ swap_words(dbmod, (char *)dbsec - (char *)dbmod,
++ DLL_MODULE_BITMAP);
++ }
++ /* Update the DLLview list on the DSP write new record */
++ if (!dlthis->myio->writemem(dlthis->myio, dbmod,
++ dllview_info.load_addr, &dllview_info,
++ TADDR_TO_HOST(dllview_info.size))) {
++ return;
++ }
++ /* write new header */
++ mhdr.first_module_size = hndl->dm.dbsiz;
++ mhdr.first_module = TADDR_TO_TDATA(dllview_info.load_addr);
++ /* swap bytes in the module header, if needed */
++ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) {
++ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16),
++ MODULES_HEADER_BITMAP);
++ }
++ dllview_info = dllview_info_init;
++ if (!dlthis->myio->writemem(dlthis->myio, &mhdr, mlist->dbthis,
++ &dllview_info,
++ sizeof(struct modules_header) -
++ sizeof(u16))) {
++ return;
++ }
++ /* Add the module handle to this processor's list
++ of handles with debug info */
++ hndl->dm.hnext = mlist->hnext;
++ if (hndl->dm.hnext)
++ hndl->dm.hnext->dm.hprev = hndl;
++ hndl->dm.hprev = (struct my_handle *)mlist;
++ mlist->hnext = hndl; /* insert after root */
++} /* init_module_handle */
++
++/*************************************************************************
++ * Procedure dynamic_unload_module
++ *
++ * Parameters:
++ * mhandle A module handle from dynamic_load_module
++ * syms Host-side symbol table and malloc/free functions
++ * alloc Target-side memory allocation
++ *
++ * Effect:
++ * The module specified by mhandle is unloaded. Unloading causes all
++ * target memory to be deallocated, all symbols defined by the module to
++ * be purged, and any host-side storage used by the dynamic loader for
++ * this module to be released.
++ *
++ * Returns:
++ * Zero for success. On error, the number of errors detected is returned.
++ * Individual errors are reported using syms->error_report().
++ *********************************************************************** */
++int dynamic_unload_module(void *mhandle,
++ struct dynamic_loader_sym *syms,
++ struct dynamic_loader_allocate *alloc,
++ struct dynamic_loader_initialize *init)
++{
++ s16 curr_sect;
++ struct ldr_section_info *asecs;
++ struct my_handle *hndl;
++ struct dbg_mirror_root *root;
++ unsigned errcount = 0;
++ struct ldr_section_info dllview_info = dllview_info_init;
++ struct modules_header mhdr;
++
++ hndl = (struct my_handle *)mhandle;
++ if (!hndl)
++ return 0; /* if handle is null, nothing to do */
++ /* Clear out the module symbols
++ * Note that if this is the module that defined MODULES_HEADER
++ (the head of the target debug list)
++ * then this operation will blow away that symbol.
++ It will therefore be impossible for subsequent
++ * operations to add entries to this un-referenceable list. */
++ if (!syms)
++ return 1;
++ syms->purge_symbol_table(syms, (unsigned)hndl);
++ /* Deallocate target memory for sections
++ * NOTE: The trampoline section, if created, gets deleted here, too */
++
++ asecs = hndl->secns;
++ if (alloc)
++ for (curr_sect = (hndl->secn_count >> 1); curr_sect > 0;
++ curr_sect -= 1) {
++ asecs->name = NULL;
++ alloc->dload_deallocate(alloc, asecs++);
++ }
++ root = hndl->dm.hroot;
++ if (!root) {
++ /* there is a debug list containing this module */
++ goto func_end;
++ }
++ if (!hndl->dm.dbthis) { /* target-side dllview record exists */
++ goto loop_end;
++ }
++ /* Retrieve memory context in which .dllview was allocated */
++ dllview_info.context = hndl->dm.context;
++ if (hndl->dm.hprev == hndl)
++ goto exitunltgt;
++
++ /* target-side dllview record is in list */
++ /* dequeue this record from our GPP-side mirror list */
++ hndl->dm.hprev->dm.hnext = hndl->dm.hnext;
++ if (hndl->dm.hnext)
++ hndl->dm.hnext->dm.hprev = hndl->dm.hprev;
++ /* Update next_module of previous entry in target list
++ * We are using mhdr here as a surrogate for either a
++ struct modules_header or a dll_module */
++ if (hndl->dm.hnext) {
++ mhdr.first_module = TADDR_TO_TDATA(hndl->dm.hnext->dm.dbthis);
++ mhdr.first_module_size = hndl->dm.hnext->dm.dbsiz;
++ } else {
++ mhdr.first_module = 0;
++ mhdr.first_module_size = 0;
++ }
++ if (!init)
++ goto exitunltgt;
++
++ if (!init->connect(init)) {
++ dload_syms_error(syms, iconnect);
++ errcount += 1;
++ goto exitunltgt;
++ }
++ /* swap bytes in the module header, if needed */
++ if (TARGET_ENDIANNESS_DIFFERS(hndl->secn_count & 0x1)) {
++ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16),
++ MODULES_HEADER_BITMAP);
++ }
++ if (!init->writemem(init, &mhdr, hndl->dm.hprev->dm.dbthis,
++ &dllview_info, sizeof(struct modules_header) -
++ sizeof(mhdr.update_flag))) {
++ dload_syms_error(syms, dlvwrite);
++ errcount += 1;
++ }
++ /* update change counter */
++ root->changes += 1;
++ if (!init->writemem(init, &(root->changes),
++ root->dbthis + HOST_TO_TADDR
++ (sizeof(mhdr.first_module) +
++ sizeof(mhdr.first_module_size)),
++ &dllview_info, sizeof(mhdr.update_flag))) {
++ dload_syms_error(syms, dlvwrite);
++ errcount += 1;
++ }
++ init->release(init);
++exitunltgt:
++ /* release target storage */
++ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz);
++ dllview_info.load_addr = hndl->dm.dbthis;
++ if (alloc)
++ alloc->dload_deallocate(alloc, &dllview_info);
++ root->refcount -= 1;
++ /* target-side dllview record exists */
++loop_end:
++#ifndef DEBUG_HEADER_IN_LOADER
++ if (root->refcount <= 0) {
++ /* if all references gone, blow off the header */
++ /* our root symbol may be gone due to the Purge above,
++ but if not, do not destroy the root */
++ if (syms->find_matching_symbol
++ (syms, loader_dllview_root) == NULL)
++ syms->dload_deallocate(syms, root);
++ }
++#endif
++func_end:
++ /* there is a debug list containing this module */
++ syms->dload_deallocate(syms, mhandle); /* release our storage */
++ return errcount;
++} /* dynamic_unload_module */
++
++#if BITS_PER_AU > BITS_PER_BYTE
++/*************************************************************************
++ * Procedure unpack_name
++ *
++ * Parameters:
++ * soffset Byte offset into the string table
++ *
++ * Effect:
++ * Returns a pointer to the string specified by the offset supplied, or
++ * NULL for error.
++ *
++ *********************************************************************** */
++static char *unpack_name(struct dload_state *dlthis, u32 soffset)
++{
++ u8 tmp, *src;
++ char *dst;
++
++ if (soffset >= dlthis->dfile_hdr.df_strtab_size) {
++ dload_error(dlthis, "Bad string table offset " FMT_UI32,
++ soffset);
++ return NULL;
++ }
++ src = (uint_least8_t *) dlthis->str_head +
++ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE));
++ dst = dlthis->str_temp;
++ if (soffset & 1)
++ *dst++ = *src++; /* only 1 character in first word */
++ do {
++ tmp = *src++;
++ *dst = (tmp >> BITS_PER_BYTE);
++ if (!(*dst++))
++ break;
++ } while ((*dst++ = tmp & BYTE_MASK));
++ dlthis->temp_len = dst - dlthis->str_temp;
++ /* squirrel away length including terminating null */
++ return dlthis->str_temp;
++} /* unpack_name */
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/dload_internal.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/dload_internal.h 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,344 @@
++/*
++ * dload_internal.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _DLOAD_INTERNAL_
++#define _DLOAD_INTERNAL_
++
++#include <linux/types.h>
++
++/*
++ * Internal state definitions for the dynamic loader
++ */
++
++/* type used for relocation intermediate results */
++typedef s32 rvalue;
++
++/* unsigned version of same; must have at least as many bits */
++typedef u32 urvalue;
++
++/*
++ * Dynamic loader configuration constants
++ */
++/* error issued if input has more sections than this limit */
++#define REASONABLE_SECTION_LIMIT 100
++
++/* (Addressable unit) value used to clear BSS section */
++#define DLOAD_FILL_BSS 0
++
++/*
++ * Reorder maps explained (?)
++ *
++ * The doff file format defines a 32-bit pattern used to determine the
++ * byte order of an image being read. That value is
++ * BYTE_RESHUFFLE_VALUE == 0x00010203
++ * For purposes of the reorder routine, we would rather have the all-is-OK
++ * for 32-bits pattern be 0x03020100. This first macro makes the
++ * translation from doff file header value to MAP value: */
++#define REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303)
++/* This translation is made in dload_headers. Thereafter, the all-is-OK
++ * value for the maps stored in dlthis is REORDER_MAP(BYTE_RESHUFFLE_VALUE).
++ * But sadly, not all bits of the doff file are 32-bit integers.
++ * The notable exceptions are strings and image bits.
++ * Strings obey host byte order: */
++#if defined(_BIG_ENDIAN)
++#define HOST_BYTE_ORDER(cookedmap) ((cookedmap) ^ 0x3030303)
++#else
++#define HOST_BYTE_ORDER(cookedmap) (cookedmap)
++#endif
++/* Target bits consist of target AUs (could be bytes, or 16-bits,
++ * or 32-bits) stored as an array in host order. A target order
++ * map is defined by: */
++#if !defined(_BIG_ENDIAN) || TARGET_AU_BITS > 16
++#define TARGET_ORDER(cookedmap) (cookedmap)
++#elif TARGET_AU_BITS > 8
++#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x2020202)
++#else
++#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x3030303)
++#endif
++
++/* forward declaration for handle returned by dynamic loader */
++struct my_handle;
++
++/*
++ * a list of module handles, which mirrors the debug list on the target
++ */
++struct dbg_mirror_root {
++ /* must be same as dbg_mirror_list; __DLModules address on target */
++ u32 dbthis;
++ struct my_handle *hnext; /* must be same as dbg_mirror_list */
++ u16 changes; /* change counter */
++ u16 refcount; /* number of modules referencing this root */
++};
++
++struct dbg_mirror_list {
++ u32 dbthis;
++ struct my_handle *hnext, *hprev;
++ struct dbg_mirror_root *hroot;
++ u16 dbsiz;
++ u32 context; /* Save context for .dllview memory allocation */
++};
++
++#define VARIABLE_SIZE 1
++/*
++ * the structure we actually return as an opaque module handle
++ */
++struct my_handle {
++ struct dbg_mirror_list dm; /* !!! must be first !!! */
++ /* sections following << 1, LSB is set for big-endian target */
++ u16 secn_count;
++ struct ldr_section_info secns[VARIABLE_SIZE];
++};
++#define MY_HANDLE_SIZE (sizeof(struct my_handle) -\
++ sizeof(struct ldr_section_info))
++/* real size of my_handle */
++
++/*
++ * reduced symbol structure used for symbols during relocation
++ */
++struct local_symbol {
++ s32 value; /* Relocated symbol value */
++ s32 delta; /* Original value in input file */
++ s16 secnn; /* section number */
++ s16 sclass; /* symbol class */
++};
++
++/*
++ * Trampoline data structures
++ */
++#define TRAMP_NO_GEN_AVAIL 65535
++#define TRAMP_SYM_PREFIX "__$dbTR__"
++#define TRAMP_SECT_NAME ".dbTR"
++/* MUST MATCH THE LENGTH ABOVE!! */
++#define TRAMP_SYM_PREFIX_LEN 9
++/* Includes NULL termination */
++#define TRAMP_SYM_HEX_ASCII_LEN 9
++
++#define GET_CONTAINER(ptr, type, field) ((type *)((unsigned long)ptr -\
++ (unsigned long)(&((type *)0)->field)))
++#ifndef FIELD_OFFSET
++#define FIELD_OFFSET(type, field) ((unsigned long)(&((type *)0)->field))
++#endif
++
++/*
++ The trampoline code for the target is located in a table called
++ "tramp_gen_info" with is indexed by looking up the index in the table
++ "tramp_map". The tramp_map index is acquired using the target
++ HASH_FUNC on the relocation type that caused the trampoline. Each
++ trampoline code table entry MUST follow this format:
++
++ |----------------------------------------------|
++ | tramp_gen_code_hdr |
++ |----------------------------------------------|
++ | Trampoline image code |
++ | (the raw instruction code for the target) |
++ |----------------------------------------------|
++ | Relocation entries for the image code |
++ |----------------------------------------------|
++
++ This is very similar to how image data is laid out in the DOFF file
++ itself.
++ */
++struct tramp_gen_code_hdr {
++ u32 tramp_code_size; /* in BYTES */
++ u32 num_relos;
++ u32 relo_offset; /* in BYTES */
++};
++
++struct tramp_img_pkt {
++ struct tramp_img_pkt *next; /* MUST BE FIRST */
++ u32 base;
++ struct tramp_gen_code_hdr hdr;
++ u8 payload[VARIABLE_SIZE];
++};
++
++struct tramp_img_dup_relo {
++ struct tramp_img_dup_relo *next;
++ struct reloc_record_t relo;
++};
++
++struct tramp_img_dup_pkt {
++ struct tramp_img_dup_pkt *next; /* MUST BE FIRST */
++ s16 secnn;
++ u32 offset;
++ struct image_packet_t img_pkt;
++ struct tramp_img_dup_relo *relo_chain;
++
++ /* PAYLOAD OF IMG PKT FOLLOWS */
++};
++
++struct tramp_sym {
++ struct tramp_sym *next; /* MUST BE FIRST */
++ u32 index;
++ u32 str_index;
++ struct local_symbol sym_info;
++};
++
++struct tramp_string {
++ struct tramp_string *next; /* MUST BE FIRST */
++ u32 index;
++ char str[VARIABLE_SIZE]; /* NULL terminated */
++};
++
++struct tramp_info {
++ u32 tramp_sect_next_addr;
++ struct ldr_section_info sect_info;
++
++ struct tramp_sym *symbol_head;
++ struct tramp_sym *symbol_tail;
++ u32 tramp_sym_next_index;
++ struct local_symbol *final_sym_table;
++
++ struct tramp_string *string_head;
++ struct tramp_string *string_tail;
++ u32 tramp_string_next_index;
++ u32 tramp_string_size;
++ char *final_string_table;
++
++ struct tramp_img_pkt *tramp_pkts;
++ struct tramp_img_dup_pkt *dup_pkts;
++};
++
++/*
++ * States of the .cinit state machine
++ */
++enum cinit_mode {
++ CI_COUNT = 0, /* expecting a count */
++ CI_ADDRESS, /* expecting an address */
++#if CINIT_ALIGN < CINIT_ADDRESS /* handle case of partial address field */
++ CI_PARTADDRESS, /* have only part of the address */
++#endif
++ CI_COPY, /* in the middle of copying data */
++ CI_DONE /* end of .cinit table */
++};
++
++/*
++ * The internal state of the dynamic loader, which is passed around as
++ * an object
++ */
++struct dload_state {
++ struct dynamic_loader_stream *strm; /* The module input stream */
++ struct dynamic_loader_sym *mysym; /* Symbols for this session */
++ /* target memory allocator */
++ struct dynamic_loader_allocate *myalloc;
++ struct dynamic_loader_initialize *myio; /* target memory initializer */
++ unsigned myoptions; /* Options parameter dynamic_load_module */
++
++ char *str_head; /* Pointer to string table */
++#if BITS_PER_AU > BITS_PER_BYTE
++ char *str_temp; /* Pointer to temporary buffer for strings */
++ /* big enough to hold longest string */
++ unsigned temp_len; /* length of last temporary string */
++ char *xstrings; /* Pointer to buffer for expanded */
++ /* strings for sec names */
++#endif
++ /* Total size of strings for DLLView section names */
++ unsigned debug_string_size;
++ /* Pointer to parallel section info for allocated sections only */
++ struct doff_scnhdr_t *sect_hdrs; /* Pointer to section table */
++ struct ldr_section_info *ldr_sections;
++#if TMS32060
++ /* The address of the start of the .bss section */
++ ldr_addr bss_run_base;
++#endif
++ struct local_symbol *local_symtab; /* Relocation symbol table */
++
++ /* pointer to DL section info for the section being relocated */
++ struct ldr_section_info *image_secn;
++ /* change in run address for current section during relocation */
++ ldr_addr delta_runaddr;
++ ldr_addr image_offset; /* offset of current packet in section */
++ enum cinit_mode cinit_state; /* current state of cload_cinit() */
++ int cinit_count; /* the current count */
++ ldr_addr cinit_addr; /* the current address */
++ s16 cinit_page; /* the current page */
++ /* Handle to be returned by dynamic_load_module */
++ struct my_handle *myhandle;
++ unsigned dload_errcount; /* Total # of errors reported so far */
++ /* Number of target sections that require allocation and relocation */
++ unsigned allocated_secn_count;
++#ifndef TARGET_ENDIANNESS
++ int big_e_target; /* Target data in big-endian format */
++#endif
++ /* map for reordering bytes, 0 if not needed */
++ u32 reorder_map;
++ struct doff_filehdr_t dfile_hdr; /* DOFF file header structure */
++ struct doff_verify_rec_t verify; /* Verify record */
++
++ struct tramp_info tramp; /* Trampoline data, if needed */
++
++ int relstkidx; /* index into relocation value stack */
++ /* relocation value stack used in relexp.c */
++ rvalue relstk[STATIC_EXPR_STK_SIZE];
++
++};
++
++#ifdef TARGET_ENDIANNESS
++#define TARGET_BIG_ENDIAN TARGET_ENDIANNESS
++#else
++#define TARGET_BIG_ENDIAN (dlthis->big_e_target)
++#endif
++
++/*
++ * Exports from cload.c to rest of the world
++ */
++extern void dload_error(struct dload_state *dlthis, const char *errtxt, ...);
++extern void dload_syms_error(struct dynamic_loader_sym *syms,
++ const char *errtxt, ...);
++extern void dload_headers(struct dload_state *dlthis);
++extern void dload_strings(struct dload_state *dlthis, bool sec_names_only);
++extern void dload_sections(struct dload_state *dlthis);
++extern void dload_reorder(void *data, int dsiz, u32 map);
++extern u32 dload_checksum(void *data, unsigned siz);
++
++#if HOST_ENDIANNESS
++extern uint32_t dload_reverse_checksum(void *data, unsigned siz);
++#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32)
++extern uint32_t dload_reverse_checksum16(void *data, unsigned siz);
++#endif
++#endif
++
++/*
++ * exported by reloc.c
++ */
++extern void dload_relocate(struct dload_state *dlthis, tgt_au_t * data,
++ struct reloc_record_t *rp, bool * tramps_generated,
++ bool second_pass);
++
++extern rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t * data,
++ int fieldsz, int offset, unsigned sgn);
++
++extern int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t * data,
++ int fieldsz, int offset, unsigned sgn);
++
++/*
++ * exported by tramp.c
++ */
++extern bool dload_tramp_avail(struct dload_state *dlthis,
++ struct reloc_record_t *rp);
++
++int dload_tramp_generate(struct dload_state *dlthis, s16 secnn,
++ u32 image_offset, struct image_packet_t *ipacket,
++ struct reloc_record_t *rp);
++
++extern int dload_tramp_pkt_udpate(struct dload_state *dlthis,
++ s16 secnn, u32 image_offset,
++ struct image_packet_t *ipacket);
++
++extern int dload_tramp_finalize(struct dload_state *dlthis);
++
++extern void dload_tramp_cleanup(struct dload_state *dlthis);
++
++#endif /* _DLOAD_INTERNAL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/doff.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/doff.h 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,354 @@
++/*
++ * doff.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Structures & definitions used for dynamically loaded modules file format.
++ * This format is a reformatted version of COFF. It optimizes the layout for
++ * the dynamic loader.
++ *
++ * .dof files, when viewed as a sequence of 32-bit integers, look the same
++ * on big-endian and little-endian machines.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _DOFF_H
++#define _DOFF_H
++
++
++#define BYTE_RESHUFFLE_VALUE 0x00010203
++
++/* DOFF file header containing fields categorizing the remainder of the file */
++struct doff_filehdr_t {
++
++ /* string table size, including filename, in bytes */
++ u32 df_strtab_size;
++
++ /* entry point if one exists */
++ u32 df_entrypt;
++
++ /* identifies byte ordering of file;
++ * always set to BYTE_RESHUFFLE_VALUE */
++ u32 df_byte_reshuffle;
++
++ /* Size of the string table up to and including the last section name */
++ /* Size includes the name of the COFF file also */
++ u32 df_scn_name_size;
++
++#ifndef _BIG_ENDIAN
++ /* number of symbols */
++ u16 df_no_syms;
++
++ /* length in bytes of the longest string, including terminating NULL */
++ /* excludes the name of the file */
++ u16 df_max_str_len;
++
++ /* total number of sections including no-load ones */
++ u16 df_no_scns;
++
++ /* number of sections containing target code allocated or downloaded */
++ u16 df_target_scns;
++
++ /* unique id for dll file format & version */
++ u16 df_doff_version;
++
++ /* identifies ISA */
++ u16 df_target_id;
++
++ /* useful file flags */
++ u16 df_flags;
++
++ /* section reference for entry point, N_UNDEF for none, */
++ /* N_ABS for absolute address */
++ s16 df_entry_secn;
++#else
++ /* length of the longest string, including terminating NULL */
++ u16 df_max_str_len;
++
++ /* number of symbols */
++ u16 df_no_syms;
++
++ /* number of sections containing target code allocated or downloaded */
++ u16 df_target_scns;
++
++ /* total number of sections including no-load ones */
++ u16 df_no_scns;
++
++ /* identifies ISA */
++ u16 df_target_id;
++
++ /* unique id for dll file format & version */
++ u16 df_doff_version;
++
++ /* section reference for entry point, N_UNDEF for none, */
++ /* N_ABS for absolute address */
++ s16 df_entry_secn;
++
++ /* useful file flags */
++ u16 df_flags;
++#endif
++ /* checksum for file header record */
++ u32 df_checksum;
++
++};
++
++/* flags in the df_flags field */
++#define DF_LITTLE 0x100
++#define DF_BIG 0x200
++#define DF_BYTE_ORDER (DF_LITTLE | DF_BIG)
++
++/* Supported processors */
++#define TMS470_ID 0x97
++#define LEAD_ID 0x98
++#define TMS32060_ID 0x99
++#define LEAD3_ID 0x9c
++
++/* Primary processor for loading */
++#if TMS32060
++#define TARGET_ID TMS32060_ID
++#endif
++
++/* Verification record containing values used to test integrity of the bits */
++struct doff_verify_rec_t {
++
++ /* time and date stamp */
++ u32 dv_timdat;
++
++ /* checksum for all section records */
++ u32 dv_scn_rec_checksum;
++
++ /* checksum for string table */
++ u32 dv_str_tab_checksum;
++
++ /* checksum for symbol table */
++ u32 dv_sym_tab_checksum;
++
++ /* checksum for verification record */
++ u32 dv_verify_rec_checksum;
++
++};
++
++/* String table is an array of null-terminated strings. The first entry is
++ * the filename, which is added by DLLcreate. No new structure definitions
++ * are required.
++ */
++
++/* Section Records including information on the corresponding image packets */
++/*
++ * !!WARNING!!
++ *
++ * This structure is expected to match in form ldr_section_info in
++ * dynamic_loader.h
++ */
++
++struct doff_scnhdr_t {
++
++ s32 ds_offset; /* offset into string table of name */
++ s32 ds_paddr; /* RUN address, in target AU */
++ s32 ds_vaddr; /* LOAD address, in target AU */
++ s32 ds_size; /* section size, in target AU */
++#ifndef _BIG_ENDIAN
++ u16 ds_page; /* memory page id */
++ u16 ds_flags; /* section flags */
++#else
++ u16 ds_flags; /* section flags */
++ u16 ds_page; /* memory page id */
++#endif
++ u32 ds_first_pkt_offset;
++ /* Absolute byte offset into the file */
++ /* where the first image record resides */
++
++ s32 ds_nipacks; /* number of image packets */
++
++};
++
++/* Symbol table entry */
++struct doff_syment_t {
++
++ s32 dn_offset; /* offset into string table of name */
++ s32 dn_value; /* value of symbol */
++#ifndef _BIG_ENDIAN
++ s16 dn_scnum; /* section number */
++ s16 dn_sclass; /* storage class */
++#else
++ s16 dn_sclass; /* storage class */
++ s16 dn_scnum; /* section number, 1-based */
++#endif
++
++};
++
++/* special values for dn_scnum */
++#define DN_UNDEF 0 /* undefined symbol */
++#define DN_ABS (-1) /* value of symbol is absolute */
++/* special values for dn_sclass */
++#define DN_EXT 2
++#define DN_STATLAB 20
++#define DN_EXTLAB 21
++
++/* Default value of image bits in packet */
++/* Configurable by user on the command line */
++#define IMAGE_PACKET_SIZE 1024
++
++/* An image packet contains a chunk of data from a section along with */
++/* information necessary for its processing. */
++struct image_packet_t {
++
++ s32 num_relocs; /* number of relocations for */
++ /* this packet */
++
++ s32 packet_size; /* number of bytes in array */
++ /* "bits" occupied by */
++ /* valid data. Could be */
++ /* < IMAGE_PACKET_SIZE to */
++ /* prevent splitting a */
++ /* relocation across packets. */
++ /* Last packet of a section */
++ /* will most likely contain */
++ /* < IMAGE_PACKET_SIZE bytes */
++ /* of valid data */
++
++ s32 img_chksum; /* Checksum for image packet */
++ /* and the corresponding */
++ /* relocation records */
++
++ u8 *img_data; /* Actual data in section */
++
++};
++
++/* The relocation structure definition matches the COFF version. Offsets */
++/* however are relative to the image packet base not the section base. */
++struct reloc_record_t {
++
++ s32 vaddr;
++
++ /* expressed in target AUs */
++
++ union {
++ struct {
++#ifndef _BIG_ENDIAN
++ u8 _offset; /* bit offset of rel fld */
++ u8 _fieldsz; /* size of rel fld */
++ u8 _wordsz; /* # bytes containing rel fld */
++ u8 _dum1;
++ u16 _dum2;
++ u16 _type;
++#else
++ unsigned _dum1:8;
++ unsigned _wordsz:8; /* # bytes containing rel fld */
++ unsigned _fieldsz:8; /* size of rel fld */
++ unsigned _offset:8; /* bit offset of rel fld */
++ u16 _type;
++ u16 _dum2;
++#endif
++ } _r_field;
++
++ struct {
++ u32 _spc; /* image packet relative PC */
++#ifndef _BIG_ENDIAN
++ u16 _dum;
++ u16 _type; /* relocation type */
++#else
++ u16 _type; /* relocation type */
++ u16 _dum;
++#endif
++ } _r_spc;
++
++ struct {
++ u32 _uval; /* constant value */
++#ifndef _BIG_ENDIAN
++ u16 _dum;
++ u16 _type; /* relocation type */
++#else
++ u16 _type; /* relocation type */
++ u16 _dum;
++#endif
++ } _r_uval;
++
++ struct {
++ s32 _symndx; /* 32-bit sym tbl index */
++#ifndef _BIG_ENDIAN
++ u16 _disp; /* extra addr encode data */
++ u16 _type; /* relocation type */
++#else
++ u16 _type; /* relocation type */
++ u16 _disp; /* extra addr encode data */
++#endif
++ } _r_sym;
++ } _u_reloc;
++
++};
++
++/* abbreviations for convenience */
++#ifndef TYPE
++#define TYPE _u_reloc._r_sym._type
++#define UVAL _u_reloc._r_uval._uval
++#define SYMNDX _u_reloc._r_sym._symndx
++#define OFFSET _u_reloc._r_field._offset
++#define FIELDSZ _u_reloc._r_field._fieldsz
++#define WORDSZ _u_reloc._r_field._wordsz
++#define R_DISP _u_reloc._r_sym._disp
++#endif
++
++/**************************************************************************** */
++/* */
++/* Important DOFF macros used for file processing */
++/* */
++/**************************************************************************** */
++
++/* DOFF Versions */
++#define DOFF0 0
++
++/* Return the address/size >= to addr that is at a 32-bit boundary */
++/* This assumes that a byte is 8 bits */
++#define DOFF_ALIGN(addr) (((addr) + 3) & ~3UL)
++
++/**************************************************************************** */
++/* */
++/* The DOFF section header flags field is laid out as follows: */
++/* */
++/* Bits 0-3 : Section Type */
++/* Bit 4 : Set when section requires target memory to be allocated by DL */
++/* Bit 5 : Set when section requires downloading */
++/* Bits 8-11: Alignment, same as COFF */
++/* */
++/**************************************************************************** */
++
++/* Enum for DOFF section types (bits 0-3 of flag): See dynamic_loader.h */
++#define DS_SECTION_TYPE_MASK 0xF
++/* DS_ALLOCATE indicates whether a section needs space on the target */
++#define DS_ALLOCATE_MASK 0x10
++/* DS_DOWNLOAD indicates that the loader needs to copy bits */
++#define DS_DOWNLOAD_MASK 0x20
++/* Section alignment requirement in AUs */
++#define DS_ALIGNMENT_SHIFT 8
++
++static inline bool dload_check_type(struct doff_scnhdr_t *sptr, u32 flag)
++{
++ return (sptr->ds_flags & DS_SECTION_TYPE_MASK) == flag;
++}
++static inline bool ds_needs_allocation(struct doff_scnhdr_t *sptr)
++{
++ return sptr->ds_flags & DS_ALLOCATE_MASK;
++}
++
++static inline bool ds_needs_download(struct doff_scnhdr_t *sptr)
++{
++ return sptr->ds_flags & DS_DOWNLOAD_MASK;
++}
++
++static inline int ds_alignment(u16 ds_flags)
++{
++ return 1 << ((ds_flags >> DS_ALIGNMENT_SHIFT) & DS_SECTION_TYPE_MASK);
++}
++
++
++#endif /* _DOFF_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/getsection.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/getsection.c 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,407 @@
++/*
++ * getsection.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <dspbridge/getsection.h>
++#include "header.h"
++
++/*
++ * Error strings
++ */
++static const char readstrm[] = { "Error reading %s from input stream" };
++static const char seek[] = { "Set file position to %d failed" };
++static const char isiz[] = { "Bad image packet size %d" };
++static const char err_checksum[] = { "Checksum failed on %s" };
++
++static const char err_reloc[] = { "dload_get_section unable to read"
++ "sections containing relocation entries"
++};
++
++#if BITS_PER_AU > BITS_PER_BYTE
++static const char err_alloc[] = { "Syms->dload_allocate( %d ) failed" };
++static const char stbl[] = { "Bad string table offset " FMT_UI32 };
++#endif
++
++/************************************************************** */
++/********************* SUPPORT FUNCTIONS ********************** */
++/************************************************************** */
++
++#if BITS_PER_AU > BITS_PER_BYTE
++/**************************************************************************
++ * Procedure unpack_sec_name
++ *
++ * Parameters:
++ * dlthis Handle from dload_module_open for this module
++ * soffset Byte offset into the string table
++ * dst Place to store the expanded string
++ *
++ * Effect:
++ * Stores a string from the string table into the destination, expanding
++ * it in the process. Returns a pointer just past the end of the stored
++ * string on success, or NULL on failure.
++ *
++ ************************************************************************ */
++static char *unpack_sec_name(struct dload_state *dlthis, u32 soffset, char *dst)
++{
++ u8 tmp, *src;
++
++ if (soffset >= dlthis->dfile_hdr.df_scn_name_size) {
++ dload_error(dlthis, stbl, soffset);
++ return NULL;
++ }
++ src = (u8 *) dlthis->str_head +
++ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE));
++ if (soffset & 1)
++ *dst++ = *src++; /* only 1 character in first word */
++ do {
++ tmp = *src++;
++ *dst = (tmp >> BITS_PER_BYTE);
++ if (!(*dst++))
++ break;
++ } while ((*dst++ = tmp & BYTE_MASK));
++
++ return dst;
++}
++
++/**************************************************************************
++ * Procedure expand_sec_names
++ *
++ * Parameters:
++ * dlthis Handle from dload_module_open for this module
++ *
++ * Effect:
++ * Allocates a buffer, unpacks and copies strings from string table into it.
++ * Stores a pointer to the buffer into a state variable.
++ ************************************************************************* */
++static void expand_sec_names(struct dload_state *dlthis)
++{
++ char *xstrings, *curr, *next;
++ u32 xsize;
++ u16 sec;
++ struct ldr_section_info *shp;
++ /* assume worst-case size requirement */
++ xsize = dlthis->dfile_hdr.df_max_str_len * dlthis->dfile_hdr.df_no_scns;
++ xstrings = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, xsize);
++ if (xstrings == NULL) {
++ dload_error(dlthis, err_alloc, xsize);
++ return;
++ }
++ dlthis->xstrings = xstrings;
++ /* For each sec, copy and expand its name */
++ curr = xstrings;
++ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) {
++ shp = (struct ldr_section_info *)&dlthis->sect_hdrs[sec];
++ next = unpack_sec_name(dlthis, *(u32 *) &shp->name, curr);
++ if (next == NULL)
++ break; /* error */
++ shp->name = curr;
++ curr = next;
++ }
++}
++
++#endif
++
++/************************************************************** */
++/********************* EXPORTED FUNCTIONS ********************* */
++/************************************************************** */
++
++/**************************************************************************
++ * Procedure dload_module_open
++ *
++ * Parameters:
++ * module The input stream that supplies the module image
++ * syms Host-side malloc/free and error reporting functions.
++ * Other methods are unused.
++ *
++ * Effect:
++ * Reads header information from a dynamic loader module using the
++ specified
++ * stream object, and returns a handle for the module information. This
++ * handle may be used in subsequent query calls to obtain information
++ * contained in the module.
++ *
++ * Returns:
++ * NULL if an error is encountered, otherwise a module handle for use
++ * in subsequent operations.
++ ************************************************************************* */
++void *dload_module_open(struct dynamic_loader_stream *module,
++ struct dynamic_loader_sym *syms)
++{
++ struct dload_state *dlthis; /* internal state for this call */
++ unsigned *dp, sz;
++ u32 sec_start;
++#if BITS_PER_AU <= BITS_PER_BYTE
++ u16 sec;
++#endif
++
++ /* Check that mandatory arguments are present */
++ if (!module || !syms) {
++ if (syms != NULL)
++ dload_syms_error(syms, "Required parameter is NULL");
++
++ return NULL;
++ }
++
++ dlthis = (struct dload_state *)
++ syms->dload_allocate(syms, sizeof(struct dload_state));
++ if (!dlthis) {
++ /* not enough storage */
++ dload_syms_error(syms, "Can't allocate module info");
++ return NULL;
++ }
++
++ /* clear our internal state */
++ dp = (unsigned *)dlthis;
++ for (sz = sizeof(struct dload_state) / sizeof(unsigned);
++ sz > 0; sz -= 1)
++ *dp++ = 0;
++
++ dlthis->strm = module;
++ dlthis->mysym = syms;
++
++ /* read in the doff image and store in our state variable */
++ dload_headers(dlthis);
++
++ if (!dlthis->dload_errcount)
++ dload_strings(dlthis, true);
++
++ /* skip ahead past the unread portion of the string table */
++ sec_start = sizeof(struct doff_filehdr_t) +
++ sizeof(struct doff_verify_rec_t) +
++ BYTE_TO_HOST(DOFF_ALIGN(dlthis->dfile_hdr.df_strtab_size));
++
++ if (dlthis->strm->set_file_posn(dlthis->strm, sec_start) != 0) {
++ dload_error(dlthis, seek, sec_start);
++ dload_module_close(dlthis); return NULL; /* don't leak dlthis */
++ }
++
++ if (!dlthis->dload_errcount)
++ dload_sections(dlthis);
++
++ if (dlthis->dload_errcount) {
++ dload_module_close(dlthis); /* errors, blow off our state */
++ dlthis = NULL;
++ return NULL;
++ }
++#if BITS_PER_AU > BITS_PER_BYTE
++ /* Expand all section names from the string table into the */
++ /* state variable, and convert section names from a relative */
++ /* string table offset to a pointers to the expanded string. */
++ expand_sec_names(dlthis);
++#else
++ /* Convert section names from a relative string table offset */
++ /* to a pointer into the string table. */
++ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) {
++ struct ldr_section_info *shp =
++ (struct ldr_section_info *)&dlthis->sect_hdrs[sec];
++ shp->name = dlthis->str_head + *(u32 *) &shp->name;
++ }
++#endif
++
++ return dlthis;
++}
++
++/***************************************************************************
++ * Procedure dload_get_section_info
++ *
++ * Parameters:
++ * minfo Handle from dload_module_open for this module
++ * section_name Pointer to the string name of the section desired
++ * section_info Address of a section info structure pointer to be
++ * initialized
++ *
++ * Effect:
++ * Finds the specified section in the module information, and initializes
++ * the provided struct ldr_section_info pointer.
++ *
++ * Returns:
++ * true for success, false for section not found
++ ************************************************************************* */
++int dload_get_section_info(void *minfo, const char *section_name,
++ const struct ldr_section_info **const section_info)
++{
++ struct dload_state *dlthis;
++ struct ldr_section_info *shp;
++ u16 sec;
++
++ dlthis = (struct dload_state *)minfo;
++ if (!dlthis)
++ return false;
++
++ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) {
++ shp = (struct ldr_section_info *)&dlthis->sect_hdrs[sec];
++ if (strcmp(section_name, shp->name) == 0) {
++ *section_info = shp;
++ return true;
++ }
++ }
++
++ return false;
++}
++
++#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32))
++
++/**************************************************************************
++ * Procedure dload_get_section
++ *
++ * Parameters:
++ * minfo Handle from dload_module_open for this module
++ * section_info Pointer to a section info structure for the desired
++ * section
++ * section_data Buffer to contain the section initialized data
++ *
++ * Effect:
++ * Copies the initialized data for the specified section into the
++ * supplied buffer.
++ *
++ * Returns:
++ * true for success, false for section not found
++ ************************************************************************* */
++int dload_get_section(void *minfo,
++ const struct ldr_section_info *section_info,
++ void *section_data)
++{
++ struct dload_state *dlthis;
++ u32 pos;
++ struct doff_scnhdr_t *sptr = NULL;
++ s32 nip;
++ struct image_packet_t ipacket;
++ s32 ipsize;
++ u32 checks;
++ s8 *dest = (s8 *) section_data;
++
++ dlthis = (struct dload_state *)minfo;
++ if (!dlthis)
++ return false;
++ sptr = (struct doff_scnhdr_t *)section_info;
++ if (sptr == NULL)
++ return false;
++
++ /* skip ahead to the start of the first packet */
++ pos = BYTE_TO_HOST(DOFF_ALIGN((u32) sptr->ds_first_pkt_offset));
++ if (dlthis->strm->set_file_posn(dlthis->strm, pos) != 0) {
++ dload_error(dlthis, seek, pos);
++ return false;
++ }
++
++ nip = sptr->ds_nipacks;
++ while ((nip -= 1) >= 0) { /* for each packet */
++ /* get the fixed header bits */
++ if (dlthis->strm->read_buffer(dlthis->strm, &ipacket,
++ IPH_SIZE) != IPH_SIZE) {
++ dload_error(dlthis, readstrm, "image packet");
++ return false;
++ }
++ /* reorder the header if need be */
++ if (dlthis->reorder_map)
++ dload_reorder(&ipacket, IPH_SIZE, dlthis->reorder_map);
++
++ /* Now read the packet image bits. Note: round the size up to
++ * the next multiple of 4 bytes; this is what checksum
++ * routines want. */
++ ipsize = BYTE_TO_HOST(DOFF_ALIGN(ipacket.packet_size));
++ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) {
++ dload_error(dlthis, isiz, ipsize);
++ return false;
++ }
++ if (dlthis->strm->read_buffer
++ (dlthis->strm, dest, ipsize) != ipsize) {
++ dload_error(dlthis, readstrm, "image packet");
++ return false;
++ }
++ /* reorder the bytes if need be */
++#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16)
++ if (dlthis->reorder_map)
++ dload_reorder(dest, ipsize, dlthis->reorder_map);
++
++ checks = dload_checksum(dest, ipsize);
++#else
++ if (dlthis->dfile_hdr.df_byte_reshuffle !=
++ TARGET_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) {
++ /* put image bytes in big-endian order, not PC order */
++ dload_reorder(dest, ipsize,
++ TARGET_ORDER(dlthis->
++ dfile_hdr.df_byte_reshuffle));
++ }
++#if TARGET_AU_BITS > 8
++ checks = dload_reverse_checksum16(dest, ipsize);
++#else
++ checks = dload_reverse_checksum(dest, ipsize);
++#endif
++#endif
++ checks += dload_checksum(&ipacket, IPH_SIZE);
++
++ /* NYI: unable to handle relocation entries here. Reloc
++ * entries referring to fields that span the packet boundaries
++ * may result in packets of sizes that are not multiple of
++ * 4 bytes. Our checksum implementation works on 32-bit words
++ * only. */
++ if (ipacket.num_relocs != 0) {
++ dload_error(dlthis, err_reloc, ipsize);
++ return false;
++ }
++
++ if (~checks) {
++ dload_error(dlthis, err_checksum, "image packet");
++ return false;
++ }
++
++ /*Advance destination ptr by the size of the just-read packet */
++ dest += ipsize;
++ }
++
++ return true;
++}
++
++/***************************************************************************
++ * Procedure dload_module_close
++ *
++ * Parameters:
++ * minfo Handle from dload_module_open for this module
++ *
++ * Effect:
++ * Releases any storage associated with the module handle. On return,
++ * the module handle is invalid.
++ *
++ * Returns:
++ * Zero for success. On error, the number of errors detected is returned.
++ * Individual errors are reported using syms->error_report(), where syms was
++ * an argument to dload_module_open
++ ************************************************************************* */
++void dload_module_close(void *minfo)
++{
++ struct dload_state *dlthis;
++
++ dlthis = (struct dload_state *)minfo;
++ if (!dlthis)
++ return;
++
++ if (dlthis->str_head)
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ dlthis->str_head);
++
++ if (dlthis->sect_hdrs)
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ dlthis->sect_hdrs);
++
++#if BITS_PER_AU > BITS_PER_BYTE
++ if (dlthis->xstrings)
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ dlthis->xstrings);
++
++#endif
++
++ dlthis->mysym->dload_deallocate(dlthis->mysym, dlthis);
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/header.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/header.h 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,49 @@
++/*
++ * header.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/string.h>
++#define DL_STRCMP strcmp
++
++/* maximum parenthesis nesting in relocation stack expressions */
++#define STATIC_EXPR_STK_SIZE 10
++
++#include <linux/types.h>
++
++#include "doff.h"
++#include <dspbridge/dynamic_loader.h>
++#include "params.h"
++#include "dload_internal.h"
++#include "reloc_table.h"
++
++/*
++ * Plausibility limits
++ *
++ * These limits are imposed upon the input DOFF file as a check for validity.
++ * They are hard limits, in that the load will fail if they are exceeded.
++ * The numbers selected are arbitrary, in that the loader implementation does
++ * not require these limits.
++ */
++
++/* maximum number of bytes in string table */
++#define MAX_REASONABLE_STRINGTAB (0x100000)
++/* maximum number of code,data,etc. sections */
++#define MAX_REASONABLE_SECTIONS (200)
++/* maximum number of linker symbols */
++#define MAX_REASONABLE_SYMBOLS (100000)
++
++/* shift count to align F_BIG with DLOAD_LITTLE */
++#define ALIGN_COFF_ENDIANNESS 7
++#define ENDIANNESS_MASK (DF_BYTE_ORDER >> ALIGN_COFF_ENDIANNESS)
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/module_list.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/module_list.h 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,159 @@
++/*
++ * dspbridge/mpu_driver/src/dynload/module_list.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/*
++ * This C header file gives the layout of the data structure created by the
++ * dynamic loader to describe the set of modules loaded into the DSP.
++ *
++ * Linked List Structure:
++ * ----------------------
++ * The data structure defined here is a singly-linked list. The list
++ * represents the set of modules which are currently loaded in the DSP memory.
++ * The first entry in the list is a header record which contains a flag
++ * representing the state of the list. The rest of the entries in the list
++ * are module records.
++ *
++ * Global symbol _DLModules designates the first record in the list (i.e. the
++ * header record). This symbol must be defined in any program that wishes to
++ * use DLLview plug-in.
++ *
++ * String Representation:
++ * ----------------------
++ * The string names of the module and its sections are stored in a block of
++ * memory which follows the module record itself. The strings are ordered:
++ * module name first, followed by section names in order from the first
++ * section to the last. String names are tightly packed arrays of 8-bit
++ * characters (two characters per 16-bit word on the C55x). Strings are
++ * zero-byte-terminated.
++ *
++ * Creating and updating the list:
++ * -------------------------------
++ * Upon loading a new module into the DSP memory the dynamic loader inserts a
++ * new module record as the first module record in the list. The fields of
++ * this module record are initialized to reflect the properties of the module.
++ * The dynamic loader does NOT increment the flag/counter in the list's header
++ * record.
++ *
++ * Upon unloading a module from the DSP memory the dynamic loader removes the
++ * module's record from this list. The dynamic loader also increments the
++ * flag/counter in the list's header record to indicate that the list has been
++ * changed.
++ */
++
++#ifndef _MODULE_LIST_H_
++#define _MODULE_LIST_H_
++
++#include <linux/types.h>
++
++/* Global pointer to the modules_header structure */
++#define MODULES_HEADER "_DLModules"
++#define MODULES_HEADER_NO_UNDERSCORE "DLModules"
++
++/* Initial version number */
++#define INIT_VERSION 1
++
++/* Verification number -- to be recorded in each module record */
++#define VERIFICATION 0x79
++
++/* forward declarations */
++struct dll_module;
++struct dll_sect;
++
++/* the first entry in the list is the modules_header record;
++ * its address is contained in the global _DLModules pointer */
++struct modules_header {
++
++ /*
++ * Address of the first dll_module record in the list or NULL.
++ * Note: for C55x this is a word address (C55x data is
++ * word-addressable)
++ */
++ u32 first_module;
++
++ /* Combined storage size (in target addressable units) of the
++ * dll_module record which follows this header record, or zero
++ * if the list is empty. This size includes the module's string table.
++ * Note: for C55x the unit is a 16-bit word */
++ u16 first_module_size;
++
++ /* Counter is incremented whenever a module record is removed from
++ * the list */
++ u16 update_flag;
++
++};
++
++/* for each 32-bits in above structure, a bitmap, LSB first, whose bits are:
++ * 0 => a 32-bit value, 1 => 2 16-bit values */
++/* swapping bitmap for type modules_header */
++#define MODULES_HEADER_BITMAP 0x2
++
++/* information recorded about each section in a module */
++struct dll_sect {
++
++ /* Load-time address of the section.
++ * Note: for C55x this is a byte address for program sections, and
++ * a word address for data sections. C55x program memory is
++ * byte-addressable, while data memory is word-addressable. */
++ u32 sect_load_adr;
++
++ /* Run-time address of the section.
++ * Note 1: for C55x this is a byte address for program sections, and
++ * a word address for data sections.
++ * Note 2: for C55x two most significant bits of this field indicate
++ * the section type: '00' for a code section, '11' for a data section
++ * (C55 addresses are really only 24-bits wide). */
++ u32 sect_run_adr;
++
++};
++
++/* the rest of the entries in the list are module records */
++struct dll_module {
++
++ /* Address of the next dll_module record in the list, or 0 if this is
++ * the last record in the list.
++ * Note: for C55x this is a word address (C55x data is
++ * word-addressable) */
++ u32 next_module;
++
++ /* Combined storage size (in target addressable units) of the
++ * dll_module record which follows this one, or zero if this is the
++ * last record in the list. This size includes the module's string
++ * table.
++ * Note: for C55x the unit is a 16-bit word. */
++ u16 next_module_size;
++
++ /* version number of the tooling; set to INIT_VERSION for Phase 1 */
++ u16 version;
++
++ /* the verification word; set to VERIFICATION */
++ u16 verification;
++
++ /* Number of sections in the sects array */
++ u16 num_sects;
++
++ /* Module's "unique" id; copy of the timestamp from the host
++ * COFF file */
++ u32 timestamp;
++
++ /* Array of num_sects elements of the module's section records */
++ struct dll_sect sects[1];
++};
++
++/* for each 32 bits in above structure, a bitmap, LSB first, whose bits are:
++ * 0 => a 32-bit value, 1 => 2 16-bit values */
++#define DLL_MODULE_BITMAP 0x6 /* swapping bitmap for type dll_module */
++
++#endif /* _MODULE_LIST_H_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/params.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/params.h 2010-08-18 11:24:23.166053090 +0300
+@@ -0,0 +1,226 @@
++/*
++ * params.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This file defines host and target properties for all machines
++ * supported by the dynamic loader. To be tedious...
++ *
++ * host: the machine on which the dynamic loader runs
++ * target: the machine that the dynamic loader is loading
++ *
++ * Host and target may or may not be the same, depending upon the particular
++ * use.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/******************************************************************************
++ *
++ * Host Properties
++ *
++ **************************************************************************** */
++
++#define BITS_PER_BYTE 8 /* bits in the standard PC/SUN byte */
++#define LOG_BITS_PER_BYTE 3 /* log base 2 of same */
++#define BYTE_MASK ((1U<<BITS_PER_BYTE)-1)
++
++#if defined(__TMS320C55X__) || defined(_TMS320C5XX)
++#define BITS_PER_AU 16
++#define LOG_BITS_PER_AU 4
++ /* use this print string in error messages for uint32_t */
++#define FMT_UI32 "0x%lx"
++#define FMT8_UI32 "%08lx" /* same but no 0x, fixed width field */
++#else
++/* bits in the smallest addressable data storage unit */
++#define BITS_PER_AU 8
++/* log base 2 of the same; useful for shift counts */
++#define LOG_BITS_PER_AU 3
++#define FMT_UI32 "0x%x"
++#define FMT8_UI32 "%08x"
++#endif
++
++/* generic fastest method for swapping bytes and shorts */
++#define SWAP32BY16(zz) (((zz) << 16) | ((zz) >> 16))
++#define SWAP16BY8(zz) (((zz) << 8) | ((zz) >> 8))
++
++/* !! don't be tempted to insert type definitions here; use <stdint.h> !! */
++
++/******************************************************************************
++ *
++ * Target Properties
++ *
++ **************************************************************************** */
++
++/*-------------------------------------------------------------------------- */
++/* TMS320C6x Target Specific Parameters (byte-addressable) */
++/*-------------------------------------------------------------------------- */
++#if TMS32060
++#define MEMORG 0x0L /* Size of configured memory */
++#define MEMSIZE 0x0L /* (full address space) */
++
++#define CINIT_ALIGN 8 /* alignment of cinit record in TDATA AUs */
++#define CINIT_COUNT 4 /* width of count field in TDATA AUs */
++#define CINIT_ADDRESS 4 /* width of address field in TDATA AUs */
++#define CINIT_PAGE_BITS 0 /* Number of LSBs of address that
++ * are page number */
++
++#define LENIENT_SIGNED_RELEXPS 0 /* DOES SIGNED ALLOW MAX UNSIGNED */
++
++#undef TARGET_ENDIANNESS /* may be big or little endian */
++
++/* align a target address to a word boundary */
++#define TARGET_WORD_ALIGN(zz) (((zz) + 0x3) & -0x4)
++#endif
++
++/*--------------------------------------------------------------------------
++ *
++ * DEFAULT SETTINGS and DERIVED PROPERTIES
++ *
++ * This section establishes defaults for values not specified above
++ *-------------------------------------------------------------------------- */
++#ifndef TARGET_AU_BITS
++#define TARGET_AU_BITS 8 /* width of the target addressable unit */
++#define LOG_TARGET_AU_BITS 3 /* log2 of same */
++#endif
++
++#ifndef CINIT_DEFAULT_PAGE
++#define CINIT_DEFAULT_PAGE 0 /* default .cinit page number */
++#endif
++
++#ifndef DATA_RUN2LOAD
++#define DATA_RUN2LOAD(zz) (zz) /* translate data run address to load address */
++#endif
++
++#ifndef DBG_LIST_PAGE
++#define DBG_LIST_PAGE 0 /* page number for .dllview section */
++#endif
++
++#ifndef TARGET_WORD_ALIGN
++/* align a target address to a word boundary */
++#define TARGET_WORD_ALIGN(zz) (zz)
++#endif
++
++#ifndef TDATA_TO_TADDR
++#define TDATA_TO_TADDR(zz) (zz) /* target data address to target AU address */
++#define TADDR_TO_TDATA(zz) (zz) /* target AU address to target data address */
++#define TDATA_AU_BITS TARGET_AU_BITS /* bits per data AU */
++#define LOG_TDATA_AU_BITS LOG_TARGET_AU_BITS
++#endif
++
++/*
++ *
++ * Useful properties and conversions derived from the above
++ *
++ */
++
++/*
++ * Conversions between host and target addresses
++ */
++#if LOG_BITS_PER_AU == LOG_TARGET_AU_BITS
++/* translate target addressable unit to host address */
++#define TADDR_TO_HOST(x) (x)
++/* translate host address to target addressable unit */
++#define HOST_TO_TADDR(x) (x)
++#elif LOG_BITS_PER_AU > LOG_TARGET_AU_BITS
++#define TADDR_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS))
++#define HOST_TO_TADDR(x) ((x) << (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS))
++#else
++#define TADDR_TO_HOST(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU))
++#define HOST_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU))
++#endif
++
++#if LOG_BITS_PER_AU == LOG_TDATA_AU_BITS
++/* translate target addressable unit to host address */
++#define TDATA_TO_HOST(x) (x)
++/* translate host address to target addressable unit */
++#define HOST_TO_TDATA(x) (x)
++/* translate host address to target addressable unit, round up */
++#define HOST_TO_TDATA_ROUND(x) (x)
++/* byte offset to host offset, rounded up for TDATA size */
++#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x)
++#elif LOG_BITS_PER_AU > LOG_TDATA_AU_BITS
++#define TDATA_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS))
++#define HOST_TO_TDATA(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS))
++#define HOST_TO_TDATA_ROUND(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS))
++#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x)
++#else
++#define TDATA_TO_HOST(x) ((x) << (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))
++#define HOST_TO_TDATA(x) ((x) >> (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))
++#define HOST_TO_TDATA_ROUND(x) (((x) +\
++ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))-1) >>\
++ (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))
++#define BYTE_TO_HOST_TDATA_ROUND(x) (BYTE_TO_HOST((x) +\
++ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_BYTE))-1) &\
++ -(TDATA_AU_BITS/BITS_PER_AU))
++#endif
++
++/*
++ * Input in DOFF format is always expresed in bytes, regardless of loading host
++ * so we wind up converting from bytes to target and host units even when the
++ * host is not a byte machine.
++ */
++#if LOG_BITS_PER_AU == LOG_BITS_PER_BYTE
++#define BYTE_TO_HOST(x) (x)
++#define BYTE_TO_HOST_ROUND(x) (x)
++#define HOST_TO_BYTE(x) (x)
++#elif LOG_BITS_PER_AU >= LOG_BITS_PER_BYTE
++#define BYTE_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
++#define BYTE_TO_HOST_ROUND(x) ((x + (BITS_PER_AU/BITS_PER_BYTE-1)) >>\
++ (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
++#define HOST_TO_BYTE(x) ((x) << (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
++#else
++/* lets not try to deal with sub-8-bit byte machines */
++#endif
++
++#if LOG_TARGET_AU_BITS == LOG_BITS_PER_BYTE
++/* translate target addressable unit to byte address */
++#define TADDR_TO_BYTE(x) (x)
++/* translate byte address to target addressable unit */
++#define BYTE_TO_TADDR(x) (x)
++#elif LOG_TARGET_AU_BITS > LOG_BITS_PER_BYTE
++#define TADDR_TO_BYTE(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE))
++#define BYTE_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE))
++#else
++/* lets not try to deal with sub-8-bit byte machines */
++#endif
++
++#ifdef _BIG_ENDIAN
++#define HOST_ENDIANNESS 1
++#else
++#define HOST_ENDIANNESS 0
++#endif
++
++#ifdef TARGET_ENDIANNESS
++#define TARGET_ENDIANNESS_DIFFERS(rtend) (HOST_ENDIANNESS^TARGET_ENDIANNESS)
++#elif HOST_ENDIANNESS
++#define TARGET_ENDIANNESS_DIFFERS(rtend) (!(rtend))
++#else
++#define TARGET_ENDIANNESS_DIFFERS(rtend) (rtend)
++#endif
++
++/* the unit in which we process target image data */
++#if TARGET_AU_BITS <= 8
++typedef u8 tgt_au_t;
++#elif TARGET_AU_BITS <= 16
++typedef u16 tgt_au_t;
++#else
++typedef u32 tgt_au_t;
++#endif
++
++/* size of that unit */
++#if TARGET_AU_BITS < BITS_PER_AU
++#define TGTAU_BITS BITS_PER_AU
++#define LOG_TGTAU_BITS LOG_BITS_PER_AU
++#else
++#define TGTAU_BITS TARGET_AU_BITS
++#define LOG_TGTAU_BITS LOG_TARGET_AU_BITS
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/reloc.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/reloc.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,484 @@
++/*
++ * reloc.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include "header.h"
++
++#if TMS32060
++/* the magic symbol for the start of BSS */
++static const char bsssymbol[] = { ".bss" };
++#endif
++
++#if TMS32060
++#include "reloc_table_c6000.c"
++#endif
++
++#if TMS32060
++/* From coff.h - ignore these relocation operations */
++#define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */
++#define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */
++#define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */
++#endif
++
++/**************************************************************************
++ * Procedure dload_unpack
++ *
++ * Parameters:
++ * data pointer to storage unit containing lowest host address of
++ * image data
++ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU
++ * offset Offset from LSB, 0 <= offset < BITS_PER_AU
++ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY)
++ *
++ * Effect:
++ * Extracts the specified field and returns it.
++ ************************************************************************* */
++rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t * data, int fieldsz,
++ int offset, unsigned sgn)
++{
++ register rvalue objval;
++ register int shift, direction;
++ register tgt_au_t *dp = data;
++
++ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */
++ /* * collect up enough bits to contain the desired field */
++ if (TARGET_BIG_ENDIAN) {
++ dp += (fieldsz + offset) >> LOG_TGTAU_BITS;
++ direction = -1;
++ } else
++ direction = 1;
++ objval = *dp >> offset;
++ shift = TGTAU_BITS - offset;
++ while (shift <= fieldsz) {
++ dp += direction;
++ objval += (rvalue) *dp << shift;
++ shift += TGTAU_BITS;
++ }
++
++ /* * sign or zero extend the value appropriately */
++ if (sgn == ROP_UNS)
++ objval &= (2 << fieldsz) - 1;
++ else {
++ shift = sizeof(rvalue) * BITS_PER_AU - 1 - fieldsz;
++ objval = (objval << shift) >> shift;
++ }
++
++ return objval;
++
++} /* dload_unpack */
++
++/**************************************************************************
++ * Procedure dload_repack
++ *
++ * Parameters:
++ * val Value to insert
++ * data Pointer to storage unit containing lowest host address of
++ * image data
++ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU
++ * offset Offset from LSB, 0 <= offset < BITS_PER_AU
++ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY)
++ *
++ * Effect:
++ * Stuffs the specified value in the specified field. Returns 0 for
++ * success
++ * or 1 if the value will not fit in the specified field according to the
++ * specified signedness rule.
++ ************************************************************************* */
++static const unsigned char ovf_limit[] = { 1, 2, 2 };
++
++int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t * data,
++ int fieldsz, int offset, unsigned sgn)
++{
++ register urvalue objval, mask;
++ register int shift, direction;
++ register tgt_au_t *dp = data;
++
++ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */
++ /* clip the bits */
++ mask = (2UL << fieldsz) - 1;
++ objval = (val & mask);
++ /* * store the bits through the specified mask */
++ if (TARGET_BIG_ENDIAN) {
++ dp += (fieldsz + offset) >> LOG_TGTAU_BITS;
++ direction = -1;
++ } else
++ direction = 1;
++
++ /* insert LSBs */
++ *dp = (*dp & ~(mask << offset)) + (objval << offset);
++ shift = TGTAU_BITS - offset;
++ /* align mask and objval with AU boundary */
++ objval >>= shift;
++ mask >>= shift;
++
++ while (mask) {
++ dp += direction;
++ *dp = (*dp & ~mask) + objval;
++ objval >>= TGTAU_BITS;
++ mask >>= TGTAU_BITS;
++ }
++
++ /*
++ * check for overflow
++ */
++ if (sgn) {
++ unsigned tmp = (val >> fieldsz) + (sgn & 0x1);
++ if (tmp > ovf_limit[sgn - 1])
++ return 1;
++ }
++ return 0;
++
++} /* dload_repack */
++
++/* lookup table for the scaling amount in a C6x instruction */
++#if TMS32060
++#define SCALE_BITS 4 /* there are 4 bits in the scale field */
++#define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */
++static const u8 c60_scale[SCALE_MASK + 1] = {
++ 1, 0, 0, 0, 1, 1, 2, 2
++};
++#endif
++
++/**************************************************************************
++ * Procedure dload_relocate
++ *
++ * Parameters:
++ * data Pointer to base of image data
++ * rp Pointer to relocation operation
++ *
++ * Effect:
++ * Performs the specified relocation operation
++ ************************************************************************* */
++void dload_relocate(struct dload_state *dlthis, tgt_au_t * data,
++ struct reloc_record_t *rp, bool *tramps_generated,
++ bool second_pass)
++{
++ rvalue val, reloc_amt, orig_val = 0;
++ unsigned int fieldsz = 0;
++ unsigned int offset = 0;
++ unsigned int reloc_info = 0;
++ unsigned int reloc_action = 0;
++ register int rx = 0;
++ rvalue *stackp = NULL;
++ int top;
++ struct local_symbol *svp = NULL;
++#ifdef RFV_SCALE
++ unsigned int scale = 0;
++#endif
++ struct image_packet_t *img_pkt = NULL;
++
++ /* The image packet data struct is only used during first pass
++ * relocation in the event that a trampoline is needed. 2nd pass
++ * relocation doesn't guarantee that data is coming from an
++ * image_packet_t structure. See cload.c, dload_data for how img_data is
++ * set. If that changes this needs to be updated!!! */
++ if (second_pass == false)
++ img_pkt = (struct image_packet_t *)((u8 *) data -
++ sizeof(struct
++ image_packet_t));
++
++ rx = HASH_FUNC(rp->TYPE);
++ while (rop_map1[rx] != rp->TYPE) {
++ rx = HASH_L(rop_map2[rx]);
++ if (rx < 0) {
++#if TMS32060
++ switch (rp->TYPE) {
++ case R_C60ALIGN:
++ case R_C60NOCMP:
++ case R_C60FPHEAD:
++ /* Ignore these reloc types and return */
++ break;
++ default:
++ /* Unknown reloc type, print error and return */
++ dload_error(dlthis, "Bad coff operator 0x%x",
++ rp->TYPE);
++ }
++#else
++ dload_error(dlthis, "Bad coff operator 0x%x", rp->TYPE);
++#endif
++ return;
++ }
++ }
++ rx = HASH_I(rop_map2[rx]);
++ if ((rx < (sizeof(rop_action) / sizeof(u16)))
++ && (rx < (sizeof(rop_info) / sizeof(u16))) && (rx > 0)) {
++ reloc_action = rop_action[rx];
++ reloc_info = rop_info[rx];
++ } else {
++ dload_error(dlthis, "Buffer Overflow - Array Index Out "
++ "of Bounds");
++ }
++
++ /* Compute the relocation amount for the referenced symbol, if any */
++ reloc_amt = rp->UVAL;
++ if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */
++ /* If this is first pass, use the module local symbol table,
++ * else use the trampoline symbol table. */
++ if (second_pass == false) {
++ if ((u32) rp->SYMNDX < dlthis->dfile_hdr.df_no_syms) {
++ /* real symbol reference */
++ svp = &dlthis->local_symtab[rp->SYMNDX];
++ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ?
++ svp->delta : svp->value;
++ }
++ /* reloc references current section */
++ else if (rp->SYMNDX == -1) {
++ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ?
++ dlthis->delta_runaddr :
++ dlthis->image_secn->run_addr;
++ }
++ }
++ }
++ /* relocation uses a symbol reference */
++ /* Handle stack adjustment */
++ val = 0;
++ top = RFV_STK(reloc_info);
++ if (top) {
++ top += dlthis->relstkidx - RSTK_UOP;
++ if (top >= STATIC_EXPR_STK_SIZE) {
++ dload_error(dlthis,
++ "Expression stack overflow in %s at offset "
++ FMT_UI32, dlthis->image_secn->name,
++ rp->vaddr + dlthis->image_offset);
++ return;
++ }
++ val = dlthis->relstk[dlthis->relstkidx];
++ dlthis->relstkidx = top;
++ stackp = &dlthis->relstk[top];
++ }
++ /* Derive field position and size, if we need them */
++ if (reloc_info & ROP_RW) { /* read or write action in our future */
++ fieldsz = RFV_WIDTH(reloc_action);
++ if (fieldsz) { /* field info from table */
++ offset = RFV_POSN(reloc_action);
++ if (TARGET_BIG_ENDIAN)
++ /* make sure vaddr is the lowest target
++ * address containing bits */
++ rp->vaddr += RFV_BIGOFF(reloc_info);
++ } else { /* field info from relocation op */
++ fieldsz = rp->FIELDSZ;
++ offset = rp->OFFSET;
++ if (TARGET_BIG_ENDIAN)
++ /* make sure vaddr is the lowest target
++ address containing bits */
++ rp->vaddr += (rp->WORDSZ - offset - fieldsz)
++ >> LOG_TARGET_AU_BITS;
++ }
++ data = (tgt_au_t *) ((char *)data + TADDR_TO_HOST(rp->vaddr));
++ /* compute lowest host location of referenced data */
++#if BITS_PER_AU > TARGET_AU_BITS
++ /* conversion from target address to host address may lose
++ address bits; add loss to offset */
++ if (TARGET_BIG_ENDIAN) {
++ offset += -((rp->vaddr << LOG_TARGET_AU_BITS) +
++ offset + fieldsz) &
++ (BITS_PER_AU - TARGET_AU_BITS);
++ } else {
++ offset += (rp->vaddr << LOG_TARGET_AU_BITS) &
++ (BITS_PER_AU - 1);
++ }
++#endif
++#ifdef RFV_SCALE
++ scale = RFV_SCALE(reloc_info);
++#endif
++ }
++ /* read the object value from the current image, if so ordered */
++ if (reloc_info & ROP_R) {
++ /* relocation reads current image value */
++ val = dload_unpack(dlthis, data, fieldsz, offset,
++ RFV_SIGN(reloc_info));
++ /* Save off the original value in case the relo overflows and
++ * we can trampoline it. */
++ orig_val = val;
++
++#ifdef RFV_SCALE
++ val <<= scale;
++#endif
++ }
++ /* perform the necessary arithmetic */
++ switch (RFV_ACTION(reloc_action)) { /* relocation actions */
++ case RACT_VAL:
++ break;
++ case RACT_ASGN:
++ val = reloc_amt;
++ break;
++ case RACT_ADD:
++ val += reloc_amt;
++ break;
++ case RACT_PCR:
++ /*-----------------------------------------------------------
++ * Handle special cases of jumping from absolute sections
++ * (special reloc type) or to absolute destination
++ * (symndx == -1). In either case, set the appropriate
++ * relocation amount to 0.
++ *----------------------------------------------------------- */
++ if (rp->SYMNDX == -1)
++ reloc_amt = 0;
++ val += reloc_amt - dlthis->delta_runaddr;
++ break;
++ case RACT_ADDISP:
++ val += rp->R_DISP + reloc_amt;
++ break;
++ case RACT_ASGPC:
++ val = dlthis->image_secn->run_addr + reloc_amt;
++ break;
++ case RACT_PLUS:
++ if (stackp != NULL)
++ val += *stackp;
++ break;
++ case RACT_SUB:
++ if (stackp != NULL)
++ val = *stackp - val;
++ break;
++ case RACT_NEG:
++ val = -val;
++ break;
++ case RACT_MPY:
++ if (stackp != NULL)
++ val *= *stackp;
++ break;
++ case RACT_DIV:
++ if (stackp != NULL)
++ val = *stackp / val;
++ break;
++ case RACT_MOD:
++ if (stackp != NULL)
++ val = *stackp % val;
++ break;
++ case RACT_SR:
++ if (val >= sizeof(rvalue) * BITS_PER_AU)
++ val = 0;
++ else if (stackp != NULL)
++ val = (urvalue) *stackp >> val;
++ break;
++ case RACT_ASR:
++ if (val >= sizeof(rvalue) * BITS_PER_AU)
++ val = sizeof(rvalue) * BITS_PER_AU - 1;
++ else if (stackp != NULL)
++ val = *stackp >> val;
++ break;
++ case RACT_SL:
++ if (val >= sizeof(rvalue) * BITS_PER_AU)
++ val = 0;
++ else if (stackp != NULL)
++ val = *stackp << val;
++ break;
++ case RACT_AND:
++ if (stackp != NULL)
++ val &= *stackp;
++ break;
++ case RACT_OR:
++ if (stackp != NULL)
++ val |= *stackp;
++ break;
++ case RACT_XOR:
++ if (stackp != NULL)
++ val ^= *stackp;
++ break;
++ case RACT_NOT:
++ val = ~val;
++ break;
++#if TMS32060
++ case RACT_C6SECT:
++ /* actually needed address of secn containing symbol */
++ if (svp != NULL) {
++ if (rp->SYMNDX >= 0)
++ if (svp->secnn > 0)
++ reloc_amt = dlthis->ldr_sections
++ [svp->secnn - 1].run_addr;
++ }
++ /* !!! FALL THRU !!! */
++ case RACT_C6BASE:
++ if (dlthis->bss_run_base == 0) {
++ struct dynload_symbol *symp;
++ symp = dlthis->mysym->find_matching_symbol
++ (dlthis->mysym, bsssymbol);
++ /* lookup value of global BSS base */
++ if (symp)
++ dlthis->bss_run_base = symp->value;
++ else
++ dload_error(dlthis,
++ "Global BSS base referenced in %s "
++ "offset" FMT_UI32 " but not "
++ "defined",
++ dlthis->image_secn->name,
++ rp->vaddr + dlthis->image_offset);
++ }
++ reloc_amt -= dlthis->bss_run_base;
++ /* !!! FALL THRU !!! */
++ case RACT_C6DSPL:
++ /* scale factor determined by 3 LSBs of field */
++ scale = c60_scale[val & SCALE_MASK];
++ offset += SCALE_BITS;
++ fieldsz -= SCALE_BITS;
++ val >>= SCALE_BITS; /* ignore the scale field hereafter */
++ val <<= scale;
++ val += reloc_amt; /* do the usual relocation */
++ if (((1 << scale) - 1) & val)
++ dload_error(dlthis,
++ "Unaligned reference in %s offset "
++ FMT_UI32, dlthis->image_secn->name,
++ rp->vaddr + dlthis->image_offset);
++ break;
++#endif
++ } /* relocation actions */
++ /* * Put back result as required */
++ if (reloc_info & ROP_W) { /* relocation writes image value */
++#ifdef RFV_SCALE
++ val >>= scale;
++#endif
++ if (dload_repack(dlthis, val, data, fieldsz, offset,
++ RFV_SIGN(reloc_info))) {
++ /* Check to see if this relo can be trampolined,
++ * but only in first phase relocation. 2nd phase
++ * relocation cannot trampoline. */
++ if ((second_pass == false) &&
++ (dload_tramp_avail(dlthis, rp) == true)) {
++
++ /* Before generating the trampoline, restore
++ * the value to its original so the 2nd pass
++ * relo will work. */
++ dload_repack(dlthis, orig_val, data, fieldsz,
++ offset, RFV_SIGN(reloc_info));
++ if (!dload_tramp_generate(dlthis,
++ (dlthis->image_secn -
++ dlthis->ldr_sections),
++ dlthis->image_offset,
++ img_pkt, rp)) {
++ dload_error(dlthis,
++ "Failed to "
++ "generate trampoline for "
++ "bit overflow");
++ dload_error(dlthis,
++ "Relocation val " FMT_UI32
++ " overflows %d bits in %s "
++ "offset " FMT_UI32, val,
++ fieldsz,
++ dlthis->image_secn->name,
++ dlthis->image_offset +
++ rp->vaddr);
++ } else
++ *tramps_generated = true;
++ } else {
++ dload_error(dlthis, "Relocation value "
++ FMT_UI32 " overflows %d bits in %s"
++ " offset " FMT_UI32, val, fieldsz,
++ dlthis->image_secn->name,
++ dlthis->image_offset + rp->vaddr);
++ }
++ }
++ } else if (top)
++ *stackp = val;
++} /* reloc_value */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/reloc_table.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/reloc_table.h 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,102 @@
++/*
++ * reloc_table.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _RELOC_TABLE_H_
++#define _RELOC_TABLE_H_
++/*
++ * Table of relocation operator properties
++ */
++#include <linux/types.h>
++
++/* How does this relocation operation access the program image? */
++#define ROP_N 0 /* does not access image */
++#define ROP_R 1 /* read from image */
++#define ROP_W 2 /* write to image */
++#define ROP_RW 3 /* read from and write to image */
++
++/* For program image access, what are the overflow rules for the bit field? */
++/* Beware! Procedure repack depends on this encoding */
++#define ROP_ANY 0 /* no overflow ever, just truncate the value */
++#define ROP_SGN 1 /* signed field */
++#define ROP_UNS 2 /* unsigned field */
++#define ROP_MAX 3 /* allow maximum range of either signed or unsigned */
++
++/* How does the relocation operation use the symbol reference */
++#define ROP_IGN 0 /* no symbol is referenced */
++#define ROP_LIT 0 /* use rp->UVAL literal field */
++#define ROP_SYM 1 /* symbol value is used in relocation */
++#define ROP_SYMD 2 /* delta value vs last link is used */
++
++/* How does the reloc op use the stack? */
++#define RSTK_N 0 /* Does not use */
++#define RSTK_POP 1 /* Does a POP */
++#define RSTK_UOP 2 /* Unary op, stack position unaffected */
++#define RSTK_PSH 3 /* Does a push */
++
++/*
++ * Computational actions performed by the dynamic loader
++ */
++enum dload_actions {
++ /* don't alter the current val (from stack or mem fetch) */
++ RACT_VAL,
++ /* set value to reference amount (from symbol reference) */
++ RACT_ASGN,
++ RACT_ADD, /* add reference to value */
++ RACT_PCR, /* add reference minus PC delta to value */
++ RACT_ADDISP, /* add reference plus R_DISP */
++ RACT_ASGPC, /* set value to section addr plus reference */
++
++ RACT_PLUS, /* stack + */
++ RACT_SUB, /* stack - */
++ RACT_NEG, /* stack unary - */
++
++ RACT_MPY, /* stack * */
++ RACT_DIV, /* stack / */
++ RACT_MOD, /* stack % */
++
++ RACT_SR, /* stack unsigned >> */
++ RACT_ASR, /* stack signed >> */
++ RACT_SL, /* stack << */
++ RACT_AND, /* stack & */
++ RACT_OR, /* stack | */
++ RACT_XOR, /* stack ^ */
++ RACT_NOT, /* stack ~ */
++ RACT_C6SECT, /* for C60 R_SECT op */
++ RACT_C6BASE, /* for C60 R_BASE op */
++ RACT_C6DSPL, /* for C60 scaled 15-bit displacement */
++ RACT_PCR23T /* for ARM Thumb long branch */
++};
++
++/*
++ * macros used to extract values
++ */
++#define RFV_POSN(aaa) ((aaa) & 0xF)
++#define RFV_WIDTH(aaa) (((aaa) >> 4) & 0x3F)
++#define RFV_ACTION(aaa) ((aaa) >> 10)
++
++#define RFV_SIGN(iii) (((iii) >> 2) & 0x3)
++#define RFV_SYM(iii) (((iii) >> 4) & 0x3)
++#define RFV_STK(iii) (((iii) >> 6) & 0x3)
++#define RFV_ACCS(iii) ((iii) & 0x3)
++
++#if (TMS32060)
++#define RFV_SCALE(iii) ((iii) >> 11)
++#define RFV_BIGOFF(iii) (((iii) >> 8) & 0x7)
++#else
++#define RFV_BIGOFF(iii) ((iii) >> 8)
++#endif
++
++#endif /* _RELOC_TABLE_H_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,257 @@
++/*
++ * reloc_table_c6000.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* Tables generated for c6000 */
++
++#define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63)
++#define HASH_L(zz) ((zz) >> 8)
++#define HASH_I(zz) ((zz) & 0xFF)
++
++static const u16 rop_map1[] = {
++ 0,
++ 1,
++ 2,
++ 20,
++ 4,
++ 5,
++ 6,
++ 15,
++ 80,
++ 81,
++ 82,
++ 83,
++ 84,
++ 85,
++ 86,
++ 87,
++ 17,
++ 18,
++ 19,
++ 21,
++ 16,
++ 16394,
++ 16404,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 32,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 40,
++ 112,
++ 113,
++ 65535,
++ 16384,
++ 16385,
++ 16386,
++ 16387,
++ 16388,
++ 16389,
++ 16390,
++ 16391,
++ 16392,
++ 16393,
++ 16395,
++ 16396,
++ 16397,
++ 16398,
++ 16399,
++ 16400,
++ 16401,
++ 16402,
++ 16403,
++ 16405,
++ 16406,
++ 65535,
++ 65535,
++ 65535
++};
++
++static const s16 rop_map2[] = {
++ -256,
++ -255,
++ -254,
++ -245,
++ -253,
++ -252,
++ -251,
++ -250,
++ -241,
++ -240,
++ -239,
++ -238,
++ -237,
++ -236,
++ 1813,
++ 5142,
++ -248,
++ -247,
++ 778,
++ -244,
++ -249,
++ -221,
++ -211,
++ -1,
++ -1,
++ -1,
++ -1,
++ -1,
++ -1,
++ -243,
++ -1,
++ -1,
++ -1,
++ -1,
++ -1,
++ -1,
++ -242,
++ -233,
++ -232,
++ -1,
++ -231,
++ -230,
++ -229,
++ -228,
++ -227,
++ -226,
++ -225,
++ -224,
++ -223,
++ 5410,
++ -220,
++ -219,
++ -218,
++ -217,
++ -216,
++ -215,
++ -214,
++ -213,
++ 5676,
++ -210,
++ -209,
++ -1,
++ -1,
++ -1
++};
++
++static const u16 rop_action[] = {
++ 2560,
++ 2304,
++ 2304,
++ 2432,
++ 2432,
++ 2560,
++ 2176,
++ 2304,
++ 2560,
++ 3200,
++ 3328,
++ 3584,
++ 3456,
++ 2304,
++ 4208,
++ 20788,
++ 21812,
++ 3415,
++ 3245,
++ 2311,
++ 4359,
++ 19764,
++ 2311,
++ 3191,
++ 3280,
++ 6656,
++ 7680,
++ 8704,
++ 9728,
++ 10752,
++ 11776,
++ 12800,
++ 13824,
++ 14848,
++ 15872,
++ 16896,
++ 17920,
++ 18944,
++ 0,
++ 0,
++ 0,
++ 0,
++ 1536,
++ 1536,
++ 1536,
++ 5632,
++ 512,
++ 0
++};
++
++static const u16 rop_info[] = {
++ 0,
++ 35,
++ 35,
++ 35,
++ 35,
++ 35,
++ 35,
++ 35,
++ 35,
++ 39,
++ 39,
++ 39,
++ 39,
++ 35,
++ 34,
++ 283,
++ 299,
++ 4135,
++ 4391,
++ 291,
++ 33059,
++ 283,
++ 295,
++ 4647,
++ 4135,
++ 64,
++ 64,
++ 128,
++ 64,
++ 64,
++ 64,
++ 64,
++ 64,
++ 64,
++ 64,
++ 64,
++ 64,
++ 128,
++ 201,
++ 197,
++ 74,
++ 70,
++ 208,
++ 196,
++ 200,
++ 192,
++ 192,
++ 66
++};
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/tramp.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/tramp.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,1143 @@
++/*
++ * tramp.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2009 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include "header.h"
++
++#if TMS32060
++#include "tramp_table_c6000.c"
++#endif
++
++#define MAX_RELOS_PER_PASS 4
++
++/*
++ * Function: priv_tramp_sect_tgt_alloc
++ * Description: Allocate target memory for the trampoline section. The
++ * target mem size is easily obtained as the next available address.
++ */
++static int priv_tramp_sect_tgt_alloc(struct dload_state *dlthis)
++{
++ int ret_val = 0;
++ struct ldr_section_info *sect_info;
++
++ /* Populate the trampoline loader section and allocate it on the
++ * target. The section name is ALWAYS the first string in the final
++ * string table for trampolines. The trampoline section is always
++ * 1 beyond the total number of allocated sections. */
++ sect_info = &dlthis->ldr_sections[dlthis->allocated_secn_count];
++
++ sect_info->name = dlthis->tramp.final_string_table;
++ sect_info->size = dlthis->tramp.tramp_sect_next_addr;
++ sect_info->context = 0;
++ sect_info->type =
++ (4 << 8) | DLOAD_TEXT | DS_ALLOCATE_MASK | DS_DOWNLOAD_MASK;
++ sect_info->page = 0;
++ sect_info->run_addr = 0;
++ sect_info->load_addr = 0;
++ ret_val = dlthis->myalloc->dload_allocate(dlthis->myalloc,
++ sect_info,
++ ds_alignment
++ (sect_info->type));
++
++ if (ret_val == 0)
++ dload_error(dlthis, "Failed to allocate target memory for"
++ " trampoline");
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_h2a
++ * Description: Helper function to convert a hex value to its ASCII
++ * representation. Used for trampoline symbol name generation.
++ */
++static u8 priv_h2a(u8 value)
++{
++ if (value > 0xF)
++ return 0xFF;
++
++ if (value <= 9)
++ value += 0x30;
++ else
++ value += 0x37;
++
++ return value;
++}
++
++/*
++ * Function: priv_tramp_sym_gen_name
++ * Description: Generate a trampoline symbol name (ASCII) using the value
++ * of the symbol. This places the new name into the user buffer.
++ * The name is fixed in length and of the form: __$dbTR__xxxxxxxx
++ * (where "xxxxxxxx" is the hex value).
++ */
++static void priv_tramp_sym_gen_name(u32 value, char *dst)
++{
++ u32 i;
++ char *prefix = TRAMP_SYM_PREFIX;
++ char *dst_local = dst;
++ u8 tmp;
++
++ /* Clear out the destination, including the ending NULL */
++ for (i = 0; i < (TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN); i++)
++ *(dst_local + i) = 0;
++
++ /* Copy the prefix to start */
++ for (i = 0; i < strlen(TRAMP_SYM_PREFIX); i++) {
++ *dst_local = *(prefix + i);
++ dst_local++;
++ }
++
++ /* Now convert the value passed in to a string equiv of the hex */
++ for (i = 0; i < sizeof(value); i++) {
++#ifndef _BIG_ENDIAN
++ tmp = *(((u8 *) &value) + (sizeof(value) - 1) - i);
++ *dst_local = priv_h2a((tmp & 0xF0) >> 4);
++ dst_local++;
++ *dst_local = priv_h2a(tmp & 0x0F);
++ dst_local++;
++#else
++ tmp = *(((u8 *) &value) + i);
++ *dst_local = priv_h2a((tmp & 0xF0) >> 4);
++ dst_local++;
++ *dst_local = priv_h2a(tmp & 0x0F);
++ dst_local++;
++#endif
++ }
++
++ /* NULL terminate */
++ *dst_local = 0;
++}
++
++/*
++ * Function: priv_tramp_string_create
++ * Description: Create a new string specific to the trampoline loading and add
++ * it to the trampoline string list. This list contains the
++ * trampoline section name and trampoline point symbols.
++ */
++static struct tramp_string *priv_tramp_string_create(struct dload_state *dlthis,
++ u32 str_len, char *str)
++{
++ struct tramp_string *new_string = NULL;
++ u32 i;
++
++ /* Create a new string object with the specified size. */
++ new_string =
++ (struct tramp_string *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ (sizeof
++ (struct
++ tramp_string)
++ + str_len +
++ 1));
++ if (new_string != NULL) {
++ /* Clear the string first. This ensures the ending NULL is
++ * present and the optimizer won't touch it. */
++ for (i = 0; i < (sizeof(struct tramp_string) + str_len + 1);
++ i++)
++ *((u8 *) new_string + i) = 0;
++
++ /* Add this string to our virtual table by assigning it the
++ * next index and pushing it to the tail of the list. */
++ new_string->index = dlthis->tramp.tramp_string_next_index;
++ dlthis->tramp.tramp_string_next_index++;
++ dlthis->tramp.tramp_string_size += str_len + 1;
++
++ new_string->next = NULL;
++ if (dlthis->tramp.string_head == NULL)
++ dlthis->tramp.string_head = new_string;
++ else
++ dlthis->tramp.string_tail->next = new_string;
++
++ dlthis->tramp.string_tail = new_string;
++
++ /* Copy the string over to the new object */
++ for (i = 0; i < str_len; i++)
++ new_string->str[i] = str[i];
++ }
++
++ return new_string;
++}
++
++/*
++ * Function: priv_tramp_string_find
++ * Description: Walk the trampoline string list and find a match for the
++ * provided string. If no match is found, NULL is returned.
++ */
++static struct tramp_string *priv_tramp_string_find(struct dload_state *dlthis,
++ char *str)
++{
++ struct tramp_string *cur_str = NULL;
++ struct tramp_string *ret_val = NULL;
++ u32 i;
++ u32 str_len = strlen(str);
++
++ for (cur_str = dlthis->tramp.string_head;
++ (ret_val == NULL) && (cur_str != NULL); cur_str = cur_str->next) {
++ /* If the string lengths aren't equal, don't bother
++ * comparing */
++ if (str_len != strlen(cur_str->str))
++ continue;
++
++ /* Walk the strings until one of them ends */
++ for (i = 0; i < str_len; i++) {
++ /* If they don't match in the current position then
++ * break out now, no sense in continuing to look at
++ * this string. */
++ if (str[i] != cur_str->str[i])
++ break;
++ }
++
++ if (i == str_len)
++ ret_val = cur_str;
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_string_tbl_finalize
++ * Description: Flatten the trampoline string list into a table of NULL
++ * terminated strings. This is the same format of string table
++ * as used by the COFF/DOFF file.
++ */
++static int priv_string_tbl_finalize(struct dload_state *dlthis)
++{
++ int ret_val = 0;
++ struct tramp_string *cur_string;
++ char *cur_loc;
++ char *tmp;
++
++ /* Allocate enough space for all strings that have been created. The
++ * table is simply all strings concatenated together will NULL
++ * endings. */
++ dlthis->tramp.final_string_table =
++ (char *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ dlthis->tramp.
++ tramp_string_size);
++ if (dlthis->tramp.final_string_table != NULL) {
++ /* We got our buffer, walk the list and release the nodes as*
++ * we go */
++ cur_loc = dlthis->tramp.final_string_table;
++ cur_string = dlthis->tramp.string_head;
++ while (cur_string != NULL) {
++ /* Move the head/tail pointers */
++ dlthis->tramp.string_head = cur_string->next;
++ if (dlthis->tramp.string_tail == cur_string)
++ dlthis->tramp.string_tail = NULL;
++
++ /* Copy the string contents */
++ for (tmp = cur_string->str;
++ *tmp != '\0'; tmp++, cur_loc++)
++ *cur_loc = *tmp;
++
++ /* Pick up the NULL termination since it was missed by
++ * breaking using it to end the above loop. */
++ *cur_loc = '\0';
++ cur_loc++;
++
++ /* Free the string node, we don't need it any more. */
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ cur_string);
++
++ /* Move our pointer to the next one */
++ cur_string = dlthis->tramp.string_head;
++ }
++
++ /* Update our return value to success */
++ ret_val = 1;
++ } else
++ dload_error(dlthis, "Failed to allocate trampoline "
++ "string table");
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_tramp_sect_alloc
++ * Description: Virtually allocate space from the trampoline section. This
++ * function returns the next offset within the trampoline section
++ * that is available and moves the next available offset by the
++ * requested size. NO TARGET ALLOCATION IS DONE AT THIS TIME.
++ */
++static u32 priv_tramp_sect_alloc(struct dload_state *dlthis, u32 tramp_size)
++{
++ u32 ret_val;
++
++ /* If the next available address is 0, this is our first allocation.
++ * Create a section name string to go into the string table . */
++ if (dlthis->tramp.tramp_sect_next_addr == 0) {
++ dload_syms_error(dlthis->mysym, "*** WARNING *** created "
++ "dynamic TRAMPOLINE section for module %s",
++ dlthis->str_head);
++ }
++
++ /* Reserve space for the new trampoline */
++ ret_val = dlthis->tramp.tramp_sect_next_addr;
++ dlthis->tramp.tramp_sect_next_addr += tramp_size;
++ return ret_val;
++}
++
++/*
++ * Function: priv_tramp_sym_create
++ * Description: Allocate and create a new trampoline specific symbol and add
++ * it to the trampoline symbol list. These symbols will include
++ * trampoline points as well as the external symbols they
++ * reference.
++ */
++static struct tramp_sym *priv_tramp_sym_create(struct dload_state *dlthis,
++ u32 str_index,
++ struct local_symbol *tmp_sym)
++{
++ struct tramp_sym *new_sym = NULL;
++ u32 i;
++
++ /* Allocate new space for the symbol in the symbol table. */
++ new_sym =
++ (struct tramp_sym *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ sizeof(struct tramp_sym));
++ if (new_sym != NULL) {
++ for (i = 0; i != sizeof(struct tramp_sym); i++)
++ *((char *)new_sym + i) = 0;
++
++ /* Assign this symbol the next symbol index for easier
++ * reference later during relocation. */
++ new_sym->index = dlthis->tramp.tramp_sym_next_index;
++ dlthis->tramp.tramp_sym_next_index++;
++
++ /* Populate the symbol information. At this point any
++ * trampoline symbols will be the offset location, not the
++ * final. Copy over the symbol info to start, then be sure to
++ * get the string index from the trampoline string table. */
++ new_sym->sym_info = *tmp_sym;
++ new_sym->str_index = str_index;
++
++ /* Push the new symbol to the tail of the symbol table list */
++ new_sym->next = NULL;
++ if (dlthis->tramp.symbol_head == NULL)
++ dlthis->tramp.symbol_head = new_sym;
++ else
++ dlthis->tramp.symbol_tail->next = new_sym;
++
++ dlthis->tramp.symbol_tail = new_sym;
++ }
++
++ return new_sym;
++}
++
++/*
++ * Function: priv_tramp_sym_get
++ * Description: Search for the symbol with the matching string index (from
++ * the trampoline string table) and return the trampoline
++ * symbol object, if found. Otherwise return NULL.
++ */
++static struct tramp_sym *priv_tramp_sym_get(struct dload_state *dlthis,
++ u32 string_index)
++{
++ struct tramp_sym *sym_found = NULL;
++
++ /* Walk the symbol table list and search vs. the string index */
++ for (sym_found = dlthis->tramp.symbol_head;
++ sym_found != NULL; sym_found = sym_found->next) {
++ if (sym_found->str_index == string_index)
++ break;
++ }
++
++ return sym_found;
++}
++
++/*
++ * Function: priv_tramp_sym_find
++ * Description: Search for a trampoline symbol based on the string name of
++ * the symbol. Return the symbol object, if found, otherwise
++ * return NULL.
++ */
++static struct tramp_sym *priv_tramp_sym_find(struct dload_state *dlthis,
++ char *string)
++{
++ struct tramp_sym *sym_found = NULL;
++ struct tramp_string *str_found = NULL;
++
++ /* First, search for the string, then search for the sym based on the
++ string index. */
++ str_found = priv_tramp_string_find(dlthis, string);
++ if (str_found != NULL)
++ sym_found = priv_tramp_sym_get(dlthis, str_found->index);
++
++ return sym_found;
++}
++
++/*
++ * Function: priv_tramp_sym_finalize
++ * Description: Allocate a flat symbol table for the trampoline section,
++ * put each trampoline symbol into the table, adjust the
++ * symbol value based on the section address on the target and
++ * free the trampoline symbol list nodes.
++ */
++static int priv_tramp_sym_finalize(struct dload_state *dlthis)
++{
++ int ret_val = 0;
++ struct tramp_sym *cur_sym;
++ struct ldr_section_info *tramp_sect =
++ &dlthis->ldr_sections[dlthis->allocated_secn_count];
++ struct local_symbol *new_sym;
++
++ /* Allocate a table to hold a flattened version of all symbols
++ * created. */
++ dlthis->tramp.final_sym_table =
++ (struct local_symbol *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ (sizeof(struct local_symbol) * dlthis->tramp.
++ tramp_sym_next_index));
++ if (dlthis->tramp.final_sym_table != NULL) {
++ /* Walk the list of all symbols, copy it over to the flattened
++ * table. After it has been copied, the node can be freed as
++ * it is no longer needed. */
++ new_sym = dlthis->tramp.final_sym_table;
++ cur_sym = dlthis->tramp.symbol_head;
++ while (cur_sym != NULL) {
++ /* Pop it off the list */
++ dlthis->tramp.symbol_head = cur_sym->next;
++ if (cur_sym == dlthis->tramp.symbol_tail)
++ dlthis->tramp.symbol_tail = NULL;
++
++ /* Copy the symbol contents into the flat table */
++ *new_sym = cur_sym->sym_info;
++
++ /* Now finaize the symbol. If it is in the tramp
++ * section, we need to adjust for the section start.
++ * If it is external then we don't need to adjust at
++ * all.
++ * NOTE: THIS CODE ASSUMES THAT THE TRAMPOLINE IS
++ * REFERENCED LIKE A CALL TO AN EXTERNAL SO VALUE AND
++ * DELTA ARE THE SAME. SEE THE FUNCTION dload_symbols
++ * WHERE DN_UNDEF IS HANDLED FOR MORE REFERENCE. */
++ if (new_sym->secnn < 0) {
++ new_sym->value += tramp_sect->load_addr;
++ new_sym->delta = new_sym->value;
++ }
++
++ /* Let go of the symbol node */
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_sym);
++
++ /* Move to the next node */
++ cur_sym = dlthis->tramp.symbol_head;
++ new_sym++;
++ }
++
++ ret_val = 1;
++ } else
++ dload_error(dlthis, "Failed to alloc trampoline sym table");
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_tgt_img_gen
++ * Description: Allocate storage for and copy the target specific image data
++ * and fix up its relocations for the new external symbol. If
++ * a trampoline image packet was successfully created it is added
++ * to the trampoline list.
++ */
++static int priv_tgt_img_gen(struct dload_state *dlthis, u32 base,
++ u32 gen_index, struct tramp_sym *new_ext_sym)
++{
++ struct tramp_img_pkt *new_img_pkt = NULL;
++ u32 i;
++ u32 pkt_size = tramp_img_pkt_size_get();
++ u8 *gen_tbl_entry;
++ u8 *pkt_data;
++ struct reloc_record_t *cur_relo;
++ int ret_val = 0;
++
++ /* Allocate a new image packet and set it up. */
++ new_img_pkt =
++ (struct tramp_img_pkt *)dlthis->mysym->dload_allocate(dlthis->mysym,
++ pkt_size);
++ if (new_img_pkt != NULL) {
++ /* Save the base, this is where it goes in the section */
++ new_img_pkt->base = base;
++
++ /* Copy over the image data and relos from the target table */
++ pkt_data = (u8 *) &new_img_pkt->hdr;
++ gen_tbl_entry = (u8 *) &tramp_gen_info[gen_index];
++ for (i = 0; i < pkt_size; i++) {
++ *pkt_data = *gen_tbl_entry;
++ pkt_data++;
++ gen_tbl_entry++;
++ }
++
++ /* Update the relocations to point to the external symbol */
++ cur_relo =
++ (struct reloc_record_t *)((u8 *) &new_img_pkt->hdr +
++ new_img_pkt->hdr.relo_offset);
++ for (i = 0; i < new_img_pkt->hdr.num_relos; i++)
++ cur_relo[i].SYMNDX = new_ext_sym->index;
++
++ /* Add it to the trampoline list. */
++ new_img_pkt->next = dlthis->tramp.tramp_pkts;
++ dlthis->tramp.tramp_pkts = new_img_pkt;
++
++ ret_val = 1;
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_pkt_relo
++ * Description: Take the provided image data and the collection of relocations
++ * for it and perform the relocations. Note that all relocations
++ * at this stage are considered SECOND PASS since the original
++ * image has already been processed in the first pass. This means
++ * TRAMPOLINES ARE TREATED AS 2ND PASS even though this is really
++ * the first (and only) relocation that will be performed on them.
++ */
++static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t * data,
++ struct reloc_record_t *rp[], u32 relo_count)
++{
++ int ret_val = 1;
++ u32 i;
++ bool tmp;
++
++ /* Walk through all of the relos and process them. This function is
++ * the equivalent of relocate_packet() from cload.c, but specialized
++ * for trampolines and 2nd phase relocations. */
++ for (i = 0; i < relo_count; i++)
++ dload_relocate(dlthis, data, rp[i], &tmp, true);
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_tramp_pkt_finalize
++ * Description: Walk the list of all trampoline packets and finalize them.
++ * Each trampoline image packet will be relocated now that the
++ * trampoline section has been allocated on the target. Once
++ * all of the relocations are done the trampoline image data
++ * is written into target memory and the trampoline packet
++ * is freed: it is no longer needed after this point.
++ */
++static int priv_tramp_pkt_finalize(struct dload_state *dlthis)
++{
++ int ret_val = 1;
++ struct tramp_img_pkt *cur_pkt = NULL;
++ struct reloc_record_t *relos[MAX_RELOS_PER_PASS];
++ u32 relos_done;
++ u32 i;
++ struct reloc_record_t *cur_relo;
++ struct ldr_section_info *sect_info =
++ &dlthis->ldr_sections[dlthis->allocated_secn_count];
++
++ /* Walk the list of trampoline packets and relocate each packet. This
++ * function is the trampoline equivalent of dload_data() from
++ * cload.c. */
++ cur_pkt = dlthis->tramp.tramp_pkts;
++ while ((ret_val != 0) && (cur_pkt != NULL)) {
++ /* Remove the pkt from the list */
++ dlthis->tramp.tramp_pkts = cur_pkt->next;
++
++ /* Setup section and image offset information for the relo */
++ dlthis->image_secn = sect_info;
++ dlthis->image_offset = cur_pkt->base;
++ dlthis->delta_runaddr = sect_info->run_addr;
++
++ /* Walk through all relos for the packet */
++ relos_done = 0;
++ cur_relo = (struct reloc_record_t *)((u8 *) &cur_pkt->hdr +
++ cur_pkt->hdr.relo_offset);
++ while (relos_done < cur_pkt->hdr.num_relos) {
++#ifdef ENABLE_TRAMP_DEBUG
++ dload_syms_error(dlthis->mysym,
++ "===> Trampoline %x branches to %x",
++ sect_info->run_addr +
++ dlthis->image_offset,
++ dlthis->
++ tramp.final_sym_table[cur_relo->
++ SYMNDX].value);
++#endif
++
++ for (i = 0;
++ ((i < MAX_RELOS_PER_PASS) &&
++ ((i + relos_done) < cur_pkt->hdr.num_relos)); i++)
++ relos[i] = cur_relo + i;
++
++ /* Do the actual relo */
++ ret_val = priv_pkt_relo(dlthis,
++ (tgt_au_t *) &cur_pkt->payload,
++ relos, i);
++ if (ret_val == 0) {
++ dload_error(dlthis,
++ "Relocation of trampoline pkt at %x"
++ " failed", cur_pkt->base +
++ sect_info->run_addr);
++ break;
++ }
++
++ relos_done += i;
++ cur_relo += i;
++ }
++
++ /* Make sure we didn't hit a problem */
++ if (ret_val != 0) {
++ /* Relos are done for the packet, write it to the
++ * target */
++ ret_val = dlthis->myio->writemem(dlthis->myio,
++ &cur_pkt->payload,
++ sect_info->load_addr +
++ cur_pkt->base,
++ sect_info,
++ BYTE_TO_HOST
++ (cur_pkt->hdr.
++ tramp_code_size));
++ if (ret_val == 0) {
++ dload_error(dlthis,
++ "Write to " FMT_UI32 " failed",
++ sect_info->load_addr +
++ cur_pkt->base);
++ }
++
++ /* Done with the pkt, let it go */
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_pkt);
++
++ /* Get the next packet to process */
++ cur_pkt = dlthis->tramp.tramp_pkts;
++ }
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_dup_pkt_finalize
++ * Description: Walk the list of duplicate image packets and finalize them.
++ * Each duplicate packet will be relocated again for the
++ * relocations that previously failed and have been adjusted
++ * to point at a trampoline. Once all relocations for a packet
++ * have been done, write the packet into target memory. The
++ * duplicate packet and its relocation chain are all freed
++ * after use here as they are no longer needed after this.
++ */
++static int priv_dup_pkt_finalize(struct dload_state *dlthis)
++{
++ int ret_val = 1;
++ struct tramp_img_dup_pkt *cur_pkt;
++ struct tramp_img_dup_relo *cur_relo;
++ struct reloc_record_t *relos[MAX_RELOS_PER_PASS];
++ struct doff_scnhdr_t *sect_hdr = NULL;
++ s32 i;
++
++ /* Similar to the trampoline pkt finalize, this function walks each dup
++ * pkt that was generated and performs all relocations that were
++ * deferred to a 2nd pass. This is the equivalent of dload_data() from
++ * cload.c, but does not need the additional reorder and checksum
++ * processing as it has already been done. */
++ cur_pkt = dlthis->tramp.dup_pkts;
++ while ((ret_val != 0) && (cur_pkt != NULL)) {
++ /* Remove the node from the list, we'll be freeing it
++ * shortly */
++ dlthis->tramp.dup_pkts = cur_pkt->next;
++
++ /* Setup the section and image offset for relocation */
++ dlthis->image_secn = &dlthis->ldr_sections[cur_pkt->secnn];
++ dlthis->image_offset = cur_pkt->offset;
++
++ /* In order to get the delta run address, we need to reference
++ * the original section header. It's a bit ugly, but needed
++ * for relo. */
++ i = (s32) (dlthis->image_secn - dlthis->ldr_sections);
++ sect_hdr = dlthis->sect_hdrs + i;
++ dlthis->delta_runaddr = sect_hdr->ds_paddr;
++
++ /* Walk all relos in the chain and process each. */
++ cur_relo = cur_pkt->relo_chain;
++ while (cur_relo != NULL) {
++ /* Process them a chunk at a time to be efficient */
++ for (i = 0; (i < MAX_RELOS_PER_PASS)
++ && (cur_relo != NULL);
++ i++, cur_relo = cur_relo->next) {
++ relos[i] = &cur_relo->relo;
++ cur_pkt->relo_chain = cur_relo->next;
++ }
++
++ /* Do the actual relo */
++ ret_val = priv_pkt_relo(dlthis,
++ cur_pkt->img_pkt.img_data,
++ relos, i);
++ if (ret_val == 0) {
++ dload_error(dlthis,
++ "Relocation of dup pkt at %x"
++ " failed", cur_pkt->offset +
++ dlthis->image_secn->run_addr);
++ break;
++ }
++
++ /* Release all of these relos, we're done with them */
++ while (i > 0) {
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ GET_CONTAINER
++ (relos[i - 1],
++ struct tramp_img_dup_relo,
++ relo));
++ i--;
++ }
++
++ /* DO NOT ADVANCE cur_relo, IT IS ALREADY READY TO
++ * GO! */
++ }
++
++ /* Done with all relos. Make sure we didn't have a problem and
++ * write it out to the target */
++ if (ret_val != 0) {
++ ret_val = dlthis->myio->writemem(dlthis->myio,
++ cur_pkt->img_pkt.
++ img_data,
++ dlthis->image_secn->
++ load_addr +
++ cur_pkt->offset,
++ dlthis->image_secn,
++ BYTE_TO_HOST
++ (cur_pkt->img_pkt.
++ packet_size));
++ if (ret_val == 0) {
++ dload_error(dlthis,
++ "Write to " FMT_UI32 " failed",
++ dlthis->image_secn->load_addr +
++ cur_pkt->offset);
++ }
++
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_pkt);
++
++ /* Advance to the next packet */
++ cur_pkt = dlthis->tramp.dup_pkts;
++ }
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: priv_dup_find
++ * Description: Walk the list of existing duplicate packets and find a
++ * match based on the section number and image offset. Return
++ * the duplicate packet if found, otherwise NULL.
++ */
++static struct tramp_img_dup_pkt *priv_dup_find(struct dload_state *dlthis,
++ s16 secnn, u32 image_offset)
++{
++ struct tramp_img_dup_pkt *cur_pkt = NULL;
++
++ for (cur_pkt = dlthis->tramp.dup_pkts;
++ cur_pkt != NULL; cur_pkt = cur_pkt->next) {
++ if ((cur_pkt->secnn == secnn) &&
++ (cur_pkt->offset == image_offset)) {
++ /* Found a match, break out */
++ break;
++ }
++ }
++
++ return cur_pkt;
++}
++
++/*
++ * Function: priv_img_pkt_dup
++ * Description: Duplicate the original image packet. If this is the first
++ * time this image packet has been seen (based on section number
++ * and image offset), create a new duplicate packet and add it
++ * to the dup packet list. If not, just get the existing one and
++ * update it with the current packet contents (since relocation
++ * on the packet is still ongoing in first pass.) Create a
++ * duplicate of the provided relocation, but update it to point
++ * to the new trampoline symbol. Add the new relocation dup to
++ * the dup packet's relo chain for 2nd pass relocation later.
++ */
++static int priv_img_pkt_dup(struct dload_state *dlthis,
++ s16 secnn, u32 image_offset,
++ struct image_packet_t *ipacket,
++ struct reloc_record_t *rp,
++ struct tramp_sym *new_tramp_sym)
++{
++ struct tramp_img_dup_pkt *dup_pkt = NULL;
++ u32 new_dup_size;
++ s32 i;
++ int ret_val = 0;
++ struct tramp_img_dup_relo *dup_relo = NULL;
++
++ /* Determinne if this image packet is already being tracked in the
++ dup list for other trampolines. */
++ dup_pkt = priv_dup_find(dlthis, secnn, image_offset);
++
++ if (dup_pkt == NULL) {
++ /* This image packet does not exist in our tracking, so create
++ * a new one and add it to the head of the list. */
++ new_dup_size = sizeof(struct tramp_img_dup_pkt) +
++ ipacket->packet_size;
++
++ dup_pkt = (struct tramp_img_dup_pkt *)
++ dlthis->mysym->dload_allocate(dlthis->mysym, new_dup_size);
++ if (dup_pkt != NULL) {
++ /* Save off the section and offset information */
++ dup_pkt->secnn = secnn;
++ dup_pkt->offset = image_offset;
++ dup_pkt->relo_chain = NULL;
++
++ /* Copy the original packet content */
++ dup_pkt->img_pkt = *ipacket;
++ dup_pkt->img_pkt.img_data = (u8 *) (dup_pkt + 1);
++ for (i = 0; i < ipacket->packet_size; i++)
++ *(dup_pkt->img_pkt.img_data + i) =
++ *(ipacket->img_data + i);
++
++ /* Add the packet to the dup list */
++ dup_pkt->next = dlthis->tramp.dup_pkts;
++ dlthis->tramp.dup_pkts = dup_pkt;
++ } else
++ dload_error(dlthis, "Failed to create dup packet!");
++ } else {
++ /* The image packet contents could have changed since
++ * trampoline detection happens during relocation of the image
++ * packets. So, we need to update the image packet contents
++ * before adding relo information. */
++ for (i = 0; i < dup_pkt->img_pkt.packet_size; i++)
++ *(dup_pkt->img_pkt.img_data + i) =
++ *(ipacket->img_data + i);
++ }
++
++ /* Since the previous code may have allocated a new dup packet for us,
++ double check that we actually have one. */
++ if (dup_pkt != NULL) {
++ /* Allocate a new node for the relo chain. Each image packet
++ * can potentially have multiple relocations that cause a
++ * trampoline to be generated. So, we keep them in a chain,
++ * order is not important. */
++ dup_relo = dlthis->mysym->dload_allocate(dlthis->mysym,
++ sizeof(struct tramp_img_dup_relo));
++ if (dup_relo != NULL) {
++ /* Copy the relo contents, adjust for the new
++ * trampoline and add it to the list. */
++ dup_relo->relo = *rp;
++ dup_relo->relo.SYMNDX = new_tramp_sym->index;
++
++ dup_relo->next = dup_pkt->relo_chain;
++ dup_pkt->relo_chain = dup_relo;
++
++ /* That's it, we're done. Make sure we update our
++ * return value to be success since everything finished
++ * ok */
++ ret_val = 1;
++ } else
++ dload_error(dlthis, "Unable to alloc dup relo");
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: dload_tramp_avail
++ * Description: Check to see if the target supports a trampoline for this type
++ * of relocation. Return true if it does, otherwise false.
++ */
++bool dload_tramp_avail(struct dload_state *dlthis, struct reloc_record_t *rp)
++{
++ bool ret_val = false;
++ u16 map_index;
++ u16 gen_index;
++
++ /* Check type hash vs. target tramp table */
++ map_index = HASH_FUNC(rp->TYPE);
++ gen_index = tramp_map[map_index];
++ if (gen_index != TRAMP_NO_GEN_AVAIL)
++ ret_val = true;
++
++ return ret_val;
++}
++
++/*
++ * Function: dload_tramp_generate
++ * Description: Create a new trampoline for the provided image packet and
++ * relocation causing problems. This will create the trampoline
++ * as well as duplicate/update the image packet and relocation
++ * causing the problem, which will be relo'd again during
++ * finalization.
++ */
++int dload_tramp_generate(struct dload_state *dlthis, s16 secnn,
++ u32 image_offset, struct image_packet_t *ipacket,
++ struct reloc_record_t *rp)
++{
++ u16 map_index;
++ u16 gen_index;
++ int ret_val = 1;
++ char tramp_sym_str[TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN];
++ struct local_symbol *ref_sym;
++ struct tramp_sym *new_tramp_sym;
++ struct tramp_sym *new_ext_sym;
++ struct tramp_string *new_tramp_str;
++ u32 new_tramp_base;
++ struct local_symbol tmp_sym;
++ struct local_symbol ext_tmp_sym;
++
++ /* Hash the relo type to get our generator information */
++ map_index = HASH_FUNC(rp->TYPE);
++ gen_index = tramp_map[map_index];
++ if (gen_index != TRAMP_NO_GEN_AVAIL) {
++ /* If this is the first trampoline, create the section name in
++ * our string table for debug help later. */
++ if (dlthis->tramp.string_head == NULL) {
++ priv_tramp_string_create(dlthis,
++ strlen(TRAMP_SECT_NAME),
++ TRAMP_SECT_NAME);
++ }
++#ifdef ENABLE_TRAMP_DEBUG
++ dload_syms_error(dlthis->mysym,
++ "Trampoline at img loc %x, references %x",
++ dlthis->ldr_sections[secnn].run_addr +
++ image_offset + rp->vaddr,
++ dlthis->local_symtab[rp->SYMNDX].value);
++#endif
++
++ /* Generate the trampoline string, check if already defined.
++ * If the relo symbol index is -1, it means we need the section
++ * info for relo later. To do this we'll dummy up a symbol
++ * with the section delta and run addresses. */
++ if (rp->SYMNDX == -1) {
++ ext_tmp_sym.value =
++ dlthis->ldr_sections[secnn].run_addr;
++ ext_tmp_sym.delta = dlthis->sect_hdrs[secnn].ds_paddr;
++ ref_sym = &ext_tmp_sym;
++ } else
++ ref_sym = &(dlthis->local_symtab[rp->SYMNDX]);
++
++ priv_tramp_sym_gen_name(ref_sym->value, tramp_sym_str);
++ new_tramp_sym = priv_tramp_sym_find(dlthis, tramp_sym_str);
++ if (new_tramp_sym == NULL) {
++ /* If tramp string not defined, create it and a new
++ * string, and symbol for it as well as the original
++ * symbol which caused the trampoline. */
++ new_tramp_str = priv_tramp_string_create(dlthis,
++ strlen
++ (tramp_sym_str),
++ tramp_sym_str);
++ if (new_tramp_str == NULL) {
++ dload_error(dlthis, "Failed to create new "
++ "trampoline string\n");
++ ret_val = 0;
++ } else {
++ /* Allocate tramp section space for the new
++ * tramp from the target */
++ new_tramp_base = priv_tramp_sect_alloc(dlthis,
++ tramp_size_get());
++
++ /* We have a string, create the new symbol and
++ * duplicate the external. */
++ tmp_sym.value = new_tramp_base;
++ tmp_sym.delta = 0;
++ tmp_sym.secnn = -1;
++ tmp_sym.sclass = 0;
++ new_tramp_sym = priv_tramp_sym_create(dlthis,
++ new_tramp_str->
++ index,
++ &tmp_sym);
++
++ new_ext_sym = priv_tramp_sym_create(dlthis, -1,
++ ref_sym);
++
++ if ((new_tramp_sym != NULL) &&
++ (new_ext_sym != NULL)) {
++ /* Call the image generator to get the
++ * new image data and fix up its
++ * relocations for the external
++ * symbol. */
++ ret_val = priv_tgt_img_gen(dlthis,
++ new_tramp_base,
++ gen_index,
++ new_ext_sym);
++
++ /* Add generated image data to tramp
++ * image list */
++ if (ret_val != 1) {
++ dload_error(dlthis, "Failed to "
++ "create img pkt for"
++ " trampoline\n");
++ }
++ } else {
++ dload_error(dlthis, "Failed to create "
++ "new tramp syms "
++ "(%8.8X, %8.8X)\n",
++ new_tramp_sym, new_ext_sym);
++ ret_val = 0;
++ }
++ }
++ }
++
++ /* Duplicate the image data and relo record that caused the
++ * tramp, including update the relo data to point to the tramp
++ * symbol. */
++ if (ret_val == 1) {
++ ret_val = priv_img_pkt_dup(dlthis, secnn, image_offset,
++ ipacket, rp, new_tramp_sym);
++ if (ret_val != 1) {
++ dload_error(dlthis, "Failed to create dup of "
++ "original img pkt\n");
++ }
++ }
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: dload_tramp_pkt_update
++ * Description: Update the duplicate copy of this image packet, which the
++ * trampoline layer is already tracking. This is call is critical
++ * to make if trampolines were generated anywhere within the
++ * packet and first pass relo continued on the remainder. The
++ * trampoline layer needs the updates image data so when 2nd
++ * pass relo is done during finalize the image packet can be
++ * written to the target since all relo is done.
++ */
++int dload_tramp_pkt_udpate(struct dload_state *dlthis, s16 secnn,
++ u32 image_offset, struct image_packet_t *ipacket)
++{
++ struct tramp_img_dup_pkt *dup_pkt = NULL;
++ s32 i;
++ int ret_val = 0;
++
++ /* Find the image packet in question, the caller needs us to update it
++ since a trampoline was previously generated. */
++ dup_pkt = priv_dup_find(dlthis, secnn, image_offset);
++ if (dup_pkt != NULL) {
++ for (i = 0; i < dup_pkt->img_pkt.packet_size; i++)
++ *(dup_pkt->img_pkt.img_data + i) =
++ *(ipacket->img_data + i);
++
++ ret_val = 1;
++ } else {
++ dload_error(dlthis,
++ "Unable to find existing DUP pkt for %x, offset %x",
++ secnn, image_offset);
++
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: dload_tramp_finalize
++ * Description: If any trampolines were created, finalize everything on the
++ * target by allocating the trampoline section on the target,
++ * finalizing the trampoline symbols, finalizing the trampoline
++ * packets (write the new section to target memory) and finalize
++ * the duplicate packets by doing 2nd pass relo over them.
++ */
++int dload_tramp_finalize(struct dload_state *dlthis)
++{
++ int ret_val = 1;
++
++ if (dlthis->tramp.tramp_sect_next_addr != 0) {
++ /* Finalize strings into a flat table. This is needed so it
++ * can be added to the debug string table later. */
++ ret_val = priv_string_tbl_finalize(dlthis);
++
++ /* Do target allocation for section BEFORE finalizing
++ * symbols. */
++ if (ret_val != 0)
++ ret_val = priv_tramp_sect_tgt_alloc(dlthis);
++
++ /* Finalize symbols with their correct target information and
++ * flatten */
++ if (ret_val != 0)
++ ret_val = priv_tramp_sym_finalize(dlthis);
++
++ /* Finalize all trampoline packets. This performs the
++ * relocation on the packets as well as writing them to target
++ * memory. */
++ if (ret_val != 0)
++ ret_val = priv_tramp_pkt_finalize(dlthis);
++
++ /* Perform a 2nd pass relocation on the dup list. */
++ if (ret_val != 0)
++ ret_val = priv_dup_pkt_finalize(dlthis);
++ }
++
++ return ret_val;
++}
++
++/*
++ * Function: dload_tramp_cleanup
++ * Description: Release all temporary resources used in the trampoline layer.
++ * Note that the target memory which may have been allocated and
++ * written to store the trampolines is NOT RELEASED HERE since it
++ * is potentially still in use. It is automatically released
++ * when the module is unloaded.
++ */
++void dload_tramp_cleanup(struct dload_state *dlthis)
++{
++ struct tramp_info *tramp = &dlthis->tramp;
++ struct tramp_sym *cur_sym;
++ struct tramp_string *cur_string;
++ struct tramp_img_pkt *cur_tramp_pkt;
++ struct tramp_img_dup_pkt *cur_dup_pkt;
++ struct tramp_img_dup_relo *cur_dup_relo;
++
++ /* If there were no tramps generated, just return */
++ if (tramp->tramp_sect_next_addr == 0)
++ return;
++
++ /* Destroy all tramp information */
++ for (cur_sym = tramp->symbol_head;
++ cur_sym != NULL; cur_sym = tramp->symbol_head) {
++ tramp->symbol_head = cur_sym->next;
++ if (tramp->symbol_tail == cur_sym)
++ tramp->symbol_tail = NULL;
++
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_sym);
++ }
++
++ if (tramp->final_sym_table != NULL)
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ tramp->final_sym_table);
++
++ for (cur_string = tramp->string_head;
++ cur_string != NULL; cur_string = tramp->string_head) {
++ tramp->string_head = cur_string->next;
++ if (tramp->string_tail == cur_string)
++ tramp->string_tail = NULL;
++
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_string);
++ }
++
++ if (tramp->final_string_table != NULL)
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ tramp->final_string_table);
++
++ for (cur_tramp_pkt = tramp->tramp_pkts;
++ cur_tramp_pkt != NULL; cur_tramp_pkt = tramp->tramp_pkts) {
++ tramp->tramp_pkts = cur_tramp_pkt->next;
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_tramp_pkt);
++ }
++
++ for (cur_dup_pkt = tramp->dup_pkts;
++ cur_dup_pkt != NULL; cur_dup_pkt = tramp->dup_pkts) {
++ tramp->dup_pkts = cur_dup_pkt->next;
++
++ for (cur_dup_relo = cur_dup_pkt->relo_chain;
++ cur_dup_relo != NULL;
++ cur_dup_relo = cur_dup_pkt->relo_chain) {
++ cur_dup_pkt->relo_chain = cur_dup_relo->next;
++ dlthis->mysym->dload_deallocate(dlthis->mysym,
++ cur_dup_relo);
++ }
++
++ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_dup_pkt);
++ }
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,164 @@
++/*
++ * tramp_table_c6000.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include "dload_internal.h"
++
++/* These are defined in coff.h, but may not be available on all platforms
++ so we'll go ahead and define them here. */
++#ifndef R_C60LO16
++#define R_C60LO16 0x54 /* C60: MVK Low Half Register */
++#define R_C60HI16 0x55 /* C60: MVKH/MVKLH High Half Register */
++#endif
++
++#define C6X_TRAMP_WORD_COUNT 8
++#define C6X_TRAMP_MAX_RELOS 8
++
++/* THIS HASH FUNCTION MUST MATCH THE ONE reloc_table_c6000.c */
++#define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63)
++
++/* THIS MUST MATCH reloc_record_t FOR A SYMBOL BASED RELO */
++struct c6000_relo_record {
++ s32 vaddr;
++ s32 symndx;
++#ifndef _BIG_ENDIAN
++ u16 disp;
++ u16 type;
++#else
++ u16 type;
++ u16 disp;
++#endif
++};
++
++struct c6000_gen_code {
++ struct tramp_gen_code_hdr hdr;
++ u32 tramp_instrs[C6X_TRAMP_WORD_COUNT];
++ struct c6000_relo_record relos[C6X_TRAMP_MAX_RELOS];
++};
++
++/* Hash mapping for relos that can cause trampolines. */
++static const u16 tramp_map[] = {
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 0,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535,
++ 65535
++};
++
++static const struct c6000_gen_code tramp_gen_info[] = {
++ /* Tramp caused by R_C60PCR21 */
++ {
++ /* Header - 8 instructions, 2 relos */
++ {
++ sizeof(u32) * C6X_TRAMP_WORD_COUNT,
++ 2,
++ FIELD_OFFSET(struct c6000_gen_code, relos)
++ },
++
++ /* Trampoline instructions */
++ {
++ 0x053C54F7, /* STW.D2T2 B10, *sp--[2] */
++ 0x0500002A, /* || MVK.S2 <blank>, B10 */
++ 0x0500006A, /* MVKH.S2 <blank>, B10 */
++ 0x00280362, /* B.S2 B10 */
++ 0x053C52E6, /* LDW.D2T2 *++sp[2], B10 */
++ 0x00006000, /* NOP 4 */
++ 0x00000000, /* NOP */
++ 0x00000000 /* NOP */
++ },
++
++ /* Relocations */
++ {
++ {4, 0, 0, R_C60LO16},
++ {8, 0, 0, R_C60HI16},
++ {0, 0, 0, 0x0000},
++ {0, 0, 0, 0x0000},
++ {0, 0, 0, 0x0000},
++ {0, 0, 0, 0x0000},
++ {0, 0, 0, 0x0000},
++ {0, 0, 0, 0x0000}
++ }
++ }
++};
++
++/* TARGET SPECIFIC FUNCTIONS THAT MUST BE DEFINED */
++static u32 tramp_size_get(void)
++{
++ return sizeof(u32) * C6X_TRAMP_WORD_COUNT;
++}
++
++static u32 tramp_img_pkt_size_get(void)
++{
++ return sizeof(struct c6000_gen_code);
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/gen/gb.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/gen/gb.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,167 @@
++/*
++ * gb.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Generic bitmap operations.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <linux/types.h>
++/* ----------------------------------- This */
++#include <dspbridge/gs.h>
++#include <dspbridge/gb.h>
++
++struct gb_t_map {
++ u32 len;
++ u32 wcnt;
++ u32 *words;
++};
++
++/*
++ * ======== gb_clear ========
++ * purpose:
++ * Clears a bit in the bit map.
++ */
++
++void gb_clear(struct gb_t_map *map, u32 bitn)
++{
++ u32 mask;
++
++ mask = 1L << (bitn % BITS_PER_LONG);
++ map->words[bitn / BITS_PER_LONG] &= ~mask;
++}
++
++/*
++ * ======== gb_create ========
++ * purpose:
++ * Creates a bit map.
++ */
++
++struct gb_t_map *gb_create(u32 len)
++{
++ struct gb_t_map *map;
++ u32 i;
++ map = (struct gb_t_map *)gs_alloc(sizeof(struct gb_t_map));
++ if (map != NULL) {
++ map->len = len;
++ map->wcnt = len / BITS_PER_LONG + 1;
++ map->words = (u32 *) gs_alloc(map->wcnt * sizeof(u32));
++ if (map->words != NULL) {
++ for (i = 0; i < map->wcnt; i++)
++ map->words[i] = 0L;
++
++ } else {
++ gs_frees(map, sizeof(struct gb_t_map));
++ map = NULL;
++ }
++ }
++
++ return map;
++}
++
++/*
++ * ======== gb_delete ========
++ * purpose:
++ * Frees a bit map.
++ */
++
++void gb_delete(struct gb_t_map *map)
++{
++ gs_frees(map->words, map->wcnt * sizeof(u32));
++ gs_frees(map, sizeof(struct gb_t_map));
++}
++
++/*
++ * ======== gb_findandset ========
++ * purpose:
++ * Finds a free bit and sets it.
++ */
++u32 gb_findandset(struct gb_t_map *map)
++{
++ u32 bitn;
++
++ bitn = gb_minclear(map);
++
++ if (bitn != GB_NOBITS)
++ gb_set(map, bitn);
++
++ return bitn;
++}
++
++/*
++ * ======== gb_minclear ========
++ * purpose:
++ * returns the location of the first unset bit in the bit map.
++ */
++u32 gb_minclear(struct gb_t_map *map)
++{
++ u32 bit_location = 0;
++ u32 bit_acc = 0;
++ u32 i;
++ u32 bit;
++ u32 *word;
++
++ for (word = map->words, i = 0; i < map->wcnt; word++, i++) {
++ if (~*word) {
++ for (bit = 0; bit < BITS_PER_LONG; bit++, bit_acc++) {
++ if (bit_acc == map->len)
++ return GB_NOBITS;
++
++ if (~*word & (1L << bit)) {
++ bit_location = i * BITS_PER_LONG + bit;
++ return bit_location;
++ }
++
++ }
++ } else {
++ bit_acc += BITS_PER_LONG;
++ }
++ }
++
++ return GB_NOBITS;
++}
++
++/*
++ * ======== gb_set ========
++ * purpose:
++ * Sets a bit in the bit map.
++ */
++
++void gb_set(struct gb_t_map *map, u32 bitn)
++{
++ u32 mask;
++
++ mask = 1L << (bitn % BITS_PER_LONG);
++ map->words[bitn / BITS_PER_LONG] |= mask;
++}
++
++/*
++ * ======== gb_test ========
++ * purpose:
++ * Returns true if the bit is set in the specified location.
++ */
++
++bool gb_test(struct gb_t_map *map, u32 bitn)
++{
++ bool state;
++ u32 mask;
++ u32 word;
++
++ mask = 1L << (bitn % BITS_PER_LONG);
++ word = map->words[bitn / BITS_PER_LONG];
++ state = word & mask ? true : false;
++
++ return state;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/gen/gh.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/gen/gh.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,215 @@
++/*
++ * gh.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++#include <dspbridge/host_os.h>
++
++#include <dspbridge/gs.h>
++
++#include <dspbridge/gh.h>
++
++struct element {
++ struct element *next;
++ u8 data[1];
++};
++
++struct gh_t_hash_tab {
++ u16 max_bucket;
++ u16 val_size;
++ struct element **buckets;
++ u16(*hash) (void *, u16);
++ bool(*match) (void *, void *);
++ void (*delete) (void *);
++};
++
++static void noop(void *p);
++static s32 cur_init;
++static void myfree(void *ptr, s32 size);
++
++/*
++ * ======== gh_create ========
++ */
++
++struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
++ u16(*hash) (void *, u16), bool(*match) (void *,
++ void *),
++ void (*delete) (void *))
++{
++ struct gh_t_hash_tab *hash_tab;
++ u16 i;
++ hash_tab =
++ (struct gh_t_hash_tab *)gs_alloc(sizeof(struct gh_t_hash_tab));
++ if (hash_tab == NULL)
++ return NULL;
++ hash_tab->max_bucket = max_bucket;
++ hash_tab->val_size = val_size;
++ hash_tab->hash = hash;
++ hash_tab->match = match;
++ hash_tab->delete = delete == NULL ? noop : delete;
++
++ hash_tab->buckets = (struct element **)
++ gs_alloc(sizeof(struct element *) * max_bucket);
++ if (hash_tab->buckets == NULL) {
++ gh_delete(hash_tab);
++ return NULL;
++ }
++
++ for (i = 0; i < max_bucket; i++)
++ hash_tab->buckets[i] = NULL;
++
++ return hash_tab;
++}
++
++/*
++ * ======== gh_delete ========
++ */
++void gh_delete(struct gh_t_hash_tab *hash_tab)
++{
++ struct element *elem, *next;
++ u16 i;
++
++ if (hash_tab != NULL) {
++ if (hash_tab->buckets != NULL) {
++ for (i = 0; i < hash_tab->max_bucket; i++) {
++ for (elem = hash_tab->buckets[i]; elem != NULL;
++ elem = next) {
++ next = elem->next;
++ (*hash_tab->delete) (elem->data);
++ myfree(elem,
++ sizeof(struct element) - 1 +
++ hash_tab->val_size);
++ }
++ }
++
++ myfree(hash_tab->buckets, sizeof(struct element *)
++ * hash_tab->max_bucket);
++ }
++
++ myfree(hash_tab, sizeof(struct gh_t_hash_tab));
++ }
++}
++
++/*
++ * ======== gh_exit ========
++ */
++
++void gh_exit(void)
++{
++ if (cur_init-- == 1)
++ gs_exit();
++
++}
++
++/*
++ * ======== gh_find ========
++ */
++
++void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
++{
++ struct element *elem;
++
++ elem = hash_tab->buckets[(*hash_tab->hash) (key, hash_tab->max_bucket)];
++
++ for (; elem; elem = elem->next) {
++ if ((*hash_tab->match) (key, elem->data))
++ return elem->data;
++ }
++
++ return NULL;
++}
++
++/*
++ * ======== gh_init ========
++ */
++
++void gh_init(void)
++{
++ if (cur_init++ == 0)
++ gs_init();
++}
++
++/*
++ * ======== gh_insert ========
++ */
++
++void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value)
++{
++ struct element *elem;
++ u16 i;
++ char *src, *dst;
++
++ elem = (struct element *)gs_alloc(sizeof(struct element) - 1 +
++ hash_tab->val_size);
++ if (elem != NULL) {
++
++ dst = (char *)elem->data;
++ src = (char *)value;
++ for (i = 0; i < hash_tab->val_size; i++)
++ *dst++ = *src++;
++
++ i = (*hash_tab->hash) (key, hash_tab->max_bucket);
++ elem->next = hash_tab->buckets[i];
++ hash_tab->buckets[i] = elem;
++
++ return elem->data;
++ }
++
++ return NULL;
++}
++
++/*
++ * ======== noop ========
++ */
++/* ARGSUSED */
++static void noop(void *p)
++{
++ p = p; /* stifle compiler warning */
++}
++
++/*
++ * ======== myfree ========
++ */
++static void myfree(void *ptr, s32 size)
++{
++ gs_free(ptr);
++}
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/**
++ * gh_iterate() - This function goes through all the elements in the hash table
++ * looking for the dsp symbols.
++ * @hash_tab: Hash table
++ * @callback: pointer to callback function
++ * @user_data: User data, contains the find_symbol_context pointer
++ *
++ */
++void gh_iterate(struct gh_t_hash_tab *hash_tab,
++ void (*callback)(void *, void *), void *user_data)
++{
++ struct element *elem;
++ u32 i;
++
++ if (hash_tab && hash_tab->buckets)
++ for (i = 0; i < hash_tab->max_bucket; i++) {
++ elem = hash_tab->buckets[i];
++ while (elem) {
++ callback(&elem->data, user_data);
++ elem = elem->next;
++ }
++ }
++}
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/gen/gs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/gen/gs.c 2010-08-18 11:24:23.170057650 +0300
+@@ -0,0 +1,89 @@
++/*
++ * gs.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * General storage memory allocator services.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++#include <linux/types.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/gs.h>
++
++#include <linux/slab.h>
++
++/* ----------------------------------- Globals */
++static u32 cumsize;
++
++/*
++ * ======== gs_alloc ========
++ * purpose:
++ * Allocates memory of the specified size.
++ */
++void *gs_alloc(u32 size)
++{
++ void *p;
++
++ p = kzalloc(size, GFP_KERNEL);
++ if (p == NULL)
++ return NULL;
++ cumsize += size;
++ return p;
++}
++
++/*
++ * ======== gs_exit ========
++ * purpose:
++ * Discontinue the usage of the GS module.
++ */
++void gs_exit(void)
++{
++ /* Do nothing */
++}
++
++/*
++ * ======== gs_free ========
++ * purpose:
++ * Frees the memory.
++ */
++void gs_free(void *ptr)
++{
++ kfree(ptr);
++ /* ack! no size info */
++ /* cumsize -= size; */
++}
++
++/*
++ * ======== gs_frees ========
++ * purpose:
++ * Frees the memory.
++ */
++void gs_frees(void *ptr, u32 size)
++{
++ kfree(ptr);
++ cumsize -= size;
++}
++
++/*
++ * ======== gs_init ========
++ * purpose:
++ * Initializes the GS module.
++ */
++void gs_init(void)
++{
++ /* Do nothing */
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/gen/uuidutil.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/gen/uuidutil.c 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,113 @@
++/*
++ * uuidutil.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This file contains the implementation of UUID helper functions.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/uuidutil.h>
++
++/*
++ * ======== uuid_uuid_to_string ========
++ * Purpose:
++ * Converts a struct dsp_uuid to a string.
++ * Note: snprintf format specifier is:
++ * %[flags] [width] [.precision] [{h | l | I64 | L}]type
++ */
++void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
++ s32 size)
++{
++ s32 i; /* return result from snprintf. */
++
++ DBC_REQUIRE(uuid_obj && sz_uuid);
++
++ i = snprintf(sz_uuid, size,
++ "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
++ uuid_obj->ul_data1, uuid_obj->us_data2, uuid_obj->us_data3,
++ uuid_obj->uc_data4, uuid_obj->uc_data5,
++ uuid_obj->uc_data6[0], uuid_obj->uc_data6[1],
++ uuid_obj->uc_data6[2], uuid_obj->uc_data6[3],
++ uuid_obj->uc_data6[4], uuid_obj->uc_data6[5]);
++
++ DBC_ENSURE(i != -1);
++}
++
++static s32 uuid_hex_to_bin(char *buf, s32 len)
++{
++ s32 i;
++ s32 result = 0;
++ int value;
++
++ for (i = 0; i < len; i++) {
++ value = hex_to_bin(*buf++);
++ result *= 16;
++ if (value > 0)
++ result += value;
++ }
++
++ return result;
++}
++
++/*
++ * ======== uuid_uuid_from_string ========
++ * Purpose:
++ * Converts a string to a struct dsp_uuid.
++ */
++void uuid_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
++{
++ s32 j;
++
++ uuid_obj->ul_data1 = uuid_hex_to_bin(sz_uuid, 8);
++ sz_uuid += 8;
++
++ /* Step over underscore */
++ sz_uuid++;
++
++ uuid_obj->us_data2 = (u16) uuid_hex_to_bin(sz_uuid, 4);
++ sz_uuid += 4;
++
++ /* Step over underscore */
++ sz_uuid++;
++
++ uuid_obj->us_data3 = (u16) uuid_hex_to_bin(sz_uuid, 4);
++ sz_uuid += 4;
++
++ /* Step over underscore */
++ sz_uuid++;
++
++ uuid_obj->uc_data4 = (u8) uuid_hex_to_bin(sz_uuid, 2);
++ sz_uuid += 2;
++
++ uuid_obj->uc_data5 = (u8) uuid_hex_to_bin(sz_uuid, 2);
++ sz_uuid += 2;
++
++ /* Step over underscore */
++ sz_uuid++;
++
++ for (j = 0; j < 6; j++) {
++ uuid_obj->uc_data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2);
++ sz_uuid += 2;
++ }
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/hw/EasiGlobal.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/hw/EasiGlobal.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,41 @@
++/*
++ * EasiGlobal.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2007 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _EASIGLOBAL_H
++#define _EASIGLOBAL_H
++#include <linux/types.h>
++
++/*
++ * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
++ *
++ * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
++ */
++
++#define READ_ONLY 1
++#define WRITE_ONLY 2
++#define READ_WRITE 3
++
++/*
++ * MACRO: _DEBUG_LEVEL1_EASI
++ *
++ * DESCRIPTION: A MACRO which can be used to indicate that a particular beach
++ * register access function was called.
++ *
++ * NOTE: We currently dont use this functionality.
++ */
++#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
++
++#endif /* _EASIGLOBAL_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/hw/MMUAccInt.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/hw/MMUAccInt.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,76 @@
++/*
++ * MMUAccInt.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2007 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _MMU_ACC_INT_H
++#define _MMU_ACC_INT_H
++
++/* Mappings of level 1 EASI function numbers to function names */
++
++#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
++#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
++#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
++#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
++#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
++#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
++#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
++#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
++#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
++#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
++#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
++#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
++#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
++#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
++#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
++#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
++#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
++#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
++#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
++#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
++#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
++#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
++#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
++
++/* Register offset address definitions */
++#define MMU_MMU_SYSCONFIG_OFFSET 0x10
++#define MMU_MMU_IRQSTATUS_OFFSET 0x18
++#define MMU_MMU_IRQENABLE_OFFSET 0x1c
++#define MMU_MMU_WALKING_ST_OFFSET 0x40
++#define MMU_MMU_CNTL_OFFSET 0x44
++#define MMU_MMU_FAULT_AD_OFFSET 0x48
++#define MMU_MMU_TTB_OFFSET 0x4c
++#define MMU_MMU_LOCK_OFFSET 0x50
++#define MMU_MMU_LD_TLB_OFFSET 0x54
++#define MMU_MMU_CAM_OFFSET 0x58
++#define MMU_MMU_RAM_OFFSET 0x5c
++#define MMU_MMU_GFLUSH_OFFSET 0x60
++#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
++/* Bitfield mask and offset declarations */
++#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
++#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
++#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
++#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
++#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
++#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
++#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
++#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
++#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
++#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
++#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
++#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
++#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
++#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
++
++#endif /* _MMU_ACC_INT_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/hw/MMURegAcM.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/hw/MMURegAcM.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,225 @@
++/*
++ * MMURegAcM.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2007 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _MMU_REG_ACM_H
++#define _MMU_REG_ACM_H
++
++#include <linux/io.h>
++#include <EasiGlobal.h>
++
++#include "MMUAccInt.h"
++
++#if defined(USE_LEVEL_1_MACROS)
++
++#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
++ __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
++
++#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
++ register u32 data = __raw_readl((base_address)+offset);\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
++ data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
++ new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
++ new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
++ new_value |= data;\
++ __raw_writel(new_value, base_address+offset);\
++}
++
++#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
++ register u32 data = __raw_readl((base_address)+offset);\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
++ data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
++ new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
++ new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
++ new_value |= data;\
++ __raw_writel(new_value, base_address+offset);\
++}
++
++#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
++ (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
++ __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
++
++#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
++ __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
++
++#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
++ (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
++ & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
++ MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
++
++#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
++ (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
++ MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
++ MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
++
++#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_CNTL_OFFSET;\
++ register u32 data = __raw_readl((base_address)+offset);\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
++ data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
++ new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
++ new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
++ new_value |= data;\
++ __raw_writel(new_value, base_address+offset);\
++}
++
++#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_CNTL_OFFSET;\
++ register u32 data = __raw_readl((base_address)+offset);\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
++ data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
++ new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
++ new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
++ new_value |= data;\
++ __raw_writel(new_value, base_address+offset);\
++}
++
++#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
++ __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
++
++#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_TTB_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
++ __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
++
++#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_LOCK_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
++ (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
++ MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
++ MMU_MMU_LOCK_BASE_VALUE_OFFSET))
++
++#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_LOCK_OFFSET;\
++ register u32 data = __raw_readl((base_address)+offset);\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
++ data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
++ new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
++ new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
++ new_value |= data;\
++ __raw_writel(new_value, base_address+offset);\
++}
++
++#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
++ (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
++ MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
++ MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
++
++#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_LOCK_OFFSET;\
++ register u32 data = __raw_readl((base_address)+offset);\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
++ data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
++ new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
++ new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
++ new_value |= data;\
++ __raw_writel(new_value, base_address+offset);\
++}
++
++#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
++ (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
++ (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
++ MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
++
++#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
++ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
++ __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
++
++#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_CAM_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_RAM_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
++{\
++ const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
++ register u32 new_value = (value);\
++ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
++ __raw_writel(new_value, (base_address)+offset);\
++}
++
++#endif /* USE_LEVEL_1_MACROS */
++
++#endif /* _MMU_REG_ACM_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/hw/hw_defs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/hw/hw_defs.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,58 @@
++/*
++ * hw_defs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global HW definitions
++ *
++ * Copyright (C) 2007 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _HW_DEFS_H
++#define _HW_DEFS_H
++
++/* Page size */
++#define HW_PAGE_SIZE4KB 0x1000
++#define HW_PAGE_SIZE64KB 0x10000
++#define HW_PAGE_SIZE1MB 0x100000
++#define HW_PAGE_SIZE16MB 0x1000000
++
++/* hw_status: return type for HW API */
++typedef long hw_status;
++
++/* Macro used to set and clear any bit */
++#define HW_CLEAR 0
++#define HW_SET 1
++
++/* hw_endianism_t: Enumerated Type used to specify the endianism
++ * Do NOT change these values. They are used as bit fields. */
++enum hw_endianism_t {
++ HW_LITTLE_ENDIAN,
++ HW_BIG_ENDIAN
++};
++
++/* hw_element_size_t: Enumerated Type used to specify the element size
++ * Do NOT change these values. They are used as bit fields. */
++enum hw_element_size_t {
++ HW_ELEM_SIZE8BIT,
++ HW_ELEM_SIZE16BIT,
++ HW_ELEM_SIZE32BIT,
++ HW_ELEM_SIZE64BIT
++};
++
++/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
++enum hw_idle_mode_t {
++ HW_FORCE_IDLE,
++ HW_NO_IDLE,
++ HW_SMART_IDLE
++};
++
++#endif /* _HW_DEFS_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/hw/hw_mmu.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/hw/hw_mmu.c 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,562 @@
++/*
++ * hw_mmu.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * API definitions to setup MMU TLB and PTE
++ *
++ * Copyright (C) 2007 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/io.h>
++#include "MMURegAcM.h"
++#include <hw_defs.h>
++#include <hw_mmu.h>
++#include <linux/types.h>
++#include <linux/err.h>
++
++#define MMU_BASE_VAL_MASK 0xFC00
++#define MMU_PAGE_MAX 3
++#define MMU_ELEMENTSIZE_MAX 3
++#define MMU_ADDR_MASK 0xFFFFF000
++#define MMU_TTB_MASK 0xFFFFC000
++#define MMU_SECTION_ADDR_MASK 0xFFF00000
++#define MMU_SSECTION_ADDR_MASK 0xFF000000
++#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
++#define MMU_LARGE_PAGE_MASK 0xFFFF0000
++#define MMU_SMALL_PAGE_MASK 0xFFFFF000
++
++#define MMU_LOAD_TLB 0x00000001
++#define MMU_GFLUSH 0x60
++
++/*
++ * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS)
++ */
++enum hw_mmu_page_size_t {
++ HW_MMU_SECTION,
++ HW_MMU_LARGE_PAGE,
++ HW_MMU_SMALL_PAGE,
++ HW_MMU_SUPERSECTION
++};
++
++/*
++ * FUNCTION : mmu_flush_entry
++ *
++ * INPUTS:
++ *
++ * Identifier : base_address
++ * Type : const u32
++ * Description : Base Address of instance of MMU module
++ *
++ * RETURNS:
++ *
++ * Type : hw_status
++ * Description : 0 -- No errors occured
++ * RET_BAD_NULL_PARAM -- A Pointer
++ * Paramater was set to NULL
++ *
++ * PURPOSE: : Flush the TLB entry pointed by the
++ * lock counter register
++ * even if this entry is set protected
++ *
++ * METHOD: : Check the Input parameter and Flush a
++ * single entry in the TLB.
++ */
++static hw_status mmu_flush_entry(const void __iomem *base_address);
++
++/*
++ * FUNCTION : mmu_set_cam_entry
++ *
++ * INPUTS:
++ *
++ * Identifier : base_address
++ * TypE : const u32
++ * Description : Base Address of instance of MMU module
++ *
++ * Identifier : page_sz
++ * TypE : const u32
++ * Description : It indicates the page size
++ *
++ * Identifier : preserved_bit
++ * Type : const u32
++ * Description : It indicates the TLB entry is preserved entry
++ * or not
++ *
++ * Identifier : valid_bit
++ * Type : const u32
++ * Description : It indicates the TLB entry is valid entry or not
++ *
++ *
++ * Identifier : virtual_addr_tag
++ * Type : const u32
++ * Description : virtual Address
++ *
++ * RETURNS:
++ *
++ * Type : hw_status
++ * Description : 0 -- No errors occured
++ * RET_BAD_NULL_PARAM -- A Pointer Paramater
++ * was set to NULL
++ * RET_PARAM_OUT_OF_RANGE -- Input Parameter out
++ * of Range
++ *
++ * PURPOSE: : Set MMU_CAM reg
++ *
++ * METHOD: : Check the Input parameters and set the CAM entry.
++ */
++static hw_status mmu_set_cam_entry(const void __iomem *base_address,
++ const u32 page_sz,
++ const u32 preserved_bit,
++ const u32 valid_bit,
++ const u32 virtual_addr_tag);
++
++/*
++ * FUNCTION : mmu_set_ram_entry
++ *
++ * INPUTS:
++ *
++ * Identifier : base_address
++ * Type : const u32
++ * Description : Base Address of instance of MMU module
++ *
++ * Identifier : physical_addr
++ * Type : const u32
++ * Description : Physical Address to which the corresponding
++ * virtual Address shouldpoint
++ *
++ * Identifier : endianism
++ * Type : hw_endianism_t
++ * Description : endianism for the given page
++ *
++ * Identifier : element_size
++ * Type : hw_element_size_t
++ * Description : The element size ( 8,16, 32 or 64 bit)
++ *
++ * Identifier : mixed_size
++ * Type : hw_mmu_mixed_size_t
++ * Description : Element Size to follow CPU or TLB
++ *
++ * RETURNS:
++ *
++ * Type : hw_status
++ * Description : 0 -- No errors occured
++ * RET_BAD_NULL_PARAM -- A Pointer Paramater
++ * was set to NULL
++ * RET_PARAM_OUT_OF_RANGE -- Input Parameter
++ * out of Range
++ *
++ * PURPOSE: : Set MMU_CAM reg
++ *
++ * METHOD: : Check the Input parameters and set the RAM entry.
++ */
++static hw_status mmu_set_ram_entry(const void __iomem *base_address,
++ const u32 physical_addr,
++ enum hw_endianism_t endianism,
++ enum hw_element_size_t element_size,
++ enum hw_mmu_mixed_size_t mixed_size);
++
++/* HW FUNCTIONS */
++
++hw_status hw_mmu_enable(const void __iomem *base_address)
++{
++ hw_status status = 0;
++
++ MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
++
++ return status;
++}
++
++hw_status hw_mmu_disable(const void __iomem *base_address)
++{
++ hw_status status = 0;
++
++ MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
++
++ return status;
++}
++
++hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
++ u32 num_locked_entries)
++{
++ hw_status status = 0;
++
++ MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
++
++ return status;
++}
++
++hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
++ u32 victim_entry_num)
++{
++ hw_status status = 0;
++
++ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
++
++ return status;
++}
++
++hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
++{
++ hw_status status = 0;
++
++ MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
++
++ return status;
++}
++
++hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
++{
++ hw_status status = 0;
++ u32 irq_reg;
++
++ irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
++
++ MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
++
++ return status;
++}
++
++hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
++{
++ hw_status status = 0;
++ u32 irq_reg;
++
++ irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
++
++ MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
++
++ return status;
++}
++
++hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
++{
++ hw_status status = 0;
++
++ *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
++
++ return status;
++}
++
++hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
++{
++ hw_status status = 0;
++
++ /* read values from register */
++ *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
++
++ return status;
++}
++
++hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
++{
++ hw_status status = 0;
++ u32 load_ttb;
++
++ load_ttb = ttb_phys_addr & ~0x7FUL;
++ /* write values to register */
++ MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
++
++ return status;
++}
++
++hw_status hw_mmu_twl_enable(const void __iomem *base_address)
++{
++ hw_status status = 0;
++
++ MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
++
++ return status;
++}
++
++hw_status hw_mmu_twl_disable(const void __iomem *base_address)
++{
++ hw_status status = 0;
++
++ MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
++
++ return status;
++}
++
++hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
++ u32 page_sz)
++{
++ hw_status status = 0;
++ u32 virtual_addr_tag;
++ enum hw_mmu_page_size_t pg_size_bits;
++
++ switch (page_sz) {
++ case HW_PAGE_SIZE4KB:
++ pg_size_bits = HW_MMU_SMALL_PAGE;
++ break;
++
++ case HW_PAGE_SIZE64KB:
++ pg_size_bits = HW_MMU_LARGE_PAGE;
++ break;
++
++ case HW_PAGE_SIZE1MB:
++ pg_size_bits = HW_MMU_SECTION;
++ break;
++
++ case HW_PAGE_SIZE16MB:
++ pg_size_bits = HW_MMU_SUPERSECTION;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ /* Generate the 20-bit tag from virtual address */
++ virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
++
++ mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
++
++ mmu_flush_entry(base_address);
++
++ return status;
++}
++
++hw_status hw_mmu_tlb_add(const void __iomem *base_address,
++ u32 physical_addr,
++ u32 virtual_addr,
++ u32 page_sz,
++ u32 entry_num,
++ struct hw_mmu_map_attrs_t *map_attrs,
++ s8 preserved_bit, s8 valid_bit)
++{
++ hw_status status = 0;
++ u32 lock_reg;
++ u32 virtual_addr_tag;
++ enum hw_mmu_page_size_t mmu_pg_size;
++
++ /*Check the input Parameters */
++ switch (page_sz) {
++ case HW_PAGE_SIZE4KB:
++ mmu_pg_size = HW_MMU_SMALL_PAGE;
++ break;
++
++ case HW_PAGE_SIZE64KB:
++ mmu_pg_size = HW_MMU_LARGE_PAGE;
++ break;
++
++ case HW_PAGE_SIZE1MB:
++ mmu_pg_size = HW_MMU_SECTION;
++ break;
++
++ case HW_PAGE_SIZE16MB:
++ mmu_pg_size = HW_MMU_SUPERSECTION;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
++
++ /* Generate the 20-bit tag from virtual address */
++ virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
++
++ /* Write the fields in the CAM Entry Register */
++ mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
++ virtual_addr_tag);
++
++ /* Write the different fields of the RAM Entry Register */
++ /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
++ mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
++ map_attrs->element_size, map_attrs->mixed_size);
++
++ /* Update the MMU Lock Register */
++ /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
++ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
++
++ /* Enable loading of an entry in TLB by writing 1
++ into LD_TLB_REG register */
++ MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
++
++ MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
++
++ return status;
++}
++
++hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
++ u32 physical_addr,
++ u32 virtual_addr,
++ u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
++{
++ hw_status status = 0;
++ u32 pte_addr, pte_val;
++ s32 num_entries = 1;
++
++ switch (page_sz) {
++ case HW_PAGE_SIZE4KB:
++ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
++ virtual_addr &
++ MMU_SMALL_PAGE_MASK);
++ pte_val =
++ ((physical_addr & MMU_SMALL_PAGE_MASK) |
++ (map_attrs->endianism << 9) | (map_attrs->
++ element_size << 4) |
++ (map_attrs->mixed_size << 11) | 2);
++ break;
++
++ case HW_PAGE_SIZE64KB:
++ num_entries = 16;
++ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
++ virtual_addr &
++ MMU_LARGE_PAGE_MASK);
++ pte_val =
++ ((physical_addr & MMU_LARGE_PAGE_MASK) |
++ (map_attrs->endianism << 9) | (map_attrs->
++ element_size << 4) |
++ (map_attrs->mixed_size << 11) | 1);
++ break;
++
++ case HW_PAGE_SIZE1MB:
++ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
++ virtual_addr &
++ MMU_SECTION_ADDR_MASK);
++ pte_val =
++ ((((physical_addr & MMU_SECTION_ADDR_MASK) |
++ (map_attrs->endianism << 15) | (map_attrs->
++ element_size << 10) |
++ (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
++ break;
++
++ case HW_PAGE_SIZE16MB:
++ num_entries = 16;
++ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
++ virtual_addr &
++ MMU_SSECTION_ADDR_MASK);
++ pte_val =
++ (((physical_addr & MMU_SSECTION_ADDR_MASK) |
++ (map_attrs->endianism << 15) | (map_attrs->
++ element_size << 10) |
++ (map_attrs->mixed_size << 17)
++ ) | 0x40000 | 0x2);
++ break;
++
++ case HW_MMU_COARSE_PAGE_SIZE:
++ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
++ virtual_addr &
++ MMU_SECTION_ADDR_MASK);
++ pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ while (--num_entries >= 0)
++ ((u32 *) pte_addr)[num_entries] = pte_val;
++
++ return status;
++}
++
++hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
++{
++ hw_status status = 0;
++ u32 pte_addr;
++ s32 num_entries = 1;
++
++ switch (page_size) {
++ case HW_PAGE_SIZE4KB:
++ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
++ virtual_addr &
++ MMU_SMALL_PAGE_MASK);
++ break;
++
++ case HW_PAGE_SIZE64KB:
++ num_entries = 16;
++ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
++ virtual_addr &
++ MMU_LARGE_PAGE_MASK);
++ break;
++
++ case HW_PAGE_SIZE1MB:
++ case HW_MMU_COARSE_PAGE_SIZE:
++ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
++ virtual_addr &
++ MMU_SECTION_ADDR_MASK);
++ break;
++
++ case HW_PAGE_SIZE16MB:
++ num_entries = 16;
++ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
++ virtual_addr &
++ MMU_SSECTION_ADDR_MASK);
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ while (--num_entries >= 0)
++ ((u32 *) pte_addr)[num_entries] = 0;
++
++ return status;
++}
++
++/* mmu_flush_entry */
++static hw_status mmu_flush_entry(const void __iomem *base_address)
++{
++ hw_status status = 0;
++ u32 flush_entry_data = 0x1;
++
++ /* write values to register */
++ MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
++
++ return status;
++}
++
++/* mmu_set_cam_entry */
++static hw_status mmu_set_cam_entry(const void __iomem *base_address,
++ const u32 page_sz,
++ const u32 preserved_bit,
++ const u32 valid_bit,
++ const u32 virtual_addr_tag)
++{
++ hw_status status = 0;
++ u32 mmu_cam_reg;
++
++ mmu_cam_reg = (virtual_addr_tag << 12);
++ mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
++ (preserved_bit << 3);
++
++ /* write values to register */
++ MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
++
++ return status;
++}
++
++/* mmu_set_ram_entry */
++static hw_status mmu_set_ram_entry(const void __iomem *base_address,
++ const u32 physical_addr,
++ enum hw_endianism_t endianism,
++ enum hw_element_size_t element_size,
++ enum hw_mmu_mixed_size_t mixed_size)
++{
++ hw_status status = 0;
++ u32 mmu_ram_reg;
++
++ mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
++ mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
++ (mixed_size << 6));
++
++ /* write values to register */
++ MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
++
++ return status;
++
++}
++
++void hw_mmu_tlb_flush_all(const void __iomem *base)
++{
++ __raw_writeb(1, base + MMU_GFLUSH);
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/hw/hw_mmu.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/hw/hw_mmu.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,163 @@
++/*
++ * hw_mmu.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * MMU types and API declarations
++ *
++ * Copyright (C) 2007 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _HW_MMU_H
++#define _HW_MMU_H
++
++#include <linux/types.h>
++
++/* Bitmasks for interrupt sources */
++#define HW_MMU_TRANSLATION_FAULT 0x2
++#define HW_MMU_ALL_INTERRUPTS 0x1F
++
++#define HW_MMU_COARSE_PAGE_SIZE 0x400
++
++/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
++ CPU/TLB Element size */
++enum hw_mmu_mixed_size_t {
++ HW_MMU_TLBES,
++ HW_MMU_CPUES
++};
++
++/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
++struct hw_mmu_map_attrs_t {
++ enum hw_endianism_t endianism;
++ enum hw_element_size_t element_size;
++ enum hw_mmu_mixed_size_t mixed_size;
++ bool donotlockmpupage;
++};
++
++extern hw_status hw_mmu_enable(const void __iomem *base_address);
++
++extern hw_status hw_mmu_disable(const void __iomem *base_address);
++
++extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
++ u32 num_locked_entries);
++
++extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
++ u32 victim_entry_num);
++
++/* For MMU faults */
++extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
++ u32 irq_mask);
++
++extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
++ u32 irq_mask);
++
++extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
++ u32 irq_mask);
++
++extern hw_status hw_mmu_event_status(const void __iomem *base_address,
++ u32 *irq_mask);
++
++extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
++ u32 *addr);
++
++/* Set the TT base address */
++extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
++ u32 ttb_phys_addr);
++
++extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
++
++extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
++
++extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
++ u32 virtual_addr, u32 page_sz);
++
++extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
++ u32 physical_addr,
++ u32 virtual_addr,
++ u32 page_sz,
++ u32 entry_num,
++ struct hw_mmu_map_attrs_t *map_attrs,
++ s8 preserved_bit, s8 valid_bit);
++
++/* For PTEs */
++extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
++ u32 physical_addr,
++ u32 virtual_addr,
++ u32 page_sz,
++ struct hw_mmu_map_attrs_t *map_attrs);
++
++extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
++ u32 virtual_addr, u32 page_size);
++
++void hw_mmu_tlb_flush_all(const void __iomem *base);
++
++static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
++{
++ u32 pte_addr;
++ u32 va31_to20;
++
++ va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
++ va31_to20 &= 0xFFFFFFFCUL;
++ pte_addr = l1_base + va31_to20;
++
++ return pte_addr;
++}
++
++static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
++{
++ u32 pte_addr;
++
++ pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
++
++ return pte_addr;
++}
++
++static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
++{
++ u32 pte_coarse;
++
++ pte_coarse = pte_val & 0xFFFFFC00;
++
++ return pte_coarse;
++}
++
++static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
++{
++ u32 pte_size = 0;
++
++ if ((pte_val & 0x3) == 0x1) {
++ /* Points to L2 PT */
++ pte_size = HW_MMU_COARSE_PAGE_SIZE;
++ }
++
++ if ((pte_val & 0x3) == 0x2) {
++ if (pte_val & (1 << 18))
++ pte_size = HW_PAGE_SIZE16MB;
++ else
++ pte_size = HW_PAGE_SIZE1MB;
++ }
++
++ return pte_size;
++}
++
++static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
++{
++ u32 pte_size = 0;
++
++ if (pte_val & 0x2)
++ pte_size = HW_PAGE_SIZE4KB;
++ else if (pte_val & 0x1)
++ pte_size = HW_PAGE_SIZE64KB;
++
++ return pte_size;
++}
++
++#endif /* _HW_MMU_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,181 @@
++/*
++ * _chnl_sm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Private header file defining channel manager and channel objects for
++ * a shared memory channel driver.
++ *
++ * Shared between the modules implementing the shared memory channel class
++ * library.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _CHNL_SM_
++#define _CHNL_SM_
++
++#include <dspbridge/dspapi.h>
++#include <dspbridge/dspdefs.h>
++
++#include <dspbridge/list.h>
++#include <dspbridge/ntfy.h>
++
++/*
++ * These target side symbols define the beginning and ending addresses
++ * of shared memory buffer. They are defined in the *cfg.cmd file by
++ * cdb code.
++ */
++#define CHNL_SHARED_BUFFER_BASE_SYM "_SHM_BEG"
++#define CHNL_SHARED_BUFFER_LIMIT_SYM "_SHM_END"
++#define BRIDGEINIT_BIOSGPTIMER "_BRIDGEINIT_BIOSGPTIMER"
++#define BRIDGEINIT_LOADMON_GPTIMER "_BRIDGEINIT_LOADMON_GPTIMER"
++
++#ifndef _CHNL_WORDSIZE
++#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 4 bytes/word */
++#endif
++
++#define MAXOPPS 16
++
++/* Shared memory config options */
++#define SHM_CURROPP 0 /* Set current OPP in shm */
++#define SHM_OPPINFO 1 /* Set dsp voltage and freq table values */
++#define SHM_GETOPP 2 /* Get opp requested by DSP */
++
++struct opp_table_entry {
++ u32 voltage;
++ u32 frequency;
++ u32 min_freq;
++ u32 max_freq;
++};
++
++struct opp_struct {
++ u32 curr_opp_pt;
++ u32 num_opp_pts;
++ struct opp_table_entry opp_point[MAXOPPS];
++};
++
++/* Request to MPU */
++struct opp_rqst_struct {
++ u32 rqst_dsp_freq;
++ u32 rqst_opp_pt;
++};
++
++/* Info to MPU */
++struct load_mon_struct {
++ u32 curr_dsp_load;
++ u32 curr_dsp_freq;
++ u32 pred_dsp_load;
++ u32 pred_dsp_freq;
++};
++
++/* Structure in shared between DSP and PC for communication. */
++struct shm {
++ u32 dsp_free_mask; /* Written by DSP, read by PC. */
++ u32 host_free_mask; /* Written by PC, read by DSP */
++
++ u32 input_full; /* Input channel has unread data. */
++ u32 input_id; /* Channel for which input is available. */
++ u32 input_size; /* Size of data block (in DSP words). */
++
++ u32 output_full; /* Output channel has unread data. */
++ u32 output_id; /* Channel for which output is available. */
++ u32 output_size; /* Size of data block (in DSP words). */
++
++ u32 arg; /* Arg for Issue/Reclaim (23 bits for 55x). */
++ u32 resvd; /* Keep structure size even for 32-bit DSPs */
++
++ /* Operating Point structure */
++ struct opp_struct opp_table_struct;
++ /* Operating Point Request structure */
++ struct opp_rqst_struct opp_request;
++ /* load monitor information structure */
++ struct load_mon_struct load_mon_info;
++#ifdef CONFIG_TIDSPBRIDGE_WDT3
++ /* Flag for WDT enable/disable F/I clocks */
++ u32 wdt_setclocks;
++ u32 wdt_overflow; /* WDT overflow time */
++ char dummy[176]; /* padding to 256 byte boundary */
++#else
++ char dummy[184]; /* padding to 256 byte boundary */
++#endif
++ u32 shm_dbg_var[64]; /* shared memory debug variables */
++};
++
++ /* Channel Manager: only one created per board: */
++struct chnl_mgr {
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++ struct io_mgr *hio_mgr; /* IO manager */
++ /* Device this board represents */
++ struct dev_object *hdev_obj;
++
++ /* These fields initialized in bridge_chnl_create(): */
++ u32 dw_output_mask; /* Host output channels w/ full buffers */
++ u32 dw_last_output; /* Last output channel fired from DPC */
++ /* Critical section object handle */
++ spinlock_t chnl_mgr_lock;
++ u32 word_size; /* Size in bytes of DSP word */
++ u8 max_channels; /* Total number of channels */
++ u8 open_channels; /* Total number of open channels */
++ struct chnl_object **ap_channel; /* Array of channels */
++ u8 dw_type; /* Type of channel class library */
++ /* If no shm syms, return for CHNL_Open */
++ int chnl_open_status;
++};
++
++/*
++ * Channel: up to CHNL_MAXCHANNELS per board or if DSP-DMA supported then
++ * up to CHNL_MAXCHANNELS + CHNL_MAXDDMACHNLS per board.
++ */
++struct chnl_object {
++ /* Pointer back to channel manager */
++ struct chnl_mgr *chnl_mgr_obj;
++ u32 chnl_id; /* Channel id */
++ u8 dw_state; /* Current channel state */
++ s8 chnl_mode; /* Chnl mode and attributes */
++ /* Chnl I/O completion event (user mode) */
++ void *user_event;
++ /* Abstract syncronization object */
++ struct sync_object *sync_event;
++ u32 process; /* Process which created this channel */
++ u32 pcb_arg; /* Argument to use with callback */
++ struct lst_list *pio_requests; /* List of IOR's to driver */
++ s32 cio_cs; /* Number of IOC's in queue */
++ s32 cio_reqs; /* Number of IORequests in queue */
++ s32 chnl_packets; /* Initial number of free Irps */
++ /* List of IOC's from driver */
++ struct lst_list *pio_completions;
++ struct lst_list *free_packets_list; /* List of free Irps */
++ struct ntfy_object *ntfy_obj;
++ u32 bytes_moved; /* Total number of bytes transfered */
++
++ /* For DSP-DMA */
++
++ /* Type of chnl transport:CHNL_[PCPY][DDMA] */
++ u32 chnl_type;
++};
++
++/* I/O Request/completion packet: */
++struct chnl_irp {
++ struct list_head link; /* Link to next CHIRP in queue. */
++ /* Buffer to be filled/emptied. (User) */
++ u8 *host_user_buf;
++ /* Buffer to be filled/emptied. (System) */
++ u8 *host_sys_buf;
++ u32 dw_arg; /* Issue/Reclaim argument. */
++ u32 dsp_tx_addr; /* Transfer address on DSP side. */
++ u32 byte_size; /* Bytes transferred. */
++ u32 buf_size; /* Actual buffer size when allocated. */
++ u32 status; /* Status of IO completion. */
++};
++
++#endif /* _CHNL_SM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/brddefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/brddefs.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,39 @@
++/*
++ * brddefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global BRD constants and types, shared between DSP API and Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef BRDDEFS_
++#define BRDDEFS_
++
++/* platform status values */
++#define BRD_STOPPED 0x0 /* No Monitor Loaded, Not running. */
++#define BRD_IDLE 0x1 /* Monitor Loaded, but suspended. */
++#define BRD_RUNNING 0x2 /* Monitor loaded, and executing. */
++#define BRD_UNKNOWN 0x3 /* Board state is indeterminate. */
++#define BRD_SYNCINIT 0x4
++#define BRD_LOADED 0x5
++#define BRD_LASTSTATE BRD_LOADED /* Set to highest legal board state. */
++#define BRD_SLEEP_TRANSITION 0x6 /* Sleep transition in progress */
++#define BRD_HIBERNATION 0x7 /* MPU initiated hibernation */
++#define BRD_RETENTION 0x8 /* Retention mode */
++#define BRD_DSP_HIBERNATION 0x9 /* DSP initiated hibernation */
++#define BRD_ERROR 0xA /* Board state is Error */
++
++/* BRD Object */
++struct brd_object;
++
++#endif /* BRDDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cfg.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cfg.h 2010-08-18 11:24:23.174059555 +0300
+@@ -0,0 +1,222 @@
++/*
++ * cfg.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * PM Configuration module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CFG_
++#define CFG_
++#include <dspbridge/host_os.h>
++#include <dspbridge/cfgdefs.h>
++
++/*
++ * ======== cfg_exit ========
++ * Purpose:
++ * Discontinue usage of the CFG module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * cfg_init(void) was previously called.
++ * Ensures:
++ * Resources acquired in cfg_init(void) are freed.
++ */
++extern void cfg_exit(void);
++
++/*
++ * ======== cfg_get_auto_start ========
++ * Purpose:
++ * Retrieve the autostart mask, if any, for this board.
++ * Parameters:
++ * dev_node_obj: Handle to the dev_node who's driver we are querying.
++ * auto_start: Ptr to location for 32 bit autostart mask.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: dev_node_obj is invalid.
++ * -ENODATA: Unable to retrieve resource.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: *auto_start contains autostart mask for this devnode.
++ */
++extern int cfg_get_auto_start(struct cfg_devnode *dev_node_obj,
++ u32 *auto_start);
++
++/*
++ * ======== cfg_get_cd_version ========
++ * Purpose:
++ * Retrieves the version of the PM Class Driver.
++ * Parameters:
++ * version: Ptr to u32 to contain version number upon return.
++ * Returns:
++ * 0: Success. version contains Class Driver version in
++ * the form: 0xAABBCCDD where AABB is Major version and
++ * CCDD is Minor.
++ * -EPERM: Failure.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: Success.
++ * else: *version is NULL.
++ */
++extern int cfg_get_cd_version(u32 *version);
++
++/*
++ * ======== cfg_get_dev_object ========
++ * Purpose:
++ * Retrieve the Device Object handle for a given devnode.
++ * Parameters:
++ * dev_node_obj: Platform's dev_node handle from which to retrieve
++ * value.
++ * value: Ptr to location to store the value.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: dev_node_obj is invalid or device_obj is invalid.
++ * -ENODATA: The resource is not available.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: *value is set to the retrieved u32.
++ * else: *value is set to 0L.
++ */
++extern int cfg_get_dev_object(struct cfg_devnode *dev_node_obj,
++ u32 *value);
++
++/*
++ * ======== cfg_get_exec_file ========
++ * Purpose:
++ * Retrieve the default executable, if any, for this board.
++ * Parameters:
++ * dev_node_obj: Handle to the dev_node who's driver we are querying.
++ * buf_size: Size of buffer.
++ * str_exec_file: Ptr to character buf to hold ExecFile.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: dev_node_obj is invalid or str_exec_file is invalid.
++ * -ENODATA: The resource is not available.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: Not more than buf_size bytes were copied into str_exec_file,
++ * and *str_exec_file contains default executable for this
++ * devnode.
++ */
++extern int cfg_get_exec_file(struct cfg_devnode *dev_node_obj,
++ u32 buf_size, char *str_exec_file);
++
++/*
++ * ======== cfg_get_object ========
++ * Purpose:
++ * Retrieve the Driver Object handle From the Registry
++ * Parameters:
++ * value: Ptr to location to store the value.
++ * dw_type Type of Object to Get
++ * Returns:
++ * 0: Success.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: *value is set to the retrieved u32(non-Zero).
++ * else: *value is set to 0L.
++ */
++extern int cfg_get_object(u32 *value, u8 dw_type);
++
++/*
++ * ======== cfg_get_perf_value ========
++ * Purpose:
++ * Retrieve a flag indicating whether PERF should log statistics for the
++ * PM class driver.
++ * Parameters:
++ * enable_perf: Location to store flag. 0 indicates the key was
++ * not found, or had a zero value. A nonzero value
++ * means the key was found and had a nonzero value.
++ * Returns:
++ * Requires:
++ * enable_perf != NULL;
++ * Ensures:
++ */
++extern void cfg_get_perf_value(bool *enable_perf);
++
++/*
++ * ======== cfg_get_zl_file ========
++ * Purpose:
++ * Retrieve the ZLFile, if any, for this board.
++ * Parameters:
++ * dev_node_obj: Handle to the dev_node who's driver we are querying.
++ * buf_size: Size of buffer.
++ * str_zl_file_name: Ptr to character buf to hold ZLFileName.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: str_zl_file_name is invalid or dev_node_obj is invalid.
++ * -ENODATA: couldn't find the ZLFileName.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: Not more than buf_size bytes were copied into
++ * str_zl_file_name, and *str_zl_file_name contains ZLFileName
++ * for this devnode.
++ */
++extern int cfg_get_zl_file(struct cfg_devnode *dev_node_obj,
++ u32 buf_size, char *str_zl_file_name);
++
++/*
++ * ======== cfg_init ========
++ * Purpose:
++ * Initialize the CFG module's private state.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * A requirement for each of the other public CFG functions.
++ */
++extern bool cfg_init(void);
++
++/*
++ * ======== cfg_set_dev_object ========
++ * Purpose:
++ * Store the Device Object handle for a given devnode.
++ * Parameters:
++ * dev_node_obj: Platform's dev_node handle we are storing value with.
++ * value: Arbitrary value to store.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: dev_node_obj is invalid.
++ * -EPERM: Internal Error.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: The Private u32 was successfully set.
++ */
++extern int cfg_set_dev_object(struct cfg_devnode *dev_node_obj,
++ u32 value);
++
++/*
++ * ======== CFG_SetDrvObject ========
++ * Purpose:
++ * Store the Driver Object handle.
++ * Parameters:
++ * value: Arbitrary value to store.
++ * dw_type Type of Object to Store
++ * Returns:
++ * 0: Success.
++ * -EPERM: Internal Error.
++ * Requires:
++ * CFG initialized.
++ * Ensures:
++ * 0: The Private u32 was successfully set.
++ */
++extern int cfg_set_object(u32 value, u8 dw_type);
++
++#endif /* CFG_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,81 @@
++/*
++ * cfgdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global CFG constants and types, shared between DSP API and Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CFGDEFS_
++#define CFGDEFS_
++
++/* Maximum length of module search path. */
++#define CFG_MAXSEARCHPATHLEN 255
++
++/* Maximum length of general paths. */
++#define CFG_MAXPATH 255
++
++/* Host Resources: */
++#define CFG_MAXMEMREGISTERS 9
++#define CFG_MAXIOPORTS 20
++#define CFG_MAXIRQS 7
++#define CFG_MAXDMACHANNELS 7
++
++/* IRQ flag */
++#define CFG_IRQSHARED 0x01 /* IRQ can be shared */
++
++/* DSP Resources: */
++#define CFG_DSPMAXMEMTYPES 10
++#define CFG_DEFAULT_NUM_WINDOWS 1 /* We support only one window. */
++
++/* A platform-related device handle: */
++struct cfg_devnode;
++
++/*
++ * Host resource structure.
++ */
++struct cfg_hostres {
++ u32 num_mem_windows; /* Set to default */
++ /* This is the base.memory */
++ u32 dw_mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */
++ u32 dw_mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */
++ u32 dw_mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */
++ u8 birq_registers; /* IRQ Number */
++ u8 birq_attrib; /* IRQ Attribute */
++ u32 dw_offset_for_monitor; /* The Shared memory starts from
++ * dw_mem_base + this offset */
++ /*
++ * Info needed by NODE for allocating channels to communicate with RMS:
++ * dw_chnl_offset: Offset of RMS channels. Lower channels are
++ * reserved.
++ * dw_chnl_buf_size: Size of channel buffer to send to RMS
++ * dw_num_chnls: Total number of channels
++ * (including reserved).
++ */
++ u32 dw_chnl_offset;
++ u32 dw_chnl_buf_size;
++ u32 dw_num_chnls;
++ void __iomem *dw_per_base;
++ u32 dw_per_pm_base;
++ u32 dw_core_pm_base;
++ void __iomem *dw_dmmu_base;
++ void __iomem *dw_sys_ctrl_base;
++};
++
++struct cfg_dspmemdesc {
++ u32 mem_type; /* Type of memory. */
++ u32 ul_min; /* Minimum amount of memory of this type. */
++ u32 ul_max; /* Maximum amount of memory of this type. */
++};
++
++#endif /* CFGDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/chnl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/chnl.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,130 @@
++/*
++ * chnl.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP API channel interface: multiplexes data streams through the single
++ * physical link managed by a Bridge driver.
++ *
++ * See DSP API chnl.h for more details.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CHNL_
++#define CHNL_
++
++#include <dspbridge/chnlpriv.h>
++
++/*
++ * ======== chnl_close ========
++ * Purpose:
++ * Ensures all pending I/O on this channel is cancelled, discards all
++ * queued I/O completion notifications, then frees the resources allocated
++ * for this channel, and makes the corresponding logical channel id
++ * available for subsequent use.
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid chnl_obj.
++ * Requires:
++ * chnl_init(void) called.
++ * No thread must be blocked on this channel's I/O completion event.
++ * Ensures:
++ * 0: The I/O completion event for this channel is freed.
++ * chnl_obj is no longer valid.
++ */
++extern int chnl_close(struct chnl_object *chnl_obj);
++
++/*
++ * ======== chnl_create ========
++ * Purpose:
++ * Create a channel manager object, responsible for opening new channels
++ * and closing old ones for a given board.
++ * Parameters:
++ * channel_mgr: Location to store a channel manager object on output.
++ * hdev_obj: Handle to a device object.
++ * mgr_attrts: Channel manager attributes.
++ * mgr_attrts->max_channels: Max channels
++ * mgr_attrts->birq: Channel's I/O IRQ number.
++ * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
++ * mgr_attrts->word_size: DSP Word size in equivalent PC bytes..
++ * Returns:
++ * 0: Success;
++ * -EFAULT: hdev_obj is invalid.
++ * -EINVAL: max_channels is 0.
++ * Invalid DSP word size (must be > 0).
++ * Invalid base address for DSP communications.
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EIO: Unable to plug channel ISR for configured IRQ.
++ * -ECHRNG: This manager cannot handle this many channels.
++ * -EEXIST: Channel manager already exists for this device.
++ * Requires:
++ * chnl_init(void) called.
++ * channel_mgr != NULL.
++ * mgr_attrts != NULL.
++ * Ensures:
++ * 0: Subsequent calls to chnl_create() for the same
++ * board without an intervening call to
++ * chnl_destroy() will fail.
++ */
++extern int chnl_create(struct chnl_mgr **channel_mgr,
++ struct dev_object *hdev_obj,
++ const struct chnl_mgrattrs *mgr_attrts);
++
++/*
++ * ======== chnl_destroy ========
++ * Purpose:
++ * Close all open channels, and destroy the channel manager.
++ * Parameters:
++ * hchnl_mgr: Channel manager object.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: hchnl_mgr was invalid.
++ * Requires:
++ * chnl_init(void) called.
++ * Ensures:
++ * 0: Cancels I/O on each open channel.
++ * Closes each open channel.
++ * chnl_create may subsequently be called for the
++ * same board.
++ */
++extern int chnl_destroy(struct chnl_mgr *hchnl_mgr);
++
++/*
++ * ======== chnl_exit ========
++ * Purpose:
++ * Discontinue usage of the CHNL module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * chnl_init(void) previously called.
++ * Ensures:
++ * Resources, if any acquired in chnl_init(void), are freed when the last
++ * client of CHNL calls chnl_exit(void).
++ */
++extern void chnl_exit(void);
++
++/*
++ * ======== chnl_init ========
++ * Purpose:
++ * Initialize the CHNL module's private state.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * A requirement for each of the other public CHNL functions.
++ */
++extern bool chnl_init(void);
++
++#endif /* CHNL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,66 @@
++/*
++ * chnldefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * System-wide channel objects and constants.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CHNLDEFS_
++#define CHNLDEFS_
++
++/* Channel id option. */
++#define CHNL_PICKFREE (~0UL) /* Let manager pick a free channel. */
++
++/* Channel manager limits: */
++#define CHNL_INITIOREQS 4 /* Default # of I/O requests. */
++
++/* Channel modes */
++#define CHNL_MODETODSP 0 /* Data streaming to the DSP. */
++#define CHNL_MODEFROMDSP 1 /* Data streaming from the DSP. */
++
++/* GetIOCompletion flags */
++#define CHNL_IOCINFINITE 0xffffffff /* Wait forever for IO completion. */
++#define CHNL_IOCNOWAIT 0x0 /* Dequeue an IOC, if available. */
++
++/* IO Completion Record status: */
++#define CHNL_IOCSTATCOMPLETE 0x0000 /* IO Completed. */
++#define CHNL_IOCSTATCANCEL 0x0002 /* IO was cancelled */
++#define CHNL_IOCSTATTIMEOUT 0x0008 /* Wait for IOC timed out. */
++#define CHNL_IOCSTATEOS 0x8000 /* End Of Stream reached. */
++
++/* Macros for checking I/O Completion status: */
++#define CHNL_IS_IO_COMPLETE(ioc) (!(ioc.status & ~CHNL_IOCSTATEOS))
++#define CHNL_IS_IO_CANCELLED(ioc) (ioc.status & CHNL_IOCSTATCANCEL)
++#define CHNL_IS_TIMED_OUT(ioc) (ioc.status & CHNL_IOCSTATTIMEOUT)
++
++/* Channel attributes: */
++struct chnl_attr {
++ u32 uio_reqs; /* Max # of preallocated I/O requests. */
++ void *event_obj; /* User supplied auto-reset event object. */
++ char *pstr_event_name; /* Ptr to name of user event object. */
++ void *reserved1; /* Reserved for future use. */
++ u32 reserved2; /* Reserved for future use. */
++
++};
++
++/* I/O completion record: */
++struct chnl_ioc {
++ void *pbuf; /* Buffer to be filled/emptied. */
++ u32 byte_size; /* Bytes transferred. */
++ u32 buf_size; /* Actual buffer size in bytes */
++ u32 status; /* Status of IO completion. */
++ u32 dw_arg; /* User argument associated with pbuf. */
++};
++
++#endif /* CHNLDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,98 @@
++/*
++ * chnlpriv.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Private channel header shared between DSPSYS, DSPAPI and
++ * Bridge driver modules.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CHNLPRIV_
++#define CHNLPRIV_
++
++#include <dspbridge/chnldefs.h>
++#include <dspbridge/devdefs.h>
++#include <dspbridge/sync.h>
++
++/* Channel manager limits: */
++#define CHNL_MAXCHANNELS 32 /* Max channels available per transport */
++
++/*
++ * Transport channel Id definitions: (must match dsp-side).
++ *
++ * For CHNL_MAXCHANNELS = 16:
++ *
++ * ChnlIds:
++ * 0-15 (PCPY) - transport 0)
++ * 16-31 (DDMA) - transport 1)
++ * 32-47 (ZCPY) - transport 2)
++ */
++#define CHNL_PCPY 0 /* Proc-copy transport 0 */
++
++#define CHNL_MAXIRQ 0xff /* Arbitrarily large number. */
++
++/* The following modes are private: */
++#define CHNL_MODEUSEREVENT 0x1000 /* User provided the channel event. */
++#define CHNL_MODEMASK 0x1001
++
++/* Higher level channel states: */
++#define CHNL_STATEREADY 0 /* Channel ready for I/O. */
++#define CHNL_STATECANCEL 1 /* I/O was cancelled. */
++#define CHNL_STATEEOS 2 /* End Of Stream reached. */
++
++/* Macros for checking mode: */
++#define CHNL_IS_INPUT(mode) (mode & CHNL_MODEFROMDSP)
++#define CHNL_IS_OUTPUT(mode) (!CHNL_IS_INPUT(mode))
++
++/* Types of channel class libraries: */
++#define CHNL_TYPESM 1 /* Shared memory driver. */
++#define CHNL_TYPEBM 2 /* Bus Mastering driver. */
++
++/* Max string length of channel I/O completion event name - change if needed */
++#define CHNL_MAXEVTNAMELEN 32
++
++/* Max memory pages lockable in CHNL_PrepareBuffer() - change if needed */
++#define CHNL_MAXLOCKPAGES 64
++
++/* Channel info. */
++struct chnl_info {
++ struct chnl_mgr *hchnl_mgr; /* Owning channel manager. */
++ u32 cnhl_id; /* Channel ID. */
++ void *event_obj; /* Channel I/O completion event. */
++ /*Abstraction of I/O completion event. */
++ struct sync_object *sync_event;
++ s8 dw_mode; /* Channel mode. */
++ u8 dw_state; /* Current channel state. */
++ u32 bytes_tx; /* Total bytes transferred. */
++ u32 cio_cs; /* Number of IOCs in queue. */
++ u32 cio_reqs; /* Number of IO Requests in queue. */
++ u32 process; /* Process owning this channel. */
++};
++
++/* Channel manager info: */
++struct chnl_mgrinfo {
++ u8 dw_type; /* Type of channel class library. */
++ /* Channel handle, given the channel id. */
++ struct chnl_object *chnl_obj;
++ u8 open_channels; /* Number of open channels. */
++ u8 max_channels; /* total # of chnls supported */
++};
++
++/* Channel Manager Attrs: */
++struct chnl_mgrattrs {
++ /* Max number of channels this manager can use. */
++ u8 max_channels;
++ u32 word_size; /* DSP Word size. */
++};
++
++#endif /* CHNLPRIV_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/clk.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/clk.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,101 @@
++/*
++ * clk.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Provides Clock functions.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _CLK_H
++#define _CLK_H
++
++enum dsp_clk_id {
++ DSP_CLK_IVA2 = 0,
++ DSP_CLK_GPT5,
++ DSP_CLK_GPT6,
++ DSP_CLK_GPT7,
++ DSP_CLK_GPT8,
++ DSP_CLK_WDT3,
++ DSP_CLK_MCBSP1,
++ DSP_CLK_MCBSP2,
++ DSP_CLK_MCBSP3,
++ DSP_CLK_MCBSP4,
++ DSP_CLK_MCBSP5,
++ DSP_CLK_SSI,
++ DSP_CLK_NOT_DEFINED
++};
++
++/*
++ * ======== dsp_clk_exit ========
++ * Purpose:
++ * Discontinue usage of module; free resources when reference count
++ * reaches 0.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * CLK initialized.
++ * Ensures:
++ * Resources used by module are freed when cRef reaches zero.
++ */
++extern void dsp_clk_exit(void);
++
++/*
++ * ======== dsp_clk_init ========
++ * Purpose:
++ * Initializes private state of CLK module.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * CLK initialized.
++ */
++extern void dsp_clk_init(void);
++
++void dsp_gpt_wait_overflow(short int clk_id, unsigned int load);
++
++/*
++ * ======== dsp_clk_enable ========
++ * Purpose:
++ * Enables the clock requested.
++ * Parameters:
++ * Returns:
++ * 0: Success.
++ * -EPERM: Error occurred while enabling the clock.
++ * Requires:
++ * Ensures:
++ */
++extern int dsp_clk_enable(enum dsp_clk_id clk_id);
++
++u32 dsp_clock_enable_all(u32 dsp_per_clocks);
++
++/*
++ * ======== dsp_clk_disable ========
++ * Purpose:
++ * Disables the clock requested.
++ * Parameters:
++ * Returns:
++ * 0: Success.
++ *      -EPERM: Error occurred while disabling the clock.
++ * Requires:
++ * Ensures:
++ */
++extern int dsp_clk_disable(enum dsp_clk_id clk_id);
++
++extern u32 dsp_clk_get_iva2_rate(void);
++
++u32 dsp_clock_disable_all(u32 dsp_per_clocks);
++
++extern void ssi_clk_prepare(bool FLAG);
++
++#endif /* _CLK_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cmm.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,386 @@
++/*
++ * cmm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * The Communication Memory Management(CMM) module provides shared memory
++ * management services for DSP/BIOS Bridge data streaming and messaging.
++ * Multiple shared memory segments can be registered with CMM. Memory is
++ * coalesced back to the appropriate pool when a buffer is freed.
++ *
++ * The CMM_Xlator[xxx] functions are used for node messaging and data
++ * streaming address translation to perform zero-copy inter-processor
++ * data transfer(GPP<->DSP). A "translator" object is created for a node or
++ * stream object that contains per thread virtual address information. This
++ * translator info is used at runtime to perform SM address translation
++ * to/from the DSP address space.
++ *
++ * Notes:
++ * cmm_xlator_alloc_buf - Used by Node and Stream modules for SM address
++ * translation.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CMM_
++#define CMM_
++
++#include <dspbridge/devdefs.h>
++
++#include <dspbridge/cmmdefs.h>
++#include <dspbridge/host_os.h>
++
++/*
++ * ======== cmm_calloc_buf ========
++ * Purpose:
++ * Allocate memory buffers that can be used for data streaming or
++ * messaging.
++ * Parameters:
++ * hcmm_mgr: Cmm Mgr handle.
++ * usize: Number of bytes to allocate.
++ * pattr: Attributes of memory to allocate.
++ * pp_buf_va: Address of where to place VA.
++ * Returns:
++ * Pointer to a zero'd block of SM memory;
++ * NULL if memory couldn't be allocated,
++ * or if byte_size == 0,
++ * Requires:
++ * Valid hcmm_mgr.
++ * CMM initialized.
++ * Ensures:
++ * The returned pointer, if not NULL, points to a valid memory block of
++ * the size requested.
++ *
++ */
++extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
++ u32 usize, struct cmm_attrs *pattrs,
++ void **pp_buf_va);
++
++/*
++ * ======== cmm_create ========
++ * Purpose:
++ * Create a communication memory manager object.
++ * Parameters:
++ * ph_cmm_mgr: Location to store a communication manager handle on
++ * output.
++ * hdev_obj: Handle to a device object.
++ * mgr_attrts: Comm mem manager attributes.
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EPERM: Failed to initialize critical sect sync object.
++ *
++ * Requires:
++ * cmm_init(void) called.
++ * ph_cmm_mgr != NULL.
++ * mgr_attrts->ul_min_block_size >= 4 bytes.
++ * Ensures:
++ *
++ */
++extern int cmm_create(struct cmm_object **ph_cmm_mgr,
++ struct dev_object *hdev_obj,
++ const struct cmm_mgrattrs *mgr_attrts);
++
++/*
++ * ======== cmm_destroy ========
++ * Purpose:
++ * Destroy the communication memory manager object.
++ * Parameters:
++ * hcmm_mgr: Cmm Mgr handle.
++ * force: Force deallocation of all cmm memory immediately if set TRUE.
++ * If FALSE, and outstanding allocations will return -EPERM
++ * status.
++ * Returns:
++ * 0: CMM object & resources deleted.
++ * -EPERM: Unable to free CMM object due to outstanding allocation.
++ * -EFAULT: Unable to free CMM due to bad handle.
++ * Requires:
++ * CMM is initialized.
++ * hcmm_mgr != NULL.
++ * Ensures:
++ * Memory resources used by Cmm Mgr are freed.
++ */
++extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force);
++
++/*
++ * ======== cmm_exit ========
++ * Purpose:
++ * Discontinue usage of module. Cleanup CMM module if CMM cRef reaches zero.
++ * Parameters:
++ * n/a
++ * Returns:
++ * n/a
++ * Requires:
++ * CMM is initialized.
++ * Ensures:
++ */
++extern void cmm_exit(void);
++
++/*
++ * ======== cmm_free_buf ========
++ * Purpose:
++ * Free the given buffer.
++ * Parameters:
++ * hcmm_mgr: Cmm Mgr handle.
++ * pbuf: Pointer to memory allocated by cmm_calloc_buf().
++ * ul_seg_id: SM segment Id used in CMM_Calloc() attrs.
++ * Set to 0 to use default segment.
++ * Returns:
++ * 0
++ * -EPERM
++ * Requires:
++ * CMM initialized.
++ * buf_pa != NULL
++ * Ensures:
++ *
++ */
++extern int cmm_free_buf(struct cmm_object *hcmm_mgr,
++ void *buf_pa, u32 ul_seg_id);
++
++/*
++ * ======== cmm_get_handle ========
++ * Purpose:
++ * Return the handle to the cmm mgr for the given device obj.
++ * Parameters:
++ * hprocessor: Handle to a Processor.
++ * ph_cmm_mgr: Location to store the shared memory mgr handle on
++ * output.
++ *
++ * Returns:
++ * 0: Cmm Mgr opaque handle returned.
++ * -EFAULT: Invalid handle.
++ * Requires:
++ * ph_cmm_mgr != NULL
++ * hdev_obj != NULL
++ * Ensures:
++ */
++extern int cmm_get_handle(void *hprocessor,
++ struct cmm_object **ph_cmm_mgr);
++
++/*
++ * ======== cmm_get_info ========
++ * Purpose:
++ * Return the current SM and VM utilization information.
++ * Parameters:
++ * hcmm_mgr: Handle to a Cmm Mgr.
++ * cmm_info_obj: Location to store the Cmm information on output.
++ *
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid handle.
++ * -EINVAL Invalid input argument.
++ * Requires:
++ * Ensures:
++ *
++ */
++extern int cmm_get_info(struct cmm_object *hcmm_mgr,
++ struct cmm_info *cmm_info_obj);
++
++/*
++ * ======== cmm_init ========
++ * Purpose:
++ * Initializes private state of CMM module.
++ * Parameters:
++ * Returns:
++ *      TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * CMM initialized.
++ */
++extern bool cmm_init(void);
++
++/*
++ * ======== cmm_register_gppsm_seg ========
++ * Purpose:
++ * Register a block of SM with the CMM.
++ * Parameters:
++ * hcmm_mgr: Handle to a Cmm Mgr.
++ * lpGPPBasePA: GPP Base Physical address.
++ * ul_size: Size in GPP bytes.
++ * dsp_addr_offset GPP PA to DSP PA Offset.
++ * c_factor: Add offset if CMM_ADDTODSPPA, sub if CMM_SUBFROMDSPPA.
++ * dw_dsp_base: DSP virtual base byte address.
++ * ul_dsp_size: Size of DSP segment in bytes.
++ * sgmt_id: Address to store segment Id.
++ *
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hcmm_mgr handle.
++ * -EINVAL: Invalid input argument.
++ * -EPERM: Unable to register.
++ * - On success *sgmt_id is a valid SM segment ID.
++ * Requires:
++ * ul_size > 0
++ * sgmt_id != NULL
++ * dw_gpp_base_pa != 0
++ * c_factor = CMM_ADDTODSPPA || c_factor = CMM_SUBFROMDSPPA
++ * Ensures:
++ *
++ */
++extern int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
++ unsigned int dw_gpp_base_pa,
++ u32 ul_size,
++ u32 dsp_addr_offset,
++ s8 c_factor,
++ unsigned int dw_dsp_base,
++ u32 ul_dsp_size,
++ u32 *sgmt_id, u32 gpp_base_va);
++
++/*
++ * ======== cmm_un_register_gppsm_seg ========
++ * Purpose:
++ * Unregister the given memory segment that was previously registered
++ * by cmm_register_gppsm_seg.
++ * Parameters:
++ * hcmm_mgr: Handle to a Cmm Mgr.
++ * ul_seg_id Segment identifier returned by cmm_register_gppsm_seg.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid handle.
++ * -EINVAL: Invalid ul_seg_id.
++ * -EPERM: Unable to unregister for unknown reason.
++ * Requires:
++ * Ensures:
++ *
++ */
++extern int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
++ u32 ul_seg_id);
++
++/*
++ * ======== cmm_xlator_alloc_buf ========
++ * Purpose:
++ * Allocate the specified SM buffer and create a local memory descriptor.
++ * Place on the descriptor on the translator's HaQ (Host Alloc'd Queue).
++ * Parameters:
++ * xlator: Handle to a Xlator object.
++ * va_buf: Virtual address ptr(client context)
++ * pa_size: Size of SM memory to allocate.
++ * Returns:
++ * Ptr to valid physical address(Pa) of pa_size bytes, NULL if failed.
++ * Requires:
++ * va_buf != 0.
++ * pa_size != 0.
++ * Ensures:
++ *
++ */
++extern void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator,
++ void *va_buf, u32 pa_size);
++
++/*
++ * ======== cmm_xlator_create ========
++ * Purpose:
++ * Create a translator(xlator) object used for process specific Va<->Pa
++ * address translation. Node messaging and streams use this to perform
++ * inter-processor(GPP<->DSP) zero-copy data transfer.
++ * Parameters:
++ * xlator: Address to place handle to a new Xlator handle.
++ * hcmm_mgr: Handle to Cmm Mgr associated with this translator.
++ * xlator_attrs: Translator attributes used for the client NODE or STREAM.
++ * Returns:
++ * 0: Success.
++ * -EINVAL: Bad input Attrs.
++ * -ENOMEM: Insufficient memory(local) for requested resources.
++ * Requires:
++ * xlator != NULL
++ * hcmm_mgr != NULL
++ * xlator_attrs != NULL
++ * Ensures:
++ *
++ */
++extern int cmm_xlator_create(struct cmm_xlatorobject **xlator,
++ struct cmm_object *hcmm_mgr,
++ struct cmm_xlatorattrs *xlator_attrs);
++
++/*
++ * ======== cmm_xlator_delete ========
++ * Purpose:
++ * Delete translator resources
++ * Parameters:
++ * xlator: handle to translator.
++ *      force:          force = TRUE will free XLators SM buffers/descriptors.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Bad translator handle.
++ * -EPERM: Unable to free translator resources.
++ * Requires:
++ * refs > 0
++ * Ensures:
++ *
++ */
++extern int cmm_xlator_delete(struct cmm_xlatorobject *xlator,
++ bool force);
++
++/*
++ * ======== cmm_xlator_free_buf ========
++ * Purpose:
++ * Free SM buffer and descriptor.
++ * Does not free client process VM.
++ * Parameters:
++ * xlator: handle to translator.
++ * buf_va Virtual address of PA to free.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Bad translator handle.
++ * Requires:
++ * Ensures:
++ *
++ */
++extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
++ void *buf_va);
++
++/*
++ * ======== cmm_xlator_info ========
++ * Purpose:
++ * Set/Get process specific "translator" address info.
++ *      This is used to perform fast virtual address translation
++ * for shared memory buffers between the GPP and DSP.
++ * Parameters:
++ * xlator: handle to translator.
++ * paddr: Virtual base address of segment.
++ * ul_size: Size in bytes.
++ * segm_id: Segment identifier of SM segment(s)
++ * set_info Set xlator fields if TRUE, else return base addr
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Bad translator handle.
++ * Requires:
++ * (refs > 0)
++ * (paddr != NULL)
++ * (ul_size > 0)
++ * Ensures:
++ *
++ */
++extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
++ u8 **paddr,
++ u32 ul_size, u32 segm_id, bool set_info);
++
++/*
++ * ======== cmm_xlator_translate ========
++ * Purpose:
++ * Perform address translation VA<->PA for the specified stream or
++ * message shared memory buffer.
++ * Parameters:
++ * xlator: handle to translator.
++ * paddr address of buffer to translate.
++ * xtype Type of address xlation. CMM_PA2VA or CMM_VA2PA.
++ * Returns:
++ * Valid address on success, else NULL.
++ * Requires:
++ * refs > 0
++ * paddr != NULL
++ * xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
++ * Ensures:
++ *
++ */
++extern void *cmm_xlator_translate(struct cmm_xlatorobject *xlator,
++ void *paddr, enum cmm_xlatetype xtype);
++
++#endif /* CMM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,105 @@
++/*
++ * cmmdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global MEM constants and types.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CMMDEFS_
++#define CMMDEFS_
++
++#include <dspbridge/list.h>
++
++/* Cmm attributes used in cmm_create() */
++struct cmm_mgrattrs {
++ /* Minimum SM allocation; default 32 bytes. */
++ u32 ul_min_block_size;
++};
++
++/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */
++struct cmm_attrs {
++ u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */
++ u32 ul_alignment; /* 0,1,2,4....ul_min_block_size */
++};
++
++/*
++ * DSPPa to GPPPa Conversion Factor.
++ *
++ * For typical platforms:
++ * converted Address = PaDSP + ( c_factor * addressToConvert).
++ */
++#define CMM_SUBFROMDSPPA -1
++#define CMM_ADDTODSPPA 1
++
++#define CMM_ALLSEGMENTS 0xFFFFFF /* All SegIds */
++#define CMM_MAXGPPSEGS 1 /* Maximum # of SM segs */
++
++/*
++ * SMSEGs are SM segments the DSP allocates from.
++ *
++ * This info is used by the GPP to xlate DSP allocated PAs.
++ */
++
++struct cmm_seginfo {
++ u32 dw_seg_base_pa; /* Start Phys address of SM segment */
++ /* Total size in bytes of segment: DSP+GPP */
++ u32 ul_total_seg_size;
++ u32 dw_gpp_base_pa; /* Start Phys addr of Gpp SM seg */
++ u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */
++ u32 dw_dsp_base_va; /* DSP virt base byte address */
++ u32 ul_dsp_size; /* DSP seg size in bytes */
++ /* # of current GPP allocations from this segment */
++ u32 ul_in_use_cnt;
++ u32 dw_seg_base_va; /* Start Virt address of SM seg */
++
++};
++
++/* CMM useful information */
++struct cmm_info {
++ /* # of SM segments registered with this Cmm. */
++ u32 ul_num_gppsm_segs;
++ /* Total # of allocations outstanding for CMM */
++ u32 ul_total_in_use_cnt;
++ /* Min SM block size allocation from cmm_create() */
++ u32 ul_min_block_size;
++ /* Info per registered SM segment. */
++ struct cmm_seginfo seg_info[CMM_MAXGPPSEGS];
++};
++
++/* XlatorCreate attributes */
++struct cmm_xlatorattrs {
++ u32 ul_seg_id; /* segment Id used for SM allocations */
++ u32 dw_dsp_bufs; /* # of DSP-side bufs */
++ u32 dw_dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
++ /* Vm base address alloc'd in client process context */
++ void *vm_base;
++ /* dw_vm_size must be >= (dwMaxNumBufs * dwMaxSize) */
++ u32 dw_vm_size;
++};
++
++/*
++ * Cmm translation types. Use to map SM addresses to process context.
++ */
++enum cmm_xlatetype {
++ CMM_VA2PA = 0, /* Virtual to GPP physical address xlation */
++ CMM_PA2VA = 1, /* GPP Physical to virtual */
++ CMM_VA2DSPPA = 2, /* Va to DSP Pa */
++ CMM_PA2DSPPA = 3, /* GPP Pa to DSP Pa */
++ CMM_DSPPA2PA = 4, /* DSP Pa to GPP Pa */
++};
++
++struct cmm_object;
++struct cmm_xlatorobject;
++
++#endif /* CMMDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cod.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/cod.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,369 @@
++/*
++ * cod.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Code management module for DSPs. This module provides an interface
++ * for loading both static and dynamic code objects onto DSP
++ * systems.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef COD_
++#define COD_
++
++#include <dspbridge/dblldefs.h>
++
++#define COD_MAXPATHLENGTH 255
++#define COD_TRACEBEG "SYS_PUTCBEG"
++#define COD_TRACEEND "SYS_PUTCEND"
++#define COD_TRACECURPOS "BRIDGE_SYS_PUTC_current"
++#define COD_TRACESECT "trace"
++#define COD_TRACEBEGOLD "PUTCBEG"
++#define COD_TRACEENDOLD "PUTCEND"
++
++#define COD_NOLOAD DBLL_NOLOAD
++#define COD_SYMB DBLL_SYMB
++
++/* COD code manager handle */
++struct cod_manager;
++
++/* COD library handle */
++struct cod_libraryobj;
++
++/* COD attributes */
++struct cod_attrs {
++ u32 ul_reserved;
++};
++
++/*
++ * Function prototypes for writing memory to a DSP system, allocating
++ * and freeing DSP memory.
++ */
++typedef u32(*cod_writefxn) (void *priv_ref, u32 dsp_add,
++ void *pbuf, u32 ul_num_bytes, u32 mem_space);
++
++/*
++ * ======== cod_close ========
++ * Purpose:
++ * Close a library opened with cod_open().
++ * Parameters:
++ * lib - Library handle returned by cod_open().
++ * Returns:
++ * None.
++ * Requires:
++ * COD module initialized.
++ * valid lib.
++ * Ensures:
++ *
++ */
++extern void cod_close(struct cod_libraryobj *lib);
++
++/*
++ * ======== cod_create ========
++ * Purpose:
++ * Create an object to manage code on a DSP system. This object can be
++ * used to load an initial program image with arguments that can later
++ * be expanded with dynamically loaded object files.
++ * Symbol table information is managed by this object and can be retrieved
++ * using the cod_get_sym_value() function.
++ * Parameters:
++ * manager: created manager object
++ * str_zl_file: ZL DLL filename, of length < COD_MAXPATHLENGTH.
++ * attrs: attributes to be used by this object. A NULL value
++ * will cause default attrs to be used.
++ * Returns:
++ * 0: Success.
++ * -ESPIPE: ZL_Create failed.
++ * -ENOSYS: attrs was not NULL. We don't yet support
++ * non default values of attrs.
++ * Requires:
++ * COD module initialized.
++ * str_zl_file != NULL
++ * Ensures:
++ */
++extern int cod_create(struct cod_manager **mgr,
++ char *str_zl_file,
++ const struct cod_attrs *attrs);
++
++/*
++ * ======== cod_delete ========
++ * Purpose:
++ * Delete a code manager object.
++ * Parameters:
++ * cod_mgr_obj: handle of manager to be deleted
++ * Returns:
++ * None.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * Ensures:
++ */
++extern void cod_delete(struct cod_manager *cod_mgr_obj);
++
++/*
++ * ======== cod_exit ========
++ * Purpose:
++ * Discontinue usage of the COD module.
++ * Parameters:
++ * None.
++ * Returns:
++ * None.
++ * Requires:
++ * COD initialized.
++ * Ensures:
++ * Resources acquired in cod_init(void) are freed.
++ */
++extern void cod_exit(void);
++
++/*
++ * ======== cod_get_base_lib ========
++ * Purpose:
++ * Get handle to the base image DBL library.
++ * Parameters:
++ * cod_mgr_obj: handle of manager to be deleted
++ * plib: location to store library handle on output.
++ * Returns:
++ * 0: Success.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * plib != NULL.
++ * Ensures:
++ */
++extern int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
++ struct dbll_library_obj **plib);
++
++/*
++ * ======== cod_get_base_name ========
++ * Purpose:
++ * Get the name of the base image DBL library.
++ * Parameters:
++ * cod_mgr_obj: handle of manager to be deleted
++ * sz_name: location to store library name on output.
++ * usize: size of name buffer.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Buffer too small.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * sz_name != NULL.
++ * Ensures:
++ */
++extern int cod_get_base_name(struct cod_manager *cod_mgr_obj,
++ char *sz_name, u32 usize);
++
++/*
++ * ======== cod_get_entry ========
++ * Purpose:
++ * Retrieve the entry point of a loaded DSP program image
++ * Parameters:
++ * cod_mgr_obj: handle of manager to be deleted
++ * entry_pt: pointer to location for entry point
++ * Returns:
++ * 0: Success.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * entry_pt != NULL.
++ * Ensures:
++ */
++extern int cod_get_entry(struct cod_manager *cod_mgr_obj,
++ u32 *entry_pt);
++
++/*
++ * ======== cod_get_loader ========
++ * Purpose:
++ * Get handle to the DBL loader.
++ * Parameters:
++ * cod_mgr_obj: handle of manager to be deleted
++ * loader: location to store loader handle on output.
++ * Returns:
++ * 0: Success.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * loader != NULL.
++ * Ensures:
++ */
++extern int cod_get_loader(struct cod_manager *cod_mgr_obj,
++ struct dbll_tar_obj **loader);
++
++/*
++ * ======== cod_get_section ========
++ * Purpose:
++ * Retrieve the starting address and length of a section in the COFF file
++ * given the section name.
++ * Parameters:
++ * lib Library handle returned from cod_open().
++ * str_sect: name of the section, with or without leading "."
++ * addr: Location to store address.
++ * len: Location to store length.
++ * Returns:
++ * 0: Success
++ * -ESPIPE: Symbols could not be found or have not been loaded onto
++ * the board.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * str_sect != NULL;
++ * addr != NULL;
++ * len != NULL;
++ * Ensures:
++ * 0: *addr and *len contain the address and length of the
++ * section.
++ * else: *addr == 0 and *len == 0;
++ *
++ */
++extern int cod_get_section(struct cod_libraryobj *lib,
++ char *str_sect,
++ u32 *addr, u32 *len);
++
++/*
++ * ======== cod_get_sym_value ========
++ * Purpose:
++ * Retrieve the value for the specified symbol. The symbol is first
++ * searched for literally and then, if not found, searched for as a
++ * C symbol.
++ * Parameters:
++ * lib: library handle returned from cod_open().
++ * pstrSymbol: name of the symbol
++ * value: value of the symbol
++ * Returns:
++ * 0: Success.
++ * -ESPIPE: Symbols could not be found or have not been loaded onto
++ * the board.
++ * Requires:
++ * COD module initialized.
++ * Valid cod_mgr_obj.
++ * str_sym != NULL.
++ * pul_value != NULL.
++ * Ensures:
++ */
++extern int cod_get_sym_value(struct cod_manager *cod_mgr_obj,
++ char *str_sym, u32 * pul_value);
++
++/*
++ * ======== cod_init ========
++ * Purpose:
++ * Initialize the COD module's private state.
++ * Parameters:
++ * None.
++ * Returns:
++ *      TRUE if initialized; FALSE if error occured.
++ * Requires:
++ * Requires:
++ * Ensures:
++ * A requirement for each of the other public COD functions.
++ */
++extern bool cod_init(void);
++
++/*
++ * ======== cod_load_base ========
++ * Purpose:
++ * Load the initial program image, optionally with command-line arguments,
++ * on the DSP system managed by the supplied handle. The program to be
++ * loaded must be the first element of the args array and must be a fully
++ * qualified pathname.
++ * Parameters:
++ * hmgr: manager to load the code with
++ * num_argc: number of arguments in the args array
++ * args: array of strings for arguments to DSP program
++ * write_fxn: board-specific function to write data to DSP system
++ * arb: arbitrary pointer to be passed as first arg to write_fxn
++ * envp: array of environment strings for DSP exec.
++ * Returns:
++ * 0: Success.
++ * -EBADF: Failed to open target code.
++ * Requires:
++ * COD module initialized.
++ * hmgr is valid.
++ * num_argc > 0.
++ * args != NULL.
++ * args[0] != NULL.
++ * pfn_write != NULL.
++ * Ensures:
++ */
++extern int cod_load_base(struct cod_manager *cod_mgr_obj,
++ u32 num_argc, char *args[],
++ cod_writefxn pfn_write, void *arb,
++ char *envp[]);
++
++/*
++ * ======== cod_open ========
++ * Purpose:
++ * Open a library for reading sections. Does not load or set the base.
++ * Parameters:
++ * hmgr: manager to load the code with
++ * sz_coff_path: Coff file to open.
++ * flags: COD_NOLOAD (don't load symbols) or COD_SYMB (load
++ * symbols).
++ * lib_obj: Handle returned that can be used in calls to cod_close
++ * and cod_get_section.
++ * Returns:
++ * S_OK: Success.
++ * -EBADF: Failed to open target code.
++ * Requires:
++ * COD module initialized.
++ * hmgr is valid.
++ * flags == COD_NOLOAD || flags == COD_SYMB.
++ * sz_coff_path != NULL.
++ * Ensures:
++ */
++extern int cod_open(struct cod_manager *hmgr,
++ char *sz_coff_path,
++ u32 flags, struct cod_libraryobj **lib_obj);
++
++/*
++ * ======== cod_open_base ========
++ * Purpose:
++ * Open base image for reading sections. Does not load the base.
++ * Parameters:
++ * hmgr: manager to load the code with
++ * sz_coff_path: Coff file to open.
++ * flags: Specifies whether to load symbols.
++ * Returns:
++ * 0: Success.
++ * -EBADF: Failed to open target code.
++ * Requires:
++ * COD module initialized.
++ * hmgr is valid.
++ * sz_coff_path != NULL.
++ * Ensures:
++ */
++extern int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
++ dbll_flags flags);
++
++/*
++ * ======== cod_read_section ========
++ * Purpose:
++ * Retrieve the content of a code section given the section name.
++ * Parameters:
++ * cod_mgr_obj - manager in which to search for the symbol
++ * str_sect - name of the section, with or without leading "."
++ * str_content - buffer to store content of the section.
++ * Returns:
++ * 0: on success, error code on failure
++ * -ESPIPE: Symbols have not been loaded onto the board.
++ * Requires:
++ * COD module initialized.
++ * valid cod_mgr_obj.
++ * str_sect != NULL;
++ * str_content != NULL;
++ * Ensures:
++ * 0: *str_content stores the content of the named section.
++ */
++extern int cod_read_section(struct cod_libraryobj *lib,
++ char *str_sect,
++ char *str_content, u32 content_size);
++
++#endif /* COD_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbc.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbc.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,46 @@
++/*
++ * dbc.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * "Design by Contract" programming macros.
++ *
++ * Notes:
++ * Requires that the GT->ERROR function has been defaulted to a valid
++ * error handler for the given execution environment.
++ *
++ * Does not require that GT_init() be called.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBC_
++#define DBC_
++
++/* Assertion Macros: */
++#ifdef CONFIG_TIDSPBRIDGE_DEBUG
++
++#define DBC_ASSERT(exp) \
++ if (!(exp)) \
++ pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
++ __FILE__, __LINE__)
++#define DBC_REQUIRE DBC_ASSERT /* Function Precondition. */
++#define DBC_ENSURE DBC_ASSERT /* Function Postcondition. */
++
++#else
++
++#define DBC_ASSERT(exp) {}
++#define DBC_REQUIRE(exp) {}
++#define DBC_ENSURE(exp) {}
++
++#endif /* DEBUG */
++
++#endif /* DBC_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h 2010-08-18 11:24:23.178060203 +0300
+@@ -0,0 +1,358 @@
++/*
++ * dbdcd.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Defines the DSP/BIOS Bridge Configuration Database (DCD) API.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBDCD_
++#define DBDCD_
++
++#include <dspbridge/dbdcddef.h>
++#include <dspbridge/host_os.h>
++#include <dspbridge/nldrdefs.h>
++
++/*
++ * ======== dcd_auto_register ========
++ * Purpose:
++ * This function automatically registers DCD objects specified in a
++ * special COFF section called ".dcd_register"
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * sz_coff_path: Pointer to name of COFF file containing DCD
++ * objects to be registered.
++ * Returns:
++ * 0: Success.
++ * -EACCES: Unable to find auto-registration/read/load section.
++ * -EFAULT: Invalid DCD_HMANAGER handle..
++ * Requires:
++ * DCD initialized.
++ * Ensures:
++ * Note:
++ * Due to the DCD database construction, it is essential for a DCD-enabled
++ * COFF file to contain the right COFF sections, especially
++ * ".dcd_register", which is used for auto registration.
++ */
++extern int dcd_auto_register(struct dcd_manager *hdcd_mgr,
++ char *sz_coff_path);
++
++/*
++ * ======== dcd_auto_unregister ========
++ * Purpose:
++ * This function automatically unregisters DCD objects specified in a
++ * special COFF section called ".dcd_register"
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * sz_coff_path: Pointer to name of COFF file containing
++ * DCD objects to be unregistered.
++ * Returns:
++ * 0: Success.
++ * -EACCES: Unable to find auto-registration/read/load section.
++ * -EFAULT: Invalid DCD_HMANAGER handle..
++ * Requires:
++ * DCD initialized.
++ * Ensures:
++ * Note:
++ * Due to the DCD database construction, it is essential for a DCD-enabled
++ * COFF file to contain the right COFF sections, especially
++ * ".dcd_register", which is used for auto unregistration.
++ */
++extern int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
++ char *sz_coff_path);
++
++/*
++ * ======== dcd_create_manager ========
++ * Purpose:
++ * This function creates a DCD module manager.
++ * Parameters:
++ * sz_zl_dll_name: Pointer to a DLL name string.
++ * dcd_mgr: A pointer to a DCD manager handle.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Unable to allocate memory for DCD manager handle.
++ * -EPERM: General failure.
++ * Requires:
++ * DCD initialized.
++ * sz_zl_dll_name is non-NULL.
++ * dcd_mgr is non-NULL.
++ * Ensures:
++ * A DCD manager handle is created.
++ */
++extern int dcd_create_manager(char *sz_zl_dll_name,
++ struct dcd_manager **dcd_mgr);
++
++/*
++ * ======== dcd_destroy_manager ========
++ * Purpose:
++ * This function destroys a DCD module manager.
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid DCD manager handle.
++ * Requires:
++ * DCD initialized.
++ * Ensures:
++ */
++extern int dcd_destroy_manager(struct dcd_manager *hdcd_mgr);
++
++/*
++ * ======== dcd_enumerate_object ========
++ * Purpose:
++ * This function enumerates currently visible DSP/BIOS Bridge objects
++ * and returns the UUID and type of each enumerated object.
++ * Parameters:
++ * index: The object enumeration index.
++ * obj_type: Type of object to enumerate.
++ * uuid_obj: Pointer to a dsp_uuid object.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Unable to enumerate through the DCD database.
++ * ENODATA: Enumeration completed. This is not an error code.
++ * Requires:
++ * DCD initialized.
++ * uuid_obj is a valid pointer.
++ * Ensures:
++ * Details:
++ * This function can be used in conjunction with dcd_get_object_def to
++ * retrieve object properties.
++ */
++extern int dcd_enumerate_object(s32 index,
++ enum dsp_dcdobjtype obj_type,
++ struct dsp_uuid *uuid_obj);
++
++/*
++ * ======== dcd_exit ========
++ * Purpose:
++ * This function cleans up the DCD module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * DCD initialized.
++ * Ensures:
++ */
++extern void dcd_exit(void);
++
++/*
++ * ======== dcd_get_dep_libs ========
++ * Purpose:
++ * Given the uuid of a library and size of array of uuids, this function
++ * fills the array with the uuids of all dependent libraries of the input
++ * library.
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * uuid_obj: Pointer to a dsp_uuid for a library.
++ * num_libs: Size of uuid array (number of library uuids).
++ * dep_lib_uuids: Array of dependent library uuids to be filled in.
++ * prstnt_dep_libs: Array indicating if corresponding lib is persistent.
++ * phase: phase to obtain correct input library
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failure.
++ * -EACCES: Failure to read section containing library info.
++ * -EPERM: General failure.
++ * Requires:
++ * DCD initialized.
++ * Valid hdcd_mgr.
++ * uuid_obj != NULL
++ * dep_lib_uuids != NULL.
++ * Ensures:
++ */
++extern int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ u16 num_libs,
++ struct dsp_uuid *dep_lib_uuids,
++ bool *prstnt_dep_libs,
++ enum nldr_phase phase);
++
++/*
++ * ======== dcd_get_num_dep_libs ========
++ * Purpose:
++ * Given the uuid of a library, determine its number of dependent
++ * libraries.
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * uuid_obj: Pointer to a dsp_uuid for a library.
++ * num_libs: Size of uuid array (number of library uuids).
++ * num_pers_libs: number of persistent dependent library.
++ * phase: Phase to obtain correct input library
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failure.
++ * -EACCES: Failure to read section containing library info.
++ * -EPERM: General failure.
++ * Requires:
++ * DCD initialized.
++ * Valid hdcd_mgr.
++ * uuid_obj != NULL
++ * num_libs != NULL.
++ * Ensures:
++ */
++extern int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ u16 *num_libs,
++ u16 *num_pers_libs,
++ enum nldr_phase phase);
++
++/*
++ * ======== dcd_get_library_name ========
++ * Purpose:
++ * This function returns the name of a (dynamic) library for a given
++ * UUID.
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * uuid_obj: Pointer to a dsp_uuid that represents a unique DSP/BIOS
++ * Bridge object.
++ * str_lib_name: Buffer to hold library name.
++ * buff_size: Contains buffer size. Set to string size on output.
++ * phase: Which phase to load
++ * phase_split: Are phases in multiple libraries
++ * Returns:
++ * 0: Success.
++ * -EPERM: General failure.
++ * Requires:
++ * DCD initialized.
++ * Valid hdcd_mgr.
++ * str_lib_name != NULL.
++ * uuid_obj != NULL
++ * buff_size != NULL.
++ * Ensures:
++ */
++extern int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ char *str_lib_name,
++ u32 *buff_size,
++ enum nldr_phase phase,
++ bool *phase_split);
++
++/*
++ * ======== dcd_get_object_def ========
++ * Purpose:
++ * This function returns the properties/attributes of a DSP/BIOS Bridge
++ * object.
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * uuid_obj: Pointer to a dsp_uuid that represents a unique
++ * DSP/BIOS Bridge object.
++ * obj_type: The type of DSP/BIOS Bridge object to be
++ * referenced (node, processor, etc).
++ * obj_def: Pointer to an object definition structure. A
++ * union of various possible DCD object types.
++ * Returns:
++ * 0: Success.
++ * -EACCES: Unable to access/read/parse/load content of object code
++ * section.
++ * -EPERM: General failure.
++ * -EFAULT: Invalid DCD_HMANAGER handle.
++ * Requires:
++ * DCD initialized.
++ * obj_uuid is non-NULL.
++ * obj_def is non-NULL.
++ * Ensures:
++ */
++extern int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *obj_uuid,
++ enum dsp_dcdobjtype obj_type,
++ struct dcd_genericobj *obj_def);
++
++/*
++ * ======== dcd_get_objects ========
++ * Purpose:
++ * This function finds all DCD objects specified in a special
++ * COFF section called ".dcd_register", and for each object,
++ * call a "register" function. The "register" function may perform
++ * various actions, such as 1) register nodes in the node database, 2)
++ * unregister nodes from the node database, and 3) add overlay nodes.
++ * Parameters:
++ * hdcd_mgr: A DCD manager handle.
++ * sz_coff_path: Pointer to name of COFF file containing DCD
++ * objects.
++ * register_fxn: Callback fxn to be applied on each located
++ * DCD object.
++ * handle: Handle to pass to callback.
++ * Returns:
++ * 0: Success.
++ * -EACCES: Unable to access/read/parse/load content of object code
++ * section.
++ * -EFAULT: Invalid DCD_HMANAGER handle..
++ * Requires:
++ * DCD initialized.
++ * Ensures:
++ * Note:
++ * Due to the DCD database construction, it is essential for a DCD-enabled
++ * COFF file to contain the right COFF sections, especially
++ * ".dcd_register", which is used for auto registration.
++ */
++extern int dcd_get_objects(struct dcd_manager *hdcd_mgr,
++ char *sz_coff_path,
++ dcd_registerfxn register_fxn, void *handle);
++
++/*
++ * ======== dcd_init ========
++ * Purpose:
++ * This function initializes DCD.
++ * Parameters:
++ * Returns:
++ * FALSE: Initialization failed.
++ * TRUE: Initialization succeeded.
++ * Requires:
++ * Ensures:
++ * DCD initialized.
++ */
++extern bool dcd_init(void);
++
++/*
++ * ======== dcd_register_object ========
++ * Purpose:
++ * This function registers a DSP/BIOS Bridge object in the DCD database.
++ * Parameters:
++ * uuid_obj: Pointer to a dsp_uuid that identifies a DSP/BIOS
++ * Bridge object.
++ * obj_type: Type of object.
++ * psz_path_name: Path to the object's COFF file.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Failed to register object.
++ * Requires:
++ * DCD initialized.
++ * uuid_obj and szPathName are non-NULL values.
++ * obj_type is a valid type value.
++ * Ensures:
++ */
++extern int dcd_register_object(struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type,
++ char *psz_path_name);
++
++/*
++ * ======== dcd_unregister_object ========
++ * Purpose:
++ * This function de-registers a valid DSP/BIOS Bridge object from the DCD
++ * database.
++ * Parameters:
++ * uuid_obj: Pointer to a dsp_uuid that identifies a DSP/BIOS Bridge
++ * object.
++ * obj_type: Type of object.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Unable to de-register the specified object.
++ * Requires:
++ * DCD initialized.
++ * uuid_obj is a non-NULL value.
++ * obj_type is a valid type value.
++ * Ensures:
++ */
++extern int dcd_unregister_object(struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type);
++
++#endif /* _DBDCD_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,78 @@
++/*
++ * dbdcddef.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DCD (DSP/BIOS Bridge Configuration Database) constants and types.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBDCDDEF_
++#define DBDCDDEF_
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/mgrpriv.h> /* for mgr_processorextinfo */
++
++/*
++ * The following defines are critical elements for the DCD module:
++ *
++ * - DCD_REGKEY enables DCD functions to locate registered DCD objects.
++ * - DCD_REGISTER_SECTION identifies the COFF section where the UUID of
++ * registered DCD objects are stored.
++ */
++#define DCD_REGKEY "Software\\TexasInstruments\\DspBridge\\DCD"
++#define DCD_REGISTER_SECTION ".dcd_register"
++
++#define DCD_MAXPATHLENGTH 255
++
++/* DCD Manager Object */
++struct dcd_manager;
++
++struct dcd_key_elem {
++ struct list_head link; /* Make it linked to a list */
++ char name[DCD_MAXPATHLENGTH]; /* Name of a given value entry */
++ char *path; /* Pointer to the actual data */
++};
++
++/* DCD Node Properties */
++struct dcd_nodeprops {
++ struct dsp_ndbprops ndb_props;
++ u32 msg_segid;
++ u32 msg_notify_type;
++ char *pstr_create_phase_fxn;
++ char *pstr_delete_phase_fxn;
++ char *pstr_execute_phase_fxn;
++ char *pstr_i_alg_name;
++
++ /* Dynamic load properties */
++ u16 us_load_type; /* Static, dynamic, overlay */
++ u32 ul_data_mem_seg_mask; /* Data memory requirements */
++ u32 ul_code_mem_seg_mask; /* Code memory requirements */
++};
++
++/* DCD Generic Object Type */
++struct dcd_genericobj {
++ union dcd_obj {
++ struct dcd_nodeprops node_obj; /* node object. */
++ /* processor object. */
++ struct dsp_processorinfo proc_info;
++ /* extended proc object (private) */
++ struct mgr_processorextinfo ext_proc_obj;
++ } obj_data;
++};
++
++/* DCD Internal Callback Type */
++typedef int(*dcd_registerfxn) (struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type,
++ void *handle);
++
++#endif /* DBDCDDEF_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,514 @@
++/*
++ * dbdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global definitions and constants for DSP/BIOS Bridge.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBDEFS_
++#define DBDEFS_
++
++#include <linux/types.h>
++
++#include <dspbridge/rms_sh.h> /* Types shared between GPP and DSP */
++
++#define PG_SIZE4K 4096
++#define PG_MASK(pg_size) (~((pg_size)-1))
++#define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size))
++#define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size))
++
++/* API return value and calling convention */
++#define DBAPI int
++
++/* Infinite time value for the utimeout parameter to DSPStream_Select() */
++#define DSP_FOREVER (-1)
++
++/* Maximum length of node name, used in dsp_ndbprops */
++#define DSP_MAXNAMELEN 32
++
++/* notify_type values for the RegisterNotify() functions. */
++#define DSP_SIGNALEVENT 0x00000001
++
++/* Types of events for processors */
++#define DSP_PROCESSORSTATECHANGE 0x00000001
++#define DSP_PROCESSORATTACH 0x00000002
++#define DSP_PROCESSORDETACH 0x00000004
++#define DSP_PROCESSORRESTART 0x00000008
++
++/* DSP exception events (DSP/BIOS and DSP MMU fault) */
++#define DSP_MMUFAULT 0x00000010
++#define DSP_SYSERROR 0x00000020
++#define DSP_EXCEPTIONABORT 0x00000300
++#define DSP_PWRERROR 0x00000080
++#define DSP_WDTOVERFLOW 0x00000040
++
++/* IVA exception events (IVA MMU fault) */
++#define IVA_MMUFAULT 0x00000040
++/* Types of events for nodes */
++#define DSP_NODESTATECHANGE 0x00000100
++#define DSP_NODEMESSAGEREADY 0x00000200
++
++/* Types of events for streams */
++#define DSP_STREAMDONE 0x00001000
++#define DSP_STREAMIOCOMPLETION 0x00002000
++
++/* Handle definition representing the GPP node in DSPNode_Connect() calls */
++#define DSP_HGPPNODE 0xFFFFFFFF
++
++/* Node directions used in DSPNode_Connect() */
++#define DSP_TONODE 1
++#define DSP_FROMNODE 2
++
++/* Define Node Minimum and Maximum Priorities */
++#define DSP_NODE_MIN_PRIORITY 1
++#define DSP_NODE_MAX_PRIORITY 15
++
++/* Pre-Defined Message Command Codes available to user: */
++#define DSP_RMSUSERCODESTART RMS_USER /* Start of RMS user cmd codes */
++/* end of user codes */
++#define DSP_RMSUSERCODEEND (RMS_USER + RMS_MAXUSERCODES);
++/* msg_ctrl contains SM buffer description */
++#define DSP_RMSBUFDESC RMS_BUFDESC
++
++/* Shared memory identifier for MEM segment named "SHMSEG0" */
++#define DSP_SHMSEG0 (u32)(-1)
++
++/* Processor ID numbers */
++#define DSP_UNIT 0
++#define IVA_UNIT 1
++
++#define DSPWORD unsigned char
++#define DSPWORDSIZE sizeof(DSPWORD)
++
++/* Power control enumerations */
++#define PROC_PWRCONTROL 0x8070
++
++#define PROC_PWRMGT_ENABLE (PROC_PWRCONTROL + 0x3)
++#define PROC_PWRMGT_DISABLE (PROC_PWRCONTROL + 0x4)
++
++/* Bridge Code Version */
++#define BRIDGE_VERSION_CODE 333
++
++#define MAX_PROFILES 16
++
++/* DSP chip type */
++#define DSPTYPE64 0x99
++
++/* Handy Macros */
++#define VALID_PROC_EVENT (DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | \
++ DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_NODESTATECHANGE | \
++ DSP_STREAMDONE | DSP_STREAMIOCOMPLETION | DSP_MMUFAULT | \
++ DSP_SYSERROR | DSP_WDTOVERFLOW | DSP_PWRERROR)
++
++static inline bool is_valid_proc_event(u32 x)
++{
++ return (x == 0 || (x & VALID_PROC_EVENT && !(x & ~VALID_PROC_EVENT)));
++}
++
++/* The Node UUID structure */
++struct dsp_uuid {
++ u32 ul_data1;
++ u16 us_data2;
++ u16 us_data3;
++ u8 uc_data4;
++ u8 uc_data5;
++ u8 uc_data6[6];
++};
++
++/* DCD types */
++enum dsp_dcdobjtype {
++ DSP_DCDNODETYPE,
++ DSP_DCDPROCESSORTYPE,
++ DSP_DCDLIBRARYTYPE,
++ DSP_DCDCREATELIBTYPE,
++ DSP_DCDEXECUTELIBTYPE,
++ DSP_DCDDELETELIBTYPE,
++ /* DSP_DCDMAXOBJTYPE is meant to be the last DCD object type */
++ DSP_DCDMAXOBJTYPE
++};
++
++/* Processor states */
++enum dsp_procstate {
++ PROC_STOPPED,
++ PROC_LOADED,
++ PROC_RUNNING,
++ PROC_ERROR
++};
++
++/*
++ * Node types: Message node, task node, xDAIS socket node, and
++ * device node. _NODE_GPP is used when defining a stream connection
++ * between a task or socket node and the GPP.
++ *
++ */
++enum node_type {
++ NODE_DEVICE,
++ NODE_TASK,
++ NODE_DAISSOCKET,
++ NODE_MESSAGE,
++ NODE_GPP
++};
++
++/*
++ * ======== node_state ========
++ * Internal node states.
++ */
++enum node_state {
++ NODE_ALLOCATED,
++ NODE_CREATED,
++ NODE_RUNNING,
++ NODE_PAUSED,
++ NODE_DONE,
++ NODE_CREATING,
++ NODE_STARTING,
++ NODE_PAUSING,
++ NODE_TERMINATING,
++ NODE_DELETING,
++};
++
++/* Stream states */
++enum dsp_streamstate {
++ STREAM_IDLE,
++ STREAM_READY,
++ STREAM_PENDING,
++ STREAM_DONE
++};
++
++/* Stream connect types */
++enum dsp_connecttype {
++ CONNECTTYPE_NODEOUTPUT,
++ CONNECTTYPE_GPPOUTPUT,
++ CONNECTTYPE_NODEINPUT,
++ CONNECTTYPE_GPPINPUT
++};
++
++/* Stream mode types */
++enum dsp_strmmode {
++ STRMMODE_PROCCOPY, /* Processor(s) copy stream data payloads */
++ STRMMODE_ZEROCOPY, /* Strm buffer ptrs swapped no data copied */
++ STRMMODE_LDMA, /* Local DMA : OMAP's System-DMA device */
++ STRMMODE_RDMA /* Remote DMA: OMAP's DSP-DMA device */
++};
++
++/* Resource Types */
++enum dsp_resourceinfotype {
++ DSP_RESOURCE_DYNDARAM = 0,
++ DSP_RESOURCE_DYNSARAM,
++ DSP_RESOURCE_DYNEXTERNAL,
++ DSP_RESOURCE_DYNSRAM,
++ DSP_RESOURCE_PROCLOAD
++};
++
++/* Memory Segment Types */
++enum dsp_memtype {
++ DSP_DYNDARAM = 0,
++ DSP_DYNSARAM,
++ DSP_DYNEXTERNAL,
++ DSP_DYNSRAM
++};
++
++/* Memory Flush Types */
++enum dsp_flushtype {
++ PROC_INVALIDATE_MEM = 0,
++ PROC_WRITEBACK_MEM,
++ PROC_WRITEBACK_INVALIDATE_MEM,
++};
++
++/* Memory Segment Status Values */
++struct dsp_memstat {
++ u32 ul_size;
++ u32 ul_total_free_size;
++ u32 ul_len_max_free_block;
++ u32 ul_num_free_blocks;
++ u32 ul_num_alloc_blocks;
++};
++
++/* Processor Load information Values */
++struct dsp_procloadstat {
++ u32 curr_load;
++ u32 predicted_load;
++ u32 curr_dsp_freq;
++ u32 predicted_freq;
++};
++
++/* Attributes for STRM connections between nodes */
++struct dsp_strmattr {
++ u32 seg_id; /* Memory segment on DSP to allocate buffers */
++ u32 buf_size; /* Buffer size (DSP words) */
++ u32 num_bufs; /* Number of buffers */
++ u32 buf_alignment; /* Buffer alignment */
++ u32 utimeout; /* Timeout for blocking STRM calls */
++ enum dsp_strmmode strm_mode; /* mode of stream when opened */
++ /* DMA chnl id if dsp_strmmode is LDMA or RDMA */
++ u32 udma_chnl_id;
++ u32 udma_priority; /* DMA channel priority 0=lowest, >0=high */
++};
++
++/* The dsp_cbdata structure */
++struct dsp_cbdata {
++ u32 cb_data;
++ u8 node_data[1];
++};
++
++/* The dsp_msg structure */
++struct dsp_msg {
++ u32 dw_cmd;
++ u32 dw_arg1;
++ u32 dw_arg2;
++};
++
++/* The dsp_resourcereqmts structure for node's resource requirements */
++struct dsp_resourcereqmts {
++ u32 cb_struct;
++ u32 static_data_size;
++ u32 global_data_size;
++ u32 program_mem_size;
++ u32 uwc_execution_time;
++ u32 uwc_period;
++ u32 uwc_deadline;
++ u32 avg_exection_time;
++ u32 minimum_period;
++};
++
++/*
++ * The dsp_streamconnect structure describes a stream connection
++ * between two nodes, or between a node and the GPP
++ */
++struct dsp_streamconnect {
++ u32 cb_struct;
++ enum dsp_connecttype connect_type;
++ u32 this_node_stream_index;
++ void *connected_node;
++ struct dsp_uuid ui_connected_node_id;
++ u32 connected_node_stream_index;
++};
++
++struct dsp_nodeprofs {
++ u32 ul_heap_size;
++};
++
++/* The dsp_ndbprops structure reports the attributes of a node */
++struct dsp_ndbprops {
++ u32 cb_struct;
++ struct dsp_uuid ui_node_id;
++ char ac_name[DSP_MAXNAMELEN];
++ enum node_type ntype;
++ u32 cache_on_gpp;
++ struct dsp_resourcereqmts dsp_resource_reqmts;
++ s32 prio;
++ u32 stack_size;
++ u32 sys_stack_size;
++ u32 stack_seg;
++ u32 message_depth;
++ u32 num_input_streams;
++ u32 num_output_streams;
++ u32 utimeout;
++ u32 count_profiles; /* Number of supported profiles */
++ /* Array of profiles */
++ struct dsp_nodeprofs node_profiles[MAX_PROFILES];
++ u32 stack_seg_name; /* Stack Segment Name */
++};
++
++ /* The dsp_nodeattrin structure describes the attributes of a
++ * node client */
++struct dsp_nodeattrin {
++ u32 cb_struct;
++ s32 prio;
++ u32 utimeout;
++ u32 profile_id;
++ /* Reserved, for Bridge Internal use only */
++ u32 heap_size;
++ void *pgpp_virt_addr; /* Reserved, for Bridge Internal use only */
++};
++
++ /* The dsp_nodeinfo structure is used to retrieve information
++ * about a node */
++struct dsp_nodeinfo {
++ u32 cb_struct;
++ struct dsp_ndbprops nb_node_database_props;
++ u32 execution_priority;
++ enum node_state ns_execution_state;
++ void *device_owner;
++ u32 number_streams;
++ struct dsp_streamconnect sc_stream_connection[16];
++ u32 node_env;
++};
++
++ /* The dsp_nodeattr structure describes the attributes of a node */
++struct dsp_nodeattr {
++ u32 cb_struct;
++ struct dsp_nodeattrin in_node_attr_in;
++ u32 node_attr_inputs;
++ u32 node_attr_outputs;
++ struct dsp_nodeinfo node_info;
++};
++
++/*
++ * Notification type: either the name of an opened event, or an event or
++ * window handle.
++ */
++struct dsp_notification {
++ char *ps_name;
++ void *handle;
++};
++
++/* The dsp_processorattrin structure describes the attributes of a processor */
++struct dsp_processorattrin {
++ u32 cb_struct;
++ u32 utimeout;
++};
++/*
++ * The dsp_processorinfo structure describes basic capabilities of a
++ * DSP processor
++ */
++struct dsp_processorinfo {
++ u32 cb_struct;
++ int processor_family;
++ int processor_type;
++ u32 clock_rate;
++ u32 ul_internal_mem_size;
++ u32 ul_external_mem_size;
++ u32 processor_id;
++ int ty_running_rtos;
++ s32 node_min_priority;
++ s32 node_max_priority;
++};
++
++/* Error information of last DSP exception signalled to the GPP */
++struct dsp_errorinfo {
++ u32 dw_err_mask;
++ u32 dw_val1;
++ u32 dw_val2;
++ u32 dw_val3;
++};
++
++/* The dsp_processorstate structure describes the state of a DSP processor */
++struct dsp_processorstate {
++ u32 cb_struct;
++ enum dsp_procstate proc_state;
++};
++
++/*
++ * The dsp_resourceinfo structure is used to retrieve information about a
++ * processor's resources
++ */
++struct dsp_resourceinfo {
++ u32 cb_struct;
++ enum dsp_resourceinfotype resource_type;
++ union {
++ u32 ul_resource;
++ struct dsp_memstat mem_stat;
++ struct dsp_procloadstat proc_load_stat;
++ } result;
++};
++
++/*
++ * The dsp_streamattrin structure describes the attributes of a stream,
++ * including segment and alignment of data buffers allocated with
++ * DSPStream_AllocateBuffers(), if applicable
++ */
++struct dsp_streamattrin {
++ u32 cb_struct;
++ u32 utimeout;
++ u32 segment_id;
++ u32 buf_alignment;
++ u32 num_bufs;
++ enum dsp_strmmode strm_mode;
++ u32 udma_chnl_id;
++ u32 udma_priority;
++};
++
++/* The dsp_bufferattr structure describes the attributes of a data buffer */
++struct dsp_bufferattr {
++ u32 cb_struct;
++ u32 segment_id;
++ u32 buf_alignment;
++};
++
++/*
++ * The dsp_streaminfo structure is used to retrieve information
++ * about a stream.
++ */
++struct dsp_streaminfo {
++ u32 cb_struct;
++ u32 number_bufs_allowed;
++ u32 number_bufs_in_stream;
++ u32 ul_number_bytes;
++ void *sync_object_handle;
++ enum dsp_streamstate ss_stream_state;
++};
++
++/* DMM MAP attributes
++It is a bit mask with each bit value indicating a specific attribute
++bit 0 - GPP address type (user virtual=0, physical=1)
++bit 1 - MMU Endianism (Big Endian=1, Little Endian=0)
++bit 2 - MMU mixed page attribute (Mixed/ CPUES=1, TLBES =0)
++bit 3 - MMU element size = 8bit (valid only for non mixed page entries)
++bit 4 - MMU element size = 16bit (valid only for non mixed page entries)
++bit 5 - MMU element size = 32bit (valid only for non mixed page entries)
++bit 6 - MMU element size = 64bit (valid only for non mixed page entries)
++
++bit 14 - Input (read only) buffer
++bit 15 - Output (writeable) buffer
++*/
++
++/* Types of mapping attributes */
++
++/* MPU address is virtual and needs to be translated to physical addr */
++#define DSP_MAPVIRTUALADDR 0x00000000
++#define DSP_MAPPHYSICALADDR 0x00000001
++
++/* Mapped data is big endian */
++#define DSP_MAPBIGENDIAN 0x00000002
++#define DSP_MAPLITTLEENDIAN 0x00000000
++
++/* Element size is based on DSP r/w access size */
++#define DSP_MAPMIXEDELEMSIZE 0x00000004
++
++/*
++ * Element size for MMU mapping (8, 16, 32, or 64 bit)
++ * Ignored if DSP_MAPMIXEDELEMSIZE enabled
++ */
++#define DSP_MAPELEMSIZE8 0x00000008
++#define DSP_MAPELEMSIZE16 0x00000010
++#define DSP_MAPELEMSIZE32 0x00000020
++#define DSP_MAPELEMSIZE64 0x00000040
++
++#define DSP_MAPVMALLOCADDR 0x00000080
++
++#define DSP_MAPDONOTLOCK 0x00000100
++
++#define DSP_MAP_DIR_MASK 0x3FFF
++
++#define GEM_CACHE_LINE_SIZE 128
++#define GEM_L1P_PREFETCH_SIZE 128
++
++/*
++ * Definitions from dbreg.h
++ */
++
++#define DSPPROCTYPE_C64 6410
++#define IVAPROCTYPE_ARM7 470
++
++#define REG_MGR_OBJECT 1
++#define REG_DRV_OBJECT 2
++
++/* registry */
++#define DRVOBJECT "DrvObject"
++#define MGROBJECT "MgrObject"
++
++/* Max registry path length. Also the max registry value length. */
++#define MAXREGPATHLENGTH 255
++
++#endif /* DBDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,141 @@
++/*
++ * dbldefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBLDEFS_
++#define DBLDEFS_
++
++/*
++ * Bit masks for dbl_flags.
++ */
++#define DBL_NOLOAD 0x0 /* Don't load symbols, code, or data */
++#define DBL_SYMB 0x1 /* load symbols */
++#define DBL_CODE 0x2 /* load code */
++#define DBL_DATA 0x4 /* load data */
++#define DBL_DYNAMIC 0x8 /* dynamic load */
++#define DBL_BSS 0x20 /* Unitialized section */
++
++#define DBL_MAXPATHLENGTH 255
++
++/*
++ * ======== dbl_flags ========
++ * Specifies whether to load code, data, or symbols
++ */
++typedef s32 dbl_flags;
++
++/*
++ * ======== dbl_sect_info ========
++ * For collecting info on overlay sections
++ */
++struct dbl_sect_info {
++ const char *name; /* name of section */
++ u32 sect_run_addr; /* run address of section */
++ u32 sect_load_addr; /* load address of section */
++ u32 size; /* size of section (target MAUs) */
++ dbl_flags type; /* Code, data, or BSS */
++};
++
++/*
++ * ======== dbl_symbol ========
++ * (Needed for dynamic load library)
++ */
++struct dbl_symbol {
++ u32 value;
++};
++
++/*
++ * ======== dbl_alloc_fxn ========
++ * Allocate memory function. Allocate or reserve (if reserved == TRUE)
++ * "size" bytes of memory from segment "space" and return the address in
++ * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
++ * success, or an error code on failure.
++ */
++typedef s32(*dbl_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
++ u32 *dsp_address, s32 seg_id, s32 req,
++ bool reserved);
++
++/*
++ * ======== dbl_free_fxn ========
++ * Free memory function. Free, or unreserve (if reserved == TRUE) "size"
++ * bytes of memory from segment "space"
++ */
++typedef bool(*dbl_free_fxn) (void *hdl, u32 addr, s32 space, u32 size,
++ bool reserved);
++
++/*
++ * ======== dbl_log_write_fxn ========
++ * Function to call when writing data from a section, to log the info.
++ * Can be NULL if no logging is required.
++ */
++typedef int(*dbl_log_write_fxn) (void *handle,
++ struct dbl_sect_info *sect, u32 addr,
++ u32 bytes);
++
++/*
++ * ======== dbl_sym_lookup ========
++ * Symbol lookup function - Find the symbol name and return its value.
++ *
++ * Parameters:
++ * handle - Opaque handle
++ * parg - Opaque argument.
++ * name - Name of symbol to lookup.
++ * sym - Location to store address of symbol structure.
++ *
++ * Returns:
++ * TRUE: Success (symbol was found).
++ * FALSE: Failed to find symbol.
++ */
++typedef bool(*dbl_sym_lookup) (void *handle, void *parg, void *rmm_handle,
++ const char *name, struct dbl_symbol ** sym);
++
++/*
++ * ======== dbl_write_fxn ========
++ * Write memory function. Write "n" HOST bytes of memory to segment "mtype"
++ * starting at address "dsp_address" from the buffer "buf". The buffer is
++ * formatted as an array of words appropriate for the DSP.
++ */
++typedef s32(*dbl_write_fxn) (void *hdl, u32 dsp_address, void *buf,
++ u32 n, s32 mtype);
++
++/*
++ * ======== dbl_attrs ========
++ */
++struct dbl_attrs {
++ dbl_alloc_fxn alloc;
++ dbl_free_fxn free;
++ void *rmm_handle; /* Handle to pass to alloc, free functions */
++ dbl_write_fxn write;
++ void *input_params; /* Handle to pass to write, cinit function */
++
++ dbl_log_write_fxn log_write;
++ void *log_write_handle;
++
++ /* Symbol matching function and handle to pass to it */
++ dbl_sym_lookup sym_lookup;
++ void *sym_handle;
++ void *sym_arg;
++
++ /*
++ * These file manipulation functions should be compatible with the
++ * "C" run time library functions of the same name.
++ */
++ s32(*fread) (void *, size_t, size_t, void *);
++ s32(*fseek) (void *, long, int);
++ s32(*ftell) (void *);
++ s32(*fclose) (void *);
++ void *(*fopen) (const char *, const char *);
++};
++
++#endif /* DBLDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbll.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dbll.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,62 @@
++/*
++ * dbll.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Dynamic load library module interface. Function header
++ * comments are in the file dblldefs.h.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBLL_
++#define DBLL_
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/dblldefs.h>
++
++extern bool symbols_reloaded;
++
++extern void dbll_close(struct dbll_library_obj *zl_lib);
++extern int dbll_create(struct dbll_tar_obj **target_obj,
++ struct dbll_attrs *pattrs);
++extern void dbll_delete(struct dbll_tar_obj *target);
++extern void dbll_exit(void);
++extern bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
++ struct dbll_sym_val **sym_val);
++extern void dbll_get_attrs(struct dbll_tar_obj *target,
++ struct dbll_attrs *pattrs);
++extern bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
++ struct dbll_sym_val **sym_val);
++extern int dbll_get_sect(struct dbll_library_obj *lib, char *name,
++ u32 *paddr, u32 *psize);
++extern bool dbll_init(void);
++extern int dbll_load(struct dbll_library_obj *lib,
++ dbll_flags flags,
++ struct dbll_attrs *attrs, u32 * entry);
++extern int dbll_load_sect(struct dbll_library_obj *zl_lib,
++ char *sec_name, struct dbll_attrs *attrs);
++extern int dbll_open(struct dbll_tar_obj *target, char *file,
++ dbll_flags flags,
++ struct dbll_library_obj **lib_obj);
++extern int dbll_read_sect(struct dbll_library_obj *lib,
++ char *name, char *buf, u32 size);
++extern void dbll_set_attrs(struct dbll_tar_obj *target,
++ struct dbll_attrs *pattrs);
++extern void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs);
++extern int dbll_unload_sect(struct dbll_library_obj *lib,
++ char *sect_name, struct dbll_attrs *attrs);
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
++ u32 offset_range, u32 *sym_addr_output, char *name_output);
++#endif
++
++#endif /* DBLL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,496 @@
++/*
++ * dblldefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DBLLDEFS_
++#define DBLLDEFS_
++
++/*
++ * Bit masks for dbl_flags.
++ */
++#define DBLL_NOLOAD 0x0 /* Don't load symbols, code, or data */
++#define DBLL_SYMB 0x1 /* load symbols */
++#define DBLL_CODE 0x2 /* load code */
++#define DBLL_DATA 0x4 /* load data */
++#define DBLL_DYNAMIC 0x8 /* dynamic load */
++#define DBLL_BSS 0x20 /* Uninitialized section */
++
++#define DBLL_MAXPATHLENGTH 255
++
++/*
++ * ======== DBLL_Target ========
++ *
++ */
++struct dbll_tar_obj;
++
++/*
++ * ======== dbll_flags ========
++ * Specifies whether to load code, data, or symbols
++ */
++typedef s32 dbll_flags;
++
++/*
++ * ======== DBLL_Library ========
++ *
++ */
++struct dbll_library_obj;
++
++/*
++ * ======== dbll_sect_info ========
++ * For collecting info on overlay sections
++ */
++struct dbll_sect_info {
++ const char *name; /* name of section */
++ u32 sect_run_addr; /* run address of section */
++ u32 sect_load_addr; /* load address of section */
++ u32 size; /* size of section (target MAUs) */
++ dbll_flags type; /* Code, data, or BSS */
++};
++
++/*
++ * ======== dbll_sym_val ========
++ * (Needed for dynamic load library)
++ */
++struct dbll_sym_val {
++ u32 value;
++};
++
++/*
++ * ======== dbll_alloc_fxn ========
++ * Allocate memory function. Allocate or reserve (if reserved == TRUE)
++ * "size" bytes of memory from segment "space" and return the address in
++ * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
++ * success, or an error code on failure.
++ */
++typedef s32(*dbll_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
++ u32 *dsp_address, s32 seg_id, s32 req,
++ bool reserved);
++
++/*
++ * ======== dbll_close_fxn ========
++ */
++typedef s32(*dbll_f_close_fxn) (void *);
++
++/*
++ * ======== dbll_free_fxn ========
++ * Free memory function. Free, or unreserve (if reserved == TRUE) "size"
++ * bytes of memory from segment "space"
++ */
++typedef bool(*dbll_free_fxn) (void *hdl, u32 addr, s32 space, u32 size,
++ bool reserved);
++
++/*
++ * ======== dbll_f_open_fxn ========
++ */
++typedef void *(*dbll_f_open_fxn) (const char *, const char *);
++
++/*
++ * ======== dbll_log_write_fxn ========
++ * Function to call when writing data from a section, to log the info.
++ * Can be NULL if no logging is required.
++ */
++typedef int(*dbll_log_write_fxn) (void *handle,
++ struct dbll_sect_info *sect, u32 addr,
++ u32 bytes);
++
++/*
++ * ======== dbll_read_fxn ========
++ */
++typedef s32(*dbll_read_fxn) (void *, size_t, size_t, void *);
++
++/*
++ * ======== dbll_seek_fxn ========
++ */
++typedef s32(*dbll_seek_fxn) (void *, long, int);
++
++/*
++ * ======== dbll_sym_lookup ========
++ * Symbol lookup function - Find the symbol name and return its value.
++ *
++ * Parameters:
++ * handle - Opaque handle
++ * parg - Opaque argument.
++ * name - Name of symbol to lookup.
++ * sym - Location to store address of symbol structure.
++ *
++ * Returns:
++ * TRUE: Success (symbol was found).
++ * FALSE: Failed to find symbol.
++ */
++typedef bool(*dbll_sym_lookup) (void *handle, void *parg, void *rmm_handle,
++ const char *name, struct dbll_sym_val ** sym);
++
++/*
++ * ======== dbll_tell_fxn ========
++ */
++typedef s32(*dbll_tell_fxn) (void *);
++
++/*
++ * ======== dbll_write_fxn ========
++ * Write memory function. Write "n" HOST bytes of memory to segment "mtype"
++ * starting at address "dsp_address" from the buffer "buf". The buffer is
++ * formatted as an array of words appropriate for the DSP.
++ */
++typedef s32(*dbll_write_fxn) (void *hdl, u32 dsp_address, void *buf,
++ u32 n, s32 mtype);
++
++/*
++ * ======== dbll_attrs ========
++ */
++struct dbll_attrs {
++ dbll_alloc_fxn alloc;
++ dbll_free_fxn free;
++ void *rmm_handle; /* Handle to pass to alloc, free functions */
++ dbll_write_fxn write;
++ void *input_params; /* Handle to pass to write, cinit function */
++ bool base_image;
++ dbll_log_write_fxn log_write;
++ void *log_write_handle;
++
++ /* Symbol matching function and handle to pass to it */
++ dbll_sym_lookup sym_lookup;
++ void *sym_handle;
++ void *sym_arg;
++
++ /*
++ * These file manipulation functions should be compatible with the
++ * "C" run time library functions of the same name.
++ */
++ s32(*fread) (void *, size_t, size_t, void *);
++ s32(*fseek) (void *, long, int);
++ s32(*ftell) (void *);
++ s32(*fclose) (void *);
++ void *(*fopen) (const char *, const char *);
++};
++
++/*
++ * ======== dbll_close ========
++ * Close library opened with dbll_open.
++ * Parameters:
++ * lib - Handle returned from dbll_open().
++ * Returns:
++ * Requires:
++ * DBL initialized.
++ * Valid lib.
++ * Ensures:
++ */
++typedef void (*dbll_close_fxn) (struct dbll_library_obj *library);
++
++/*
++ * ======== dbll_create ========
++ * Create a target object, specifying the alloc, free, and write functions.
++ * Parameters:
++ * target_obj - Location to store target handle on output.
++ * pattrs - Attributes.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failed.
++ * Requires:
++ * DBL initialized.
++ * pattrs != NULL.
++ * target_obj != NULL;
++ * Ensures:
++ * Success: *target_obj != NULL.
++ * Failure: *target_obj == NULL.
++ */
++typedef int(*dbll_create_fxn) (struct dbll_tar_obj **target_obj,
++ struct dbll_attrs *attrs);
++
++/*
++ * ======== dbll_delete ========
++ * Delete target object and free resources for any loaded libraries.
++ * Parameters:
++ * target - Handle returned from DBLL_Create().
++ * Returns:
++ * Requires:
++ * DBL initialized.
++ * Valid target.
++ * Ensures:
++ */
++typedef void (*dbll_delete_fxn) (struct dbll_tar_obj *target);
++
++/*
++ * ======== dbll_exit ========
++ * Discontinue use of DBL module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * refs > 0.
++ * Ensures:
++ * refs >= 0.
++ */
++typedef void (*dbll_exit_fxn) (void);
++
++/*
++ * ======== dbll_get_addr ========
++ * Get address of name in the specified library.
++ * Parameters:
++ * lib - Handle returned from dbll_open().
++ * name - Name of symbol
++ * sym_val - Location to store symbol address on output.
++ * Returns:
++ * TRUE: Success.
++ * FALSE: Symbol not found.
++ * Requires:
++ * DBL initialized.
++ * Valid library.
++ * name != NULL.
++ * sym_val != NULL.
++ * Ensures:
++ */
++typedef bool(*dbll_get_addr_fxn) (struct dbll_library_obj *lib, char *name,
++ struct dbll_sym_val **sym_val);
++
++/*
++ * ======== dbll_get_attrs ========
++ * Retrieve the attributes of the target.
++ * Parameters:
++ * target - Handle returned from DBLL_Create().
++ * pattrs - Location to store attributes on output.
++ * Returns:
++ * Requires:
++ * DBL initialized.
++ * Valid target.
++ * pattrs != NULL.
++ * Ensures:
++ */
++typedef void (*dbll_get_attrs_fxn) (struct dbll_tar_obj *target,
++ struct dbll_attrs *attrs);
++
++/*
++ * ======== dbll_get_c_addr ========
++ * Get address of "C" name on the specified library.
++ * Parameters:
++ * lib - Handle returned from dbll_open().
++ * name - Name of symbol
++ * sym_val - Location to store symbol address on output.
++ * Returns:
++ * TRUE: Success.
++ * FALSE: Symbol not found.
++ * Requires:
++ * DBL initialized.
++ * Valid target.
++ * name != NULL.
++ * sym_val != NULL.
++ * Ensures:
++ */
++typedef bool(*dbll_get_c_addr_fxn) (struct dbll_library_obj *lib, char *name,
++ struct dbll_sym_val **sym_val);
++
++/*
++ * ======== dbll_get_sect ========
++ * Get address and size of a named section.
++ * Parameters:
++ * lib - Library handle returned from dbll_open().
++ * name - Name of section.
++ * paddr - Location to store section address on output.
++ * psize - Location to store section size on output.
++ * Returns:
++ * 0: Success.
++ * -ENXIO: Section not found.
++ * Requires:
++ * DBL initialized.
++ * Valid lib.
++ * name != NULL.
++ * paddr != NULL;
++ * psize != NULL.
++ * Ensures:
++ */
++typedef int(*dbll_get_sect_fxn) (struct dbll_library_obj *lib,
++ char *name, u32 * addr, u32 * size);
++
++/*
++ * ======== dbll_init ========
++ * Initialize DBL module.
++ * Parameters:
++ * Returns:
++ * TRUE: Success.
++ * FALSE: Failure.
++ * Requires:
++ * refs >= 0.
++ * Ensures:
++ * Success: refs > 0.
++ * Failure: refs >= 0.
++ */
++typedef bool(*dbll_init_fxn) (void);
++
++/*
++ * ======== dbll_load ========
++ * Load library onto the target.
++ *
++ * Parameters:
++ * lib - Library handle returned from dbll_open().
++ * flags - Load code, data and/or symbols.
++ * attrs - May contain alloc, free, and write function.
++ * entry_pt - Location to store program entry on output.
++ * Returns:
++ * 0: Success.
++ * -EBADF: File read failed.
++ * -EILSEQ: Failure in dynamic loader library.
++ * Requires:
++ * DBL initialized.
++ * Valid lib.
++ * entry != NULL.
++ * Ensures:
++ */
++typedef int(*dbll_load_fxn) (struct dbll_library_obj *lib,
++ dbll_flags flags,
++ struct dbll_attrs *attrs, u32 *entry);
++
++/*
++ * ======== dbll_load_sect ========
++ * Load a named section from a library (for overlay support).
++ * Parameters:
++ * lib - Handle returned from dbll_open().
++ * sec_name - Name of section to load.
++ * attrs - Contains write function and handle to pass to it.
++ * Returns:
++ * 0: Success.
++ * -ENXIO: Section not found.
++ * -ENOSYS: Function not implemented.
++ * Requires:
++ * Valid lib.
++ * sec_name != NULL.
++ * attrs != NULL.
++ * attrs->write != NULL.
++ * Ensures:
++ */
++typedef int(*dbll_load_sect_fxn) (struct dbll_library_obj *lib,
++ char *sz_sect_name,
++ struct dbll_attrs *attrs);
++
++/*
++ * ======== dbll_open ========
++ * dbll_open() returns a library handle that can be used to load/unload
++ * the symbols/code/data via dbll_load()/dbll_unload().
++ * Parameters:
++ * target - Handle returned from dbll_create().
++ * file - Name of file to open.
++ * flags - If flags & DBLL_SYMB, load symbols.
++ * lib_obj - Location to store library handle on output.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failure.
++ * -EBADF: File open/read failure.
++ * Unable to determine target type.
++ * Requires:
++ * DBL initialized.
++ * Valid target.
++ * file != NULL.
++ * lib_obj != NULL.
++ * dbll_attrs fopen function non-NULL.
++ * Ensures:
++ * Success: Valid *lib_obj.
++ * Failure: *lib_obj == NULL.
++ */
++typedef int(*dbll_open_fxn) (struct dbll_tar_obj *target, char *file,
++ dbll_flags flags,
++ struct dbll_library_obj **lib_obj);
++
++/*
++ * ======== dbll_read_sect ========
++ * Read COFF section into a character buffer.
++ * Parameters:
++ * lib - Library handle returned from dbll_open().
++ * name - Name of section.
++ * pbuf - Buffer to write section contents into.
++ * size - Buffer size
++ * Returns:
++ * 0: Success.
++ * -ENXIO: Named section does not exist.
++ * Requires:
++ * DBL initialized.
++ * Valid lib.
++ * name != NULL.
++ * pbuf != NULL.
++ * size != 0.
++ * Ensures:
++ */
++typedef int(*dbll_read_sect_fxn) (struct dbll_library_obj *lib,
++ char *name, char *content,
++ u32 cont_size);
++
++/*
++ * ======== dbll_set_attrs ========
++ * Set the attributes of the target.
++ * Parameters:
++ * target - Handle returned from dbll_create().
++ * pattrs - New attributes.
++ * Returns:
++ * Requires:
++ * DBL initialized.
++ * Valid target.
++ * pattrs != NULL.
++ * Ensures:
++ */
++typedef void (*dbll_set_attrs_fxn) (struct dbll_tar_obj *target,
++ struct dbll_attrs *attrs);
++
++/*
++ * ======== dbll_unload ========
++ * Unload library loaded with dbll_load().
++ * Parameters:
++ * lib - Handle returned from dbll_open().
++ * attrs - Contains free() function and handle to pass to it.
++ * Returns:
++ * Requires:
++ * DBL initialized.
++ * Valid lib.
++ * Ensures:
++ */
++typedef void (*dbll_unload_fxn) (struct dbll_library_obj *library,
++ struct dbll_attrs *attrs);
++
++/*
++ * ======== dbll_unload_sect ========
++ * Unload a named section from a library (for overlay support).
++ * Parameters:
++ * lib - Handle returned from dbll_open().
++ * sec_name - Name of section to load.
++ * attrs - Contains free() function and handle to pass to it.
++ * Returns:
++ * 0: Success.
++ * -ENXIO: Named section not found.
++ * -ENOSYS
++ * Requires:
++ * DBL initialized.
++ * Valid lib.
++ * sec_name != NULL.
++ * Ensures:
++ */
++typedef int(*dbll_unload_sect_fxn) (struct dbll_library_obj *lib,
++ char *sz_sect_name,
++ struct dbll_attrs *attrs);
++
++struct dbll_fxns {
++ dbll_close_fxn close_fxn;
++ dbll_create_fxn create_fxn;
++ dbll_delete_fxn delete_fxn;
++ dbll_exit_fxn exit_fxn;
++ dbll_get_attrs_fxn get_attrs_fxn;
++ dbll_get_addr_fxn get_addr_fxn;
++ dbll_get_c_addr_fxn get_c_addr_fxn;
++ dbll_get_sect_fxn get_sect_fxn;
++ dbll_init_fxn init_fxn;
++ dbll_load_fxn load_fxn;
++ dbll_load_sect_fxn load_sect_fxn;
++ dbll_open_fxn open_fxn;
++ dbll_read_sect_fxn read_sect_fxn;
++ dbll_set_attrs_fxn set_attrs_fxn;
++ dbll_unload_fxn unload_fxn;
++ dbll_unload_sect_fxn unload_sect_fxn;
++};
++
++#endif /* DBLLDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,32 @@
++/*
++ * dehdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definition for Bridge driver module DEH.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DEHDEFS_
++#define DEHDEFS_
++
++#include <dspbridge/mbx_sh.h> /* shared mailbox codes */
++
++/* DEH object manager */
++struct deh_mgr;
++
++/* Magic code used to determine if DSP signaled exception. */
++#define DEH_BASE MBX_DEH_BASE
++#define DEH_USERS_BASE MBX_DEH_USERS_BASE
++#define DEH_LIMIT MBX_DEH_LIMIT
++
++#endif /* DEHDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dev.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dev.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,702 @@
++/*
++ * dev.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Bridge Bridge driver device operations.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DEV_
++#define DEV_
++
++/* ----------------------------------- Module Dependent Headers */
++#include <dspbridge/chnldefs.h>
++#include <dspbridge/cmm.h>
++#include <dspbridge/cod.h>
++#include <dspbridge/dehdefs.h>
++#include <dspbridge/nodedefs.h>
++#include <dspbridge/dispdefs.h>
++#include <dspbridge/dspdefs.h>
++#include <dspbridge/dmm.h>
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/devdefs.h>
++
++/*
++ * ======== dev_brd_write_fxn ========
++ * Purpose:
++ * Exported function to be used as the COD write function. This function
++ * is passed a handle to a DEV_hObject by ZL in arb, then calls the
++ * device's bridge_brd_write() function.
++ * Parameters:
++ * arb: Handle to a Device Object.
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * dsp_addr: Address on DSP board (Destination).
++ * host_buf: Pointer to host buffer (Source).
++ * ul_num_bytes: Number of bytes to transfer.
++ * mem_type: Memory space on DSP to which to transfer.
++ * Returns:
++ * Number of bytes written. Returns 0 if the DEV_hObject passed in via
++ * arb is invalid.
++ * Requires:
++ * DEV Initialized.
++ * host_buf != NULL
++ * Ensures:
++ */
++extern u32 dev_brd_write_fxn(void *arb,
++ u32 dsp_add,
++ void *host_buf, u32 ul_num_bytes, u32 mem_space);
++
++/*
++ * ======== dev_create_device ========
++ * Purpose:
++ * Called by the operating system to load the Bridge Driver for a
++ * 'Bridge device.
++ * Parameters:
++ * device_obj: Ptr to location to receive the device object handle.
++ * driver_file_name: Name of Bridge driver PE DLL file to load. If the
++ * absolute path is not provided, the file is loaded
++ * through 'Bridge's module search path.
++ * host_config: Host configuration information, to be passed down
++ * to the Bridge driver when bridge_dev_create() is called.
++ * pDspConfig: DSP resources, to be passed down to the Bridge driver
++ * when bridge_dev_create() is called.
++ * dev_node_obj: Platform specific device node.
++ * Returns:
++ * 0: Module is loaded, device object has been created
++ * -ENOMEM: Insufficient memory to create needed resources.
++ * -EPERM: Unable to find Bridge driver entry point function.
++ * -ESPIPE: Unable to load ZL DLL.
++ * Requires:
++ * DEV Initialized.
++ * device_obj != NULL.
++ * driver_file_name != NULL.
++ * host_config != NULL.
++ * pDspConfig != NULL.
++ * Ensures:
++ * 0: *device_obj will contain handle to the new device object.
++ * Otherwise, does not create the device object, ensures the Bridge driver
++ * module is unloaded, and sets *device_obj to NULL.
++ */
++extern int dev_create_device(struct dev_object
++ **device_obj,
++ const char *driver_file_name,
++ struct cfg_devnode *dev_node_obj);
++
++/*
++ * ======== dev_create_iva_device ========
++ * Purpose:
++ * Called by the operating system to load the Bridge Driver for IVA.
++ * Parameters:
++ * device_obj: Ptr to location to receive the device object handle.
++ * driver_file_name: Name of Bridge driver PE DLL file to load. If the
++ * absolute path is not provided, the file is loaded
++ * through 'Bridge's module search path.
++ * host_config: Host configuration information, to be passed down
++ * to the Bridge driver when bridge_dev_create() is called.
++ * pDspConfig: DSP resources, to be passed down to the Bridge driver
++ * when bridge_dev_create() is called.
++ * dev_node_obj: Platform specific device node.
++ * Returns:
++ * 0: Module is loaded, device object has been created
++ * -ENOMEM: Insufficient memory to create needed resources.
++ * -EPERM: Unable to find Bridge driver entry point function.
++ * -ESPIPE: Unable to load ZL DLL.
++ * Requires:
++ * DEV Initialized.
++ * device_obj != NULL.
++ * driver_file_name != NULL.
++ * host_config != NULL.
++ * pDspConfig != NULL.
++ * Ensures:
++ * 0: *device_obj will contain handle to the new device object.
++ * Otherwise, does not create the device object, ensures the Bridge driver
++ * module is unloaded, and sets *device_obj to NULL.
++ */
++extern int dev_create_iva_device(struct dev_object
++ **device_obj,
++ const char *driver_file_name,
++ const struct cfg_hostres
++ *host_config,
++ struct cfg_devnode *dev_node_obj);
++
++/*
++ * ======== dev_create2 ========
++ * Purpose:
++ * After successful loading of the image from api_init_complete2
++ * (PROC Auto_Start) or proc_load this fxn is called. This creates
++ * the Node Manager and updates the DEV Object.
++ * Parameters:
++ * hdev_obj: Handle to device object created with dev_create_device().
++ * Returns:
++ * 0: Successful Creation of Node Manager
++ * -EPERM: Some Error Occurred.
++ * Requires:
++ * DEV Initialized
++ * Valid hdev_obj
++ * Ensures:
++ * 0 and hdev_obj->hnode_mgr != NULL
++ * else hdev_obj->hnode_mgr == NULL
++ */
++extern int dev_create2(struct dev_object *hdev_obj);
++
++/*
++ * ======== dev_destroy2 ========
++ * Purpose:
++ * Destroys the Node manager for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with dev_create_device().
++ * Returns:
++ * 0: Successful Creation of Node Manager
++ * -EPERM: Some Error Occurred.
++ * Requires:
++ * DEV Initialized
++ * Valid hdev_obj
++ * Ensures:
++ * 0 and hdev_obj->hnode_mgr == NULL
++ * else -EPERM.
++ */
++extern int dev_destroy2(struct dev_object *hdev_obj);
++
++/*
++ * ======== dev_destroy_device ========
++ * Purpose:
++ * Destroys the channel manager for this device, if any, calls
++ * bridge_dev_destroy(), and then attempts to unload the Bridge module.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * -EPERM: The Bridge driver failed it's bridge_dev_destroy() function.
++ * Requires:
++ * DEV Initialized.
++ * Ensures:
++ */
++extern int dev_destroy_device(struct dev_object
++ *hdev_obj);
++
++/*
++ * ======== dev_get_chnl_mgr ========
++ * Purpose:
++ * Retrieve the handle to the channel manager created for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * *mgr: Ptr to location to store handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * mgr != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *mgr contains a handle to a channel manager object,
++ * or NULL.
++ * else: *mgr is NULL.
++ */
++extern int dev_get_chnl_mgr(struct dev_object *hdev_obj,
++ struct chnl_mgr **mgr);
++
++/*
++ * ======== dev_get_cmm_mgr ========
++ * Purpose:
++ * Retrieve the handle to the shared memory manager created for this
++ * device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * *mgr: Ptr to location to store handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * mgr != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *mgr contains a handle to a channel manager object,
++ * or NULL.
++ * else: *mgr is NULL.
++ */
++extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
++ struct cmm_object **mgr);
++
++/*
++ * ======== dev_get_dmm_mgr ========
++ * Purpose:
++ * Retrieve the handle to the dynamic memory manager created for this
++ * device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * *mgr: Ptr to location to store handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * mgr != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *mgr contains a handle to a channel manager object,
++ * or NULL.
++ * else: *mgr is NULL.
++ */
++extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
++ struct dmm_object **mgr);
++
++/*
++ * ======== dev_get_cod_mgr ========
++ * Purpose:
++ * Retrieve the COD manager create for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * *cod_mgr: Ptr to location to store handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * cod_mgr != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *cod_mgr contains a handle to a COD manager object.
++ * else: *cod_mgr is NULL.
++ */
++extern int dev_get_cod_mgr(struct dev_object *hdev_obj,
++ struct cod_manager **cod_mgr);
++
++/*
++ * ======== dev_get_deh_mgr ========
++ * Purpose:
++ * Retrieve the DEH manager created for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with dev_create_device().
++ * *deh_manager: Ptr to location to store handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * deh_manager != NULL.
++ * DEH Initialized.
++ * Ensures:
++ * 0: *deh_manager contains a handle to a DEH manager object.
++ * else: *deh_manager is NULL.
++ */
++extern int dev_get_deh_mgr(struct dev_object *hdev_obj,
++ struct deh_mgr **deh_manager);
++
++/*
++ * ======== dev_get_dev_node ========
++ * Purpose:
++ * Retrieve the platform specific device ID for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * dev_nde: Ptr to location to get the device node handle.
++ * Returns:
++ * 0: Returns a DEVNODE in *dev_node_obj.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * dev_nde != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *dev_nde contains a platform specific device ID;
++ * else: *dev_nde is NULL.
++ */
++extern int dev_get_dev_node(struct dev_object *hdev_obj,
++ struct cfg_devnode **dev_nde);
++
++/*
++ * ======== dev_get_dev_type ========
++ * Purpose:
++ * Retrieve the platform specific device ID for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * dev_nde: Ptr to location to get the device node handle.
++ * Returns:
++ * 0: Success
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * dev_nde != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *dev_nde contains a platform specific device ID;
++ * else: *dev_nde is NULL.
++ */
++extern int dev_get_dev_type(struct dev_object *device_obj,
++ u8 *dev_type);
++
++/*
++ * ======== dev_get_first ========
++ * Purpose:
++ * Retrieve the first Device Object handle from an internal linked list of
++ * DEV_OBJECTs maintained by DEV.
++ * Parameters:
++ * Returns:
++ * NULL if there are no device objects stored; else
++ * a valid DEV_HOBJECT.
++ * Requires:
++ * No calls to dev_create_device or dev_destroy_device (which may modify the
++ * internal device object list) may occur between calls to dev_get_first
++ * and dev_get_next.
++ * Ensures:
++ * The DEV_HOBJECT returned is valid.
++ * A subsequent call to dev_get_next will return the next device object in
++ * the list.
++ */
++extern struct dev_object *dev_get_first(void);
++
++/*
++ * ======== dev_get_intf_fxns ========
++ * Purpose:
++ * Retrieve the Bridge driver interface function structure for the
++ * loaded Bridge driver.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * *if_fxns: Ptr to location to store fxn interface.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * if_fxns != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *if_fxns contains a pointer to the Bridge
++ * driver interface;
++ * else: *if_fxns is NULL.
++ */
++extern int dev_get_intf_fxns(struct dev_object *hdev_obj,
++ struct bridge_drv_interface **if_fxns);
++
++/*
++ * ======== dev_get_io_mgr ========
++ * Purpose:
++ * Retrieve the handle to the IO manager created for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * *mgr: Ptr to location to store handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * mgr != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *mgr contains a handle to an IO manager object.
++ * else: *mgr is NULL.
++ */
++extern int dev_get_io_mgr(struct dev_object *hdev_obj,
++ struct io_mgr **mgr);
++
++/*
++ * ======== dev_get_next ========
++ * Purpose:
++ * Retrieve the next Device Object handle from an internal linked list of
++ * DEV_OBJECTs maintained by DEV, after having previously called
++ * dev_get_first() and zero or more dev_get_next
++ * Parameters:
++ * hdev_obj: Handle to the device object returned from a previous
++ * call to dev_get_first() or dev_get_next().
++ * Returns:
++ * NULL if there are no further device objects on the list or hdev_obj
++ * was invalid;
++ * else the next valid DEV_HOBJECT in the list.
++ * Requires:
++ * No calls to dev_create_device or dev_destroy_device (which may modify the
++ * internal device object list) may occur between calls to dev_get_first
++ * and dev_get_next.
++ * Ensures:
++ * The DEV_HOBJECT returned is valid.
++ * A subsequent call to dev_get_next will return the next device object in
++ * the list.
++ */
++extern struct dev_object *dev_get_next(struct dev_object
++ *hdev_obj);
++
++/*
++ * ========= dev_get_msg_mgr ========
++ * Purpose:
++ * Retrieve the msg_ctrl Manager Handle from the DevObject.
++ * Parameters:
++ * hdev_obj: Handle to the Dev Object
++ * msg_man: Location where msg_ctrl Manager handle will be returned.
++ * Returns:
++ * Requires:
++ * DEV Initialized.
++ * Valid hdev_obj.
++ * node_man != NULL.
++ * Ensures:
++ */
++extern void dev_get_msg_mgr(struct dev_object *hdev_obj,
++ struct msg_mgr **msg_man);
++
++/*
++ * ========= dev_get_node_manager ========
++ * Purpose:
++ * Retrieve the Node Manager Handle from the DevObject. It is an
++ * accessor function
++ * Parameters:
++ * hdev_obj: Handle to the Dev Object
++ * node_man: Location where Handle to the Node Manager will be
++ * returned.
++ * Returns:
++ * 0: Success
++ * -EFAULT: Invalid Dev Object handle.
++ * Requires:
++ * DEV Initialized.
++ * node_man is not null
++ * Ensures:
++ * 0: *node_man contains a handle to a Node manager object.
++ * else: *node_man is NULL.
++ */
++extern int dev_get_node_manager(struct dev_object
++ *hdev_obj,
++ struct node_mgr **node_man);
++
++/*
++ * ======== dev_get_symbol ========
++ * Purpose:
++ * Get the value of a symbol in the currently loaded program.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * str_sym: Name of symbol to look up.
++ * pul_value: Ptr to symbol value.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * -ESPIPE: Symbols could not be found or have not been loaded onto
++ * the board.
++ * Requires:
++ * str_sym != NULL.
++ * pul_value != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *pul_value contains the symbol value;
++ */
++extern int dev_get_symbol(struct dev_object *hdev_obj,
++ const char *str_sym, u32 * pul_value);
++
++/*
++ * ======== dev_get_bridge_context ========
++ * Purpose:
++ * Retrieve the Bridge Context handle, as returned by the
++ * bridge_dev_create fxn.
++ * Parameters:
++ * hdev_obj: Handle to device object created with dev_create_device()
++ * *phbridge_context: Ptr to location to store context handle.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * phbridge_context != NULL.
++ * DEV Initialized.
++ * Ensures:
++ * 0: *phbridge_context contains context handle;
++ * else: *phbridge_context is NULL;
++ */
++extern int dev_get_bridge_context(struct dev_object *hdev_obj,
++ struct bridge_dev_context
++ **phbridge_context);
++
++/*
++ * ======== dev_exit ========
++ * Purpose:
++ * Decrement reference count, and free resources when reference count is
++ * 0.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * DEV is initialized.
++ * Ensures:
++ * When reference count == 0, DEV's private resources are freed.
++ */
++extern void dev_exit(void);
++
++/*
++ * ======== dev_init ========
++ * Purpose:
++ * Initialize DEV's private state, keeping a reference count on each call.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * TRUE: A requirement for the other public DEV functions.
++ */
++extern bool dev_init(void);
++
++/*
++ * ======== dev_is_locked ========
++ * Purpose:
++ * Predicate function to determine if the device has been
++ * locked by a client for exclusive access.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * Returns:
++ * 0: TRUE: device has been locked.
++ * 0: FALSE: device not locked.
++ * -EFAULT: hdev_obj was invalid.
++ * Requires:
++ * DEV Initialized.
++ * Ensures:
++ */
++extern int dev_is_locked(struct dev_object *hdev_obj);
++
++/*
++ * ======== dev_insert_proc_object ========
++ * Purpose:
++ * Inserts the Processor Object into the List of PROC Objects
++ * kept in the DEV Object
++ * Parameters:
++ * proc_obj: Handle to the Proc Object
++ * hdev_obj Handle to the Dev Object
++ * bAttachedNew Specifies if there are already processors attached
++ * Returns:
++ * 0: Successfully inserted into the list
++ * Requires:
++ * proc_obj is not NULL
++ * hdev_obj is a valid handle to the DEV.
++ * DEV Initialized.
++ * List(of Proc object in Dev) Exists.
++ * Ensures:
++ * 0 & the PROC Object is inserted and the list is not empty
++ * Details:
++ * If the List of Proc Object is empty bAttachedNew is TRUE, it indicated
++ * this is the first Processor attaching.
++ * If it is False, there are already processors attached.
++ */
++extern int dev_insert_proc_object(struct dev_object
++ *hdev_obj,
++ u32 proc_obj,
++ bool *already_attached);
++
++/*
++ * ======== dev_remove_proc_object ========
++ * Purpose:
++ * Search for and remove a Proc object from the given list maintained
++ * by the DEV
++ * Parameters:
++ * p_proc_object: Ptr to ProcObject to insert.
++ * dev_obj: Ptr to Dev Object where the list is.
++ * already_attached: Ptr to return the bool
++ * Returns:
++ * 0: If successful.
++ * -EPERM Failure to Remove the PROC Object from the list
++ * Requires:
++ * DevObject is Valid
++ * proc_obj != 0
++ * dev_obj->proc_list != NULL
++ * !LST_IS_EMPTY(dev_obj->proc_list)
++ * already_attached !=NULL
++ * Ensures:
++ * Details:
++ * List will be deleted when the DEV is destroyed.
++ *
++ */
++extern int dev_remove_proc_object(struct dev_object
++ *hdev_obj, u32 proc_obj);
++
++/*
++ * ======== dev_notify_clients ========
++ * Purpose:
++ * Notify all clients of this device of a change in device status.
++ * Clients may include multiple users of BRD, as well as CHNL.
++ * This function is asychronous, and may be called by a timer event
++ * set up by a watchdog timer.
++ * Parameters:
++ * hdev_obj: Handle to device object created with dev_create_device().
++ * ret: A status word, most likely a BRD_STATUS.
++ * Returns:
++ * 0: All registered clients were asynchronously notified.
++ * -EINVAL: Invalid hdev_obj.
++ * Requires:
++ * DEV Initialized.
++ * Ensures:
++ * 0: Notifications are queued by the operating system to be
++ * delivered to clients. This function does not ensure that
++ * the notifications will ever be delivered.
++ */
++extern int dev_notify_clients(struct dev_object *hdev_obj, u32 ret);
++
++/*
++ * ======== dev_remove_device ========
++ * Purpose:
++ * Destroys the Device Object created by dev_start_device.
++ * Parameters:
++ * dev_node_obj: Device node as it is know to OS.
++ * Returns:
++ * 0: If success;
++ * <error code> Otherwise.
++ * Requires:
++ * Ensures:
++ */
++extern int dev_remove_device(struct cfg_devnode *dev_node_obj);
++
++/*
++ * ======== dev_set_chnl_mgr ========
++ * Purpose:
++ * Set the channel manager for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with
++ * dev_create_device().
++ * hmgr: Handle to a channel manager, or NULL.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * DEV Initialized.
++ * Ensures:
++ */
++extern int dev_set_chnl_mgr(struct dev_object *hdev_obj,
++ struct chnl_mgr *hmgr);
++
++/*
++ * ======== dev_set_msg_mgr ========
++ * Purpose:
++ * Set the Message manager for this device.
++ * Parameters:
++ * hdev_obj: Handle to device object created with dev_create_device().
++ * hmgr: Handle to a message manager, or NULL.
++ * Returns:
++ * Requires:
++ * DEV Initialized.
++ * Ensures:
++ */
++extern void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr);
++
++/*
++ * ======== dev_start_device ========
++ * Purpose:
++ * Initializes the new device with bridge environment. This involves
++ * querying CM for allocated resources, querying the registry for
++ * necessary dsp resources (requested in the INF file), and using this
++ * information to create a bridge device object.
++ * Parameters:
++ * dev_node_obj: Device node as it is know to OS.
++ * Returns:
++ * 0: If success;
++ * <error code> Otherwise.
++ * Requires:
++ * DEV initialized.
++ * Ensures:
++ */
++extern int dev_start_device(struct cfg_devnode *dev_node_obj);
++
++#endif /* DEV_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/devdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/devdefs.h 2010-08-18 11:24:23.182060014 +0300
+@@ -0,0 +1,26 @@
++/*
++ * devdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definition of common include typedef between dspdefs.h and dev.h. Required
++ * to break circular dependency between Bridge driver and DEV include files.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DEVDEFS_
++#define DEVDEFS_
++
++/* Bridge Device Object */
++struct dev_object;
++
++#endif /* DEVDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/disp.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/disp.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,204 @@
++/*
++ * disp.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Node Dispatcher.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DISP_
++#define DISP_
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/nodedefs.h>
++#include <dspbridge/nodepriv.h>
++#include <dspbridge/dispdefs.h>
++
++/*
++ * ======== disp_create ========
++ * Create a NODE Dispatcher object. This object handles the creation,
++ * deletion, and execution of nodes on the DSP target, through communication
++ * with the Resource Manager Server running on the target. Each NODE
++ * Manager object should have exactly one NODE Dispatcher.
++ *
++ * Parameters:
++ * dispatch_obj: Location to store node dispatcher object on output.
++ * hdev_obj: Device for this processor.
++ * disp_attrs: Node dispatcher attributes.
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EPERM: Unable to create dispatcher.
++ * Requires:
++ * disp_init(void) called.
++ * disp_attrs != NULL.
++ * hdev_obj != NULL.
++ * dispatch_obj != NULL.
++ * Ensures:
++ * 0: IS_VALID(*dispatch_obj).
++ * error: *dispatch_obj == NULL.
++ */
++extern int disp_create(struct disp_object **dispatch_obj,
++ struct dev_object *hdev_obj,
++ const struct disp_attr *disp_attrs);
++
++/*
++ * ======== disp_delete ========
++ * Delete the NODE Dispatcher.
++ *
++ * Parameters:
++ * disp_obj: Node Dispatcher object.
++ * Returns:
++ * Requires:
++ * disp_init(void) called.
++ * Valid disp_obj.
++ * Ensures:
++ * disp_obj is invalid.
++ */
++extern void disp_delete(struct disp_object *disp_obj);
++
++/*
++ * ======== disp_exit ========
++ * Discontinue usage of DISP module.
++ *
++ * Parameters:
++ * Returns:
++ * Requires:
++ * disp_init(void) previously called.
++ * Ensures:
++ * Any resources acquired in disp_init(void) will be freed when last DISP
++ * client calls disp_exit(void).
++ */
++extern void disp_exit(void);
++
++/*
++ * ======== disp_init ========
++ * Initialize the DISP module.
++ *
++ * Parameters:
++ * Returns:
++ * TRUE if initialization succeeded, FALSE otherwise.
++ * Ensures:
++ */
++extern bool disp_init(void);
++
++/*
++ * ======== disp_node_change_priority ========
++ * Change the priority of a node currently running on the target.
++ *
++ * Parameters:
++ * disp_obj: Node Dispatcher object.
++ * hnode: Node object representing a node currently
++ * allocated or running on the DSP.
++ * ulFxnAddress: Address of RMS function for changing priority.
++ * node_env: Address of node's environment structure.
++ * prio: New priority level to set node's priority to.
++ * Returns:
++ * 0: Success.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * Requires:
++ * disp_init(void) called.
++ * Valid disp_obj.
++ * hnode != NULL.
++ * Ensures:
++ */
++extern int disp_node_change_priority(struct disp_object
++ *disp_obj,
++ struct node_object *hnode,
++ u32 rms_fxn,
++ nodeenv node_env, s32 prio);
++
++/*
++ * ======== disp_node_create ========
++ * Create a node on the DSP by remotely calling the node's create function.
++ *
++ * Parameters:
++ * disp_obj: Node Dispatcher object.
++ * hnode: Node handle obtained from node_allocate().
++ * ul_fxn_addr: Address or RMS create node function.
++ * ul_create_fxn: Address of node's create function.
++ * pargs: Arguments to pass to RMS node create function.
++ * node_env: Location to store node environment pointer on
++ * output.
++ * Returns:
++ * 0: Success.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * -EPERM: A failure occurred, unable to create node.
++ * Requires:
++ * disp_init(void) called.
++ * Valid disp_obj.
++ * pargs != NULL.
++ * hnode != NULL.
++ * node_env != NULL.
++ * node_get_type(hnode) != NODE_DEVICE.
++ * Ensures:
++ */
++extern int disp_node_create(struct disp_object *disp_obj,
++ struct node_object *hnode,
++ u32 rms_fxn,
++ u32 ul_create_fxn,
++ const struct node_createargs
++ *pargs, nodeenv *node_env);
++
++/*
++ * ======== disp_node_delete ========
++ * Delete a node on the DSP by remotely calling the node's delete function.
++ *
++ * Parameters:
++ * disp_obj: Node Dispatcher object.
++ * hnode: Node object representing a node currently
++ * loaded on the DSP.
++ * ul_fxn_addr: Address or RMS delete node function.
++ * ul_delete_fxn: Address of node's delete function.
++ * node_env: Address of node's environment structure.
++ * Returns:
++ * 0: Success.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * Requires:
++ * disp_init(void) called.
++ * Valid disp_obj.
++ * hnode != NULL.
++ * Ensures:
++ */
++extern int disp_node_delete(struct disp_object *disp_obj,
++ struct node_object *hnode,
++ u32 rms_fxn,
++ u32 ul_delete_fxn, nodeenv node_env);
++
++/*
++ * ======== disp_node_run ========
++ * Start execution of a node's execute phase, or resume execution of a node
++ * that has been suspended (via DISP_NodePause()) on the DSP.
++ *
++ * Parameters:
++ * disp_obj: Node Dispatcher object.
++ * hnode: Node object representing a node to be executed
++ * on the DSP.
++ * ul_fxn_addr: Address or RMS node execute function.
++ * ul_execute_fxn: Address of node's execute function.
++ * node_env: Address of node's environment structure.
++ * Returns:
++ * 0: Success.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * Requires:
++ * disp_init(void) called.
++ * Valid disp_obj.
++ * hnode != NULL.
++ * Ensures:
++ */
++extern int disp_node_run(struct disp_object *disp_obj,
++ struct node_object *hnode,
++ u32 rms_fxn,
++ u32 ul_execute_fxn, nodeenv node_env);
++
++#endif /* DISP_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,35 @@
++/*
++ * dispdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global DISP constants and types, shared by PROCESSOR, NODE, and DISP.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DISPDEFS_
++#define DISPDEFS_
++
++struct disp_object;
++
++/* Node Dispatcher attributes */
++struct disp_attr {
++ u32 ul_chnl_offset; /* Offset of channel ids reserved for RMS */
++ /* Size of buffer for sending data to RMS */
++ u32 ul_chnl_buf_size;
++ int proc_family; /* eg, 5000 */
++ int proc_type; /* eg, 5510 */
++ void *reserved1; /* Reserved for future use. */
++ u32 reserved2; /* Reserved for future use. */
++};
++
++#endif /* DISPDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dmm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dmm.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,75 @@
++/*
++ * dmm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * The Dynamic Memory Mapping(DMM) module manages the DSP Virtual address
++ * space that can be directly mapped to any MPU buffer or memory region.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DMM_
++#define DMM_
++
++#include <dspbridge/dbdefs.h>
++
++struct dmm_object;
++
++/* DMM attributes used in dmm_create() */
++struct dmm_mgrattrs {
++ u32 reserved;
++};
++
++#define DMMPOOLSIZE 0x4000000
++
++/*
++ * ======== dmm_get_handle ========
++ * Purpose:
++ * Return the dynamic memory manager object for this device.
++ * This is typically called from the client process.
++ */
++
++extern int dmm_get_handle(void *hprocessor,
++ struct dmm_object **dmm_manager);
++
++extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
++ u32 size, u32 *prsv_addr);
++
++extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
++ u32 rsv_addr);
++
++extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
++ u32 size);
++
++extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
++ u32 addr, u32 *psize);
++
++extern int dmm_destroy(struct dmm_object *dmm_mgr);
++
++extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
++
++extern int dmm_create(struct dmm_object **dmm_manager,
++ struct dev_object *hdev_obj,
++ const struct dmm_mgrattrs *mgr_attrts);
++
++extern bool dmm_init(void);
++
++extern void dmm_exit(void);
++
++extern int dmm_create_tables(struct dmm_object *dmm_mgr,
++ u32 addr, u32 size);
++
++#ifdef DSP_DMM_DEBUG
++u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
++#endif
++
++#endif /* DMM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/drv.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/drv.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,521 @@
++/*
++ * drv.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DRV Resource allocation module. Driver Object gets Created
++ * at the time of Loading. It holds the List of Device Objects
++ * in the system.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DRV_
++#define DRV_
++
++#include <dspbridge/devdefs.h>
++
++#include <dspbridge/drvdefs.h>
++#include <linux/idr.h>
++
++#define DRV_ASSIGN 1
++#define DRV_RELEASE 0
++
++/* Provide the DSP Internal memory windows that can be accessed from L3 address
++ * space */
++
++#define OMAP_GEM_BASE 0x107F8000
++#define OMAP_DSP_SIZE 0x00720000
++
++/* MEM1 is L2 RAM + L2 Cache space */
++#define OMAP_DSP_MEM1_BASE 0x5C7F8000
++#define OMAP_DSP_MEM1_SIZE 0x18000
++#define OMAP_DSP_GEM1_BASE 0x107F8000
++
++/* MEM2 is L1P RAM/CACHE space */
++#define OMAP_DSP_MEM2_BASE 0x5CE00000
++#define OMAP_DSP_MEM2_SIZE 0x8000
++#define OMAP_DSP_GEM2_BASE 0x10E00000
++
++/* MEM3 is L1D RAM/CACHE space */
++#define OMAP_DSP_MEM3_BASE 0x5CF04000
++#define OMAP_DSP_MEM3_SIZE 0x14000
++#define OMAP_DSP_GEM3_BASE 0x10F04000
++
++#define OMAP_IVA2_PRM_BASE 0x48306000
++#define OMAP_IVA2_PRM_SIZE 0x1000
++
++#define OMAP_IVA2_CM_BASE 0x48004000
++#define OMAP_IVA2_CM_SIZE 0x1000
++
++#define OMAP_PER_CM_BASE 0x48005000
++#define OMAP_PER_CM_SIZE 0x1000
++
++#define OMAP_PER_PRM_BASE 0x48307000
++#define OMAP_PER_PRM_SIZE 0x1000
++
++#define OMAP_CORE_PRM_BASE 0x48306A00
++#define OMAP_CORE_PRM_SIZE 0x1000
++
++#define OMAP_SYSC_BASE 0x48002000
++#define OMAP_SYSC_SIZE 0x1000
++
++#define OMAP_DMMU_BASE 0x5D000000
++#define OMAP_DMMU_SIZE 0x1000
++
++#define OMAP_PRCM_VDD1_DOMAIN 1
++#define OMAP_PRCM_VDD2_DOMAIN 2
++
++/* GPP PROCESS CLEANUP Data structures */
++
++/* New structure (member of process context) abstracts NODE resource info */
++struct node_res_object {
++ void *hnode;
++ s32 node_allocated; /* Node status */
++ s32 heap_allocated; /* Heap status */
++ s32 streams_allocated; /* Streams status */
++ int id;
++};
++
++/* used to cache dma mapping information */
++struct bridge_dma_map_info {
++ /* direction of DMA in action, or DMA_NONE */
++ enum dma_data_direction dir;
++ /* number of elements requested by us */
++ int num_pages;
++ /* number of elements returned from dma_map_sg */
++ int sg_num;
++ /* list of buffers used in this DMA action */
++ struct scatterlist *sg;
++};
++
++/* Used for DMM mapped memory accounting */
++struct dmm_map_object {
++ struct list_head link;
++ u32 dsp_addr;
++ u32 mpu_addr;
++ u32 size;
++ u32 num_usr_pgs;
++ struct page **pages;
++ struct bridge_dma_map_info dma_info;
++};
++
++/* Used for DMM reserved memory accounting */
++struct dmm_rsv_object {
++ struct list_head link;
++ u32 dsp_reserved_addr;
++};
++
++/* New structure (member of process context) abstracts DMM resource info */
++struct dspheap_res_object {
++ s32 heap_allocated; /* DMM status */
++ u32 ul_mpu_addr;
++ u32 ul_dsp_addr;
++ u32 ul_dsp_res_addr;
++ u32 heap_size;
++ void *hprocessor;
++ struct dspheap_res_object *next;
++};
++
++/* New structure (member of process context) abstracts stream resource info */
++struct strm_res_object {
++ s32 stream_allocated; /* Stream status */
++ void *hstream;
++ u32 num_bufs;
++ u32 dir;
++ int id;
++};
++
++/* Overall Bridge process resource usage state */
++enum gpp_proc_res_state {
++ PROC_RES_ALLOCATED,
++ PROC_RES_FREED
++};
++
++/* Bridge Data */
++struct drv_data {
++ char *base_img;
++ s32 shm_size;
++ int tc_wordswapon;
++ void *drv_object;
++ void *dev_object;
++ void *mgr_object;
++};
++
++/* Process Context */
++struct process_context {
++ /* Process State */
++ enum gpp_proc_res_state res_state;
++
++ /* Handle to Processor */
++ void *hprocessor;
++
++ /* DSP Node resources */
++ struct idr *node_id;
++
++ /* DMM mapped memory resources */
++ struct list_head dmm_map_list;
++ spinlock_t dmm_map_lock;
++
++ /* DMM reserved memory resources */
++ struct list_head dmm_rsv_list;
++ spinlock_t dmm_rsv_lock;
++
++ /* DSP Heap resources */
++ struct dspheap_res_object *pdspheap_list;
++
++ /* Stream resources */
++ struct idr *stream_id;
++};
++
++/*
++ * ======== drv_create ========
++ * Purpose:
++ * Creates the Driver Object. This is done during the driver loading.
++ * There is only one Driver Object in the DSP/BIOS Bridge.
++ * Parameters:
++ * drv_obj: Location to store created DRV Object handle.
++ * Returns:
++ * 0: Sucess
++ * -ENOMEM: Failed in Memory allocation
++ * -EPERM: General Failure
++ * Requires:
++ * DRV Initialized (refs > 0 )
++ * drv_obj != NULL.
++ * Ensures:
++ * 0: - *drv_obj is a valid DRV interface to the device.
++ * - List of DevObject Created and Initialized.
++ * - List of dev_node String created and intialized.
++ * - Registry is updated with the DRV Object.
++ * !0: DRV Object not created
++ * Details:
++ * There is one Driver Object for the Driver representing
++ * the driver itself. It contains the list of device
++ * Objects and the list of Device Extensions in the system.
++ * Also it can hold other neccessary
++ * information in its storage area.
++ */
++extern int drv_create(struct drv_object **drv_obj);
++
++/*
++ * ======== drv_destroy ========
++ * Purpose:
++ * destroys the Dev Object list, DrvExt list
++ * and destroy the DRV object
++ * Called upon driver unLoading.or unsuccesful loading of the driver.
++ * Parameters:
++ * driver_obj: Handle to Driver object .
++ * Returns:
++ * 0: Success.
++ * -EPERM: Failed to destroy DRV Object
++ * Requires:
++ * DRV Initialized (cRegs > 0 )
++ * hdrv_obj is not NULL and a valid DRV handle .
++ * List of DevObject is Empty.
++ * List of DrvExt is Empty
++ * Ensures:
++ * 0: - DRV Object destroyed and hdrv_obj is not a valid
++ * DRV handle.
++ * - Registry is updated with "0" as the DRV Object.
++ */
++extern int drv_destroy(struct drv_object *driver_obj);
++
++/*
++ * ======== drv_exit ========
++ * Purpose:
++ * Exit the DRV module, freeing any modules initialized in drv_init.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * Ensures:
++ */
++extern void drv_exit(void);
++
++/*
++ * ======== drv_get_first_dev_object ========
++ * Purpose:
++ * Returns the Ptr to the FirstDev Object in the List
++ * Parameters:
++ * Requires:
++ * DRV Initialized
++ * Returns:
++ * dw_dev_object: Ptr to the First Dev Object as a u32
++ * 0 if it fails to retrieve the First Dev Object
++ * Ensures:
++ */
++extern u32 drv_get_first_dev_object(void);
++
++/*
++ * ======== drv_get_first_dev_extension ========
++ * Purpose:
++ * Returns the Ptr to the First Device Extension in the List
++ * Parameters:
++ * Requires:
++ * DRV Initialized
++ * Returns:
++ * dw_dev_extension: Ptr to the First Device Extension as a u32
++ * 0: Failed to Get the Device Extension
++ * Ensures:
++ */
++extern u32 drv_get_first_dev_extension(void);
++
++/*
++ * ======== drv_get_dev_object ========
++ * Purpose:
++ * Given a index, returns a handle to DevObject from the list
++ * Parameters:
++ * hdrv_obj: Handle to the Manager
++ * device_obj: Location to store the Dev Handle
++ * Requires:
++ * DRV Initialized
++ * index >= 0
++ * hdrv_obj is not NULL and Valid DRV Object
++ * device_obj is not NULL
++ * Device Object List not Empty
++ * Returns:
++ * 0: Success
++ * -EPERM: Failed to Get the Dev Object
++ * Ensures:
++ * 0: *device_obj != NULL
++ * -EPERM: *device_obj = NULL
++ */
++extern int drv_get_dev_object(u32 index,
++ struct drv_object *hdrv_obj,
++ struct dev_object **device_obj);
++
++/*
++ * ======== drv_get_next_dev_object ========
++ * Purpose:
++ * Returns the Ptr to the Next Device Object from the the List
++ * Parameters:
++ * hdev_obj: Handle to the Device Object
++ * Requires:
++ * DRV Initialized
++ * hdev_obj != 0
++ * Returns:
++ * dw_dev_object: Ptr to the Next Dev Object as a u32
++ * 0: If it fail to get the next Dev Object.
++ * Ensures:
++ */
++extern u32 drv_get_next_dev_object(u32 hdev_obj);
++
++/*
++ * ======== drv_get_next_dev_extension ========
++ * Purpose:
++ * Returns the Ptr to the Next Device Extension from the the List
++ * Parameters:
++ * dev_extension: Handle to the Device Extension
++ * Requires:
++ * DRV Initialized
++ * dev_extension != 0.
++ * Returns:
++ * dw_dev_extension: Ptr to the Next Dev Extension
++ * 0: If it fail to Get the next Dev Extension
++ * Ensures:
++ */
++extern u32 drv_get_next_dev_extension(u32 dev_extension);
++
++/*
++ * ======== drv_init ========
++ * Purpose:
++ * Initialize the DRV module.
++ * Parameters:
++ * Returns:
++ * TRUE if success; FALSE otherwise.
++ * Requires:
++ * Ensures:
++ */
++extern int drv_init(void);
++
++/*
++ * ======== drv_insert_dev_object ========
++ * Purpose:
++ * Insert a DeviceObject into the list of Driver object.
++ * Parameters:
++ * driver_obj: Handle to DrvObject
++ * hdev_obj: Handle to DeviceObject to insert.
++ * Returns:
++ * 0: If successful.
++ * -EPERM: General Failure:
++ * Requires:
++ * hdrv_obj != NULL and Valid DRV Handle.
++ * hdev_obj != NULL.
++ * Ensures:
++ * 0: Device Object is inserted and the List is not empty.
++ */
++extern int drv_insert_dev_object(struct drv_object *driver_obj,
++ struct dev_object *hdev_obj);
++
++/*
++ * ======== drv_remove_dev_object ========
++ * Purpose:
++ * Search for and remove a Device object from the given list of Device Obj
++ * objects.
++ * Parameters:
++ * driver_obj: Handle to DrvObject
++ * hdev_obj: Handle to DevObject to Remove
++ * Returns:
++ * 0: Success.
++ * -EPERM: Unable to find dev_obj.
++ * Requires:
++ * hdrv_obj != NULL and a Valid DRV Handle.
++ * hdev_obj != NULL.
++ * List exists and is not empty.
++ * Ensures:
++ * List either does not exist (NULL), or is not empty if it does exist.
++ */
++extern int drv_remove_dev_object(struct drv_object *driver_obj,
++ struct dev_object *hdev_obj);
++
++/*
++ * ======== drv_request_resources ========
++ * Purpose:
++ * Assigns the Resources or Releases them.
++ * Parameters:
++ * dw_context: Path to the driver Registry Key.
++ * dev_node_strg: Ptr to dev_node String stored in the Device Ext.
++ * Returns:
++ * TRUE if success; FALSE otherwise.
++ * Requires:
++ * Ensures:
++ * The Resources are assigned based on Bus type.
++ * The hardware is initialized. Resource information is
++ * gathered from the Registry(ISA, PCMCIA)or scanned(PCI)
++ * Resource structure is stored in the registry which will be
++ * later used by the CFG module.
++ */
++extern int drv_request_resources(u32 dw_context,
++ u32 *dev_node_strg);
++
++/*
++ * ======== drv_release_resources ========
++ * Purpose:
++ * Assigns the Resources or Releases them.
++ * Parameters:
++ * dw_context: Path to the driver Registry Key.
++ * hdrv_obj: Handle to the Driver Object.
++ * Returns:
++ * TRUE if success; FALSE otherwise.
++ * Requires:
++ * Ensures:
++ * The Resources are released based on Bus type.
++ * Resource structure is deleted from the registry
++ */
++extern int drv_release_resources(u32 dw_context,
++ struct drv_object *hdrv_obj);
++
++/**
++ * drv_request_bridge_res_dsp() - Reserves shared memory for bridge.
++ * @phost_resources: pointer to host resources.
++ */
++int drv_request_bridge_res_dsp(void **phost_resources);
++
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++void bridge_recover_schedule(void);
++#endif
++
++/*
++ * ======== mem_ext_phys_pool_init ========
++ * Purpose:
++ * Uses the physical memory chunk passed for internal consitent memory
++ * allocations.
++ * physical address based on the page frame address.
++ * Parameters:
++ * pool_phys_base starting address of the physical memory pool.
++ * pool_size size of the physical memory pool.
++ * Returns:
++ * none.
++ * Requires:
++ * - MEM initialized.
++ * - valid physical address for the base and size > 0
++ */
++extern void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size);
++
++/*
++ * ======== mem_ext_phys_pool_release ========
++ */
++extern void mem_ext_phys_pool_release(void);
++
++/* ======== mem_alloc_phys_mem ========
++ * Purpose:
++ * Allocate physically contiguous, uncached memory
++ * Parameters:
++ * byte_size: Number of bytes to allocate.
++ * align_mask: Alignment Mask.
++ * physical_address: Physical address of allocated memory.
++ * Returns:
++ * Pointer to a block of memory;
++ * NULL if memory couldn't be allocated, or if byte_size == 0.
++ * Requires:
++ * MEM initialized.
++ * Ensures:
++ * The returned pointer, if not NULL, points to a valid memory block of
++ * the size requested. Returned physical address refers to physical
++ * location of memory.
++ */
++extern void *mem_alloc_phys_mem(u32 byte_size,
++ u32 align_mask, u32 *physical_address);
++
++/*
++ * ======== mem_free_phys_mem ========
++ * Purpose:
++ * Free the given block of physically contiguous memory.
++ * Parameters:
++ * virtual_address: Pointer to virtual memory region allocated
++ * by mem_alloc_phys_mem().
++ * physical_address: Pointer to physical memory region allocated
++ * by mem_alloc_phys_mem().
++ * byte_size: Size of the memory region allocated by mem_alloc_phys_mem().
++ * Returns:
++ * Requires:
++ * MEM initialized.
++ * virtual_address is a valid memory address returned by
++ * mem_alloc_phys_mem()
++ * Ensures:
++ * virtual_address is no longer a valid pointer to memory.
++ */
++extern void mem_free_phys_mem(void *virtual_address,
++ u32 physical_address, u32 byte_size);
++
++/*
++ * ======== MEM_LINEAR_ADDRESS ========
++ * Purpose:
++ * Get the linear address corresponding to the given physical address.
++ * Parameters:
++ * phys_addr: Physical address to be mapped.
++ * byte_size: Number of bytes in physical range to map.
++ * Returns:
++ * The corresponding linear address, or NULL if unsuccessful.
++ * Requires:
++ * MEM initialized.
++ * Ensures:
++ * Notes:
++ * If valid linear address is returned, be sure to call
++ * MEM_UNMAP_LINEAR_ADDRESS().
++ */
++#define MEM_LINEAR_ADDRESS(phy_addr, byte_size) phy_addr
++
++/*
++ * ======== MEM_UNMAP_LINEAR_ADDRESS ========
++ * Purpose:
++ * Unmap the linear address mapped in MEM_LINEAR_ADDRESS.
++ * Parameters:
++ * base_addr: Ptr to mapped memory (as returned by MEM_LINEAR_ADDRESS()).
++ * Returns:
++ * Requires:
++ * - MEM initialized.
++ * - base_addr is a valid linear address mapped in MEM_LINEAR_ADDRESS.
++ * Ensures:
++ * - base_addr no longer points to a valid linear address.
++ */
++#define MEM_UNMAP_LINEAR_ADDRESS(base_addr) {}
++
++#endif /* DRV_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,25 @@
++/*
++ * drvdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definition of common struct between dspdefs.h and drv.h.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DRVDEFS_
++#define DRVDEFS_
++
++/* Bridge Driver Object */
++struct drv_object;
++
++#endif /* DRVDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,475 @@
++/*
++ * dspapi-ioctl.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Contains structures and commands that are used for interaction
++ * between the DDSP API and Bridge driver.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPAPIIOCTL_
++#define DSPAPIIOCTL_
++
++#include <dspbridge/cmm.h>
++#include <dspbridge/strmdefs.h>
++#include <dspbridge/dbdcd.h>
++
++union trapped_args {
++
++ /* MGR Module */
++ struct {
++ u32 node_id;
++ struct dsp_ndbprops __user *pndb_props;
++ u32 undb_props_size;
++ u32 __user *pu_num_nodes;
++ } args_mgr_enumnode_info;
++
++ struct {
++ u32 processor_id;
++ struct dsp_processorinfo __user *processor_info;
++ u32 processor_info_size;
++ u32 __user *pu_num_procs;
++ } args_mgr_enumproc_info;
++
++ struct {
++ struct dsp_uuid *uuid_obj;
++ enum dsp_dcdobjtype obj_type;
++ char *psz_path_name;
++ } args_mgr_registerobject;
++
++ struct {
++ struct dsp_uuid *uuid_obj;
++ enum dsp_dcdobjtype obj_type;
++ } args_mgr_unregisterobject;
++
++ struct {
++ struct dsp_notification __user *__user *anotifications;
++ u32 count;
++ u32 __user *pu_index;
++ u32 utimeout;
++ } args_mgr_wait;
++
++ /* PROC Module */
++ struct {
++ u32 processor_id;
++ struct dsp_processorattrin __user *attr_in;
++ void *__user *ph_processor;
++ } args_proc_attach;
++
++ struct {
++ void *hprocessor;
++ u32 dw_cmd;
++ struct dsp_cbdata __user *pargs;
++ } args_proc_ctrl;
++
++ struct {
++ void *hprocessor;
++ } args_proc_detach;
++
++ struct {
++ void *hprocessor;
++ void *__user *node_tab;
++ u32 node_tab_size;
++ u32 __user *pu_num_nodes;
++ u32 __user *pu_allocated;
++ } args_proc_enumnode_info;
++
++ struct {
++ void *hprocessor;
++ u32 resource_type;
++ struct dsp_resourceinfo *resource_info;
++ u32 resource_info_size;
++ } args_proc_enumresources;
++
++ struct {
++ void *hprocessor;
++ struct dsp_processorstate __user *proc_state_obj;
++ u32 state_info_size;
++ } args_proc_getstate;
++
++ struct {
++ void *hprocessor;
++ u8 __user *pbuf;
++ u8 __user *psize;
++ u32 max_size;
++ } args_proc_gettrace;
++
++ struct {
++ void *hprocessor;
++ s32 argc_index;
++ char __user *__user *user_args;
++ char *__user *user_envp;
++ } args_proc_load;
++
++ struct {
++ void *hprocessor;
++ u32 event_mask;
++ u32 notify_type;
++ struct dsp_notification __user *hnotification;
++ } args_proc_register_notify;
++
++ struct {
++ void *hprocessor;
++ } args_proc_start;
++
++ struct {
++ void *hprocessor;
++ u32 ul_size;
++ void *__user *pp_rsv_addr;
++ } args_proc_rsvmem;
++
++ struct {
++ void *hprocessor;
++ u32 ul_size;
++ void *prsv_addr;
++ } args_proc_unrsvmem;
++
++ struct {
++ void *hprocessor;
++ void *pmpu_addr;
++ u32 ul_size;
++ void *req_addr;
++ void *__user *pp_map_addr;
++ u32 ul_map_attr;
++ } args_proc_mapmem;
++
++ struct {
++ void *hprocessor;
++ u32 ul_size;
++ void *map_addr;
++ } args_proc_unmapmem;
++
++ struct {
++ void *hprocessor;
++ void *pmpu_addr;
++ u32 ul_size;
++ u32 dir;
++ } args_proc_dma;
++
++ struct {
++ void *hprocessor;
++ void *pmpu_addr;
++ u32 ul_size;
++ u32 ul_flags;
++ } args_proc_flushmemory;
++
++ struct {
++ void *hprocessor;
++ } args_proc_stop;
++
++ struct {
++ void *hprocessor;
++ void *pmpu_addr;
++ u32 ul_size;
++ } args_proc_invalidatememory;
++
++ /* NODE Module */
++ struct {
++ void *hprocessor;
++ struct dsp_uuid __user *node_id_ptr;
++ struct dsp_cbdata __user *pargs;
++ struct dsp_nodeattrin __user *attr_in;
++ void *__user *ph_node;
++ } args_node_allocate;
++
++ struct {
++ void *hnode;
++ u32 usize;
++ struct dsp_bufferattr __user *pattr;
++ u8 *__user *pbuffer;
++ } args_node_allocmsgbuf;
++
++ struct {
++ void *hnode;
++ s32 prio;
++ } args_node_changepriority;
++
++ struct {
++ void *hnode;
++ u32 stream_id;
++ void *other_node;
++ u32 other_stream;
++ struct dsp_strmattr __user *pattrs;
++ struct dsp_cbdata __user *conn_param;
++ } args_node_connect;
++
++ struct {
++ void *hnode;
++ } args_node_create;
++
++ struct {
++ void *hnode;
++ } args_node_delete;
++
++ struct {
++ void *hnode;
++ struct dsp_bufferattr __user *pattr;
++ u8 *pbuffer;
++ } args_node_freemsgbuf;
++
++ struct {
++ void *hnode;
++ struct dsp_nodeattr __user *pattr;
++ u32 attr_size;
++ } args_node_getattr;
++
++ struct {
++ void *hnode;
++ struct dsp_msg __user *message;
++ u32 utimeout;
++ } args_node_getmessage;
++
++ struct {
++ void *hnode;
++ } args_node_pause;
++
++ struct {
++ void *hnode;
++ struct dsp_msg __user *message;
++ u32 utimeout;
++ } args_node_putmessage;
++
++ struct {
++ void *hnode;
++ u32 event_mask;
++ u32 notify_type;
++ struct dsp_notification __user *hnotification;
++ } args_node_registernotify;
++
++ struct {
++ void *hnode;
++ } args_node_run;
++
++ struct {
++ void *hnode;
++ int __user *pstatus;
++ } args_node_terminate;
++
++ struct {
++ void *hprocessor;
++ struct dsp_uuid __user *node_id_ptr;
++ struct dsp_ndbprops __user *node_props;
++ } args_node_getuuidprops;
++
++ /* STRM module */
++
++ struct {
++ void *hstream;
++ u32 usize;
++ u8 *__user *ap_buffer;
++ u32 num_bufs;
++ } args_strm_allocatebuffer;
++
++ struct {
++ void *hstream;
++ } args_strm_close;
++
++ struct {
++ void *hstream;
++ u8 *__user *ap_buffer;
++ u32 num_bufs;
++ } args_strm_freebuffer;
++
++ struct {
++ void *hstream;
++ void **ph_event;
++ } args_strm_geteventhandle;
++
++ struct {
++ void *hstream;
++ struct stream_info __user *stream_info;
++ u32 stream_info_size;
++ } args_strm_getinfo;
++
++ struct {
++ void *hstream;
++ bool flush_flag;
++ } args_strm_idle;
++
++ struct {
++ void *hstream;
++ u8 *pbuffer;
++ u32 dw_bytes;
++ u32 dw_buf_size;
++ u32 dw_arg;
++ } args_strm_issue;
++
++ struct {
++ void *hnode;
++ u32 direction;
++ u32 index;
++ struct strm_attr __user *attr_in;
++ void *__user *ph_stream;
++ } args_strm_open;
++
++ struct {
++ void *hstream;
++ u8 *__user *buf_ptr;
++ u32 __user *bytes;
++ u32 __user *buf_size_ptr;
++ u32 __user *pdw_arg;
++ } args_strm_reclaim;
++
++ struct {
++ void *hstream;
++ u32 event_mask;
++ u32 notify_type;
++ struct dsp_notification __user *hnotification;
++ } args_strm_registernotify;
++
++ struct {
++ void *__user *stream_tab;
++ u32 strm_num;
++ u32 __user *pmask;
++ u32 utimeout;
++ } args_strm_select;
++
++ /* CMM Module */
++ struct {
++ struct cmm_object *hcmm_mgr;
++ u32 usize;
++ struct cmm_attrs *pattrs;
++ void **pp_buf_va;
++ } args_cmm_allocbuf;
++
++ struct {
++ struct cmm_object *hcmm_mgr;
++ void *buf_pa;
++ u32 ul_seg_id;
++ } args_cmm_freebuf;
++
++ struct {
++ void *hprocessor;
++ struct cmm_object *__user *ph_cmm_mgr;
++ } args_cmm_gethandle;
++
++ struct {
++ struct cmm_object *hcmm_mgr;
++ struct cmm_info __user *cmm_info_obj;
++ } args_cmm_getinfo;
++
++ /* UTIL module */
++ struct {
++ s32 util_argc;
++ char **pp_argv;
++ } args_util_testdll;
++};
++
++/*
++ * Dspbridge Ioctl numbering scheme
++ *
++ * 7 0
++ * ---------------------------------
++ * | Module | Ioctl Number |
++ * ---------------------------------
++ * | x | x | x | 0 | 0 | 0 | 0 | 0 |
++ * ---------------------------------
++ */
++
++/* Ioctl driver identifier */
++#define DB 0xDB
++
++/*
++ * Following are used to distinguish between module ioctls, this is needed
++ * in case new ioctls are introduced.
++ */
++#define DB_MODULE_MASK 0xE0
++#define DB_IOC_MASK 0x1F
++
++/* Ioctl module masks */
++#define DB_MGR 0x0
++#define DB_PROC 0x20
++#define DB_NODE 0x40
++#define DB_STRM 0x60
++#define DB_CMM 0x80
++
++#define DB_MODULE_SHIFT 5
++
++/* Used to calculate the ioctl per dspbridge module */
++#define DB_IOC(module, num) \
++ (((module) & DB_MODULE_MASK) | ((num) & DB_IOC_MASK))
++/* Used to get dspbridge ioctl module */
++#define DB_GET_MODULE(cmd) ((cmd) & DB_MODULE_MASK)
++/* Used to get dspbridge ioctl number */
++#define DB_GET_IOC(cmd) ((cmd) & DB_IOC_MASK)
++
++/* TODO: Remove deprecated and not implemented */
++
++/* MGR Module */
++#define MGR_ENUMNODE_INFO _IOWR(DB, DB_IOC(DB_MGR, 0), unsigned long)
++#define MGR_ENUMPROC_INFO _IOWR(DB, DB_IOC(DB_MGR, 1), unsigned long)
++#define MGR_REGISTEROBJECT _IOWR(DB, DB_IOC(DB_MGR, 2), unsigned long)
++#define MGR_UNREGISTEROBJECT _IOWR(DB, DB_IOC(DB_MGR, 3), unsigned long)
++#define MGR_WAIT _IOWR(DB, DB_IOC(DB_MGR, 4), unsigned long)
++/* MGR_GET_PROC_RES Deprecated */
++#define MGR_GET_PROC_RES _IOR(DB, DB_IOC(DB_MGR, 5), unsigned long)
++
++/* PROC Module */
++#define PROC_ATTACH _IOWR(DB, DB_IOC(DB_PROC, 0), unsigned long)
++#define PROC_CTRL _IOR(DB, DB_IOC(DB_PROC, 1), unsigned long)
++/* PROC_DETACH Deprecated */
++#define PROC_DETACH _IOR(DB, DB_IOC(DB_PROC, 2), unsigned long)
++#define PROC_ENUMNODE _IOWR(DB, DB_IOC(DB_PROC, 3), unsigned long)
++#define PROC_ENUMRESOURCES _IOWR(DB, DB_IOC(DB_PROC, 4), unsigned long)
++#define PROC_GET_STATE _IOWR(DB, DB_IOC(DB_PROC, 5), unsigned long)
++#define PROC_GET_TRACE _IOWR(DB, DB_IOC(DB_PROC, 6), unsigned long)
++#define PROC_LOAD _IOW(DB, DB_IOC(DB_PROC, 7), unsigned long)
++#define PROC_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_PROC, 8), unsigned long)
++#define PROC_START _IOW(DB, DB_IOC(DB_PROC, 9), unsigned long)
++#define PROC_RSVMEM _IOWR(DB, DB_IOC(DB_PROC, 10), unsigned long)
++#define PROC_UNRSVMEM _IOW(DB, DB_IOC(DB_PROC, 11), unsigned long)
++#define PROC_MAPMEM _IOWR(DB, DB_IOC(DB_PROC, 12), unsigned long)
++#define PROC_UNMAPMEM _IOR(DB, DB_IOC(DB_PROC, 13), unsigned long)
++#define PROC_FLUSHMEMORY _IOW(DB, DB_IOC(DB_PROC, 14), unsigned long)
++#define PROC_STOP _IOWR(DB, DB_IOC(DB_PROC, 15), unsigned long)
++#define PROC_INVALIDATEMEMORY _IOW(DB, DB_IOC(DB_PROC, 16), unsigned long)
++#define PROC_BEGINDMA _IOW(DB, DB_IOC(DB_PROC, 17), unsigned long)
++#define PROC_ENDDMA _IOW(DB, DB_IOC(DB_PROC, 18), unsigned long)
++
++/* NODE Module */
++#define NODE_ALLOCATE _IOWR(DB, DB_IOC(DB_NODE, 0), unsigned long)
++#define NODE_ALLOCMSGBUF _IOWR(DB, DB_IOC(DB_NODE, 1), unsigned long)
++#define NODE_CHANGEPRIORITY _IOW(DB, DB_IOC(DB_NODE, 2), unsigned long)
++#define NODE_CONNECT _IOW(DB, DB_IOC(DB_NODE, 3), unsigned long)
++#define NODE_CREATE _IOW(DB, DB_IOC(DB_NODE, 4), unsigned long)
++#define NODE_DELETE _IOW(DB, DB_IOC(DB_NODE, 5), unsigned long)
++#define NODE_FREEMSGBUF _IOW(DB, DB_IOC(DB_NODE, 6), unsigned long)
++#define NODE_GETATTR _IOWR(DB, DB_IOC(DB_NODE, 7), unsigned long)
++#define NODE_GETMESSAGE _IOWR(DB, DB_IOC(DB_NODE, 8), unsigned long)
++#define NODE_PAUSE _IOW(DB, DB_IOC(DB_NODE, 9), unsigned long)
++#define NODE_PUTMESSAGE _IOW(DB, DB_IOC(DB_NODE, 10), unsigned long)
++#define NODE_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_NODE, 11), unsigned long)
++#define NODE_RUN _IOW(DB, DB_IOC(DB_NODE, 12), unsigned long)
++#define NODE_TERMINATE _IOWR(DB, DB_IOC(DB_NODE, 13), unsigned long)
++#define NODE_GETUUIDPROPS _IOWR(DB, DB_IOC(DB_NODE, 14), unsigned long)
++
++/* STRM Module */
++#define STRM_ALLOCATEBUFFER _IOWR(DB, DB_IOC(DB_STRM, 0), unsigned long)
++#define STRM_CLOSE _IOW(DB, DB_IOC(DB_STRM, 1), unsigned long)
++#define STRM_FREEBUFFER _IOWR(DB, DB_IOC(DB_STRM, 2), unsigned long)
++#define STRM_GETEVENTHANDLE _IO(DB, DB_IOC(DB_STRM, 3)) /* Not Impl'd */
++#define STRM_GETINFO _IOWR(DB, DB_IOC(DB_STRM, 4), unsigned long)
++#define STRM_IDLE _IOW(DB, DB_IOC(DB_STRM, 5), unsigned long)
++#define STRM_ISSUE _IOW(DB, DB_IOC(DB_STRM, 6), unsigned long)
++#define STRM_OPEN _IOWR(DB, DB_IOC(DB_STRM, 7), unsigned long)
++#define STRM_RECLAIM _IOWR(DB, DB_IOC(DB_STRM, 8), unsigned long)
++#define STRM_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_STRM, 9), unsigned long)
++#define STRM_SELECT _IOWR(DB, DB_IOC(DB_STRM, 10), unsigned long)
++
++/* CMM Module */
++#define CMM_ALLOCBUF _IO(DB, DB_IOC(DB_CMM, 0)) /* Not Impl'd */
++#define CMM_FREEBUF _IO(DB, DB_IOC(DB_CMM, 1)) /* Not Impl'd */
++#define CMM_GETHANDLE _IOR(DB, DB_IOC(DB_CMM, 2), unsigned long)
++#define CMM_GETINFO _IOR(DB, DB_IOC(DB_CMM, 3), unsigned long)
++
++#endif /* DSPAPIIOCTL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspapi.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspapi.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,167 @@
++/*
++ * dspapi.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Includes the wrapper functions called directly by the
++ * DeviceIOControl interface.
++ *
++ * Notes:
++ * Bridge services exported to Bridge driver are initialized by the DSPAPI on
++ * behalf of the Bridge driver. Bridge driver must not call module Init/Exit
++ * functions.
++ *
++ * To ensure Bridge driver binary compatibility across different platforms,
++ * for the same processor, a Bridge driver must restrict its usage of system
++ * services to those exported by the DSPAPI library.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPAPI_
++#define DSPAPI_
++
++#include <dspbridge/dspapi-ioctl.h>
++
++/* This BRD API Library Version: */
++#define BRD_API_MAJOR_VERSION (u32)8 /* .8x - Alpha, .9x - Beta, 1.x FCS */
++#define BRD_API_MINOR_VERSION (u32)0
++
++/*
++ * ======== api_call_dev_ioctl ========
++ * Purpose:
++ * Call the (wrapper) function for the corresponding API IOCTL.
++ * Parameters:
++ * cmd: IOCTL id, base 0.
++ * args: Argument structure.
++ * result:
++ * Returns:
++ * 0 if command called; -EINVAL if command not in IOCTL
++ * table.
++ * Requires:
++ * Ensures:
++ */
++extern int api_call_dev_ioctl(unsigned int cmd,
++ union trapped_args *args,
++ u32 *result, void *pr_ctxt);
++
++/*
++ * ======== api_init ========
++ * Purpose:
++ * Initialize modules used by Bridge API.
++ * This procedure is called when the driver is loaded.
++ * Parameters:
++ * Returns:
++ * TRUE if success; FALSE otherwise.
++ * Requires:
++ * Ensures:
++ */
++extern bool api_init(void);
++
++/*
++ * ======== api_init_complete2 ========
++ * Purpose:
++ * Perform any required bridge initialization which cannot
++ * be performed in api_init() or dev_start_device() due
++ * to the fact that some services are not yet
++ * completely initialized.
++ * Parameters:
++ * Returns:
++ * 0: Allow this device to load
++ * -EPERM: Failure.
++ * Requires:
++ * Bridge API initialized.
++ * Ensures:
++ */
++extern int api_init_complete2(void);
++
++/*
++ * ======== api_exit ========
++ * Purpose:
++ * Exit all modules initialized in api_init(void).
++ * This procedure is called when the driver is unloaded.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * api_init(void) was previously called.
++ * Ensures:
++ * Resources acquired in api_init(void) are freed.
++ */
++extern void api_exit(void);
++
++/* MGR wrapper functions */
++extern u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt);
++extern u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt);
++extern u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt);
++extern u32 mgrwrap_unregister_object(union trapped_args *args, void *pr_ctxt);
++extern u32 mgrwrap_wait_for_bridge_events(union trapped_args *args,
++ void *pr_ctxt);
++
++extern u32 mgrwrap_get_process_resources_info(union trapped_args *args,
++ void *pr_ctxt);
++
++/* CPRC (Processor) wrapper Functions */
++extern u32 procwrap_attach(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_detach(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_load(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_start(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_map(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_stop(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt);
++extern u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt);
++
++/* NODE wrapper functions */
++extern u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_create(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_run(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt);
++extern u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt);
++
++/* STRM wrapper functions */
++extern u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_close(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_get_event_handle(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_open(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt);
++extern u32 strmwrap_select(union trapped_args *args, void *pr_ctxt);
++
++extern u32 cmmwrap_calloc_buf(union trapped_args *args, void *pr_ctxt);
++extern u32 cmmwrap_free_buf(union trapped_args *args, void *pr_ctxt);
++extern u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt);
++extern u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt);
++
++#endif /* DSPAPI_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h 2010-08-18 11:24:23.186056401 +0300
+@@ -0,0 +1,72 @@
++/*
++ * dspchnl.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Declares the upper edge channel class library functions required by
++ * all Bridge driver / DSP API driver interface tables. These functions are
++ * implemented by every class of Bridge channel library.
++ *
++ * Notes:
++ * The function comment headers reside in dspdefs.h.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPCHNL_
++#define DSPCHNL_
++
++extern int bridge_chnl_create(struct chnl_mgr **channel_mgr,
++ struct dev_object *hdev_obj,
++ const struct chnl_mgrattrs
++ *mgr_attrts);
++
++extern int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr);
++
++extern int bridge_chnl_open(struct chnl_object **chnl,
++ struct chnl_mgr *hchnl_mgr,
++ s8 chnl_mode,
++ u32 ch_id,
++ const struct chnl_attr
++ *pattrs);
++
++extern int bridge_chnl_close(struct chnl_object *chnl_obj);
++
++extern int bridge_chnl_add_io_req(struct chnl_object *chnl_obj,
++ void *host_buf,
++ u32 byte_size, u32 buf_size,
++ u32 dw_dsp_addr, u32 dw_arg);
++
++extern int bridge_chnl_get_ioc(struct chnl_object *chnl_obj,
++ u32 timeout, struct chnl_ioc *chan_ioc);
++
++extern int bridge_chnl_cancel_io(struct chnl_object *chnl_obj);
++
++extern int bridge_chnl_flush_io(struct chnl_object *chnl_obj,
++ u32 timeout);
++
++extern int bridge_chnl_get_info(struct chnl_object *chnl_obj,
++ struct chnl_info *channel_info);
++
++extern int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr,
++ u32 ch_id, struct chnl_mgrinfo
++ *mgr_info);
++
++extern int bridge_chnl_idle(struct chnl_object *chnl_obj,
++ u32 timeout, bool flush_data);
++
++extern int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
++ u32 event_mask,
++ u32 notify_type,
++ struct dsp_notification
++ *hnotification);
++
++#endif /* DSPCHNL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,1054 @@
++/*
++ * dspdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Bridge driver entry point and interface function declarations.
++ *
++ * Notes:
++ * The DSP API obtains it's function interface to
++ * the Bridge driver via a call to bridge_drv_entry().
++ *
++ * Bridge services exported to Bridge drivers are initialized by the
++ * DSP API on behalf of the Bridge driver.
++ *
++ * Bridge function DBC Requires and Ensures are also made by the DSP API on
++ * behalf of the Bridge driver, to simplify the Bridge driver code.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPDEFS_
++#define DSPDEFS_
++
++#include <dspbridge/brddefs.h>
++#include <dspbridge/cfgdefs.h>
++#include <dspbridge/chnlpriv.h>
++#include <dspbridge/dehdefs.h>
++#include <dspbridge/devdefs.h>
++#include <dspbridge/iodefs.h>
++#include <dspbridge/msgdefs.h>
++
++/*
++ * Any IOCTLS at or above this value are reserved for standard Bridge driver
++ * interfaces.
++ */
++#define BRD_RESERVEDIOCTLBASE 0x8000
++
++/* Handle to Bridge driver's private device context. */
++struct bridge_dev_context;
++
++/*--------------------------------------------------------------------------- */
++/* BRIDGE DRIVER FUNCTION TYPES */
++/*--------------------------------------------------------------------------- */
++
++/*
++ * ======== bridge_brd_monitor ========
++ * Purpose:
++ * Bring the board to the BRD_IDLE (monitor) state.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device context.
++ * Returns:
++ * 0: Success.
++ * -ETIMEDOUT: Timeout occured waiting for a response from hardware.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL
++ * Ensures:
++ * 0: Board is in BRD_IDLE state;
++ * else: Board state is indeterminate.
++ */
++typedef int(*fxn_brd_monitor) (struct bridge_dev_context *dev_ctxt);
++
++/*
++ * ======== fxn_brd_setstate ========
++ * Purpose:
++ * Sets the Bridge driver state
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * brd_state: Board state
++ * Returns:
++ * 0: Success.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL;
++ * brd_state <= BRD_LASTSTATE.
++ * Ensures:
++ * brd_state <= BRD_LASTSTATE.
++ * Update the Board state to the specified state.
++ */
++typedef int(*fxn_brd_setstate) (struct bridge_dev_context
++ * dev_ctxt, u32 brd_state);
++
++/*
++ * ======== bridge_brd_start ========
++ * Purpose:
++ * Bring board to the BRD_RUNNING (start) state.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device context.
++ * dsp_addr: DSP address at which to start execution.
++ * Returns:
++ * 0: Success.
++ * -ETIMEDOUT: Timeout occured waiting for a response from hardware.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL
++ * Board is in monitor (BRD_IDLE) state.
++ * Ensures:
++ * 0: Board is in BRD_RUNNING state.
++ * Interrupts to the PC are enabled.
++ * else: Board state is indeterminate.
++ */
++typedef int(*fxn_brd_start) (struct bridge_dev_context
++ * dev_ctxt, u32 dsp_addr);
++
++/*
++ * ======== bridge_brd_mem_copy ========
++ * Purpose:
++ * Copy memory from one DSP address to another
++ * Parameters:
++ * dev_context: Pointer to context handle
++ * dsp_dest_addr: DSP address to copy to
++ * dsp_src_addr: DSP address to copy from
++ * ul_num_bytes: Number of bytes to copy
++ * mem_type: What section of memory to copy to
++ * Returns:
++ * 0: Success.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_context != NULL
++ * Ensures:
++ * 0: Board is in BRD_RUNNING state.
++ * Interrupts to the PC are enabled.
++ * else: Board state is indeterminate.
++ */
++typedef int(*fxn_brd_memcopy) (struct bridge_dev_context
++ * dev_ctxt,
++ u32 dsp_dest_addr,
++ u32 dsp_src_addr,
++ u32 ul_num_bytes, u32 mem_type);
++/*
++ * ======== bridge_brd_mem_write ========
++ * Purpose:
++ * Write a block of host memory into a DSP address, into a given memory
++ * space. Unlike bridge_brd_write, this API does reset the DSP
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * dsp_addr: Address on DSP board (Destination).
++ * host_buf: Pointer to host buffer (Source).
++ * ul_num_bytes: Number of bytes to transfer.
++ * mem_type: Memory space on DSP to which to transfer.
++ * Returns:
++ * 0: Success.
++ * -ETIMEDOUT: Timeout occured waiting for a response from hardware.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL;
++ * host_buf != NULL.
++ * Ensures:
++ */
++typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
++ * dev_ctxt,
++ u8 *host_buf,
++ u32 dsp_addr, u32 ul_num_bytes,
++ u32 mem_type);
++
++/*
++ * ======== bridge_brd_mem_map ========
++ * Purpose:
++ * Map a MPU memory region to a DSP/IVA memory space
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * ul_mpu_addr: MPU memory region start address.
++ * virt_addr: DSP/IVA memory region u8 address.
++ * ul_num_bytes: Number of bytes to map.
++ * map_attrs: Mapping attributes (e.g. endianness).
++ * Returns:
++ * 0: Success.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL;
++ * Ensures:
++ */
++typedef int(*fxn_brd_memmap) (struct bridge_dev_context
++ * dev_ctxt, u32 ul_mpu_addr,
++ u32 virt_addr, u32 ul_num_bytes,
++ u32 map_attr,
++ struct page **mapped_pages);
++
++/*
++ * ======== bridge_brd_mem_un_map ========
++ * Purpose:
++ * UnMap an MPU memory region from DSP/IVA memory space
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * virt_addr: DSP/IVA memory region u8 address.
++ * ul_num_bytes: Number of bytes to unmap.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL;
++ * Ensures:
++ */
++typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
++ * dev_ctxt,
++ u32 virt_addr, u32 ul_num_bytes);
++
++/*
++ * ======== bridge_brd_stop ========
++ * Purpose:
++ * Bring board to the BRD_STOPPED state.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device context.
++ * Returns:
++ * 0: Success.
++ * -ETIMEDOUT: Timeout occured waiting for a response from hardware.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL
++ * Ensures:
++ * 0: Board is in BRD_STOPPED (stop) state;
++ * Interrupts to the PC are disabled.
++ * else: Board state is indeterminate.
++ */
++typedef int(*fxn_brd_stop) (struct bridge_dev_context *dev_ctxt);
++
++/*
++ * ======== bridge_brd_status ========
++ * Purpose:
++ * Report the current state of the board.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device context.
++ * board_state: Ptr to BRD status variable.
++ * Returns:
++ * 0:
++ * Requires:
++ * board_state != NULL;
++ * dev_ctxt != NULL
++ * Ensures:
++ * *board_state is one of
++ * {BRD_STOPPED, BRD_IDLE, BRD_RUNNING, BRD_UNKNOWN};
++ */
++typedef int(*fxn_brd_status) (struct bridge_dev_context *dev_ctxt,
++ int *board_state);
++
++/*
++ * ======== bridge_brd_read ========
++ * Purpose:
++ * Read a block of DSP memory, from a given memory space, into a host
++ * buffer.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * host_buf: Pointer to host buffer (Destination).
++ * dsp_addr: Address on DSP board (Source).
++ * ul_num_bytes: Number of bytes to transfer.
++ * mem_type: Memory space on DSP from which to transfer.
++ * Returns:
++ * 0: Success.
++ * -ETIMEDOUT: Timeout occured waiting for a response from hardware.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL;
++ * host_buf != NULL.
++ * Ensures:
++ * Will not write more than ul_num_bytes bytes into host_buf.
++ */
++typedef int(*fxn_brd_read) (struct bridge_dev_context *dev_ctxt,
++ u8 *host_buf,
++ u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type);
++
++/*
++ * ======== bridge_brd_write ========
++ * Purpose:
++ * Write a block of host memory into a DSP address, into a given memory
++ * space.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * dsp_addr: Address on DSP board (Destination).
++ * host_buf: Pointer to host buffer (Source).
++ * ul_num_bytes: Number of bytes to transfer.
++ * mem_type: Memory space on DSP to which to transfer.
++ * Returns:
++ * 0: Success.
++ * -ETIMEDOUT: Timeout occured waiting for a response from hardware.
++ * -EPERM: Other, unspecified error.
++ * Requires:
++ * dev_ctxt != NULL;
++ * host_buf != NULL.
++ * Ensures:
++ */
++typedef int(*fxn_brd_write) (struct bridge_dev_context *dev_ctxt,
++ u8 *host_buf,
++ u32 dsp_addr,
++ u32 ul_num_bytes, u32 mem_type);
++
++/*
++ * ======== bridge_chnl_create ========
++ * Purpose:
++ * Create a channel manager object, responsible for opening new channels
++ * and closing old ones for a given 'Bridge board.
++ * Parameters:
++ * channel_mgr: Location to store a channel manager object on output.
++ * hdev_obj: Handle to a device object.
++ * mgr_attrts: Channel manager attributes.
++ * mgr_attrts->max_channels: Max channels
++ * mgr_attrts->birq: Channel's I/O IRQ number.
++ * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
++ * mgr_attrts->word_size: DSP Word size in equivalent PC bytes..
++ * mgr_attrts->shm_base: Base physical address of shared memory, if any.
++ * mgr_attrts->usm_length: Bytes of shared memory block.
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EIO: Unable to plug ISR for given IRQ.
++ * -EFAULT: Couldn't map physical address to a virtual one.
++ * Requires:
++ * channel_mgr != NULL.
++ * mgr_attrts != NULL
++ * mgr_attrts field are all valid:
++ * 0 < max_channels <= CHNL_MAXCHANNELS.
++ * birq <= 15.
++ * word_size > 0.
++ * hdev_obj != NULL
++ * No channel manager exists for this board.
++ * Ensures:
++ */
++typedef int(*fxn_chnl_create) (struct chnl_mgr
++ **channel_mgr,
++ struct dev_object
++ * hdev_obj,
++ const struct
++ chnl_mgrattrs * mgr_attrts);
++
++/*
++ * ======== bridge_chnl_destroy ========
++ * Purpose:
++ * Close all open channels, and destroy the channel manager.
++ * Parameters:
++ * hchnl_mgr: Channel manager object.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: hchnl_mgr was invalid.
++ * Requires:
++ * Ensures:
++ * 0: Cancels I/O on each open channel. Closes each open channel.
++ * chnl_create may subsequently be called for the same device.
++ */
++typedef int(*fxn_chnl_destroy) (struct chnl_mgr *hchnl_mgr);
++/*
++ * ======== bridge_deh_notify ========
++ * Purpose:
++ * When notified of DSP error, take appropriate action.
++ * Parameters:
++ * hdeh_mgr: Handle to DEH manager object.
++ * evnt_mask: Indicate the type of exception
++ * error_info: Error information
++ * Returns:
++ *
++ * Requires:
++ * hdeh_mgr != NULL;
++ * evnt_mask with a valid exception
++ * Ensures:
++ */
++typedef void (*fxn_deh_notify) (struct deh_mgr *hdeh_mgr,
++ u32 evnt_mask, u32 error_info);
++
++/*
++ * ======== bridge_chnl_open ========
++ * Purpose:
++ * Open a new half-duplex channel to the DSP board.
++ * Parameters:
++ * chnl: Location to store a channel object handle.
++ * hchnl_mgr: Handle to channel manager, as returned by
++ * CHNL_GetMgr().
++ * chnl_mode: One of {CHNL_MODETODSP, CHNL_MODEFROMDSP} specifies
++ * direction of data transfer.
++ * ch_id: If CHNL_PICKFREE is specified, the channel manager will
++ * select a free channel id (default);
++ * otherwise this field specifies the id of the channel.
++ * pattrs: Channel attributes. Attribute fields are as follows:
++ * pattrs->uio_reqs: Specifies the maximum number of I/O requests which can
++ * be pending at any given time. All request packets are
++ * preallocated when the channel is opened.
++ * pattrs->event_obj: This field allows the user to supply an auto reset
++ * event object for channel I/O completion notifications.
++ * It is the responsibility of the user to destroy this
++ * object AFTER closing the channel.
++ * This channel event object can be retrieved using
++ * CHNL_GetEventHandle().
++ * pattrs->hReserved: The kernel mode handle of this event object.
++ *
++ * Returns:
++ * 0: Success.
++ * -EFAULT: hchnl_mgr is invalid.
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EINVAL: Invalid number of IOReqs.
++ * -ENOSR: No free channels available.
++ * -ECHRNG: Channel ID is out of range.
++ * -EALREADY: Channel is in use.
++ * -EIO: No free IO request packets available for
++ * queuing.
++ * Requires:
++ * chnl != NULL.
++ * pattrs != NULL.
++ * pattrs->event_obj is a valid event handle.
++ * pattrs->hReserved is the kernel mode handle for pattrs->event_obj.
++ * Ensures:
++ * 0: *chnl is a valid channel.
++ * else: *chnl is set to NULL if (chnl != NULL);
++ */
++typedef int(*fxn_chnl_open) (struct chnl_object
++ **chnl,
++ struct chnl_mgr *hchnl_mgr,
++ s8 chnl_mode,
++ u32 ch_id,
++ const struct
++ chnl_attr * pattrs);
++
++/*
++ * ======== bridge_chnl_close ========
++ * Purpose:
++ * Ensures all pending I/O on this channel is cancelled, discards all
++ * queued I/O completion notifications, then frees the resources allocated
++ * for this channel, and makes the corresponding logical channel id
++ * available for subsequent use.
++ * Parameters:
++ * chnl_obj: Handle to a channel object.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid chnl_obj.
++ * Requires:
++ * No thread must be blocked on this channel's I/O completion event.
++ * Ensures:
++ * 0: chnl_obj is no longer valid.
++ */
++typedef int(*fxn_chnl_close) (struct chnl_object *chnl_obj);
++
++/*
++ * ======== bridge_chnl_add_io_req ========
++ * Purpose:
++ * Enqueue an I/O request for data transfer on a channel to the DSP.
++ * The direction (mode) is specified in the channel object. Note the DSP
++ * address is specified for channels opened in direct I/O mode.
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * host_buf: Host buffer address source.
++ * byte_size: Number of PC bytes to transfer. A zero value indicates
++ * that this buffer is the last in the output channel.
++ * A zero value is invalid for an input channel.
++ *! buf_size: Actual buffer size in host bytes.
++ * dw_dsp_addr: DSP address for transfer. (Currently ignored).
++ * dw_arg: A user argument that travels with the buffer.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid chnl_obj or host_buf.
++ * -EPERM: User cannot mark EOS on an input channel.
++ * -ECANCELED: I/O has been cancelled on this channel. No further
++ * I/O is allowed.
++ * -EPIPE: End of stream was already marked on a previous
++ * IORequest on this channel. No further I/O is expected.
++ * -EINVAL: Buffer submitted to this output channel is larger than
++ * the size of the physical shared memory output window.
++ * Requires:
++ * Ensures:
++ * 0: The buffer will be transferred if the channel is ready;
++ * otherwise, will be queued for transfer when the channel becomes
++ * ready. In any case, notifications of I/O completion are
++ * asynchronous.
++ * If byte_size is 0 for an output channel, subsequent CHNL_AddIOReq's
++ * on this channel will fail with error code -EPIPE. The
++ * corresponding IOC for this I/O request will have its status flag
++ * set to CHNL_IOCSTATEOS.
++ */
++typedef int(*fxn_chnl_addioreq) (struct chnl_object
++ * chnl_obj,
++ void *host_buf,
++ u32 byte_size,
++ u32 buf_size,
++ u32 dw_dsp_addr, u32 dw_arg);
++
++/*
++ * ======== bridge_chnl_get_ioc ========
++ * Purpose:
++ * Dequeue an I/O completion record, which contains information about the
++ * completed I/O request.
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * timeout: A value of CHNL_IOCNOWAIT will simply dequeue the
++ * first available IOC.
++ * chan_ioc: On output, contains host buffer address, bytes
++ * transferred, and status of I/O completion.
++ * chan_ioc->status: See chnldefs.h.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid chnl_obj or chan_ioc.
++ * -EREMOTEIO: CHNL_IOCNOWAIT was specified as the timeout parameter
++ * yet no I/O completions were queued.
++ * Requires:
++ * timeout == CHNL_IOCNOWAIT.
++ * Ensures:
++ * 0: if there are any remaining IOC's queued before this call
++ * returns, the channel event object will be left in a signalled
++ * state.
++ */
++typedef int(*fxn_chnl_getioc) (struct chnl_object *chnl_obj,
++ u32 timeout,
++ struct chnl_ioc *chan_ioc);
++
++/*
++ * ======== bridge_chnl_cancel_io ========
++ * Purpose:
++ * Return all I/O requests to the client which have not yet been
++ * transferred. The channel's I/O completion object is
++ * signalled, and all the I/O requests are queued as IOC's, with the
++ * status field set to CHNL_IOCSTATCANCEL.
++ * This call is typically used in abort situations, and is a prelude to
++ * chnl_close();
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid chnl_obj.
++ * Requires:
++ * Ensures:
++ * Subsequent I/O requests to this channel will not be accepted.
++ */
++typedef int(*fxn_chnl_cancelio) (struct chnl_object *chnl_obj);
++
++/*
++ * ======== bridge_chnl_flush_io ========
++ * Purpose:
++ * For an output stream (to the DSP), indicates if any IO requests are in
++ * the output request queue. For input streams (from the DSP), will
++ * cancel all pending IO requests.
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * timeout: Timeout value for flush operation.
++ * Returns:
++ * 0: Success;
++ * S_CHNLIOREQUEST: Returned if any IORequests are in the output queue.
++ * -EFAULT: Invalid chnl_obj.
++ * Requires:
++ * Ensures:
++ * 0: No I/O requests will be pending on this channel.
++ */
++typedef int(*fxn_chnl_flushio) (struct chnl_object *chnl_obj,
++ u32 timeout);
++
++/*
++ * ======== bridge_chnl_get_info ========
++ * Purpose:
++ * Retrieve information related to a channel.
++ * Parameters:
++ * chnl_obj: Handle to a valid channel object, or NULL.
++ * channel_info: Location to store channel info.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid chnl_obj or channel_info.
++ * Requires:
++ * Ensures:
++ * 0: channel_info points to a filled in chnl_info struct,
++ * if (channel_info != NULL).
++ */
++typedef int(*fxn_chnl_getinfo) (struct chnl_object *chnl_obj,
++ struct chnl_info *channel_info);
++
++/*
++ * ======== bridge_chnl_get_mgr_info ========
++ * Purpose:
++ * Retrieve information related to the channel manager.
++ * Parameters:
++ * hchnl_mgr: Handle to a valid channel manager, or NULL.
++ * ch_id: Channel ID.
++ * mgr_info: Location to store channel manager info.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid hchnl_mgr or mgr_info.
++ * -ECHRNG: Invalid channel ID.
++ * Requires:
++ * Ensures:
++ * 0: mgr_info points to a filled in chnl_mgrinfo
++ * struct, if (mgr_info != NULL).
++ */
++typedef int(*fxn_chnl_getmgrinfo) (struct chnl_mgr
++ * hchnl_mgr,
++ u32 ch_id,
++ struct chnl_mgrinfo *mgr_info);
++
++/*
++ * ======== bridge_chnl_idle ========
++ * Purpose:
++ * Idle a channel. If this is an input channel, or if this is an output
++ * channel and flush_data is TRUE, all currently enqueued buffers will be
++ * dequeued (data discarded for output channel).
++ * If this is an output channel and flush_data is FALSE, this function
++ * will block until all currently buffered data is output, or the timeout
++ * specified has been reached.
++ *
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * timeout: If output channel and flush_data is FALSE, timeout value
++ * to wait for buffers to be output. (Not used for
++ * input channel).
++ * flush_data: If output channel and flush_data is TRUE, discard any
++ * currently buffered data. If FALSE, wait for currently
++ * buffered data to be output, or timeout, whichever
++ * occurs first. flush_data is ignored for input channel.
++ * Returns:
++ * 0: Success;
++ * -EFAULT: Invalid chnl_obj.
++ * -ETIMEDOUT: Timeout occurred before channel could be idled.
++ * Requires:
++ * Ensures:
++ */
++typedef int(*fxn_chnl_idle) (struct chnl_object *chnl_obj,
++ u32 timeout, bool flush_data);
++
++/*
++ * ======== bridge_chnl_register_notify ========
++ * Purpose:
++ * Register for notification of events on a channel.
++ * Parameters:
++ * chnl_obj: Channel object handle.
++ * event_mask: Type of events to be notified about: IO completion
++ * (DSP_STREAMIOCOMPLETION) or end of stream
++ * (DSP_STREAMDONE).
++ * notify_type: DSP_SIGNALEVENT.
++ * hnotification: Handle of a dsp_notification object.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory.
++ * -EINVAL: event_mask is 0 and hnotification was not
++ * previously registered.
++ * -EFAULT: NULL hnotification, hnotification event name
++ * too long, or hnotification event name NULL.
++ * Requires:
++ * Valid chnl_obj.
++ * hnotification != NULL.
++ * (event_mask & ~(DSP_STREAMIOCOMPLETION | DSP_STREAMDONE)) == 0.
++ * notify_type == DSP_SIGNALEVENT.
++ * Ensures:
++ */
++typedef int(*fxn_chnl_registernotify)
++ (struct chnl_object *chnl_obj,
++ u32 event_mask, u32 notify_type, struct dsp_notification *hnotification);
++
++/*
++ * ======== bridge_dev_create ========
++ * Purpose:
++ * Complete creation of the device object for this board.
++ * Parameters:
++ * device_ctx: Ptr to location to store a Bridge device context.
++ * hdev_obj: Handle to a Device Object, created and managed by DSP API.
++ * config_param: Ptr to configuration parameters provided by the
++ * Configuration Manager during device loading.
++ * pDspConfig: DSP resources, as specified in the registry key for this
++ * device.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Unable to allocate memory for device context.
++ * Requires:
++ * device_ctx != NULL;
++ * hdev_obj != NULL;
++ * config_param != NULL;
++ * pDspConfig != NULL;
++ * Fields in config_param and pDspConfig contain valid values.
++ * Ensures:
++ * 0: All Bridge driver specific DSP resource and other
++ * board context has been allocated.
++ * -ENOMEM: Bridge failed to allocate resources.
++ * Any acquired resources have been freed. The DSP API
++ * will not call bridge_dev_destroy() if
++ * bridge_dev_create() fails.
++ * Details:
++ * Called during the CONFIGMG's Device_Init phase. Based on host and
++ * DSP configuration information, create a board context, a handle to
++ * which is passed into other Bridge BRD and CHNL functions. The
++ * board context contains state information for the device. Since the
++ * addresses of all pointer parameters may be invalid when this
++ * function returns, they must not be stored into the device context
++ * structure.
++ */
++typedef int(*fxn_dev_create) (struct bridge_dev_context
++ **device_ctx,
++ struct dev_object
++ * hdev_obj,
++ struct cfg_hostres
++ * config_param);
++
++/*
++ * ======== bridge_dev_ctrl ========
++ * Purpose:
++ * Bridge driver specific interface.
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device info.
++ * dw_cmd: Bridge driver defined command code.
++ * pargs: Pointer to an arbitrary argument structure.
++ * Returns:
++ * 0 or -EPERM. Actual command error codes should be passed back in
++ * the pargs structure, and are defined by the Bridge driver implementor.
++ * Requires:
++ * All calls are currently assumed to be synchronous. There are no
++ * IOCTL completion routines provided.
++ * Ensures:
++ */
++typedef int(*fxn_dev_ctrl) (struct bridge_dev_context *dev_ctxt,
++ u32 dw_cmd, void *pargs);
++
++/*
++ * ======== bridge_dev_destroy ========
++ * Purpose:
++ * Deallocate Bridge device extension structures and all other resources
++ * acquired by the Bridge driver.
++ * No calls to other Bridge driver functions may subsequently
++ * occur, except for bridge_dev_create().
++ * Parameters:
++ * dev_ctxt: Handle to Bridge driver defined device information.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Failed to release a resource previously acquired.
++ * Requires:
++ * dev_ctxt != NULL;
++ * Ensures:
++ * 0: Device context is freed.
++ */
++typedef int(*fxn_dev_destroy) (struct bridge_dev_context *dev_ctxt);
++
++/*
++ * ======== bridge_io_create ========
++ * Purpose:
++ * Create an object that manages I/O between CHNL and msg_ctrl.
++ * Parameters:
++ * io_man: Location to store IO manager on output.
++ * hchnl_mgr: Handle to channel manager.
++ * hmsg_mgr: Handle to message manager.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failure.
++ * -EPERM: Creation failed.
++ * Requires:
++ * hdev_obj != NULL;
++ * Channel manager already created;
++ * Message manager already created;
++ * mgr_attrts != NULL;
++ * io_man != NULL;
++ * Ensures:
++ */
++typedef int(*fxn_io_create) (struct io_mgr **io_man,
++ struct dev_object *hdev_obj,
++ const struct io_attrs *mgr_attrts);
++
++/*
++ * ======== bridge_io_destroy ========
++ * Purpose:
++ * Destroy object created in bridge_io_create.
++ * Parameters:
++ * hio_mgr: IO Manager.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failure.
++ * -EPERM: Creation failed.
++ * Requires:
++ * Valid hio_mgr;
++ * Ensures:
++ */
++typedef int(*fxn_io_destroy) (struct io_mgr *hio_mgr);
++
++/*
++ * ======== bridge_io_on_loaded ========
++ * Purpose:
++ * Called whenever a program is loaded to update internal data. For
++ * example, if shared memory is used, this function would update the
++ * shared memory location and address.
++ * Parameters:
++ * hio_mgr: IO Manager.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Internal failure occurred.
++ * Requires:
++ * Valid hio_mgr;
++ * Ensures:
++ */
++typedef int(*fxn_io_onloaded) (struct io_mgr *hio_mgr);
++
++/*
++ * ======== fxn_io_getprocload ========
++ * Purpose:
++ * Called to get the Processor's current and predicted load
++ * Parameters:
++ * hio_mgr: IO Manager.
++ * proc_load_stat Processor Load statistics
++ * Returns:
++ * 0: Success.
++ * -EPERM: Internal failure occurred.
++ * Requires:
++ * Valid hio_mgr;
++ * Ensures:
++ */
++typedef int(*fxn_io_getprocload) (struct io_mgr *hio_mgr,
++ struct dsp_procloadstat *
++ proc_load_stat);
++
++/*
++ * ======== bridge_msg_create ========
++ * Purpose:
++ * Create an object to manage message queues. Only one of these objects
++ * can exist per device object.
++ * Parameters:
++ * msg_man: Location to store msg_ctrl manager on output.
++ * hdev_obj: Handle to a device object.
++ * msg_callback: Called whenever an RMS_EXIT message is received.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory.
++ * Requires:
++ * msg_man != NULL.
++ * msg_callback != NULL.
++ * hdev_obj != NULL.
++ * Ensures:
++ */
++typedef int(*fxn_msg_create)
++ (struct msg_mgr **msg_man,
++ struct dev_object *hdev_obj, msg_onexit msg_callback);
++
++/*
++ * ======== bridge_msg_create_queue ========
++ * Purpose:
++ * Create a msg_ctrl queue for sending or receiving messages from a Message
++ * node on the DSP.
++ * Parameters:
++ * hmsg_mgr: msg_ctrl queue manager handle returned from
++ * bridge_msg_create.
++ * msgq: Location to store msg_ctrl queue on output.
++ * msgq_id: Identifier for messages (node environment pointer).
++ * max_msgs: Max number of simultaneous messages for the node.
++ * h: Handle passed to hmsg_mgr->msg_callback().
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory.
++ * Requires:
++ * msgq != NULL.
++ * h != NULL.
++ * max_msgs > 0.
++ * Ensures:
++ * msgq !=NULL <==> 0.
++ */
++typedef int(*fxn_msg_createqueue)
++ (struct msg_mgr *hmsg_mgr,
++ struct msg_queue **msgq, u32 msgq_id, u32 max_msgs, void *h);
++
++/*
++ * ======== bridge_msg_delete ========
++ * Purpose:
++ * Delete a msg_ctrl manager allocated in bridge_msg_create().
++ * Parameters:
++ * hmsg_mgr: Handle returned from bridge_msg_create().
++ * Returns:
++ * Requires:
++ * Valid hmsg_mgr.
++ * Ensures:
++ */
++typedef void (*fxn_msg_delete) (struct msg_mgr *hmsg_mgr);
++
++/*
++ * ======== bridge_msg_delete_queue ========
++ * Purpose:
++ * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
++ * Parameters:
++ * msg_queue_obj: Handle to msg_ctrl queue returned from
++ * bridge_msg_create_queue.
++ * Returns:
++ * Requires:
++ * Valid msg_queue_obj.
++ * Ensures:
++ */
++typedef void (*fxn_msg_deletequeue) (struct msg_queue *msg_queue_obj);
++
++/*
++ * ======== bridge_msg_get ========
++ * Purpose:
++ * Get a message from a msg_ctrl queue.
++ * Parameters:
++ * msg_queue_obj: Handle to msg_ctrl queue returned from
++ * bridge_msg_create_queue.
++ * pmsg: Location to copy message into.
++ * utimeout: Timeout to wait for a message.
++ * Returns:
++ * 0: Success.
++ * -ETIME: Timeout occurred.
++ * -EPERM: No frames available for message (max_msgs too
++ * small).
++ * Requires:
++ * Valid msg_queue_obj.
++ * pmsg != NULL.
++ * Ensures:
++ */
++typedef int(*fxn_msg_get) (struct msg_queue *msg_queue_obj,
++ struct dsp_msg *pmsg, u32 utimeout);
++
++/*
++ * ======== bridge_msg_put ========
++ * Purpose:
++ * Put a message onto a msg_ctrl queue.
++ * Parameters:
++ * msg_queue_obj: Handle to msg_ctrl queue returned from
++ * bridge_msg_create_queue.
++ * pmsg: Pointer to message.
++ * utimeout: Timeout to wait for a message.
++ * Returns:
++ * 0: Success.
++ * -ETIME: Timeout occurred.
++ * -EPERM: No frames available for message (max_msgs too
++ * small).
++ * Requires:
++ * Valid msg_queue_obj.
++ * pmsg != NULL.
++ * Ensures:
++ */
++typedef int(*fxn_msg_put) (struct msg_queue *msg_queue_obj,
++ const struct dsp_msg *pmsg, u32 utimeout);
++
++/*
++ * ======== bridge_msg_register_notify ========
++ * Purpose:
++ * Register notification for when a message is ready.
++ * Parameters:
++ * msg_queue_obj: Handle to msg_ctrl queue returned from
++ * bridge_msg_create_queue.
++ * event_mask: Type of events to be notified about: Must be
++ * DSP_NODEMESSAGEREADY, or 0 to unregister.
++ * notify_type: DSP_SIGNALEVENT.
++ * hnotification: Handle of notification object.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory.
++ * Requires:
++ * Valid msg_queue_obj.
++ * hnotification != NULL.
++ * notify_type == DSP_SIGNALEVENT.
++ * event_mask == DSP_NODEMESSAGEREADY || event_mask == 0.
++ * Ensures:
++ */
++typedef int(*fxn_msg_registernotify)
++ (struct msg_queue *msg_queue_obj,
++ u32 event_mask, u32 notify_type, struct dsp_notification *hnotification);
++
++/*
++ * ======== bridge_msg_set_queue_id ========
++ * Purpose:
++ * Set message queue id to node environment. Allows bridge_msg_create_queue
++ * to be called in node_allocate, before the node environment is known.
++ * Parameters:
++ * msg_queue_obj: Handle to msg_ctrl queue returned from
++ * bridge_msg_create_queue.
++ * msgq_id: Node environment pointer.
++ * Returns:
++ * Requires:
++ * Valid msg_queue_obj.
++ * msgq_id != 0.
++ * Ensures:
++ */
++typedef void (*fxn_msg_setqueueid) (struct msg_queue *msg_queue_obj,
++ u32 msgq_id);
++
++/*
++ * Bridge Driver interface function table.
++ *
++ * The information in this table is filled in by the specific Bridge driver,
++ * and copied into the DSP API's own space. If any interface
++ * function field is set to a value of NULL, then the DSP API will
++ * consider that function not implemented, and return the error code
++ * -ENOSYS when a Bridge driver client attempts to call that function.
++ *
++ * This function table contains DSP API version numbers, which are used by the
++ * Bridge driver loader to help ensure backwards compatibility between older
++ * Bridge drivers and newer DSP API. These must be set to
++ * BRD_API_MAJOR_VERSION and BRD_API_MINOR_VERSION, respectively.
++ *
++ * A Bridge driver need not export a CHNL interface. In this case, *all* of
++ * the bridge_chnl_* entries must be set to NULL.
++ */
++struct bridge_drv_interface {
++ u32 brd_api_major_version; /* Set to BRD_API_MAJOR_VERSION. */
++ u32 brd_api_minor_version; /* Set to BRD_API_MINOR_VERSION. */
++ fxn_dev_create pfn_dev_create; /* Create device context */
++ fxn_dev_destroy pfn_dev_destroy; /* Destroy device context */
++ fxn_dev_ctrl pfn_dev_cntrl; /* Optional vendor interface */
++ fxn_brd_monitor pfn_brd_monitor; /* Load and/or start monitor */
++ fxn_brd_start pfn_brd_start; /* Start DSP program. */
++ fxn_brd_stop pfn_brd_stop; /* Stop/reset board. */
++ fxn_brd_status pfn_brd_status; /* Get current board status. */
++ fxn_brd_read pfn_brd_read; /* Read board memory */
++ fxn_brd_write pfn_brd_write; /* Write board memory. */
++ fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
++ fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
++ fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
++ fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
++ fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem to DSP mem */
++ fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
++ fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
++ fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
++ fxn_chnl_close pfn_chnl_close; /* Close a channel. */
++ fxn_chnl_addioreq pfn_chnl_add_io_req; /* Req I/O on a channel. */
++ fxn_chnl_getioc pfn_chnl_get_ioc; /* Wait for I/O completion. */
++ fxn_chnl_cancelio pfn_chnl_cancel_io; /* Cancl I/O on a channel. */
++ fxn_chnl_flushio pfn_chnl_flush_io; /* Flush I/O. */
++ fxn_chnl_getinfo pfn_chnl_get_info; /* Get channel specific info */
++ /* Get channel manager info. */
++ fxn_chnl_getmgrinfo pfn_chnl_get_mgr_info;
++ fxn_chnl_idle pfn_chnl_idle; /* Idle the channel */
++ /* Register for notif. */
++ fxn_chnl_registernotify pfn_chnl_register_notify;
++ fxn_io_create pfn_io_create; /* Create IO manager */
++ fxn_io_destroy pfn_io_destroy; /* Destroy IO manager */
++ fxn_io_onloaded pfn_io_on_loaded; /* Notify of program loaded */
++ /* Get Processor's current and predicted load */
++ fxn_io_getprocload pfn_io_get_proc_load;
++ fxn_msg_create pfn_msg_create; /* Create message manager */
++ /* Create message queue */
++ fxn_msg_createqueue pfn_msg_create_queue;
++ fxn_msg_delete pfn_msg_delete; /* Delete message manager */
++ /* Delete message queue */
++ fxn_msg_deletequeue pfn_msg_delete_queue;
++ fxn_msg_get pfn_msg_get; /* Get a message */
++ fxn_msg_put pfn_msg_put; /* Send a message */
++ /* Register for notif. */
++ fxn_msg_registernotify pfn_msg_register_notify;
++ /* Set message queue id */
++ fxn_msg_setqueueid pfn_msg_set_queue_id;
++};
++
++/*
++ * ======== bridge_drv_entry ========
++ * Purpose:
++ * Registers Bridge driver functions with the DSP API. Called only once
++ * by the DSP API. The caller will first check DSP API version
++ * compatibility, and then copy the interface functions into its own
++ * memory space.
++ * Parameters:
++ * drv_intf Pointer to a location to receive a pointer to the
++ * Bridge driver interface.
++ * Returns:
++ * Requires:
++ * The code segment this function resides in must expect to be discarded
++ * after completion.
++ * Ensures:
++ * drv_intf pointer initialized to Bridge driver's function
++ * interface. No system resources are acquired by this function.
++ * Details:
++ * Called during the Device_Init phase.
++ */
++void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
++ const char *driver_file_name);
++
++#endif /* DSPDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,43 @@
++/*
++ * dspdeh.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Defines upper edge DEH functions required by all Bridge driver/DSP API
++ * interface tables.
++ *
++ * Notes:
++ * Function comment headers reside with the function typedefs in dspdefs.h.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ * Copyright (C) 2010 Felipe Contreras
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPDEH_
++#define DSPDEH_
++
++struct deh_mgr;
++struct dev_object;
++struct dsp_notification;
++
++int bridge_deh_create(struct deh_mgr **ret_deh,
++ struct dev_object *hdev_obj);
++
++int bridge_deh_destroy(struct deh_mgr *deh);
++
++int bridge_deh_register_notify(struct deh_mgr *deh,
++ u32 event_mask,
++ u32 notify_type,
++ struct dsp_notification *hnotification);
++
++void bridge_deh_notify(struct deh_mgr *deh, int event, int info);
++
++#endif /* DSPDEH_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,62 @@
++/*
++ * dspdrv.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This is the Stream Interface for the DSP API.
++ * All Device operations are performed via DeviceIOControl.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#if !defined _DSPDRV_H_
++#define _DSPDRV_H_
++
++#define MAX_DEV 10 /* Max support of 10 devices */
++
++/*
++ * ======== dsp_deinit ========
++ * Purpose:
++ * This function is called by Device Manager to de-initialize a device.
++ * This function is not called by applications.
++ * Parameters:
++ * device_context:Handle to the device context. The XXX_Init function
++ * creates and returns this identifier.
++ * Returns:
++ * TRUE indicates the device successfully de-initialized. Otherwise it
++ * returns FALSE.
++ * Requires:
++ * device_context!= NULL. For a built in device this should never
++ * get called.
++ * Ensures:
++ */
++extern bool dsp_deinit(u32 device_context);
++
++/*
++ * ======== dsp_init ========
++ * Purpose:
++ * This function is called by Device Manager to initialize a device.
++ * This function is not called by applications
++ * Parameters:
++ * dw_context: Specifies a pointer to a string containing the registry
++ * path to the active key for the stream interface driver.
++ * HKEY_LOCAL_MACHINE\Drivers\Active
++ * Returns:
++ * Returns a handle to the device context created. This is our actual
++ * Device Object representing the DSP Device instance.
++ * Requires:
++ * Ensures:
++ * Succeeded: device context > 0
++ * Failed: device Context = 0
++ */
++extern u32 dsp_init(u32 *init_status);
++
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspio.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspio.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,41 @@
++/*
++ * dspio.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Declares the upper edge IO functions required by all Bridge driver /DSP API
++ * interface tables.
++ *
++ * Notes:
++ * Function comment headers reside in dspdefs.h.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPIO_
++#define DSPIO_
++
++#include <dspbridge/devdefs.h>
++#include <dspbridge/iodefs.h>
++
++extern int bridge_io_create(struct io_mgr **io_man,
++ struct dev_object *hdev_obj,
++ const struct io_attrs *mgr_attrts);
++
++extern int bridge_io_destroy(struct io_mgr *hio_mgr);
++
++extern int bridge_io_on_loaded(struct io_mgr *hio_mgr);
++
++extern int iva_io_on_loaded(struct io_mgr *hio_mgr);
++extern int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
++ struct dsp_procloadstat *proc_lstat);
++
++#endif /* DSPIO_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,73 @@
++/*
++ * dspioctl.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Bridge driver BRD_IOCtl reserved command definitions.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPIOCTL_
++#define DSPIOCTL_
++
++/* ------------------------------------ Hardware Abstraction Layer */
++#include <hw_defs.h>
++#include <hw_mmu.h>
++
++/*
++ * Any IOCTLS at or above this value are reserved for standard Bridge driver
++ * interfaces.
++ */
++#define BRDIOCTL_RESERVEDBASE 0x8000
++
++#define BRDIOCTL_CHNLREAD (BRDIOCTL_RESERVEDBASE + 0x10)
++#define BRDIOCTL_CHNLWRITE (BRDIOCTL_RESERVEDBASE + 0x20)
++#define BRDIOCTL_GETINTRCOUNT (BRDIOCTL_RESERVEDBASE + 0x30)
++#define BRDIOCTL_RESETINTRCOUNT (BRDIOCTL_RESERVEDBASE + 0x40)
++#define BRDIOCTL_INTERRUPTDSP (BRDIOCTL_RESERVEDBASE + 0x50)
++/* DMMU */
++#define BRDIOCTL_SETMMUCONFIG (BRDIOCTL_RESERVEDBASE + 0x60)
++/* PWR */
++#define BRDIOCTL_PWRCONTROL (BRDIOCTL_RESERVEDBASE + 0x70)
++
++/* attention, modifiers:
++ * Some of these control enumerations are made visible to user for power
++ * control, so any changes to this list, should also be updated in the user
++ * header file 'dbdefs.h' ***/
++/* These ioctls are reserved for PWR power commands for the DSP */
++#define BRDIOCTL_DEEPSLEEP (BRDIOCTL_PWRCONTROL + 0x0)
++#define BRDIOCTL_EMERGENCYSLEEP (BRDIOCTL_PWRCONTROL + 0x1)
++#define BRDIOCTL_WAKEUP (BRDIOCTL_PWRCONTROL + 0x2)
++#define BRDIOCTL_PWRENABLE (BRDIOCTL_PWRCONTROL + 0x3)
++#define BRDIOCTL_PWRDISABLE (BRDIOCTL_PWRCONTROL + 0x4)
++#define BRDIOCTL_CLK_CTRL (BRDIOCTL_PWRCONTROL + 0x7)
++/* DSP Initiated Hibernate */
++#define BRDIOCTL_PWR_HIBERNATE (BRDIOCTL_PWRCONTROL + 0x8)
++#define BRDIOCTL_PRESCALE_NOTIFY (BRDIOCTL_PWRCONTROL + 0x9)
++#define BRDIOCTL_POSTSCALE_NOTIFY (BRDIOCTL_PWRCONTROL + 0xA)
++#define BRDIOCTL_CONSTRAINT_REQUEST (BRDIOCTL_PWRCONTROL + 0xB)
++
++/* Number of actual DSP-MMU TLB entrries */
++#define BRDIOCTL_NUMOFMMUTLB 32
++
++struct bridge_ioctl_extproc {
++ u32 ul_dsp_va; /* DSP virtual address */
++ u32 ul_gpp_pa; /* GPP physical address */
++ /* GPP virtual address. __va does not work for ioremapped addresses */
++ u32 ul_gpp_va;
++ u32 ul_size; /* Size of the mapped memory in bytes */
++ enum hw_endianism_t endianism;
++ enum hw_mmu_mixed_size_t mixed_mode;
++ enum hw_element_size_t elem_size;
++};
++
++#endif /* DSPIOCTL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,56 @@
++/*
++ * dspmsg.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Declares the upper edge message class library functions required by
++ * all Bridge driver / DSP API interface tables. These functions are
++ * implemented by every class of Bridge driver channel library.
++ *
++ * Notes:
++ * Function comment headers reside in dspdefs.h.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef DSPMSG_
++#define DSPMSG_
++
++#include <dspbridge/msgdefs.h>
++
++extern int bridge_msg_create(struct msg_mgr **msg_man,
++ struct dev_object *hdev_obj,
++ msg_onexit msg_callback);
++
++extern int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
++ struct msg_queue **msgq,
++ u32 msgq_id, u32 max_msgs, void *arg);
++
++extern void bridge_msg_delete(struct msg_mgr *hmsg_mgr);
++
++extern void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj);
++
++extern int bridge_msg_get(struct msg_queue *msg_queue_obj,
++ struct dsp_msg *pmsg, u32 utimeout);
++
++extern int bridge_msg_put(struct msg_queue *msg_queue_obj,
++ const struct dsp_msg *pmsg, u32 utimeout);
++
++extern int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
++ u32 event_mask,
++ u32 notify_type,
++ struct dsp_notification
++ *hnotification);
++
++extern void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj,
++ u32 msgq_id);
++
++#endif /* DSPMSG_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,492 @@
++/*
++ * dynamic_loader.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _DYNAMIC_LOADER_H_
++#define _DYNAMIC_LOADER_H_
++#include <linux/kernel.h>
++#include <linux/types.h>
++
++/*
++ * Dynamic Loader
++ *
++ * The function of the dynamic loader is to load a "module" containing
++ * instructions for a "target" processor into that processor. In the process
++ * it assigns memory for the module, resolves symbol references made by the
++ * module, and remembers symbols defined by the module.
++ *
++ * The dynamic loader is parameterized for a particular system by 4 classes
++ * that supply the module and system specific functions it requires
++ */
++ /* The read functions for the module image to be loaded */
++struct dynamic_loader_stream;
++
++ /* This class defines "host" symbol and support functions */
++struct dynamic_loader_sym;
++
++ /* This class defines the allocator for "target" memory */
++struct dynamic_loader_allocate;
++
++ /* This class defines the copy-into-target-memory functions */
++struct dynamic_loader_initialize;
++
++/*
++ * Option flags to modify the behavior of module loading
++ */
++#define DLOAD_INITBSS 0x1 /* initialize BSS sections to zero */
++#define DLOAD_BIGEND 0x2 /* require big-endian load module */
++#define DLOAD_LITTLE 0x4 /* require little-endian load module */
++
++/*****************************************************************************
++ * Procedure dynamic_load_module
++ *
++ * Parameters:
++ * module The input stream that supplies the module image
++ * syms Host-side symbol table and malloc/free functions
++ * alloc Target-side memory allocation
++ * init Target-side memory initialization, or NULL for symbol read only
++ * options Option flags DLOAD_*
++ * mhandle A module handle for use with Dynamic_Unload
++ *
++ * Effect:
++ * The module image is read using *module. Target storage for the new image is
++ * obtained from *alloc. Symbols defined and referenced by the module are
++ * managed using *syms. The image is then relocated and references resolved
++ * as necessary, and the resulting executable bits are placed into target memory
++ * using *init.
++ *
++ * Returns:
++ * On a successful load, a module handle is placed in *mhandle, and zero is
++ * returned. On error, the number of errors detected is returned. Individual
++ * errors are reported during the load process using syms->error_report().
++ **************************************************************************** */
++extern int dynamic_load_module(
++ /* the source for the module image */
++ struct dynamic_loader_stream *module,
++ /* host support for symbols and storage */
++ struct dynamic_loader_sym *syms,
++ /* the target memory allocator */
++ struct dynamic_loader_allocate *alloc,
++ /* the target memory initializer */
++ struct dynamic_loader_initialize *init,
++ unsigned options, /* option flags */
++ /* the returned module handle */
++ void **mhandle);
++
++/*****************************************************************************
++ * Procedure dynamic_open_module
++ *
++ * Parameters:
++ * module The input stream that supplies the module image
++ * syms Host-side symbol table and malloc/free functions
++ * alloc Target-side memory allocation
++ * init Target-side memory initialization, or NULL for symbol read only
++ * options Option flags DLOAD_*
++ * mhandle A module handle for use with Dynamic_Unload
++ *
++ * Effect:
++ * The module image is read using *module. Target storage for the new image is
++ * obtained from *alloc. Symbols defined and referenced by the module are
++ * managed using *syms. The image is then relocated and references resolved
++ * as necessary, and the resulting executable bits are placed into target memory
++ * using *init.
++ *
++ * Returns:
++ * On a successful load, a module handle is placed in *mhandle, and zero is
++ * returned. On error, the number of errors detected is returned. Individual
++ * errors are reported during the load process using syms->error_report().
++ **************************************************************************** */
++extern int dynamic_open_module(
++ /* the source for the module image */
++ struct dynamic_loader_stream *module,
++ /* host support for symbols and storage */
++ struct dynamic_loader_sym *syms,
++ /* the target memory allocator */
++ struct dynamic_loader_allocate *alloc,
++ /* the target memory initializer */
++ struct dynamic_loader_initialize *init,
++ unsigned options, /* option flags */
++ /* the returned module handle */
++ void **mhandle);
++
++/*****************************************************************************
++ * Procedure dynamic_unload_module
++ *
++ * Parameters:
++ * mhandle A module handle from dynamic_load_module
++ * syms Host-side symbol table and malloc/free functions
++ * alloc Target-side memory allocation
++ *
++ * Effect:
++ * The module specified by mhandle is unloaded. Unloading causes all
++ * target memory to be deallocated, all symbols defined by the module to
++ * be purged, and any host-side storage used by the dynamic loader for
++ * this module to be released.
++ *
++ * Returns:
++ * Zero for success. On error, the number of errors detected is returned.
++ * Individual errors are reported using syms->error_report().
++ **************************************************************************** */
++extern int dynamic_unload_module(void *mhandle, /* the module
++ * handle */
++ /* host support for symbols and
++ * storage */
++ struct dynamic_loader_sym *syms,
++ /* the target memory allocator */
++ struct dynamic_loader_allocate *alloc,
++ /* the target memory initializer */
++ struct dynamic_loader_initialize *init);
++
++/*****************************************************************************
++ *****************************************************************************
++ * A class used by the dynamic loader for input of the module image
++ *****************************************************************************
++ **************************************************************************** */
++struct dynamic_loader_stream {
++/* public: */
++ /*************************************************************************
++ * read_buffer
++ *
++ * PARAMETERS :
++ * buffer Pointer to the buffer to fill
++ * bufsiz Amount of data desired in sizeof() units
++ *
++ * EFFECT :
++ * Reads the specified amount of data from the module input stream
++ * into the specified buffer. Returns the amount of data read in sizeof()
++ * units (which if less than the specification, represents an error).
++ *
++ * NOTES:
++ * In release 1 increments the file position by the number of bytes read
++ *
++ ************************************************************************ */
++ int (*read_buffer) (struct dynamic_loader_stream *thisptr,
++ void *buffer, unsigned bufsiz);
++
++ /*************************************************************************
++ * set_file_posn (release 1 only)
++ *
++ * PARAMETERS :
++ * posn Desired file position relative to start of file in sizeof() units.
++ *
++ * EFFECT :
++ * Adjusts the internal state of the stream object so that the next
++ * read_buffer call will begin to read at the specified offset from
++ * the beginning of the input module. Returns 0 for success, non-zero
++ * for failure.
++ *
++ ************************************************************************ */
++ int (*set_file_posn) (struct dynamic_loader_stream *thisptr,
++ /* to be eliminated in release 2 */
++ unsigned int posn);
++
++};
++
++/*****************************************************************************
++ *****************************************************************************
++ * A class used by the dynamic loader for symbol table support and
++ * miscellaneous host-side functions
++ *****************************************************************************
++ **************************************************************************** */
++
++typedef u32 ldr_addr;
++
++/*
++ * the structure of a symbol known to the dynamic loader
++ */
++struct dynload_symbol {
++ ldr_addr value;
++};
++
++struct dynamic_loader_sym {
++/* public: */
++ /*************************************************************************
++ * find_matching_symbol
++ *
++ * PARAMETERS :
++ * name The name of the desired symbol
++ *
++ * EFFECT :
++ * Locates a symbol matching the name specified. A pointer to the
++ * symbol is returned if it exists; 0 is returned if no such symbol is
++ * found.
++ *
++ ************************************************************************ */
++ struct dynload_symbol *(*find_matching_symbol)
++ (struct dynamic_loader_sym *thisptr, const char *name);
++
++ /*************************************************************************
++ * add_to_symbol_table
++ *
++ * PARAMETERS :
++ * nname Pointer to the name of the new symbol
++ * moduleid An opaque module id assigned by the dynamic loader
++ *
++ * EFFECT :
++ * The new symbol is added to the table. A pointer to the symbol is
++ * returned, or NULL is returned for failure.
++ *
++ * NOTES:
++ * It is permissible for this function to return NULL; the effect is that
++ * the named symbol will not be available to resolve references in
++ * subsequent loads. Returning NULL will not cause the current load
++ * to fail.
++ ************************************************************************ */
++ struct dynload_symbol *(*add_to_symbol_table)
++ (struct dynamic_loader_sym *
++ thisptr, const char *nname, unsigned moduleid);
++
++ /*************************************************************************
++ * purge_symbol_table
++ *
++ * PARAMETERS :
++ * moduleid An opaque module id assigned by the dynamic loader
++ *
++ * EFFECT :
++ * Each symbol in the symbol table whose moduleid matches the argument
++ * is removed from the table.
++ ************************************************************************ */
++ void (*purge_symbol_table) (struct dynamic_loader_sym *thisptr,
++ unsigned moduleid);
++
++ /*************************************************************************
++ * dload_allocate
++ *
++ * PARAMETERS :
++ * memsiz size of desired memory in sizeof() units
++ *
++ * EFFECT :
++ * Returns a pointer to some "host" memory for use by the dynamic
++ * loader, or NULL for failure.
++ * This function is serves as a replaceable form of "malloc" to
++ * allow the user to configure the memory usage of the dynamic loader.
++ ************************************************************************ */
++ void *(*dload_allocate) (struct dynamic_loader_sym *thisptr,
++ unsigned memsiz);
++
++ /*************************************************************************
++ * dload_deallocate
++ *
++ * PARAMETERS :
++ * memptr pointer to previously allocated memory
++ *
++ * EFFECT :
++ * Releases the previously allocated "host" memory.
++ ************************************************************************ */
++ void (*dload_deallocate) (struct dynamic_loader_sym *thisptr,
++ void *memptr);
++
++ /*************************************************************************
++ * error_report
++ *
++ * PARAMETERS :
++ * errstr pointer to an error string
++ * args additional arguments
++ *
++ * EFFECT :
++ * This function provides an error reporting interface for the dynamic
++ * loader. The error string and arguments are designed as for the
++ * library function vprintf.
++ ************************************************************************ */
++ void (*error_report) (struct dynamic_loader_sym *thisptr,
++ const char *errstr, va_list args);
++
++}; /* class dynamic_loader_sym */
++
++/*****************************************************************************
++ *****************************************************************************
++ * A class used by the dynamic loader to allocate and deallocate target memory.
++ *****************************************************************************
++ **************************************************************************** */
++
++struct ldr_section_info {
++ /* Name of the memory section assigned at build time */
++ const char *name;
++ ldr_addr run_addr; /* execution address of the section */
++ ldr_addr load_addr; /* load address of the section */
++ ldr_addr size; /* size of the section in addressable units */
++#ifndef _BIG_ENDIAN
++ u16 page; /* memory page or view */
++ u16 type; /* one of the section types below */
++#else
++ u16 type; /* one of the section types below */
++ u16 page; /* memory page or view */
++#endif
++ /* a context field for use by dynamic_loader_allocate;
++ * ignored but maintained by the dynamic loader */
++ u32 context;
++};
++
++/* use this macro to extract type of section from ldr_section_info.type field */
++#define DLOAD_SECTION_TYPE(typeinfo) (typeinfo & 0xF)
++
++/* type of section to be allocated */
++#define DLOAD_TEXT 0
++#define DLOAD_DATA 1
++#define DLOAD_BSS 2
++ /* internal use only, run-time cinit will be of type DLOAD_DATA */
++#define DLOAD_CINIT 3
++
++struct dynamic_loader_allocate {
++/* public: */
++
++ /*************************************************************************
++ * Function allocate
++ *
++ * Parameters:
++ * info A pointer to an information block for the section
++ * align The alignment of the storage in target AUs
++ *
++ * Effect:
++ * Allocates target memory for the specified section and fills in the
++ * load_addr and run_addr fields of the section info structure. Returns TRUE
++ * for success, FALSE for failure.
++ *
++ * Notes:
++ * Frequently load_addr and run_addr are the same, but if they are not
++ * load_addr is used with dynamic_loader_initialize, and run_addr is
++ * used for almost all relocations. This function should always initialize
++ * both fields.
++ ************************************************************************ */
++ int (*dload_allocate) (struct dynamic_loader_allocate *thisptr,
++ struct ldr_section_info *info, unsigned align);
++
++ /*************************************************************************
++ * Function deallocate
++ *
++ * Parameters:
++ * info A pointer to an information block for the section
++ *
++ * Effect:
++ * Releases the target memory previously allocated.
++ *
++ * Notes:
++ * The content of the info->name field is undefined on call to this function.
++ ************************************************************************ */
++ void (*dload_deallocate) (struct dynamic_loader_allocate *thisptr,
++ struct ldr_section_info *info);
++
++}; /* class dynamic_loader_allocate */
++
++/*****************************************************************************
++ *****************************************************************************
++ * A class used by the dynamic loader to load data into a target. This class
++ * provides the interface-specific functions needed to load data.
++ *****************************************************************************
++ **************************************************************************** */
++
++struct dynamic_loader_initialize {
++/* public: */
++ /*************************************************************************
++ * Function connect
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Connect to the initialization interface. Returns TRUE for success,
++ * FALSE for failure.
++ *
++ * Notes:
++ * This function is called prior to use of any other functions in
++ * this interface.
++ ************************************************************************ */
++ int (*connect) (struct dynamic_loader_initialize *thisptr);
++
++ /*************************************************************************
++ * Function readmem
++ *
++ * Parameters:
++ * bufr Pointer to a word-aligned buffer for the result
++ * locn Target address of first data element
++ * info Section info for the section in which the address resides
++ * bytsiz Size of the data to be read in sizeof() units
++ *
++ * Effect:
++ * Fills the specified buffer with data from the target. Returns TRUE for
++ * success, FALSE for failure.
++ ************************************************************************ */
++ int (*readmem) (struct dynamic_loader_initialize *thisptr,
++ void *bufr,
++ ldr_addr locn,
++ struct ldr_section_info *info, unsigned bytsiz);
++
++ /*************************************************************************
++ * Function writemem
++ *
++ * Parameters:
++ * bufr Pointer to a word-aligned buffer of data
++ * locn Target address of first data element to be written
++ * info Section info for the section in which the address resides
++ * bytsiz Size of the data to be written in sizeof() units
++ *
++ * Effect:
++ * Writes the specified buffer to the target. Returns TRUE for success,
++ * FALSE for failure.
++ ************************************************************************ */
++ int (*writemem) (struct dynamic_loader_initialize *thisptr,
++ void *bufr,
++ ldr_addr locn,
++ struct ldr_section_info *info, unsigned bytsiz);
++
++ /*************************************************************************
++ * Function fillmem
++ *
++ * Parameters:
++ * locn Target address of first data element to be written
++ * info Section info for the section in which the address resides
++ * bytsiz Size of the data to be written in sizeof() units
++ * val Value to be written in each byte
++ * Effect:
++ * Fills the specified area of target memory. Returns TRUE for success,
++ * FALSE for failure.
++ ************************************************************************ */
++ int (*fillmem) (struct dynamic_loader_initialize *thisptr,
++ ldr_addr locn, struct ldr_section_info *info,
++ unsigned bytsiz, unsigned val);
++
++ /*************************************************************************
++ * Function execute
++ *
++ * Parameters:
++ * start Starting address
++ *
++ * Effect:
++ * The target code at the specified starting address is executed.
++ *
++ * Notes:
++ * This function is called at the end of the dynamic load process
++ * if the input module has specified a starting address.
++ ************************************************************************ */
++ int (*execute) (struct dynamic_loader_initialize *thisptr,
++ ldr_addr start);
++
++ /*************************************************************************
++ * Function release
++ *
++ * Parameters:
++ * none
++ *
++ * Effect:
++ * Releases the connection to the load interface.
++ *
++ * Notes:
++ * This function is called at the end of the dynamic load process.
++ ************************************************************************ */
++ void (*release) (struct dynamic_loader_initialize *thisptr);
++
++}; /* class dynamic_loader_initialize */
++
++#endif /* _DYNAMIC_LOADER_H_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/gb.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/gb.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,79 @@
++/*
++ * gb.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Generic bitmap manager.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef GB_
++#define GB_
++
++#define GB_NOBITS (~0)
++#include <dspbridge/host_os.h>
++
++struct gb_t_map;
++
++/*
++ * ======== gb_clear ========
++ * Clear the bit in position bitn in the bitmap map. Bit positions are
++ * zero based.
++ */
++
++extern void gb_clear(struct gb_t_map *map, u32 bitn);
++
++/*
++ * ======== gb_create ========
++ * Create a bit map with len bits. Initially all bits are cleared.
++ */
++
++extern struct gb_t_map *gb_create(u32 len);
++
++/*
++ * ======== gb_delete ========
++ * Delete previously created bit map
++ */
++
++extern void gb_delete(struct gb_t_map *map);
++
++/*
++ * ======== gb_findandset ========
++ * Finds a clear bit, sets it, and returns the position
++ */
++
++extern u32 gb_findandset(struct gb_t_map *map);
++
++/*
++ * ======== gb_minclear ========
++ * gb_minclear returns the minimum clear bit position. If no bit is
++ * clear, gb_minclear returns -1.
++ */
++extern u32 gb_minclear(struct gb_t_map *map);
++
++/*
++ * ======== gb_set ========
++ * Set the bit in position bitn in the bitmap map. Bit positions are
++ * zero based.
++ */
++
++extern void gb_set(struct gb_t_map *map, u32 bitn);
++
++/*
++ * ======== gb_test ========
++ * Returns TRUE if the bit in position bitn is set in map; otherwise
++ * gb_test returns FALSE. Bit positions are zero based.
++ */
++
++extern bool gb_test(struct gb_t_map *map, u32 bitn);
++
++#endif /*GB_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/getsection.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/getsection.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,108 @@
++/*
++ * getsection.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This file provides an API add-on to the dynamic loader that allows the user
++ * to query section information and extract section data from dynamic load
++ * modules.
++ *
++ * Notes:
++ * Functions in this API assume that the supplied dynamic_loader_stream
++ * object supports the set_file_posn method.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _GETSECTION_H_
++#define _GETSECTION_H_
++
++#include "dynamic_loader.h"
++
++/*
++ * Procedure dload_module_open
++ *
++ * Parameters:
++ * module The input stream that supplies the module image
++ * syms Host-side malloc/free and error reporting functions.
++ * Other methods are unused.
++ *
++ * Effect:
++ * Reads header information from a dynamic loader module using the specified
++ * stream object, and returns a handle for the module information. This
++ * handle may be used in subsequent query calls to obtain information
++ * contained in the module.
++ *
++ * Returns:
++ * NULL if an error is encountered, otherwise a module handle for use
++ * in subsequent operations.
++ */
++extern void *dload_module_open(struct dynamic_loader_stream
++ *module, struct dynamic_loader_sym
++ *syms);
++
++/*
++ * Procedure dload_get_section_info
++ *
++ * Parameters:
++ * minfo Handle from dload_module_open for this module
++ * section_name Pointer to the string name of the section desired
++ * section_info Address of a section info structure pointer to be initialized
++ *
++ * Effect:
++ * Finds the specified section in the module information, and fills in
++ * the provided ldr_section_info structure.
++ *
++ * Returns:
++ * TRUE for success, FALSE for section not found
++ */
++extern int dload_get_section_info(void *minfo,
++ const char *section_name,
++ const struct ldr_section_info
++ **const section_info);
++
++/*
++ * Procedure dload_get_section
++ *
++ * Parameters:
++ * minfo Handle from dload_module_open for this module
++ * section_info Pointer to a section info structure for the desired section
++ * section_data Buffer to contain the section initialized data
++ *
++ * Effect:
++ * Copies the initialized data for the specified section into the
++ * supplied buffer.
++ *
++ * Returns:
++ * TRUE for success, FALSE for section not found
++ */
++extern int dload_get_section(void *minfo,
++ const struct ldr_section_info *section_info,
++ void *section_data);
++
++/*
++ * Procedure dload_module_close
++ *
++ * Parameters:
++ * minfo Handle from dload_module_open for this module
++ *
++ * Effect:
++ * Releases any storage associated with the module handle. On return,
++ * the module handle is invalid.
++ *
++ * Returns:
++ * Zero for success. On error, the number of errors detected is returned.
++ * Individual errors are reported using syms->error_report(), where syms was
++ * an argument to dload_module_open
++ */
++extern void dload_module_close(void *minfo);
++
++#endif /* _GETSECTION_H_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/gh.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/gh.h 2010-08-18 11:24:23.190052789 +0300
+@@ -0,0 +1,34 @@
++/*
++ * gh.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef GH_
++#define GH_
++#include <dspbridge/host_os.h>
++
++extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
++ u16(*hash) (void *, u16),
++ bool(*match) (void *, void *),
++ void (*delete) (void *));
++extern void gh_delete(struct gh_t_hash_tab *hash_tab);
++extern void gh_exit(void);
++extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
++extern void gh_init(void);
++extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++void gh_iterate(struct gh_t_hash_tab *hash_tab,
++ void (*callback)(void *, void *), void *user_data);
++#endif
++#endif /* GH_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/gs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/gs.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,59 @@
++/*
++ * gs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Memory allocation/release wrappers. This module allows clients to
++ * avoid OS spacific issues related to memory allocation. It also provides
++ * simple diagnostic capabilities to assist in the detection of memory
++ * leaks.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef GS_
++#define GS_
++
++/*
++ * ======== gs_alloc ========
++ * Alloc size bytes of space. Returns pointer to space
++ * allocated, otherwise NULL.
++ */
++extern void *gs_alloc(u32 size);
++
++/*
++ * ======== gs_exit ========
++ * Module exit. Do not change to "#define gs_init()"; in
++ * some environments this operation must actually do some work!
++ */
++extern void gs_exit(void);
++
++/*
++ * ======== gs_free ========
++ * Free space allocated by gs_alloc() or GS_calloc().
++ */
++extern void gs_free(void *ptr);
++
++/*
++ * ======== gs_frees ========
++ * Free space allocated by gs_alloc() or GS_calloc() and assert that
++ * the size of the allocation is size bytes.
++ */
++extern void gs_frees(void *ptr, u32 size);
++
++/*
++ * ======== gs_init ========
++ * Module initialization. Do not change to "#define gs_init()"; in
++ * some environments this operation must actually do some work!
++ */
++extern void gs_init(void);
++
++#endif /*GS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/host_os.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,88 @@
++/*
++ * host_os.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _HOST_OS_H_
++#define _HOST_OS_H_
++
++#include <asm/system.h>
++#include <asm/atomic.h>
++#include <linux/semaphore.h>
++#include <linux/uaccess.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++#include <linux/syscalls.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/stddef.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/ctype.h>
++#include <linux/mm.h>
++#include <linux/device.h>
++#include <linux/vmalloc.h>
++#include <linux/ioport.h>
++#include <linux/platform_device.h>
++#include <plat/clock.h>
++#include <linux/clk.h>
++#include <plat/mailbox.h>
++#include <linux/pagemap.h>
++#include <asm/cacheflush.h>
++#include <linux/dma-mapping.h>
++
++/* TODO -- Remove, once BP defines them */
++#define INT_DSP_MMU_IRQ 28
++
++struct dspbridge_platform_data {
++ void (*dsp_set_min_opp) (u8 opp_id);
++ u8(*dsp_get_opp) (void);
++ void (*cpu_set_freq) (unsigned long f);
++ unsigned long (*cpu_get_freq) (void);
++ unsigned long mpu_speed[6];
++
++ /* functions to write and read PRCM registers */
++ void (*dsp_prm_write)(u32, s16 , u16);
++ u32 (*dsp_prm_read)(s16 , u16);
++ u32 (*dsp_prm_rmw_bits)(u32, u32, s16, s16);
++ void (*dsp_cm_write)(u32, s16 , u16);
++ u32 (*dsp_cm_read)(s16 , u16);
++ u32 (*dsp_cm_rmw_bits)(u32, u32, s16, s16);
++
++ u32 phys_mempool_base;
++ u32 phys_mempool_size;
++};
++
++#define PRCM_VDD1 1
++
++extern struct platform_device *omap_dspbridge_dev;
++extern struct device *bridge;
++
++#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
++extern void dspbridge_reserve_sdram(void);
++#else
++static inline void dspbridge_reserve_sdram(void)
++{
++}
++#endif
++
++extern unsigned long dspbridge_get_mempool_base(void);
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/io.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/io.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,114 @@
++/*
++ * io.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * The io module manages IO between CHNL and msg_ctrl.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef IO_
++#define IO_
++
++#include <dspbridge/cfgdefs.h>
++#include <dspbridge/devdefs.h>
++
++#include <dspbridge/iodefs.h>
++
++/*
++ * ======== io_create ========
++ * Purpose:
++ * Create an IO manager object, responsible for managing IO between
++ * CHNL and msg_ctrl.
++ * Parameters:
++ * channel_mgr: Location to store a channel manager object on
++ * output.
++ * hdev_obj: Handle to a device object.
++ * mgr_attrts: IO manager attributes.
++ * mgr_attrts->birq: I/O IRQ number.
++ * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
++ * mgr_attrts->word_size: DSP Word size in equivalent PC bytes..
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EIO: Unable to plug channel ISR for configured IRQ.
++ * -EINVAL: Invalid DSP word size (must be > 0).
++ * Invalid base address for DSP communications.
++ * Requires:
++ * io_init(void) called.
++ * io_man != NULL.
++ * mgr_attrts != NULL.
++ * Ensures:
++ */
++extern int io_create(struct io_mgr **io_man,
++ struct dev_object *hdev_obj,
++ const struct io_attrs *mgr_attrts);
++
++/*
++ * ======== io_destroy ========
++ * Purpose:
++ * Destroy the IO manager.
++ * Parameters:
++ * hio_mgr: IOmanager object.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: hio_mgr was invalid.
++ * Requires:
++ * io_init(void) called.
++ * Ensures:
++ */
++extern int io_destroy(struct io_mgr *hio_mgr);
++
++/*
++ * ======== io_exit ========
++ * Purpose:
++ * Discontinue usage of the IO module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * io_init(void) previously called.
++ * Ensures:
++ * Resources, if any acquired in io_init(void), are freed when the last
++ * client of IO calls io_exit(void).
++ */
++extern void io_exit(void);
++
++/*
++ * ======== io_init ========
++ * Purpose:
++ * Initialize the IO module's private state.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * A requirement for each of the other public CHNL functions.
++ */
++extern bool io_init(void);
++
++/*
++ * ======== io_on_loaded ========
++ * Purpose:
++ * Called when a program is loaded so IO manager can update its
++ * internal state.
++ * Parameters:
++ * hio_mgr: IOmanager object.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: hio_mgr was invalid.
++ * Requires:
++ * io_init(void) called.
++ * Ensures:
++ */
++extern int io_on_loaded(struct io_mgr *hio_mgr);
++
++#endif /* CHNL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/io_sm.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,298 @@
++/*
++ * io_sm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * IO dispatcher for a shared memory channel driver.
++ * Also, includes macros to simulate shm via port io calls.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef IOSM_
++#define IOSM_
++
++#include <dspbridge/_chnl_sm.h>
++#include <dspbridge/host_os.h>
++
++#include <dspbridge/iodefs.h>
++
++#define IO_INPUT 0
++#define IO_OUTPUT 1
++#define IO_SERVICE 2
++#define IO_MAXSERVICE IO_SERVICE
++
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++/* The maximum number of OPPs that are supported */
++extern s32 dsp_max_opps;
++/* The Vdd1 opp table information */
++extern u32 vdd1_dsp_freq[6][4];
++#endif
++
++/*
++ * ======== io_cancel_chnl ========
++ * Purpose:
++ * Cancel IO on a given channel.
++ * Parameters:
++ * hio_mgr: IO Manager.
++ * chnl: Index of channel to cancel IO on.
++ * Returns:
++ * Requires:
++ * Valid hio_mgr.
++ * Ensures:
++ */
++extern void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl);
++
++/*
++ * ======== io_dpc ========
++ * Purpose:
++ * Deferred procedure call for shared memory channel driver ISR. Carries
++ * out the dispatch of I/O.
++ * Parameters:
++ * ref_data: Pointer to reference data registered via a call to
++ * DPC_Create().
++ * Returns:
++ * Requires:
++ * Must not block.
++ * Must not acquire resources.
++ * All data touched must be locked in memory if running in kernel mode.
++ * Ensures:
++ * Non-preemptible (but interruptible).
++ */
++extern void io_dpc(unsigned long ref_data);
++
++/*
++ * ======== io_mbox_msg ========
++ * Purpose:
++ * Main interrupt handler for the shared memory Bridge channel manager.
++ * Calls the Bridge's chnlsm_isr to determine if this interrupt is ours,
++ * then schedules a DPC to dispatch I/O.
++ * Parameters:
++ * ref_data: Pointer to the channel manager object for this board.
++ * Set in an initial call to ISR_Install().
++ * Returns:
++ * TRUE if interrupt handled; FALSE otherwise.
++ * Requires:
++ * Must be in locked memory if executing in kernel mode.
++ * Must only call functions which are in locked memory if Kernel mode.
++ * Must only call asynchronous services.
++ * Interrupts are disabled and EOI for this interrupt has been sent.
++ * Ensures:
++ */
++void io_mbox_msg(u32 msg);
++
++/*
++ * ======== io_request_chnl ========
++ * Purpose:
++ * Request I/O from the DSP. Sets flags in shared memory, then interrupts
++ * the DSP.
++ * Parameters:
++ * hio_mgr: IO manager handle.
++ * pchnl: Ptr to the channel requesting I/O.
++ * io_mode: Mode of channel: {IO_INPUT | IO_OUTPUT}.
++ * Returns:
++ * Requires:
++ * pchnl != NULL
++ * Ensures:
++ */
++extern void io_request_chnl(struct io_mgr *io_manager,
++ struct chnl_object *pchnl,
++ u8 io_mode, u16 *mbx_val);
++
++/*
++ * ======== iosm_schedule ========
++ * Purpose:
++ * Schedule DPC for IO.
++ * Parameters:
++ * pio_mgr: Ptr to a I/O manager.
++ * Returns:
++ * Requires:
++ * pchnl != NULL
++ * Ensures:
++ */
++extern void iosm_schedule(struct io_mgr *io_manager);
++
++/*
++ * DSP-DMA IO functions
++ */
++
++/*
++ * ======== io_ddma_init_chnl_desc ========
++ * Purpose:
++ * Initialize DSP DMA channel descriptor.
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * ddma_chnl_id: DDMA channel identifier.
++ * num_desc: Number of buffer descriptors(equals # of IOReqs &
++ * Chirps)
++ * dsp: Dsp address;
++ * Returns:
++ * Requires:
++ * ddma_chnl_id < DDMA_MAXDDMACHNLS
++ * num_desc > 0
++ * pVa != NULL
++ * pDspPa != NULL
++ *
++ * Ensures:
++ */
++extern void io_ddma_init_chnl_desc(struct io_mgr *hio_mgr, u32 ddma_chnl_id,
++ u32 num_desc, void *dsp);
++
++/*
++ * ======== io_ddma_clear_chnl_desc ========
++ * Purpose:
++ * Clear DSP DMA channel descriptor.
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * ddma_chnl_id: DDMA channel identifier.
++ * Returns:
++ * Requires:
++ * ddma_chnl_id < DDMA_MAXDDMACHNLS
++ * Ensures:
++ */
++extern void io_ddma_clear_chnl_desc(struct io_mgr *hio_mgr, u32 ddma_chnl_id);
++
++/*
++ * ======== io_ddma_request_chnl ========
++ * Purpose:
++ * Request channel DSP-DMA from the DSP. Sets up SM descriptors and
++ * control fields in shared memory.
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * pchnl: Ptr to channel object
++ * chnl_packet_obj: Ptr to channel i/o request packet.
++ * Returns:
++ * Requires:
++ * pchnl != NULL
++ * pchnl->cio_reqs > 0
++ * chnl_packet_obj != NULL
++ * Ensures:
++ */
++extern void io_ddma_request_chnl(struct io_mgr *hio_mgr,
++ struct chnl_object *pchnl,
++ struct chnl_irp *chnl_packet_obj,
++ u16 *mbx_val);
++
++/*
++ * Zero-copy IO functions
++ */
++
++/*
++ * ======== io_ddzc_init_chnl_desc ========
++ * Purpose:
++ * Initialize ZCPY channel descriptor.
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * zid: zero-copy channel identifier.
++ * Returns:
++ * Requires:
++ * ddma_chnl_id < DDMA_MAXZCPYCHNLS
++ * hio_mgr != Null
++ * Ensures:
++ */
++extern void io_ddzc_init_chnl_desc(struct io_mgr *hio_mgr, u32 zid);
++
++/*
++ * ======== io_ddzc_clear_chnl_desc ========
++ * Purpose:
++ * Clear DSP ZC channel descriptor.
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * ch_id: ZC channel identifier.
++ * Returns:
++ * Requires:
++ * hio_mgr is valid
++ * ch_id < DDMA_MAXZCPYCHNLS
++ * Ensures:
++ */
++extern void io_ddzc_clear_chnl_desc(struct io_mgr *hio_mgr, u32 ch_id);
++
++/*
++ * ======== io_ddzc_request_chnl ========
++ * Purpose:
++ * Request zero-copy channel transfer. Sets up SM descriptors and
++ * control fields in shared memory.
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * pchnl: Ptr to channel object
++ * chnl_packet_obj: Ptr to channel i/o request packet.
++ * Returns:
++ * Requires:
++ * pchnl != NULL
++ * pchnl->cio_reqs > 0
++ * chnl_packet_obj != NULL
++ * Ensures:
++ */
++extern void io_ddzc_request_chnl(struct io_mgr *hio_mgr,
++ struct chnl_object *pchnl,
++ struct chnl_irp *chnl_packet_obj,
++ u16 *mbx_val);
++
++/*
++ * ======== io_sh_msetting ========
++ * Purpose:
++ * Sets the shared memory setting
++ * Parameters:
++ * hio_mgr: Handle to a I/O manager.
++ * desc: Shared memory type
++ * pargs: Ptr to shm setting
++ * Returns:
++ * Requires:
++ * hio_mgr != NULL
++ * pargs != NULL
++ * Ensures:
++ */
++extern int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs);
++
++/*
++ * Misc functions for the CHNL_IO shared memory library:
++ */
++
++/* Maximum channel bufsize that can be used. */
++extern u32 io_buf_size(struct io_mgr *hio_mgr);
++
++extern u32 io_read_value(struct bridge_dev_context *dev_ctxt, u32 dsp_addr);
++
++extern void io_write_value(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr, u32 value);
++
++extern u32 io_read_value_long(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr);
++
++extern void io_write_value_long(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr, u32 value);
++
++extern void io_or_set_value(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr, u32 value);
++
++extern void io_and_set_value(struct bridge_dev_context *dev_ctxt,
++ u32 dsp_addr, u32 value);
++
++extern void io_sm_init(void);
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/*
++ * ========print_dsp_trace_buffer ========
++ * Print DSP tracebuffer.
++ */
++extern int print_dsp_trace_buffer(struct bridge_dev_context
++ *hbridge_context);
++
++int dump_dsp_stack(struct bridge_dev_context *bridge_context);
++
++void dump_dl_modules(struct bridge_dev_context *bridge_context);
++
++#endif
++#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
++void print_dsp_debug_trace(struct io_mgr *hio_mgr);
++#endif
++
++#endif /* IOSM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/iodefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/iodefs.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,36 @@
++/*
++ * iodefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * System-wide channel objects and constants.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef IODEFS_
++#define IODEFS_
++
++#define IO_MAXIRQ 0xff /* Arbitrarily large number. */
++
++/* IO Objects: */
++struct io_mgr;
++
++/* IO manager attributes: */
++struct io_attrs {
++ u8 birq; /* Channel's I/O IRQ number. */
++ bool irq_shared; /* TRUE if the IRQ is shareable. */
++ u32 word_size; /* DSP Word size. */
++ u32 shm_base; /* Physical base address of shared memory. */
++ u32 usm_length; /* Size (in bytes) of shared memory. */
++};
++
++#endif /* IODEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/ldr.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/ldr.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,29 @@
++/*
++ * ldr.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Provide module loading services and symbol export services.
++ *
++ * Notes:
++ * This service is meant to be used by modules of the DSP/BIOS Bridge
++ * driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef LDR_
++#define LDR_
++
++/* Loader objects: */
++struct ldr_module;
++
++#endif /* LDR_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/list.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/list.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,225 @@
++/*
++ * list.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Declarations of list management control structures and definitions
++ * of inline list management functions.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef LIST_
++#define LIST_
++
++#include <dspbridge/host_os.h>
++#include <linux/list.h>
++
++#define LST_IS_EMPTY(l) list_empty(&(l)->head)
++
++struct lst_list {
++ struct list_head head;
++};
++
++/*
++ * ======== lst_first ========
++ * Purpose:
++ * Returns a pointer to the first element of the list, or NULL if the list
++ * is empty.
++ * Parameters:
++ * lst: Pointer to list control structure.
++ * Returns:
++ * Pointer to first list element, or NULL.
++ * Requires:
++ * - LST initialized.
++ * - lst != NULL.
++ * Ensures:
++ */
++static inline struct list_head *lst_first(struct lst_list *lst)
++{
++ if (lst && !list_empty(&lst->head))
++ return lst->head.next;
++ return NULL;
++}
++
++/*
++ * ======== lst_get_head ========
++ * Purpose:
++ * Pops the head off the list and returns a pointer to it.
++ * Details:
++ * If the list is empty, returns NULL.
++ * Else, removes the element at the head of the list, making the next
++ * element the head of the list.
++ * The head is removed by making the tail element of the list point its
++ * "next" pointer at the next element after the head, and by making the
++ * "prev" pointer of the next element after the head point at the tail
++ * element. So the next element after the head becomes the new head of
++ * the list.
++ * Parameters:
++ * lst: Pointer to list control structure of list whose head
++ * element is to be removed
++ * Returns:
++ * Pointer to element that was at the head of the list (success)
++ * NULL No elements in list
++ * Requires:
++ * - LST initialized.
++ * - lst != NULL.
++ * Ensures:
++ * Notes:
++ * Because the tail of the list points forward (its "next" pointer) to
++ * the head of the list, and the head of the list points backward (its
++ * "prev" pointer) to the tail of the list, this list is circular.
++ */
++static inline struct list_head *lst_get_head(struct lst_list *lst)
++{
++ struct list_head *elem_list;
++
++ if (!lst || list_empty(&lst->head))
++ return NULL;
++
++ elem_list = lst->head.next;
++ lst->head.next = elem_list->next;
++ elem_list->next->prev = &lst->head;
++
++ return elem_list;
++}
++
++/*
++ * ======== lst_init_elem ========
++ * Purpose:
++ * Initializes a list element to default (cleared) values
++ * Details:
++ * Parameters:
++ * elem_list: Pointer to list element to be reset
++ * Returns:
++ * Requires:
++ * LST initialized.
++ * Ensures:
++ * Notes:
++ * This function must not be called to "reset" an element in the middle
++ * of a list chain -- that would break the chain.
++ *
++ */
++static inline void lst_init_elem(struct list_head *elem_list)
++{
++ if (elem_list) {
++ elem_list->next = NULL;
++ elem_list->prev = NULL;
++ }
++}
++
++/*
++ * ======== lst_insert_before ========
++ * Purpose:
++ * Insert the element before the existing element.
++ * Parameters:
++ * lst: Pointer to list control structure.
++ * elem_list: Pointer to element in list to insert.
++ * elem_existing: Pointer to existing list element.
++ * Returns:
++ * Requires:
++ * - LST initialized.
++ * - lst != NULL.
++ * - elem_list != NULL.
++ * - elem_existing != NULL.
++ * Ensures:
++ */
++static inline void lst_insert_before(struct lst_list *lst,
++ struct list_head *elem_list,
++ struct list_head *elem_existing)
++{
++ if (lst && elem_list && elem_existing)
++ list_add_tail(elem_list, elem_existing);
++}
++
++/*
++ * ======== lst_next ========
++ * Purpose:
++ * Returns a pointer to the next element of the list, or NULL if the next
++ * element is the head of the list or the list is empty.
++ * Parameters:
++ * lst: Pointer to list control structure.
++ * cur_elem: Pointer to element in list to remove.
++ * Returns:
++ * Pointer to list element, or NULL.
++ * Requires:
++ * - LST initialized.
++ * - lst != NULL.
++ * - cur_elem != NULL.
++ * Ensures:
++ */
++static inline struct list_head *lst_next(struct lst_list *lst,
++ struct list_head *cur_elem)
++{
++ if (lst && !list_empty(&lst->head) && cur_elem &&
++ (cur_elem->next != &lst->head))
++ return cur_elem->next;
++ return NULL;
++}
++
++/*
++ * ======== lst_put_tail ========
++ * Purpose:
++ * Adds the specified element to the tail of the list
++ * Details:
++ * Sets new element's "prev" pointer to the address previously held by
++ * the head element's prev pointer. This is the previous tail member of
++ * the list.
++ * Sets the new head's prev pointer to the address of the element.
++ * Sets next pointer of the previous tail member of the list to point to
++ * the new element (rather than the head, which it had been pointing at).
++ * Sets new element's next pointer to the address of the head element.
++ * Sets head's prev pointer to the address of the new element.
++ * Parameters:
++ * lst: Pointer to list control structure to which *elem_list will be
++ * added
++ * elem_list: Pointer to list element to be added
++ * Returns:
++ * Void
++ * Requires:
++ * *elem_list and *lst must both exist.
++ * LST initialized.
++ * Ensures:
++ * Notes:
++ * Because the tail is always "just before" the head of the list (the
++ * tail's "next" pointer points at the head of the list, and the head's
++ * "prev" pointer points at the tail of the list), the list is circular.
++ */
++static inline void lst_put_tail(struct lst_list *lst,
++ struct list_head *elem_list)
++{
++ if (lst && elem_list)
++ list_add_tail(elem_list, &lst->head);
++}
++
++/*
++ * ======== lst_remove_elem ========
++ * Purpose:
++ * Removes (unlinks) the given element from the list, if the list is not
++ * empty. Does not free the list element.
++ * Parameters:
++ * lst: Pointer to list control structure.
++ * cur_elem: Pointer to element in list to remove.
++ * Returns:
++ * Requires:
++ * - LST initialized.
++ * - lst != NULL.
++ * - cur_elem != NULL.
++ * Ensures:
++ */
++static inline void lst_remove_elem(struct lst_list *lst,
++ struct list_head *cur_elem)
++{
++ if (lst && !list_empty(&lst->head) && cur_elem)
++ list_del_init(cur_elem);
++}
++
++#endif /* LIST_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,184 @@
++/*
++ * mbx_sh.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Definitions for shared mailbox cmd/data values.(used on both
++ * the GPP and DSP sides).
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/*
++ * Bridge usage of OMAP mailbox 1 is determined by the "class" of the
++ * mailbox interrupt's cmd value received. The class value are defined
++ * as a bit (10 thru 15) being set.
++ *
++ * Note: Only 16 bits of each is used. Other 16 bit data reg available.
++ *
++ * 16 bit Mbx bit defns:
++ *
++ * A). Exception/Error handling (Module DEH) : class = 0.
++ *
++ * 15 10 0
++ * ---------------------------------
++ * |0|0|0|0|0|0|x|x|x|x|x|x|x|x|x|x|
++ * ---------------------------------
++ * | (class) | (module specific) |
++ *
++ *
++ * B: DSP-DMA link driver channels (DDMA) : class = 1.
++ *
++ * 15 10 0
++ * ---------------------------------
++ * |0|0|0|0|0|1|b|b|b|b|b|c|c|c|c|c|
++ * ---------------------------------
++ * | (class) | (module specific) |
++ *
++ * where b -> buffer index (32 DDMA buffers/chnl max)
++ * c -> channel Id (32 DDMA chnls max)
++ *
++ *
++ * C: Proc-copy link driver channels (PCPY) : class = 2.
++ *
++ * 15 10 0
++ * ---------------------------------
++ * |0|0|0|0|1|0|x|x|x|x|x|x|x|x|x|x|
++ * ---------------------------------
++ * | (class) | (module specific) |
++ *
++ *
++ * D: Zero-copy link driver channels (DDZC) : class = 4.
++ *
++ * 15 10 0
++ * ---------------------------------
++ * |0|0|0|1|0|0|x|x|x|x|x|c|c|c|c|c|
++ * ---------------------------------
++ * | (class) | (module specific) |
++ *
++ * where x -> not used
++ * c -> channel Id (32 ZCPY chnls max)
++ *
++ *
++ * E: Power management : class = 8.
++ *
++ * 15 10 0
++ * ---------------------------------
++ * |0|0|1|0|0|0|x|x|x|x|x|c|c|c|c|c|
++
++ * 0010 00xx xxxc cccc
++ * 0010 00nn pppp qqqq
++ * nn:
++ * 00 = reserved
++ * 01 = pwr state change
++ * 10 = opp pre-change
++ * 11 = opp post-change
++ *
++ * if nn = pwr state change:
++ * pppp = don't care
++ * qqqq:
++ * 0010 = hibernate
++ * 0010 0001 0000 0010
++ * 0110 = retention
++ * 0010 0001 0000 0110
++ * others reserved
++ *
++ * if nn = opp pre-change:
++ * pppp = current opp
++ * qqqq = next opp
++ *
++ * if nn = opp post-change:
++ * pppp = prev opp
++ * qqqq = current opp
++ *
++ * ---------------------------------
++ * | (class) | (module specific) |
++ *
++ * where x -> not used
++ * c -> Power management command
++ *
++ */
++
++#ifndef _MBX_SH_H
++#define _MBX_SH_H
++
++#define MBX_CLASS_MSK 0xFC00 /* Class bits are 10 thru 15 */
++#define MBX_VALUE_MSK 0x03FF /* Value is 0 thru 9 */
++
++#define MBX_DEH_CLASS 0x0000 /* DEH owns Mbx INTR */
++#define MBX_DDMA_CLASS 0x0400 /* DSP-DMA link drvr chnls owns INTR */
++#define MBX_PCPY_CLASS 0x0800 /* PROC-COPY " */
++#define MBX_ZCPY_CLASS 0x1000 /* ZERO-COPY " */
++#define MBX_PM_CLASS 0x2000 /* Power Management */
++#define MBX_DBG_CLASS 0x4000 /* For debugging purpose */
++
++/*
++ * Exception Handler codes
++ * Magic code used to determine if DSP signaled exception.
++ */
++#define MBX_DEH_BASE 0x0
++#define MBX_DEH_USERS_BASE 0x100 /* 256 */
++#define MBX_DEH_LIMIT 0x3FF /* 1023 */
++#define MBX_DEH_RESET 0x101 /* DSP RESET (DEH) */
++#define MBX_DEH_EMMU 0X103 /*DSP MMU FAULT RECOVERY */
++
++/*
++ * Link driver command/status codes.
++ */
++/* DSP-DMA */
++#define MBX_DDMA_NUMCHNLBITS 5 /* # chnl Id: # bits available */
++#define MBX_DDMA_CHNLSHIFT 0 /* # of bits to shift */
++#define MBX_DDMA_CHNLMSK 0x01F /* bits 0 thru 4 */
++
++#define MBX_DDMA_NUMBUFBITS 5 /* buffer index: # of bits avail */
++#define MBX_DDMA_BUFSHIFT (MBX_DDMA_NUMCHNLBITS + MBX_DDMA_CHNLSHIFT)
++#define MBX_DDMA_BUFMSK 0x3E0 /* bits 5 thru 9 */
++
++/* Zero-Copy */
++#define MBX_ZCPY_NUMCHNLBITS 5 /* # chnl Id: # bits available */
++#define MBX_ZCPY_CHNLSHIFT 0 /* # of bits to shift */
++#define MBX_ZCPY_CHNLMSK 0x01F /* bits 0 thru 4 */
++
++/* Power Management Commands */
++#define MBX_PM_DSPIDLE (MBX_PM_CLASS + 0x0)
++#define MBX_PM_DSPWAKEUP (MBX_PM_CLASS + 0x1)
++#define MBX_PM_EMERGENCYSLEEP (MBX_PM_CLASS + 0x2)
++#define MBX_PM_SLEEPUNTILRESTART (MBX_PM_CLASS + 0x3)
++#define MBX_PM_DSPGLOBALIDLE_OFF (MBX_PM_CLASS + 0x4)
++#define MBX_PM_DSPGLOBALIDLE_ON (MBX_PM_CLASS + 0x5)
++#define MBX_PM_SETPOINT_PRENOTIFY (MBX_PM_CLASS + 0x6)
++#define MBX_PM_SETPOINT_POSTNOTIFY (MBX_PM_CLASS + 0x7)
++#define MBX_PM_DSPRETN (MBX_PM_CLASS + 0x8)
++#define MBX_PM_DSPRETENTION (MBX_PM_CLASS + 0x8)
++#define MBX_PM_DSPHIBERNATE (MBX_PM_CLASS + 0x9)
++#define MBX_PM_HIBERNATE_EN (MBX_PM_CLASS + 0xA)
++#define MBX_PM_OPP_REQ (MBX_PM_CLASS + 0xB)
++#define MBX_PM_OPP_CHG (MBX_PM_CLASS + 0xC)
++
++#define MBX_PM_TYPE_MASK 0x0300
++#define MBX_PM_TYPE_PWR_CHNG 0x0100
++#define MBX_PM_TYPE_OPP_PRECHNG 0x0200
++#define MBX_PM_TYPE_OPP_POSTCHNG 0x0300
++#define MBX_PM_TYPE_OPP_MASK 0x0300
++#define MBX_PM_OPP_PRECHNG (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG)
++/* DSP to MPU */
++#define MBX_PM_OPP_CHNG(OPP) (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG | (OPP))
++#define MBX_PM_RET (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0006)
++#define MBX_PM_HIB (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0002)
++#define MBX_PM_OPP1 0
++#define MBX_PM_OPP2 1
++#define MBX_PM_OPP3 2
++#define MBX_PM_OPP4 3
++
++/* Bridge Debug Commands */
++#define MBX_DBG_SYSPRINTF (MBX_DBG_CLASS + 0x0)
++
++#endif /* _MBX_SH_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/memdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/memdefs.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,30 @@
++/*
++ * memdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global MEM constants and types, shared between Bridge driver and DSP API.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef MEMDEFS_
++#define MEMDEFS_
++
++/*
++ * MEM_VIRTUALSEGID is used by Node & Strm to access virtual address space in
++ * the correct client process context.
++ */
++#define MEM_SETVIRTUALSEGID 0x10000000
++#define MEM_GETVIRTUALSEGID 0x20000000
++#define MEM_MASKVIRTUALSEGID (MEM_SETVIRTUALSEGID | MEM_GETVIRTUALSEGID)
++
++#endif /* MEMDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/mgr.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/mgr.h 2010-08-18 11:24:23.194052669 +0300
+@@ -0,0 +1,205 @@
++/*
++ * mgr.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This is the DSP API RM module interface.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef MGR_
++#define MGR_
++
++#include <dspbridge/mgrpriv.h>
++
++#define MAX_EVENTS 32
++
++/*
++ * ======== mgr_wait_for_bridge_events ========
++ * Purpose:
++ * Block on any Bridge event(s)
++ * Parameters:
++ * anotifications : array of pointers to notification objects.
++ * count : number of elements in above array
++ * pu_index : index of signaled event object
++ * utimeout : timeout interval in milliseconds
++ * Returns:
++ * 0 : Success.
++ * -ETIME : Wait timed out. *pu_index is undetermined.
++ * Details:
++ */
++
++int mgr_wait_for_bridge_events(struct dsp_notification
++ **anotifications,
++ u32 count, u32 *pu_index,
++ u32 utimeout);
++
++/*
++ * ======== mgr_create ========
++ * Purpose:
++ * Creates the Manager Object. This is done during the driver loading.
++ * There is only one Manager Object in the DSP/BIOS Bridge.
++ * Parameters:
++ * mgr_obj: Location to store created MGR Object handle.
++ * dev_node_obj: Device object as known to the system.
++ * Returns:
++ * 0: Success
++ * -ENOMEM: Failed to Create the Object
++ * -EPERM: General Failure
++ * Requires:
++ * MGR Initialized (refs > 0 )
++ * mgr_obj != NULL.
++ * Ensures:
++ * 0: *mgr_obj is a valid MGR interface to the device.
++ * MGR Object stores the DCD Manager Handle.
++ * MGR Object stored in the Registry.
++ * !0: MGR Object not created
++ * Details:
++ * DCD Dll is loaded and MGR Object stores the handle of the DLL.
++ */
++extern int mgr_create(struct mgr_object **mgr_obj,
++ struct cfg_devnode *dev_node_obj);
++
++/*
++ * ======== mgr_destroy ========
++ * Purpose:
++ * Destroys the MGR object. Called upon driver unloading.
++ * Parameters:
++ * hmgr_obj: Handle to Manager object .
++ * Returns:
++ * 0: Success.
++ * DCD Manager freed; MGR Object destroyed;
++ * MGR Object deleted from the Registry.
++ * -EPERM: Failed to destroy MGR Object
++ * Requires:
++ * MGR Initialized (refs > 0 )
++ * hmgr_obj is a valid MGR handle .
++ * Ensures:
++ * 0: MGR Object destroyed and hmgr_obj is Invalid MGR
++ * Handle.
++ */
++extern int mgr_destroy(struct mgr_object *hmgr_obj);
++
++/*
++ * ======== mgr_enum_node_info ========
++ * Purpose:
++ * Enumerate and get configuration information about nodes configured
++ * in the node database.
++ * Parameters:
++ * node_id: The node index (base 0).
++ * pndb_props: Ptr to the dsp_ndbprops structure for output.
++ * undb_props_size: Size of the dsp_ndbprops structure.
++ * pu_num_nodes: Location where the number of nodes configured
++ * in the database will be returned.
++ * Returns:
++ * 0: Success.
++ * -EINVAL: Parameter node_id is > the number of nodes
++ * configured in the system.
++ * -EIDRM: During Enumeration there has been a change in
++ * the number of nodes configured or in the
++ * the properties of the enumerated nodes.
++ * -EPERM: Failed to query the Node Data Base
++ * Requires:
++ * pNDBPROPS is not null
++ * undb_props_size >= sizeof(dsp_ndbprops)
++ * pu_num_nodes is not null
++ * MGR Initialized (refs > 0 )
++ * Ensures:
++ * SUCCESS on successful retrieval of data and *pu_num_nodes > 0 OR
++ * DSP_FAILED && *pu_num_nodes == 0.
++ * Details:
++ */
++extern int mgr_enum_node_info(u32 node_id,
++ struct dsp_ndbprops *pndb_props,
++ u32 undb_props_size,
++ u32 *pu_num_nodes);
++
++/*
++ * ======== mgr_enum_processor_info ========
++ * Purpose:
++ * Enumerate and get configuration information about available DSP
++ * processors
++ * Parameters:
++ * processor_id: The processor index (zero-based).
++ * processor_info: Ptr to the dsp_processorinfo structure .
++ * processor_info_size: Size of dsp_processorinfo structure.
++ * pu_num_procs: Location where the number of DSPs configured
++ * in the database will be returned
++ * Returns:
++ * 0: Success.
++ * -EINVAL: Parameter processor_id is > than the number of
++ * DSP Processors in the system.
++ * -EPERM: Failed to query the Node Data Base
++ * Requires:
++ * processor_info is not null
++ * pu_num_procs is not null
++ * processor_info_size >= sizeof(dsp_processorinfo)
++ * MGR Initialized (refs > 0 )
++ * Ensures:
++ * SUCCESS on successful retrieval of data and *pu_num_procs > 0 OR
++ * DSP_FAILED && *pu_num_procs == 0.
++ * Details:
++ */
++extern int mgr_enum_processor_info(u32 processor_id,
++ struct dsp_processorinfo
++ *processor_info,
++ u32 processor_info_size,
++ u8 *pu_num_procs);
++/*
++ * ======== mgr_exit ========
++ * Purpose:
++ * Decrement reference count, and free resources when reference count is
++ * 0.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * MGR is initialized.
++ * Ensures:
++ * When reference count == 0, MGR's private resources are freed.
++ */
++extern void mgr_exit(void);
++
++/*
++ * ======== mgr_get_dcd_handle ========
++ * Purpose:
++ * Retrieves the MGR handle. Accessor Function
++ * Parameters:
++ * mgr_handle: Handle to the Manager Object
++ * dcd_handle: Ptr to receive the DCD Handle.
++ * Returns:
++ * 0: Success
++ * -EPERM: Failure to get the Handle
++ * Requires:
++ * MGR is initialized.
++ * dcd_handle != NULL
++ * Ensures:
++ * 0 and *dcd_handle != NULL ||
++ * -EPERM and *dcd_handle == NULL
++ */
++extern int mgr_get_dcd_handle(struct mgr_object
++ *mgr_handle, u32 *dcd_handle);
++
++/*
++ * ======== mgr_init ========
++ * Purpose:
++ * Initialize MGR's private state, keeping a reference count on each
++ * call. Initializes the DCD.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * TRUE: A requirement for the other public MGR functions.
++ */
++extern bool mgr_init(void);
++
++#endif /* MGR_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,45 @@
++/*
++ * mgrpriv.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global MGR constants and types, shared by PROC, MGR, and DSP API.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef MGRPRIV_
++#define MGRPRIV_
++
++/*
++ * OMAP1510 specific
++ */
++#define MGR_MAXTLBENTRIES 32
++
++/* RM MGR Object */
++struct mgr_object;
++
++struct mgr_tlbentry {
++ u32 ul_dsp_virt; /* DSP virtual address */
++ u32 ul_gpp_phys; /* GPP physical address */
++};
++
++/*
++ * The DSP_PROCESSOREXTINFO structure describes additional extended
++ * capabilities of a DSP processor not exposed to user.
++ */
++struct mgr_processorextinfo {
++ struct dsp_processorinfo ty_basic; /* user processor info */
++ /* private dsp mmu entries */
++ struct mgr_tlbentry ty_tlb[MGR_MAXTLBENTRIES];
++};
++
++#endif /* MGRPRIV_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/msg.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/msg.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,86 @@
++/*
++ * msg.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge msg_ctrl Module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef MSG_
++#define MSG_
++
++#include <dspbridge/devdefs.h>
++#include <dspbridge/msgdefs.h>
++
++/*
++ * ======== msg_create ========
++ * Purpose:
++ * Create an object to manage message queues. Only one of these objects
++ * can exist per device object. The msg_ctrl manager must be created before
++ * the IO Manager.
++ * Parameters:
++ * msg_man: Location to store msg_ctrl manager handle on output.
++ * hdev_obj: The device object.
++ * msg_callback: Called whenever an RMS_EXIT message is received.
++ * Returns:
++ * Requires:
++ * msg_mod_init(void) called.
++ * msg_man != NULL.
++ * hdev_obj != NULL.
++ * msg_callback != NULL.
++ * Ensures:
++ */
++extern int msg_create(struct msg_mgr **msg_man,
++ struct dev_object *hdev_obj,
++ msg_onexit msg_callback);
++
++/*
++ * ======== msg_delete ========
++ * Purpose:
++ * Delete a msg_ctrl manager allocated in msg_create().
++ * Parameters:
++ * hmsg_mgr: Handle returned from msg_create().
++ * Returns:
++ * Requires:
++ * msg_mod_init(void) called.
++ * Valid hmsg_mgr.
++ * Ensures:
++ */
++extern void msg_delete(struct msg_mgr *hmsg_mgr);
++
++/*
++ * ======== msg_exit ========
++ * Purpose:
++ * Discontinue usage of msg_ctrl module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * msg_mod_init(void) successfully called before.
++ * Ensures:
++ * Any resources acquired in msg_mod_init(void) will be freed when last
++ * msg_ctrl client calls msg_exit(void).
++ */
++extern void msg_exit(void);
++
++/*
++ * ======== msg_mod_init ========
++ * Purpose:
++ * Initialize the msg_ctrl module.
++ * Parameters:
++ * Returns:
++ * TRUE if initialization succeeded, FALSE otherwise.
++ * Ensures:
++ */
++extern bool msg_mod_init(void);
++
++#endif /* MSG_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,29 @@
++/*
++ * msgdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global msg_ctrl constants and types.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef MSGDEFS_
++#define MSGDEFS_
++
++/* msg_ctrl Objects: */
++struct msg_mgr;
++struct msg_queue;
++
++/* Function prototype for callback to be called on RMS_EXIT message received */
++typedef void (*msg_onexit) (void *h, s32 node_status);
++
++#endif /* MSGDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nldr.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nldr.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,57 @@
++/*
++ * nldr.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge dynamic loader interface.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/dbdcddef.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/rmm.h>
++#include <dspbridge/nldrdefs.h>
++
++#ifndef NLDR_
++#define NLDR_
++
++extern int nldr_allocate(struct nldr_object *nldr_obj,
++ void *priv_ref, const struct dcd_nodeprops
++ *node_props,
++ struct nldr_nodeobject **nldr_nodeobj,
++ bool *pf_phase_split);
++
++extern int nldr_create(struct nldr_object **nldr,
++ struct dev_object *hdev_obj,
++ const struct nldr_attrs *pattrs);
++
++extern void nldr_delete(struct nldr_object *nldr_obj);
++extern void nldr_exit(void);
++
++extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
++ char *str_fxn, u32 * addr);
++
++extern int nldr_get_rmm_manager(struct nldr_object *nldr,
++ struct rmm_target_obj **rmm_mgr);
++
++extern bool nldr_init(void);
++extern int nldr_load(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase);
++extern int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase);
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
++ u32 offset_range, void *offset_output, char *sym_name);
++#endif
++
++#endif /* NLDR_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,293 @@
++/*
++ * nldrdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global Dynamic + static/overlay Node loader (NLDR) constants and types.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef NLDRDEFS_
++#define NLDRDEFS_
++
++#include <dspbridge/dbdcddef.h>
++#include <dspbridge/devdefs.h>
++
++#define NLDR_MAXPATHLENGTH 255
++/* NLDR Objects: */
++struct nldr_object;
++struct nldr_nodeobject;
++
++/*
++ * ======== nldr_loadtype ========
++ * Load types for a node. Must match values in node.h55.
++ */
++enum nldr_loadtype {
++ NLDR_STATICLOAD, /* Linked in base image, not overlay */
++ NLDR_DYNAMICLOAD, /* Dynamically loaded node */
++ NLDR_OVLYLOAD /* Linked in base image, overlay node */
++};
++
++/*
++ * ======== nldr_ovlyfxn ========
++ * Causes code or data to be copied from load address to run address. This
++ * is the "cod_writefxn" that gets passed to the DBLL_Library and is used as
++ * the ZL write function.
++ *
++ * Parameters:
++ * priv_ref: Handle to identify the node.
++ * dsp_run_addr: Run address of code or data.
++ * dsp_load_addr: Load address of code or data.
++ * ul_num_bytes: Number of (GPP) bytes to copy.
++ * mem_space: RMS_CODE or RMS_DATA.
++ * Returns:
++ * ul_num_bytes: Success.
++ * 0: Failure.
++ * Requires:
++ * Ensures:
++ */
++typedef u32(*nldr_ovlyfxn) (void *priv_ref, u32 dsp_run_addr,
++ u32 dsp_load_addr, u32 ul_num_bytes, u32 mem_space);
++
++/*
++ * ======== nldr_writefxn ========
++ * Write memory function. Used for dynamic load writes.
++ * Parameters:
++ * priv_ref: Handle to identify the node.
++ * dsp_add: Address of code or data.
++ * pbuf: Code or data to be written
++ * ul_num_bytes: Number of (GPP) bytes to write.
++ * mem_space: DBLL_DATA or DBLL_CODE.
++ * Returns:
++ * ul_num_bytes: Success.
++ * 0: Failure.
++ * Requires:
++ * Ensures:
++ */
++typedef u32(*nldr_writefxn) (void *priv_ref,
++ u32 dsp_add, void *pbuf,
++ u32 ul_num_bytes, u32 mem_space);
++
++/*
++ * ======== nldr_attrs ========
++ * Attributes passed to nldr_create function.
++ */
++struct nldr_attrs {
++ nldr_ovlyfxn pfn_ovly;
++ nldr_writefxn pfn_write;
++ u16 us_dsp_word_size;
++ u16 us_dsp_mau_size;
++};
++
++/*
++ * ======== nldr_phase ========
++ * Indicates node create, delete, or execute phase function.
++ */
++enum nldr_phase {
++ NLDR_CREATE,
++ NLDR_DELETE,
++ NLDR_EXECUTE,
++ NLDR_NOPHASE
++};
++
++/*
++ * Typedefs of loader functions imported from a DLL, or defined in a
++ * function table.
++ */
++
++/*
++ * ======== nldr_allocate ========
++ * Allocate resources to manage the loading of a node on the DSP.
++ *
++ * Parameters:
++ * nldr_obj: Handle of loader that will load the node.
++ * priv_ref: Handle to identify the node.
++ * node_props: Pointer to a dcd_nodeprops for the node.
++ * nldr_nodeobj: Location to store node handle on output. This handle
++ * will be passed to nldr_load/nldr_unload.
++ * pf_phase_split: pointer to int variable referenced in node.c
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory on GPP.
++ * Requires:
++ * nldr_init(void) called.
++ * Valid nldr_obj.
++ * node_props != NULL.
++ * nldr_nodeobj != NULL.
++ * Ensures:
++ * 0: IsValidNode(*nldr_nodeobj).
++ * error: *nldr_nodeobj == NULL.
++ */
++typedef int(*nldr_allocatefxn) (struct nldr_object *nldr_obj,
++ void *priv_ref,
++ const struct dcd_nodeprops
++ * node_props,
++ struct nldr_nodeobject
++ **nldr_nodeobj,
++ bool *pf_phase_split);
++
++/*
++ * ======== nldr_create ========
++ * Create a loader object. This object handles the loading and unloading of
++ * create, delete, and execute phase functions of nodes on the DSP target.
++ *
++ * Parameters:
++ * nldr: Location to store loader handle on output.
++ * hdev_obj: Device for this processor.
++ * pattrs: Loader attributes.
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * Requires:
++ * nldr_init(void) called.
++ * nldr != NULL.
++ * hdev_obj != NULL.
++ * pattrs != NULL.
++ * Ensures:
++ * 0: Valid *nldr.
++ * error: *nldr == NULL.
++ */
++typedef int(*nldr_createfxn) (struct nldr_object **nldr,
++ struct dev_object *hdev_obj,
++ const struct nldr_attrs *pattrs);
++
++/*
++ * ======== nldr_delete ========
++ * Delete the NLDR loader.
++ *
++ * Parameters:
++ * nldr_obj: Node manager object.
++ * Returns:
++ * Requires:
++ * nldr_init(void) called.
++ * Valid nldr_obj.
++ * Ensures:
++ * nldr_obj invalid
++ */
++typedef void (*nldr_deletefxn) (struct nldr_object *nldr_obj);
++
++/*
++ * ======== nldr_exit ========
++ * Discontinue usage of NLDR module.
++ *
++ * Parameters:
++ * Returns:
++ * Requires:
++ * nldr_init(void) successfully called before.
++ * Ensures:
++ * Any resources acquired in nldr_init(void) will be freed when last NLDR
++ * client calls nldr_exit(void).
++ */
++typedef void (*nldr_exitfxn) (void);
++
++/*
++ * ======== NLDR_Free ========
++ * Free resources allocated in nldr_allocate.
++ *
++ * Parameters:
++ * nldr_node_obj: Handle returned from nldr_allocate().
++ * Returns:
++ * Requires:
++ * nldr_init(void) called.
++ * Valid nldr_node_obj.
++ * Ensures:
++ */
++typedef void (*nldr_freefxn) (struct nldr_nodeobject *nldr_node_obj);
++
++/*
++ * ======== nldr_get_fxn_addr ========
++ * Get address of create, delete, or execute phase function of a node on
++ * the DSP.
++ *
++ * Parameters:
++ * nldr_node_obj: Handle returned from nldr_allocate().
++ * str_fxn: Name of function.
++ * addr: Location to store function address.
++ * Returns:
++ * 0: Success.
++ * -ESPIPE: Address of function not found.
++ * Requires:
++ * nldr_init(void) called.
++ * Valid nldr_node_obj.
++ * addr != NULL;
++ * str_fxn != NULL;
++ * Ensures:
++ */
++typedef int(*nldr_getfxnaddrfxn) (struct nldr_nodeobject
++ * nldr_node_obj,
++ char *str_fxn, u32 * addr);
++
++/*
++ * ======== nldr_init ========
++ * Initialize the NLDR module.
++ *
++ * Parameters:
++ * Returns:
++ * TRUE if initialization succeeded, FALSE otherwise.
++ * Ensures:
++ */
++typedef bool(*nldr_initfxn) (void);
++
++/*
++ * ======== nldr_load ========
++ * Load create, delete, or execute phase function of a node on the DSP.
++ *
++ * Parameters:
++ * nldr_node_obj: Handle returned from nldr_allocate().
++ * phase: Type of function to load (create, delete, or execute).
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory on GPP.
++ * -ENXIO: Can't overlay phase because overlay memory
++ * is already in use.
++ * -EILSEQ: Failure in dynamic loader library.
++ * Requires:
++ * nldr_init(void) called.
++ * Valid nldr_node_obj.
++ * Ensures:
++ */
++typedef int(*nldr_loadfxn) (struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase);
++
++/*
++ * ======== nldr_unload ========
++ * Unload create, delete, or execute phase function of a node on the DSP.
++ *
++ * Parameters:
++ * nldr_node_obj: Handle returned from nldr_allocate().
++ * phase: Node function to unload (create, delete, or execute).
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory on GPP.
++ * Requires:
++ * nldr_init(void) called.
++ * Valid nldr_node_obj.
++ * Ensures:
++ */
++typedef int(*nldr_unloadfxn) (struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase);
++
++/*
++ * ======== node_ldr_fxns ========
++ */
++struct node_ldr_fxns {
++ nldr_allocatefxn pfn_allocate;
++ nldr_createfxn pfn_create;
++ nldr_deletefxn pfn_delete;
++ nldr_exitfxn pfn_exit;
++ nldr_getfxnaddrfxn pfn_get_fxn_addr;
++ nldr_initfxn pfn_init;
++ nldr_loadfxn pfn_load;
++ nldr_unloadfxn pfn_unload;
++};
++
++#endif /* NLDRDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/node.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/node.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,583 @@
++/*
++ * node.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Node Manager.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef NODE_
++#define NODE_
++
++#include <dspbridge/procpriv.h>
++
++#include <dspbridge/nodedefs.h>
++#include <dspbridge/dispdefs.h>
++#include <dspbridge/nldrdefs.h>
++#include <dspbridge/drv.h>
++
++/*
++ * ======== node_allocate ========
++ * Purpose:
++ * Allocate GPP resources to manage a node on the DSP.
++ * Parameters:
++ * hprocessor: Handle of processor that is allocating the node.
++ * node_uuid: Pointer to a dsp_uuid for the node.
++ * pargs: Optional arguments to be passed to the node.
++ * attr_in: Optional pointer to node attributes (priority,
++ * timeout...)
++ * noderes: Location to store node resource info.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Insufficient memory on GPP.
++ * -ENOKEY: Node UUID has not been registered.
++ * -ESPIPE: iAlg functions not found for a DAIS node.
++ * -EDOM: attr_in != NULL and attr_in->prio out of
++ * range.
++ * -EPERM: A failure occurred, unable to allocate node.
++ * -EBADR: Processor is not in the running state.
++ * Requires:
++ * node_init(void) called.
++ * hprocessor != NULL.
++ * node_uuid != NULL.
++ * noderes != NULL.
++ * Ensures:
++ * 0: IsValidNode(*ph_node).
++ * error: *noderes == NULL.
++ */
++extern int node_allocate(struct proc_object *hprocessor,
++ const struct dsp_uuid *node_uuid,
++ const struct dsp_cbdata
++ *pargs, const struct dsp_nodeattrin
++ *attr_in,
++ struct node_res_object **noderes,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== node_alloc_msg_buf ========
++ * Purpose:
++ * Allocate and Prepare a buffer whose descriptor will be passed to a
++ * Node within a (dsp_msg)message
++ * Parameters:
++ * hnode: The node handle.
++ * usize: The size of the buffer to be allocated.
++ * pattr: Pointer to a dsp_bufferattr structure.
++ * pbuffer: Location to store the address of the allocated
++ * buffer on output.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid node handle.
++ * -ENOMEM: Insufficient memory.
++ * -EPERM: General Failure.
++ * -EINVAL: Invalid Size.
++ * Requires:
++ * node_init(void) called.
++ * pbuffer != NULL.
++ * Ensures:
++ */
++extern int node_alloc_msg_buf(struct node_object *hnode,
++ u32 usize, struct dsp_bufferattr
++ *pattr, u8 **pbuffer);
++
++/*
++ * ======== node_change_priority ========
++ * Purpose:
++ * Change the priority of an allocated node.
++ * Parameters:
++ * hnode: Node handle returned from node_allocate.
++ * prio: New priority level to set node's priority to.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EDOM: prio is out of range.
++ * -EPERM: The specified node is not a task node.
++ * Unable to change node's runtime priority level.
++ * -EBADR: Node is not in the NODE_ALLOCATED, NODE_PAUSED,
++ * or NODE_RUNNING state.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * Requires:
++ * node_init(void) called.
++ * Ensures:
++ * 0 && (Node's current priority == prio)
++ */
++extern int node_change_priority(struct node_object *hnode, s32 prio);
++
++/*
++ * ======== node_close_orphans ========
++ * Purpose:
++ * Delete all nodes whose owning processor is being destroyed.
++ * Parameters:
++ * hnode_mgr: Node manager object.
++ * proc: Handle to processor object being destroyed.
++ * Returns:
++ * 0: Success.
++ * -EPERM: Unable to delete all nodes belonging to proc.
++ * Requires:
++ * Valid hnode_mgr.
++ * proc != NULL.
++ * Ensures:
++ */
++extern int node_close_orphans(struct node_mgr *hnode_mgr,
++ struct proc_object *proc);
++
++/*
++ * ======== node_connect ========
++ * Purpose:
++ * Connect two nodes on the DSP, or a node on the DSP to the GPP. In the
++ * case that the connnection is being made between a node on the DSP and
++ * the GPP, one of the node handles (either node1 or node2) must be
++ * the constant NODE_HGPPNODE.
++ * Parameters:
++ * node1: Handle of first node to connect to second node. If
++ * this is a connection from the GPP to node2, node1
++ * must be the constant NODE_HGPPNODE. Otherwise, node1
++ * must be a node handle returned from a successful call
++ * to Node_Allocate().
++ * node2: Handle of second node. Must be either NODE_HGPPNODE
++ * if this is a connection from DSP node to GPP, or a
++ * node handle returned from a successful call to
++ * node_allocate().
++ * stream1: Output stream index on first node, to be connected
++ * to second node's input stream. Value must range from
++ * 0 <= stream1 < number of output streams.
++ * stream2: Input stream index on second node. Value must range
++ * from 0 <= stream2 < number of input streams.
++ * pattrs: Stream attributes (NULL ==> use defaults).
++ * conn_param: A pointer to a dsp_cbdata structure that defines
++ * connection parameter for device nodes to pass to DSP
++ * side.
++ * If the value of this parameter is NULL, then this API
++ * behaves like DSPNode_Connect. This parameter will have
++ * length of the string and the null terminated string in
++ * dsp_cbdata struct. This can be extended in future to
++ * pass binary data.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid node1 or node2.
++ * -ENOMEM: Insufficient host memory.
++ * -EINVAL: A stream index parameter is invalid.
++ * -EISCONN: A connection already exists for one of the
++ * indices stream1 or stream2.
++ * -EBADR: Either node1 or node2 is not in the
++ * NODE_ALLOCATED state.
++ * -ECONNREFUSED: No more connections available.
++ * -EPERM: Attempt to make an illegal connection (eg,
++ * Device node to device node, or device node to
++ * GPP), the two nodes are on different DSPs.
++ * Requires:
++ * node_init(void) called.
++ * Ensures:
++ */
++extern int node_connect(struct node_object *node1,
++ u32 stream1,
++ struct node_object *node2,
++ u32 stream2,
++ struct dsp_strmattr *pattrs,
++ struct dsp_cbdata
++ *conn_param);
++
++/*
++ * ======== node_create ========
++ * Purpose:
++ * Create a node on the DSP by remotely calling the node's create
++ * function. If necessary, load code that contains the node's create
++ * function.
++ * Parameters:
++ * hnode: Node handle returned from node_allocate().
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -ESPIPE: Create function not found in the COFF file.
++ * -EBADR: Node is not in the NODE_ALLOCATED state.
++ * -ENOMEM: Memory allocation failure on the DSP.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * -EPERM: A failure occurred, unable to create node.
++ * Requires:
++ * node_init(void) called.
++ * Ensures:
++ */
++extern int node_create(struct node_object *hnode);
++
++/*
++ * ======== node_create_mgr ========
++ * Purpose:
++ * Create a NODE Manager object. This object handles the creation,
++ * deletion, and execution of nodes on the DSP target. The NODE Manager
++ * also maintains a pipe map of used and available node connections.
++ * Each DEV object should have exactly one NODE Manager object.
++ *
++ * Parameters:
++ * node_man: Location to store node manager handle on output.
++ * hdev_obj: Device for this processor.
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EPERM: General failure.
++ * Requires:
++ * node_init(void) called.
++ * node_man != NULL.
++ * hdev_obj != NULL.
++ * Ensures:
++ * 0: Valid *node_man.
++ * error: *node_man == NULL.
++ */
++extern int node_create_mgr(struct node_mgr **node_man,
++ struct dev_object *hdev_obj);
++
++/*
++ * ======== node_delete ========
++ * Purpose:
++ * Delete resources allocated in node_allocate(). If the node was
++ * created, delete the node on the DSP by remotely calling the node's
++ * delete function. Loads the node's delete function if necessary.
++ * GPP side resources are freed after node's delete function returns.
++ * Parameters:
++ * noderes: Node resource info handle returned from
++ * node_allocate().
++ * pr_ctxt: Pointer to process context data.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * -EPERM: A failure occurred in deleting the node.
++ * -ESPIPE: Delete function not found in the COFF file.
++ * Requires:
++ * node_init(void) called.
++ * Ensures:
++ * 0: hnode is invalid.
++ */
++extern int node_delete(struct node_res_object *noderes,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== node_delete_mgr ========
++ * Purpose:
++ * Delete the NODE Manager.
++ * Parameters:
++ * hnode_mgr: Node manager object.
++ * Returns:
++ * 0: Success.
++ * Requires:
++ * node_init(void) called.
++ * Valid hnode_mgr.
++ * Ensures:
++ */
++extern int node_delete_mgr(struct node_mgr *hnode_mgr);
++
++/*
++ * ======== node_enum_nodes ========
++ * Purpose:
++ * Enumerate the nodes currently allocated for the DSP.
++ * Parameters:
++ * hnode_mgr: Node manager returned from node_create_mgr().
++ * node_tab: Array to copy node handles into.
++ * node_tab_size: Number of handles that can be written to node_tab.
++ * pu_num_nodes: Location where number of node handles written to
++ * node_tab will be written.
++ * pu_allocated: Location to write total number of allocated nodes.
++ * Returns:
++ * 0: Success.
++ * -EINVAL: node_tab is too small to hold all node handles.
++ * Requires:
++ * Valid hnode_mgr.
++ * node_tab != NULL || node_tab_size == 0.
++ * pu_num_nodes != NULL.
++ * pu_allocated != NULL.
++ * Ensures:
++ * - (-EINVAL && *pu_num_nodes == 0)
++ * - || (0 && *pu_num_nodes <= node_tab_size) &&
++ * (*pu_allocated == *pu_num_nodes)
++ */
++extern int node_enum_nodes(struct node_mgr *hnode_mgr,
++ void **node_tab,
++ u32 node_tab_size,
++ u32 *pu_num_nodes,
++ u32 *pu_allocated);
++
++/*
++ * ======== node_exit ========
++ * Purpose:
++ * Discontinue usage of NODE module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * node_init(void) successfully called before.
++ * Ensures:
++ * Any resources acquired in node_init(void) will be freed when last NODE
++ * client calls node_exit(void).
++ */
++extern void node_exit(void);
++
++/*
++ * ======== node_free_msg_buf ========
++ * Purpose:
++ * Free a message buffer previously allocated with node_alloc_msg_buf.
++ * Parameters:
++ * hnode: The node handle.
++ * pbuffer: (Address) Buffer allocated by node_alloc_msg_buf.
++ * pattr: Same buffer attributes passed to node_alloc_msg_buf.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid node handle.
++ * -EPERM: Failure to free the buffer.
++ * Requires:
++ * node_init(void) called.
++ * pbuffer != NULL.
++ * Ensures:
++ */
++extern int node_free_msg_buf(struct node_object *hnode,
++ u8 *pbuffer,
++ struct dsp_bufferattr
++ *pattr);
++
++/*
++ * ======== node_get_attr ========
++ * Purpose:
++ * Copy the current attributes of the specified node into a dsp_nodeattr
++ * structure.
++ * Parameters:
++ * hnode: Node object allocated from node_allocate().
++ * pattr: Pointer to dsp_nodeattr structure to copy node's
++ * attributes.
++ * attr_size: Size of pattr.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * Requires:
++ * node_init(void) called.
++ * pattr != NULL.
++ * Ensures:
++ * 0: *pattrs contains the node's current attributes.
++ */
++extern int node_get_attr(struct node_object *hnode,
++ struct dsp_nodeattr *pattr, u32 attr_size);
++
++/*
++ * ======== node_get_message ========
++ * Purpose:
++ * Retrieve a message from a node on the DSP. The node must be either a
++ * message node, task node, or XDAIS socket node.
++ * If a message is not available, this function will block until a
++ * message is available, or the node's timeout value is reached.
++ * Parameters:
++ * hnode: Node handle returned from node_allocate().
++ * message: Pointer to dsp_msg structure to copy the
++ * message into.
++ * utimeout: Timeout in milliseconds to wait for message.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EPERM: Cannot retrieve messages from this type of node.
++ * Error occurred while trying to retrieve a message.
++ * -ETIME: Timeout occurred and no message is available.
++ * Requires:
++ * node_init(void) called.
++ * message != NULL.
++ * Ensures:
++ */
++extern int node_get_message(struct node_object *hnode,
++ struct dsp_msg *message, u32 utimeout);
++
++/*
++ * ======== node_get_nldr_obj ========
++ * Purpose:
++ * Retrieve the Nldr manager
++ * Parameters:
++ * hnode_mgr: Node Manager
++ * nldr_ovlyobj: Pointer to a Nldr manager handle
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * Ensures:
++ */
++extern int node_get_nldr_obj(struct node_mgr *hnode_mgr,
++ struct nldr_object **nldr_ovlyobj);
++
++/*
++ * ======== node_init ========
++ * Purpose:
++ * Initialize the NODE module.
++ * Parameters:
++ * Returns:
++ * TRUE if initialization succeeded, FALSE otherwise.
++ * Ensures:
++ */
++extern bool node_init(void);
++
++/*
++ * ======== node_on_exit ========
++ * Purpose:
++ * Gets called when RMS_EXIT is received for a node. PROC needs to pass
++ * this function as a parameter to msg_create(). This function then gets
++ * called by the Bridge driver when an exit message for a node is received.
++ * Parameters:
++ * hnode: Handle of the node that the exit message is for.
++ * node_status: Return status of the node's execute phase.
++ * Returns:
++ * Ensures:
++ */
++void node_on_exit(struct node_object *hnode, s32 node_status);
++
++/*
++ * ======== node_pause ========
++ * Purpose:
++ * Suspend execution of a node currently running on the DSP.
++ * Parameters:
++ * hnode: Node object representing a node currently
++ * running on the DSP.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EPERM: Node is not a task or socket node.
++ * Failed to pause node.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * DSP_EWRONGSTSATE: Node is not in NODE_RUNNING state.
++ * Requires:
++ * node_init(void) called.
++ * Ensures:
++ */
++extern int node_pause(struct node_object *hnode);
++
++/*
++ * ======== node_put_message ========
++ * Purpose:
++ * Send a message to a message node, task node, or XDAIS socket node.
++ * This function will block until the message stream can accommodate
++ * the message, or a timeout occurs. The message will be copied, so Msg
++ * can be re-used immediately after return.
++ * Parameters:
++ * hnode: Node handle returned by node_allocate().
++ * pmsg: Location of message to be sent to the node.
++ * utimeout: Timeout in msecs to wait.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EPERM: Messages can't be sent to this type of node.
++ * Unable to send message.
++ * -ETIME: Timeout occurred before message could be set.
++ * -EBADR: Node is in invalid state for sending messages.
++ * Requires:
++ * node_init(void) called.
++ * pmsg != NULL.
++ * Ensures:
++ */
++extern int node_put_message(struct node_object *hnode,
++ const struct dsp_msg *pmsg, u32 utimeout);
++
++/*
++ * ======== node_register_notify ========
++ * Purpose:
++ * Register to be notified on specific events for this node.
++ * Parameters:
++ * hnode: Node handle returned by node_allocate().
++ * event_mask: Mask of types of events to be notified about.
++ * notify_type: Type of notification to be sent.
++ * hnotification: Handle to be used for notification.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -ENOMEM: Insufficient memory on GPP.
++ * -EINVAL: event_mask is invalid.
++ * -ENOSYS: Notification type specified by notify_type is not
++ * supported.
++ * Requires:
++ * node_init(void) called.
++ * hnotification != NULL.
++ * Ensures:
++ */
++extern int node_register_notify(struct node_object *hnode,
++ u32 event_mask, u32 notify_type,
++ struct dsp_notification
++ *hnotification);
++
++/*
++ * ======== node_run ========
++ * Purpose:
++ * Start execution of a node's execute phase, or resume execution of
++ * a node that has been suspended (via node_pause()) on the DSP. Load
++ * the node's execute function if necessary.
++ * Parameters:
++ * hnode: Node object representing a node currently
++ * running on the DSP.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EPERM: hnode doesn't represent a message, task or dais socket node.
++ * Unable to start or resume execution.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * DSP_EWRONGSTSATE: Node is not in NODE_PAUSED or NODE_CREATED state.
++ * -ESPIPE: Execute function not found in the COFF file.
++ * Requires:
++ * node_init(void) called.
++ * Ensures:
++ */
++extern int node_run(struct node_object *hnode);
++
++/*
++ * ======== node_terminate ========
++ * Purpose:
++ * Signal a node running on the DSP that it should exit its execute
++ * phase function.
++ * Parameters:
++ * hnode: Node object representing a node currently
++ * running on the DSP.
++ * pstatus: Location to store execute-phase function return
++ * value.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -ETIME: A timeout occurred before the DSP responded.
++ * -EPERM: Type of node specified cannot be terminated.
++ * Unable to terminate the node.
++ * -EBADR: Operation not valid for the current node state.
++ * Requires:
++ * node_init(void) called.
++ * pstatus != NULL.
++ * Ensures:
++ */
++extern int node_terminate(struct node_object *hnode,
++ int *pstatus);
++
++/*
++ * ======== node_get_uuid_props ========
++ * Purpose:
++ * Fetch Node properties given the UUID
++ * Parameters:
++ *
++ */
++extern int node_get_uuid_props(void *hprocessor,
++ const struct dsp_uuid *node_uuid,
++ struct dsp_ndbprops
++ *node_props);
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/**
++ * node_find_addr() - Find the closest symbol to the given address.
++ *
++ * @node_mgr: Node manager handle
++ * @sym_addr: Given address to find the closest symbol
++ * @offset_range: offset range to look for the closest symbol
++ * @sym_addr_output: Symbol Output address
++ * @sym_name: String with the symbol name of the closest symbol
++ *
++ * This function finds the closest symbol to the address where a MMU
++ * Fault occurred on the DSP side.
++ */
++int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
++ u32 offset_range, void *sym_addr_output,
++ char *sym_name);
++
++enum node_state node_get_state(void *hnode);
++#endif
++
++#endif /* NODE_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,28 @@
++/*
++ * nodedefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global NODE constants and types, shared by PROCESSOR, NODE, and DISP.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef NODEDEFS_
++#define NODEDEFS_
++
++#define NODE_SUSPENDEDPRI -1
++
++/* NODE Objects: */
++struct node_mgr;
++struct node_object;
++
++#endif /* NODEDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,182 @@
++/*
++ * nodepriv.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Private node header shared by NODE and DISP.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef NODEPRIV_
++#define NODEPRIV_
++
++#include <dspbridge/strmdefs.h>
++#include <dspbridge/nodedefs.h>
++#include <dspbridge/nldrdefs.h>
++
++/* DSP address of node environment structure */
++typedef u32 nodeenv;
++
++/*
++ * Node create structures
++ */
++
++/* Message node */
++struct node_msgargs {
++ u32 max_msgs; /* Max # of simultaneous messages for node */
++ u32 seg_id; /* Segment for allocating message buffers */
++ u32 notify_type; /* Notify type (SEM_post, SWI_post, etc.) */
++ u32 arg_length; /* Length in 32-bit words of arg data block */
++ u8 *pdata; /* Argument data for node */
++};
++
++struct node_strmdef {
++ u32 buf_size; /* Size of buffers for SIO stream */
++ u32 num_bufs; /* max # of buffers in SIO stream at once */
++ u32 seg_id; /* Memory segment id to allocate buffers */
++ u32 utimeout; /* Timeout for blocking SIO calls */
++ u32 buf_alignment; /* Buffer alignment */
++ char *sz_device; /* Device name for stream */
++};
++
++/* Task node */
++struct node_taskargs {
++ struct node_msgargs node_msg_args;
++ s32 prio;
++ u32 stack_size;
++ u32 sys_stack_size;
++ u32 stack_seg;
++ u32 udsp_heap_res_addr; /* DSP virtual heap address */
++ u32 udsp_heap_addr; /* DSP virtual heap address */
++ u32 heap_size; /* Heap size */
++ u32 ugpp_heap_addr; /* GPP virtual heap address */
++ u32 profile_id; /* Profile ID */
++ u32 num_inputs;
++ u32 num_outputs;
++ u32 ul_dais_arg; /* Address of iAlg object */
++ struct node_strmdef *strm_in_def;
++ struct node_strmdef *strm_out_def;
++};
++
++/*
++ * ======== node_createargs ========
++ */
++struct node_createargs {
++ union {
++ struct node_msgargs node_msg_args;
++ struct node_taskargs task_arg_obj;
++ } asa;
++};
++
++/*
++ * ======== node_get_channel_id ========
++ * Purpose:
++ * Get the channel index reserved for a stream connection between the
++ * host and a node. This index is reserved when node_connect() is called
++ * to connect the node with the host. This index should be passed to
++ * the CHNL_Open function when the stream is actually opened.
++ * Parameters:
++ * hnode: Node object allocated from node_allocate().
++ * dir: Input (DSP_TONODE) or output (DSP_FROMNODE).
++ * index: Stream index.
++ * chan_id: Location to store channel index.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EPERM: Not a task or DAIS socket node.
++ * -EINVAL: The node's stream corresponding to index and dir
++ * is not a stream to or from the host.
++ * Requires:
++ * node_init(void) called.
++ * Valid dir.
++ * chan_id != NULL.
++ * Ensures:
++ */
++extern int node_get_channel_id(struct node_object *hnode,
++ u32 dir, u32 index, u32 *chan_id);
++
++/*
++ * ======== node_get_strm_mgr ========
++ * Purpose:
++ * Get the STRM manager for a node.
++ * Parameters:
++ * hnode: Node allocated with node_allocate().
++ * strm_man: Location to store STRM manager on output.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * Requires:
++ * strm_man != NULL.
++ * Ensures:
++ */
++extern int node_get_strm_mgr(struct node_object *hnode,
++ struct strm_mgr **strm_man);
++
++/*
++ * ======== node_get_timeout ========
++ * Purpose:
++ * Get the timeout value of a node.
++ * Parameters:
++ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
++ * Returns:
++ * Node's timeout value.
++ * Requires:
++ * Valid hnode.
++ * Ensures:
++ */
++extern u32 node_get_timeout(struct node_object *hnode);
++
++/*
++ * ======== node_get_type ========
++ * Purpose:
++ * Get the type (device, message, task, or XDAIS socket) of a node.
++ * Parameters:
++ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
++ * Returns:
++ * Node type: NODE_DEVICE, NODE_TASK, NODE_XDAIS, or NODE_GPP.
++ * Requires:
++ * Valid hnode.
++ * Ensures:
++ */
++extern enum node_type node_get_type(struct node_object *hnode);
++
++/*
++ * ======== get_node_info ========
++ * Purpose:
++ * Get node information without holding semaphore.
++ * Parameters:
++ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
++ * Returns:
++ * Node info: priority, device owner, no. of streams, execution state
++ * NDB properties.
++ * Requires:
++ * Valid hnode.
++ * Ensures:
++ */
++extern void get_node_info(struct node_object *hnode,
++ struct dsp_nodeinfo *node_info);
++
++/*
++ * ======== node_get_load_type ========
++ * Purpose:
++ * Get the load type (dynamic, overlay, static) of a node.
++ * Parameters:
++ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
++ * Returns:
++ * Node type: NLDR_DYNAMICLOAD, NLDR_OVLYLOAD, NLDR_STATICLOAD
++ * Requires:
++ * Valid hnode.
++ * Ensures:
++ */
++extern enum nldr_loadtype node_get_load_type(struct node_object *hnode);
++
++#endif /* NODEPRIV_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/ntfy.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/ntfy.h 2010-08-18 11:24:23.198052688 +0300
+@@ -0,0 +1,217 @@
++/*
++ * ntfy.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Manage lists of notification events.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef NTFY_
++#define NTFY_
++
++#include <dspbridge/host_os.h>
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/sync.h>
++
++/**
++ * ntfy_object - head structure to notify dspbridge events
++ * @head: List of notify objects
++ * @ntfy_lock: lock for list access.
++ *
++ */
++struct ntfy_object {
++ struct raw_notifier_head head;/* List of notifier objects */
++ spinlock_t ntfy_lock; /* For critical sections */
++};
++
++/**
++ * ntfy_event - structure store specify event to be notified
++ * @noti_block: List of notify objects
++ * @event: event that it respond
++ * @type: event type (only DSP_SIGNALEVENT supported)
++ * @sync_obj: sync_event used to set the event
++ *
++ */
++struct ntfy_event {
++ struct notifier_block noti_block;
++ u32 event; /* Events to be notified about */
++ u32 type; /* Type of notification to be sent */
++ struct sync_object sync_obj;
++};
++
++
++/**
++ * dsp_notifier_event() - callback function to notify events
++ * @this: pointer to itself struct notifier_block
++ * @event: event to be notified.
++ * @data: Currently not used.
++ *
++ */
++int dsp_notifier_event(struct notifier_block *this, unsigned long event,
++ void *data);
++
++/**
++ * ntfy_init() - Set the initial state of the ntfy_object structure.
++ * @no: pointer to ntfy_object structure.
++ *
++ * This function sets the initial state of the ntfy_object so that it
++ * can be used by the other ntfy functions.
++ */
++
++static inline void ntfy_init(struct ntfy_object *no)
++{
++ spin_lock_init(&no->ntfy_lock);
++ RAW_INIT_NOTIFIER_HEAD(&no->head);
++}
++
++/**
++ * ntfy_delete() - delete list of notify events registered.
++ * @ntfy_obj: Pointer to the ntfy object structure.
++ *
++ * This function is used to remove all the notify events registered.
++ * unregister function is not needed in this function, to unregister
++ * a ntfy_event please look at the ntfy_unregister function.
++ *
++ */
++static inline void ntfy_delete(struct ntfy_object *ntfy_obj)
++{
++ struct ntfy_event *ne;
++ struct notifier_block *nb;
++
++ spin_lock_bh(&ntfy_obj->ntfy_lock);
++ nb = ntfy_obj->head.head;
++ while (nb) {
++ ne = container_of(nb, struct ntfy_event, noti_block);
++ nb = nb->next;
++ kfree(ne);
++ }
++ spin_unlock_bh(&ntfy_obj->ntfy_lock);
++}
++
++/**
++ * ntfy_notify() - notify all events registered for a specific event.
++ * @ntfy_obj: Pointer to the ntfy_object structure.
++ * @event: event to be notified.
++ *
++ * This function traverses all the registered ntfy events and
++ * sets the event which matches @event.
++ */
++static inline void ntfy_notify(struct ntfy_object *ntfy_obj, u32 event)
++{
++ spin_lock_bh(&ntfy_obj->ntfy_lock);
++ raw_notifier_call_chain(&ntfy_obj->head, event, NULL);
++ spin_unlock_bh(&ntfy_obj->ntfy_lock);
++}
++
++
++
++/**
++ * ntfy_event_create() - Create and initialize a ntfy_event structure.
++ * @event: event that the ntfy event will respond
++ * @type: event type (only DSP_SIGNALEVENT supported)
++ *
++ * This function create a ntfy_event element and sets the event it will
++ * respond the ntfy_event in order it can be used by the other ntfy functions.
++ * In case of success it will return a pointer to the ntfy_event struct
++ * created. Otherwise it will return NULL;
++ */
++
++static inline struct ntfy_event *ntfy_event_create(u32 event, u32 type)
++{
++ struct ntfy_event *ne;
++ ne = kmalloc(sizeof(struct ntfy_event), GFP_KERNEL);
++ if (ne) {
++ sync_init_event(&ne->sync_obj);
++ ne->noti_block.notifier_call = dsp_notifier_event;
++ ne->event = event;
++ ne->type = type;
++ }
++ return ne;
++}
++
++/**
++ * ntfy_register() - register new ntfy_event into a given ntfy_object
++ * @ntfy_obj: Pointer to the ntfy_object structure.
++ * @noti: Pointer to the handle to be returned to the user space.
++ * @event: event that the ntfy event will respond
++ * @type: event type (only DSP_SIGNALEVENT supported)
++ *
++ * This function register a new ntfy_event into the ntfy_object list,
++ * which will respond to the @event passed.
++ * This function will return 0 in case of success,
++ * -EFAULT in case of bad pointers and
++ * -ENOMEM in case of no memory to create the ntfy_event.
++ */
++static inline int ntfy_register(struct ntfy_object *ntfy_obj,
++ struct dsp_notification *noti,
++ u32 event, u32 type)
++{
++ struct ntfy_event *ne;
++ int status = 0;
++
++ if (!noti || !ntfy_obj) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ if (!event) {
++ status = -EINVAL;
++ goto func_end;
++ }
++ ne = ntfy_event_create(event, type);
++ if (!ne) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ noti->handle = &ne->sync_obj;
++
++ spin_lock_bh(&ntfy_obj->ntfy_lock);
++ raw_notifier_chain_register(&ntfy_obj->head, &ne->noti_block);
++ spin_unlock_bh(&ntfy_obj->ntfy_lock);
++func_end:
++ return status;
++}
++
++/**
++ * ntfy_unregister() - unregister a ntfy_event from a given ntfy_object
++ * @ntfy_obj: Pointer to the ntfy_object structure.
++ * @noti: Pointer to the event that will be removed.
++ *
++ * This function unregister a ntfy_event from the ntfy_object list,
++ * @noti contains the event which is wanted to be removed.
++ * This function will return 0 in case of success and
++ * -EFAULT in case of bad pointers.
++ *
++ */
++static inline int ntfy_unregister(struct ntfy_object *ntfy_obj,
++ struct dsp_notification *noti)
++{
++ int status = 0;
++ struct ntfy_event *ne;
++
++ if (!noti || !ntfy_obj) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ ne = container_of((struct sync_object *)noti, struct ntfy_event,
++ sync_obj);
++ spin_lock_bh(&ntfy_obj->ntfy_lock);
++ raw_notifier_chain_unregister(&ntfy_obj->head,
++ &ne->noti_block);
++ kfree(ne);
++ spin_unlock_bh(&ntfy_obj->ntfy_lock);
++func_end:
++ return status;
++}
++
++#endif /* NTFY_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/proc.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/proc.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,621 @@
++/*
++ * proc.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This is the DSP API RM module interface.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef PROC_
++#define PROC_
++
++#include <dspbridge/cfgdefs.h>
++#include <dspbridge/devdefs.h>
++#include <dspbridge/drv.h>
++
++extern char *iva_img;
++
++/*
++ * ======== proc_attach ========
++ * Purpose:
++ * Prepare for communication with a particular DSP processor, and return
++ * a handle to the processor object. The PROC Object gets created
++ * Parameters:
++ * processor_id : The processor index (zero-based).
++ * hmgr_obj : Handle to the Manager Object
++ * attr_in : Ptr to the dsp_processorattrin structure.
++ * A NULL value means use default values.
++ * ph_processor : Ptr to location to store processor handle.
++ * Returns:
++ * 0 : Success.
++ * -EPERM : General failure.
++ * -EFAULT : Invalid processor handle.
++ * 0: Success; Processor already attached.
++ * Requires:
++ * ph_processor != NULL.
++ * PROC Initialized.
++ * Ensures:
++ * -EPERM, and *ph_processor == NULL, OR
++ * Success and *ph_processor is a Valid Processor handle OR
++ * 0 and *ph_processor is a Valid Processor.
++ * Details:
++ * When attr_in is NULL, the default timeout value is 10 seconds.
++ */
++extern int proc_attach(u32 processor_id,
++ const struct dsp_processorattrin
++ *attr_in, void **ph_processor,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== proc_auto_start =========
++ * Purpose:
++ * A Particular device gets loaded with the default image
++ * if the AutoStart flag is set.
++ * Parameters:
++ * hdev_obj : Handle to the Device
++ * Returns:
++ * 0 : On Successful Loading
++ * -ENOENT : No DSP exec file found.
++ * -EPERM : General Failure
++ * Requires:
++ * hdev_obj != NULL.
++ * dev_node_obj != NULL.
++ * PROC Initialized.
++ * Ensures:
++ */
++extern int proc_auto_start(struct cfg_devnode *dev_node_obj,
++ struct dev_object *hdev_obj);
++
++/*
++ * ======== proc_ctrl ========
++ * Purpose:
++ * Pass control information to the GPP device driver managing the DSP
++ * processor. This will be an OEM-only function, and not part of the
++ * 'Bridge application developer's API.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * dw_cmd : Private driver IOCTL cmd ID.
++ * pargs : Ptr to an driver defined argument structure.
++ * Returns:
++ * 0 : SUCCESS
++ * -EFAULT : Invalid processor handle.
++ * -ETIME: A timeout occurred before the Control information
++ * could be sent.
++ * -EPERM : General Failure.
++ * Requires:
++ * PROC Initialized.
++ * Ensures
++ * Details:
++ * This function Calls bridge_dev_ctrl.
++ */
++extern int proc_ctrl(void *hprocessor,
++ u32 dw_cmd, struct dsp_cbdata *arg);
++
++/*
++ * ======== proc_detach ========
++ * Purpose:
++ * Close a DSP processor and de-allocate all (GPP) resources reserved
++ * for it. The Processor Object is deleted.
++ * Parameters:
++ * pr_ctxt : The processor handle.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : InValid Handle.
++ * -EPERM : General failure.
++ * Requires:
++ * PROC Initialized.
++ * Ensures:
++ * PROC Object is destroyed.
++ */
++extern int proc_detach(struct process_context *pr_ctxt);
++
++/*
++ * ======== proc_enum_nodes ========
++ * Purpose:
++ * Enumerate the nodes currently allocated on a processor.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * node_tab : The first Location of an array allocated for node
++ * handles.
++ * node_tab_size: The number of (DSP_HNODE) handles that can be held
++ * to the memory the client has allocated for node_tab
++ * pu_num_nodes : Location where DSPProcessor_EnumNodes will return
++ * the number of valid handles written to node_tab
++ * pu_allocated : Location where DSPProcessor_EnumNodes will return
++ * the number of nodes that are allocated on the DSP.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EINVAL : The amount of memory allocated for node_tab is
++ * insufficient. That is the number of nodes actually
++ * allocated on the DSP is greater than the value
++ * specified for node_tab_size.
++ * -EPERM : Unable to get Resource Information.
++ * Details:
++ * Requires
++ * pu_num_nodes is not NULL.
++ * pu_allocated is not NULL.
++ * node_tab is not NULL.
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_enum_nodes(void *hprocessor,
++ void **node_tab,
++ u32 node_tab_size,
++ u32 *pu_num_nodes,
++ u32 *pu_allocated);
++
++/*
++ * ======== proc_get_resource_info ========
++ * Purpose:
++ * Enumerate the resources currently available on a processor.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * resource_type: Type of resource .
++ * resource_info: Ptr to the dsp_resourceinfo structure.
++ * resource_info_size: Size of the structure.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EBADR: The processor is not in the PROC_RUNNING state.
++ * -ETIME: A timeout occurred before the DSP responded to the
++ * query.
++ * -EPERM : Unable to get Resource Information
++ * Requires:
++ * resource_info is not NULL.
++ * Parameter resource_type is Valid.[TBD]
++ * resource_info_size is >= sizeof dsp_resourceinfo struct.
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ * This function currently returns
++ * -ENOSYS, and does not write any data to the resource_info struct.
++ */
++extern int proc_get_resource_info(void *hprocessor,
++ u32 resource_type,
++ struct dsp_resourceinfo
++ *resource_info,
++ u32 resource_info_size);
++
++/*
++ * ======== proc_exit ========
++ * Purpose:
++ * Decrement reference count, and free resources when reference count is
++ * 0.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * PROC is initialized.
++ * Ensures:
++ * When reference count == 0, PROC's private resources are freed.
++ */
++extern void proc_exit(void);
++
++/*
++ * ======== proc_get_dev_object =========
++ * Purpose:
++ * Returns the DEV Handle for a given Processor handle
++ * Parameters:
++ * hprocessor : Processor Handle
++ * device_obj : Location to store the DEV Handle.
++ * Returns:
++ * 0 : Success; *device_obj has Dev handle
++ * -EPERM : Failure; *device_obj is zero.
++ * Requires:
++ * device_obj is not NULL
++ * PROC Initialized.
++ * Ensures:
++ * 0 : *device_obj is not NULL
++ * -EPERM : *device_obj is NULL.
++ */
++extern int proc_get_dev_object(void *hprocessor,
++ struct dev_object **device_obj);
++
++/*
++ * ======== proc_init ========
++ * Purpose:
++ * Initialize PROC's private state, keeping a reference count on each
++ * call.
++ * Parameters:
++ * Returns:
++ * TRUE if initialized; FALSE if error occurred.
++ * Requires:
++ * Ensures:
++ * TRUE: A requirement for the other public PROC functions.
++ */
++extern bool proc_init(void);
++
++/*
++ * ======== proc_get_state ========
++ * Purpose:
++ * Report the state of the specified DSP processor.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * proc_state_obj : Ptr to location to store the dsp_processorstate
++ * structure.
++ * state_info_size: Size of dsp_processorstate.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure while querying processor state.
++ * Requires:
++ * proc_state_obj is not NULL
++ * state_info_size is >= than the size of dsp_processorstate structure.
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_get_state(void *hprocessor, struct dsp_processorstate
++ *proc_state_obj, u32 state_info_size);
++
++/*
++ * ======== PROC_GetProcessorID ========
++ * Purpose:
++ * Return the processor ID of the specified DSP processor.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * proc_id : Processor ID
++ *
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure while querying processor state.
++ * Requires:
++ * proc_state_obj is not NULL
++ * state_info_size is >= than the size of dsp_processorstate structure.
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_get_processor_id(void *proc, u32 * proc_id);
++
++/*
++ * ======== proc_get_trace ========
++ * Purpose:
++ * Retrieve the trace buffer from the specified DSP processor.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * pbuf : Ptr to buffer to hold trace output.
++ * max_size : Maximum size of the output buffer.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure while retrieving processor trace
++ * Buffer.
++ * Requires:
++ * pbuf is not NULL
++ * max_size is > 0.
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size);
++
++/*
++ * ======== proc_load ========
++ * Purpose:
++ * Reset a processor and load a new base program image.
++ * This will be an OEM-only function.
++ * Parameters:
++ * hprocessor: The processor handle.
++ * argc_index: The number of Arguments(strings)in the aArgV[]
++ * user_args: An Array of Arguments(Unicode Strings)
++ * user_envp: An Array of Environment settings(Unicode Strings)
++ * Returns:
++ * 0: Success.
++ * -ENOENT: The DSP executable was not found.
++ * -EFAULT: Invalid processor handle.
++ * -EPERM : Unable to Load the Processor
++ * Requires:
++ * user_args is not NULL
++ * argc_index is > 0
++ * PROC Initialized.
++ * Ensures:
++ * Success and ProcState == PROC_LOADED
++ * or DSP_FAILED status.
++ * Details:
++ * Does not implement access rights to control which GPP application
++ * can load the processor.
++ */
++extern int proc_load(void *hprocessor,
++ const s32 argc_index, const char **user_args,
++ const char **user_envp);
++
++/*
++ * ======== proc_register_notify ========
++ * Purpose:
++ * Register to be notified of specific processor events
++ * Parameters:
++ * hprocessor : The processor handle.
++ * event_mask : Mask of types of events to be notified about.
++ * notify_type : Type of notification to be sent.
++ * hnotification: Handle to be used for notification.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle or hnotification.
++ * -EINVAL : Parameter event_mask is Invalid
++ * DSP_ENOTIMP : The notification type specified in uNotifyMask
++ * is not supported.
++ * -EPERM : Unable to register for notification.
++ * Requires:
++ * hnotification is not NULL
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_register_notify(void *hprocessor,
++ u32 event_mask, u32 notify_type,
++ struct dsp_notification
++ *hnotification);
++
++/*
++ * ======== proc_notify_clients ========
++ * Purpose:
++ * Notify the Processor Clients
++ * Parameters:
++ * proc : The processor handle.
++ * events : Event to be notified about.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : Failure to Set or Reset the Event
++ * Requires:
++ * events is Supported or Valid type of Event
++ * proc is a valid handle
++ * PROC Initialized.
++ * Ensures:
++ */
++extern int proc_notify_clients(void *proc, u32 events);
++
++/*
++ * ======== proc_notify_all_clients ========
++ * Purpose:
++ * Notify the Processor Clients
++ * Parameters:
++ * proc : The processor handle.
++ * events : Event to be notified about.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : Failure to Set or Reset the Event
++ * Requires:
++ * events is Supported or Valid type of Event
++ * proc is a valid handle
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ * NODE And STRM would use this function to notify their clients
++ * about the state changes in NODE or STRM.
++ */
++extern int proc_notify_all_clients(void *proc, u32 events);
++
++/*
++ * ======== proc_start ========
++ * Purpose:
++ * Start a processor running.
++ * Processor must be in PROC_LOADED state.
++ * This will be an OEM-only function, and not part of the 'Bridge
++ * application developer's API.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EBADR: Processor is not in PROC_LOADED state.
++ * -EPERM : Unable to start the processor.
++ * Requires:
++ * PROC Initialized.
++ * Ensures:
++ * Success and ProcState == PROC_RUNNING or DSP_FAILED status.
++ * Details:
++ */
++extern int proc_start(void *hprocessor);
++
++/*
++ * ======== proc_stop ========
++ * Purpose:
++ * Stop a processor running.
++ * Processor must be in PROC_LOADED state.
++ * This will be an OEM-only function, and not part of the 'Bridge
++ * application developer's API.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EBADR: Processor is not in PROC_LOADED state.
++ * -EPERM : Unable to start the processor.
++ * Requires:
++ * PROC Initialized.
++ * Ensures:
++ * Success and ProcState == PROC_RUNNING or DSP_FAILED status.
++ * Details:
++ */
++extern int proc_stop(void *hprocessor);
++
++/*
++ * ======== proc_end_dma ========
++ * Purpose:
++ * End a DMA transfer
++ * Parameters:
++ * hprocessor : The processor handle.
++ * pmpu_addr : Buffer start address
++ * ul_size : Buffer size
++ * dir : The direction of the transfer
++ * Requires:
++ * Memory was previously mapped.
++ */
++extern int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
++ enum dma_data_direction dir);
++/*
++ * ======== proc_begin_dma ========
++ * Purpose:
++ * Begin a DMA transfer
++ * Parameters:
++ * hprocessor : The processor handle.
++ * pmpu_addr : Buffer start address
++ * ul_size : Buffer size
++ * dir : The direction of the transfer
++ * Requires:
++ * Memory was previously mapped.
++ */
++extern int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
++ enum dma_data_direction dir);
++
++/*
++ * ======== proc_flush_memory ========
++ * Purpose:
++ * Flushes a buffer from the MPU data cache.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * pmpu_addr : Buffer start address
++ * ul_size : Buffer size
++ * ul_flags : Reserved.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure.
++ * Requires:
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ * All the arguments are currently ignored.
++ */
++extern int proc_flush_memory(void *hprocessor,
++ void *pmpu_addr, u32 ul_size, u32 ul_flags);
++
++/*
++ * ======== proc_invalidate_memory ========
++ * Purpose:
++ * Invalidates a buffer from the MPU data cache.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * pmpu_addr : Buffer start address
++ * ul_size : Buffer size
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure.
++ * Requires:
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ * All the arguments are currently ignored.
++ */
++extern int proc_invalidate_memory(void *hprocessor,
++ void *pmpu_addr, u32 ul_size);
++
++/*
++ * ======== proc_map ========
++ * Purpose:
++ * Maps a MPU buffer to DSP address space.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * pmpu_addr : Starting address of the memory region to map.
++ * ul_size : Size of the memory region to map.
++ * req_addr : Requested DSP start address. Offset-adjusted actual
++ * mapped address is in the last argument.
++ * pp_map_addr : Ptr to DSP side mapped u8 address.
++ * ul_map_attr : Optional endianness attributes, virt to phys flag.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure.
++ * -ENOMEM : MPU side memory allocation error.
++ * -ENOENT : Cannot find a reserved region starting with this
++ * : address.
++ * Requires:
++ * pmpu_addr is not NULL
++ * ul_size is not zero
++ * pp_map_addr is not NULL
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_map(void *hprocessor,
++ void *pmpu_addr,
++ u32 ul_size,
++ void *req_addr,
++ void **pp_map_addr, u32 ul_map_attr,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== proc_reserve_memory ========
++ * Purpose:
++ * Reserve a virtually contiguous region of DSP address space.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * ul_size : Size of the address space to reserve.
++ * pp_rsv_addr : Ptr to DSP side reserved u8 address.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure.
++ * -ENOMEM : Cannot reserve chunk of this size.
++ * Requires:
++ * pp_rsv_addr is not NULL
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_reserve_memory(void *hprocessor,
++ u32 ul_size, void **pp_rsv_addr,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== proc_un_map ========
++ * Purpose:
++ * Removes a MPU buffer mapping from the DSP address space.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * map_addr : Starting address of the mapped memory region.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure.
++ * -ENOENT : Cannot find a mapped region starting with this
++ * : address.
++ * Requires:
++ * map_addr is not NULL
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_un_map(void *hprocessor, void *map_addr,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== proc_un_reserve_memory ========
++ * Purpose:
++ * Frees a previously reserved region of DSP address space.
++ * Parameters:
++ * hprocessor : The processor handle.
++ * prsv_addr : Ptr to DSP side reserved byte address.
++ * Returns:
++ * 0 : Success.
++ * -EFAULT : Invalid processor handle.
++ * -EPERM : General failure.
++ * -ENOENT : Cannot find a reserved region starting with this
++ * : address.
++ * Requires:
++ * prsv_addr is not NULL
++ * PROC Initialized.
++ * Ensures:
++ * Details:
++ */
++extern int proc_un_reserve_memory(void *hprocessor,
++ void *prsv_addr,
++ struct process_context *pr_ctxt);
++
++#endif /* PROC_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/procpriv.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/procpriv.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,25 @@
++/*
++ * procpriv.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global PROC constants and types, shared by PROC, MGR and DSP API.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef PROCPRIV_
++#define PROCPRIV_
++
++/* RM PROC Object */
++struct proc_object;
++
++#endif /* PROCPRIV_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/pwr.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/pwr.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,107 @@
++/*
++ * pwr.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef PWR_
++#define PWR_
++
++#include <dspbridge/dbdefs.h>
++#include <dspbridge/pwr_sh.h>
++
++/*
++ * ======== pwr_sleep_dsp ========
++ * Signal the DSP to go to sleep.
++ *
++ * Parameters:
++ * sleep_code: New sleep state for DSP. (Initially, valid codes
++ * are PWR_DEEPSLEEP or PWR_EMERGENCYDEEPSLEEP; both of
++ * these codes will simply put the DSP in deep sleep.)
++ *
++ * timeout: Maximum time (msec) that PWR should wait for
++ * confirmation that the DSP sleep state has been
++ * reached. If PWR should simply send the command to
++ * the DSP to go to sleep and then return (i.e.,
++ * asynchronous sleep), the timeout should be
++ * specified as zero.
++ *
++ * Returns:
++ * 0: Success.
++ * 0: Success, but the DSP was already asleep.
++ * -EINVAL: The specified sleep_code is not supported.
++ * -ETIME: A timeout occurred while waiting for DSP sleep
++ * confirmation.
++ * -EPERM: General failure, unable to send sleep command to
++ * the DSP.
++ */
++extern int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout);
++
++/*
++ * ======== pwr_wake_dsp ========
++ * Signal the DSP to wake from sleep.
++ *
++ * Parameters:
++ * timeout: Maximum time (msec) that PWR should wait for
++ * confirmation that the DSP is awake. If PWR should
++ * simply send a command to the DSP to wake and then
++ * return (i.e., asynchronous wake), timeout should
++ * be specified as zero.
++ *
++ * Returns:
++ * 0: Success.
++ * 0: Success, but the DSP was already awake.
++ * -ETIME: A timeout occurred while waiting for wake
++ * confirmation.
++ * -EPERM: General failure, unable to send wake command to
++ * the DSP.
++ */
++extern int pwr_wake_dsp(const u32 timeout);
++
++/*
++ * ======== pwr_pm_pre_scale ========
++ * Prescale notification to DSP.
++ *
++ * Parameters:
++ * voltage_domain: The voltage domain for which notification is sent
++ * level: The level of voltage domain
++ *
++ * Returns:
++ * 0: Success.
++ * 0: Success, but the DSP was already awake.
++ * -ETIME: A timeout occurred while waiting for wake
++ * confirmation.
++ * -EPERM: General failure, unable to send wake command to
++ * the DSP.
++ */
++extern int pwr_pm_pre_scale(u16 voltage_domain, u32 level);
++
++/*
++ * ======== pwr_pm_post_scale ========
++ * PostScale notification to DSP.
++ *
++ * Parameters:
++ * voltage_domain: The voltage domain for which notification is sent
++ * level: The level of voltage domain
++ *
++ * Returns:
++ * 0: Success.
++ * 0: Success, but the DSP was already awake.
++ * -ETIME: A timeout occurred while waiting for wake
++ * confirmation.
++ * -EPERM: General failure, unable to send wake command to
++ * the DSP.
++ */
++extern int pwr_pm_post_scale(u16 voltage_domain, u32 level);
++
++#endif /* PWR_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,33 @@
++/*
++ * pwr_sh.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Power Manager shared definitions (used on both GPP and DSP sides).
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef PWR_SH_
++#define PWR_SH_
++
++#include <dspbridge/mbx_sh.h>
++
++/* valid sleep command codes that can be sent by GPP via mailbox: */
++#define PWR_DEEPSLEEP MBX_PM_DSPIDLE
++#define PWR_EMERGENCYDEEPSLEEP MBX_PM_EMERGENCYSLEEP
++#define PWR_SLEEPUNTILRESTART MBX_PM_SLEEPUNTILRESTART
++#define PWR_WAKEUP MBX_PM_DSPWAKEUP
++#define PWR_AUTOENABLE MBX_PM_PWRENABLE
++#define PWR_AUTODISABLE MBX_PM_PWRDISABLE
++#define PWR_RETENTION MBX_PM_DSPRETN
++
++#endif /* PWR_SH_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,52 @@
++/*
++ * resourcecleanup.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <dspbridge/nodepriv.h>
++#include <dspbridge/drv.h>
++
++extern int drv_get_proc_ctxt_list(struct process_context **pctxt,
++ struct drv_object *hdrv_obj);
++
++extern int drv_insert_proc_context(struct drv_object *driver_obj,
++ void *process_ctxt);
++
++extern int drv_remove_all_dmm_res_elements(void *process_ctxt);
++
++extern int drv_remove_all_node_res_elements(void *process_ctxt);
++
++extern int drv_proc_set_pid(void *ctxt, s32 process);
++
++extern int drv_remove_all_resources(void *process_ctxt);
++
++extern int drv_remove_proc_context(struct drv_object *driver_obj,
++ void *pr_ctxt);
++
++extern int drv_insert_node_res_element(void *hnode, void *node_resource,
++ void *process_ctxt);
++
++extern void drv_proc_node_update_heap_status(void *node_resource, s32 status);
++
++extern void drv_proc_node_update_status(void *node_resource, s32 status);
++
++extern int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources);
++
++extern int drv_proc_insert_strm_res_element(void *stream_obj,
++ void *strm_res,
++ void *process_ctxt);
++
++extern int drv_remove_all_strm_res_elements(void *process_ctxt);
++
++extern enum node_state node_get_state(void *hnode);
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/rmm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/rmm.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,181 @@
++/*
++ * rmm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This memory manager provides general heap management and arbitrary
++ * alignment for any number of memory segments, and management of overlay
++ * memory.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef RMM_
++#define RMM_
++
++/*
++ * ======== rmm_addr ========
++ * DSP address + segid
++ */
++struct rmm_addr {
++ u32 addr;
++ s32 segid;
++};
++
++/*
++ * ======== rmm_segment ========
++ * Memory segment on the DSP available for remote allocations.
++ */
++struct rmm_segment {
++ u32 base; /* Base of the segment */
++ u32 length; /* Size of the segment (target MAUs) */
++ s32 space; /* Code or data */
++ u32 number; /* Number of Allocated Blocks */
++};
++
++/*
++ * ======== RMM_Target ========
++ */
++struct rmm_target_obj;
++
++/*
++ * ======== rmm_alloc ========
++ *
++ * rmm_alloc is used to remotely allocate or reserve memory on the DSP.
++ *
++ * Parameters:
++ * target - Target returned from rmm_create().
++ * segid - Memory segment to allocate from.
++ * size - Size (target MAUS) to allocate.
++ * align - alignment.
++ * dsp_address - If reserve is FALSE, the location to store allocated
++ * address on output, otherwise, the DSP address to
++ * reserve.
++ * reserve - If TRUE, reserve the memory specified by dsp_address.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation on GPP failed.
++ * -ENXIO: Cannot "allocate" overlay memory because it's
++ * already in use.
++ * Requires:
++ * RMM initialized.
++ * Valid target.
++ * dsp_address != NULL.
++ * size > 0
++ * reserve || target->num_segs > 0.
++ * Ensures:
++ */
++extern int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
++ u32 align, u32 *dsp_address, bool reserve);
++
++/*
++ * ======== rmm_create ========
++ * Create a target object with memory segments for remote allocation. If
++ * seg_tab == NULL or num_segs == 0, memory can only be reserved through
++ * rmm_alloc().
++ *
++ * Parameters:
++ * target_obj: - Location to store target on output.
++ * seg_tab: - Table of memory segments.
++ * num_segs: - Number of memory segments.
++ * Returns:
++ * 0: Success.
++ * -ENOMEM: Memory allocation failed.
++ * Requires:
++ * RMM initialized.
++ * target_obj != NULL.
++ * num_segs == 0 || seg_tab != NULL.
++ * Ensures:
++ * Success: Valid *target_obj.
++ * Failure: *target_obj == NULL.
++ */
++extern int rmm_create(struct rmm_target_obj **target_obj,
++ struct rmm_segment seg_tab[], u32 num_segs);
++
++/*
++ * ======== rmm_delete ========
++ * Delete target allocated in rmm_create().
++ *
++ * Parameters:
++ * target - Target returned from rmm_create().
++ * Returns:
++ * Requires:
++ * RMM initialized.
++ * Valid target.
++ * Ensures:
++ */
++extern void rmm_delete(struct rmm_target_obj *target);
++
++/*
++ * ======== rmm_exit ========
++ * Exit the RMM module
++ *
++ * Parameters:
++ * Returns:
++ * Requires:
++ * rmm_init successfully called.
++ * Ensures:
++ */
++extern void rmm_exit(void);
++
++/*
++ * ======== rmm_free ========
++ * Free or unreserve memory allocated through rmm_alloc().
++ *
++ * Parameters:
++ * target: - Target returned from rmm_create().
++ * segid: - Segment of memory to free.
++ * dsp_address: - Address to free or unreserve.
++ * size: - Size of memory to free or unreserve.
++ * reserved: - TRUE if memory was reserved only, otherwise FALSE.
++ * Returns:
++ * Requires:
++ * RMM initialized.
++ * Valid target.
++ * reserved || segid < target->num_segs.
++ * reserve || [dsp_address, dsp_address + size] is a valid memory range.
++ * Ensures:
++ */
++extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr,
++ u32 size, bool reserved);
++
++/*
++ * ======== rmm_init ========
++ * Initialize the RMM module
++ *
++ * Parameters:
++ * Returns:
++ * TRUE: Success.
++ * FALSE: Failure.
++ * Requires:
++ * Ensures:
++ */
++extern bool rmm_init(void);
++
++/*
++ * ======== rmm_stat ========
++ * Obtain memory segment status
++ *
++ * Parameters:
++ * segid: Segment ID of the dynamic loading segment.
++ * mem_stat_buf: Pointer to allocated buffer into which memory stats are
++ * placed.
++ * Returns:
++ * TRUE: Success.
++ * FALSE: Failure.
++ * Requires:
++ * segid < target->num_segs
++ * Ensures:
++ */
++extern bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
++ struct dsp_memstat *mem_stat_buf);
++
++#endif /* RMM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,95 @@
++/*
++ * rms_sh.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Resource Manager Server shared definitions (used on both
++ * GPP and DSP sides).
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef RMS_SH_
++#define RMS_SH_
++
++#include <dspbridge/rmstypes.h>
++
++/* Node Types: */
++#define RMS_TASK 1 /* Task node */
++#define RMS_DAIS 2 /* xDAIS socket node */
++#define RMS_MSG 3 /* Message node */
++
++/* Memory Types: */
++#define RMS_CODE 0 /* Program space */
++#define RMS_DATA 1 /* Data space */
++#define RMS_IO 2 /* I/O space */
++
++/* RM Server Command and Response Buffer Sizes: */
++#define RMS_COMMANDBUFSIZE 256 /* Size of command buffer */
++#define RMS_RESPONSEBUFSIZE 16 /* Size of response buffer */
++
++/* Pre-Defined Command/Response Codes: */
++#define RMS_EXIT 0x80000000 /* GPP->Node: shutdown */
++#define RMS_EXITACK 0x40000000 /* Node->GPP: ack shutdown */
++#define RMS_BUFDESC 0x20000000 /* Arg1 SM buf, Arg2 SM size */
++#define RMS_KILLTASK 0x10000000 /* GPP->Node: Kill Task */
++#define RMS_USER 0x0 /* Start of user-defined msg codes */
++#define RMS_MAXUSERCODES 0xfff /* Maximum user defined C/R Codes */
++
++/* RM Server RPC Command Structure: */
++struct rms_command {
++ rms_word fxn; /* Server function address */
++ rms_word arg1; /* First argument */
++ rms_word arg2; /* Second argument */
++ rms_word data; /* Function-specific data array */
++};
++
++/*
++ * The rms_strm_def structure defines the parameters for both input and output
++ * streams, and is passed to a node's create function.
++ */
++struct rms_strm_def {
++ rms_word bufsize; /* Buffer size (in DSP words) */
++ rms_word nbufs; /* Max number of bufs in stream */
++ rms_word segid; /* Segment to allocate buffers */
++ rms_word align; /* Alignment for allocated buffers */
++ rms_word timeout; /* Timeout (msec) for blocking calls */
++ char name[1]; /* Device Name (terminated by '\0') */
++};
++
++/* Message node create args structure: */
++struct rms_msg_args {
++ rms_word max_msgs; /* Max # simultaneous msgs to node */
++ rms_word segid; /* Mem segment for NODE_allocMsgBuf */
++ rms_word notify_type; /* Type of message notification */
++ rms_word arg_length; /* Length (in DSP chars) of arg data */
++ rms_word arg_data; /* Arg data for node */
++};
++
++/* Partial task create args structure */
++struct rms_more_task_args {
++ rms_word priority; /* Task's runtime priority level */
++ rms_word stack_size; /* Task's stack size */
++ rms_word sysstack_size; /* Task's system stack size (55x) */
++ rms_word stack_seg; /* Memory segment for task's stack */
++ rms_word heap_addr; /* base address of the node memory heap in
++ * external memory (DSP virtual address) */
++ rms_word heap_size; /* size in MAUs of the node memory heap in
++ * external memory */
++ rms_word misc; /* Misc field. Not used for 'normal'
++ * task nodes; for xDAIS socket nodes
++ * specifies the IALG_Fxn pointer.
++ */
++ /* # input STRM definition structures */
++ rms_word num_input_streams;
++};
++
++#endif /* RMS_SH_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,24 @@
++/*
++ * rmstypes.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Resource Manager Server shared data type definitions.
++ *
++ * Copyright (C) 2008 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef RMSTYPES_
++#define RMSTYPES_
++#include <linux/types.h>
++typedef u32 rms_word;
++
++#endif /* RMSTYPES_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/services.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/services.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,50 @@
++/*
++ * services.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Provide loading and unloading of SERVICES modules.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef SERVICES_
++#define SERVICES_
++
++#include <dspbridge/host_os.h>
++/*
++ * ======== services_exit ========
++ * Purpose:
++ * Discontinue usage of module; free resources when reference count
++ * reaches 0.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * SERVICES initialized.
++ * Ensures:
++ * Resources used by module are freed when cRef reaches zero.
++ */
++extern void services_exit(void);
++
++/*
++ * ======== services_init ========
++ * Purpose:
++ * Initializes SERVICES modules.
++ * Parameters:
++ * Returns:
++ * TRUE if all modules initialized; otherwise FALSE.
++ * Requires:
++ * Ensures:
++ * SERVICES modules initialized.
++ */
++extern bool services_init(void);
++
++#endif /* SERVICES_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/strm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/strm.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,404 @@
++/*
++ * strm.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSPBridge Stream Manager.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef STRM_
++#define STRM_
++
++#include <dspbridge/dev.h>
++
++#include <dspbridge/strmdefs.h>
++#include <dspbridge/proc.h>
++
++/*
++ * ======== strm_allocate_buffer ========
++ * Purpose:
++ * Allocate data buffer(s) for use with a stream.
++ * Parameter:
++ * strmres: Stream resource info handle returned from strm_open().
++ * usize: Size (GPP bytes) of the buffer(s).
++ * num_bufs: Number of buffers to allocate.
++ * ap_buffer: Array to hold buffer addresses.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -ENOMEM: Insufficient memory.
++ * -EPERM: Failure occurred, unable to allocate buffers.
++ * -EINVAL: usize must be > 0 bytes.
++ * Requires:
++ * strm_init(void) called.
++ * ap_buffer != NULL.
++ * Ensures:
++ */
++extern int strm_allocate_buffer(struct strm_res_object *strmres,
++ u32 usize,
++ u8 **ap_buffer,
++ u32 num_bufs,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== strm_close ========
++ * Purpose:
++ * Close a stream opened with strm_open().
++ * Parameter:
++ * strmres: Stream resource info handle returned from strm_open().
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -EPIPE: Some data buffers issued to the stream have not
++ * been reclaimed.
++ * -EPERM: Failure to close stream.
++ * Requires:
++ * strm_init(void) called.
++ * Ensures:
++ */
++extern int strm_close(struct strm_res_object *strmres,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== strm_create ========
++ * Purpose:
++ * Create a STRM manager object. This object holds information about the
++ * device needed to open streams.
++ * Parameters:
++ * strm_man: Location to store handle to STRM manager object on
++ * output.
++ * dev_obj: Device for this processor.
++ * Returns:
++ * 0: Success;
++ * -ENOMEM: Insufficient memory for requested resources.
++ * -EPERM: General failure.
++ * Requires:
++ * strm_init(void) called.
++ * strm_man != NULL.
++ * dev_obj != NULL.
++ * Ensures:
++ * 0: Valid *strm_man.
++ * error: *strm_man == NULL.
++ */
++extern int strm_create(struct strm_mgr **strm_man,
++ struct dev_object *dev_obj);
++
++/*
++ * ======== strm_delete ========
++ * Purpose:
++ * Delete the STRM Object.
++ * Parameters:
++ * strm_mgr_obj: Handle to STRM manager object from strm_create.
++ * Returns:
++ * Requires:
++ * strm_init(void) called.
++ * Valid strm_mgr_obj.
++ * Ensures:
++ * strm_mgr_obj is not valid.
++ */
++extern void strm_delete(struct strm_mgr *strm_mgr_obj);
++
++/*
++ * ======== strm_exit ========
++ * Purpose:
++ * Discontinue usage of STRM module.
++ * Parameters:
++ * Returns:
++ * Requires:
++ * strm_init(void) successfully called before.
++ * Ensures:
++ */
++extern void strm_exit(void);
++
++/*
++ * ======== strm_free_buffer ========
++ * Purpose:
++ * Free buffer(s) allocated with strm_allocate_buffer.
++ * Parameter:
++ * strmres: Stream resource info handle returned from strm_open().
++ * ap_buffer: Array containing buffer addresses.
++ * num_bufs: Number of buffers to be freed.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream handle.
++ * -EPERM: Failure occurred, unable to free buffers.
++ * Requires:
++ * strm_init(void) called.
++ * ap_buffer != NULL.
++ * Ensures:
++ */
++extern int strm_free_buffer(struct strm_res_object *strmres,
++ u8 **ap_buffer, u32 num_bufs,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== strm_get_event_handle ========
++ * Purpose:
++ * Get stream's user event handle. This function is used when closing
++ * a stream, so the event can be closed.
++ * Parameter:
++ * stream_obj: Stream handle returned from strm_open().
++ * ph_event: Location to store event handle on output.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * Requires:
++ * strm_init(void) called.
++ * ph_event != NULL.
++ * Ensures:
++ */
++extern int strm_get_event_handle(struct strm_object *stream_obj,
++ void **ph_event);
++
++/*
++ * ======== strm_get_info ========
++ * Purpose:
++ * Get information about a stream. User's dsp_streaminfo is contained
++ * in stream_info struct. stream_info also contains Bridge private info.
++ * Parameters:
++ * stream_obj: Stream handle returned from strm_open().
++ * stream_info: Location to store stream info on output.
++ * uSteamInfoSize: Size of user's dsp_streaminfo structure.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -EINVAL: stream_info_size < sizeof(dsp_streaminfo).
++ * -EPERM: Unable to get stream info.
++ * Requires:
++ * strm_init(void) called.
++ * stream_info != NULL.
++ * Ensures:
++ */
++extern int strm_get_info(struct strm_object *stream_obj,
++ struct stream_info *stream_info,
++ u32 stream_info_size);
++
++/*
++ * ======== strm_idle ========
++ * Purpose:
++ * Idle a stream and optionally flush output data buffers.
++ * If this is an output stream and flush_data is TRUE, all data currently
++ * enqueued will be discarded.
++ * If this is an output stream and flush_data is FALSE, this function
++ * will block until all currently buffered data is output, or the timeout
++ * specified has been reached.
++ * After a successful call to strm_idle(), all buffers can immediately
++ * be reclaimed.
++ * Parameters:
++ * stream_obj: Stream handle returned from strm_open().
++ * flush_data: If TRUE, discard output buffers.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -ETIME: A timeout occurred before the stream could be idled.
++ * -EPERM: Unable to idle stream.
++ * Requires:
++ * strm_init(void) called.
++ * Ensures:
++ */
++extern int strm_idle(struct strm_object *stream_obj, bool flush_data);
++
++/*
++ * ======== strm_init ========
++ * Purpose:
++ * Initialize the STRM module.
++ * Parameters:
++ * Returns:
++ * TRUE if initialization succeeded, FALSE otherwise.
++ * Requires:
++ * Ensures:
++ */
++extern bool strm_init(void);
++
++/*
++ * ======== strm_issue ========
++ * Purpose:
++ * Send a buffer of data to a stream.
++ * Parameters:
++ * stream_obj: Stream handle returned from strm_open().
++ * pbuf: Pointer to buffer of data to be sent to the stream.
++ * ul_bytes: Number of bytes of data in the buffer.
++ * ul_buf_size: Actual buffer size in bytes.
++ * dw_arg: A user argument that travels with the buffer.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -ENOSR: The stream is full.
++ * -EPERM: Failure occurred, unable to issue buffer.
++ * Requires:
++ * strm_init(void) called.
++ * pbuf != NULL.
++ * Ensures:
++ */
++extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
++ u32 ul_bytes, u32 ul_buf_size, u32 dw_arg);
++
++/*
++ * ======== strm_open ========
++ * Purpose:
++ * Open a stream for sending/receiving data buffers to/from a task of
++ * DAIS socket node on the DSP.
++ * Parameters:
++ * hnode: Node handle returned from node_allocate().
++ * dir: DSP_TONODE or DSP_FROMNODE.
++ * index: Stream index.
++ * pattr: Pointer to structure containing attributes to be
++ * applied to stream. Cannot be NULL.
++ * strmres: Location to store stream resuorce info handle on output.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hnode.
++ * -EPERM: Invalid direction.
++ * hnode is not a task or DAIS socket node.
++ * Unable to open stream.
++ * -EINVAL: Invalid index.
++ * Requires:
++ * strm_init(void) called.
++ * strmres != NULL.
++ * pattr != NULL.
++ * Ensures:
++ * 0: *strmres is valid.
++ * error: *strmres == NULL.
++ */
++extern int strm_open(struct node_object *hnode, u32 dir,
++ u32 index, struct strm_attr *pattr,
++ struct strm_res_object **strmres,
++ struct process_context *pr_ctxt);
++
++/*
++ * ======== strm_prepare_buffer ========
++ * Purpose:
++ * Prepare a data buffer not allocated by DSPStream_AllocateBuffers()
++ * for use with a stream.
++ * Parameter:
++ * stream_obj: Stream handle returned from strm_open().
++ * usize: Size (GPP bytes) of the buffer.
++ * pbuffer: Buffer address.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -EPERM: Failure occurred, unable to prepare buffer.
++ * Requires:
++ * strm_init(void) called.
++ * pbuffer != NULL.
++ * Ensures:
++ */
++extern int strm_prepare_buffer(struct strm_object *stream_obj,
++ u32 usize, u8 *pbuffer);
++
++/*
++ * ======== strm_reclaim ========
++ * Purpose:
++ * Request a buffer back from a stream.
++ * Parameters:
++ * stream_obj: Stream handle returned from strm_open().
++ * buf_ptr: Location to store pointer to reclaimed buffer.
++ * nbytes: Location where number of bytes of data in the
++ * buffer will be written.
++ * buff_size: Location where actual buffer size will be written.
++ * pdw_arg: Location where user argument that travels with
++ * the buffer will be written.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -ETIME: A timeout occurred before a buffer could be
++ * retrieved.
++ * -EPERM: Failure occurred, unable to reclaim buffer.
++ * Requires:
++ * strm_init(void) called.
++ * buf_ptr != NULL.
++ * nbytes != NULL.
++ * pdw_arg != NULL.
++ * Ensures:
++ */
++extern int strm_reclaim(struct strm_object *stream_obj,
++ u8 **buf_ptr, u32 * nbytes,
++ u32 *buff_size, u32 *pdw_arg);
++
++/*
++ * ======== strm_register_notify ========
++ * Purpose:
++ * Register to be notified on specific events for this stream.
++ * Parameters:
++ * stream_obj: Stream handle returned by strm_open().
++ * event_mask: Mask of types of events to be notified about.
++ * notify_type: Type of notification to be sent.
++ * hnotification: Handle to be used for notification.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -ENOMEM: Insufficient memory on GPP.
++ * -EINVAL: event_mask is invalid.
++ * -ENOSYS: Notification type specified by notify_type is not
++ * supported.
++ * Requires:
++ * strm_init(void) called.
++ * hnotification != NULL.
++ * Ensures:
++ */
++extern int strm_register_notify(struct strm_object *stream_obj,
++ u32 event_mask, u32 notify_type,
++ struct dsp_notification
++ *hnotification);
++
++/*
++ * ======== strm_select ========
++ * Purpose:
++ * Select a ready stream.
++ * Parameters:
++ * strm_tab: Array of stream handles returned from strm_open().
++ * strms: Number of stream handles in array.
++ * pmask: Location to store mask of ready streams on output.
++ * utimeout: Timeout value (milliseconds).
++ * Returns:
++ * 0: Success.
++ * -EDOM: strms out of range.
++
++ * -EFAULT: Invalid stream handle in array.
++ * -ETIME: A timeout occurred before a stream became ready.
++ * -EPERM: Failure occurred, unable to select a stream.
++ * Requires:
++ * strm_init(void) called.
++ * strm_tab != NULL.
++ * strms > 0.
++ * pmask != NULL.
++ * Ensures:
++ * 0: *pmask != 0 || utimeout == 0.
++ * Error: *pmask == 0.
++ */
++extern int strm_select(struct strm_object **strm_tab,
++ u32 strms, u32 *pmask, u32 utimeout);
++
++/*
++ * ======== strm_unprepare_buffer ========
++ * Purpose:
++ * Unprepare a data buffer that was previously prepared for a stream
++ * with DSPStream_PrepareBuffer(), and that will no longer be used with
++ * the stream.
++ * Parameter:
++ * stream_obj: Stream handle returned from strm_open().
++ * usize: Size (GPP bytes) of the buffer.
++ * pbuffer: Buffer address.
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid stream_obj.
++ * -EPERM: Failure occurred, unable to unprepare buffer.
++ * Requires:
++ * strm_init(void) called.
++ * pbuffer != NULL.
++ * Ensures:
++ */
++extern int strm_unprepare_buffer(struct strm_object *stream_obj,
++ u32 usize, u8 *pbuffer);
++
++#endif /* STRM_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h 2010-08-18 11:24:23.202053057 +0300
+@@ -0,0 +1,46 @@
++/*
++ * strmdefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global STRM constants and types.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef STRMDEFS_
++#define STRMDEFS_
++
++#define STRM_MAXEVTNAMELEN 32
++
++struct strm_mgr;
++
++struct strm_object;
++
++struct strm_attr {
++ void *user_event;
++ char *pstr_event_name;
++ void *virt_base; /* Process virtual base address of
++ * mapped SM */
++ u32 ul_virt_size; /* Size of virtual space in bytes */
++ struct dsp_streamattrin *stream_attr_in;
++};
++
++struct stream_info {
++ enum dsp_strmmode strm_mode; /* transport mode of
++ * stream(DMA, ZEROCOPY..) */
++ u32 segment_id; /* Segment strm allocs from. 0 is local mem */
++ void *virt_base; /* " " Stream'process virt base */
++ struct dsp_streaminfo *user_strm; /* User's stream information
++ * returned */
++};
++
++#endif /* STRMDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/sync.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/sync.h 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,109 @@
++/*
++ * sync.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Provide synchronization services.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _SYNC_H
++#define _SYNC_H
++
++#include <dspbridge/dbdefs.h>
++
++
++/* Special timeout value indicating an infinite wait: */
++#define SYNC_INFINITE 0xffffffff
++
++/**
++ * struct sync_object - the basic sync_object structure
++ * @comp: use to signal events
++ * @multi_comp: use to signal multiple events.
++ *
++ */
++struct sync_object{
++ struct completion comp;
++ struct completion *multi_comp;
++};
++
++/**
++ * sync_init_event() - set initial state for a sync_event element
++ * @event: event to be initialized.
++ *
++ * Set the initial state for a sync_event element.
++ */
++
++static inline void sync_init_event(struct sync_object *event)
++{
++ init_completion(&event->comp);
++ event->multi_comp = NULL;
++}
++
++/**
++ * sync_reset_event() - reset a sync_event element
++ * @event: event to be reset.
++ *
++ * This function reset to the initial state to @event.
++ */
++
++static inline void sync_reset_event(struct sync_object *event)
++{
++ INIT_COMPLETION(event->comp);
++ event->multi_comp = NULL;
++}
++
++/**
++ * sync_set_event() - set or signal and specified event
++ * @event: Event to be set..
++ *
++ * set the @event, if there is an thread waiting for the event
++ * it will be waken up, this function only wakes one thread.
++ */
++
++void sync_set_event(struct sync_object *event);
++
++/**
++ * sync_wait_on_event() - waits for a event to be set.
++ * @event: events to wait for it.
++ * @timeout timeout on waiting for the evetn.
++ *
++ * This functios will wait until @event is set or until timeout. In case of
++ * success the function will return 0 and
++ * in case of timeout the function will return -ETIME
++ */
++
++static inline int sync_wait_on_event(struct sync_object *event,
++ unsigned timeout)
++{
++ return wait_for_completion_timeout(&event->comp,
++ msecs_to_jiffies(timeout)) ? 0 : -ETIME;
++}
++
++/**
++ * sync_wait_on_multiple_events() - waits for multiple events to be set.
++ * @events: Array of events to wait for them.
++ * @count: number of elements of the array.
++ * @timeout timeout on waiting for the evetns.
++ * @pu_index index of the event set.
++ *
++ * This functios will wait until any of the array element is set or until
++ * timeout. In case of success the function will return 0 and
++ * @pu_index will store the index of the array element set and in case
++ * of timeout the function will return -ETIME.
++ */
++
++int sync_wait_on_multiple_events(struct sync_object **events,
++ unsigned count, unsigned timeout,
++ unsigned *index);
++
++#endif /* _SYNC_H */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/utildefs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/utildefs.h 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,39 @@
++/*
++ * utildefs.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Global UTIL constants and types, shared between DSP API and DSPSYS.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef UTILDEFS_
++#define UTILDEFS_
++
++/* constants taken from configmg.h */
++#define UTIL_MAXMEMREGS 9
++#define UTIL_MAXIOPORTS 20
++#define UTIL_MAXIRQS 7
++#define UTIL_MAXDMACHNLS 7
++
++/* misc. constants */
++#define UTIL_MAXARGVS 10
++
++/* Platform specific important info */
++struct util_sysinfo {
++ /* Granularity of page protection; usually 1k or 4k */
++ u32 dw_page_size;
++ u32 dw_allocation_granularity; /* VM granularity, usually 64K */
++ u32 dw_number_of_processors; /* Used as sanity check */
++};
++
++#endif /* UTILDEFS_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,62 @@
++/*
++ * uuidutil.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This file contains the specification of UUID helper functions.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef UUIDUTIL_
++#define UUIDUTIL_
++
++#define MAXUUIDLEN 37
++
++/*
++ * ======== uuid_uuid_to_string ========
++ * Purpose:
++ * Converts a dsp_uuid to an ANSI string.
++ * Parameters:
++ * uuid_obj: Pointer to a dsp_uuid object.
++ * sz_uuid: Pointer to a buffer to receive a NULL-terminated UUID
++ * string.
++ * size: Maximum size of the sz_uuid string.
++ * Returns:
++ * Requires:
++ * uuid_obj & sz_uuid are non-NULL values.
++ * Ensures:
++ * Lenghth of sz_uuid is less than MAXUUIDLEN.
++ * Details:
++ * UUID string limit currently set at MAXUUIDLEN.
++ */
++void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
++ s32 size);
++
++/*
++ * ======== uuid_uuid_from_string ========
++ * Purpose:
++ * Converts an ANSI string to a dsp_uuid.
++ * Parameters:
++ * sz_uuid: Pointer to a string that represents a dsp_uuid object.
++ * uuid_obj: Pointer to a dsp_uuid object.
++ * Returns:
++ * Requires:
++ * uuid_obj & sz_uuid are non-NULL values.
++ * Ensures:
++ * Details:
++ * We assume the string representation of a UUID has the following format:
++ * "12345678_1234_1234_1234_123456789abc".
++ */
++extern void uuid_uuid_from_string(char *sz_uuid,
++ struct dsp_uuid *uuid_obj);
++
++#endif /* UUIDUTIL_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/wdt.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/include/dspbridge/wdt.h 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,79 @@
++/*
++ * wdt.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * IO dispatcher for a shared memory channel driver.
++ *
++ * Copyright (C) 2010 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#ifndef __DSP_WDT3_H_
++#define __DSP_WDT3_H_
++
++/* WDT defines */
++#define OMAP3_WDT3_ISR_OFFSET 0x0018
++
++
++/**
++ * struct dsp_wdt_setting - the basic dsp_wdt_setting structure
++ * @reg_base: pointer to the base of the wdt registers
++ * @sm_wdt: pointer to flags in shared memory
++ * @wdt3_tasklet tasklet to manage wdt event
++ * @fclk handle to wdt3 functional clock
++ * @iclk handle to wdt3 interface clock
++ *
++ * This struct is used in the function to manage wdt3.
++ */
++
++struct dsp_wdt_setting {
++ void __iomem *reg_base;
++ struct shm *sm_wdt;
++ struct tasklet_struct wdt3_tasklet;
++ struct clk *fclk;
++ struct clk *iclk;
++};
++
++/**
++ * dsp_wdt_init() - initialize wdt3 module.
++ *
++ * This function initilize to wdt3 module, so that
++ * other wdt3 function can be used.
++ */
++int dsp_wdt_init(void);
++
++/**
++ * dsp_wdt_exit() - initialize wdt3 module.
++ *
++ * This function frees all resources allocated for wdt3 module.
++ */
++void dsp_wdt_exit(void);
++
++/**
++ * dsp_wdt_enable() - enable/disable wdt3
++ * @enable: bool value to enable/disable wdt3
++ *
++ * This function enables or disables wdt3 base on @enable value.
++ *
++ */
++void dsp_wdt_enable(bool enable);
++
++/**
++ * dsp_wdt_sm_set() - store pointer to the share memory
++ * @data: pointer to dspbridge share memory
++ *
++ * This function is used to pass a valid pointer to share memory,
++ * so that the flags can be set in order DSP side can read them.
++ *
++ */
++void dsp_wdt_sm_set(void *data);
++
++#endif
++
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/chnl.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/chnl.c 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,163 @@
++/*
++ * chnl.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP API channel interface: multiplexes data streams through the single
++ * physical link managed by a Bridge Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/proc.h>
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/chnlpriv.h>
++#include <chnlobj.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/chnl.h>
++
++/* ----------------------------------- Globals */
++static u32 refs;
++
++/*
++ * ======== chnl_create ========
++ * Purpose:
++ * Create a channel manager object, responsible for opening new channels
++ * and closing old ones for a given 'Bridge board.
++ */
++int chnl_create(struct chnl_mgr **channel_mgr,
++ struct dev_object *hdev_obj,
++ const struct chnl_mgrattrs *mgr_attrts)
++{
++ int status;
++ struct chnl_mgr *hchnl_mgr;
++ struct chnl_mgr_ *chnl_mgr_obj = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(channel_mgr != NULL);
++ DBC_REQUIRE(mgr_attrts != NULL);
++
++ *channel_mgr = NULL;
++
++ /* Validate args: */
++ if ((0 < mgr_attrts->max_channels) &&
++ (mgr_attrts->max_channels <= CHNL_MAXCHANNELS))
++ status = 0;
++ else if (mgr_attrts->max_channels == 0)
++ status = -EINVAL;
++ else
++ status = -ECHRNG;
++
++ if (mgr_attrts->word_size == 0)
++ status = -EINVAL;
++
++ if (!status) {
++ status = dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
++ if (!status && hchnl_mgr != NULL)
++ status = -EEXIST;
++
++ }
++
++ if (!status) {
++ struct bridge_drv_interface *intf_fxns;
++ dev_get_intf_fxns(hdev_obj, &intf_fxns);
++ /* Let Bridge channel module finish the create: */
++ status = (*intf_fxns->pfn_chnl_create) (&hchnl_mgr, hdev_obj,
++ mgr_attrts);
++ if (!status) {
++ /* Fill in DSP API channel module's fields of the
++ * chnl_mgr structure */
++ chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
++ chnl_mgr_obj->intf_fxns = intf_fxns;
++ /* Finally, return the new channel manager handle: */
++ *channel_mgr = hchnl_mgr;
++ }
++ }
++
++ DBC_ENSURE(status || chnl_mgr_obj);
++
++ return status;
++}
++
++/*
++ * ======== chnl_destroy ========
++ * Purpose:
++ * Close all open channels, and destroy the channel manager.
++ */
++int chnl_destroy(struct chnl_mgr *hchnl_mgr)
++{
++ struct chnl_mgr_ *chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
++ struct bridge_drv_interface *intf_fxns;
++ int status;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (chnl_mgr_obj) {
++ intf_fxns = chnl_mgr_obj->intf_fxns;
++ /* Let Bridge channel module destroy the chnl_mgr: */
++ status = (*intf_fxns->pfn_chnl_destroy) (hchnl_mgr);
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== chnl_exit ========
++ * Purpose:
++ * Discontinue usage of the CHNL module.
++ */
++void chnl_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== chnl_init ========
++ * Purpose:
++ * Initialize the CHNL module's private state.
++ */
++bool chnl_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/chnlobj.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/chnlobj.h 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,46 @@
++/*
++ * chnlobj.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Structure subcomponents of channel class library channel objects which
++ * are exposed to DSP API from Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef CHNLOBJ_
++#define CHNLOBJ_
++
++#include <dspbridge/chnldefs.h>
++#include <dspbridge/dspdefs.h>
++
++/*
++ * This struct is the first field in a chnl_mgr struct. Other. implementation
++ * specific fields follow this structure in memory.
++ */
++struct chnl_mgr_ {
++ /* These must be the first fields in a chnl_mgr struct: */
++
++ /* Function interface to Bridge driver. */
++ struct bridge_drv_interface *intf_fxns;
++};
++
++/*
++ * This struct is the first field in a chnl_object struct. Other,
++ * implementation specific fields follow this structure in memory.
++ */
++struct chnl_object_ {
++ /* These must be the first fields in a chnl_object struct: */
++ struct chnl_mgr_ *chnl_mgr_obj; /* Pointer back to channel manager. */
++};
++
++#endif /* CHNLOBJ_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/cmm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/cmm.c 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,1154 @@
++/*
++ * cmm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * The Communication(Shared) Memory Management(CMM) module provides
++ * shared memory management services for DSP/BIOS Bridge data streaming
++ * and messaging.
++ *
++ * Multiple shared memory segments can be registered with CMM.
++ * Each registered SM segment is represented by a SM "allocator" that
++ * describes a block of physically contiguous shared memory used for
++ * future allocations by CMM.
++ *
++ * Memory is coalesced back to the appropriate heap when a buffer is
++ * freed.
++ *
++ * Notes:
++ * Va: Virtual address.
++ * Pa: Physical or kernel system address.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/list.h>
++#include <dspbridge/sync.h>
++#include <dspbridge/utildefs.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++#include <dspbridge/proc.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/cmm.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++#define NEXT_PA(pnode) (pnode->dw_pa + pnode->ul_size)
++
++/* Other bus/platform translations */
++#define DSPPA2GPPPA(base, x, y) ((x)+(y))
++#define GPPPA2DSPPA(base, x, y) ((x)-(y))
++
++/*
++ * Allocators define a block of contiguous memory used for future allocations.
++ *
++ * sma - shared memory allocator.
++ * vma - virtual memory allocator.(not used).
++ */
++struct cmm_allocator { /* sma */
++ unsigned int shm_base; /* Start of physical SM block */
++ u32 ul_sm_size; /* Size of SM block in bytes */
++ unsigned int dw_vm_base; /* Start of VM block. (Dev driver
++ * context for 'sma') */
++ u32 dw_dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
++ * SM space */
++ s8 c_factor; /* DSPPa to GPPPa Conversion Factor */
++ unsigned int dw_dsp_base; /* DSP virt base byte address */
++ u32 ul_dsp_size; /* DSP seg size in bytes */
++ struct cmm_object *hcmm_mgr; /* back ref to parent mgr */
++ /* node list of available memory */
++ struct lst_list *free_list_head;
++ /* node list of memory in use */
++ struct lst_list *in_use_list_head;
++};
++
++struct cmm_xlator { /* Pa<->Va translator object */
++ /* CMM object this translator associated */
++ struct cmm_object *hcmm_mgr;
++ /*
++ * Client process virtual base address that corresponds to phys SM
++ * base address for translator's ul_seg_id.
++ * Only 1 segment ID currently supported.
++ */
++ unsigned int dw_virt_base; /* virtual base address */
++ u32 ul_virt_size; /* size of virt space in bytes */
++ u32 ul_seg_id; /* Segment Id */
++};
++
++/* CMM Mgr */
++struct cmm_object {
++ /*
++ * Cmm Lock is used to serialize access mem manager for multi-threads.
++ */
++ struct mutex cmm_lock; /* Lock to access cmm mgr */
++ struct lst_list *node_free_list_head; /* Free list of memory nodes */
++ u32 ul_min_block_size; /* Min SM block; default 16 bytes */
++ u32 dw_page_size; /* Memory Page size (1k/4k) */
++ /* GPP SM segment ptrs */
++ struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
++};
++
++/* Default CMM Mgr attributes */
++static struct cmm_mgrattrs cmm_dfltmgrattrs = {
++ /* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
++ 16
++};
++
++/* Default allocation attributes */
++static struct cmm_attrs cmm_dfltalctattrs = {
++ 1 /* ul_seg_id, default segment Id for allocator */
++};
++
++/* Address translator default attrs */
++static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
++ /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
++ 1,
++ 0, /* dw_dsp_bufs */
++ 0, /* dw_dsp_buf_size */
++ NULL, /* vm_base */
++ 0, /* dw_vm_size */
++};
++
++/* SM node representing a block of memory. */
++struct cmm_mnode {
++ struct list_head link; /* must be 1st element */
++ u32 dw_pa; /* Phys addr */
++ u32 dw_va; /* Virtual address in device process context */
++ u32 ul_size; /* SM block size in bytes */
++ u32 client_proc; /* Process that allocated this mem block */
++};
++
++/* ----------------------------------- Globals */
++static u32 refs; /* module reference count */
++
++/* ----------------------------------- Function Prototypes */
++static void add_to_free_list(struct cmm_allocator *allocator,
++ struct cmm_mnode *pnode);
++static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
++ u32 ul_seg_id);
++static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
++ u32 usize);
++static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
++ u32 dw_va, u32 ul_size);
++/* get available slot for new allocator */
++static s32 get_slot(struct cmm_object *cmm_mgr_obj);
++static void un_register_gppsm_seg(struct cmm_allocator *psma);
++
++/*
++ * ======== cmm_calloc_buf ========
++ * Purpose:
++ * Allocate a SM buffer, zero contents, and return the physical address
++ * and optional driver context virtual address(pp_buf_va).
++ *
++ * The freelist is sorted in increasing size order. Get the first
++ * block that satisfies the request and sort the remaining back on
++ * the freelist; if large enough. The kept block is placed on the
++ * inUseList.
++ */
++void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
++ struct cmm_attrs *pattrs, void **pp_buf_va)
++{
++ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
++ void *buf_pa = NULL;
++ struct cmm_mnode *pnode = NULL;
++ struct cmm_mnode *new_node = NULL;
++ struct cmm_allocator *allocator = NULL;
++ u32 delta_size;
++ u8 *pbyte = NULL;
++ s32 cnt;
++
++ if (pattrs == NULL)
++ pattrs = &cmm_dfltalctattrs;
++
++ if (pp_buf_va != NULL)
++ *pp_buf_va = NULL;
++
++ if (cmm_mgr_obj && (usize != 0)) {
++ if (pattrs->ul_seg_id > 0) {
++ /* SegId > 0 is SM */
++ /* get the allocator object for this segment id */
++ allocator =
++ get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
++ /* keep block size a multiple of ul_min_block_size */
++ usize =
++ ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
++ 1))
++ + cmm_mgr_obj->ul_min_block_size;
++ mutex_lock(&cmm_mgr_obj->cmm_lock);
++ pnode = get_free_block(allocator, usize);
++ }
++ if (pnode) {
++ delta_size = (pnode->ul_size - usize);
++ if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
++ /* create a new block with the leftovers and
++ * add to freelist */
++ new_node =
++ get_node(cmm_mgr_obj, pnode->dw_pa + usize,
++ pnode->dw_va + usize,
++ (u32) delta_size);
++ /* leftovers go free */
++ add_to_free_list(allocator, new_node);
++ /* adjust our node's size */
++ pnode->ul_size = usize;
++ }
++ /* Tag node with client process requesting allocation
++ * We'll need to free up a process's alloc'd SM if the
++ * client process goes away.
++ */
++ /* Return TGID instead of process handle */
++ pnode->client_proc = current->tgid;
++
++ /* put our node on InUse list */
++ lst_put_tail(allocator->in_use_list_head,
++ (struct list_head *)pnode);
++ buf_pa = (void *)pnode->dw_pa; /* physical address */
++ /* clear mem */
++ pbyte = (u8 *) pnode->dw_va;
++ for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
++ *pbyte = 0;
++
++ if (pp_buf_va != NULL) {
++ /* Virtual address */
++ *pp_buf_va = (void *)pnode->dw_va;
++ }
++ }
++ mutex_unlock(&cmm_mgr_obj->cmm_lock);
++ }
++ return buf_pa;
++}
++
++/*
++ * ======== cmm_create ========
++ * Purpose:
++ * Create a communication memory manager object.
++ */
++int cmm_create(struct cmm_object **ph_cmm_mgr,
++ struct dev_object *hdev_obj,
++ const struct cmm_mgrattrs *mgr_attrts)
++{
++ struct cmm_object *cmm_obj = NULL;
++ int status = 0;
++ struct util_sysinfo sys_info;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(ph_cmm_mgr != NULL);
++
++ *ph_cmm_mgr = NULL;
++ /* create, zero, and tag a cmm mgr object */
++ cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
++ if (cmm_obj != NULL) {
++ if (mgr_attrts == NULL)
++ mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
++
++ /* 4 bytes minimum */
++ DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
++ /* save away smallest block allocation for this cmm mgr */
++ cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
++ /* save away the systems memory page size */
++ sys_info.dw_page_size = PAGE_SIZE;
++ sys_info.dw_allocation_granularity = PAGE_SIZE;
++ sys_info.dw_number_of_processors = 1;
++
++ cmm_obj->dw_page_size = sys_info.dw_page_size;
++
++ /* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by
++ * MEM_ALLOC_OBJECT */
++
++ /* create node free list */
++ cmm_obj->node_free_list_head =
++ kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ if (cmm_obj->node_free_list_head == NULL) {
++ status = -ENOMEM;
++ cmm_destroy(cmm_obj, true);
++ } else {
++ INIT_LIST_HEAD(&cmm_obj->
++ node_free_list_head->head);
++ mutex_init(&cmm_obj->cmm_lock);
++ *ph_cmm_mgr = cmm_obj;
++ }
++ } else {
++ status = -ENOMEM;
++ }
++ return status;
++}
++
++/*
++ * ======== cmm_destroy ========
++ * Purpose:
++ * Release the communication memory manager resources.
++ */
++int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
++{
++ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
++ struct cmm_info temp_info;
++ int status = 0;
++ s32 slot_seg;
++ struct cmm_mnode *pnode;
++
++ DBC_REQUIRE(refs > 0);
++ if (!hcmm_mgr) {
++ status = -EFAULT;
++ return status;
++ }
++ mutex_lock(&cmm_mgr_obj->cmm_lock);
++ /* If not force then fail if outstanding allocations exist */
++ if (!force) {
++ /* Check for outstanding memory allocations */
++ status = cmm_get_info(hcmm_mgr, &temp_info);
++ if (!status) {
++ if (temp_info.ul_total_in_use_cnt > 0) {
++ /* outstanding allocations */
++ status = -EPERM;
++ }
++ }
++ }
++ if (!status) {
++ /* UnRegister SM allocator */
++ for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
++ if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
++ un_register_gppsm_seg
++ (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
++ /* Set slot to NULL for future reuse */
++ cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
++ }
++ }
++ }
++ if (cmm_mgr_obj->node_free_list_head != NULL) {
++ /* Free the free nodes */
++ while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
++ pnode = (struct cmm_mnode *)
++ lst_get_head(cmm_mgr_obj->node_free_list_head);
++ kfree(pnode);
++ }
++ /* delete NodeFreeList list */
++ kfree(cmm_mgr_obj->node_free_list_head);
++ }
++ mutex_unlock(&cmm_mgr_obj->cmm_lock);
++ if (!status) {
++ /* delete CS & cmm mgr object */
++ mutex_destroy(&cmm_mgr_obj->cmm_lock);
++ kfree(cmm_mgr_obj);
++ }
++ return status;
++}
++
++/*
++ * ======== cmm_exit ========
++ * Purpose:
++ * Discontinue usage of module; free resources when reference count
++ * reaches 0.
++ */
++void cmm_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++}
++
++/*
++ * ======== cmm_free_buf ========
++ * Purpose:
++ * Free the given buffer.
++ */
++int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
++ u32 ul_seg_id)
++{
++ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
++ int status = -EFAULT;
++ struct cmm_mnode *mnode_obj = NULL;
++ struct cmm_allocator *allocator = NULL;
++ struct cmm_attrs *pattrs;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(buf_pa != NULL);
++
++ if (ul_seg_id == 0) {
++ pattrs = &cmm_dfltalctattrs;
++ ul_seg_id = pattrs->ul_seg_id;
++ }
++ if (!hcmm_mgr || !(ul_seg_id > 0)) {
++ status = -EFAULT;
++ return status;
++ }
++ /* get the allocator for this segment id */
++ allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
++ if (allocator != NULL) {
++ mutex_lock(&cmm_mgr_obj->cmm_lock);
++ mnode_obj =
++ (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
++ while (mnode_obj) {
++ if ((u32) buf_pa == mnode_obj->dw_pa) {
++ /* Found it */
++ lst_remove_elem(allocator->in_use_list_head,
++ (struct list_head *)mnode_obj);
++ /* back to freelist */
++ add_to_free_list(allocator, mnode_obj);
++ status = 0; /* all right! */
++ break;
++ }
++ /* next node. */
++ mnode_obj = (struct cmm_mnode *)
++ lst_next(allocator->in_use_list_head,
++ (struct list_head *)mnode_obj);
++ }
++ mutex_unlock(&cmm_mgr_obj->cmm_lock);
++ }
++ return status;
++}
++
++/*
++ * ======== cmm_get_handle ========
++ * Purpose:
++ * Return the communication memory manager object for this device.
++ * This is typically called from the client process.
++ */
++int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
++{
++ int status = 0;
++ struct dev_object *hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(ph_cmm_mgr != NULL);
++ if (hprocessor != NULL)
++ status = proc_get_dev_object(hprocessor, &hdev_obj);
++ else
++ hdev_obj = dev_get_first(); /* default */
++
++ if (!status)
++ status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);
++
++ return status;
++}
++
++/*
++ * ======== cmm_get_info ========
++ * Purpose:
++ * Return the current memory utilization information.
++ */
++int cmm_get_info(struct cmm_object *hcmm_mgr,
++ struct cmm_info *cmm_info_obj)
++{
++ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
++ u32 ul_seg;
++ int status = 0;
++ struct cmm_allocator *altr;
++ struct cmm_mnode *mnode_obj = NULL;
++
++ DBC_REQUIRE(cmm_info_obj != NULL);
++
++ if (!hcmm_mgr) {
++ status = -EFAULT;
++ return status;
++ }
++ mutex_lock(&cmm_mgr_obj->cmm_lock);
++ cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */
++ /* Total # of outstanding alloc */
++ cmm_info_obj->ul_total_in_use_cnt = 0;
++ /* min block size */
++ cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
++ /* check SM memory segments */
++ for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
++ /* get the allocator object for this segment id */
++ altr = get_allocator(cmm_mgr_obj, ul_seg);
++ if (altr != NULL) {
++ cmm_info_obj->ul_num_gppsm_segs++;
++ cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
++ altr->shm_base - altr->ul_dsp_size;
++ cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
++ altr->ul_dsp_size + altr->ul_sm_size;
++ cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
++ altr->shm_base;
++ cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
++ altr->ul_sm_size;
++ cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
++ altr->dw_dsp_base;
++ cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
++ altr->ul_dsp_size;
++ cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
++ altr->dw_vm_base - altr->ul_dsp_size;
++ cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
++ mnode_obj = (struct cmm_mnode *)
++ lst_first(altr->in_use_list_head);
++ /* Count inUse blocks */
++ while (mnode_obj) {
++ cmm_info_obj->ul_total_in_use_cnt++;
++ cmm_info_obj->seg_info[ul_seg -
++ 1].ul_in_use_cnt++;
++ /* next node. */
++ mnode_obj = (struct cmm_mnode *)
++ lst_next(altr->in_use_list_head,
++ (struct list_head *)mnode_obj);
++ }
++ }
++ } /* end for */
++ mutex_unlock(&cmm_mgr_obj->cmm_lock);
++ return status;
++}
++
++/*
++ * ======== cmm_init ========
++ * Purpose:
++ * Initializes private state of CMM module.
++ */
++bool cmm_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
++
++/*
++ * ======== cmm_register_gppsm_seg ========
++ * Purpose:
++ * Register a block of SM with the CMM to be used for later GPP SM
++ * allocations.
++ */
++int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
++ u32 dw_gpp_base_pa, u32 ul_size,
++ u32 dsp_addr_offset, s8 c_factor,
++ u32 dw_dsp_base, u32 ul_dsp_size,
++ u32 *sgmt_id, u32 gpp_base_va)
++{
++ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
++ struct cmm_allocator *psma = NULL;
++ int status = 0;
++ struct cmm_mnode *new_node;
++ s32 slot_seg;
++
++ DBC_REQUIRE(ul_size > 0);
++ DBC_REQUIRE(sgmt_id != NULL);
++ DBC_REQUIRE(dw_gpp_base_pa != 0);
++ DBC_REQUIRE(gpp_base_va != 0);
++ DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
++ (c_factor >= CMM_SUBFROMDSPPA));
++ dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
++ "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
++ dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
++ ul_dsp_size, gpp_base_va);
++ if (!hcmm_mgr) {
++ status = -EFAULT;
++ return status;
++ }
++ /* make sure we have room for another allocator */
++ mutex_lock(&cmm_mgr_obj->cmm_lock);
++ slot_seg = get_slot(cmm_mgr_obj);
++ if (slot_seg < 0) {
++ /* get a slot number */
++ status = -EPERM;
++ goto func_end;
++ }
++ /* Check if input ul_size is big enough to alloc at least one block */
++ if (ul_size < cmm_mgr_obj->ul_min_block_size) {
++ status = -EINVAL;
++ goto func_end;
++ }
++
++ /* create, zero, and tag an SM allocator object */
++ psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
++ if (psma != NULL) {
++ psma->hcmm_mgr = hcmm_mgr; /* ref to parent */
++ psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
++ psma->ul_sm_size = ul_size; /* SM segment size in bytes */
++ psma->dw_vm_base = gpp_base_va;
++ psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
++ psma->c_factor = c_factor;
++ psma->dw_dsp_base = dw_dsp_base;
++ psma->ul_dsp_size = ul_dsp_size;
++ if (psma->dw_vm_base == 0) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /* return the actual segment identifier */
++ *sgmt_id = (u32) slot_seg + 1;
++ /* create memory free list */
++ psma->free_list_head = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ if (psma->free_list_head == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ INIT_LIST_HEAD(&psma->free_list_head->head);
++
++ /* create memory in-use list */
++ psma->in_use_list_head = kzalloc(sizeof(struct
++ lst_list), GFP_KERNEL);
++ if (psma->in_use_list_head == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ INIT_LIST_HEAD(&psma->in_use_list_head->head);
++
++ /* Get a mem node for this hunk-o-memory */
++ new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
++ psma->dw_vm_base, ul_size);
++ /* Place node on the SM allocator's free list */
++ if (new_node) {
++ lst_put_tail(psma->free_list_head,
++ (struct list_head *)new_node);
++ } else {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ } else {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ /* make entry */
++ cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;
++
++func_end:
++ if (status && psma) {
++ /* Cleanup allocator */
++ un_register_gppsm_seg(psma);
++ }
++
++ mutex_unlock(&cmm_mgr_obj->cmm_lock);
++ return status;
++}
++
++/*
++ * ======== cmm_un_register_gppsm_seg ========
++ * Purpose:
++ * UnRegister GPP SM segments with the CMM.
++ */
++int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
++ u32 ul_seg_id)
++{
++ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
++ int status = 0;
++ struct cmm_allocator *psma;
++ u32 ul_id = ul_seg_id;
++
++ DBC_REQUIRE(ul_seg_id > 0);
++ if (hcmm_mgr) {
++ if (ul_seg_id == CMM_ALLSEGMENTS)
++ ul_id = 1;
++
++ if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
++ while (ul_id <= CMM_MAXGPPSEGS) {
++ mutex_lock(&cmm_mgr_obj->cmm_lock);
++ /* slot = seg_id-1 */
++ psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
++ if (psma != NULL) {
++ un_register_gppsm_seg(psma);
++ /* Set alctr ptr to NULL for future
++ * reuse */
++ cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
++ 1] = NULL;
++ } else if (ul_seg_id != CMM_ALLSEGMENTS) {
++ status = -EPERM;
++ }
++ mutex_unlock(&cmm_mgr_obj->cmm_lock);
++ if (ul_seg_id != CMM_ALLSEGMENTS)
++ break;
++
++ ul_id++;
++ } /* end while */
++ } else {
++ status = -EINVAL;
++ }
++ } else {
++ status = -EFAULT;
++ }
++ return status;
++}
++
++/*
++ * ======== un_register_gppsm_seg ========
++ * Purpose:
++ * UnRegister the SM allocator by freeing all its resources and
++ * nulling cmm mgr table entry.
++ * Note:
++ * This routine is always called within cmm lock crit sect.
++ */
++static void un_register_gppsm_seg(struct cmm_allocator *psma)
++{
++ struct cmm_mnode *mnode_obj = NULL;
++ struct cmm_mnode *next_node = NULL;
++
++ DBC_REQUIRE(psma != NULL);
++ if (psma->free_list_head != NULL) {
++ /* free nodes on free list */
++ mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
++ while (mnode_obj) {
++ next_node =
++ (struct cmm_mnode *)lst_next(psma->free_list_head,
++ (struct list_head *)
++ mnode_obj);
++ lst_remove_elem(psma->free_list_head,
++ (struct list_head *)mnode_obj);
++ kfree((void *)mnode_obj);
++ /* next node. */
++ mnode_obj = next_node;
++ }
++ kfree(psma->free_list_head); /* delete freelist */
++ /* free nodes on InUse list */
++ mnode_obj =
++ (struct cmm_mnode *)lst_first(psma->in_use_list_head);
++ while (mnode_obj) {
++ next_node =
++ (struct cmm_mnode *)lst_next(psma->in_use_list_head,
++ (struct list_head *)
++ mnode_obj);
++ lst_remove_elem(psma->in_use_list_head,
++ (struct list_head *)mnode_obj);
++ kfree((void *)mnode_obj);
++ /* next node. */
++ mnode_obj = next_node;
++ }
++ kfree(psma->in_use_list_head); /* delete InUse list */
++ }
++ if ((void *)psma->dw_vm_base != NULL)
++ MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);
++
++ /* Free allocator itself */
++ kfree(psma);
++}
++
++/*
++ * ======== get_slot ========
++ * Purpose:
++ * An available slot # is returned. Returns negative on failure.
++ */
++static s32 get_slot(struct cmm_object *cmm_mgr_obj)
++{
++ s32 slot_seg = -1; /* neg on failure */
++ DBC_REQUIRE(cmm_mgr_obj != NULL);
++ /* get first available slot in cmm mgr SMSegTab[] */
++ for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
++ if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
++ break;
++
++ }
++ if (slot_seg == CMM_MAXGPPSEGS)
++ slot_seg = -1; /* failed */
++
++ return slot_seg;
++}
++
++/*
++ * ======== get_node ========
++ * Purpose:
++ * Get a memory node from freelist or create a new one.
++ */
++static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
++ u32 dw_va, u32 ul_size)
++{
++ struct cmm_mnode *pnode = NULL;
++
++ DBC_REQUIRE(cmm_mgr_obj != NULL);
++ DBC_REQUIRE(dw_pa != 0);
++ DBC_REQUIRE(dw_va != 0);
++ DBC_REQUIRE(ul_size != 0);
++ /* Check cmm mgr's node freelist */
++ if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
++ pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
++ } else {
++ /* surely a valid element */
++ pnode = (struct cmm_mnode *)
++ lst_get_head(cmm_mgr_obj->node_free_list_head);
++ }
++ if (pnode) {
++ lst_init_elem((struct list_head *)pnode); /* set self */
++ pnode->dw_pa = dw_pa; /* Physical addr of start of block */
++ pnode->dw_va = dw_va; /* Virtual " " */
++ pnode->ul_size = ul_size; /* Size of block */
++ }
++ return pnode;
++}
++
++/*
++ * ======== delete_node ========
++ * Purpose:
++ * Put a memory node on the cmm nodelist for later use.
++ * Doesn't actually delete the node. Heap thrashing friendly.
++ */
++static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
++{
++ DBC_REQUIRE(pnode != NULL);
++ lst_init_elem((struct list_head *)pnode); /* init .self ptr */
++ lst_put_tail(cmm_mgr_obj->node_free_list_head,
++ (struct list_head *)pnode);
++}
++
++/*
++ * ======== get_free_block ========
++ * Purpose:
++ * Scan the free block list and return the first block that satisfies
++ * the size.
++ */
++static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
++ u32 usize)
++{
++ if (allocator) {
++ struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
++ lst_first(allocator->free_list_head);
++ while (mnode_obj) {
++ if (usize <= (u32) mnode_obj->ul_size) {
++ lst_remove_elem(allocator->free_list_head,
++ (struct list_head *)mnode_obj);
++ return mnode_obj;
++ }
++ /* next node. */
++ mnode_obj = (struct cmm_mnode *)
++ lst_next(allocator->free_list_head,
++ (struct list_head *)mnode_obj);
++ }
++ }
++ return NULL;
++}
++
++/*
++ * ======== add_to_free_list ========
++ * Purpose:
++ * Coalesce node into the freelist in ascending size order.
++ */
++static void add_to_free_list(struct cmm_allocator *allocator,
++ struct cmm_mnode *pnode)
++{
++ struct cmm_mnode *node_prev = NULL;
++ struct cmm_mnode *node_next = NULL;
++ struct cmm_mnode *mnode_obj;
++ u32 dw_this_pa;
++ u32 dw_next_pa;
++
++ DBC_REQUIRE(pnode != NULL);
++ DBC_REQUIRE(allocator != NULL);
++ dw_this_pa = pnode->dw_pa;
++ dw_next_pa = NEXT_PA(pnode);
++ mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
++ while (mnode_obj) {
++ if (dw_this_pa == NEXT_PA(mnode_obj)) {
++ /* found the block ahead of this one */
++ node_prev = mnode_obj;
++ } else if (dw_next_pa == mnode_obj->dw_pa) {
++ node_next = mnode_obj;
++ }
++ if ((node_prev == NULL) || (node_next == NULL)) {
++ /* next node. */
++ mnode_obj = (struct cmm_mnode *)
++ lst_next(allocator->free_list_head,
++ (struct list_head *)mnode_obj);
++ } else {
++ /* got 'em */
++ break;
++ }
++ } /* while */
++ if (node_prev != NULL) {
++ /* combine with previous block */
++ lst_remove_elem(allocator->free_list_head,
++ (struct list_head *)node_prev);
++ /* grow node to hold both */
++ pnode->ul_size += node_prev->ul_size;
++ pnode->dw_pa = node_prev->dw_pa;
++ pnode->dw_va = node_prev->dw_va;
++ /* place node on mgr nodeFreeList */
++ delete_node((struct cmm_object *)allocator->hcmm_mgr,
++ node_prev);
++ }
++ if (node_next != NULL) {
++ /* combine with next block */
++ lst_remove_elem(allocator->free_list_head,
++ (struct list_head *)node_next);
++ /* grow da node */
++ pnode->ul_size += node_next->ul_size;
++ /* place node on mgr nodeFreeList */
++ delete_node((struct cmm_object *)allocator->hcmm_mgr,
++ node_next);
++ }
++ /* Now, let's add to freelist in increasing size order */
++ mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
++ while (mnode_obj) {
++ if (pnode->ul_size <= mnode_obj->ul_size)
++ break;
++
++ /* next node. */
++ mnode_obj =
++ (struct cmm_mnode *)lst_next(allocator->free_list_head,
++ (struct list_head *)mnode_obj);
++ }
++ /* if mnode_obj is NULL then add our pnode to the end of the freelist */
++ if (mnode_obj == NULL) {
++ lst_put_tail(allocator->free_list_head,
++ (struct list_head *)pnode);
++ } else {
++ /* insert our node before the current traversed node */
++ lst_insert_before(allocator->free_list_head,
++ (struct list_head *)pnode,
++ (struct list_head *)mnode_obj);
++ }
++}
++
++/*
++ * ======== get_allocator ========
++ * Purpose:
++ * Return the allocator for the given SM Segid.
++ * SegIds: 1,2,3..max.
++ */
++static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
++ u32 ul_seg_id)
++{
++ struct cmm_allocator *allocator = NULL;
++
++ DBC_REQUIRE(cmm_mgr_obj != NULL);
++ DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
++ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
++ if (allocator != NULL) {
++ /* make sure it's for real */
++ if (!allocator) {
++ allocator = NULL;
++ DBC_ASSERT(false);
++ }
++ }
++ return allocator;
++}
++
++/*
++ * The CMM_Xlator[xxx] routines below are used by Node and Stream
++ * to perform SM address translation to the client process address space.
++ * A "translator" object is created by a node/stream for each SM seg used.
++ */
++
++/*
++ * ======== cmm_xlator_create ========
++ * Purpose:
++ * Create an address translator object.
++ */
++int cmm_xlator_create(struct cmm_xlatorobject **xlator,
++ struct cmm_object *hcmm_mgr,
++ struct cmm_xlatorattrs *xlator_attrs)
++{
++ struct cmm_xlator *xlator_object = NULL;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(xlator != NULL);
++ DBC_REQUIRE(hcmm_mgr != NULL);
++
++ *xlator = NULL;
++ if (xlator_attrs == NULL)
++ xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */
++
++ xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
++ if (xlator_object != NULL) {
++ xlator_object->hcmm_mgr = hcmm_mgr; /* ref back to CMM */
++ /* SM seg_id */
++ xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
++ } else {
++ status = -ENOMEM;
++ }
++ if (!status)
++ *xlator = (struct cmm_xlatorobject *)xlator_object;
++
++ return status;
++}
++
++/*
++ * ======== cmm_xlator_delete ========
++ * Purpose:
++ * Free the Xlator resources.
++ * VM gets freed later.
++ */
++int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool force)
++{
++ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
++
++ DBC_REQUIRE(refs > 0);
++
++ kfree(xlator_obj);
++
++ return 0;
++}
++
++/*
++ * ======== cmm_xlator_alloc_buf ========
++ */
++void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
++ u32 pa_size)
++{
++ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
++ void *pbuf = NULL;
++ void *tmp_va_buff;
++ struct cmm_attrs attrs;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(xlator != NULL);
++ DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
++ DBC_REQUIRE(va_buf != NULL);
++ DBC_REQUIRE(pa_size > 0);
++ DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
++
++ if (xlator_obj) {
++ attrs.ul_seg_id = xlator_obj->ul_seg_id;
++ __raw_writel(0, va_buf);
++ /* Alloc SM */
++ pbuf =
++ cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
++ if (pbuf) {
++ /* convert to translator(node/strm) process Virtual
++ * address */
++ tmp_va_buff = cmm_xlator_translate(xlator,
++ pbuf, CMM_PA2VA);
++ __raw_writel((u32)tmp_va_buff, va_buf);
++ }
++ }
++ return pbuf;
++}
++
++/*
++ * ======== cmm_xlator_free_buf ========
++ * Purpose:
++ * Free the given SM buffer and descriptor.
++ * Does not free virtual memory.
++ */
++int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
++{
++ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
++ int status = -EPERM;
++ void *buf_pa = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(buf_va != NULL);
++ DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
++
++ if (xlator_obj) {
++ /* convert Va to Pa so we can free it. */
++ buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
++ if (buf_pa) {
++ status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
++ xlator_obj->ul_seg_id);
++ if (status) {
++ /* Uh oh, this shouldn't happen. Descriptor
++ * gone! */
++ DBC_ASSERT(false); /* CMM is leaking mem */
++ }
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== cmm_xlator_info ========
++ * Purpose:
++ * Set/Get translator info.
++ */
++int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
++ u32 ul_size, u32 segm_id, bool set_info)
++{
++ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(paddr != NULL);
++ DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
++
++ if (xlator_obj) {
++ if (set_info) {
++ /* set translators virtual address range */
++ xlator_obj->dw_virt_base = (u32) *paddr;
++ xlator_obj->ul_virt_size = ul_size;
++ } else { /* return virt base address */
++ *paddr = (u8 *) xlator_obj->dw_virt_base;
++ }
++ } else {
++ status = -EFAULT;
++ }
++ return status;
++}
++
++/*
++ * ======== cmm_xlator_translate ========
++ */
++void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
++ enum cmm_xlatetype xtype)
++{
++ u32 dw_addr_xlate = 0;
++ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
++ struct cmm_object *cmm_mgr_obj = NULL;
++ struct cmm_allocator *allocator = NULL;
++ u32 dw_offset = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(paddr != NULL);
++ DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
++
++ if (!xlator_obj)
++ goto loop_cont;
++
++ cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
++ /* get this translator's default SM allocator */
++ DBC_ASSERT(xlator_obj->ul_seg_id > 0);
++ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
++ if (!allocator)
++ goto loop_cont;
++
++ if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
++ (xtype == CMM_PA2VA)) {
++ if (xtype == CMM_PA2VA) {
++ /* Gpp Va = Va Base + offset */
++ dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
++ allocator->
++ ul_dsp_size);
++ dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
++ /* Check if translated Va base is in range */
++ if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
++ (dw_addr_xlate >=
++ (xlator_obj->dw_virt_base +
++ xlator_obj->ul_virt_size))) {
++ dw_addr_xlate = 0; /* bad address */
++ }
++ } else {
++ /* Gpp PA = Gpp Base + offset */
++ dw_offset =
++ (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
++ dw_addr_xlate =
++ allocator->shm_base - allocator->ul_dsp_size +
++ dw_offset;
++ }
++ } else {
++ dw_addr_xlate = (u32) paddr;
++ }
++ /*Now convert address to proper target physical address if needed */
++ if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
++ /* Got Gpp Pa now, convert to DSP Pa */
++ dw_addr_xlate =
++ GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
++ dw_addr_xlate,
++ allocator->dw_dsp_phys_addr_offset *
++ allocator->c_factor);
++ } else if (xtype == CMM_DSPPA2PA) {
++ /* Got DSP Pa, convert to GPP Pa */
++ dw_addr_xlate =
++ DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
++ dw_addr_xlate,
++ allocator->dw_dsp_phys_addr_offset *
++ allocator->c_factor);
++ }
++loop_cont:
++ return (void *)dw_addr_xlate;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/cod.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/cod.c 2010-08-18 11:24:23.206068651 +0300
+@@ -0,0 +1,652 @@
++/*
++ * cod.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This module implements DSP code management for the DSP/BIOS Bridge
++ * environment. It is mostly a thin wrapper.
++ *
++ * This module provides an interface for loading both static and
++ * dynamic code objects onto DSP systems.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/ldr.h>
++
++/* ----------------------------------- Platform Manager */
++/* Include appropriate loader header file */
++#include <dspbridge/dbll.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/cod.h>
++
++/*
++ * ======== cod_manager ========
++ */
++struct cod_manager {
++ struct dbll_tar_obj *target;
++ struct dbll_library_obj *base_lib;
++ bool loaded; /* Base library loaded? */
++ u32 ul_entry;
++ struct ldr_module *dll_obj;
++ struct dbll_fxns fxns;
++ struct dbll_attrs attrs;
++ char sz_zl_file[COD_MAXPATHLENGTH];
++};
++
++/*
++ * ======== cod_libraryobj ========
++ */
++struct cod_libraryobj {
++ struct dbll_library_obj *dbll_lib;
++ struct cod_manager *cod_mgr;
++};
++
++static u32 refs = 0L;
++
++static struct dbll_fxns ldr_fxns = {
++ (dbll_close_fxn) dbll_close,
++ (dbll_create_fxn) dbll_create,
++ (dbll_delete_fxn) dbll_delete,
++ (dbll_exit_fxn) dbll_exit,
++ (dbll_get_attrs_fxn) dbll_get_attrs,
++ (dbll_get_addr_fxn) dbll_get_addr,
++ (dbll_get_c_addr_fxn) dbll_get_c_addr,
++ (dbll_get_sect_fxn) dbll_get_sect,
++ (dbll_init_fxn) dbll_init,
++ (dbll_load_fxn) dbll_load,
++ (dbll_load_sect_fxn) dbll_load_sect,
++ (dbll_open_fxn) dbll_open,
++ (dbll_read_sect_fxn) dbll_read_sect,
++ (dbll_set_attrs_fxn) dbll_set_attrs,
++ (dbll_unload_fxn) dbll_unload,
++ (dbll_unload_sect_fxn) dbll_unload_sect,
++};
++
++static bool no_op(void);
++
++/*
++ * File operations (originally were under kfile.c)
++ */
++static s32 cod_f_close(struct file *filp)
++{
++ /* Check for valid handle */
++ if (!filp)
++ return -EFAULT;
++
++ filp_close(filp, NULL);
++
++ /* we can't use 0 here */
++ return 0;
++}
++
++static struct file *cod_f_open(const char *psz_file_name, const char *sz_mode)
++{
++ mm_segment_t fs;
++ struct file *filp;
++
++ fs = get_fs();
++ set_fs(get_ds());
++
++ /* ignore given mode and open file as read-only */
++ filp = filp_open(psz_file_name, O_RDONLY, 0);
++
++ if (IS_ERR(filp))
++ filp = NULL;
++
++ set_fs(fs);
++
++ return filp;
++}
++
++static s32 cod_f_read(void __user *pbuffer, s32 size, s32 count,
++ struct file *filp)
++{
++ /* check for valid file handle */
++ if (!filp)
++ return -EFAULT;
++
++ if ((size > 0) && (count > 0) && pbuffer) {
++ u32 dw_bytes_read;
++ mm_segment_t fs;
++
++ /* read from file */
++ fs = get_fs();
++ set_fs(get_ds());
++ dw_bytes_read = filp->f_op->read(filp, pbuffer, size * count,
++ &(filp->f_pos));
++ set_fs(fs);
++
++ if (!dw_bytes_read)
++ return -EBADF;
++
++ return dw_bytes_read / size;
++ }
++
++ return -EINVAL;
++}
++
++static s32 cod_f_seek(struct file *filp, s32 offset, s32 origin)
++{
++ loff_t dw_cur_pos;
++
++ /* check for valid file handle */
++ if (!filp)
++ return -EFAULT;
++
++ /* based on the origin flag, move the internal pointer */
++ dw_cur_pos = filp->f_op->llseek(filp, offset, origin);
++
++ if ((s32) dw_cur_pos < 0)
++ return -EPERM;
++
++ /* we can't use 0 here */
++ return 0;
++}
++
++static s32 cod_f_tell(struct file *filp)
++{
++ loff_t dw_cur_pos;
++
++ if (!filp)
++ return -EFAULT;
++
++ /* Get current position */
++ dw_cur_pos = filp->f_op->llseek(filp, 0, SEEK_CUR);
++
++ if ((s32) dw_cur_pos < 0)
++ return -EPERM;
++
++ return dw_cur_pos;
++}
++
++/*
++ * ======== cod_close ========
++ */
++void cod_close(struct cod_libraryobj *lib)
++{
++ struct cod_manager *hmgr;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(lib != NULL);
++ DBC_REQUIRE(lib->cod_mgr);
++
++ hmgr = lib->cod_mgr;
++ hmgr->fxns.close_fxn(lib->dbll_lib);
++
++ kfree(lib);
++}
++
++/*
++ * ======== cod_create ========
++ * Purpose:
++ * Create an object to manage code on a DSP system.
++ * This object can be used to load an initial program image with
++ * arguments that can later be expanded with
++ * dynamically loaded object files.
++ *
++ */
++int cod_create(struct cod_manager **mgr, char *str_zl_file,
++ const struct cod_attrs *attrs)
++{
++ struct cod_manager *mgr_new;
++ struct dbll_attrs zl_attrs;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(mgr != NULL);
++
++ /* assume failure */
++ *mgr = NULL;
++
++ /* we don't support non-default attrs yet */
++ if (attrs != NULL)
++ return -ENOSYS;
++
++ mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL);
++ if (mgr_new == NULL)
++ return -ENOMEM;
++
++ /* Set up loader functions */
++ mgr_new->fxns = ldr_fxns;
++
++ /* initialize the ZL module */
++ mgr_new->fxns.init_fxn();
++
++ zl_attrs.alloc = (dbll_alloc_fxn) no_op;
++ zl_attrs.free = (dbll_free_fxn) no_op;
++ zl_attrs.fread = (dbll_read_fxn) cod_f_read;
++ zl_attrs.fseek = (dbll_seek_fxn) cod_f_seek;
++ zl_attrs.ftell = (dbll_tell_fxn) cod_f_tell;
++ zl_attrs.fclose = (dbll_f_close_fxn) cod_f_close;
++ zl_attrs.fopen = (dbll_f_open_fxn) cod_f_open;
++ zl_attrs.sym_lookup = NULL;
++ zl_attrs.base_image = true;
++ zl_attrs.log_write = NULL;
++ zl_attrs.log_write_handle = NULL;
++ zl_attrs.write = NULL;
++ zl_attrs.rmm_handle = NULL;
++ zl_attrs.input_params = NULL;
++ zl_attrs.sym_handle = NULL;
++ zl_attrs.sym_arg = NULL;
++
++ mgr_new->attrs = zl_attrs;
++
++ status = mgr_new->fxns.create_fxn(&mgr_new->target, &zl_attrs);
++
++ if (status) {
++ cod_delete(mgr_new);
++ return -ESPIPE;
++ }
++
++ /* return the new manager */
++ *mgr = mgr_new;
++
++ return 0;
++}
++
++/*
++ * ======== cod_delete ========
++ * Purpose:
++ * Delete a code manager object.
++ */
++void cod_delete(struct cod_manager *cod_mgr_obj)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++
++ if (cod_mgr_obj->base_lib) {
++ if (cod_mgr_obj->loaded)
++ cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib,
++ &cod_mgr_obj->attrs);
++
++ cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib);
++ }
++ if (cod_mgr_obj->target) {
++ cod_mgr_obj->fxns.delete_fxn(cod_mgr_obj->target);
++ cod_mgr_obj->fxns.exit_fxn();
++ }
++ kfree(cod_mgr_obj);
++}
++
++/*
++ * ======== cod_exit ========
++ * Purpose:
++ * Discontinue usage of the COD module.
++ *
++ */
++void cod_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== cod_get_base_lib ========
++ * Purpose:
++ * Get handle to the base image DBL library.
++ */
++int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
++ struct dbll_library_obj **plib)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++ DBC_REQUIRE(plib != NULL);
++
++ *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
++
++ return status;
++}
++
++/*
++ * ======== cod_get_base_name ========
++ */
++int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
++ u32 usize)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++ DBC_REQUIRE(sz_name != NULL);
++
++ if (usize <= COD_MAXPATHLENGTH)
++ strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
++ else
++ status = -EPERM;
++
++ return status;
++}
++
++/*
++ * ======== cod_get_entry ========
++ * Purpose:
++ * Retrieve the entry point of a loaded DSP program image
++ *
++ */
++int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++ DBC_REQUIRE(entry_pt != NULL);
++
++ *entry_pt = cod_mgr_obj->ul_entry;
++
++ return 0;
++}
++
++/*
++ * ======== cod_get_loader ========
++ * Purpose:
++ * Get handle to the DBLL loader.
++ */
++int cod_get_loader(struct cod_manager *cod_mgr_obj,
++ struct dbll_tar_obj **loader)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++ DBC_REQUIRE(loader != NULL);
++
++ *loader = (struct dbll_tar_obj *)cod_mgr_obj->target;
++
++ return status;
++}
++
++/*
++ * ======== cod_get_section ========
++ * Purpose:
++ * Retrieve the starting address and length of a section in the COFF file
++ * given the section name.
++ */
++int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
++ u32 *addr, u32 *len)
++{
++ struct cod_manager *cod_mgr_obj;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(lib != NULL);
++ DBC_REQUIRE(lib->cod_mgr);
++ DBC_REQUIRE(str_sect != NULL);
++ DBC_REQUIRE(addr != NULL);
++ DBC_REQUIRE(len != NULL);
++
++ *addr = 0;
++ *len = 0;
++ if (lib != NULL) {
++ cod_mgr_obj = lib->cod_mgr;
++ status = cod_mgr_obj->fxns.get_sect_fxn(lib->dbll_lib, str_sect,
++ addr, len);
++ } else {
++ status = -ESPIPE;
++ }
++
++ DBC_ENSURE(!status || ((*addr == 0) && (*len == 0)));
++
++ return status;
++}
++
++/*
++ * ======== cod_get_sym_value ========
++ * Purpose:
++ * Retrieve the value for the specified symbol. The symbol is first
++ * searched for literally and then, if not found, searched for as a
++ * C symbol.
++ *
++ */
++int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
++ u32 *pul_value)
++{
++ struct dbll_sym_val *dbll_sym;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++ DBC_REQUIRE(str_sym != NULL);
++ DBC_REQUIRE(pul_value != NULL);
++
++ dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n",
++ __func__, cod_mgr_obj, str_sym, pul_value);
++ if (cod_mgr_obj->base_lib) {
++ if (!cod_mgr_obj->fxns.
++ get_addr_fxn(cod_mgr_obj->base_lib, str_sym, &dbll_sym)) {
++ if (!cod_mgr_obj->fxns.
++ get_c_addr_fxn(cod_mgr_obj->base_lib, str_sym,
++ &dbll_sym))
++ return -ESPIPE;
++ }
++ } else {
++ return -ESPIPE;
++ }
++
++ *pul_value = dbll_sym->value;
++
++ return 0;
++}
++
++/*
++ * ======== cod_init ========
++ * Purpose:
++ * Initialize the COD module's private state.
++ *
++ */
++bool cod_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
++ return ret;
++}
++
++/*
++ * ======== cod_load_base ========
++ * Purpose:
++ * Load the initial program image, optionally with command-line arguments,
++ * on the DSP system managed by the supplied handle. The program to be
++ * loaded must be the first element of the args array and must be a fully
++ * qualified pathname.
++ * Details:
++ * if num_argc doesn't match the number of arguments in the args array, the
++ * args array is searched for a NULL terminating entry, and argc is
++ * recalculated to reflect this. In this way, we can support NULL
++ * terminating args arrays, if num_argc is very large.
++ */
++int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
++ cod_writefxn pfn_write, void *arb, char *envp[])
++{
++ dbll_flags flags;
++ struct dbll_attrs save_attrs;
++ struct dbll_attrs new_attrs;
++ int status;
++ u32 i;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr_obj);
++ DBC_REQUIRE(num_argc > 0);
++ DBC_REQUIRE(args != NULL);
++ DBC_REQUIRE(args[0] != NULL);
++ DBC_REQUIRE(pfn_write != NULL);
++ DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);
++
++ /*
++ * Make sure every argv[] stated in argc has a value, or change argc to
++ * reflect true number in NULL terminated argv array.
++ */
++ for (i = 0; i < num_argc; i++) {
++ if (args[i] == NULL) {
++ num_argc = i;
++ break;
++ }
++ }
++
++ /* set the write function for this operation */
++ cod_mgr_obj->fxns.get_attrs_fxn(cod_mgr_obj->target, &save_attrs);
++
++ new_attrs = save_attrs;
++ new_attrs.write = (dbll_write_fxn) pfn_write;
++ new_attrs.input_params = arb;
++ new_attrs.alloc = (dbll_alloc_fxn) no_op;
++ new_attrs.free = (dbll_free_fxn) no_op;
++ new_attrs.log_write = NULL;
++ new_attrs.log_write_handle = NULL;
++
++ /* Load the image */
++ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
++ status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags,
++ &new_attrs,
++ &cod_mgr_obj->ul_entry);
++ if (status)
++ cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib);
++
++ if (!status)
++ cod_mgr_obj->loaded = true;
++ else
++ cod_mgr_obj->base_lib = NULL;
++
++ return status;
++}
++
++/*
++ * ======== cod_open ========
++ * Open library for reading sections.
++ */
++int cod_open(struct cod_manager *hmgr, char *sz_coff_path,
++ u32 flags, struct cod_libraryobj **lib_obj)
++{
++ int status = 0;
++ struct cod_libraryobj *lib = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hmgr);
++ DBC_REQUIRE(sz_coff_path != NULL);
++ DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
++ DBC_REQUIRE(lib_obj != NULL);
++
++ *lib_obj = NULL;
++
++ lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
++ if (lib == NULL)
++ status = -ENOMEM;
++
++ if (!status) {
++ lib->cod_mgr = hmgr;
++ status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags,
++ &lib->dbll_lib);
++ if (!status)
++ *lib_obj = lib;
++ }
++
++ if (status)
++ pr_err("%s: error status 0x%x, sz_coff_path: %s flags: 0x%x\n",
++ __func__, status, sz_coff_path, flags);
++ return status;
++}
++
++/*
++ * ======== cod_open_base ========
++ * Purpose:
++ * Open base image for reading sections.
++ */
++int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
++ dbll_flags flags)
++{
++ int status = 0;
++ struct dbll_library_obj *lib;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hmgr);
++ DBC_REQUIRE(sz_coff_path != NULL);
++
++ /* if we previously opened a base image, close it now */
++ if (hmgr->base_lib) {
++ if (hmgr->loaded) {
++ hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs);
++ hmgr->loaded = false;
++ }
++ hmgr->fxns.close_fxn(hmgr->base_lib);
++ hmgr->base_lib = NULL;
++ }
++ status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags, &lib);
++ if (!status) {
++ /* hang onto the library for subsequent sym table usage */
++ hmgr->base_lib = lib;
++ strncpy(hmgr->sz_zl_file, sz_coff_path, COD_MAXPATHLENGTH - 1);
++ hmgr->sz_zl_file[COD_MAXPATHLENGTH - 1] = '\0';
++ }
++
++ if (status)
++ pr_err("%s: error status 0x%x sz_coff_path: %s\n", __func__,
++ status, sz_coff_path);
++ return status;
++}
++
++/*
++ * ======== cod_read_section ========
++ * Purpose:
++ * Retrieve the content of a code section given the section name.
++ */
++int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
++ char *str_content, u32 content_size)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(lib != NULL);
++ DBC_REQUIRE(lib->cod_mgr);
++ DBC_REQUIRE(str_sect != NULL);
++ DBC_REQUIRE(str_content != NULL);
++
++ if (lib != NULL)
++ status =
++ lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
++ str_content, content_size);
++ else
++ status = -ESPIPE;
++
++ return status;
++}
++
++/*
++ * ======== no_op ========
++ * Purpose:
++ * No Operation.
++ *
++ */
++static bool no_op(void)
++{
++ return true;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dbll.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dbll.c 2010-08-18 11:24:23.210055890 +0300
+@@ -0,0 +1,1585 @@
++/*
++ * dbll.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++#include <dspbridge/gh.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++
++/* Dynamic loader library interface */
++#include <dspbridge/dynamic_loader.h>
++#include <dspbridge/getsection.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/dbll.h>
++#include <dspbridge/rmm.h>
++
++/* Number of buckets for symbol hash table */
++#define MAXBUCKETS 211
++
++/* Max buffer length */
++#define MAXEXPR 128
++
++#define DOFF_ALIGN(x) (((x) + 3) & ~3UL)
++
++/*
++ * ======== struct dbll_tar_obj* ========
++ * A target may have one or more libraries of symbols/code/data loaded
++ * onto it, where a library is simply the symbols/code/data contained
++ * in a DOFF file.
++ */
++/*
++ * ======== dbll_tar_obj ========
++ */
++struct dbll_tar_obj {
++ struct dbll_attrs attrs;
++ struct dbll_library_obj *head; /* List of all opened libraries */
++};
++
++/*
++ * The following 4 typedefs are "super classes" of the dynamic loader
++ * library types used in dynamic loader functions (dynamic_loader.h).
++ */
++/*
++ * ======== dbll_stream ========
++ * Contains dynamic_loader_stream
++ */
++struct dbll_stream {
++ struct dynamic_loader_stream dl_stream;
++ struct dbll_library_obj *lib;
++};
++
++/*
++ * ======== ldr_symbol ========
++ */
++struct ldr_symbol {
++ struct dynamic_loader_sym dl_symbol;
++ struct dbll_library_obj *lib;
++};
++
++/*
++ * ======== dbll_alloc ========
++ */
++struct dbll_alloc {
++ struct dynamic_loader_allocate dl_alloc;
++ struct dbll_library_obj *lib;
++};
++
++/*
++ * ======== dbll_init_obj ========
++ */
++struct dbll_init_obj {
++ struct dynamic_loader_initialize dl_init;
++ struct dbll_library_obj *lib;
++};
++
++/*
++ * ======== DBLL_Library ========
++ * A library handle is returned by DBLL_Open() and is passed to dbll_load()
++ * to load symbols/code/data, and to dbll_unload(), to remove the
++ * symbols/code/data loaded by dbll_load().
++ */
++
++/*
++ * ======== dbll_library_obj ========
++ */
++struct dbll_library_obj {
++ struct dbll_library_obj *next; /* Next library in target's list */
++ struct dbll_library_obj *prev; /* Previous in the list */
++ struct dbll_tar_obj *target_obj; /* target for this library */
++
++ /* Objects needed by dynamic loader */
++ struct dbll_stream stream;
++ struct ldr_symbol symbol;
++ struct dbll_alloc allocate;
++ struct dbll_init_obj init;
++ void *dload_mod_obj;
++
++ char *file_name; /* COFF file name */
++ void *fp; /* Opaque file handle */
++ u32 entry; /* Entry point */
++ void *desc; /* desc of DOFF file loaded */
++ u32 open_ref; /* Number of times opened */
++ u32 load_ref; /* Number of times loaded */
++ struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */
++ u32 ul_pos;
++};
++
++/*
++ * ======== dbll_symbol ========
++ */
++struct dbll_symbol {
++ struct dbll_sym_val value;
++ char *name;
++};
++
++static void dof_close(struct dbll_library_obj *zl_lib);
++static int dof_open(struct dbll_library_obj *zl_lib);
++static s32 no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
++ ldr_addr locn, struct ldr_section_info *info,
++ unsigned bytsize);
++
++/*
++ * Functions called by dynamic loader
++ *
++ */
++/* dynamic_loader_stream */
++static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
++ unsigned bufsize);
++static int dbll_set_file_posn(struct dynamic_loader_stream *this,
++ unsigned int pos);
++/* dynamic_loader_sym */
++static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
++ const char *name);
++static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
++ *this, const char *name,
++ unsigned module_id);
++static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
++ *this, const char *name,
++ unsigned moduleid);
++static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
++ unsigned module_id);
++static void *allocate(struct dynamic_loader_sym *this, unsigned memsize);
++static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr);
++static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
++ va_list args);
++/* dynamic_loader_allocate */
++static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
++ struct ldr_section_info *info, unsigned align);
++static void rmm_dealloc(struct dynamic_loader_allocate *this,
++ struct ldr_section_info *info);
++
++/* dynamic_loader_initialize */
++static int connect(struct dynamic_loader_initialize *this);
++static int read_mem(struct dynamic_loader_initialize *this, void *buf,
++ ldr_addr addr, struct ldr_section_info *info,
++ unsigned bytes);
++static int write_mem(struct dynamic_loader_initialize *this, void *buf,
++ ldr_addr addr, struct ldr_section_info *info,
++ unsigned nbytes);
++static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
++ struct ldr_section_info *info, unsigned bytes,
++ unsigned val);
++static int execute(struct dynamic_loader_initialize *this, ldr_addr start);
++static void release(struct dynamic_loader_initialize *this);
++
++/* symbol table hash functions */
++static u16 name_hash(void *key, u16 max_bucket);
++static bool name_match(void *key, void *sp);
++static void sym_delete(void *value);
++
++static u32 refs; /* module reference count */
++
++/* Symbol Redefinition */
++static int redefined_symbol;
++static int gbl_search = 1;
++
++/*
++ * ======== dbll_close ========
++ */
++void dbll_close(struct dbll_library_obj *zl_lib)
++{
++ struct dbll_tar_obj *zl_target;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_lib);
++ DBC_REQUIRE(zl_lib->open_ref > 0);
++ zl_target = zl_lib->target_obj;
++ zl_lib->open_ref--;
++ if (zl_lib->open_ref == 0) {
++ /* Remove library from list */
++ if (zl_target->head == zl_lib)
++ zl_target->head = zl_lib->next;
++
++ if (zl_lib->prev)
++ (zl_lib->prev)->next = zl_lib->next;
++
++ if (zl_lib->next)
++ (zl_lib->next)->prev = zl_lib->prev;
++
++ /* Free DOF resources */
++ dof_close(zl_lib);
++ kfree(zl_lib->file_name);
++
++ /* remove symbols from symbol table */
++ if (zl_lib->sym_tab)
++ gh_delete(zl_lib->sym_tab);
++
++ /* remove the library object itself */
++ kfree(zl_lib);
++ zl_lib = NULL;
++ }
++}
++
++/*
++ * ======== dbll_create ========
++ */
++int dbll_create(struct dbll_tar_obj **target_obj,
++ struct dbll_attrs *pattrs)
++{
++ struct dbll_tar_obj *pzl_target;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(pattrs != NULL);
++ DBC_REQUIRE(target_obj != NULL);
++
++ /* Allocate DBL target object */
++ pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
++ if (target_obj != NULL) {
++ if (pzl_target == NULL) {
++ *target_obj = NULL;
++ status = -ENOMEM;
++ } else {
++ pzl_target->attrs = *pattrs;
++ *target_obj = (struct dbll_tar_obj *)pzl_target;
++ }
++ DBC_ENSURE((!status && *target_obj) ||
++ (status && *target_obj == NULL));
++ }
++
++ return status;
++}
++
++/*
++ * ======== dbll_delete ========
++ */
++void dbll_delete(struct dbll_tar_obj *target)
++{
++ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_target);
++
++ if (zl_target != NULL)
++ kfree(zl_target);
++
++}
++
++/*
++ * ======== dbll_exit ========
++ * Discontinue usage of DBL module.
++ */
++void dbll_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ if (refs == 0)
++ gh_exit();
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== dbll_get_addr ========
++ * Get address of name in the specified library.
++ */
++bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
++ struct dbll_sym_val **sym_val)
++{
++ struct dbll_symbol *sym;
++ bool status = false;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_lib);
++ DBC_REQUIRE(name != NULL);
++ DBC_REQUIRE(sym_val != NULL);
++ DBC_REQUIRE(zl_lib->sym_tab != NULL);
++
++ sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
++ if (sym != NULL) {
++ *sym_val = &sym->value;
++ status = true;
++ }
++
++ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n",
++ __func__, zl_lib, name, sym_val, status);
++ return status;
++}
++
++/*
++ * ======== dbll_get_attrs ========
++ * Retrieve the attributes of the target.
++ */
++void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
++{
++ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_target);
++ DBC_REQUIRE(pattrs != NULL);
++
++ if ((pattrs != NULL) && (zl_target != NULL))
++ *pattrs = zl_target->attrs;
++
++}
++
++/*
++ * ======== dbll_get_c_addr ========
++ * Get address of a "C" name in the specified library.
++ */
++bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
++ struct dbll_sym_val **sym_val)
++{
++ struct dbll_symbol *sym;
++ char cname[MAXEXPR + 1];
++ bool status = false;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_lib);
++ DBC_REQUIRE(sym_val != NULL);
++ DBC_REQUIRE(zl_lib->sym_tab != NULL);
++ DBC_REQUIRE(name != NULL);
++
++ cname[0] = '_';
++
++ strncpy(cname + 1, name, sizeof(cname) - 2);
++ cname[MAXEXPR] = '\0'; /* insure '\0' string termination */
++
++ /* Check for C name, if not found */
++ sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname);
++
++ if (sym != NULL) {
++ *sym_val = &sym->value;
++ status = true;
++ }
++
++ return status;
++}
++
++/*
++ * ======== dbll_get_sect ========
++ * Get the base address and size (in bytes) of a COFF section.
++ */
++int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
++ u32 *psize)
++{
++ u32 byte_size;
++ bool opened_doff = false;
++ const struct ldr_section_info *sect = NULL;
++ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(name != NULL);
++ DBC_REQUIRE(paddr != NULL);
++ DBC_REQUIRE(psize != NULL);
++ DBC_REQUIRE(zl_lib);
++
++ /* If DOFF file is not open, we open it. */
++ if (zl_lib != NULL) {
++ if (zl_lib->fp == NULL) {
++ status = dof_open(zl_lib);
++ if (!status)
++ opened_doff = true;
++
++ } else {
++ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
++ zl_lib->ul_pos,
++ SEEK_SET);
++ }
++ } else {
++ status = -EFAULT;
++ }
++ if (!status) {
++ byte_size = 1;
++ if (dload_get_section_info(zl_lib->desc, name, &sect)) {
++ *paddr = sect->load_addr;
++ *psize = sect->size * byte_size;
++ /* Make sure size is even for good swap */
++ if (*psize % 2)
++ (*psize)++;
++
++ /* Align size */
++ *psize = DOFF_ALIGN(*psize);
++ } else {
++ status = -ENXIO;
++ }
++ }
++ if (opened_doff) {
++ dof_close(zl_lib);
++ opened_doff = false;
++ }
++
++ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, "
++ "status 0x%x\n", __func__, lib, name, paddr, psize, status);
++
++ return status;
++}
++
++/*
++ * ======== dbll_init ========
++ */
++bool dbll_init(void)
++{
++ DBC_REQUIRE(refs >= 0);
++
++ if (refs == 0)
++ gh_init();
++
++ refs++;
++
++ return true;
++}
++
++/*
++ * ======== dbll_load ========
++ */
++int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
++ struct dbll_attrs *attrs, u32 *entry)
++{
++ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
++ struct dbll_tar_obj *dbzl;
++ bool got_symbols = true;
++ s32 err;
++ int status = 0;
++ bool opened_doff = false;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_lib);
++ DBC_REQUIRE(entry != NULL);
++ DBC_REQUIRE(attrs != NULL);
++
++ /*
++ * Load if not already loaded.
++ */
++ if (zl_lib->load_ref == 0 || !(flags & DBLL_DYNAMIC)) {
++ dbzl = zl_lib->target_obj;
++ dbzl->attrs = *attrs;
++ /* Create a hash table for symbols if not already created */
++ if (zl_lib->sym_tab == NULL) {
++ got_symbols = false;
++ zl_lib->sym_tab = gh_create(MAXBUCKETS,
++ sizeof(struct dbll_symbol),
++ name_hash,
++ name_match, sym_delete);
++ if (zl_lib->sym_tab == NULL)
++ status = -ENOMEM;
++
++ }
++ /*
++ * Set up objects needed by the dynamic loader
++ */
++ /* Stream */
++ zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer;
++ zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn;
++ zl_lib->stream.lib = zl_lib;
++ /* Symbol */
++ zl_lib->symbol.dl_symbol.find_matching_symbol =
++ dbll_find_symbol;
++ if (got_symbols) {
++ zl_lib->symbol.dl_symbol.add_to_symbol_table =
++ find_in_symbol_table;
++ } else {
++ zl_lib->symbol.dl_symbol.add_to_symbol_table =
++ dbll_add_to_symbol_table;
++ }
++ zl_lib->symbol.dl_symbol.purge_symbol_table =
++ dbll_purge_symbol_table;
++ zl_lib->symbol.dl_symbol.dload_allocate = allocate;
++ zl_lib->symbol.dl_symbol.dload_deallocate = deallocate;
++ zl_lib->symbol.dl_symbol.error_report = dbll_err_report;
++ zl_lib->symbol.lib = zl_lib;
++ /* Allocate */
++ zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc;
++ zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc;
++ zl_lib->allocate.lib = zl_lib;
++ /* Init */
++ zl_lib->init.dl_init.connect = connect;
++ zl_lib->init.dl_init.readmem = read_mem;
++ zl_lib->init.dl_init.writemem = write_mem;
++ zl_lib->init.dl_init.fillmem = fill_mem;
++ zl_lib->init.dl_init.execute = execute;
++ zl_lib->init.dl_init.release = release;
++ zl_lib->init.lib = zl_lib;
++ /* If COFF file is not open, we open it. */
++ if (zl_lib->fp == NULL) {
++ status = dof_open(zl_lib);
++ if (!status)
++ opened_doff = true;
++
++ }
++ if (!status) {
++ zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell))
++ (zl_lib->fp);
++ /* Reset file cursor */
++ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
++ (long)0,
++ SEEK_SET);
++ symbols_reloaded = true;
++ /* The 5th argument, DLOAD_INITBSS, tells the DLL
++ * module to zero-init all BSS sections. In general,
++ * this is not necessary and also increases load time.
++ * We may want to make this configurable by the user */
++ err = dynamic_load_module(&zl_lib->stream.dl_stream,
++ &zl_lib->symbol.dl_symbol,
++ &zl_lib->allocate.dl_alloc,
++ &zl_lib->init.dl_init,
++ DLOAD_INITBSS,
++ &zl_lib->dload_mod_obj);
++
++ if (err != 0) {
++ status = -EILSEQ;
++ } else if (redefined_symbol) {
++ zl_lib->load_ref++;
++ dbll_unload(zl_lib, (struct dbll_attrs *)attrs);
++ redefined_symbol = false;
++ status = -EILSEQ;
++ } else {
++ *entry = zl_lib->entry;
++ }
++ }
++ }
++ if (!status)
++ zl_lib->load_ref++;
++
++ /* Clean up DOFF resources */
++ if (opened_doff)
++ dof_close(zl_lib);
++
++ DBC_ENSURE(status || zl_lib->load_ref > 0);
++
++ dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n",
++ __func__, lib, flags, entry, status);
++
++ return status;
++}
++
++/*
++ * ======== dbll_load_sect ========
++ * Not supported for COFF.
++ */
++int dbll_load_sect(struct dbll_library_obj *zl_lib, char *sec_name,
++ struct dbll_attrs *attrs)
++{
++ DBC_REQUIRE(zl_lib);
++
++ return -ENOSYS;
++}
++
++/*
++ * ======== dbll_open ========
++ */
++int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
++ struct dbll_library_obj **lib_obj)
++{
++ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
++ struct dbll_library_obj *zl_lib = NULL;
++ s32 err;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_target);
++ DBC_REQUIRE(zl_target->attrs.fopen != NULL);
++ DBC_REQUIRE(file != NULL);
++ DBC_REQUIRE(lib_obj != NULL);
++
++ zl_lib = zl_target->head;
++ while (zl_lib != NULL) {
++ if (strcmp(zl_lib->file_name, file) == 0) {
++ /* Library is already opened */
++ zl_lib->open_ref++;
++ break;
++ }
++ zl_lib = zl_lib->next;
++ }
++ if (zl_lib == NULL) {
++ /* Allocate DBL library object */
++ zl_lib = kzalloc(sizeof(struct dbll_library_obj), GFP_KERNEL);
++ if (zl_lib == NULL) {
++ status = -ENOMEM;
++ } else {
++ zl_lib->ul_pos = 0;
++ /* Increment ref count to allow close on failure
++ * later on */
++ zl_lib->open_ref++;
++ zl_lib->target_obj = zl_target;
++ /* Keep a copy of the file name */
++ zl_lib->file_name = kzalloc(strlen(file) + 1,
++ GFP_KERNEL);
++ if (zl_lib->file_name == NULL) {
++ status = -ENOMEM;
++ } else {
++ strncpy(zl_lib->file_name, file,
++ strlen(file) + 1);
++ }
++ zl_lib->sym_tab = NULL;
++ }
++ }
++ /*
++ * Set up objects needed by the dynamic loader
++ */
++ if (status)
++ goto func_cont;
++
++ /* Stream */
++ zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer;
++ zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn;
++ zl_lib->stream.lib = zl_lib;
++ /* Symbol */
++ zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table;
++ zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol;
++ zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table;
++ zl_lib->symbol.dl_symbol.dload_allocate = allocate;
++ zl_lib->symbol.dl_symbol.dload_deallocate = deallocate;
++ zl_lib->symbol.dl_symbol.error_report = dbll_err_report;
++ zl_lib->symbol.lib = zl_lib;
++ /* Allocate */
++ zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc;
++ zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc;
++ zl_lib->allocate.lib = zl_lib;
++ /* Init */
++ zl_lib->init.dl_init.connect = connect;
++ zl_lib->init.dl_init.readmem = read_mem;
++ zl_lib->init.dl_init.writemem = write_mem;
++ zl_lib->init.dl_init.fillmem = fill_mem;
++ zl_lib->init.dl_init.execute = execute;
++ zl_lib->init.dl_init.release = release;
++ zl_lib->init.lib = zl_lib;
++ if (!status && zl_lib->fp == NULL)
++ status = dof_open(zl_lib);
++
++ zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
++ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET);
++ /* Create a hash table for symbols if flag is set */
++ if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB))
++ goto func_cont;
++
++ zl_lib->sym_tab =
++ gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash,
++ name_match, sym_delete);
++ if (zl_lib->sym_tab == NULL) {
++ status = -ENOMEM;
++ } else {
++ /* Do a fake load to get symbols - set write func to no_op */
++ zl_lib->init.dl_init.writemem = no_op;
++ err = dynamic_open_module(&zl_lib->stream.dl_stream,
++ &zl_lib->symbol.dl_symbol,
++ &zl_lib->allocate.dl_alloc,
++ &zl_lib->init.dl_init, 0,
++ &zl_lib->dload_mod_obj);
++ if (err != 0) {
++ status = -EILSEQ;
++ } else {
++ /* Now that we have the symbol table, we can unload */
++ err = dynamic_unload_module(zl_lib->dload_mod_obj,
++ &zl_lib->symbol.dl_symbol,
++ &zl_lib->allocate.dl_alloc,
++ &zl_lib->init.dl_init);
++ if (err != 0)
++ status = -EILSEQ;
++
++ zl_lib->dload_mod_obj = NULL;
++ }
++ }
++func_cont:
++ if (!status) {
++ if (zl_lib->open_ref == 1) {
++ /* First time opened - insert in list */
++ if (zl_target->head)
++ (zl_target->head)->prev = zl_lib;
++
++ zl_lib->prev = NULL;
++ zl_lib->next = zl_target->head;
++ zl_target->head = zl_lib;
++ }
++ *lib_obj = (struct dbll_library_obj *)zl_lib;
++ } else {
++ *lib_obj = NULL;
++ if (zl_lib != NULL)
++ dbll_close((struct dbll_library_obj *)zl_lib);
++
++ }
++ DBC_ENSURE((!status && (zl_lib->open_ref > 0) && *lib_obj)
++ || (status && *lib_obj == NULL));
++
++ dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n",
++ __func__, target, file, lib_obj, status);
++
++ return status;
++}
++
++/*
++ * ======== dbll_read_sect ========
++ * Get the content of a COFF section.
++ */
++int dbll_read_sect(struct dbll_library_obj *lib, char *name,
++ char *buf, u32 size)
++{
++ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
++ bool opened_doff = false;
++ u32 byte_size; /* size of bytes */
++ u32 ul_sect_size; /* size of section */
++ const struct ldr_section_info *sect = NULL;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_lib);
++ DBC_REQUIRE(name != NULL);
++ DBC_REQUIRE(buf != NULL);
++ DBC_REQUIRE(size != 0);
++
++ /* If DOFF file is not open, we open it. */
++ if (zl_lib != NULL) {
++ if (zl_lib->fp == NULL) {
++ status = dof_open(zl_lib);
++ if (!status)
++ opened_doff = true;
++
++ } else {
++ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
++ zl_lib->ul_pos,
++ SEEK_SET);
++ }
++ } else {
++ status = -EFAULT;
++ }
++ if (status)
++ goto func_cont;
++
++ byte_size = 1;
++ if (!dload_get_section_info(zl_lib->desc, name, &sect)) {
++ status = -ENXIO;
++ goto func_cont;
++ }
++ /*
++ * Ensure the supplied buffer size is sufficient to store
++ * the section buf to be read.
++ */
++ ul_sect_size = sect->size * byte_size;
++ /* Make sure size is even for good swap */
++ if (ul_sect_size % 2)
++ ul_sect_size++;
++
++ /* Align size */
++ ul_sect_size = DOFF_ALIGN(ul_sect_size);
++ if (ul_sect_size > size) {
++ status = -EPERM;
++ } else {
++ if (!dload_get_section(zl_lib->desc, sect, buf))
++ status = -EBADF;
++
++ }
++func_cont:
++ if (opened_doff) {
++ dof_close(zl_lib);
++ opened_doff = false;
++ }
++
++ dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, "
++ "status 0x%x\n", __func__, lib, name, buf, size, status);
++ return status;
++}
++
++/*
++ * ======== dbll_set_attrs ========
++ * Set the attributes of the target.
++ */
++void dbll_set_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
++{
++ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_target);
++ DBC_REQUIRE(pattrs != NULL);
++
++ if ((pattrs != NULL) && (zl_target != NULL))
++ zl_target->attrs = *pattrs;
++
++}
++
++/*
++ * ======== dbll_unload ========
++ */
++void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
++{
++ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
++ s32 err = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(zl_lib);
++ DBC_REQUIRE(zl_lib->load_ref > 0);
++ dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
++ zl_lib->load_ref--;
++ /* Unload only if reference count is 0 */
++ if (zl_lib->load_ref != 0)
++ goto func_end;
++
++ zl_lib->target_obj->attrs = *attrs;
++ if (zl_lib->dload_mod_obj) {
++ err = dynamic_unload_module(zl_lib->dload_mod_obj,
++ &zl_lib->symbol.dl_symbol,
++ &zl_lib->allocate.dl_alloc,
++ &zl_lib->init.dl_init);
++ if (err != 0)
++ dev_dbg(bridge, "%s: failed: 0x%x\n", __func__, err);
++ }
++ /* remove symbols from symbol table */
++ if (zl_lib->sym_tab != NULL) {
++ gh_delete(zl_lib->sym_tab);
++ zl_lib->sym_tab = NULL;
++ }
++ /* delete DOFF desc since it holds *lots* of host OS
++ * resources */
++ dof_close(zl_lib);
++func_end:
++ DBC_ENSURE(zl_lib->load_ref >= 0);
++}
++
++/*
++ * ======== dbll_unload_sect ========
++ * Not supported for COFF.
++ */
++int dbll_unload_sect(struct dbll_library_obj *lib, char *sec_name,
++ struct dbll_attrs *attrs)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(sec_name != NULL);
++
++ return -ENOSYS;
++}
++
++/*
++ * ======== dof_close ========
++ */
++static void dof_close(struct dbll_library_obj *zl_lib)
++{
++ if (zl_lib->desc) {
++ dload_module_close(zl_lib->desc);
++ zl_lib->desc = NULL;
++ }
++ /* close file */
++ if (zl_lib->fp) {
++ (zl_lib->target_obj->attrs.fclose) (zl_lib->fp);
++ zl_lib->fp = NULL;
++ }
++}
++
++/*
++ * ======== dof_open ========
++ */
++static int dof_open(struct dbll_library_obj *zl_lib)
++{
++ void *open = *(zl_lib->target_obj->attrs.fopen);
++ int status = 0;
++
++ /* First open the file for the dynamic loader, then open COF */
++ zl_lib->fp =
++ (void *)((dbll_f_open_fxn) (open)) (zl_lib->file_name, "rb");
++
++ /* Open DOFF module */
++ if (zl_lib->fp && zl_lib->desc == NULL) {
++ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0,
++ SEEK_SET);
++ zl_lib->desc =
++ dload_module_open(&zl_lib->stream.dl_stream,
++ &zl_lib->symbol.dl_symbol);
++ if (zl_lib->desc == NULL) {
++ (zl_lib->target_obj->attrs.fclose) (zl_lib->fp);
++ zl_lib->fp = NULL;
++ status = -EBADF;
++ }
++ } else {
++ status = -EBADF;
++ }
++
++ return status;
++}
++
++/*
++ * ======== name_hash ========
++ */
++static u16 name_hash(void *key, u16 max_bucket)
++{
++ u16 ret;
++ u16 hash;
++ char *name = (char *)key;
++
++ DBC_REQUIRE(name != NULL);
++
++ hash = 0;
++
++ while (*name) {
++ hash <<= 1;
++ hash ^= *name++;
++ }
++
++ ret = hash % max_bucket;
++
++ return ret;
++}
++
++/*
++ * ======== name_match ========
++ */
++static bool name_match(void *key, void *sp)
++{
++ DBC_REQUIRE(key != NULL);
++ DBC_REQUIRE(sp != NULL);
++
++ if ((key != NULL) && (sp != NULL)) {
++ if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
++ 0)
++ return true;
++ }
++ return false;
++}
++
++/*
++ * ======== no_op ========
++ */
++static int no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
++ ldr_addr locn, struct ldr_section_info *info, unsigned bytsize)
++{
++ return 1;
++}
++
++/*
++ * ======== sym_delete ========
++ */
++static void sym_delete(void *value)
++{
++ struct dbll_symbol *sp = (struct dbll_symbol *)value;
++
++ kfree(sp->name);
++}
++
++/*
++ * Dynamic Loader Functions
++ */
++
++/* dynamic_loader_stream */
++/*
++ * ======== dbll_read_buffer ========
++ */
++static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
++ unsigned bufsize)
++{
++ struct dbll_stream *pstream = (struct dbll_stream *)this;
++ struct dbll_library_obj *lib;
++ int bytes_read = 0;
++
++ DBC_REQUIRE(this != NULL);
++ lib = pstream->lib;
++ DBC_REQUIRE(lib);
++
++ if (lib != NULL) {
++ bytes_read =
++ (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
++ lib->fp);
++ }
++ return bytes_read;
++}
++
++/*
++ * ======== dbll_set_file_posn ========
++ */
++static int dbll_set_file_posn(struct dynamic_loader_stream *this,
++ unsigned int pos)
++{
++ struct dbll_stream *pstream = (struct dbll_stream *)this;
++ struct dbll_library_obj *lib;
++ int status = 0; /* Success */
++
++ DBC_REQUIRE(this != NULL);
++ lib = pstream->lib;
++ DBC_REQUIRE(lib);
++
++ if (lib != NULL) {
++ status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
++ SEEK_SET);
++ }
++
++ return status;
++}
++
++/* dynamic_loader_sym */
++
++/*
++ * ======== dbll_find_symbol ========
++ */
++static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
++ const char *name)
++{
++ struct dynload_symbol *ret_sym;
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++ struct dbll_sym_val *dbll_sym = NULL;
++ bool status = false; /* Symbol not found yet */
++
++ DBC_REQUIRE(this != NULL);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++
++ if (lib != NULL) {
++ if (lib->target_obj->attrs.sym_lookup) {
++ /* Check current lib + base lib + dep lib +
++ * persistent lib */
++ status = (*(lib->target_obj->attrs.sym_lookup))
++ (lib->target_obj->attrs.sym_handle,
++ lib->target_obj->attrs.sym_arg,
++ lib->target_obj->attrs.rmm_handle, name,
++ &dbll_sym);
++ } else {
++ /* Just check current lib for symbol */
++ status = dbll_get_addr((struct dbll_library_obj *)lib,
++ (char *)name, &dbll_sym);
++ if (!status) {
++ status =
++ dbll_get_c_addr((struct dbll_library_obj *)
++ lib, (char *)name,
++ &dbll_sym);
++ }
++ }
++ }
++
++ if (!status && gbl_search)
++ dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
++
++ DBC_ASSERT((status && (dbll_sym != NULL))
++ || (!status && (dbll_sym == NULL)));
++
++ ret_sym = (struct dynload_symbol *)dbll_sym;
++ return ret_sym;
++}
++
++/*
++ * ======== find_in_symbol_table ========
++ */
++static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
++ *this, const char *name,
++ unsigned moduleid)
++{
++ struct dynload_symbol *ret_sym;
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++ struct dbll_symbol *sym;
++
++ DBC_REQUIRE(this != NULL);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++ DBC_REQUIRE(lib->sym_tab != NULL);
++
++ sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
++
++ ret_sym = (struct dynload_symbol *)&sym->value;
++ return ret_sym;
++}
++
++/*
++ * ======== dbll_add_to_symbol_table ========
++ */
++static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
++ *this, const char *name,
++ unsigned module_id)
++{
++ struct dbll_symbol *sym_ptr = NULL;
++ struct dbll_symbol symbol;
++ struct dynload_symbol *dbll_sym = NULL;
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++ struct dynload_symbol *ret;
++
++ DBC_REQUIRE(this != NULL);
++ DBC_REQUIRE(name);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++
++ /* Check to see if symbol is already defined in symbol table */
++ if (!(lib->target_obj->attrs.base_image)) {
++ gbl_search = false;
++ dbll_sym = dbll_find_symbol(this, name);
++ gbl_search = true;
++ if (dbll_sym) {
++ redefined_symbol = true;
++ dev_dbg(bridge, "%s already defined in symbol table\n",
++ name);
++ return NULL;
++ }
++ }
++ /* Allocate string to copy symbol name */
++ symbol.name = kzalloc(strlen((char *const)name) + 1, GFP_KERNEL);
++ if (symbol.name == NULL)
++ return NULL;
++
++ if (symbol.name != NULL) {
++ /* Just copy name (value will be filled in by dynamic loader) */
++ strncpy(symbol.name, (char *const)name,
++ strlen((char *const)name) + 1);
++
++ /* Add symbol to symbol table */
++ sym_ptr =
++ (struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name,
++ (void *)&symbol);
++ if (sym_ptr == NULL)
++ kfree(symbol.name);
++
++ }
++ if (sym_ptr != NULL)
++ ret = (struct dynload_symbol *)&sym_ptr->value;
++ else
++ ret = NULL;
++
++ return ret;
++}
++
++/*
++ * ======== dbll_purge_symbol_table ========
++ */
++static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
++ unsigned module_id)
++{
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++
++ DBC_REQUIRE(this != NULL);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++
++ /* May not need to do anything */
++}
++
++/*
++ * ======== allocate ========
++ */
++static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
++{
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++ void *buf;
++
++ DBC_REQUIRE(this != NULL);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++
++ buf = kzalloc(memsize, GFP_KERNEL);
++
++ return buf;
++}
++
++/*
++ * ======== deallocate ========
++ */
++static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr)
++{
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++
++ DBC_REQUIRE(this != NULL);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++
++ kfree(mem_ptr);
++}
++
++/*
++ * ======== dbll_err_report ========
++ */
++static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
++ va_list args)
++{
++ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
++ struct dbll_library_obj *lib;
++ char temp_buf[MAXEXPR];
++
++ DBC_REQUIRE(this != NULL);
++ lib = ldr_sym->lib;
++ DBC_REQUIRE(lib);
++ vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
++ dev_dbg(bridge, "%s\n", temp_buf);
++}
++
++/* dynamic_loader_allocate */
++
++/*
++ * ======== dbll_rmm_alloc ========
++ */
++static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
++ struct ldr_section_info *info, unsigned align)
++{
++ struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
++ struct dbll_library_obj *lib;
++ int status = 0;
++ u32 mem_sect_type;
++ struct rmm_addr rmm_addr_obj;
++ s32 ret = true;
++ unsigned stype = DLOAD_SECTION_TYPE(info->type);
++ char *token = NULL;
++ char *sz_sec_last_token = NULL;
++ char *sz_last_token = NULL;
++ char *sz_sect_name = NULL;
++ char *psz_cur;
++ s32 token_len = 0;
++ s32 seg_id = -1;
++ s32 req = -1;
++ s32 count = 0;
++ u32 alloc_size = 0;
++ u32 run_addr_flag = 0;
++
++ DBC_REQUIRE(this != NULL);
++ lib = dbll_alloc_obj->lib;
++ DBC_REQUIRE(lib);
++
++ mem_sect_type =
++ (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
++ DLOAD_BSS) ? DBLL_BSS :
++ DBLL_DATA;
++
++ /* Attempt to extract the segment ID and requirement information from
++ the name of the section */
++ DBC_REQUIRE(info->name);
++ token_len = strlen((char *)(info->name)) + 1;
++
++ sz_sect_name = kzalloc(token_len, GFP_KERNEL);
++ sz_last_token = kzalloc(token_len, GFP_KERNEL);
++ sz_sec_last_token = kzalloc(token_len, GFP_KERNEL);
++
++ if (sz_sect_name == NULL || sz_sec_last_token == NULL ||
++ sz_last_token == NULL) {
++ status = -ENOMEM;
++ goto func_cont;
++ }
++ strncpy(sz_sect_name, (char *)(info->name), token_len);
++ psz_cur = sz_sect_name;
++ while ((token = strsep(&psz_cur, ":")) && *token != '\0') {
++ strncpy(sz_sec_last_token, sz_last_token,
++ strlen(sz_last_token) + 1);
++ strncpy(sz_last_token, token, strlen(token) + 1);
++ token = strsep(&psz_cur, ":");
++ count++; /* optimizes processing */
++ }
++ /* If token is 0 or 1, and sz_sec_last_token is DYN_DARAM or DYN_SARAM,
++ or DYN_EXTERNAL, then mem granularity information is present
++ within the section name - only process if there are at least three
++ tokens within the section name (just a minor optimization) */
++ if (count >= 3)
++ strict_strtol(sz_last_token, 10, (long *)&req);
++
++ if ((req == 0) || (req == 1)) {
++ if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
++ seg_id = 0;
++ } else {
++ if (strcmp(sz_sec_last_token, "DYN_SARAM") == 0) {
++ seg_id = 1;
++ } else {
++ if (strcmp(sz_sec_last_token,
++ "DYN_EXTERNAL") == 0)
++ seg_id = 2;
++ }
++ }
++ }
++func_cont:
++ kfree(sz_sect_name);
++ sz_sect_name = NULL;
++ kfree(sz_last_token);
++ sz_last_token = NULL;
++ kfree(sz_sec_last_token);
++ sz_sec_last_token = NULL;
++
++ if (mem_sect_type == DBLL_CODE)
++ alloc_size = info->size + GEM_L1P_PREFETCH_SIZE;
++ else
++ alloc_size = info->size;
++
++ if (info->load_addr != info->run_addr)
++ run_addr_flag = 1;
++ /* TODO - ideally, we can pass the alignment requirement also
++ * from here */
++ if (lib != NULL) {
++ status =
++ (lib->target_obj->attrs.alloc) (lib->target_obj->attrs.
++ rmm_handle, mem_sect_type,
++ alloc_size, align,
++ (u32 *) &rmm_addr_obj,
++ seg_id, req, false);
++ }
++ if (status) {
++ ret = false;
++ } else {
++ /* RMM gives word address. Need to convert to byte address */
++ info->load_addr = rmm_addr_obj.addr * DSPWORDSIZE;
++ if (!run_addr_flag)
++ info->run_addr = info->load_addr;
++ info->context = (u32) rmm_addr_obj.segid;
++ dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, "
++ "info->run_addr 0x%x, info->load_addr 0x%x\n",
++ __func__, info->name, info->load_addr / DSPWORDSIZE,
++ info->size / DSPWORDSIZE, info->run_addr,
++ info->load_addr);
++ }
++ return ret;
++}
++
++/*
++ * ======== rmm_dealloc ========
++ */
++static void rmm_dealloc(struct dynamic_loader_allocate *this,
++ struct ldr_section_info *info)
++{
++ struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
++ struct dbll_library_obj *lib;
++ u32 segid;
++ int status = 0;
++ unsigned stype = DLOAD_SECTION_TYPE(info->type);
++ u32 mem_sect_type;
++ u32 free_size = 0;
++
++ mem_sect_type =
++ (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
++ DLOAD_BSS) ? DBLL_BSS :
++ DBLL_DATA;
++ DBC_REQUIRE(this != NULL);
++ lib = dbll_alloc_obj->lib;
++ DBC_REQUIRE(lib);
++ /* segid was set by alloc function */
++ segid = (u32) info->context;
++ if (mem_sect_type == DBLL_CODE)
++ free_size = info->size + GEM_L1P_PREFETCH_SIZE;
++ else
++ free_size = info->size;
++ if (lib != NULL) {
++ status =
++ (lib->target_obj->attrs.free) (lib->target_obj->attrs.
++ sym_handle, segid,
++ info->load_addr /
++ DSPWORDSIZE, free_size,
++ false);
++ }
++}
++
++/* dynamic_loader_initialize */
++/*
++ * ======== connect ========
++ */
++static int connect(struct dynamic_loader_initialize *this)
++{
++ return true;
++}
++
++/*
++ * ======== read_mem ========
++ * This function does not need to be implemented.
++ */
++static int read_mem(struct dynamic_loader_initialize *this, void *buf,
++ ldr_addr addr, struct ldr_section_info *info,
++ unsigned nbytes)
++{
++ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
++ struct dbll_library_obj *lib;
++ int bytes_read = 0;
++
++ DBC_REQUIRE(this != NULL);
++ lib = init_obj->lib;
++ DBC_REQUIRE(lib);
++ /* Need bridge_brd_read function */
++ return bytes_read;
++}
++
++/*
++ * ======== write_mem ========
++ */
++static int write_mem(struct dynamic_loader_initialize *this, void *buf,
++ ldr_addr addr, struct ldr_section_info *info,
++ unsigned bytes)
++{
++ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
++ struct dbll_library_obj *lib;
++ struct dbll_tar_obj *target_obj;
++ struct dbll_sect_info sect_info;
++ u32 mem_sect_type;
++ bool ret = true;
++
++ DBC_REQUIRE(this != NULL);
++ lib = init_obj->lib;
++ if (!lib)
++ return false;
++
++ target_obj = lib->target_obj;
++
++ mem_sect_type =
++ (DLOAD_SECTION_TYPE(info->type) ==
++ DLOAD_TEXT) ? DBLL_CODE : DBLL_DATA;
++ if (target_obj && target_obj->attrs.write) {
++ ret =
++ (*target_obj->attrs.write) (target_obj->attrs.input_params,
++ addr, buf, bytes,
++ mem_sect_type);
++
++ if (target_obj->attrs.log_write) {
++ sect_info.name = info->name;
++ sect_info.sect_run_addr = info->run_addr;
++ sect_info.sect_load_addr = info->load_addr;
++ sect_info.size = info->size;
++ sect_info.type = mem_sect_type;
++ /* Pass the information about what we've written to
++ * another module */
++ (*target_obj->attrs.log_write) (target_obj->attrs.
++ log_write_handle,
++ &sect_info, addr,
++ bytes);
++ }
++ }
++ return ret;
++}
++
++/*
++ * ======== fill_mem ========
++ * Fill bytes of memory at a given address with a given value by
++ * writing from a buffer containing the given value. Write in
++ * sets of MAXEXPR (128) bytes to avoid large stack buffer issues.
++ */
++static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
++ struct ldr_section_info *info, unsigned bytes, unsigned val)
++{
++ bool ret = true;
++ char *pbuf;
++ struct dbll_library_obj *lib;
++ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
++
++ DBC_REQUIRE(this != NULL);
++ lib = init_obj->lib;
++ pbuf = NULL;
++ /* Pass the NULL pointer to write_mem to get the start address of Shared
++ memory. This is a trick to just get the start address, there is no
++ writing taking place with this Writemem
++ */
++ if ((lib->target_obj->attrs.write) != (dbll_write_fxn) no_op)
++ write_mem(this, &pbuf, addr, info, 0);
++ if (pbuf)
++ memset(pbuf, val, bytes);
++
++ return ret;
++}
++
++/*
++ * ======== execute ========
++ */
++static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
++{
++ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
++ struct dbll_library_obj *lib;
++ bool ret = true;
++
++ DBC_REQUIRE(this != NULL);
++ lib = init_obj->lib;
++ DBC_REQUIRE(lib);
++ /* Save entry point */
++ if (lib != NULL)
++ lib->entry = (u32) start;
++
++ return ret;
++}
++
++/*
++ * ======== release ========
++ */
++static void release(struct dynamic_loader_initialize *this)
++{
++}
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/**
++ * find_symbol_context - Basic symbol context structure
++ * @address: Symbol Adress
++ * @offset_range: Offset range where the search for the DSP symbol
++ * started.
++ * @cur_best_offset: Best offset to start looking for the DSP symbol
++ * @sym_addr: Address of the DSP symbol
++ * @name: Symbol name
++ *
++ */
++struct find_symbol_context {
++ /* input */
++ u32 address;
++ u32 offset_range;
++ /* state */
++ u32 cur_best_offset;
++ /* output */
++ u32 sym_addr;
++ char name[120];
++};
++
++/**
++ * find_symbol_callback() - Validates symbol address and copies the symbol name
++ * to the user data.
++ * @elem: dsp library context
++ * @user_data: Find symbol context
++ *
++ */
++void find_symbol_callback(void *elem, void *user_data)
++{
++ struct dbll_symbol *symbol = elem;
++ struct find_symbol_context *context = user_data;
++ u32 symbol_addr = symbol->value.value;
++ u32 offset = context->address - symbol_addr;
++
++ /*
++ * Address given should be greater than symbol address,
++ * symbol address should be within specified range
++ * and the offset should be better than previous one
++ */
++ if (context->address >= symbol_addr && symbol_addr < (u32)-1 &&
++ offset < context->cur_best_offset) {
++ context->cur_best_offset = offset;
++ context->sym_addr = symbol_addr;
++ strncpy(context->name, symbol->name, sizeof(context->name));
++ }
++
++ return;
++}
++
++/**
++ * dbll_find_dsp_symbol() - This function retrieves the dsp symbol from the dsp binary.
++ * @zl_lib: DSP binary obj library pointer
++ * @address: Given address to find the dsp symbol
++ * @offset_range: offset range to look for dsp symbol
++ * @sym_addr_output: Symbol Output address
++ * @name_output: String with the dsp symbol
++ *
++ * This function retrieves the dsp symbol from the dsp binary.
++ */
++bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
++ u32 offset_range, u32 *sym_addr_output,
++ char *name_output)
++{
++ bool status = false;
++ struct find_symbol_context context;
++
++ context.address = address;
++ context.offset_range = offset_range;
++ context.cur_best_offset = offset_range;
++ context.sym_addr = 0;
++ context.name[0] = '\0';
++
++ gh_iterate(zl_lib->sym_tab, find_symbol_callback, &context);
++
++ if (context.name[0]) {
++ status = true;
++ strcpy(name_output, context.name);
++ *sym_addr_output = context.sym_addr;
++ }
++
++ return status;
++}
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dev.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dev.c 2010-08-18 11:24:23.210055890 +0300
+@@ -0,0 +1,1151 @@
++/*
++ * dev.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implementation of Bridge Bridge driver device operations.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/ldr.h>
++#include <dspbridge/list.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/cod.h>
++#include <dspbridge/drv.h>
++#include <dspbridge/proc.h>
++#include <dspbridge/dmm.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/mgr.h>
++#include <dspbridge/node.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/dspapi.h> /* DSP API version info. */
++
++#include <dspbridge/chnl.h>
++#include <dspbridge/io.h>
++#include <dspbridge/msg.h>
++#include <dspbridge/cmm.h>
++#include <dspbridge/dspdeh.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++
++#define MAKEVERSION(major, minor) (major * 10 + minor)
++#define BRD_API_VERSION MAKEVERSION(BRD_API_MAJOR_VERSION, \
++ BRD_API_MINOR_VERSION)
++
++/* The Bridge device object: */
++struct dev_object {
++ /* LST requires "link" to be first field! */
++ struct list_head link; /* Link to next dev_object. */
++ u8 dev_type; /* Device Type */
++ struct cfg_devnode *dev_node_obj; /* Platform specific dev id */
++ /* Bridge Context Handle */
++ struct bridge_dev_context *hbridge_context;
++ /* Function interface to Bridge driver. */
++ struct bridge_drv_interface bridge_interface;
++ struct brd_object *lock_owner; /* Client with exclusive access. */
++ struct cod_manager *cod_mgr; /* Code manager handle. */
++ struct chnl_mgr *hchnl_mgr; /* Channel manager. */
++ struct deh_mgr *hdeh_mgr; /* DEH manager. */
++ struct msg_mgr *hmsg_mgr; /* Message manager. */
++ struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
++ struct cmm_object *hcmm_mgr; /* SM memory manager. */
++ struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
++ struct ldr_module *module_obj; /* Bridge Module handle. */
++ u32 word_size; /* DSP word size: quick access. */
++ struct drv_object *hdrv_obj; /* Driver Object */
++ struct lst_list *proc_list; /* List of Proceeosr attached to
++ * this device */
++ struct node_mgr *hnode_mgr;
++};
++
++/* ----------------------------------- Globals */
++static u32 refs; /* Module reference count */
++
++/* ----------------------------------- Function Prototypes */
++static int fxn_not_implemented(int arg, ...);
++static int init_cod_mgr(struct dev_object *dev_obj);
++static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
++ struct bridge_drv_interface *intf_fxns);
++/*
++ * ======== dev_brd_write_fxn ========
++ * Purpose:
++ * Exported function to be used as the COD write function. This function
++ * is passed a handle to a DEV_hObject, then calls the
++ * device's bridge_brd_write() function.
++ */
++u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
++ u32 ul_num_bytes, u32 mem_space)
++{
++ struct dev_object *dev_obj = (struct dev_object *)arb;
++ u32 ul_written = 0;
++ int status;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
++ if (dev_obj) {
++ /* Require of BrdWrite() */
++ DBC_ASSERT(dev_obj->hbridge_context != NULL);
++ status = (*dev_obj->bridge_interface.pfn_brd_write) (
++ dev_obj->hbridge_context, host_buf,
++ dsp_add, ul_num_bytes, mem_space);
++ /* Special case of getting the address only */
++ if (ul_num_bytes == 0)
++ ul_num_bytes = 1;
++ if (!status)
++ ul_written = ul_num_bytes;
++
++ }
++ return ul_written;
++}
++
++/*
++ * ======== dev_create_device ========
++ * Purpose:
++ * Called by the operating system to load the PM Bridge Driver for a
++ * PM board (device).
++ */
++int dev_create_device(struct dev_object **device_obj,
++ const char *driver_file_name,
++ struct cfg_devnode *dev_node_obj)
++{
++ struct cfg_hostres *host_res;
++ struct ldr_module *module_obj = NULL;
++ struct bridge_drv_interface *drv_fxns = NULL;
++ struct dev_object *dev_obj = NULL;
++ struct chnl_mgrattrs mgr_attrs;
++ struct io_attrs io_mgr_attrs;
++ u32 num_windows;
++ struct drv_object *hdrv_obj = NULL;
++ int status = 0;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(device_obj != NULL);
++ DBC_REQUIRE(driver_file_name != NULL);
++
++ status = drv_request_bridge_res_dsp((void *)&host_res);
++
++ if (status) {
++ dev_dbg(bridge, "%s: Failed to reserve bridge resources\n",
++ __func__);
++ goto leave;
++ }
++
++ /* Get the Bridge driver interface functions */
++ bridge_drv_entry(&drv_fxns, driver_file_name);
++ if (cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT)) {
++		/* don't propagate CFG errors from this PROC function */
++ status = -EPERM;
++ }
++ /* Create the device object, and pass a handle to the Bridge driver for
++ * storage. */
++ if (!status) {
++ DBC_ASSERT(drv_fxns);
++ dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
++ if (dev_obj) {
++ /* Fill out the rest of the Dev Object structure: */
++ dev_obj->dev_node_obj = dev_node_obj;
++ dev_obj->module_obj = module_obj;
++ dev_obj->cod_mgr = NULL;
++ dev_obj->hchnl_mgr = NULL;
++ dev_obj->hdeh_mgr = NULL;
++ dev_obj->lock_owner = NULL;
++ dev_obj->word_size = DSPWORDSIZE;
++ dev_obj->hdrv_obj = hdrv_obj;
++ dev_obj->dev_type = DSP_UNIT;
++ /* Store this Bridge's interface functions, based on its
++ * version. */
++ store_interface_fxns(drv_fxns,
++ &dev_obj->bridge_interface);
++
++ /* Call fxn_dev_create() to get the Bridge's device
++ * context handle. */
++ status = (dev_obj->bridge_interface.pfn_dev_create)
++ (&dev_obj->hbridge_context, dev_obj,
++ host_res);
++ /* Assert bridge_dev_create()'s ensure clause: */
++ DBC_ASSERT(status
++ || (dev_obj->hbridge_context != NULL));
++ } else {
++ status = -ENOMEM;
++ }
++ }
++ /* Attempt to create the COD manager for this device: */
++ if (!status)
++ status = init_cod_mgr(dev_obj);
++
++ /* Attempt to create the channel manager for this device: */
++ if (!status) {
++ mgr_attrs.max_channels = CHNL_MAXCHANNELS;
++ io_mgr_attrs.birq = host_res->birq_registers;
++ io_mgr_attrs.irq_shared =
++ (host_res->birq_attrib & CFG_IRQSHARED);
++ io_mgr_attrs.word_size = DSPWORDSIZE;
++ mgr_attrs.word_size = DSPWORDSIZE;
++ num_windows = host_res->num_mem_windows;
++ if (num_windows) {
++ /* Assume last memory window is for CHNL */
++ io_mgr_attrs.shm_base = host_res->dw_mem_base[1] +
++ host_res->dw_offset_for_monitor;
++ io_mgr_attrs.usm_length =
++ host_res->dw_mem_length[1] -
++ host_res->dw_offset_for_monitor;
++ } else {
++ io_mgr_attrs.shm_base = 0;
++ io_mgr_attrs.usm_length = 0;
++ pr_err("%s: No memory reserved for shared structures\n",
++ __func__);
++ }
++ status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs);
++ if (status == -ENOSYS) {
++ /* It's OK for a device not to have a channel
++ * manager: */
++ status = 0;
++ }
++ /* Create CMM mgr even if Msg Mgr not impl. */
++ status = cmm_create(&dev_obj->hcmm_mgr,
++ (struct dev_object *)dev_obj, NULL);
++ /* Only create IO manager if we have a channel manager */
++ if (!status && dev_obj->hchnl_mgr) {
++ status = io_create(&dev_obj->hio_mgr, dev_obj,
++ &io_mgr_attrs);
++ }
++ /* Only create DEH manager if we have an IO manager */
++ if (!status) {
++ /* Instantiate the DEH module */
++ status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
++ }
++ /* Create DMM mgr . */
++ status = dmm_create(&dev_obj->dmm_mgr,
++ (struct dev_object *)dev_obj, NULL);
++ }
++ /* Add the new DEV_Object to the global list: */
++ if (!status) {
++ lst_init_elem(&dev_obj->link);
++ status = drv_insert_dev_object(hdrv_obj, dev_obj);
++ }
++ /* Create the Processor List */
++ if (!status) {
++ dev_obj->proc_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ if (!(dev_obj->proc_list))
++ status = -EPERM;
++ else
++ INIT_LIST_HEAD(&dev_obj->proc_list->head);
++ }
++leave:
++ /* If all went well, return a handle to the dev object;
++ * else, cleanup and return NULL in the OUT parameter. */
++ if (!status) {
++ *device_obj = dev_obj;
++ } else {
++ if (dev_obj) {
++ kfree(dev_obj->proc_list);
++ if (dev_obj->cod_mgr)
++ cod_delete(dev_obj->cod_mgr);
++ if (dev_obj->dmm_mgr)
++ dmm_destroy(dev_obj->dmm_mgr);
++ kfree(dev_obj);
++ }
++
++ *device_obj = NULL;
++ }
++
++ DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
++ return status;
++}
++
++/*
++ * ======== dev_create2 ========
++ * Purpose:
++ * After successful loading of the image from api_init_complete2
++ * (PROC Auto_Start) or proc_load this fxn is called. This creates
++ * the Node Manager and updates the DEV Object.
++ */
++int dev_create2(struct dev_object *hdev_obj)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hdev_obj);
++
++ /* There can be only one Node Manager per DEV object */
++ DBC_ASSERT(!dev_obj->hnode_mgr);
++ status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj);
++ if (status)
++ dev_obj->hnode_mgr = NULL;
++
++ DBC_ENSURE((!status && dev_obj->hnode_mgr != NULL)
++ || (status && dev_obj->hnode_mgr == NULL));
++ return status;
++}
++
++/*
++ * ======== dev_destroy2 ========
++ * Purpose:
++ * Destroys the Node manager for this device.
++ */
++int dev_destroy2(struct dev_object *hdev_obj)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hdev_obj);
++
++ if (dev_obj->hnode_mgr) {
++ if (node_delete_mgr(dev_obj->hnode_mgr))
++ status = -EPERM;
++ else
++ dev_obj->hnode_mgr = NULL;
++
++ }
++
++ DBC_ENSURE((!status && dev_obj->hnode_mgr == NULL) || status);
++ return status;
++}
++
++/*
++ * ======== dev_destroy_device ========
++ * Purpose:
++ * Destroys the channel manager for this device, if any, calls
++ * bridge_dev_destroy(), and then attempts to unload the Bridge module.
++ */
++int dev_destroy_device(struct dev_object *hdev_obj)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (hdev_obj) {
++ if (dev_obj->cod_mgr) {
++ cod_delete(dev_obj->cod_mgr);
++ dev_obj->cod_mgr = NULL;
++ }
++
++ if (dev_obj->hnode_mgr) {
++ node_delete_mgr(dev_obj->hnode_mgr);
++ dev_obj->hnode_mgr = NULL;
++ }
++
++ /* Free the io, channel, and message managers for this board: */
++ if (dev_obj->hio_mgr) {
++ io_destroy(dev_obj->hio_mgr);
++ dev_obj->hio_mgr = NULL;
++ }
++ if (dev_obj->hchnl_mgr) {
++ chnl_destroy(dev_obj->hchnl_mgr);
++ dev_obj->hchnl_mgr = NULL;
++ }
++ if (dev_obj->hmsg_mgr) {
++ msg_delete(dev_obj->hmsg_mgr);
++ dev_obj->hmsg_mgr = NULL;
++ }
++
++ if (dev_obj->hdeh_mgr) {
++ /* Uninitialize DEH module. */
++ bridge_deh_destroy(dev_obj->hdeh_mgr);
++ dev_obj->hdeh_mgr = NULL;
++ }
++ if (dev_obj->hcmm_mgr) {
++ cmm_destroy(dev_obj->hcmm_mgr, true);
++ dev_obj->hcmm_mgr = NULL;
++ }
++
++ if (dev_obj->dmm_mgr) {
++ dmm_destroy(dev_obj->dmm_mgr);
++ dev_obj->dmm_mgr = NULL;
++ }
++
++ /* Call the driver's bridge_dev_destroy() function: */
++ /* Require of DevDestroy */
++ if (dev_obj->hbridge_context) {
++ status = (*dev_obj->bridge_interface.pfn_dev_destroy)
++ (dev_obj->hbridge_context);
++ dev_obj->hbridge_context = NULL;
++ } else
++ status = -EPERM;
++ if (!status) {
++ kfree(dev_obj->proc_list);
++ dev_obj->proc_list = NULL;
++
++ /* Remove this DEV_Object from the global list: */
++ drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj);
++ /* Free The library * LDR_FreeModule
++ * (dev_obj->module_obj); */
++ /* Free this dev object: */
++ kfree(dev_obj);
++ dev_obj = NULL;
++ }
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== dev_get_chnl_mgr ========
++ * Purpose:
++ * Retrieve the handle to the channel manager handle created for this
++ * device.
++ */
++int dev_get_chnl_mgr(struct dev_object *hdev_obj,
++ struct chnl_mgr **mgr)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(mgr != NULL);
++
++ if (hdev_obj) {
++ *mgr = dev_obj->hchnl_mgr;
++ } else {
++ *mgr = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
++ return status;
++}
++
++/*
++ * ======== dev_get_cmm_mgr ========
++ * Purpose:
++ * Retrieve the handle to the shared memory manager created for this
++ * device.
++ */
++int dev_get_cmm_mgr(struct dev_object *hdev_obj,
++ struct cmm_object **mgr)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(mgr != NULL);
++
++ if (hdev_obj) {
++ *mgr = dev_obj->hcmm_mgr;
++ } else {
++ *mgr = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
++ return status;
++}
++
++/*
++ * ======== dev_get_dmm_mgr ========
++ * Purpose:
++ * Retrieve the handle to the dynamic memory manager created for this
++ * device.
++ */
++int dev_get_dmm_mgr(struct dev_object *hdev_obj,
++ struct dmm_object **mgr)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(mgr != NULL);
++
++ if (hdev_obj) {
++ *mgr = dev_obj->dmm_mgr;
++ } else {
++ *mgr = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
++ return status;
++}
++
++/*
++ * ======== dev_get_cod_mgr ========
++ * Purpose:
++ * Retrieve the COD manager create for this device.
++ */
++int dev_get_cod_mgr(struct dev_object *hdev_obj,
++ struct cod_manager **cod_mgr)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(cod_mgr != NULL);
++
++ if (hdev_obj) {
++ *cod_mgr = dev_obj->cod_mgr;
++ } else {
++ *cod_mgr = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (cod_mgr != NULL && *cod_mgr == NULL));
++ return status;
++}
++
++/*
++ * ========= dev_get_deh_mgr ========
++ */
++int dev_get_deh_mgr(struct dev_object *hdev_obj,
++ struct deh_mgr **deh_manager)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(deh_manager != NULL);
++ DBC_REQUIRE(hdev_obj);
++ if (hdev_obj) {
++ *deh_manager = hdev_obj->hdeh_mgr;
++ } else {
++ *deh_manager = NULL;
++ status = -EFAULT;
++ }
++ return status;
++}
++
++/*
++ * ======== dev_get_dev_node ========
++ * Purpose:
++ * Retrieve the platform specific device ID for this device.
++ */
++int dev_get_dev_node(struct dev_object *hdev_obj,
++ struct cfg_devnode **dev_nde)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dev_nde != NULL);
++
++ if (hdev_obj) {
++ *dev_nde = dev_obj->dev_node_obj;
++ } else {
++ *dev_nde = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (dev_nde != NULL && *dev_nde == NULL));
++ return status;
++}
++
++/*
++ * ======== dev_get_first ========
++ * Purpose:
++ * Retrieve the first Device Object handle from an internal linked list
++ * DEV_OBJECTs maintained by DEV.
++ */
++struct dev_object *dev_get_first(void)
++{
++ struct dev_object *dev_obj = NULL;
++
++ dev_obj = (struct dev_object *)drv_get_first_dev_object();
++
++ return dev_obj;
++}
++
++/*
++ * ======== dev_get_intf_fxns ========
++ * Purpose:
++ * Retrieve the Bridge interface function structure for the loaded driver.
++ * if_fxns != NULL.
++ */
++int dev_get_intf_fxns(struct dev_object *hdev_obj,
++ struct bridge_drv_interface **if_fxns)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(if_fxns != NULL);
++
++ if (hdev_obj) {
++ *if_fxns = &dev_obj->bridge_interface;
++ } else {
++ *if_fxns = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || ((if_fxns != NULL) && (*if_fxns == NULL)));
++ return status;
++}
++
++/*
++ * ========= dev_get_io_mgr ========
++ */
++int dev_get_io_mgr(struct dev_object *hdev_obj,
++ struct io_mgr **io_man)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(io_man != NULL);
++ DBC_REQUIRE(hdev_obj);
++
++ if (hdev_obj) {
++ *io_man = hdev_obj->hio_mgr;
++ } else {
++ *io_man = NULL;
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== dev_get_next ========
++ * Purpose:
++ * Retrieve the next Device Object handle from an internal linked list
++ * of DEV_OBJECTs maintained by DEV, after having previously called
++ * dev_get_first() and zero or more dev_get_next
++ */
++struct dev_object *dev_get_next(struct dev_object *hdev_obj)
++{
++ struct dev_object *next_dev_object = NULL;
++
++ if (hdev_obj) {
++ next_dev_object = (struct dev_object *)
++ drv_get_next_dev_object((u32) hdev_obj);
++ }
++
++ return next_dev_object;
++}
++
++/*
++ * ========= dev_get_msg_mgr ========
++ */
++void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(msg_man != NULL);
++ DBC_REQUIRE(hdev_obj);
++
++ *msg_man = hdev_obj->hmsg_mgr;
++}
++
++/*
++ * ======== dev_get_node_manager ========
++ * Purpose:
++ * Retrieve the Node Manager Handle
++ */
++int dev_get_node_manager(struct dev_object *hdev_obj,
++ struct node_mgr **node_man)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(node_man != NULL);
++
++ if (hdev_obj) {
++ *node_man = dev_obj->hnode_mgr;
++ } else {
++ *node_man = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (node_man != NULL && *node_man == NULL));
++ return status;
++}
++
++/*
++ * ======== dev_get_symbol ========
++ */
++int dev_get_symbol(struct dev_object *hdev_obj,
++ const char *str_sym, u32 * pul_value)
++{
++ int status = 0;
++ struct cod_manager *cod_mgr;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(str_sym != NULL && pul_value != NULL);
++
++ if (hdev_obj) {
++ status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
++ if (cod_mgr)
++ status = cod_get_sym_value(cod_mgr, (char *)str_sym,
++ pul_value);
++ else
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== dev_get_bridge_context ========
++ * Purpose:
++ * Retrieve the Bridge Context handle, as returned by the
++ * bridge_dev_create fxn.
++ */
++int dev_get_bridge_context(struct dev_object *hdev_obj,
++ struct bridge_dev_context **phbridge_context)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(phbridge_context != NULL);
++
++ if (hdev_obj) {
++ *phbridge_context = dev_obj->hbridge_context;
++ } else {
++ *phbridge_context = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || ((phbridge_context != NULL) &&
++ (*phbridge_context == NULL)));
++ return status;
++}
++
++/*
++ * ======== dev_exit ========
++ * Purpose:
++ * Decrement reference count, and free resources when reference count is
++ * 0.
++ */
++void dev_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ if (refs == 0) {
++ cmm_exit();
++ dmm_exit();
++ }
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== dev_init ========
++ * Purpose:
++ * Initialize DEV's private state, keeping a reference count on each call.
++ */
++bool dev_init(void)
++{
++ bool cmm_ret, dmm_ret, ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (refs == 0) {
++ cmm_ret = cmm_init();
++ dmm_ret = dmm_init();
++
++ ret = cmm_ret && dmm_ret;
++
++ if (!ret) {
++ if (cmm_ret)
++ cmm_exit();
++
++ if (dmm_ret)
++ dmm_exit();
++
++ }
++ }
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
++
++/*
++ * ======== dev_notify_clients ========
++ * Purpose:
++ * Notify all clients of this device of a change in device status.
++ */
++int dev_notify_clients(struct dev_object *hdev_obj, u32 ret)
++{
++ int status = 0;
++
++ struct dev_object *dev_obj = hdev_obj;
++ void *proc_obj;
++
++ for (proc_obj = (void *)lst_first(dev_obj->proc_list);
++ proc_obj != NULL;
++ proc_obj = (void *)lst_next(dev_obj->proc_list,
++ (struct list_head *)proc_obj))
++ proc_notify_clients(proc_obj, (u32) ret);
++
++ return status;
++}
++
++/*
++ * ======== dev_remove_device ========
++ */
++int dev_remove_device(struct cfg_devnode *dev_node_obj)
++{
++ struct dev_object *hdev_obj; /* handle to device object */
++ int status = 0;
++ struct dev_object *dev_obj;
++
++	/* Retrieve the device object handle originally stored with
++ * the dev_node: */
++ status = cfg_get_dev_object(dev_node_obj, (u32 *) &hdev_obj);
++ if (!status) {
++ /* Remove the Processor List */
++ dev_obj = (struct dev_object *)hdev_obj;
++ /* Destroy the device object. */
++ status = dev_destroy_device(hdev_obj);
++ }
++
++ return status;
++}
++
++/*
++ * ======== dev_set_chnl_mgr ========
++ * Purpose:
++ * Set the channel manager for this device.
++ */
++int dev_set_chnl_mgr(struct dev_object *hdev_obj,
++ struct chnl_mgr *hmgr)
++{
++ int status = 0;
++ struct dev_object *dev_obj = hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (hdev_obj)
++ dev_obj->hchnl_mgr = hmgr;
++ else
++ status = -EFAULT;
++
++ DBC_ENSURE(status || (dev_obj->hchnl_mgr == hmgr));
++ return status;
++}
++
++/*
++ * ======== dev_set_msg_mgr ========
++ * Purpose:
++ * Set the message manager for this device.
++ */
++void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hdev_obj);
++
++ hdev_obj->hmsg_mgr = hmgr;
++}
++
++/*
++ * ======== dev_start_device ========
++ * Purpose:
++ * Initializes the new device with the BRIDGE environment.
++ */
++int dev_start_device(struct cfg_devnode *dev_node_obj)
++{
++ struct dev_object *hdev_obj = NULL; /* handle to 'Bridge Device */
++ /* Bridge driver filename */
++ char bridge_file_name[CFG_MAXSEARCHPATHLEN] = "UMA";
++ int status;
++ struct mgr_object *hmgr_obj = NULL;
++
++ DBC_REQUIRE(refs > 0);
++
++ /* Given all resources, create a device object. */
++ status = dev_create_device(&hdev_obj, bridge_file_name,
++ dev_node_obj);
++ if (!status) {
++ /* Store away the hdev_obj with the DEVNODE */
++ status = cfg_set_dev_object(dev_node_obj, (u32) hdev_obj);
++ if (status) {
++ /* Clean up */
++ dev_destroy_device(hdev_obj);
++ hdev_obj = NULL;
++ }
++ }
++ if (!status) {
++ /* Create the Manager Object */
++ status = mgr_create(&hmgr_obj, dev_node_obj);
++ }
++ if (status) {
++ if (hdev_obj)
++ dev_destroy_device(hdev_obj);
++
++ /* Ensure the device extension is NULL */
++ cfg_set_dev_object(dev_node_obj, 0L);
++ }
++
++ return status;
++}
++
++/*
++ * ======== fxn_not_implemented ========
++ * Purpose:
++ * Takes the place of a Bridge Null Function.
++ * Parameters:
++ * Multiple, optional.
++ * Returns:
++ * -ENOSYS: Always.
++ */
++static int fxn_not_implemented(int arg, ...)
++{
++ return -ENOSYS;
++}
++
++/*
++ * ======== init_cod_mgr ========
++ * Purpose:
++ * Create a COD manager for this device.
++ * Parameters:
++ * dev_obj: Pointer to device object created with
++ * dev_create_device()
++ * Returns:
++ * 0: Success.
++ * -EFAULT: Invalid hdev_obj.
++ * Requires:
++ * Should only be called once by dev_create_device() for a given DevObject.
++ * Ensures:
++ */
++static int init_cod_mgr(struct dev_object *dev_obj)
++{
++ int status = 0;
++ char *sz_dummy_file = "dummy";
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
++
++ status = cod_create(&dev_obj->cod_mgr, sz_dummy_file, NULL);
++
++ return status;
++}
++
++/*
++ * ======== dev_insert_proc_object ========
++ * Purpose:
++ * Insert a ProcObject into the list maintained by DEV.
++ * Parameters:
++ * p_proc_object: Ptr to ProcObject to insert.
++ * dev_obj: Ptr to Dev Object where the list is.
++ * already_attached: Ptr to return the bool
++ * Returns:
++ * 0: If successful.
++ * Requires:
++ * List Exists
++ * hdev_obj is Valid handle
++ * DEV Initialized
++ * already_attached != NULL
++ * proc_obj != 0
++ * Ensures:
++ * 0 and List is not Empty.
++ */
++int dev_insert_proc_object(struct dev_object *hdev_obj,
++ u32 proc_obj, bool *already_attached)
++{
++ int status = 0;
++ struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dev_obj);
++ DBC_REQUIRE(proc_obj != 0);
++ DBC_REQUIRE(dev_obj->proc_list != NULL);
++ DBC_REQUIRE(already_attached != NULL);
++ if (!LST_IS_EMPTY(dev_obj->proc_list))
++ *already_attached = true;
++
++ /* Add DevObject to tail. */
++ lst_put_tail(dev_obj->proc_list, (struct list_head *)proc_obj);
++
++ DBC_ENSURE(!status && !LST_IS_EMPTY(dev_obj->proc_list));
++
++ return status;
++}
++
++/*
++ * ======== dev_remove_proc_object ========
++ * Purpose:
++ * Search for and remove a Proc object from the given list maintained
++ * by the DEV
++ * Parameters:
++ * p_proc_object: Ptr to ProcObject to insert.
++ * dev_obj Ptr to Dev Object where the list is.
++ * Returns:
++ * 0: If successful.
++ * Requires:
++ * List exists and is not empty
++ * proc_obj != 0
++ * hdev_obj is a valid Dev handle.
++ * Ensures:
++ * Details:
++ * List will be deleted when the DEV is destroyed.
++ */
++int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
++{
++ int status = -EPERM;
++ struct list_head *cur_elem;
++ struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
++
++ DBC_REQUIRE(dev_obj);
++ DBC_REQUIRE(proc_obj != 0);
++ DBC_REQUIRE(dev_obj->proc_list != NULL);
++ DBC_REQUIRE(!LST_IS_EMPTY(dev_obj->proc_list));
++
++ /* Search list for dev_obj: */
++ for (cur_elem = lst_first(dev_obj->proc_list); cur_elem != NULL;
++ cur_elem = lst_next(dev_obj->proc_list, cur_elem)) {
++ /* If found, remove it. */
++ if ((u32) cur_elem == proc_obj) {
++ lst_remove_elem(dev_obj->proc_list, cur_elem);
++ status = 0;
++ break;
++ }
++ }
++
++ return status;
++}
++
++int dev_get_dev_type(struct dev_object *device_obj, u8 *dev_type)
++{
++ int status = 0;
++ struct dev_object *dev_obj = (struct dev_object *)device_obj;
++
++ *dev_type = dev_obj->dev_type;
++
++ return status;
++}
++
++/*
++ * ======== store_interface_fxns ========
++ * Purpose:
++ * Copy the Bridge's interface functions into the device object,
++ * ensuring that fxn_not_implemented() is set for:
++ *
++ * 1. All Bridge function pointers which are NULL; and
++ * 2. All function slots in the struct dev_object structure which have no
++ *      corresponding slots in the Bridge's interface, because the Bridge
++ * is of an *older* version.
++ * Parameters:
++ * intf_fxns: Interface fxn Structure of the Bridge's Dev Object.
++ * drv_fxns: Interface Fxns offered by the Bridge during DEV_Create().
++ * Returns:
++ * Requires:
++ * Input pointers are valid.
++ * Bridge driver is *not* written for a newer DSP API.
++ * Ensures:
++ * All function pointers in the dev object's fxn interface are not NULL.
++ */
++static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
++ struct bridge_drv_interface *intf_fxns)
++{
++ u32 bridge_version;
++
++ /* Local helper macro: */
++#define STORE_FXN(cast, pfn) \
++ (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
++ (cast)fxn_not_implemented))
++
++ DBC_REQUIRE(intf_fxns != NULL);
++ DBC_REQUIRE(drv_fxns != NULL);
++ DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
++ drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
++ bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
++ drv_fxns->brd_api_minor_version);
++ intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
++ intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version;
++ /* Install functions up to DSP API version .80 (first alpha): */
++ if (bridge_version > 0) {
++ STORE_FXN(fxn_dev_create, pfn_dev_create);
++ STORE_FXN(fxn_dev_destroy, pfn_dev_destroy);
++ STORE_FXN(fxn_dev_ctrl, pfn_dev_cntrl);
++ STORE_FXN(fxn_brd_monitor, pfn_brd_monitor);
++ STORE_FXN(fxn_brd_start, pfn_brd_start);
++ STORE_FXN(fxn_brd_stop, pfn_brd_stop);
++ STORE_FXN(fxn_brd_status, pfn_brd_status);
++ STORE_FXN(fxn_brd_read, pfn_brd_read);
++ STORE_FXN(fxn_brd_write, pfn_brd_write);
++ STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
++ STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
++ STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
++ STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
++ STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
++ STORE_FXN(fxn_chnl_create, pfn_chnl_create);
++ STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
++ STORE_FXN(fxn_chnl_open, pfn_chnl_open);
++ STORE_FXN(fxn_chnl_close, pfn_chnl_close);
++ STORE_FXN(fxn_chnl_addioreq, pfn_chnl_add_io_req);
++ STORE_FXN(fxn_chnl_getioc, pfn_chnl_get_ioc);
++ STORE_FXN(fxn_chnl_cancelio, pfn_chnl_cancel_io);
++ STORE_FXN(fxn_chnl_flushio, pfn_chnl_flush_io);
++ STORE_FXN(fxn_chnl_getinfo, pfn_chnl_get_info);
++ STORE_FXN(fxn_chnl_getmgrinfo, pfn_chnl_get_mgr_info);
++ STORE_FXN(fxn_chnl_idle, pfn_chnl_idle);
++ STORE_FXN(fxn_chnl_registernotify, pfn_chnl_register_notify);
++ STORE_FXN(fxn_io_create, pfn_io_create);
++ STORE_FXN(fxn_io_destroy, pfn_io_destroy);
++ STORE_FXN(fxn_io_onloaded, pfn_io_on_loaded);
++ STORE_FXN(fxn_io_getprocload, pfn_io_get_proc_load);
++ STORE_FXN(fxn_msg_create, pfn_msg_create);
++ STORE_FXN(fxn_msg_createqueue, pfn_msg_create_queue);
++ STORE_FXN(fxn_msg_delete, pfn_msg_delete);
++ STORE_FXN(fxn_msg_deletequeue, pfn_msg_delete_queue);
++ STORE_FXN(fxn_msg_get, pfn_msg_get);
++ STORE_FXN(fxn_msg_put, pfn_msg_put);
++ STORE_FXN(fxn_msg_registernotify, pfn_msg_register_notify);
++ STORE_FXN(fxn_msg_setqueueid, pfn_msg_set_queue_id);
++ }
++	/* Add code for any additional functions in newer Bridge versions here */
++ /* Ensure postcondition: */
++ DBC_ENSURE(intf_fxns->pfn_dev_create != NULL);
++ DBC_ENSURE(intf_fxns->pfn_dev_destroy != NULL);
++ DBC_ENSURE(intf_fxns->pfn_dev_cntrl != NULL);
++ DBC_ENSURE(intf_fxns->pfn_brd_monitor != NULL);
++ DBC_ENSURE(intf_fxns->pfn_brd_start != NULL);
++ DBC_ENSURE(intf_fxns->pfn_brd_stop != NULL);
++ DBC_ENSURE(intf_fxns->pfn_brd_status != NULL);
++ DBC_ENSURE(intf_fxns->pfn_brd_read != NULL);
++ DBC_ENSURE(intf_fxns->pfn_brd_write != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_create != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_destroy != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_open != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_close != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_add_io_req != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_get_ioc != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_cancel_io != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_flush_io != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_get_info != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_get_mgr_info != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_idle != NULL);
++ DBC_ENSURE(intf_fxns->pfn_chnl_register_notify != NULL);
++ DBC_ENSURE(intf_fxns->pfn_io_create != NULL);
++ DBC_ENSURE(intf_fxns->pfn_io_destroy != NULL);
++ DBC_ENSURE(intf_fxns->pfn_io_on_loaded != NULL);
++ DBC_ENSURE(intf_fxns->pfn_io_get_proc_load != NULL);
++ DBC_ENSURE(intf_fxns->pfn_msg_set_queue_id != NULL);
++
++#undef STORE_FXN
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dmm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dmm.c 2010-08-18 11:24:23.210055890 +0300
+@@ -0,0 +1,533 @@
++/*
++ * dmm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
++ * space that can be directly mapped to any MPU buffer or memory region
++ *
++ * Notes:
++ *   Region: Generic memory entity having a start address and a size
++ * Chunk: Reserved region
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++#include <dspbridge/proc.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/dmm.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++#define DMM_ADDR_VIRTUAL(a) \
++ (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
++ dyn_mem_map_beg)
++#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
++
++/* DMM Mgr */
++struct dmm_object {
++ /* Dmm Lock is used to serialize access mem manager for
++ * multi-threads. */
++ spinlock_t dmm_lock; /* Lock to access dmm mgr */
++};
++
++/* ----------------------------------- Globals */
++static u32 refs; /* module reference count */
++struct map_page {
++ u32 region_size:15;
++ u32 mapped_size:15;
++ u32 reserved:1;
++ u32 mapped:1;
++};
++
++/* Create the free list */
++static struct map_page *virtual_mapping_table;
++static u32 free_region; /* The index of free region */
++static u32 free_size;
++static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
++static u32 table_size; /* The size of virt and phys pages tables */
++
++/* ----------------------------------- Function Prototypes */
++static struct map_page *get_region(u32 addr);
++static struct map_page *get_free_region(u32 len);
++static struct map_page *get_mapped_region(u32 addrs);
++
++/* ======== dmm_create_tables ========
++ * Purpose:
++ * Create table to hold the information of physical address
++ * the buffer pages that is passed by the user, and the table
++ * to hold the information of the virtual memory that is reserved
++ * for DSP.
++ */
++int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
++{
++ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
++ int status = 0;
++
++ status = dmm_delete_tables(dmm_obj);
++ if (!status) {
++ dyn_mem_map_beg = addr;
++ table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
++ /* Create the free list */
++ virtual_mapping_table = __vmalloc(table_size *
++ sizeof(struct map_page), GFP_KERNEL |
++ __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
++ if (virtual_mapping_table == NULL)
++ status = -ENOMEM;
++ else {
++ /* On successful allocation,
++ * all entries are zero ('free') */
++ free_region = 0;
++ free_size = table_size * PG_SIZE4K;
++ virtual_mapping_table[0].region_size = table_size;
++ }
++ }
++
++ if (status)
++ pr_err("%s: failure, status 0x%x\n", __func__, status);
++
++ return status;
++}
++
++/*
++ * ======== dmm_create ========
++ * Purpose:
++ * Create a dynamic memory manager object.
++ */
++int dmm_create(struct dmm_object **dmm_manager,
++ struct dev_object *hdev_obj,
++ const struct dmm_mgrattrs *mgr_attrts)
++{
++ struct dmm_object *dmm_obj = NULL;
++ int status = 0;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dmm_manager != NULL);
++
++ *dmm_manager = NULL;
++ /* create, zero, and tag a cmm mgr object */
++ dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
++ if (dmm_obj != NULL) {
++ spin_lock_init(&dmm_obj->dmm_lock);
++ *dmm_manager = dmm_obj;
++ } else {
++ status = -ENOMEM;
++ }
++
++ return status;
++}
++
++/*
++ * ======== dmm_destroy ========
++ * Purpose:
++ * Release the communication memory manager resources.
++ */
++int dmm_destroy(struct dmm_object *dmm_mgr)
++{
++ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ if (dmm_mgr) {
++ status = dmm_delete_tables(dmm_obj);
++ if (!status)
++ kfree(dmm_obj);
++ } else
++ status = -EFAULT;
++
++ return status;
++}
++
++/*
++ * ======== dmm_delete_tables ========
++ * Purpose:
++ * Delete DMM Tables.
++ */
++int dmm_delete_tables(struct dmm_object *dmm_mgr)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ /* Delete all DMM tables */
++ if (dmm_mgr)
++ vfree(virtual_mapping_table);
++ else
++ status = -EFAULT;
++ return status;
++}
++
++/*
++ * ======== dmm_exit ========
++ * Purpose:
++ * Discontinue usage of module; free resources when reference count
++ * reaches 0.
++ */
++void dmm_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++}
++
++/*
++ * ======== dmm_get_handle ========
++ * Purpose:
++ * Return the dynamic memory manager object for this device.
++ * This is typically called from the client process.
++ */
++int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
++{
++ int status = 0;
++ struct dev_object *hdev_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dmm_manager != NULL);
++ if (hprocessor != NULL)
++ status = proc_get_dev_object(hprocessor, &hdev_obj);
++ else
++ hdev_obj = dev_get_first(); /* default */
++
++ if (!status)
++ status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
++
++ return status;
++}
++
++/*
++ * ======== dmm_init ========
++ * Purpose:
++ * Initializes private state of DMM module.
++ */
++bool dmm_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ virtual_mapping_table = NULL;
++ table_size = 0;
++
++ return ret;
++}
++
++/*
++ * ======== dmm_map_memory ========
++ * Purpose:
++ * Add a mapping block to the reserved chunk. DMM assumes that this block
++ * will be mapped in the DSP/IVA's address space. DMM returns an error if a
++ * mapping overlaps another one. This function stores the info that will be
++ * required later while unmapping the block.
++ */
++int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
++{
++ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
++ struct map_page *chunk;
++ int status = 0;
++
++ spin_lock(&dmm_obj->dmm_lock);
++ /* Find the Reserved memory chunk containing the DSP block to
++ * be mapped */
++ chunk = (struct map_page *)get_region(addr);
++ if (chunk != NULL) {
++ /* Mark the region 'mapped', leave the 'reserved' info as-is */
++ chunk->mapped = true;
++ chunk->mapped_size = (size / PG_SIZE4K);
++ } else
++ status = -ENOENT;
++ spin_unlock(&dmm_obj->dmm_lock);
++
++ dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
++ "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
++
++ return status;
++}
++
++/*
++ * ======== dmm_reserve_memory ========
++ * Purpose:
++ * Reserve a chunk of virtually contiguous DSP/IVA address space.
++ */
++int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
++ u32 *prsv_addr)
++{
++ int status = 0;
++ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
++ struct map_page *node;
++ u32 rsv_addr = 0;
++ u32 rsv_size = 0;
++
++ spin_lock(&dmm_obj->dmm_lock);
++
++ /* Try to get a DSP chunk from the free list */
++ node = get_free_region(size);
++ if (node != NULL) {
++ /* DSP chunk of given size is available. */
++ rsv_addr = DMM_ADDR_VIRTUAL(node);
++ /* Calculate the number entries to use */
++ rsv_size = size / PG_SIZE4K;
++ if (rsv_size < node->region_size) {
++ /* Mark remainder of free region */
++ node[rsv_size].mapped = false;
++ node[rsv_size].reserved = false;
++ node[rsv_size].region_size =
++ node->region_size - rsv_size;
++ node[rsv_size].mapped_size = 0;
++ }
++ /* get_region will return first fit chunk. But we only use what
++ is requested. */
++ node->mapped = false;
++ node->reserved = true;
++ node->region_size = rsv_size;
++ node->mapped_size = 0;
++ /* Return the chunk's starting address */
++ *prsv_addr = rsv_addr;
++ } else
++ /*dSP chunk of given size is not available */
++ status = -ENOMEM;
++
++ spin_unlock(&dmm_obj->dmm_lock);
++
++ dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
++ "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
++ prsv_addr, status, rsv_addr, rsv_size);
++
++ return status;
++}
++
++/*
++ * ======== dmm_un_map_memory ========
++ * Purpose:
++ * Remove the mapped block from the reserved chunk.
++ */
++int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
++{
++ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
++ struct map_page *chunk;
++ int status = 0;
++
++ spin_lock(&dmm_obj->dmm_lock);
++ chunk = get_mapped_region(addr);
++ if (chunk == NULL)
++ status = -ENOENT;
++
++ if (!status) {
++ /* Unmap the region */
++ *psize = chunk->mapped_size * PG_SIZE4K;
++ chunk->mapped = false;
++ chunk->mapped_size = 0;
++ }
++ spin_unlock(&dmm_obj->dmm_lock);
++
++ dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
++ "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
++
++ return status;
++}
++
++/*
++ * ======== dmm_un_reserve_memory ========
++ * Purpose:
++ * Free a chunk of reserved DSP/IVA address space.
++ */
++int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
++{
++ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
++ struct map_page *chunk;
++ u32 i;
++ int status = 0;
++ u32 chunk_size;
++
++ spin_lock(&dmm_obj->dmm_lock);
++
++ /* Find the chunk containing the reserved address */
++ chunk = get_mapped_region(rsv_addr);
++ if (chunk == NULL)
++ status = -ENOENT;
++
++ if (!status) {
++ /* Free all the mapped pages for this reserved region */
++ i = 0;
++ while (i < chunk->region_size) {
++ if (chunk[i].mapped) {
++ /* Remove mapping from the page tables. */
++ chunk_size = chunk[i].mapped_size;
++ /* Clear the mapping flags */
++ chunk[i].mapped = false;
++ chunk[i].mapped_size = 0;
++ i += chunk_size;
++ } else
++ i++;
++ }
++ /* Clear the flags (mark the region 'free') */
++ chunk->reserved = false;
++ /* NOTE: We do NOT coalesce free regions here.
++ * Free regions are coalesced in get_region(), as it traverses
++ *the whole mapping table
++ */
++ }
++ spin_unlock(&dmm_obj->dmm_lock);
++
++ dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
++ __func__, dmm_mgr, rsv_addr, status, chunk);
++
++ return status;
++}
++
++/*
++ * ======== get_region ========
++ * Purpose:
++ * Returns a region containing the specified memory region
++ */
++static struct map_page *get_region(u32 addr)
++{
++ struct map_page *curr_region = NULL;
++ u32 i = 0;
++
++ if (virtual_mapping_table != NULL) {
++ /* find page mapped by this address */
++ i = DMM_ADDR_TO_INDEX(addr);
++ if (i < table_size)
++ curr_region = virtual_mapping_table + i;
++ }
++
++ dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
++ __func__, curr_region, free_region, free_size);
++ return curr_region;
++}
++
++/*
++ * ======== get_free_region ========
++ * Purpose:
++ * Returns the requested free region
++ */
++static struct map_page *get_free_region(u32 len)
++{
++ struct map_page *curr_region = NULL;
++ u32 i = 0;
++ u32 region_size = 0;
++ u32 next_i = 0;
++
++ if (virtual_mapping_table == NULL)
++ return curr_region;
++ if (len > free_size) {
++ /* Find the largest free region
++ * (coalesce during the traversal) */
++ while (i < table_size) {
++ region_size = virtual_mapping_table[i].region_size;
++ next_i = i + region_size;
++ if (virtual_mapping_table[i].reserved == false) {
++ /* Coalesce, if possible */
++ if (next_i < table_size &&
++ virtual_mapping_table[next_i].reserved
++ == false) {
++ virtual_mapping_table[i].region_size +=
++ virtual_mapping_table
++ [next_i].region_size;
++ continue;
++ }
++ region_size *= PG_SIZE4K;
++ if (region_size > free_size) {
++ free_region = i;
++ free_size = region_size;
++ }
++ }
++ i = next_i;
++ }
++ }
++ if (len <= free_size) {
++ curr_region = virtual_mapping_table + free_region;
++ free_region += (len / PG_SIZE4K);
++ free_size -= len;
++ }
++ return curr_region;
++}
++
++/*
++ * ======== get_mapped_region ========
++ * Purpose:
++ * Returns the requestedmapped region
++ */
++static struct map_page *get_mapped_region(u32 addrs)
++{
++ u32 i = 0;
++ struct map_page *curr_region = NULL;
++
++ if (virtual_mapping_table == NULL)
++ return curr_region;
++
++ i = DMM_ADDR_TO_INDEX(addrs);
++ if (i < table_size && (virtual_mapping_table[i].mapped ||
++ virtual_mapping_table[i].reserved))
++ curr_region = virtual_mapping_table + i;
++ return curr_region;
++}
++
++#ifdef DSP_DMM_DEBUG
++u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
++{
++ struct map_page *curr_node = NULL;
++ u32 i;
++ u32 freemem = 0;
++ u32 bigsize = 0;
++
++ spin_lock(&dmm_mgr->dmm_lock);
++
++ if (virtual_mapping_table != NULL) {
++ for (i = 0; i < table_size; i +=
++ virtual_mapping_table[i].region_size) {
++ curr_node = virtual_mapping_table + i;
++ if (curr_node->reserved) {
++ /*printk("RESERVED size = 0x%x, "
++ "Map size = 0x%x\n",
++ (curr_node->region_size * PG_SIZE4K),
++ (curr_node->mapped == false) ? 0 :
++ (curr_node->mapped_size * PG_SIZE4K));
++ */
++ } else {
++/* printk("UNRESERVED size = 0x%x\n",
++ (curr_node->region_size * PG_SIZE4K));
++ */
++ freemem += (curr_node->region_size * PG_SIZE4K);
++ if (curr_node->region_size > bigsize)
++ bigsize = curr_node->region_size;
++ }
++ }
++ }
++ spin_unlock(&dmm_mgr->dmm_lock);
++ printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
++ freemem / (1024 * 1024));
++ printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
++ (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
++ printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
++ (bigsize * PG_SIZE4K / (1024 * 1024)));
++
++ return 0;
++}
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dspapi.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/dspapi.c 2010-08-18 11:24:23.214050741 +0300
+@@ -0,0 +1,1906 @@
++/*
++ * dspapi.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Common DSP API functions, also includes the wrapper
++ * functions called directly by the DeviceIOControl interface.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/ntfy.h>
++#include <dspbridge/services.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/chnl.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/drv.h>
++
++#include <dspbridge/proc.h>
++#include <dspbridge/strm.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/disp.h>
++#include <dspbridge/mgr.h>
++#include <dspbridge/node.h>
++#include <dspbridge/rmm.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/msg.h>
++#include <dspbridge/cmm.h>
++#include <dspbridge/io.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/dspapi.h>
++#include <dspbridge/dbdcd.h>
++
++#include <dspbridge/resourcecleanup.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++#define MAX_TRACEBUFLEN 255
++#define MAX_LOADARGS 16
++#define MAX_NODES 64
++#define MAX_STREAMS 16
++#define MAX_BUFS 64
++
++/* Used to get dspbridge ioctl table */
++#define DB_GET_IOC_TABLE(cmd) (DB_GET_MODULE(cmd) >> DB_MODULE_SHIFT)
++
++/* Device IOCtl function pointer */
++struct api_cmd {
++ u32(*fxn) (union trapped_args *args, void *pr_ctxt);
++ u32 dw_index;
++};
++
++/* ----------------------------------- Globals */
++static u32 api_c_refs;
++
++/*
++ * Function tables.
++ * The order of these functions MUST be the same as the order of the command
++ * numbers defined in dspapi-ioctl.h This is how an IOCTL number in user mode
++ * turns into a function call in kernel mode.
++ */
++
++/* MGR wrapper functions */
++static struct api_cmd mgr_cmd[] = {
++ {mgrwrap_enum_node_info}, /* MGR_ENUMNODE_INFO */
++ {mgrwrap_enum_proc_info}, /* MGR_ENUMPROC_INFO */
++ {mgrwrap_register_object}, /* MGR_REGISTEROBJECT */
++ {mgrwrap_unregister_object}, /* MGR_UNREGISTEROBJECT */
++ {mgrwrap_wait_for_bridge_events}, /* MGR_WAIT */
++ {mgrwrap_get_process_resources_info}, /* MGR_GET_PROC_RES */
++};
++
++/* PROC wrapper functions */
++static struct api_cmd proc_cmd[] = {
++ {procwrap_attach}, /* PROC_ATTACH */
++ {procwrap_ctrl}, /* PROC_CTRL */
++ {procwrap_detach}, /* PROC_DETACH */
++ {procwrap_enum_node_info}, /* PROC_ENUMNODE */
++ {procwrap_enum_resources}, /* PROC_ENUMRESOURCES */
++ {procwrap_get_state}, /* PROC_GET_STATE */
++ {procwrap_get_trace}, /* PROC_GET_TRACE */
++ {procwrap_load}, /* PROC_LOAD */
++ {procwrap_register_notify}, /* PROC_REGISTERNOTIFY */
++ {procwrap_start}, /* PROC_START */
++ {procwrap_reserve_memory}, /* PROC_RSVMEM */
++ {procwrap_un_reserve_memory}, /* PROC_UNRSVMEM */
++ {procwrap_map}, /* PROC_MAPMEM */
++ {procwrap_un_map}, /* PROC_UNMAPMEM */
++ {procwrap_flush_memory}, /* PROC_FLUSHMEMORY */
++ {procwrap_stop}, /* PROC_STOP */
++ {procwrap_invalidate_memory}, /* PROC_INVALIDATEMEMORY */
++ {procwrap_begin_dma}, /* PROC_BEGINDMA */
++ {procwrap_end_dma}, /* PROC_ENDDMA */
++};
++
++/* NODE wrapper functions */
++static struct api_cmd node_cmd[] = {
++ {nodewrap_allocate}, /* NODE_ALLOCATE */
++ {nodewrap_alloc_msg_buf}, /* NODE_ALLOCMSGBUF */
++ {nodewrap_change_priority}, /* NODE_CHANGEPRIORITY */
++ {nodewrap_connect}, /* NODE_CONNECT */
++ {nodewrap_create}, /* NODE_CREATE */
++ {nodewrap_delete}, /* NODE_DELETE */
++ {nodewrap_free_msg_buf}, /* NODE_FREEMSGBUF */
++ {nodewrap_get_attr}, /* NODE_GETATTR */
++ {nodewrap_get_message}, /* NODE_GETMESSAGE */
++ {nodewrap_pause}, /* NODE_PAUSE */
++ {nodewrap_put_message}, /* NODE_PUTMESSAGE */
++ {nodewrap_register_notify}, /* NODE_REGISTERNOTIFY */
++ {nodewrap_run}, /* NODE_RUN */
++ {nodewrap_terminate}, /* NODE_TERMINATE */
++ {nodewrap_get_uuid_props}, /* NODE_GETUUIDPROPS */
++};
++
++/* STRM wrapper functions */
++static struct api_cmd strm_cmd[] = {
++ {strmwrap_allocate_buffer}, /* STRM_ALLOCATEBUFFER */
++ {strmwrap_close}, /* STRM_CLOSE */
++ {strmwrap_free_buffer}, /* STRM_FREEBUFFER */
++ {strmwrap_get_event_handle}, /* STRM_GETEVENTHANDLE */
++ {strmwrap_get_info}, /* STRM_GETINFO */
++ {strmwrap_idle}, /* STRM_IDLE */
++ {strmwrap_issue}, /* STRM_ISSUE */
++ {strmwrap_open}, /* STRM_OPEN */
++ {strmwrap_reclaim}, /* STRM_RECLAIM */
++ {strmwrap_register_notify}, /* STRM_REGISTERNOTIFY */
++ {strmwrap_select}, /* STRM_SELECT */
++};
++
++/* CMM wrapper functions */
++static struct api_cmd cmm_cmd[] = {
++ {cmmwrap_calloc_buf}, /* CMM_ALLOCBUF */
++ {cmmwrap_free_buf}, /* CMM_FREEBUF */
++ {cmmwrap_get_handle}, /* CMM_GETHANDLE */
++ {cmmwrap_get_info}, /* CMM_GETINFO */
++};
++
++/* Array used to store ioctl table sizes. It can hold up to 8 entries */
++static u8 size_cmd[] = {
++ ARRAY_SIZE(mgr_cmd),
++ ARRAY_SIZE(proc_cmd),
++ ARRAY_SIZE(node_cmd),
++ ARRAY_SIZE(strm_cmd),
++ ARRAY_SIZE(cmm_cmd),
++};
++
++static inline void _cp_fm_usr(void *to, const void __user * from,
++ int *err, unsigned long bytes)
++{
++ if (*err)
++ return;
++
++ if (unlikely(!from)) {
++ *err = -EFAULT;
++ return;
++ }
++
++ if (unlikely(copy_from_user(to, from, bytes)))
++ *err = -EFAULT;
++}
++
++#define CP_FM_USR(to, from, err, n) \
++ _cp_fm_usr(to, from, &(err), (n) * sizeof(*(to)))
++
++static inline void _cp_to_usr(void __user *to, const void *from,
++ int *err, unsigned long bytes)
++{
++ if (*err)
++ return;
++
++ if (unlikely(!to)) {
++ *err = -EFAULT;
++ return;
++ }
++
++ if (unlikely(copy_to_user(to, from, bytes)))
++ *err = -EFAULT;
++}
++
++#define CP_TO_USR(to, from, err, n) \
++ _cp_to_usr(to, from, &(err), (n) * sizeof(*(from)))
++
++/*
++ * ======== api_call_dev_ioctl ========
++ * Purpose:
++ * Call the (wrapper) function for the corresponding API IOCTL.
++ */
++inline int api_call_dev_ioctl(u32 cmd, union trapped_args *args,
++ u32 *result, void *pr_ctxt)
++{
++ u32(*ioctl_cmd) (union trapped_args *args, void *pr_ctxt) = NULL;
++ int i;
++
++ if (_IOC_TYPE(cmd) != DB) {
++ pr_err("%s: Incompatible dspbridge ioctl number\n", __func__);
++ goto err;
++ }
++
++ if (DB_GET_IOC_TABLE(cmd) > ARRAY_SIZE(size_cmd)) {
++ pr_err("%s: undefined ioctl module\n", __func__);
++ goto err;
++ }
++
++ /* Check the size of the required cmd table */
++ i = DB_GET_IOC(cmd);
++ if (i > size_cmd[DB_GET_IOC_TABLE(cmd)]) {
++ pr_err("%s: requested ioctl %d out of bounds for table %d\n",
++ __func__, i, DB_GET_IOC_TABLE(cmd));
++ goto err;
++ }
++
++ switch (DB_GET_MODULE(cmd)) {
++ case DB_MGR:
++ ioctl_cmd = mgr_cmd[i].fxn;
++ break;
++ case DB_PROC:
++ ioctl_cmd = proc_cmd[i].fxn;
++ break;
++ case DB_NODE:
++ ioctl_cmd = node_cmd[i].fxn;
++ break;
++ case DB_STRM:
++ ioctl_cmd = strm_cmd[i].fxn;
++ break;
++ case DB_CMM:
++ ioctl_cmd = cmm_cmd[i].fxn;
++ break;
++ }
++
++ if (!ioctl_cmd) {
++ pr_err("%s: requested ioctl not defined\n", __func__);
++ goto err;
++ } else {
++ *result = (*ioctl_cmd) (args, pr_ctxt);
++ }
++
++ return 0;
++
++err:
++ return -EINVAL;
++}
++
++/*
++ * ======== api_exit ========
++ */
++void api_exit(void)
++{
++ DBC_REQUIRE(api_c_refs > 0);
++ api_c_refs--;
++
++ if (api_c_refs == 0) {
++ /* Release all modules initialized in api_init(). */
++ cod_exit();
++ dev_exit();
++ chnl_exit();
++ msg_exit();
++ io_exit();
++ strm_exit();
++ disp_exit();
++ node_exit();
++ proc_exit();
++ mgr_exit();
++ rmm_exit();
++ drv_exit();
++ }
++ DBC_ENSURE(api_c_refs >= 0);
++}
++
++/*
++ * ======== api_init ========
++ * Purpose:
++ * Module initialization used by Bridge API.
++ */
++bool api_init(void)
++{
++ bool ret = true;
++ bool fdrv, fdev, fcod, fchnl, fmsg, fio;
++ bool fmgr, fproc, fnode, fdisp, fstrm, frmm;
++
++ if (api_c_refs == 0) {
++ /* initialize driver and other modules */
++ fdrv = drv_init();
++ fmgr = mgr_init();
++ fproc = proc_init();
++ fnode = node_init();
++ fdisp = disp_init();
++ fstrm = strm_init();
++ frmm = rmm_init();
++ fchnl = chnl_init();
++ fmsg = msg_mod_init();
++ fio = io_init();
++ fdev = dev_init();
++ fcod = cod_init();
++ ret = fdrv && fdev && fchnl && fcod && fmsg && fio;
++ ret = ret && fmgr && fproc && frmm;
++ if (!ret) {
++ if (fdrv)
++ drv_exit();
++
++ if (fmgr)
++ mgr_exit();
++
++ if (fstrm)
++ strm_exit();
++
++ if (fproc)
++ proc_exit();
++
++ if (fnode)
++ node_exit();
++
++ if (fdisp)
++ disp_exit();
++
++ if (fchnl)
++ chnl_exit();
++
++ if (fmsg)
++ msg_exit();
++
++ if (fio)
++ io_exit();
++
++ if (fdev)
++ dev_exit();
++
++ if (fcod)
++ cod_exit();
++
++ if (frmm)
++ rmm_exit();
++
++ }
++ }
++ if (ret)
++ api_c_refs++;
++
++ return ret;
++}
++
++/*
++ * ======== api_init_complete2 ========
++ * Purpose:
++ * Perform any required bridge initialization which cannot
++ * be performed in api_init() or dev_start_device() due
++ * to the fact that some services are not yet
++ * completely initialized.
++ * Parameters:
++ * Returns:
++ * 0: Allow this device to load
++ * -EPERM: Failure.
++ * Requires:
++ * Bridge API initialized.
++ * Ensures:
++ */
++int api_init_complete2(void)
++{
++ int status = 0;
++ struct cfg_devnode *dev_node;
++ struct dev_object *hdev_obj;
++ u8 dev_type;
++ u32 tmp;
++
++ DBC_REQUIRE(api_c_refs > 0);
++
++ /* Walk the list of DevObjects, get each devnode, and attempting to
++ * autostart the board. Note that this requires COF loading, which
++ * requires KFILE. */
++ for (hdev_obj = dev_get_first(); hdev_obj != NULL;
++ hdev_obj = dev_get_next(hdev_obj)) {
++ if (dev_get_dev_node(hdev_obj, &dev_node))
++ continue;
++
++ if (dev_get_dev_type(hdev_obj, &dev_type))
++ continue;
++
++ if ((dev_type == DSP_UNIT) || (dev_type == IVA_UNIT))
++ if (cfg_get_auto_start(dev_node, &tmp) == 0
++ && tmp)
++ proc_auto_start(dev_node, hdev_obj);
++ }
++
++ return status;
++}
++
++/* TODO: Remove deprecated and not implemented ioctl wrappers */
++
++/*
++ * ======== mgrwrap_enum_node_info ========
++ */
++u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
++{
++ u8 *pndb_props;
++ u32 num_nodes;
++ int status = 0;
++ u32 size = args->args_mgr_enumnode_info.undb_props_size;
++
++ if (size < sizeof(struct dsp_ndbprops))
++ return -EINVAL;
++
++ pndb_props = kmalloc(size, GFP_KERNEL);
++ if (pndb_props == NULL)
++ status = -ENOMEM;
++
++ if (!status) {
++ status =
++ mgr_enum_node_info(args->args_mgr_enumnode_info.node_id,
++ (struct dsp_ndbprops *)pndb_props, size,
++ &num_nodes);
++ }
++ CP_TO_USR(args->args_mgr_enumnode_info.pndb_props, pndb_props, status,
++ size);
++ CP_TO_USR(args->args_mgr_enumnode_info.pu_num_nodes, &num_nodes, status,
++ 1);
++ kfree(pndb_props);
++
++ return status;
++}
++
++/*
++ * ======== mgrwrap_enum_proc_info ========
++ */
++u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt)
++{
++ u8 *processor_info;
++ u8 num_procs;
++ int status = 0;
++ u32 size = args->args_mgr_enumproc_info.processor_info_size;
++
++ if (size < sizeof(struct dsp_processorinfo))
++ return -EINVAL;
++
++ processor_info = kmalloc(size, GFP_KERNEL);
++ if (processor_info == NULL)
++ status = -ENOMEM;
++
++ if (!status) {
++ status =
++ mgr_enum_processor_info(args->args_mgr_enumproc_info.
++ processor_id,
++ (struct dsp_processorinfo *)
++ processor_info, size, &num_procs);
++ }
++ CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info,
++ status, size);
++ CP_TO_USR(args->args_mgr_enumproc_info.pu_num_procs, &num_procs,
++ status, 1);
++ kfree(processor_info);
++
++ return status;
++}
++
++#define WRAP_MAP2CALLER(x) x
++/*
++ * ======== mgrwrap_register_object ========
++ */
++u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct dsp_uuid uuid_obj;
++ u32 path_size = 0;
++ char *psz_path_name = NULL;
++ int status = 0;
++
++ CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
++ if (status)
++ goto func_end;
++ /* path_size is increased by 1 to accommodate NULL */
++ path_size = strlen_user((char *)
++ args->args_mgr_registerobject.psz_path_name) +
++ 1;
++ psz_path_name = kmalloc(path_size, GFP_KERNEL);
++ if (!psz_path_name)
++ goto func_end;
++ ret = strncpy_from_user(psz_path_name,
++ (char *)args->args_mgr_registerobject.
++ psz_path_name, path_size);
++ if (!ret) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ if (args->args_mgr_registerobject.obj_type >= DSP_DCDMAXOBJTYPE)
++ return -EINVAL;
++
++ status = dcd_register_object(&uuid_obj,
++ args->args_mgr_registerobject.obj_type,
++ (char *)psz_path_name);
++func_end:
++ kfree(psz_path_name);
++ return status;
++}
++
++/*
++ * ======== mgrwrap_unregister_object ========
++ */
++u32 mgrwrap_unregister_object(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_uuid uuid_obj;
++
++ CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
++ if (status)
++ goto func_end;
++
++ status = dcd_unregister_object(&uuid_obj,
++ args->args_mgr_unregisterobject.
++ obj_type);
++func_end:
++ return status;
++
++}
++
++/*
++ * ======== mgrwrap_wait_for_bridge_events ========
++ */
++u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_notification *anotifications[MAX_EVENTS];
++ struct dsp_notification notifications[MAX_EVENTS];
++ u32 index, i;
++ u32 count = args->args_mgr_wait.count;
++
++ if (count > MAX_EVENTS)
++ status = -EINVAL;
++
++ /* get the array of pointers to user structures */
++ CP_FM_USR(anotifications, args->args_mgr_wait.anotifications,
++ status, count);
++ /* get the events */
++ for (i = 0; i < count; i++) {
++ CP_FM_USR(&notifications[i], anotifications[i], status, 1);
++ if (status || !notifications[i].handle) {
++ status = -EINVAL;
++ break;
++ }
++ /* set the array of pointers to kernel structures */
++ anotifications[i] = &notifications[i];
++ }
++ if (!status) {
++ status = mgr_wait_for_bridge_events(anotifications, count,
++ &index,
++ args->args_mgr_wait.
++ utimeout);
++ }
++ CP_TO_USR(args->args_mgr_wait.pu_index, &index, status, 1);
++ return status;
++}
++
++/*
++ * ======== MGRWRAP_GetProcessResourceInfo ========
++ */
++u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args * args,
++ void *pr_ctxt)
++{
++ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
++ return 0;
++}
++
++/*
++ * ======== procwrap_attach ========
++ */
++u32 procwrap_attach(union trapped_args *args, void *pr_ctxt)
++{
++ void *processor;
++ int status = 0;
++ struct dsp_processorattrin proc_attr_in, *attr_in = NULL;
++
++ /* Optional argument */
++ if (args->args_proc_attach.attr_in) {
++ CP_FM_USR(&proc_attr_in, args->args_proc_attach.attr_in, status,
++ 1);
++ if (!status)
++ attr_in = &proc_attr_in;
++ else
++ goto func_end;
++
++ }
++ status = proc_attach(args->args_proc_attach.processor_id, attr_in,
++ &processor, pr_ctxt);
++ CP_TO_USR(args->args_proc_attach.ph_processor, &processor, status, 1);
++func_end:
++ return status;
++}
++
++/*
++ * ======== procwrap_ctrl ========
++ */
++u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt)
++{
++ u32 cb_data_size, __user * psize = (u32 __user *)
++ args->args_proc_ctrl.pargs;
++ u8 *pargs = NULL;
++ int status = 0;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (psize) {
++ if (get_user(cb_data_size, psize)) {
++ status = -EPERM;
++ goto func_end;
++ }
++ cb_data_size += sizeof(u32);
++ pargs = kmalloc(cb_data_size, GFP_KERNEL);
++ if (pargs == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ CP_FM_USR(pargs, args->args_proc_ctrl.pargs, status,
++ cb_data_size);
++ }
++ if (!status) {
++ status = proc_ctrl(hprocessor,
++ args->args_proc_ctrl.dw_cmd,
++ (struct dsp_cbdata *)pargs);
++ }
++
++ /* CP_TO_USR(args->args_proc_ctrl.pargs, pargs, status, 1); */
++ kfree(pargs);
++func_end:
++ return status;
++}
++
++/*
++ * ======== procwrap_detach ========
++ */
++u32 __deprecated procwrap_detach(union trapped_args * args, void *pr_ctxt)
++{
++ /* proc_detach called at bridge_release only */
++ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
++ return 0;
++}
++
++/*
++ * ======== procwrap_enum_node_info ========
++ */
++u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ void *node_tab[MAX_NODES];
++ u32 num_nodes;
++ u32 alloc_cnt;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (!args->args_proc_enumnode_info.node_tab_size)
++ return -EINVAL;
++
++ status = proc_enum_nodes(hprocessor,
++ node_tab,
++ args->args_proc_enumnode_info.node_tab_size,
++ &num_nodes, &alloc_cnt);
++ CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status,
++ num_nodes);
++ CP_TO_USR(args->args_proc_enumnode_info.pu_num_nodes, &num_nodes,
++ status, 1);
++ CP_TO_USR(args->args_proc_enumnode_info.pu_allocated, &alloc_cnt,
++ status, 1);
++ return status;
++}
++
++u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++
++ if (args->args_proc_dma.dir >= DMA_NONE)
++ return -EINVAL;
++
++ status = proc_end_dma(pr_ctxt,
++ args->args_proc_dma.pmpu_addr,
++ args->args_proc_dma.ul_size,
++ args->args_proc_dma.dir);
++ return status;
++}
++
++u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++
++ if (args->args_proc_dma.dir >= DMA_NONE)
++ return -EINVAL;
++
++ status = proc_begin_dma(pr_ctxt,
++ args->args_proc_dma.pmpu_addr,
++ args->args_proc_dma.ul_size,
++ args->args_proc_dma.dir);
++ return status;
++}
++
++/*
++ * ======== procwrap_flush_memory ========
++ */
++u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++
++ if (args->args_proc_flushmemory.ul_flags >
++ PROC_WRITEBACK_INVALIDATE_MEM)
++ return -EINVAL;
++
++ status = proc_flush_memory(pr_ctxt,
++ args->args_proc_flushmemory.pmpu_addr,
++ args->args_proc_flushmemory.ul_size,
++ args->args_proc_flushmemory.ul_flags);
++ return status;
++}
++
++/*
++ * ======== procwrap_invalidate_memory ========
++ */
++u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++
++ status =
++ proc_invalidate_memory(pr_ctxt,
++ args->args_proc_invalidatememory.pmpu_addr,
++ args->args_proc_invalidatememory.ul_size);
++ return status;
++}
++
++/*
++ * ======== procwrap_enum_resources ========
++ */
++u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_resourceinfo resource_info;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (args->args_proc_enumresources.resource_info_size <
++ sizeof(struct dsp_resourceinfo))
++ return -EINVAL;
++
++ status =
++ proc_get_resource_info(hprocessor,
++ args->args_proc_enumresources.resource_type,
++ &resource_info,
++ args->args_proc_enumresources.
++ resource_info_size);
++
++ CP_TO_USR(args->args_proc_enumresources.resource_info, &resource_info,
++ status, 1);
++
++ return status;
++
++}
++
++/*
++ * ======== procwrap_get_state ========
++ */
++u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ struct dsp_processorstate proc_state;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (args->args_proc_getstate.state_info_size <
++ sizeof(struct dsp_processorstate))
++ return -EINVAL;
++
++ status = proc_get_state(hprocessor, &proc_state,
++ args->args_proc_getstate.state_info_size);
++ CP_TO_USR(args->args_proc_getstate.proc_state_obj, &proc_state, status,
++ 1);
++ return status;
++
++}
++
++/*
++ * ======== procwrap_get_trace ========
++ */
++u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ u8 *pbuf;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN)
++ return -EINVAL;
++
++ pbuf = kzalloc(args->args_proc_gettrace.max_size, GFP_KERNEL);
++ if (pbuf != NULL) {
++ status = proc_get_trace(hprocessor, pbuf,
++ args->args_proc_gettrace.max_size);
++ } else {
++ status = -ENOMEM;
++ }
++ CP_TO_USR(args->args_proc_gettrace.pbuf, pbuf, status,
++ args->args_proc_gettrace.max_size);
++ kfree(pbuf);
++
++ return status;
++}
++
++/*
++ * ======== procwrap_load ========
++ */
++u32 procwrap_load(union trapped_args *args, void *pr_ctxt)
++{
++ s32 i, len;
++ int status = 0;
++ char *temp;
++ s32 count = args->args_proc_load.argc_index;
++ u8 **argv = NULL, **envp = NULL;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (count <= 0 || count > MAX_LOADARGS) {
++ status = -EINVAL;
++ goto func_cont;
++ }
++
++ argv = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
++ if (!argv) {
++ status = -ENOMEM;
++ goto func_cont;
++ }
++
++ CP_FM_USR(argv, args->args_proc_load.user_args, status, count);
++ if (status) {
++ kfree(argv);
++ argv = NULL;
++ goto func_cont;
++ }
++
++ for (i = 0; i < count; i++) {
++ if (argv[i]) {
++ /* User space pointer to argument */
++ temp = (char *)argv[i];
++ /* len is increased by 1 to accommodate NULL */
++ len = strlen_user((char *)temp) + 1;
++ /* Kernel space pointer to argument */
++ argv[i] = kmalloc(len, GFP_KERNEL);
++ if (argv[i]) {
++ CP_FM_USR(argv[i], temp, status, len);
++ if (status) {
++ kfree(argv[i]);
++ argv[i] = NULL;
++ goto func_cont;
++ }
++ } else {
++ status = -ENOMEM;
++ goto func_cont;
++ }
++ }
++ }
++ /* TODO: validate this */
++ if (args->args_proc_load.user_envp) {
++ /* number of elements in the envp array including NULL */
++ count = 0;
++ do {
++ get_user(temp, args->args_proc_load.user_envp + count);
++ count++;
++ } while (temp);
++ envp = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
++ if (!envp) {
++ status = -ENOMEM;
++ goto func_cont;
++ }
++
++ CP_FM_USR(envp, args->args_proc_load.user_envp, status, count);
++ if (status) {
++ kfree(envp);
++ envp = NULL;
++ goto func_cont;
++ }
++ for (i = 0; envp[i]; i++) {
++ /* User space pointer to argument */
++ temp = (char *)envp[i];
++ /* len is increased by 1 to accommodate NULL */
++ len = strlen_user((char *)temp) + 1;
++ /* Kernel space pointer to argument */
++ envp[i] = kmalloc(len, GFP_KERNEL);
++ if (envp[i]) {
++ CP_FM_USR(envp[i], temp, status, len);
++ if (status) {
++ kfree(envp[i]);
++ envp[i] = NULL;
++ goto func_cont;
++ }
++ } else {
++ status = -ENOMEM;
++ goto func_cont;
++ }
++ }
++ }
++
++ if (!status) {
++ status = proc_load(hprocessor,
++ args->args_proc_load.argc_index,
++ (const char **)argv, (const char **)envp);
++ }
++func_cont:
++ if (envp) {
++ i = 0;
++ while (envp[i])
++ kfree(envp[i++]);
++
++ kfree(envp);
++ }
++
++ if (argv) {
++ count = args->args_proc_load.argc_index;
++ for (i = 0; (i < count) && argv[i]; i++)
++ kfree(argv[i]);
++
++ kfree(argv);
++ }
++
++ return status;
++}
++
++/*
++ * ======== procwrap_map ========
++ */
++u32 procwrap_map(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ void *map_addr;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if (!args->args_proc_mapmem.ul_size)
++ return -EINVAL;
++
++ status = proc_map(args->args_proc_mapmem.hprocessor,
++ args->args_proc_mapmem.pmpu_addr,
++ args->args_proc_mapmem.ul_size,
++ args->args_proc_mapmem.req_addr, &map_addr,
++ args->args_proc_mapmem.ul_map_attr, pr_ctxt);
++ if (!status) {
++ if (put_user(map_addr, args->args_proc_mapmem.pp_map_addr)) {
++ status = -EINVAL;
++ proc_un_map(hprocessor, map_addr, pr_ctxt);
++ }
++
++ }
++ return status;
++}
++
++/*
++ * ======== procwrap_register_notify ========
++ */
++u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ struct dsp_notification notification;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ /* Initialize the notification data structure */
++ notification.ps_name = NULL;
++ notification.handle = NULL;
++
++ status = proc_register_notify(hprocessor,
++ args->args_proc_register_notify.event_mask,
++ args->args_proc_register_notify.notify_type,
++ &notification);
++ CP_TO_USR(args->args_proc_register_notify.hnotification, &notification,
++ status, 1);
++ return status;
++}
++
++/*
++ * ======== procwrap_reserve_memory ========
++ */
++u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ void *prsv_addr;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ if ((args->args_proc_rsvmem.ul_size <= 0) ||
++ (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
++ return -EINVAL;
++
++ status = proc_reserve_memory(hprocessor,
++ args->args_proc_rsvmem.ul_size, &prsv_addr,
++ pr_ctxt);
++ if (!status) {
++ if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
++ status = -EINVAL;
++ proc_un_reserve_memory(args->args_proc_rsvmem.
++ hprocessor, prsv_addr, pr_ctxt);
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== procwrap_start ========
++ */
++u32 procwrap_start(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++
++ ret = proc_start(((struct process_context *)pr_ctxt)->hprocessor);
++ return ret;
++}
++
++/*
++ * ======== procwrap_un_map ========
++ */
++u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++
++ status = proc_un_map(((struct process_context *)pr_ctxt)->hprocessor,
++ args->args_proc_unmapmem.map_addr, pr_ctxt);
++ return status;
++}
++
++/*
++ * ======== procwrap_un_reserve_memory ========
++ */
++u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ status = proc_un_reserve_memory(hprocessor,
++ args->args_proc_unrsvmem.prsv_addr,
++ pr_ctxt);
++ return status;
++}
++
++/*
++ * ======== procwrap_stop ========
++ */
++u32 procwrap_stop(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++
++ ret = proc_stop(((struct process_context *)pr_ctxt)->hprocessor);
++
++ return ret;
++}
++
++/*
++ * ======== find_handle =========
++ */
++inline void find_node_handle(struct node_res_object **noderes,
++ void *pr_ctxt, void *hnode)
++{
++ rcu_read_lock();
++ *noderes = idr_find(((struct process_context *)pr_ctxt)->node_id,
++ (int)hnode - 1);
++ rcu_read_unlock();
++ return;
++}
++
++
++/*
++ * ======== nodewrap_allocate ========
++ */
++u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_uuid node_uuid;
++ u32 cb_data_size = 0;
++ u32 __user *psize = (u32 __user *) args->args_node_allocate.pargs;
++ u8 *pargs = NULL;
++ struct dsp_nodeattrin proc_attr_in, *attr_in = NULL;
++ struct node_res_object *node_res;
++ int nodeid;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ /* Optional argument */
++ if (psize) {
++ if (get_user(cb_data_size, psize))
++ status = -EPERM;
++
++ cb_data_size += sizeof(u32);
++ if (!status) {
++ pargs = kmalloc(cb_data_size, GFP_KERNEL);
++ if (pargs == NULL)
++ status = -ENOMEM;
++
++ }
++ CP_FM_USR(pargs, args->args_node_allocate.pargs, status,
++ cb_data_size);
++ }
++ CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1);
++ if (status)
++ goto func_cont;
++ /* Optional argument */
++ if (args->args_node_allocate.attr_in) {
++ CP_FM_USR(&proc_attr_in, args->args_node_allocate.attr_in,
++ status, 1);
++ if (!status)
++ attr_in = &proc_attr_in;
++ else
++ status = -ENOMEM;
++
++ }
++ if (!status) {
++ status = node_allocate(hprocessor,
++ &node_uuid, (struct dsp_cbdata *)pargs,
++ attr_in, &node_res, pr_ctxt);
++ }
++ if (!status) {
++ nodeid = node_res->id + 1;
++ CP_TO_USR(args->args_node_allocate.ph_node, &nodeid,
++ status, 1);
++ if (status) {
++ status = -EFAULT;
++ node_delete(node_res, pr_ctxt);
++ }
++ }
++func_cont:
++ kfree(pargs);
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_alloc_msg_buf ========
++ */
++u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_bufferattr *pattr = NULL;
++ struct dsp_bufferattr attr;
++ u8 *pbuffer = NULL;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt,
++ args->args_node_allocmsgbuf.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ if (!args->args_node_allocmsgbuf.usize)
++ return -EINVAL;
++
++ if (args->args_node_allocmsgbuf.pattr) { /* Optional argument */
++ CP_FM_USR(&attr, args->args_node_allocmsgbuf.pattr, status, 1);
++ if (!status)
++ pattr = &attr;
++
++ }
++ /* argument */
++ CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.pbuffer, status, 1);
++ if (!status) {
++ status = node_alloc_msg_buf(node_res->hnode,
++ args->args_node_allocmsgbuf.usize,
++ pattr, &pbuffer);
++ }
++ CP_TO_USR(args->args_node_allocmsgbuf.pbuffer, &pbuffer, status, 1);
++ return status;
++}
++
++/*
++ * ======== nodewrap_change_priority ========
++ */
++u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt,
++ args->args_node_changepriority.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ ret = node_change_priority(node_res->hnode,
++ args->args_node_changepriority.prio);
++
++ return ret;
++}
++
++/*
++ * ======== nodewrap_connect ========
++ */
++u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_strmattr attrs;
++ struct dsp_strmattr *pattrs = NULL;
++ u32 cb_data_size;
++ u32 __user *psize = (u32 __user *) args->args_node_connect.conn_param;
++ u8 *pargs = NULL;
++ struct node_res_object *node_res1, *node_res2;
++ struct node_object *node1 = NULL, *node2 = NULL;
++
++ if ((int)args->args_node_connect.hnode != DSP_HGPPNODE) {
++ find_node_handle(&node_res1, pr_ctxt,
++ args->args_node_connect.hnode);
++ if (node_res1)
++ node1 = node_res1->hnode;
++ } else {
++ node1 = args->args_node_connect.hnode;
++ }
++
++ if ((int)args->args_node_connect.other_node != DSP_HGPPNODE) {
++ find_node_handle(&node_res2, pr_ctxt,
++ args->args_node_connect.other_node);
++ if (node_res2)
++ node2 = node_res2->hnode;
++ } else {
++ node2 = args->args_node_connect.other_node;
++ }
++
++ if (!node1 || !node2)
++ return -EFAULT;
++
++ /* Optional argument */
++ if (psize) {
++ if (get_user(cb_data_size, psize))
++ status = -EPERM;
++
++ cb_data_size += sizeof(u32);
++ if (!status) {
++ pargs = kmalloc(cb_data_size, GFP_KERNEL);
++ if (pargs == NULL) {
++ status = -ENOMEM;
++ goto func_cont;
++ }
++
++ }
++ CP_FM_USR(pargs, args->args_node_connect.conn_param, status,
++ cb_data_size);
++ if (status)
++ goto func_cont;
++ }
++ if (args->args_node_connect.pattrs) { /* Optional argument */
++ CP_FM_USR(&attrs, args->args_node_connect.pattrs, status, 1);
++ if (!status)
++ pattrs = &attrs;
++
++ }
++ if (!status) {
++ status = node_connect(node1,
++ args->args_node_connect.stream_id,
++ node2,
++ args->args_node_connect.other_stream,
++ pattrs, (struct dsp_cbdata *)pargs);
++ }
++func_cont:
++ kfree(pargs);
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_create ========
++ */
++u32 nodewrap_create(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_create.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ ret = node_create(node_res->hnode);
++
++ return ret;
++}
++
++/*
++ * ======== nodewrap_delete ========
++ */
++u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_delete.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ ret = node_delete(node_res, pr_ctxt);
++
++ return ret;
++}
++
++/*
++ * ======== nodewrap_free_msg_buf ========
++ */
++u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_bufferattr *pattr = NULL;
++ struct dsp_bufferattr attr;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_freemsgbuf.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ if (args->args_node_freemsgbuf.pattr) { /* Optional argument */
++ CP_FM_USR(&attr, args->args_node_freemsgbuf.pattr, status, 1);
++ if (!status)
++ pattr = &attr;
++
++ }
++
++ if (!args->args_node_freemsgbuf.pbuffer)
++ return -EFAULT;
++
++ if (!status) {
++ status = node_free_msg_buf(node_res->hnode,
++ args->args_node_freemsgbuf.pbuffer,
++ pattr);
++ }
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_get_attr ========
++ */
++u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_nodeattr attr;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_getattr.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ status = node_get_attr(node_res->hnode, &attr,
++ args->args_node_getattr.attr_size);
++ CP_TO_USR(args->args_node_getattr.pattr, &attr, status, 1);
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_get_message ========
++ */
++u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ struct dsp_msg msg;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_getmessage.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ status = node_get_message(node_res->hnode, &msg,
++ args->args_node_getmessage.utimeout);
++
++ CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1);
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_pause ========
++ */
++u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_pause.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ ret = node_pause(node_res->hnode);
++
++ return ret;
++}
++
++/*
++ * ======== nodewrap_put_message ========
++ */
++u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_msg msg;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_putmessage.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ CP_FM_USR(&msg, args->args_node_putmessage.message, status, 1);
++
++ if (!status) {
++ status =
++ node_put_message(node_res->hnode, &msg,
++ args->args_node_putmessage.utimeout);
++ }
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_register_notify ========
++ */
++u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_notification notification;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt,
++ args->args_node_registernotify.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ /* Initialize the notification data structure */
++ notification.ps_name = NULL;
++ notification.handle = NULL;
++
++ if (!args->args_proc_register_notify.event_mask)
++ CP_FM_USR(&notification,
++ args->args_proc_register_notify.hnotification,
++ status, 1);
++
++ status = node_register_notify(node_res->hnode,
++ args->args_node_registernotify.event_mask,
++ args->args_node_registernotify.
++ notify_type, &notification);
++ CP_TO_USR(args->args_node_registernotify.hnotification, &notification,
++ status, 1);
++ return status;
++}
++
++/*
++ * ======== nodewrap_run ========
++ */
++u32 nodewrap_run(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_run.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ ret = node_run(node_res->hnode);
++
++ return ret;
++}
++
++/*
++ * ======== nodewrap_terminate ========
++ */
++u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ int tempstatus;
++ struct node_res_object *node_res;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_node_terminate.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ status = node_terminate(node_res->hnode, &tempstatus);
++
++ CP_TO_USR(args->args_node_terminate.pstatus, &tempstatus, status, 1);
++
++ return status;
++}
++
++/*
++ * ======== nodewrap_get_uuid_props ========
++ */
++u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_uuid node_uuid;
++ struct dsp_ndbprops *pnode_props = NULL;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status,
++ 1);
++ if (status)
++ goto func_cont;
++ pnode_props = kmalloc(sizeof(struct dsp_ndbprops), GFP_KERNEL);
++ if (pnode_props != NULL) {
++ status =
++ node_get_uuid_props(hprocessor, &node_uuid, pnode_props);
++ CP_TO_USR(args->args_node_getuuidprops.node_props, pnode_props,
++ status, 1);
++ } else
++ status = -ENOMEM;
++func_cont:
++ kfree(pnode_props);
++ return status;
++}
++
++/*
++ * ======== find_strm_handle =========
++ */
++inline void find_strm_handle(struct strm_res_object **strmres,
++ void *pr_ctxt, void *hstream)
++{
++ rcu_read_lock();
++ *strmres = idr_find(((struct process_context *)pr_ctxt)->stream_id,
++ (int)hstream - 1);
++ rcu_read_unlock();
++ return;
++}
++
++/*
++ * ======== strmwrap_allocate_buffer ========
++ */
++u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt)
++{
++ int status;
++ u8 **ap_buffer = NULL;
++ u32 num_bufs = args->args_strm_allocatebuffer.num_bufs;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt,
++ args->args_strm_allocatebuffer.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ if (num_bufs > MAX_BUFS)
++ return -EINVAL;
++
++ ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
++ if (ap_buffer == NULL)
++ return -ENOMEM;
++
++ status = strm_allocate_buffer(strm_res,
++ args->args_strm_allocatebuffer.usize,
++ ap_buffer, num_bufs, pr_ctxt);
++ if (!status) {
++ CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer,
++ status, num_bufs);
++ if (status) {
++ status = -EFAULT;
++ strm_free_buffer(strm_res,
++ ap_buffer, num_bufs, pr_ctxt);
++ }
++ }
++ kfree(ap_buffer);
++
++ return status;
++}
++
++/*
++ * ======== strmwrap_close ========
++ */
++u32 strmwrap_close(union trapped_args *args, void *pr_ctxt)
++{
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_close.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ return strm_close(strm_res, pr_ctxt);
++}
++
++/*
++ * ======== strmwrap_free_buffer ========
++ */
++u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ u8 **ap_buffer = NULL;
++ u32 num_bufs = args->args_strm_freebuffer.num_bufs;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt,
++ args->args_strm_freebuffer.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ if (num_bufs > MAX_BUFS)
++ return -EINVAL;
++
++ ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
++ if (ap_buffer == NULL)
++ return -ENOMEM;
++
++ CP_FM_USR(ap_buffer, args->args_strm_freebuffer.ap_buffer, status,
++ num_bufs);
++
++ if (!status)
++ status = strm_free_buffer(strm_res,
++ ap_buffer, num_bufs, pr_ctxt);
++
++ CP_TO_USR(args->args_strm_freebuffer.ap_buffer, ap_buffer, status,
++ num_bufs);
++ kfree(ap_buffer);
++
++ return status;
++}
++
++/*
++ * ======== strmwrap_get_event_handle ========
++ */
++u32 __deprecated strmwrap_get_event_handle(union trapped_args * args,
++ void *pr_ctxt)
++{
++ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
++ return -ENOSYS;
++}
++
++/*
++ * ======== strmwrap_get_info ========
++ */
++u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct stream_info strm_info;
++ struct dsp_streaminfo user;
++ struct dsp_streaminfo *temp;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt,
++ args->args_strm_getinfo.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ CP_FM_USR(&strm_info, args->args_strm_getinfo.stream_info, status, 1);
++ temp = strm_info.user_strm;
++
++ strm_info.user_strm = &user;
++
++ if (!status) {
++ status = strm_get_info(strm_res->hstream,
++ &strm_info,
++ args->args_strm_getinfo.
++ stream_info_size);
++ }
++ CP_TO_USR(temp, strm_info.user_strm, status, 1);
++ strm_info.user_strm = temp;
++ CP_TO_USR(args->args_strm_getinfo.stream_info, &strm_info, status, 1);
++ return status;
++}
++
++/*
++ * ======== strmwrap_idle ========
++ */
++u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt)
++{
++ u32 ret;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_idle.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ ret = strm_idle(strm_res->hstream, args->args_strm_idle.flush_flag);
++
++ return ret;
++}
++
++/*
++ * ======== strmwrap_issue ========
++ */
++u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_issue.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ if (!args->args_strm_issue.pbuffer)
++ return -EFAULT;
++
++ /* No need of doing CP_FM_USR for the user buffer (pbuffer)
++ as this is done in Bridge internal function bridge_chnl_add_io_req
++ in chnl_sm.c */
++ status = strm_issue(strm_res->hstream,
++ args->args_strm_issue.pbuffer,
++ args->args_strm_issue.dw_bytes,
++ args->args_strm_issue.dw_buf_size,
++ args->args_strm_issue.dw_arg);
++
++ return status;
++}
++
++/*
++ * ======== strmwrap_open ========
++ */
++u32 strmwrap_open(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct strm_attr attr;
++ struct strm_res_object *strm_res_obj;
++ struct dsp_streamattrin strm_attr_in;
++ struct node_res_object *node_res;
++ int strmid;
++
++ find_node_handle(&node_res, pr_ctxt, args->args_strm_open.hnode);
++
++ if (!node_res)
++ return -EFAULT;
++
++ CP_FM_USR(&attr, args->args_strm_open.attr_in, status, 1);
++
++ if (attr.stream_attr_in != NULL) { /* Optional argument */
++ CP_FM_USR(&strm_attr_in, attr.stream_attr_in, status, 1);
++ if (!status) {
++ attr.stream_attr_in = &strm_attr_in;
++ if (attr.stream_attr_in->strm_mode == STRMMODE_LDMA)
++ return -ENOSYS;
++ }
++
++ }
++ status = strm_open(node_res->hnode,
++ args->args_strm_open.direction,
++ args->args_strm_open.index, &attr, &strm_res_obj,
++ pr_ctxt);
++ if (!status) {
++ strmid = strm_res_obj->id + 1;
++ CP_TO_USR(args->args_strm_open.ph_stream, &strmid, status, 1);
++ }
++ return status;
++}
++
++/*
++ * ======== strmwrap_reclaim ========
++ */
++u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ u8 *buf_ptr;
++ u32 ul_bytes;
++ u32 dw_arg;
++ u32 ul_buf_size;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_reclaim.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ status = strm_reclaim(strm_res->hstream, &buf_ptr,
++ &ul_bytes, &ul_buf_size, &dw_arg);
++ CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1);
++ CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1);
++ CP_TO_USR(args->args_strm_reclaim.pdw_arg, &dw_arg, status, 1);
++
++ if (args->args_strm_reclaim.buf_size_ptr != NULL) {
++ CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size,
++ status, 1);
++ }
++
++ return status;
++}
++
++/*
++ * ======== strmwrap_register_notify ========
++ */
++u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct dsp_notification notification;
++ struct strm_res_object *strm_res;
++
++ find_strm_handle(&strm_res, pr_ctxt,
++ args->args_strm_registernotify.hstream);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ /* Initialize the notification data structure */
++ notification.ps_name = NULL;
++ notification.handle = NULL;
++
++ status = strm_register_notify(strm_res->hstream,
++ args->args_strm_registernotify.event_mask,
++ args->args_strm_registernotify.
++ notify_type, &notification);
++ CP_TO_USR(args->args_strm_registernotify.hnotification, &notification,
++ status, 1);
++
++ return status;
++}
++
++/*
++ * ======== strmwrap_select ========
++ */
++u32 strmwrap_select(union trapped_args *args, void *pr_ctxt)
++{
++ u32 mask;
++ struct strm_object *strm_tab[MAX_STREAMS];
++ int status = 0;
++ struct strm_res_object *strm_res;
++ int *ids[MAX_STREAMS];
++ int i;
++
++ if (args->args_strm_select.strm_num > MAX_STREAMS)
++ return -EINVAL;
++
++ CP_FM_USR(ids, args->args_strm_select.stream_tab, status,
++ args->args_strm_select.strm_num);
++
++ if (status)
++ return status;
++
++ for (i = 0; i < args->args_strm_select.strm_num; i++) {
++ find_strm_handle(&strm_res, pr_ctxt, ids[i]);
++
++ if (!strm_res)
++ return -EFAULT;
++
++ strm_tab[i] = strm_res->hstream;
++ }
++
++ if (!status) {
++ status = strm_select(strm_tab, args->args_strm_select.strm_num,
++ &mask, args->args_strm_select.utimeout);
++ }
++ CP_TO_USR(args->args_strm_select.pmask, &mask, status, 1);
++ return status;
++}
++
++/* CMM */
++
++/*
++ * ======== cmmwrap_calloc_buf ========
++ */
++u32 __deprecated cmmwrap_calloc_buf(union trapped_args * args, void *pr_ctxt)
++{
++ /* This operation is done in kernel */
++ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
++ return -ENOSYS;
++}
++
++/*
++ * ======== cmmwrap_free_buf ========
++ */
++u32 __deprecated cmmwrap_free_buf(union trapped_args * args, void *pr_ctxt)
++{
++ /* This operation is done in kernel */
++ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
++ return -ENOSYS;
++}
++
++/*
++ * ======== cmmwrap_get_handle ========
++ */
++u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct cmm_object *hcmm_mgr;
++ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
++
++ status = cmm_get_handle(hprocessor, &hcmm_mgr);
++
++ CP_TO_USR(args->args_cmm_gethandle.ph_cmm_mgr, &hcmm_mgr, status, 1);
++
++ return status;
++}
++
++/*
++ * ======== cmmwrap_get_info ========
++ */
++u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt)
++{
++ int status = 0;
++ struct cmm_info cmm_info_obj;
++
++ status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj);
++
++ CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status,
++ 1);
++
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/io.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/io.c 2010-08-18 11:24:23.214050741 +0300
+@@ -0,0 +1,142 @@
++/*
++ * io.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * IO manager interface: Manages IO between CHNL and msg_ctrl.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- This */
++#include <ioobj.h>
++#include <dspbridge/iodefs.h>
++#include <dspbridge/io.h>
++
++/* ----------------------------------- Globals */
++static u32 refs;
++
++/*
++ * ======== io_create ========
++ * Purpose:
++ * Create an IO manager object, responsible for managing IO between
++ * CHNL and msg_ctrl
++ */
++int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
++ const struct io_attrs *mgr_attrts)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct io_mgr *hio_mgr = NULL;
++ struct io_mgr_ *pio_mgr = NULL;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(io_man != NULL);
++ DBC_REQUIRE(mgr_attrts != NULL);
++
++ *io_man = NULL;
++
++ /* A memory base of 0 implies no memory base: */
++ if ((mgr_attrts->shm_base != 0) && (mgr_attrts->usm_length == 0))
++ status = -EINVAL;
++
++ if (mgr_attrts->word_size == 0)
++ status = -EINVAL;
++
++ if (!status) {
++ dev_get_intf_fxns(hdev_obj, &intf_fxns);
++
++ /* Let Bridge channel module finish the create: */
++ status = (*intf_fxns->pfn_io_create) (&hio_mgr, hdev_obj,
++ mgr_attrts);
++
++ if (!status) {
++ pio_mgr = (struct io_mgr_ *)hio_mgr;
++ pio_mgr->intf_fxns = intf_fxns;
++ pio_mgr->hdev_obj = hdev_obj;
++
++ /* Return the new channel manager handle: */
++ *io_man = hio_mgr;
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== io_destroy ========
++ * Purpose:
++ * Delete IO manager.
++ */
++int io_destroy(struct io_mgr *hio_mgr)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
++ int status;
++
++ DBC_REQUIRE(refs > 0);
++
++ intf_fxns = pio_mgr->intf_fxns;
++
++ /* Let Bridge channel module destroy the io_mgr: */
++ status = (*intf_fxns->pfn_io_destroy) (hio_mgr);
++
++ return status;
++}
++
++/*
++ * ======== io_exit ========
++ * Purpose:
++ * Discontinue usage of the IO module.
++ */
++void io_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== io_init ========
++ * Purpose:
++ * Initialize the IO module's private state.
++ */
++bool io_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/ioobj.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/ioobj.h 2010-08-18 11:24:23.214050741 +0300
+@@ -0,0 +1,38 @@
++/*
++ * ioobj.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Structure subcomponents of channel class library IO objects which
++ * are exposed to DSP API from Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef IOOBJ_
++#define IOOBJ_
++
++#include <dspbridge/devdefs.h>
++#include <dspbridge/dspdefs.h>
++
++/*
++ * This struct is the first field in a io_mgr struct. Other, implementation
++ * specific fields follow this structure in memory.
++ */
++struct io_mgr_ {
++ /* These must be the first fields in a io_mgr struct: */
++ struct bridge_dev_context *hbridge_context; /* Bridge context. */
++ /* Function interface to Bridge driver. */
++ struct bridge_drv_interface *intf_fxns;
++ struct dev_object *hdev_obj; /* Device this board represents. */
++};
++
++#endif /* IOOBJ_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/msg.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/msg.c 2010-08-18 11:24:23.214050741 +0300
+@@ -0,0 +1,129 @@
++/*
++ * msg.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge msg_ctrl Module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- Bridge Driver */
++#include <dspbridge/dspdefs.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- This */
++#include <msgobj.h>
++#include <dspbridge/msg.h>
++
++/* ----------------------------------- Globals */
++static u32 refs; /* module reference count */
++
++/*
++ * ======== msg_create ========
++ * Purpose:
++ * Create an object to manage message queues. Only one of these objects
++ * can exist per device object.
++ */
++int msg_create(struct msg_mgr **msg_man,
++ struct dev_object *hdev_obj, msg_onexit msg_callback)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct msg_mgr_ *msg_mgr_obj;
++ struct msg_mgr *hmsg_mgr;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(msg_man != NULL);
++ DBC_REQUIRE(msg_callback != NULL);
++ DBC_REQUIRE(hdev_obj != NULL);
++
++ *msg_man = NULL;
++
++ dev_get_intf_fxns(hdev_obj, &intf_fxns);
++
++ /* Let Bridge message module finish the create: */
++ status =
++ (*intf_fxns->pfn_msg_create) (&hmsg_mgr, hdev_obj, msg_callback);
++
++ if (!status) {
++ /* Fill in DSP API message module's fields of the msg_mgr
++ * structure */
++ msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
++ msg_mgr_obj->intf_fxns = intf_fxns;
++
++ /* Finally, return the new message manager handle: */
++ *msg_man = hmsg_mgr;
++ } else {
++ status = -EPERM;
++ }
++ return status;
++}
++
++/*
++ * ======== msg_delete ========
++ * Purpose:
++ * Delete a msg_ctrl manager allocated in msg_create().
++ */
++void msg_delete(struct msg_mgr *hmsg_mgr)
++{
++ struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
++ struct bridge_drv_interface *intf_fxns;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (msg_mgr_obj) {
++ intf_fxns = msg_mgr_obj->intf_fxns;
++
++ /* Let Bridge message module destroy the msg_mgr: */
++ (*intf_fxns->pfn_msg_delete) (hmsg_mgr);
++ } else {
++ dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n",
++ __func__, hmsg_mgr);
++ }
++}
++
++/*
++ * ======== msg_exit ========
++ */
++void msg_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== msg_mod_init ========
++ */
++bool msg_mod_init(void)
++{
++ DBC_REQUIRE(refs >= 0);
++
++ refs++;
++
++ DBC_ENSURE(refs >= 0);
++
++ return true;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/msgobj.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/pmgr/msgobj.h 2010-08-18 11:24:23.214050741 +0300
+@@ -0,0 +1,38 @@
++/*
++ * msgobj.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Structure subcomponents of channel class library msg_ctrl objects which
++ * are exposed to DSP API from Bridge driver.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef MSGOBJ_
++#define MSGOBJ_
++
++#include <dspbridge/dspdefs.h>
++
++#include <dspbridge/msgdefs.h>
++
++/*
++ * This struct is the first field in a msg_mgr struct. Other, implementation
++ * specific fields follow this structure in memory.
++ */
++struct msg_mgr_ {
++ /* The first field must match that in _msg_sm.h */
++
++ /* Function interface to Bridge driver. */
++ struct bridge_drv_interface *intf_fxns;
++};
++
++#endif /* MSGOBJ_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/dbdcd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/dbdcd.c 2010-08-18 11:24:23.214050741 +0300
+@@ -0,0 +1,1512 @@
++/*
++ * dbdcd.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * This file contains the implementation of the DSP/BIOS Bridge
++ * Configuration Database (DCD).
++ *
++ * Notes:
++ * The fxn dcd_get_objects can apply a callback fxn to each DCD object
++ * that is located in a specified COFF file. At the moment,
++ * dcd_auto_register, dcd_auto_unregister, and NLDR module all use
++ * dcd_get_objects.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/cod.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/uuidutil.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/dbdcd.h>
++
++/* ----------------------------------- Global defines. */
++#define MAX_INT2CHAR_LENGTH 16 /* Max int2char len of 32 bit int */
++
++/* Name of section containing dependent libraries */
++#define DEPLIBSECT ".dspbridge_deplibs"
++
++/* DCD specific structures. */
++struct dcd_manager {
++ struct cod_manager *cod_mgr; /* Handle to COD manager object. */
++};
++
++/* Pointer to the registry support key */
++static struct list_head reg_key_list;
++static DEFINE_SPINLOCK(dbdcd_lock);
++
++/* Global reference variables. */
++static u32 refs;
++static u32 enum_refs;
++
++/* Helper function prototypes. */
++static s32 atoi(char *psz_buf);
++static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
++ enum dsp_dcdobjtype obj_type,
++ struct dcd_genericobj *gen_obj);
++static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size);
++static char dsp_char2_gpp_char(char *word, s32 dsp_char_size);
++static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ u16 *num_libs,
++ u16 *num_pers_libs,
++ struct dsp_uuid *dep_lib_uuids,
++ bool *prstnt_dep_libs,
++ enum nldr_phase phase);
++
++/*
++ * ======== dcd_auto_register ========
++ * Purpose:
++ * Parses the supplied image and resigsters with DCD.
++ */
++int dcd_auto_register(struct dcd_manager *hdcd_mgr,
++ char *sz_coff_path)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (hdcd_mgr)
++ status = dcd_get_objects(hdcd_mgr, sz_coff_path,
++ (dcd_registerfxn) dcd_register_object,
++ (void *)sz_coff_path);
++ else
++ status = -EFAULT;
++
++ return status;
++}
++
++/*
++ * ======== dcd_auto_unregister ========
++ * Purpose:
++ * Parses the supplied DSP image and unresiters from DCD.
++ */
++int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
++ char *sz_coff_path)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (hdcd_mgr)
++ status = dcd_get_objects(hdcd_mgr, sz_coff_path,
++ (dcd_registerfxn) dcd_register_object,
++ NULL);
++ else
++ status = -EFAULT;
++
++ return status;
++}
++
++/*
++ * ======== dcd_create_manager ========
++ * Purpose:
++ * Creates DCD manager.
++ */
++int dcd_create_manager(char *sz_zl_dll_name,
++ struct dcd_manager **dcd_mgr)
++{
++ struct cod_manager *cod_mgr; /* COD manager handle */
++ struct dcd_manager *dcd_mgr_obj = NULL; /* DCD Manager pointer */
++ int status = 0;
++
++ DBC_REQUIRE(refs >= 0);
++ DBC_REQUIRE(dcd_mgr);
++
++ status = cod_create(&cod_mgr, sz_zl_dll_name, NULL);
++ if (status)
++ goto func_end;
++
++ /* Create a DCD object. */
++ dcd_mgr_obj = kzalloc(sizeof(struct dcd_manager), GFP_KERNEL);
++ if (dcd_mgr_obj != NULL) {
++ /* Fill out the object. */
++ dcd_mgr_obj->cod_mgr = cod_mgr;
++
++ /* Return handle to this DCD interface. */
++ *dcd_mgr = dcd_mgr_obj;
++ } else {
++ status = -ENOMEM;
++
++ /*
++ * If allocation of DcdManager object failed, delete the
++ * COD manager.
++ */
++ cod_delete(cod_mgr);
++ }
++
++ DBC_ENSURE((!status) ||
++ ((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
++
++func_end:
++ return status;
++}
++
++/*
++ * ======== dcd_destroy_manager ========
++ * Purpose:
++ * Frees DCD Manager object.
++ */
++int dcd_destroy_manager(struct dcd_manager *hdcd_mgr)
++{
++ struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
++ int status = -EFAULT;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (hdcd_mgr) {
++ /* Delete the COD manager. */
++ cod_delete(dcd_mgr_obj->cod_mgr);
++
++ /* Deallocate a DCD manager object. */
++ kfree(dcd_mgr_obj);
++
++ status = 0;
++ }
++
++ return status;
++}
++
++/*
++ * ======== dcd_enumerate_object ========
++ * Purpose:
++ * Enumerates objects in the DCD.
++ */
++int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
++ struct dsp_uuid *uuid_obj)
++{
++ int status = 0;
++ char sz_reg_key[DCD_MAXPATHLENGTH];
++ char sz_value[DCD_MAXPATHLENGTH];
++ struct dsp_uuid dsp_uuid_obj;
++ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
++ u32 dw_key_len = 0;
++ struct dcd_key_elem *dcd_key;
++ int len;
++
++ DBC_REQUIRE(refs >= 0);
++ DBC_REQUIRE(index >= 0);
++ DBC_REQUIRE(uuid_obj != NULL);
++
++ if ((index != 0) && (enum_refs == 0)) {
++ /*
++ * If an enumeration is being performed on an index greater
++ * than zero, then the current enum_refs must have been
++ * incremented to greater than zero.
++ */
++ status = -EIDRM;
++ } else {
++ /*
++ * Pre-determine final key length. It's length of DCD_REGKEY +
++ * "_\0" + length of sz_obj_type string + terminating NULL.
++ */
++ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
++ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
++
++ /* Create proper REG key; concatenate DCD_REGKEY with
++ * obj_type. */
++ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
++ if ((strlen(sz_reg_key) + strlen("_\0")) <
++ DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, "_\0", 2);
++ } else {
++ status = -EPERM;
++ }
++
++ /* This snprintf is guaranteed not to exceed max size of an
++ * integer. */
++ status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d",
++ obj_type);
++
++ if (status == -1) {
++ status = -EPERM;
++ } else {
++ status = 0;
++ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
++ DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, sz_obj_type,
++ strlen(sz_obj_type) + 1);
++ } else {
++ status = -EPERM;
++ }
++ }
++
++ if (!status) {
++ len = strlen(sz_reg_key);
++ spin_lock(&dbdcd_lock);
++ list_for_each_entry(dcd_key, &reg_key_list, link) {
++ if (!strncmp(dcd_key->name, sz_reg_key, len)
++ && !index--) {
++ strncpy(sz_value, &dcd_key->name[len],
++ strlen(&dcd_key->name[len]) + 1);
++ break;
++ }
++ }
++ spin_unlock(&dbdcd_lock);
++
++ if (&dcd_key->link == &reg_key_list)
++ status = -ENODATA;
++ }
++
++ if (!status) {
++ /* Create UUID value using string retrieved from
++ * registry. */
++ uuid_uuid_from_string(sz_value, &dsp_uuid_obj);
++
++ *uuid_obj = dsp_uuid_obj;
++
++ /* Increment enum_refs to update reference count. */
++ enum_refs++;
++
++ status = 0;
++ } else if (status == -ENODATA) {
++ /* At the end of enumeration. Reset enum_refs. */
++ enum_refs = 0;
++
++ /*
++ * TODO: Revisit, this is not an errror case but code
++ * expects non-zero value.
++ */
++ status = ENODATA;
++ } else {
++ status = -EPERM;
++ }
++ }
++
++ DBC_ENSURE(uuid_obj || (status == -EPERM));
++
++ return status;
++}
++
++/*
++ * ======== dcd_exit ========
++ * Purpose:
++ * Discontinue usage of the DCD module.
++ */
++void dcd_exit(void)
++{
++ struct dcd_key_elem *rv, *rv_tmp;
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++ if (refs == 0) {
++ cod_exit();
++ list_for_each_entry_safe(rv, rv_tmp, &reg_key_list, link) {
++ list_del(&rv->link);
++ kfree(rv->path);
++ kfree(rv);
++ }
++ }
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== dcd_get_dep_libs ========
++ */
++int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ u16 num_libs, struct dsp_uuid *dep_lib_uuids,
++ bool *prstnt_dep_libs,
++ enum nldr_phase phase)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hdcd_mgr);
++ DBC_REQUIRE(uuid_obj != NULL);
++ DBC_REQUIRE(dep_lib_uuids != NULL);
++ DBC_REQUIRE(prstnt_dep_libs != NULL);
++
++ status =
++ get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids,
++ prstnt_dep_libs, phase);
++
++ return status;
++}
++
++/*
++ * ======== dcd_get_num_dep_libs ========
++ */
++int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ u16 *num_libs, u16 *num_pers_libs,
++ enum nldr_phase phase)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hdcd_mgr);
++ DBC_REQUIRE(num_libs != NULL);
++ DBC_REQUIRE(num_pers_libs != NULL);
++ DBC_REQUIRE(uuid_obj != NULL);
++
++ status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs,
++ NULL, NULL, phase);
++
++ return status;
++}
++
++/*
++ * ======== dcd_get_object_def ========
++ * Purpose:
++ * Retrieves the properties of a node or processor based on the UUID and
++ * object type.
++ */
++int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *obj_uuid,
++ enum dsp_dcdobjtype obj_type,
++ struct dcd_genericobj *obj_def)
++{
++ struct dcd_manager *dcd_mgr_obj = hdcd_mgr; /* ptr to DCD mgr */
++ struct cod_libraryobj *lib = NULL;
++ int status = 0;
++ u32 ul_addr = 0; /* Used by cod_get_section */
++ u32 ul_len = 0; /* Used by cod_get_section */
++ u32 dw_buf_size; /* Used by REG functions */
++ char sz_reg_key[DCD_MAXPATHLENGTH];
++ char *sz_uuid; /*[MAXUUIDLEN]; */
++ struct dcd_key_elem *dcd_key = NULL;
++ char sz_sect_name[MAXUUIDLEN + 2]; /* ".[UUID]\0" */
++ char *psz_coff_buf;
++ u32 dw_key_len; /* Len of REG key. */
++ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(obj_def != NULL);
++ DBC_REQUIRE(obj_uuid != NULL);
++
++ sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL);
++ if (!sz_uuid) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ if (!hdcd_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ /* Pre-determine final key length. It's length of DCD_REGKEY +
++ * "_\0" + length of sz_obj_type string + terminating NULL */
++ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
++ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
++
++ /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
++ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
++
++ if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, "_\0", 2);
++ else
++ status = -EPERM;
++
++ status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", obj_type);
++ if (status == -1) {
++ status = -EPERM;
++ } else {
++ status = 0;
++
++ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
++ DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, sz_obj_type,
++ strlen(sz_obj_type) + 1);
++ } else {
++ status = -EPERM;
++ }
++
++ /* Create UUID value to set in registry. */
++ uuid_uuid_to_string(obj_uuid, sz_uuid, MAXUUIDLEN);
++
++ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
++ else
++ status = -EPERM;
++
++ /* Retrieve paths from the registry based on struct dsp_uuid */
++ dw_buf_size = DCD_MAXPATHLENGTH;
++ }
++ if (!status) {
++ spin_lock(&dbdcd_lock);
++ list_for_each_entry(dcd_key, &reg_key_list, link) {
++ if (!strncmp(dcd_key->name, sz_reg_key,
++ strlen(sz_reg_key) + 1))
++ break;
++ }
++ spin_unlock(&dbdcd_lock);
++ if (&dcd_key->link == &reg_key_list) {
++ status = -ENOKEY;
++ goto func_end;
++ }
++ }
++
++
++ /* Open COFF file. */
++ status = cod_open(dcd_mgr_obj->cod_mgr, dcd_key->path,
++ COD_NOLOAD, &lib);
++ if (status) {
++ status = -EACCES;
++ goto func_end;
++ }
++
++ /* Ensure sz_uuid + 1 is not greater than sizeof sz_sect_name. */
++ DBC_ASSERT((strlen(sz_uuid) + 1) < sizeof(sz_sect_name));
++
++ /* Create section name based on node UUID. A period is
++ * pre-pended to the UUID string to form the section name.
++ * I.e. ".24BC8D90_BB45_11d4_B756_006008BDB66F" */
++ strncpy(sz_sect_name, ".", 2);
++ strncat(sz_sect_name, sz_uuid, strlen(sz_uuid));
++
++ /* Get section information. */
++ status = cod_get_section(lib, sz_sect_name, &ul_addr, &ul_len);
++ if (status) {
++ status = -EACCES;
++ goto func_end;
++ }
++
++ /* Allocate zeroed buffer. */
++ psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL);
++#ifdef _DB_TIOMAP
++ if (strstr(dcd_key->path, "iva") == NULL) {
++ /* Locate section by objectID and read its content. */
++ status =
++ cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
++ } else {
++ status =
++ cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
++ dev_dbg(bridge, "%s: Skipped Byte swap for IVA!!\n", __func__);
++ }
++#else
++ status = cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
++#endif
++ if (!status) {
++ /* Compres DSP buffer to conform to PC format. */
++ if (strstr(dcd_key->path, "iva") == NULL) {
++ compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
++ } else {
++ compress_buf(psz_coff_buf, ul_len, 1);
++ dev_dbg(bridge, "%s: Compressing IVA COFF buffer by 1 "
++ "for IVA!!\n", __func__);
++ }
++
++ /* Parse the content of the COFF buffer. */
++ status =
++ get_attrs_from_buf(psz_coff_buf, ul_len, obj_type, obj_def);
++ if (status)
++ status = -EACCES;
++ } else {
++ status = -EACCES;
++ }
++
++ /* Free the previously allocated dynamic buffer. */
++ kfree(psz_coff_buf);
++func_end:
++ if (lib)
++ cod_close(lib);
++
++ kfree(sz_uuid);
++
++ return status;
++}
++
++/*
++ * ======== dcd_get_objects ========
++ */
++int dcd_get_objects(struct dcd_manager *hdcd_mgr,
++ char *sz_coff_path, dcd_registerfxn register_fxn,
++ void *handle)
++{
++ struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
++ int status = 0;
++ char *psz_coff_buf;
++ char *psz_cur;
++ struct cod_libraryobj *lib = NULL;
++ u32 ul_addr = 0; /* Used by cod_get_section */
++ u32 ul_len = 0; /* Used by cod_get_section */
++ char seps[] = ":, ";
++ char *token = NULL;
++ struct dsp_uuid dsp_uuid_obj;
++ s32 object_type;
++
++ DBC_REQUIRE(refs > 0);
++ if (!hdcd_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ /* Open DSP coff file, don't load symbols. */
++ status = cod_open(dcd_mgr_obj->cod_mgr, sz_coff_path, COD_NOLOAD, &lib);
++ if (status) {
++ status = -EACCES;
++ goto func_cont;
++ }
++
++ /* Get DCD_RESIGER_SECTION section information. */
++ status = cod_get_section(lib, DCD_REGISTER_SECTION, &ul_addr, &ul_len);
++ if (status || !(ul_len > 0)) {
++ status = -EACCES;
++ goto func_cont;
++ }
++
++ /* Allocate zeroed buffer. */
++ psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL);
++#ifdef _DB_TIOMAP
++ if (strstr(sz_coff_path, "iva") == NULL) {
++ /* Locate section by objectID and read its content. */
++ status = cod_read_section(lib, DCD_REGISTER_SECTION,
++ psz_coff_buf, ul_len);
++ } else {
++ dev_dbg(bridge, "%s: Skipped Byte swap for IVA!!\n", __func__);
++ status = cod_read_section(lib, DCD_REGISTER_SECTION,
++ psz_coff_buf, ul_len);
++ }
++#else
++ status =
++ cod_read_section(lib, DCD_REGISTER_SECTION, psz_coff_buf, ul_len);
++#endif
++ if (!status) {
++ /* Compress DSP buffer to conform to PC format. */
++ if (strstr(sz_coff_path, "iva") == NULL) {
++ compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
++ } else {
++ compress_buf(psz_coff_buf, ul_len, 1);
++ dev_dbg(bridge, "%s: Compress COFF buffer with 1 word "
++ "for IVA!!\n", __func__);
++ }
++
++ /* Read from buffer and register object in buffer. */
++ psz_cur = psz_coff_buf;
++ while ((token = strsep(&psz_cur, seps)) && *token != '\0') {
++ /* Retrieve UUID string. */
++ uuid_uuid_from_string(token, &dsp_uuid_obj);
++
++ /* Retrieve object type */
++ token = strsep(&psz_cur, seps);
++
++ /* Retrieve object type */
++ object_type = atoi(token);
++
++ /*
++ * Apply register_fxn to the found DCD object.
++ * Possible actions include:
++ *
++ * 1) Register found DCD object.
++ * 2) Unregister found DCD object (when handle == NULL)
++ * 3) Add overlay node.
++ */
++ status =
++ register_fxn(&dsp_uuid_obj, object_type, handle);
++ if (status) {
++ /* if error occurs, break from while loop. */
++ break;
++ }
++ }
++ } else {
++ status = -EACCES;
++ }
++
++ /* Free the previously allocated dynamic buffer. */
++ kfree(psz_coff_buf);
++func_cont:
++ if (lib)
++ cod_close(lib);
++
++func_end:
++ return status;
++}
++
++/*
++ * ======== dcd_get_library_name ========
++ * Purpose:
++ * Retrieves the library name for the given UUID.
++ *
++ */
++int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ char *str_lib_name,
++ u32 *buff_size,
++ enum nldr_phase phase, bool *phase_split)
++{
++ char sz_reg_key[DCD_MAXPATHLENGTH];
++ char sz_uuid[MAXUUIDLEN];
++ u32 dw_key_len; /* Len of REG key. */
++ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
++ int status = 0;
++ struct dcd_key_elem *dcd_key = NULL;
++
++ DBC_REQUIRE(uuid_obj != NULL);
++ DBC_REQUIRE(str_lib_name != NULL);
++ DBC_REQUIRE(buff_size != NULL);
++ DBC_REQUIRE(hdcd_mgr);
++
++ dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p,"
++ " buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name,
++ buff_size);
++
++ /*
++ * Pre-determine final key length. It's length of DCD_REGKEY +
++ * "_\0" + length of sz_obj_type string + terminating NULL.
++ */
++ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
++ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
++
++ /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
++ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
++ if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, "_\0", 2);
++ else
++ status = -EPERM;
++
++ switch (phase) {
++ case NLDR_CREATE:
++ /* create phase type */
++ sprintf(sz_obj_type, "%d", DSP_DCDCREATELIBTYPE);
++ break;
++ case NLDR_EXECUTE:
++ /* execute phase type */
++ sprintf(sz_obj_type, "%d", DSP_DCDEXECUTELIBTYPE);
++ break;
++ case NLDR_DELETE:
++ /* delete phase type */
++ sprintf(sz_obj_type, "%d", DSP_DCDDELETELIBTYPE);
++ break;
++ case NLDR_NOPHASE:
++ /* known to be a dependent library */
++ sprintf(sz_obj_type, "%d", DSP_DCDLIBRARYTYPE);
++ break;
++ default:
++ status = -EINVAL;
++ DBC_ASSERT(false);
++ }
++ if (!status) {
++ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
++ DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, sz_obj_type,
++ strlen(sz_obj_type) + 1);
++ } else {
++ status = -EPERM;
++ }
++ /* Create UUID value to find match in registry. */
++ uuid_uuid_to_string(uuid_obj, sz_uuid, MAXUUIDLEN);
++ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
++ else
++ status = -EPERM;
++ }
++ if (!status) {
++ spin_lock(&dbdcd_lock);
++ list_for_each_entry(dcd_key, &reg_key_list, link) {
++ /* See if the name matches. */
++ if (!strncmp(dcd_key->name, sz_reg_key,
++ strlen(sz_reg_key) + 1))
++ break;
++ }
++ spin_unlock(&dbdcd_lock);
++ }
++
++ if (&dcd_key->link == &reg_key_list)
++ status = -ENOKEY;
++
++ /* If can't find, phases might be registered as generic LIBRARYTYPE */
++ if (status && phase != NLDR_NOPHASE) {
++ if (phase_split)
++ *phase_split = false;
++
++ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
++ if ((strlen(sz_reg_key) + strlen("_\0")) <
++ DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, "_\0", 2);
++ } else {
++ status = -EPERM;
++ }
++ sprintf(sz_obj_type, "%d", DSP_DCDLIBRARYTYPE);
++ if ((strlen(sz_reg_key) + strlen(sz_obj_type))
++ < DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, sz_obj_type,
++ strlen(sz_obj_type) + 1);
++ } else {
++ status = -EPERM;
++ }
++ uuid_uuid_to_string(uuid_obj, sz_uuid, MAXUUIDLEN);
++ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
++ else
++ status = -EPERM;
++
++ spin_lock(&dbdcd_lock);
++ list_for_each_entry(dcd_key, &reg_key_list, link) {
++ /* See if the name matches. */
++ if (!strncmp(dcd_key->name, sz_reg_key,
++ strlen(sz_reg_key) + 1))
++ break;
++ }
++ spin_unlock(&dbdcd_lock);
++
++ status = (&dcd_key->link != &reg_key_list) ?
++ 0 : -ENOKEY;
++ }
++
++ if (!status)
++ memcpy(str_lib_name, dcd_key->path, strlen(dcd_key->path) + 1);
++ return status;
++}
++
++/*
++ * ======== dcd_init ========
++ * Purpose:
++ * Initialize the DCD module.
++ */
++bool dcd_init(void)
++{
++ bool init_cod;
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (refs == 0) {
++ /* Initialize required modules. */
++ init_cod = cod_init();
++
++ if (!init_cod) {
++ ret = false;
++ /* Exit initialized modules. */
++ if (init_cod)
++ cod_exit();
++ }
++
++ INIT_LIST_HEAD(&reg_key_list);
++ }
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs == 0)));
++
++ return ret;
++}
++
++/*
++ * ======== dcd_register_object ========
++ * Purpose:
++ * Registers a node or a processor with the DCD.
++ * If psz_path_name == NULL, unregister the specified DCD object.
++ */
++int dcd_register_object(struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type,
++ char *psz_path_name)
++{
++ int status = 0;
++ char sz_reg_key[DCD_MAXPATHLENGTH];
++ char sz_uuid[MAXUUIDLEN + 1];
++ u32 dw_path_size = 0;
++ u32 dw_key_len; /* Len of REG key. */
++ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
++ struct dcd_key_elem *dcd_key = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(uuid_obj != NULL);
++ DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
++ (obj_type == DSP_DCDPROCESSORTYPE) ||
++ (obj_type == DSP_DCDLIBRARYTYPE) ||
++ (obj_type == DSP_DCDCREATELIBTYPE) ||
++ (obj_type == DSP_DCDEXECUTELIBTYPE) ||
++ (obj_type == DSP_DCDDELETELIBTYPE));
++
++ dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n",
++ __func__, uuid_obj, obj_type, psz_path_name);
++
++ /*
++ * Pre-determine final key length. It's length of DCD_REGKEY +
++ * "_\0" + length of sz_obj_type string + terminating NULL.
++ */
++ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
++ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
++
++ /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
++ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
++ if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, "_\0", 2);
++ else {
++ status = -EPERM;
++ goto func_end;
++ }
++
++ status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", obj_type);
++ if (status == -1) {
++ status = -EPERM;
++ } else {
++ status = 0;
++ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
++ DCD_MAXPATHLENGTH) {
++ strncat(sz_reg_key, sz_obj_type,
++ strlen(sz_obj_type) + 1);
++ } else
++ status = -EPERM;
++
++ /* Create UUID value to set in registry. */
++ uuid_uuid_to_string(uuid_obj, sz_uuid, MAXUUIDLEN);
++ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
++ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
++ else
++ status = -EPERM;
++ }
++
++ if (status)
++ goto func_end;
++
++ /*
++ * If psz_path_name != NULL, perform registration, otherwise,
++ * perform unregistration.
++ */
++
++ if (psz_path_name) {
++ dw_path_size = strlen(psz_path_name) + 1;
++ spin_lock(&dbdcd_lock);
++ list_for_each_entry(dcd_key, &reg_key_list, link) {
++ /* See if the name matches. */
++ if (!strncmp(dcd_key->name, sz_reg_key,
++ strlen(sz_reg_key) + 1))
++ break;
++ }
++ spin_unlock(&dbdcd_lock);
++ if (&dcd_key->link == &reg_key_list) {
++ /*
++ * Add new reg value (UUID+obj_type)
++ * with COFF path info
++ */
++
++ dcd_key = kmalloc(sizeof(struct dcd_key_elem),
++ GFP_KERNEL);
++ if (!dcd_key) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ dcd_key->path = kmalloc(strlen(sz_reg_key) + 1,
++ GFP_KERNEL);
++
++ if (!dcd_key->path) {
++ kfree(dcd_key);
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ strncpy(dcd_key->name, sz_reg_key,
++ strlen(sz_reg_key) + 1);
++ strncpy(dcd_key->path, psz_path_name ,
++ dw_path_size);
++ spin_lock(&dbdcd_lock);
++ list_add_tail(&dcd_key->link, &reg_key_list);
++ spin_unlock(&dbdcd_lock);
++ } else {
++ /* Make sure the new data is the same. */
++ if (strncmp(dcd_key->path, psz_path_name,
++ dw_path_size)) {
++ /* The caller needs a different data size! */
++ kfree(dcd_key->path);
++ dcd_key->path = kmalloc(dw_path_size,
++ GFP_KERNEL);
++ if (dcd_key->path == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ }
++
++ /* We have a match! Copy out the data. */
++ memcpy(dcd_key->path, psz_path_name, dw_path_size);
++ }
++ dev_dbg(bridge, "%s: psz_path_name=%s, dw_path_size=%d\n",
++ __func__, psz_path_name, dw_path_size);
++ } else {
++ /* Deregister an existing object */
++ spin_lock(&dbdcd_lock);
++ list_for_each_entry(dcd_key, &reg_key_list, link) {
++ if (!strncmp(dcd_key->name, sz_reg_key,
++ strlen(sz_reg_key) + 1)) {
++ list_del(&dcd_key->link);
++ kfree(dcd_key->path);
++ kfree(dcd_key);
++ break;
++ }
++ }
++ spin_unlock(&dbdcd_lock);
++ if (&dcd_key->link == &reg_key_list)
++ status = -EPERM;
++ }
++
++ if (!status) {
++ /*
++ * Because the node database has been updated through a
++ * successful object registration/de-registration operation,
++ * we need to reset the object enumeration counter to allow
++ * current enumerations to reflect this update in the node
++ * database.
++ */
++ enum_refs = 0;
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== dcd_unregister_object ========
++ * Call DCD_Register object with psz_path_name set to NULL to
++ * perform actual object de-registration.
++ */
++int dcd_unregister_object(struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type)
++{
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(uuid_obj != NULL);
++ DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
++ (obj_type == DSP_DCDPROCESSORTYPE) ||
++ (obj_type == DSP_DCDLIBRARYTYPE) ||
++ (obj_type == DSP_DCDCREATELIBTYPE) ||
++ (obj_type == DSP_DCDEXECUTELIBTYPE) ||
++ (obj_type == DSP_DCDDELETELIBTYPE));
++
++ /*
++ * When dcd_register_object is called with NULL as pathname,
++ * it indicates an unregister object operation.
++ */
++ status = dcd_register_object(uuid_obj, obj_type, NULL);
++
++ return status;
++}
++
++/*
++ **********************************************************************
++ * DCD Helper Functions
++ **********************************************************************
++ */
++
++/*
++ * ======== atoi ========
++ * Purpose:
++ * This function converts strings in decimal or hex format to integers.
++ */
++static s32 atoi(char *psz_buf)
++{
++ char *pch = psz_buf;
++ s32 base = 0;
++ unsigned long res;
++ int ret_val;
++
++ while (isspace(*pch))
++ pch++;
++
++ if (*pch == '-' || *pch == '+') {
++ base = 10;
++ pch++;
++ } else if (*pch && tolower(pch[strlen(pch) - 1]) == 'h') {
++ base = 16;
++ }
++
++ ret_val = strict_strtoul(pch, base, &res);
++
++ return ret_val ? : res;
++}
++
++/*
++ * ======== get_attrs_from_buf ========
++ * Purpose:
++ * Parse the content of a buffer filled with DSP-side data and
++ * retrieve an object's attributes from it. IMPORTANT: Assume the
++ * buffer has been converted from DSP format to GPP format.
++ */
++static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
++ enum dsp_dcdobjtype obj_type,
++ struct dcd_genericobj *gen_obj)
++{
++ int status = 0;
++ char seps[] = ", ";
++ char *psz_cur;
++ char *token;
++ s32 token_len = 0;
++ u32 i = 0;
++#ifdef _DB_TIOMAP
++ s32 entry_id;
++#endif
++
++ DBC_REQUIRE(psz_buf != NULL);
++ DBC_REQUIRE(ul_buf_size != 0);
++ DBC_REQUIRE((obj_type == DSP_DCDNODETYPE)
++ || (obj_type == DSP_DCDPROCESSORTYPE));
++ DBC_REQUIRE(gen_obj != NULL);
++
++ switch (obj_type) {
++ case DSP_DCDNODETYPE:
++ /*
++ * Parse COFF sect buffer to retrieve individual tokens used
++ * to fill in object attrs.
++ */
++ psz_cur = psz_buf;
++ token = strsep(&psz_cur, seps);
++
++ /* u32 cb_struct */
++ gen_obj->obj_data.node_obj.ndb_props.cb_struct =
++ (u32) atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* dsp_uuid ui_node_id */
++ uuid_uuid_from_string(token,
++ &gen_obj->obj_data.node_obj.ndb_props.
++ ui_node_id);
++ token = strsep(&psz_cur, seps);
++
++ /* ac_name */
++ DBC_REQUIRE(token);
++ token_len = strlen(token);
++ if (token_len > DSP_MAXNAMELEN - 1)
++ token_len = DSP_MAXNAMELEN - 1;
++
++ strncpy(gen_obj->obj_data.node_obj.ndb_props.ac_name,
++ token, token_len);
++ gen_obj->obj_data.node_obj.ndb_props.ac_name[token_len] = '\0';
++ token = strsep(&psz_cur, seps);
++ /* u32 ntype */
++ gen_obj->obj_data.node_obj.ndb_props.ntype = atoi(token);
++ token = strsep(&psz_cur, seps);
++ /* u32 cache_on_gpp */
++ gen_obj->obj_data.node_obj.ndb_props.cache_on_gpp = atoi(token);
++ token = strsep(&psz_cur, seps);
++ /* dsp_resourcereqmts dsp_resource_reqmts */
++ gen_obj->obj_data.node_obj.ndb_props.dsp_resource_reqmts.
++ cb_struct = (u32) atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.static_data_size = atoi(token);
++ token = strsep(&psz_cur, seps);
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.global_data_size = atoi(token);
++ token = strsep(&psz_cur, seps);
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.program_mem_size = atoi(token);
++ token = strsep(&psz_cur, seps);
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.uwc_execution_time = atoi(token);
++ token = strsep(&psz_cur, seps);
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.uwc_period = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.uwc_deadline = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.avg_exection_time = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.node_obj.ndb_props.
++ dsp_resource_reqmts.minimum_period = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* s32 prio */
++ gen_obj->obj_data.node_obj.ndb_props.prio = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 stack_size */
++ gen_obj->obj_data.node_obj.ndb_props.stack_size = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 sys_stack_size */
++ gen_obj->obj_data.node_obj.ndb_props.sys_stack_size =
++ atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 stack_seg */
++ gen_obj->obj_data.node_obj.ndb_props.stack_seg = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 message_depth */
++ gen_obj->obj_data.node_obj.ndb_props.message_depth =
++ atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 num_input_streams */
++ gen_obj->obj_data.node_obj.ndb_props.num_input_streams =
++ atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 num_output_streams */
++ gen_obj->obj_data.node_obj.ndb_props.num_output_streams =
++ atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* u32 utimeout */
++ gen_obj->obj_data.node_obj.ndb_props.utimeout = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* char *pstr_create_phase_fxn */
++ DBC_REQUIRE(token);
++ token_len = strlen(token);
++ gen_obj->obj_data.node_obj.pstr_create_phase_fxn =
++ kzalloc(token_len + 1, GFP_KERNEL);
++ strncpy(gen_obj->obj_data.node_obj.pstr_create_phase_fxn,
++ token, token_len);
++ gen_obj->obj_data.node_obj.pstr_create_phase_fxn[token_len] =
++ '\0';
++ token = strsep(&psz_cur, seps);
++
++ /* char *pstr_execute_phase_fxn */
++ DBC_REQUIRE(token);
++ token_len = strlen(token);
++ gen_obj->obj_data.node_obj.pstr_execute_phase_fxn =
++ kzalloc(token_len + 1, GFP_KERNEL);
++ strncpy(gen_obj->obj_data.node_obj.pstr_execute_phase_fxn,
++ token, token_len);
++ gen_obj->obj_data.node_obj.pstr_execute_phase_fxn[token_len] =
++ '\0';
++ token = strsep(&psz_cur, seps);
++
++ /* char *pstr_delete_phase_fxn */
++ DBC_REQUIRE(token);
++ token_len = strlen(token);
++ gen_obj->obj_data.node_obj.pstr_delete_phase_fxn =
++ kzalloc(token_len + 1, GFP_KERNEL);
++ strncpy(gen_obj->obj_data.node_obj.pstr_delete_phase_fxn,
++ token, token_len);
++ gen_obj->obj_data.node_obj.pstr_delete_phase_fxn[token_len] =
++ '\0';
++ token = strsep(&psz_cur, seps);
++
++ /* Segment id for message buffers */
++ gen_obj->obj_data.node_obj.msg_segid = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* Message notification type */
++ gen_obj->obj_data.node_obj.msg_notify_type = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ /* char *pstr_i_alg_name */
++ if (token) {
++ token_len = strlen(token);
++ gen_obj->obj_data.node_obj.pstr_i_alg_name =
++ kzalloc(token_len + 1, GFP_KERNEL);
++ strncpy(gen_obj->obj_data.node_obj.pstr_i_alg_name,
++ token, token_len);
++ gen_obj->obj_data.node_obj.pstr_i_alg_name[token_len] =
++ '\0';
++ token = strsep(&psz_cur, seps);
++ }
++
++ /* Load type (static, dynamic, or overlay) */
++ if (token) {
++ gen_obj->obj_data.node_obj.us_load_type = atoi(token);
++ token = strsep(&psz_cur, seps);
++ }
++
++ /* Dynamic load data requirements */
++ if (token) {
++ gen_obj->obj_data.node_obj.ul_data_mem_seg_mask =
++ atoi(token);
++ token = strsep(&psz_cur, seps);
++ }
++
++ /* Dynamic load code requirements */
++ if (token) {
++ gen_obj->obj_data.node_obj.ul_code_mem_seg_mask =
++ atoi(token);
++ token = strsep(&psz_cur, seps);
++ }
++
++ /* Extract node profiles into node properties */
++ if (token) {
++
++ gen_obj->obj_data.node_obj.ndb_props.count_profiles =
++ atoi(token);
++ for (i = 0;
++ i <
++ gen_obj->obj_data.node_obj.
++ ndb_props.count_profiles; i++) {
++ token = strsep(&psz_cur, seps);
++ if (token) {
++ /* Heap Size for the node */
++ gen_obj->obj_data.node_obj.
++ ndb_props.node_profiles[i].
++ ul_heap_size = atoi(token);
++ }
++ }
++ }
++ token = strsep(&psz_cur, seps);
++ if (token) {
++ gen_obj->obj_data.node_obj.ndb_props.stack_seg_name =
++ (u32) (token);
++ }
++
++ break;
++
++ case DSP_DCDPROCESSORTYPE:
++ /*
++ * Parse COFF sect buffer to retrieve individual tokens used
++ * to fill in object attrs.
++ */
++ psz_cur = psz_buf;
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.cb_struct = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.processor_family = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.processor_type = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.clock_rate = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.ul_external_mem_size = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.processor_id = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.ty_running_rtos = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.node_min_priority = atoi(token);
++ token = strsep(&psz_cur, seps);
++
++ gen_obj->obj_data.proc_info.node_max_priority = atoi(token);
++
++#ifdef _DB_TIOMAP
++ /* Proc object may contain additional(extended) attributes. */
++ /* attr must match proc.hxx */
++ for (entry_id = 0; entry_id < 7; entry_id++) {
++ token = strsep(&psz_cur, seps);
++ gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
++ ul_gpp_phys = atoi(token);
++
++ token = strsep(&psz_cur, seps);
++ gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
++ ul_dsp_virt = atoi(token);
++ }
++#endif
++
++ break;
++
++ default:
++ status = -EPERM;
++ break;
++ }
++
++ return status;
++}
++
++/*
++ * ======== compress_buf ========
++ * Purpose:
++ * Compress the DSP buffer, if necessary, to conform to PC format.
++ */
++static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size)
++{
++ char *p;
++ char ch;
++ char *q;
++
++ p = psz_buf;
++ if (p == NULL)
++ return;
++
++ for (q = psz_buf; q < (psz_buf + ul_buf_size);) {
++ ch = dsp_char2_gpp_char(q, char_size);
++ if (ch == '\\') {
++ q += char_size;
++ ch = dsp_char2_gpp_char(q, char_size);
++ switch (ch) {
++ case 't':
++ *p = '\t';
++ break;
++
++ case 'n':
++ *p = '\n';
++ break;
++
++ case 'r':
++ *p = '\r';
++ break;
++
++ case '0':
++ *p = '\0';
++ break;
++
++ default:
++ *p = ch;
++ break;
++ }
++ } else {
++ *p = ch;
++ }
++ p++;
++ q += char_size;
++ }
++
++ /* NULL out remainder of buffer. */
++ while (p < q)
++ *p++ = '\0';
++}
++
++/*
++ * ======== dsp_char2_gpp_char ========
++ * Purpose:
++ * Convert DSP char to host GPP char in a portable manner
++ */
++static char dsp_char2_gpp_char(char *word, s32 dsp_char_size)
++{
++ char ch = '\0';
++ char *ch_src;
++ s32 i;
++
++ for (ch_src = word, i = dsp_char_size; i > 0; i--)
++ ch |= *ch_src++;
++
++ return ch;
++}
++
++/*
++ * ======== get_dep_lib_info ========
++ */
++static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
++ struct dsp_uuid *uuid_obj,
++ u16 *num_libs,
++ u16 *num_pers_libs,
++ struct dsp_uuid *dep_lib_uuids,
++ bool *prstnt_dep_libs,
++ enum nldr_phase phase)
++{
++ struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
++ char *psz_coff_buf = NULL;
++ char *psz_cur;
++ char *psz_file_name = NULL;
++ struct cod_libraryobj *lib = NULL;
++ u32 ul_addr = 0; /* Used by cod_get_section */
++ u32 ul_len = 0; /* Used by cod_get_section */
++ u32 dw_data_size = COD_MAXPATHLENGTH;
++ char seps[] = ", ";
++ char *token = NULL;
++ bool get_uuids = (dep_lib_uuids != NULL);
++ u16 dep_libs = 0;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++
++ DBC_REQUIRE(hdcd_mgr);
++ DBC_REQUIRE(num_libs != NULL);
++ DBC_REQUIRE(uuid_obj != NULL);
++
++ /* Initialize to 0 dependent libraries, if only counting number of
++ * dependent libraries */
++ if (!get_uuids) {
++ *num_libs = 0;
++ *num_pers_libs = 0;
++ }
++
++ /* Allocate a buffer for file name */
++ psz_file_name = kzalloc(dw_data_size, GFP_KERNEL);
++ if (psz_file_name == NULL) {
++ status = -ENOMEM;
++ } else {
++ /* Get the name of the library */
++ status = dcd_get_library_name(hdcd_mgr, uuid_obj, psz_file_name,
++ &dw_data_size, phase, NULL);
++ }
++
++ /* Open the library */
++ if (!status) {
++ status = cod_open(dcd_mgr_obj->cod_mgr, psz_file_name,
++ COD_NOLOAD, &lib);
++ }
++ if (!status) {
++ /* Get dependent library section information. */
++ status = cod_get_section(lib, DEPLIBSECT, &ul_addr, &ul_len);
++
++ if (status) {
++ /* Ok, no dependent libraries */
++ ul_len = 0;
++ status = 0;
++ }
++ }
++
++ if (status || !(ul_len > 0))
++ goto func_cont;
++
++ /* Allocate zeroed buffer. */
++ psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL);
++ if (psz_coff_buf == NULL)
++ status = -ENOMEM;
++
++ /* Read section contents. */
++ status = cod_read_section(lib, DEPLIBSECT, psz_coff_buf, ul_len);
++ if (status)
++ goto func_cont;
++
++ /* Compress and format DSP buffer to conform to PC format. */
++ compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
++
++ /* Read from buffer */
++ psz_cur = psz_coff_buf;
++ while ((token = strsep(&psz_cur, seps)) && *token != '\0') {
++ if (get_uuids) {
++ if (dep_libs >= *num_libs) {
++ /* Gone beyond the limit */
++ break;
++ } else {
++ /* Retrieve UUID string. */
++ uuid_uuid_from_string(token,
++ &(dep_lib_uuids
++ [dep_libs]));
++ /* Is this library persistent? */
++ token = strsep(&psz_cur, seps);
++ prstnt_dep_libs[dep_libs] = atoi(token);
++ dep_libs++;
++ }
++ } else {
++			/* Advance to next token */
++ token = strsep(&psz_cur, seps);
++ if (atoi(token))
++ (*num_pers_libs)++;
++
++ /* Just counting number of dependent libraries */
++ (*num_libs)++;
++ }
++ }
++func_cont:
++ if (lib)
++ cod_close(lib);
++
++ /* Free previously allocated dynamic buffers. */
++ kfree(psz_file_name);
++
++ kfree(psz_coff_buf);
++
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/disp.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/disp.c 2010-08-18 11:24:23.218052787 +0300
+@@ -0,0 +1,752 @@
++/*
++ * disp.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Node Dispatcher interface. Communicates with Resource Manager Server
++ * (RMS) on DSP. Access to RMS is synchronized in NODE.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Link Driver */
++#include <dspbridge/dspdefs.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++#include <dspbridge/chnldefs.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/nodedefs.h>
++#include <dspbridge/nodepriv.h>
++#include <dspbridge/rms_sh.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/disp.h>
++
++/* Size of a reply from RMS */
++#define REPLYSIZE (3 * sizeof(rms_word))
++
++/* Reserved channel offsets for communication with RMS */
++#define CHNLTORMSOFFSET 0
++#define CHNLFROMRMSOFFSET 1
++
++#define CHNLIOREQS 1
++
++/*
++ * ======== disp_object ========
++ */
++struct disp_object {
++ struct dev_object *hdev_obj; /* Device for this processor */
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++ struct chnl_mgr *hchnl_mgr; /* Channel manager */
++ struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */
++ struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */
++ u8 *pbuf; /* Buffer for commands, replies */
++ u32 ul_bufsize; /* pbuf size in bytes */
++ u32 ul_bufsize_rms; /* pbuf size in RMS words */
++ u32 char_size; /* Size of DSP character */
++ u32 word_size; /* Size of DSP word */
++ u32 data_mau_size; /* Size of DSP Data MAU */
++};
++
++static u32 refs;
++
++static void delete_disp(struct disp_object *disp_obj);
++static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
++ struct node_strmdef strm_def, u32 max,
++ u32 chars_in_rms_word);
++static int send_message(struct disp_object *disp_obj, u32 timeout,
++ u32 ul_bytes, u32 *pdw_arg);
++
++/*
++ * ======== disp_create ========
++ * Create a NODE Dispatcher object.
++ */
++int disp_create(struct disp_object **dispatch_obj,
++ struct dev_object *hdev_obj,
++ const struct disp_attr *disp_attrs)
++{
++ struct disp_object *disp_obj;
++ struct bridge_drv_interface *intf_fxns;
++ u32 ul_chnl_id;
++ struct chnl_attr chnl_attr_obj;
++ int status = 0;
++ u8 dev_type;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dispatch_obj != NULL);
++ DBC_REQUIRE(disp_attrs != NULL);
++ DBC_REQUIRE(hdev_obj != NULL);
++
++ *dispatch_obj = NULL;
++
++ /* Allocate Node Dispatcher object */
++ disp_obj = kzalloc(sizeof(struct disp_object), GFP_KERNEL);
++ if (disp_obj == NULL)
++ status = -ENOMEM;
++ else
++ disp_obj->hdev_obj = hdev_obj;
++
++ /* Get Channel manager and Bridge function interface */
++ if (!status) {
++ status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr));
++ if (!status) {
++ (void)dev_get_intf_fxns(hdev_obj, &intf_fxns);
++ disp_obj->intf_fxns = intf_fxns;
++ }
++ }
++
++	/* check device type and decide if streams or messaging is used for
++ * RMS/EDS */
++ if (status)
++ goto func_cont;
++
++ status = dev_get_dev_type(hdev_obj, &dev_type);
++
++ if (status)
++ goto func_cont;
++
++ if (dev_type != DSP_UNIT) {
++ status = -EPERM;
++ goto func_cont;
++ }
++
++ disp_obj->char_size = DSPWORDSIZE;
++ disp_obj->word_size = DSPWORDSIZE;
++ disp_obj->data_mau_size = DSPWORDSIZE;
++ /* Open channels for communicating with the RMS */
++ chnl_attr_obj.uio_reqs = CHNLIOREQS;
++ chnl_attr_obj.event_obj = NULL;
++ ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLTORMSOFFSET;
++ status = (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_to_dsp),
++ disp_obj->hchnl_mgr,
++ CHNL_MODETODSP, ul_chnl_id,
++ &chnl_attr_obj);
++
++ if (!status) {
++ ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLFROMRMSOFFSET;
++ status =
++ (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_from_dsp),
++ disp_obj->hchnl_mgr,
++ CHNL_MODEFROMDSP, ul_chnl_id,
++ &chnl_attr_obj);
++ }
++ if (!status) {
++ /* Allocate buffer for commands, replies */
++ disp_obj->ul_bufsize = disp_attrs->ul_chnl_buf_size;
++ disp_obj->ul_bufsize_rms = RMS_COMMANDBUFSIZE;
++ disp_obj->pbuf = kzalloc(disp_obj->ul_bufsize, GFP_KERNEL);
++ if (disp_obj->pbuf == NULL)
++ status = -ENOMEM;
++ }
++func_cont:
++ if (!status)
++ *dispatch_obj = disp_obj;
++ else
++ delete_disp(disp_obj);
++
++ DBC_ENSURE((status && *dispatch_obj == NULL) ||
++ (!status && *dispatch_obj));
++ return status;
++}
++
++/*
++ * ======== disp_delete ========
++ * Delete the NODE Dispatcher.
++ */
++void disp_delete(struct disp_object *disp_obj)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(disp_obj);
++
++ delete_disp(disp_obj);
++}
++
++/*
++ * ======== disp_exit ========
++ * Discontinue usage of DISP module.
++ */
++void disp_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== disp_init ========
++ * Initialize the DISP module.
++ */
++bool disp_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++ return ret;
++}
++
++/*
++ * ======== disp_node_change_priority ========
++ * Change the priority of a node currently running on the target.
++ */
++int disp_node_change_priority(struct disp_object *disp_obj,
++ struct node_object *hnode,
++ u32 rms_fxn, nodeenv node_env, s32 prio)
++{
++ u32 dw_arg;
++ struct rms_command *rms_cmd;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(disp_obj);
++ DBC_REQUIRE(hnode != NULL);
++
++ /* Send message to RMS to change priority */
++ rms_cmd = (struct rms_command *)(disp_obj->pbuf);
++ rms_cmd->fxn = (rms_word) (rms_fxn);
++ rms_cmd->arg1 = (rms_word) node_env;
++ rms_cmd->arg2 = prio;
++ status = send_message(disp_obj, node_get_timeout(hnode),
++ sizeof(struct rms_command), &dw_arg);
++
++ return status;
++}
++
++/*
++ * ======== disp_node_create ========
++ * Create a node on the DSP by remotely calling the node's create function.
++ */
++int disp_node_create(struct disp_object *disp_obj,
++ struct node_object *hnode, u32 rms_fxn,
++ u32 ul_create_fxn,
++ const struct node_createargs *pargs,
++ nodeenv *node_env)
++{
++ struct node_msgargs node_msg_args;
++ struct node_taskargs task_arg_obj;
++ struct rms_command *rms_cmd;
++ struct rms_msg_args *pmsg_args;
++ struct rms_more_task_args *more_task_args;
++ enum node_type node_type;
++ u32 dw_length;
++ rms_word *pdw_buf = NULL;
++ u32 ul_bytes;
++ u32 i;
++ u32 total;
++ u32 chars_in_rms_word;
++ s32 task_args_offset;
++ s32 sio_in_def_offset;
++ s32 sio_out_def_offset;
++ s32 sio_defs_offset;
++ s32 args_offset = -1;
++ s32 offset;
++ struct node_strmdef strm_def;
++ u32 max;
++ int status = 0;
++ struct dsp_nodeinfo node_info;
++ u8 dev_type;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(disp_obj);
++ DBC_REQUIRE(hnode != NULL);
++ DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
++ DBC_REQUIRE(node_env != NULL);
++
++ status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
++
++ if (status)
++ goto func_end;
++
++ if (dev_type != DSP_UNIT) {
++ dev_dbg(bridge, "%s: unknown device type = 0x%x\n",
++ __func__, dev_type);
++ goto func_end;
++ }
++ DBC_REQUIRE(pargs != NULL);
++ node_type = node_get_type(hnode);
++ node_msg_args = pargs->asa.node_msg_args;
++ max = disp_obj->ul_bufsize_rms; /*Max # of RMS words that can be sent */
++ DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
++ chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
++ /* Number of RMS words needed to hold arg data */
++ dw_length =
++ (node_msg_args.arg_length + chars_in_rms_word -
++ 1) / chars_in_rms_word;
++ /* Make sure msg args and command fit in buffer */
++ total = sizeof(struct rms_command) / sizeof(rms_word) +
++ sizeof(struct rms_msg_args)
++ / sizeof(rms_word) - 1 + dw_length;
++ if (total >= max) {
++ status = -EPERM;
++ dev_dbg(bridge, "%s: Message args too large for buffer! size "
++ "= %d, max = %d\n", __func__, total, max);
++ }
++ /*
++ * Fill in buffer to send to RMS.
++ * The buffer will have the following format:
++ *
++ * RMS command:
++ * Address of RMS_CreateNode()
++ * Address of node's create function
++ * dummy argument
++ * node type
++ *
++ * Message Args:
++ * max number of messages
++ * segid for message buffer allocation
++ * notification type to use when message is received
++ * length of message arg data
++ * message args data
++ *
++ * Task Args (if task or socket node):
++ * priority
++ * stack size
++ * system stack size
++ * stack segment
++ * misc
++ * number of input streams
++ * pSTRMInDef[] - offsets of STRM definitions for input streams
++ * number of output streams
++ * pSTRMOutDef[] - offsets of STRM definitions for output
++ * streams
++ * STRMInDef[] - array of STRM definitions for input streams
++ * STRMOutDef[] - array of STRM definitions for output streams
++ *
++ * Socket Args (if DAIS socket node):
++ *
++ */
++ if (!status) {
++ total = 0; /* Total number of words in buffer so far */
++ pdw_buf = (rms_word *) disp_obj->pbuf;
++ rms_cmd = (struct rms_command *)pdw_buf;
++ rms_cmd->fxn = (rms_word) (rms_fxn);
++ rms_cmd->arg1 = (rms_word) (ul_create_fxn);
++ if (node_get_load_type(hnode) == NLDR_DYNAMICLOAD) {
++ /* Flush ICACHE on Load */
++ rms_cmd->arg2 = 1; /* dummy argument */
++ } else {
++ /* Do not flush ICACHE */
++ rms_cmd->arg2 = 0; /* dummy argument */
++ }
++ rms_cmd->data = node_get_type(hnode);
++ /*
++ * args_offset is the offset of the data field in struct
++ * rms_command structure. We need this to calculate stream
++ * definition offsets.
++ */
++ args_offset = 3;
++ total += sizeof(struct rms_command) / sizeof(rms_word);
++ /* Message args */
++ pmsg_args = (struct rms_msg_args *)(pdw_buf + total);
++ pmsg_args->max_msgs = node_msg_args.max_msgs;
++ pmsg_args->segid = node_msg_args.seg_id;
++ pmsg_args->notify_type = node_msg_args.notify_type;
++ pmsg_args->arg_length = node_msg_args.arg_length;
++ total += sizeof(struct rms_msg_args) / sizeof(rms_word) - 1;
++ memcpy(pdw_buf + total, node_msg_args.pdata,
++ node_msg_args.arg_length);
++ total += dw_length;
++ }
++ if (status)
++ goto func_end;
++
++ /* If node is a task node, copy task create arguments into buffer */
++ if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
++ task_arg_obj = pargs->asa.task_arg_obj;
++ task_args_offset = total;
++ total += sizeof(struct rms_more_task_args) / sizeof(rms_word) +
++ 1 + task_arg_obj.num_inputs + task_arg_obj.num_outputs;
++ /* Copy task arguments */
++ if (total < max) {
++ total = task_args_offset;
++ more_task_args = (struct rms_more_task_args *)(pdw_buf +
++ total);
++ /*
++ * Get some important info about the node. Note that we
++ * don't just reach into the hnode struct because
++ * that would break the node object's abstraction.
++ */
++ get_node_info(hnode, &node_info);
++ more_task_args->priority = node_info.execution_priority;
++ more_task_args->stack_size = task_arg_obj.stack_size;
++ more_task_args->sysstack_size =
++ task_arg_obj.sys_stack_size;
++ more_task_args->stack_seg = task_arg_obj.stack_seg;
++ more_task_args->heap_addr = task_arg_obj.udsp_heap_addr;
++ more_task_args->heap_size = task_arg_obj.heap_size;
++ more_task_args->misc = task_arg_obj.ul_dais_arg;
++ more_task_args->num_input_streams =
++ task_arg_obj.num_inputs;
++ total +=
++ sizeof(struct rms_more_task_args) /
++ sizeof(rms_word);
++ dev_dbg(bridge, "%s: udsp_heap_addr %x, heap_size %x\n",
++ __func__, task_arg_obj.udsp_heap_addr,
++ task_arg_obj.heap_size);
++ /* Keep track of pSIOInDef[] and pSIOOutDef[]
++ * positions in the buffer, since this needs to be
++ * filled in later. */
++ sio_in_def_offset = total;
++ total += task_arg_obj.num_inputs;
++ pdw_buf[total++] = task_arg_obj.num_outputs;
++ sio_out_def_offset = total;
++ total += task_arg_obj.num_outputs;
++ sio_defs_offset = total;
++ /* Fill SIO defs and offsets */
++ offset = sio_defs_offset;
++ for (i = 0; i < task_arg_obj.num_inputs; i++) {
++ if (status)
++ break;
++
++ pdw_buf[sio_in_def_offset + i] =
++ (offset - args_offset)
++ * (sizeof(rms_word) / DSPWORDSIZE);
++ strm_def = task_arg_obj.strm_in_def[i];
++ status =
++ fill_stream_def(pdw_buf, &total, offset,
++ strm_def, max,
++ chars_in_rms_word);
++ offset = total;
++ }
++ for (i = 0; (i < task_arg_obj.num_outputs) &&
++ (!status); i++) {
++ pdw_buf[sio_out_def_offset + i] =
++ (offset - args_offset)
++ * (sizeof(rms_word) / DSPWORDSIZE);
++ strm_def = task_arg_obj.strm_out_def[i];
++ status =
++ fill_stream_def(pdw_buf, &total, offset,
++ strm_def, max,
++ chars_in_rms_word);
++ offset = total;
++ }
++ } else {
++ /* Args won't fit */
++ status = -EPERM;
++ }
++ }
++ if (!status) {
++ ul_bytes = total * sizeof(rms_word);
++ DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
++ status = send_message(disp_obj, node_get_timeout(hnode),
++ ul_bytes, node_env);
++ if (status >= 0) {
++ /*
++ * Message successfully received from RMS.
++ * Return the status of the Node's create function
++ * on the DSP-side
++ */
++ status = (((rms_word *) (disp_obj->pbuf))[0]);
++ if (status < 0)
++ dev_dbg(bridge, "%s: DSP-side failed: 0x%x\n",
++ __func__, status);
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== disp_node_delete ========
++ * purpose:
++ * Delete a node on the DSP by remotely calling the node's delete function.
++ *
++ */
++int disp_node_delete(struct disp_object *disp_obj,
++ struct node_object *hnode, u32 rms_fxn,
++ u32 ul_delete_fxn, nodeenv node_env)
++{
++ u32 dw_arg;
++ struct rms_command *rms_cmd;
++ int status = 0;
++ u8 dev_type;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(disp_obj);
++ DBC_REQUIRE(hnode != NULL);
++
++ status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
++
++ if (!status) {
++
++ if (dev_type == DSP_UNIT) {
++
++ /*
++ * Fill in buffer to send to RMS
++ */
++ rms_cmd = (struct rms_command *)disp_obj->pbuf;
++ rms_cmd->fxn = (rms_word) (rms_fxn);
++ rms_cmd->arg1 = (rms_word) node_env;
++ rms_cmd->arg2 = (rms_word) (ul_delete_fxn);
++ rms_cmd->data = node_get_type(hnode);
++
++ status = send_message(disp_obj, node_get_timeout(hnode),
++ sizeof(struct rms_command),
++ &dw_arg);
++ if (status >= 0) {
++ /*
++ * Message successfully received from RMS.
++ * Return the status of the Node's delete
++ * function on the DSP-side
++ */
++ status = (((rms_word *) (disp_obj->pbuf))[0]);
++ if (status < 0)
++ dev_dbg(bridge, "%s: DSP-side failed: "
++ "0x%x\n", __func__, status);
++ }
++
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== disp_node_run ========
++ * purpose:
++ * Start execution of a node's execute phase, or resume execution of a node
++ * that has been suspended (via DISP_NodePause()) on the DSP.
++ */
++int disp_node_run(struct disp_object *disp_obj,
++ struct node_object *hnode, u32 rms_fxn,
++ u32 ul_execute_fxn, nodeenv node_env)
++{
++ u32 dw_arg;
++ struct rms_command *rms_cmd;
++ int status = 0;
++ u8 dev_type;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(disp_obj);
++ DBC_REQUIRE(hnode != NULL);
++
++ status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
++
++ if (!status) {
++
++ if (dev_type == DSP_UNIT) {
++
++ /*
++ * Fill in buffer to send to RMS.
++ */
++ rms_cmd = (struct rms_command *)disp_obj->pbuf;
++ rms_cmd->fxn = (rms_word) (rms_fxn);
++ rms_cmd->arg1 = (rms_word) node_env;
++ rms_cmd->arg2 = (rms_word) (ul_execute_fxn);
++ rms_cmd->data = node_get_type(hnode);
++
++ status = send_message(disp_obj, node_get_timeout(hnode),
++ sizeof(struct rms_command),
++ &dw_arg);
++ if (status >= 0) {
++ /*
++ * Message successfully received from RMS.
++ * Return the status of the Node's execute
++ * function on the DSP-side
++ */
++ status = (((rms_word *) (disp_obj->pbuf))[0]);
++ if (status < 0)
++ dev_dbg(bridge, "%s: DSP-side failed: "
++ "0x%x\n", __func__, status);
++ }
++
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== delete_disp ========
++ * purpose:
++ * Frees the resources allocated for the dispatcher.
++ */
++static void delete_disp(struct disp_object *disp_obj)
++{
++ int status = 0;
++ struct bridge_drv_interface *intf_fxns;
++
++ if (disp_obj) {
++ intf_fxns = disp_obj->intf_fxns;
++
++ /* Free Node Dispatcher resources */
++ if (disp_obj->chnl_from_dsp) {
++ /* Channel close can fail only if the channel handle
++ * is invalid. */
++ status = (*intf_fxns->pfn_chnl_close)
++ (disp_obj->chnl_from_dsp);
++ if (status) {
++ dev_dbg(bridge, "%s: Failed to close channel "
++ "from RMS: 0x%x\n", __func__, status);
++ }
++ }
++ if (disp_obj->chnl_to_dsp) {
++ status =
++ (*intf_fxns->pfn_chnl_close) (disp_obj->
++ chnl_to_dsp);
++ if (status) {
++ dev_dbg(bridge, "%s: Failed to close channel to"
++ " RMS: 0x%x\n", __func__, status);
++ }
++ }
++ kfree(disp_obj->pbuf);
++
++ kfree(disp_obj);
++ }
++}
++
++/*
++ * ======== fill_stream_def ========
++ * purpose:
++ * Fills stream definitions.
++ */
++static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
++ struct node_strmdef strm_def, u32 max,
++ u32 chars_in_rms_word)
++{
++ struct rms_strm_def *strm_def_obj;
++ u32 total = *ptotal;
++ u32 name_len;
++ u32 dw_length;
++ int status = 0;
++
++ if (total + sizeof(struct rms_strm_def) / sizeof(rms_word) >= max) {
++ status = -EPERM;
++ } else {
++ strm_def_obj = (struct rms_strm_def *)(pdw_buf + total);
++ strm_def_obj->bufsize = strm_def.buf_size;
++ strm_def_obj->nbufs = strm_def.num_bufs;
++ strm_def_obj->segid = strm_def.seg_id;
++ strm_def_obj->align = strm_def.buf_alignment;
++ strm_def_obj->timeout = strm_def.utimeout;
++ }
++
++ if (!status) {
++ /*
++ * Since we haven't added the device name yet, subtract
++ * 1 from total.
++ */
++ total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
++ DBC_REQUIRE(strm_def.sz_device);
++ dw_length = strlen(strm_def.sz_device) + 1;
++
++ /* Number of RMS_WORDS needed to hold device name */
++ name_len =
++ (dw_length + chars_in_rms_word - 1) / chars_in_rms_word;
++
++ if (total + name_len >= max) {
++ status = -EPERM;
++ } else {
++ /*
++ * Zero out last word, since the device name may not
++ * extend to completely fill this word.
++ */
++ pdw_buf[total + name_len - 1] = 0;
++ /** TODO USE SERVICES * */
++ memcpy(pdw_buf + total, strm_def.sz_device, dw_length);
++ total += name_len;
++ *ptotal = total;
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== send_message ======
++ * Send command message to RMS, get reply from RMS.
++ */
++static int send_message(struct disp_object *disp_obj, u32 timeout,
++ u32 ul_bytes, u32 *pdw_arg)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct chnl_object *chnl_obj;
++ u32 dw_arg = 0;
++ u8 *pbuf;
++ struct chnl_ioc chnl_ioc_obj;
++ int status = 0;
++
++ DBC_REQUIRE(pdw_arg != NULL);
++
++ *pdw_arg = (u32) NULL;
++ intf_fxns = disp_obj->intf_fxns;
++ chnl_obj = disp_obj->chnl_to_dsp;
++ pbuf = disp_obj->pbuf;
++
++ /* Send the command */
++ status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 0,
++ 0L, dw_arg);
++ if (status)
++ goto func_end;
++
++ status =
++ (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
++ if (!status) {
++ if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
++ if (CHNL_IS_TIMED_OUT(chnl_ioc_obj))
++ status = -ETIME;
++ else
++ status = -EPERM;
++ }
++ }
++ /* Get the reply */
++ if (status)
++ goto func_end;
++
++ chnl_obj = disp_obj->chnl_from_dsp;
++ ul_bytes = REPLYSIZE;
++ status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes,
++ 0, 0L, dw_arg);
++ if (status)
++ goto func_end;
++
++ status =
++ (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
++ if (!status) {
++ if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
++ status = -ETIME;
++ } else if (chnl_ioc_obj.byte_size < ul_bytes) {
++ /* Did not get all of the reply from the RMS */
++ status = -EPERM;
++ } else {
++ if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
++ DBC_ASSERT(chnl_ioc_obj.pbuf == pbuf);
++ status = (*((rms_word *) chnl_ioc_obj.pbuf));
++ *pdw_arg =
++ (((rms_word *) (chnl_ioc_obj.pbuf))[1]);
++ } else {
++ status = -EPERM;
++ }
++ }
++ }
++func_end:
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/drv.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/drv.c 2010-08-18 11:24:23.218052787 +0300
+@@ -0,0 +1,929 @@
++/*
++ * drv.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge resource allocation module.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/list.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/drv.h>
++#include <dspbridge/dev.h>
++
++#include <dspbridge/node.h>
++#include <dspbridge/proc.h>
++#include <dspbridge/strm.h>
++#include <dspbridge/nodepriv.h>
++#include <dspbridge/dspchnl.h>
++#include <dspbridge/resourcecleanup.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++struct drv_object {
++ struct lst_list *dev_list;
++ struct lst_list *dev_node_string;
++};
++
++/*
++ * This is the Device Extension. Named with the Prefix
++ * DRV_ since it is living in this module
++ */
++struct drv_ext {
++ struct list_head link;
++ char sz_string[MAXREGPATHLENGTH];
++};
++
++/* ----------------------------------- Globals */
++static s32 refs;
++static bool ext_phys_mem_pool_enabled;
++struct ext_phys_mem_pool {
++ u32 phys_mem_base;
++ u32 phys_mem_size;
++ u32 virt_mem_base;
++ u32 next_phys_alloc_ptr;
++};
++static struct ext_phys_mem_pool ext_mem_pool;
++
++/* ----------------------------------- Function Prototypes */
++static int request_bridge_resources(struct cfg_hostres *res);
++
++
++/* GPP PROCESS CLEANUP CODE */
++
++static int drv_proc_free_node_res(int id, void *p, void *data);
++
++/* Allocate and add a node resource element
++* This function is called from .Node_Allocate. */
++int drv_insert_node_res_element(void *hnode, void *node_resource,
++ void *process_ctxt)
++{
++ struct node_res_object **node_res_obj =
++ (struct node_res_object **)node_resource;
++ struct process_context *ctxt = (struct process_context *)process_ctxt;
++ int status = 0;
++ int retval;
++
++ *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
++ if (!*node_res_obj) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ (*node_res_obj)->hnode = hnode;
++ retval = idr_get_new(ctxt->node_id, *node_res_obj,
++ &(*node_res_obj)->id);
++ if (retval == -EAGAIN) {
++ if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
++ pr_err("%s: OUT OF MEMORY\n", __func__);
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ retval = idr_get_new(ctxt->node_id, *node_res_obj,
++ &(*node_res_obj)->id);
++ }
++ if (retval) {
++ pr_err("%s: FAILED, IDR is FULL\n", __func__);
++ status = -EFAULT;
++ }
++func_end:
++ if (status)
++ kfree(*node_res_obj);
++
++ return status;
++}
++
++/* Release all Node resources and its context
++ * Actual Node De-Allocation */
++static int drv_proc_free_node_res(int id, void *p, void *data)
++{
++ struct process_context *ctxt = data;
++ int status;
++ struct node_res_object *node_res_obj = p;
++ u32 node_state;
++
++ if (node_res_obj->node_allocated) {
++ node_state = node_get_state(node_res_obj->hnode);
++ if (node_state <= NODE_DELETING) {
++ if ((node_state == NODE_RUNNING) ||
++ (node_state == NODE_PAUSED) ||
++ (node_state == NODE_TERMINATING))
++ node_terminate
++ (node_res_obj->hnode, &status);
++
++ node_delete(node_res_obj, ctxt);
++ }
++ }
++
++ return 0;
++}
++
++/* Release all Mapped and Reserved DMM resources */
++int drv_remove_all_dmm_res_elements(void *process_ctxt)
++{
++ struct process_context *ctxt = (struct process_context *)process_ctxt;
++ int status = 0;
++ struct dmm_map_object *temp_map, *map_obj;
++ struct dmm_rsv_object *temp_rsv, *rsv_obj;
++
++ /* Free DMM mapped memory resources */
++ list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
++ status = proc_un_map(ctxt->hprocessor,
++ (void *)map_obj->dsp_addr, ctxt);
++ if (status)
++ pr_err("%s: proc_un_map failed!"
++ " status = 0x%xn", __func__, status);
++ }
++
++ /* Free DMM reserved memory resources */
++ list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
++ status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
++ rsv_obj->dsp_reserved_addr,
++ ctxt);
++ if (status)
++ pr_err("%s: proc_un_reserve_memory failed!"
++ " status = 0x%xn", __func__, status);
++ }
++ return status;
++}
++
++/* Update Node allocation status */
++void drv_proc_node_update_status(void *node_resource, s32 status)
++{
++ struct node_res_object *node_res_obj =
++ (struct node_res_object *)node_resource;
++ DBC_ASSERT(node_resource != NULL);
++ node_res_obj->node_allocated = status;
++}
++
++/* Update Node Heap status */
++void drv_proc_node_update_heap_status(void *node_resource, s32 status)
++{
++ struct node_res_object *node_res_obj =
++ (struct node_res_object *)node_resource;
++ DBC_ASSERT(node_resource != NULL);
++ node_res_obj->heap_allocated = status;
++}
++
++/* Release all Node resources and its context
++* This is called from .bridge_release.
++ */
++int drv_remove_all_node_res_elements(void *process_ctxt)
++{
++ struct process_context *ctxt = process_ctxt;
++
++ idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
++ idr_destroy(ctxt->node_id);
++
++ return 0;
++}
++
++/* Allocate the STRM resource element
++* This is called after the actual resource is allocated
++ */
++int drv_proc_insert_strm_res_element(void *stream_obj,
++ void *strm_res, void *process_ctxt)
++{
++ struct strm_res_object **pstrm_res =
++ (struct strm_res_object **)strm_res;
++ struct process_context *ctxt = (struct process_context *)process_ctxt;
++ int status = 0;
++ int retval;
++
++ *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
++ if (*pstrm_res == NULL) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ (*pstrm_res)->hstream = stream_obj;
++ retval = idr_get_new(ctxt->stream_id, *pstrm_res,
++ &(*pstrm_res)->id);
++ if (retval == -EAGAIN) {
++ if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
++ pr_err("%s: OUT OF MEMORY\n", __func__);
++ status = -ENOMEM;
++ goto func_end;
++ }
++
++ retval = idr_get_new(ctxt->stream_id, *pstrm_res,
++ &(*pstrm_res)->id);
++ }
++ if (retval) {
++ pr_err("%s: FAILED, IDR is FULL\n", __func__);
++ status = -EPERM;
++ }
++
++func_end:
++ return status;
++}
++
++static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
++{
++ struct process_context *ctxt = process_ctxt;
++ struct strm_res_object *strm_res = p;
++ struct stream_info strm_info;
++ struct dsp_streaminfo user;
++ u8 **ap_buffer = NULL;
++ u8 *buf_ptr;
++ u32 ul_bytes;
++ u32 dw_arg;
++ s32 ul_buf_size;
++
++ if (strm_res->num_bufs) {
++ ap_buffer = kmalloc((strm_res->num_bufs *
++ sizeof(u8 *)), GFP_KERNEL);
++ if (ap_buffer) {
++ strm_free_buffer(strm_res,
++ ap_buffer,
++ strm_res->num_bufs,
++ ctxt);
++ kfree(ap_buffer);
++ }
++ }
++ strm_info.user_strm = &user;
++ user.number_bufs_in_stream = 0;
++ strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
++ while (user.number_bufs_in_stream--)
++ strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
++ (u32 *) &ul_buf_size, &dw_arg);
++ strm_close(strm_res, ctxt);
++ return 0;
++}
++
++/* Release all Stream resources and its context
++* This is called from .bridge_release.
++ */
++int drv_remove_all_strm_res_elements(void *process_ctxt)
++{
++ struct process_context *ctxt = process_ctxt;
++
++ idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
++ idr_destroy(ctxt->stream_id);
++
++ return 0;
++}
++
++/* Updating the stream resource element */
++int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
++{
++ int status = 0;
++ struct strm_res_object **strm_res =
++ (struct strm_res_object **)strm_resources;
++
++ (*strm_res)->num_bufs = num_bufs;
++ return status;
++}
++
++/* GPP PROCESS CLEANUP CODE END */
++
++/*
++ * ======== = drv_create ======== =
++ * Purpose:
++ * DRV Object gets created only once during Driver Loading.
++ */
++int drv_create(struct drv_object **drv_obj)
++{
++ int status = 0;
++ struct drv_object *pdrv_object = NULL;
++
++ DBC_REQUIRE(drv_obj != NULL);
++ DBC_REQUIRE(refs > 0);
++
++ pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
++ if (pdrv_object) {
++ /* Create and Initialize List of device objects */
++ pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ if (pdrv_object->dev_list) {
++ /* Create and Initialize List of device Extension */
++ pdrv_object->dev_node_string =
++ kzalloc(sizeof(struct lst_list), GFP_KERNEL);
++ if (!(pdrv_object->dev_node_string)) {
++ status = -EPERM;
++ } else {
++ INIT_LIST_HEAD(&pdrv_object->
++ dev_node_string->head);
++ INIT_LIST_HEAD(&pdrv_object->dev_list->head);
++ }
++ } else {
++ status = -ENOMEM;
++ }
++ } else {
++ status = -ENOMEM;
++ }
++ /* Store the DRV Object in the Registry */
++ if (!status)
++ status = cfg_set_object((u32) pdrv_object, REG_DRV_OBJECT);
++ if (!status) {
++ *drv_obj = pdrv_object;
++ } else {
++ kfree(pdrv_object->dev_list);
++ kfree(pdrv_object->dev_node_string);
++ /* Free the DRV Object */
++ kfree(pdrv_object);
++ }
++
++ DBC_ENSURE(status || pdrv_object);
++ return status;
++}
++
++/*
++ * ======== drv_exit ========
++ * Purpose:
++ * Discontinue usage of the DRV module.
++ */
++void drv_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== = drv_destroy ======== =
++ * purpose:
++ * Invoked during bridge de-initialization
++ */
++int drv_destroy(struct drv_object *driver_obj)
++{
++ int status = 0;
++ struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(pdrv_object);
++
++ /*
++ * Delete the List if it exists.Should not come here
++ * as the drv_remove_dev_object and the Last drv_request_resources
++ * removes the list if the lists are empty.
++ */
++ kfree(pdrv_object->dev_list);
++ kfree(pdrv_object->dev_node_string);
++ kfree(pdrv_object);
++ /* Update the DRV Object in Registry to be 0 */
++ (void)cfg_set_object(0, REG_DRV_OBJECT);
++
++ return status;
++}
++
++/*
++ * ======== drv_get_dev_object ========
++ * Purpose:
++ * Given a index, returns a handle to DevObject from the list.
++ */
++int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
++ struct dev_object **device_obj)
++{
++ int status = 0;
++#ifdef CONFIG_TIDSPBRIDGE_DEBUG
++ /* used only for Assertions and debug messages */
++ struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
++#endif
++ struct dev_object *dev_obj;
++ u32 i;
++ DBC_REQUIRE(pdrv_obj);
++ DBC_REQUIRE(device_obj != NULL);
++ DBC_REQUIRE(index >= 0);
++ DBC_REQUIRE(refs > 0);
++ DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));
++
++ dev_obj = (struct dev_object *)drv_get_first_dev_object();
++ for (i = 0; i < index; i++) {
++ dev_obj =
++ (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
++ }
++ if (dev_obj) {
++ *device_obj = (struct dev_object *)dev_obj;
++ } else {
++ *device_obj = NULL;
++ status = -EPERM;
++ }
++
++ return status;
++}
++
++/*
++ * ======== drv_get_first_dev_object ========
++ * Purpose:
++ * Retrieve the first Device Object handle from an internal linked list of
++ * of DEV_OBJECTs maintained by DRV.
++ */
++u32 drv_get_first_dev_object(void)
++{
++ u32 dw_dev_object = 0;
++ struct drv_object *pdrv_obj;
++
++ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
++ if ((pdrv_obj->dev_list != NULL) &&
++ !LST_IS_EMPTY(pdrv_obj->dev_list))
++ dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
++ }
++
++ return dw_dev_object;
++}
++
++/*
++ * ======== DRV_GetFirstDevNodeString ========
++ * Purpose:
++ * Retrieve the first Device Extension from an internal linked list of
++ * of Pointer to dev_node Strings maintained by DRV.
++ */
++u32 drv_get_first_dev_extension(void)
++{
++ u32 dw_dev_extension = 0;
++ struct drv_object *pdrv_obj;
++
++ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
++
++ if ((pdrv_obj->dev_node_string != NULL) &&
++ !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
++ dw_dev_extension =
++ (u32) lst_first(pdrv_obj->dev_node_string);
++ }
++ }
++
++ return dw_dev_extension;
++}
++
++/*
++ * ======== drv_get_next_dev_object ========
++ * Purpose:
++ * Retrieve the next Device Object handle from an internal linked list of
++ * of DEV_OBJECTs maintained by DRV, after having previously called
++ * drv_get_first_dev_object() and zero or more DRV_GetNext.
++ */
++u32 drv_get_next_dev_object(u32 hdev_obj)
++{
++ u32 dw_next_dev_object = 0;
++ struct drv_object *pdrv_obj;
++
++ DBC_REQUIRE(hdev_obj != 0);
++
++ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
++
++ if ((pdrv_obj->dev_list != NULL) &&
++ !LST_IS_EMPTY(pdrv_obj->dev_list)) {
++ dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
++ (struct list_head *)
++ hdev_obj);
++ }
++ }
++ return dw_next_dev_object;
++}
++
++/*
++ * ======== drv_get_next_dev_extension ========
++ * Purpose:
++ * Retrieve the next Device Extension from an internal linked list of
++ * of pointer to DevNodeString maintained by DRV, after having previously
++ * called drv_get_first_dev_extension() and zero or more
++ * drv_get_next_dev_extension().
++ */
++u32 drv_get_next_dev_extension(u32 dev_extension)
++{
++ u32 dw_dev_extension = 0;
++ struct drv_object *pdrv_obj;
++
++ DBC_REQUIRE(dev_extension != 0);
++
++ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
++ if ((pdrv_obj->dev_node_string != NULL) &&
++ !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
++ dw_dev_extension =
++ (u32) lst_next(pdrv_obj->dev_node_string,
++ (struct list_head *)dev_extension);
++ }
++ }
++
++ return dw_dev_extension;
++}
++
++/*
++ * ======== drv_init ========
++ * Purpose:
++ * Initialize DRV module private state.
++ */
++int drv_init(void)
++{
++ s32 ret = 1; /* function return value */
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
++
++/*
++ * ======== drv_insert_dev_object ========
++ * Purpose:
++ * Insert a DevObject into the list of Manager object.
++ */
++int drv_insert_dev_object(struct drv_object *driver_obj,
++ struct dev_object *hdev_obj)
++{
++ struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hdev_obj != NULL);
++ DBC_REQUIRE(pdrv_object);
++ DBC_ASSERT(pdrv_object->dev_list);
++
++ lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);
++
++ DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));
++
++ return 0;
++}
++
++/*
++ * ======== drv_remove_dev_object ========
++ * Purpose:
++ * Search for and remove a DeviceObject from the given list of DRV
++ * objects.
++ */
++int drv_remove_dev_object(struct drv_object *driver_obj,
++ struct dev_object *hdev_obj)
++{
++ int status = -EPERM;
++ struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
++ struct list_head *cur_elem;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(pdrv_object);
++ DBC_REQUIRE(hdev_obj != NULL);
++
++ DBC_REQUIRE(pdrv_object->dev_list != NULL);
++ DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));
++
++ /* Search list for p_proc_object: */
++ for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
++ cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
++ /* If found, remove it. */
++ if ((struct dev_object *)cur_elem == hdev_obj) {
++ lst_remove_elem(pdrv_object->dev_list, cur_elem);
++ status = 0;
++ break;
++ }
++ }
++ /* Remove list if empty. */
++ if (LST_IS_EMPTY(pdrv_object->dev_list)) {
++ kfree(pdrv_object->dev_list);
++ pdrv_object->dev_list = NULL;
++ }
++ DBC_ENSURE((pdrv_object->dev_list == NULL) ||
++ !LST_IS_EMPTY(pdrv_object->dev_list));
++
++ return status;
++}
++
++/*
++ * ======== drv_request_resources ========
++ * Purpose:
++ * Requests resources from the OS.
++ */
++int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
++{
++ int status = 0;
++ struct drv_object *pdrv_object;
++ struct drv_ext *pszdev_node;
++
++ DBC_REQUIRE(dw_context != 0);
++ DBC_REQUIRE(dev_node_strg != NULL);
++
++ /*
++ * Allocate memory to hold the string. This will live untill
++ * it is freed in the Release resources. Update the driver object
++ * list.
++ */
++
++ status = cfg_get_object((u32 *) &pdrv_object, REG_DRV_OBJECT);
++ if (!status) {
++ pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
++ if (pszdev_node) {
++ lst_init_elem(&pszdev_node->link);
++ strncpy(pszdev_node->sz_string,
++ (char *)dw_context, MAXREGPATHLENGTH - 1);
++ pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
++ /* Update the Driver Object List */
++ *dev_node_strg = (u32) pszdev_node->sz_string;
++ lst_put_tail(pdrv_object->dev_node_string,
++ (struct list_head *)pszdev_node);
++ } else {
++ status = -ENOMEM;
++ *dev_node_strg = 0;
++ }
++ } else {
++ dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
++ __func__);
++ *dev_node_strg = 0;
++ }
++
++ DBC_ENSURE((!status && dev_node_strg != NULL &&
++ !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
++ (status && *dev_node_strg == 0));
++
++ return status;
++}
++
++/*
++ * ======== drv_release_resources ========
++ * Purpose:
++ * Releases resources from the OS.
++ */
++int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
++{
++ int status = 0;
++ struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
++ struct drv_ext *pszdev_node;
++
++ /*
++ * Irrespective of the status go ahead and clean it
++ * The following will over write the status.
++ */
++ for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
++ pszdev_node != NULL; pszdev_node = (struct drv_ext *)
++ drv_get_next_dev_extension((u32) pszdev_node)) {
++ if (!pdrv_object->dev_node_string) {
++ /* When this could happen? */
++ continue;
++ }
++ if ((u32) pszdev_node == dw_context) {
++ /* Found it */
++ /* Delete from the Driver object list */
++ lst_remove_elem(pdrv_object->dev_node_string,
++ (struct list_head *)pszdev_node);
++ kfree((void *)pszdev_node);
++ break;
++ }
++ /* Delete the List if it is empty */
++ if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
++ kfree(pdrv_object->dev_node_string);
++ pdrv_object->dev_node_string = NULL;
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== request_bridge_resources ========
++ * Purpose:
++ * Reserves shared memory for bridge.
++ */
++static int request_bridge_resources(struct cfg_hostres *res)
++{
++ struct cfg_hostres *host_res = res;
++
++ /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
++ host_res->num_mem_windows = 2;
++
++ /* First window is for DSP internal memory */
++ host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
++ dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
++ dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
++ dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
++
++ /* for 24xx base port is not mapping the mamory for DSP
++ * internal memory TODO Do a ioremap here */
++ /* Second window is for DSP external memory shared with MPU */
++
++ /* These are hard-coded values */
++ host_res->birq_registers = 0;
++ host_res->birq_attrib = 0;
++ host_res->dw_offset_for_monitor = 0;
++ host_res->dw_chnl_offset = 0;
++ /* CHNL_MAXCHANNELS */
++ host_res->dw_num_chnls = CHNL_MAXCHANNELS;
++ host_res->dw_chnl_buf_size = 0x400;
++
++ return 0;
++}
++
++/*
++ * ======== drv_request_bridge_res_dsp ========
++ * Purpose:
++ * Reserves shared memory for bridge.
++ */
++int drv_request_bridge_res_dsp(void **phost_resources)
++{
++ int status = 0;
++ struct cfg_hostres *host_res;
++ u32 dw_buff_size;
++ u32 dma_addr;
++ u32 shm_size;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ dw_buff_size = sizeof(struct cfg_hostres);
++
++ host_res = kzalloc(dw_buff_size, GFP_KERNEL);
++
++ if (host_res != NULL) {
++ request_bridge_resources(host_res);
++ /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
++ host_res->num_mem_windows = 4;
++
++ host_res->dw_mem_base[0] = 0;
++ host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
++ OMAP_DSP_MEM1_SIZE);
++ host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
++ OMAP_DSP_MEM2_SIZE);
++ host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
++ OMAP_DSP_MEM3_SIZE);
++ host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
++ OMAP_PER_CM_SIZE);
++ host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
++ OMAP_PER_PRM_SIZE);
++ host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
++ OMAP_CORE_PRM_SIZE);
++ host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
++ OMAP_DMMU_SIZE);
++
++ dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
++ host_res->dw_mem_base[0]);
++ dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
++ host_res->dw_mem_base[1]);
++ dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
++ host_res->dw_mem_base[2]);
++ dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
++ host_res->dw_mem_base[3]);
++ dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
++ host_res->dw_mem_base[4]);
++ dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
++
++ shm_size = drv_datap->shm_size;
++ if (shm_size >= 0x10000) {
++ /* Allocate Physically contiguous,
++ * non-cacheable memory */
++ host_res->dw_mem_base[1] =
++ (u32) mem_alloc_phys_mem(shm_size, 0x100000,
++ &dma_addr);
++ if (host_res->dw_mem_base[1] == 0) {
++ status = -ENOMEM;
++ pr_err("shm reservation Failed\n");
++ } else {
++ host_res->dw_mem_length[1] = shm_size;
++ host_res->dw_mem_phys[1] = dma_addr;
++
++ dev_dbg(bridge, "%s: Bridge shm address 0x%x "
++ "dma_addr %x size %x\n", __func__,
++ host_res->dw_mem_base[1],
++ dma_addr, shm_size);
++ }
++ }
++ if (!status) {
++ /* These are hard-coded values */
++ host_res->birq_registers = 0;
++ host_res->birq_attrib = 0;
++ host_res->dw_offset_for_monitor = 0;
++ host_res->dw_chnl_offset = 0;
++ /* CHNL_MAXCHANNELS */
++ host_res->dw_num_chnls = CHNL_MAXCHANNELS;
++ host_res->dw_chnl_buf_size = 0x400;
++ dw_buff_size = sizeof(struct cfg_hostres);
++ }
++ *phost_resources = host_res;
++ }
++ /* End Mem alloc */
++ return status;
++}
++
++void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
++{
++ u32 pool_virt_base;
++
++ /* get the virtual address for the physical memory pool passed */
++ pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);
++
++ if ((void **)pool_virt_base == NULL) {
++ pr_err("%s: external physical memory map failed\n", __func__);
++ ext_phys_mem_pool_enabled = false;
++ } else {
++ ext_mem_pool.phys_mem_base = pool_phys_base;
++ ext_mem_pool.phys_mem_size = pool_size;
++ ext_mem_pool.virt_mem_base = pool_virt_base;
++ ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
++ ext_phys_mem_pool_enabled = true;
++ }
++}
++
++void mem_ext_phys_pool_release(void)
++{
++ if (ext_phys_mem_pool_enabled) {
++ iounmap((void *)(ext_mem_pool.virt_mem_base));
++ ext_phys_mem_pool_enabled = false;
++ }
++}
++
++/*
++ * ======== mem_ext_phys_mem_alloc ========
++ * Purpose:
++ * Allocate physically contiguous, uncached memory from external memory pool
++ */
++
++static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 * phys_addr)
++{
++ u32 new_alloc_ptr;
++ u32 offset;
++ u32 virt_addr;
++
++ if (align == 0)
++ align = 1;
++
++ if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
++ - ext_mem_pool.next_phys_alloc_ptr)) {
++ phys_addr = NULL;
++ return NULL;
++ } else {
++ offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
++ if (offset == 0)
++ new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
++ else
++ new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
++ (align - offset);
++ if ((new_alloc_ptr + bytes) <=
++ (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
++ /* we can allocate */
++ *phys_addr = new_alloc_ptr;
++ ext_mem_pool.next_phys_alloc_ptr =
++ new_alloc_ptr + bytes;
++ virt_addr =
++ ext_mem_pool.virt_mem_base + (new_alloc_ptr -
++ ext_mem_pool.
++ phys_mem_base);
++ return (void *)virt_addr;
++ } else {
++ *phys_addr = 0;
++ return NULL;
++ }
++ }
++}
++
++/*
++ * ======== mem_alloc_phys_mem ========
++ * Purpose:
++ * Allocate physically contiguous, uncached memory
++ */
++void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
++ u32 *physical_address)
++{
++ void *va_mem = NULL;
++ dma_addr_t pa_mem;
++
++ if (byte_size > 0) {
++ if (ext_phys_mem_pool_enabled) {
++ va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
++ (u32 *) &pa_mem);
++ } else
++ va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
++ GFP_KERNEL);
++ if (va_mem == NULL)
++ *physical_address = 0;
++ else
++ *physical_address = pa_mem;
++ }
++ return va_mem;
++}
++
++/*
++ * ======== mem_free_phys_mem ========
++ * Purpose:
++ * Free the given block of physically contiguous memory.
++ */
++void mem_free_phys_mem(void *virtual_address, u32 physical_address,
++ u32 byte_size)
++{
++ DBC_REQUIRE(virtual_address != NULL);
++
++ if (!ext_phys_mem_pool_enabled)
++ dma_free_coherent(NULL, byte_size, virtual_address,
++ physical_address);
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/drv_interface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/drv_interface.c 2010-08-18 11:24:23.218052787 +0300
+@@ -0,0 +1,656 @@
++/*
++ * drv_interface.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge driver interface.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- Host OS */
++
++#include <dspbridge/host_os.h>
++#include <linux/types.h>
++#include <linux/platform_device.h>
++#include <linux/pm.h>
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/moduleparam.h>
++#include <linux/cdev.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/services.h>
++#include <dspbridge/clk.h>
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dspapi-ioctl.h>
++#include <dspbridge/dspapi.h>
++#include <dspbridge/dspdrv.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/pwr.h>
++
++/* ----------------------------------- This */
++#include <drv_interface.h>
++
++#include <dspbridge/cfg.h>
++#include <dspbridge/resourcecleanup.h>
++#include <dspbridge/chnl.h>
++#include <dspbridge/proc.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/drvdefs.h>
++#include <dspbridge/drv.h>
++
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++#include <mach-omap2/omap3-opp.h>
++#endif
++
++#define BRIDGE_NAME "C6410"
++/* ----------------------------------- Globals */
++#define DRIVER_NAME "DspBridge"
++#define DSPBRIDGE_VERSION "0.3"
++s32 dsp_debug;
++
++struct platform_device *omap_dspbridge_dev;
++struct device *bridge;
++
++/* This is a test variable used by Bridge to test different sleep states */
++s32 dsp_test_sleepstate;
++
++static struct cdev bridge_cdev;
++
++static struct class *bridge_class;
++
++static u32 driver_context;
++static s32 driver_major;
++static char *base_img;
++char *iva_img;
++static s32 shm_size = 0x500000; /* 5 MB */
++static int tc_wordswapon; /* Default value is always false */
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++#define REC_TIMEOUT 5000 /*recovery timeout in msecs */
++static atomic_t bridge_cref; /* number of bridge open handles */
++static struct workqueue_struct *bridge_rec_queue;
++static struct work_struct bridge_recovery_work;
++static DECLARE_COMPLETION(bridge_comp);
++static DECLARE_COMPLETION(bridge_open_comp);
++static bool recover;
++#endif
++
++#ifdef CONFIG_PM
++struct omap34_xx_bridge_suspend_data {
++ int suspended;
++ wait_queue_head_t suspend_wq;
++};
++
++static struct omap34_xx_bridge_suspend_data bridge_suspend_data;
++
++static int omap34_xxbridge_suspend_lockout(struct omap34_xx_bridge_suspend_data
++ *s, struct file *f)
++{
++ if ((s)->suspended) {
++ if ((f)->f_flags & O_NONBLOCK)
++ return -EPERM;
++ wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0);
++ }
++ return 0;
++}
++#endif
++
++module_param(dsp_debug, int, 0);
++MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false");
++
++module_param(dsp_test_sleepstate, int, 0);
++MODULE_PARM_DESC(dsp_test_sleepstate, "DSP Sleep state = 0");
++
++module_param(base_img, charp, 0);
++MODULE_PARM_DESC(base_img, "DSP base image, default = NULL");
++
++module_param(shm_size, int, 0);
++MODULE_PARM_DESC(shm_size, "shm size, default = 4 MB, minimum = 64 KB");
++
++module_param(tc_wordswapon, int, 0);
++MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. default = 0");
++
++MODULE_AUTHOR("Texas Instruments");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DSPBRIDGE_VERSION);
++
++static char *driver_name = DRIVER_NAME;
++
++static const struct file_operations bridge_fops = {
++ .open = bridge_open,
++ .release = bridge_release,
++ .unlocked_ioctl = bridge_ioctl,
++ .mmap = bridge_mmap,
++};
++
++#ifdef CONFIG_PM
++static u32 time_out = 1000;
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++s32 dsp_max_opps = VDD1_OPP5;
++#endif
++
++/* Maximum Opps that can be requested by IVA */
++/*vdd1 rate table */
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++const struct omap_opp vdd1_rate_table_bridge[] = {
++ {0, 0, 0},
++ /*OPP1 */
++ {S125M, VDD1_OPP1, 0},
++ /*OPP2 */
++ {S250M, VDD1_OPP2, 0},
++ /*OPP3 */
++ {S500M, VDD1_OPP3, 0},
++ /*OPP4 */
++ {S550M, VDD1_OPP4, 0},
++ /*OPP5 */
++ {S600M, VDD1_OPP5, 0},
++};
++#endif
++#endif
++
++struct dspbridge_platform_data *omap_dspbridge_pdata;
++
++u32 vdd1_dsp_freq[6][4] = {
++ {0, 0, 0, 0},
++ /*OPP1 */
++ {0, 90000, 0, 86000},
++ /*OPP2 */
++ {0, 180000, 80000, 170000},
++ /*OPP3 */
++ {0, 360000, 160000, 340000},
++ /*OPP4 */
++ {0, 396000, 325000, 376000},
++ /*OPP5 */
++ {0, 430000, 355000, 430000},
++};
++
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++static void bridge_recover(struct work_struct *work)
++{
++ struct dev_object *dev;
++ struct cfg_devnode *dev_node;
++ if (atomic_read(&bridge_cref)) {
++ INIT_COMPLETION(bridge_comp);
++ while (!wait_for_completion_timeout(&bridge_comp,
++ msecs_to_jiffies(REC_TIMEOUT)))
++ pr_info("%s:%d handle(s) still opened\n",
++ __func__, atomic_read(&bridge_cref));
++ }
++ dev = dev_get_first();
++ dev_get_dev_node(dev, &dev_node);
++ if (!dev_node || proc_auto_start(dev_node, dev))
++ pr_err("DSP could not be restarted\n");
++ recover = false;
++ complete_all(&bridge_open_comp);
++}
++
++void bridge_recover_schedule(void)
++{
++ INIT_COMPLETION(bridge_open_comp);
++ recover = true;
++ queue_work(bridge_rec_queue, &bridge_recovery_work);
++}
++#endif
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++static int dspbridge_scale_notification(struct notifier_block *op,
++ unsigned long val, void *ptr)
++{
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++
++ if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
++ pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
++
++ return 0;
++}
++
++static struct notifier_block iva_clk_notifier = {
++ .notifier_call = dspbridge_scale_notification,
++ NULL,
++};
++#endif
++
++/**
++ * omap3_bridge_startup() - perform low lever initializations
++ * @pdev: pointer to platform device
++ *
++ * Initializes recovery, PM and DVFS required data, before calling
++ * clk and memory init routines.
++ */
++static int omap3_bridge_startup(struct platform_device *pdev)
++{
++ struct dspbridge_platform_data *pdata = pdev->dev.platform_data;
++ struct drv_data *drv_datap = NULL;
++ u32 phys_membase, phys_memsize;
++ int err;
++
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++ bridge_rec_queue = create_workqueue("bridge_rec_queue");
++ INIT_WORK(&bridge_recovery_work, bridge_recover);
++ INIT_COMPLETION(bridge_comp);
++#endif
++
++#ifdef CONFIG_PM
++ /* Initialize the wait queue */
++ bridge_suspend_data.suspended = 0;
++ init_waitqueue_head(&bridge_suspend_data.suspend_wq);
++
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ for (i = 0; i < 6; i++)
++ pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate;
++
++ err = cpufreq_register_notifier(&iva_clk_notifier,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ if (err)
++ pr_err("%s: clk_notifier_register failed for iva2_ck\n",
++ __func__);
++#endif
++#endif
++
++ dsp_clk_init();
++ services_init();
++
++ drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL);
++ if (!drv_datap) {
++ err = -ENOMEM;
++ goto err1;
++ }
++
++ drv_datap->shm_size = shm_size;
++ drv_datap->tc_wordswapon = tc_wordswapon;
++
++ if (base_img) {
++ drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL);
++ if (!drv_datap->base_img) {
++ err = -ENOMEM;
++ goto err2;
++ }
++ strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1);
++ }
++
++ dev_set_drvdata(bridge, drv_datap);
++
++ if (shm_size < 0x10000) { /* 64 KB */
++ err = -EINVAL;
++ pr_err("%s: shm size must be at least 64 KB\n", __func__);
++ goto err3;
++ }
++ dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size);
++
++ phys_membase = pdata->phys_mempool_base;
++ phys_memsize = pdata->phys_mempool_size;
++ if (phys_membase > 0 && phys_memsize > 0)
++ mem_ext_phys_pool_init(phys_membase, phys_memsize);
++
++ if (tc_wordswapon)
++ dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__);
++
++ driver_context = dsp_init(&err);
++ if (err) {
++ pr_err("DSP Bridge driver initialization failed\n");
++ goto err4;
++ }
++
++ return 0;
++
++err4:
++ mem_ext_phys_pool_release();
++err3:
++ kfree(drv_datap->base_img);
++err2:
++ kfree(drv_datap);
++err1:
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ cpufreq_unregister_notifier(&iva_clk_notifier,
++ CPUFREQ_TRANSITION_NOTIFIER);
++#endif
++ dsp_clk_exit();
++ services_exit();
++
++ return err;
++}
++
++static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
++{
++ int err;
++ dev_t dev = 0;
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ int i = 0;
++#endif
++
++ omap_dspbridge_dev = pdev;
++
++ /* Global bridge device */
++ bridge = &omap_dspbridge_dev->dev;
++
++ /* Bridge low level initializations */
++ err = omap3_bridge_startup(pdev);
++ if (err)
++ goto err1;
++
++ /* use 2.6 device model */
++ err = alloc_chrdev_region(&dev, 0, 1, driver_name);
++ if (err) {
++ pr_err("%s: Can't get major %d\n", __func__, driver_major);
++ goto err1;
++ }
++
++ cdev_init(&bridge_cdev, &bridge_fops);
++ bridge_cdev.owner = THIS_MODULE;
++
++ err = cdev_add(&bridge_cdev, dev, 1);
++ if (err) {
++ pr_err("%s: Failed to add bridge device\n", __func__);
++ goto err2;
++ }
++
++ /* udev support */
++ bridge_class = class_create(THIS_MODULE, "ti_bridge");
++ if (IS_ERR(bridge_class)) {
++ pr_err("%s: Error creating bridge class\n", __func__);
++ goto err3;
++ }
++
++ driver_major = MAJOR(dev);
++ device_create(bridge_class, NULL, MKDEV(driver_major, 0),
++ NULL, "DspBridge");
++ pr_info("DSP Bridge driver loaded\n");
++
++ return 0;
++
++err3:
++ cdev_del(&bridge_cdev);
++err2:
++ unregister_chrdev_region(dev, 1);
++err1:
++ return err;
++}
++
++static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
++{
++ dev_t devno;
++ bool ret;
++ int status = 0;
++ void *hdrv_obj = NULL;
++
++ status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
++ if (status)
++ goto func_cont;
++
++#ifdef CONFIG_TIDSPBRIDGE_DVFS
++ if (cpufreq_unregister_notifier(&iva_clk_notifier,
++ CPUFREQ_TRANSITION_NOTIFIER))
++ pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
++ __func__);
++#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
++
++ if (driver_context) {
++ /* Put the DSP in reset state */
++ ret = dsp_deinit(driver_context);
++ driver_context = 0;
++ DBC_ASSERT(ret == true);
++ }
++
++func_cont:
++ mem_ext_phys_pool_release();
++
++ dsp_clk_exit();
++ services_exit();
++
++ devno = MKDEV(driver_major, 0);
++ cdev_del(&bridge_cdev);
++ unregister_chrdev_region(devno, 1);
++ if (bridge_class) {
++ /* remove the device from sysfs */
++ device_destroy(bridge_class, MKDEV(driver_major, 0));
++ class_destroy(bridge_class);
++
++ }
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
++{
++ u32 status;
++ u32 command = PWR_EMERGENCYDEEPSLEEP;
++
++ status = pwr_sleep_dsp(command, time_out);
++ if (status)
++ return -1;
++
++ bridge_suspend_data.suspended = 1;
++ return 0;
++}
++
++static int BRIDGE_RESUME(struct platform_device *pdev)
++{
++ u32 status;
++
++ status = pwr_wake_dsp(time_out);
++ if (status)
++ return -1;
++
++ bridge_suspend_data.suspended = 0;
++ wake_up(&bridge_suspend_data.suspend_wq);
++ return 0;
++}
++#else
++#define BRIDGE_SUSPEND NULL
++#define BRIDGE_RESUME NULL
++#endif
++
++static struct platform_driver bridge_driver = {
++ .driver = {
++ .name = BRIDGE_NAME,
++ },
++ .probe = omap34_xx_bridge_probe,
++ .remove = __devexit_p(omap34_xx_bridge_remove),
++ .suspend = BRIDGE_SUSPEND,
++ .resume = BRIDGE_RESUME,
++};
++
++static int __init bridge_init(void)
++{
++ return platform_driver_register(&bridge_driver);
++}
++
++static void __exit bridge_exit(void)
++{
++ platform_driver_unregister(&bridge_driver);
++}
++
++/*
++ * This function is called when an application opens handle to the
++ * bridge driver.
++ */
++static int bridge_open(struct inode *ip, struct file *filp)
++{
++ int status = 0;
++ struct process_context *pr_ctxt = NULL;
++
++ /*
++ * Allocate a new process context and insert it into global
++ * process context list.
++ */
++
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++ if (recover) {
++ if (filp->f_flags & O_NONBLOCK ||
++ wait_for_completion_interruptible(&bridge_open_comp))
++ return -EBUSY;
++ }
++#endif
++ pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
++ if (pr_ctxt) {
++ pr_ctxt->res_state = PROC_RES_ALLOCATED;
++ spin_lock_init(&pr_ctxt->dmm_map_lock);
++ INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
++ spin_lock_init(&pr_ctxt->dmm_rsv_lock);
++ INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
++
++ pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
++ if (pr_ctxt->node_id) {
++ idr_init(pr_ctxt->node_id);
++ } else {
++ status = -ENOMEM;
++ goto err;
++ }
++
++ pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
++ if (pr_ctxt->stream_id)
++ idr_init(pr_ctxt->stream_id);
++ else
++ status = -ENOMEM;
++ } else {
++ status = -ENOMEM;
++ }
++err:
++ filp->private_data = pr_ctxt;
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++ if (!status)
++ atomic_inc(&bridge_cref);
++#endif
++ return status;
++}
++
++/*
++ * This function is called when an application closes handle to the bridge
++ * driver.
++ */
++static int bridge_release(struct inode *ip, struct file *filp)
++{
++ int status = 0;
++ struct process_context *pr_ctxt;
++
++ if (!filp->private_data) {
++ status = -EIO;
++ goto err;
++ }
++
++ pr_ctxt = filp->private_data;
++ flush_signals(current);
++ drv_remove_all_resources(pr_ctxt);
++ proc_detach(pr_ctxt);
++ kfree(pr_ctxt);
++
++ filp->private_data = NULL;
++
++err:
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++ if (!atomic_dec_return(&bridge_cref))
++ complete(&bridge_comp);
++#endif
++ return status;
++}
++
++/* This function provides IO interface to the bridge driver. */
++static long bridge_ioctl(struct file *filp, unsigned int code,
++ unsigned long args)
++{
++ int status;
++ u32 retval = 0;
++ union trapped_args buf_in;
++
++ DBC_REQUIRE(filp != NULL);
++#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
++ if (recover) {
++ status = -EIO;
++ goto err;
++ }
++#endif
++#ifdef CONFIG_PM
++ status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
++ if (status != 0)
++ return status;
++#endif
++
++ if (!filp->private_data) {
++ status = -EIO;
++ goto err;
++ }
++
++ status = copy_from_user(&buf_in, (union trapped_args *)args,
++ sizeof(union trapped_args));
++
++ if (!status) {
++ status = api_call_dev_ioctl(code, &buf_in, &retval,
++ filp->private_data);
++
++ if (!status) {
++ status = retval;
++ } else {
++ dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
++ "status 0x%x\n", __func__, code, status);
++ status = -1;
++ }
++
++ }
++
++err:
++ return status;
++}
++
++/* This function maps kernel space memory to user space memory. */
++static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ u32 offset = vma->vm_pgoff << PAGE_SHIFT;
++ u32 status;
++
++ DBC_ASSERT(vma->vm_start < vma->vm_end);
++
++ vma->vm_flags |= VM_RESERVED | VM_IO;
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
++ "%lx flags %lx\n", __func__, filp, offset,
++ vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
++
++ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ if (status != 0)
++ status = -EAGAIN;
++
++ return status;
++}
++
++/* To remove all process resources before removing the process from the
++ * process context list */
++int drv_remove_all_resources(void *process_ctxt)
++{
++ int status = 0;
++ struct process_context *ctxt = (struct process_context *)process_ctxt;
++ drv_remove_all_strm_res_elements(ctxt);
++ drv_remove_all_node_res_elements(ctxt);
++ drv_remove_all_dmm_res_elements(ctxt);
++ ctxt->res_state = PROC_RES_FREED;
++ return status;
++}
++
++/* Bridge driver initialization and de-initialization functions */
++module_init(bridge_init);
++module_exit(bridge_exit);
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/drv_interface.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/drv_interface.h 2010-08-18 11:24:23.218052787 +0300
+@@ -0,0 +1,28 @@
++/*
++ * drv_interface.h
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#ifndef _DRV_INTERFACE_H_
++#define _DRV_INTERFACE_H_
++
++/* Prototypes for all functions in this bridge */
++static int __init bridge_init(void); /* Initialize bridge */
++static void __exit bridge_exit(void); /* Opposite of initialize */
++static int bridge_open(struct inode *ip, struct file *filp); /* Open */
++static int bridge_release(struct inode *ip, struct file *filp); /* Release */
++static long bridge_ioctl(struct file *filp, unsigned int code,
++ unsigned long args);
++static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
++#endif /* ifndef _DRV_INTERFACE_H_ */
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/dspdrv.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/dspdrv.c 2010-08-18 11:24:23.218052787 +0300
+@@ -0,0 +1,142 @@
++/*
++ * dspdrv.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Interface to allocate and free bridge resources.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- Host OS */
++#include <linux/types.h>
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/drv.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/dspapi.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/mgr.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/dspdrv.h>
++
++/*
++ * ======== dsp_init ========
++ * Allocates bridge resources. Loads a base image onto DSP, if specified.
++ */
++u32 dsp_init(u32 *init_status)
++{
++ char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510";
++ int status = -EPERM;
++ struct drv_object *drv_obj = NULL;
++ u32 device_node;
++ u32 device_node_string;
++
++ if (!api_init())
++ goto func_cont;
++
++ status = drv_create(&drv_obj);
++ if (status) {
++ api_exit();
++ goto func_cont;
++ }
++
++ /* End drv_create */
++ /* Request Resources */
++ status = drv_request_resources((u32) &dev_node, &device_node_string);
++ if (!status) {
++ /* Attempt to Start the Device */
++ status = dev_start_device((struct cfg_devnode *)
++ device_node_string);
++ if (status)
++ (void)drv_release_resources
++ ((u32) device_node_string, drv_obj);
++ } else {
++ dev_dbg(bridge, "%s: drv_request_resources Failed\n", __func__);
++ status = -EPERM;
++ }
++
++ /* Unwind whatever was loaded */
++ if (status) {
++ /* irrespective of the status of dev_remove_device we conitinue
++ * unloading. Get the Driver Object iterate through and remove.
++ * Reset the status to E_FAIL to avoid going through
++ * api_init_complete2. */
++ for (device_node = drv_get_first_dev_extension();
++ device_node != 0;
++ device_node = drv_get_next_dev_extension(device_node)) {
++ (void)dev_remove_device((struct cfg_devnode *)
++ device_node);
++ (void)drv_release_resources((u32) device_node, drv_obj);
++ }
++ /* Remove the Driver Object */
++ (void)drv_destroy(drv_obj);
++ drv_obj = NULL;
++ api_exit();
++ dev_dbg(bridge, "%s: Logical device failed init\n", __func__);
++ } /* Unwinding the loaded drivers */
++func_cont:
++ /* Attempt to Start the Board */
++ if (!status) {
++ /* BRD_AutoStart could fail if the dsp execuetable is not the
++ * correct one. We should not propagate that error
++ * into the device loader. */
++ (void)api_init_complete2();
++ } else {
++ dev_dbg(bridge, "%s: Failed\n", __func__);
++ } /* End api_init_complete2 */
++ DBC_ENSURE((!status && drv_obj != NULL) ||
++ (status && drv_obj == NULL));
++ *init_status = status;
++ /* Return the Driver Object */
++ return (u32) drv_obj;
++}
++
++/*
++ * ======== dsp_deinit ========
++ * Frees the resources allocated for bridge.
++ */
++bool dsp_deinit(u32 device_context)
++{
++ bool ret = true;
++ u32 device_node;
++ struct mgr_object *mgr_obj = NULL;
++
++ while ((device_node = drv_get_first_dev_extension()) != 0) {
++ (void)dev_remove_device((struct cfg_devnode *)device_node);
++
++ (void)drv_release_resources((u32) device_node,
++ (struct drv_object *)device_context);
++ }
++
++ (void)drv_destroy((struct drv_object *)device_context);
++
++ /* Get the Manager Object from Registry
++ * MGR Destroy will unload the DCD dll */
++ if (!cfg_get_object((u32 *) &mgr_obj, REG_MGR_OBJECT))
++ (void)mgr_destroy(mgr_obj);
++
++ api_exit();
++
++ return ret;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/mgr.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/mgr.c 2010-08-18 11:24:23.218052787 +0300
+@@ -0,0 +1,375 @@
++/*
++ * mgr.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implementation of Manager interface to the device object at the
++ * driver level. This queries the NDB data base and retrieves the
++ * data about Node and Processor.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/dbdcd.h>
++#include <dspbridge/drv.h>
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/mgr.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++#define ZLDLLNAME ""
++
++struct mgr_object {
++ struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
++};
++
++/* ----------------------------------- Globals */
++static u32 refs;
++
++/*
++ * ========= mgr_create =========
++ * Purpose:
++ * MGR Object gets created only once during driver Loading.
++ */
++int mgr_create(struct mgr_object **mgr_obj,
++ struct cfg_devnode *dev_node_obj)
++{
++ int status = 0;
++ struct mgr_object *pmgr_obj = NULL;
++
++ DBC_REQUIRE(mgr_obj != NULL);
++ DBC_REQUIRE(refs > 0);
++
++ pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
++ if (pmgr_obj) {
++ status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr);
++ if (!status) {
++ /* If succeeded store the handle in the MGR Object */
++ status = cfg_set_object((u32) pmgr_obj, REG_MGR_OBJECT);
++ if (!status) {
++ *mgr_obj = pmgr_obj;
++ } else {
++ dcd_destroy_manager(pmgr_obj->hdcd_mgr);
++ kfree(pmgr_obj);
++ }
++ } else {
++ /* failed to Create DCD Manager */
++ kfree(pmgr_obj);
++ }
++ } else {
++ status = -ENOMEM;
++ }
++
++ DBC_ENSURE(status || pmgr_obj);
++ return status;
++}
++
++/*
++ * ========= mgr_destroy =========
++ * This function is invoked during bridge driver unloading.Frees MGR object.
++ */
++int mgr_destroy(struct mgr_object *hmgr_obj)
++{
++ int status = 0;
++ struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hmgr_obj);
++
++ /* Free resources */
++ if (hmgr_obj->hdcd_mgr)
++ dcd_destroy_manager(hmgr_obj->hdcd_mgr);
++
++ kfree(pmgr_obj);
++ /* Update the Registry with NULL for MGR Object */
++ (void)cfg_set_object(0, REG_MGR_OBJECT);
++
++ return status;
++}
++
++/*
++ * ======== mgr_enum_node_info ========
++ * Enumerate and get configuration information about nodes configured
++ * in the node database.
++ */
++int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
++ u32 undb_props_size, u32 *pu_num_nodes)
++{
++ int status = 0;
++ struct dsp_uuid node_uuid, temp_uuid;
++ u32 temp_index = 0;
++ u32 node_index = 0;
++ struct dcd_genericobj gen_obj;
++ struct mgr_object *pmgr_obj = NULL;
++
++ DBC_REQUIRE(pndb_props != NULL);
++ DBC_REQUIRE(pu_num_nodes != NULL);
++ DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
++ DBC_REQUIRE(refs > 0);
++
++ *pu_num_nodes = 0;
++ /* Get The Manager Object from the Registry */
++ status = cfg_get_object((u32 *) &pmgr_obj, REG_MGR_OBJECT);
++ if (status)
++ goto func_cont;
++
++ DBC_ASSERT(pmgr_obj);
++ /* Forever loop till we hit failed or no more items in the
++ * Enumeration. We will exit the loop other than 0; */
++ while (status == 0) {
++ status = dcd_enumerate_object(temp_index++, DSP_DCDNODETYPE,
++ &temp_uuid);
++ if (status == 0) {
++ node_index++;
++ if (node_id == (node_index - 1))
++ node_uuid = temp_uuid;
++
++ }
++ }
++ if (!status) {
++ if (node_id > (node_index - 1)) {
++ status = -EINVAL;
++ } else {
++ status = dcd_get_object_def(pmgr_obj->hdcd_mgr,
++ (struct dsp_uuid *)
++ &node_uuid, DSP_DCDNODETYPE,
++ &gen_obj);
++ if (!status) {
++ /* Get the Obj def */
++ *pndb_props =
++ gen_obj.obj_data.node_obj.ndb_props;
++ *pu_num_nodes = node_index;
++ }
++ }
++ }
++
++func_cont:
++ DBC_ENSURE((!status && *pu_num_nodes > 0) ||
++ (status && *pu_num_nodes == 0));
++
++ return status;
++}
++
++/*
++ * ======== mgr_enum_processor_info ========
++ * Enumerate and get configuration information about available
++ * DSP processors.
++ */
++int mgr_enum_processor_info(u32 processor_id,
++ struct dsp_processorinfo *
++ processor_info, u32 processor_info_size,
++ u8 *pu_num_procs)
++{
++ int status = 0;
++ int status1 = 0;
++ int status2 = 0;
++ struct dsp_uuid temp_uuid;
++ u32 temp_index = 0;
++ u32 proc_index = 0;
++ struct dcd_genericobj gen_obj;
++ struct mgr_object *pmgr_obj = NULL;
++ struct mgr_processorextinfo *ext_info;
++ struct dev_object *hdev_obj;
++ struct drv_object *hdrv_obj;
++ u8 dev_type;
++ struct cfg_devnode *dev_node;
++ bool proc_detect = false;
++
++ DBC_REQUIRE(processor_info != NULL);
++ DBC_REQUIRE(pu_num_procs != NULL);
++ DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
++ DBC_REQUIRE(refs > 0);
++
++ *pu_num_procs = 0;
++ status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
++ if (!status) {
++ status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
++ if (!status) {
++ status = dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
++ status = dev_get_dev_node(hdev_obj, &dev_node);
++ if (dev_type != DSP_UNIT)
++ status = -EPERM;
++
++ if (!status)
++ processor_info->processor_type = DSPTYPE64;
++ }
++ }
++ if (status)
++ goto func_end;
++
++ /* Get The Manager Object from the Registry */
++ if (cfg_get_object((u32 *) &pmgr_obj, REG_MGR_OBJECT)) {
++ dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
++ goto func_end;
++ }
++ DBC_ASSERT(pmgr_obj);
++ /* Forever loop till we hit no more items in the
++ * Enumeration. We will exit the loop other than 0; */
++ while (status1 == 0) {
++ status1 = dcd_enumerate_object(temp_index++,
++ DSP_DCDPROCESSORTYPE,
++ &temp_uuid);
++ if (status1 != 0)
++ break;
++
++ proc_index++;
++ /* Get the Object properties to find the Device/Processor
++ * Type */
++ if (proc_detect != false)
++ continue;
++
++ status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr,
++ (struct dsp_uuid *)&temp_uuid,
++ DSP_DCDPROCESSORTYPE, &gen_obj);
++ if (!status2) {
++ /* Get the Obj def */
++ if (processor_info_size <
++ sizeof(struct mgr_processorextinfo)) {
++ *processor_info = gen_obj.obj_data.proc_info;
++ } else {
++ /* extended info */
++ ext_info = (struct mgr_processorextinfo *)
++ processor_info;
++ *ext_info = gen_obj.obj_data.ext_proc_obj;
++ }
++ dev_dbg(bridge, "%s: Got proctype from DCD %x\n",
++ __func__, processor_info->processor_type);
++ /* See if we got the needed processor */
++ if (dev_type == DSP_UNIT) {
++ if (processor_info->processor_type ==
++ DSPPROCTYPE_C64)
++ proc_detect = true;
++ } else if (dev_type == IVA_UNIT) {
++ if (processor_info->processor_type ==
++ IVAPROCTYPE_ARM7)
++ proc_detect = true;
++ }
++ /* User applciatiuons aonly check for chip type, so
++ * this clumsy overwrite */
++ processor_info->processor_type = DSPTYPE64;
++ } else {
++ dev_dbg(bridge, "%s: Failed to get DCD processor info "
++ "%x\n", __func__, status2);
++ status = -EPERM;
++ }
++ }
++ *pu_num_procs = proc_index;
++ if (proc_detect == false) {
++ dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use "
++ "CFG registry\n", __func__);
++ processor_info->processor_type = DSPTYPE64;
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== mgr_exit ========
++ * Decrement reference count, and free resources when reference count is
++ * 0.
++ */
++void mgr_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++ refs--;
++ if (refs == 0)
++ dcd_exit();
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== mgr_get_dcd_handle ========
++ * Retrieves the MGR handle. Accessor Function.
++ */
++int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
++ u32 *dcd_handle)
++{
++ int status = -EPERM;
++ struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dcd_handle != NULL);
++
++ *dcd_handle = (u32) NULL;
++ if (pmgr_obj) {
++ *dcd_handle = (u32) pmgr_obj->hdcd_mgr;
++ status = 0;
++ }
++ DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
++ (status && *dcd_handle == (u32) NULL));
++
++ return status;
++}
++
++/*
++ * ======== mgr_init ========
++ * Initialize MGR's private state, keeping a reference count on each call.
++ */
++bool mgr_init(void)
++{
++ bool ret = true;
++ bool init_dcd = false;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (refs == 0) {
++ init_dcd = dcd_init(); /* DCD Module */
++
++ if (!init_dcd)
++ ret = false;
++ }
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
++
++/*
++ * ======== mgr_wait_for_bridge_events ========
++ * Block on any Bridge event(s)
++ */
++int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
++ u32 count, u32 *pu_index,
++ u32 utimeout)
++{
++ int status;
++ struct sync_object *sync_events[MAX_EVENTS];
++ u32 i;
++
++ DBC_REQUIRE(count < MAX_EVENTS);
++
++ for (i = 0; i < count; i++)
++ sync_events[i] = anotifications[i]->handle;
++
++ status = sync_wait_on_multiple_events(sync_events, count, utimeout,
++ pu_index);
++
++ return status;
++
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/nldr.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/nldr.c 2010-08-18 11:24:23.222051758 +0300
+@@ -0,0 +1,1974 @@
++/*
++ * nldr.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge dynamic + overlay Node loader.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++#include <dspbridge/host_os.h>
++
++#include <dspbridge/dbdefs.h>
++
++#include <dspbridge/dbc.h>
++
++/* Platform manager */
++#include <dspbridge/cod.h>
++#include <dspbridge/dev.h>
++
++/* Resource manager */
++#include <dspbridge/dbll.h>
++#include <dspbridge/dbdcd.h>
++#include <dspbridge/rmm.h>
++#include <dspbridge/uuidutil.h>
++
++#include <dspbridge/nldr.h>
++#include <linux/gcd.h>
++
++/* Name of section containing dynamic load mem */
++#define DYNMEMSECT ".dspbridge_mem"
++
++/* Name of section containing dependent library information */
++#define DEPLIBSECT ".dspbridge_deplibs"
++
++/* Max depth of recursion for loading node's dependent libraries */
++#define MAXDEPTH 5
++
++/* Max number of persistent libraries kept by a node */
++#define MAXLIBS 5
++
++/*
++ * Defines for extracting packed dynamic load memory requirements from two
++ * masks.
++ * These defines must match node.cdb and dynm.cdb
++ * Format of data/code mask is:
++ * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
++ * where
++ * u = unused
++ * cccccc = prefered/required dynamic mem segid for create phase data/code
++ * dddddd = prefered/required dynamic mem segid for delete phase data/code
++ * eeeeee = prefered/req. dynamic mem segid for execute phase data/code
++ * f = flag indicating if memory is preferred or required:
++ * f = 1 if required, f = 0 if preferred.
++ *
++ * The 6 bits of the segid are interpreted as follows:
++ *
++ * If the 6th bit (bit 5) is not set, then this specifies a memory segment
++ * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
++ * If the 6th bit (bit 5) is set, segid has the following interpretation:
++ * segid = 32 - Any internal memory segment can be used.
++ * segid = 33 - Any external memory segment can be used.
++ * segid = 63 - Any memory segment can be used (in this case the
++ * required/preferred flag is irrelevant).
++ *
++ */
++/* Maximum allowed dynamic loading memory segments */
++#define MAXMEMSEGS 32
++
++#define MAXSEGID 3 /* Largest possible (real) segid */
++#define MEMINTERNALID 32 /* Segid meaning use internal mem */
++#define MEMEXTERNALID 33 /* Segid meaning use external mem */
++#define NULLID 63 /* Segid meaning no memory req/pref */
++#define FLAGBIT 7 /* 7th bit is pref./req. flag */
++#define SEGMASK 0x3f /* Bits 0 - 5 */
++
++#define CREATEBIT 0 /* Create segid starts at bit 0 */
++#define DELETEBIT 8 /* Delete segid starts at bit 8 */
++#define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
++
++/*
++ * Masks that define memory type. Must match defines in dynm.cdb.
++ */
++#define DYNM_CODE 0x2
++#define DYNM_DATA 0x4
++#define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
++#define DYNM_INTERNAL 0x8
++#define DYNM_EXTERNAL 0x10
++
++/*
++ * Defines for packing memory requirement/preference flags for code and
++ * data of each of the node's phases into one mask.
++ * The bit is set if the segid is required for loading code/data of the
++ * given phase. The bit is not set, if the segid is preferred only.
++ *
++ * These defines are also used as indeces into a segid array for the node.
++ * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
++ * create phase data is required or preferred to be loaded into.
++ */
++#define CREATEDATAFLAGBIT 0
++#define CREATECODEFLAGBIT 1
++#define EXECUTEDATAFLAGBIT 2
++#define EXECUTECODEFLAGBIT 3
++#define DELETEDATAFLAGBIT 4
++#define DELETECODEFLAGBIT 5
++#define MAXFLAGS 6
++
++ /*
++ * These names may be embedded in overlay sections to identify which
++ * node phase the section should be overlayed.
++ */
++#define PCREATE "create"
++#define PDELETE "delete"
++#define PEXECUTE "execute"
++
++static inline bool is_equal_uuid(struct dsp_uuid *uuid1,
++ struct dsp_uuid *uuid2)
++{
++ return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid));
++}
++
++ /*
++ * ======== mem_seg_info ========
++ * Format of dynamic loading memory segment info in coff file.
++ * Must match dynm.h55.
++ */
++struct mem_seg_info {
++ u32 segid; /* Dynamic loading memory segment number */
++ u32 base;
++ u32 len;
++ u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
++};
++
++/*
++ * ======== lib_node ========
++ * For maintaining a tree of library dependencies.
++ */
++struct lib_node {
++ struct dbll_library_obj *lib; /* The library */
++ u16 dep_libs; /* Number of dependent libraries */
++ struct lib_node *dep_libs_tree; /* Dependent libraries of lib */
++};
++
++/*
++ * ======== ovly_sect ========
++ * Information needed to overlay a section.
++ */
++struct ovly_sect {
++ struct ovly_sect *next_sect;
++ u32 sect_load_addr; /* Load address of section */
++ u32 sect_run_addr; /* Run address of section */
++ u32 size; /* Size of section */
++ u16 page; /* DBL_CODE, DBL_DATA */
++};
++
++/*
++ * ======== ovly_node ========
++ * For maintaining a list of overlay nodes, with sections that need to be
++ * overlayed for each of the nodes phases.
++ */
++struct ovly_node {
++ struct dsp_uuid uuid;
++ char *node_name;
++ struct ovly_sect *create_sects_list;
++ struct ovly_sect *delete_sects_list;
++ struct ovly_sect *execute_sects_list;
++ struct ovly_sect *other_sects_list;
++ u16 create_sects;
++ u16 delete_sects;
++ u16 execute_sects;
++ u16 other_sects;
++ u16 create_ref;
++ u16 delete_ref;
++ u16 execute_ref;
++ u16 other_ref;
++};
++
++/*
++ * ======== nldr_object ========
++ * Overlay loader object.
++ */
++struct nldr_object {
++ struct dev_object *hdev_obj; /* Device object */
++ struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
++ struct dbll_tar_obj *dbll; /* The DBL loader */
++ struct dbll_library_obj *base_lib; /* Base image library */
++ struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
++ struct dbll_fxns ldr_fxns; /* Loader function table */
++ struct dbll_attrs ldr_attrs; /* attrs to pass to loader functions */
++ nldr_ovlyfxn ovly_fxn; /* "write" for overlay nodes */
++ nldr_writefxn write_fxn; /* "write" for dynamic nodes */
++ struct ovly_node *ovly_table; /* Table of overlay nodes */
++ u16 ovly_nodes; /* Number of overlay nodes in base */
++ u16 ovly_nid; /* Index for tracking overlay nodes */
++ u16 dload_segs; /* Number of dynamic load mem segs */
++ u32 *seg_table; /* memtypes of dynamic memory segs
++ * indexed by segid
++ */
++ u16 us_dsp_mau_size; /* Size of DSP MAU */
++ u16 us_dsp_word_size; /* Size of DSP word */
++};
++
++/*
++ * ======== nldr_nodeobject ========
++ * Dynamic node object. This object is created when a node is allocated.
++ */
++struct nldr_nodeobject {
++ struct nldr_object *nldr_obj; /* Dynamic loader handle */
++ void *priv_ref; /* Handle to pass to dbl_write_fxn */
++ struct dsp_uuid uuid; /* Node's UUID */
++ bool dynamic; /* Dynamically loaded node? */
++ bool overlay; /* Overlay node? */
++ bool *pf_phase_split; /* Multiple phase libraries? */
++ struct lib_node root; /* Library containing node phase */
++ struct lib_node create_lib; /* Library with create phase lib */
++ struct lib_node execute_lib; /* Library with execute phase lib */
++ struct lib_node delete_lib; /* Library with delete phase lib */
++ /* libs remain loaded until Delete */
++ struct lib_node pers_lib_table[MAXLIBS];
++ s32 pers_libs; /* Number of persistent libraries */
++ /* Path in lib dependency tree */
++ struct dbll_library_obj *lib_path[MAXDEPTH + 1];
++ enum nldr_phase phase; /* Node phase currently being loaded */
++
++ /*
++ * Dynamic loading memory segments for data and code of each phase.
++ */
++ u16 seg_id[MAXFLAGS];
++
++ /*
++ * Mask indicating whether each mem segment specified in seg_id[]
++ * is preferred or required.
++ * For example
++ * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
++ * then it is required to load execute phase data into the memory
++ * specified by seg_id[EXECUTEDATAFLAGBIT].
++ */
++ u32 code_data_flag_mask;
++};
++
++/* Dynamic loader function table */
++static struct dbll_fxns ldr_fxns = {
++ (dbll_close_fxn) dbll_close,
++ (dbll_create_fxn) dbll_create,
++ (dbll_delete_fxn) dbll_delete,
++ (dbll_exit_fxn) dbll_exit,
++ (dbll_get_attrs_fxn) dbll_get_attrs,
++ (dbll_get_addr_fxn) dbll_get_addr,
++ (dbll_get_c_addr_fxn) dbll_get_c_addr,
++ (dbll_get_sect_fxn) dbll_get_sect,
++ (dbll_init_fxn) dbll_init,
++ (dbll_load_fxn) dbll_load,
++ (dbll_load_sect_fxn) dbll_load_sect,
++ (dbll_open_fxn) dbll_open,
++ (dbll_read_sect_fxn) dbll_read_sect,
++ (dbll_set_attrs_fxn) dbll_set_attrs,
++ (dbll_unload_fxn) dbll_unload,
++ (dbll_unload_sect_fxn) dbll_unload_sect,
++};
++
++static u32 refs; /* module reference count */
++
++static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
++ u32 addr, u32 bytes);
++static int add_ovly_node(struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type, void *handle);
++static int add_ovly_sect(struct nldr_object *nldr_obj,
++ struct ovly_sect **lst,
++ struct dbll_sect_info *sect_inf,
++ bool *exists, u32 addr, u32 bytes);
++static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
++ s32 mtype);
++static void free_sects(struct nldr_object *nldr_obj,
++ struct ovly_sect *phase_sects, u16 alloc_num);
++static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
++ char *sym_name, struct dbll_sym_val **sym);
++static int load_lib(struct nldr_nodeobject *nldr_node_obj,
++ struct lib_node *root, struct dsp_uuid uuid,
++ bool root_prstnt,
++ struct dbll_library_obj **lib_path,
++ enum nldr_phase phase, u16 depth);
++static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase);
++static int remote_alloc(void **ref, u16 mem_sect, u32 size,
++ u32 align, u32 *dsp_address,
++ s32 segmnt_id,
++ s32 req, bool reserve);
++static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
++ bool reserve);
++
++static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
++ struct lib_node *root);
++static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase);
++static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
++ struct dbll_library_obj *lib);
++static u32 find_lcm(u32 a, u32 b);
++
++/*
++ * ======== nldr_allocate ========
++ */
++int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
++ const struct dcd_nodeprops *node_props,
++ struct nldr_nodeobject **nldr_nodeobj,
++ bool *pf_phase_split)
++{
++ struct nldr_nodeobject *nldr_node_obj = NULL;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(node_props != NULL);
++ DBC_REQUIRE(nldr_nodeobj != NULL);
++ DBC_REQUIRE(nldr_obj);
++
++ /* Initialize handle in case of failure */
++ *nldr_nodeobj = NULL;
++ /* Allocate node object */
++ nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);
++
++ if (nldr_node_obj == NULL) {
++ status = -ENOMEM;
++ } else {
++ nldr_node_obj->pf_phase_split = pf_phase_split;
++ nldr_node_obj->pers_libs = 0;
++ nldr_node_obj->nldr_obj = nldr_obj;
++ nldr_node_obj->priv_ref = priv_ref;
++ /* Save node's UUID. */
++ nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
++ /*
++ * Determine if node is a dynamically loaded node from
++ * ndb_props.
++ */
++ if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
++ /* Dynamic node */
++ nldr_node_obj->dynamic = true;
++ /*
++ * Extract memory requirements from ndb_props masks
++ */
++ /* Create phase */
++ nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
++ (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
++ SEGMASK;
++ nldr_node_obj->code_data_flag_mask |=
++ ((node_props->ul_data_mem_seg_mask >>
++ (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
++ nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
++ (node_props->ul_code_mem_seg_mask >>
++ CREATEBIT) & SEGMASK;
++ nldr_node_obj->code_data_flag_mask |=
++ ((node_props->ul_code_mem_seg_mask >>
++ (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
++ /* Execute phase */
++ nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
++ (node_props->ul_data_mem_seg_mask >>
++ EXECUTEBIT) & SEGMASK;
++ nldr_node_obj->code_data_flag_mask |=
++ ((node_props->ul_data_mem_seg_mask >>
++ (EXECUTEBIT + FLAGBIT)) & 1) <<
++ EXECUTEDATAFLAGBIT;
++ nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
++ (node_props->ul_code_mem_seg_mask >>
++ EXECUTEBIT) & SEGMASK;
++ nldr_node_obj->code_data_flag_mask |=
++ ((node_props->ul_code_mem_seg_mask >>
++ (EXECUTEBIT + FLAGBIT)) & 1) <<
++ EXECUTECODEFLAGBIT;
++ /* Delete phase */
++ nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
++ (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
++ SEGMASK;
++ nldr_node_obj->code_data_flag_mask |=
++ ((node_props->ul_data_mem_seg_mask >>
++ (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
++ nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
++ (node_props->ul_code_mem_seg_mask >>
++ DELETEBIT) & SEGMASK;
++ nldr_node_obj->code_data_flag_mask |=
++ ((node_props->ul_code_mem_seg_mask >>
++ (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
++ } else {
++ /* Non-dynamically loaded nodes are part of the
++ * base image */
++ nldr_node_obj->root.lib = nldr_obj->base_lib;
++ /* Check for overlay node */
++ if (node_props->us_load_type == NLDR_OVLYLOAD)
++ nldr_node_obj->overlay = true;
++
++ }
++ *nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
++ }
++ /* Cleanup on failure */
++ if (status && nldr_node_obj)
++ kfree(nldr_node_obj);
++
++ DBC_ENSURE((!status && *nldr_nodeobj)
++ || (status && *nldr_nodeobj == NULL));
++ return status;
++}
++
++/*
++ * ======== nldr_create ========
++ */
++int nldr_create(struct nldr_object **nldr,
++ struct dev_object *hdev_obj,
++ const struct nldr_attrs *pattrs)
++{
++ struct cod_manager *cod_mgr; /* COD manager */
++ char *psz_coff_buf = NULL;
++ char sz_zl_file[COD_MAXPATHLENGTH];
++ struct nldr_object *nldr_obj = NULL;
++ struct dbll_attrs save_attrs;
++ struct dbll_attrs new_attrs;
++ dbll_flags flags;
++ u32 ul_entry;
++ u16 dload_segs = 0;
++ struct mem_seg_info *mem_info_obj;
++ u32 ul_len = 0;
++ u32 ul_addr;
++ struct rmm_segment *rmm_segs = NULL;
++ u16 i;
++ int status = 0;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(nldr != NULL);
++ DBC_REQUIRE(hdev_obj != NULL);
++ DBC_REQUIRE(pattrs != NULL);
++ DBC_REQUIRE(pattrs->pfn_ovly != NULL);
++ DBC_REQUIRE(pattrs->pfn_write != NULL);
++
++ /* Allocate dynamic loader object */
++ nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
++ if (nldr_obj) {
++ nldr_obj->hdev_obj = hdev_obj;
++ /* warning, lazy status checking alert! */
++ dev_get_cod_mgr(hdev_obj, &cod_mgr);
++ if (cod_mgr) {
++ status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
++ DBC_ASSERT(!status);
++ status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
++ DBC_ASSERT(!status);
++ status =
++ cod_get_base_name(cod_mgr, sz_zl_file,
++ COD_MAXPATHLENGTH);
++ DBC_ASSERT(!status);
++ }
++ status = 0;
++ /* end lazy status checking */
++ nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
++ nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
++ nldr_obj->ldr_fxns = ldr_fxns;
++ if (!(nldr_obj->ldr_fxns.init_fxn()))
++ status = -ENOMEM;
++
++ } else {
++ status = -ENOMEM;
++ }
++ /* Create the DCD Manager */
++ if (!status)
++ status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
++
++ /* Get dynamic loading memory sections from base lib */
++ if (!status) {
++ status =
++ nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
++ DYNMEMSECT, &ul_addr,
++ &ul_len);
++ if (!status) {
++ psz_coff_buf =
++ kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
++ GFP_KERNEL);
++ if (!psz_coff_buf)
++ status = -ENOMEM;
++ } else {
++ /* Ok to not have dynamic loading memory */
++ status = 0;
++ ul_len = 0;
++ dev_dbg(bridge, "%s: failed - no dynamic loading mem "
++ "segments: 0x%x\n", __func__, status);
++ }
++ }
++ if (!status && ul_len > 0) {
++ /* Read section containing dynamic load mem segments */
++ status =
++ nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
++ DYNMEMSECT, psz_coff_buf,
++ ul_len);
++ }
++ if (!status && ul_len > 0) {
++ /* Parse memory segment data */
++ dload_segs = (u16) (*((u32 *) psz_coff_buf));
++ if (dload_segs > MAXMEMSEGS)
++ status = -EBADF;
++ }
++ /* Parse dynamic load memory segments */
++ if (!status && dload_segs > 0) {
++ rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
++ GFP_KERNEL);
++ nldr_obj->seg_table =
++ kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
++ if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
++ status = -ENOMEM;
++ } else {
++ nldr_obj->dload_segs = dload_segs;
++ mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
++ sizeof(u32));
++ for (i = 0; i < dload_segs; i++) {
++ rmm_segs[i].base = (mem_info_obj + i)->base;
++ rmm_segs[i].length = (mem_info_obj + i)->len;
++ rmm_segs[i].space = 0;
++ nldr_obj->seg_table[i] =
++ (mem_info_obj + i)->type;
++ dev_dbg(bridge,
++ "(proc) DLL MEMSEGMENT: %d, "
++ "Base: 0x%x, Length: 0x%x\n", i,
++ rmm_segs[i].base, rmm_segs[i].length);
++ }
++ }
++ }
++ /* Create Remote memory manager */
++ if (!status)
++ status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
++
++ if (!status) {
++ /* set the alloc, free, write functions for loader */
++ nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
++ new_attrs = save_attrs;
++ new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
++ new_attrs.free = (dbll_free_fxn) remote_free;
++ new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
++ new_attrs.sym_handle = nldr_obj;
++ new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
++ nldr_obj->ovly_fxn = pattrs->pfn_ovly;
++ nldr_obj->write_fxn = pattrs->pfn_write;
++ nldr_obj->ldr_attrs = new_attrs;
++ }
++ kfree(rmm_segs);
++
++ kfree(psz_coff_buf);
++
++ /* Get overlay nodes */
++ if (!status) {
++ status =
++ cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
++ /* lazy check */
++ DBC_ASSERT(!status);
++ /* First count number of overlay nodes */
++ status =
++ dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
++ add_ovly_node, (void *)nldr_obj);
++ /* Now build table of overlay nodes */
++ if (!status && nldr_obj->ovly_nodes > 0) {
++ /* Allocate table for overlay nodes */
++ nldr_obj->ovly_table =
++ kzalloc(sizeof(struct ovly_node) *
++ nldr_obj->ovly_nodes, GFP_KERNEL);
++ /* Put overlay nodes in the table */
++ nldr_obj->ovly_nid = 0;
++ status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
++ add_ovly_node,
++ (void *)nldr_obj);
++ }
++ }
++ /* Do a fake reload of the base image to get overlay section info */
++ if (!status && nldr_obj->ovly_nodes > 0) {
++ save_attrs.write = fake_ovly_write;
++ save_attrs.log_write = add_ovly_info;
++ save_attrs.log_write_handle = nldr_obj;
++ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
++ status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
++ &save_attrs, &ul_entry);
++ }
++ if (!status) {
++ *nldr = (struct nldr_object *)nldr_obj;
++ } else {
++ if (nldr_obj)
++ nldr_delete((struct nldr_object *)nldr_obj);
++
++ *nldr = NULL;
++ }
++ /* FIXME:Temp. Fix. Must be removed */
++ DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
++ return status;
++}
++
++/*
++ * ======== nldr_delete ========
++ */
++void nldr_delete(struct nldr_object *nldr_obj)
++{
++ struct ovly_sect *ovly_section;
++ struct ovly_sect *next;
++ u16 i;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(nldr_obj);
++
++ nldr_obj->ldr_fxns.exit_fxn();
++ if (nldr_obj->rmm)
++ rmm_delete(nldr_obj->rmm);
++
++ kfree(nldr_obj->seg_table);
++
++ if (nldr_obj->hdcd_mgr)
++ dcd_destroy_manager(nldr_obj->hdcd_mgr);
++
++ /* Free overlay node information */
++ if (nldr_obj->ovly_table) {
++ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
++ ovly_section =
++ nldr_obj->ovly_table[i].create_sects_list;
++ while (ovly_section) {
++ next = ovly_section->next_sect;
++ kfree(ovly_section);
++ ovly_section = next;
++ }
++ ovly_section =
++ nldr_obj->ovly_table[i].delete_sects_list;
++ while (ovly_section) {
++ next = ovly_section->next_sect;
++ kfree(ovly_section);
++ ovly_section = next;
++ }
++ ovly_section =
++ nldr_obj->ovly_table[i].execute_sects_list;
++ while (ovly_section) {
++ next = ovly_section->next_sect;
++ kfree(ovly_section);
++ ovly_section = next;
++ }
++ ovly_section = nldr_obj->ovly_table[i].other_sects_list;
++ while (ovly_section) {
++ next = ovly_section->next_sect;
++ kfree(ovly_section);
++ ovly_section = next;
++ }
++ }
++ kfree(nldr_obj->ovly_table);
++ }
++ kfree(nldr_obj);
++}
++
++/*
++ * ======== nldr_exit ========
++ * Discontinue usage of NLDR module.
++ */
++void nldr_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ if (refs == 0)
++ rmm_exit();
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== nldr_get_fxn_addr ========
++ */
++int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
++ char *str_fxn, u32 * addr)
++{
++ struct dbll_sym_val *dbll_sym;
++ struct nldr_object *nldr_obj;
++ int status = 0;
++ bool status1 = false;
++ s32 i = 0;
++ struct lib_node root = { NULL, 0, NULL };
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(nldr_node_obj);
++ DBC_REQUIRE(addr != NULL);
++ DBC_REQUIRE(str_fxn != NULL);
++
++ nldr_obj = nldr_node_obj->nldr_obj;
++ /* Called from node_create(), node_delete(), or node_run(). */
++ if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
++ switch (nldr_node_obj->phase) {
++ case NLDR_CREATE:
++ root = nldr_node_obj->create_lib;
++ break;
++ case NLDR_EXECUTE:
++ root = nldr_node_obj->execute_lib;
++ break;
++ case NLDR_DELETE:
++ root = nldr_node_obj->delete_lib;
++ break;
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++ } else {
++ /* for Overlay nodes or non-split Dynamic nodes */
++ root = nldr_node_obj->root;
++ }
++ status1 =
++ nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
++ if (!status1)
++ status1 =
++ nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
++ &dbll_sym);
++
++ /* If symbol not found, check dependent libraries */
++ if (!status1) {
++ for (i = 0; i < root.dep_libs; i++) {
++ status1 =
++ nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
++ [i].lib, str_fxn,
++ &dbll_sym);
++ if (!status1) {
++ status1 =
++ nldr_obj->ldr_fxns.
++ get_c_addr_fxn(root.dep_libs_tree[i].lib,
++ str_fxn, &dbll_sym);
++ }
++ if (status1) {
++ /* Symbol found */
++ break;
++ }
++ }
++ }
++ /* Check persistent libraries */
++ if (!status1) {
++ for (i = 0; i < nldr_node_obj->pers_libs; i++) {
++ status1 =
++ nldr_obj->ldr_fxns.
++ get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
++ str_fxn, &dbll_sym);
++ if (!status1) {
++ status1 =
++ nldr_obj->ldr_fxns.
++ get_c_addr_fxn(nldr_node_obj->pers_lib_table
++ [i].lib, str_fxn, &dbll_sym);
++ }
++ if (status1) {
++ /* Symbol found */
++ break;
++ }
++ }
++ }
++
++ if (status1)
++ *addr = dbll_sym->value;
++ else
++ status = -ESPIPE;
++
++ return status;
++}
++
++/*
++ * ======== nldr_get_rmm_manager ========
++ * Given a NLDR object, retrieve RMM Manager Handle
++ */
++int nldr_get_rmm_manager(struct nldr_object *nldr,
++ struct rmm_target_obj **rmm_mgr)
++{
++ int status = 0;
++ struct nldr_object *nldr_obj = nldr;
++ DBC_REQUIRE(rmm_mgr != NULL);
++
++ if (nldr) {
++ *rmm_mgr = nldr_obj->rmm;
++ } else {
++ *rmm_mgr = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
++
++ return status;
++}
++
++/*
++ * ======== nldr_init ========
++ * Initialize the NLDR module.
++ */
++bool nldr_init(void)
++{
++ DBC_REQUIRE(refs >= 0);
++
++ if (refs == 0)
++ rmm_init();
++
++ refs++;
++
++ DBC_ENSURE(refs > 0);
++ return true;
++}
++
++/*
++ * ======== nldr_load ========
++ */
++int nldr_load(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase)
++{
++ struct nldr_object *nldr_obj;
++ struct dsp_uuid lib_uuid;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(nldr_node_obj);
++
++ nldr_obj = nldr_node_obj->nldr_obj;
++
++ if (nldr_node_obj->dynamic) {
++ nldr_node_obj->phase = phase;
++
++ lib_uuid = nldr_node_obj->uuid;
++
++ /* At this point, we may not know if node is split into
++ * different libraries. So we'll go ahead and load the
++ * library, and then save the pointer to the appropriate
++ * location after we know. */
++
++ status =
++ load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
++ false, nldr_node_obj->lib_path, phase, 0);
++
++ if (!status) {
++ if (*nldr_node_obj->pf_phase_split) {
++ switch (phase) {
++ case NLDR_CREATE:
++ nldr_node_obj->create_lib =
++ nldr_node_obj->root;
++ break;
++
++ case NLDR_EXECUTE:
++ nldr_node_obj->execute_lib =
++ nldr_node_obj->root;
++ break;
++
++ case NLDR_DELETE:
++ nldr_node_obj->delete_lib =
++ nldr_node_obj->root;
++ break;
++
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++ }
++ }
++ } else {
++ if (nldr_node_obj->overlay)
++ status = load_ovly(nldr_node_obj, phase);
++
++ }
++
++ return status;
++}
++
++/*
++ * ======== nldr_unload ========
++ */
++int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase)
++{
++ int status = 0;
++ struct lib_node *root_lib = NULL;
++ s32 i = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(nldr_node_obj);
++
++ if (nldr_node_obj != NULL) {
++ if (nldr_node_obj->dynamic) {
++ if (*nldr_node_obj->pf_phase_split) {
++ switch (phase) {
++ case NLDR_CREATE:
++ root_lib = &nldr_node_obj->create_lib;
++ break;
++ case NLDR_EXECUTE:
++ root_lib = &nldr_node_obj->execute_lib;
++ break;
++ case NLDR_DELETE:
++ root_lib = &nldr_node_obj->delete_lib;
++ /* Unload persistent libraries */
++ for (i = 0;
++ i < nldr_node_obj->pers_libs;
++ i++) {
++ unload_lib(nldr_node_obj,
++ &nldr_node_obj->
++ pers_lib_table[i]);
++ }
++ nldr_node_obj->pers_libs = 0;
++ break;
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++ } else {
++ /* Unload main library */
++ root_lib = &nldr_node_obj->root;
++ }
++ if (root_lib)
++ unload_lib(nldr_node_obj, root_lib);
++ } else {
++ if (nldr_node_obj->overlay)
++ unload_ovly(nldr_node_obj, phase);
++
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== add_ovly_info ========
++ */
++static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
++ u32 addr, u32 bytes)
++{
++ char *node_name;
++ char *sect_name = (char *)sect_info->name;
++ bool sect_exists = false;
++ char seps = ':';
++ char *pch;
++ u16 i;
++ struct nldr_object *nldr_obj = (struct nldr_object *)handle;
++ int status = 0;
++
++ /* Is this an overlay section (load address != run address)? */
++ if (sect_info->sect_load_addr == sect_info->sect_run_addr)
++ goto func_end;
++
++ /* Find the node it belongs to */
++ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
++ node_name = nldr_obj->ovly_table[i].node_name;
++ DBC_REQUIRE(node_name);
++ if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
++ /* Found the node */
++ break;
++ }
++ }
++ if (!(i < nldr_obj->ovly_nodes))
++ goto func_end;
++
++ /* Determine which phase this section belongs to */
++ for (pch = sect_name + 1; *pch && *pch != seps; pch++)
++ ;;
++
++ if (*pch) {
++ pch++; /* Skip over the ':' */
++ if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
++ status =
++ add_ovly_sect(nldr_obj,
++ &nldr_obj->
++ ovly_table[i].create_sects_list,
++ sect_info, &sect_exists, addr, bytes);
++ if (!status && !sect_exists)
++ nldr_obj->ovly_table[i].create_sects++;
++
++ } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
++ status =
++ add_ovly_sect(nldr_obj,
++ &nldr_obj->
++ ovly_table[i].delete_sects_list,
++ sect_info, &sect_exists, addr, bytes);
++ if (!status && !sect_exists)
++ nldr_obj->ovly_table[i].delete_sects++;
++
++ } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
++ status =
++ add_ovly_sect(nldr_obj,
++ &nldr_obj->
++ ovly_table[i].execute_sects_list,
++ sect_info, &sect_exists, addr, bytes);
++ if (!status && !sect_exists)
++ nldr_obj->ovly_table[i].execute_sects++;
++
++ } else {
++ /* Put in "other" sectins */
++ status =
++ add_ovly_sect(nldr_obj,
++ &nldr_obj->
++ ovly_table[i].other_sects_list,
++ sect_info, &sect_exists, addr, bytes);
++ if (!status && !sect_exists)
++ nldr_obj->ovly_table[i].other_sects++;
++
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== add_ovly_node =========
++ * Callback function passed to dcd_get_objects.
++ */
++static int add_ovly_node(struct dsp_uuid *uuid_obj,
++ enum dsp_dcdobjtype obj_type, void *handle)
++{
++ struct nldr_object *nldr_obj = (struct nldr_object *)handle;
++ char *node_name = NULL;
++ char *pbuf = NULL;
++ u32 len;
++ struct dcd_genericobj obj_def;
++ int status = 0;
++
++ if (obj_type != DSP_DCDNODETYPE)
++ goto func_end;
++
++ status =
++ dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
++ &obj_def);
++ if (status)
++ goto func_end;
++
++ /* If overlay node, add to the list */
++ if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
++ if (nldr_obj->ovly_table == NULL) {
++ nldr_obj->ovly_nodes++;
++ } else {
++ /* Add node to table */
++ nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
++ *uuid_obj;
++ DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
++ ac_name);
++ len =
++ strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
++ node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
++ pbuf = kzalloc(len + 1, GFP_KERNEL);
++ if (pbuf == NULL) {
++ status = -ENOMEM;
++ } else {
++ strncpy(pbuf, node_name, len);
++ nldr_obj->ovly_table[nldr_obj->ovly_nid].
++ node_name = pbuf;
++ nldr_obj->ovly_nid++;
++ }
++ }
++ }
++ /* These were allocated in dcd_get_object_def */
++ kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);
++
++ kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);
++
++ kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);
++
++ kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);
++
++func_end:
++ return status;
++}
++
++/*
++ * ======== add_ovly_sect ========
++ */
++static int add_ovly_sect(struct nldr_object *nldr_obj,
++ struct ovly_sect **lst,
++ struct dbll_sect_info *sect_inf,
++ bool *exists, u32 addr, u32 bytes)
++{
++ struct ovly_sect *new_sect = NULL;
++ struct ovly_sect *last_sect;
++ struct ovly_sect *ovly_section;
++ int status = 0;
++
++ ovly_section = last_sect = *lst;
++ *exists = false;
++ while (ovly_section) {
++ /*
++ * Make sure section has not already been added. Multiple
++ * 'write' calls may be made to load the section.
++ */
++ if (ovly_section->sect_load_addr == addr) {
++ /* Already added */
++ *exists = true;
++ break;
++ }
++ last_sect = ovly_section;
++ ovly_section = ovly_section->next_sect;
++ }
++
++ if (!ovly_section) {
++ /* New section */
++ new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
++ if (new_sect == NULL) {
++ status = -ENOMEM;
++ } else {
++ new_sect->sect_load_addr = addr;
++ new_sect->sect_run_addr = sect_inf->sect_run_addr +
++ (addr - sect_inf->sect_load_addr);
++ new_sect->size = bytes;
++ new_sect->page = sect_inf->type;
++ }
++
++ /* Add to the list */
++ if (!status) {
++ if (*lst == NULL) {
++ /* First in the list */
++ *lst = new_sect;
++ } else {
++ last_sect->next_sect = new_sect;
++ }
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== fake_ovly_write ========
++ */
++static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
++ s32 mtype)
++{
++ return (s32) bytes;
++}
++
++/*
++ * ======== free_sects ========
++ */
++static void free_sects(struct nldr_object *nldr_obj,
++ struct ovly_sect *phase_sects, u16 alloc_num)
++{
++ struct ovly_sect *ovly_section = phase_sects;
++ u16 i = 0;
++ bool ret;
++
++ while (ovly_section && i < alloc_num) {
++ /* 'Deallocate' */
++ /* segid - page not supported yet */
++ /* Reserved memory */
++ ret =
++ rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
++ ovly_section->size, true);
++ DBC_ASSERT(ret);
++ ovly_section = ovly_section->next_sect;
++ i++;
++ }
++}
++
++/*
++ * ======== get_symbol_value ========
++ * Find symbol in library's base image. If not there, check dependent
++ * libraries.
++ */
++static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
++ char *sym_name, struct dbll_sym_val **sym)
++{
++ struct nldr_object *nldr_obj = (struct nldr_object *)handle;
++ struct nldr_nodeobject *nldr_node_obj =
++ (struct nldr_nodeobject *)rmm_handle;
++ struct lib_node *root = (struct lib_node *)parg;
++ u16 i;
++ bool status = false;
++
++ /* check the base image */
++ status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
++ sym_name, sym);
++ if (!status)
++ status =
++ nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
++ sym_name, sym);
++
++ /*
++ * Check in root lib itself. If the library consists of
++ * multiple object files linked together, some symbols in the
++ * library may need to be resolved.
++ */
++ if (!status) {
++ status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
++ sym);
++ if (!status) {
++ status =
++ nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
++ sym_name, sym);
++ }
++ }
++
++ /*
++ * Check in root lib's dependent libraries, but not dependent
++ * libraries' dependents.
++ */
++ if (!status) {
++ for (i = 0; i < root->dep_libs; i++) {
++ status =
++ nldr_obj->ldr_fxns.get_addr_fxn(root->
++ dep_libs_tree
++ [i].lib,
++ sym_name, sym);
++ if (!status) {
++ status =
++ nldr_obj->ldr_fxns.
++ get_c_addr_fxn(root->dep_libs_tree[i].lib,
++ sym_name, sym);
++ }
++ if (status) {
++ /* Symbol found */
++ break;
++ }
++ }
++ }
++ /*
++ * Check in persistent libraries
++ */
++ if (!status) {
++ for (i = 0; i < nldr_node_obj->pers_libs; i++) {
++ status =
++ nldr_obj->ldr_fxns.
++ get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
++ sym_name, sym);
++ if (!status) {
++ status = nldr_obj->ldr_fxns.get_c_addr_fxn
++ (nldr_node_obj->pers_lib_table[i].lib,
++ sym_name, sym);
++ }
++ if (status) {
++ /* Symbol found */
++ break;
++ }
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== load_lib ========
++ * Recursively load library and all its dependent libraries. The library
++ * we're loading is specified by a uuid.
++ */
++static int load_lib(struct nldr_nodeobject *nldr_node_obj,
++ struct lib_node *root, struct dsp_uuid uuid,
++ bool root_prstnt,
++ struct dbll_library_obj **lib_path,
++ enum nldr_phase phase, u16 depth)
++{
++ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
++ u16 nd_libs = 0; /* Number of dependent libraries */
++ u16 np_libs = 0; /* Number of persistent libraries */
++ u16 nd_libs_loaded = 0; /* Number of dep. libraries loaded */
++ u16 i;
++ u32 entry;
++ u32 dw_buf_size = NLDR_MAXPATHLENGTH;
++ dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
++ struct dbll_attrs new_attrs;
++ char *psz_file_name = NULL;
++ struct dsp_uuid *dep_lib_uui_ds = NULL;
++ bool *persistent_dep_libs = NULL;
++ int status = 0;
++ bool lib_status = false;
++ struct lib_node *dep_lib;
++
++ if (depth > MAXDEPTH) {
++ /* Error */
++ DBC_ASSERT(false);
++ }
++ root->lib = NULL;
++ /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
++ psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
++ if (psz_file_name == NULL)
++ status = -ENOMEM;
++
++ if (!status) {
++ /* Get the name of the library */
++ if (depth == 0) {
++ status =
++ dcd_get_library_name(nldr_node_obj->nldr_obj->
++ hdcd_mgr, &uuid, psz_file_name,
++ &dw_buf_size, phase,
++ nldr_node_obj->pf_phase_split);
++ } else {
++ /* Dependent libraries are registered with a phase */
++ status =
++ dcd_get_library_name(nldr_node_obj->nldr_obj->
++ hdcd_mgr, &uuid, psz_file_name,
++ &dw_buf_size, NLDR_NOPHASE,
++ NULL);
++ }
++ }
++ if (!status) {
++ /* Open the library, don't load symbols */
++ status =
++ nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
++ DBLL_NOLOAD, &root->lib);
++ }
++ /* Done with file name */
++ kfree(psz_file_name);
++
++ /* Check to see if library not already loaded */
++ if (!status && root_prstnt) {
++ lib_status =
++ find_in_persistent_lib_array(nldr_node_obj, root->lib);
++ /* Close library */
++ if (lib_status) {
++ nldr_obj->ldr_fxns.close_fxn(root->lib);
++ return 0;
++ }
++ }
++ if (!status) {
++ /* Check for circular dependencies. */
++ for (i = 0; i < depth; i++) {
++ if (root->lib == lib_path[i]) {
++ /* This condition could be checked by a
++ * tool at build time. */
++ status = -EILSEQ;
++ }
++ }
++ }
++ if (!status) {
++ /* Add library to current path in dependency tree */
++ lib_path[depth] = root->lib;
++ depth++;
++ /* Get number of dependent libraries */
++ status =
++ dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
++ &uuid, &nd_libs, &np_libs, phase);
++ }
++ DBC_ASSERT(nd_libs >= np_libs);
++ if (!status) {
++ if (!(*nldr_node_obj->pf_phase_split))
++ np_libs = 0;
++
++ /* nd_libs = #of dependent libraries */
++ root->dep_libs = nd_libs - np_libs;
++ if (nd_libs > 0) {
++ dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
++ nd_libs, GFP_KERNEL);
++ persistent_dep_libs =
++ kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
++ if (!dep_lib_uui_ds || !persistent_dep_libs)
++ status = -ENOMEM;
++
++ if (root->dep_libs > 0) {
++ /* Allocate arrays for dependent lib UUIDs,
++ * lib nodes */
++ root->dep_libs_tree = kzalloc
++ (sizeof(struct lib_node) *
++ (root->dep_libs), GFP_KERNEL);
++ if (!(root->dep_libs_tree))
++ status = -ENOMEM;
++
++ }
++
++ if (!status) {
++ /* Get the dependent library UUIDs */
++ status =
++ dcd_get_dep_libs(nldr_node_obj->
++ nldr_obj->hdcd_mgr, &uuid,
++ nd_libs, dep_lib_uui_ds,
++ persistent_dep_libs,
++ phase);
++ }
++ }
++ }
++
++ /*
++ * Recursively load dependent libraries.
++ */
++ if (!status) {
++ for (i = 0; i < nd_libs; i++) {
++ /* If root library is NOT persistent, and dep library
++ * is, then record it. If root library IS persistent,
++ * the deplib is already included */
++ if (!root_prstnt && persistent_dep_libs[i] &&
++ *nldr_node_obj->pf_phase_split) {
++ if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
++ status = -EILSEQ;
++ break;
++ }
++
++ /* Allocate library outside of phase */
++ dep_lib =
++ &nldr_node_obj->pers_lib_table
++ [nldr_node_obj->pers_libs];
++ } else {
++ if (root_prstnt)
++ persistent_dep_libs[i] = true;
++
++ /* Allocate library within phase */
++ dep_lib = &root->dep_libs_tree[nd_libs_loaded];
++ }
++
++ status = load_lib(nldr_node_obj, dep_lib,
++ dep_lib_uui_ds[i],
++ persistent_dep_libs[i], lib_path,
++ phase, depth);
++
++ if (!status) {
++ if ((status != 0) &&
++ !root_prstnt && persistent_dep_libs[i] &&
++ *nldr_node_obj->pf_phase_split) {
++ (nldr_node_obj->pers_libs)++;
++ } else {
++ if (!persistent_dep_libs[i] ||
++ !(*nldr_node_obj->pf_phase_split)) {
++ nd_libs_loaded++;
++ }
++ }
++ } else {
++ break;
++ }
++ }
++ }
++
++ /* Now we can load the root library */
++ if (!status) {
++ new_attrs = nldr_obj->ldr_attrs;
++ new_attrs.sym_arg = root;
++ new_attrs.rmm_handle = nldr_node_obj;
++ new_attrs.input_params = nldr_node_obj->priv_ref;
++ new_attrs.base_image = false;
++
++ status =
++ nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
++ &entry);
++ }
++
++ /*
++ * In case of failure, unload any dependent libraries that
++ * were loaded, and close the root library.
++ * (Persistent libraries are unloaded from the very top)
++ */
++ if (status) {
++ if (phase != NLDR_EXECUTE) {
++ for (i = 0; i < nldr_node_obj->pers_libs; i++)
++ unload_lib(nldr_node_obj,
++ &nldr_node_obj->pers_lib_table[i]);
++
++ nldr_node_obj->pers_libs = 0;
++ }
++ for (i = 0; i < nd_libs_loaded; i++)
++ unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
++
++ if (root->lib)
++ nldr_obj->ldr_fxns.close_fxn(root->lib);
++
++ }
++
++ /* Going up one node in the dependency tree */
++ depth--;
++
++ kfree(dep_lib_uui_ds);
++ dep_lib_uui_ds = NULL;
++
++ kfree(persistent_dep_libs);
++ persistent_dep_libs = NULL;
++
++ return status;
++}
++
++/*
++ * ======== load_ovly ========
++ */
++static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase)
++{
++ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
++ struct ovly_node *po_node = NULL;
++ struct ovly_sect *phase_sects = NULL;
++ struct ovly_sect *other_sects_list = NULL;
++ u16 i;
++ u16 alloc_num = 0;
++ u16 other_alloc = 0;
++ u16 *ref_count = NULL;
++ u16 *other_ref = NULL;
++ u32 bytes;
++ struct ovly_sect *ovly_section;
++ int status = 0;
++
++ /* Find the node in the table */
++ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
++ if (is_equal_uuid
++ (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
++ /* Found it */
++ po_node = &(nldr_obj->ovly_table[i]);
++ break;
++ }
++ }
++
++ DBC_ASSERT(i < nldr_obj->ovly_nodes);
++
++ if (!po_node) {
++ status = -ENOENT;
++ goto func_end;
++ }
++
++ switch (phase) {
++ case NLDR_CREATE:
++ ref_count = &(po_node->create_ref);
++ other_ref = &(po_node->other_ref);
++ phase_sects = po_node->create_sects_list;
++ other_sects_list = po_node->other_sects_list;
++ break;
++
++ case NLDR_EXECUTE:
++ ref_count = &(po_node->execute_ref);
++ phase_sects = po_node->execute_sects_list;
++ break;
++
++ case NLDR_DELETE:
++ ref_count = &(po_node->delete_ref);
++ phase_sects = po_node->delete_sects_list;
++ break;
++
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++
++ if (ref_count == NULL)
++ goto func_end;
++
++ if (*ref_count != 0)
++ goto func_end;
++
++ /* 'Allocate' memory for overlay sections of this phase */
++ ovly_section = phase_sects;
++ while (ovly_section) {
++ /* allocate *//* page not supported yet */
++ /* reserve *//* align */
++ status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
++ &(ovly_section->sect_run_addr), true);
++ if (!status) {
++ ovly_section = ovly_section->next_sect;
++ alloc_num++;
++ } else {
++ break;
++ }
++ }
++ if (other_ref && *other_ref == 0) {
++ /* 'Allocate' memory for other overlay sections
++ * (create phase) */
++ if (!status) {
++ ovly_section = other_sects_list;
++ while (ovly_section) {
++ /* page not supported *//* align */
++ /* reserve */
++ status =
++ rmm_alloc(nldr_obj->rmm, 0,
++ ovly_section->size, 0,
++ &(ovly_section->sect_run_addr),
++ true);
++ if (!status) {
++ ovly_section = ovly_section->next_sect;
++ other_alloc++;
++ } else {
++ break;
++ }
++ }
++ }
++ }
++ if (*ref_count == 0) {
++ if (!status) {
++ /* Load sections for this phase */
++ ovly_section = phase_sects;
++ while (ovly_section && !status) {
++ bytes =
++ (*nldr_obj->ovly_fxn) (nldr_node_obj->
++ priv_ref,
++ ovly_section->
++ sect_run_addr,
++ ovly_section->
++ sect_load_addr,
++ ovly_section->size,
++ ovly_section->page);
++ if (bytes != ovly_section->size)
++ status = -EPERM;
++
++ ovly_section = ovly_section->next_sect;
++ }
++ }
++ }
++ if (other_ref && *other_ref == 0) {
++ if (!status) {
++ /* Load other sections (create phase) */
++ ovly_section = other_sects_list;
++ while (ovly_section && !status) {
++ bytes =
++ (*nldr_obj->ovly_fxn) (nldr_node_obj->
++ priv_ref,
++ ovly_section->
++ sect_run_addr,
++ ovly_section->
++ sect_load_addr,
++ ovly_section->size,
++ ovly_section->page);
++ if (bytes != ovly_section->size)
++ status = -EPERM;
++
++ ovly_section = ovly_section->next_sect;
++ }
++ }
++ }
++ if (status) {
++ /* 'Deallocate' memory */
++ free_sects(nldr_obj, phase_sects, alloc_num);
++ free_sects(nldr_obj, other_sects_list, other_alloc);
++ }
++func_end:
++ if (!status && (ref_count != NULL)) {
++ *ref_count += 1;
++ if (other_ref)
++ *other_ref += 1;
++
++ }
++
++ return status;
++}
++
++/*
++ * ======== remote_alloc ========
++ */
++static int remote_alloc(void **ref, u16 mem_sect, u32 size,
++ u32 align, u32 *dsp_address,
++ s32 segmnt_id, s32 req,
++ bool reserve)
++{
++ struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
++ struct nldr_object *nldr_obj;
++ struct rmm_target_obj *rmm;
++ u16 mem_phase_bit = MAXFLAGS;
++ u16 segid = 0;
++ u16 i;
++ u16 mem_sect_type;
++ u32 word_size;
++ struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
++ bool mem_load_req = false;
++ int status = -ENOMEM; /* Set to fail */
++ DBC_REQUIRE(hnode);
++ DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
++ mem_sect == DBLL_BSS);
++ nldr_obj = hnode->nldr_obj;
++ rmm = nldr_obj->rmm;
++ /* Convert size to DSP words */
++ word_size =
++ (size + nldr_obj->us_dsp_word_size -
++ 1) / nldr_obj->us_dsp_word_size;
++ /* Modify memory 'align' to account for DSP cache line size */
++ align = find_lcm(GEM_CACHE_LINE_SIZE, align);
++ dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
++ if (segmnt_id != -1) {
++ rmm_addr_obj->segid = segmnt_id;
++ segid = segmnt_id;
++ mem_load_req = req;
++ } else {
++ switch (hnode->phase) {
++ case NLDR_CREATE:
++ mem_phase_bit = CREATEDATAFLAGBIT;
++ break;
++ case NLDR_DELETE:
++ mem_phase_bit = DELETEDATAFLAGBIT;
++ break;
++ case NLDR_EXECUTE:
++ mem_phase_bit = EXECUTEDATAFLAGBIT;
++ break;
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++ if (mem_sect == DBLL_CODE)
++ mem_phase_bit++;
++
++ if (mem_phase_bit < MAXFLAGS)
++ segid = hnode->seg_id[mem_phase_bit];
++
++ /* Determine if there is a memory loading requirement */
++ if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
++ mem_load_req = true;
++
++ }
++ mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;
++
++ /* Find an appropriate segment based on mem_sect */
++ if (segid == NULLID) {
++ /* No memory requirements of preferences */
++ DBC_ASSERT(!mem_load_req);
++ goto func_cont;
++ }
++ if (segid <= MAXSEGID) {
++ DBC_ASSERT(segid < nldr_obj->dload_segs);
++ /* Attempt to allocate from segid first. */
++ rmm_addr_obj->segid = segid;
++ status =
++ rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
++ if (status) {
++ dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
++ __func__, segid);
++ }
++ } else {
++ /* segid > MAXSEGID ==> Internal or external memory */
++ DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
++ /* Check for any internal or external memory segment,
++ * depending on segid. */
++ mem_sect_type |= segid == MEMINTERNALID ?
++ DYNM_INTERNAL : DYNM_EXTERNAL;
++ for (i = 0; i < nldr_obj->dload_segs; i++) {
++ if ((nldr_obj->seg_table[i] & mem_sect_type) !=
++ mem_sect_type)
++ continue;
++
++ status = rmm_alloc(rmm, i, word_size, align,
++ dsp_address, false);
++ if (!status) {
++ /* Save segid for freeing later */
++ rmm_addr_obj->segid = i;
++ break;
++ }
++ }
++ }
++func_cont:
++ /* Haven't found memory yet, attempt to find any segment that works */
++ if (status == -ENOMEM && !mem_load_req) {
++ dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
++ "another\n", __func__);
++ for (i = 0; i < nldr_obj->dload_segs; i++) {
++ /* All bits of mem_sect_type must be set */
++ if ((nldr_obj->seg_table[i] & mem_sect_type) !=
++ mem_sect_type)
++ continue;
++
++ status = rmm_alloc(rmm, i, word_size, align,
++ dsp_address, false);
++ if (!status) {
++ /* Save segid */
++ rmm_addr_obj->segid = i;
++ break;
++ }
++ }
++ }
++
++ return status;
++}
++
++static int remote_free(void **ref, u16 space, u32 dsp_address,
++ u32 size, bool reserve)
++{
++ struct nldr_object *nldr_obj = (struct nldr_object *)ref;
++ struct rmm_target_obj *rmm;
++ u32 word_size;
++ int status = -ENOMEM; /* Set to fail */
++
++ DBC_REQUIRE(nldr_obj);
++
++ rmm = nldr_obj->rmm;
++
++ /* Convert size to DSP words */
++ word_size =
++ (size + nldr_obj->us_dsp_word_size -
++ 1) / nldr_obj->us_dsp_word_size;
++
++ if (rmm_free(rmm, space, dsp_address, word_size, reserve))
++ status = 0;
++
++ return status;
++}
++
++/*
++ * ======== unload_lib ========
++ */
++static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
++ struct lib_node *root)
++{
++ struct dbll_attrs new_attrs;
++ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
++ u16 i;
++
++ DBC_ASSERT(root != NULL);
++
++ /* Unload dependent libraries */
++ for (i = 0; i < root->dep_libs; i++)
++ unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
++
++ root->dep_libs = 0;
++
++ new_attrs = nldr_obj->ldr_attrs;
++ new_attrs.rmm_handle = nldr_obj->rmm;
++ new_attrs.input_params = nldr_node_obj->priv_ref;
++ new_attrs.base_image = false;
++ new_attrs.sym_arg = root;
++
++ if (root->lib) {
++ /* Unload the root library */
++ nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
++ nldr_obj->ldr_fxns.close_fxn(root->lib);
++ }
++
++ /* Free dependent library list */
++ kfree(root->dep_libs_tree);
++ root->dep_libs_tree = NULL;
++}
++
++/*
++ * ======== unload_ovly ========
++ */
++static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
++ enum nldr_phase phase)
++{
++ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
++ struct ovly_node *po_node = NULL;
++ struct ovly_sect *phase_sects = NULL;
++ struct ovly_sect *other_sects_list = NULL;
++ u16 i;
++ u16 alloc_num = 0;
++ u16 other_alloc = 0;
++ u16 *ref_count = NULL;
++ u16 *other_ref = NULL;
++
++ /* Find the node in the table */
++ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
++ if (is_equal_uuid
++ (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
++ /* Found it */
++ po_node = &(nldr_obj->ovly_table[i]);
++ break;
++ }
++ }
++
++ DBC_ASSERT(i < nldr_obj->ovly_nodes);
++
++ if (!po_node)
++ /* TODO: Should we print warning here? */
++ return;
++
++ switch (phase) {
++ case NLDR_CREATE:
++ ref_count = &(po_node->create_ref);
++ phase_sects = po_node->create_sects_list;
++ alloc_num = po_node->create_sects;
++ break;
++ case NLDR_EXECUTE:
++ ref_count = &(po_node->execute_ref);
++ phase_sects = po_node->execute_sects_list;
++ alloc_num = po_node->execute_sects;
++ break;
++ case NLDR_DELETE:
++ ref_count = &(po_node->delete_ref);
++ other_ref = &(po_node->other_ref);
++ phase_sects = po_node->delete_sects_list;
++ /* 'Other' overlay sections are unloaded in the delete phase */
++ other_sects_list = po_node->other_sects_list;
++ alloc_num = po_node->delete_sects;
++ other_alloc = po_node->other_sects;
++ break;
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++ DBC_ASSERT(ref_count && (*ref_count > 0));
++ if (ref_count && (*ref_count > 0)) {
++ *ref_count -= 1;
++ if (other_ref) {
++ DBC_ASSERT(*other_ref > 0);
++ *other_ref -= 1;
++ }
++ }
++
++ if (ref_count && *ref_count == 0) {
++ /* 'Deallocate' memory */
++ free_sects(nldr_obj, phase_sects, alloc_num);
++ }
++ if (other_ref && *other_ref == 0)
++ free_sects(nldr_obj, other_sects_list, other_alloc);
++}
++
++/*
++ * ======== find_in_persistent_lib_array ========
++ */
++static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
++ struct dbll_library_obj *lib)
++{
++ s32 i = 0;
++
++ for (i = 0; i < nldr_node_obj->pers_libs; i++) {
++ if (lib == nldr_node_obj->pers_lib_table[i].lib)
++ return true;
++
++ }
++
++ return false;
++}
++
++/*
++ * ================ Find LCM (Least Common Multiplier ===
++ */
++static u32 find_lcm(u32 a, u32 b)
++{
++ u32 ret;
++
++ ret = a * b / gcd(a, b);
++
++ return ret;
++}
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/**
++ * nldr_find_addr() - Find the closest symbol to the given address based on
++ * dynamic node object.
++ *
++ * @nldr_node: Dynamic node object
++ * @sym_addr: Given address to find the dsp symbol
++ * @offset_range: offset range to look for dsp symbol
++ * @offset_output: Symbol Output address
++ * @sym_name: String with the dsp symbol
++ *
++ * This function finds the node library for a given address and
++ * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
++ */
++int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
++ u32 offset_range, void *offset_output, char *sym_name)
++{
++ int status = 0;
++ bool status1 = false;
++ s32 i = 0;
++ struct lib_node root = { NULL, 0, NULL };
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(offset_output != NULL);
++ DBC_REQUIRE(sym_name != NULL);
++ pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
++ sym_addr, offset_range, (u32) offset_output, sym_name);
++
++ if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
++ switch (nldr_node->phase) {
++ case NLDR_CREATE:
++ root = nldr_node->create_lib;
++ break;
++ case NLDR_EXECUTE:
++ root = nldr_node->execute_lib;
++ break;
++ case NLDR_DELETE:
++ root = nldr_node->delete_lib;
++ break;
++ default:
++ DBC_ASSERT(false);
++ break;
++ }
++ } else {
++ /* for Overlay nodes or non-split Dynamic nodes */
++ root = nldr_node->root;
++ }
++
++ status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
++ offset_range, offset_output, sym_name);
++
++ /* If symbol not found, check dependent libraries */
++ if (!status1)
++ for (i = 0; i < root.dep_libs; i++) {
++ status1 = dbll_find_dsp_symbol(
++ root.dep_libs_tree[i].lib, sym_addr,
++ offset_range, offset_output, sym_name);
++ if (status1)
++ /* Symbol found */
++ break;
++ }
++ /* Check persistent libraries */
++ if (!status1)
++ for (i = 0; i < nldr_node->pers_libs; i++) {
++ status1 = dbll_find_dsp_symbol(
++ nldr_node->pers_lib_table[i].lib, sym_addr,
++ offset_range, offset_output, sym_name);
++ if (status1)
++ /* Symbol found */
++ break;
++ }
++
++ if (!status1) {
++ pr_debug("%s: Address 0x%x not found in range %d.\n",
++ __func__, sym_addr, offset_range);
++ status = -ESPIPE;
++ }
++
++ return status;
++}
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/node.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/node.c 2010-08-18 11:24:23.226051079 +0300
+@@ -0,0 +1,3234 @@
++/*
++ * node.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Node Manager.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/list.h>
++#include <dspbridge/memdefs.h>
++#include <dspbridge/proc.h>
++#include <dspbridge/strm.h>
++#include <dspbridge/sync.h>
++#include <dspbridge/ntfy.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/cmm.h>
++#include <dspbridge/cod.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/msg.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/dbdcd.h>
++#include <dspbridge/disp.h>
++#include <dspbridge/rms_sh.h>
++
++/* ----------------------------------- Link Driver */
++#include <dspbridge/dspdefs.h>
++#include <dspbridge/dspioctl.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/gb.h>
++#include <dspbridge/uuidutil.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/nodepriv.h>
++#include <dspbridge/node.h>
++#include <dspbridge/dmm.h>
++
++/* Static/Dynamic Loader includes */
++#include <dspbridge/dbll.h>
++#include <dspbridge/nldr.h>
++
++#include <dspbridge/drv.h>
++#include <dspbridge/drvdefs.h>
++#include <dspbridge/resourcecleanup.h>
++#include <_tiomap.h>
++
++#include <dspbridge/dspdeh.h>
++
++#define HOSTPREFIX "/host"
++#define PIPEPREFIX "/dbpipe"
++
++#define MAX_INPUTS(h) \
++ ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
++#define MAX_OUTPUTS(h) \
++ ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
++
++#define NODE_GET_PRIORITY(h) ((h)->prio)
++#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
++#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
++
++#define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
++#define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
++
++#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
++#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
++
++#define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
++#define CREATEPHASE 1
++#define EXECUTEPHASE 2
++#define DELETEPHASE 3
++
++/* Define default STRM parameters */
++/*
++ * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
++ * or make defaults configurable.
++ */
++#define DEFAULTBUFSIZE 32
++#define DEFAULTNBUFS 2
++#define DEFAULTSEGID 0
++#define DEFAULTALIGNMENT 0
++#define DEFAULTTIMEOUT 10000
++
++#define RMSQUERYSERVER 0
++#define RMSCONFIGURESERVER 1
++#define RMSCREATENODE 2
++#define RMSEXECUTENODE 3
++#define RMSDELETENODE 4
++#define RMSCHANGENODEPRIORITY 5
++#define RMSREADMEMORY 6
++#define RMSWRITEMEMORY 7
++#define RMSCOPY 8
++#define MAXTIMEOUT 2000
++
++#define NUMRMSFXNS 9
++
++#define PWR_TIMEOUT 500 /* default PWR timeout in msec */
++
++#define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
++
++/*
++ * ======== node_mgr ========
++ */
++struct node_mgr {
++ struct dev_object *hdev_obj; /* Device object */
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++ struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
++ struct disp_object *disp_obj; /* Node dispatcher */
++ struct lst_list *node_list; /* List of all allocated nodes */
++ u32 num_nodes; /* Number of nodes in node_list */
++ u32 num_created; /* Number of nodes *created* on DSP */
++ struct gb_t_map *pipe_map; /* Pipe connection bit map */
++ struct gb_t_map *pipe_done_map; /* Pipes that are half free */
++ struct gb_t_map *chnl_map; /* Channel allocation bit map */
++ struct gb_t_map *dma_chnl_map; /* DMA Channel allocation bit map */
++ struct gb_t_map *zc_chnl_map; /* Zero-Copy Channel alloc bit map */
++ struct ntfy_object *ntfy_obj; /* Manages registered notifications */
++ struct mutex node_mgr_lock; /* For critical sections */
++ u32 ul_fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
++ struct msg_mgr *msg_mgr_obj;
++
++ /* Processor properties needed by Node Dispatcher */
++ u32 ul_num_chnls; /* Total number of channels */
++ u32 ul_chnl_offset; /* Offset of chnl ids rsvd for RMS */
++ u32 ul_chnl_buf_size; /* Buffer size for data to RMS */
++ int proc_family; /* eg, 5000 */
++ int proc_type; /* eg, 5510 */
++ u32 udsp_word_size; /* Size of DSP word on host bytes */
++ u32 udsp_data_mau_size; /* Size of DSP data MAU */
++ u32 udsp_mau_size; /* Size of MAU */
++ s32 min_pri; /* Minimum runtime priority for node */
++ s32 max_pri; /* Maximum runtime priority for node */
++
++ struct strm_mgr *strm_mgr_obj; /* STRM manager */
++
++ /* Loader properties */
++ struct nldr_object *nldr_obj; /* Handle to loader */
++ struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
++ bool loader_init; /* Loader Init function succeeded? */
++};
++
++/*
++ * ======== connecttype ========
++ */
++enum connecttype {
++ NOTCONNECTED = 0,
++ NODECONNECT,
++ HOSTCONNECT,
++ DEVICECONNECT,
++};
++
++/*
++ * ======== stream_chnl ========
++ */
++struct stream_chnl {
++ enum connecttype type; /* Type of stream connection */
++ u32 dev_id; /* pipe or channel id */
++};
++
++/*
++ * ======== node_object ========
++ */
++struct node_object {
++ struct list_head list_elem;
++ struct node_mgr *hnode_mgr; /* The manager of this node */
++ struct proc_object *hprocessor; /* Back pointer to processor */
++ struct dsp_uuid node_uuid; /* Node's ID */
++ s32 prio; /* Node's current priority */
++ u32 utimeout; /* Timeout for blocking NODE calls */
++ u32 heap_size; /* Heap Size */
++ u32 udsp_heap_virt_addr; /* Heap Size */
++ u32 ugpp_heap_virt_addr; /* Heap Size */
++ enum node_type ntype; /* Type of node: message, task, etc */
++ enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
++ u32 num_inputs; /* Current number of inputs */
++ u32 num_outputs; /* Current number of outputs */
++ u32 max_input_index; /* Current max input stream index */
++ u32 max_output_index; /* Current max output stream index */
++ struct stream_chnl *inputs; /* Node's input streams */
++ struct stream_chnl *outputs; /* Node's output streams */
++ struct node_createargs create_args; /* Args for node create func */
++ nodeenv node_env; /* Environment returned by RMS */
++ struct dcd_genericobj dcd_props; /* Node properties from DCD */
++ struct dsp_cbdata *pargs; /* Optional args to pass to node */
++ struct ntfy_object *ntfy_obj; /* Manages registered notifications */
++ char *pstr_dev_name; /* device name, if device node */
++ struct sync_object *sync_done; /* Synchronize node_terminate */
++ s32 exit_status; /* execute function return status */
++
++ /* Information needed for node_get_attr() */
++ void *device_owner; /* If dev node, task that owns it */
++ u32 num_gpp_inputs; /* Current # of from GPP streams */
++ u32 num_gpp_outputs; /* Current # of to GPP streams */
++ /* Current stream connections */
++ struct dsp_streamconnect *stream_connect;
++
++ /* Message queue */
++ struct msg_queue *msg_queue_obj;
++
++ /* These fields used for SM messaging */
++ struct cmm_xlatorobject *xlator; /* Node's SM addr translator */
++
++ /* Handle to pass to dynamic loader */
++ struct nldr_nodeobject *nldr_node_obj;
++ bool loaded; /* Code is (dynamically) loaded */
++ bool phase_split; /* Phases split in many libs or ovly */
++
++};
++
++/* Default buffer attributes */
++static struct dsp_bufferattr node_dfltbufattrs = {
++ 0, /* cb_struct */
++ 1, /* segment_id */
++ 0, /* buf_alignment */
++};
++
++static void delete_node(struct node_object *hnode,
++ struct process_context *pr_ctxt);
++static void delete_node_mgr(struct node_mgr *hnode_mgr);
++static void fill_stream_connect(struct node_object *node1,
++ struct node_object *node2, u32 stream1,
++ u32 stream2);
++static void fill_stream_def(struct node_object *hnode,
++ struct node_strmdef *pstrm_def,
++ struct dsp_strmattr *pattrs);
++static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
++static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
++ u32 phase);
++static int get_node_props(struct dcd_manager *hdcd_mgr,
++ struct node_object *hnode,
++ const struct dsp_uuid *node_uuid,
++ struct dcd_genericobj *dcd_prop);
++static int get_proc_props(struct node_mgr *hnode_mgr,
++ struct dev_object *hdev_obj);
++static int get_rms_fxns(struct node_mgr *hnode_mgr);
++static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
++ u32 ul_num_bytes, u32 mem_space);
++static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
++ u32 ul_num_bytes, u32 mem_space);
++
++static u32 refs; /* module reference count */
++
++/* Dynamic loader functions. */
++static struct node_ldr_fxns nldr_fxns = {
++ nldr_allocate,
++ nldr_create,
++ nldr_delete,
++ nldr_exit,
++ nldr_get_fxn_addr,
++ nldr_init,
++ nldr_load,
++ nldr_unload,
++};
++
++enum node_state node_get_state(void *hnode)
++{
++ struct node_object *pnode = (struct node_object *)hnode;
++ if (!pnode)
++ return -1;
++ else
++ return pnode->node_state;
++}
++
++/*
++ * ======== node_allocate ========
++ * Purpose:
++ * Allocate GPP resources to manage a node on the DSP.
++ */
++int node_allocate(struct proc_object *hprocessor,
++ const struct dsp_uuid *node_uuid,
++ const struct dsp_cbdata *pargs,
++ const struct dsp_nodeattrin *attr_in,
++ struct node_res_object **noderes,
++ struct process_context *pr_ctxt)
++{
++ struct node_mgr *hnode_mgr;
++ struct dev_object *hdev_obj;
++ struct node_object *pnode = NULL;
++ enum node_type node_type = NODE_TASK;
++ struct node_msgargs *pmsg_args;
++ struct node_taskargs *ptask_args;
++ u32 num_streams;
++ struct bridge_drv_interface *intf_fxns;
++ int status = 0;
++ struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
++ u32 proc_id;
++ u32 pul_value;
++ u32 dynext_base;
++ u32 off_set = 0;
++ u32 ul_stack_seg_addr, ul_stack_seg_val;
++ u32 ul_gpp_mem_base;
++ struct cfg_hostres *host_res;
++ struct bridge_dev_context *pbridge_context;
++ u32 mapped_addr = 0;
++ u32 map_attrs = 0x0;
++ struct dsp_processorstate proc_state;
++#ifdef DSP_DMM_DEBUG
++ struct dmm_object *dmm_mgr;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++#endif
++
++ void *node_res;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hprocessor != NULL);
++ DBC_REQUIRE(noderes != NULL);
++ DBC_REQUIRE(node_uuid != NULL);
++
++ *noderes = NULL;
++
++ status = proc_get_processor_id(hprocessor, &proc_id);
++
++ if (proc_id != DSP_UNIT)
++ goto func_end;
++
++ status = proc_get_dev_object(hprocessor, &hdev_obj);
++ if (!status) {
++ status = dev_get_node_manager(hdev_obj, &hnode_mgr);
++ if (hnode_mgr == NULL)
++ status = -EPERM;
++
++ }
++
++ if (status)
++ goto func_end;
++
++ status = dev_get_bridge_context(hdev_obj, &pbridge_context);
++ if (!pbridge_context) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = proc_get_state(hprocessor, &proc_state,
++ sizeof(struct dsp_processorstate));
++ if (status)
++ goto func_end;
++ /* If processor is in error state then don't attempt
++ to send the message */
++ if (proc_state.proc_state == PROC_ERROR) {
++ status = -EPERM;
++ goto func_end;
++ }
++
++ /* Assuming that 0 is not a valid function address */
++ if (hnode_mgr->ul_fxn_addrs[0] == 0) {
++ /* No RMS on target - we currently can't handle this */
++ pr_err("%s: Failed, no RMS in base image\n", __func__);
++ status = -EPERM;
++ } else {
++ /* Validate attr_in fields, if non-NULL */
++ if (attr_in) {
++ /* Check if attr_in->prio is within range */
++ if (attr_in->prio < hnode_mgr->min_pri ||
++ attr_in->prio > hnode_mgr->max_pri)
++ status = -EDOM;
++ }
++ }
++ /* Allocate node object and fill in */
++ if (status)
++ goto func_end;
++
++ pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
++ if (pnode == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ pnode->hnode_mgr = hnode_mgr;
++ /* This critical section protects get_node_props */
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ /* Get dsp_ndbprops from node database */
++ status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
++ &(pnode->dcd_props));
++ if (status)
++ goto func_cont;
++
++ pnode->node_uuid = *node_uuid;
++ pnode->hprocessor = hprocessor;
++ pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
++ pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
++ pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
++
++ /* Currently only C64 DSP builds support Node Dynamic * heaps */
++ /* Allocate memory for node heap */
++ pnode->create_args.asa.task_arg_obj.heap_size = 0;
++ pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
++ pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
++ pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
++ if (!attr_in)
++ goto func_cont;
++
++ /* Check if we have a user allocated node heap */
++ if (!(attr_in->pgpp_virt_addr))
++ goto func_cont;
++
++ /* check for page aligned Heap size */
++ if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
++ pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
++ __func__, attr_in->heap_size);
++ status = -EINVAL;
++ } else {
++ pnode->create_args.asa.task_arg_obj.heap_size =
++ attr_in->heap_size;
++ pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
++ (u32) attr_in->pgpp_virt_addr;
++ }
++ if (status)
++ goto func_cont;
++
++ status = proc_reserve_memory(hprocessor,
++ pnode->create_args.asa.task_arg_obj.
++ heap_size + PAGE_SIZE,
++ (void **)&(pnode->create_args.asa.
++ task_arg_obj.udsp_heap_res_addr),
++ pr_ctxt);
++ if (status) {
++ pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
++ __func__, status);
++ goto func_cont;
++ }
++#ifdef DSP_DMM_DEBUG
++ status = dmm_get_handle(p_proc_object, &dmm_mgr);
++ if (!dmm_mgr) {
++ status = DSP_EHANDLE;
++ goto func_cont;
++ }
++
++ dmm_mem_map_dump(dmm_mgr);
++#endif
++
++ map_attrs |= DSP_MAPLITTLEENDIAN;
++ map_attrs |= DSP_MAPELEMSIZE32;
++ map_attrs |= DSP_MAPVIRTUALADDR;
++ status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
++ pnode->create_args.asa.task_arg_obj.heap_size,
++ (void *)pnode->create_args.asa.task_arg_obj.
++ udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
++ pr_ctxt);
++ if (status)
++ pr_err("%s: Failed to map memory for Heap: 0x%x\n",
++ __func__, status);
++ else
++ pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
++ (u32) mapped_addr;
++
++func_cont:
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++ if (attr_in != NULL) {
++ /* Overrides of NBD properties */
++ pnode->utimeout = attr_in->utimeout;
++ pnode->prio = attr_in->prio;
++ }
++ /* Create object to manage notifications */
++ if (!status) {
++ pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
++ GFP_KERNEL);
++ if (pnode->ntfy_obj)
++ ntfy_init(pnode->ntfy_obj);
++ else
++ status = -ENOMEM;
++ }
++
++ if (!status) {
++ node_type = node_get_type(pnode);
++ /* Allocate dsp_streamconnect array for device, task, and
++ * dais socket nodes. */
++ if (node_type != NODE_MESSAGE) {
++ num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
++ pnode->stream_connect = kzalloc(num_streams *
++ sizeof(struct dsp_streamconnect),
++ GFP_KERNEL);
++ if (num_streams > 0 && pnode->stream_connect == NULL)
++ status = -ENOMEM;
++
++ }
++ if (!status && (node_type == NODE_TASK ||
++ node_type == NODE_DAISSOCKET)) {
++ /* Allocate arrays for maintainig stream connections */
++ pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
++ sizeof(struct stream_chnl), GFP_KERNEL);
++ pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
++ sizeof(struct stream_chnl), GFP_KERNEL);
++ ptask_args = &(pnode->create_args.asa.task_arg_obj);
++ ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
++ sizeof(struct node_strmdef),
++ GFP_KERNEL);
++ ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
++ sizeof(struct node_strmdef),
++ GFP_KERNEL);
++ if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
++ ptask_args->strm_in_def
++ == NULL))
++ || (MAX_OUTPUTS(pnode) > 0
++ && (pnode->outputs == NULL
++ || ptask_args->strm_out_def == NULL)))
++ status = -ENOMEM;
++ }
++ }
++ if (!status && (node_type != NODE_DEVICE)) {
++ /* Create an event that will be posted when RMS_EXIT is
++ * received. */
++ pnode->sync_done = kzalloc(sizeof(struct sync_object),
++ GFP_KERNEL);
++ if (pnode->sync_done)
++ sync_init_event(pnode->sync_done);
++ else
++ status = -ENOMEM;
++
++ if (!status) {
++ /*Get the shared mem mgr for this nodes dev object */
++ status = cmm_get_handle(hprocessor, &hcmm_mgr);
++ if (!status) {
++ /* Allocate a SM addr translator for this node
++ * w/ deflt attr */
++ status = cmm_xlator_create(&pnode->xlator,
++ hcmm_mgr, NULL);
++ }
++ }
++ if (!status) {
++ /* Fill in message args */
++ if ((pargs != NULL) && (pargs->cb_data > 0)) {
++ pmsg_args =
++ &(pnode->create_args.asa.node_msg_args);
++ pmsg_args->pdata = kzalloc(pargs->cb_data,
++ GFP_KERNEL);
++ if (pmsg_args->pdata == NULL) {
++ status = -ENOMEM;
++ } else {
++ pmsg_args->arg_length = pargs->cb_data;
++ memcpy(pmsg_args->pdata,
++ pargs->node_data,
++ pargs->cb_data);
++ }
++ }
++ }
++ }
++
++ if (!status && node_type != NODE_DEVICE) {
++ /* Create a message queue for this node */
++ intf_fxns = hnode_mgr->intf_fxns;
++ status =
++ (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
++ &pnode->msg_queue_obj,
++ 0,
++ pnode->create_args.asa.
++ node_msg_args.max_msgs,
++ pnode);
++ }
++
++ if (!status) {
++ /* Create object for dynamic loading */
++
++ status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
++ (void *)pnode,
++ &pnode->dcd_props.
++ obj_data.node_obj,
++ &pnode->
++ nldr_node_obj,
++ &pnode->phase_split);
++ }
++
++ /* Compare value read from Node Properties and check if it is same as
++ * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
++ * GPP Address, Read the value in that address and override the
++ * stack_seg value in task args */
++ if (!status &&
++ (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
++ stack_seg_name != NULL) {
++ if (strcmp((char *)
++ pnode->dcd_props.obj_data.node_obj.ndb_props.
++ stack_seg_name, STACKSEGLABEL) == 0) {
++ status =
++ hnode_mgr->nldr_fxns.
++ pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
++ &dynext_base);
++ if (status)
++ pr_err("%s: Failed to get addr for DYNEXT_BEG"
++ " status = 0x%x\n", __func__, status);
++
++ status =
++ hnode_mgr->nldr_fxns.
++ pfn_get_fxn_addr(pnode->nldr_node_obj,
++ "L1DSRAM_HEAP", &pul_value);
++
++ if (status)
++ pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
++ " status = 0x%x\n", __func__, status);
++
++ host_res = pbridge_context->resources;
++ if (!host_res)
++ status = -EPERM;
++
++ if (status) {
++ pr_err("%s: Failed to get host resource, status"
++ " = 0x%x\n", __func__, status);
++ goto func_end;
++ }
++
++ ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
++ off_set = pul_value - dynext_base;
++ ul_stack_seg_addr = ul_gpp_mem_base + off_set;
++ ul_stack_seg_val = readl(ul_stack_seg_addr);
++
++ dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
++ " 0x%x\n", __func__, ul_stack_seg_val,
++ ul_stack_seg_addr);
++
++ pnode->create_args.asa.task_arg_obj.stack_seg =
++ ul_stack_seg_val;
++
++ }
++ }
++
++ if (!status) {
++ /* Add the node to the node manager's list of allocated
++ * nodes. */
++ lst_init_elem((struct list_head *)pnode);
++ NODE_SET_STATE(pnode, NODE_ALLOCATED);
++
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
++ ++(hnode_mgr->num_nodes);
++
++ /* Exit critical section */
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++
++ /* Preset this to assume phases are split
++ * (for overlay and dll) */
++ pnode->phase_split = true;
++
++ /* Notify all clients registered for DSP_NODESTATECHANGE. */
++ proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
++ } else {
++ /* Cleanup */
++ if (pnode)
++ delete_node(pnode, pr_ctxt);
++
++ }
++
++ if (!status) {
++ status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
++ if (status) {
++ delete_node(pnode, pr_ctxt);
++ goto func_end;
++ }
++
++ *noderes = (struct node_res_object *)node_res;
++ drv_proc_node_update_heap_status(node_res, true);
++ drv_proc_node_update_status(node_res, true);
++ }
++ DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
++func_end:
++ dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
++ "node_res: %p status: 0x%x\n", __func__, hprocessor,
++ node_uuid, pargs, attr_in, noderes, status);
++ return status;
++}
++
++/*
++ * ======== node_alloc_msg_buf ========
++ * Purpose:
++ * Allocates buffer for zero copy messaging.
++ */
++DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
++ struct dsp_bufferattr *pattr,
++ u8 **pbuffer)
++{
++ struct node_object *pnode = (struct node_object *)hnode;
++ int status = 0;
++ bool va_flag = false;
++ bool set_info;
++ u32 proc_id;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(pbuffer != NULL);
++
++ DBC_REQUIRE(usize > 0);
++
++ if (!pnode)
++ status = -EFAULT;
++ else if (node_get_type(pnode) == NODE_DEVICE)
++ status = -EPERM;
++
++ if (status)
++ goto func_end;
++
++ if (pattr == NULL)
++ pattr = &node_dfltbufattrs; /* set defaults */
++
++ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++ if (proc_id != DSP_UNIT) {
++ DBC_ASSERT(NULL);
++ goto func_end;
++ }
++ /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
++ * virt address, so set this info in this node's translator
++ * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
++ * virtual address from node's translator. */
++ if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
++ (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
++ va_flag = true;
++ set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
++ true : false;
++ /* Clear mask bits */
++ pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
++ /* Set/get this node's translators virtual address base/size */
++ status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
++ pattr->segment_id, set_info);
++ }
++ if (!status && (!va_flag)) {
++ if (pattr->segment_id != 1) {
++ /* Node supports single SM segment only. */
++ status = -EBADR;
++ }
++ /* Arbitrary SM buffer alignment not supported for host side
++ * allocs, but guaranteed for the following alignment
++ * values. */
++ switch (pattr->buf_alignment) {
++ case 0:
++ case 1:
++ case 2:
++ case 4:
++ break;
++ default:
++ /* alignment value not suportted */
++ status = -EPERM;
++ break;
++ }
++ if (!status) {
++ /* allocate physical buffer from seg_id in node's
++ * translator */
++ (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
++ usize);
++ if (*pbuffer == NULL) {
++ pr_err("%s: error - Out of shared memory\n",
++ __func__);
++ status = -ENOMEM;
++ }
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== node_change_priority ========
++ * Purpose:
++ * Change the priority of a node in the allocated state, or that is
++ * currently running or paused on the target.
++ */
++int node_change_priority(struct node_object *hnode, s32 prio)
++{
++ struct node_object *pnode = (struct node_object *)hnode;
++ struct node_mgr *hnode_mgr = NULL;
++ enum node_type node_type;
++ enum node_state state;
++ int status = 0;
++ u32 proc_id;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (!hnode || !hnode->hnode_mgr) {
++ status = -EFAULT;
++ } else {
++ hnode_mgr = hnode->hnode_mgr;
++ node_type = node_get_type(hnode);
++ if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
++ status = -EPERM;
++ else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
++ status = -EDOM;
++ }
++ if (status)
++ goto func_end;
++
++ /* Enter critical section */
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ state = node_get_state(hnode);
++ if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
++ NODE_SET_PRIORITY(hnode, prio);
++ } else {
++ if (state != NODE_RUNNING) {
++ status = -EBADR;
++ goto func_cont;
++ }
++ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++ if (proc_id == DSP_UNIT) {
++ status =
++ disp_node_change_priority(hnode_mgr->disp_obj,
++ hnode,
++ hnode_mgr->ul_fxn_addrs
++ [RMSCHANGENODEPRIORITY],
++ hnode->node_env, prio);
++ }
++ if (status >= 0)
++ NODE_SET_PRIORITY(hnode, prio);
++
++ }
++func_cont:
++ /* Leave critical section */
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++func_end:
++ return status;
++}
++
++/*
++ * ======== node_connect ========
++ * Purpose:
++ * Connect two nodes on the DSP, or a node on the DSP to the GPP.
++ */
++int node_connect(struct node_object *node1, u32 stream1,
++ struct node_object *node2,
++ u32 stream2, struct dsp_strmattr *pattrs,
++ struct dsp_cbdata *conn_param)
++{
++ struct node_mgr *hnode_mgr;
++ char *pstr_dev_name = NULL;
++ enum node_type node1_type = NODE_TASK;
++ enum node_type node2_type = NODE_TASK;
++ struct node_strmdef *pstrm_def;
++ struct node_strmdef *input = NULL;
++ struct node_strmdef *output = NULL;
++ struct node_object *dev_node_obj;
++ struct node_object *hnode;
++ struct stream_chnl *pstream;
++ u32 pipe_id = GB_NOBITS;
++ u32 chnl_id = GB_NOBITS;
++ s8 chnl_mode;
++ u32 dw_length;
++ int status = 0;
++ DBC_REQUIRE(refs > 0);
++
++ if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) ||
++ (node2 != (struct node_object *)DSP_HGPPNODE && !node2))
++ status = -EFAULT;
++
++ if (!status) {
++ /* The two nodes must be on the same processor */
++ if (node1 != (struct node_object *)DSP_HGPPNODE &&
++ node2 != (struct node_object *)DSP_HGPPNODE &&
++ node1->hnode_mgr != node2->hnode_mgr)
++ status = -EPERM;
++ /* Cannot connect a node to itself */
++ if (node1 == node2)
++ status = -EPERM;
++
++ }
++ if (!status) {
++ /* node_get_type() will return NODE_GPP if hnode =
++ * DSP_HGPPNODE. */
++ node1_type = node_get_type(node1);
++ node2_type = node_get_type(node2);
++ /* Check stream indices ranges */
++ if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
++ stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP
++ && node2_type !=
++ NODE_DEVICE
++ && stream2 >=
++ MAX_INPUTS(node2)))
++ status = -EINVAL;
++ }
++ if (!status) {
++ /*
++ * Only the following types of connections are allowed:
++ * task/dais socket < == > task/dais socket
++ * task/dais socket < == > device
++ * task/dais socket < == > GPP
++ *
++ * ie, no message nodes, and at least one task or dais
++ * socket node.
++ */
++ if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
++ (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
++ node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
++ status = -EPERM;
++ }
++ /*
++ * Check stream mode. Default is STRMMODE_PROCCOPY.
++ */
++ if (!status && pattrs) {
++ if (pattrs->strm_mode != STRMMODE_PROCCOPY)
++ status = -EPERM; /* illegal stream mode */
++
++ }
++ if (status)
++ goto func_end;
++
++ if (node1_type != NODE_GPP) {
++ hnode_mgr = node1->hnode_mgr;
++ } else {
++ DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
++ hnode_mgr = node2->hnode_mgr;
++ }
++ /* Enter critical section */
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ /* Nodes must be in the allocated state */
++ if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED)
++ status = -EBADR;
++
++ if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
++ status = -EBADR;
++
++ if (!status) {
++ /* Check that stream indices for task and dais socket nodes
++ * are not already be used. (Device nodes checked later) */
++ if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
++ output =
++ &(node1->create_args.asa.
++ task_arg_obj.strm_out_def[stream1]);
++ if (output->sz_device != NULL)
++ status = -EISCONN;
++
++ }
++ if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
++ input =
++ &(node2->create_args.asa.
++ task_arg_obj.strm_in_def[stream2]);
++ if (input->sz_device != NULL)
++ status = -EISCONN;
++
++ }
++ }
++ /* Connecting two task nodes? */
++ if (!status && ((node1_type == NODE_TASK ||
++ node1_type == NODE_DAISSOCKET)
++ && (node2_type == NODE_TASK
++ || node2_type == NODE_DAISSOCKET))) {
++ /* Find available pipe */
++ pipe_id = gb_findandset(hnode_mgr->pipe_map);
++ if (pipe_id == GB_NOBITS) {
++ status = -ECONNREFUSED;
++ } else {
++ node1->outputs[stream1].type = NODECONNECT;
++ node2->inputs[stream2].type = NODECONNECT;
++ node1->outputs[stream1].dev_id = pipe_id;
++ node2->inputs[stream2].dev_id = pipe_id;
++ output->sz_device = kzalloc(PIPENAMELEN + 1,
++ GFP_KERNEL);
++ input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
++ if (output->sz_device == NULL ||
++ input->sz_device == NULL) {
++ /* Undo the connection */
++ kfree(output->sz_device);
++
++ kfree(input->sz_device);
++
++ output->sz_device = NULL;
++ input->sz_device = NULL;
++ gb_clear(hnode_mgr->pipe_map, pipe_id);
++ status = -ENOMEM;
++ } else {
++ /* Copy "/dbpipe<pipId>" name to device names */
++ sprintf(output->sz_device, "%s%d",
++ PIPEPREFIX, pipe_id);
++ strcpy(input->sz_device, output->sz_device);
++ }
++ }
++ }
++ /* Connecting task node to host? */
++ if (!status && (node1_type == NODE_GPP ||
++ node2_type == NODE_GPP)) {
++ if (node1_type == NODE_GPP) {
++ chnl_mode = CHNL_MODETODSP;
++ } else {
++ DBC_ASSERT(node2_type == NODE_GPP);
++ chnl_mode = CHNL_MODEFROMDSP;
++ }
++ /* Reserve a channel id. We need to put the name "/host<id>"
++ * in the node's create_args, but the host
++ * side channel will not be opened until DSPStream_Open is
++ * called for this node. */
++ if (pattrs) {
++ if (pattrs->strm_mode == STRMMODE_RDMA) {
++ chnl_id =
++ gb_findandset(hnode_mgr->dma_chnl_map);
++ /* dma chans are 2nd transport chnl set
++ * ids(e.g. 16-31) */
++ (chnl_id != GB_NOBITS) ?
++ (chnl_id =
++ chnl_id +
++ hnode_mgr->ul_num_chnls) : chnl_id;
++ } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
++ chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
++ /* zero-copy chans are 3nd transport set
++ * (e.g. 32-47) */
++ (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
++ (2 *
++ hnode_mgr->
++ ul_num_chnls))
++ : chnl_id;
++ } else { /* must be PROCCOPY */
++ DBC_ASSERT(pattrs->strm_mode ==
++ STRMMODE_PROCCOPY);
++ chnl_id = gb_findandset(hnode_mgr->chnl_map);
++ /* e.g. 0-15 */
++ }
++ } else {
++ /* default to PROCCOPY */
++ chnl_id = gb_findandset(hnode_mgr->chnl_map);
++ }
++ if (chnl_id == GB_NOBITS) {
++ status = -ECONNREFUSED;
++ goto func_cont2;
++ }
++ pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
++ if (pstr_dev_name != NULL)
++ goto func_cont2;
++
++ if (pattrs) {
++ if (pattrs->strm_mode == STRMMODE_RDMA) {
++ gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
++ hnode_mgr->ul_num_chnls);
++ } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
++ gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
++ (2 * hnode_mgr->ul_num_chnls));
++ } else {
++ DBC_ASSERT(pattrs->strm_mode ==
++ STRMMODE_PROCCOPY);
++ gb_clear(hnode_mgr->chnl_map, chnl_id);
++ }
++ } else {
++ gb_clear(hnode_mgr->chnl_map, chnl_id);
++ }
++ status = -ENOMEM;
++func_cont2:
++ if (!status) {
++ if (node1 == (struct node_object *)DSP_HGPPNODE) {
++ node2->inputs[stream2].type = HOSTCONNECT;
++ node2->inputs[stream2].dev_id = chnl_id;
++ input->sz_device = pstr_dev_name;
++ } else {
++ node1->outputs[stream1].type = HOSTCONNECT;
++ node1->outputs[stream1].dev_id = chnl_id;
++ output->sz_device = pstr_dev_name;
++ }
++ sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
++ }
++ }
++ /* Connecting task node to device node? */
++ if (!status && ((node1_type == NODE_DEVICE) ||
++ (node2_type == NODE_DEVICE))) {
++ if (node2_type == NODE_DEVICE) {
++ /* node1 == > device */
++ dev_node_obj = node2;
++ hnode = node1;
++ pstream = &(node1->outputs[stream1]);
++ pstrm_def = output;
++ } else {
++ /* device == > node2 */
++ dev_node_obj = node1;
++ hnode = node2;
++ pstream = &(node2->inputs[stream2]);
++ pstrm_def = input;
++ }
++ /* Set up create args */
++ pstream->type = DEVICECONNECT;
++ dw_length = strlen(dev_node_obj->pstr_dev_name);
++ if (conn_param != NULL) {
++ pstrm_def->sz_device = kzalloc(dw_length + 1 +
++ conn_param->cb_data,
++ GFP_KERNEL);
++ } else {
++ pstrm_def->sz_device = kzalloc(dw_length + 1,
++ GFP_KERNEL);
++ }
++ if (pstrm_def->sz_device == NULL) {
++ status = -ENOMEM;
++ } else {
++ /* Copy device name */
++ strncpy(pstrm_def->sz_device,
++ dev_node_obj->pstr_dev_name, dw_length);
++ if (conn_param != NULL) {
++ strncat(pstrm_def->sz_device,
++ (char *)conn_param->node_data,
++ (u32) conn_param->cb_data);
++ }
++ dev_node_obj->device_owner = hnode;
++ }
++ }
++ if (!status) {
++ /* Fill in create args */
++ if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
++ node1->create_args.asa.task_arg_obj.num_outputs++;
++ fill_stream_def(node1, output, pattrs);
++ }
++ if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
++ node2->create_args.asa.task_arg_obj.num_inputs++;
++ fill_stream_def(node2, input, pattrs);
++ }
++ /* Update node1 and node2 stream_connect */
++ if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
++ node1->num_outputs++;
++ if (stream1 > node1->max_output_index)
++ node1->max_output_index = stream1;
++
++ }
++ if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
++ node2->num_inputs++;
++ if (stream2 > node2->max_input_index)
++ node2->max_input_index = stream2;
++
++ }
++ fill_stream_connect(node1, node2, stream1, stream2);
++ }
++ /* end of sync_enter_cs */
++ /* Exit critical section */
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++func_end:
++ dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
++ "pattrs: %p status: 0x%x\n", __func__, node1,
++ stream1, node2, stream2, pattrs, status);
++ return status;
++}
++
++/*
++ * ======== node_create ========
++ * Purpose:
++ * Create a node on the DSP by remotely calling the node's create function.
++ */
++int node_create(struct node_object *hnode)
++{
++ struct node_object *pnode = (struct node_object *)hnode;
++ struct node_mgr *hnode_mgr;
++ struct bridge_drv_interface *intf_fxns;
++ u32 ul_create_fxn;
++ enum node_type node_type;
++ int status = 0;
++ int status1 = 0;
++ struct dsp_cbdata cb_data;
++ u32 proc_id = 255;
++ struct dsp_processorstate proc_state;
++ struct proc_object *hprocessor;
++#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++#endif
++
++ DBC_REQUIRE(refs > 0);
++ if (!pnode) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ hprocessor = hnode->hprocessor;
++ status = proc_get_state(hprocessor, &proc_state,
++ sizeof(struct dsp_processorstate));
++ if (status)
++ goto func_end;
++ /* If processor is in error state then don't attempt to create
++ new node */
++ if (proc_state.proc_state == PROC_ERROR) {
++ status = -EPERM;
++ goto func_end;
++ }
++ /* create struct dsp_cbdata struct for PWR calls */
++ cb_data.cb_data = PWR_TIMEOUT;
++ node_type = node_get_type(hnode);
++ hnode_mgr = hnode->hnode_mgr;
++ intf_fxns = hnode_mgr->intf_fxns;
++ /* Get access to node dispatcher */
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ /* Check node state */
++ if (node_get_state(hnode) != NODE_ALLOCATED)
++ status = -EBADR;
++
++ if (!status)
++ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++
++ if (status)
++ goto func_cont2;
++
++ if (proc_id != DSP_UNIT)
++ goto func_cont2;
++
++ /* Make sure streams are properly connected */
++ if ((hnode->num_inputs && hnode->max_input_index >
++ hnode->num_inputs - 1) ||
++ (hnode->num_outputs && hnode->max_output_index >
++ hnode->num_outputs - 1))
++ status = -ENOTCONN;
++
++ if (!status) {
++ /* If node's create function is not loaded, load it */
++ /* Boost the OPP level to max level that DSP can be requested */
++#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
++ if (pdata->cpu_set_freq)
++ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
++#endif
++ status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
++ NLDR_CREATE);
++ /* Get address of node's create function */
++ if (!status) {
++ hnode->loaded = true;
++ if (node_type != NODE_DEVICE) {
++ status = get_fxn_address(hnode, &ul_create_fxn,
++ CREATEPHASE);
++ }
++ } else {
++ pr_err("%s: failed to load create code: 0x%x\n",
++ __func__, status);
++ }
++ /* Request the lowest OPP level */
++#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
++ if (pdata->cpu_set_freq)
++ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
++#endif
++ /* Get address of iAlg functions, if socket node */
++ if (!status) {
++ if (node_type == NODE_DAISSOCKET) {
++ status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
++ (hnode->nldr_node_obj,
++ hnode->dcd_props.obj_data.node_obj.
++ pstr_i_alg_name,
++ &hnode->create_args.asa.
++ task_arg_obj.ul_dais_arg);
++ }
++ }
++ }
++ if (!status) {
++ if (node_type != NODE_DEVICE) {
++ status = disp_node_create(hnode_mgr->disp_obj, hnode,
++ hnode_mgr->ul_fxn_addrs
++ [RMSCREATENODE],
++ ul_create_fxn,
++ &(hnode->create_args),
++ &(hnode->node_env));
++ if (status >= 0) {
++ /* Set the message queue id to the node env
++ * pointer */
++ intf_fxns = hnode_mgr->intf_fxns;
++ (*intf_fxns->pfn_msg_set_queue_id) (hnode->
++ msg_queue_obj,
++ hnode->node_env);
++ }
++ }
++ }
++ /* Phase II/Overlays: Create, execute, delete phases possibly in
++ * different files/sections. */
++ if (hnode->loaded && hnode->phase_split) {
++ /* If create code was dynamically loaded, we can now unload
++ * it. */
++ status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
++ NLDR_CREATE);
++ hnode->loaded = false;
++ }
++ if (status1)
++ pr_err("%s: Failed to unload create code: 0x%x\n",
++ __func__, status1);
++func_cont2:
++ /* Update node state and node manager state */
++ if (status >= 0) {
++ NODE_SET_STATE(hnode, NODE_CREATED);
++ hnode_mgr->num_created++;
++ goto func_cont;
++ }
++ if (status != -EBADR) {
++ /* Put back in NODE_ALLOCATED state if error occurred */
++ NODE_SET_STATE(hnode, NODE_ALLOCATED);
++ }
++func_cont:
++ /* Free access to node dispatcher */
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++func_end:
++ if (status >= 0) {
++ proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
++ ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
++ }
++
++ dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
++ hnode, status);
++ return status;
++}
++
++/*
++ * ======== node_create_mgr ========
++ * Purpose:
++ * Create a NODE Manager object.
++ */
++int node_create_mgr(struct node_mgr **node_man,
++ struct dev_object *hdev_obj)
++{
++ u32 i;
++ struct node_mgr *node_mgr_obj = NULL;
++ struct disp_attr disp_attr_obj;
++ char *sz_zl_file = "";
++ struct nldr_attrs nldr_attrs_obj;
++ int status = 0;
++ u8 dev_type;
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(node_man != NULL);
++ DBC_REQUIRE(hdev_obj != NULL);
++
++ *node_man = NULL;
++ /* Allocate Node manager object */
++ node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
++ if (node_mgr_obj) {
++ node_mgr_obj->hdev_obj = hdev_obj;
++ node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ node_mgr_obj->pipe_map = gb_create(MAXPIPES);
++ node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
++ if (node_mgr_obj->node_list == NULL
++ || node_mgr_obj->pipe_map == NULL
++ || node_mgr_obj->pipe_done_map == NULL) {
++ status = -ENOMEM;
++ } else {
++ INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
++ node_mgr_obj->ntfy_obj = kmalloc(
++ sizeof(struct ntfy_object), GFP_KERNEL);
++ if (node_mgr_obj->ntfy_obj)
++ ntfy_init(node_mgr_obj->ntfy_obj);
++ else
++ status = -ENOMEM;
++ }
++ node_mgr_obj->num_created = 0;
++ } else {
++ status = -ENOMEM;
++ }
++ /* get devNodeType */
++ if (!status)
++ status = dev_get_dev_type(hdev_obj, &dev_type);
++
++ /* Create the DCD Manager */
++ if (!status) {
++ status =
++ dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
++ if (!status)
++ status = get_proc_props(node_mgr_obj, hdev_obj);
++
++ }
++ /* Create NODE Dispatcher */
++ if (!status) {
++ disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
++ disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
++ disp_attr_obj.proc_family = node_mgr_obj->proc_family;
++ disp_attr_obj.proc_type = node_mgr_obj->proc_type;
++ status =
++ disp_create(&node_mgr_obj->disp_obj, hdev_obj,
++ &disp_attr_obj);
++ }
++ /* Create a STRM Manager */
++ if (!status)
++ status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
++
++ if (!status) {
++ dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
++ /* Get msg_ctrl queue manager */
++ dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
++ mutex_init(&node_mgr_obj->node_mgr_lock);
++ node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
++ /* dma chnl map. ul_num_chnls is # per transport */
++ node_mgr_obj->dma_chnl_map =
++ gb_create(node_mgr_obj->ul_num_chnls);
++ node_mgr_obj->zc_chnl_map =
++ gb_create(node_mgr_obj->ul_num_chnls);
++ if ((node_mgr_obj->chnl_map == NULL)
++ || (node_mgr_obj->dma_chnl_map == NULL)
++ || (node_mgr_obj->zc_chnl_map == NULL)) {
++ status = -ENOMEM;
++ } else {
++ /* Block out reserved channels */
++ for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
++ gb_set(node_mgr_obj->chnl_map, i);
++
++ /* Block out channels reserved for RMS */
++ gb_set(node_mgr_obj->chnl_map,
++ node_mgr_obj->ul_chnl_offset);
++ gb_set(node_mgr_obj->chnl_map,
++ node_mgr_obj->ul_chnl_offset + 1);
++ }
++ }
++ if (!status) {
++ /* NO RM Server on the IVA */
++ if (dev_type != IVA_UNIT) {
++ /* Get addresses of any RMS functions loaded */
++ status = get_rms_fxns(node_mgr_obj);
++ }
++ }
++
++ /* Get loader functions and create loader */
++ if (!status)
++ node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
++
++ if (!status) {
++ nldr_attrs_obj.pfn_ovly = ovly;
++ nldr_attrs_obj.pfn_write = mem_write;
++ nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
++ nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
++ node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
++ status =
++ node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
++ hdev_obj,
++ &nldr_attrs_obj);
++ }
++ if (!status)
++ *node_man = node_mgr_obj;
++ else
++ delete_node_mgr(node_mgr_obj);
++
++ DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
++
++ return status;
++}
++
++/*
++ * ======== node_delete ========
++ * Purpose:
++ * Delete a node on the DSP by remotely calling the node's delete function.
++ * Loads the node's delete function if necessary. Free GPP side resources
++ * after node's delete function returns.
++ */
++int node_delete(struct node_res_object *noderes,
++ struct process_context *pr_ctxt)
++{
++ struct node_object *pnode = noderes->hnode;
++ struct node_mgr *hnode_mgr;
++ struct proc_object *hprocessor;
++ struct disp_object *disp_obj;
++ u32 ul_delete_fxn;
++ enum node_type node_type;
++ enum node_state state;
++ int status = 0;
++ int status1 = 0;
++ struct dsp_cbdata cb_data;
++ u32 proc_id;
++ struct bridge_drv_interface *intf_fxns;
++
++ void *node_res = noderes;
++
++ struct dsp_processorstate proc_state;
++ DBC_REQUIRE(refs > 0);
++
++ if (!pnode) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /* create struct dsp_cbdata struct for PWR call */
++ cb_data.cb_data = PWR_TIMEOUT;
++ hnode_mgr = pnode->hnode_mgr;
++ hprocessor = pnode->hprocessor;
++ disp_obj = hnode_mgr->disp_obj;
++ node_type = node_get_type(pnode);
++ intf_fxns = hnode_mgr->intf_fxns;
++ /* Enter critical section */
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ state = node_get_state(pnode);
++ /* Execute delete phase code for non-device node in all cases
++ * except when the node was only allocated. Delete phase must be
++ * executed even if create phase was executed, but failed.
++ * If the node environment pointer is non-NULL, the delete phase
++ * code must be executed. */
++ if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
++ node_type != NODE_DEVICE) {
++ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++ if (status)
++ goto func_cont1;
++
++ if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
++ /* If node has terminated, execute phase code will
++ * have already been unloaded in node_on_exit(). If the
++ * node is PAUSED, the execute phase is loaded, and it
++ * is now ok to unload it. If the node is running, we
++ * will unload the execute phase only after deleting
++ * the node. */
++ if (state == NODE_PAUSED && pnode->loaded &&
++ pnode->phase_split) {
++ /* Ok to unload execute code as long as node
++ * is not * running */
++ status1 =
++ hnode_mgr->nldr_fxns.
++ pfn_unload(pnode->nldr_node_obj,
++ NLDR_EXECUTE);
++ pnode->loaded = false;
++ NODE_SET_STATE(pnode, NODE_DONE);
++ }
++ /* Load delete phase code if not loaded or if haven't
++ * * unloaded EXECUTE phase */
++ if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
++ pnode->phase_split) {
++ status =
++ hnode_mgr->nldr_fxns.
++ pfn_load(pnode->nldr_node_obj, NLDR_DELETE);
++ if (!status)
++ pnode->loaded = true;
++ else
++ pr_err("%s: fail - load delete code:"
++ " 0x%x\n", __func__, status);
++ }
++ }
++func_cont1:
++ if (!status) {
++ /* Unblock a thread trying to terminate the node */
++ (void)sync_set_event(pnode->sync_done);
++ if (proc_id == DSP_UNIT) {
++ /* ul_delete_fxn = address of node's delete
++ * function */
++ status = get_fxn_address(pnode, &ul_delete_fxn,
++ DELETEPHASE);
++ } else if (proc_id == IVA_UNIT)
++ ul_delete_fxn = (u32) pnode->node_env;
++ if (!status) {
++ status = proc_get_state(hprocessor,
++ &proc_state,
++ sizeof(struct
++ dsp_processorstate));
++ if (proc_state.proc_state != PROC_ERROR) {
++ status =
++ disp_node_delete(disp_obj, pnode,
++ hnode_mgr->
++ ul_fxn_addrs
++ [RMSDELETENODE],
++ ul_delete_fxn,
++ pnode->node_env);
++ } else
++ NODE_SET_STATE(pnode, NODE_DONE);
++
++ /* Unload execute, if not unloaded, and delete
++ * function */
++ if (state == NODE_RUNNING &&
++ pnode->phase_split) {
++ status1 =
++ hnode_mgr->nldr_fxns.
++ pfn_unload(pnode->nldr_node_obj,
++ NLDR_EXECUTE);
++ }
++ if (status1)
++ pr_err("%s: fail - unload execute code:"
++ " 0x%x\n", __func__, status1);
++
++ status1 =
++ hnode_mgr->nldr_fxns.pfn_unload(pnode->
++ nldr_node_obj,
++ NLDR_DELETE);
++ pnode->loaded = false;
++ if (status1)
++ pr_err("%s: fail - unload delete code: "
++ "0x%x\n", __func__, status1);
++ }
++ }
++ }
++ /* Free host side resources even if a failure occurred */
++ /* Remove node from hnode_mgr->node_list */
++ lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode);
++ hnode_mgr->num_nodes--;
++ /* Decrement count of nodes created on DSP */
++ if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
++ (pnode->node_env != (u32) NULL)))
++ hnode_mgr->num_created--;
++ /* Free host-side resources allocated by node_create()
++ * delete_node() fails if SM buffers not freed by client! */
++ drv_proc_node_update_status(node_res, false);
++ delete_node(pnode, pr_ctxt);
++
++ /*
++ * Release all Node resources and its context
++ */
++ idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
++ kfree(node_res);
++
++ /* Exit critical section */
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++ proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
++func_end:
++ dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
++ return status;
++}
++
++/*
++ * ======== node_delete_mgr ========
++ * Purpose:
++ *      Delete the NODE Manager.
++ * Parameters:
++ *      hnode_mgr:  Node manager created by node_create_mgr(); may be NULL.
++ * Returns:
++ *      0:          Success; all manager resources released.
++ *      -EFAULT:    hnode_mgr was NULL.
++ */
++int node_delete_mgr(struct node_mgr *hnode_mgr)
++{
++	int status = 0;
++
++	DBC_REQUIRE(refs > 0);
++
++	/* Thin wrapper: all the real teardown lives in delete_node_mgr(). */
++	if (hnode_mgr)
++		delete_node_mgr(hnode_mgr);
++	else
++		status = -EFAULT;
++
++	return status;
++}
++
++/*
++ * ======== node_enum_nodes ========
++ * Purpose:
++ *      Enumerate currently allocated nodes.
++ * Parameters:
++ *      hnode_mgr:      Node manager; -EFAULT if NULL.
++ *      node_tab:       Caller-supplied array that receives node handles.
++ *      node_tab_size:  Capacity of node_tab, in entries.
++ *      pu_num_nodes:   Out: number of handles copied (0 on overflow).
++ *      pu_allocated:   Out: total number of nodes currently allocated.
++ * Returns:
++ *      0, -EFAULT (NULL manager), or -EINVAL (node_tab too small; the
++ *      required size is still reported via *pu_allocated).
++ */
++int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
++		    u32 node_tab_size, u32 *pu_num_nodes,
++		    u32 *pu_allocated)
++{
++	struct node_object *hnode;
++	u32 i;
++	int status = 0;
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
++	DBC_REQUIRE(pu_num_nodes != NULL);
++	DBC_REQUIRE(pu_allocated != NULL);
++
++	if (!hnode_mgr) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	/* Enter critical section */
++	mutex_lock(&hnode_mgr->node_mgr_lock);
++
++	if (hnode_mgr->num_nodes > node_tab_size) {
++		/* Table too small: tell the caller how big it must be,
++		 * copy nothing. */
++		*pu_allocated = hnode_mgr->num_nodes;
++		*pu_num_nodes = 0;
++		status = -EINVAL;
++	} else {
++		/* Walk hnode_mgr->node_list, copying num_nodes handles. */
++		hnode = (struct node_object *)lst_first(hnode_mgr->
++							node_list);
++		for (i = 0; i < hnode_mgr->num_nodes; i++) {
++			DBC_ASSERT(hnode);
++			node_tab[i] = hnode;
++			hnode = (struct node_object *)lst_next
++				(hnode_mgr->node_list,
++				(struct list_head *)hnode);
++		}
++		*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
++	}
++	/* end of sync_enter_cs */
++	/* Exit critical section */
++	mutex_unlock(&hnode_mgr->node_mgr_lock);
++func_end:
++	return status;
++}
++
++/*
++ * ======== node_exit ========
++ * Purpose:
++ *      Discontinue usage of NODE module.  Decrements the module-level
++ *      reference count established by node_init(); no resources are
++ *      freed here.
++ */
++void node_exit(void)
++{
++	DBC_REQUIRE(refs > 0);
++
++	refs--;
++
++	DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== node_free_msg_buf ========
++ * Purpose:
++ *      Frees the message buffer previously allocated for this node.
++ * Parameters:
++ *      hnode:      Node the buffer belongs to; -EFAULT if NULL.
++ *      pbuffer:    Client virtual address of the buffer to free.
++ *      pattr:      Optional buffer attributes; defaults are used if NULL.
++ * Returns:
++ *      0 on success, -EFAULT (NULL node), error from
++ *      proc_get_processor_id(), or the result of cmm_xlator_free_buf().
++ * Note:
++ *      Fix vs. original: proc_id was tested before the status of
++ *      proc_get_processor_id() was checked, reading an uninitialized
++ *      local when that call failed.  The status check now comes first.
++ */
++int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
++		      struct dsp_bufferattr *pattr)
++{
++	struct node_object *pnode = (struct node_object *)hnode;
++	int status = 0;
++	u32 proc_id;
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(pbuffer != NULL);
++	DBC_REQUIRE(pnode != NULL);
++	DBC_REQUIRE(pnode->xlator != NULL);
++
++	if (!hnode) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	/* Only use proc_id once we know the query succeeded. */
++	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++	if (status)
++		goto func_end;
++
++	if (proc_id == DSP_UNIT) {
++		if (pattr == NULL) {
++			/* set defaults */
++			pattr = &node_dfltbufattrs;
++		}
++		/* Node supports single SM segment only */
++		if (pattr->segment_id != 1)
++			status = -EBADR;
++
++		/* pbuffer is clients Va.
++		 * FIXME: as in the original, a -EBADR set above is
++		 * overwritten by the free result; kept for bug-for-bug
++		 * compatibility with the shipped driver. */
++		status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
++	} else {
++		DBC_ASSERT(NULL);	/* BUG */
++	}
++func_end:
++	return status;
++}
++
++/*
++ * ======== node_get_attr ========
++ * Purpose:
++ *      Copy the current attributes of the specified node into a dsp_nodeattr
++ *      structure.
++ * Parameters:
++ *      hnode:      Node handle; -EFAULT if NULL.
++ *      pattr:      Out: attribute structure filled by this call.
++ *      attr_size:  Size of *pattr; DBC-checked against dsp_nodeattr.
++ * Returns:
++ *      0 on success, -EFAULT if hnode is NULL.
++ */
++int node_get_attr(struct node_object *hnode,
++		  struct dsp_nodeattr *pattr, u32 attr_size)
++{
++	struct node_mgr *hnode_mgr;
++	int status = 0;
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(pattr != NULL);
++	DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
++
++	if (!hnode) {
++		status = -EFAULT;
++	} else {
++		/* NOTE(review): hnode->hnode_mgr is not NULL-checked here,
++		 * unlike node_run()/node_terminate() — assumed valid for any
++		 * live node; confirm against allocation path. */
++		hnode_mgr = hnode->hnode_mgr;
++		/* Enter hnode_mgr critical section (since we're accessing
++		 * data that could be changed by node_change_priority() and
++		 * node_connect(). */
++		mutex_lock(&hnode_mgr->node_mgr_lock);
++		pattr->cb_struct = sizeof(struct dsp_nodeattr);
++		/* dsp_nodeattrin */
++		pattr->in_node_attr_in.cb_struct =
++				 sizeof(struct dsp_nodeattrin);
++		pattr->in_node_attr_in.prio = hnode->prio;
++		pattr->in_node_attr_in.utimeout = hnode->utimeout;
++		pattr->in_node_attr_in.heap_size =
++			hnode->create_args.asa.task_arg_obj.heap_size;
++		pattr->in_node_attr_in.pgpp_virt_addr = (void *)
++			hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
++		pattr->node_attr_inputs = hnode->num_gpp_inputs;
++		pattr->node_attr_outputs = hnode->num_gpp_outputs;
++		/* dsp_nodeinfo */
++		get_node_info(hnode, &(pattr->node_info));
++		/* end of sync_enter_cs */
++		/* Exit critical section */
++		mutex_unlock(&hnode_mgr->node_mgr_lock);
++	}
++	return status;
++}
++
++/*
++ * ======== node_get_channel_id ========
++ * Purpose:
++ *      Get the channel index reserved for a stream connection between the
++ *      host and a node.
++ * Parameters:
++ *      hnode:    Task or DAIS-socket node; -EFAULT if NULL, -EPERM for
++ *                other node types.
++ *      dir:      DSP_TONODE (input side) or DSP_FROMNODE (output side).
++ *      index:    Stream index on the chosen side.
++ *      chan_id:  Out: device/channel id, set only when 0 is returned.
++ * Returns:
++ *      0 when the indexed stream exists and is a HOSTCONNECT; -EINVAL
++ *      (default) when it is not, -EFAULT/-EPERM as above.
++ */
++int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
++			u32 *chan_id)
++{
++	enum node_type node_type;
++	/* -EINVAL is the default: only a matching HOSTCONNECT clears it. */
++	int status = -EINVAL;
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
++	DBC_REQUIRE(chan_id != NULL);
++
++	if (!hnode) {
++		status = -EFAULT;
++		return status;
++	}
++	node_type = node_get_type(hnode);
++	if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
++		status = -EPERM;
++		return status;
++	}
++	if (dir == DSP_TONODE) {
++		if (index < MAX_INPUTS(hnode)) {
++			if (hnode->inputs[index].type == HOSTCONNECT) {
++				*chan_id = hnode->inputs[index].dev_id;
++				status = 0;
++			}
++		}
++	} else {
++		DBC_ASSERT(dir == DSP_FROMNODE);
++		if (index < MAX_OUTPUTS(hnode)) {
++			if (hnode->outputs[index].type == HOSTCONNECT) {
++				*chan_id = hnode->outputs[index].dev_id;
++				status = 0;
++			}
++		}
++	}
++	return status;
++}
++
++/*
++ * ======== node_get_message ========
++ * Purpose:
++ *      Retrieve a message from a node on the DSP.  Blocks (up to utimeout)
++ *      in the bridge pfn_msg_get call until a message is available.  If the
++ *      message carries a shared-memory descriptor (DSP_RMSBUFDESC), its
++ *      DSP-side address and size are translated in place to a GPP virtual
++ *      address and byte count.
++ * Parameters:
++ *      hnode:    Message, task, or DAIS-socket node; -EFAULT if NULL,
++ *                -EPERM for other node types.
++ *      message:  Out: received (and possibly translated) message.
++ *      utimeout: Timeout handed to the bridge msg-get function.
++ * Returns:
++ *      0, -EFAULT, -EPERM (bad type or processor in PROC_ERROR), -ESRCH
++ *      (SM address translation failed), or the bridge msg-get status.
++ */
++int node_get_message(struct node_object *hnode,
++		     struct dsp_msg *message, u32 utimeout)
++{
++	struct node_mgr *hnode_mgr;
++	enum node_type node_type;
++	struct bridge_drv_interface *intf_fxns;
++	int status = 0;
++	void *tmp_buf;
++	struct dsp_processorstate proc_state;
++	struct proc_object *hprocessor;
++
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(message != NULL);
++
++	if (!hnode) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	hprocessor = hnode->hprocessor;
++	status = proc_get_state(hprocessor, &proc_state,
++				sizeof(struct dsp_processorstate));
++	if (status)
++		goto func_end;
++	/* If processor is in error state then don't attempt to get the
++	   message */
++	if (proc_state.proc_state == PROC_ERROR) {
++		status = -EPERM;
++		goto func_end;
++	}
++	hnode_mgr = hnode->hnode_mgr;
++	node_type = node_get_type(hnode);
++	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
++	    node_type != NODE_DAISSOCKET) {
++		status = -EPERM;
++		goto func_end;
++	}
++	/* This function will block unless a message is available. Since
++	 * DSPNode_RegisterNotify() allows notification when a message
++	 * is available, the system can be designed so that
++	 * DSPNode_GetMessage() is only called when a message is
++	 * available. */
++	intf_fxns = hnode_mgr->intf_fxns;
++	status =
++	    (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
++	/* Check if message contains SM descriptor */
++	if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
++		goto func_end;
++
++	/* Translate DSP byte addr to GPP Va. */
++	/* dw_arg1 arrives in DSP words; scale to bytes before translating. */
++	tmp_buf = cmm_xlator_translate(hnode->xlator,
++				       (void *)(message->dw_arg1 *
++						hnode->hnode_mgr->
++						udsp_word_size), CMM_DSPPA2PA);
++	if (tmp_buf != NULL) {
++		/* now convert this GPP Pa to Va */
++		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
++					       CMM_PA2VA);
++		if (tmp_buf != NULL) {
++			/* Adjust SM size in msg */
++			message->dw_arg1 = (u32) tmp_buf;
++			message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
++		} else {
++			status = -ESRCH;
++		}
++	} else {
++		status = -ESRCH;
++	}
++func_end:
++	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
++		hnode, message, utimeout);
++	return status;
++}
++
++/*
++ * ======== node_get_nldr_obj ========
++ * Purpose:
++ *      Return the node loader object owned by the node manager.
++ * Parameters:
++ *      hnode_mgr:    Node manager; -EFAULT if NULL.
++ *      nldr_ovlyobj: Out: receives hnode_mgr->nldr_obj on success.
++ * Returns:
++ *      0 or -EFAULT.
++ */
++int node_get_nldr_obj(struct node_mgr *hnode_mgr,
++		      struct nldr_object **nldr_ovlyobj)
++{
++	int status = 0;
++	struct node_mgr *node_mgr_obj = hnode_mgr;
++	DBC_REQUIRE(nldr_ovlyobj != NULL);
++
++	if (!hnode_mgr)
++		status = -EFAULT;
++	else
++		*nldr_ovlyobj = node_mgr_obj->nldr_obj;
++
++	/* NOTE(review): on failure this ENSURE expects *nldr_ovlyobj to
++	 * already be NULL, i.e. the caller is assumed to have pre-cleared
++	 * it; this function does not clear it on the error path. */
++	DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
++	return status;
++}
++
++/*
++ * ======== node_get_strm_mgr ========
++ * Purpose:
++ *      Returns the Stream manager.
++ * Parameters:
++ *      hnode:    Node handle; -EFAULT if NULL.
++ *      strm_man: Out: receives the manager's strm_mgr_obj.  Not
++ *                NULL-checked here — caller must pass a valid pointer.
++ * Returns:
++ *      0 or -EFAULT.
++ */
++int node_get_strm_mgr(struct node_object *hnode,
++		      struct strm_mgr **strm_man)
++{
++	int status = 0;
++
++	DBC_REQUIRE(refs > 0);
++
++	if (!hnode)
++		status = -EFAULT;
++	else
++		*strm_man = hnode->hnode_mgr->strm_mgr_obj;
++
++	return status;
++}
++
++/*
++ * ======== node_get_load_type ========
++ * Purpose:
++ *      Return the loader phase type recorded in the node's DCD properties.
++ * Returns:
++ *      hnode->dcd_props.obj_data.node_obj.us_load_type, or -1 (forced into
++ *      the enum) when hnode is NULL — callers must treat -1 as invalid.
++ */
++enum nldr_loadtype node_get_load_type(struct node_object *hnode)
++{
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(hnode);
++	if (!hnode) {
++		dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
++		return -1;
++	} else {
++		return hnode->dcd_props.obj_data.node_obj.us_load_type;
++	}
++}
++
++/*
++ * ======== node_get_timeout ========
++ * Purpose:
++ *      Returns the timeout value for this node.
++ * Returns:
++ *      hnode->utimeout, or 0 when hnode is NULL (0 is therefore
++ *      indistinguishable from a genuine zero timeout).
++ */
++u32 node_get_timeout(struct node_object *hnode)
++{
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(hnode);
++	if (!hnode) {
++		dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
++		return 0;
++	} else {
++		return hnode->utimeout;
++	}
++}
++
++/*
++ * ======== node_get_type ========
++ * Purpose:
++ *      Returns the node type.  The sentinel handle DSP_HGPPNODE maps to
++ *      NODE_GPP; a NULL handle yields -1 forced into the enum.
++ */
++enum node_type node_get_type(struct node_object *hnode)
++{
++	enum node_type node_type;
++
++	if (hnode == (struct node_object *)DSP_HGPPNODE)
++		node_type = NODE_GPP;
++	else {
++		if (!hnode)
++			node_type = -1;
++		else
++			node_type = hnode->ntype;
++	}
++	return node_type;
++}
++
++/*
++ * ======== node_init ========
++ * Purpose:
++ *      Initialize the NODE module.  Only bumps the module reference count
++ *      (paired with node_exit()); cannot fail.
++ */
++bool node_init(void)
++{
++	DBC_REQUIRE(refs >= 0);
++
++	refs++;
++
++	return true;
++}
++
++/*
++ * ======== node_on_exit ========
++ * Purpose:
++ *      Gets called when RMS_EXIT is received for a node.  Marks the node
++ *      NODE_DONE, records its exit status, unloads its execute phase if
++ *      still loaded, wakes any node_terminate() waiter, and notifies
++ *      registered clients of the state change.
++ */
++void node_on_exit(struct node_object *hnode, s32 node_status)
++{
++	if (!hnode)
++		return;
++
++	/* Set node state to done */
++	NODE_SET_STATE(hnode, NODE_DONE);
++	hnode->exit_status = node_status;
++	if (hnode->loaded && hnode->phase_split) {
++		/* Execute-phase code is no longer needed once the node
++		 * has exited; unload result is intentionally ignored. */
++		(void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
++							     nldr_node_obj,
++							     NLDR_EXECUTE);
++		hnode->loaded = false;
++	}
++	/* Unblock call to node_terminate */
++	(void)sync_set_event(hnode->sync_done);
++	/* Notify clients */
++	proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
++	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
++}
++
++/*
++ * ======== node_pause ========
++ * Purpose:
++ *      Suspend execution of a node currently running on the DSP by asking
++ *      the dispatcher to drop its priority to NODE_SUSPENDEDPRI.
++ * Parameters:
++ *      hnode:  Task or DAIS-socket node in NODE_RUNNING state.
++ * Returns:
++ *      0, -EFAULT (NULL node), -EPERM (wrong type / processor in error),
++ *      -ENOSYS (IVA not supported), -EBADR (node not running), or a
++ *      dispatcher error.
++ */
++int node_pause(struct node_object *hnode)
++{
++	struct node_object *pnode = (struct node_object *)hnode;
++	enum node_type node_type;
++	enum node_state state;
++	struct node_mgr *hnode_mgr;
++	int status = 0;
++	u32 proc_id;
++	struct dsp_processorstate proc_state;
++	struct proc_object *hprocessor;
++
++	DBC_REQUIRE(refs > 0);
++
++	if (!hnode) {
++		status = -EFAULT;
++	} else {
++		node_type = node_get_type(hnode);
++		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
++			status = -EPERM;
++	}
++	if (status)
++		goto func_end;
++
++	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++
++	/* FIXME: proc_id is tested before status is checked; if the call
++	 * above failed, this reads an uninitialized local (same pattern
++	 * as node_free_msg_buf). */
++	if (proc_id == IVA_UNIT)
++		status = -ENOSYS;
++
++	if (!status) {
++		hnode_mgr = hnode->hnode_mgr;
++
++		/* Enter critical section */
++		mutex_lock(&hnode_mgr->node_mgr_lock);
++		state = node_get_state(hnode);
++		/* Check node state */
++		if (state != NODE_RUNNING)
++			status = -EBADR;
++
++		if (status)
++			goto func_cont;
++		hprocessor = hnode->hprocessor;
++		status = proc_get_state(hprocessor, &proc_state,
++					sizeof(struct dsp_processorstate));
++		if (status)
++			goto func_cont;
++		/* If processor is in error state then don't attempt
++		   to send the message */
++		if (proc_state.proc_state == PROC_ERROR) {
++			status = -EPERM;
++			goto func_cont;
++		}
++
++		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
++			hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
++			hnode->node_env, NODE_SUSPENDEDPRI);
++
++		/* Update state */
++		if (status >= 0)
++			NODE_SET_STATE(hnode, NODE_PAUSED);
++
++func_cont:
++		/* End of sync_enter_cs */
++		/* Leave critical section */
++		mutex_unlock(&hnode_mgr->node_mgr_lock);
++		if (status >= 0) {
++			proc_notify_clients(hnode->hprocessor,
++					    DSP_NODESTATECHANGE);
++			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
++		}
++	}
++func_end:
++	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
++	return status;
++}
++
++/*
++ * ======== node_put_message ========
++ * Purpose:
++ *      Send a message to a message node, task node, or XDAIS socket node. This
++ *      function will block until the message stream can accommodate the
++ *      message, or a timeout occurs.  If the message carries a shared-memory
++ *      descriptor (DSP_RMSBUFDESC), its GPP virtual address and byte size
++ *      are translated to a DSP address and DSP-word (MAU) count in a local
++ *      copy before sending; the caller's message is never modified.
++ * Returns:
++ *      0, -EFAULT, -EPERM (bad node type, PROC_ERROR, or zero DSP word
++ *      size), -EBADR (node terminating/done), -ESRCH (address translation
++ *      failed), or the bridge msg-put status.
++ */
++int node_put_message(struct node_object *hnode,
++		     const struct dsp_msg *pmsg, u32 utimeout)
++{
++	struct node_mgr *hnode_mgr = NULL;
++	enum node_type node_type;
++	struct bridge_drv_interface *intf_fxns;
++	enum node_state state;
++	int status = 0;
++	void *tmp_buf;
++	struct dsp_msg new_msg;
++	struct dsp_processorstate proc_state;
++	struct proc_object *hprocessor;
++
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(pmsg != NULL);
++
++	if (!hnode) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	hprocessor = hnode->hprocessor;
++	status = proc_get_state(hprocessor, &proc_state,
++				sizeof(struct dsp_processorstate));
++	if (status)
++		goto func_end;
++	/* If processor is in bad state then don't attempt sending the
++	   message */
++	if (proc_state.proc_state == PROC_ERROR) {
++		status = -EPERM;
++		goto func_end;
++	}
++	hnode_mgr = hnode->hnode_mgr;
++	node_type = node_get_type(hnode);
++	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
++	    node_type != NODE_DAISSOCKET)
++		status = -EPERM;
++
++	if (!status) {
++		/* Check node state. Can't send messages to a node after
++		 * we've sent the RMS_EXIT command. There is still the
++		 * possibility that node_terminate can be called after we've
++		 * checked the state. Could add another SYNC object to
++		 * prevent this (can't use node_mgr_lock, since we don't
++		 * want to block other NODE functions). However, the node may
++		 * still exit on its own, before this message is sent. */
++		mutex_lock(&hnode_mgr->node_mgr_lock);
++		state = node_get_state(hnode);
++		if (state == NODE_TERMINATING || state == NODE_DONE)
++			status = -EBADR;
++
++		/* end of sync_enter_cs */
++		mutex_unlock(&hnode_mgr->node_mgr_lock);
++	}
++	if (status)
++		goto func_end;
++
++	/* assign pmsg values to new msg */
++	new_msg = *pmsg;
++	/* Now, check if message contains a SM buffer descriptor */
++	if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
++		/* Translate GPP Va to DSP physical buf Ptr. */
++		tmp_buf = cmm_xlator_translate(hnode->xlator,
++					       (void *)new_msg.dw_arg1,
++					       CMM_VA2DSPPA);
++		if (tmp_buf != NULL) {
++			/* got translation, convert to MAUs in msg */
++			if (hnode->hnode_mgr->udsp_word_size != 0) {
++				new_msg.dw_arg1 =
++				    (u32) tmp_buf /
++				    hnode->hnode_mgr->udsp_word_size;
++				/* MAUs */
++				new_msg.dw_arg2 /= hnode->hnode_mgr->
++				    udsp_word_size;
++			} else {
++				pr_err("%s: udsp_word_size is zero!\n",
++				       __func__);
++				status = -EPERM;	/* bad DSPWordSize */
++			}
++		} else {	/* failed to translate buffer address */
++			status = -ESRCH;
++		}
++	}
++	if (!status) {
++		intf_fxns = hnode_mgr->intf_fxns;
++		status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
++						    &new_msg, utimeout);
++	}
++func_end:
++	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
++		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
++	return status;
++}
++
++/*
++ * ======== node_register_notify ========
++ * Purpose:
++ *      Register to be notified on specific events for this node.  State
++ *      changes go through the node's ntfy object; message-ready events are
++ *      forwarded to the bridge msg_ctrl layer.  Only one of the two event
++ *      classes may be registered per call (documented limitation below).
++ * Returns:
++ *      0, -EFAULT (NULL node), -EINVAL (bad mask/type or both events
++ *      requested at once), or the underlying registration status.
++ */
++int node_register_notify(struct node_object *hnode, u32 event_mask,
++			 u32 notify_type,
++			 struct dsp_notification *hnotification)
++{
++	struct bridge_drv_interface *intf_fxns;
++	int status = 0;
++
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(hnotification != NULL);
++
++	if (!hnode) {
++		status = -EFAULT;
++	} else {
++		/* Check if event mask is a valid node related event */
++		if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
++			status = -EINVAL;
++
++		/* Check if notify type is valid */
++		if (notify_type != DSP_SIGNALEVENT)
++			status = -EINVAL;
++
++		/* Only one Notification can be registered at a
++		 * time - Limitation */
++		if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
++			status = -EINVAL;
++	}
++	if (!status) {
++		if (event_mask == DSP_NODESTATECHANGE) {
++			status = ntfy_register(hnode->ntfy_obj, hnotification,
++					       event_mask & DSP_NODESTATECHANGE,
++					       notify_type);
++		} else {
++			/* Send Message part of event mask to msg_ctrl */
++			intf_fxns = hnode->hnode_mgr->intf_fxns;
++			status = (*intf_fxns->pfn_msg_register_notify)
++			    (hnode->msg_queue_obj,
++			     event_mask & DSP_NODEMESSAGEREADY, notify_type,
++			     hnotification);
++		}
++
++	}
++	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
++		"hnotification: %p status 0x%x\n", __func__, hnode,
++		event_mask, notify_type, hnotification, status);
++	return status;
++}
++
++/*
++ * ======== node_run ========
++ * Purpose:
++ *      Start execution of a node's execute phase, or resume execution of a node
++ *      that has been suspended (via NODE_NodePause()) on the DSP. Load the
++ *      node's execute function if necessary.
++ * Returns:
++ *      0, -EFAULT (NULL node/manager), -EPERM (device node or processor in
++ *      error state), -EBADR (node neither created nor paused), or a loader/
++ *      dispatcher error.  On success the node state becomes NODE_RUNNING
++ *      and clients are notified.
++ */
++int node_run(struct node_object *hnode)
++{
++	struct node_object *pnode = (struct node_object *)hnode;
++	struct node_mgr *hnode_mgr;
++	enum node_type node_type;
++	enum node_state state;
++	u32 ul_execute_fxn;
++	u32 ul_fxn_addr;
++	int status = 0;
++	u32 proc_id;
++	struct bridge_drv_interface *intf_fxns;
++	struct dsp_processorstate proc_state;
++	struct proc_object *hprocessor;
++
++	DBC_REQUIRE(refs > 0);
++
++	if (!hnode) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	hprocessor = hnode->hprocessor;
++	status = proc_get_state(hprocessor, &proc_state,
++				sizeof(struct dsp_processorstate));
++	if (status)
++		goto func_end;
++	/* If processor is in error state then don't attempt to run the node */
++	if (proc_state.proc_state == PROC_ERROR) {
++		status = -EPERM;
++		goto func_end;
++	}
++	node_type = node_get_type(hnode);
++	if (node_type == NODE_DEVICE)
++		status = -EPERM;
++	if (status)
++		goto func_end;
++
++	hnode_mgr = hnode->hnode_mgr;
++	if (!hnode_mgr) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	intf_fxns = hnode_mgr->intf_fxns;
++	/* Enter critical section */
++	mutex_lock(&hnode_mgr->node_mgr_lock);
++
++	state = node_get_state(hnode);
++	if (state != NODE_CREATED && state != NODE_PAUSED)
++		status = -EBADR;
++
++	if (!status)
++		status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++
++	if (status)
++		goto func_cont1;
++
++	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
++		goto func_cont1;
++
++	if (state == NODE_CREATED) {
++		/* If node's execute function is not loaded, load it */
++		if (!(hnode->loaded) && hnode->phase_split) {
++			status =
++			    hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
++							  NLDR_EXECUTE);
++			if (!status) {
++				hnode->loaded = true;
++			} else {
++				pr_err("%s: fail - load execute code: 0x%x\n",
++				       __func__, status);
++			}
++		}
++		if (!status) {
++			/* Get address of node's execute function */
++			if (proc_id == IVA_UNIT)
++				ul_execute_fxn = (u32) hnode->node_env;
++			else {
++				status = get_fxn_address(hnode, &ul_execute_fxn,
++							 EXECUTEPHASE);
++			}
++		}
++		if (!status) {
++			ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
++			status =
++			    disp_node_run(hnode_mgr->disp_obj, hnode,
++					  ul_fxn_addr, ul_execute_fxn,
++					  hnode->node_env);
++		}
++	} else if (state == NODE_PAUSED) {
++		/* Resume: restore the node's own priority via the RMS
++		 * change-priority entry point. */
++		ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
++		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
++						   ul_fxn_addr, hnode->node_env,
++						   NODE_GET_PRIORITY(hnode));
++	} else {
++		/* We should never get here */
++		DBC_ASSERT(false);
++	}
++func_cont1:
++	/* Update node state. */
++	if (status >= 0)
++		NODE_SET_STATE(hnode, NODE_RUNNING);
++	else			/* Set state back to previous value */
++		NODE_SET_STATE(hnode, state);
++	/*End of sync_enter_cs */
++	/* Exit critical section */
++	mutex_unlock(&hnode_mgr->node_mgr_lock);
++	if (status >= 0) {
++		proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
++		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
++	}
++func_end:
++	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
++	return status;
++}
++
++/*
++ * ======== node_terminate ========
++ * Purpose:
++ *      Signal a node running on the DSP that it should exit its execute phase
++ *      function.  Sends RMS_EXIT and waits on sync_done (posted by
++ *      node_on_exit() or node_delete()); if that times out, escalates to
++ *      RMS_KILLTASK, and if the node still does not exit, simulates a DSP
++ *      exception via the DEH manager.
++ * Parameters:
++ *      hnode:   Task or DAIS-socket node; -EFAULT if NULL or unattached.
++ *      pstatus: Out: node's exit status (also set if the node had already
++ *               finished on its own, alongside -EBADR).
++ * Returns:
++ *      0, -EFAULT, -EPERM (wrong type or processor in error state),
++ *      -EBADR (node not running), or a messaging/sync error.
++ */
++int node_terminate(struct node_object *hnode, int *pstatus)
++{
++	struct node_object *pnode = (struct node_object *)hnode;
++	struct node_mgr *hnode_mgr = NULL;
++	enum node_type node_type;
++	struct bridge_drv_interface *intf_fxns;
++	enum node_state state;
++	struct dsp_msg msg, killmsg;
++	int status = 0;
++	u32 proc_id, kill_time_out;
++	struct deh_mgr *hdeh_mgr;
++	struct dsp_processorstate proc_state;
++
++	DBC_REQUIRE(refs > 0);
++	DBC_REQUIRE(pstatus != NULL);
++
++	if (!hnode || !hnode->hnode_mgr) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	if (pnode->hprocessor == NULL) {
++		status = -EFAULT;
++		goto func_end;
++	}
++	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
++
++	if (!status) {
++		hnode_mgr = hnode->hnode_mgr;
++		node_type = node_get_type(hnode);
++		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
++			status = -EPERM;
++	}
++	if (!status) {
++		/* Check node state */
++		mutex_lock(&hnode_mgr->node_mgr_lock);
++		state = node_get_state(hnode);
++		if (state != NODE_RUNNING) {
++			status = -EBADR;
++			/* Set the exit status if node terminated on
++			 * its own. */
++			if (state == NODE_DONE)
++				*pstatus = hnode->exit_status;
++
++		} else {
++			NODE_SET_STATE(hnode, NODE_TERMINATING);
++		}
++		/* end of sync_enter_cs */
++		mutex_unlock(&hnode_mgr->node_mgr_lock);
++	}
++	if (!status) {
++		/*
++		 * Send exit message. Do not change state to NODE_DONE
++		 * here. That will be done in callback.
++		 */
++		status = proc_get_state(pnode->hprocessor, &proc_state,
++					sizeof(struct dsp_processorstate));
++		if (status)
++			goto func_cont;
++		/* If processor is in error state then don't attempt to send
++		 * A kill task command */
++		if (proc_state.proc_state == PROC_ERROR) {
++			status = -EPERM;
++			goto func_cont;
++		}
++
++		msg.dw_cmd = RMS_EXIT;
++		msg.dw_arg1 = hnode->node_env;
++		killmsg.dw_cmd = RMS_KILLTASK;
++		killmsg.dw_arg1 = hnode->node_env;
++		intf_fxns = hnode_mgr->intf_fxns;
++
++		/* Total kill budget is 2x the node timeout, capped at
++		 * MAXTIMEOUT; each of the two waits below gets half. */
++		if (hnode->utimeout > MAXTIMEOUT)
++			kill_time_out = MAXTIMEOUT;
++		else
++			kill_time_out = (hnode->utimeout) * 2;
++
++		status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
++						    hnode->utimeout);
++		if (status)
++			goto func_cont;
++
++		/*
++		 * Wait on synchronization object that will be
++		 * posted in the callback on receiving RMS_EXIT
++		 * message, or by node_delete. Check for valid hnode,
++		 * in case posted by node_delete().
++		 */
++		status = sync_wait_on_event(hnode->sync_done,
++					    kill_time_out / 2);
++		/* NOTE(review): compares against positive ETIME — assumes
++		 * sync_wait_on_event() returns ETIME (not -ETIME) on
++		 * timeout; confirm against the sync layer. */
++		if (status != ETIME)
++			goto func_cont;
++
++		status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
++						&killmsg, hnode->utimeout);
++		if (status)
++			goto func_cont;
++		status = sync_wait_on_event(hnode->sync_done,
++					    kill_time_out / 2);
++		if (status) {
++			/*
++			 * Here it goes the part of the simulation of
++			 * the DSP exception.
++			 */
++			dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
++			if (!hdeh_mgr)
++				goto func_cont;
++
++			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
++		}
++	}
++func_cont:
++	if (!status) {
++		/* Enter CS before getting exit status, in case node was
++		 * deleted. */
++		mutex_lock(&hnode_mgr->node_mgr_lock);
++		/* Make sure node wasn't deleted while we blocked */
++		/* NOTE(review): this check is dead code — hnode is a local
++		 * pointer and cannot become NULL; it does not actually
++		 * detect deletion by node_delete(). */
++		if (!hnode) {
++			status = -EPERM;
++		} else {
++			*pstatus = hnode->exit_status;
++			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
++				__func__, hnode, hnode->node_env, status);
++		}
++		mutex_unlock(&hnode_mgr->node_mgr_lock);
++	}			/*End of sync_enter_cs */
++func_end:
++	return status;
++}
++
++/*
++ * ======== delete_node ========
++ * Purpose:
++ *      Free GPP resources allocated in node_allocate() or node_connect():
++ *      message queue, sync object, stream tables, task arguments, mapped/
++ *      reserved DSP heap, DCD phase-function strings, SM translator, loader
++ *      node, and finally the node object itself.
++ * Parameters:
++ *      hnode:   Node to tear down; no-op if NULL or unattached.
++ *      pr_ctxt: Process context used for heap unmap/unreserve; may be NULL
++ *               when called from delete_node_mgr() (only safe then if no
++ *               heap was reserved — see udsp_heap_res_addr branch).
++ */
++static void delete_node(struct node_object *hnode,
++			struct process_context *pr_ctxt)
++{
++	struct node_mgr *hnode_mgr;
++	struct cmm_xlatorobject *xlator;
++	struct bridge_drv_interface *intf_fxns;
++	u32 i;
++	enum node_type node_type;
++	struct stream_chnl stream;
++	struct node_msgargs node_msg_args;
++	struct node_taskargs task_arg_obj;
++#ifdef DSP_DMM_DEBUG
++	struct dmm_object *dmm_mgr;
++	struct proc_object *p_proc_object =
++	    (struct proc_object *)hnode->hprocessor;
++#endif
++	int status;
++	if (!hnode)
++		goto func_end;
++	hnode_mgr = hnode->hnode_mgr;
++	if (!hnode_mgr)
++		goto func_end;
++	xlator = hnode->xlator;
++	node_type = node_get_type(hnode);
++	if (node_type != NODE_DEVICE) {
++		node_msg_args = hnode->create_args.asa.node_msg_args;
++		kfree(node_msg_args.pdata);
++
++		/* Free msg_ctrl queue */
++		if (hnode->msg_queue_obj) {
++			intf_fxns = hnode_mgr->intf_fxns;
++			(*intf_fxns->pfn_msg_delete_queue) (hnode->
++							    msg_queue_obj);
++			hnode->msg_queue_obj = NULL;
++		}
++
++		kfree(hnode->sync_done);
++
++		/* Free all stream info */
++		if (hnode->inputs) {
++			for (i = 0; i < MAX_INPUTS(hnode); i++) {
++				stream = hnode->inputs[i];
++				free_stream(hnode_mgr, stream);
++			}
++			kfree(hnode->inputs);
++			hnode->inputs = NULL;
++		}
++		if (hnode->outputs) {
++			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
++				stream = hnode->outputs[i];
++				free_stream(hnode_mgr, stream);
++			}
++			kfree(hnode->outputs);
++			hnode->outputs = NULL;
++		}
++		task_arg_obj = hnode->create_args.asa.task_arg_obj;
++		if (task_arg_obj.strm_in_def) {
++			for (i = 0; i < MAX_INPUTS(hnode); i++) {
++				kfree(task_arg_obj.strm_in_def[i].sz_device);
++				task_arg_obj.strm_in_def[i].sz_device = NULL;
++			}
++			kfree(task_arg_obj.strm_in_def);
++			task_arg_obj.strm_in_def = NULL;
++		}
++		if (task_arg_obj.strm_out_def) {
++			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
++				kfree(task_arg_obj.strm_out_def[i].sz_device);
++				task_arg_obj.strm_out_def[i].sz_device = NULL;
++			}
++			kfree(task_arg_obj.strm_out_def);
++			task_arg_obj.strm_out_def = NULL;
++		}
++		if (task_arg_obj.udsp_heap_res_addr) {
++			/* Unmap then unreserve the node's DSP heap; both
++			 * statuses are intentionally best-effort here. */
++			status = proc_un_map(hnode->hprocessor, (void *)
++					     task_arg_obj.udsp_heap_addr,
++					     pr_ctxt);
++
++			status = proc_un_reserve_memory(hnode->hprocessor,
++							(void *)
++							task_arg_obj.
++							udsp_heap_res_addr,
++							pr_ctxt);
++#ifdef DSP_DMM_DEBUG
++			status = dmm_get_handle(p_proc_object, &dmm_mgr);
++			if (dmm_mgr)
++				dmm_mem_map_dump(dmm_mgr);
++			else
++				status = DSP_EHANDLE;
++#endif
++		}
++	}
++	if (node_type != NODE_MESSAGE) {
++		kfree(hnode->stream_connect);
++		hnode->stream_connect = NULL;
++	}
++	kfree(hnode->pstr_dev_name);
++	hnode->pstr_dev_name = NULL;
++
++	if (hnode->ntfy_obj) {
++		ntfy_delete(hnode->ntfy_obj);
++		kfree(hnode->ntfy_obj);
++		hnode->ntfy_obj = NULL;
++	}
++
++	/* These were allocated in dcd_get_object_def (via node_allocate) */
++	kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
++	hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;
++
++	kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
++	hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;
++
++	kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
++	hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;
++
++	kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
++	hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;
++
++	/* Free all SM address translator resources */
++	if (xlator) {
++		(void)cmm_xlator_delete(xlator, true);	/* force free */
++		xlator = NULL;
++	}
++
++	kfree(hnode->nldr_node_obj);
++	hnode->nldr_node_obj = NULL;
++	hnode->hnode_mgr = NULL;
++	kfree(hnode);
++	hnode = NULL;
++func_end:
++	return;
++}
++
++/*
++ * ======== delete_node_mgr ========
++ * Purpose:
++ *      Frees the node manager: DCD manager, any nodes still on the node
++ *      list, lock, notify object, channel/pipe bitmaps, dispatcher, stream
++ *      manager, and the node loader, then the manager itself.
++ * Parameters:
++ *      hnode_mgr:  Manager to destroy; no-op if NULL.
++ */
++static void delete_node_mgr(struct node_mgr *hnode_mgr)
++{
++	struct node_object *hnode;
++
++	if (hnode_mgr) {
++		/* Free resources */
++		if (hnode_mgr->hdcd_mgr)
++			dcd_destroy_manager(hnode_mgr->hdcd_mgr);
++
++		/* Remove any elements remaining in lists */
++		if (hnode_mgr->node_list) {
++			/* Leaked/undeleted nodes are torn down here with a
++			 * NULL process context (see delete_node()). */
++			while ((hnode = (struct node_object *)
++				lst_get_head(hnode_mgr->node_list)))
++				delete_node(hnode, NULL);
++
++			DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
++			kfree(hnode_mgr->node_list);
++		}
++		mutex_destroy(&hnode_mgr->node_mgr_lock);
++		if (hnode_mgr->ntfy_obj) {
++			ntfy_delete(hnode_mgr->ntfy_obj);
++			kfree(hnode_mgr->ntfy_obj);
++		}
++
++		if (hnode_mgr->pipe_map)
++			gb_delete(hnode_mgr->pipe_map);
++
++		if (hnode_mgr->pipe_done_map)
++			gb_delete(hnode_mgr->pipe_done_map);
++
++		if (hnode_mgr->chnl_map)
++			gb_delete(hnode_mgr->chnl_map);
++
++		if (hnode_mgr->dma_chnl_map)
++			gb_delete(hnode_mgr->dma_chnl_map);
++
++		if (hnode_mgr->zc_chnl_map)
++			gb_delete(hnode_mgr->zc_chnl_map);
++
++		if (hnode_mgr->disp_obj)
++			disp_delete(hnode_mgr->disp_obj);
++
++		if (hnode_mgr->strm_mgr_obj)
++			strm_delete(hnode_mgr->strm_mgr_obj);
++
++		/* Delete the loader */
++		if (hnode_mgr->nldr_obj)
++			hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);
++
++		if (hnode_mgr->loader_init)
++			hnode_mgr->nldr_fxns.pfn_exit();
++
++		kfree(hnode_mgr);
++	}
++}
++
++/*
++ * ======== fill_stream_connect ========
++ * Purpose:
++ *      Fills stream information for a new connection between node1's output
++ *      stream1 and node2's input stream2.  Either endpoint may be the GPP
++ *      sentinel (DSP_HGPPNODE) or a device node; device nodes get no
++ *      dsp_streamconnect record.  Assumes the caller has already bumped
++ *      each node's num_inputs/num_outputs for this connection (the record
++ *      index is num_inputs + num_outputs - 1).
++ */
++static void fill_stream_connect(struct node_object *node1,
++				struct node_object *node2,
++				u32 stream1, u32 stream2)
++{
++	u32 strm_index;
++	struct dsp_streamconnect *strm1 = NULL;
++	struct dsp_streamconnect *strm2 = NULL;
++	enum node_type node1_type = NODE_TASK;
++	enum node_type node2_type = NODE_TASK;
++
++	node1_type = node_get_type(node1);
++	node2_type = node_get_type(node2);
++	if (node1 != (struct node_object *)DSP_HGPPNODE) {
++
++		if (node1_type != NODE_DEVICE) {
++			strm_index = node1->num_inputs +
++			    node1->num_outputs - 1;
++			strm1 = &(node1->stream_connect[strm_index]);
++			strm1->cb_struct = sizeof(struct dsp_streamconnect);
++			strm1->this_node_stream_index = stream1;
++		}
++
++		if (node2 != (struct node_object *)DSP_HGPPNODE) {
++			/* NODE == > NODE */
++			if (node1_type != NODE_DEVICE) {
++				strm1->connected_node = node2;
++				strm1->ui_connected_node_id = node2->node_uuid;
++				strm1->connected_node_stream_index = stream2;
++				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
++			}
++			if (node2_type != NODE_DEVICE) {
++				strm_index = node2->num_inputs +
++				    node2->num_outputs - 1;
++				strm2 = &(node2->stream_connect[strm_index]);
++				strm2->cb_struct =
++				    sizeof(struct dsp_streamconnect);
++				strm2->this_node_stream_index = stream2;
++				strm2->connected_node = node1;
++				strm2->ui_connected_node_id = node1->node_uuid;
++				strm2->connected_node_stream_index = stream1;
++				strm2->connect_type = CONNECTTYPE_NODEINPUT;
++			}
++		} else if (node1_type != NODE_DEVICE)
++			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
++	} else {
++		/* GPP == > NODE */
++		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
++		strm_index = node2->num_inputs + node2->num_outputs - 1;
++		strm2 = &(node2->stream_connect[strm_index]);
++		strm2->cb_struct = sizeof(struct dsp_streamconnect);
++		strm2->this_node_stream_index = stream2;
++		strm2->connect_type = CONNECTTYPE_GPPINPUT;
++	}
++}
++
++/*
++ * ======== fill_stream_def ========
++ * Purpose:
++ * Fills Stream attributes.
++ */
++static void fill_stream_def(struct node_object *hnode,
++ struct node_strmdef *pstrm_def,
++ struct dsp_strmattr *pattrs)
++{
++ struct node_mgr *hnode_mgr = hnode->hnode_mgr;
++
++ if (pattrs != NULL) {
++ pstrm_def->num_bufs = pattrs->num_bufs;
++ pstrm_def->buf_size =
++ pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
++ pstrm_def->seg_id = pattrs->seg_id;
++ pstrm_def->buf_alignment = pattrs->buf_alignment;
++ pstrm_def->utimeout = pattrs->utimeout;
++ } else {
++ pstrm_def->num_bufs = DEFAULTNBUFS;
++ pstrm_def->buf_size =
++ DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
++ pstrm_def->seg_id = DEFAULTSEGID;
++ pstrm_def->buf_alignment = DEFAULTALIGNMENT;
++ pstrm_def->utimeout = DEFAULTTIMEOUT;
++ }
++}
++
++/*
++ * ======== free_stream ========
++ * Purpose:
++ * Updates the channel mask and frees the pipe id.
++ */
++static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
++{
++ /* Free up the pipe id unless other node has not yet been deleted. */
++ if (stream.type == NODECONNECT) {
++ if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
++ /* The other node has already been deleted */
++ gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
++ gb_clear(hnode_mgr->pipe_map, stream.dev_id);
++ } else {
++ /* The other node has not been deleted yet */
++ gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
++ }
++ } else if (stream.type == HOSTCONNECT) {
++ if (stream.dev_id < hnode_mgr->ul_num_chnls) {
++ gb_clear(hnode_mgr->chnl_map, stream.dev_id);
++ } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
++ /* dsp-dma */
++ gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
++ (1 * hnode_mgr->ul_num_chnls));
++ } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
++ /* zero-copy */
++ gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
++ (2 * hnode_mgr->ul_num_chnls));
++ }
++ }
++}
++
++/*
++ * ======== get_fxn_address ========
++ * Purpose:
++ * Retrieves the address for create, execute or delete phase for a node.
++ */
++static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
++ u32 phase)
++{
++ char *pstr_fxn_name = NULL;
++ struct node_mgr *hnode_mgr = hnode->hnode_mgr;
++ int status = 0;
++ DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
++ node_get_type(hnode) == NODE_DAISSOCKET ||
++ node_get_type(hnode) == NODE_MESSAGE);
++
++ switch (phase) {
++ case CREATEPHASE:
++ pstr_fxn_name =
++ hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
++ break;
++ case EXECUTEPHASE:
++ pstr_fxn_name =
++ hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
++ break;
++ case DELETEPHASE:
++ pstr_fxn_name =
++ hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
++ break;
++ default:
++ /* Should never get here */
++ DBC_ASSERT(false);
++ break;
++ }
++
++ status =
++ hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
++ pstr_fxn_name, fxn_addr);
++
++ return status;
++}
++
++/*
++ * ======== get_node_info ========
++ * Purpose:
++ * Retrieves the node information.
++ */
++void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
++{
++ u32 i;
++
++ DBC_REQUIRE(hnode);
++ DBC_REQUIRE(node_info != NULL);
++
++ node_info->cb_struct = sizeof(struct dsp_nodeinfo);
++ node_info->nb_node_database_props =
++ hnode->dcd_props.obj_data.node_obj.ndb_props;
++ node_info->execution_priority = hnode->prio;
++ node_info->device_owner = hnode->device_owner;
++ node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
++ node_info->node_env = hnode->node_env;
++
++ node_info->ns_execution_state = node_get_state(hnode);
++
++ /* Copy stream connect data */
++ for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
++ node_info->sc_stream_connection[i] = hnode->stream_connect[i];
++
++}
++
++/*
++ * ======== get_node_props ========
++ * Purpose:
++ * Retrieve node properties.
++ */
++static int get_node_props(struct dcd_manager *hdcd_mgr,
++ struct node_object *hnode,
++ const struct dsp_uuid *node_uuid,
++ struct dcd_genericobj *dcd_prop)
++{
++ u32 len;
++ struct node_msgargs *pmsg_args;
++ struct node_taskargs *task_arg_obj;
++ enum node_type node_type = NODE_TASK;
++ struct dsp_ndbprops *pndb_props =
++ &(dcd_prop->obj_data.node_obj.ndb_props);
++ int status = 0;
++ char sz_uuid[MAXUUIDLEN];
++
++ status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
++ DSP_DCDNODETYPE, dcd_prop);
++
++ if (!status) {
++ hnode->ntype = node_type = pndb_props->ntype;
++
++ /* Create UUID value to set in registry. */
++ uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
++ MAXUUIDLEN);
++ dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
++
++ /* Fill in message args that come from NDB */
++ if (node_type != NODE_DEVICE) {
++ pmsg_args = &(hnode->create_args.asa.node_msg_args);
++ pmsg_args->seg_id =
++ dcd_prop->obj_data.node_obj.msg_segid;
++ pmsg_args->notify_type =
++ dcd_prop->obj_data.node_obj.msg_notify_type;
++ pmsg_args->max_msgs = pndb_props->message_depth;
++ dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
++ pmsg_args->max_msgs);
++ } else {
++ /* Copy device name */
++ DBC_REQUIRE(pndb_props->ac_name);
++ len = strlen(pndb_props->ac_name);
++ DBC_ASSERT(len < MAXDEVNAMELEN);
++ hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
++ if (hnode->pstr_dev_name == NULL) {
++ status = -ENOMEM;
++ } else {
++ strncpy(hnode->pstr_dev_name,
++ pndb_props->ac_name, len);
++ }
++ }
++ }
++ if (!status) {
++ /* Fill in create args that come from NDB */
++ if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
++ task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
++ task_arg_obj->prio = pndb_props->prio;
++ task_arg_obj->stack_size = pndb_props->stack_size;
++ task_arg_obj->sys_stack_size =
++ pndb_props->sys_stack_size;
++ task_arg_obj->stack_seg = pndb_props->stack_seg;
++ dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
++ "0x%x words System Stack Size: 0x%x words "
++ "Stack Segment: 0x%x profile count : 0x%x\n",
++ task_arg_obj->prio, task_arg_obj->stack_size,
++ task_arg_obj->sys_stack_size,
++ task_arg_obj->stack_seg,
++ pndb_props->count_profiles);
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== get_proc_props ========
++ * Purpose:
++ * Retrieve the processor properties.
++ */
++static int get_proc_props(struct node_mgr *hnode_mgr,
++ struct dev_object *hdev_obj)
++{
++ struct cfg_hostres *host_res;
++ struct bridge_dev_context *pbridge_context;
++ int status = 0;
++
++ status = dev_get_bridge_context(hdev_obj, &pbridge_context);
++ if (!pbridge_context)
++ status = -EFAULT;
++
++ if (!status) {
++ host_res = pbridge_context->resources;
++ if (!host_res)
++ return -EPERM;
++ hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
++ hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
++ hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
++
++ /*
++ * PROC will add an API to get dsp_processorinfo.
++ * Fill in default values for now.
++ */
++ /* TODO -- Instead of hard coding, take from registry */
++ hnode_mgr->proc_family = 6000;
++ hnode_mgr->proc_type = 6410;
++ hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
++ hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
++ hnode_mgr->udsp_word_size = DSPWORDSIZE;
++ hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
++ hnode_mgr->udsp_mau_size = 1;
++
++ }
++ return status;
++}
++
++/*
++ * ======== node_get_uuid_props ========
++ * Purpose:
++ * Fetch Node UUID properties from DCD/DOF file.
++ */
++int node_get_uuid_props(void *hprocessor,
++ const struct dsp_uuid *node_uuid,
++ struct dsp_ndbprops *node_props)
++{
++ struct node_mgr *hnode_mgr = NULL;
++ struct dev_object *hdev_obj;
++ int status = 0;
++ struct dcd_nodeprops dcd_node_props;
++ struct dsp_processorstate proc_state;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hprocessor != NULL);
++ DBC_REQUIRE(node_uuid != NULL);
++
++ if (hprocessor == NULL || node_uuid == NULL) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ status = proc_get_state(hprocessor, &proc_state,
++ sizeof(struct dsp_processorstate));
++ if (status)
++ goto func_end;
++ /* If processor is in error state then don't attempt
++ to send the message */
++ if (proc_state.proc_state == PROC_ERROR) {
++ status = -EPERM;
++ goto func_end;
++ }
++
++ status = proc_get_dev_object(hprocessor, &hdev_obj);
++ if (hdev_obj) {
++ status = dev_get_node_manager(hdev_obj, &hnode_mgr);
++ if (hnode_mgr == NULL) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ }
++
++ /*
++ * Enter the critical section. This is needed because
++ * dcd_get_object_def will ultimately end up calling dbll_open/close,
++ * which needs to be protected in order to not corrupt the zlib manager
++ * (COD).
++ */
++ mutex_lock(&hnode_mgr->node_mgr_lock);
++
++ dcd_node_props.pstr_create_phase_fxn = NULL;
++ dcd_node_props.pstr_execute_phase_fxn = NULL;
++ dcd_node_props.pstr_delete_phase_fxn = NULL;
++ dcd_node_props.pstr_i_alg_name = NULL;
++
++ status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
++ (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
++ (struct dcd_genericobj *)&dcd_node_props);
++
++ if (!status) {
++ *node_props = dcd_node_props.ndb_props;
++ kfree(dcd_node_props.pstr_create_phase_fxn);
++
++ kfree(dcd_node_props.pstr_execute_phase_fxn);
++
++ kfree(dcd_node_props.pstr_delete_phase_fxn);
++
++ kfree(dcd_node_props.pstr_i_alg_name);
++ }
++ /* Leave the critical section, we're done. */
++ mutex_unlock(&hnode_mgr->node_mgr_lock);
++func_end:
++ return status;
++}
++
++/*
++ * ======== get_rms_fxns ========
++ * Purpose:
++ * Retrieve the RMS functions.
++ */
++static int get_rms_fxns(struct node_mgr *hnode_mgr)
++{
++ s32 i;
++ struct dev_object *dev_obj = hnode_mgr->hdev_obj;
++ int status = 0;
++
++ static char *psz_fxns[NUMRMSFXNS] = {
++ "RMS_queryServer", /* RMSQUERYSERVER */
++ "RMS_configureServer", /* RMSCONFIGURESERVER */
++ "RMS_createNode", /* RMSCREATENODE */
++ "RMS_executeNode", /* RMSEXECUTENODE */
++ "RMS_deleteNode", /* RMSDELETENODE */
++ "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
++ "RMS_readMemory", /* RMSREADMEMORY */
++ "RMS_writeMemory", /* RMSWRITEMEMORY */
++ "RMS_copy", /* RMSCOPY */
++ };
++
++ for (i = 0; i < NUMRMSFXNS; i++) {
++ status = dev_get_symbol(dev_obj, psz_fxns[i],
++ &(hnode_mgr->ul_fxn_addrs[i]));
++ if (status) {
++ if (status == -ESPIPE) {
++ /*
++ * May be loaded dynamically (in the future),
++ * but return an error for now.
++ */
++ dev_dbg(bridge, "%s: RMS function: %s currently"
++ " not loaded\n", __func__, psz_fxns[i]);
++ } else {
++ dev_dbg(bridge, "%s: Symbol not found: %s "
++ "status = 0x%x\n", __func__,
++ psz_fxns[i], status);
++ break;
++ }
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== ovly ========
++ * Purpose:
++ * Called during overlay.Sends command to RMS to copy a block of data.
++ */
++static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
++ u32 ul_num_bytes, u32 mem_space)
++{
++ struct node_object *hnode = (struct node_object *)priv_ref;
++ struct node_mgr *hnode_mgr;
++ u32 ul_bytes = 0;
++ u32 ul_size;
++ u32 ul_timeout;
++ int status = 0;
++ struct bridge_dev_context *hbridge_context;
++ /* Function interface to Bridge driver*/
++ struct bridge_drv_interface *intf_fxns;
++
++ DBC_REQUIRE(hnode);
++
++ hnode_mgr = hnode->hnode_mgr;
++
++ ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
++ ul_timeout = hnode->utimeout;
++
++ /* Call new MemCopy function */
++ intf_fxns = hnode_mgr->intf_fxns;
++ status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
++ if (!status) {
++ status =
++ (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
++ dsp_run_addr, dsp_load_addr,
++ ul_num_bytes, (u32) mem_space);
++ if (!status)
++ ul_bytes = ul_num_bytes;
++ else
++ pr_debug("%s: failed to copy brd memory, status 0x%x\n",
++ __func__, status);
++ } else {
++ pr_debug("%s: failed to get Bridge context, status 0x%x\n",
++ __func__, status);
++ }
++
++ return ul_bytes;
++}
++
++/*
++ * ======== mem_write ========
++ */
++static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
++ u32 ul_num_bytes, u32 mem_space)
++{
++ struct node_object *hnode = (struct node_object *)priv_ref;
++ struct node_mgr *hnode_mgr;
++ u16 mem_sect_type;
++ u32 ul_timeout;
++ int status = 0;
++ struct bridge_dev_context *hbridge_context;
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++
++ DBC_REQUIRE(hnode);
++ DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
++
++ hnode_mgr = hnode->hnode_mgr;
++
++ ul_timeout = hnode->utimeout;
++ mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
++
++ /* Call new MemWrite function */
++ intf_fxns = hnode_mgr->intf_fxns;
++ status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
++ status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
++ dsp_add, ul_num_bytes, mem_sect_type);
++
++ return ul_num_bytes;
++}
++
++#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
++/*
++ * ======== node_find_addr ========
++ */
++int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
++ u32 offset_range, void *sym_addr_output, char *sym_name)
++{
++ struct node_object *node_obj;
++ int status = -ENOENT;
++ u32 n;
++
++ pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
++ (unsigned int) node_mgr,
++ sym_addr, offset_range,
++ (unsigned int) sym_addr_output, sym_name);
++
++ node_obj = (struct node_object *)(node_mgr->node_list->head.next);
++
++ for (n = 0; n < node_mgr->num_nodes; n++) {
++ status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
++ offset_range, sym_addr_output, sym_name);
++
++ if (!status)
++ break;
++
++ node_obj = (struct node_object *) (node_obj->list_elem.next);
++ }
++
++ return status;
++}
++#endif
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/proc.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/proc.c 2010-08-18 11:24:23.226051079 +0300
+@@ -0,0 +1,1936 @@
++/*
++ * proc.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Processor interface at the driver level.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++/* ------------------------------------ Host OS */
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/list.h>
++#include <dspbridge/ntfy.h>
++#include <dspbridge/sync.h>
++/* ----------------------------------- Bridge Driver */
++#include <dspbridge/dspdefs.h>
++#include <dspbridge/dspdeh.h>
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/cod.h>
++#include <dspbridge/dev.h>
++#include <dspbridge/procpriv.h>
++#include <dspbridge/dmm.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/mgr.h>
++#include <dspbridge/node.h>
++#include <dspbridge/nldr.h>
++#include <dspbridge/rmm.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/dbdcd.h>
++#include <dspbridge/msg.h>
++#include <dspbridge/dspioctl.h>
++#include <dspbridge/drv.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/proc.h>
++#include <dspbridge/pwr.h>
++
++#include <dspbridge/resourcecleanup.h>
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++#define MAXCMDLINELEN 255
++#define PROC_ENVPROCID "PROC_ID=%d"
++#define MAXPROCIDLEN (8 + 5)
++#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
++#define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */
++#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
++
++#define DSP_CACHE_LINE 128
++
++#define BUFMODE_MASK (3 << 14)
++
++/* Buffer modes from DSP perspective */
++#define RBUF 0x4000 /* Input buffer */
++#define WBUF 0x8000 /* Output Buffer */
++
++extern struct device *bridge;
++
++/* ----------------------------------- Globals */
++
++/* The proc_object structure. */
++struct proc_object {
++ struct list_head link; /* Link to next proc_object */
++ struct dev_object *hdev_obj; /* Device this PROC represents */
++ u32 process; /* Process owning this Processor */
++ struct mgr_object *hmgr_obj; /* Manager Object Handle */
++ u32 attach_count; /* Processor attach count */
++ u32 processor_id; /* Processor number */
++ u32 utimeout; /* Time out count */
++ enum dsp_procstate proc_state; /* Processor state */
++ u32 ul_unit; /* DDSP unit number */
++ bool is_already_attached; /*
++ * True if the Device below has
++ * GPP Client attached
++ */
++ struct ntfy_object *ntfy_obj; /* Manages notifications */
++ /* Bridge Context Handle */
++ struct bridge_dev_context *hbridge_context;
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++ char *psz_last_coff;
++ struct list_head proc_list;
++};
++
++static u32 refs;
++
++DEFINE_MUTEX(proc_lock); /* For critical sections */
++
++/* ----------------------------------- Function Prototypes */
++static int proc_monitor(struct proc_object *proc_obj);
++static s32 get_envp_count(char **envp);
++static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
++ s32 cnew_envp, char *sz_var);
++
++/* remember mapping information */
++static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
++ u32 mpu_addr, u32 dsp_addr, u32 size)
++{
++ struct dmm_map_object *map_obj;
++
++ u32 num_usr_pgs = size / PG_SIZE4K;
++
++ pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
++ __func__, mpu_addr,
++ dsp_addr, size);
++
++ map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
++ if (!map_obj) {
++ pr_err("%s: kzalloc failed\n", __func__);
++ return NULL;
++ }
++ INIT_LIST_HEAD(&map_obj->link);
++
++ map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
++ GFP_KERNEL);
++ if (!map_obj->pages) {
++ pr_err("%s: kzalloc failed\n", __func__);
++ kfree(map_obj);
++ return NULL;
++ }
++
++ map_obj->mpu_addr = mpu_addr;
++ map_obj->dsp_addr = dsp_addr;
++ map_obj->size = size;
++ map_obj->num_usr_pgs = num_usr_pgs;
++
++ spin_lock(&pr_ctxt->dmm_map_lock);
++ list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
++ spin_unlock(&pr_ctxt->dmm_map_lock);
++
++ return map_obj;
++}
++
++static int match_exact_map_obj(struct dmm_map_object *map_obj,
++ u32 dsp_addr, u32 size)
++{
++ if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
++ pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
++ __func__, dsp_addr, map_obj->size, size);
++
++ return map_obj->dsp_addr == dsp_addr &&
++ map_obj->size == size;
++}
++
++static void remove_mapping_information(struct process_context *pr_ctxt,
++ u32 dsp_addr, u32 size)
++{
++ struct dmm_map_object *map_obj;
++
++ pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
++ dsp_addr, size);
++
++ spin_lock(&pr_ctxt->dmm_map_lock);
++ list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
++ pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
++ __func__,
++ map_obj->mpu_addr,
++ map_obj->dsp_addr,
++ map_obj->size);
++
++ if (match_exact_map_obj(map_obj, dsp_addr, size)) {
++ pr_debug("%s: match, deleting map info\n", __func__);
++ list_del(&map_obj->link);
++ kfree(map_obj->dma_info.sg);
++ kfree(map_obj->pages);
++ kfree(map_obj);
++ goto out;
++ }
++ pr_debug("%s: candidate didn't match\n", __func__);
++ }
++
++ pr_err("%s: failed to find given map info\n", __func__);
++out:
++ spin_unlock(&pr_ctxt->dmm_map_lock);
++}
++
++static int match_containing_map_obj(struct dmm_map_object *map_obj,
++ u32 mpu_addr, u32 size)
++{
++ u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
++
++ return mpu_addr >= map_obj->mpu_addr &&
++ mpu_addr + size <= map_obj_end;
++}
++
++static struct dmm_map_object *find_containing_mapping(
++ struct process_context *pr_ctxt,
++ u32 mpu_addr, u32 size)
++{
++ struct dmm_map_object *map_obj;
++ pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
++ mpu_addr, size);
++
++ spin_lock(&pr_ctxt->dmm_map_lock);
++ list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
++ pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
++ __func__,
++ map_obj->mpu_addr,
++ map_obj->dsp_addr,
++ map_obj->size);
++ if (match_containing_map_obj(map_obj, mpu_addr, size)) {
++ pr_debug("%s: match!\n", __func__);
++ goto out;
++ }
++
++ pr_debug("%s: no match!\n", __func__);
++ }
++
++ map_obj = NULL;
++out:
++ spin_unlock(&pr_ctxt->dmm_map_lock);
++ return map_obj;
++}
++
++static int find_first_page_in_cache(struct dmm_map_object *map_obj,
++ unsigned long mpu_addr)
++{
++ u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
++ u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
++ int pg_index = requested_base_page - mapped_base_page;
++
++ if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
++ pr_err("%s: failed (got %d)\n", __func__, pg_index);
++ return -1;
++ }
++
++ pr_debug("%s: first page is %d\n", __func__, pg_index);
++ return pg_index;
++}
++
++static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
++ int pg_i)
++{
++ pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
++ pg_i, map_obj->num_usr_pgs);
++
++ if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
++ pr_err("%s: requested pg_i %d is out of mapped range\n",
++ __func__, pg_i);
++ return NULL;
++ }
++
++ return map_obj->pages[pg_i];
++}
++
++/*
++ * ======== proc_attach ========
++ * Purpose:
++ * Prepare for communication with a particular DSP processor, and return
++ * a handle to the processor object.
++ */
++int
++proc_attach(u32 processor_id,
++ const struct dsp_processorattrin *attr_in,
++ void **ph_processor, struct process_context *pr_ctxt)
++{
++ int status = 0;
++ struct dev_object *hdev_obj;
++ struct proc_object *p_proc_object = NULL;
++ struct mgr_object *hmgr_obj = NULL;
++ struct drv_object *hdrv_obj = NULL;
++ u8 dev_type;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(ph_processor != NULL);
++
++ if (pr_ctxt->hprocessor) {
++ *ph_processor = pr_ctxt->hprocessor;
++ return status;
++ }
++
++ /* Get the Driver and Manager Object Handles */
++ status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
++ if (!status)
++ status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
++
++ if (!status) {
++ /* Get the Device Object */
++ status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
++ }
++ if (!status)
++ status = dev_get_dev_type(hdev_obj, &dev_type);
++
++ if (status)
++ goto func_end;
++
++ /* If we made it this far, create the Proceesor object: */
++ p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
++ /* Fill out the Processor Object: */
++ if (p_proc_object == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ p_proc_object->hdev_obj = hdev_obj;
++ p_proc_object->hmgr_obj = hmgr_obj;
++ p_proc_object->processor_id = dev_type;
++ /* Store TGID instead of process handle */
++ p_proc_object->process = current->tgid;
++
++ INIT_LIST_HEAD(&p_proc_object->proc_list);
++
++ if (attr_in)
++ p_proc_object->utimeout = attr_in->utimeout;
++ else
++ p_proc_object->utimeout = PROC_DFLT_TIMEOUT;
++
++ status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
++ if (!status) {
++ status = dev_get_bridge_context(hdev_obj,
++ &p_proc_object->hbridge_context);
++ if (status)
++ kfree(p_proc_object);
++ } else
++ kfree(p_proc_object);
++
++ if (status)
++ goto func_end;
++
++ /* Create the Notification Object */
++ /* This is created with no event mask, no notify mask
++ * and no valid handle to the notification. They all get
++ * filled up when proc_register_notify is called */
++ p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
++ GFP_KERNEL);
++ if (p_proc_object->ntfy_obj)
++ ntfy_init(p_proc_object->ntfy_obj);
++ else
++ status = -ENOMEM;
++
++ if (!status) {
++ /* Insert the Processor Object into the DEV List.
++ * Return handle to this Processor Object:
++ * Find out if the Device is already attached to a
++ * Processor. If so, return AlreadyAttached status */
++ lst_init_elem(&p_proc_object->link);
++ status = dev_insert_proc_object(p_proc_object->hdev_obj,
++ (u32) p_proc_object,
++ &p_proc_object->
++ is_already_attached);
++ if (!status) {
++ if (p_proc_object->is_already_attached)
++ status = 0;
++ } else {
++ if (p_proc_object->ntfy_obj) {
++ ntfy_delete(p_proc_object->ntfy_obj);
++ kfree(p_proc_object->ntfy_obj);
++ }
++
++ kfree(p_proc_object);
++ }
++ if (!status) {
++ *ph_processor = (void *)p_proc_object;
++ pr_ctxt->hprocessor = *ph_processor;
++ (void)proc_notify_clients(p_proc_object,
++ DSP_PROCESSORATTACH);
++ }
++ } else {
++ /* Don't leak memory if status is failed */
++ kfree(p_proc_object);
++ }
++func_end:
++ DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
++ (!status && p_proc_object) ||
++ (status == 0 && p_proc_object));
++
++ return status;
++}
++
++static int get_exec_file(struct cfg_devnode *dev_node_obj,
++ struct dev_object *hdev_obj,
++ u32 size, char *exec_file)
++{
++ u8 dev_type;
++ s32 len;
++
++ dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
++ if (dev_type == DSP_UNIT) {
++ return cfg_get_exec_file(dev_node_obj, size, exec_file);
++ } else if (dev_type == IVA_UNIT) {
++ if (iva_img) {
++ len = strlen(iva_img);
++ strncpy(exec_file, iva_img, len + 1);
++ return 0;
++ }
++ }
++ return -ENOENT;
++}
++
++/*
++ * ======== proc_auto_start ======== =
++ * Purpose:
++ * A Particular device gets loaded with the default image
++ * if the AutoStart flag is set.
++ * Parameters:
++ * hdev_obj: Handle to the Device
++ * Returns:
++ * 0: On Successful Loading
++ * -EPERM General Failure
++ * Requires:
++ * hdev_obj != NULL
++ * Ensures:
++ */
++int proc_auto_start(struct cfg_devnode *dev_node_obj,
++ struct dev_object *hdev_obj)
++{
++ int status = -EPERM;
++ struct proc_object *p_proc_object;
++ char sz_exec_file[MAXCMDLINELEN];
++ char *argv[2];
++ struct mgr_object *hmgr_obj = NULL;
++ u8 dev_type;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(dev_node_obj != NULL);
++ DBC_REQUIRE(hdev_obj != NULL);
++
++ /* Create a Dummy PROC Object */
++ status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
++ if (status)
++ goto func_end;
++
++ p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
++ if (p_proc_object == NULL) {
++ status = -ENOMEM;
++ goto func_end;
++ }
++ p_proc_object->hdev_obj = hdev_obj;
++ p_proc_object->hmgr_obj = hmgr_obj;
++ status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
++ if (!status)
++ status = dev_get_bridge_context(hdev_obj,
++ &p_proc_object->hbridge_context);
++ if (status)
++ goto func_cont;
++
++ /* Stop the Device, put it into standby mode */
++ status = proc_stop(p_proc_object);
++
++ if (status)
++ goto func_cont;
++
++ /* Get the default executable for this board... */
++ dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
++ p_proc_object->processor_id = dev_type;
++ status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
++ sz_exec_file);
++ if (!status) {
++ argv[0] = sz_exec_file;
++ argv[1] = NULL;
++ /* ...and try to load it: */
++ status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
++ if (!status)
++ status = proc_start(p_proc_object);
++ }
++ kfree(p_proc_object->psz_last_coff);
++ p_proc_object->psz_last_coff = NULL;
++func_cont:
++ kfree(p_proc_object);
++func_end:
++ return status;
++}
++
++/*
++ * ======== proc_ctrl ========
++ * Purpose:
++ * Pass control information to the GPP device driver managing the
++ * DSP processor.
++ *
++ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
++ * application developer's API.
++ * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
++ * Operation. arg can be null.
++ */
++int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = hprocessor;
++ u32 timeout = 0;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (p_proc_object) {
++ /* intercept PWR deep sleep command */
++ if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
++ timeout = arg->cb_data;
++ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
++ }
++ /* intercept PWR emergency sleep command */
++ else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
++ timeout = arg->cb_data;
++ status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
++ } else if (dw_cmd == PWR_DEEPSLEEP) {
++ /* timeout = arg->cb_data; */
++ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
++ }
++ /* intercept PWR wake commands */
++ else if (dw_cmd == BRDIOCTL_WAKEUP) {
++ timeout = arg->cb_data;
++ status = pwr_wake_dsp(timeout);
++ } else if (dw_cmd == PWR_WAKEUP) {
++ /* timeout = arg->cb_data; */
++ status = pwr_wake_dsp(timeout);
++ } else
++ if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl)
++ (p_proc_object->hbridge_context, dw_cmd,
++ arg))) {
++ status = 0;
++ } else {
++ status = -EPERM;
++ }
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== proc_detach ========
++ * Purpose:
++ * Destroys the Processor Object. Removes the notification from the Dev
++ * List.
++ */
++int proc_detach(struct process_context *pr_ctxt)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = NULL;
++
++ DBC_REQUIRE(refs > 0);
++
++ p_proc_object = (struct proc_object *)pr_ctxt->hprocessor;
++
++ if (p_proc_object) {
++ /* Notify the Client */
++ ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
++ /* Remove the notification memory */
++ if (p_proc_object->ntfy_obj) {
++ ntfy_delete(p_proc_object->ntfy_obj);
++ kfree(p_proc_object->ntfy_obj);
++ }
++
++ kfree(p_proc_object->psz_last_coff);
++ p_proc_object->psz_last_coff = NULL;
++ /* Remove the Proc from the DEV List */
++ (void)dev_remove_proc_object(p_proc_object->hdev_obj,
++ (u32) p_proc_object);
++ /* Free the Processor Object */
++ kfree(p_proc_object);
++ pr_ctxt->hprocessor = NULL;
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/*
++ * ======== proc_enum_nodes ========
++ * Purpose:
++ * Enumerate and get configuration information about nodes allocated
++ * on a DSP processor.
++ */
++int proc_enum_nodes(void *hprocessor, void **node_tab,
++ u32 node_tab_size, u32 *pu_num_nodes,
++ u32 *pu_allocated)
++{
++ int status = -EPERM;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct node_mgr *hnode_mgr = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
++ DBC_REQUIRE(pu_num_nodes != NULL);
++ DBC_REQUIRE(pu_allocated != NULL);
++
++ if (p_proc_object) {
++ if (!(dev_get_node_manager(p_proc_object->hdev_obj,
++ &hnode_mgr))) {
++ if (hnode_mgr) {
++ status = node_enum_nodes(hnode_mgr, node_tab,
++ node_tab_size,
++ pu_num_nodes,
++ pu_allocated);
++ }
++ }
++ } else {
++ status = -EFAULT;
++ }
++
++ return status;
++}
++
++/* Cache operation against kernel address instead of users */
++static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
++ ssize_t len, int pg_i)
++{
++ struct page *page;
++ unsigned long offset;
++ ssize_t rest;
++ int ret = 0, i = 0;
++ struct scatterlist *sg = map_obj->dma_info.sg;
++
++ while (len) {
++ page = get_mapping_page(map_obj, pg_i);
++ if (!page) {
++ pr_err("%s: no page for %08lx\n", __func__, start);
++ ret = -EINVAL;
++ goto out;
++ } else if (IS_ERR(page)) {
++ pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
++ PTR_ERR(page));
++ ret = PTR_ERR(page);
++ goto out;
++ }
++
++ offset = start & ~PAGE_MASK;
++ rest = min_t(ssize_t, PAGE_SIZE - offset, len);
++
++ sg_set_page(&sg[i], page, rest, offset);
++
++ len -= rest;
++ start += rest;
++ pg_i++, i++;
++ }
++
++ if (i != map_obj->dma_info.num_pages) {
++ pr_err("%s: bad number of sg iterations\n", __func__);
++ ret = -EFAULT;
++ goto out;
++ }
++
++out:
++ return ret;
++}
++
++static int memory_regain_ownership(struct dmm_map_object *map_obj,
++ unsigned long start, ssize_t len, enum dma_data_direction dir)
++{
++ int ret = 0;
++ unsigned long first_data_page = start >> PAGE_SHIFT;
++ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
++ /* calculating the number of pages this area spans */
++ unsigned long num_pages = last_data_page - first_data_page + 1;
++ struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
++
++ if (!dma_info->sg)
++ goto out;
++
++ if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
++ pr_err("%s: dma info doesn't match given params\n", __func__);
++ return -EINVAL;
++ }
++
++ dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
++
++ pr_debug("%s: dma_map_sg unmapped\n", __func__);
++
++ kfree(dma_info->sg);
++
++ map_obj->dma_info.sg = NULL;
++
++out:
++ return ret;
++}
++
++/* Cache operation against kernel address instead of users */
++static int memory_give_ownership(struct dmm_map_object *map_obj,
++ unsigned long start, ssize_t len, enum dma_data_direction dir)
++{
++ int pg_i, ret, sg_num;
++ struct scatterlist *sg;
++ unsigned long first_data_page = start >> PAGE_SHIFT;
++ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
++ /* calculating the number of pages this area spans */
++ unsigned long num_pages = last_data_page - first_data_page + 1;
++
++ pg_i = find_first_page_in_cache(map_obj, start);
++ if (pg_i < 0) {
++ pr_err("%s: failed to find first page in cache\n", __func__);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
++ if (!sg) {
++ pr_err("%s: kcalloc failed\n", __func__);
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ sg_init_table(sg, num_pages);
++
++ /* cleanup a previous sg allocation */
++ /* this may happen if application doesn't signal for e/o DMA */
++ kfree(map_obj->dma_info.sg);
++
++ map_obj->dma_info.sg = sg;
++ map_obj->dma_info.dir = dir;
++ map_obj->dma_info.num_pages = num_pages;
++
++ ret = build_dma_sg(map_obj, start, len, pg_i);
++ if (ret)
++ goto kfree_sg;
++
++ sg_num = dma_map_sg(bridge, sg, num_pages, dir);
++ if (sg_num < 1) {
++ pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
++ ret = -EFAULT;
++ goto kfree_sg;
++ }
++
++ pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
++ map_obj->dma_info.sg_num = sg_num;
++
++ return 0;
++
++kfree_sg:
++ kfree(sg);
++ map_obj->dma_info.sg = NULL;
++out:
++ return ret;
++}
++
++int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
++ enum dma_data_direction dir)
++{
++ /* Keep STATUS here for future additions to this function */
++ int status = 0;
++ struct process_context *pr_ctxt = (struct process_context *) hprocessor;
++ struct dmm_map_object *map_obj;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (!pr_ctxt) {
++ status = -EFAULT;
++ goto err_out;
++ }
++
++ pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
++ (u32)pmpu_addr,
++ ul_size, dir);
++
++ /* find requested memory are in cached mapping information */
++ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
++ if (!map_obj) {
++ pr_err("%s: find_containing_mapping failed\n", __func__);
++ status = -EFAULT;
++ goto err_out;
++ }
++
++ if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
++ pr_err("%s: InValid address parameters %p %x\n",
++ __func__, pmpu_addr, ul_size);
++ status = -EFAULT;
++ }
++
++err_out:
++
++ return status;
++}
++
++int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
++ enum dma_data_direction dir)
++{
++ /* Keep STATUS here for future additions to this function */
++ int status = 0;
++ struct process_context *pr_ctxt = (struct process_context *) hprocessor;
++ struct dmm_map_object *map_obj;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (!pr_ctxt) {
++ status = -EFAULT;
++ goto err_out;
++ }
++
++ pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
++ (u32)pmpu_addr,
++ ul_size, dir);
++
++ /* find requested memory are in cached mapping information */
++ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
++ if (!map_obj) {
++ pr_err("%s: find_containing_mapping failed\n", __func__);
++ status = -EFAULT;
++ goto err_out;
++ }
++
++ if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
++ pr_err("%s: InValid address parameters %p %x\n",
++ __func__, pmpu_addr, ul_size);
++ status = -EFAULT;
++ goto err_out;
++ }
++
++err_out:
++ return status;
++}
++
++/*
++ * ======== proc_flush_memory ========
++ * Purpose:
++ * Flush cache
++ */
++int proc_flush_memory(void *hprocessor, void *pmpu_addr,
++ u32 ul_size, u32 ul_flags)
++{
++ enum dma_data_direction dir = DMA_BIDIRECTIONAL;
++
++ return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
++}
++
++/*
++ * ======== proc_invalidate_memory ========
++ * Purpose:
++ * Invalidates the memory specified
++ */
++int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
++{
++ enum dma_data_direction dir = DMA_FROM_DEVICE;
++
++ return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
++}
++
++/*
++ * ======== proc_get_resource_info ========
++ * Purpose:
++ * Enumerate the resources currently available on a processor.
++ */
++int proc_get_resource_info(void *hprocessor, u32 resource_type,
++ struct dsp_resourceinfo *resource_info,
++ u32 resource_info_size)
++{
++ int status = -EPERM;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct node_mgr *hnode_mgr = NULL;
++ struct nldr_object *nldr_obj = NULL;
++ struct rmm_target_obj *rmm = NULL;
++ struct io_mgr *hio_mgr = NULL; /* IO manager handle */
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(resource_info != NULL);
++ DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
++
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ switch (resource_type) {
++ case DSP_RESOURCE_DYNDARAM:
++ case DSP_RESOURCE_DYNSARAM:
++ case DSP_RESOURCE_DYNEXTERNAL:
++ case DSP_RESOURCE_DYNSRAM:
++ status = dev_get_node_manager(p_proc_object->hdev_obj,
++ &hnode_mgr);
++ if (!hnode_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
++ if (!status) {
++ status = nldr_get_rmm_manager(nldr_obj, &rmm);
++ if (rmm) {
++ if (!rmm_stat(rmm,
++ (enum dsp_memtype)resource_type,
++ (struct dsp_memstat *)
++ &(resource_info->result.
++ mem_stat)))
++ status = -EINVAL;
++ } else {
++ status = -EFAULT;
++ }
++ }
++ break;
++ case DSP_RESOURCE_PROCLOAD:
++ status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
++ if (hio_mgr)
++ status =
++ p_proc_object->intf_fxns->
++ pfn_io_get_proc_load(hio_mgr,
++ (struct dsp_procloadstat *)
++ &(resource_info->result.
++ proc_load_stat));
++ else
++ status = -EFAULT;
++ break;
++ default:
++ status = -EPERM;
++ break;
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== proc_exit ========
++ * Purpose:
++ * Decrement reference count, and free resources when reference count is
++ * 0.
++ */
++void proc_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== proc_get_dev_object ========
++ * Purpose:
++ * Return the Dev Object handle for a given Processor.
++ *
++ */
++int proc_get_dev_object(void *hprocessor,
++ struct dev_object **device_obj)
++{
++ int status = -EPERM;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(device_obj != NULL);
++
++ if (p_proc_object) {
++ *device_obj = p_proc_object->hdev_obj;
++ status = 0;
++ } else {
++ *device_obj = NULL;
++ status = -EFAULT;
++ }
++
++ DBC_ENSURE((!status && *device_obj != NULL) ||
++ (status && *device_obj == NULL));
++
++ return status;
++}
++
++/*
++ * ======== proc_get_state ========
++ * Purpose:
++ * Report the state of the specified DSP processor.
++ */
++int proc_get_state(void *hprocessor,
++ struct dsp_processorstate *proc_state_obj,
++ u32 state_info_size)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ int brd_status;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(proc_state_obj != NULL);
++ DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
++
++ if (p_proc_object) {
++ /* First, retrieve BRD state information */
++ status = (*p_proc_object->intf_fxns->pfn_brd_status)
++ (p_proc_object->hbridge_context, &brd_status);
++ if (!status) {
++ switch (brd_status) {
++ case BRD_STOPPED:
++ proc_state_obj->proc_state = PROC_STOPPED;
++ break;
++ case BRD_SLEEP_TRANSITION:
++ case BRD_DSP_HIBERNATION:
++ /* Fall through */
++ case BRD_RUNNING:
++ proc_state_obj->proc_state = PROC_RUNNING;
++ break;
++ case BRD_LOADED:
++ proc_state_obj->proc_state = PROC_LOADED;
++ break;
++ case BRD_ERROR:
++ proc_state_obj->proc_state = PROC_ERROR;
++ break;
++ default:
++ proc_state_obj->proc_state = 0xFF;
++ status = -EPERM;
++ break;
++ }
++ }
++ } else {
++ status = -EFAULT;
++ }
++ dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
++ __func__, status, proc_state_obj->proc_state);
++ return status;
++}
++
++/*
++ * ======== proc_get_trace ========
++ * Purpose:
++ * Retrieve the current contents of the trace buffer, located on the
++ * Processor. Predefined symbols for the trace buffer must have been
++ * configured into the DSP executable.
++ * Details:
++ * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
++ * trace buffer, only. Treat it as an undocumented feature.
++ * This call is destructive, meaning the processor is placed in the monitor
++ * state as a result of this function.
++ */
++int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
++{
++ int status;
++ status = -ENOSYS;
++ return status;
++}
++
++/*
++ * ======== proc_init ========
++ * Purpose:
++ * Initialize PROC's private state, keeping a reference count on each call
++ */
++bool proc_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
++
++/*
++ * ======== proc_load ========
++ * Purpose:
++ * Reset a processor and load a new base program image.
++ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
++ * application developer's API.
++ */
++int proc_load(void *hprocessor, const s32 argc_index,
++ const char **user_args, const char **user_envp)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct io_mgr *hio_mgr; /* IO manager handle */
++ struct msg_mgr *hmsg_mgr;
++ struct cod_manager *cod_mgr; /* Code manager handle */
++ char *pargv0; /* temp argv[0] ptr */
++ char **new_envp; /* Updated envp[] array. */
++ char sz_proc_id[MAXPROCIDLEN]; /* Size of "PROC_ID=<n>" */
++ s32 envp_elems; /* Num elements in envp[]. */
++ s32 cnew_envp; /* " " in new_envp[] */
++ s32 nproc_id = 0; /* Anticipate MP version. */
++ struct dcd_manager *hdcd_handle;
++ struct dmm_object *dmm_mgr;
++ u32 dw_ext_end;
++ u32 proc_id;
++ int brd_state;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++#ifdef OPT_LOAD_TIME_INSTRUMENTATION
++ struct timeval tv1;
++ struct timeval tv2;
++#endif
++
++#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
++ struct dspbridge_platform_data *pdata =
++ omap_dspbridge_dev->dev.platform_data;
++#endif
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(argc_index > 0);
++ DBC_REQUIRE(user_args != NULL);
++
++#ifdef OPT_LOAD_TIME_INSTRUMENTATION
++ do_gettimeofday(&tv1);
++#endif
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
++ if (!cod_mgr) {
++ status = -EPERM;
++ goto func_end;
++ }
++ status = proc_stop(hprocessor);
++ if (status)
++ goto func_end;
++
++ /* Place the board in the monitor state. */
++ status = proc_monitor(hprocessor);
++ if (status)
++ goto func_end;
++
++ /* Save ptr to original argv[0]. */
++ pargv0 = (char *)user_args[0];
++ /*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
++ envp_elems = get_envp_count((char **)user_envp);
++ cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
++ new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
++ if (new_envp) {
++ status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
++ nproc_id);
++ if (status == -1) {
++ dev_dbg(bridge, "%s: Proc ID string overflow\n",
++ __func__);
++ status = -EPERM;
++ } else {
++ new_envp =
++ prepend_envp(new_envp, (char **)user_envp,
++ envp_elems, cnew_envp, sz_proc_id);
++ /* Get the DCD Handle */
++ status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
++ (u32 *) &hdcd_handle);
++ if (!status) {
++ /* Before proceeding with new load,
++ * check if a previously registered COFF
++ * exists.
++ * If yes, unregister nodes in previously
++ * registered COFF. If any error occurred,
++ * set previously registered COFF to NULL. */
++ if (p_proc_object->psz_last_coff != NULL) {
++ status =
++ dcd_auto_unregister(hdcd_handle,
++ p_proc_object->
++ psz_last_coff);
++ /* Regardless of auto unregister status,
++ * free previously allocated
++ * memory. */
++ kfree(p_proc_object->psz_last_coff);
++ p_proc_object->psz_last_coff = NULL;
++ }
++ }
++ /* On success, do cod_open_base() */
++ status = cod_open_base(cod_mgr, (char *)user_args[0],
++ COD_SYMB);
++ }
++ } else {
++ status = -ENOMEM;
++ }
++ if (!status) {
++ /* Auto-register data base */
++ /* Get the DCD Handle */
++ status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
++ (u32 *) &hdcd_handle);
++ if (!status) {
++ /* Auto register nodes in specified COFF
++ * file. If registration did not fail,
++ * (status = 0 or -EACCES)
++ * save the name of the COFF file for
++ * de-registration in the future. */
++ status =
++ dcd_auto_register(hdcd_handle,
++ (char *)user_args[0]);
++ if (status == -EACCES)
++ status = 0;
++
++ if (status) {
++ status = -EPERM;
++ } else {
++ DBC_ASSERT(p_proc_object->psz_last_coff ==
++ NULL);
++ /* Allocate memory for pszLastCoff */
++ p_proc_object->psz_last_coff =
++ kzalloc((strlen(user_args[0]) +
++ 1), GFP_KERNEL);
++ /* If memory allocated, save COFF file name */
++ if (p_proc_object->psz_last_coff) {
++ strncpy(p_proc_object->psz_last_coff,
++ (char *)user_args[0],
++ (strlen((char *)user_args[0]) +
++ 1));
++ }
++ }
++ }
++ }
++ /* Update shared memory address and size */
++ if (!status) {
++ /* Create the message manager. This must be done
++ * before calling the IOOnLoaded function. */
++ dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
++ if (!hmsg_mgr) {
++ status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
++ (msg_onexit) node_on_exit);
++ DBC_ASSERT(!status);
++ dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
++ }
++ }
++ if (!status) {
++ /* Set the Device object's message manager */
++ status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
++ if (hio_mgr)
++ status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
++ (hio_mgr);
++ else
++ status = -EFAULT;
++ }
++ if (!status) {
++ /* Now, attempt to load an exec: */
++
++ /* Boost the OPP level to Maximum level supported by baseport */
++#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
++ if (pdata->cpu_set_freq)
++ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
++#endif
++ status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
++ dev_brd_write_fxn,
++ p_proc_object->hdev_obj, NULL);
++ if (status) {
++ if (status == -EBADF) {
++ dev_dbg(bridge, "%s: Failure to Load the EXE\n",
++ __func__);
++ }
++ if (status == -ESPIPE) {
++ pr_err("%s: Couldn't parse the file\n",
++ __func__);
++ }
++ }
++ /* Requesting the lowest opp supported */
++#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
++ if (pdata->cpu_set_freq)
++ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
++#endif
++
++ }
++ if (!status) {
++ /* Update the Processor status to loaded */
++ status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
++ (p_proc_object->hbridge_context, BRD_LOADED);
++ if (!status) {
++ p_proc_object->proc_state = PROC_LOADED;
++ if (p_proc_object->ntfy_obj)
++ proc_notify_clients(p_proc_object,
++ DSP_PROCESSORSTATECHANGE);
++ }
++ }
++ if (!status) {
++ status = proc_get_processor_id(hprocessor, &proc_id);
++ if (proc_id == DSP_UNIT) {
++ /* Use all available DSP address space after EXTMEM
++ * for DMM */
++ if (!status)
++ status = cod_get_sym_value(cod_mgr, EXTEND,
++ &dw_ext_end);
++
++ /* Reset DMM structs and add an initial free chunk */
++ if (!status) {
++ status =
++ dev_get_dmm_mgr(p_proc_object->hdev_obj,
++ &dmm_mgr);
++ if (dmm_mgr) {
++ /* Set dw_ext_end to DMM START u8
++ * address */
++ dw_ext_end =
++ (dw_ext_end + 1) * DSPWORDSIZE;
++ /* DMM memory is from EXT_END */
++ status = dmm_create_tables(dmm_mgr,
++ dw_ext_end,
++ DMMPOOLSIZE);
++ } else {
++ status = -EFAULT;
++ }
++ }
++ }
++ }
++ /* Restore the original argv[0] */
++ kfree(new_envp);
++ user_args[0] = pargv0;
++ if (!status) {
++ if (!((*p_proc_object->intf_fxns->pfn_brd_status)
++ (p_proc_object->hbridge_context, &brd_state))) {
++ pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
++ kfree(drv_datap->base_img);
++ drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
++ GFP_KERNEL);
++ if (drv_datap->base_img)
++ strncpy(drv_datap->base_img, pargv0,
++ strlen(pargv0) + 1);
++ else
++ status = -ENOMEM;
++ DBC_ASSERT(brd_state == BRD_LOADED);
++ }
++ }
++
++func_end:
++ if (status) {
++ pr_err("%s: Processor failed to load\n", __func__);
++ proc_stop(p_proc_object);
++ }
++ DBC_ENSURE((!status
++ && p_proc_object->proc_state == PROC_LOADED)
++ || status);
++#ifdef OPT_LOAD_TIME_INSTRUMENTATION
++ do_gettimeofday(&tv2);
++ if (tv2.tv_usec < tv1.tv_usec) {
++ tv2.tv_usec += 1000000;
++ tv2.tv_sec--;
++ }
++ dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
++ tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
++#endif
++ return status;
++}
++
++/*
++ * ======== proc_map ========
++ * Purpose:
++ * Maps a MPU buffer to DSP address space.
++ */
++int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
++ void *req_addr, void **pp_map_addr, u32 ul_map_attr,
++ struct process_context *pr_ctxt)
++{
++ u32 va_align;
++ u32 pa_align;
++ struct dmm_object *dmm_mgr;
++ u32 size_align;
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct dmm_map_object *map_obj;
++ u32 tmp_addr = 0;
++
++#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
++ if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
++ if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
++ !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
++ pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
++ (u32)pmpu_addr, ul_size);
++ return -EFAULT;
++ }
++ }
++#endif
++
++ /* Calculate the page-aligned PA, VA and size */
++ va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
++ pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
++ size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
++ PG_SIZE4K);
++
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /* Critical section */
++ mutex_lock(&proc_lock);
++ dmm_get_handle(p_proc_object, &dmm_mgr);
++ if (dmm_mgr)
++ status = dmm_map_memory(dmm_mgr, va_align, size_align);
++ else
++ status = -EFAULT;
++
++ /* Add mapping to the page tables. */
++ if (!status) {
++
++ /* Mapped address = MSB of VA | LSB of PA */
++ tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
++ /* mapped memory resource tracking */
++ map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
++ size_align);
++ if (!map_obj)
++ status = -ENOMEM;
++ else
++ status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
++ (p_proc_object->hbridge_context, pa_align, va_align,
++ size_align, ul_map_attr, map_obj->pages);
++ }
++ if (!status) {
++ /* Mapped address = MSB of VA | LSB of PA */
++ *pp_map_addr = (void *) tmp_addr;
++ } else {
++ remove_mapping_information(pr_ctxt, tmp_addr, size_align);
++ dmm_un_map_memory(dmm_mgr, va_align, &size_align);
++ }
++ mutex_unlock(&proc_lock);
++
++ if (status)
++ goto func_end;
++
++func_end:
++ dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
++ "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
++ "pa_align %x, size_align %x status 0x%x\n", __func__,
++ hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
++ pp_map_addr, va_align, pa_align, size_align, status);
++
++ return status;
++}
++
++/*
++ * ======== proc_register_notify ========
++ * Purpose:
++ * Register to be notified of specific processor events.
++ */
++int proc_register_notify(void *hprocessor, u32 event_mask,
++ u32 notify_type, struct dsp_notification
++ * hnotification)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct deh_mgr *hdeh_mgr;
++
++ DBC_REQUIRE(hnotification != NULL);
++ DBC_REQUIRE(refs > 0);
++
++ /* Check processor handle */
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /* Check if event mask is a valid processor related event */
++ if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
++ DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
++ DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
++ DSP_WDTOVERFLOW))
++ status = -EINVAL;
++
++ /* Check if notify type is valid */
++ if (notify_type != DSP_SIGNALEVENT)
++ status = -EINVAL;
++
++ if (!status) {
++ /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
++ * or DSP_PWRERROR then register event immediately. */
++ if (event_mask &
++ ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
++ DSP_WDTOVERFLOW)) {
++ status = ntfy_register(p_proc_object->ntfy_obj,
++ hnotification, event_mask,
++ notify_type);
++ /* Special case alert, special case alert!
++ * If we're trying to *deregister* (i.e. event_mask
++ * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
++ * we have to deregister with the DEH manager.
++ * There's no way to know, based on event_mask which
++ * manager the notification event was registered with,
++ * so if we're trying to deregister and ntfy_register
++ * failed, we'll give the deh manager a shot.
++ */
++ if ((event_mask == 0) && status) {
++ status =
++ dev_get_deh_mgr(p_proc_object->hdev_obj,
++ &hdeh_mgr);
++ status =
++ bridge_deh_register_notify(hdeh_mgr,
++ event_mask,
++ notify_type,
++ hnotification);
++ }
++ } else {
++ status = dev_get_deh_mgr(p_proc_object->hdev_obj,
++ &hdeh_mgr);
++ status =
++ bridge_deh_register_notify(hdeh_mgr,
++ event_mask,
++ notify_type,
++ hnotification);
++
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== proc_reserve_memory ========
++ * Purpose:
++ * Reserve a virtually contiguous region of DSP address space.
++ */
++int proc_reserve_memory(void *hprocessor, u32 ul_size,
++ void **pp_rsv_addr,
++ struct process_context *pr_ctxt)
++{
++ struct dmm_object *dmm_mgr;
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct dmm_rsv_object *rsv_obj;
++
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = dmm_get_handle(p_proc_object, &dmm_mgr);
++ if (!dmm_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
++ if (status != 0)
++ goto func_end;
++
++ /*
++ * A successful reserve should be followed by insertion of rsv_obj
++ * into dmm_rsv_list, so that reserved memory resource tracking
++ * remains uptodate
++ */
++ rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
++ if (rsv_obj) {
++ rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
++ spin_lock(&pr_ctxt->dmm_rsv_lock);
++ list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
++ spin_unlock(&pr_ctxt->dmm_rsv_lock);
++ }
++
++func_end:
++ dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
++ "status 0x%x\n", __func__, hprocessor,
++ ul_size, pp_rsv_addr, status);
++ return status;
++}
++
++/*
++ * ======== proc_start ========
++ * Purpose:
++ * Start a processor running.
++ */
++int proc_start(void *hprocessor)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct cod_manager *cod_mgr; /* Code manager handle */
++ u32 dw_dsp_addr; /* Loaded code's entry point. */
++ int brd_state;
++
++ DBC_REQUIRE(refs > 0);
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /* Call the bridge_brd_start */
++ if (p_proc_object->proc_state != PROC_LOADED) {
++ status = -EBADR;
++ goto func_end;
++ }
++ status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
++ if (!cod_mgr) {
++ status = -EFAULT;
++ goto func_cont;
++ }
++
++ status = cod_get_entry(cod_mgr, &dw_dsp_addr);
++ if (status)
++ goto func_cont;
++
++ status = (*p_proc_object->intf_fxns->pfn_brd_start)
++ (p_proc_object->hbridge_context, dw_dsp_addr);
++ if (status)
++ goto func_cont;
++
++ /* Call dev_create2 */
++ status = dev_create2(p_proc_object->hdev_obj);
++ if (!status) {
++ p_proc_object->proc_state = PROC_RUNNING;
++ /* Deep sleep switces off the peripheral clocks.
++ * we just put the DSP CPU in idle in the idle loop.
++ * so there is no need to send a command to DSP */
++
++ if (p_proc_object->ntfy_obj) {
++ proc_notify_clients(p_proc_object,
++ DSP_PROCESSORSTATECHANGE);
++ }
++ } else {
++ /* Failed to Create Node Manager and DISP Object
++ * Stop the Processor from running. Put it in STOPPED State */
++ (void)(*p_proc_object->intf_fxns->
++ pfn_brd_stop) (p_proc_object->hbridge_context);
++ p_proc_object->proc_state = PROC_STOPPED;
++ }
++func_cont:
++ if (!status) {
++ if (!((*p_proc_object->intf_fxns->pfn_brd_status)
++ (p_proc_object->hbridge_context, &brd_state))) {
++ pr_info("%s: dsp in running state\n", __func__);
++ DBC_ASSERT(brd_state != BRD_HIBERNATION);
++ }
++ } else {
++ pr_err("%s: Failed to start the dsp\n", __func__);
++ proc_stop(p_proc_object);
++ }
++
++func_end:
++ DBC_ENSURE((!status && p_proc_object->proc_state ==
++ PROC_RUNNING) || status);
++ return status;
++}
++
++/*
++ * ======== proc_stop ========
++ * Purpose:
++ * Stop a processor running.
++ */
++int proc_stop(void *hprocessor)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct msg_mgr *hmsg_mgr;
++ struct node_mgr *hnode_mgr;
++ void *hnode;
++ u32 node_tab_size = 1;
++ u32 num_nodes = 0;
++ u32 nodes_allocated = 0;
++ int brd_state;
++
++ DBC_REQUIRE(refs > 0);
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ /* check if there are any running nodes */
++ status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
++ if (!status && hnode_mgr) {
++ status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
++ &num_nodes, &nodes_allocated);
++ if ((status == -EINVAL) || (nodes_allocated > 0)) {
++ pr_err("%s: Can't stop device, active nodes = %d \n",
++ __func__, nodes_allocated);
++ return -EBADR;
++ }
++ }
++ /* Call the bridge_brd_stop */
++ /* It is OK to stop a device that does n't have nodes OR not started */
++ status =
++ (*p_proc_object->intf_fxns->
++ pfn_brd_stop) (p_proc_object->hbridge_context);
++ if (!status) {
++ dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
++ p_proc_object->proc_state = PROC_STOPPED;
++ /* Destory the Node Manager, msg_ctrl Manager */
++ if (!(dev_destroy2(p_proc_object->hdev_obj))) {
++ /* Destroy the msg_ctrl by calling msg_delete */
++ dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
++ if (hmsg_mgr) {
++ msg_delete(hmsg_mgr);
++ dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
++ }
++ if (!((*p_proc_object->
++ intf_fxns->pfn_brd_status) (p_proc_object->
++ hbridge_context,
++ &brd_state)))
++ DBC_ASSERT(brd_state == BRD_STOPPED);
++ }
++ } else {
++ pr_err("%s: Failed to stop the processor\n", __func__);
++ }
++func_end:
++
++ return status;
++}
++
++/*
++ * ======== proc_un_map ========
++ * Purpose:
++ * Removes a MPU buffer mapping from the DSP address space.
++ */
++int proc_un_map(void *hprocessor, void *map_addr,
++ struct process_context *pr_ctxt)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct dmm_object *dmm_mgr;
++ u32 va_align;
++ u32 size_align;
++
++ va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = dmm_get_handle(hprocessor, &dmm_mgr);
++ if (!dmm_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ /* Critical section */
++ mutex_lock(&proc_lock);
++ /*
++ * Update DMM structures. Get the size to unmap.
++ * This function returns error if the VA is not mapped
++ */
++ status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
++ /* Remove mapping from the page tables. */
++ if (!status) {
++ status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
++ (p_proc_object->hbridge_context, va_align, size_align);
++ }
++
++ mutex_unlock(&proc_lock);
++ if (status)
++ goto func_end;
++
++ /*
++ * A successful unmap should be followed by removal of map_obj
++ * from dmm_map_list, so that mapped memory resource tracking
++ * remains uptodate
++ */
++ remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
++
++func_end:
++ dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
++ __func__, hprocessor, map_addr, status);
++ return status;
++}
++
++/*
++ * ======== proc_un_reserve_memory ========
++ * Purpose:
++ * Frees a previously reserved region of DSP address space.
++ */
++int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
++ struct process_context *pr_ctxt)
++{
++ struct dmm_object *dmm_mgr;
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
++ struct dmm_rsv_object *rsv_obj;
++
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = dmm_get_handle(p_proc_object, &dmm_mgr);
++ if (!dmm_mgr) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
++ if (status != 0)
++ goto func_end;
++
++ /*
++ * A successful unreserve should be followed by removal of rsv_obj
++ * from dmm_rsv_list, so that reserved memory resource tracking
++ * remains uptodate
++ */
++ spin_lock(&pr_ctxt->dmm_rsv_lock);
++ list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
++ if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
++ list_del(&rsv_obj->link);
++ kfree(rsv_obj);
++ break;
++ }
++ }
++ spin_unlock(&pr_ctxt->dmm_rsv_lock);
++
++func_end:
++ dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
++ __func__, hprocessor, prsv_addr, status);
++ return status;
++}
++
++/*
++ * ======== = proc_monitor ======== ==
++ * Purpose:
++ * Place the Processor in Monitor State. This is an internal
++ * function and a requirement before Processor is loaded.
++ * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
++ * In dev_destroy2 we delete the node manager.
++ * Parameters:
++ * p_proc_object: Pointer to Processor Object
++ * Returns:
++ * 0: Processor placed in monitor mode.
++ * !0: Failed to place processor in monitor mode.
++ * Requires:
++ * Valid Processor Handle
++ * Ensures:
++ * Success: ProcObject state is PROC_IDLE
++ */
++static int proc_monitor(struct proc_object *proc_obj)
++{
++ int status = -EPERM;
++ struct msg_mgr *hmsg_mgr;
++ int brd_state;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(proc_obj);
++
++ /* This is needed only when Device is loaded when it is
++ * already 'ACTIVE' */
++ /* Destory the Node Manager, msg_ctrl Manager */
++ if (!dev_destroy2(proc_obj->hdev_obj)) {
++ /* Destroy the msg_ctrl by calling msg_delete */
++ dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
++ if (hmsg_mgr) {
++ msg_delete(hmsg_mgr);
++ dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
++ }
++ }
++ /* Place the Board in the Monitor State */
++ if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
++ (proc_obj->hbridge_context))) {
++ status = 0;
++ if (!((*proc_obj->intf_fxns->pfn_brd_status)
++ (proc_obj->hbridge_context, &brd_state)))
++ DBC_ASSERT(brd_state == BRD_IDLE);
++ }
++
++ DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
++ status);
++ return status;
++}
++
++/*
++ * ======== get_envp_count ========
++ * Purpose:
++ * Return the number of elements in the envp array, including the
++ * terminating NULL element.
++ */
++static s32 get_envp_count(char **envp)
++{
++ s32 ret = 0;
++ if (envp) {
++ while (*envp++)
++ ret++;
++
++ ret += 1; /* Include the terminating NULL in the count. */
++ }
++
++ return ret;
++}
++
++/*
++ * ======== prepend_envp ========
++ * Purpose:
++ * Prepend an environment variable=value pair to the new envp array, and
++ * copy in the existing var=value pairs in the old envp array.
++ */
++static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
++ s32 cnew_envp, char *sz_var)
++{
++ char **pp_envp = new_envp;
++
++ DBC_REQUIRE(new_envp);
++
++ /* Prepend new environ var=value string */
++ *new_envp++ = sz_var;
++
++ /* Copy user's environment into our own. */
++ while (envp_elems--)
++ *new_envp++ = *envp++;
++
++ /* Ensure NULL terminates the new environment strings array. */
++ if (envp_elems == 0)
++ *new_envp = NULL;
++
++ return pp_envp;
++}
++
++/*
++ * ======== proc_notify_clients ========
++ * Purpose:
++ * Notify the processor the events.
++ */
++int proc_notify_clients(void *proc, u32 events)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)proc;
++
++ DBC_REQUIRE(p_proc_object);
++ DBC_REQUIRE(is_valid_proc_event(events));
++ DBC_REQUIRE(refs > 0);
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ ntfy_notify(p_proc_object->ntfy_obj, events);
++func_end:
++ return status;
++}
++
++/*
++ * ======== proc_notify_all_clients ========
++ * Purpose:
++ * Notify the processor the events. This includes notifying all clients
++ * attached to a particulat DSP.
++ */
++int proc_notify_all_clients(void *proc, u32 events)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)proc;
++
++ DBC_REQUIRE(is_valid_proc_event(events));
++ DBC_REQUIRE(refs > 0);
++
++ if (!p_proc_object) {
++ status = -EFAULT;
++ goto func_end;
++ }
++
++ dev_notify_clients(p_proc_object->hdev_obj, events);
++
++func_end:
++ return status;
++}
++
++/*
++ * ======== proc_get_processor_id ========
++ * Purpose:
++ * Retrieves the processor ID.
++ */
++int proc_get_processor_id(void *proc, u32 * proc_id)
++{
++ int status = 0;
++ struct proc_object *p_proc_object = (struct proc_object *)proc;
++
++ if (p_proc_object)
++ *proc_id = p_proc_object->processor_id;
++ else
++ status = -EFAULT;
++
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/pwr.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/pwr.c 2010-08-18 11:24:23.230054242 +0300
+@@ -0,0 +1,176 @@
++/*
++ * pwr.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * PWR API for controlling DSP power states.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/pwr.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/devdefs.h>
++#include <dspbridge/drv.h>
++
++/* ----------------------------------- Platform Manager */
++#include <dspbridge/dev.h>
++
++/* ----------------------------------- Link Driver */
++#include <dspbridge/dspioctl.h>
++
++/*
++ * ======== pwr_sleep_dsp ========
++ * Send command to DSP to enter sleep state.
++ */
++int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct bridge_dev_context *dw_context;
++ int status = -EPERM;
++ struct dev_object *hdev_obj = NULL;
++ u32 ioctlcode = 0;
++ u32 arg = timeout;
++
++ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
++ hdev_obj != NULL;
++ hdev_obj =
++ (struct dev_object *)drv_get_next_dev_object((u32) hdev_obj)) {
++ if (dev_get_bridge_context(hdev_obj,
++ (struct bridge_dev_context **)
++ &dw_context)) {
++ continue;
++ }
++ if (dev_get_intf_fxns(hdev_obj,
++ (struct bridge_drv_interface **)
++ &intf_fxns)) {
++ continue;
++ }
++ if (sleep_code == PWR_DEEPSLEEP)
++ ioctlcode = BRDIOCTL_DEEPSLEEP;
++ else if (sleep_code == PWR_EMERGENCYDEEPSLEEP)
++ ioctlcode = BRDIOCTL_EMERGENCYSLEEP;
++ else
++ status = -EINVAL;
++
++ if (status != -EINVAL) {
++ status = (*intf_fxns->pfn_dev_cntrl) (dw_context,
++ ioctlcode,
++ (void *)&arg);
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== pwr_wake_dsp ========
++ * Send command to DSP to wake it from sleep.
++ */
++int pwr_wake_dsp(const u32 timeout)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct bridge_dev_context *dw_context;
++ int status = -EPERM;
++ struct dev_object *hdev_obj = NULL;
++ u32 arg = timeout;
++
++ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
++ hdev_obj != NULL;
++ hdev_obj = (struct dev_object *)drv_get_next_dev_object
++ ((u32) hdev_obj)) {
++ if (!(dev_get_bridge_context(hdev_obj,
++ (struct bridge_dev_context
++ **)&dw_context))) {
++ if (!(dev_get_intf_fxns(hdev_obj,
++ (struct bridge_drv_interface **)&intf_fxns))) {
++ status =
++ (*intf_fxns->pfn_dev_cntrl) (dw_context,
++ BRDIOCTL_WAKEUP,
++ (void *)&arg);
++ }
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== pwr_pm_pre_scale========
++ * Sends pre-notification message to DSP.
++ */
++int pwr_pm_pre_scale(u16 voltage_domain, u32 level)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct bridge_dev_context *dw_context;
++ int status = -EPERM;
++ struct dev_object *hdev_obj = NULL;
++ u32 arg[2];
++
++ arg[0] = voltage_domain;
++ arg[1] = level;
++
++ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
++ hdev_obj != NULL;
++ hdev_obj = (struct dev_object *)drv_get_next_dev_object
++ ((u32) hdev_obj)) {
++ if (!(dev_get_bridge_context(hdev_obj,
++ (struct bridge_dev_context
++ **)&dw_context))) {
++ if (!(dev_get_intf_fxns(hdev_obj,
++ (struct bridge_drv_interface **)&intf_fxns))) {
++ status =
++ (*intf_fxns->pfn_dev_cntrl) (dw_context,
++ BRDIOCTL_PRESCALE_NOTIFY,
++ (void *)&arg);
++ }
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== pwr_pm_post_scale========
++ * Sends post-notification message to DSP.
++ */
++int pwr_pm_post_scale(u16 voltage_domain, u32 level)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct bridge_dev_context *dw_context;
++ int status = -EPERM;
++ struct dev_object *hdev_obj = NULL;
++ u32 arg[2];
++
++ arg[0] = voltage_domain;
++ arg[1] = level;
++
++ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
++ hdev_obj != NULL;
++ hdev_obj = (struct dev_object *)drv_get_next_dev_object
++ ((u32) hdev_obj)) {
++ if (!(dev_get_bridge_context(hdev_obj,
++ (struct bridge_dev_context
++ **)&dw_context))) {
++ if (!(dev_get_intf_fxns(hdev_obj,
++ (struct bridge_drv_interface **)&intf_fxns))) {
++ status =
++ (*intf_fxns->pfn_dev_cntrl) (dw_context,
++ BRDIOCTL_POSTSCALE_NOTIFY,
++ (void *)&arg);
++ }
++ }
++ }
++ return status;
++
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/rmm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/rmm.c 2010-08-18 11:24:23.230054242 +0300
+@@ -0,0 +1,537 @@
++/*
++ * rmm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/*
++ * This memory manager provides general heap management and arbitrary
++ * alignment for any number of memory segments.
++ *
++ * Notes:
++ *
++ * Memory blocks are allocated from the end of the first free memory
++ * block large enough to satisfy the request. Alignment requirements
++ * are satisfied by "sliding" the block forward until its base satisfies
++ * the alignment specification; if this is not possible then the next
++ * free block large enough to hold the request is tried.
++ *
++ * Since alignment can cause the creation of a new free block - the
++ * unused memory formed between the start of the original free block
++ * and the start of the allocated block - the memory manager must free
++ * this memory to prevent a memory leak.
++ *
++ * Overlay memory is managed by reserving through rmm_alloc, and freeing
++ * it through rmm_free. The memory manager prevents DSP code/data that is
++ * overlayed from being overwritten as long as the memory it runs at has
++ * been allocated, and not yet freed.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/list.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/rmm.h>
++
++/*
++ * ======== rmm_header ========
++ * This header is used to maintain a list of free memory blocks.
++ */
++struct rmm_header {
++ struct rmm_header *next; /* form a free memory link list */
++ u32 size; /* size of the free memory */
++ u32 addr; /* DSP address of memory block */
++};
++
++/*
++ * ======== rmm_ovly_sect ========
++ * Keeps track of memory occupied by overlay section.
++ */
++struct rmm_ovly_sect {
++ struct list_head list_elem;
++ u32 addr; /* Start of memory section */
++ u32 size; /* Length (target MAUs) of section */
++ s32 page; /* Memory page */
++};
++
++/*
++ * ======== rmm_target_obj ========
++ */
++struct rmm_target_obj {
++ struct rmm_segment *seg_tab;
++ struct rmm_header **free_list;
++ u32 num_segs;
++ struct lst_list *ovly_list; /* List of overlay memory in use */
++};
++
++static u32 refs; /* module reference count */
++
++static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
++ u32 align, u32 *dsp_address);
++static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
++ u32 size);
++
++/*
++ * ======== rmm_alloc ========
++ */
++int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
++ u32 align, u32 *dsp_address, bool reserve)
++{
++ struct rmm_ovly_sect *sect;
++ struct rmm_ovly_sect *prev_sect = NULL;
++ struct rmm_ovly_sect *new_sect;
++ u32 addr;
++ int status = 0;
++
++ DBC_REQUIRE(target);
++ DBC_REQUIRE(dsp_address != NULL);
++ DBC_REQUIRE(size > 0);
++ DBC_REQUIRE(reserve || (target->num_segs > 0));
++ DBC_REQUIRE(refs > 0);
++
++ if (!reserve) {
++ if (!alloc_block(target, segid, size, align, dsp_address)) {
++ status = -ENOMEM;
++ } else {
++ /* Increment the number of allocated blocks in this
++ * segment */
++ target->seg_tab[segid].number++;
++ }
++ goto func_end;
++ }
++ /* An overlay section - See if block is already in use. If not,
++ * insert into the list in ascending address size. */
++ addr = *dsp_address;
++ sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
++ /* Find place to insert new list element. List is sorted from
++ * smallest to largest address. */
++ while (sect != NULL) {
++ if (addr <= sect->addr) {
++ /* Check for overlap with sect */
++ if ((addr + size > sect->addr) || (prev_sect &&
++ (prev_sect->addr +
++ prev_sect->size >
++ addr))) {
++ status = -ENXIO;
++ }
++ break;
++ }
++ prev_sect = sect;
++ sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
++ (struct list_head *)
++ sect);
++ }
++ if (!status) {
++ /* No overlap - allocate list element for new section. */
++ new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
++ if (new_sect == NULL) {
++ status = -ENOMEM;
++ } else {
++ lst_init_elem((struct list_head *)new_sect);
++ new_sect->addr = addr;
++ new_sect->size = size;
++ new_sect->page = segid;
++ if (sect == NULL) {
++ /* Put new section at the end of the list */
++ lst_put_tail(target->ovly_list,
++ (struct list_head *)new_sect);
++ } else {
++ /* Put new section just before sect */
++ lst_insert_before(target->ovly_list,
++ (struct list_head *)new_sect,
++ (struct list_head *)sect);
++ }
++ }
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== rmm_create ========
++ */
++int rmm_create(struct rmm_target_obj **target_obj,
++ struct rmm_segment seg_tab[], u32 num_segs)
++{
++ struct rmm_header *hptr;
++ struct rmm_segment *sptr, *tmp;
++ struct rmm_target_obj *target;
++ s32 i;
++ int status = 0;
++
++ DBC_REQUIRE(target_obj != NULL);
++ DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
++
++ /* Allocate DBL target object */
++ target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
++
++ if (target == NULL)
++ status = -ENOMEM;
++
++ if (status)
++ goto func_cont;
++
++ target->num_segs = num_segs;
++ if (!(num_segs > 0))
++ goto func_cont;
++
++ /* Allocate the memory for freelist from host's memory */
++ target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
++ GFP_KERNEL);
++ if (target->free_list == NULL) {
++ status = -ENOMEM;
++ } else {
++ /* Allocate headers for each element on the free list */
++ for (i = 0; i < (s32) num_segs; i++) {
++ target->free_list[i] =
++ kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
++ if (target->free_list[i] == NULL) {
++ status = -ENOMEM;
++ break;
++ }
++ }
++ /* Allocate memory for initial segment table */
++ target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
++ GFP_KERNEL);
++ if (target->seg_tab == NULL) {
++ status = -ENOMEM;
++ } else {
++ /* Initialize segment table and free list */
++ sptr = target->seg_tab;
++ for (i = 0, tmp = seg_tab; num_segs > 0;
++ num_segs--, i++) {
++ *sptr = *tmp;
++ hptr = target->free_list[i];
++ hptr->addr = tmp->base;
++ hptr->size = tmp->length;
++ hptr->next = NULL;
++ tmp++;
++ sptr++;
++ }
++ }
++ }
++func_cont:
++ /* Initialize overlay memory list */
++ if (!status) {
++ target->ovly_list = kzalloc(sizeof(struct lst_list),
++ GFP_KERNEL);
++ if (target->ovly_list == NULL)
++ status = -ENOMEM;
++ else
++ INIT_LIST_HEAD(&target->ovly_list->head);
++ }
++
++ if (!status) {
++ *target_obj = target;
++ } else {
++ *target_obj = NULL;
++ if (target)
++ rmm_delete(target);
++
++ }
++
++ DBC_ENSURE((!status && *target_obj)
++ || (status && *target_obj == NULL));
++
++ return status;
++}
++
++/*
++ * ======== rmm_delete ========
++ */
++void rmm_delete(struct rmm_target_obj *target)
++{
++ struct rmm_ovly_sect *ovly_section;
++ struct rmm_header *hptr;
++ struct rmm_header *next;
++ u32 i;
++
++ DBC_REQUIRE(target);
++
++ kfree(target->seg_tab);
++
++ if (target->ovly_list) {
++ while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
++ (target->ovly_list))) {
++ kfree(ovly_section);
++ }
++ DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
++ kfree(target->ovly_list);
++ }
++
++ if (target->free_list != NULL) {
++ /* Free elements on freelist */
++ for (i = 0; i < target->num_segs; i++) {
++ hptr = next = target->free_list[i];
++ while (next) {
++ hptr = next;
++ next = hptr->next;
++ kfree(hptr);
++ }
++ }
++ kfree(target->free_list);
++ }
++
++ kfree(target);
++}
++
++/*
++ * ======== rmm_exit ========
++ */
++void rmm_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== rmm_free ========
++ */
++bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
++ bool reserved)
++{
++ struct rmm_ovly_sect *sect;
++ bool ret = true;
++
++ DBC_REQUIRE(target);
++
++ DBC_REQUIRE(reserved || segid < target->num_segs);
++ DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
++ (dsp_addr + size) <= (target->seg_tab[segid].
++ base +
++ target->seg_tab[segid].
++ length)));
++
++ /*
++ * Free or unreserve memory.
++ */
++ if (!reserved) {
++ ret = free_block(target, segid, dsp_addr, size);
++ if (ret)
++ target->seg_tab[segid].number--;
++
++ } else {
++ /* Unreserve memory */
++ sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
++ while (sect != NULL) {
++ if (dsp_addr == sect->addr) {
++ DBC_ASSERT(size == sect->size);
++ /* Remove from list */
++ lst_remove_elem(target->ovly_list,
++ (struct list_head *)sect);
++ kfree(sect);
++ break;
++ }
++ sect =
++ (struct rmm_ovly_sect *)lst_next(target->ovly_list,
++ (struct list_head
++ *)sect);
++ }
++ if (sect == NULL)
++ ret = false;
++
++ }
++ return ret;
++}
++
++/*
++ * ======== rmm_init ========
++ */
++bool rmm_init(void)
++{
++ DBC_REQUIRE(refs >= 0);
++
++ refs++;
++
++ return true;
++}
++
++/*
++ * ======== rmm_stat ========
++ */
++bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
++ struct dsp_memstat *mem_stat_buf)
++{
++ struct rmm_header *head;
++ bool ret = false;
++ u32 max_free_size = 0;
++ u32 total_free_size = 0;
++ u32 free_blocks = 0;
++
++ DBC_REQUIRE(mem_stat_buf != NULL);
++ DBC_ASSERT(target != NULL);
++
++ if ((u32) segid < target->num_segs) {
++ head = target->free_list[segid];
++
++ /* Collect data from free_list */
++ while (head != NULL) {
++ max_free_size = max(max_free_size, head->size);
++ total_free_size += head->size;
++ free_blocks++;
++ head = head->next;
++ }
++
++ /* ul_size */
++ mem_stat_buf->ul_size = target->seg_tab[segid].length;
++
++ /* ul_num_free_blocks */
++ mem_stat_buf->ul_num_free_blocks = free_blocks;
++
++ /* ul_total_free_size */
++ mem_stat_buf->ul_total_free_size = total_free_size;
++
++ /* ul_len_max_free_block */
++ mem_stat_buf->ul_len_max_free_block = max_free_size;
++
++ /* ul_num_alloc_blocks */
++ mem_stat_buf->ul_num_alloc_blocks =
++ target->seg_tab[segid].number;
++
++ ret = true;
++ }
++
++ return ret;
++}
++
++/*
++ * ======== balloc ========
++ * This allocation function allocates memory from the lowest addresses
++ * first.
++ */
++static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
++ u32 align, u32 *dsp_address)
++{
++ struct rmm_header *head;
++ struct rmm_header *prevhead = NULL;
++ struct rmm_header *next;
++ u32 tmpalign;
++ u32 alignbytes;
++ u32 hsize;
++ u32 allocsize;
++ u32 addr;
++
++ alignbytes = (align == 0) ? 1 : align;
++ prevhead = NULL;
++ head = target->free_list[segid];
++
++ do {
++ hsize = head->size;
++ next = head->next;
++
++ addr = head->addr; /* alloc from the bottom */
++
++ /* align allocation */
++ (tmpalign = (u32) addr % alignbytes);
++ if (tmpalign != 0)
++ tmpalign = alignbytes - tmpalign;
++
++ allocsize = size + tmpalign;
++
++ if (hsize >= allocsize) { /* big enough */
++ if (hsize == allocsize && prevhead != NULL) {
++ prevhead->next = next;
++ kfree(head);
++ } else {
++ head->size = hsize - allocsize;
++ head->addr += allocsize;
++ }
++
++ /* free up any hole created by alignment */
++ if (tmpalign)
++ free_block(target, segid, addr, tmpalign);
++
++ *dsp_address = addr + tmpalign;
++ return true;
++ }
++
++ prevhead = head;
++ head = next;
++
++ } while (head != NULL);
++
++ return false;
++}
++
++/*
++ * ======== free_block ========
++ * TO DO: free_block() allocates memory, which could result in failure.
++ * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
++ * free_block() could use an rmm_header from the pool, freeing as blocks
++ * are coalesced.
++ */
++static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
++ u32 size)
++{
++ struct rmm_header *head;
++ struct rmm_header *thead;
++ struct rmm_header *rhead;
++ bool ret = true;
++
++ /* Create a memory header to hold the newly free'd block. */
++ rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
++ if (rhead == NULL) {
++ ret = false;
++ } else {
++ /* search down the free list to find the right place for addr */
++ head = target->free_list[segid];
++
++ if (addr >= head->addr) {
++ while (head->next != NULL && addr > head->next->addr)
++ head = head->next;
++
++ thead = head->next;
++
++ head->next = rhead;
++ rhead->next = thead;
++ rhead->addr = addr;
++ rhead->size = size;
++ } else {
++ *rhead = *head;
++ head->next = rhead;
++ head->addr = addr;
++ head->size = size;
++ thead = rhead->next;
++ }
++
++ /* join with upper block, if possible */
++ if (thead != NULL && (rhead->addr + rhead->size) ==
++ thead->addr) {
++ head->next = rhead->next;
++ thead->size = size + thead->size;
++ thead->addr = addr;
++ kfree(rhead);
++ rhead = thead;
++ }
++
++ /* join with the lower block, if possible */
++ if ((head->addr + head->size) == rhead->addr) {
++ head->next = rhead->next;
++ head->size = head->size + rhead->size;
++ kfree(rhead);
++ }
++ }
++
++ return ret;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/strm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/rmgr/strm.c 2010-08-18 11:24:23.230054242 +0300
+@@ -0,0 +1,853 @@
++/*
++ * strm.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * DSP/BIOS Bridge Stream Manager.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/sync.h>
++
++/* ----------------------------------- Bridge Driver */
++#include <dspbridge/dspdefs.h>
++
++/* ----------------------------------- Resource Manager */
++#include <dspbridge/nodepriv.h>
++
++/* ----------------------------------- Others */
++#include <dspbridge/cmm.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/strm.h>
++
++#include <dspbridge/cfg.h>
++#include <dspbridge/resourcecleanup.h>
++
++/* ----------------------------------- Defines, Data Structures, Typedefs */
++#define DEFAULTTIMEOUT 10000
++#define DEFAULTNUMBUFS 2
++
++/*
++ * ======== strm_mgr ========
++ * The strm_mgr contains device information needed to open the underlying
++ * channels of a stream.
++ */
++struct strm_mgr {
++ struct dev_object *dev_obj; /* Device for this processor */
++ struct chnl_mgr *hchnl_mgr; /* Channel manager */
++ /* Function interface to Bridge driver */
++ struct bridge_drv_interface *intf_fxns;
++};
++
++/*
++ * ======== strm_object ========
++ * This object is allocated in strm_open().
++ */
++struct strm_object {
++ struct strm_mgr *strm_mgr_obj;
++ struct chnl_object *chnl_obj;
++ u32 dir; /* DSP_TONODE or DSP_FROMNODE */
++ u32 utimeout;
++ u32 num_bufs; /* Max # of bufs allowed in stream */
++ u32 un_bufs_in_strm; /* Current # of bufs in stream */
++ u32 ul_n_bytes; /* bytes transferred since idled */
++ /* STREAM_IDLE, STREAM_READY, ... */
++ enum dsp_streamstate strm_state;
++ void *user_event; /* Saved for strm_get_info() */
++ enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */
++ u32 udma_chnl_id; /* DMA chnl id */
++ u32 udma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */
++ u32 segment_id; /* >0 is SM segment.=0 is local heap */
++ u32 buf_alignment; /* Alignment for stream bufs */
++ /* Stream's SM address translator */
++ struct cmm_xlatorobject *xlator;
++};
++
++/* ----------------------------------- Globals */
++static u32 refs; /* module reference count */
++
++/* ----------------------------------- Function Prototypes */
++static int delete_strm(struct strm_object *stream_obj);
++
++/*
++ * ======== strm_allocate_buffer ========
++ * Purpose:
++ * Allocates buffers for a stream.
++ */
++int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
++ u8 **ap_buffer, u32 num_bufs,
++ struct process_context *pr_ctxt)
++{
++ int status = 0;
++ u32 alloc_cnt = 0;
++ u32 i;
++ struct strm_object *stream_obj = strmres->hstream;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(ap_buffer != NULL);
++
++ if (stream_obj) {
++ /*
++ * Allocate from segment specified at time of stream open.
++ */
++ if (usize == 0)
++ status = -EINVAL;
++
++ } else {
++ status = -EFAULT;
++ }
++
++ if (status)
++ goto func_end;
++
++ for (i = 0; i < num_bufs; i++) {
++ DBC_ASSERT(stream_obj->xlator != NULL);
++ (void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i],
++ usize);
++ if (ap_buffer[i] == NULL) {
++ status = -ENOMEM;
++ alloc_cnt = i;
++ break;
++ }
++ }
++ if (status)
++ strm_free_buffer(strmres, ap_buffer, alloc_cnt, pr_ctxt);
++
++ if (status)
++ goto func_end;
++
++ drv_proc_update_strm_res(num_bufs, strmres);
++
++func_end:
++ return status;
++}
++
++/*
++ * ======== strm_close ========
++ * Purpose:
++ * Close a stream opened with strm_open().
++ */
++int strm_close(struct strm_res_object *strmres,
++ struct process_context *pr_ctxt)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct chnl_info chnl_info_obj;
++ int status = 0;
++ struct strm_object *stream_obj = strmres->hstream;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (!stream_obj) {
++ status = -EFAULT;
++ } else {
++ /* Have all buffers been reclaimed? If not, return
++ * -EPIPE */
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++ status =
++ (*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj,
++ &chnl_info_obj);
++ DBC_ASSERT(!status);
++
++ if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
++ status = -EPIPE;
++ else
++ status = delete_strm(stream_obj);
++ }
++
++ if (status)
++ goto func_end;
++
++ idr_remove(pr_ctxt->stream_id, strmres->id);
++func_end:
++ DBC_ENSURE(status == 0 || status == -EFAULT ||
++ status == -EPIPE || status == -EPERM);
++
++ dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__,
++ stream_obj, status);
++ return status;
++}
++
++/*
++ * ======== strm_create ========
++ * Purpose:
++ * Create a STRM manager object.
++ */
++int strm_create(struct strm_mgr **strm_man,
++ struct dev_object *dev_obj)
++{
++ struct strm_mgr *strm_mgr_obj;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(strm_man != NULL);
++ DBC_REQUIRE(dev_obj != NULL);
++
++ *strm_man = NULL;
++ /* Allocate STRM manager object */
++ strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
++ if (strm_mgr_obj == NULL)
++ status = -ENOMEM;
++ else
++ strm_mgr_obj->dev_obj = dev_obj;
++
++ /* Get Channel manager and Bridge function interface */
++ if (!status) {
++ status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr));
++ if (!status) {
++ (void)dev_get_intf_fxns(dev_obj,
++ &(strm_mgr_obj->intf_fxns));
++ DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
++ }
++ }
++
++ if (!status)
++ *strm_man = strm_mgr_obj;
++ else
++ kfree(strm_mgr_obj);
++
++ DBC_ENSURE((!status && *strm_man) || (status && *strm_man == NULL));
++
++ return status;
++}
++
++/*
++ * ======== strm_delete ========
++ * Purpose:
++ * Delete the STRM Manager Object.
++ */
++void strm_delete(struct strm_mgr *strm_mgr_obj)
++{
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(strm_mgr_obj);
++
++ kfree(strm_mgr_obj);
++}
++
++/*
++ * ======== strm_exit ========
++ * Purpose:
++ * Discontinue usage of STRM module.
++ */
++void strm_exit(void)
++{
++ DBC_REQUIRE(refs > 0);
++
++ refs--;
++
++ DBC_ENSURE(refs >= 0);
++}
++
++/*
++ * ======== strm_free_buffer ========
++ * Purpose:
++ * Frees the buffers allocated for a stream.
++ */
++int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
++ u32 num_bufs, struct process_context *pr_ctxt)
++{
++ int status = 0;
++ u32 i = 0;
++ struct strm_object *stream_obj = strmres->hstream;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(ap_buffer != NULL);
++
++ if (!stream_obj)
++ status = -EFAULT;
++
++ if (!status) {
++ for (i = 0; i < num_bufs; i++) {
++ DBC_ASSERT(stream_obj->xlator != NULL);
++ status =
++ cmm_xlator_free_buf(stream_obj->xlator,
++ ap_buffer[i]);
++ if (status)
++ break;
++ ap_buffer[i] = NULL;
++ }
++ }
++ drv_proc_update_strm_res(num_bufs - i, strmres);
++
++ return status;
++}
++
++/*
++ * ======== strm_get_info ========
++ * Purpose:
++ * Retrieves information about a stream.
++ */
++int strm_get_info(struct strm_object *stream_obj,
++ struct stream_info *stream_info,
++ u32 stream_info_size)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct chnl_info chnl_info_obj;
++ int status = 0;
++ void *virt_base = NULL; /* NULL if no SM used */
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(stream_info != NULL);
++ DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
++
++ if (!stream_obj) {
++ status = -EFAULT;
++ } else {
++ if (stream_info_size < sizeof(struct stream_info)) {
++ /* size of users info */
++ status = -EINVAL;
++ }
++ }
++ if (status)
++ goto func_end;
++
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++ status =
++ (*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj,
++ &chnl_info_obj);
++ if (status)
++ goto func_end;
++
++ if (stream_obj->xlator) {
++ /* We have a translator */
++ DBC_ASSERT(stream_obj->segment_id > 0);
++ cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0,
++ stream_obj->segment_id, false);
++ }
++ stream_info->segment_id = stream_obj->segment_id;
++ stream_info->strm_mode = stream_obj->strm_mode;
++ stream_info->virt_base = virt_base;
++ stream_info->user_strm->number_bufs_allowed = stream_obj->num_bufs;
++ stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs +
++ chnl_info_obj.cio_reqs;
++ /* # of bytes transferred since last call to DSPStream_Idle() */
++ stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx;
++ stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
++ /* Determine stream state based on channel state and info */
++ if (chnl_info_obj.dw_state & CHNL_STATEEOS) {
++ stream_info->user_strm->ss_stream_state = STREAM_DONE;
++ } else {
++ if (chnl_info_obj.cio_cs > 0)
++ stream_info->user_strm->ss_stream_state = STREAM_READY;
++ else if (chnl_info_obj.cio_reqs > 0)
++ stream_info->user_strm->ss_stream_state =
++ STREAM_PENDING;
++ else
++ stream_info->user_strm->ss_stream_state = STREAM_IDLE;
++
++ }
++func_end:
++ return status;
++}
++
++/*
++ * ======== strm_idle ========
++ * Purpose:
++ * Idles a particular stream.
++ */
++int strm_idle(struct strm_object *stream_obj, bool flush_data)
++{
++ struct bridge_drv_interface *intf_fxns;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++
++ if (!stream_obj) {
++ status = -EFAULT;
++ } else {
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++
++ status = (*intf_fxns->pfn_chnl_idle) (stream_obj->chnl_obj,
++ stream_obj->utimeout,
++ flush_data);
++ }
++
++ dev_dbg(bridge, "%s: stream_obj: %p flush_data: 0x%x status: 0x%x\n",
++ __func__, stream_obj, flush_data, status);
++ return status;
++}
++
++/*
++ * ======== strm_init ========
++ * Purpose:
++ * Initialize the STRM module.
++ */
++bool strm_init(void)
++{
++ bool ret = true;
++
++ DBC_REQUIRE(refs >= 0);
++
++ if (ret)
++ refs++;
++
++ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
++
++ return ret;
++}
++
++/*
++ * ======== strm_issue ========
++ * Purpose:
++ * Issues a buffer on a stream
++ */
++int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
++ u32 ul_buf_size, u32 dw_arg)
++{
++ struct bridge_drv_interface *intf_fxns;
++ int status = 0;
++ void *tmp_buf = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(pbuf != NULL);
++
++ if (!stream_obj) {
++ status = -EFAULT;
++ } else {
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++
++ if (stream_obj->segment_id != 0) {
++ tmp_buf = cmm_xlator_translate(stream_obj->xlator,
++ (void *)pbuf,
++ CMM_VA2DSPPA);
++ if (tmp_buf == NULL)
++ status = -ESRCH;
++
++ }
++ if (!status) {
++ status = (*intf_fxns->pfn_chnl_add_io_req)
++ (stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size,
++ (u32) tmp_buf, dw_arg);
++ }
++ if (status == -EIO)
++ status = -ENOSR;
++ }
++
++ dev_dbg(bridge, "%s: stream_obj: %p pbuf: %p ul_bytes: 0x%x dw_arg:"
++ " 0x%x status: 0x%x\n", __func__, stream_obj, pbuf,
++ ul_bytes, dw_arg, status);
++ return status;
++}
++
++/*
++ * ======== strm_open ========
++ * Purpose:
++ * Open a stream for sending/receiving data buffers to/from a task or
++ * XDAIS socket node on the DSP.
++ */
++int strm_open(struct node_object *hnode, u32 dir, u32 index,
++ struct strm_attr *pattr,
++ struct strm_res_object **strmres,
++ struct process_context *pr_ctxt)
++{
++ struct strm_mgr *strm_mgr_obj;
++ struct bridge_drv_interface *intf_fxns;
++ u32 ul_chnl_id;
++ struct strm_object *strm_obj = NULL;
++ s8 chnl_mode;
++ struct chnl_attr chnl_attr_obj;
++ int status = 0;
++ struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
++
++ void *stream_res;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(strmres != NULL);
++ DBC_REQUIRE(pattr != NULL);
++ *strmres = NULL;
++ if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
++ status = -EPERM;
++ } else {
++ /* Get the channel id from the node (set in node_connect()) */
++ status = node_get_channel_id(hnode, dir, index, &ul_chnl_id);
++ }
++ if (!status)
++ status = node_get_strm_mgr(hnode, &strm_mgr_obj);
++
++ if (!status) {
++ strm_obj = kzalloc(sizeof(struct strm_object), GFP_KERNEL);
++ if (strm_obj == NULL) {
++ status = -ENOMEM;
++ } else {
++ strm_obj->strm_mgr_obj = strm_mgr_obj;
++ strm_obj->dir = dir;
++ strm_obj->strm_state = STREAM_IDLE;
++ strm_obj->user_event = pattr->user_event;
++ if (pattr->stream_attr_in != NULL) {
++ strm_obj->utimeout =
++ pattr->stream_attr_in->utimeout;
++ strm_obj->num_bufs =
++ pattr->stream_attr_in->num_bufs;
++ strm_obj->strm_mode =
++ pattr->stream_attr_in->strm_mode;
++ strm_obj->segment_id =
++ pattr->stream_attr_in->segment_id;
++ strm_obj->buf_alignment =
++ pattr->stream_attr_in->buf_alignment;
++ strm_obj->udma_chnl_id =
++ pattr->stream_attr_in->udma_chnl_id;
++ strm_obj->udma_priority =
++ pattr->stream_attr_in->udma_priority;
++ chnl_attr_obj.uio_reqs =
++ pattr->stream_attr_in->num_bufs;
++ } else {
++ strm_obj->utimeout = DEFAULTTIMEOUT;
++ strm_obj->num_bufs = DEFAULTNUMBUFS;
++ strm_obj->strm_mode = STRMMODE_PROCCOPY;
++ strm_obj->segment_id = 0; /* local mem */
++ strm_obj->buf_alignment = 0;
++ strm_obj->udma_chnl_id = 0;
++ strm_obj->udma_priority = 0;
++ chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS;
++ }
++ chnl_attr_obj.reserved1 = NULL;
++ /* DMA chnl flush timeout */
++ chnl_attr_obj.reserved2 = strm_obj->utimeout;
++ chnl_attr_obj.event_obj = NULL;
++ if (pattr->user_event != NULL)
++ chnl_attr_obj.event_obj = pattr->user_event;
++
++ }
++ }
++ if (status)
++ goto func_cont;
++
++ if ((pattr->virt_base == NULL) || !(pattr->ul_virt_size > 0))
++ goto func_cont;
++
++ /* No System DMA */
++ DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
++ /* Get the shared mem mgr for this streams dev object */
++ status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
++ if (!status) {
++ /*Allocate a SM addr translator for this strm. */
++ status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
++ if (!status) {
++ DBC_ASSERT(strm_obj->segment_id > 0);
++ /* Set translators Virt Addr attributes */
++ status = cmm_xlator_info(strm_obj->xlator,
++ (u8 **) &pattr->virt_base,
++ pattr->ul_virt_size,
++ strm_obj->segment_id, true);
++ }
++ }
++func_cont:
++ if (!status) {
++ /* Open channel */
++ chnl_mode = (dir == DSP_TONODE) ?
++ CHNL_MODETODSP : CHNL_MODEFROMDSP;
++ intf_fxns = strm_mgr_obj->intf_fxns;
++ status = (*intf_fxns->pfn_chnl_open) (&(strm_obj->chnl_obj),
++ strm_mgr_obj->hchnl_mgr,
++ chnl_mode, ul_chnl_id,
++ &chnl_attr_obj);
++ if (status) {
++ /*
++ * over-ride non-returnable status codes so we return
++ * something documented
++ */
++ if (status != -ENOMEM && status !=
++ -EINVAL && status != -EPERM) {
++ /*
++ * We got a status that's not return-able.
++ * Assert that we got something we were
++ * expecting (-EFAULT isn't acceptable,
++ * strm_mgr_obj->hchnl_mgr better be valid or we
++ * assert here), and then return -EPERM.
++ */
++ DBC_ASSERT(status == -ENOSR ||
++ status == -ECHRNG ||
++ status == -EALREADY ||
++ status == -EIO);
++ status = -EPERM;
++ }
++ }
++ }
++ if (!status) {
++ status = drv_proc_insert_strm_res_element(strm_obj,
++ &stream_res, pr_ctxt);
++ if (status)
++ delete_strm(strm_obj);
++ else
++ *strmres = (struct strm_res_object *)stream_res;
++ } else {
++ (void)delete_strm(strm_obj);
++ }
++
++ /* ensure we return a documented error code */
++ DBC_ENSURE((!status && strm_obj) ||
++ (*strmres == NULL && (status == -EFAULT ||
++ status == -EPERM
++ || status == -EINVAL)));
++
++ dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
++ "strmres: %p status: 0x%x\n", __func__,
++ hnode, dir, index, pattr, strmres, status);
++ return status;
++}
++
++/*
++ * ======== strm_reclaim ========
++ * Purpose:
++ * Relcaims a buffer from a stream.
++ */
++int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
++ u32 *nbytes, u32 *buff_size, u32 *pdw_arg)
++{
++ struct bridge_drv_interface *intf_fxns;
++ struct chnl_ioc chnl_ioc_obj;
++ int status = 0;
++ void *tmp_buf = NULL;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(buf_ptr != NULL);
++ DBC_REQUIRE(nbytes != NULL);
++ DBC_REQUIRE(pdw_arg != NULL);
++
++ if (!stream_obj) {
++ status = -EFAULT;
++ goto func_end;
++ }
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++
++ status =
++ (*intf_fxns->pfn_chnl_get_ioc) (stream_obj->chnl_obj,
++ stream_obj->utimeout,
++ &chnl_ioc_obj);
++ if (!status) {
++ *nbytes = chnl_ioc_obj.byte_size;
++ if (buff_size)
++ *buff_size = chnl_ioc_obj.buf_size;
++
++ *pdw_arg = chnl_ioc_obj.dw_arg;
++ if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
++ if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
++ status = -ETIME;
++ } else {
++ /* Allow reclaims after idle to succeed */
++ if (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
++ status = -EPERM;
++
++ }
++ }
++ /* Translate zerocopy buffer if channel not canceled. */
++ if (!status
++ && (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
++ && (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) {
++ /*
++ * This is a zero-copy channel so chnl_ioc_obj.pbuf
++ * contains the DSP address of SM. We need to
++ * translate it to a virtual address for the user
++ * thread to access.
++ * Note: Could add CMM_DSPPA2VA to CMM in the future.
++ */
++ tmp_buf = cmm_xlator_translate(stream_obj->xlator,
++ chnl_ioc_obj.pbuf,
++ CMM_DSPPA2PA);
++ if (tmp_buf != NULL) {
++ /* now convert this GPP Pa to Va */
++ tmp_buf = cmm_xlator_translate(stream_obj->
++ xlator,
++ tmp_buf,
++ CMM_PA2VA);
++ }
++ if (tmp_buf == NULL)
++ status = -ESRCH;
++
++ chnl_ioc_obj.pbuf = tmp_buf;
++ }
++ *buf_ptr = chnl_ioc_obj.pbuf;
++ }
++func_end:
++ /* ensure we return a documented return code */
++ DBC_ENSURE(!status || status == -EFAULT ||
++ status == -ETIME || status == -ESRCH ||
++ status == -EPERM);
++
++ dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p "
++ "pdw_arg: %p status 0x%x\n", __func__, stream_obj,
++ buf_ptr, nbytes, pdw_arg, status);
++ return status;
++}
++
++/*
++ * ======== strm_register_notify ========
++ * Purpose:
++ * Register to be notified on specific events for this stream.
++ */
++int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
++ u32 notify_type, struct dsp_notification
++ * hnotification)
++{
++ struct bridge_drv_interface *intf_fxns;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(hnotification != NULL);
++
++ if (!stream_obj) {
++ status = -EFAULT;
++ } else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
++ DSP_STREAMDONE)) != 0) {
++ status = -EINVAL;
++ } else {
++ if (notify_type != DSP_SIGNALEVENT)
++ status = -ENOSYS;
++
++ }
++ if (!status) {
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++
++ status =
++ (*intf_fxns->pfn_chnl_register_notify) (stream_obj->
++ chnl_obj,
++ event_mask,
++ notify_type,
++ hnotification);
++ }
++ /* ensure we return a documented return code */
++ DBC_ENSURE(!status || status == -EFAULT ||
++ status == -ETIME || status == -ESRCH ||
++ status == -ENOSYS || status == -EPERM);
++ return status;
++}
++
++/*
++ * ======== strm_select ========
++ * Purpose:
++ * Selects a ready stream.
++ */
++int strm_select(struct strm_object **strm_tab, u32 strms,
++ u32 *pmask, u32 utimeout)
++{
++ u32 index;
++ struct chnl_info chnl_info_obj;
++ struct bridge_drv_interface *intf_fxns;
++ struct sync_object **sync_events = NULL;
++ u32 i;
++ int status = 0;
++
++ DBC_REQUIRE(refs > 0);
++ DBC_REQUIRE(strm_tab != NULL);
++ DBC_REQUIRE(pmask != NULL);
++ DBC_REQUIRE(strms > 0);
++
++ *pmask = 0;
++ for (i = 0; i < strms; i++) {
++ if (!strm_tab[i]) {
++ status = -EFAULT;
++ break;
++ }
++ }
++ if (status)
++ goto func_end;
++
++ /* Determine which channels have IO ready */
++ for (i = 0; i < strms; i++) {
++ intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns;
++ status = (*intf_fxns->pfn_chnl_get_info) (strm_tab[i]->chnl_obj,
++ &chnl_info_obj);
++ if (status) {
++ break;
++ } else {
++ if (chnl_info_obj.cio_cs > 0)
++ *pmask |= (1 << i);
++
++ }
++ }
++ if (!status && utimeout > 0 && *pmask == 0) {
++ /* Non-zero timeout */
++ sync_events = kmalloc(strms * sizeof(struct sync_object *),
++ GFP_KERNEL);
++
++ if (sync_events == NULL) {
++ status = -ENOMEM;
++ } else {
++ for (i = 0; i < strms; i++) {
++ intf_fxns =
++ strm_tab[i]->strm_mgr_obj->intf_fxns;
++ status = (*intf_fxns->pfn_chnl_get_info)
++ (strm_tab[i]->chnl_obj, &chnl_info_obj);
++ if (status)
++ break;
++ else
++ sync_events[i] =
++ chnl_info_obj.sync_event;
++
++ }
++ }
++ if (!status) {
++ status =
++ sync_wait_on_multiple_events(sync_events, strms,
++ utimeout, &index);
++ if (!status) {
++ /* Since we waited on the event, we have to
++ * reset it */
++ sync_set_event(sync_events[index]);
++ *pmask = 1 << index;
++ }
++ }
++ }
++func_end:
++ kfree(sync_events);
++
++ DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
++ (status && *pmask == 0));
++
++ return status;
++}
++
++/*
++ * ======== delete_strm ========
++ * Purpose:
++ * Frees the resources allocated for a stream.
++ */
++static int delete_strm(struct strm_object *stream_obj)
++{
++ struct bridge_drv_interface *intf_fxns;
++ int status = 0;
++
++ if (stream_obj) {
++ if (stream_obj->chnl_obj) {
++ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
++ /* Channel close can fail only if the channel handle
++ * is invalid. */
++ status = (*intf_fxns->pfn_chnl_close)
++ (stream_obj->chnl_obj);
++ /* Free all SM address translator resources */
++ if (!status) {
++ if (stream_obj->xlator) {
++ /* force free */
++ (void)cmm_xlator_delete(stream_obj->
++ xlator,
++ true);
++ }
++ }
++ }
++ kfree(stream_obj);
++ } else {
++ status = -EFAULT;
++ }
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/services/cfg.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/services/cfg.c 2010-08-18 11:24:23.230054242 +0300
+@@ -0,0 +1,253 @@
++/*
++ * cfg.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Implementation of platform specific config services.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++
++/* ----------------------------------- This */
++#include <dspbridge/cfg.h>
++#include <dspbridge/drv.h>
++
++struct drv_ext {
++ struct list_head link;
++ char sz_string[MAXREGPATHLENGTH];
++};
++
++/*
++ * ======== cfg_exit ========
++ * Purpose:
++ * Discontinue usage of the CFG module.
++ */
++void cfg_exit(void)
++{
++ /* Do nothing */
++}
++
++/*
++ * ======== cfg_get_auto_start ========
++ * Purpose:
++ * Retreive the autostart mask, if any, for this board.
++ */
++int cfg_get_auto_start(struct cfg_devnode *dev_node_obj,
++ u32 *auto_start)
++{
++ int status = 0;
++ u32 dw_buf_size;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ dw_buf_size = sizeof(*auto_start);
++ if (!dev_node_obj)
++ status = -EFAULT;
++ if (!auto_start || !drv_datap)
++ status = -EFAULT;
++ if (!status)
++ *auto_start = (drv_datap->base_img) ? 1 : 0;
++
++ DBC_ENSURE((status == 0 &&
++ (*auto_start == 0 || *auto_start == 1))
++ || status != 0);
++ return status;
++}
++
++/*
++ * ======== cfg_get_dev_object ========
++ * Purpose:
++ * Retrieve the Device Object handle for a given devnode.
++ */
++int cfg_get_dev_object(struct cfg_devnode *dev_node_obj,
++ u32 *value)
++{
++ int status = 0;
++ u32 dw_buf_size;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ if (!drv_datap)
++ status = -EPERM;
++
++ if (!dev_node_obj)
++ status = -EFAULT;
++
++ if (!value)
++ status = -EFAULT;
++
++ dw_buf_size = sizeof(value);
++ if (!status) {
++
++ /* check the device string and then store dev object */
++ if (!
++ (strcmp
++ ((char *)((struct drv_ext *)dev_node_obj)->sz_string,
++ "TIOMAP1510")))
++ *value = (u32)drv_datap->dev_object;
++ }
++ if (status)
++ pr_err("%s: Failed, status 0x%x\n", __func__, status);
++ return status;
++}
++
++/*
++ * ======== cfg_get_exec_file ========
++ * Purpose:
++ * Retreive the default executable, if any, for this board.
++ */
++int cfg_get_exec_file(struct cfg_devnode *dev_node_obj, u32 buf_size,
++ char *str_exec_file)
++{
++ int status = 0;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ if (!dev_node_obj)
++ status = -EFAULT;
++
++ else if (!str_exec_file || !drv_datap)
++ status = -EFAULT;
++
++ if (strlen(drv_datap->base_img) > buf_size)
++ status = -EINVAL;
++
++ if (!status && drv_datap->base_img)
++ strcpy(str_exec_file, drv_datap->base_img);
++
++ if (status)
++ pr_err("%s: Failed, status 0x%x\n", __func__, status);
++ DBC_ENSURE(((status == 0) &&
++ (strlen(str_exec_file) <= buf_size))
++ || (status != 0));
++ return status;
++}
++
++/*
++ * ======== cfg_get_object ========
++ * Purpose:
++ * Retrieve the Object handle from the Registry
++ */
++int cfg_get_object(u32 *value, u8 dw_type)
++{
++ int status = -EINVAL;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ DBC_REQUIRE(value != NULL);
++
++ if (!drv_datap)
++ return -EPERM;
++
++ switch (dw_type) {
++ case (REG_DRV_OBJECT):
++ if (drv_datap->drv_object) {
++ *value = (u32)drv_datap->drv_object;
++ status = 0;
++ } else {
++ status = -ENODATA;
++ }
++ break;
++ case (REG_MGR_OBJECT):
++ if (drv_datap->mgr_object) {
++ *value = (u32)drv_datap->mgr_object;
++ status = 0;
++ } else {
++ status = -ENODATA;
++ }
++ break;
++
++ default:
++ break;
++ }
++ if (status) {
++ *value = 0;
++ pr_err("%s: Failed, status 0x%x\n", __func__, status);
++ }
++ DBC_ENSURE((!status && *value != 0) || (status && *value == 0));
++ return status;
++}
++
++/*
++ * ======== cfg_init ========
++ * Purpose:
++ * Initialize the CFG module's private state.
++ */
++bool cfg_init(void)
++{
++ return true;
++}
++
++/*
++ * ======== cfg_set_dev_object ========
++ * Purpose:
++ * Store the Device Object handle and dev_node pointer for a given devnode.
++ */
++int cfg_set_dev_object(struct cfg_devnode *dev_node_obj, u32 value)
++{
++ int status = 0;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ if (!drv_datap) {
++ pr_err("%s: Failed, status 0x%x\n", __func__, status);
++ return -EPERM;
++ }
++
++ if (!dev_node_obj)
++ status = -EFAULT;
++
++ if (!status) {
++ /* Store the Bridge device object in the Registry */
++
++ if (!(strcmp((char *)dev_node_obj, "TIOMAP1510")))
++ drv_datap->dev_object = (void *) value;
++ }
++ if (status)
++ pr_err("%s: Failed, status 0x%x\n", __func__, status);
++
++ return status;
++}
++
++/*
++ * ======== cfg_set_object ========
++ * Purpose:
++ * Store the Driver Object handle
++ */
++int cfg_set_object(u32 value, u8 dw_type)
++{
++ int status = -EINVAL;
++ struct drv_data *drv_datap = dev_get_drvdata(bridge);
++
++ if (!drv_datap)
++ return -EPERM;
++
++ switch (dw_type) {
++ case (REG_DRV_OBJECT):
++ drv_datap->drv_object = (void *)value;
++ status = 0;
++ break;
++ case (REG_MGR_OBJECT):
++ drv_datap->mgr_object = (void *)value;
++ status = 0;
++ break;
++ default:
++ break;
++ }
++ if (status)
++ pr_err("%s: Failed, status 0x%x\n", __func__, status);
++ return status;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/services/ntfy.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/services/ntfy.c 2010-08-18 11:24:23.234067810 +0300
+@@ -0,0 +1,31 @@
++/*
++ * ntfy.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Manage lists of notification events.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- This */
++#include <dspbridge/ntfy.h>
++
++int dsp_notifier_event(struct notifier_block *this, unsigned long event,
++ void *data)
++{
++ struct ntfy_event *ne = container_of(this, struct ntfy_event,
++ noti_block);
++ if (ne->event & event)
++ sync_set_event(&ne->sync_obj);
++ return NOTIFY_OK;
++}
++
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/services/services.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/services/services.c 2010-08-18 11:24:23.234067810 +0300
+@@ -0,0 +1,70 @@
++/*
++ * services.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Provide SERVICES loading.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++#include <linux/types.h>
++
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- DSP/BIOS Bridge */
++#include <dspbridge/dbdefs.h>
++
++/* ----------------------------------- Trace & Debug */
++#include <dspbridge/dbc.h>
++
++/* ----------------------------------- OS Adaptation Layer */
++#include <dspbridge/cfg.h>
++#include <dspbridge/ntfy.h>
++#include <dspbridge/sync.h>
++#include <dspbridge/clk.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/services.h>
++
++/*
++ * ======== services_exit ========
++ * Purpose:
++ * Discontinue usage of module; free resources when reference count
++ * reaches 0.
++ */
++void services_exit(void)
++{
++ cfg_exit();
++}
++
++/*
++ * ======== services_init ========
++ * Purpose:
++ * Initializes SERVICES modules.
++ */
++bool services_init(void)
++{
++ bool ret = true;
++ bool fcfg;
++
++ /* Perform required initialization of SERVICES modules. */
++ fcfg = cfg_init();
++
++ ret = fcfg;
++
++ if (!ret) {
++ if (fcfg)
++ cfg_exit();
++ }
++
++ return ret;
++}
+Index: linux-2.6.35-master/drivers/staging/tidspbridge/services/sync.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.35-master/drivers/staging/tidspbridge/services/sync.c 2010-08-18 11:24:23.234067810 +0300
+@@ -0,0 +1,104 @@
++/*
++ * sync.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Synchronization services.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/* ----------------------------------- Host OS */
++#include <dspbridge/host_os.h>
++
++/* ----------------------------------- This */
++#include <dspbridge/sync.h>
++
++DEFINE_SPINLOCK(sync_lock);
++
++/**
++ * sync_set_event() - set or signal and specified event
++ * @event: Event to be set..
++ *
++ * set the @event, if there is an thread waiting for the event
++ * it will be waken up, this function only wakes one thread.
++ */
++
++void sync_set_event(struct sync_object *event)
++{
++ spin_lock_bh(&sync_lock);
++ complete(&event->comp);
++ if (event->multi_comp)
++ complete(event->multi_comp);
++ spin_unlock_bh(&sync_lock);
++}
++
++/**
++ * sync_wait_on_multiple_events() - waits for multiple events to be set.
++ * @events: Array of events to wait for them.
++ * @count: number of elements of the array.
++ * @timeout timeout on waiting for the evetns.
++ * @pu_index index of the event set.
++ *
++ * This functios will wait until any of the array element is set or until
++ * timeout. In case of success the function will return 0 and
++ * @pu_index will store the index of the array element set or in case
++ * of timeout the function will return -ETIME or in case of
++ * interrupting by a signal it will return -EPERM.
++ */
++
++int sync_wait_on_multiple_events(struct sync_object **events,
++ unsigned count, unsigned timeout,
++ unsigned *index)
++{
++ unsigned i;
++ int status = -EPERM;
++ struct completion m_comp;
++
++ init_completion(&m_comp);
++
++ if (SYNC_INFINITE == timeout)
++ timeout = MAX_SCHEDULE_TIMEOUT;
++
++ spin_lock_bh(&sync_lock);
++ for (i = 0; i < count; i++) {
++ if (completion_done(&events[i]->comp)) {
++ INIT_COMPLETION(events[i]->comp);
++ *index = i;
++ spin_unlock_bh(&sync_lock);
++ status = 0;
++ goto func_end;
++ }
++ }
++
++ for (i = 0; i < count; i++)
++ events[i]->multi_comp = &m_comp;
++
++ spin_unlock_bh(&sync_lock);
++
++ if (!wait_for_completion_interruptible_timeout(&m_comp,
++ msecs_to_jiffies(timeout)))
++ status = -ETIME;
++
++ spin_lock_bh(&sync_lock);
++ for (i = 0; i < count; i++) {
++ if (completion_done(&events[i]->comp)) {
++ INIT_COMPLETION(events[i]->comp);
++ *index = i;
++ status = 0;
++ }
++ events[i]->multi_comp = NULL;
++ }
++ spin_unlock_bh(&sync_lock);
++func_end:
++ return status;
++}
++
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-Use-MODULE_ALIAS-macro-at-correct-postion-for.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-Use-MODULE_ALIAS-macro-at-correct-postion-for.patch
new file mode 100644
index 0000000000..cb3cb656e5
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-Use-MODULE_ALIAS-macro-at-correct-postion-for.patch
@@ -0,0 +1,43 @@
+From da33fbfe1cf4cbab1bbca7c33b4f150c73aa9340 Mon Sep 17 00:00:00 2001
+From: Ameya Palande <ameya.palande@nokia.com>
+Date: Mon, 5 Jul 2010 17:06:14 +0300
+Subject: [PATCH 10/11] wl1251: Use MODULE_ALIAS macro at correct postion for SPI bus
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ drivers/net/wireless/wl12xx/wl1251_main.c | 1 -
+ drivers/net/wireless/wl12xx/wl1251_spi.c | 1 +
+ drivers/net/wireless/wl12xx/wl1271_spi.c | 1 +
+ 3 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
+index 00b2428..35fdf88 100644
+--- a/drivers/net/wireless/wl12xx/wl1251_main.c
++++ b/drivers/net/wireless/wl12xx/wl1251_main.c
+@@ -1419,5 +1419,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
+ MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
+-MODULE_ALIAS("spi:wl1251");
+ MODULE_FIRMWARE(WL1251_FW_NAME);
+diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
+index e814742..27fdfaa 100644
+--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
++++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
+@@ -345,3 +345,4 @@ module_exit(wl1251_spi_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
++MODULE_ALIAS("spi:wl1251");
+diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
+index 5189b81..96d25fb 100644
+--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
++++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
+@@ -461,3 +461,4 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+ MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+ MODULE_FIRMWARE(WL1271_FW_NAME);
++MODULE_ALIAS("spi:wl1271");
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-fix-trigger-scan-timeout-usage.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-fix-trigger-scan-timeout-usage.patch
new file mode 100644
index 0000000000..3a39735915
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.36-wl1251-fix-trigger-scan-timeout-usage.patch
@@ -0,0 +1,37 @@
+From: "Kululin Yuri (EXT-Teleca/RussianFed)" <EXT-Yuri.Kululin@nokia.com>
+To: "kvalo@adurom.com" <kvalo@adurom.com>
+CC: "linux-wireless@vger.kernel.org" <linux-wireless@vger.kernel.org>, "Palande Ameya (Nokia-MS/Helsinki)" <ameya.palande@nokia.com>, "Kululin Yuri (EXT-Teleca/RussianFed)" <EXT-Yuri.Kululin@nokia.com>
+Date: Fri, 13 Aug 2010 11:46:12 +0200
+Subject: [PATCH 1/1] wl1251: fix trigger scan timeout usage
+
+From: Yuri Kululin <ext-yuri.kululin@nokia.com>
+
+Use appropriate command (CMD_TRIGGER_SCAN_TO) instead of scan command
+(CMD_SCAN) to configure trigger scan timeout.
+
+This was broken in commit 3a98c30f3e8bb1f32b5bcb74a39647b3670de275.
+
+Cc: stable@kernel.org
+Signed-off-by: Yuri Ershov <ext-yuri.ershov@nokia.com>
+Signed-off-by: Yuri Kululin <ext-yuri.kululin@nokia.com>
+---
+ drivers/net/wireless/wl12xx/wl1251_cmd.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
+index a37b30c..ce3722f 100644
+--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
++++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
+@@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+
+ cmd->timeout = timeout;
+
+- ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
++ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+--
+1.7.1.1
+
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-EEM-support-for-g_nokia.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-EEM-support-for-g_nokia.patch
new file mode 100644
index 0000000000..8542543a3e
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-EEM-support-for-g_nokia.patch
@@ -0,0 +1,86 @@
+From: Ameya Palande <ameya.palande@nokia.com>
+Date: Fri, 13 Aug 2010 10:32:08 +0300
+Subject: [PATCH] EEM support for g_nokia
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ drivers/usb/gadget/Kconfig | 16 ++++++++++++++++
+ drivers/usb/gadget/nokia.c | 21 ++++++++++++++++++---
+ 2 files changed, 34 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index 591ae9f..9f3031c 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -860,6 +860,22 @@ config USB_G_NOKIA
+ It's only really useful for N900 hardware. If you're building
+ a kernel for N900, say Y or M here. If unsure, say N.
+
++config USB_G_NOKIA_EEM
++ bool "Ethernet Emulation Model (EEM) support"
++ depends on USB_G_NOKIA
++ default n
++ help
++ CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
++ and therefore can be supported by more hardware. Technically ECM and
++ EEM are designed for different applications. The ECM model extends
++ the network interface to the target (e.g. a USB cable modem), and the
++ EEM model is for mobile devices to communicate with hosts using
++ ethernet over USB. For Linux gadgets, however, the interface with
++ the host is the same (a usbX device), so the differences are minimal.
++
++ If you say "y" here, the Ethernet gadget driver will use the EEM
++ protocol rather than ECM. If unsure, say "n".
++
+ config USB_G_MULTI
+ tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
+ depends on BLOCK && NET
+diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
+index 7d6b66a..f1c2647 100644
+--- a/drivers/usb/gadget/nokia.c
++++ b/drivers/usb/gadget/nokia.c
+@@ -46,6 +46,7 @@
+ #include "u_serial.c"
+ #include "f_acm.c"
+ #include "f_ecm.c"
++#include "f_eem.c"
+ #include "f_obex.c"
+ #include "f_serial.c"
+ #include "f_phonet.c"
+@@ -97,6 +98,14 @@ static struct usb_device_descriptor device_desc = {
+
+ /*-------------------------------------------------------------------------*/
+
++#ifdef CONFIG_USB_G_NOKIA_EEM
++static int use_eem = 1;
++#else
++static int use_eem;
++#endif
++module_param(use_eem, bool, 0);
++MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
++
+ /* Module */
+ MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
+ MODULE_AUTHOR("Felipe Balbi");
+@@ -126,9 +135,15 @@ static int __init nokia_bind_config(struct usb_configuration *c)
+ if (status)
+ printk(KERN_DEBUG "could not bind acm config\n");
+
+- status = ecm_bind_config(c, hostaddr);
+- if (status)
+- printk(KERN_DEBUG "could not bind ecm config\n");
++ if (use_eem) {
++ status = eem_bind_config(c);
++ if (status)
++ printk(KERN_DEBUG "could not bind eem config\n");
++ } else {
++ status = ecm_bind_config(c, hostaddr);
++ if (status)
++ printk(KERN_DEBUG "could not bind ecm config\n");
++ }
+
+ return status;
+ }
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap-rx51-add-support-for-USB-chargers.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap-rx51-add-support-for-USB-chargers.patch
new file mode 100644
index 0000000000..f0b2a5f161
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap-rx51-add-support-for-USB-chargers.patch
@@ -0,0 +1,34 @@
+From 5f4f847f1b506f45f7e64eb2add20e29f496b0cb Mon Sep 17 00:00:00 2001
+From: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+Date: Thu, 19 Aug 2010 14:09:37 +0200
+Subject: [PATCH 2/2] omap: rx51: add support for USB chargers
+
+This enables isp1704 power supply driver on RX51, allowing
+USB charger detection with N900.
+
+Backported to 2.6.35 by: Ameya Palande <ameya.palande@nokia.com>
+
+Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -274,6 +274,10 @@
+ },
+ };
+
++static struct platform_device rx51_charger_device = {
++ .name = "isp1704_charger",
++};
++
+ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+
+ #define RX51_GPIO_CAMERA_LENS_COVER 110
+@@ -1316,4 +1320,5 @@
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ omap2_hsmmc_init(mmc);
++ platform_device_register(&rx51_charger_device);
+ }
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap3-rx51-Platform-support-for-lp5523-led-chip.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap3-rx51-Platform-support-for-lp5523-led-chip.patch
new file mode 100644
index 0000000000..136bd34497
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-omap3-rx51-Platform-support-for-lp5523-led-chip.patch
@@ -0,0 +1,127 @@
+From 918726de27c3a817f1f2ae27a802ad74762d60eb Mon Sep 17 00:00:00 2001
+From: Ameya Palande <ameya.palande@nokia.com>
+Date: Wed, 18 Aug 2010 17:50:23 +0300
+Subject: [PATCH] omap3: rx51: Platform support for lp5523 led chip
+
+Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 83 ++++++++++++++++++++++++++-
+ 1 file changed, 82 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -46,6 +46,7 @@
+
+ #include <sound/tlv320aic3x.h>
+ #include <sound/tpa6130a2-plat.h>
++#include <linux/leds-lp5523.h>
+
+ #include "mux.h"
+ #include "hsmmc.h"
+@@ -68,6 +69,8 @@
+ #define LIS302_IRQ1_GPIO 181
+ #define LIS302_IRQ2_GPIO 180 /* Not yet in use */
+
++#define RX51_LP5523_CHIP_EN_GPIO 41
++
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
+@@ -155,6 +158,79 @@
+ };
+ #endif
+
++#if defined(CONFIG_LEDS_LP5523) || defined(CONFIG_LEDS_LP5523_MODULE)
++static struct lp5523_led_config rx51_lp5523_led_config[] = {
++ {
++ .chan_nr = 0,
++ .led_current = 50,
++ }, {
++ .chan_nr = 1,
++ .led_current = 50,
++ }, {
++ .chan_nr = 2,
++ .led_current = 50,
++ }, {
++ .chan_nr = 3,
++ .led_current = 50,
++ }, {
++ .chan_nr = 4,
++ .led_current = 50,
++ }, {
++ .chan_nr = 5,
++ .led_current = 50,
++ }, {
++ .chan_nr = 6,
++ .led_current = 50,
++ }, {
++ .chan_nr = 7,
++ .led_current = 50,
++ }, {
++ .chan_nr = 8,
++ .led_current = 50,
++ }
++};
++
++static int rx51_lp5523_setup(void)
++{
++ int err;
++
++ err = gpio_request(RX51_LP5523_CHIP_EN_GPIO, "lp5523_enable");
++ if (err < 0) {
++ pr_err("Unable to get lp5523_enable GPIO\n");
++ return err;
++ }
++
++ err = gpio_direction_output(RX51_LP5523_CHIP_EN_GPIO, 1);
++ if (err < 0) {
++ pr_err("Failed to change direction for %d GPIO\n",
++ RX51_LP5523_CHIP_EN_GPIO);
++ }
++ return err;
++}
++
++static void rx51_lp5523_release(void)
++{
++ gpio_free(RX51_LP5523_CHIP_EN_GPIO);
++}
++
++static void rx51_lp5523_enable(bool state)
++{
++ if (state)
++ gpio_set_value(RX51_LP5523_CHIP_EN_GPIO, 1);
++ else
++ gpio_set_value(RX51_LP5523_CHIP_EN_GPIO, 0);
++}
++
++static struct lp5523_platform_data rx51_lp5523_platform_data = {
++ .led_config = rx51_lp5523_led_config,
++ .num_channels = ARRAY_SIZE(rx51_lp5523_led_config),
++ .clock_mode = LP5523_CLOCK_AUTO,
++ .setup_resources = rx51_lp5523_setup,
++ .release_resources = rx51_lp5523_release,
++ .enable = rx51_lp5523_enable,
++};
++#endif
++
+ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+@@ -886,6 +962,12 @@
+ .platform_data = &rx51_tsl2563_platform_data,
+ },
+ #endif
++#if defined(CONFIG_LEDS_LP5523) || defined(CONFIG_LEDS_LP5523_MODULE)
++ {
++ I2C_BOARD_INFO("lp5523", 0x32),
++ .platform_data = &rx51_lp5523_platform_data,
++ },
++#endif
+ {
+ I2C_BOARD_INFO("tpa6130a2", 0x60),
+ .platform_data = &rx51_tpa6130a2_data,
+@@ -1235,4 +1317,3 @@
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ omap2_hsmmc_init(mmc);
+ }
+-
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-isp1704-charger-detection-driver.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-isp1704-charger-detection-driver.patch
new file mode 100644
index 0000000000..d4982f0561
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-isp1704-charger-detection-driver.patch
@@ -0,0 +1,413 @@
+From bb4ae2fb03f0777a4083c21992b96b1d8374bac7 Mon Sep 17 00:00:00 2001
+From: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+Date: Thu, 19 Aug 2010 14:09:36 +0200
+Subject: [PATCH 1/2] power_supply: add isp1704 charger detection driver
+
+NXP ISP1704 is Battery Charging Specification 1.0 compliant USB
+transceiver. This adds a power supply driver for ISP1704 and
+ISP1707 USB transceivers.
+
+Backported to 2.6.35 by: Ameya Palande <ameya.palande@nokia.com>
+
+Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+---
+ drivers/power/Kconfig | 7
+ drivers/power/Makefile | 1
+ drivers/power/isp1704_charger.c | 370 ++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 378 insertions(+)
+ create mode 100644 drivers/power/isp1704_charger.c
+
+--- a/drivers/power/Kconfig
++++ b/drivers/power/Kconfig
+@@ -149,4 +149,11 @@
+ Say Y here to enable the battery driver on Intel MID
+ platforms.
+
++config CHARGER_ISP1704
++ tristate "ISP1704 USB Charger Detection"
++ depends on USB_OTG_UTILS
++ help
++ Say Y to enable support for USB Charger Detection with
++ ISP1707/ISP1704 USB transceivers.
++
+ endif # POWER_SUPPLY
+--- a/drivers/power/Makefile
++++ b/drivers/power/Makefile
+@@ -35,3 +35,4 @@
+ obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
+ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
+ obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
++obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
+--- /dev/null
++++ b/drivers/power/isp1704_charger.c
+@@ -0,0 +1,370 @@
++/*
++ * isp1704_charger.c - ISP1704 USB Charger Detection driver
++ *
++ * Copyright (C) 2010 Nokia Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/device.h>
++#include <linux/sysfs.h>
++#include <linux/platform_device.h>
++#include <linux/power_supply.h>
++#include <linux/delay.h>
++
++#include <linux/usb/otg.h>
++#include <linux/usb/ulpi.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++
++/* Vendor specific Power Control register */
++#define ISP1704_PWR_CTRL 0x3d
++#define ISP1704_PWR_CTRL_SWCTRL (1 << 0)
++#define ISP1704_PWR_CTRL_DET_COMP (1 << 1)
++#define ISP1704_PWR_CTRL_BVALID_RISE (1 << 2)
++#define ISP1704_PWR_CTRL_BVALID_FALL (1 << 3)
++#define ISP1704_PWR_CTRL_DP_WKPU_EN (1 << 4)
++#define ISP1704_PWR_CTRL_VDAT_DET (1 << 5)
++#define ISP1704_PWR_CTRL_DPVSRC_EN (1 << 6)
++#define ISP1704_PWR_CTRL_HWDETECT (1 << 7)
++
++#define NXP_VENDOR_ID 0x04cc
++
++static u16 isp170x_id[] = {
++ 0x1704,
++ 0x1707,
++};
++
++struct isp1704_charger {
++ struct device *dev;
++ struct power_supply psy;
++ struct otg_transceiver *otg;
++ struct notifier_block nb;
++ struct work_struct work;
++
++ char model[7];
++ unsigned present:1;
++};
++
++/*
++ * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger
++ * is actually a dedicated charger, the following steps need to be taken.
++ */
++static inline int isp1704_charger_verify(struct isp1704_charger *isp)
++{
++ int ret = 0;
++ u8 r;
++
++ /* Reset the transceiver */
++ r = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
++ r |= ULPI_FUNC_CTRL_RESET;
++ otg_io_write(isp->otg, ULPI_FUNC_CTRL, r);
++ msleep(1);
++
++ /* Set normal mode */
++ r &= ~(ULPI_FUNC_CTRL_RESET | ULPI_FUNC_CTRL_OPMODE_MASK);
++ otg_io_write(isp->otg, ULPI_FUNC_CTRL, r);
++
++ /* Clear the DP and DM pull-down bits */
++ r = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN;
++ otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), r);
++
++ /* Enable strong pull-up on DP (1.5K) and reset */
++ r = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
++ otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), r);
++ msleep(1);
++
++ /* Read the line state */
++ if (!otg_io_read(isp->otg, ULPI_DEBUG)) {
++ /* Disable strong pull-up on DP (1.5K) */
++ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
++ ULPI_FUNC_CTRL_TERMSELECT);
++ return 1;
++ }
++
++ /* Is it a charger or PS/2 connection */
++
++ /* Enable weak pull-up resistor on DP */
++ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
++ ISP1704_PWR_CTRL_DP_WKPU_EN);
++
++ /* Disable strong pull-up on DP (1.5K) */
++ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
++ ULPI_FUNC_CTRL_TERMSELECT);
++
++ /* Enable weak pull-down resistor on DM */
++ otg_io_write(isp->otg, ULPI_SET(ULPI_OTG_CTRL),
++ ULPI_OTG_CTRL_DM_PULLDOWN);
++
++ /* It's a charger if the line states are clear */
++ if (!(otg_io_read(isp->otg, ULPI_DEBUG)))
++ ret = 1;
++
++ /* Disable weak pull-up resistor on DP */
++ otg_io_write(isp->otg, ULPI_CLR(ISP1704_PWR_CTRL),
++ ISP1704_PWR_CTRL_DP_WKPU_EN);
++
++ return ret;
++}
++
++static inline int isp1704_charger_detect(struct isp1704_charger *isp)
++{
++ unsigned long timeout;
++ u8 r;
++ int ret = 0;
++
++ /* set SW control bit in PWR_CTRL register */
++ otg_io_write(isp->otg, ISP1704_PWR_CTRL,
++ ISP1704_PWR_CTRL_SWCTRL);
++
++	/* enable manual charger detection */
++ r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN);
++ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r);
++ msleep(1);
++
++ timeout = jiffies + msecs_to_jiffies(300);
++ do {
++ /* Check if there is a charger */
++ if (otg_io_read(isp->otg, ISP1704_PWR_CTRL)
++ & ISP1704_PWR_CTRL_VDAT_DET) {
++ ret = isp1704_charger_verify(isp);
++ break;
++ }
++ } while (!time_after(jiffies, timeout));
++
++ return ret;
++}
++
++static void isp1704_charger_work(struct work_struct *data)
++{
++ int detect;
++ struct isp1704_charger *isp =
++ container_of(data, struct isp1704_charger, work);
++
++ /*
++ * FIXME Only supporting dedicated chargers even though isp1704 can
++ * detect HUB and HOST chargers. If the device has already been
++ * enumerated, the detection will break the connection.
++ */
++ if (isp->otg->state != OTG_STATE_B_IDLE)
++ return;
++
++ /* disable data pullups */
++ if (isp->otg->gadget)
++ usb_gadget_disconnect(isp->otg->gadget);
++
++ /* detect charger */
++ detect = isp1704_charger_detect(isp);
++
++ if (detect) {
++ isp->present = detect;
++ power_supply_changed(&isp->psy);
++ }
++
++ /* enable data pullups */
++ if (isp->otg->gadget)
++ usb_gadget_connect(isp->otg->gadget);
++}
++
++static int isp1704_notifier_call(struct notifier_block *nb,
++ unsigned long event, void *unused)
++{
++ struct isp1704_charger *isp =
++ container_of(nb, struct isp1704_charger, nb);
++
++ switch (event) {
++ case USB_EVENT_VBUS:
++ schedule_work(&isp->work);
++ break;
++ case USB_EVENT_NONE:
++ if (isp->present) {
++ isp->present = 0;
++ power_supply_changed(&isp->psy);
++ }
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++
++ return NOTIFY_OK;
++}
++
++static int isp1704_charger_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct isp1704_charger *isp =
++ container_of(psy, struct isp1704_charger, psy);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = isp->present;
++ break;
++ case POWER_SUPPLY_PROP_MODEL_NAME:
++ val->strval = isp->model;
++ break;
++ case POWER_SUPPLY_PROP_MANUFACTURER:
++ val->strval = "NXP";
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static enum power_supply_property power_props[] = {
++ POWER_SUPPLY_PROP_PRESENT,
++ POWER_SUPPLY_PROP_MODEL_NAME,
++ POWER_SUPPLY_PROP_MANUFACTURER,
++};
++
++static inline int isp1704_test_ulpi(struct isp1704_charger *isp)
++{
++ int vendor;
++ int product;
++ int i;
++ int ret = -ENODEV;
++
++ /* Test ULPI interface */
++ ret = otg_io_write(isp->otg, ULPI_SCRATCH, 0xaa);
++ if (ret < 0)
++ return ret;
++
++ ret = otg_io_read(isp->otg, ULPI_SCRATCH);
++ if (ret < 0)
++ return ret;
++
++ if (ret != 0xaa)
++ return -ENODEV;
++
++ /* Verify the product and vendor id matches */
++ vendor = otg_io_read(isp->otg, ULPI_VENDOR_ID_LOW);
++ vendor |= otg_io_read(isp->otg, ULPI_VENDOR_ID_HIGH) << 8;
++ if (vendor != NXP_VENDOR_ID)
++ return -ENODEV;
++
++ product = otg_io_read(isp->otg, ULPI_PRODUCT_ID_LOW);
++ product |= otg_io_read(isp->otg, ULPI_PRODUCT_ID_HIGH) << 8;
++
++ for (i = 0; i < ARRAY_SIZE(isp170x_id); i++) {
++ if (product == isp170x_id[i]) {
++ sprintf(isp->model, "isp%x", product);
++ return product;
++ }
++ }
++
++ dev_err(isp->dev, "product id %x not matching known ids", product);
++
++ return -ENODEV;
++}
++
++static int __devinit isp1704_charger_probe(struct platform_device *pdev)
++{
++ struct isp1704_charger *isp;
++ int ret = -ENODEV;
++
++ isp = kzalloc(sizeof *isp, GFP_KERNEL);
++ if (!isp)
++ return -ENOMEM;
++
++ isp->otg = otg_get_transceiver();
++ if (!isp->otg)
++ goto fail0;
++
++ ret = isp1704_test_ulpi(isp);
++ if (ret < 0)
++ goto fail1;
++
++ isp->dev = &pdev->dev;
++ platform_set_drvdata(pdev, isp);
++
++ isp->psy.name = "isp1704";
++ isp->psy.type = POWER_SUPPLY_TYPE_USB;
++ isp->psy.properties = power_props;
++ isp->psy.num_properties = ARRAY_SIZE(power_props);
++ isp->psy.get_property = isp1704_charger_get_property;
++
++ ret = power_supply_register(isp->dev, &isp->psy);
++ if (ret)
++ goto fail1;
++
++ /*
++ * REVISIT: using work in order to allow the otg notifications to be
++ * made atomically in the future.
++ */
++ INIT_WORK(&isp->work, isp1704_charger_work);
++
++ isp->nb.notifier_call = isp1704_notifier_call;
++
++ ret = otg_register_notifier(isp->otg, &isp->nb);
++ if (ret)
++ goto fail2;
++
++ dev_info(isp->dev, "registered with product id %s\n", isp->model);
++
++ return 0;
++fail2:
++ power_supply_unregister(&isp->psy);
++fail1:
++ otg_put_transceiver(isp->otg);
++fail0:
++ kfree(isp);
++
++ dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
++
++ return ret;
++}
++
++static int __devexit isp1704_charger_remove(struct platform_device *pdev)
++{
++ struct isp1704_charger *isp = platform_get_drvdata(pdev);
++
++ otg_unregister_notifier(isp->otg, &isp->nb);
++ power_supply_unregister(&isp->psy);
++ otg_put_transceiver(isp->otg);
++ kfree(isp);
++
++ return 0;
++}
++
++static struct platform_driver isp1704_charger_driver = {
++ .driver = {
++ .name = "isp1704_charger",
++ },
++ .probe = isp1704_charger_probe,
++ .remove = __devexit_p(isp1704_charger_remove),
++};
++
++static int __init isp1704_charger_init(void)
++{
++ return platform_driver_register(&isp1704_charger_driver);
++}
++module_init(isp1704_charger_init);
++
++static void __exit isp1704_charger_exit(void)
++{
++ platform_driver_unregister(&isp1704_charger_driver);
++}
++module_exit(isp1704_charger_exit);
++
++MODULE_ALIAS("platform:isp1704_charger");
++MODULE_AUTHOR("Nokia Corporation");
++MODULE_DESCRIPTION("ISP170x USB Charger driver");
++MODULE_LICENSE("GPL");
diff --git a/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-types-for-USB-chargers.patch b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-types-for-USB-chargers.patch
new file mode 100644
index 0000000000..f350c4515f
--- /dev/null
+++ b/recipes/linux/linux-2.6.35/nokia900/linux-2.6.37-power_supply-add-types-for-USB-chargers.patch
@@ -0,0 +1,47 @@
+From e4516f4634cdef198063b399108bbeee15fc37b6 Mon Sep 17 00:00:00 2001
+From: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+Date: Thu, 9 Sep 2010 14:37:15 +0200
+Subject: [PATCH 4/5] power_supply: add types for USB chargers
+
+This adds power supply types for USB chargers defined in
+Battery Charging Specification 1.1.
+
+Signed-off-by: Heikki Krogerus <ext-heikki.krogerus@nokia.com>
+---
+ drivers/power/power_supply_sysfs.c | 3 ++-
+ include/linux/power_supply.h | 5 ++++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
+index 9d30eeb..88f5e43 100644
+--- a/drivers/power/power_supply_sysfs.c
++++ b/drivers/power/power_supply_sysfs.c
+@@ -42,7 +42,8 @@ static ssize_t power_supply_show_property(struct device *dev,
+ struct device_attribute *attr,
+ char *buf) {
+ static char *type_text[] = {
+- "Battery", "UPS", "Mains", "USB"
++ "Battery", "UPS", "Mains", "USB",
++ "USB_DCP", "USB_CDP", "USB_ACA"
+ };
+ static char *status_text[] = {
+ "Unknown", "Charging", "Discharging", "Not charging", "Full"
+diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
+index 30083a8..d37fef6 100644
+--- a/include/linux/power_supply.h
++++ b/include/linux/power_supply.h
+@@ -125,7 +125,10 @@ enum power_supply_type {
+ POWER_SUPPLY_TYPE_BATTERY = 0,
+ POWER_SUPPLY_TYPE_UPS,
+ POWER_SUPPLY_TYPE_MAINS,
+- POWER_SUPPLY_TYPE_USB,
++ POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */
++ POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
++ POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
++ POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
+ };
+
+ union power_supply_propval {
+--
+1.7.0.4
+
diff --git a/recipes/linux/linux_2.6.35.bb b/recipes/linux/linux_2.6.35.bb
index 9e9eb5a0f4..f0e6644270 100644
--- a/recipes/linux/linux_2.6.35.bb
+++ b/recipes/linux/linux_2.6.35.bb
@@ -14,6 +14,7 @@ DEFAULT_PREFERENCE_qemumipsel = "1"
DEFAULT_PREFERENCE_qemumips64 = "1"
DEFAULT_PREFERENCE_qemuppc = "1"
DEFAULT_PREFERENCE_qemux86 = "1"
+DEFAULT_PREFERENCE_nokia900 = "1"
SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/${P}.tar.bz2;name=kernel \
${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.${STABLEV}.bz2;apply=yes;name=stablepatch \
@@ -29,7 +30,97 @@ SRC_URI_append_rx1950 = "file://0001-s3c2410_ts-add-fake-pressure-events.patch \
file://0006-s3cmci-minor-fixups.patch \
file://0007-Add-s3c-adc-battery-driver.patch"
+SRC_URI_nokia900 = "\
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/${P}.tar.bz2;name=kernel \
+ ${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/patch-${PV}.3.bz2;apply=yes;name=stable2patch \
+# backported patches
+ file://linux-2.6.36-fix-unprotected-acess-to-task-credentials.patch \
+ file://linux-2.6.36-battery.patch \
+ file://linux-2.6.36-battery2.patch \
+ file://linux-2.6.35-stable-cherry-picks.patch \
+# patches from mrst/fldt tree
+ file://linux-2.6.35-ac-2010-08-24.patch \
+ file://linux-2.6.35-ac-pending.patch \
+ file://linux-2.6.35-ac-revert-mmc-hacks.patch \
+# file://linux-2.6.35-aava-firmware-workaround.patch \
+# file://linux-2.6.35-aava-firmware-workaround-wifi.patch \
+# file://linux-2.6.35-make-gma600-work-on-IA.patch \
+# file://linux-2.6.35-mrst-rtc.patch \
+#
+# file://linux-2.6.35-Add-MIP-header-update-when-FW-is-upgraded.patch
+# file://linux-2.6.35-Bug-fix-for-camera-flash-IC-not-found.patch
+# file://linux-2.6.35-DMA-driver-Add-runtime-PM.patch
+# file://linux-2.6.35-Fix-loadfw.patch
+# file://linux-2.6.35-Subject-Moblin-kernel-MeeGo-AC-Tree-CI-add-supp.patch
+# file://linux-2.6.35-apds9802als-fix-als-sensing-range-value.patch
+# file://linux-2.6.35-mrst_max3110-Make-the-IRQ-option-runtime.patch
+# file://linux-2.6.35-lednames.patch
+# file://linux-2.6.35-mrst-i2c-power-fix.patch
+# file://linux-2.6.35-fix-build-for-cy8ctmg110-driver.patch
+# N900 patches
+# Hacks
+ file://linux-2.6-Hacks-for-Nokia-N900.patch \
+# Touch screen
+ file://linux-2.6.36-Introduce-and-enable-tsc2005-driver.patch \
+# GPU
+ file://linux-2.6-SGX-PVR-driver-for-N900.patch \
+# Bluetooth
+ file://linux-2.6-Bluetooth-Support-for-n900-bluetooth-hardware.patch \
+# TWL4030 MADC (Battery Charging)
+ file://linux-2.6-mfd-twl4030-Driver-for-twl4030-madc-module.patch \
+# Ambient light sensor
+ file://linux-2.6.36-omap-rx51-Platform-support-for-tsl2563-ALS.patch \
+# Accelerometer
+ file://linux-2.6.36-omap-rx51-Platform-support-for-lis3lv02d-acceleromet.patch \
+# FM TX, headphone, TV-out and basic jack detection
+ file://linux-2.6.36-FM-TX-headphone-TV-out-and-basic-jack-detection-supp.patch \
+# Earpiece and headset support
+ file://linux-2.6-Earpiece-and-headset-support-for-N900.patch \
+# Fixes
+ file://linux-2.6.36-wl1251-Use-MODULE_ALIAS-macro-at-correct-postion-for.patch \
+# Cellular modem support
+ file://linux-2.6-n900-modem-support.patch \
+# Fix wl1251 scanning while associated
+# file://linux-2.6.36-wl1251-fix-trigger-scan-timeout-usage.patch \
+# Introduce EEM support in g_nokia which should fix usb networking
+ file://linux-2.6.37-EEM-support-for-g_nokia.patch \
+# omap3isp-rx51 driver
+ file://linux-2.6-omap3isp-rx51.patch \
+# omap3camera driver
+#linux-2.6-omap3camera.patch
+# TI dspbridge driver
+ file://linux-2.6.36-tidspbridge.patch \
+# lp5523 platform data for rx51 board
+ file://linux-2.6.37-omap3-rx51-Platform-support-for-lp5523-led-chip.patch \
+# usb charger platform device support
+ file://linux-2.6.37-omap-rx51-add-support-for-USB-chargers.patch \
+# isp1704 usb charger detection driver
+ file://linux-2.6.37-power_supply-add-isp1704-charger-detection-driver.patch \
+ file://linux-2.6.37-power_supply-add-types-for-USB-chargers.patch \
+ file://linux-2.6-usb-musb-ignore-spurious-SESSREQ-interrupts.patch \
+# boot time and power patches
+ file://linux-2.6.29-dont-wait-for-mouse.patch \
+ file://linux-2.6-usb-uvc-autosuspend.patch \
+ file://linux-2.6-usb-bt-autosuspend.patch \
+# Patches to help PowerTOP
+ file://linux-2.6.33-vfs-tracepoints.patch \
+ file://linux-2.6.33-ahci-alpm-accounting.patch \
+ file://linux-2.6.35-rc4-annotate-device-pm.patch \
+ file://linux-2.6.36-powertop-timer-tracing.patch \
+# Fix the slab timer to not be a power hog
+ file://linux-2.6.35-slab-timer.patch \
+# Fix Linux deliberately skewing the timer for
+# historic reasons that are no longer true.
+ file://linux-2.6.35-dont-skew-the-tick.patch \
+#fix inconsistent mmc device naming at boot time which prevent booting sometimes
+ file://inconsistent-mmc-fix-2.6.35.patch \
+ file://defconfig "
+
+CMDLINE_nokia900_shr = "snd-soc-rx51.hp_lim=42 snd-soc-tlv320aic3x.hp_dac_lim=6 console=tty1 root=/dev/mmcblk1p2 rootwait panic=20 debug"
+
SRC_URI[kernel.md5sum] = "091abeb4684ce03d1d936851618687b6"
SRC_URI[kernel.sha256sum] = "18b2e2c336032e366c942622b77302cb05fc034fb19018f086a4ebc9ed41bfcf"
SRC_URI[stablepatch.md5sum] = "198e4e72ea9cc7f9f25bb5881167aa2e"
SRC_URI[stablepatch.sha256sum] = "cc8bd636ba49ee7ad1095cebf32a4bf0d2edcd60a5aaf29206297e9218904eb1"
+SRC_URI[stable2patch.md5sum] = "a921f7789b7047b84f30a6f283cf6d07"
+SRC_URI[stable2patch.sha256sum] = "94d321099f20f47dc681304a630391322e0e4d6672bb1106a621e6347c44db83"